summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--BUILD/FINISH.sh3
-rwxr-xr-xBUILD/autorun.sh4
-rwxr-xr-xBUILD/compile-alpha-cxx2
-rwxr-xr-xBUILD/compile-alpha-debug2
-rwxr-xr-xBUILD/compile-dist4
-rwxr-xr-xBUILD/compile-ia64-debug-max2
-rwxr-xr-xBUILD/compile-pentium-pgcc1
-rw-r--r--BitKeeper/etc/gone2
-rw-r--r--BitKeeper/etc/logging_ok3
-rwxr-xr-xBitKeeper/triggers/post-commit2
-rw-r--r--Makefile.am8
-rw-r--r--config/ac-macros/ha_berkeley.m42
-rw-r--r--config/ac-macros/ha_innodb.m470
-rw-r--r--config/ac-macros/ha_ndbcluster.m49
-rw-r--r--config/ac-macros/ha_partition.m430
-rw-r--r--configure.in126
-rw-r--r--extra/perror.c2
-rw-r--r--heap/Makefile.am33
-rw-r--r--include/Makefile.am2
-rw-r--r--include/my_base.h1
-rw-r--r--include/my_bitmap.h94
-rw-r--r--include/my_global.h88
-rw-r--r--include/my_trie.h142
-rw-r--r--include/mysql_com.h2
-rw-r--r--include/queues.h3
-rw-r--r--innobase/include/Makefile.i6
-rw-r--r--libmysqld/Makefile.am8
-rw-r--r--myisam/ft_boolean_search.c735
-rw-r--r--mysql-test/include/have_partition.inc4
-rwxr-xr-xmysql-test/mysql-test-run.pl4
-rw-r--r--mysql-test/mysql-test-run.sh4
-rw-r--r--mysql-test/ndb/ndbcluster.sh2
-rw-r--r--mysql-test/r/have_partition.require2
-rw-r--r--mysql-test/r/ndb_partition_key.result71
-rw-r--r--mysql-test/r/ndb_partition_range.result105
-rw-r--r--mysql-test/r/partition.result355
-rw-r--r--mysql-test/r/partition_hash.result66
-rw-r--r--mysql-test/r/partition_list.result342
-rw-r--r--mysql-test/r/partition_order.result733
-rw-r--r--mysql-test/r/partition_range.result455
-rw-r--r--mysql-test/t/ndb_partition_key.test58
-rw-r--r--mysql-test/t/ndb_partition_range.test86
-rw-r--r--mysql-test/t/partition.test494
-rw-r--r--mysql-test/t/partition_hash.test77
-rw-r--r--mysql-test/t/partition_list.test316
-rw-r--r--mysql-test/t/partition_order.test828
-rw-r--r--mysql-test/t/partition_range.test560
-rw-r--r--mysys/Makefile.am8
-rw-r--r--mysys/my_bitmap.c1010
-rw-r--r--mysys/queues.c400
-rw-r--r--mysys/trie.c237
-rw-r--r--ndb/Makefile.am30
-rw-r--r--ndb/config/common.mk.am12
-rw-r--r--ndb/config/type_kernel.mk.am18
-rw-r--r--ndb/config/type_mgmapiclient.mk.am2
-rw-r--r--ndb/config/type_ndbapi.mk.am12
-rw-r--r--ndb/config/type_ndbapiclient.mk.am2
-rw-r--r--ndb/config/type_ndbapitest.mk.am14
-rw-r--r--ndb/config/type_ndbapitools.mk.am15
-rw-r--r--ndb/config/type_util.mk.am6
-rw-r--r--ndb/docs/Makefile.am114
-rw-r--r--ndb/include/Makefile.am51
-rw-r--r--ndb/include/kernel/AttributeHeader.hpp215
-rw-r--r--ndb/include/kernel/GlobalSignalNumbers.h949
-rw-r--r--ndb/include/kernel/ndb_limits.h132
-rw-r--r--ndb/include/kernel/signaldata/CreateFragmentation.hpp101
-rw-r--r--ndb/include/kernel/signaldata/DictTabInfo.hpp515
-rw-r--r--ndb/include/kernel/signaldata/FireTrigOrd.hpp200
-rw-r--r--ndb/include/ndbapi/NdbDictionary.hpp1334
-rw-r--r--ndb/ndbapi-examples/mgmapi_logevent_example/Makefile23
-rw-r--r--ndb/ndbapi-examples/ndbapi_async_example/Makefile23
-rw-r--r--ndb/ndbapi-examples/ndbapi_async_example1/Makefile21
-rw-r--r--ndb/ndbapi-examples/ndbapi_event_example/Makefile23
-rw-r--r--ndb/ndbapi-examples/ndbapi_retries_example/Makefile21
-rw-r--r--ndb/ndbapi-examples/ndbapi_scan_example/Makefile23
-rw-r--r--ndb/ndbapi-examples/ndbapi_simple_example/Makefile23
-rw-r--r--ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile23
-rw-r--r--ndb/src/Makefile.am33
-rw-r--r--ndb/src/common/debugger/Makefile.am25
-rw-r--r--ndb/src/common/debugger/signaldata/CreateFragmentation.cpp56
-rw-r--r--ndb/src/common/debugger/signaldata/Makefile.am47
-rw-r--r--ndb/src/common/logger/Makefile.am25
-rw-r--r--ndb/src/common/mgmcommon/Makefile.am28
-rw-r--r--ndb/src/common/portlib/Makefile.am43
-rw-r--r--ndb/src/common/transporter/Makefile.am36
-rw-r--r--ndb/src/common/util/Makefile.am49
-rw-r--r--ndb/src/common/util/version.c243
-rw-r--r--ndb/src/cw/cpcd/Makefile.am20
-rw-r--r--ndb/src/kernel/Makefile.am75
-rw-r--r--ndb/src/kernel/blocks/backup/Makefile.am24
-rw-r--r--ndb/src/kernel/blocks/cmvmi/Makefile.am24
-rw-r--r--ndb/src/kernel/blocks/dbacc/Dbacc.hpp1470
-rw-r--r--ndb/src/kernel/blocks/dbacc/DbaccInit.cpp343
-rw-r--r--ndb/src/kernel/blocks/dbacc/DbaccMain.cpp11817
-rw-r--r--ndb/src/kernel/blocks/dbacc/Makefile.am26
-rw-r--r--ndb/src/kernel/blocks/dbdict/Dbdict.cpp12104
-rw-r--r--ndb/src/kernel/blocks/dbdict/Dbdict.hpp2021
-rw-r--r--ndb/src/kernel/blocks/dbdict/Makefile.am25
-rw-r--r--ndb/src/kernel/blocks/dbdih/Dbdih.hpp1603
-rw-r--r--ndb/src/kernel/blocks/dbdih/DbdihMain.cpp14319
-rw-r--r--ndb/src/kernel/blocks/dbdih/Makefile.am23
-rw-r--r--ndb/src/kernel/blocks/dblqh/Dblqh.hpp2956
-rw-r--r--ndb/src/kernel/blocks/dblqh/DblqhInit.cpp455
-rw-r--r--ndb/src/kernel/blocks/dblqh/DblqhMain.cpp18635
-rw-r--r--ndb/src/kernel/blocks/dblqh/Makefile.am25
-rw-r--r--ndb/src/kernel/blocks/dbtc/Dbtc.hpp1948
-rw-r--r--ndb/src/kernel/blocks/dbtc/DbtcMain.cpp13096
-rw-r--r--ndb/src/kernel/blocks/dbtc/Makefile.am23
-rw-r--r--ndb/src/kernel/blocks/dbtup/Dbtup.hpp2469
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp1186
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp1152
-rw-r--r--ndb/src/kernel/blocks/dbtup/Makefile.am42
-rw-r--r--ndb/src/kernel/blocks/dbtux/Makefile.am34
-rw-r--r--ndb/src/kernel/blocks/dbutil/Makefile.am23
-rw-r--r--ndb/src/kernel/blocks/grep/Makefile.am23
-rw-r--r--ndb/src/kernel/blocks/ndbcntr/Makefile.am26
-rw-r--r--ndb/src/kernel/blocks/ndbfs/Makefile.am27
-rw-r--r--ndb/src/kernel/blocks/qmgr/Makefile.am25
-rw-r--r--ndb/src/kernel/blocks/suma/Makefile.am23
-rw-r--r--ndb/src/kernel/blocks/trix/Makefile.am23
-rw-r--r--ndb/src/kernel/error/Makefile.am25
-rw-r--r--ndb/src/kernel/vm/Makefile.am44
-rw-r--r--ndb/src/mgmapi/Makefile.am30
-rw-r--r--ndb/src/mgmclient/Makefile.am58
-rw-r--r--ndb/src/mgmsrv/Makefile.am60
-rw-r--r--ndb/src/ndbapi/Makefile.am62
-rw-r--r--ndb/src/ndbapi/NdbBlob.cpp1589
-rw-r--r--ndb/src/ndbapi/NdbDictionary.cpp1054
-rw-r--r--ndb/src/ndbapi/NdbDictionaryImpl.cpp3163
-rw-r--r--ndb/src/ndbapi/NdbDictionaryImpl.hpp706
-rw-r--r--ndb/test/ndbapi/Makefile.am159
-rw-r--r--ndb/test/ndbapi/bank/Makefile.am24
-rw-r--r--ndb/test/run-test/Makefile.am34
-rw-r--r--ndb/test/run-test/README43
-rw-r--r--ndb/test/src/Makefile.am35
-rw-r--r--ndb/test/tools/Makefile.am30
-rw-r--r--ndb/tools/Makefile.am157
-rw-r--r--ndb/tools/restore/Restore.cpp940
-rw-r--r--scripts/Makefile.am2
-rw-r--r--scripts/mysql_fix_privilege_tables.sql7
-rw-r--r--scripts/mysql_tableinfo.sh494
-rw-r--r--server-tools/Makefile.am3
-rw-r--r--sql/Makefile.am14
-rw-r--r--sql/examples/ha_tina.cc19
-rw-r--r--sql/field.cc44
-rw-r--r--sql/field.h23
-rw-r--r--sql/ha_berkeley.cc8
-rw-r--r--sql/ha_berkeley.h4
-rw-r--r--sql/ha_federated.cc31
-rw-r--r--sql/ha_federated.h2
-rw-r--r--sql/ha_innodb.cc52
-rw-r--r--sql/ha_innodb.h12
-rw-r--r--sql/ha_myisam.cc4
-rw-r--r--sql/ha_myisammrg.cc2
-rw-r--r--sql/ha_ndbcluster.cc679
-rw-r--r--sql/ha_ndbcluster.h36
-rw-r--r--sql/ha_partition.cc3179
-rw-r--r--sql/ha_partition.h916
-rw-r--r--sql/handler.cc201
-rw-r--r--sql/handler.h469
-rw-r--r--sql/item.cc17
-rw-r--r--sql/item_subselect.cc4
-rw-r--r--sql/key.cc83
-rw-r--r--sql/lex.h13
-rw-r--r--sql/lock.cc3
-rw-r--r--sql/log.cc6
-rw-r--r--sql/log_event.cc11
-rw-r--r--sql/mysql_priv.h24
-rw-r--r--sql/mysqld.cc86
-rw-r--r--sql/opt_range.cc76
-rw-r--r--sql/opt_sum.cc4
-rw-r--r--sql/records.cc68
-rw-r--r--sql/repl_failsafe.cc9
-rw-r--r--sql/rpl_filter.cc539
-rw-r--r--sql/rpl_filter.h113
-rw-r--r--sql/set_var.cc1
-rw-r--r--sql/share/errmsg.txt86
-rw-r--r--sql/slave.cc407
-rw-r--r--sql/slave.h37
-rw-r--r--sql/sp.cc4
-rw-r--r--sql/sql_acl.cc52
-rw-r--r--sql/sql_acl.h3
-rw-r--r--sql/sql_base.cc52
-rw-r--r--sql/sql_bitmap.h8
-rw-r--r--sql/sql_cache.cc2
-rw-r--r--sql/sql_class.h20
-rw-r--r--sql/sql_delete.cc26
-rw-r--r--sql/sql_handler.cc6
-rw-r--r--sql/sql_help.cc4
-rw-r--r--sql/sql_insert.cc25
-rw-r--r--sql/sql_lex.cc1
-rw-r--r--sql/sql_lex.h3
-rw-r--r--sql/sql_load.cc17
-rw-r--r--sql/sql_parse.cc24
-rw-r--r--sql/sql_partition.cc3117
-rw-r--r--sql/sql_prepare.cc2
-rw-r--r--sql/sql_repl.cc5
-rw-r--r--sql/sql_repl.h1
-rw-r--r--sql/sql_select.cc106
-rw-r--r--sql/sql_select.h1
-rw-r--r--sql/sql_show.cc34
-rw-r--r--sql/sql_table.cc147
-rw-r--r--sql/sql_udf.cc2
-rw-r--r--sql/sql_update.cc133
-rw-r--r--sql/sql_yacc.yy460
-rw-r--r--sql/table.cc47
-rw-r--r--sql/table.h7
-rw-r--r--sql/tztime.cc10
-rw-r--r--sql/unireg.cc59
-rw-r--r--sql/unireg.h1
-rw-r--r--storage/Makefile.am27
-rw-r--r--storage/bdb/LICENSE (renamed from bdb/LICENSE)0
-rw-r--r--storage/bdb/Makefile.in (renamed from bdb/Makefile.in)0
-rw-r--r--storage/bdb/btree/bt_compare.c (renamed from bdb/btree/bt_compare.c)0
-rw-r--r--storage/bdb/btree/bt_conv.c (renamed from bdb/btree/bt_conv.c)0
-rw-r--r--storage/bdb/btree/bt_curadj.c (renamed from bdb/btree/bt_curadj.c)0
-rw-r--r--storage/bdb/btree/bt_cursor.c (renamed from bdb/btree/bt_cursor.c)0
-rw-r--r--storage/bdb/btree/bt_delete.c (renamed from bdb/btree/bt_delete.c)0
-rw-r--r--storage/bdb/btree/bt_method.c (renamed from bdb/btree/bt_method.c)0
-rw-r--r--storage/bdb/btree/bt_open.c (renamed from bdb/btree/bt_open.c)0
-rw-r--r--storage/bdb/btree/bt_put.c (renamed from bdb/btree/bt_put.c)0
-rw-r--r--storage/bdb/btree/bt_rec.c (renamed from bdb/btree/bt_rec.c)0
-rw-r--r--storage/bdb/btree/bt_reclaim.c (renamed from bdb/btree/bt_reclaim.c)0
-rw-r--r--storage/bdb/btree/bt_recno.c (renamed from bdb/btree/bt_recno.c)0
-rw-r--r--storage/bdb/btree/bt_rsearch.c (renamed from bdb/btree/bt_rsearch.c)0
-rw-r--r--storage/bdb/btree/bt_search.c (renamed from bdb/btree/bt_search.c)0
-rw-r--r--storage/bdb/btree/bt_split.c (renamed from bdb/btree/bt_split.c)0
-rw-r--r--storage/bdb/btree/bt_stat.c (renamed from bdb/btree/bt_stat.c)0
-rw-r--r--storage/bdb/btree/bt_upgrade.c (renamed from bdb/btree/bt_upgrade.c)0
-rw-r--r--storage/bdb/btree/bt_verify.c (renamed from bdb/btree/bt_verify.c)0
-rw-r--r--storage/bdb/btree/btree.src (renamed from bdb/btree/btree.src)0
-rw-r--r--storage/bdb/build_unix/.IGNORE_ME (renamed from bdb/build_unix/.IGNORE_ME)0
-rw-r--r--storage/bdb/build_vxworks/BerkeleyDB.wsp (renamed from bdb/build_vxworks/BerkeleyDB.wsp)0
-rw-r--r--storage/bdb/build_vxworks/dbdemo/README (renamed from bdb/build_vxworks/dbdemo/README)0
-rw-r--r--storage/bdb/build_win32/Berkeley_DB.dsw (renamed from bdb/build_win32/Berkeley_DB.dsw)0
-rw-r--r--storage/bdb/build_win32/app_dsp.src (renamed from bdb/build_win32/app_dsp.src)0
-rw-r--r--storage/bdb/build_win32/build_all.dsp (renamed from bdb/build_win32/build_all.dsp)0
-rw-r--r--storage/bdb/build_win32/db_java_xa.dsp (renamed from bdb/build_win32/db_java_xa.dsp)0
-rw-r--r--storage/bdb/build_win32/db_java_xaj.mak (renamed from bdb/build_win32/db_java_xaj.mak)0
-rw-r--r--storage/bdb/build_win32/db_lib.dsp (renamed from bdb/build_win32/db_lib.dsp)0
-rw-r--r--storage/bdb/build_win32/db_test.src (renamed from bdb/build_win32/db_test.src)0
-rw-r--r--storage/bdb/build_win32/dbkill.cpp (renamed from bdb/build_win32/dbkill.cpp)0
-rw-r--r--storage/bdb/build_win32/dllmain.c (renamed from bdb/build_win32/dllmain.c)0
-rw-r--r--storage/bdb/build_win32/dynamic_dsp.src (renamed from bdb/build_win32/dynamic_dsp.src)0
-rw-r--r--storage/bdb/build_win32/java_dsp.src (renamed from bdb/build_win32/java_dsp.src)0
-rw-r--r--storage/bdb/build_win32/libdb_tcl.def (renamed from bdb/build_win32/libdb_tcl.def)0
-rw-r--r--storage/bdb/build_win32/libdbrc.src (renamed from bdb/build_win32/libdbrc.src)0
-rw-r--r--storage/bdb/build_win32/srcfile_dsp.src (renamed from bdb/build_win32/srcfile_dsp.src)0
-rw-r--r--storage/bdb/build_win32/static_dsp.src (renamed from bdb/build_win32/static_dsp.src)0
-rw-r--r--storage/bdb/build_win32/tcl_dsp.src (renamed from bdb/build_win32/tcl_dsp.src)0
-rw-r--r--storage/bdb/clib/getcwd.c (renamed from bdb/clib/getcwd.c)0
-rw-r--r--storage/bdb/clib/getopt.c (renamed from bdb/clib/getopt.c)0
-rw-r--r--storage/bdb/clib/memcmp.c (renamed from bdb/clib/memcmp.c)0
-rw-r--r--storage/bdb/clib/memmove.c (renamed from bdb/clib/memmove.c)0
-rw-r--r--storage/bdb/clib/raise.c (renamed from bdb/clib/raise.c)0
-rw-r--r--storage/bdb/clib/snprintf.c (renamed from bdb/clib/snprintf.c)0
-rw-r--r--storage/bdb/clib/strcasecmp.c (renamed from bdb/clib/strcasecmp.c)0
-rw-r--r--storage/bdb/clib/strdup.c (renamed from bdb/clib/strdup.c)0
-rw-r--r--storage/bdb/clib/strerror.c (renamed from bdb/clib/strerror.c)0
-rw-r--r--storage/bdb/clib/vsnprintf.c (renamed from bdb/clib/vsnprintf.c)0
-rw-r--r--storage/bdb/common/db_byteorder.c (renamed from bdb/common/db_byteorder.c)0
-rw-r--r--storage/bdb/common/db_err.c (renamed from bdb/common/db_err.c)0
-rw-r--r--storage/bdb/common/db_getlong.c (renamed from bdb/common/db_getlong.c)0
-rw-r--r--storage/bdb/common/db_idspace.c (renamed from bdb/common/db_idspace.c)0
-rw-r--r--storage/bdb/common/db_log2.c (renamed from bdb/common/db_log2.c)0
-rw-r--r--storage/bdb/common/util_arg.c (renamed from bdb/common/util_arg.c)0
-rw-r--r--storage/bdb/common/util_cache.c (renamed from bdb/common/util_cache.c)0
-rw-r--r--storage/bdb/common/util_log.c (renamed from bdb/common/util_log.c)0
-rw-r--r--storage/bdb/common/util_sig.c (renamed from bdb/common/util_sig.c)0
-rw-r--r--storage/bdb/cxx/cxx_db.cpp (renamed from bdb/cxx/cxx_db.cpp)0
-rw-r--r--storage/bdb/cxx/cxx_dbc.cpp (renamed from bdb/cxx/cxx_dbc.cpp)0
-rw-r--r--storage/bdb/cxx/cxx_dbt.cpp (renamed from bdb/cxx/cxx_dbt.cpp)0
-rw-r--r--storage/bdb/cxx/cxx_env.cpp (renamed from bdb/cxx/cxx_env.cpp)0
-rw-r--r--storage/bdb/cxx/cxx_except.cpp (renamed from bdb/cxx/cxx_except.cpp)0
-rw-r--r--storage/bdb/cxx/cxx_lock.cpp (renamed from bdb/cxx/cxx_lock.cpp)0
-rw-r--r--storage/bdb/cxx/cxx_logc.cpp (renamed from bdb/cxx/cxx_logc.cpp)0
-rw-r--r--storage/bdb/cxx/cxx_mpool.cpp (renamed from bdb/cxx/cxx_mpool.cpp)0
-rw-r--r--storage/bdb/cxx/cxx_txn.cpp (renamed from bdb/cxx/cxx_txn.cpp)0
-rw-r--r--storage/bdb/db/crdel.src (renamed from bdb/db/crdel.src)0
-rw-r--r--storage/bdb/db/crdel_rec.c (renamed from bdb/db/crdel_rec.c)0
-rw-r--r--storage/bdb/db/db.c (renamed from bdb/db/db.c)0
-rw-r--r--storage/bdb/db/db.src (renamed from bdb/db/db.src)0
-rw-r--r--storage/bdb/db/db_am.c (renamed from bdb/db/db_am.c)0
-rw-r--r--storage/bdb/db/db_cam.c (renamed from bdb/db/db_cam.c)0
-rw-r--r--storage/bdb/db/db_conv.c (renamed from bdb/db/db_conv.c)0
-rw-r--r--storage/bdb/db/db_dispatch.c (renamed from bdb/db/db_dispatch.c)0
-rw-r--r--storage/bdb/db/db_dup.c (renamed from bdb/db/db_dup.c)0
-rw-r--r--storage/bdb/db/db_iface.c (renamed from bdb/db/db_iface.c)0
-rw-r--r--storage/bdb/db/db_join.c (renamed from bdb/db/db_join.c)0
-rw-r--r--storage/bdb/db/db_meta.c (renamed from bdb/db/db_meta.c)0
-rw-r--r--storage/bdb/db/db_method.c (renamed from bdb/db/db_method.c)0
-rw-r--r--storage/bdb/db/db_open.c (renamed from bdb/db/db_open.c)0
-rw-r--r--storage/bdb/db/db_overflow.c (renamed from bdb/db/db_overflow.c)0
-rw-r--r--storage/bdb/db/db_pr.c (renamed from bdb/db/db_pr.c)0
-rw-r--r--storage/bdb/db/db_rec.c (renamed from bdb/db/db_rec.c)0
-rw-r--r--storage/bdb/db/db_reclaim.c (renamed from bdb/db/db_reclaim.c)0
-rw-r--r--storage/bdb/db/db_remove.c (renamed from bdb/db/db_remove.c)0
-rw-r--r--storage/bdb/db/db_rename.c (renamed from bdb/db/db_rename.c)0
-rw-r--r--storage/bdb/db/db_ret.c (renamed from bdb/db/db_ret.c)0
-rw-r--r--storage/bdb/db/db_truncate.c (renamed from bdb/db/db_truncate.c)0
-rw-r--r--storage/bdb/db/db_upg.c (renamed from bdb/db/db_upg.c)0
-rw-r--r--storage/bdb/db/db_upg_opd.c (renamed from bdb/db/db_upg_opd.c)0
-rw-r--r--storage/bdb/db/db_vrfy.c (renamed from bdb/db/db_vrfy.c)0
-rw-r--r--storage/bdb/db/db_vrfyutil.c (renamed from bdb/db/db_vrfyutil.c)0
-rw-r--r--storage/bdb/db185/db185.c (renamed from bdb/db185/db185.c)0
-rw-r--r--storage/bdb/db185/db185_int.in (renamed from bdb/db185/db185_int.in)0
-rw-r--r--storage/bdb/db_archive/db_archive.c (renamed from bdb/db_archive/db_archive.c)0
-rw-r--r--storage/bdb/db_checkpoint/db_checkpoint.c (renamed from bdb/db_checkpoint/db_checkpoint.c)0
-rw-r--r--storage/bdb/db_deadlock/db_deadlock.c (renamed from bdb/db_deadlock/db_deadlock.c)0
-rw-r--r--storage/bdb/db_dump/db_dump.c (renamed from bdb/db_dump/db_dump.c)0
-rw-r--r--storage/bdb/db_dump185/db_dump185.c (renamed from bdb/db_dump185/db_dump185.c)0
-rw-r--r--storage/bdb/db_load/db_load.c (renamed from bdb/db_load/db_load.c)0
-rw-r--r--storage/bdb/db_printlog/README (renamed from bdb/db_printlog/README)0
-rw-r--r--storage/bdb/db_printlog/commit.awk (renamed from bdb/db_printlog/commit.awk)0
-rw-r--r--storage/bdb/db_printlog/count.awk (renamed from bdb/db_printlog/count.awk)0
-rw-r--r--storage/bdb/db_printlog/db_printlog.c (renamed from bdb/db_printlog/db_printlog.c)0
-rw-r--r--storage/bdb/db_printlog/dbname.awk (renamed from bdb/db_printlog/dbname.awk)0
-rw-r--r--storage/bdb/db_printlog/fileid.awk (renamed from bdb/db_printlog/fileid.awk)0
-rw-r--r--storage/bdb/db_printlog/logstat.awk (renamed from bdb/db_printlog/logstat.awk)0
-rw-r--r--storage/bdb/db_printlog/pgno.awk (renamed from bdb/db_printlog/pgno.awk)0
-rw-r--r--storage/bdb/db_printlog/range.awk (renamed from bdb/db_printlog/range.awk)0
-rw-r--r--storage/bdb/db_printlog/rectype.awk (renamed from bdb/db_printlog/rectype.awk)0
-rw-r--r--storage/bdb/db_printlog/status.awk (renamed from bdb/db_printlog/status.awk)0
-rw-r--r--storage/bdb/db_printlog/txn.awk (renamed from bdb/db_printlog/txn.awk)0
-rw-r--r--storage/bdb/db_recover/db_recover.c (renamed from bdb/db_recover/db_recover.c)0
-rw-r--r--storage/bdb/db_stat/db_stat.c (renamed from bdb/db_stat/db_stat.c)0
-rw-r--r--storage/bdb/db_upgrade/db_upgrade.c (renamed from bdb/db_upgrade/db_upgrade.c)0
-rw-r--r--storage/bdb/db_verify/db_verify.c (renamed from bdb/db_verify/db_verify.c)0
-rw-r--r--storage/bdb/dbinc/btree.h (renamed from bdb/dbinc/btree.h)0
-rw-r--r--storage/bdb/dbinc/crypto.h (renamed from bdb/dbinc/crypto.h)0
-rw-r--r--storage/bdb/dbinc/cxx_common.h (renamed from bdb/dbinc/cxx_common.h)0
-rw-r--r--storage/bdb/dbinc/cxx_except.h (renamed from bdb/dbinc/cxx_except.h)0
-rw-r--r--storage/bdb/dbinc/cxx_int.h (renamed from bdb/dbinc/cxx_int.h)0
-rw-r--r--storage/bdb/dbinc/db.in (renamed from bdb/dbinc/db.in)0
-rw-r--r--storage/bdb/dbinc/db_185.in (renamed from bdb/dbinc/db_185.in)0
-rw-r--r--storage/bdb/dbinc/db_am.h (renamed from bdb/dbinc/db_am.h)0
-rw-r--r--storage/bdb/dbinc/db_cxx.in (renamed from bdb/dbinc/db_cxx.in)0
-rw-r--r--storage/bdb/dbinc/db_dispatch.h (renamed from bdb/dbinc/db_dispatch.h)0
-rw-r--r--storage/bdb/dbinc/db_int.in (renamed from bdb/dbinc/db_int.in)0
-rw-r--r--storage/bdb/dbinc/db_join.h (renamed from bdb/dbinc/db_join.h)0
-rw-r--r--storage/bdb/dbinc/db_page.h (renamed from bdb/dbinc/db_page.h)0
-rw-r--r--storage/bdb/dbinc/db_server_int.h (renamed from bdb/dbinc/db_server_int.h)0
-rw-r--r--storage/bdb/dbinc/db_shash.h (renamed from bdb/dbinc/db_shash.h)0
-rw-r--r--storage/bdb/dbinc/db_swap.h (renamed from bdb/dbinc/db_swap.h)0
-rw-r--r--storage/bdb/dbinc/db_upgrade.h (renamed from bdb/dbinc/db_upgrade.h)0
-rw-r--r--storage/bdb/dbinc/db_verify.h (renamed from bdb/dbinc/db_verify.h)0
-rw-r--r--storage/bdb/dbinc/debug.h (renamed from bdb/dbinc/debug.h)0
-rw-r--r--storage/bdb/dbinc/fop.h (renamed from bdb/dbinc/fop.h)0
-rw-r--r--storage/bdb/dbinc/globals.h (renamed from bdb/dbinc/globals.h)0
-rw-r--r--storage/bdb/dbinc/hash.h (renamed from bdb/dbinc/hash.h)0
-rw-r--r--storage/bdb/dbinc/hmac.h (renamed from bdb/dbinc/hmac.h)0
-rw-r--r--storage/bdb/dbinc/lock.h (renamed from bdb/dbinc/lock.h)0
-rw-r--r--storage/bdb/dbinc/log.h (renamed from bdb/dbinc/log.h)0
-rw-r--r--storage/bdb/dbinc/mp.h (renamed from bdb/dbinc/mp.h)0
-rw-r--r--storage/bdb/dbinc/mutex.h (renamed from bdb/dbinc/mutex.h)0
-rw-r--r--storage/bdb/dbinc/os.h (renamed from bdb/dbinc/os.h)0
-rw-r--r--storage/bdb/dbinc/qam.h (renamed from bdb/dbinc/qam.h)0
-rw-r--r--storage/bdb/dbinc/queue.h (renamed from bdb/dbinc/queue.h)0
-rw-r--r--storage/bdb/dbinc/region.h (renamed from bdb/dbinc/region.h)0
-rw-r--r--storage/bdb/dbinc/rep.h (renamed from bdb/dbinc/rep.h)0
-rw-r--r--storage/bdb/dbinc/shqueue.h (renamed from bdb/dbinc/shqueue.h)0
-rw-r--r--storage/bdb/dbinc/tcl_db.h (renamed from bdb/dbinc/tcl_db.h)0
-rw-r--r--storage/bdb/dbinc/txn.h (renamed from bdb/dbinc/txn.h)0
-rw-r--r--storage/bdb/dbinc/xa.h (renamed from bdb/dbinc/xa.h)0
-rw-r--r--storage/bdb/dbm/dbm.c (renamed from bdb/dbm/dbm.c)0
-rw-r--r--storage/bdb/dbreg/dbreg.c (renamed from bdb/dbreg/dbreg.c)0
-rw-r--r--storage/bdb/dbreg/dbreg.src (renamed from bdb/dbreg/dbreg.src)0
-rw-r--r--storage/bdb/dbreg/dbreg_rec.c (renamed from bdb/dbreg/dbreg_rec.c)0
-rw-r--r--storage/bdb/dbreg/dbreg_util.c (renamed from bdb/dbreg/dbreg_util.c)0
-rw-r--r--storage/bdb/dist/Makefile.in (renamed from bdb/dist/Makefile.in)0
-rw-r--r--storage/bdb/dist/RELEASE (renamed from bdb/dist/RELEASE)0
-rw-r--r--storage/bdb/dist/aclocal/config.ac (renamed from bdb/dist/aclocal/config.ac)0
-rw-r--r--storage/bdb/dist/aclocal/cxx.ac (renamed from bdb/dist/aclocal/cxx.ac)0
-rw-r--r--storage/bdb/dist/aclocal/gcc.ac (renamed from bdb/dist/aclocal/gcc.ac)0
-rw-r--r--storage/bdb/dist/aclocal/libtool.ac (renamed from bdb/dist/aclocal/libtool.ac)0
-rw-r--r--storage/bdb/dist/aclocal/mutex.ac (renamed from bdb/dist/aclocal/mutex.ac)0
-rw-r--r--storage/bdb/dist/aclocal/options.ac (renamed from bdb/dist/aclocal/options.ac)0
-rw-r--r--storage/bdb/dist/aclocal/programs.ac (renamed from bdb/dist/aclocal/programs.ac)0
-rw-r--r--storage/bdb/dist/aclocal/sosuffix.ac (renamed from bdb/dist/aclocal/sosuffix.ac)0
-rw-r--r--storage/bdb/dist/aclocal/tcl.ac (renamed from bdb/dist/aclocal/tcl.ac)0
-rw-r--r--storage/bdb/dist/aclocal/types.ac (renamed from bdb/dist/aclocal/types.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_check_class.ac (renamed from bdb/dist/aclocal_java/ac_check_class.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_check_classpath.ac (renamed from bdb/dist/aclocal_java/ac_check_classpath.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_check_junit.ac (renamed from bdb/dist/aclocal_java/ac_check_junit.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac (renamed from bdb/dist/aclocal_java/ac_check_rqrd_class.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_java_options.ac (renamed from bdb/dist/aclocal_java/ac_java_options.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac (renamed from bdb/dist/aclocal_java/ac_jni_include_dirs.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_prog_jar.ac (renamed from bdb/dist/aclocal_java/ac_prog_jar.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_prog_java.ac (renamed from bdb/dist/aclocal_java/ac_prog_java.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_prog_java_works.ac (renamed from bdb/dist/aclocal_java/ac_prog_java_works.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_prog_javac.ac (renamed from bdb/dist/aclocal_java/ac_prog_javac.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac (renamed from bdb/dist/aclocal_java/ac_prog_javac_works.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac (renamed from bdb/dist/aclocal_java/ac_prog_javadoc.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_prog_javah.ac (renamed from bdb/dist/aclocal_java/ac_prog_javah.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_try_compile_java.ac (renamed from bdb/dist/aclocal_java/ac_try_compile_java.ac)0
-rw-r--r--storage/bdb/dist/aclocal_java/ac_try_run_javac.ac (renamed from bdb/dist/aclocal_java/ac_try_run_javac.ac)0
-rw-r--r--storage/bdb/dist/buildrel (renamed from bdb/dist/buildrel)0
-rwxr-xr-xstorage/bdb/dist/config.guess (renamed from bdb/dist/config.guess)0
-rwxr-xr-xstorage/bdb/dist/config.sub (renamed from bdb/dist/config.sub)0
-rw-r--r--storage/bdb/dist/configure.ac (renamed from bdb/dist/configure.ac)0
-rw-r--r--storage/bdb/dist/db.ecd.in (renamed from bdb/dist/db.ecd.in)0
-rw-r--r--storage/bdb/dist/db.spec.in (renamed from bdb/dist/db.spec.in)0
-rw-r--r--storage/bdb/dist/gen_inc.awk (renamed from bdb/dist/gen_inc.awk)0
-rw-r--r--storage/bdb/dist/gen_rec.awk (renamed from bdb/dist/gen_rec.awk)0
-rw-r--r--storage/bdb/dist/gen_rpc.awk (renamed from bdb/dist/gen_rpc.awk)0
-rwxr-xr-xstorage/bdb/dist/install-sh (renamed from bdb/dist/install-sh)0
-rw-r--r--storage/bdb/dist/ltmain.sh (renamed from bdb/dist/ltmain.sh)0
-rw-r--r--storage/bdb/dist/pubdef.in (renamed from bdb/dist/pubdef.in)0
-rw-r--r--storage/bdb/dist/s_all (renamed from bdb/dist/s_all)0
-rwxr-xr-xstorage/bdb/dist/s_config (renamed from bdb/dist/s_config)0
-rw-r--r--storage/bdb/dist/s_crypto (renamed from bdb/dist/s_crypto)0
-rw-r--r--storage/bdb/dist/s_dir (renamed from bdb/dist/s_dir)0
-rwxr-xr-xstorage/bdb/dist/s_include (renamed from bdb/dist/s_include)0
-rwxr-xr-xstorage/bdb/dist/s_java (renamed from bdb/dist/s_java)0
-rwxr-xr-xstorage/bdb/dist/s_javah (renamed from bdb/dist/s_javah)0
-rwxr-xr-xstorage/bdb/dist/s_perm (renamed from bdb/dist/s_perm)0
-rwxr-xr-xstorage/bdb/dist/s_readme (renamed from bdb/dist/s_readme)0
-rwxr-xr-xstorage/bdb/dist/s_recover (renamed from bdb/dist/s_recover)0
-rw-r--r--storage/bdb/dist/s_rpc (renamed from bdb/dist/s_rpc)0
-rwxr-xr-xstorage/bdb/dist/s_symlink (renamed from bdb/dist/s_symlink)0
-rwxr-xr-xstorage/bdb/dist/s_tags (renamed from bdb/dist/s_tags)0
-rwxr-xr-xstorage/bdb/dist/s_test (renamed from bdb/dist/s_test)0
-rw-r--r--storage/bdb/dist/s_vxworks (renamed from bdb/dist/s_vxworks)0
-rwxr-xr-xstorage/bdb/dist/s_win32 (renamed from bdb/dist/s_win32)0
-rw-r--r--storage/bdb/dist/s_win32_dsp (renamed from bdb/dist/s_win32_dsp)0
-rw-r--r--storage/bdb/dist/srcfiles.in (renamed from bdb/dist/srcfiles.in)0
-rw-r--r--storage/bdb/dist/template/rec_ctemp (renamed from bdb/dist/template/rec_ctemp)0
-rw-r--r--storage/bdb/dist/vx_2.0/BerkeleyDB.wpj (renamed from bdb/dist/vx_2.0/BerkeleyDB.wpj)0
-rw-r--r--storage/bdb/dist/vx_2.0/wpj.in (renamed from bdb/dist/vx_2.0/wpj.in)0
-rw-r--r--storage/bdb/dist/vx_3.1/Makefile.custom (renamed from bdb/dist/vx_3.1/Makefile.custom)0
-rw-r--r--storage/bdb/dist/vx_3.1/cdf.1 (renamed from bdb/dist/vx_3.1/cdf.1)0
-rw-r--r--storage/bdb/dist/vx_3.1/cdf.2 (renamed from bdb/dist/vx_3.1/cdf.2)0
-rw-r--r--storage/bdb/dist/vx_3.1/cdf.3 (renamed from bdb/dist/vx_3.1/cdf.3)0
-rw-r--r--storage/bdb/dist/vx_3.1/component.cdf (renamed from bdb/dist/vx_3.1/component.cdf)0
-rw-r--r--storage/bdb/dist/vx_3.1/component.wpj (renamed from bdb/dist/vx_3.1/component.wpj)0
-rw-r--r--storage/bdb/dist/vx_3.1/wpj.1 (renamed from bdb/dist/vx_3.1/wpj.1)0
-rw-r--r--storage/bdb/dist/vx_3.1/wpj.2 (renamed from bdb/dist/vx_3.1/wpj.2)0
-rw-r--r--storage/bdb/dist/vx_3.1/wpj.3 (renamed from bdb/dist/vx_3.1/wpj.3)0
-rw-r--r--storage/bdb/dist/vx_3.1/wpj.4 (renamed from bdb/dist/vx_3.1/wpj.4)0
-rw-r--r--storage/bdb/dist/vx_3.1/wpj.5 (renamed from bdb/dist/vx_3.1/wpj.5)0
-rwxr-xr-xstorage/bdb/dist/vx_buildcd (renamed from bdb/dist/vx_buildcd)0
-rw-r--r--storage/bdb/dist/vx_config.in (renamed from bdb/dist/vx_config.in)0
-rw-r--r--storage/bdb/dist/vx_setup/CONFIG.in (renamed from bdb/dist/vx_setup/CONFIG.in)0
-rw-r--r--storage/bdb/dist/vx_setup/LICENSE.TXT (renamed from bdb/dist/vx_setup/LICENSE.TXT)0
-rw-r--r--storage/bdb/dist/vx_setup/MESSAGES.TCL (renamed from bdb/dist/vx_setup/MESSAGES.TCL)0
-rw-r--r--storage/bdb/dist/vx_setup/README.in (renamed from bdb/dist/vx_setup/README.in)0
-rw-r--r--storage/bdb/dist/vx_setup/SETUP.BMP (renamed from bdb/dist/vx_setup/SETUP.BMP)bin187962 -> 187962 bytes
-rw-r--r--storage/bdb/dist/vx_setup/vx_allfile.in (renamed from bdb/dist/vx_setup/vx_allfile.in)0
-rw-r--r--storage/bdb/dist/vx_setup/vx_demofile.in (renamed from bdb/dist/vx_setup/vx_demofile.in)0
-rw-r--r--storage/bdb/dist/vx_setup/vx_setup.in (renamed from bdb/dist/vx_setup/vx_setup.in)0
-rw-r--r--storage/bdb/dist/win_config.in (renamed from bdb/dist/win_config.in)0
-rw-r--r--storage/bdb/dist/win_exports.in (renamed from bdb/dist/win_exports.in)0
-rw-r--r--storage/bdb/env/db_salloc.c (renamed from bdb/env/db_salloc.c)0
-rw-r--r--storage/bdb/env/db_shash.c (renamed from bdb/env/db_shash.c)0
-rw-r--r--storage/bdb/env/env_file.c (renamed from bdb/env/env_file.c)0
-rw-r--r--storage/bdb/env/env_method.c (renamed from bdb/env/env_method.c)0
-rw-r--r--storage/bdb/env/env_method.c.b (renamed from bdb/env/env_method.c.b)0
-rw-r--r--storage/bdb/env/env_open.c (renamed from bdb/env/env_open.c)0
-rw-r--r--storage/bdb/env/env_recover.c (renamed from bdb/env/env_recover.c)0
-rw-r--r--storage/bdb/env/env_region.c (renamed from bdb/env/env_region.c)0
-rw-r--r--storage/bdb/fileops/fileops.src (renamed from bdb/fileops/fileops.src)0
-rw-r--r--storage/bdb/fileops/fop_basic.c (renamed from bdb/fileops/fop_basic.c)0
-rw-r--r--storage/bdb/fileops/fop_rec.c (renamed from bdb/fileops/fop_rec.c)0
-rw-r--r--storage/bdb/fileops/fop_util.c (renamed from bdb/fileops/fop_util.c)0
-rw-r--r--storage/bdb/hash/hash.c (renamed from bdb/hash/hash.c)0
-rw-r--r--storage/bdb/hash/hash.src (renamed from bdb/hash/hash.src)0
-rw-r--r--storage/bdb/hash/hash_conv.c (renamed from bdb/hash/hash_conv.c)0
-rw-r--r--storage/bdb/hash/hash_dup.c (renamed from bdb/hash/hash_dup.c)0
-rw-r--r--storage/bdb/hash/hash_func.c (renamed from bdb/hash/hash_func.c)0
-rw-r--r--storage/bdb/hash/hash_meta.c (renamed from bdb/hash/hash_meta.c)0
-rw-r--r--storage/bdb/hash/hash_method.c (renamed from bdb/hash/hash_method.c)0
-rw-r--r--storage/bdb/hash/hash_open.c (renamed from bdb/hash/hash_open.c)0
-rw-r--r--storage/bdb/hash/hash_page.c (renamed from bdb/hash/hash_page.c)0
-rw-r--r--storage/bdb/hash/hash_rec.c (renamed from bdb/hash/hash_rec.c)0
-rw-r--r--storage/bdb/hash/hash_reclaim.c (renamed from bdb/hash/hash_reclaim.c)0
-rw-r--r--storage/bdb/hash/hash_stat.c (renamed from bdb/hash/hash_stat.c)0
-rw-r--r--storage/bdb/hash/hash_upgrade.c (renamed from bdb/hash/hash_upgrade.c)0
-rw-r--r--storage/bdb/hash/hash_verify.c (renamed from bdb/hash/hash_verify.c)0
-rw-r--r--storage/bdb/hmac/hmac.c (renamed from bdb/hmac/hmac.c)0
-rw-r--r--storage/bdb/hmac/sha1.c (renamed from bdb/hmac/sha1.c)0
-rw-r--r--storage/bdb/hsearch/hsearch.c (renamed from bdb/hsearch/hsearch.c)0
-rw-r--r--storage/bdb/libdb_java/checkapi.prl (renamed from bdb/libdb_java/checkapi.prl)0
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_Db.h (renamed from bdb/libdb_java/com_sleepycat_db_Db.h)0
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_DbEnv.h (renamed from bdb/libdb_java/com_sleepycat_db_DbEnv.h)0
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_DbLock.h (renamed from bdb/libdb_java/com_sleepycat_db_DbLock.h)0
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_DbLogc.h (renamed from bdb/libdb_java/com_sleepycat_db_DbLogc.h)0
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_DbLsn.h (renamed from bdb/libdb_java/com_sleepycat_db_DbLsn.h)0
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_DbTxn.h (renamed from bdb/libdb_java/com_sleepycat_db_DbTxn.h)0
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_DbUtil.h (renamed from bdb/libdb_java/com_sleepycat_db_DbUtil.h)0
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_Dbc.h (renamed from bdb/libdb_java/com_sleepycat_db_Dbc.h)0
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_Dbt.h (renamed from bdb/libdb_java/com_sleepycat_db_Dbt.h)0
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h (renamed from bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h)0
-rw-r--r--storage/bdb/libdb_java/java_Db.c (renamed from bdb/libdb_java/java_Db.c)0
-rw-r--r--storage/bdb/libdb_java/java_DbEnv.c (renamed from bdb/libdb_java/java_DbEnv.c)0
-rw-r--r--storage/bdb/libdb_java/java_DbLock.c (renamed from bdb/libdb_java/java_DbLock.c)0
-rw-r--r--storage/bdb/libdb_java/java_DbLogc.c (renamed from bdb/libdb_java/java_DbLogc.c)0
-rw-r--r--storage/bdb/libdb_java/java_DbLsn.c (renamed from bdb/libdb_java/java_DbLsn.c)0
-rw-r--r--storage/bdb/libdb_java/java_DbTxn.c (renamed from bdb/libdb_java/java_DbTxn.c)0
-rw-r--r--storage/bdb/libdb_java/java_DbUtil.c (renamed from bdb/libdb_java/java_DbUtil.c)0
-rw-r--r--storage/bdb/libdb_java/java_DbXAResource.c (renamed from bdb/libdb_java/java_DbXAResource.c)0
-rw-r--r--storage/bdb/libdb_java/java_Dbc.c (renamed from bdb/libdb_java/java_Dbc.c)0
-rw-r--r--storage/bdb/libdb_java/java_Dbt.c (renamed from bdb/libdb_java/java_Dbt.c)0
-rw-r--r--storage/bdb/libdb_java/java_info.c (renamed from bdb/libdb_java/java_info.c)0
-rw-r--r--storage/bdb/libdb_java/java_info.h (renamed from bdb/libdb_java/java_info.h)0
-rw-r--r--storage/bdb/libdb_java/java_locked.c (renamed from bdb/libdb_java/java_locked.c)0
-rw-r--r--storage/bdb/libdb_java/java_locked.h (renamed from bdb/libdb_java/java_locked.h)0
-rw-r--r--storage/bdb/libdb_java/java_util.c (renamed from bdb/libdb_java/java_util.c)0
-rw-r--r--storage/bdb/libdb_java/java_util.h (renamed from bdb/libdb_java/java_util.h)0
-rw-r--r--storage/bdb/lock/Design (renamed from bdb/lock/Design)0
-rw-r--r--storage/bdb/lock/lock.c (renamed from bdb/lock/lock.c)0
-rw-r--r--storage/bdb/lock/lock_deadlock.c (renamed from bdb/lock/lock_deadlock.c)0
-rw-r--r--storage/bdb/lock/lock_method.c (renamed from bdb/lock/lock_method.c)0
-rw-r--r--storage/bdb/lock/lock_region.c (renamed from bdb/lock/lock_region.c)0
-rw-r--r--storage/bdb/lock/lock_stat.c (renamed from bdb/lock/lock_stat.c)0
-rw-r--r--storage/bdb/lock/lock_util.c (renamed from bdb/lock/lock_util.c)0
-rw-r--r--storage/bdb/log/log.c (renamed from bdb/log/log.c)0
-rw-r--r--storage/bdb/log/log_archive.c (renamed from bdb/log/log_archive.c)0
-rw-r--r--storage/bdb/log/log_compare.c (renamed from bdb/log/log_compare.c)0
-rw-r--r--storage/bdb/log/log_get.c (renamed from bdb/log/log_get.c)0
-rw-r--r--storage/bdb/log/log_method.c (renamed from bdb/log/log_method.c)0
-rw-r--r--storage/bdb/log/log_put.c (renamed from bdb/log/log_put.c)0
-rw-r--r--storage/bdb/mp/mp_alloc.c (renamed from bdb/mp/mp_alloc.c)0
-rw-r--r--storage/bdb/mp/mp_bh.c (renamed from bdb/mp/mp_bh.c)0
-rw-r--r--storage/bdb/mp/mp_fget.c (renamed from bdb/mp/mp_fget.c)0
-rw-r--r--storage/bdb/mp/mp_fopen.c (renamed from bdb/mp/mp_fopen.c)0
-rw-r--r--storage/bdb/mp/mp_fput.c (renamed from bdb/mp/mp_fput.c)0
-rw-r--r--storage/bdb/mp/mp_fset.c (renamed from bdb/mp/mp_fset.c)0
-rw-r--r--storage/bdb/mp/mp_method.c (renamed from bdb/mp/mp_method.c)0
-rw-r--r--storage/bdb/mp/mp_region.c (renamed from bdb/mp/mp_region.c)0
-rw-r--r--storage/bdb/mp/mp_register.c (renamed from bdb/mp/mp_register.c)0
-rw-r--r--storage/bdb/mp/mp_stat.c (renamed from bdb/mp/mp_stat.c)0
-rw-r--r--storage/bdb/mp/mp_sync.c (renamed from bdb/mp/mp_sync.c)0
-rw-r--r--storage/bdb/mp/mp_trickle.c (renamed from bdb/mp/mp_trickle.c)0
-rw-r--r--storage/bdb/mutex/README (renamed from bdb/mutex/README)0
-rw-r--r--storage/bdb/mutex/mut_fcntl.c (renamed from bdb/mutex/mut_fcntl.c)0
-rw-r--r--storage/bdb/mutex/mut_pthread.c (renamed from bdb/mutex/mut_pthread.c)0
-rw-r--r--storage/bdb/mutex/mut_tas.c (renamed from bdb/mutex/mut_tas.c)0
-rw-r--r--storage/bdb/mutex/mut_win32.c (renamed from bdb/mutex/mut_win32.c)0
-rw-r--r--storage/bdb/mutex/mutex.c (renamed from bdb/mutex/mutex.c)0
-rw-r--r--storage/bdb/mutex/tm.c (renamed from bdb/mutex/tm.c)0
-rw-r--r--storage/bdb/mutex/uts4_cc.s (renamed from bdb/mutex/uts4_cc.s)0
-rw-r--r--storage/bdb/os/os_abs.c (renamed from bdb/os/os_abs.c)0
-rw-r--r--storage/bdb/os/os_alloc.c (renamed from bdb/os/os_alloc.c)0
-rw-r--r--storage/bdb/os/os_clock.c (renamed from bdb/os/os_clock.c)0
-rw-r--r--storage/bdb/os/os_config.c (renamed from bdb/os/os_config.c)0
-rw-r--r--storage/bdb/os/os_dir.c (renamed from bdb/os/os_dir.c)0
-rw-r--r--storage/bdb/os/os_errno.c (renamed from bdb/os/os_errno.c)0
-rw-r--r--storage/bdb/os/os_fid.c (renamed from bdb/os/os_fid.c)0
-rw-r--r--storage/bdb/os/os_fsync.c (renamed from bdb/os/os_fsync.c)0
-rw-r--r--storage/bdb/os/os_handle.c (renamed from bdb/os/os_handle.c)0
-rw-r--r--storage/bdb/os/os_id.c (renamed from bdb/os/os_id.c)0
-rw-r--r--storage/bdb/os/os_map.c (renamed from bdb/os/os_map.c)0
-rw-r--r--storage/bdb/os/os_method.c (renamed from bdb/os/os_method.c)0
-rw-r--r--storage/bdb/os/os_oflags.c (renamed from bdb/os/os_oflags.c)0
-rw-r--r--storage/bdb/os/os_open.c (renamed from bdb/os/os_open.c)0
-rw-r--r--storage/bdb/os/os_region.c (renamed from bdb/os/os_region.c)0
-rw-r--r--storage/bdb/os/os_rename.c (renamed from bdb/os/os_rename.c)0
-rw-r--r--storage/bdb/os/os_root.c (renamed from bdb/os/os_root.c)0
-rw-r--r--storage/bdb/os/os_rpath.c (renamed from bdb/os/os_rpath.c)0
-rw-r--r--storage/bdb/os/os_rw.c (renamed from bdb/os/os_rw.c)0
-rw-r--r--storage/bdb/os/os_seek.c (renamed from bdb/os/os_seek.c)0
-rw-r--r--storage/bdb/os/os_sleep.c (renamed from bdb/os/os_sleep.c)0
-rw-r--r--storage/bdb/os/os_spin.c (renamed from bdb/os/os_spin.c)0
-rw-r--r--storage/bdb/os/os_stat.c (renamed from bdb/os/os_stat.c)0
-rw-r--r--storage/bdb/os/os_tmpdir.c (renamed from bdb/os/os_tmpdir.c)0
-rw-r--r--storage/bdb/os/os_unlink.c (renamed from bdb/os/os_unlink.c)0
-rw-r--r--storage/bdb/os_vxworks/os_vx_abs.c (renamed from bdb/os_vxworks/os_vx_abs.c)0
-rw-r--r--storage/bdb/os_vxworks/os_vx_config.c (renamed from bdb/os_vxworks/os_vx_config.c)0
-rw-r--r--storage/bdb/os_vxworks/os_vx_map.c (renamed from bdb/os_vxworks/os_vx_map.c)0
-rw-r--r--storage/bdb/os_win32/os_abs.c (renamed from bdb/os_win32/os_abs.c)0
-rw-r--r--storage/bdb/os_win32/os_clock.c (renamed from bdb/os_win32/os_clock.c)0
-rw-r--r--storage/bdb/os_win32/os_config.c (renamed from bdb/os_win32/os_config.c)0
-rw-r--r--storage/bdb/os_win32/os_dir.c (renamed from bdb/os_win32/os_dir.c)0
-rw-r--r--storage/bdb/os_win32/os_errno.c (renamed from bdb/os_win32/os_errno.c)0
-rw-r--r--storage/bdb/os_win32/os_fid.c (renamed from bdb/os_win32/os_fid.c)0
-rw-r--r--storage/bdb/os_win32/os_fsync.c (renamed from bdb/os_win32/os_fsync.c)0
-rw-r--r--storage/bdb/os_win32/os_handle.c (renamed from bdb/os_win32/os_handle.c)0
-rw-r--r--storage/bdb/os_win32/os_map.c (renamed from bdb/os_win32/os_map.c)0
-rw-r--r--storage/bdb/os_win32/os_open.c (renamed from bdb/os_win32/os_open.c)0
-rw-r--r--storage/bdb/os_win32/os_rename.c (renamed from bdb/os_win32/os_rename.c)0
-rw-r--r--storage/bdb/os_win32/os_rw.c (renamed from bdb/os_win32/os_rw.c)0
-rw-r--r--storage/bdb/os_win32/os_seek.c (renamed from bdb/os_win32/os_seek.c)0
-rw-r--r--storage/bdb/os_win32/os_sleep.c (renamed from bdb/os_win32/os_sleep.c)0
-rw-r--r--storage/bdb/os_win32/os_spin.c (renamed from bdb/os_win32/os_spin.c)0
-rw-r--r--storage/bdb/os_win32/os_stat.c (renamed from bdb/os_win32/os_stat.c)0
-rw-r--r--storage/bdb/os_win32/os_type.c (renamed from bdb/os_win32/os_type.c)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/BerkeleyDB.pm (renamed from bdb/perl/BerkeleyDB/BerkeleyDB.pm)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod (renamed from bdb/perl/BerkeleyDB/BerkeleyDB.pod)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P (renamed from bdb/perl/BerkeleyDB/BerkeleyDB.pod.P)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/BerkeleyDB.xs (renamed from bdb/perl/BerkeleyDB/BerkeleyDB.xs)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm (renamed from bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm (renamed from bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/Changes (renamed from bdb/perl/BerkeleyDB/Changes)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/MANIFEST (renamed from bdb/perl/BerkeleyDB/MANIFEST)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/Makefile.PL (renamed from bdb/perl/BerkeleyDB/Makefile.PL)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/README (renamed from bdb/perl/BerkeleyDB/README)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/Todo (renamed from bdb/perl/BerkeleyDB/Todo)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/config.in (renamed from bdb/perl/BerkeleyDB/config.in)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/constants.h (renamed from bdb/perl/BerkeleyDB/constants.h)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/constants.xs (renamed from bdb/perl/BerkeleyDB/constants.xs)0
-rwxr-xr-xstorage/bdb/perl/BerkeleyDB/dbinfo (renamed from bdb/perl/BerkeleyDB/dbinfo)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/hints/dec_osf.pl (renamed from bdb/perl/BerkeleyDB/hints/dec_osf.pl)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/hints/irix_6_5.pl (renamed from bdb/perl/BerkeleyDB/hints/irix_6_5.pl)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/hints/solaris.pl (renamed from bdb/perl/BerkeleyDB/hints/solaris.pl)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/mkconsts (renamed from bdb/perl/BerkeleyDB/mkconsts)0
-rwxr-xr-xstorage/bdb/perl/BerkeleyDB/mkpod (renamed from bdb/perl/BerkeleyDB/mkpod)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.004 (renamed from bdb/perl/BerkeleyDB/patches/5.004)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.004_01 (renamed from bdb/perl/BerkeleyDB/patches/5.004_01)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.004_02 (renamed from bdb/perl/BerkeleyDB/patches/5.004_02)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.004_03 (renamed from bdb/perl/BerkeleyDB/patches/5.004_03)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.004_04 (renamed from bdb/perl/BerkeleyDB/patches/5.004_04)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.004_05 (renamed from bdb/perl/BerkeleyDB/patches/5.004_05)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.005 (renamed from bdb/perl/BerkeleyDB/patches/5.005)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.005_01 (renamed from bdb/perl/BerkeleyDB/patches/5.005_01)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.005_02 (renamed from bdb/perl/BerkeleyDB/patches/5.005_02)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.005_03 (renamed from bdb/perl/BerkeleyDB/patches/5.005_03)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.6.0 (renamed from bdb/perl/BerkeleyDB/patches/5.6.0)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/ppport.h (renamed from bdb/perl/BerkeleyDB/ppport.h)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/scan (renamed from bdb/perl/BerkeleyDB/scan)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/btree.t (renamed from bdb/perl/BerkeleyDB/t/btree.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/destroy.t (renamed from bdb/perl/BerkeleyDB/t/destroy.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/env.t (renamed from bdb/perl/BerkeleyDB/t/env.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/examples.t (renamed from bdb/perl/BerkeleyDB/t/examples.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/examples.t.T (renamed from bdb/perl/BerkeleyDB/t/examples.t.T)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/examples3.t (renamed from bdb/perl/BerkeleyDB/t/examples3.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/examples3.t.T (renamed from bdb/perl/BerkeleyDB/t/examples3.t.T)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/filter.t (renamed from bdb/perl/BerkeleyDB/t/filter.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/hash.t (renamed from bdb/perl/BerkeleyDB/t/hash.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/join.t (renamed from bdb/perl/BerkeleyDB/t/join.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/mldbm.t (renamed from bdb/perl/BerkeleyDB/t/mldbm.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/queue.t (renamed from bdb/perl/BerkeleyDB/t/queue.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/recno.t (renamed from bdb/perl/BerkeleyDB/t/recno.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/strict.t (renamed from bdb/perl/BerkeleyDB/t/strict.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/subdb.t (renamed from bdb/perl/BerkeleyDB/t/subdb.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/txn.t (renamed from bdb/perl/BerkeleyDB/t/txn.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/unknown.t (renamed from bdb/perl/BerkeleyDB/t/unknown.t)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/util.pm (renamed from bdb/perl/BerkeleyDB/t/util.pm)0
-rw-r--r--storage/bdb/perl/BerkeleyDB/typemap (renamed from bdb/perl/BerkeleyDB/typemap)0
-rw-r--r--storage/bdb/perl/DB_File/Changes (renamed from bdb/perl/DB_File/Changes)0
-rw-r--r--storage/bdb/perl/DB_File/DB_File.pm (renamed from bdb/perl/DB_File/DB_File.pm)0
-rw-r--r--storage/bdb/perl/DB_File/DB_File.xs (renamed from bdb/perl/DB_File/DB_File.xs)0
-rw-r--r--storage/bdb/perl/DB_File/DB_File_BS (renamed from bdb/perl/DB_File/DB_File_BS)0
-rw-r--r--storage/bdb/perl/DB_File/MANIFEST (renamed from bdb/perl/DB_File/MANIFEST)0
-rw-r--r--storage/bdb/perl/DB_File/Makefile.PL (renamed from bdb/perl/DB_File/Makefile.PL)0
-rw-r--r--storage/bdb/perl/DB_File/README (renamed from bdb/perl/DB_File/README)0
-rw-r--r--storage/bdb/perl/DB_File/config.in (renamed from bdb/perl/DB_File/config.in)0
-rw-r--r--storage/bdb/perl/DB_File/dbinfo (renamed from bdb/perl/DB_File/dbinfo)0
-rw-r--r--storage/bdb/perl/DB_File/fallback.h (renamed from bdb/perl/DB_File/fallback.h)0
-rw-r--r--storage/bdb/perl/DB_File/fallback.xs (renamed from bdb/perl/DB_File/fallback.xs)0
-rw-r--r--storage/bdb/perl/DB_File/hints/dynixptx.pl (renamed from bdb/perl/DB_File/hints/dynixptx.pl)0
-rw-r--r--storage/bdb/perl/DB_File/hints/sco.pl (renamed from bdb/perl/DB_File/hints/sco.pl)0
-rw-r--r--storage/bdb/perl/DB_File/patches/5.004 (renamed from bdb/perl/DB_File/patches/5.004)0
-rw-r--r--storage/bdb/perl/DB_File/patches/5.004_01 (renamed from bdb/perl/DB_File/patches/5.004_01)0
-rw-r--r--storage/bdb/perl/DB_File/patches/5.004_02 (renamed from bdb/perl/DB_File/patches/5.004_02)0
-rw-r--r--storage/bdb/perl/DB_File/patches/5.004_03 (renamed from bdb/perl/DB_File/patches/5.004_03)0
-rw-r--r--storage/bdb/perl/DB_File/patches/5.004_04 (renamed from bdb/perl/DB_File/patches/5.004_04)0
-rw-r--r--storage/bdb/perl/DB_File/patches/5.004_05 (renamed from bdb/perl/DB_File/patches/5.004_05)0
-rw-r--r--storage/bdb/perl/DB_File/patches/5.005 (renamed from bdb/perl/DB_File/patches/5.005)0
-rw-r--r--storage/bdb/perl/DB_File/patches/5.005_01 (renamed from bdb/perl/DB_File/patches/5.005_01)0
-rw-r--r--storage/bdb/perl/DB_File/patches/5.005_02 (renamed from bdb/perl/DB_File/patches/5.005_02)0
-rw-r--r--storage/bdb/perl/DB_File/patches/5.005_03 (renamed from bdb/perl/DB_File/patches/5.005_03)0
-rw-r--r--storage/bdb/perl/DB_File/patches/5.6.0 (renamed from bdb/perl/DB_File/patches/5.6.0)0
-rw-r--r--storage/bdb/perl/DB_File/ppport.h (renamed from bdb/perl/DB_File/ppport.h)0
-rw-r--r--storage/bdb/perl/DB_File/t/db-btree.t (renamed from bdb/perl/DB_File/t/db-btree.t)0
-rw-r--r--storage/bdb/perl/DB_File/t/db-hash.t (renamed from bdb/perl/DB_File/t/db-hash.t)0
-rw-r--r--storage/bdb/perl/DB_File/t/db-recno.t (renamed from bdb/perl/DB_File/t/db-recno.t)0
-rw-r--r--storage/bdb/perl/DB_File/typemap (renamed from bdb/perl/DB_File/typemap)0
-rw-r--r--storage/bdb/perl/DB_File/version.c (renamed from bdb/perl/DB_File/version.c)0
-rw-r--r--storage/bdb/qam/qam.c (renamed from bdb/qam/qam.c)0
-rw-r--r--storage/bdb/qam/qam.src (renamed from bdb/qam/qam.src)0
-rw-r--r--storage/bdb/qam/qam_conv.c (renamed from bdb/qam/qam_conv.c)0
-rw-r--r--storage/bdb/qam/qam_files.c (renamed from bdb/qam/qam_files.c)0
-rw-r--r--storage/bdb/qam/qam_method.c (renamed from bdb/qam/qam_method.c)0
-rw-r--r--storage/bdb/qam/qam_open.c (renamed from bdb/qam/qam_open.c)0
-rw-r--r--storage/bdb/qam/qam_rec.c (renamed from bdb/qam/qam_rec.c)0
-rw-r--r--storage/bdb/qam/qam_stat.c (renamed from bdb/qam/qam_stat.c)0
-rw-r--r--storage/bdb/qam/qam_upgrade.c (renamed from bdb/qam/qam_upgrade.c)0
-rw-r--r--storage/bdb/qam/qam_verify.c (renamed from bdb/qam/qam_verify.c)0
-rw-r--r--storage/bdb/rep/rep_method.c (renamed from bdb/rep/rep_method.c)0
-rw-r--r--storage/bdb/rep/rep_record.c (renamed from bdb/rep/rep_record.c)0
-rw-r--r--storage/bdb/rep/rep_region.c (renamed from bdb/rep/rep_region.c)0
-rw-r--r--storage/bdb/rep/rep_util.c (renamed from bdb/rep/rep_util.c)0
-rw-r--r--storage/bdb/rpc_client/client.c (renamed from bdb/rpc_client/client.c)0
-rw-r--r--storage/bdb/rpc_client/gen_client_ret.c (renamed from bdb/rpc_client/gen_client_ret.c)0
-rw-r--r--storage/bdb/rpc_server/c/db_server_proc.c.in (renamed from bdb/rpc_server/c/db_server_proc.c.in)0
-rw-r--r--storage/bdb/rpc_server/c/db_server_util.c (renamed from bdb/rpc_server/c/db_server_util.c)0
-rw-r--r--storage/bdb/rpc_server/clsrv.html (renamed from bdb/rpc_server/clsrv.html)0
-rw-r--r--storage/bdb/rpc_server/cxx/db_server_cxxproc.cpp (renamed from bdb/rpc_server/cxx/db_server_cxxproc.cpp)0
-rw-r--r--storage/bdb/rpc_server/cxx/db_server_cxxutil.cpp (renamed from bdb/rpc_server/cxx/db_server_cxxutil.cpp)0
-rw-r--r--storage/bdb/rpc_server/java/DbDispatcher.java (renamed from bdb/rpc_server/java/DbDispatcher.java)0
-rw-r--r--storage/bdb/rpc_server/java/DbServer.java (renamed from bdb/rpc_server/java/DbServer.java)0
-rw-r--r--storage/bdb/rpc_server/java/FreeList.java (renamed from bdb/rpc_server/java/FreeList.java)0
-rw-r--r--storage/bdb/rpc_server/java/LocalIterator.java (renamed from bdb/rpc_server/java/LocalIterator.java)0
-rw-r--r--storage/bdb/rpc_server/java/README (renamed from bdb/rpc_server/java/README)0
-rw-r--r--storage/bdb/rpc_server/java/RpcDb.java (renamed from bdb/rpc_server/java/RpcDb.java)0
-rw-r--r--storage/bdb/rpc_server/java/RpcDbEnv.java (renamed from bdb/rpc_server/java/RpcDbEnv.java)0
-rw-r--r--storage/bdb/rpc_server/java/RpcDbTxn.java (renamed from bdb/rpc_server/java/RpcDbTxn.java)0
-rw-r--r--storage/bdb/rpc_server/java/RpcDbc.java (renamed from bdb/rpc_server/java/RpcDbc.java)0
-rw-r--r--storage/bdb/rpc_server/java/Timer.java (renamed from bdb/rpc_server/java/Timer.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/DbServerStub.java (renamed from bdb/rpc_server/java/gen/DbServerStub.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_associate_msg.java (renamed from bdb/rpc_server/java/gen/__db_associate_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_associate_reply.java (renamed from bdb/rpc_server/java/gen/__db_associate_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java (renamed from bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java (renamed from bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java (renamed from bdb/rpc_server/java/gen/__db_bt_minkey_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java (renamed from bdb/rpc_server/java/gen/__db_bt_minkey_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_close_msg.java (renamed from bdb/rpc_server/java/gen/__db_close_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_close_reply.java (renamed from bdb/rpc_server/java/gen/__db_close_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_create_msg.java (renamed from bdb/rpc_server/java/gen/__db_create_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_create_reply.java (renamed from bdb/rpc_server/java/gen/__db_create_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_cursor_msg.java (renamed from bdb/rpc_server/java/gen/__db_cursor_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_cursor_reply.java (renamed from bdb/rpc_server/java/gen/__db_cursor_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_del_msg.java (renamed from bdb/rpc_server/java/gen/__db_del_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_del_reply.java (renamed from bdb/rpc_server/java/gen/__db_del_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_encrypt_msg.java (renamed from bdb/rpc_server/java/gen/__db_encrypt_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_encrypt_reply.java (renamed from bdb/rpc_server/java/gen/__db_encrypt_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_extentsize_msg.java (renamed from bdb/rpc_server/java/gen/__db_extentsize_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_extentsize_reply.java (renamed from bdb/rpc_server/java/gen/__db_extentsize_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_flags_msg.java (renamed from bdb/rpc_server/java/gen/__db_flags_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_flags_reply.java (renamed from bdb/rpc_server/java/gen/__db_flags_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_get_msg.java (renamed from bdb/rpc_server/java/gen/__db_get_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_get_reply.java (renamed from bdb/rpc_server/java/gen/__db_get_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java (renamed from bdb/rpc_server/java/gen/__db_h_ffactor_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java (renamed from bdb/rpc_server/java/gen/__db_h_ffactor_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_h_nelem_msg.java (renamed from bdb/rpc_server/java/gen/__db_h_nelem_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_h_nelem_reply.java (renamed from bdb/rpc_server/java/gen/__db_h_nelem_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_join_msg.java (renamed from bdb/rpc_server/java/gen/__db_join_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_join_reply.java (renamed from bdb/rpc_server/java/gen/__db_join_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_key_range_msg.java (renamed from bdb/rpc_server/java/gen/__db_key_range_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_key_range_reply.java (renamed from bdb/rpc_server/java/gen/__db_key_range_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_lorder_msg.java (renamed from bdb/rpc_server/java/gen/__db_lorder_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_lorder_reply.java (renamed from bdb/rpc_server/java/gen/__db_lorder_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_open_msg.java (renamed from bdb/rpc_server/java/gen/__db_open_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_open_reply.java (renamed from bdb/rpc_server/java/gen/__db_open_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_pagesize_msg.java (renamed from bdb/rpc_server/java/gen/__db_pagesize_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_pagesize_reply.java (renamed from bdb/rpc_server/java/gen/__db_pagesize_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_pget_msg.java (renamed from bdb/rpc_server/java/gen/__db_pget_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_pget_reply.java (renamed from bdb/rpc_server/java/gen/__db_pget_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_put_msg.java (renamed from bdb/rpc_server/java/gen/__db_put_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_put_reply.java (renamed from bdb/rpc_server/java/gen/__db_put_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_re_delim_msg.java (renamed from bdb/rpc_server/java/gen/__db_re_delim_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_re_delim_reply.java (renamed from bdb/rpc_server/java/gen/__db_re_delim_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_re_len_msg.java (renamed from bdb/rpc_server/java/gen/__db_re_len_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_re_len_reply.java (renamed from bdb/rpc_server/java/gen/__db_re_len_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_re_pad_msg.java (renamed from bdb/rpc_server/java/gen/__db_re_pad_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_re_pad_reply.java (renamed from bdb/rpc_server/java/gen/__db_re_pad_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_remove_msg.java (renamed from bdb/rpc_server/java/gen/__db_remove_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_remove_reply.java (renamed from bdb/rpc_server/java/gen/__db_remove_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_rename_msg.java (renamed from bdb/rpc_server/java/gen/__db_rename_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_rename_reply.java (renamed from bdb/rpc_server/java/gen/__db_rename_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_stat_msg.java (renamed from bdb/rpc_server/java/gen/__db_stat_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_stat_reply.java (renamed from bdb/rpc_server/java/gen/__db_stat_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_sync_msg.java (renamed from bdb/rpc_server/java/gen/__db_sync_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_sync_reply.java (renamed from bdb/rpc_server/java/gen/__db_sync_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_truncate_msg.java (renamed from bdb/rpc_server/java/gen/__db_truncate_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_truncate_reply.java (renamed from bdb/rpc_server/java/gen/__db_truncate_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_close_msg.java (renamed from bdb/rpc_server/java/gen/__dbc_close_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_close_reply.java (renamed from bdb/rpc_server/java/gen/__dbc_close_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_count_msg.java (renamed from bdb/rpc_server/java/gen/__dbc_count_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_count_reply.java (renamed from bdb/rpc_server/java/gen/__dbc_count_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_del_msg.java (renamed from bdb/rpc_server/java/gen/__dbc_del_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_del_reply.java (renamed from bdb/rpc_server/java/gen/__dbc_del_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_dup_msg.java (renamed from bdb/rpc_server/java/gen/__dbc_dup_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_dup_reply.java (renamed from bdb/rpc_server/java/gen/__dbc_dup_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_get_msg.java (renamed from bdb/rpc_server/java/gen/__dbc_get_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_get_reply.java (renamed from bdb/rpc_server/java/gen/__dbc_get_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_pget_msg.java (renamed from bdb/rpc_server/java/gen/__dbc_pget_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_pget_reply.java (renamed from bdb/rpc_server/java/gen/__dbc_pget_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_put_msg.java (renamed from bdb/rpc_server/java/gen/__dbc_put_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_put_reply.java (renamed from bdb/rpc_server/java/gen/__dbc_put_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_cachesize_msg.java (renamed from bdb/rpc_server/java/gen/__env_cachesize_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_cachesize_reply.java (renamed from bdb/rpc_server/java/gen/__env_cachesize_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_close_msg.java (renamed from bdb/rpc_server/java/gen/__env_close_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_close_reply.java (renamed from bdb/rpc_server/java/gen/__env_close_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_create_msg.java (renamed from bdb/rpc_server/java/gen/__env_create_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_create_reply.java (renamed from bdb/rpc_server/java/gen/__env_create_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_dbremove_msg.java (renamed from bdb/rpc_server/java/gen/__env_dbremove_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_dbremove_reply.java (renamed from bdb/rpc_server/java/gen/__env_dbremove_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_dbrename_msg.java (renamed from bdb/rpc_server/java/gen/__env_dbrename_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_dbrename_reply.java (renamed from bdb/rpc_server/java/gen/__env_dbrename_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_encrypt_msg.java (renamed from bdb/rpc_server/java/gen/__env_encrypt_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_encrypt_reply.java (renamed from bdb/rpc_server/java/gen/__env_encrypt_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_flags_msg.java (renamed from bdb/rpc_server/java/gen/__env_flags_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_flags_reply.java (renamed from bdb/rpc_server/java/gen/__env_flags_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_open_msg.java (renamed from bdb/rpc_server/java/gen/__env_open_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_open_reply.java (renamed from bdb/rpc_server/java/gen/__env_open_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_remove_msg.java (renamed from bdb/rpc_server/java/gen/__env_remove_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_remove_reply.java (renamed from bdb/rpc_server/java/gen/__env_remove_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_abort_msg.java (renamed from bdb/rpc_server/java/gen/__txn_abort_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_abort_reply.java (renamed from bdb/rpc_server/java/gen/__txn_abort_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_begin_msg.java (renamed from bdb/rpc_server/java/gen/__txn_begin_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_begin_reply.java (renamed from bdb/rpc_server/java/gen/__txn_begin_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_commit_msg.java (renamed from bdb/rpc_server/java/gen/__txn_commit_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_commit_reply.java (renamed from bdb/rpc_server/java/gen/__txn_commit_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_discard_msg.java (renamed from bdb/rpc_server/java/gen/__txn_discard_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_discard_reply.java (renamed from bdb/rpc_server/java/gen/__txn_discard_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_prepare_msg.java (renamed from bdb/rpc_server/java/gen/__txn_prepare_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_prepare_reply.java (renamed from bdb/rpc_server/java/gen/__txn_prepare_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_recover_msg.java (renamed from bdb/rpc_server/java/gen/__txn_recover_msg.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_recover_reply.java (renamed from bdb/rpc_server/java/gen/__txn_recover_reply.java)0
-rw-r--r--storage/bdb/rpc_server/java/gen/db_server.java (renamed from bdb/rpc_server/java/gen/db_server.java)0
-rw-r--r--storage/bdb/rpc_server/java/jrpcgen.jar (renamed from bdb/rpc_server/java/jrpcgen.jar)bin57125 -> 57125 bytes
-rw-r--r--storage/bdb/rpc_server/java/oncrpc.jar (renamed from bdb/rpc_server/java/oncrpc.jar)bin84122 -> 84122 bytes
-rw-r--r--storage/bdb/rpc_server/java/s_jrpcgen (renamed from bdb/rpc_server/java/s_jrpcgen)0
-rw-r--r--storage/bdb/rpc_server/rpc.src (renamed from bdb/rpc_server/rpc.src)0
-rw-r--r--storage/bdb/tcl/docs/db.html (renamed from bdb/tcl/docs/db.html)0
-rw-r--r--storage/bdb/tcl/docs/env.html (renamed from bdb/tcl/docs/env.html)0
-rw-r--r--storage/bdb/tcl/docs/historic.html (renamed from bdb/tcl/docs/historic.html)0
-rw-r--r--storage/bdb/tcl/docs/index.html (renamed from bdb/tcl/docs/index.html)0
-rw-r--r--storage/bdb/tcl/docs/library.html (renamed from bdb/tcl/docs/library.html)0
-rw-r--r--storage/bdb/tcl/docs/lock.html (renamed from bdb/tcl/docs/lock.html)0
-rw-r--r--storage/bdb/tcl/docs/log.html (renamed from bdb/tcl/docs/log.html)0
-rw-r--r--storage/bdb/tcl/docs/mpool.html (renamed from bdb/tcl/docs/mpool.html)0
-rw-r--r--storage/bdb/tcl/docs/rep.html (renamed from bdb/tcl/docs/rep.html)0
-rw-r--r--storage/bdb/tcl/docs/test.html (renamed from bdb/tcl/docs/test.html)0
-rw-r--r--storage/bdb/tcl/docs/txn.html (renamed from bdb/tcl/docs/txn.html)0
-rw-r--r--storage/bdb/tcl/tcl_compat.c (renamed from bdb/tcl/tcl_compat.c)0
-rw-r--r--storage/bdb/tcl/tcl_db.c (renamed from bdb/tcl/tcl_db.c)0
-rw-r--r--storage/bdb/tcl/tcl_db_pkg.c (renamed from bdb/tcl/tcl_db_pkg.c)0
-rw-r--r--storage/bdb/tcl/tcl_dbcursor.c (renamed from bdb/tcl/tcl_dbcursor.c)0
-rw-r--r--storage/bdb/tcl/tcl_env.c (renamed from bdb/tcl/tcl_env.c)0
-rw-r--r--storage/bdb/tcl/tcl_internal.c (renamed from bdb/tcl/tcl_internal.c)0
-rw-r--r--storage/bdb/tcl/tcl_lock.c (renamed from bdb/tcl/tcl_lock.c)0
-rw-r--r--storage/bdb/tcl/tcl_log.c (renamed from bdb/tcl/tcl_log.c)0
-rw-r--r--storage/bdb/tcl/tcl_mp.c (renamed from bdb/tcl/tcl_mp.c)0
-rw-r--r--storage/bdb/tcl/tcl_rep.c (renamed from bdb/tcl/tcl_rep.c)0
-rw-r--r--storage/bdb/tcl/tcl_txn.c (renamed from bdb/tcl/tcl_txn.c)0
-rw-r--r--storage/bdb/tcl/tcl_util.c (renamed from bdb/tcl/tcl_util.c)0
-rw-r--r--storage/bdb/test/archive.tcl (renamed from bdb/test/archive.tcl)0
-rw-r--r--storage/bdb/test/bigfile001.tcl (renamed from bdb/test/bigfile001.tcl)0
-rw-r--r--storage/bdb/test/bigfile002.tcl (renamed from bdb/test/bigfile002.tcl)0
-rw-r--r--storage/bdb/test/byteorder.tcl (renamed from bdb/test/byteorder.tcl)0
-rw-r--r--storage/bdb/test/conscript.tcl (renamed from bdb/test/conscript.tcl)0
-rw-r--r--storage/bdb/test/dbm.tcl (renamed from bdb/test/dbm.tcl)0
-rw-r--r--storage/bdb/test/dbscript.tcl (renamed from bdb/test/dbscript.tcl)0
-rw-r--r--storage/bdb/test/ddoyscript.tcl (renamed from bdb/test/ddoyscript.tcl)0
-rw-r--r--storage/bdb/test/ddscript.tcl (renamed from bdb/test/ddscript.tcl)0
-rw-r--r--storage/bdb/test/dead001.tcl (renamed from bdb/test/dead001.tcl)0
-rw-r--r--storage/bdb/test/dead002.tcl (renamed from bdb/test/dead002.tcl)0
-rw-r--r--storage/bdb/test/dead003.tcl (renamed from bdb/test/dead003.tcl)0
-rw-r--r--storage/bdb/test/dead004.tcl (renamed from bdb/test/dead004.tcl)0
-rw-r--r--storage/bdb/test/dead005.tcl (renamed from bdb/test/dead005.tcl)0
-rw-r--r--storage/bdb/test/dead006.tcl (renamed from bdb/test/dead006.tcl)0
-rw-r--r--storage/bdb/test/dead007.tcl (renamed from bdb/test/dead007.tcl)0
-rw-r--r--storage/bdb/test/env001.tcl (renamed from bdb/test/env001.tcl)0
-rw-r--r--storage/bdb/test/env002.tcl (renamed from bdb/test/env002.tcl)0
-rw-r--r--storage/bdb/test/env003.tcl (renamed from bdb/test/env003.tcl)0
-rw-r--r--storage/bdb/test/env004.tcl (renamed from bdb/test/env004.tcl)0
-rw-r--r--storage/bdb/test/env005.tcl (renamed from bdb/test/env005.tcl)0
-rw-r--r--storage/bdb/test/env006.tcl (renamed from bdb/test/env006.tcl)0
-rw-r--r--storage/bdb/test/env007.tcl (renamed from bdb/test/env007.tcl)0
-rw-r--r--storage/bdb/test/env008.tcl (renamed from bdb/test/env008.tcl)0
-rw-r--r--storage/bdb/test/env009.tcl (renamed from bdb/test/env009.tcl)0
-rw-r--r--storage/bdb/test/env010.tcl (renamed from bdb/test/env010.tcl)0
-rw-r--r--storage/bdb/test/env011.tcl (renamed from bdb/test/env011.tcl)0
-rw-r--r--storage/bdb/test/hsearch.tcl (renamed from bdb/test/hsearch.tcl)0
-rw-r--r--storage/bdb/test/join.tcl (renamed from bdb/test/join.tcl)0
-rw-r--r--storage/bdb/test/lock001.tcl (renamed from bdb/test/lock001.tcl)0
-rw-r--r--storage/bdb/test/lock002.tcl (renamed from bdb/test/lock002.tcl)0
-rw-r--r--storage/bdb/test/lock003.tcl (renamed from bdb/test/lock003.tcl)0
-rw-r--r--storage/bdb/test/lock004.tcl (renamed from bdb/test/lock004.tcl)0
-rw-r--r--storage/bdb/test/lock005.tcl (renamed from bdb/test/lock005.tcl)0
-rw-r--r--storage/bdb/test/lockscript.tcl (renamed from bdb/test/lockscript.tcl)0
-rw-r--r--storage/bdb/test/log001.tcl (renamed from bdb/test/log001.tcl)0
-rw-r--r--storage/bdb/test/log002.tcl (renamed from bdb/test/log002.tcl)0
-rw-r--r--storage/bdb/test/log003.tcl (renamed from bdb/test/log003.tcl)0
-rw-r--r--storage/bdb/test/log004.tcl (renamed from bdb/test/log004.tcl)0
-rw-r--r--storage/bdb/test/log005.tcl (renamed from bdb/test/log005.tcl)0
-rw-r--r--storage/bdb/test/logtrack.tcl (renamed from bdb/test/logtrack.tcl)0
-rw-r--r--storage/bdb/test/mdbscript.tcl (renamed from bdb/test/mdbscript.tcl)0
-rw-r--r--storage/bdb/test/memp001.tcl (renamed from bdb/test/memp001.tcl)0
-rw-r--r--storage/bdb/test/memp002.tcl (renamed from bdb/test/memp002.tcl)0
-rw-r--r--storage/bdb/test/memp003.tcl (renamed from bdb/test/memp003.tcl)0
-rw-r--r--storage/bdb/test/mpoolscript.tcl (renamed from bdb/test/mpoolscript.tcl)0
-rw-r--r--storage/bdb/test/mutex001.tcl (renamed from bdb/test/mutex001.tcl)0
-rw-r--r--storage/bdb/test/mutex002.tcl (renamed from bdb/test/mutex002.tcl)0
-rw-r--r--storage/bdb/test/mutex003.tcl (renamed from bdb/test/mutex003.tcl)0
-rw-r--r--storage/bdb/test/mutexscript.tcl (renamed from bdb/test/mutexscript.tcl)0
-rw-r--r--storage/bdb/test/ndbm.tcl (renamed from bdb/test/ndbm.tcl)0
-rw-r--r--storage/bdb/test/parallel.tcl (renamed from bdb/test/parallel.tcl)0
-rw-r--r--storage/bdb/test/recd001.tcl (renamed from bdb/test/recd001.tcl)0
-rw-r--r--storage/bdb/test/recd002.tcl (renamed from bdb/test/recd002.tcl)0
-rw-r--r--storage/bdb/test/recd003.tcl (renamed from bdb/test/recd003.tcl)0
-rw-r--r--storage/bdb/test/recd004.tcl (renamed from bdb/test/recd004.tcl)0
-rw-r--r--storage/bdb/test/recd005.tcl (renamed from bdb/test/recd005.tcl)0
-rw-r--r--storage/bdb/test/recd006.tcl (renamed from bdb/test/recd006.tcl)0
-rw-r--r--storage/bdb/test/recd007.tcl (renamed from bdb/test/recd007.tcl)0
-rw-r--r--storage/bdb/test/recd008.tcl (renamed from bdb/test/recd008.tcl)0
-rw-r--r--storage/bdb/test/recd009.tcl (renamed from bdb/test/recd009.tcl)0
-rw-r--r--storage/bdb/test/recd010.tcl (renamed from bdb/test/recd010.tcl)0
-rw-r--r--storage/bdb/test/recd011.tcl (renamed from bdb/test/recd011.tcl)0
-rw-r--r--storage/bdb/test/recd012.tcl (renamed from bdb/test/recd012.tcl)0
-rw-r--r--storage/bdb/test/recd013.tcl (renamed from bdb/test/recd013.tcl)0
-rw-r--r--storage/bdb/test/recd014.tcl (renamed from bdb/test/recd014.tcl)0
-rw-r--r--storage/bdb/test/recd015.tcl (renamed from bdb/test/recd015.tcl)0
-rw-r--r--storage/bdb/test/recd016.tcl (renamed from bdb/test/recd016.tcl)0
-rw-r--r--storage/bdb/test/recd017.tcl (renamed from bdb/test/recd017.tcl)0
-rw-r--r--storage/bdb/test/recd018.tcl (renamed from bdb/test/recd018.tcl)0
-rw-r--r--storage/bdb/test/recd019.tcl (renamed from bdb/test/recd019.tcl)0
-rw-r--r--storage/bdb/test/recd020.tcl (renamed from bdb/test/recd020.tcl)0
-rw-r--r--storage/bdb/test/recd15scr.tcl (renamed from bdb/test/recd15scr.tcl)0
-rw-r--r--storage/bdb/test/recdscript.tcl (renamed from bdb/test/recdscript.tcl)0
-rw-r--r--storage/bdb/test/rep001.tcl (renamed from bdb/test/rep001.tcl)0
-rw-r--r--storage/bdb/test/rep002.tcl (renamed from bdb/test/rep002.tcl)0
-rw-r--r--storage/bdb/test/rep003.tcl (renamed from bdb/test/rep003.tcl)0
-rw-r--r--storage/bdb/test/rep004.tcl (renamed from bdb/test/rep004.tcl)0
-rw-r--r--storage/bdb/test/rep005.tcl (renamed from bdb/test/rep005.tcl)0
-rw-r--r--storage/bdb/test/reputils.tcl (renamed from bdb/test/reputils.tcl)0
-rw-r--r--storage/bdb/test/rpc001.tcl (renamed from bdb/test/rpc001.tcl)0
-rw-r--r--storage/bdb/test/rpc002.tcl (renamed from bdb/test/rpc002.tcl)0
-rw-r--r--storage/bdb/test/rpc003.tcl (renamed from bdb/test/rpc003.tcl)0
-rw-r--r--storage/bdb/test/rpc004.tcl (renamed from bdb/test/rpc004.tcl)0
-rw-r--r--storage/bdb/test/rpc005.tcl (renamed from bdb/test/rpc005.tcl)0
-rw-r--r--storage/bdb/test/rsrc001.tcl (renamed from bdb/test/rsrc001.tcl)0
-rw-r--r--storage/bdb/test/rsrc002.tcl (renamed from bdb/test/rsrc002.tcl)0
-rw-r--r--storage/bdb/test/rsrc003.tcl (renamed from bdb/test/rsrc003.tcl)0
-rw-r--r--storage/bdb/test/rsrc004.tcl (renamed from bdb/test/rsrc004.tcl)0
-rw-r--r--storage/bdb/test/scr001/chk.code (renamed from bdb/test/scr001/chk.code)0
-rw-r--r--storage/bdb/test/scr002/chk.def (renamed from bdb/test/scr002/chk.def)0
-rw-r--r--storage/bdb/test/scr003/chk.define (renamed from bdb/test/scr003/chk.define)0
-rw-r--r--storage/bdb/test/scr004/chk.javafiles (renamed from bdb/test/scr004/chk.javafiles)0
-rw-r--r--storage/bdb/test/scr005/chk.nl (renamed from bdb/test/scr005/chk.nl)0
-rw-r--r--storage/bdb/test/scr006/chk.offt (renamed from bdb/test/scr006/chk.offt)0
-rw-r--r--storage/bdb/test/scr007/chk.proto (renamed from bdb/test/scr007/chk.proto)0
-rw-r--r--storage/bdb/test/scr008/chk.pubdef (renamed from bdb/test/scr008/chk.pubdef)0
-rw-r--r--storage/bdb/test/scr009/chk.srcfiles (renamed from bdb/test/scr009/chk.srcfiles)0
-rw-r--r--storage/bdb/test/scr010/chk.str (renamed from bdb/test/scr010/chk.str)0
-rw-r--r--storage/bdb/test/scr010/spell.ok (renamed from bdb/test/scr010/spell.ok)0
-rw-r--r--storage/bdb/test/scr011/chk.tags (renamed from bdb/test/scr011/chk.tags)0
-rw-r--r--storage/bdb/test/scr012/chk.vx_code (renamed from bdb/test/scr012/chk.vx_code)0
-rw-r--r--storage/bdb/test/scr013/chk.stats (renamed from bdb/test/scr013/chk.stats)0
-rw-r--r--storage/bdb/test/scr014/chk.err (renamed from bdb/test/scr014/chk.err)0
-rw-r--r--storage/bdb/test/scr015/README (renamed from bdb/test/scr015/README)0
-rw-r--r--storage/bdb/test/scr015/TestConstruct01.cpp (renamed from bdb/test/scr015/TestConstruct01.cpp)0
-rw-r--r--storage/bdb/test/scr015/TestConstruct01.testerr (renamed from bdb/test/scr015/TestConstruct01.testerr)0
-rw-r--r--storage/bdb/test/scr015/TestConstruct01.testout (renamed from bdb/test/scr015/TestConstruct01.testout)0
-rw-r--r--storage/bdb/test/scr015/TestExceptInclude.cpp (renamed from bdb/test/scr015/TestExceptInclude.cpp)0
-rw-r--r--storage/bdb/test/scr015/TestGetSetMethods.cpp (renamed from bdb/test/scr015/TestGetSetMethods.cpp)0
-rw-r--r--storage/bdb/test/scr015/TestKeyRange.cpp (renamed from bdb/test/scr015/TestKeyRange.cpp)0
-rw-r--r--storage/bdb/test/scr015/TestKeyRange.testin (renamed from bdb/test/scr015/TestKeyRange.testin)0
-rw-r--r--storage/bdb/test/scr015/TestKeyRange.testout (renamed from bdb/test/scr015/TestKeyRange.testout)0
-rw-r--r--storage/bdb/test/scr015/TestLogc.cpp (renamed from bdb/test/scr015/TestLogc.cpp)0
-rw-r--r--storage/bdb/test/scr015/TestLogc.testout (renamed from bdb/test/scr015/TestLogc.testout)0
-rw-r--r--storage/bdb/test/scr015/TestSimpleAccess.cpp (renamed from bdb/test/scr015/TestSimpleAccess.cpp)0
-rw-r--r--storage/bdb/test/scr015/TestSimpleAccess.testout (renamed from bdb/test/scr015/TestSimpleAccess.testout)0
-rw-r--r--storage/bdb/test/scr015/TestTruncate.cpp (renamed from bdb/test/scr015/TestTruncate.cpp)0
-rw-r--r--storage/bdb/test/scr015/TestTruncate.testout (renamed from bdb/test/scr015/TestTruncate.testout)0
-rw-r--r--storage/bdb/test/scr015/chk.cxxtests (renamed from bdb/test/scr015/chk.cxxtests)0
-rw-r--r--storage/bdb/test/scr015/ignore (renamed from bdb/test/scr015/ignore)0
-rw-r--r--storage/bdb/test/scr015/testall (renamed from bdb/test/scr015/testall)0
-rw-r--r--storage/bdb/test/scr015/testone (renamed from bdb/test/scr015/testone)0
-rw-r--r--storage/bdb/test/scr016/CallbackTest.java (renamed from bdb/test/scr016/CallbackTest.java)0
-rw-r--r--storage/bdb/test/scr016/CallbackTest.testout (renamed from bdb/test/scr016/CallbackTest.testout)0
-rw-r--r--storage/bdb/test/scr016/README (renamed from bdb/test/scr016/README)0
-rw-r--r--storage/bdb/test/scr016/TestAppendRecno.java (renamed from bdb/test/scr016/TestAppendRecno.java)0
-rw-r--r--storage/bdb/test/scr016/TestAppendRecno.testout (renamed from bdb/test/scr016/TestAppendRecno.testout)0
-rw-r--r--storage/bdb/test/scr016/TestAssociate.java (renamed from bdb/test/scr016/TestAssociate.java)0
-rw-r--r--storage/bdb/test/scr016/TestAssociate.testout (renamed from bdb/test/scr016/TestAssociate.testout)0
-rw-r--r--storage/bdb/test/scr016/TestClosedDb.java (renamed from bdb/test/scr016/TestClosedDb.java)0
-rw-r--r--storage/bdb/test/scr016/TestClosedDb.testout (renamed from bdb/test/scr016/TestClosedDb.testout)0
-rw-r--r--storage/bdb/test/scr016/TestConstruct01.java (renamed from bdb/test/scr016/TestConstruct01.java)0
-rw-r--r--storage/bdb/test/scr016/TestConstruct01.testerr (renamed from bdb/test/scr016/TestConstruct01.testerr)0
-rw-r--r--storage/bdb/test/scr016/TestConstruct01.testout (renamed from bdb/test/scr016/TestConstruct01.testout)0
-rw-r--r--storage/bdb/test/scr016/TestConstruct02.java (renamed from bdb/test/scr016/TestConstruct02.java)0
-rw-r--r--storage/bdb/test/scr016/TestConstruct02.testout (renamed from bdb/test/scr016/TestConstruct02.testout)0
-rw-r--r--storage/bdb/test/scr016/TestDbtFlags.java (renamed from bdb/test/scr016/TestDbtFlags.java)0
-rw-r--r--storage/bdb/test/scr016/TestDbtFlags.testerr (renamed from bdb/test/scr016/TestDbtFlags.testerr)0
-rw-r--r--storage/bdb/test/scr016/TestDbtFlags.testout (renamed from bdb/test/scr016/TestDbtFlags.testout)0
-rw-r--r--storage/bdb/test/scr016/TestGetSetMethods.java (renamed from bdb/test/scr016/TestGetSetMethods.java)0
-rw-r--r--storage/bdb/test/scr016/TestKeyRange.java (renamed from bdb/test/scr016/TestKeyRange.java)0
-rw-r--r--storage/bdb/test/scr016/TestKeyRange.testout (renamed from bdb/test/scr016/TestKeyRange.testout)0
-rw-r--r--storage/bdb/test/scr016/TestLockVec.java (renamed from bdb/test/scr016/TestLockVec.java)0
-rw-r--r--storage/bdb/test/scr016/TestLockVec.testout (renamed from bdb/test/scr016/TestLockVec.testout)0
-rw-r--r--storage/bdb/test/scr016/TestLogc.java (renamed from bdb/test/scr016/TestLogc.java)0
-rw-r--r--storage/bdb/test/scr016/TestLogc.testout (renamed from bdb/test/scr016/TestLogc.testout)0
-rw-r--r--storage/bdb/test/scr016/TestOpenEmpty.java (renamed from bdb/test/scr016/TestOpenEmpty.java)0
-rw-r--r--storage/bdb/test/scr016/TestOpenEmpty.testerr (renamed from bdb/test/scr016/TestOpenEmpty.testerr)0
-rw-r--r--storage/bdb/test/scr016/TestReplication.java (renamed from bdb/test/scr016/TestReplication.java)0
-rw-r--r--storage/bdb/test/scr016/TestRpcServer.java (renamed from bdb/test/scr016/TestRpcServer.java)0
-rw-r--r--storage/bdb/test/scr016/TestSameDbt.java (renamed from bdb/test/scr016/TestSameDbt.java)0
-rw-r--r--storage/bdb/test/scr016/TestSameDbt.testout (renamed from bdb/test/scr016/TestSameDbt.testout)0
-rw-r--r--storage/bdb/test/scr016/TestSimpleAccess.java (renamed from bdb/test/scr016/TestSimpleAccess.java)0
-rw-r--r--storage/bdb/test/scr016/TestSimpleAccess.testout (renamed from bdb/test/scr016/TestSimpleAccess.testout)0
-rw-r--r--storage/bdb/test/scr016/TestStat.java (renamed from bdb/test/scr016/TestStat.java)0
-rw-r--r--storage/bdb/test/scr016/TestStat.testout (renamed from bdb/test/scr016/TestStat.testout)0
-rw-r--r--storage/bdb/test/scr016/TestTruncate.java (renamed from bdb/test/scr016/TestTruncate.java)0
-rw-r--r--storage/bdb/test/scr016/TestTruncate.testout (renamed from bdb/test/scr016/TestTruncate.testout)0
-rw-r--r--storage/bdb/test/scr016/TestUtil.java (renamed from bdb/test/scr016/TestUtil.java)0
-rw-r--r--storage/bdb/test/scr016/TestXAServlet.java (renamed from bdb/test/scr016/TestXAServlet.java)0
-rw-r--r--storage/bdb/test/scr016/chk.javatests (renamed from bdb/test/scr016/chk.javatests)0
-rw-r--r--storage/bdb/test/scr016/ignore (renamed from bdb/test/scr016/ignore)0
-rw-r--r--storage/bdb/test/scr016/testall (renamed from bdb/test/scr016/testall)0
-rw-r--r--storage/bdb/test/scr016/testone (renamed from bdb/test/scr016/testone)0
-rw-r--r--storage/bdb/test/scr017/O.BH (renamed from bdb/test/scr017/O.BH)0
-rw-r--r--storage/bdb/test/scr017/O.R (renamed from bdb/test/scr017/O.R)0
-rw-r--r--storage/bdb/test/scr017/chk.db185 (renamed from bdb/test/scr017/chk.db185)0
-rw-r--r--storage/bdb/test/scr017/t.c (renamed from bdb/test/scr017/t.c)0
-rw-r--r--storage/bdb/test/scr018/chk.comma (renamed from bdb/test/scr018/chk.comma)0
-rw-r--r--storage/bdb/test/scr018/t.c (renamed from bdb/test/scr018/t.c)0
-rw-r--r--storage/bdb/test/scr019/chk.include (renamed from bdb/test/scr019/chk.include)0
-rw-r--r--storage/bdb/test/scr020/chk.inc (renamed from bdb/test/scr020/chk.inc)0
-rw-r--r--storage/bdb/test/scr021/chk.flags (renamed from bdb/test/scr021/chk.flags)0
-rw-r--r--storage/bdb/test/scr022/chk.rr (renamed from bdb/test/scr022/chk.rr)0
-rw-r--r--storage/bdb/test/sdb001.tcl (renamed from bdb/test/sdb001.tcl)0
-rw-r--r--storage/bdb/test/sdb002.tcl (renamed from bdb/test/sdb002.tcl)0
-rw-r--r--storage/bdb/test/sdb003.tcl (renamed from bdb/test/sdb003.tcl)0
-rw-r--r--storage/bdb/test/sdb004.tcl (renamed from bdb/test/sdb004.tcl)0
-rw-r--r--storage/bdb/test/sdb005.tcl (renamed from bdb/test/sdb005.tcl)0
-rw-r--r--storage/bdb/test/sdb006.tcl (renamed from bdb/test/sdb006.tcl)0
-rw-r--r--storage/bdb/test/sdb007.tcl (renamed from bdb/test/sdb007.tcl)0
-rw-r--r--storage/bdb/test/sdb008.tcl (renamed from bdb/test/sdb008.tcl)0
-rw-r--r--storage/bdb/test/sdb009.tcl (renamed from bdb/test/sdb009.tcl)0
-rw-r--r--storage/bdb/test/sdb010.tcl (renamed from bdb/test/sdb010.tcl)0
-rw-r--r--storage/bdb/test/sdb011.tcl (renamed from bdb/test/sdb011.tcl)0
-rw-r--r--storage/bdb/test/sdb012.tcl (renamed from bdb/test/sdb012.tcl)0
-rw-r--r--storage/bdb/test/sdbscript.tcl (renamed from bdb/test/sdbscript.tcl)0
-rw-r--r--storage/bdb/test/sdbtest001.tcl (renamed from bdb/test/sdbtest001.tcl)0
-rw-r--r--storage/bdb/test/sdbtest002.tcl (renamed from bdb/test/sdbtest002.tcl)0
-rw-r--r--storage/bdb/test/sdbutils.tcl (renamed from bdb/test/sdbutils.tcl)0
-rw-r--r--storage/bdb/test/sec001.tcl (renamed from bdb/test/sec001.tcl)0
-rw-r--r--storage/bdb/test/sec002.tcl (renamed from bdb/test/sec002.tcl)0
-rw-r--r--storage/bdb/test/shelltest.tcl (renamed from bdb/test/shelltest.tcl)0
-rw-r--r--storage/bdb/test/si001.tcl (renamed from bdb/test/si001.tcl)0
-rw-r--r--storage/bdb/test/si002.tcl (renamed from bdb/test/si002.tcl)0
-rw-r--r--storage/bdb/test/si003.tcl (renamed from bdb/test/si003.tcl)0
-rw-r--r--storage/bdb/test/si004.tcl (renamed from bdb/test/si004.tcl)0
-rw-r--r--storage/bdb/test/si005.tcl (renamed from bdb/test/si005.tcl)0
-rw-r--r--storage/bdb/test/si006.tcl (renamed from bdb/test/si006.tcl)0
-rw-r--r--storage/bdb/test/sindex.tcl (renamed from bdb/test/sindex.tcl)0
-rw-r--r--storage/bdb/test/sysscript.tcl (renamed from bdb/test/sysscript.tcl)0
-rw-r--r--storage/bdb/test/test.tcl (renamed from bdb/test/test.tcl)0
-rw-r--r--storage/bdb/test/test001.tcl (renamed from bdb/test/test001.tcl)0
-rw-r--r--storage/bdb/test/test002.tcl (renamed from bdb/test/test002.tcl)0
-rw-r--r--storage/bdb/test/test003.tcl (renamed from bdb/test/test003.tcl)0
-rw-r--r--storage/bdb/test/test004.tcl (renamed from bdb/test/test004.tcl)0
-rw-r--r--storage/bdb/test/test005.tcl (renamed from bdb/test/test005.tcl)0
-rw-r--r--storage/bdb/test/test006.tcl (renamed from bdb/test/test006.tcl)0
-rw-r--r--storage/bdb/test/test007.tcl (renamed from bdb/test/test007.tcl)0
-rw-r--r--storage/bdb/test/test008.tcl (renamed from bdb/test/test008.tcl)0
-rw-r--r--storage/bdb/test/test009.tcl (renamed from bdb/test/test009.tcl)0
-rw-r--r--storage/bdb/test/test010.tcl (renamed from bdb/test/test010.tcl)0
-rw-r--r--storage/bdb/test/test011.tcl (renamed from bdb/test/test011.tcl)0
-rw-r--r--storage/bdb/test/test012.tcl (renamed from bdb/test/test012.tcl)0
-rw-r--r--storage/bdb/test/test013.tcl (renamed from bdb/test/test013.tcl)0
-rw-r--r--storage/bdb/test/test014.tcl (renamed from bdb/test/test014.tcl)0
-rw-r--r--storage/bdb/test/test015.tcl (renamed from bdb/test/test015.tcl)0
-rw-r--r--storage/bdb/test/test016.tcl (renamed from bdb/test/test016.tcl)0
-rw-r--r--storage/bdb/test/test017.tcl (renamed from bdb/test/test017.tcl)0
-rw-r--r--storage/bdb/test/test018.tcl (renamed from bdb/test/test018.tcl)0
-rw-r--r--storage/bdb/test/test019.tcl (renamed from bdb/test/test019.tcl)0
-rw-r--r--storage/bdb/test/test020.tcl (renamed from bdb/test/test020.tcl)0
-rw-r--r--storage/bdb/test/test021.tcl (renamed from bdb/test/test021.tcl)0
-rw-r--r--storage/bdb/test/test022.tcl (renamed from bdb/test/test022.tcl)0
-rw-r--r--storage/bdb/test/test023.tcl (renamed from bdb/test/test023.tcl)0
-rw-r--r--storage/bdb/test/test024.tcl (renamed from bdb/test/test024.tcl)0
-rw-r--r--storage/bdb/test/test025.tcl (renamed from bdb/test/test025.tcl)0
-rw-r--r--storage/bdb/test/test026.tcl (renamed from bdb/test/test026.tcl)0
-rw-r--r--storage/bdb/test/test027.tcl (renamed from bdb/test/test027.tcl)0
-rw-r--r--storage/bdb/test/test028.tcl (renamed from bdb/test/test028.tcl)0
-rw-r--r--storage/bdb/test/test029.tcl (renamed from bdb/test/test029.tcl)0
-rw-r--r--storage/bdb/test/test030.tcl (renamed from bdb/test/test030.tcl)0
-rw-r--r--storage/bdb/test/test031.tcl (renamed from bdb/test/test031.tcl)0
-rw-r--r--storage/bdb/test/test032.tcl (renamed from bdb/test/test032.tcl)0
-rw-r--r--storage/bdb/test/test033.tcl (renamed from bdb/test/test033.tcl)0
-rw-r--r--storage/bdb/test/test034.tcl (renamed from bdb/test/test034.tcl)0
-rw-r--r--storage/bdb/test/test035.tcl (renamed from bdb/test/test035.tcl)0
-rw-r--r--storage/bdb/test/test036.tcl (renamed from bdb/test/test036.tcl)0
-rw-r--r--storage/bdb/test/test037.tcl (renamed from bdb/test/test037.tcl)0
-rw-r--r--storage/bdb/test/test038.tcl (renamed from bdb/test/test038.tcl)0
-rw-r--r--storage/bdb/test/test039.tcl (renamed from bdb/test/test039.tcl)0
-rw-r--r--storage/bdb/test/test040.tcl (renamed from bdb/test/test040.tcl)0
-rw-r--r--storage/bdb/test/test041.tcl (renamed from bdb/test/test041.tcl)0
-rw-r--r--storage/bdb/test/test042.tcl (renamed from bdb/test/test042.tcl)0
-rw-r--r--storage/bdb/test/test043.tcl (renamed from bdb/test/test043.tcl)0
-rw-r--r--storage/bdb/test/test044.tcl (renamed from bdb/test/test044.tcl)0
-rw-r--r--storage/bdb/test/test045.tcl (renamed from bdb/test/test045.tcl)0
-rw-r--r--storage/bdb/test/test046.tcl (renamed from bdb/test/test046.tcl)0
-rw-r--r--storage/bdb/test/test047.tcl (renamed from bdb/test/test047.tcl)0
-rw-r--r--storage/bdb/test/test048.tcl (renamed from bdb/test/test048.tcl)0
-rw-r--r--storage/bdb/test/test049.tcl (renamed from bdb/test/test049.tcl)0
-rw-r--r--storage/bdb/test/test050.tcl (renamed from bdb/test/test050.tcl)0
-rw-r--r--storage/bdb/test/test051.tcl (renamed from bdb/test/test051.tcl)0
-rw-r--r--storage/bdb/test/test052.tcl (renamed from bdb/test/test052.tcl)0
-rw-r--r--storage/bdb/test/test053.tcl (renamed from bdb/test/test053.tcl)0
-rw-r--r--storage/bdb/test/test054.tcl (renamed from bdb/test/test054.tcl)0
-rw-r--r--storage/bdb/test/test055.tcl (renamed from bdb/test/test055.tcl)0
-rw-r--r--storage/bdb/test/test056.tcl (renamed from bdb/test/test056.tcl)0
-rw-r--r--storage/bdb/test/test057.tcl (renamed from bdb/test/test057.tcl)0
-rw-r--r--storage/bdb/test/test058.tcl (renamed from bdb/test/test058.tcl)0
-rw-r--r--storage/bdb/test/test059.tcl (renamed from bdb/test/test059.tcl)0
-rw-r--r--storage/bdb/test/test060.tcl (renamed from bdb/test/test060.tcl)0
-rw-r--r--storage/bdb/test/test061.tcl (renamed from bdb/test/test061.tcl)0
-rw-r--r--storage/bdb/test/test062.tcl (renamed from bdb/test/test062.tcl)0
-rw-r--r--storage/bdb/test/test063.tcl (renamed from bdb/test/test063.tcl)0
-rw-r--r--storage/bdb/test/test064.tcl (renamed from bdb/test/test064.tcl)0
-rw-r--r--storage/bdb/test/test065.tcl (renamed from bdb/test/test065.tcl)0
-rw-r--r--storage/bdb/test/test066.tcl (renamed from bdb/test/test066.tcl)0
-rw-r--r--storage/bdb/test/test067.tcl (renamed from bdb/test/test067.tcl)0
-rw-r--r--storage/bdb/test/test068.tcl (renamed from bdb/test/test068.tcl)0
-rw-r--r--storage/bdb/test/test069.tcl (renamed from bdb/test/test069.tcl)0
-rw-r--r--storage/bdb/test/test070.tcl (renamed from bdb/test/test070.tcl)0
-rw-r--r--storage/bdb/test/test071.tcl (renamed from bdb/test/test071.tcl)0
-rw-r--r--storage/bdb/test/test072.tcl (renamed from bdb/test/test072.tcl)0
-rw-r--r--storage/bdb/test/test073.tcl (renamed from bdb/test/test073.tcl)0
-rw-r--r--storage/bdb/test/test074.tcl (renamed from bdb/test/test074.tcl)0
-rw-r--r--storage/bdb/test/test075.tcl (renamed from bdb/test/test075.tcl)0
-rw-r--r--storage/bdb/test/test076.tcl (renamed from bdb/test/test076.tcl)0
-rw-r--r--storage/bdb/test/test077.tcl (renamed from bdb/test/test077.tcl)0
-rw-r--r--storage/bdb/test/test078.tcl (renamed from bdb/test/test078.tcl)0
-rw-r--r--storage/bdb/test/test079.tcl (renamed from bdb/test/test079.tcl)0
-rw-r--r--storage/bdb/test/test080.tcl (renamed from bdb/test/test080.tcl)0
-rw-r--r--storage/bdb/test/test081.tcl (renamed from bdb/test/test081.tcl)0
-rw-r--r--storage/bdb/test/test082.tcl (renamed from bdb/test/test082.tcl)0
-rw-r--r--storage/bdb/test/test083.tcl (renamed from bdb/test/test083.tcl)0
-rw-r--r--storage/bdb/test/test084.tcl (renamed from bdb/test/test084.tcl)0
-rw-r--r--storage/bdb/test/test085.tcl (renamed from bdb/test/test085.tcl)0
-rw-r--r--storage/bdb/test/test086.tcl (renamed from bdb/test/test086.tcl)0
-rw-r--r--storage/bdb/test/test087.tcl (renamed from bdb/test/test087.tcl)0
-rw-r--r--storage/bdb/test/test088.tcl (renamed from bdb/test/test088.tcl)0
-rw-r--r--storage/bdb/test/test089.tcl (renamed from bdb/test/test089.tcl)0
-rw-r--r--storage/bdb/test/test090.tcl (renamed from bdb/test/test090.tcl)0
-rw-r--r--storage/bdb/test/test091.tcl (renamed from bdb/test/test091.tcl)0
-rw-r--r--storage/bdb/test/test092.tcl (renamed from bdb/test/test092.tcl)0
-rw-r--r--storage/bdb/test/test093.tcl (renamed from bdb/test/test093.tcl)0
-rw-r--r--storage/bdb/test/test094.tcl (renamed from bdb/test/test094.tcl)0
-rw-r--r--storage/bdb/test/test095.tcl (renamed from bdb/test/test095.tcl)0
-rw-r--r--storage/bdb/test/test096.tcl (renamed from bdb/test/test096.tcl)0
-rw-r--r--storage/bdb/test/test097.tcl (renamed from bdb/test/test097.tcl)0
-rw-r--r--storage/bdb/test/test098.tcl (renamed from bdb/test/test098.tcl)0
-rw-r--r--storage/bdb/test/test099.tcl (renamed from bdb/test/test099.tcl)0
-rw-r--r--storage/bdb/test/test100.tcl (renamed from bdb/test/test100.tcl)0
-rw-r--r--storage/bdb/test/test101.tcl (renamed from bdb/test/test101.tcl)0
-rw-r--r--storage/bdb/test/testparams.tcl (renamed from bdb/test/testparams.tcl)0
-rw-r--r--storage/bdb/test/testutils.tcl (renamed from bdb/test/testutils.tcl)0
-rw-r--r--storage/bdb/test/txn001.tcl (renamed from bdb/test/txn001.tcl)0
-rw-r--r--storage/bdb/test/txn002.tcl (renamed from bdb/test/txn002.tcl)0
-rw-r--r--storage/bdb/test/txn003.tcl (renamed from bdb/test/txn003.tcl)0
-rw-r--r--storage/bdb/test/txn004.tcl (renamed from bdb/test/txn004.tcl)0
-rw-r--r--storage/bdb/test/txn005.tcl (renamed from bdb/test/txn005.tcl)0
-rw-r--r--storage/bdb/test/txn006.tcl (renamed from bdb/test/txn006.tcl)0
-rw-r--r--storage/bdb/test/txn007.tcl (renamed from bdb/test/txn007.tcl)0
-rw-r--r--storage/bdb/test/txn008.tcl (renamed from bdb/test/txn008.tcl)0
-rw-r--r--storage/bdb/test/txn009.tcl (renamed from bdb/test/txn009.tcl)0
-rw-r--r--storage/bdb/test/txnscript.tcl (renamed from bdb/test/txnscript.tcl)0
-rw-r--r--storage/bdb/test/update.tcl (renamed from bdb/test/update.tcl)0
-rw-r--r--storage/bdb/test/upgrade.tcl (renamed from bdb/test/upgrade.tcl)0
-rw-r--r--storage/bdb/test/wordlist (renamed from bdb/test/wordlist)0
-rw-r--r--storage/bdb/test/wrap.tcl (renamed from bdb/test/wrap.tcl)0
-rw-r--r--storage/bdb/txn/txn.c (renamed from bdb/txn/txn.c)0
-rw-r--r--storage/bdb/txn/txn.src (renamed from bdb/txn/txn.src)0
-rw-r--r--storage/bdb/txn/txn_method.c (renamed from bdb/txn/txn_method.c)0
-rw-r--r--storage/bdb/txn/txn_rec.c (renamed from bdb/txn/txn_rec.c)0
-rw-r--r--storage/bdb/txn/txn_recover.c (renamed from bdb/txn/txn_recover.c)0
-rw-r--r--storage/bdb/txn/txn_region.c (renamed from bdb/txn/txn_region.c)0
-rw-r--r--storage/bdb/txn/txn_stat.c (renamed from bdb/txn/txn_stat.c)0
-rw-r--r--storage/bdb/txn/txn_util.c (renamed from bdb/txn/txn_util.c)0
-rw-r--r--storage/bdb/xa/xa.c (renamed from bdb/xa/xa.c)0
-rw-r--r--storage/bdb/xa/xa_db.c (renamed from bdb/xa/xa_db.c)0
-rw-r--r--storage/bdb/xa/xa_map.c (renamed from bdb/xa/xa_map.c)0
-rw-r--r--storage/heap/.cvsignore (renamed from heap/.cvsignore)0
-rw-r--r--storage/heap/ChangeLog (renamed from heap/ChangeLog)0
-rw-r--r--storage/heap/Makefile.am34
-rw-r--r--storage/heap/_check.c (renamed from heap/_check.c)0
-rw-r--r--storage/heap/_rectest.c (renamed from heap/_rectest.c)0
-rw-r--r--storage/heap/heapdef.h (renamed from heap/heapdef.h)0
-rw-r--r--storage/heap/hp_block.c (renamed from heap/hp_block.c)0
-rw-r--r--storage/heap/hp_clear.c (renamed from heap/hp_clear.c)0
-rw-r--r--storage/heap/hp_close.c (renamed from heap/hp_close.c)0
-rw-r--r--storage/heap/hp_create.c (renamed from heap/hp_create.c)0
-rw-r--r--storage/heap/hp_delete.c (renamed from heap/hp_delete.c)0
-rw-r--r--storage/heap/hp_extra.c (renamed from heap/hp_extra.c)0
-rw-r--r--storage/heap/hp_hash.c (renamed from heap/hp_hash.c)0
-rw-r--r--storage/heap/hp_info.c (renamed from heap/hp_info.c)0
-rw-r--r--storage/heap/hp_open.c (renamed from heap/hp_open.c)0
-rw-r--r--storage/heap/hp_panic.c (renamed from heap/hp_panic.c)0
-rw-r--r--storage/heap/hp_rename.c (renamed from heap/hp_rename.c)0
-rw-r--r--storage/heap/hp_rfirst.c (renamed from heap/hp_rfirst.c)0
-rw-r--r--storage/heap/hp_rkey.c (renamed from heap/hp_rkey.c)0
-rw-r--r--storage/heap/hp_rlast.c (renamed from heap/hp_rlast.c)0
-rw-r--r--storage/heap/hp_rnext.c (renamed from heap/hp_rnext.c)0
-rw-r--r--storage/heap/hp_rprev.c (renamed from heap/hp_rprev.c)0
-rw-r--r--storage/heap/hp_rrnd.c (renamed from heap/hp_rrnd.c)0
-rw-r--r--storage/heap/hp_rsame.c (renamed from heap/hp_rsame.c)0
-rw-r--r--storage/heap/hp_scan.c (renamed from heap/hp_scan.c)0
-rw-r--r--storage/heap/hp_static.c (renamed from heap/hp_static.c)0
-rw-r--r--storage/heap/hp_test1.c (renamed from heap/hp_test1.c)0
-rw-r--r--storage/heap/hp_test2.c (renamed from heap/hp_test2.c)0
-rw-r--r--storage/heap/hp_update.c (renamed from heap/hp_update.c)0
-rw-r--r--storage/heap/hp_write.c (renamed from heap/hp_write.c)0
-rwxr-xr-xstorage/heap/make-ccc (renamed from heap/make-ccc)0
-rw-r--r--storage/innobase/Makefile.am (renamed from innobase/Makefile.am)0
-rw-r--r--storage/innobase/btr/Makefile.am (renamed from innobase/btr/Makefile.am)0
-rw-r--r--storage/innobase/btr/btr0btr.c (renamed from innobase/btr/btr0btr.c)0
-rw-r--r--storage/innobase/btr/btr0cur.c (renamed from innobase/btr/btr0cur.c)0
-rw-r--r--storage/innobase/btr/btr0pcur.c (renamed from innobase/btr/btr0pcur.c)0
-rw-r--r--storage/innobase/btr/btr0sea.c (renamed from innobase/btr/btr0sea.c)0
-rw-r--r--storage/innobase/btr/makefilewin (renamed from innobase/btr/makefilewin)0
-rw-r--r--storage/innobase/buf/Makefile.am (renamed from innobase/buf/Makefile.am)0
-rw-r--r--storage/innobase/buf/buf0buf.c (renamed from innobase/buf/buf0buf.c)0
-rw-r--r--storage/innobase/buf/buf0flu.c (renamed from innobase/buf/buf0flu.c)0
-rw-r--r--storage/innobase/buf/buf0lru.c (renamed from innobase/buf/buf0lru.c)0
-rw-r--r--storage/innobase/buf/buf0rea.c (renamed from innobase/buf/buf0rea.c)0
-rw-r--r--storage/innobase/buf/makefilewin (renamed from innobase/buf/makefilewin)0
-rw-r--r--storage/innobase/configure.in (renamed from innobase/configure.in)0
-rw-r--r--storage/innobase/data/Makefile.am (renamed from innobase/data/Makefile.am)0
-rw-r--r--storage/innobase/data/data0data.c (renamed from innobase/data/data0data.c)0
-rw-r--r--storage/innobase/data/data0type.c (renamed from innobase/data/data0type.c)0
-rw-r--r--storage/innobase/data/makefilewin (renamed from innobase/data/makefilewin)0
-rw-r--r--storage/innobase/db/db0err.h (renamed from innobase/db/db0err.h)0
-rw-r--r--storage/innobase/dict/Makefile.am (renamed from innobase/dict/Makefile.am)0
-rw-r--r--storage/innobase/dict/dict0boot.c (renamed from innobase/dict/dict0boot.c)0
-rw-r--r--storage/innobase/dict/dict0crea.c (renamed from innobase/dict/dict0crea.c)0
-rw-r--r--storage/innobase/dict/dict0dict.c (renamed from innobase/dict/dict0dict.c)0
-rw-r--r--storage/innobase/dict/dict0load.c (renamed from innobase/dict/dict0load.c)0
-rw-r--r--storage/innobase/dict/dict0mem.c (renamed from innobase/dict/dict0mem.c)0
-rw-r--r--storage/innobase/dict/makefilewin (renamed from innobase/dict/makefilewin)0
-rw-r--r--storage/innobase/dyn/Makefile.am (renamed from innobase/dyn/Makefile.am)0
-rw-r--r--storage/innobase/dyn/dyn0dyn.c (renamed from innobase/dyn/dyn0dyn.c)0
-rw-r--r--storage/innobase/dyn/makefilewin (renamed from innobase/dyn/makefilewin)0
-rw-r--r--storage/innobase/eval/Makefile.am (renamed from innobase/eval/Makefile.am)0
-rw-r--r--storage/innobase/eval/eval0eval.c (renamed from innobase/eval/eval0eval.c)0
-rw-r--r--storage/innobase/eval/eval0proc.c (renamed from innobase/eval/eval0proc.c)0
-rw-r--r--storage/innobase/eval/makefilewin (renamed from innobase/eval/makefilewin)0
-rw-r--r--storage/innobase/fil/Makefile.am (renamed from innobase/fil/Makefile.am)0
-rw-r--r--storage/innobase/fil/fil0fil.c (renamed from innobase/fil/fil0fil.c)0
-rw-r--r--storage/innobase/fil/makefilewin (renamed from innobase/fil/makefilewin)0
-rw-r--r--storage/innobase/fsp/Makefile.am (renamed from innobase/fsp/Makefile.am)0
-rw-r--r--storage/innobase/fsp/fsp0fsp.c (renamed from innobase/fsp/fsp0fsp.c)0
-rw-r--r--storage/innobase/fsp/makefilewin (renamed from innobase/fsp/makefilewin)0
-rw-r--r--storage/innobase/fut/Makefile.am (renamed from innobase/fut/Makefile.am)0
-rw-r--r--storage/innobase/fut/fut0fut.c (renamed from innobase/fut/fut0fut.c)0
-rw-r--r--storage/innobase/fut/fut0lst.c (renamed from innobase/fut/fut0lst.c)0
-rw-r--r--storage/innobase/fut/makefilewin (renamed from innobase/fut/makefilewin)0
-rw-r--r--storage/innobase/ha/Makefile.am (renamed from innobase/ha/Makefile.am)0
-rw-r--r--storage/innobase/ha/ha0ha.c (renamed from innobase/ha/ha0ha.c)0
-rw-r--r--storage/innobase/ha/hash0hash.c (renamed from innobase/ha/hash0hash.c)0
-rw-r--r--storage/innobase/ha/makefilewin (renamed from innobase/ha/makefilewin)0
-rw-r--r--storage/innobase/ibuf/Makefile.am (renamed from innobase/ibuf/Makefile.am)0
-rw-r--r--storage/innobase/ibuf/ibuf0ibuf.c (renamed from innobase/ibuf/ibuf0ibuf.c)0
-rw-r--r--storage/innobase/ibuf/makefilewin (renamed from innobase/ibuf/makefilewin)0
-rw-r--r--storage/innobase/include/Makefile.am (renamed from innobase/include/Makefile.am)0
-rw-r--r--storage/innobase/include/Makefile.i6
-rw-r--r--storage/innobase/include/btr0btr.h (renamed from innobase/include/btr0btr.h)0
-rw-r--r--storage/innobase/include/btr0btr.ic (renamed from innobase/include/btr0btr.ic)0
-rw-r--r--storage/innobase/include/btr0cur.h (renamed from innobase/include/btr0cur.h)0
-rw-r--r--storage/innobase/include/btr0cur.ic (renamed from innobase/include/btr0cur.ic)0
-rw-r--r--storage/innobase/include/btr0pcur.h (renamed from innobase/include/btr0pcur.h)0
-rw-r--r--storage/innobase/include/btr0pcur.ic (renamed from innobase/include/btr0pcur.ic)0
-rw-r--r--storage/innobase/include/btr0sea.h (renamed from innobase/include/btr0sea.h)0
-rw-r--r--storage/innobase/include/btr0sea.ic (renamed from innobase/include/btr0sea.ic)0
-rw-r--r--storage/innobase/include/btr0types.h (renamed from innobase/include/btr0types.h)0
-rw-r--r--storage/innobase/include/buf0buf.h (renamed from innobase/include/buf0buf.h)0
-rw-r--r--storage/innobase/include/buf0buf.ic (renamed from innobase/include/buf0buf.ic)0
-rw-r--r--storage/innobase/include/buf0flu.h (renamed from innobase/include/buf0flu.h)0
-rw-r--r--storage/innobase/include/buf0flu.ic (renamed from innobase/include/buf0flu.ic)0
-rw-r--r--storage/innobase/include/buf0lru.h (renamed from innobase/include/buf0lru.h)0
-rw-r--r--storage/innobase/include/buf0lru.ic (renamed from innobase/include/buf0lru.ic)0
-rw-r--r--storage/innobase/include/buf0rea.h (renamed from innobase/include/buf0rea.h)0
-rw-r--r--storage/innobase/include/buf0types.h (renamed from innobase/include/buf0types.h)0
-rw-r--r--storage/innobase/include/data0data.h (renamed from innobase/include/data0data.h)0
-rw-r--r--storage/innobase/include/data0data.ic (renamed from innobase/include/data0data.ic)0
-rw-r--r--storage/innobase/include/data0type.h (renamed from innobase/include/data0type.h)0
-rw-r--r--storage/innobase/include/data0type.ic (renamed from innobase/include/data0type.ic)0
-rw-r--r--storage/innobase/include/data0types.h (renamed from innobase/include/data0types.h)0
-rw-r--r--storage/innobase/include/db0err.h (renamed from innobase/include/db0err.h)0
-rw-r--r--storage/innobase/include/dict0boot.h (renamed from innobase/include/dict0boot.h)0
-rw-r--r--storage/innobase/include/dict0boot.ic (renamed from innobase/include/dict0boot.ic)0
-rw-r--r--storage/innobase/include/dict0crea.h (renamed from innobase/include/dict0crea.h)0
-rw-r--r--storage/innobase/include/dict0crea.ic (renamed from innobase/include/dict0crea.ic)0
-rw-r--r--storage/innobase/include/dict0dict.h (renamed from innobase/include/dict0dict.h)0
-rw-r--r--storage/innobase/include/dict0dict.ic (renamed from innobase/include/dict0dict.ic)0
-rw-r--r--storage/innobase/include/dict0load.h (renamed from innobase/include/dict0load.h)0
-rw-r--r--storage/innobase/include/dict0load.ic (renamed from innobase/include/dict0load.ic)0
-rw-r--r--storage/innobase/include/dict0mem.h (renamed from innobase/include/dict0mem.h)0
-rw-r--r--storage/innobase/include/dict0mem.ic (renamed from innobase/include/dict0mem.ic)0
-rw-r--r--storage/innobase/include/dict0types.h (renamed from innobase/include/dict0types.h)0
-rw-r--r--storage/innobase/include/dyn0dyn.h (renamed from innobase/include/dyn0dyn.h)0
-rw-r--r--storage/innobase/include/dyn0dyn.ic (renamed from innobase/include/dyn0dyn.ic)0
-rw-r--r--storage/innobase/include/eval0eval.h (renamed from innobase/include/eval0eval.h)0
-rw-r--r--storage/innobase/include/eval0eval.ic (renamed from innobase/include/eval0eval.ic)0
-rw-r--r--storage/innobase/include/eval0proc.h (renamed from innobase/include/eval0proc.h)0
-rw-r--r--storage/innobase/include/eval0proc.ic (renamed from innobase/include/eval0proc.ic)0
-rw-r--r--storage/innobase/include/fil0fil.h (renamed from innobase/include/fil0fil.h)0
-rw-r--r--storage/innobase/include/fsp0fsp.h (renamed from innobase/include/fsp0fsp.h)0
-rw-r--r--storage/innobase/include/fsp0fsp.ic (renamed from innobase/include/fsp0fsp.ic)0
-rw-r--r--storage/innobase/include/fut0fut.h (renamed from innobase/include/fut0fut.h)0
-rw-r--r--storage/innobase/include/fut0fut.ic (renamed from innobase/include/fut0fut.ic)0
-rw-r--r--storage/innobase/include/fut0lst.h (renamed from innobase/include/fut0lst.h)0
-rw-r--r--storage/innobase/include/fut0lst.ic (renamed from innobase/include/fut0lst.ic)0
-rw-r--r--storage/innobase/include/ha0ha.h (renamed from innobase/include/ha0ha.h)0
-rw-r--r--storage/innobase/include/ha0ha.ic (renamed from innobase/include/ha0ha.ic)0
-rw-r--r--storage/innobase/include/hash0hash.h (renamed from innobase/include/hash0hash.h)0
-rw-r--r--storage/innobase/include/hash0hash.ic (renamed from innobase/include/hash0hash.ic)0
-rw-r--r--storage/innobase/include/ibuf0ibuf.h (renamed from innobase/include/ibuf0ibuf.h)0
-rw-r--r--storage/innobase/include/ibuf0ibuf.ic (renamed from innobase/include/ibuf0ibuf.ic)0
-rw-r--r--storage/innobase/include/ibuf0types.h (renamed from innobase/include/ibuf0types.h)0
-rw-r--r--storage/innobase/include/lock0lock.h (renamed from innobase/include/lock0lock.h)0
-rw-r--r--storage/innobase/include/lock0lock.ic (renamed from innobase/include/lock0lock.ic)0
-rw-r--r--storage/innobase/include/lock0types.h (renamed from innobase/include/lock0types.h)0
-rw-r--r--storage/innobase/include/log0log.h (renamed from innobase/include/log0log.h)0
-rw-r--r--storage/innobase/include/log0log.ic (renamed from innobase/include/log0log.ic)0
-rw-r--r--storage/innobase/include/log0recv.h (renamed from innobase/include/log0recv.h)0
-rw-r--r--storage/innobase/include/log0recv.ic (renamed from innobase/include/log0recv.ic)0
-rw-r--r--storage/innobase/include/mach0data.h (renamed from innobase/include/mach0data.h)0
-rw-r--r--storage/innobase/include/mach0data.ic (renamed from innobase/include/mach0data.ic)0
-rw-r--r--storage/innobase/include/makefilewin.i (renamed from innobase/include/makefilewin.i)0
-rw-r--r--storage/innobase/include/mem0dbg.h (renamed from innobase/include/mem0dbg.h)0
-rw-r--r--storage/innobase/include/mem0dbg.ic (renamed from innobase/include/mem0dbg.ic)0
-rw-r--r--storage/innobase/include/mem0mem.h (renamed from innobase/include/mem0mem.h)0
-rw-r--r--storage/innobase/include/mem0mem.ic (renamed from innobase/include/mem0mem.ic)0
-rw-r--r--storage/innobase/include/mem0pool.h (renamed from innobase/include/mem0pool.h)0
-rw-r--r--storage/innobase/include/mem0pool.ic (renamed from innobase/include/mem0pool.ic)0
-rw-r--r--storage/innobase/include/mtr0log.h (renamed from innobase/include/mtr0log.h)0
-rw-r--r--storage/innobase/include/mtr0log.ic (renamed from innobase/include/mtr0log.ic)0
-rw-r--r--storage/innobase/include/mtr0mtr.h (renamed from innobase/include/mtr0mtr.h)0
-rw-r--r--storage/innobase/include/mtr0mtr.ic (renamed from innobase/include/mtr0mtr.ic)0
-rw-r--r--storage/innobase/include/mtr0types.h (renamed from innobase/include/mtr0types.h)0
-rw-r--r--storage/innobase/include/os0file.h (renamed from innobase/include/os0file.h)0
-rw-r--r--storage/innobase/include/os0proc.h (renamed from innobase/include/os0proc.h)0
-rw-r--r--storage/innobase/include/os0proc.ic (renamed from innobase/include/os0proc.ic)0
-rw-r--r--storage/innobase/include/os0sync.h (renamed from innobase/include/os0sync.h)0
-rw-r--r--storage/innobase/include/os0sync.ic (renamed from innobase/include/os0sync.ic)0
-rw-r--r--storage/innobase/include/os0thread.h (renamed from innobase/include/os0thread.h)0
-rw-r--r--storage/innobase/include/os0thread.ic (renamed from innobase/include/os0thread.ic)0
-rw-r--r--storage/innobase/include/page0cur.h (renamed from innobase/include/page0cur.h)0
-rw-r--r--storage/innobase/include/page0cur.ic (renamed from innobase/include/page0cur.ic)0
-rw-r--r--storage/innobase/include/page0page.h (renamed from innobase/include/page0page.h)0
-rw-r--r--storage/innobase/include/page0page.ic (renamed from innobase/include/page0page.ic)0
-rw-r--r--storage/innobase/include/page0types.h (renamed from innobase/include/page0types.h)0
-rw-r--r--storage/innobase/include/pars0grm.h (renamed from innobase/include/pars0grm.h)0
-rw-r--r--storage/innobase/include/pars0opt.h (renamed from innobase/include/pars0opt.h)0
-rw-r--r--storage/innobase/include/pars0opt.ic (renamed from innobase/include/pars0opt.ic)0
-rw-r--r--storage/innobase/include/pars0pars.h (renamed from innobase/include/pars0pars.h)0
-rw-r--r--storage/innobase/include/pars0pars.ic (renamed from innobase/include/pars0pars.ic)0
-rw-r--r--storage/innobase/include/pars0sym.h (renamed from innobase/include/pars0sym.h)0
-rw-r--r--storage/innobase/include/pars0sym.ic (renamed from innobase/include/pars0sym.ic)0
-rw-r--r--storage/innobase/include/pars0types.h (renamed from innobase/include/pars0types.h)0
-rw-r--r--storage/innobase/include/que0que.h (renamed from innobase/include/que0que.h)0
-rw-r--r--storage/innobase/include/que0que.ic (renamed from innobase/include/que0que.ic)0
-rw-r--r--storage/innobase/include/que0types.h (renamed from innobase/include/que0types.h)0
-rw-r--r--storage/innobase/include/read0read.h (renamed from innobase/include/read0read.h)0
-rw-r--r--storage/innobase/include/read0read.ic (renamed from innobase/include/read0read.ic)0
-rw-r--r--storage/innobase/include/read0types.h (renamed from innobase/include/read0types.h)0
-rw-r--r--storage/innobase/include/rem0cmp.h (renamed from innobase/include/rem0cmp.h)0
-rw-r--r--storage/innobase/include/rem0cmp.ic (renamed from innobase/include/rem0cmp.ic)0
-rw-r--r--storage/innobase/include/rem0rec.h (renamed from innobase/include/rem0rec.h)0
-rw-r--r--storage/innobase/include/rem0rec.ic (renamed from innobase/include/rem0rec.ic)0
-rw-r--r--storage/innobase/include/rem0types.h (renamed from innobase/include/rem0types.h)0
-rw-r--r--storage/innobase/include/row0ins.h (renamed from innobase/include/row0ins.h)0
-rw-r--r--storage/innobase/include/row0ins.ic (renamed from innobase/include/row0ins.ic)0
-rw-r--r--storage/innobase/include/row0mysql.h (renamed from innobase/include/row0mysql.h)0
-rw-r--r--storage/innobase/include/row0mysql.ic (renamed from innobase/include/row0mysql.ic)0
-rw-r--r--storage/innobase/include/row0purge.h (renamed from innobase/include/row0purge.h)0
-rw-r--r--storage/innobase/include/row0purge.ic (renamed from innobase/include/row0purge.ic)0
-rw-r--r--storage/innobase/include/row0row.h (renamed from innobase/include/row0row.h)0
-rw-r--r--storage/innobase/include/row0row.ic (renamed from innobase/include/row0row.ic)0
-rw-r--r--storage/innobase/include/row0sel.h (renamed from innobase/include/row0sel.h)0
-rw-r--r--storage/innobase/include/row0sel.ic (renamed from innobase/include/row0sel.ic)0
-rw-r--r--storage/innobase/include/row0types.h (renamed from innobase/include/row0types.h)0
-rw-r--r--storage/innobase/include/row0uins.h (renamed from innobase/include/row0uins.h)0
-rw-r--r--storage/innobase/include/row0uins.ic (renamed from innobase/include/row0uins.ic)0
-rw-r--r--storage/innobase/include/row0umod.h (renamed from innobase/include/row0umod.h)0
-rw-r--r--storage/innobase/include/row0umod.ic (renamed from innobase/include/row0umod.ic)0
-rw-r--r--storage/innobase/include/row0undo.h (renamed from innobase/include/row0undo.h)0
-rw-r--r--storage/innobase/include/row0undo.ic (renamed from innobase/include/row0undo.ic)0
-rw-r--r--storage/innobase/include/row0upd.h (renamed from innobase/include/row0upd.h)0
-rw-r--r--storage/innobase/include/row0upd.ic (renamed from innobase/include/row0upd.ic)0
-rw-r--r--storage/innobase/include/row0vers.h (renamed from innobase/include/row0vers.h)0
-rw-r--r--storage/innobase/include/row0vers.ic (renamed from innobase/include/row0vers.ic)0
-rw-r--r--storage/innobase/include/srv0que.h (renamed from innobase/include/srv0que.h)0
-rw-r--r--storage/innobase/include/srv0srv.h (renamed from innobase/include/srv0srv.h)0
-rw-r--r--storage/innobase/include/srv0srv.ic (renamed from innobase/include/srv0srv.ic)0
-rw-r--r--storage/innobase/include/srv0start.h (renamed from innobase/include/srv0start.h)0
-rw-r--r--storage/innobase/include/sync0arr.h (renamed from innobase/include/sync0arr.h)0
-rw-r--r--storage/innobase/include/sync0arr.ic (renamed from innobase/include/sync0arr.ic)0
-rw-r--r--storage/innobase/include/sync0rw.h (renamed from innobase/include/sync0rw.h)0
-rw-r--r--storage/innobase/include/sync0rw.ic (renamed from innobase/include/sync0rw.ic)0
-rw-r--r--storage/innobase/include/sync0sync.h (renamed from innobase/include/sync0sync.h)0
-rw-r--r--storage/innobase/include/sync0sync.ic (renamed from innobase/include/sync0sync.ic)0
-rw-r--r--storage/innobase/include/sync0types.h (renamed from innobase/include/sync0types.h)0
-rw-r--r--storage/innobase/include/thr0loc.h (renamed from innobase/include/thr0loc.h)0
-rw-r--r--storage/innobase/include/thr0loc.ic (renamed from innobase/include/thr0loc.ic)0
-rw-r--r--storage/innobase/include/trx0purge.h (renamed from innobase/include/trx0purge.h)0
-rw-r--r--storage/innobase/include/trx0purge.ic (renamed from innobase/include/trx0purge.ic)0
-rw-r--r--storage/innobase/include/trx0rec.h (renamed from innobase/include/trx0rec.h)0
-rw-r--r--storage/innobase/include/trx0rec.ic (renamed from innobase/include/trx0rec.ic)0
-rw-r--r--storage/innobase/include/trx0roll.h (renamed from innobase/include/trx0roll.h)0
-rw-r--r--storage/innobase/include/trx0roll.ic (renamed from innobase/include/trx0roll.ic)0
-rw-r--r--storage/innobase/include/trx0rseg.h (renamed from innobase/include/trx0rseg.h)0
-rw-r--r--storage/innobase/include/trx0rseg.ic (renamed from innobase/include/trx0rseg.ic)0
-rw-r--r--storage/innobase/include/trx0sys.h (renamed from innobase/include/trx0sys.h)0
-rw-r--r--storage/innobase/include/trx0sys.ic (renamed from innobase/include/trx0sys.ic)0
-rw-r--r--storage/innobase/include/trx0trx.h (renamed from innobase/include/trx0trx.h)0
-rw-r--r--storage/innobase/include/trx0trx.ic (renamed from innobase/include/trx0trx.ic)0
-rw-r--r--storage/innobase/include/trx0types.h (renamed from innobase/include/trx0types.h)0
-rw-r--r--storage/innobase/include/trx0undo.h (renamed from innobase/include/trx0undo.h)0
-rw-r--r--storage/innobase/include/trx0undo.ic (renamed from innobase/include/trx0undo.ic)0
-rw-r--r--storage/innobase/include/trx0xa.h (renamed from innobase/include/trx0xa.h)0
-rw-r--r--storage/innobase/include/univ.i (renamed from innobase/include/univ.i)0
-rw-r--r--storage/innobase/include/usr0sess.h (renamed from innobase/include/usr0sess.h)0
-rw-r--r--storage/innobase/include/usr0sess.ic (renamed from innobase/include/usr0sess.ic)0
-rw-r--r--storage/innobase/include/usr0types.h (renamed from innobase/include/usr0types.h)0
-rw-r--r--storage/innobase/include/ut0byte.h (renamed from innobase/include/ut0byte.h)0
-rw-r--r--storage/innobase/include/ut0byte.ic (renamed from innobase/include/ut0byte.ic)0
-rw-r--r--storage/innobase/include/ut0dbg.h (renamed from innobase/include/ut0dbg.h)0
-rw-r--r--storage/innobase/include/ut0lst.h (renamed from innobase/include/ut0lst.h)0
-rw-r--r--storage/innobase/include/ut0mem.h (renamed from innobase/include/ut0mem.h)0
-rw-r--r--storage/innobase/include/ut0mem.ic (renamed from innobase/include/ut0mem.ic)0
-rw-r--r--storage/innobase/include/ut0rnd.h (renamed from innobase/include/ut0rnd.h)0
-rw-r--r--storage/innobase/include/ut0rnd.ic (renamed from innobase/include/ut0rnd.ic)0
-rw-r--r--storage/innobase/include/ut0sort.h (renamed from innobase/include/ut0sort.h)0
-rw-r--r--storage/innobase/include/ut0ut.h (renamed from innobase/include/ut0ut.h)0
-rw-r--r--storage/innobase/include/ut0ut.ic (renamed from innobase/include/ut0ut.ic)0
-rw-r--r--storage/innobase/lock/Makefile.am (renamed from innobase/lock/Makefile.am)0
-rw-r--r--storage/innobase/lock/lock0lock.c (renamed from innobase/lock/lock0lock.c)0
-rw-r--r--storage/innobase/lock/makefilewin (renamed from innobase/lock/makefilewin)0
-rw-r--r--storage/innobase/log/Makefile.am (renamed from innobase/log/Makefile.am)0
-rw-r--r--storage/innobase/log/log0log.c (renamed from innobase/log/log0log.c)0
-rw-r--r--storage/innobase/log/log0recv.c (renamed from innobase/log/log0recv.c)0
-rw-r--r--storage/innobase/log/makefilewin (renamed from innobase/log/makefilewin)0
-rw-r--r--storage/innobase/mach/Makefile.am (renamed from innobase/mach/Makefile.am)0
-rw-r--r--storage/innobase/mach/mach0data.c (renamed from innobase/mach/mach0data.c)0
-rw-r--r--storage/innobase/mach/makefilewin (renamed from innobase/mach/makefilewin)0
-rw-r--r--storage/innobase/makefilewin (renamed from innobase/makefilewin)0
-rw-r--r--storage/innobase/mem/Makefile.am (renamed from innobase/mem/Makefile.am)0
-rw-r--r--storage/innobase/mem/makefilewin (renamed from innobase/mem/makefilewin)0
-rw-r--r--storage/innobase/mem/mem0dbg.c (renamed from innobase/mem/mem0dbg.c)0
-rw-r--r--storage/innobase/mem/mem0mem.c (renamed from innobase/mem/mem0mem.c)0
-rw-r--r--storage/innobase/mem/mem0pool.c (renamed from innobase/mem/mem0pool.c)0
-rw-r--r--storage/innobase/mtr/Makefile.am (renamed from innobase/mtr/Makefile.am)0
-rw-r--r--storage/innobase/mtr/makefilewin (renamed from innobase/mtr/makefilewin)0
-rw-r--r--storage/innobase/mtr/mtr0log.c (renamed from innobase/mtr/mtr0log.c)0
-rw-r--r--storage/innobase/mtr/mtr0mtr.c (renamed from innobase/mtr/mtr0mtr.c)0
-rw-r--r--storage/innobase/my_cnf (renamed from innobase/my_cnf)0
-rw-r--r--storage/innobase/os/Makefile.am (renamed from innobase/os/Makefile.am)0
-rw-r--r--storage/innobase/os/makefilewin (renamed from innobase/os/makefilewin)0
-rw-r--r--storage/innobase/os/os0file.c (renamed from innobase/os/os0file.c)0
-rw-r--r--storage/innobase/os/os0proc.c (renamed from innobase/os/os0proc.c)0
-rw-r--r--storage/innobase/os/os0sync.c (renamed from innobase/os/os0sync.c)0
-rw-r--r--storage/innobase/os/os0thread.c (renamed from innobase/os/os0thread.c)0
-rw-r--r--storage/innobase/page/Makefile.am (renamed from innobase/page/Makefile.am)0
-rw-r--r--storage/innobase/page/makefilewin (renamed from innobase/page/makefilewin)0
-rw-r--r--storage/innobase/page/page0cur.c (renamed from innobase/page/page0cur.c)0
-rw-r--r--storage/innobase/page/page0page.c (renamed from innobase/page/page0page.c)0
-rw-r--r--storage/innobase/pars/Makefile.am (renamed from innobase/pars/Makefile.am)0
-rw-r--r--storage/innobase/pars/lexyy.c (renamed from innobase/pars/lexyy.c)0
-rw-r--r--storage/innobase/pars/makefilewin (renamed from innobase/pars/makefilewin)0
-rw-r--r--storage/innobase/pars/pars0grm.c (renamed from innobase/pars/pars0grm.c)0
-rw-r--r--storage/innobase/pars/pars0grm.h (renamed from innobase/pars/pars0grm.h)0
-rw-r--r--storage/innobase/pars/pars0grm.y (renamed from innobase/pars/pars0grm.y)0
-rw-r--r--storage/innobase/pars/pars0lex.l (renamed from innobase/pars/pars0lex.l)0
-rw-r--r--storage/innobase/pars/pars0opt.c (renamed from innobase/pars/pars0opt.c)0
-rw-r--r--storage/innobase/pars/pars0pars.c (renamed from innobase/pars/pars0pars.c)0
-rw-r--r--storage/innobase/pars/pars0sym.c (renamed from innobase/pars/pars0sym.c)0
-rw-r--r--storage/innobase/que/Makefile.am (renamed from innobase/que/Makefile.am)0
-rw-r--r--storage/innobase/que/makefilewin (renamed from innobase/que/makefilewin)0
-rw-r--r--storage/innobase/que/que0que.c (renamed from innobase/que/que0que.c)0
-rw-r--r--storage/innobase/read/Makefile.am (renamed from innobase/read/Makefile.am)0
-rw-r--r--storage/innobase/read/makefilewin (renamed from innobase/read/makefilewin)0
-rw-r--r--storage/innobase/read/read0read.c (renamed from innobase/read/read0read.c)0
-rw-r--r--storage/innobase/rem/Makefile.am (renamed from innobase/rem/Makefile.am)0
-rw-r--r--storage/innobase/rem/makefilewin (renamed from innobase/rem/makefilewin)0
-rw-r--r--storage/innobase/rem/rem0cmp.c (renamed from innobase/rem/rem0cmp.c)0
-rw-r--r--storage/innobase/rem/rem0rec.c (renamed from innobase/rem/rem0rec.c)0
-rw-r--r--storage/innobase/row/Makefile.am (renamed from innobase/row/Makefile.am)0
-rw-r--r--storage/innobase/row/makefilewin (renamed from innobase/row/makefilewin)0
-rw-r--r--storage/innobase/row/row0ins.c (renamed from innobase/row/row0ins.c)0
-rw-r--r--storage/innobase/row/row0mysql.c (renamed from innobase/row/row0mysql.c)0
-rw-r--r--storage/innobase/row/row0purge.c (renamed from innobase/row/row0purge.c)0
-rw-r--r--storage/innobase/row/row0row.c (renamed from innobase/row/row0row.c)0
-rw-r--r--storage/innobase/row/row0sel.c (renamed from innobase/row/row0sel.c)0
-rw-r--r--storage/innobase/row/row0uins.c (renamed from innobase/row/row0uins.c)0
-rw-r--r--storage/innobase/row/row0umod.c (renamed from innobase/row/row0umod.c)0
-rw-r--r--storage/innobase/row/row0undo.c (renamed from innobase/row/row0undo.c)0
-rw-r--r--storage/innobase/row/row0upd.c (renamed from innobase/row/row0upd.c)0
-rw-r--r--storage/innobase/row/row0vers.c (renamed from innobase/row/row0vers.c)0
-rw-r--r--storage/innobase/srv/Makefile.am (renamed from innobase/srv/Makefile.am)0
-rw-r--r--storage/innobase/srv/makefilewin (renamed from innobase/srv/makefilewin)0
-rw-r--r--storage/innobase/srv/srv0que.c (renamed from innobase/srv/srv0que.c)0
-rw-r--r--storage/innobase/srv/srv0srv.c (renamed from innobase/srv/srv0srv.c)0
-rw-r--r--storage/innobase/srv/srv0start.c (renamed from innobase/srv/srv0start.c)0
-rw-r--r--storage/innobase/sync/Makefile.am (renamed from innobase/sync/Makefile.am)0
-rw-r--r--storage/innobase/sync/makefilewin (renamed from innobase/sync/makefilewin)0
-rw-r--r--storage/innobase/sync/sync0arr.c (renamed from innobase/sync/sync0arr.c)0
-rw-r--r--storage/innobase/sync/sync0rw.c (renamed from innobase/sync/sync0rw.c)0
-rw-r--r--storage/innobase/sync/sync0sync.c (renamed from innobase/sync/sync0sync.c)0
-rw-r--r--storage/innobase/thr/Makefile.am (renamed from innobase/thr/Makefile.am)0
-rw-r--r--storage/innobase/thr/makefilewin (renamed from innobase/thr/makefilewin)0
-rw-r--r--storage/innobase/thr/thr0loc.c (renamed from innobase/thr/thr0loc.c)0
-rw-r--r--storage/innobase/trx/Makefile.am (renamed from innobase/trx/Makefile.am)0
-rw-r--r--storage/innobase/trx/makefilewin (renamed from innobase/trx/makefilewin)0
-rw-r--r--storage/innobase/trx/trx0purge.c (renamed from innobase/trx/trx0purge.c)0
-rw-r--r--storage/innobase/trx/trx0rec.c (renamed from innobase/trx/trx0rec.c)0
-rw-r--r--storage/innobase/trx/trx0roll.c (renamed from innobase/trx/trx0roll.c)0
-rw-r--r--storage/innobase/trx/trx0rseg.c (renamed from innobase/trx/trx0rseg.c)0
-rw-r--r--storage/innobase/trx/trx0sys.c (renamed from innobase/trx/trx0sys.c)0
-rw-r--r--storage/innobase/trx/trx0trx.c (renamed from innobase/trx/trx0trx.c)0
-rw-r--r--storage/innobase/trx/trx0undo.c (renamed from innobase/trx/trx0undo.c)0
-rw-r--r--storage/innobase/usr/Makefile.am (renamed from innobase/usr/Makefile.am)0
-rw-r--r--storage/innobase/usr/makefilewin (renamed from innobase/usr/makefilewin)0
-rw-r--r--storage/innobase/usr/usr0sess.c (renamed from innobase/usr/usr0sess.c)0
-rw-r--r--storage/innobase/ut/Makefile.am (renamed from innobase/ut/Makefile.am)0
-rw-r--r--storage/innobase/ut/makefilewin (renamed from innobase/ut/makefilewin)0
-rw-r--r--storage/innobase/ut/ut0byte.c (renamed from innobase/ut/ut0byte.c)0
-rw-r--r--storage/innobase/ut/ut0dbg.c (renamed from innobase/ut/ut0dbg.c)0
-rw-r--r--storage/innobase/ut/ut0mem.c (renamed from innobase/ut/ut0mem.c)0
-rw-r--r--storage/innobase/ut/ut0rnd.c (renamed from innobase/ut/ut0rnd.c)0
-rw-r--r--storage/innobase/ut/ut0ut.c (renamed from innobase/ut/ut0ut.c)0
-rw-r--r--storage/myisam/.cvsignore (renamed from myisam/.cvsignore)0
-rw-r--r--storage/myisam/ChangeLog (renamed from myisam/ChangeLog)0
-rw-r--r--storage/myisam/Makefile.am (renamed from myisam/Makefile.am)0
-rw-r--r--storage/myisam/NEWS (renamed from myisam/NEWS)0
-rw-r--r--storage/myisam/TODO (renamed from myisam/TODO)0
-rw-r--r--storage/myisam/ft_boolean_search.c772
-rw-r--r--storage/myisam/ft_eval.c (renamed from myisam/ft_eval.c)0
-rw-r--r--storage/myisam/ft_eval.h (renamed from myisam/ft_eval.h)0
-rw-r--r--storage/myisam/ft_nlq_search.c (renamed from myisam/ft_nlq_search.c)0
-rw-r--r--storage/myisam/ft_parser.c (renamed from myisam/ft_parser.c)0
-rw-r--r--storage/myisam/ft_static.c (renamed from myisam/ft_static.c)0
-rw-r--r--storage/myisam/ft_stem.c (renamed from myisam/ft_stem.c)0
-rw-r--r--storage/myisam/ft_stopwords.c (renamed from myisam/ft_stopwords.c)0
-rw-r--r--storage/myisam/ft_test1.c (renamed from myisam/ft_test1.c)0
-rw-r--r--storage/myisam/ft_test1.h (renamed from myisam/ft_test1.h)0
-rw-r--r--storage/myisam/ft_update.c (renamed from myisam/ft_update.c)0
-rwxr-xr-xstorage/myisam/ftbench/Ecompare.pl (renamed from myisam/ftbench/Ecompare.pl)0
-rwxr-xr-xstorage/myisam/ftbench/Ecreate.pl (renamed from myisam/ftbench/Ecreate.pl)0
-rwxr-xr-xstorage/myisam/ftbench/Ereport.pl (renamed from myisam/ftbench/Ereport.pl)0
-rw-r--r--storage/myisam/ftbench/README (renamed from myisam/ftbench/README)0
-rwxr-xr-xstorage/myisam/ftbench/ft-test-run.sh (renamed from myisam/ftbench/ft-test-run.sh)0
-rw-r--r--storage/myisam/ftdefs.h (renamed from myisam/ftdefs.h)0
-rw-r--r--storage/myisam/fulltext.h (renamed from myisam/fulltext.h)0
-rwxr-xr-xstorage/myisam/make-ccc (renamed from myisam/make-ccc)0
-rw-r--r--storage/myisam/mi_cache.c (renamed from myisam/mi_cache.c)0
-rw-r--r--storage/myisam/mi_changed.c (renamed from myisam/mi_changed.c)0
-rw-r--r--storage/myisam/mi_check.c (renamed from myisam/mi_check.c)0
-rw-r--r--storage/myisam/mi_checksum.c (renamed from myisam/mi_checksum.c)0
-rw-r--r--storage/myisam/mi_close.c (renamed from myisam/mi_close.c)0
-rw-r--r--storage/myisam/mi_create.c (renamed from myisam/mi_create.c)0
-rw-r--r--storage/myisam/mi_dbug.c (renamed from myisam/mi_dbug.c)0
-rw-r--r--storage/myisam/mi_delete.c (renamed from myisam/mi_delete.c)0
-rw-r--r--storage/myisam/mi_delete_all.c (renamed from myisam/mi_delete_all.c)0
-rw-r--r--storage/myisam/mi_delete_table.c (renamed from myisam/mi_delete_table.c)0
-rw-r--r--storage/myisam/mi_dynrec.c (renamed from myisam/mi_dynrec.c)0
-rw-r--r--storage/myisam/mi_extra.c (renamed from myisam/mi_extra.c)0
-rw-r--r--storage/myisam/mi_info.c (renamed from myisam/mi_info.c)0
-rw-r--r--storage/myisam/mi_key.c (renamed from myisam/mi_key.c)0
-rw-r--r--storage/myisam/mi_keycache.c (renamed from myisam/mi_keycache.c)0
-rw-r--r--storage/myisam/mi_locking.c (renamed from myisam/mi_locking.c)0
-rw-r--r--storage/myisam/mi_log.c (renamed from myisam/mi_log.c)0
-rw-r--r--storage/myisam/mi_open.c (renamed from myisam/mi_open.c)0
-rw-r--r--storage/myisam/mi_packrec.c (renamed from myisam/mi_packrec.c)0
-rw-r--r--storage/myisam/mi_page.c (renamed from myisam/mi_page.c)0
-rw-r--r--storage/myisam/mi_panic.c (renamed from myisam/mi_panic.c)0
-rw-r--r--storage/myisam/mi_preload.c (renamed from myisam/mi_preload.c)0
-rw-r--r--storage/myisam/mi_range.c (renamed from myisam/mi_range.c)0
-rw-r--r--storage/myisam/mi_rename.c (renamed from myisam/mi_rename.c)0
-rw-r--r--storage/myisam/mi_rfirst.c (renamed from myisam/mi_rfirst.c)0
-rw-r--r--storage/myisam/mi_rkey.c (renamed from myisam/mi_rkey.c)0
-rw-r--r--storage/myisam/mi_rlast.c (renamed from myisam/mi_rlast.c)0
-rw-r--r--storage/myisam/mi_rnext.c (renamed from myisam/mi_rnext.c)0
-rw-r--r--storage/myisam/mi_rnext_same.c (renamed from myisam/mi_rnext_same.c)0
-rw-r--r--storage/myisam/mi_rprev.c (renamed from myisam/mi_rprev.c)0
-rw-r--r--storage/myisam/mi_rrnd.c (renamed from myisam/mi_rrnd.c)0
-rw-r--r--storage/myisam/mi_rsame.c (renamed from myisam/mi_rsame.c)0
-rw-r--r--storage/myisam/mi_rsamepos.c (renamed from myisam/mi_rsamepos.c)0
-rw-r--r--storage/myisam/mi_scan.c (renamed from myisam/mi_scan.c)0
-rw-r--r--storage/myisam/mi_search.c (renamed from myisam/mi_search.c)0
-rw-r--r--storage/myisam/mi_static.c (renamed from myisam/mi_static.c)0
-rw-r--r--storage/myisam/mi_statrec.c (renamed from myisam/mi_statrec.c)0
-rw-r--r--storage/myisam/mi_test1.c (renamed from myisam/mi_test1.c)0
-rw-r--r--storage/myisam/mi_test2.c (renamed from myisam/mi_test2.c)0
-rw-r--r--storage/myisam/mi_test3.c (renamed from myisam/mi_test3.c)0
-rw-r--r--storage/myisam/mi_test_all.res (renamed from myisam/mi_test_all.res)0
-rwxr-xr-xstorage/myisam/mi_test_all.sh (renamed from myisam/mi_test_all.sh)0
-rw-r--r--storage/myisam/mi_unique.c (renamed from myisam/mi_unique.c)0
-rw-r--r--storage/myisam/mi_update.c (renamed from myisam/mi_update.c)0
-rw-r--r--storage/myisam/mi_write.c (renamed from myisam/mi_write.c)0
-rw-r--r--storage/myisam/myisam_ftdump.c (renamed from myisam/myisam_ftdump.c)0
-rw-r--r--storage/myisam/myisamchk.c (renamed from myisam/myisamchk.c)0
-rw-r--r--storage/myisam/myisamdef.h (renamed from myisam/myisamdef.h)0
-rw-r--r--storage/myisam/myisamlog.c (renamed from myisam/myisamlog.c)0
-rw-r--r--storage/myisam/myisampack.c (renamed from myisam/myisampack.c)0
-rw-r--r--storage/myisam/rt_index.c (renamed from myisam/rt_index.c)0
-rw-r--r--storage/myisam/rt_index.h (renamed from myisam/rt_index.h)0
-rw-r--r--storage/myisam/rt_key.c (renamed from myisam/rt_key.c)0
-rw-r--r--storage/myisam/rt_key.h (renamed from myisam/rt_key.h)0
-rw-r--r--storage/myisam/rt_mbr.c (renamed from myisam/rt_mbr.c)0
-rw-r--r--storage/myisam/rt_mbr.h (renamed from myisam/rt_mbr.h)0
-rw-r--r--storage/myisam/rt_split.c (renamed from myisam/rt_split.c)0
-rw-r--r--storage/myisam/rt_test.c (renamed from myisam/rt_test.c)0
-rw-r--r--storage/myisam/sort.c (renamed from myisam/sort.c)0
-rw-r--r--storage/myisam/sp_defs.h (renamed from myisam/sp_defs.h)0
-rw-r--r--storage/myisam/sp_key.c (renamed from myisam/sp_key.c)0
-rw-r--r--storage/myisam/sp_test.c (renamed from myisam/sp_test.c)0
-rwxr-xr-xstorage/myisam/test_pack (renamed from myisam/test_pack)0
-rw-r--r--storage/myisammrg/.cvsignore (renamed from myisammrg/.cvsignore)0
-rw-r--r--storage/myisammrg/Makefile.am (renamed from myisammrg/Makefile.am)0
-rwxr-xr-xstorage/myisammrg/make-ccc (renamed from myisammrg/make-ccc)0
-rw-r--r--storage/myisammrg/myrg_close.c (renamed from myisammrg/myrg_close.c)0
-rw-r--r--storage/myisammrg/myrg_create.c (renamed from myisammrg/myrg_create.c)0
-rw-r--r--storage/myisammrg/myrg_def.h (renamed from myisammrg/myrg_def.h)0
-rw-r--r--storage/myisammrg/myrg_delete.c (renamed from myisammrg/myrg_delete.c)0
-rw-r--r--storage/myisammrg/myrg_extra.c (renamed from myisammrg/myrg_extra.c)0
-rw-r--r--storage/myisammrg/myrg_info.c (renamed from myisammrg/myrg_info.c)0
-rw-r--r--storage/myisammrg/myrg_locking.c (renamed from myisammrg/myrg_locking.c)0
-rw-r--r--storage/myisammrg/myrg_open.c (renamed from myisammrg/myrg_open.c)0
-rw-r--r--storage/myisammrg/myrg_panic.c (renamed from myisammrg/myrg_panic.c)0
-rw-r--r--storage/myisammrg/myrg_queue.c (renamed from myisammrg/myrg_queue.c)0
-rw-r--r--storage/myisammrg/myrg_range.c (renamed from myisammrg/myrg_range.c)0
-rw-r--r--storage/myisammrg/myrg_rfirst.c (renamed from myisammrg/myrg_rfirst.c)0
-rw-r--r--storage/myisammrg/myrg_rkey.c (renamed from myisammrg/myrg_rkey.c)0
-rw-r--r--storage/myisammrg/myrg_rlast.c (renamed from myisammrg/myrg_rlast.c)0
-rw-r--r--storage/myisammrg/myrg_rnext.c (renamed from myisammrg/myrg_rnext.c)0
-rw-r--r--storage/myisammrg/myrg_rnext_same.c (renamed from myisammrg/myrg_rnext_same.c)0
-rw-r--r--storage/myisammrg/myrg_rprev.c (renamed from myisammrg/myrg_rprev.c)0
-rw-r--r--storage/myisammrg/myrg_rrnd.c (renamed from myisammrg/myrg_rrnd.c)0
-rw-r--r--storage/myisammrg/myrg_rsame.c (renamed from myisammrg/myrg_rsame.c)0
-rw-r--r--storage/myisammrg/myrg_static.c (renamed from myisammrg/myrg_static.c)0
-rw-r--r--storage/myisammrg/myrg_update.c (renamed from myisammrg/myrg_update.c)0
-rw-r--r--storage/myisammrg/myrg_write.c (renamed from myisammrg/myrg_write.c)0
-rw-r--r--storage/ndb/Makefile.am30
-rw-r--r--storage/ndb/bin/.empty (renamed from ndb/bin/.empty)0
-rwxr-xr-xstorage/ndb/bin/check-regression.sh (renamed from ndb/bin/check-regression.sh)0
-rwxr-xr-xstorage/ndb/bin/makeTestPrograms_html.sh (renamed from ndb/bin/makeTestPrograms_html.sh)0
-rw-r--r--storage/ndb/config/common.mk.am12
-rwxr-xr-xstorage/ndb/config/make-win-dsw.sh (renamed from ndb/config/make-win-dsw.sh)0
-rw-r--r--storage/ndb/config/type_kernel.mk.am18
-rw-r--r--storage/ndb/config/type_mgmapiclient.mk.am2
-rw-r--r--storage/ndb/config/type_ndbapi.mk.am12
-rw-r--r--storage/ndb/config/type_ndbapiclient.mk.am2
-rw-r--r--storage/ndb/config/type_ndbapitest.mk.am14
-rw-r--r--storage/ndb/config/type_ndbapitools.mk.am15
-rw-r--r--storage/ndb/config/type_util.mk.am6
-rwxr-xr-xstorage/ndb/config/win-includes (renamed from ndb/config/win-includes)0
-rw-r--r--storage/ndb/config/win-lib.am (renamed from ndb/config/win-lib.am)0
-rwxr-xr-xstorage/ndb/config/win-libraries (renamed from ndb/config/win-libraries)0
-rwxr-xr-xstorage/ndb/config/win-name (renamed from ndb/config/win-name)0
-rw-r--r--storage/ndb/config/win-prg.am (renamed from ndb/config/win-prg.am)0
-rwxr-xr-xstorage/ndb/config/win-sources (renamed from ndb/config/win-sources)0
-rw-r--r--storage/ndb/demos/1-node/1-api-3/Ndb.cfg (renamed from ndb/demos/1-node/1-api-3/Ndb.cfg)0
-rw-r--r--storage/ndb/demos/1-node/1-db-2/Ndb.cfg (renamed from ndb/demos/1-node/1-db-2/Ndb.cfg)0
-rw-r--r--storage/ndb/demos/1-node/1-mgm-1/Ndb.cfg (renamed from ndb/demos/1-node/1-mgm-1/Ndb.cfg)0
-rw-r--r--storage/ndb/demos/1-node/1-mgm-1/template_config.ini (renamed from ndb/demos/1-node/1-mgm-1/template_config.ini)0
-rw-r--r--storage/ndb/demos/2-node/2-api-4/Ndb.cfg (renamed from ndb/demos/2-node/2-api-4/Ndb.cfg)0
-rw-r--r--storage/ndb/demos/2-node/2-api-5/Ndb.cfg (renamed from ndb/demos/2-node/2-api-5/Ndb.cfg)0
-rw-r--r--storage/ndb/demos/2-node/2-api-6/Ndb.cfg (renamed from ndb/demos/2-node/2-api-6/Ndb.cfg)0
-rw-r--r--storage/ndb/demos/2-node/2-api-7/Ndb.cfg (renamed from ndb/demos/2-node/2-api-7/Ndb.cfg)0
-rw-r--r--storage/ndb/demos/2-node/2-db-2/Ndb.cfg (renamed from ndb/demos/2-node/2-db-2/Ndb.cfg)0
-rw-r--r--storage/ndb/demos/2-node/2-db-3/Ndb.cfg (renamed from ndb/demos/2-node/2-db-3/Ndb.cfg)0
-rw-r--r--storage/ndb/demos/2-node/2-mgm-1/Ndb.cfg (renamed from ndb/demos/2-node/2-mgm-1/Ndb.cfg)0
-rw-r--r--storage/ndb/demos/2-node/2-mgm-1/template_config.ini (renamed from ndb/demos/2-node/2-mgm-1/template_config.ini)0
-rw-r--r--storage/ndb/demos/config-templates/config_template-1-REP.ini (renamed from ndb/demos/config-templates/config_template-1-REP.ini)0
-rw-r--r--storage/ndb/demos/config-templates/config_template-4.ini (renamed from ndb/demos/config-templates/config_template-4.ini)0
-rw-r--r--storage/ndb/demos/config-templates/config_template-install.ini (renamed from ndb/demos/config-templates/config_template-install.ini)0
-rw-r--r--storage/ndb/demos/run_demo1-PS-SS_common.sh (renamed from ndb/demos/run_demo1-PS-SS_common.sh)0
-rwxr-xr-xstorage/ndb/demos/run_demo1-PS.sh (renamed from ndb/demos/run_demo1-PS.sh)0
-rwxr-xr-xstorage/ndb/demos/run_demo1-SS.sh (renamed from ndb/demos/run_demo1-SS.sh)0
-rwxr-xr-xstorage/ndb/demos/run_demo1.sh (renamed from ndb/demos/run_demo1.sh)0
-rwxr-xr-xstorage/ndb/demos/run_demo2.sh (renamed from ndb/demos/run_demo2.sh)0
-rw-r--r--storage/ndb/docs/Makefile.am114
-rw-r--r--storage/ndb/docs/README (renamed from ndb/docs/README)0
-rw-r--r--storage/ndb/docs/doxygen/Doxyfile.mgmapi (renamed from ndb/docs/doxygen/Doxyfile.mgmapi)0
-rw-r--r--storage/ndb/docs/doxygen/Doxyfile.ndb (renamed from ndb/docs/doxygen/Doxyfile.ndb)0
-rw-r--r--storage/ndb/docs/doxygen/Doxyfile.ndbapi (renamed from ndb/docs/doxygen/Doxyfile.ndbapi)0
-rw-r--r--storage/ndb/docs/doxygen/Doxyfile.odbc (renamed from ndb/docs/doxygen/Doxyfile.odbc)0
-rw-r--r--storage/ndb/docs/doxygen/Doxyfile.test (renamed from ndb/docs/doxygen/Doxyfile.test)0
-rw-r--r--storage/ndb/docs/doxygen/header.mgmapi.tex (renamed from ndb/docs/doxygen/header.mgmapi.tex)0
-rw-r--r--storage/ndb/docs/doxygen/header.ndbapi.tex (renamed from ndb/docs/doxygen/header.ndbapi.tex)0
-rwxr-xr-xstorage/ndb/docs/doxygen/postdoxy.pl (renamed from ndb/docs/doxygen/postdoxy.pl)0
-rwxr-xr-xstorage/ndb/docs/doxygen/predoxy.pl (renamed from ndb/docs/doxygen/predoxy.pl)0
-rw-r--r--storage/ndb/docs/wl2077.txt (renamed from ndb/docs/wl2077.txt)0
-rwxr-xr-xstorage/ndb/home/bin/Linuxmkisofs (renamed from ndb/home/bin/Linuxmkisofs)bin 503146 -> 503146 bytes
-rwxr-xr-xstorage/ndb/home/bin/Solarismkisofs (renamed from ndb/home/bin/Solarismkisofs)bin 634084 -> 634084 bytes
-rwxr-xr-xstorage/ndb/home/bin/cvs2cl.pl (renamed from ndb/home/bin/cvs2cl.pl)0
-rwxr-xr-xstorage/ndb/home/bin/fix-cvs-root (renamed from ndb/home/bin/fix-cvs-root)0
-rwxr-xr-xstorage/ndb/home/bin/import-from-bk.sh (renamed from ndb/home/bin/import-from-bk.sh)0
-rwxr-xr-xstorage/ndb/home/bin/ndb_deploy (renamed from ndb/home/bin/ndb_deploy)0
-rwxr-xr-xstorage/ndb/home/bin/ndbdoxy.pl (renamed from ndb/home/bin/ndbdoxy.pl)0
-rwxr-xr-xstorage/ndb/home/bin/ngcalc (renamed from ndb/home/bin/ngcalc)0
-rw-r--r--storage/ndb/home/bin/parseConfigFile.awk (renamed from ndb/home/bin/parseConfigFile.awk)0
-rwxr-xr-xstorage/ndb/home/bin/setup-test.sh (renamed from ndb/home/bin/setup-test.sh)0
-rw-r--r--storage/ndb/home/bin/signallog2html.lib/signallog2list.awk (renamed from ndb/home/bin/signallog2html.lib/signallog2list.awk)0
-rw-r--r--storage/ndb/home/bin/signallog2html.lib/uniq_blocks.awk (renamed from ndb/home/bin/signallog2html.lib/uniq_blocks.awk)0
-rwxr-xr-xstorage/ndb/home/bin/signallog2html.sh (renamed from ndb/home/bin/signallog2html.sh)0
-rwxr-xr-xstorage/ndb/home/bin/stripcr (renamed from ndb/home/bin/stripcr)0
-rw-r--r--storage/ndb/home/lib/funcs.sh (renamed from ndb/home/lib/funcs.sh)0
-rw-r--r--storage/ndb/include/Makefile.am51
-rw-r--r--storage/ndb/include/debugger/DebuggerNames.hpp (renamed from ndb/include/debugger/DebuggerNames.hpp)0
-rw-r--r--storage/ndb/include/debugger/EventLogger.hpp (renamed from ndb/include/debugger/EventLogger.hpp)0
-rw-r--r--storage/ndb/include/debugger/GrepError.hpp (renamed from ndb/include/debugger/GrepError.hpp)0
-rw-r--r--storage/ndb/include/debugger/SignalLoggerManager.hpp (renamed from ndb/include/debugger/SignalLoggerManager.hpp)0
-rw-r--r--storage/ndb/include/editline/editline.h (renamed from ndb/include/editline/editline.h)0
-rw-r--r--storage/ndb/include/kernel/AttributeDescriptor.hpp (renamed from ndb/include/kernel/AttributeDescriptor.hpp)0
-rw-r--r--storage/ndb/include/kernel/AttributeHeader.hpp215
-rw-r--r--storage/ndb/include/kernel/AttributeList.hpp (renamed from ndb/include/kernel/AttributeList.hpp)0
-rw-r--r--storage/ndb/include/kernel/BlockNumbers.h (renamed from ndb/include/kernel/BlockNumbers.h)0
-rw-r--r--storage/ndb/include/kernel/GlobalSignalNumbers.h949
-rw-r--r--storage/ndb/include/kernel/GrepEvent.hpp (renamed from ndb/include/kernel/GrepEvent.hpp)0
-rw-r--r--storage/ndb/include/kernel/Interpreter.hpp (renamed from ndb/include/kernel/Interpreter.hpp)0
-rw-r--r--storage/ndb/include/kernel/LogLevel.hpp (renamed from ndb/include/kernel/LogLevel.hpp)0
-rw-r--r--storage/ndb/include/kernel/NodeBitmask.hpp (renamed from ndb/include/kernel/NodeBitmask.hpp)0
-rw-r--r--storage/ndb/include/kernel/NodeInfo.hpp (renamed from ndb/include/kernel/NodeInfo.hpp)0
-rw-r--r--storage/ndb/include/kernel/NodeState.hpp (renamed from ndb/include/kernel/NodeState.hpp)0
-rw-r--r--storage/ndb/include/kernel/RefConvert.hpp (renamed from ndb/include/kernel/RefConvert.hpp)0
-rw-r--r--storage/ndb/include/kernel/kernel_config_parameters.h (renamed from ndb/include/kernel/kernel_config_parameters.h)0
-rw-r--r--storage/ndb/include/kernel/kernel_types.h (renamed from ndb/include/kernel/kernel_types.h)0
-rw-r--r--storage/ndb/include/kernel/ndb_limits.h133
-rw-r--r--storage/ndb/include/kernel/signaldata/AbortAll.hpp (renamed from ndb/include/kernel/signaldata/AbortAll.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/AccFrag.hpp (renamed from ndb/include/kernel/signaldata/AccFrag.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/AccLock.hpp (renamed from ndb/include/kernel/signaldata/AccLock.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/AccScan.hpp (renamed from ndb/include/kernel/signaldata/AccScan.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/AccSizeAltReq.hpp (renamed from ndb/include/kernel/signaldata/AccSizeAltReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/AlterIndx.hpp (renamed from ndb/include/kernel/signaldata/AlterIndx.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/AlterTab.hpp (renamed from ndb/include/kernel/signaldata/AlterTab.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/AlterTable.hpp (renamed from ndb/include/kernel/signaldata/AlterTable.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/AlterTrig.hpp (renamed from ndb/include/kernel/signaldata/AlterTrig.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/ApiRegSignalData.hpp (renamed from ndb/include/kernel/signaldata/ApiRegSignalData.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/ApiVersion.hpp (renamed from ndb/include/kernel/signaldata/ApiVersion.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp (renamed from ndb/include/kernel/signaldata/ArbitSignalData.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/AttrInfo.hpp (renamed from ndb/include/kernel/signaldata/AttrInfo.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/BackupContinueB.hpp (renamed from ndb/include/kernel/signaldata/BackupContinueB.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/BackupImpl.hpp (renamed from ndb/include/kernel/signaldata/BackupImpl.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/BackupSignalData.hpp (renamed from ndb/include/kernel/signaldata/BackupSignalData.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/BlockCommitOrd.hpp (renamed from ndb/include/kernel/signaldata/BlockCommitOrd.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/BuildIndx.hpp (renamed from ndb/include/kernel/signaldata/BuildIndx.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CheckNodeGroups.hpp (renamed from ndb/include/kernel/signaldata/CheckNodeGroups.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CloseComReqConf.hpp (renamed from ndb/include/kernel/signaldata/CloseComReqConf.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CmInit.hpp (renamed from ndb/include/kernel/signaldata/CmInit.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CmRegSignalData.hpp (renamed from ndb/include/kernel/signaldata/CmRegSignalData.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CmvmiCfgConf.hpp (renamed from ndb/include/kernel/signaldata/CmvmiCfgConf.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CntrMasterConf.hpp (renamed from ndb/include/kernel/signaldata/CntrMasterConf.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CntrMasterReq.hpp (renamed from ndb/include/kernel/signaldata/CntrMasterReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CntrStart.hpp (renamed from ndb/include/kernel/signaldata/CntrStart.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/ConfigParamId.hpp (renamed from ndb/include/kernel/signaldata/ConfigParamId.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/ContinueFragmented.hpp (renamed from ndb/include/kernel/signaldata/ContinueFragmented.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CopyActive.hpp (renamed from ndb/include/kernel/signaldata/CopyActive.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CopyFrag.hpp (renamed from ndb/include/kernel/signaldata/CopyFrag.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CopyGCIReq.hpp (renamed from ndb/include/kernel/signaldata/CopyGCIReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CreateEvnt.hpp (renamed from ndb/include/kernel/signaldata/CreateEvnt.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CreateFrag.hpp (renamed from ndb/include/kernel/signaldata/CreateFrag.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp101
-rw-r--r--storage/ndb/include/kernel/signaldata/CreateIndx.hpp (renamed from ndb/include/kernel/signaldata/CreateIndx.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CreateTab.hpp (renamed from ndb/include/kernel/signaldata/CreateTab.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CreateTable.hpp (renamed from ndb/include/kernel/signaldata/CreateTable.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/CreateTrig.hpp (renamed from ndb/include/kernel/signaldata/CreateTrig.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DiAddTab.hpp (renamed from ndb/include/kernel/signaldata/DiAddTab.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DiGetNodes.hpp (renamed from ndb/include/kernel/signaldata/DiGetNodes.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DictSchemaInfo.hpp (renamed from ndb/include/kernel/signaldata/DictSchemaInfo.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DictSizeAltReq.hpp (renamed from ndb/include/kernel/signaldata/DictSizeAltReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DictStart.hpp (renamed from ndb/include/kernel/signaldata/DictStart.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DictTabInfo.hpp520
-rw-r--r--storage/ndb/include/kernel/signaldata/DihAddFrag.hpp (renamed from ndb/include/kernel/signaldata/DihAddFrag.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DihContinueB.hpp (renamed from ndb/include/kernel/signaldata/DihContinueB.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DihSizeAltReq.hpp (renamed from ndb/include/kernel/signaldata/DihSizeAltReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DihStartTab.hpp (renamed from ndb/include/kernel/signaldata/DihStartTab.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DihSwitchReplica.hpp (renamed from ndb/include/kernel/signaldata/DihSwitchReplica.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DisconnectRep.hpp (renamed from ndb/include/kernel/signaldata/DisconnectRep.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DropIndx.hpp (renamed from ndb/include/kernel/signaldata/DropIndx.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DropTab.hpp (renamed from ndb/include/kernel/signaldata/DropTab.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DropTabFile.hpp (renamed from ndb/include/kernel/signaldata/DropTabFile.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DropTable.hpp (renamed from ndb/include/kernel/signaldata/DropTable.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DropTrig.hpp (renamed from ndb/include/kernel/signaldata/DropTrig.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp (renamed from ndb/include/kernel/signaldata/DumpStateOrd.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/EmptyLcp.hpp (renamed from ndb/include/kernel/signaldata/EmptyLcp.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/EndTo.hpp (renamed from ndb/include/kernel/signaldata/EndTo.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/EventReport.hpp (renamed from ndb/include/kernel/signaldata/EventReport.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/EventSubscribeReq.hpp (renamed from ndb/include/kernel/signaldata/EventSubscribeReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/ExecFragReq.hpp (renamed from ndb/include/kernel/signaldata/ExecFragReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/FailRep.hpp (renamed from ndb/include/kernel/signaldata/FailRep.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp201
-rw-r--r--storage/ndb/include/kernel/signaldata/FsAppendReq.hpp (renamed from ndb/include/kernel/signaldata/FsAppendReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/FsCloseReq.hpp (renamed from ndb/include/kernel/signaldata/FsCloseReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/FsConf.hpp (renamed from ndb/include/kernel/signaldata/FsConf.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/FsOpenReq.hpp (renamed from ndb/include/kernel/signaldata/FsOpenReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/FsReadWriteReq.hpp (renamed from ndb/include/kernel/signaldata/FsReadWriteReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/FsRef.hpp (renamed from ndb/include/kernel/signaldata/FsRef.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/FsRemoveReq.hpp (renamed from ndb/include/kernel/signaldata/FsRemoveReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/GCPSave.hpp (renamed from ndb/include/kernel/signaldata/GCPSave.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/GetTabInfo.hpp (renamed from ndb/include/kernel/signaldata/GetTabInfo.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/GetTableId.hpp (renamed from ndb/include/kernel/signaldata/GetTableId.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/GrepImpl.hpp (renamed from ndb/include/kernel/signaldata/GrepImpl.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/HotSpareRep.hpp (renamed from ndb/include/kernel/signaldata/HotSpareRep.hpp)0
-rwxr-xr-xstorage/ndb/include/kernel/signaldata/IndxAttrInfo.hpp (renamed from ndb/include/kernel/signaldata/IndxAttrInfo.hpp)0
-rwxr-xr-xstorage/ndb/include/kernel/signaldata/IndxKeyInfo.hpp (renamed from ndb/include/kernel/signaldata/IndxKeyInfo.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/InvalidateNodeLCPConf.hpp (renamed from ndb/include/kernel/signaldata/InvalidateNodeLCPConf.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/InvalidateNodeLCPReq.hpp (renamed from ndb/include/kernel/signaldata/InvalidateNodeLCPReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/KeyInfo.hpp (renamed from ndb/include/kernel/signaldata/KeyInfo.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/LCP.hpp (renamed from ndb/include/kernel/signaldata/LCP.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/ListTables.hpp (renamed from ndb/include/kernel/signaldata/ListTables.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/LqhFrag.hpp (renamed from ndb/include/kernel/signaldata/LqhFrag.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/LqhKey.hpp (renamed from ndb/include/kernel/signaldata/LqhKey.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/LqhSizeAltReq.hpp (renamed from ndb/include/kernel/signaldata/LqhSizeAltReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/LqhTransConf.hpp (renamed from ndb/include/kernel/signaldata/LqhTransConf.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/ManagementServer.hpp (renamed from ndb/include/kernel/signaldata/ManagementServer.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/MasterGCP.hpp (renamed from ndb/include/kernel/signaldata/MasterGCP.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/MasterLCP.hpp (renamed from ndb/include/kernel/signaldata/MasterLCP.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/NFCompleteRep.hpp (renamed from ndb/include/kernel/signaldata/NFCompleteRep.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/NdbSttor.hpp (renamed from ndb/include/kernel/signaldata/NdbSttor.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/NdbfsContinueB.hpp (renamed from ndb/include/kernel/signaldata/NdbfsContinueB.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/NextScan.hpp (renamed from ndb/include/kernel/signaldata/NextScan.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/NodeFailRep.hpp (renamed from ndb/include/kernel/signaldata/NodeFailRep.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/NodeStateSignalData.hpp (renamed from ndb/include/kernel/signaldata/NodeStateSignalData.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/PackedSignal.hpp (renamed from ndb/include/kernel/signaldata/PackedSignal.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/PrepDropTab.hpp (renamed from ndb/include/kernel/signaldata/PrepDropTab.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/PrepFailReqRef.hpp (renamed from ndb/include/kernel/signaldata/PrepFailReqRef.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/ReadConfig.hpp (renamed from ndb/include/kernel/signaldata/ReadConfig.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/ReadNodesConf.hpp (renamed from ndb/include/kernel/signaldata/ReadNodesConf.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/RelTabMem.hpp (renamed from ndb/include/kernel/signaldata/RelTabMem.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/RepImpl.hpp (renamed from ndb/include/kernel/signaldata/RepImpl.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/ResumeReq.hpp (renamed from ndb/include/kernel/signaldata/ResumeReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/ScanFrag.hpp (renamed from ndb/include/kernel/signaldata/ScanFrag.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/ScanTab.hpp (renamed from ndb/include/kernel/signaldata/ScanTab.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp (renamed from ndb/include/kernel/signaldata/SetLogLevelOrd.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/SetVarReq.hpp (renamed from ndb/include/kernel/signaldata/SetVarReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/SignalData.hpp (renamed from ndb/include/kernel/signaldata/SignalData.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/SignalDataPrint.hpp (renamed from ndb/include/kernel/signaldata/SignalDataPrint.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/SignalDroppedRep.hpp (renamed from ndb/include/kernel/signaldata/SignalDroppedRep.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/SrFragidConf.hpp (renamed from ndb/include/kernel/signaldata/SrFragidConf.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/StartFragReq.hpp (renamed from ndb/include/kernel/signaldata/StartFragReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/StartInfo.hpp (renamed from ndb/include/kernel/signaldata/StartInfo.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/StartMe.hpp (renamed from ndb/include/kernel/signaldata/StartMe.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/StartOrd.hpp (renamed from ndb/include/kernel/signaldata/StartOrd.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/StartPerm.hpp (renamed from ndb/include/kernel/signaldata/StartPerm.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/StartRec.hpp (renamed from ndb/include/kernel/signaldata/StartRec.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/StartTo.hpp (renamed from ndb/include/kernel/signaldata/StartTo.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/StopMe.hpp (renamed from ndb/include/kernel/signaldata/StopMe.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/StopPerm.hpp (renamed from ndb/include/kernel/signaldata/StopPerm.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/StopReq.hpp (renamed from ndb/include/kernel/signaldata/StopReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/SumaImpl.hpp (renamed from ndb/include/kernel/signaldata/SumaImpl.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/SystemError.hpp (renamed from ndb/include/kernel/signaldata/SystemError.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TamperOrd.hpp (renamed from ndb/include/kernel/signaldata/TamperOrd.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TcCommit.hpp (renamed from ndb/include/kernel/signaldata/TcCommit.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TcContinueB.hpp (renamed from ndb/include/kernel/signaldata/TcContinueB.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TcHbRep.hpp (renamed from ndb/include/kernel/signaldata/TcHbRep.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TcIndx.hpp (renamed from ndb/include/kernel/signaldata/TcIndx.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TcKeyConf.hpp (renamed from ndb/include/kernel/signaldata/TcKeyConf.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TcKeyFailConf.hpp (renamed from ndb/include/kernel/signaldata/TcKeyFailConf.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TcKeyRef.hpp (renamed from ndb/include/kernel/signaldata/TcKeyRef.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TcKeyReq.hpp (renamed from ndb/include/kernel/signaldata/TcKeyReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TcRollbackRep.hpp (renamed from ndb/include/kernel/signaldata/TcRollbackRep.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TcSizeAltReq.hpp (renamed from ndb/include/kernel/signaldata/TcSizeAltReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TestOrd.hpp (renamed from ndb/include/kernel/signaldata/TestOrd.hpp)0
-rwxr-xr-xstorage/ndb/include/kernel/signaldata/TransIdAI.hpp (renamed from ndb/include/kernel/signaldata/TransIdAI.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TrigAttrInfo.hpp (renamed from ndb/include/kernel/signaldata/TrigAttrInfo.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TupCommit.hpp (renamed from ndb/include/kernel/signaldata/TupCommit.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TupFrag.hpp (renamed from ndb/include/kernel/signaldata/TupFrag.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TupKey.hpp (renamed from ndb/include/kernel/signaldata/TupKey.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TupSizeAltReq.hpp (renamed from ndb/include/kernel/signaldata/TupSizeAltReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TuxBound.hpp (renamed from ndb/include/kernel/signaldata/TuxBound.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TuxContinueB.hpp (renamed from ndb/include/kernel/signaldata/TuxContinueB.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TuxMaint.hpp (renamed from ndb/include/kernel/signaldata/TuxMaint.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/TuxSizeAltReq.hpp (renamed from ndb/include/kernel/signaldata/TuxSizeAltReq.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/UpdateTo.hpp (renamed from ndb/include/kernel/signaldata/UpdateTo.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/UpgradeStartup.hpp (renamed from ndb/include/kernel/signaldata/UpgradeStartup.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/UtilDelete.hpp (renamed from ndb/include/kernel/signaldata/UtilDelete.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/UtilExecute.hpp (renamed from ndb/include/kernel/signaldata/UtilExecute.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/UtilLock.hpp (renamed from ndb/include/kernel/signaldata/UtilLock.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/UtilPrepare.hpp (renamed from ndb/include/kernel/signaldata/UtilPrepare.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/UtilRelease.hpp (renamed from ndb/include/kernel/signaldata/UtilRelease.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/UtilSequence.hpp (renamed from ndb/include/kernel/signaldata/UtilSequence.hpp)0
-rw-r--r--storage/ndb/include/kernel/signaldata/WaitGCP.hpp (renamed from ndb/include/kernel/signaldata/WaitGCP.hpp)0
-rw-r--r--storage/ndb/include/kernel/trigger_definitions.h (renamed from ndb/include/kernel/trigger_definitions.h)0
-rw-r--r--storage/ndb/include/logger/ConsoleLogHandler.hpp (renamed from ndb/include/logger/ConsoleLogHandler.hpp)0
-rw-r--r--storage/ndb/include/logger/FileLogHandler.hpp (renamed from ndb/include/logger/FileLogHandler.hpp)0
-rw-r--r--storage/ndb/include/logger/LogHandler.hpp (renamed from ndb/include/logger/LogHandler.hpp)0
-rw-r--r--storage/ndb/include/logger/Logger.hpp (renamed from ndb/include/logger/Logger.hpp)0
-rw-r--r--storage/ndb/include/logger/SysLogHandler.hpp (renamed from ndb/include/logger/SysLogHandler.hpp)0
-rw-r--r--storage/ndb/include/mgmapi/mgmapi.h (renamed from ndb/include/mgmapi/mgmapi.h)0
-rw-r--r--storage/ndb/include/mgmapi/mgmapi_config_parameters.h (renamed from ndb/include/mgmapi/mgmapi_config_parameters.h)0
-rw-r--r--storage/ndb/include/mgmapi/mgmapi_config_parameters_debug.h (renamed from ndb/include/mgmapi/mgmapi_config_parameters_debug.h)0
-rw-r--r--storage/ndb/include/mgmapi/mgmapi_debug.h (renamed from ndb/include/mgmapi/mgmapi_debug.h)0
-rw-r--r--storage/ndb/include/mgmapi/ndb_logevent.h (renamed from ndb/include/mgmapi/ndb_logevent.h)0
-rw-r--r--storage/ndb/include/mgmcommon/ConfigRetriever.hpp (renamed from ndb/include/mgmcommon/ConfigRetriever.hpp)0
-rw-r--r--storage/ndb/include/mgmcommon/IPCConfig.hpp (renamed from ndb/include/mgmcommon/IPCConfig.hpp)0
-rw-r--r--storage/ndb/include/mgmcommon/MgmtErrorReporter.hpp (renamed from ndb/include/mgmcommon/MgmtErrorReporter.hpp)0
-rw-r--r--storage/ndb/include/ndb_constants.h (renamed from ndb/include/ndb_constants.h)0
-rw-r--r--storage/ndb/include/ndb_global.h.in (renamed from ndb/include/ndb_global.h.in)0
-rw-r--r--storage/ndb/include/ndb_init.h (renamed from ndb/include/ndb_init.h)0
-rw-r--r--storage/ndb/include/ndb_net.h (renamed from ndb/include/ndb_net.h)0
-rw-r--r--storage/ndb/include/ndb_types.h.in (renamed from ndb/include/ndb_types.h.in)0
-rw-r--r--storage/ndb/include/ndb_version.h.in (renamed from ndb/include/ndb_version.h.in)0
-rw-r--r--storage/ndb/include/ndbapi/Ndb.hpp (renamed from ndb/include/ndbapi/Ndb.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/NdbApi.hpp (renamed from ndb/include/ndbapi/NdbApi.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/NdbBlob.hpp (renamed from ndb/include/ndbapi/NdbBlob.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/NdbDictionary.hpp1348
-rw-r--r--storage/ndb/include/ndbapi/NdbError.hpp (renamed from ndb/include/ndbapi/NdbError.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/NdbEventOperation.hpp (renamed from ndb/include/ndbapi/NdbEventOperation.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/NdbIndexOperation.hpp (renamed from ndb/include/ndbapi/NdbIndexOperation.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp (renamed from ndb/include/ndbapi/NdbIndexScanOperation.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/NdbOperation.hpp (renamed from ndb/include/ndbapi/NdbOperation.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/NdbPool.hpp (renamed from ndb/include/ndbapi/NdbPool.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/NdbRecAttr.hpp (renamed from ndb/include/ndbapi/NdbRecAttr.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/NdbReceiver.hpp (renamed from ndb/include/ndbapi/NdbReceiver.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/NdbScanFilter.hpp (renamed from ndb/include/ndbapi/NdbScanFilter.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/NdbScanOperation.hpp (renamed from ndb/include/ndbapi/NdbScanOperation.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/NdbTransaction.hpp (renamed from ndb/include/ndbapi/NdbTransaction.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/ndb_cluster_connection.hpp (renamed from ndb/include/ndbapi/ndb_cluster_connection.hpp)0
-rw-r--r--storage/ndb/include/ndbapi/ndb_opt_defaults.h (renamed from ndb/include/ndbapi/ndb_opt_defaults.h)0
-rw-r--r--storage/ndb/include/ndbapi/ndbapi_limits.h (renamed from ndb/include/ndbapi/ndbapi_limits.h)0
-rw-r--r--storage/ndb/include/ndbapi/ndberror.h (renamed from ndb/include/ndbapi/ndberror.h)0
-rw-r--r--storage/ndb/include/newtonapi/dba.h (renamed from ndb/include/newtonapi/dba.h)0
-rw-r--r--storage/ndb/include/newtonapi/defs/pcn_types.h (renamed from ndb/include/newtonapi/defs/pcn_types.h)0
-rw-r--r--storage/ndb/include/portlib/NdbCondition.h (renamed from ndb/include/portlib/NdbCondition.h)0
-rw-r--r--storage/ndb/include/portlib/NdbConfig.h (renamed from ndb/include/portlib/NdbConfig.h)0
-rw-r--r--storage/ndb/include/portlib/NdbDaemon.h (renamed from ndb/include/portlib/NdbDaemon.h)0
-rw-r--r--storage/ndb/include/portlib/NdbEnv.h (renamed from ndb/include/portlib/NdbEnv.h)0
-rw-r--r--storage/ndb/include/portlib/NdbHost.h (renamed from ndb/include/portlib/NdbHost.h)0
-rw-r--r--storage/ndb/include/portlib/NdbMain.h (renamed from ndb/include/portlib/NdbMain.h)0
-rw-r--r--storage/ndb/include/portlib/NdbMem.h (renamed from ndb/include/portlib/NdbMem.h)0
-rw-r--r--storage/ndb/include/portlib/NdbMutex.h (renamed from ndb/include/portlib/NdbMutex.h)0
-rw-r--r--storage/ndb/include/portlib/NdbSleep.h (renamed from ndb/include/portlib/NdbSleep.h)0
-rw-r--r--storage/ndb/include/portlib/NdbTCP.h (renamed from ndb/include/portlib/NdbTCP.h)0
-rw-r--r--storage/ndb/include/portlib/NdbThread.h (renamed from ndb/include/portlib/NdbThread.h)0
-rw-r--r--storage/ndb/include/portlib/NdbTick.h (renamed from ndb/include/portlib/NdbTick.h)0
-rw-r--r--storage/ndb/include/portlib/PortDefs.h (renamed from ndb/include/portlib/PortDefs.h)0
-rw-r--r--storage/ndb/include/portlib/prefetch.h (renamed from ndb/include/portlib/prefetch.h)0
-rw-r--r--storage/ndb/include/transporter/TransporterCallback.hpp (renamed from ndb/include/transporter/TransporterCallback.hpp)0
-rw-r--r--storage/ndb/include/transporter/TransporterDefinitions.hpp (renamed from ndb/include/transporter/TransporterDefinitions.hpp)0
-rw-r--r--storage/ndb/include/transporter/TransporterRegistry.hpp (renamed from ndb/include/transporter/TransporterRegistry.hpp)0
-rw-r--r--storage/ndb/include/util/Base64.hpp (renamed from ndb/include/util/Base64.hpp)0
-rw-r--r--storage/ndb/include/util/BaseString.hpp (renamed from ndb/include/util/BaseString.hpp)0
-rw-r--r--storage/ndb/include/util/Bitmask.hpp (renamed from ndb/include/util/Bitmask.hpp)0
-rw-r--r--storage/ndb/include/util/ConfigValues.hpp (renamed from ndb/include/util/ConfigValues.hpp)0
-rw-r--r--storage/ndb/include/util/File.hpp (renamed from ndb/include/util/File.hpp)0
-rw-r--r--storage/ndb/include/util/InputStream.hpp (renamed from ndb/include/util/InputStream.hpp)0
-rw-r--r--storage/ndb/include/util/NdbAutoPtr.hpp (renamed from ndb/include/util/NdbAutoPtr.hpp)0
-rw-r--r--storage/ndb/include/util/NdbOut.hpp (renamed from ndb/include/util/NdbOut.hpp)0
-rw-r--r--storage/ndb/include/util/NdbSqlUtil.hpp (renamed from ndb/include/util/NdbSqlUtil.hpp)0
-rw-r--r--storage/ndb/include/util/OutputStream.hpp (renamed from ndb/include/util/OutputStream.hpp)0
-rw-r--r--storage/ndb/include/util/Parser.hpp (renamed from ndb/include/util/Parser.hpp)0
-rw-r--r--storage/ndb/include/util/Properties.hpp (renamed from ndb/include/util/Properties.hpp)0
-rw-r--r--storage/ndb/include/util/SimpleProperties.hpp (renamed from ndb/include/util/SimpleProperties.hpp)0
-rw-r--r--storage/ndb/include/util/SocketAuthenticator.hpp (renamed from ndb/include/util/SocketAuthenticator.hpp)0
-rw-r--r--storage/ndb/include/util/SocketClient.hpp (renamed from ndb/include/util/SocketClient.hpp)0
-rw-r--r--storage/ndb/include/util/SocketServer.hpp (renamed from ndb/include/util/SocketServer.hpp)0
-rw-r--r--storage/ndb/include/util/UtilBuffer.hpp (renamed from ndb/include/util/UtilBuffer.hpp)0
-rw-r--r--storage/ndb/include/util/Vector.hpp (renamed from ndb/include/util/Vector.hpp)0
-rw-r--r--storage/ndb/include/util/basestring_vsnprintf.h (renamed from ndb/include/util/basestring_vsnprintf.h)0
-rw-r--r--storage/ndb/include/util/md5_hash.hpp (renamed from ndb/include/util/md5_hash.hpp)0
-rw-r--r--storage/ndb/include/util/ndb_opts.h (renamed from ndb/include/util/ndb_opts.h)0
-rw-r--r--storage/ndb/include/util/random.h (renamed from ndb/include/util/random.h)0
-rw-r--r--storage/ndb/include/util/socket_io.h (renamed from ndb/include/util/socket_io.h)0
-rw-r--r--storage/ndb/include/util/uucode.h (renamed from ndb/include/util/uucode.h)0
-rw-r--r--storage/ndb/include/util/version.h (renamed from ndb/include/util/version.h)0
-rw-r--r--storage/ndb/lib/.empty (renamed from ndb/lib/.empty)0
-rw-r--r--storage/ndb/ndbapi-examples/Makefile (renamed from ndb/ndbapi-examples/Makefile)0
-rw-r--r--storage/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile23
-rw-r--r--storage/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp (renamed from ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp)0
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_async_example/Makefile23
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp (renamed from ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp)0
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_async_example/readme.txt (renamed from ndb/ndbapi-examples/ndbapi_async_example/readme.txt)0
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_async_example1/Makefile22
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp (renamed from ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp)0
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_event_example/Makefile23
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp (renamed from ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp)0
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_retries_example/Makefile22
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp (renamed from ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp)0
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_scan_example/Makefile23
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp (renamed from ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp)0
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_scan_example/readme.txt (renamed from ndb/ndbapi-examples/ndbapi_scan_example/readme.txt)0
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_simple_example/Makefile23
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp (renamed from ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp)0
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile23
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp (renamed from ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp)0
-rw-r--r--storage/ndb/src/Makefile.am33
-rw-r--r--storage/ndb/src/common/Makefile.am (renamed from ndb/src/common/Makefile.am)0
-rw-r--r--storage/ndb/src/common/debugger/BlockNames.cpp (renamed from ndb/src/common/debugger/BlockNames.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/DebuggerNames.cpp (renamed from ndb/src/common/debugger/DebuggerNames.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/EventLogger.cpp (renamed from ndb/src/common/debugger/EventLogger.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/GrepError.cpp (renamed from ndb/src/common/debugger/GrepError.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/Makefile.am25
-rw-r--r--storage/ndb/src/common/debugger/SignalLoggerManager.cpp (renamed from ndb/src/common/debugger/SignalLoggerManager.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/AccLock.cpp (renamed from ndb/src/common/debugger/signaldata/AccLock.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/AlterIndx.cpp (renamed from ndb/src/common/debugger/signaldata/AlterIndx.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/AlterTab.cpp (renamed from ndb/src/common/debugger/signaldata/AlterTab.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/AlterTable.cpp (renamed from ndb/src/common/debugger/signaldata/AlterTable.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/AlterTrig.cpp (renamed from ndb/src/common/debugger/signaldata/AlterTrig.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp (renamed from ndb/src/common/debugger/signaldata/BackupImpl.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp (renamed from ndb/src/common/debugger/signaldata/BackupSignalData.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/CloseComReqConf.cpp (renamed from ndb/src/common/debugger/signaldata/CloseComReqConf.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/CntrStart.cpp (renamed from ndb/src/common/debugger/signaldata/CntrStart.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/ContinueB.cpp (renamed from ndb/src/common/debugger/signaldata/ContinueB.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/CopyGCI.cpp (renamed from ndb/src/common/debugger/signaldata/CopyGCI.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/CreateEvnt.cpp (renamed from ndb/src/common/debugger/signaldata/CreateEvnt.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp55
-rw-r--r--storage/ndb/src/common/debugger/signaldata/CreateIndx.cpp (renamed from ndb/src/common/debugger/signaldata/CreateIndx.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/CreateTrig.cpp (renamed from ndb/src/common/debugger/signaldata/CreateTrig.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp (renamed from ndb/src/common/debugger/signaldata/DictTabInfo.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/DihContinueB.cpp (renamed from ndb/src/common/debugger/signaldata/DihContinueB.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/DihSwitchReplicaReq.cpp (renamed from ndb/src/common/debugger/signaldata/DihSwitchReplicaReq.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/DisconnectRep.cpp (renamed from ndb/src/common/debugger/signaldata/DisconnectRep.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/DropIndx.cpp (renamed from ndb/src/common/debugger/signaldata/DropIndx.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/DropTab.cpp (renamed from ndb/src/common/debugger/signaldata/DropTab.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/DropTrig.cpp (renamed from ndb/src/common/debugger/signaldata/DropTrig.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/FailRep.cpp (renamed from ndb/src/common/debugger/signaldata/FailRep.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/FireTrigOrd.cpp (renamed from ndb/src/common/debugger/signaldata/FireTrigOrd.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/FsAppendReq.cpp (renamed from ndb/src/common/debugger/signaldata/FsAppendReq.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/FsCloseReq.cpp (renamed from ndb/src/common/debugger/signaldata/FsCloseReq.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/FsConf.cpp (renamed from ndb/src/common/debugger/signaldata/FsConf.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/FsOpenReq.cpp (renamed from ndb/src/common/debugger/signaldata/FsOpenReq.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/FsReadWriteReq.cpp (renamed from ndb/src/common/debugger/signaldata/FsReadWriteReq.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/FsRef.cpp (renamed from ndb/src/common/debugger/signaldata/FsRef.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/GCPSave.cpp (renamed from ndb/src/common/debugger/signaldata/GCPSave.cpp)0
-rwxr-xr-xstorage/ndb/src/common/debugger/signaldata/IndxAttrInfo.cpp (renamed from ndb/src/common/debugger/signaldata/IndxAttrInfo.cpp)0
-rwxr-xr-xstorage/ndb/src/common/debugger/signaldata/IndxKeyInfo.cpp (renamed from ndb/src/common/debugger/signaldata/IndxKeyInfo.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/LCP.cpp (renamed from ndb/src/common/debugger/signaldata/LCP.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp (renamed from ndb/src/common/debugger/signaldata/LqhFrag.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/LqhKey.cpp (renamed from ndb/src/common/debugger/signaldata/LqhKey.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/LqhTrans.cpp (renamed from ndb/src/common/debugger/signaldata/LqhTrans.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/Makefile.am47
-rw-r--r--storage/ndb/src/common/debugger/signaldata/MasterLCP.cpp (renamed from ndb/src/common/debugger/signaldata/MasterLCP.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/NFCompleteRep.cpp (renamed from ndb/src/common/debugger/signaldata/NFCompleteRep.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/NdbSttor.cpp (renamed from ndb/src/common/debugger/signaldata/NdbSttor.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp (renamed from ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/PackedSignal.cpp (renamed from ndb/src/common/debugger/signaldata/PackedSignal.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/PrepDropTab.cpp (renamed from ndb/src/common/debugger/signaldata/PrepDropTab.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/PrepFailReqRef.cpp (renamed from ndb/src/common/debugger/signaldata/PrepFailReqRef.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/ReadNodesConf.cpp (renamed from ndb/src/common/debugger/signaldata/ReadNodesConf.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/ScanFrag.cpp (renamed from ndb/src/common/debugger/signaldata/ScanFrag.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/ScanTab.cpp (renamed from ndb/src/common/debugger/signaldata/ScanTab.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp (renamed from ndb/src/common/debugger/signaldata/SignalDataPrint.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/SignalDroppedRep.cpp (renamed from ndb/src/common/debugger/signaldata/SignalDroppedRep.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/SignalNames.cpp (renamed from ndb/src/common/debugger/signaldata/SignalNames.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/StartRec.cpp (renamed from ndb/src/common/debugger/signaldata/StartRec.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp (renamed from ndb/src/common/debugger/signaldata/SumaImpl.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/SystemError.cpp (renamed from ndb/src/common/debugger/signaldata/SystemError.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/TcIndx.cpp (renamed from ndb/src/common/debugger/signaldata/TcIndx.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp (renamed from ndb/src/common/debugger/signaldata/TcKeyConf.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/TcKeyRef.cpp (renamed from ndb/src/common/debugger/signaldata/TcKeyRef.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp (renamed from ndb/src/common/debugger/signaldata/TcKeyReq.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/TcRollbackRep.cpp (renamed from ndb/src/common/debugger/signaldata/TcRollbackRep.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/TrigAttrInfo.cpp (renamed from ndb/src/common/debugger/signaldata/TrigAttrInfo.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/TupCommit.cpp (renamed from ndb/src/common/debugger/signaldata/TupCommit.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/TupKey.cpp (renamed from ndb/src/common/debugger/signaldata/TupKey.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/TuxMaint.cpp (renamed from ndb/src/common/debugger/signaldata/TuxMaint.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/UtilDelete.cpp (renamed from ndb/src/common/debugger/signaldata/UtilDelete.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/UtilExecute.cpp (renamed from ndb/src/common/debugger/signaldata/UtilExecute.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/UtilLock.cpp (renamed from ndb/src/common/debugger/signaldata/UtilLock.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/UtilPrepare.cpp (renamed from ndb/src/common/debugger/signaldata/UtilPrepare.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/UtilSequence.cpp (renamed from ndb/src/common/debugger/signaldata/UtilSequence.cpp)0
-rw-r--r--storage/ndb/src/common/debugger/signaldata/print.awk (renamed from ndb/src/common/debugger/signaldata/print.awk)0
-rw-r--r--storage/ndb/src/common/logger/ConsoleLogHandler.cpp (renamed from ndb/src/common/logger/ConsoleLogHandler.cpp)0
-rw-r--r--storage/ndb/src/common/logger/FileLogHandler.cpp (renamed from ndb/src/common/logger/FileLogHandler.cpp)0
-rw-r--r--storage/ndb/src/common/logger/LogHandler.cpp (renamed from ndb/src/common/logger/LogHandler.cpp)0
-rw-r--r--storage/ndb/src/common/logger/LogHandlerList.cpp (renamed from ndb/src/common/logger/LogHandlerList.cpp)0
-rw-r--r--storage/ndb/src/common/logger/LogHandlerList.hpp (renamed from ndb/src/common/logger/LogHandlerList.hpp)0
-rw-r--r--storage/ndb/src/common/logger/Logger.cpp (renamed from ndb/src/common/logger/Logger.cpp)0
-rw-r--r--storage/ndb/src/common/logger/Makefile.am25
-rw-r--r--storage/ndb/src/common/logger/SysLogHandler.cpp (renamed from ndb/src/common/logger/SysLogHandler.cpp)0
-rw-r--r--storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp (renamed from ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp)0
-rw-r--r--storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.hpp (renamed from ndb/src/common/logger/listtest/LogHandlerListUnitTest.hpp)0
-rw-r--r--storage/ndb/src/common/logger/listtest/Makefile (renamed from ndb/src/common/logger/listtest/Makefile)0
-rw-r--r--storage/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp (renamed from ndb/src/common/logger/loggertest/LoggerUnitTest.cpp)0
-rw-r--r--storage/ndb/src/common/logger/loggertest/LoggerUnitTest.hpp (renamed from ndb/src/common/logger/loggertest/LoggerUnitTest.hpp)0
-rw-r--r--storage/ndb/src/common/logger/loggertest/Makefile (renamed from ndb/src/common/logger/loggertest/Makefile)0
-rw-r--r--storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp (renamed from ndb/src/common/mgmcommon/ConfigRetriever.cpp)0
-rw-r--r--storage/ndb/src/common/mgmcommon/IPCConfig.cpp (renamed from ndb/src/common/mgmcommon/IPCConfig.cpp)0
-rw-r--r--storage/ndb/src/common/mgmcommon/Makefile.am28
-rw-r--r--storage/ndb/src/common/mgmcommon/printConfig/Makefile (renamed from ndb/src/common/mgmcommon/printConfig/Makefile)0
-rw-r--r--storage/ndb/src/common/mgmcommon/printConfig/printConfig.cpp (renamed from ndb/src/common/mgmcommon/printConfig/printConfig.cpp)0
-rw-r--r--storage/ndb/src/common/portlib/Makefile.am43
-rw-r--r--storage/ndb/src/common/portlib/NdbCondition.c (renamed from ndb/src/common/portlib/NdbCondition.c)0
-rw-r--r--storage/ndb/src/common/portlib/NdbConfig.c (renamed from ndb/src/common/portlib/NdbConfig.c)0
-rw-r--r--storage/ndb/src/common/portlib/NdbDaemon.c (renamed from ndb/src/common/portlib/NdbDaemon.c)0
-rw-r--r--storage/ndb/src/common/portlib/NdbEnv.c (renamed from ndb/src/common/portlib/NdbEnv.c)0
-rw-r--r--storage/ndb/src/common/portlib/NdbHost.c (renamed from ndb/src/common/portlib/NdbHost.c)0
-rw-r--r--storage/ndb/src/common/portlib/NdbMem.c (renamed from ndb/src/common/portlib/NdbMem.c)0
-rw-r--r--storage/ndb/src/common/portlib/NdbMutex.c (renamed from ndb/src/common/portlib/NdbMutex.c)0
-rw-r--r--storage/ndb/src/common/portlib/NdbPortLibTest.cpp (renamed from ndb/src/common/portlib/NdbPortLibTest.cpp)0
-rw-r--r--storage/ndb/src/common/portlib/NdbSleep.c (renamed from ndb/src/common/portlib/NdbSleep.c)0
-rw-r--r--storage/ndb/src/common/portlib/NdbTCP.cpp (renamed from ndb/src/common/portlib/NdbTCP.cpp)0
-rw-r--r--storage/ndb/src/common/portlib/NdbThread.c (renamed from ndb/src/common/portlib/NdbThread.c)0
-rw-r--r--storage/ndb/src/common/portlib/NdbTick.c (renamed from ndb/src/common/portlib/NdbTick.c)0
-rw-r--r--storage/ndb/src/common/portlib/gcc.cpp (renamed from ndb/src/common/portlib/gcc.cpp)0
-rw-r--r--storage/ndb/src/common/portlib/memtest.c (renamed from ndb/src/common/portlib/memtest.c)0
-rw-r--r--storage/ndb/src/common/portlib/mmslist.cpp (renamed from ndb/src/common/portlib/mmslist.cpp)0
-rw-r--r--storage/ndb/src/common/portlib/mmstest.cpp (renamed from ndb/src/common/portlib/mmstest.cpp)0
-rw-r--r--storage/ndb/src/common/portlib/munmaptest.cpp (renamed from ndb/src/common/portlib/munmaptest.cpp)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/memtest/Makefile (renamed from ndb/src/common/portlib/old_dirs/memtest/Makefile)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/memtest/munmaptest/Makefile (renamed from ndb/src/common/portlib/old_dirs/memtest/munmaptest/Makefile)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/ose/Makefile (renamed from ndb/src/common/portlib/old_dirs/ose/Makefile)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/ose/NdbCondition.c (renamed from ndb/src/common/portlib/old_dirs/ose/NdbCondition.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/ose/NdbConditionOSE.h (renamed from ndb/src/common/portlib/old_dirs/ose/NdbConditionOSE.h)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/ose/NdbEnv.c (renamed from ndb/src/common/portlib/old_dirs/ose/NdbEnv.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/ose/NdbHost.c (renamed from ndb/src/common/portlib/old_dirs/ose/NdbHost.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/ose/NdbMem.c (renamed from ndb/src/common/portlib/old_dirs/ose/NdbMem.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/ose/NdbMem_SoftOse.cpp (renamed from ndb/src/common/portlib/old_dirs/ose/NdbMem_SoftOse.cpp)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/ose/NdbMutex.c (renamed from ndb/src/common/portlib/old_dirs/ose/NdbMutex.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/ose/NdbOut.cpp (renamed from ndb/src/common/portlib/old_dirs/ose/NdbOut.cpp)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/ose/NdbSleep.c (renamed from ndb/src/common/portlib/old_dirs/ose/NdbSleep.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/ose/NdbTCP.c (renamed from ndb/src/common/portlib/old_dirs/ose/NdbTCP.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/ose/NdbThread.c (renamed from ndb/src/common/portlib/old_dirs/ose/NdbThread.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/ose/NdbTick.c (renamed from ndb/src/common/portlib/old_dirs/ose/NdbTick.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/test/Makefile (renamed from ndb/src/common/portlib/old_dirs/test/Makefile)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/win32/Makefile (renamed from ndb/src/common/portlib/old_dirs/win32/Makefile)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/win32/NdbCondition.c (renamed from ndb/src/common/portlib/old_dirs/win32/NdbCondition.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/win32/NdbDaemon.c (renamed from ndb/src/common/portlib/old_dirs/win32/NdbDaemon.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/win32/NdbEnv.c (renamed from ndb/src/common/portlib/old_dirs/win32/NdbEnv.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/win32/NdbHost.c (renamed from ndb/src/common/portlib/old_dirs/win32/NdbHost.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/win32/NdbMem.c (renamed from ndb/src/common/portlib/old_dirs/win32/NdbMem.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/win32/NdbMutex.c (renamed from ndb/src/common/portlib/old_dirs/win32/NdbMutex.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/win32/NdbSleep.c (renamed from ndb/src/common/portlib/old_dirs/win32/NdbSleep.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/win32/NdbTCP.c (renamed from ndb/src/common/portlib/old_dirs/win32/NdbTCP.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/win32/NdbThread.c (renamed from ndb/src/common/portlib/old_dirs/win32/NdbThread.c)0
-rw-r--r--storage/ndb/src/common/portlib/old_dirs/win32/NdbTick.c (renamed from ndb/src/common/portlib/old_dirs/win32/NdbTick.c)0
-rw-r--r--storage/ndb/src/common/portlib/win32/NdbCondition.c (renamed from ndb/src/common/portlib/win32/NdbCondition.c)0
-rw-r--r--storage/ndb/src/common/portlib/win32/NdbDaemon.c (renamed from ndb/src/common/portlib/win32/NdbDaemon.c)0
-rw-r--r--storage/ndb/src/common/portlib/win32/NdbEnv.c (renamed from ndb/src/common/portlib/win32/NdbEnv.c)0
-rw-r--r--storage/ndb/src/common/portlib/win32/NdbHost.c (renamed from ndb/src/common/portlib/win32/NdbHost.c)0
-rw-r--r--storage/ndb/src/common/portlib/win32/NdbMem.c (renamed from ndb/src/common/portlib/win32/NdbMem.c)0
-rw-r--r--storage/ndb/src/common/portlib/win32/NdbMutex.c (renamed from ndb/src/common/portlib/win32/NdbMutex.c)0
-rw-r--r--storage/ndb/src/common/portlib/win32/NdbSleep.c (renamed from ndb/src/common/portlib/win32/NdbSleep.c)0
-rw-r--r--storage/ndb/src/common/portlib/win32/NdbTCP.c (renamed from ndb/src/common/portlib/win32/NdbTCP.c)0
-rw-r--r--storage/ndb/src/common/portlib/win32/NdbThread.c (renamed from ndb/src/common/portlib/win32/NdbThread.c)0
-rw-r--r--storage/ndb/src/common/portlib/win32/NdbTick.c (renamed from ndb/src/common/portlib/win32/NdbTick.c)0
-rw-r--r--storage/ndb/src/common/transporter/Makefile.am36
-rw-r--r--storage/ndb/src/common/transporter/OSE_Receiver.cpp (renamed from ndb/src/common/transporter/OSE_Receiver.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/OSE_Receiver.hpp (renamed from ndb/src/common/transporter/OSE_Receiver.hpp)0
-rw-r--r--storage/ndb/src/common/transporter/OSE_Signals.hpp (renamed from ndb/src/common/transporter/OSE_Signals.hpp)0
-rw-r--r--storage/ndb/src/common/transporter/OSE_Transporter.cpp (renamed from ndb/src/common/transporter/OSE_Transporter.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/OSE_Transporter.hpp (renamed from ndb/src/common/transporter/OSE_Transporter.hpp)0
-rw-r--r--storage/ndb/src/common/transporter/Packer.cpp (renamed from ndb/src/common/transporter/Packer.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/Packer.hpp (renamed from ndb/src/common/transporter/Packer.hpp)0
-rw-r--r--storage/ndb/src/common/transporter/SCI_Transporter.cpp (renamed from ndb/src/common/transporter/SCI_Transporter.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/SCI_Transporter.hpp (renamed from ndb/src/common/transporter/SCI_Transporter.hpp)0
-rw-r--r--storage/ndb/src/common/transporter/SHM_Buffer.hpp (renamed from ndb/src/common/transporter/SHM_Buffer.hpp)0
-rw-r--r--storage/ndb/src/common/transporter/SHM_Transporter.cpp (renamed from ndb/src/common/transporter/SHM_Transporter.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/SHM_Transporter.hpp (renamed from ndb/src/common/transporter/SHM_Transporter.hpp)0
-rw-r--r--storage/ndb/src/common/transporter/SHM_Transporter.unix.cpp (renamed from ndb/src/common/transporter/SHM_Transporter.unix.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/SHM_Transporter.win32.cpp (renamed from ndb/src/common/transporter/SHM_Transporter.win32.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/SendBuffer.cpp (renamed from ndb/src/common/transporter/SendBuffer.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/SendBuffer.hpp (renamed from ndb/src/common/transporter/SendBuffer.hpp)0
-rw-r--r--storage/ndb/src/common/transporter/TCP_Transporter.cpp (renamed from ndb/src/common/transporter/TCP_Transporter.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/TCP_Transporter.hpp (renamed from ndb/src/common/transporter/TCP_Transporter.hpp)0
-rw-r--r--storage/ndb/src/common/transporter/Transporter.cpp (renamed from ndb/src/common/transporter/Transporter.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/Transporter.hpp (renamed from ndb/src/common/transporter/Transporter.hpp)0
-rw-r--r--storage/ndb/src/common/transporter/TransporterInternalDefinitions.hpp (renamed from ndb/src/common/transporter/TransporterInternalDefinitions.hpp)0
-rw-r--r--storage/ndb/src/common/transporter/TransporterRegistry.cpp (renamed from ndb/src/common/transporter/TransporterRegistry.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/basictest/Makefile (renamed from ndb/src/common/transporter/basictest/Makefile)0
-rw-r--r--storage/ndb/src/common/transporter/basictest/basicTransporterTest.cpp (renamed from ndb/src/common/transporter/basictest/basicTransporterTest.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/buddy.cpp (renamed from ndb/src/common/transporter/buddy.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/buddy.hpp (renamed from ndb/src/common/transporter/buddy.hpp)0
-rw-r--r--storage/ndb/src/common/transporter/failoverSCI/Makefile (renamed from ndb/src/common/transporter/failoverSCI/Makefile)0
-rw-r--r--storage/ndb/src/common/transporter/failoverSCI/failoverSCI.cpp (renamed from ndb/src/common/transporter/failoverSCI/failoverSCI.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/perftest/Makefile (renamed from ndb/src/common/transporter/perftest/Makefile)0
-rw-r--r--storage/ndb/src/common/transporter/perftest/perfTransporterTest.cpp (renamed from ndb/src/common/transporter/perftest/perfTransporterTest.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/priotest/Makefile (renamed from ndb/src/common/transporter/priotest/Makefile)0
-rw-r--r--storage/ndb/src/common/transporter/priotest/prioOSE/Makefile (renamed from ndb/src/common/transporter/priotest/prioOSE/Makefile)0
-rw-r--r--storage/ndb/src/common/transporter/priotest/prioSCI/Makefile (renamed from ndb/src/common/transporter/priotest/prioSCI/Makefile)0
-rw-r--r--storage/ndb/src/common/transporter/priotest/prioSCI/prioSCI.cpp (renamed from ndb/src/common/transporter/priotest/prioSCI/prioSCI.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/priotest/prioSHM/Makefile (renamed from ndb/src/common/transporter/priotest/prioSHM/Makefile)0
-rw-r--r--storage/ndb/src/common/transporter/priotest/prioSHM/prioSHM.cpp (renamed from ndb/src/common/transporter/priotest/prioSHM/prioSHM.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/priotest/prioTCP/Makefile (renamed from ndb/src/common/transporter/priotest/prioTCP/Makefile)0
-rw-r--r--storage/ndb/src/common/transporter/priotest/prioTCP/prioTCP.cpp (renamed from ndb/src/common/transporter/priotest/prioTCP/prioTCP.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/priotest/prioTransporterTest.cpp (renamed from ndb/src/common/transporter/priotest/prioTransporterTest.cpp)0
-rw-r--r--storage/ndb/src/common/transporter/priotest/prioTransporterTest.hpp (renamed from ndb/src/common/transporter/priotest/prioTransporterTest.hpp)0
-rw-r--r--storage/ndb/src/common/util/Base64.cpp (renamed from ndb/src/common/util/Base64.cpp)0
-rw-r--r--storage/ndb/src/common/util/BaseString.cpp (renamed from ndb/src/common/util/BaseString.cpp)0
-rw-r--r--storage/ndb/src/common/util/Bitmask.cpp (renamed from ndb/src/common/util/Bitmask.cpp)0
-rw-r--r--storage/ndb/src/common/util/ConfigValues.cpp (renamed from ndb/src/common/util/ConfigValues.cpp)0
-rw-r--r--storage/ndb/src/common/util/File.cpp (renamed from ndb/src/common/util/File.cpp)0
-rw-r--r--storage/ndb/src/common/util/InputStream.cpp (renamed from ndb/src/common/util/InputStream.cpp)0
-rw-r--r--storage/ndb/src/common/util/Makefile.am49
-rw-r--r--storage/ndb/src/common/util/NdbErrHnd.cpp (renamed from ndb/src/common/util/NdbErrHnd.cpp)0
-rw-r--r--storage/ndb/src/common/util/NdbOut.cpp (renamed from ndb/src/common/util/NdbOut.cpp)0
-rw-r--r--storage/ndb/src/common/util/NdbSqlUtil.cpp (renamed from ndb/src/common/util/NdbSqlUtil.cpp)0
-rw-r--r--storage/ndb/src/common/util/OutputStream.cpp (renamed from ndb/src/common/util/OutputStream.cpp)0
-rw-r--r--storage/ndb/src/common/util/Parser.cpp (renamed from ndb/src/common/util/Parser.cpp)0
-rw-r--r--storage/ndb/src/common/util/Properties.cpp (renamed from ndb/src/common/util/Properties.cpp)0
-rw-r--r--storage/ndb/src/common/util/SimpleProperties.cpp (renamed from ndb/src/common/util/SimpleProperties.cpp)0
-rw-r--r--storage/ndb/src/common/util/SocketAuthenticator.cpp (renamed from ndb/src/common/util/SocketAuthenticator.cpp)0
-rw-r--r--storage/ndb/src/common/util/SocketClient.cpp (renamed from ndb/src/common/util/SocketClient.cpp)0
-rw-r--r--storage/ndb/src/common/util/SocketServer.cpp (renamed from ndb/src/common/util/SocketServer.cpp)0
-rw-r--r--storage/ndb/src/common/util/basestring_vsnprintf.c (renamed from ndb/src/common/util/basestring_vsnprintf.c)0
-rw-r--r--storage/ndb/src/common/util/filetest/FileUnitTest.cpp (renamed from ndb/src/common/util/filetest/FileUnitTest.cpp)0
-rw-r--r--storage/ndb/src/common/util/filetest/FileUnitTest.hpp (renamed from ndb/src/common/util/filetest/FileUnitTest.hpp)0
-rw-r--r--storage/ndb/src/common/util/filetest/Makefile (renamed from ndb/src/common/util/filetest/Makefile)0
-rw-r--r--storage/ndb/src/common/util/getarg.cat3 (renamed from ndb/src/common/util/getarg.cat3)0
-rw-r--r--storage/ndb/src/common/util/md5_hash.cpp (renamed from ndb/src/common/util/md5_hash.cpp)0
-rw-r--r--storage/ndb/src/common/util/ndb_init.c (renamed from ndb/src/common/util/ndb_init.c)0
-rw-r--r--storage/ndb/src/common/util/new.cpp (renamed from ndb/src/common/util/new.cpp)0
-rw-r--r--storage/ndb/src/common/util/random.c (renamed from ndb/src/common/util/random.c)0
-rw-r--r--storage/ndb/src/common/util/socket_io.cpp (renamed from ndb/src/common/util/socket_io.cpp)0
-rw-r--r--storage/ndb/src/common/util/strdup.c (renamed from ndb/src/common/util/strdup.c)0
-rw-r--r--storage/ndb/src/common/util/testConfigValues/Makefile (renamed from ndb/src/common/util/testConfigValues/Makefile)0
-rw-r--r--storage/ndb/src/common/util/testConfigValues/testConfigValues.cpp (renamed from ndb/src/common/util/testConfigValues/testConfigValues.cpp)0
-rw-r--r--storage/ndb/src/common/util/testProperties/Makefile (renamed from ndb/src/common/util/testProperties/Makefile)0
-rw-r--r--storage/ndb/src/common/util/testProperties/testProperties.cpp (renamed from ndb/src/common/util/testProperties/testProperties.cpp)0
-rw-r--r--storage/ndb/src/common/util/testSimpleProperties/Makefile (renamed from ndb/src/common/util/testSimpleProperties/Makefile)0
-rw-r--r--storage/ndb/src/common/util/testSimpleProperties/sp_test.cpp (renamed from ndb/src/common/util/testSimpleProperties/sp_test.cpp)0
-rw-r--r--storage/ndb/src/common/util/uucode.c (renamed from ndb/src/common/util/uucode.c)0
-rw-r--r--storage/ndb/src/common/util/version.c244
-rw-r--r--storage/ndb/src/cw/Makefile.am (renamed from ndb/src/cw/Makefile.am)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.cpp (renamed from ndb/src/cw/cpcc-win32/C++/CPC_GUI.cpp)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsp (renamed from ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsp)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsw (renamed from ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsw)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.h (renamed from ndb/src/cw/cpcc-win32/C++/CPC_GUI.h)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.ico (renamed from ndb/src/cw/cpcc-win32/C++/CPC_GUI.ico)bin1078 -> 1078 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.rc (renamed from ndb/src/cw/cpcc-win32/C++/CPC_GUI.rc)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.sln (renamed from ndb/src/cw/cpcc-win32/C++/CPC_GUI.sln)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.suo (renamed from ndb/src/cw/cpcc-win32/C++/CPC_GUI.suo)bin8704 -> 8704 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj (renamed from ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/Closed.ICO (renamed from ndb/src/cw/cpcc-win32/C++/Closed.ICO)bin1078 -> 1078 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/NdbControls.cpp (renamed from ndb/src/cw/cpcc-win32/C++/NdbControls.cpp)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/Open.ICO (renamed from ndb/src/cw/cpcc-win32/C++/Open.ICO)bin1078 -> 1078 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/StdAfx.cpp (renamed from ndb/src/cw/cpcc-win32/C++/StdAfx.cpp)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/StdAfx.h (renamed from ndb/src/cw/cpcc-win32/C++/StdAfx.h)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/TreeView.cpp (renamed from ndb/src/cw/cpcc-win32/C++/TreeView.cpp)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/TreeView.h (renamed from ndb/src/cw/cpcc-win32/C++/TreeView.h)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/bmp00001.bmp (renamed from ndb/src/cw/cpcc-win32/C++/bmp00001.bmp)bin622 -> 622 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/resource.h (renamed from ndb/src/cw/cpcc-win32/C++/resource.h)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/small.ico (renamed from ndb/src/cw/cpcc-win32/C++/small.ico)bin318 -> 318 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/C++/toolbar.bmp (renamed from ndb/src/cw/cpcc-win32/C++/toolbar.bmp)bin622 -> 622 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/App.ico (renamed from ndb/src/cw/cpcc-win32/csharp/App.ico)bin1078 -> 1078 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/CPC_Form.cs (renamed from ndb/src/cw/cpcc-win32/csharp/CPC_Form.cs)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/Computer.cs (renamed from ndb/src/cw/cpcc-win32/csharp/Computer.cs)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/ComputerAddDialog.cs (renamed from ndb/src/cw/cpcc-win32/csharp/ComputerAddDialog.cs)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/ComputerRemoveDialog.cs (renamed from ndb/src/cw/cpcc-win32/csharp/ComputerRemoveDialog.cs)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/DATABASE.ICO (renamed from ndb/src/cw/cpcc-win32/csharp/DATABASE.ICO)bin1078 -> 1078 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/Database.cs (renamed from ndb/src/cw/cpcc-win32/csharp/Database.cs)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj (renamed from ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj.user (renamed from ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj.user)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.ncb (renamed from ndb/src/cw/cpcc-win32/csharp/NDB_CPC.ncb)bin19456 -> 19456 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.sln (renamed from ndb/src/cw/cpcc-win32/csharp/NDB_CPC.sln)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/PanelWizard.cs (renamed from ndb/src/cw/cpcc-win32/csharp/PanelWizard.cs)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/Process.cs (renamed from ndb/src/cw/cpcc-win32/csharp/Process.cs)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/ProcessDefineDialog.cs (renamed from ndb/src/cw/cpcc-win32/csharp/ProcessDefineDialog.cs)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/fileaccess/FileMgmt.cs (renamed from ndb/src/cw/cpcc-win32/csharp/fileaccess/FileMgmt.cs)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/simpleparser/SimpleCPCParser.cs (renamed from ndb/src/cw/cpcc-win32/csharp/simpleparser/SimpleCPCParser.cs)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/SocketComm.cs (renamed from ndb/src/cw/cpcc-win32/csharp/socketcomm/SocketComm.cs)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/myTcpClient.cs (renamed from ndb/src/cw/cpcc-win32/csharp/socketcomm/myTcpClient.cs)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/startDatabaseDlg.cs (renamed from ndb/src/cw/cpcc-win32/csharp/startDatabaseDlg.cs)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/csharp/telnetclient/telnetClient.cs (renamed from ndb/src/cw/cpcc-win32/csharp/telnetclient/telnetClient.cs)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Computer.cls (renamed from ndb/src/cw/cpcc-win32/vb6/Computer.cls)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Database.cls (renamed from ndb/src/cw/cpcc-win32/vb6/Database.cls)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Icon 110.ico (renamed from ndb/src/cw/cpcc-win32/vb6/Icon 110.ico)bin766 -> 766 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Icon 231.ico (renamed from ndb/src/cw/cpcc-win32/vb6/Icon 231.ico)bin766 -> 766 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Icon 237.ico (renamed from ndb/src/cw/cpcc-win32/vb6/Icon 237.ico)bin766 -> 766 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Icon 241.ico (renamed from ndb/src/cw/cpcc-win32/vb6/Icon 241.ico)bin766 -> 766 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Icon 242.ico (renamed from ndb/src/cw/cpcc-win32/vb6/Icon 242.ico)bin766 -> 766 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Icon 270.ico (renamed from ndb/src/cw/cpcc-win32/vb6/Icon 270.ico)bin766 -> 766 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Icon 271.ico (renamed from ndb/src/cw/cpcc-win32/vb6/Icon 271.ico)bin766 -> 766 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Icon 273.ico (renamed from ndb/src/cw/cpcc-win32/vb6/Icon 273.ico)bin766 -> 766 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Icon 31.ico (renamed from ndb/src/cw/cpcc-win32/vb6/Icon 31.ico)bin766 -> 766 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Icon 337.ico (renamed from ndb/src/cw/cpcc-win32/vb6/Icon 337.ico)bin766 -> 766 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Icon 338.ico (renamed from ndb/src/cw/cpcc-win32/vb6/Icon 338.ico)bin766 -> 766 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Icon 339.ico (renamed from ndb/src/cw/cpcc-win32/vb6/Icon 339.ico)bin766 -> 766 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/MSSCCPRJ.SCC (renamed from ndb/src/cw/cpcc-win32/vb6/MSSCCPRJ.SCC)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Module1.bas (renamed from ndb/src/cw/cpcc-win32/vb6/Module1.bas)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbp (renamed from ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbp)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbw (renamed from ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbw)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/Process.cls (renamed from ndb/src/cw/cpcc-win32/vb6/Process.cls)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/closed folder.ico (renamed from ndb/src/cw/cpcc-win32/vb6/closed folder.ico)bin10134 -> 10134 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/computer.ico (renamed from ndb/src/cw/cpcc-win32/vb6/computer.ico)bin10134 -> 10134 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/frmAbout.frm (renamed from ndb/src/cw/cpcc-win32/vb6/frmAbout.frm)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/frmLogin.frm (renamed from ndb/src/cw/cpcc-win32/vb6/frmLogin.frm)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/frmMain.frm (renamed from ndb/src/cw/cpcc-win32/vb6/frmMain.frm)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frm (renamed from ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frm)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frx (renamed from ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frx)bin4 -> 4 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase.frx (renamed from ndb/src/cw/cpcc-win32/vb6/frmNewDatabase.frx)bin12 -> 12 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase1.frm (renamed from ndb/src/cw/cpcc-win32/vb6/frmNewDatabase1.frm)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.frm (renamed from ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.frm)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.log (renamed from ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.log)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase3.frm (renamed from ndb/src/cw/cpcc-win32/vb6/frmNewDatabase3.frm)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/frmOptions.frm (renamed from ndb/src/cw/cpcc-win32/vb6/frmOptions.frm)0
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/frmSplash.frx (renamed from ndb/src/cw/cpcc-win32/vb6/frmSplash.frx)bin70450 -> 70450 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/networking.ico (renamed from ndb/src/cw/cpcc-win32/vb6/networking.ico)bin10134 -> 10134 bytes
-rw-r--r--storage/ndb/src/cw/cpcc-win32/vb6/open folder.ico (renamed from ndb/src/cw/cpcc-win32/vb6/open folder.ico)bin10134 -> 10134 bytes
-rw-r--r--storage/ndb/src/cw/cpcd/APIService.cpp (renamed from ndb/src/cw/cpcd/APIService.cpp)0
-rw-r--r--storage/ndb/src/cw/cpcd/APIService.hpp (renamed from ndb/src/cw/cpcd/APIService.hpp)0
-rw-r--r--storage/ndb/src/cw/cpcd/CPCD.cpp (renamed from ndb/src/cw/cpcd/CPCD.cpp)0
-rw-r--r--storage/ndb/src/cw/cpcd/CPCD.hpp (renamed from ndb/src/cw/cpcd/CPCD.hpp)0
-rw-r--r--storage/ndb/src/cw/cpcd/Makefile.am20
-rw-r--r--storage/ndb/src/cw/cpcd/Monitor.cpp (renamed from ndb/src/cw/cpcd/Monitor.cpp)0
-rw-r--r--storage/ndb/src/cw/cpcd/Process.cpp (renamed from ndb/src/cw/cpcd/Process.cpp)0
-rw-r--r--storage/ndb/src/cw/cpcd/common.cpp (renamed from ndb/src/cw/cpcd/common.cpp)0
-rw-r--r--storage/ndb/src/cw/cpcd/common.hpp (renamed from ndb/src/cw/cpcd/common.hpp)0
-rw-r--r--storage/ndb/src/cw/cpcd/main.cpp (renamed from ndb/src/cw/cpcd/main.cpp)0
-rw-r--r--storage/ndb/src/cw/test/socketclient/Makefile (renamed from ndb/src/cw/test/socketclient/Makefile)0
-rw-r--r--storage/ndb/src/cw/test/socketclient/socketClientTest.cpp (renamed from ndb/src/cw/test/socketclient/socketClientTest.cpp)0
-rw-r--r--storage/ndb/src/cw/util/ClientInterface.cpp (renamed from ndb/src/cw/util/ClientInterface.cpp)0
-rw-r--r--storage/ndb/src/cw/util/ClientInterface.hpp (renamed from ndb/src/cw/util/ClientInterface.hpp)0
-rw-r--r--storage/ndb/src/cw/util/Makefile (renamed from ndb/src/cw/util/Makefile)0
-rw-r--r--storage/ndb/src/cw/util/SocketRegistry.cpp (renamed from ndb/src/cw/util/SocketRegistry.cpp)0
-rw-r--r--storage/ndb/src/cw/util/SocketRegistry.hpp (renamed from ndb/src/cw/util/SocketRegistry.hpp)0
-rw-r--r--storage/ndb/src/cw/util/SocketService.cpp (renamed from ndb/src/cw/util/SocketService.cpp)0
-rw-r--r--storage/ndb/src/cw/util/SocketService.hpp (renamed from ndb/src/cw/util/SocketService.hpp)0
-rw-r--r--storage/ndb/src/external/WIN32.x86/sci/lib/SISCI_LIBRARY_WIN32.TXT (renamed from ndb/src/external/WIN32.x86/sci/lib/SISCI_LIBRARY_WIN32.TXT)0
-rw-r--r--storage/ndb/src/external/WIN32.x86/sci/lib/scilib.lib (renamed from ndb/src/external/WIN32.x86/sci/lib/scilib.lib)bin17918 -> 17918 bytes
-rw-r--r--storage/ndb/src/external/WIN32.x86/sci/lib/scilib_md.lib (renamed from ndb/src/external/WIN32.x86/sci/lib/scilib_md.lib)bin18000 -> 18000 bytes
-rw-r--r--storage/ndb/src/external/WIN32.x86/sci/lib/scilib_mt.lib (renamed from ndb/src/external/WIN32.x86/sci/lib/scilib_mt.lib)bin17924 -> 17924 bytes
-rw-r--r--storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api.lib (renamed from ndb/src/external/WIN32.x86/sci/lib/sisci_api.lib)bin264284 -> 264284 bytes
-rw-r--r--storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_md.lib (renamed from ndb/src/external/WIN32.x86/sci/lib/sisci_api_md.lib)bin265578 -> 265578 bytes
-rw-r--r--storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_mt.lib (renamed from ndb/src/external/WIN32.x86/sci/lib/sisci_api_mt.lib)bin264386 -> 264386 bytes
-rw-r--r--storage/ndb/src/kernel/Makefile.am75
-rw-r--r--storage/ndb/src/kernel/SimBlockList.cpp (renamed from ndb/src/kernel/SimBlockList.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ERROR_codes.txt (renamed from ndb/src/kernel/blocks/ERROR_codes.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/Makefile.am (renamed from ndb/src/kernel/blocks/Makefile.am)0
-rw-r--r--storage/ndb/src/kernel/blocks/NodeRestart.new.txt (renamed from ndb/src/kernel/blocks/NodeRestart.new.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/NodeRestart.txt (renamed from ndb/src/kernel/blocks/NodeRestart.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/Start.txt (renamed from ndb/src/kernel/blocks/Start.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/SystemRestart.new.txt (renamed from ndb/src/kernel/blocks/SystemRestart.new.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/SystemRestart.txt (renamed from ndb/src/kernel/blocks/SystemRestart.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/backup/Backup.cpp (renamed from ndb/src/kernel/blocks/backup/Backup.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/backup/Backup.hpp (renamed from ndb/src/kernel/blocks/backup/Backup.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/backup/Backup.txt (renamed from ndb/src/kernel/blocks/backup/Backup.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp (renamed from ndb/src/kernel/blocks/backup/BackupFormat.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/backup/BackupInit.cpp (renamed from ndb/src/kernel/blocks/backup/BackupInit.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp (renamed from ndb/src/kernel/blocks/backup/FsBuffer.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/backup/Makefile.am24
-rw-r--r--storage/ndb/src/kernel/blocks/backup/read.cpp (renamed from ndb/src/kernel/blocks/backup/read.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp (renamed from ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp (renamed from ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/cmvmi/Makefile.am24
-rw-r--r--storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp1470
-rw-r--r--storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp343
-rw-r--r--storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp11817
-rw-r--r--storage/ndb/src/kernel/blocks/dbacc/Makefile.am26
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/CreateIndex.txt (renamed from ndb/src/kernel/blocks/dbdict/CreateIndex.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt (renamed from ndb/src/kernel/blocks/dbdict/CreateTable.new.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/CreateTable.txt (renamed from ndb/src/kernel/blocks/dbdict/CreateTable.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp12144
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp2025
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/Dbdict.txt (renamed from ndb/src/kernel/blocks/dbdict/Dbdict.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/DropTable.txt (renamed from ndb/src/kernel/blocks/dbdict/DropTable.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/Event.txt (renamed from ndb/src/kernel/blocks/dbdict/Event.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/Makefile.am25
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl (renamed from ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp (renamed from ndb/src/kernel/blocks/dbdict/SchemaFile.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl (renamed from ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp (renamed from ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp1606
-rw-r--r--storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp (renamed from ndb/src/kernel/blocks/dbdih/DbdihInit.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp14352
-rw-r--r--storage/ndb/src/kernel/blocks/dbdih/LCP.txt (renamed from ndb/src/kernel/blocks/dbdih/LCP.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbdih/Makefile.am23
-rw-r--r--storage/ndb/src/kernel/blocks/dbdih/Sysfile.hpp (renamed from ndb/src/kernel/blocks/dbdih/Sysfile.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbdih/printSysfile/Makefile (renamed from ndb/src/kernel/blocks/dbdih/printSysfile/Makefile)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp (renamed from ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp2956
-rw-r--r--storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp455
-rw-r--r--storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp18635
-rw-r--r--storage/ndb/src/kernel/blocks/dblqh/Makefile.am25
-rw-r--r--storage/ndb/src/kernel/blocks/dblqh/redoLogReader/Makefile (renamed from ndb/src/kernel/blocks/dblqh/redoLogReader/Makefile)0
-rw-r--r--storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp (renamed from ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp (renamed from ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp (renamed from ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp1955
-rw-r--r--storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp (renamed from ndb/src/kernel/blocks/dbtc/DbtcInit.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp13140
-rw-r--r--storage/ndb/src/kernel/blocks/dbtc/Makefile.am23
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp (renamed from ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp2469
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupGen.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp1186
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupScan.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp1153
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupUndoLog.cpp (renamed from ndb/src/kernel/blocks/dbtup/DbtupUndoLog.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/Makefile.am42
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/Notes.txt (renamed from ndb/src/kernel/blocks/dbtup/Notes.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp (renamed from ndb/src/kernel/blocks/dbtux/Dbtux.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp (renamed from ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp (renamed from ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp (renamed from ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp (renamed from ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp (renamed from ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp (renamed from ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp (renamed from ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp (renamed from ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp (renamed from ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/Makefile.am34
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/Times.txt (renamed from ndb/src/kernel/blocks/dbtux/Times.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/tuxstatus.html (renamed from ndb/src/kernel/blocks/dbtux/tuxstatus.html)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp (renamed from ndb/src/kernel/blocks/dbutil/DbUtil.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp (renamed from ndb/src/kernel/blocks/dbutil/DbUtil.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbutil/DbUtil.txt (renamed from ndb/src/kernel/blocks/dbutil/DbUtil.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/dbutil/Makefile.am23
-rw-r--r--storage/ndb/src/kernel/blocks/grep/Grep.cpp (renamed from ndb/src/kernel/blocks/grep/Grep.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/grep/Grep.hpp (renamed from ndb/src/kernel/blocks/grep/Grep.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/grep/GrepInit.cpp (renamed from ndb/src/kernel/blocks/grep/GrepInit.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/grep/Makefile.am23
-rw-r--r--storage/ndb/src/kernel/blocks/grep/systab_test/Makefile (renamed from ndb/src/kernel/blocks/grep/systab_test/Makefile)0
-rw-r--r--storage/ndb/src/kernel/blocks/grep/systab_test/grep_systab_test.cpp (renamed from ndb/src/kernel/blocks/grep/systab_test/grep_systab_test.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/mutexes.hpp (renamed from ndb/src/kernel/blocks/mutexes.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbcntr/Makefile.am26
-rw-r--r--storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp (renamed from ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp (renamed from ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp (renamed from ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp (renamed from ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp (renamed from ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp (renamed from ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp (renamed from ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/Makefile (renamed from ndb/src/kernel/blocks/ndbfs/AsyncFileTest/Makefile)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.cpp (renamed from ndb/src/kernel/blocks/ndbfs/CircularIndex.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp (renamed from ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp (renamed from ndb/src/kernel/blocks/ndbfs/Filename.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp (renamed from ndb/src/kernel/blocks/ndbfs/Filename.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/Makefile.am27
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp (renamed from ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp (renamed from ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp (renamed from ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile (renamed from ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp (renamed from ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp (renamed from ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp (renamed from ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp (renamed from ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp (renamed from ndb/src/kernel/blocks/ndbfs/Pool.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp (renamed from ndb/src/kernel/blocks/ndbfs/VoidFs.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/new-block.tar.gz (renamed from ndb/src/kernel/blocks/new-block.tar.gz)bin1816 -> 1816 bytes
-rw-r--r--storage/ndb/src/kernel/blocks/qmgr/Makefile.am25
-rw-r--r--storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp (renamed from ndb/src/kernel/blocks/qmgr/Qmgr.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp (renamed from ndb/src/kernel/blocks/qmgr/QmgrInit.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp (renamed from ndb/src/kernel/blocks/qmgr/QmgrMain.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/qmgr/timer.hpp (renamed from ndb/src/kernel/blocks/qmgr/timer.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/suma/Makefile.am23
-rw-r--r--storage/ndb/src/kernel/blocks/suma/Suma.cpp (renamed from ndb/src/kernel/blocks/suma/Suma.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/suma/Suma.hpp (renamed from ndb/src/kernel/blocks/suma/Suma.hpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/suma/Suma.txt (renamed from ndb/src/kernel/blocks/suma/Suma.txt)0
-rw-r--r--storage/ndb/src/kernel/blocks/suma/SumaInit.cpp (renamed from ndb/src/kernel/blocks/suma/SumaInit.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/trix/Makefile.am23
-rw-r--r--storage/ndb/src/kernel/blocks/trix/Trix.cpp (renamed from ndb/src/kernel/blocks/trix/Trix.cpp)0
-rw-r--r--storage/ndb/src/kernel/blocks/trix/Trix.hpp (renamed from ndb/src/kernel/blocks/trix/Trix.hpp)0
-rw-r--r--storage/ndb/src/kernel/error/Error.hpp (renamed from ndb/src/kernel/error/Error.hpp)0
-rw-r--r--storage/ndb/src/kernel/error/ErrorHandlingMacros.hpp (renamed from ndb/src/kernel/error/ErrorHandlingMacros.hpp)0
-rw-r--r--storage/ndb/src/kernel/error/ErrorMessages.cpp (renamed from ndb/src/kernel/error/ErrorMessages.cpp)0
-rw-r--r--storage/ndb/src/kernel/error/ErrorMessages.hpp (renamed from ndb/src/kernel/error/ErrorMessages.hpp)0
-rw-r--r--storage/ndb/src/kernel/error/ErrorReporter.cpp (renamed from ndb/src/kernel/error/ErrorReporter.cpp)0
-rw-r--r--storage/ndb/src/kernel/error/ErrorReporter.hpp (renamed from ndb/src/kernel/error/ErrorReporter.hpp)0
-rw-r--r--storage/ndb/src/kernel/error/Makefile.am25
-rw-r--r--storage/ndb/src/kernel/error/TimeModule.cpp (renamed from ndb/src/kernel/error/TimeModule.cpp)0
-rw-r--r--storage/ndb/src/kernel/error/TimeModule.hpp (renamed from ndb/src/kernel/error/TimeModule.hpp)0
-rw-r--r--storage/ndb/src/kernel/main.cpp (renamed from ndb/src/kernel/main.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/Array.hpp (renamed from ndb/src/kernel/vm/Array.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/ArrayFifoList.hpp (renamed from ndb/src/kernel/vm/ArrayFifoList.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/ArrayList.hpp (renamed from ndb/src/kernel/vm/ArrayList.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/ArrayPool.hpp (renamed from ndb/src/kernel/vm/ArrayPool.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/CArray.hpp (renamed from ndb/src/kernel/vm/CArray.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/Callback.hpp (renamed from ndb/src/kernel/vm/Callback.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/ClusterConfiguration.cpp (renamed from ndb/src/kernel/vm/ClusterConfiguration.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/ClusterConfiguration.hpp (renamed from ndb/src/kernel/vm/ClusterConfiguration.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/Configuration.cpp (renamed from ndb/src/kernel/vm/Configuration.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/Configuration.hpp (renamed from ndb/src/kernel/vm/Configuration.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/DLFifoList.hpp (renamed from ndb/src/kernel/vm/DLFifoList.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/DLHashTable.hpp (renamed from ndb/src/kernel/vm/DLHashTable.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/DLHashTable2.hpp (renamed from ndb/src/kernel/vm/DLHashTable2.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/DLList.hpp (renamed from ndb/src/kernel/vm/DLList.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/DataBuffer.hpp (renamed from ndb/src/kernel/vm/DataBuffer.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/Emulator.cpp (renamed from ndb/src/kernel/vm/Emulator.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/Emulator.hpp (renamed from ndb/src/kernel/vm/Emulator.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/FastScheduler.cpp (renamed from ndb/src/kernel/vm/FastScheduler.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/FastScheduler.hpp (renamed from ndb/src/kernel/vm/FastScheduler.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/GlobalData.hpp (renamed from ndb/src/kernel/vm/GlobalData.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/KeyTable.hpp (renamed from ndb/src/kernel/vm/KeyTable.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/KeyTable2.hpp (renamed from ndb/src/kernel/vm/KeyTable2.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/LongSignal.hpp (renamed from ndb/src/kernel/vm/LongSignal.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/Makefile.am44
-rw-r--r--storage/ndb/src/kernel/vm/MetaData.cpp (renamed from ndb/src/kernel/vm/MetaData.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/MetaData.hpp (renamed from ndb/src/kernel/vm/MetaData.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/Mutex.cpp (renamed from ndb/src/kernel/vm/Mutex.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/Mutex.hpp (renamed from ndb/src/kernel/vm/Mutex.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/Prio.hpp (renamed from ndb/src/kernel/vm/Prio.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/RequestTracker.hpp (renamed from ndb/src/kernel/vm/RequestTracker.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/SLList.hpp (renamed from ndb/src/kernel/vm/SLList.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/SafeCounter.cpp (renamed from ndb/src/kernel/vm/SafeCounter.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/SafeCounter.hpp (renamed from ndb/src/kernel/vm/SafeCounter.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/SectionReader.cpp (renamed from ndb/src/kernel/vm/SectionReader.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/SectionReader.hpp (renamed from ndb/src/kernel/vm/SectionReader.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/SignalCounter.hpp (renamed from ndb/src/kernel/vm/SignalCounter.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/SimBlockList.hpp (renamed from ndb/src/kernel/vm/SimBlockList.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/SimplePropertiesSection.cpp (renamed from ndb/src/kernel/vm/SimplePropertiesSection.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/SimulatedBlock.cpp (renamed from ndb/src/kernel/vm/SimulatedBlock.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/SimulatedBlock.hpp (renamed from ndb/src/kernel/vm/SimulatedBlock.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/SuperPool.cpp (renamed from ndb/src/kernel/vm/SuperPool.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/SuperPool.hpp (renamed from ndb/src/kernel/vm/SuperPool.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/ThreadConfig.cpp (renamed from ndb/src/kernel/vm/ThreadConfig.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/ThreadConfig.hpp (renamed from ndb/src/kernel/vm/ThreadConfig.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/TimeQueue.cpp (renamed from ndb/src/kernel/vm/TimeQueue.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/TimeQueue.hpp (renamed from ndb/src/kernel/vm/TimeQueue.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/TransporterCallback.cpp (renamed from ndb/src/kernel/vm/TransporterCallback.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/VMSignal.cpp (renamed from ndb/src/kernel/vm/VMSignal.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/VMSignal.hpp (renamed from ndb/src/kernel/vm/VMSignal.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/WaitQueue.hpp (renamed from ndb/src/kernel/vm/WaitQueue.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/WatchDog.cpp (renamed from ndb/src/kernel/vm/WatchDog.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/WatchDog.hpp (renamed from ndb/src/kernel/vm/WatchDog.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/al_test/Makefile (renamed from ndb/src/kernel/vm/al_test/Makefile)0
-rw-r--r--storage/ndb/src/kernel/vm/al_test/arrayListTest.cpp (renamed from ndb/src/kernel/vm/al_test/arrayListTest.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/al_test/arrayPoolTest.cpp (renamed from ndb/src/kernel/vm/al_test/arrayPoolTest.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/al_test/main.cpp (renamed from ndb/src/kernel/vm/al_test/main.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/pc.hpp (renamed from ndb/src/kernel/vm/pc.hpp)0
-rw-r--r--storage/ndb/src/kernel/vm/testCopy/Makefile (renamed from ndb/src/kernel/vm/testCopy/Makefile)0
-rw-r--r--storage/ndb/src/kernel/vm/testCopy/rr.cpp (renamed from ndb/src/kernel/vm/testCopy/rr.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/testCopy/testCopy.cpp (renamed from ndb/src/kernel/vm/testCopy/testCopy.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/testDataBuffer/Makefile (renamed from ndb/src/kernel/vm/testDataBuffer/Makefile)0
-rw-r--r--storage/ndb/src/kernel/vm/testDataBuffer/testDataBuffer.cpp (renamed from ndb/src/kernel/vm/testDataBuffer/testDataBuffer.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/testLongSig/Makefile (renamed from ndb/src/kernel/vm/testLongSig/Makefile)0
-rw-r--r--storage/ndb/src/kernel/vm/testLongSig/testLongSig.cpp (renamed from ndb/src/kernel/vm/testLongSig/testLongSig.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/testSimplePropertiesSection/Makefile (renamed from ndb/src/kernel/vm/testSimplePropertiesSection/Makefile)0
-rw-r--r--storage/ndb/src/kernel/vm/testSimplePropertiesSection/test.cpp (renamed from ndb/src/kernel/vm/testSimplePropertiesSection/test.cpp)0
-rw-r--r--storage/ndb/src/kernel/vm/testSuperPool.cpp (renamed from ndb/src/kernel/vm/testSuperPool.cpp)0
-rw-r--r--storage/ndb/src/mgmapi/LocalConfig.cpp (renamed from ndb/src/mgmapi/LocalConfig.cpp)0
-rw-r--r--storage/ndb/src/mgmapi/LocalConfig.hpp (renamed from ndb/src/mgmapi/LocalConfig.hpp)0
-rw-r--r--storage/ndb/src/mgmapi/Makefile.am30
-rw-r--r--storage/ndb/src/mgmapi/mgmapi.cpp (renamed from ndb/src/mgmapi/mgmapi.cpp)0
-rw-r--r--storage/ndb/src/mgmapi/mgmapi_configuration.cpp (renamed from ndb/src/mgmapi/mgmapi_configuration.cpp)0
-rw-r--r--storage/ndb/src/mgmapi/mgmapi_configuration.hpp (renamed from ndb/src/mgmapi/mgmapi_configuration.hpp)0
-rw-r--r--storage/ndb/src/mgmapi/mgmapi_internal.h (renamed from ndb/src/mgmapi/mgmapi_internal.h)0
-rw-r--r--storage/ndb/src/mgmapi/ndb_logevent.cpp (renamed from ndb/src/mgmapi/ndb_logevent.cpp)0
-rw-r--r--storage/ndb/src/mgmapi/ndb_logevent.hpp (renamed from ndb/src/mgmapi/ndb_logevent.hpp)0
-rw-r--r--storage/ndb/src/mgmapi/test/Makefile (renamed from ndb/src/mgmapi/test/Makefile)0
-rw-r--r--storage/ndb/src/mgmapi/test/keso.c (renamed from ndb/src/mgmapi/test/keso.c)0
-rw-r--r--storage/ndb/src/mgmapi/test/mgmSrvApi.cpp (renamed from ndb/src/mgmapi/test/mgmSrvApi.cpp)0
-rw-r--r--storage/ndb/src/mgmclient/CommandInterpreter.cpp (renamed from ndb/src/mgmclient/CommandInterpreter.cpp)0
-rw-r--r--storage/ndb/src/mgmclient/Makefile.am58
-rw-r--r--storage/ndb/src/mgmclient/main.cpp (renamed from ndb/src/mgmclient/main.cpp)0
-rw-r--r--storage/ndb/src/mgmclient/ndb_mgmclient.h (renamed from ndb/src/mgmclient/ndb_mgmclient.h)0
-rw-r--r--storage/ndb/src/mgmclient/ndb_mgmclient.hpp (renamed from ndb/src/mgmclient/ndb_mgmclient.hpp)0
-rw-r--r--storage/ndb/src/mgmclient/test_cpcd/Makefile (renamed from ndb/src/mgmclient/test_cpcd/Makefile)0
-rw-r--r--storage/ndb/src/mgmclient/test_cpcd/test_cpcd.cpp (renamed from ndb/src/mgmclient/test_cpcd/test_cpcd.cpp)0
-rw-r--r--storage/ndb/src/mgmsrv/Config.cpp (renamed from ndb/src/mgmsrv/Config.cpp)0
-rw-r--r--storage/ndb/src/mgmsrv/Config.hpp (renamed from ndb/src/mgmsrv/Config.hpp)0
-rw-r--r--storage/ndb/src/mgmsrv/ConfigInfo.cpp (renamed from ndb/src/mgmsrv/ConfigInfo.cpp)0
-rw-r--r--storage/ndb/src/mgmsrv/ConfigInfo.hpp (renamed from ndb/src/mgmsrv/ConfigInfo.hpp)0
-rw-r--r--storage/ndb/src/mgmsrv/InitConfigFileParser.cpp (renamed from ndb/src/mgmsrv/InitConfigFileParser.cpp)0
-rw-r--r--storage/ndb/src/mgmsrv/InitConfigFileParser.hpp (renamed from ndb/src/mgmsrv/InitConfigFileParser.hpp)0
-rw-r--r--storage/ndb/src/mgmsrv/Makefile.am60
-rw-r--r--storage/ndb/src/mgmsrv/MgmtSrvr.cpp (renamed from ndb/src/mgmsrv/MgmtSrvr.cpp)0
-rw-r--r--storage/ndb/src/mgmsrv/MgmtSrvr.hpp (renamed from ndb/src/mgmsrv/MgmtSrvr.hpp)0
-rw-r--r--storage/ndb/src/mgmsrv/MgmtSrvrConfig.cpp (renamed from ndb/src/mgmsrv/MgmtSrvrConfig.cpp)0
-rw-r--r--storage/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp (renamed from ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp)0
-rw-r--r--storage/ndb/src/mgmsrv/Services.cpp (renamed from ndb/src/mgmsrv/Services.cpp)0
-rw-r--r--storage/ndb/src/mgmsrv/Services.hpp (renamed from ndb/src/mgmsrv/Services.hpp)0
-rw-r--r--storage/ndb/src/mgmsrv/SignalQueue.cpp (renamed from ndb/src/mgmsrv/SignalQueue.cpp)0
-rw-r--r--storage/ndb/src/mgmsrv/SignalQueue.hpp (renamed from ndb/src/mgmsrv/SignalQueue.hpp)0
-rw-r--r--storage/ndb/src/mgmsrv/convertStrToInt.cpp (renamed from ndb/src/mgmsrv/convertStrToInt.cpp)0
-rw-r--r--storage/ndb/src/mgmsrv/convertStrToInt.hpp (renamed from ndb/src/mgmsrv/convertStrToInt.hpp)0
-rw-r--r--storage/ndb/src/mgmsrv/main.cpp (renamed from ndb/src/mgmsrv/main.cpp)0
-rw-r--r--storage/ndb/src/mgmsrv/mkconfig/Makefile (renamed from ndb/src/mgmsrv/mkconfig/Makefile)0
-rw-r--r--storage/ndb/src/mgmsrv/mkconfig/mkconfig.cpp (renamed from ndb/src/mgmsrv/mkconfig/mkconfig.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/API.hpp (renamed from ndb/src/ndbapi/API.hpp)0
-rw-r--r--storage/ndb/src/ndbapi/ClusterMgr.cpp (renamed from ndb/src/ndbapi/ClusterMgr.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/ClusterMgr.hpp (renamed from ndb/src/ndbapi/ClusterMgr.hpp)0
-rw-r--r--storage/ndb/src/ndbapi/DictCache.cpp (renamed from ndb/src/ndbapi/DictCache.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/DictCache.hpp (renamed from ndb/src/ndbapi/DictCache.hpp)0
-rw-r--r--storage/ndb/src/ndbapi/Makefile.am62
-rw-r--r--storage/ndb/src/ndbapi/Ndb.cpp (renamed from ndb/src/ndbapi/Ndb.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbApiSignal.cpp (renamed from ndb/src/ndbapi/NdbApiSignal.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbApiSignal.hpp (renamed from ndb/src/ndbapi/NdbApiSignal.hpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbBlob.cpp1619
-rw-r--r--storage/ndb/src/ndbapi/NdbBlobImpl.hpp (renamed from ndb/src/ndbapi/NdbBlobImpl.hpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbDictionary.cpp1072
-rw-r--r--storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp3197
-rw-r--r--storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp708
-rw-r--r--storage/ndb/src/ndbapi/NdbErrorOut.cpp (renamed from ndb/src/ndbapi/NdbErrorOut.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbEventOperation.cpp (renamed from ndb/src/ndbapi/NdbEventOperation.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp (renamed from ndb/src/ndbapi/NdbEventOperationImpl.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp (renamed from ndb/src/ndbapi/NdbEventOperationImpl.hpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbImpl.hpp (renamed from ndb/src/ndbapi/NdbImpl.hpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbIndexOperation.cpp (renamed from ndb/src/ndbapi/NdbIndexOperation.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbLinHash.hpp (renamed from ndb/src/ndbapi/NdbLinHash.hpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbOperation.cpp (renamed from ndb/src/ndbapi/NdbOperation.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbOperationDefine.cpp (renamed from ndb/src/ndbapi/NdbOperationDefine.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbOperationExec.cpp (renamed from ndb/src/ndbapi/NdbOperationExec.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbOperationInt.cpp (renamed from ndb/src/ndbapi/NdbOperationInt.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbOperationScan.cpp (renamed from ndb/src/ndbapi/NdbOperationScan.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbOperationSearch.cpp (renamed from ndb/src/ndbapi/NdbOperationSearch.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbPool.cpp (renamed from ndb/src/ndbapi/NdbPool.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbPoolImpl.cpp (renamed from ndb/src/ndbapi/NdbPoolImpl.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbPoolImpl.hpp (renamed from ndb/src/ndbapi/NdbPoolImpl.hpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbRecAttr.cpp (renamed from ndb/src/ndbapi/NdbRecAttr.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbReceiver.cpp (renamed from ndb/src/ndbapi/NdbReceiver.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbScanFilter.cpp (renamed from ndb/src/ndbapi/NdbScanFilter.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbScanOperation.cpp (renamed from ndb/src/ndbapi/NdbScanOperation.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbTransaction.cpp (renamed from ndb/src/ndbapi/NdbTransaction.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbTransactionScan.cpp (renamed from ndb/src/ndbapi/NdbTransactionScan.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbUtil.cpp (renamed from ndb/src/ndbapi/NdbUtil.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbUtil.hpp (renamed from ndb/src/ndbapi/NdbUtil.hpp)0
-rw-r--r--storage/ndb/src/ndbapi/NdbWaiter.hpp (renamed from ndb/src/ndbapi/NdbWaiter.hpp)0
-rw-r--r--storage/ndb/src/ndbapi/Ndberr.cpp (renamed from ndb/src/ndbapi/Ndberr.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/Ndbif.cpp (renamed from ndb/src/ndbapi/Ndbif.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/Ndbinit.cpp (renamed from ndb/src/ndbapi/Ndbinit.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/Ndblist.cpp (renamed from ndb/src/ndbapi/Ndblist.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/ObjectMap.hpp (renamed from ndb/src/ndbapi/ObjectMap.hpp)0
-rw-r--r--storage/ndb/src/ndbapi/ScanOperation.txt (renamed from ndb/src/ndbapi/ScanOperation.txt)0
-rw-r--r--storage/ndb/src/ndbapi/TransporterFacade.cpp (renamed from ndb/src/ndbapi/TransporterFacade.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/TransporterFacade.hpp (renamed from ndb/src/ndbapi/TransporterFacade.hpp)0
-rw-r--r--storage/ndb/src/ndbapi/ndb_cluster_connection.cpp (renamed from ndb/src/ndbapi/ndb_cluster_connection.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp (renamed from ndb/src/ndbapi/ndb_cluster_connection_impl.hpp)0
-rw-r--r--storage/ndb/src/ndbapi/ndberror.c (renamed from ndb/src/ndbapi/ndberror.c)0
-rw-r--r--storage/ndb/src/ndbapi/signal-sender/Makefile (renamed from ndb/src/ndbapi/signal-sender/Makefile)0
-rw-r--r--storage/ndb/src/ndbapi/signal-sender/SignalSender.cpp (renamed from ndb/src/ndbapi/signal-sender/SignalSender.cpp)0
-rw-r--r--storage/ndb/src/ndbapi/signal-sender/SignalSender.hpp (renamed from ndb/src/ndbapi/signal-sender/SignalSender.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/Makefile (renamed from ndb/src/old_files/client/Makefile)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/Extra.mk (renamed from ndb/src/old_files/client/odbc/Extra.mk)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/Makefile (renamed from ndb/src/old_files/client/odbc/Makefile)0
-rwxr-xr-xstorage/ndb/src/old_files/client/odbc/NdbOdbc.cpp (renamed from ndb/src/old_files/client/odbc/NdbOdbc.cpp)0
-rwxr-xr-xstorage/ndb/src/old_files/client/odbc/NdbOdbc.def (renamed from ndb/src/old_files/client/odbc/NdbOdbc.def)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/CodeGen.cpp (renamed from ndb/src/old_files/client/odbc/codegen/CodeGen.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/CodeGen.hpp (renamed from ndb/src/old_files/client/odbc/codegen/CodeGen.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_base.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_base.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_base.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_base.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_column.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_column.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_column.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_column.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_comp_op.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_comp_op.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_comp_op.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_comp_op.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_create_index.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_create_index.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_create_index.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_create_index.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_create_row.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_create_row.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_create_row.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_create_row.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_create_table.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_create_table.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_create_table.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_create_table.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_data_type.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_data_type.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_data_type.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_data_type.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_ddl.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_ddl.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_ddl.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_ddl.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_column.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_ddl_column.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_column.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_ddl_column.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_row.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_ddl_row.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_row.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_ddl_row.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_delete.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_delete.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_delete.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_delete.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_delete_index.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_delete_index.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_delete_index.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_delete_index.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_delete_scan.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_delete_scan.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_delete_scan.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_delete_scan.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_dml.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_dml.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_dml.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_dml.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_dml_column.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_dml_column.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_dml_column.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_dml_column.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_dml_row.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_dml_row.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_dml_row.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_dml_row.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_drop_index.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_drop_index.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_drop_index.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_drop_index.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_drop_table.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_drop_table.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_drop_table.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_drop_table.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr_column.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr_column.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr_column.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr_column.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr_const.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr_const.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr_const.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr_const.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr_conv.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr_conv.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr_conv.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr_conv.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr_func.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr_func.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr_func.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr_func.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr_op.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr_op.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr_op.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr_op.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr_param.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr_param.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr_param.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr_param.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr_row.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr_row.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_expr_row.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_expr_row.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_idx_column.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_idx_column.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_idx_column.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_idx_column.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_insert.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_insert.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_insert.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_insert.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_pred.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_pred.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_pred.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_pred.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_pred_op.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_pred_op.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_pred_op.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_pred_op.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_count.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_count.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_count.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_count.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_distinct.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_distinct.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_distinct.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_distinct.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_filter.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_filter.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_filter.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_filter.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_group.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_group.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_group.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_group.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_index.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_index.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_index.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_index.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_join.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_join.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_join.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_join.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_lookup.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_lookup.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_lookup.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_lookup.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_project.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_project.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_project.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_project.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_range.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_range.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_range.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_range.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_repeat.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_repeat.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_repeat.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_repeat.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_scan.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_scan.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_scan.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_scan.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_sort.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_sort.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_sort.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_sort.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_sys.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_sys.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_query_sys.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_query_sys.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_root.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_root.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_root.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_root.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_select.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_select.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_select.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_select.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_set_row.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_set_row.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_set_row.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_set_row.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_stmt.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_stmt.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_stmt.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_stmt.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_table.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_table.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_table.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_table.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_table_list.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_table_list.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_table_list.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_table_list.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_update.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_update.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_update.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_update.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_update_index.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_update_index.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_update_index.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_update_index.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_update_lookup.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_update_lookup.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_update_lookup.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_update_lookup.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_update_scan.cpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_update_scan.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Code_update_scan.hpp (renamed from ndb/src/old_files/client/odbc/codegen/Code_update_scan.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/Makefile (renamed from ndb/src/old_files/client/odbc/codegen/Makefile)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/SimpleGram.ypp (renamed from ndb/src/old_files/client/odbc/codegen/SimpleGram.ypp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/SimpleParser.cpp (renamed from ndb/src/old_files/client/odbc/codegen/SimpleParser.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/SimpleParser.hpp (renamed from ndb/src/old_files/client/odbc/codegen/SimpleParser.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/codegen/SimpleScan.lpp (renamed from ndb/src/old_files/client/odbc/codegen/SimpleScan.lpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/AttrArea.cpp (renamed from ndb/src/old_files/client/odbc/common/AttrArea.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/AttrArea.hpp (renamed from ndb/src/old_files/client/odbc/common/AttrArea.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/CodeTree.cpp (renamed from ndb/src/old_files/client/odbc/common/CodeTree.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/CodeTree.hpp (renamed from ndb/src/old_files/client/odbc/common/CodeTree.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/ConnArea.cpp (renamed from ndb/src/old_files/client/odbc/common/ConnArea.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/ConnArea.hpp (renamed from ndb/src/old_files/client/odbc/common/ConnArea.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/Ctx.cpp (renamed from ndb/src/old_files/client/odbc/common/Ctx.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/Ctx.hpp (renamed from ndb/src/old_files/client/odbc/common/Ctx.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/DataField.cpp (renamed from ndb/src/old_files/client/odbc/common/DataField.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/DataField.hpp (renamed from ndb/src/old_files/client/odbc/common/DataField.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/DataRow.cpp (renamed from ndb/src/old_files/client/odbc/common/DataRow.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/DataRow.hpp (renamed from ndb/src/old_files/client/odbc/common/DataRow.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/DataType.cpp (renamed from ndb/src/old_files/client/odbc/common/DataType.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/DataType.hpp (renamed from ndb/src/old_files/client/odbc/common/DataType.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/DescArea.cpp (renamed from ndb/src/old_files/client/odbc/common/DescArea.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/DescArea.hpp (renamed from ndb/src/old_files/client/odbc/common/DescArea.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/DiagArea.cpp (renamed from ndb/src/old_files/client/odbc/common/DiagArea.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/DiagArea.hpp (renamed from ndb/src/old_files/client/odbc/common/DiagArea.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/Makefile (renamed from ndb/src/old_files/client/odbc/common/Makefile)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/OdbcData.cpp (renamed from ndb/src/old_files/client/odbc/common/OdbcData.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/OdbcData.hpp (renamed from ndb/src/old_files/client/odbc/common/OdbcData.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/ResultArea.cpp (renamed from ndb/src/old_files/client/odbc/common/ResultArea.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/ResultArea.hpp (renamed from ndb/src/old_files/client/odbc/common/ResultArea.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/Sqlstate.cpp (renamed from ndb/src/old_files/client/odbc/common/Sqlstate.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/Sqlstate.hpp (renamed from ndb/src/old_files/client/odbc/common/Sqlstate.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/StmtArea.cpp (renamed from ndb/src/old_files/client/odbc/common/StmtArea.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/StmtArea.hpp (renamed from ndb/src/old_files/client/odbc/common/StmtArea.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/StmtInfo.cpp (renamed from ndb/src/old_files/client/odbc/common/StmtInfo.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/StmtInfo.hpp (renamed from ndb/src/old_files/client/odbc/common/StmtInfo.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/common.cpp (renamed from ndb/src/old_files/client/odbc/common/common.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/common/common.hpp (renamed from ndb/src/old_files/client/odbc/common/common.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/dictionary/DictCatalog.cpp (renamed from ndb/src/old_files/client/odbc/dictionary/DictCatalog.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/dictionary/DictCatalog.hpp (renamed from ndb/src/old_files/client/odbc/dictionary/DictCatalog.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/dictionary/DictColumn.cpp (renamed from ndb/src/old_files/client/odbc/dictionary/DictColumn.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/dictionary/DictColumn.hpp (renamed from ndb/src/old_files/client/odbc/dictionary/DictColumn.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/dictionary/DictIndex.cpp (renamed from ndb/src/old_files/client/odbc/dictionary/DictIndex.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/dictionary/DictIndex.hpp (renamed from ndb/src/old_files/client/odbc/dictionary/DictIndex.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/dictionary/DictSchema.cpp (renamed from ndb/src/old_files/client/odbc/dictionary/DictSchema.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/dictionary/DictSchema.hpp (renamed from ndb/src/old_files/client/odbc/dictionary/DictSchema.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/dictionary/DictSys.cpp (renamed from ndb/src/old_files/client/odbc/dictionary/DictSys.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/dictionary/DictSys.hpp (renamed from ndb/src/old_files/client/odbc/dictionary/DictSys.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/dictionary/DictTable.cpp (renamed from ndb/src/old_files/client/odbc/dictionary/DictTable.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/dictionary/DictTable.hpp (renamed from ndb/src/old_files/client/odbc/dictionary/DictTable.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/dictionary/Makefile (renamed from ndb/src/old_files/client/odbc/dictionary/Makefile)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/docs/class.fig (renamed from ndb/src/old_files/client/odbc/docs/class.fig)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/docs/descfield.pl (renamed from ndb/src/old_files/client/odbc/docs/descfield.pl)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/docs/diag.txt (renamed from ndb/src/old_files/client/odbc/docs/diag.txt)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/docs/getinfo.pl (renamed from ndb/src/old_files/client/odbc/docs/getinfo.pl)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/docs/gettypeinfo.pl (renamed from ndb/src/old_files/client/odbc/docs/gettypeinfo.pl)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/docs/handleattr.pl (renamed from ndb/src/old_files/client/odbc/docs/handleattr.pl)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/docs/main.hpp (renamed from ndb/src/old_files/client/odbc/docs/main.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/docs/ndbodbc.html (renamed from ndb/src/old_files/client/odbc/docs/ndbodbc.html)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/docs/select.fig (renamed from ndb/src/old_files/client/odbc/docs/select.fig)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/docs/systables.pl (renamed from ndb/src/old_files/client/odbc/docs/systables.pl)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/docs/type.txt (renamed from ndb/src/old_files/client/odbc/docs/type.txt)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/Func.data (renamed from ndb/src/old_files/client/odbc/driver/Func.data)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/Func.pl (renamed from ndb/src/old_files/client/odbc/driver/Func.pl)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/Makefile (renamed from ndb/src/old_files/client/odbc/driver/Makefile)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLAllocConnect.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLAllocConnect.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLAllocEnv.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLAllocEnv.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLAllocHandle.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLAllocHandle.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLAllocHandleStd.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLAllocHandleStd.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLAllocStmt.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLAllocStmt.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLBindCol.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLBindCol.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLBindParam.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLBindParam.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLBindParameter.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLBindParameter.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLBrowseConnect.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLBrowseConnect.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLBulkOperations.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLBulkOperations.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLCancel.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLCancel.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLCloseCursor.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLCloseCursor.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLColAttribute.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLColAttribute.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLColAttributes.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLColAttributes.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLColumnPrivileges.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLColumnPrivileges.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLColumns.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLColumns.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLConnect.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLConnect.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLCopyDesc.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLCopyDesc.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLDataSources.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLDataSources.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLDescribeCol.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLDescribeCol.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLDescribeParam.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLDescribeParam.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLDisconnect.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLDisconnect.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLDriverConnect.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLDriverConnect.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLDrivers.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLDrivers.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLEndTran.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLEndTran.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLError.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLError.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLExecDirect.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLExecDirect.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLExecute.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLExecute.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLExtendedFetch.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLExtendedFetch.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLFetch.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLFetch.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLFetchScroll.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLFetchScroll.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLForeignKeys.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLForeignKeys.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLFreeConnect.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLFreeConnect.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLFreeEnv.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLFreeEnv.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLFreeHandle.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLFreeHandle.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLFreeStmt.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLFreeStmt.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLGetConnectAttr.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLGetConnectAttr.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLGetConnectOption.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLGetConnectOption.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLGetCursorName.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLGetCursorName.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLGetData.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLGetData.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLGetDescField.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLGetDescField.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLGetDescRec.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLGetDescRec.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLGetDiagField.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLGetDiagField.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLGetDiagRec.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLGetDiagRec.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLGetEnvAttr.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLGetEnvAttr.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLGetFunctions.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLGetFunctions.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLGetInfo.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLGetInfo.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLGetStmtAttr.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLGetStmtAttr.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLGetStmtOption.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLGetStmtOption.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLGetTypeInfo.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLGetTypeInfo.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLMoreResults.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLMoreResults.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLNativeSql.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLNativeSql.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLNumParams.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLNumParams.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLNumResultCols.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLNumResultCols.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLParamData.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLParamData.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLParamOptions.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLParamOptions.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLPrepare.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLPrepare.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLPrimaryKeys.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLPrimaryKeys.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLProcedureColumns.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLProcedureColumns.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLProcedures.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLProcedures.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLPutData.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLPutData.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLRowCount.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLRowCount.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLSetConnectAttr.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLSetConnectAttr.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLSetConnectOption.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLSetConnectOption.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLSetCursorName.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLSetCursorName.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLSetDescField.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLSetDescField.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLSetDescRec.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLSetDescRec.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLSetEnvAttr.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLSetEnvAttr.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLSetParam.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLSetParam.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLSetPos.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLSetPos.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLSetScrollOptions.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLSetScrollOptions.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLSetStmtAttr.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLSetStmtAttr.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLSetStmtOption.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLSetStmtOption.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLSpecialColumns.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLSpecialColumns.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLStatistics.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLStatistics.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLTablePrivileges.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLTablePrivileges.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLTables.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLTables.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/SQLTransact.cpp (renamed from ndb/src/old_files/client/odbc/driver/SQLTransact.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/driver.cpp (renamed from ndb/src/old_files/client/odbc/driver/driver.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/driver/driver.hpp (renamed from ndb/src/old_files/client/odbc/driver/driver.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_comp_op.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_comp_op.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_create_index.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_create_index.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_create_table.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_create_table.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_delete_index.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_delete_index.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_delete_lookup.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_delete_lookup.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_delete_scan.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_delete_scan.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_drop_index.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_drop_index.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_drop_table.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_drop_table.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_expr_conv.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_expr_conv.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_expr_func.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_expr_func.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_expr_op.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_expr_op.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_insert.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_insert.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_pred_op.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_pred_op.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_query_index.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_query_index.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_query_lookup.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_query_lookup.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_query_range.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_query_range.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_query_scan.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_query_scan.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_query_sys.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_query_sys.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_update_index.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_update_index.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_update_lookup.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_update_lookup.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Exec_update_scan.cpp (renamed from ndb/src/old_files/client/odbc/executor/Exec_update_scan.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Executor.cpp (renamed from ndb/src/old_files/client/odbc/executor/Executor.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Executor.hpp (renamed from ndb/src/old_files/client/odbc/executor/Executor.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/executor/Makefile (renamed from ndb/src/old_files/client/odbc/executor/Makefile)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/AttrDbc.cpp (renamed from ndb/src/old_files/client/odbc/handles/AttrDbc.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/AttrEnv.cpp (renamed from ndb/src/old_files/client/odbc/handles/AttrEnv.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/AttrRoot.cpp (renamed from ndb/src/old_files/client/odbc/handles/AttrRoot.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/AttrStmt.cpp (renamed from ndb/src/old_files/client/odbc/handles/AttrStmt.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/DescSpec.cpp (renamed from ndb/src/old_files/client/odbc/handles/DescSpec.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/FuncTab.cpp (renamed from ndb/src/old_files/client/odbc/handles/FuncTab.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/HandleBase.cpp (renamed from ndb/src/old_files/client/odbc/handles/HandleBase.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/HandleBase.hpp (renamed from ndb/src/old_files/client/odbc/handles/HandleBase.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/HandleDbc.cpp (renamed from ndb/src/old_files/client/odbc/handles/HandleDbc.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/HandleDbc.hpp (renamed from ndb/src/old_files/client/odbc/handles/HandleDbc.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/HandleDesc.cpp (renamed from ndb/src/old_files/client/odbc/handles/HandleDesc.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/HandleDesc.hpp (renamed from ndb/src/old_files/client/odbc/handles/HandleDesc.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/HandleEnv.cpp (renamed from ndb/src/old_files/client/odbc/handles/HandleEnv.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/HandleEnv.hpp (renamed from ndb/src/old_files/client/odbc/handles/HandleEnv.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/HandleRoot.cpp (renamed from ndb/src/old_files/client/odbc/handles/HandleRoot.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/HandleRoot.hpp (renamed from ndb/src/old_files/client/odbc/handles/HandleRoot.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/HandleStmt.cpp (renamed from ndb/src/old_files/client/odbc/handles/HandleStmt.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/HandleStmt.hpp (renamed from ndb/src/old_files/client/odbc/handles/HandleStmt.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/InfoTab.cpp (renamed from ndb/src/old_files/client/odbc/handles/InfoTab.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/Makefile (renamed from ndb/src/old_files/client/odbc/handles/Makefile)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/PoolNdb.cpp (renamed from ndb/src/old_files/client/odbc/handles/PoolNdb.cpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/PoolNdb.hpp (renamed from ndb/src/old_files/client/odbc/handles/PoolNdb.hpp)0
-rw-r--r--storage/ndb/src/old_files/client/odbc/handles/handles.hpp (renamed from ndb/src/old_files/client/odbc/handles/handles.hpp)0
-rw-r--r--storage/ndb/src/old_files/ndbbaseclient/Makefile (renamed from ndb/src/old_files/ndbbaseclient/Makefile)0
-rw-r--r--storage/ndb/src/old_files/ndbbaseclient/ndbbaseclient_dummy.cpp (renamed from ndb/src/old_files/ndbbaseclient/ndbbaseclient_dummy.cpp)0
-rw-r--r--storage/ndb/src/old_files/ndbclient/Makefile (renamed from ndb/src/old_files/ndbclient/Makefile)0
-rw-r--r--storage/ndb/src/old_files/ndbclient/ndbclient_dummy.cpp (renamed from ndb/src/old_files/ndbclient/ndbclient_dummy.cpp)0
-rw-r--r--storage/ndb/src/old_files/newtonapi/Makefile (renamed from ndb/src/old_files/newtonapi/Makefile)0
-rw-r--r--storage/ndb/src/old_files/newtonapi/dba_binding.cpp (renamed from ndb/src/old_files/newtonapi/dba_binding.cpp)0
-rw-r--r--storage/ndb/src/old_files/newtonapi/dba_bulkread.cpp (renamed from ndb/src/old_files/newtonapi/dba_bulkread.cpp)0
-rw-r--r--storage/ndb/src/old_files/newtonapi/dba_config.cpp (renamed from ndb/src/old_files/newtonapi/dba_config.cpp)0
-rw-r--r--storage/ndb/src/old_files/newtonapi/dba_dac.cpp (renamed from ndb/src/old_files/newtonapi/dba_dac.cpp)0
-rw-r--r--storage/ndb/src/old_files/newtonapi/dba_error.cpp (renamed from ndb/src/old_files/newtonapi/dba_error.cpp)0
-rw-r--r--storage/ndb/src/old_files/newtonapi/dba_init.cpp (renamed from ndb/src/old_files/newtonapi/dba_init.cpp)0
-rw-r--r--storage/ndb/src/old_files/newtonapi/dba_internal.hpp (renamed from ndb/src/old_files/newtonapi/dba_internal.hpp)0
-rw-r--r--storage/ndb/src/old_files/newtonapi/dba_process.cpp (renamed from ndb/src/old_files/newtonapi/dba_process.cpp)0
-rw-r--r--storage/ndb/src/old_files/newtonapi/dba_process.hpp (renamed from ndb/src/old_files/newtonapi/dba_process.hpp)0
-rw-r--r--storage/ndb/src/old_files/newtonapi/dba_schema.cpp (renamed from ndb/src/old_files/newtonapi/dba_schema.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/ExtSender.cpp (renamed from ndb/src/old_files/rep/ExtSender.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/ExtSender.hpp (renamed from ndb/src/old_files/rep/ExtSender.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/Makefile (renamed from ndb/src/old_files/rep/Makefile)0
-rw-r--r--storage/ndb/src/old_files/rep/NodeConnectInfo.hpp (renamed from ndb/src/old_files/rep/NodeConnectInfo.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/README (renamed from ndb/src/old_files/rep/README)0
-rw-r--r--storage/ndb/src/old_files/rep/RepApiInterpreter.cpp (renamed from ndb/src/old_files/rep/RepApiInterpreter.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/RepApiInterpreter.hpp (renamed from ndb/src/old_files/rep/RepApiInterpreter.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/RepApiService.cpp (renamed from ndb/src/old_files/rep/RepApiService.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/RepApiService.hpp (renamed from ndb/src/old_files/rep/RepApiService.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/RepCommandInterpreter.cpp (renamed from ndb/src/old_files/rep/RepCommandInterpreter.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/RepCommandInterpreter.hpp (renamed from ndb/src/old_files/rep/RepCommandInterpreter.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/RepComponents.cpp (renamed from ndb/src/old_files/rep/RepComponents.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/RepComponents.hpp (renamed from ndb/src/old_files/rep/RepComponents.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/RepMain.cpp (renamed from ndb/src/old_files/rep/RepMain.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/Requestor.cpp (renamed from ndb/src/old_files/rep/Requestor.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/Requestor.hpp (renamed from ndb/src/old_files/rep/Requestor.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/RequestorSubscriptions.cpp (renamed from ndb/src/old_files/rep/RequestorSubscriptions.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/SignalQueue.cpp (renamed from ndb/src/old_files/rep/SignalQueue.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/SignalQueue.hpp (renamed from ndb/src/old_files/rep/SignalQueue.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/TODO (renamed from ndb/src/old_files/rep/TODO)0
-rw-r--r--storage/ndb/src/old_files/rep/adapters/AppNDB.cpp (renamed from ndb/src/old_files/rep/adapters/AppNDB.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/adapters/AppNDB.hpp (renamed from ndb/src/old_files/rep/adapters/AppNDB.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/adapters/ExtAPI.cpp (renamed from ndb/src/old_files/rep/adapters/ExtAPI.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/adapters/ExtAPI.hpp (renamed from ndb/src/old_files/rep/adapters/ExtAPI.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/adapters/ExtNDB.cpp (renamed from ndb/src/old_files/rep/adapters/ExtNDB.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/adapters/ExtNDB.hpp (renamed from ndb/src/old_files/rep/adapters/ExtNDB.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/adapters/Makefile (renamed from ndb/src/old_files/rep/adapters/Makefile)0
-rw-r--r--storage/ndb/src/old_files/rep/adapters/TableInfoPs.hpp (renamed from ndb/src/old_files/rep/adapters/TableInfoPs.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/dbug_hack.cpp (renamed from ndb/src/old_files/rep/dbug_hack.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/rep_version.hpp (renamed from ndb/src/old_files/rep/rep_version.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/repapi/Makefile (renamed from ndb/src/old_files/rep/repapi/Makefile)0
-rw-r--r--storage/ndb/src/old_files/rep/repapi/repapi.cpp (renamed from ndb/src/old_files/rep/repapi/repapi.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/repapi/repapi.h (renamed from ndb/src/old_files/rep/repapi/repapi.h)0
-rw-r--r--storage/ndb/src/old_files/rep/state/Channel.cpp (renamed from ndb/src/old_files/rep/state/Channel.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/state/Channel.hpp (renamed from ndb/src/old_files/rep/state/Channel.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/state/Interval.cpp (renamed from ndb/src/old_files/rep/state/Interval.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/state/Interval.hpp (renamed from ndb/src/old_files/rep/state/Interval.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/state/Makefile (renamed from ndb/src/old_files/rep/state/Makefile)0
-rw-r--r--storage/ndb/src/old_files/rep/state/RepState.cpp (renamed from ndb/src/old_files/rep/state/RepState.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/state/RepState.hpp (renamed from ndb/src/old_files/rep/state/RepState.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/state/RepStateEvent.cpp (renamed from ndb/src/old_files/rep/state/RepStateEvent.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/state/RepStateRequests.cpp (renamed from ndb/src/old_files/rep/state/RepStateRequests.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/state/testInterval/Makefile (renamed from ndb/src/old_files/rep/state/testInterval/Makefile)0
-rw-r--r--storage/ndb/src/old_files/rep/state/testInterval/testInterval.cpp (renamed from ndb/src/old_files/rep/state/testInterval/testInterval.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/state/testRepState/Makefile (renamed from ndb/src/old_files/rep/state/testRepState/Makefile)0
-rw-r--r--storage/ndb/src/old_files/rep/state/testRepState/testRequestor.cpp (renamed from ndb/src/old_files/rep/state/testRepState/testRequestor.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/state/testRepState/testRequestor.hpp (renamed from ndb/src/old_files/rep/state/testRepState/testRequestor.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/GCIBuffer.cpp (renamed from ndb/src/old_files/rep/storage/GCIBuffer.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/GCIBuffer.hpp (renamed from ndb/src/old_files/rep/storage/GCIBuffer.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/GCIContainer.cpp (renamed from ndb/src/old_files/rep/storage/GCIContainer.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/GCIContainer.hpp (renamed from ndb/src/old_files/rep/storage/GCIContainer.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/GCIContainerPS.cpp (renamed from ndb/src/old_files/rep/storage/GCIContainerPS.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/GCIContainerPS.hpp (renamed from ndb/src/old_files/rep/storage/GCIContainerPS.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/GCIPage.cpp (renamed from ndb/src/old_files/rep/storage/GCIPage.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/GCIPage.hpp (renamed from ndb/src/old_files/rep/storage/GCIPage.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/LogRecord.hpp (renamed from ndb/src/old_files/rep/storage/LogRecord.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/Makefile (renamed from ndb/src/old_files/rep/storage/Makefile)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/NodeConnectInfo.hpp (renamed from ndb/src/old_files/rep/storage/NodeConnectInfo.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/NodeGroup.cpp (renamed from ndb/src/old_files/rep/storage/NodeGroup.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/NodeGroup.hpp (renamed from ndb/src/old_files/rep/storage/NodeGroup.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/NodeGroupInfo.cpp (renamed from ndb/src/old_files/rep/storage/NodeGroupInfo.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/storage/NodeGroupInfo.hpp (renamed from ndb/src/old_files/rep/storage/NodeGroupInfo.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/transfer/Makefile (renamed from ndb/src/old_files/rep/transfer/Makefile)0
-rw-r--r--storage/ndb/src/old_files/rep/transfer/TransPS.cpp (renamed from ndb/src/old_files/rep/transfer/TransPS.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/transfer/TransPS.hpp (renamed from ndb/src/old_files/rep/transfer/TransPS.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/transfer/TransSS.cpp (renamed from ndb/src/old_files/rep/transfer/TransSS.cpp)0
-rw-r--r--storage/ndb/src/old_files/rep/transfer/TransSS.hpp (renamed from ndb/src/old_files/rep/transfer/TransSS.hpp)0
-rw-r--r--storage/ndb/src/old_files/rep/transfer/TransSSSubscriptions.cpp (renamed from ndb/src/old_files/rep/transfer/TransSSSubscriptions.cpp)0
-rw-r--r--storage/ndb/test/Makefile.am (renamed from ndb/test/Makefile.am)0
-rw-r--r--storage/ndb/test/include/CpcClient.hpp (renamed from ndb/test/include/CpcClient.hpp)0
-rw-r--r--storage/ndb/test/include/HugoAsynchTransactions.hpp (renamed from ndb/test/include/HugoAsynchTransactions.hpp)0
-rw-r--r--storage/ndb/test/include/HugoCalculator.hpp (renamed from ndb/test/include/HugoCalculator.hpp)0
-rw-r--r--storage/ndb/test/include/HugoOperations.hpp (renamed from ndb/test/include/HugoOperations.hpp)0
-rw-r--r--storage/ndb/test/include/HugoTransactions.hpp (renamed from ndb/test/include/HugoTransactions.hpp)0
-rw-r--r--storage/ndb/test/include/NDBT.hpp (renamed from ndb/test/include/NDBT.hpp)0
-rw-r--r--storage/ndb/test/include/NDBT_DataSet.hpp (renamed from ndb/test/include/NDBT_DataSet.hpp)0
-rw-r--r--storage/ndb/test/include/NDBT_DataSetTransaction.hpp (renamed from ndb/test/include/NDBT_DataSetTransaction.hpp)0
-rw-r--r--storage/ndb/test/include/NDBT_Error.hpp (renamed from ndb/test/include/NDBT_Error.hpp)0
-rw-r--r--storage/ndb/test/include/NDBT_Output.hpp (renamed from ndb/test/include/NDBT_Output.hpp)0
-rw-r--r--storage/ndb/test/include/NDBT_ResultRow.hpp (renamed from ndb/test/include/NDBT_ResultRow.hpp)0
-rw-r--r--storage/ndb/test/include/NDBT_ReturnCodes.h (renamed from ndb/test/include/NDBT_ReturnCodes.h)0
-rw-r--r--storage/ndb/test/include/NDBT_Stats.hpp (renamed from ndb/test/include/NDBT_Stats.hpp)0
-rw-r--r--storage/ndb/test/include/NDBT_Table.hpp (renamed from ndb/test/include/NDBT_Table.hpp)0
-rw-r--r--storage/ndb/test/include/NDBT_Tables.hpp (renamed from ndb/test/include/NDBT_Tables.hpp)0
-rw-r--r--storage/ndb/test/include/NDBT_Test.hpp (renamed from ndb/test/include/NDBT_Test.hpp)0
-rw-r--r--storage/ndb/test/include/NdbBackup.hpp (renamed from ndb/test/include/NdbBackup.hpp)0
-rw-r--r--storage/ndb/test/include/NdbConfig.hpp (renamed from ndb/test/include/NdbConfig.hpp)0
-rw-r--r--storage/ndb/test/include/NdbGrep.hpp (renamed from ndb/test/include/NdbGrep.hpp)0
-rw-r--r--storage/ndb/test/include/NdbRestarter.hpp (renamed from ndb/test/include/NdbRestarter.hpp)0
-rw-r--r--storage/ndb/test/include/NdbRestarts.hpp (renamed from ndb/test/include/NdbRestarts.hpp)0
-rw-r--r--storage/ndb/test/include/NdbSchemaCon.hpp (renamed from ndb/test/include/NdbSchemaCon.hpp)0
-rw-r--r--storage/ndb/test/include/NdbSchemaOp.hpp (renamed from ndb/test/include/NdbSchemaOp.hpp)0
-rw-r--r--storage/ndb/test/include/NdbTest.hpp (renamed from ndb/test/include/NdbTest.hpp)0
-rw-r--r--storage/ndb/test/include/NdbTimer.hpp (renamed from ndb/test/include/NdbTimer.hpp)0
-rw-r--r--storage/ndb/test/include/TestNdbEventOperation.hpp (renamed from ndb/test/include/TestNdbEventOperation.hpp)0
-rw-r--r--storage/ndb/test/include/UtilTransactions.hpp (renamed from ndb/test/include/UtilTransactions.hpp)0
-rw-r--r--storage/ndb/test/include/getarg.h (renamed from ndb/test/include/getarg.h)0
-rw-r--r--storage/ndb/test/ndbapi/InsertRecs.cpp (renamed from ndb/test/ndbapi/InsertRecs.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/Makefile.am159
-rw-r--r--storage/ndb/test/ndbapi/ScanFilter.hpp (renamed from ndb/test/ndbapi/ScanFilter.hpp)0
-rw-r--r--storage/ndb/test/ndbapi/ScanFunctions.hpp (renamed from ndb/test/ndbapi/ScanFunctions.hpp)0
-rw-r--r--storage/ndb/test/ndbapi/ScanInterpretTest.hpp (renamed from ndb/test/ndbapi/ScanInterpretTest.hpp)0
-rw-r--r--storage/ndb/test/ndbapi/TraceNdbApi.cpp (renamed from ndb/test/ndbapi/TraceNdbApi.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/VerifyNdbApi.cpp (renamed from ndb/test/ndbapi/VerifyNdbApi.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/acid.cpp (renamed from ndb/test/ndbapi/acid.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/acid2.cpp (renamed from ndb/test/ndbapi/acid2.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/adoInsertRecs.cpp (renamed from ndb/test/ndbapi/adoInsertRecs.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/asyncGenerator.cpp (renamed from ndb/test/ndbapi/asyncGenerator.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bank/Bank.cpp (renamed from ndb/test/ndbapi/bank/Bank.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bank/Bank.hpp (renamed from ndb/test/ndbapi/bank/Bank.hpp)0
-rw-r--r--storage/ndb/test/ndbapi/bank/BankLoad.cpp (renamed from ndb/test/ndbapi/bank/BankLoad.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bank/Makefile.am24
-rw-r--r--storage/ndb/test/ndbapi/bank/bankCreator.cpp (renamed from ndb/test/ndbapi/bank/bankCreator.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bank/bankMakeGL.cpp (renamed from ndb/test/ndbapi/bank/bankMakeGL.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bank/bankSumAccounts.cpp (renamed from ndb/test/ndbapi/bank/bankSumAccounts.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bank/bankTimer.cpp (renamed from ndb/test/ndbapi/bank/bankTimer.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bank/bankTransactionMaker.cpp (renamed from ndb/test/ndbapi/bank/bankTransactionMaker.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp (renamed from ndb/test/ndbapi/bank/bankValidateAllGLs.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bank/testBank.cpp (renamed from ndb/test/ndbapi/bank/testBank.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/asyncGenerator.cpp (renamed from ndb/test/ndbapi/bench/asyncGenerator.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/dbGenerator.h (renamed from ndb/test/ndbapi/bench/dbGenerator.h)0
-rw-r--r--storage/ndb/test/ndbapi/bench/dbPopulate.cpp (renamed from ndb/test/ndbapi/bench/dbPopulate.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/dbPopulate.h (renamed from ndb/test/ndbapi/bench/dbPopulate.h)0
-rw-r--r--storage/ndb/test/ndbapi/bench/macros.h (renamed from ndb/test/ndbapi/bench/macros.h)0
-rw-r--r--storage/ndb/test/ndbapi/bench/mainAsyncGenerator.cpp (renamed from ndb/test/ndbapi/bench/mainAsyncGenerator.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/mainPopulate.cpp (renamed from ndb/test/ndbapi/bench/mainPopulate.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/ndb_async1.cpp (renamed from ndb/test/ndbapi/bench/ndb_async1.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/ndb_async2.cpp (renamed from ndb/test/ndbapi/bench/ndb_async2.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/ndb_error.hpp (renamed from ndb/test/ndbapi/bench/ndb_error.hpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/ndb_schema.hpp (renamed from ndb/test/ndbapi/bench/ndb_schema.hpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/ndb_user_transaction.cpp (renamed from ndb/test/ndbapi/bench/ndb_user_transaction.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/ndb_user_transaction2.cpp (renamed from ndb/test/ndbapi/bench/ndb_user_transaction2.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/ndb_user_transaction3.cpp (renamed from ndb/test/ndbapi/bench/ndb_user_transaction3.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/ndb_user_transaction4.cpp (renamed from ndb/test/ndbapi/bench/ndb_user_transaction4.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/ndb_user_transaction5.cpp (renamed from ndb/test/ndbapi/bench/ndb_user_transaction5.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/ndb_user_transaction6.cpp (renamed from ndb/test/ndbapi/bench/ndb_user_transaction6.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/testData.h (renamed from ndb/test/ndbapi/bench/testData.h)0
-rw-r--r--storage/ndb/test/ndbapi/bench/testDefinitions.h (renamed from ndb/test/ndbapi/bench/testDefinitions.h)0
-rw-r--r--storage/ndb/test/ndbapi/bench/userInterface.cpp (renamed from ndb/test/ndbapi/bench/userInterface.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bench/userInterface.h (renamed from ndb/test/ndbapi/bench/userInterface.h)0
-rw-r--r--storage/ndb/test/ndbapi/benchronja.cpp (renamed from ndb/test/ndbapi/benchronja.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/bulk_copy.cpp (renamed from ndb/test/ndbapi/bulk_copy.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/cdrserver.cpp (renamed from ndb/test/ndbapi/cdrserver.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/celloDb.cpp (renamed from ndb/test/ndbapi/celloDb.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/create_all_tabs.cpp (renamed from ndb/test/ndbapi/create_all_tabs.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/create_tab.cpp (renamed from ndb/test/ndbapi/create_tab.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/drop_all_tabs.cpp (renamed from ndb/test/ndbapi/drop_all_tabs.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/flexAsynch.cpp (renamed from ndb/test/ndbapi/flexAsynch.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/flexBench.cpp (renamed from ndb/test/ndbapi/flexBench.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/flexHammer.cpp (renamed from ndb/test/ndbapi/flexHammer.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/flexScan.cpp (renamed from ndb/test/ndbapi/flexScan.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/flexTT.cpp (renamed from ndb/test/ndbapi/flexTT.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/flexTimedAsynch.cpp (renamed from ndb/test/ndbapi/flexTimedAsynch.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/flex_bench_mysql.cpp (renamed from ndb/test/ndbapi/flex_bench_mysql.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/index.cpp (renamed from ndb/test/ndbapi/index.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/index2.cpp (renamed from ndb/test/ndbapi/index2.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/initronja.cpp (renamed from ndb/test/ndbapi/initronja.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/interpreterInTup.cpp (renamed from ndb/test/ndbapi/interpreterInTup.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/mainAsyncGenerator.cpp (renamed from ndb/test/ndbapi/mainAsyncGenerator.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/msa.cpp (renamed from ndb/test/ndbapi/msa.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/ndb_async1.cpp (renamed from ndb/test/ndbapi/ndb_async1.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/ndb_async2.cpp (renamed from ndb/test/ndbapi/ndb_async2.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/ndb_user_populate.cpp (renamed from ndb/test/ndbapi/ndb_user_populate.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/ndb_user_transaction.cpp (renamed from ndb/test/ndbapi/ndb_user_transaction.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/ndb_user_transaction2.cpp (renamed from ndb/test/ndbapi/ndb_user_transaction2.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/ndb_user_transaction3.cpp (renamed from ndb/test/ndbapi/ndb_user_transaction3.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/ndb_user_transaction4.cpp (renamed from ndb/test/ndbapi/ndb_user_transaction4.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/ndb_user_transaction5.cpp (renamed from ndb/test/ndbapi/ndb_user_transaction5.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/ndb_user_transaction6.cpp (renamed from ndb/test/ndbapi/ndb_user_transaction6.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/acid/Makefile (renamed from ndb/test/ndbapi/old_dirs/acid/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/acid2/Makefile (renamed from ndb/test/ndbapi/old_dirs/acid2/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/acid2/TraceNdbApi.hpp (renamed from ndb/test/ndbapi/old_dirs/acid2/TraceNdbApi.hpp)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/acid2/VerifyNdbApi.hpp (renamed from ndb/test/ndbapi/old_dirs/acid2/VerifyNdbApi.hpp)0
-rwxr-xr-xstorage/ndb/test/ndbapi/old_dirs/basicAsynch/Makefile (renamed from ndb/test/ndbapi/old_dirs/basicAsynch/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/bulk_copy/Makefile (renamed from ndb/test/ndbapi/old_dirs/bulk_copy/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/create_all_tabs/Makefile (renamed from ndb/test/ndbapi/old_dirs/create_all_tabs/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/create_tab/Makefile (renamed from ndb/test/ndbapi/old_dirs/create_tab/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/drop_all_tabs/Makefile (renamed from ndb/test/ndbapi/old_dirs/drop_all_tabs/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/flexAsynch/Makefile (renamed from ndb/test/ndbapi/old_dirs/flexAsynch/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/flexBench/Makefile.am (renamed from ndb/test/ndbapi/old_dirs/flexBench/Makefile.am)0
-rwxr-xr-xstorage/ndb/test/ndbapi/old_dirs/flexBench/ndbplot.pl (renamed from ndb/test/ndbapi/old_dirs/flexBench/ndbplot.pl)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/flexHammer/Makefile (renamed from ndb/test/ndbapi/old_dirs/flexHammer/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/flexHammer/README (renamed from ndb/test/ndbapi/old_dirs/flexHammer/README)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/flexScan/Makefile (renamed from ndb/test/ndbapi/old_dirs/flexScan/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/flexScan/README (renamed from ndb/test/ndbapi/old_dirs/flexScan/README)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/flexTT/Makefile (renamed from ndb/test/ndbapi/old_dirs/flexTT/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/flexTimedAsynch/Makefile (renamed from ndb/test/ndbapi/old_dirs/flexTimedAsynch/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/flex_bench_mysql/Makefile (renamed from ndb/test/ndbapi/old_dirs/flex_bench_mysql/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/indexTest/Makefile (renamed from ndb/test/ndbapi/old_dirs/indexTest/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/indexTest2/Makefile (renamed from ndb/test/ndbapi/old_dirs/indexTest2/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/interpreterInTup/Makefile (renamed from ndb/test/ndbapi/old_dirs/interpreterInTup/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/Makefile (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/Makefile (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/async-src/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/generator/Makefile (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/async-src/generator/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/dbGenerator.h (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/dbGenerator.h)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/testData.h (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/testData.h)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/userInterface.h (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/userInterface.h)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/Makefile (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/macros.h (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/macros.h)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/ndb_error.hpp (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/ndb_error.hpp)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/bin/.empty (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/bin/.empty)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/include/ndb_schema.hpp (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/include/ndb_schema.hpp)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/include/testDefinitions.h (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/include/testDefinitions.h)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/lib/.empty (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/lib/.empty)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/Makefile (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/script/Makefile)0
-rwxr-xr-xstorage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l-p10.sh (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l-p10.sh)0
-rwxr-xr-xstorage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l.sh (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l.sh)0
-rwxr-xr-xstorage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-p10.sh (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-p10.sh)0
-rwxr-xr-xstorage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench.sh (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench.sh)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/Makefile (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/README (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/README)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/Makefile (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.c (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.c)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.h (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.h)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/mainGenerator.c (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/mainGenerator.c)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/include/testData.h (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/include/testData.h)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/include/userInterface.h (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/include/userInterface.h)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.linux (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.linux)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.sparc (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.sparc)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/Makefile (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.c (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.c)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.h (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.h)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/mainPopulate.c (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/mainPopulate.c)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/Makefile (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/user/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/localDbPrepare.c (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/user/localDbPrepare.c)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/macros.h (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/user/macros.h)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/ndb_error.hpp (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/user/ndb_error.hpp)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/Makefile (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userHandle.h (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userHandle.h)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userInterface.c (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userInterface.c)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userTransaction.c (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userTransaction.c)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userHandle.h (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userHandle.h)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userInterface.cpp (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userInterface.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userTransaction.c (renamed from ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userTransaction.c)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/restarter/Makefile (renamed from ndb/test/ndbapi/old_dirs/restarter/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/restarter2/Makefile (renamed from ndb/test/ndbapi/old_dirs/restarter2/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/restarts/Makefile (renamed from ndb/test/ndbapi/old_dirs/restarts/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/ronja/Makefile (renamed from ndb/test/ndbapi/old_dirs/ronja/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/ronja/benchronja/Makefile (renamed from ndb/test/ndbapi/old_dirs/ronja/benchronja/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/ronja/initronja/Makefile (renamed from ndb/test/ndbapi/old_dirs/ronja/initronja/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/telco/Makefile (renamed from ndb/test/ndbapi/old_dirs/telco/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/telco/readme (renamed from ndb/test/ndbapi/old_dirs/telco/readme)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testBackup/Makefile (renamed from ndb/test/ndbapi/old_dirs/testBackup/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testBasic/Makefile (renamed from ndb/test/ndbapi/old_dirs/testBasic/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testBlobs/Makefile (renamed from ndb/test/ndbapi/old_dirs/testBlobs/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testDataBuffers/Makefile (renamed from ndb/test/ndbapi/old_dirs/testDataBuffers/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testDict/Makefile (renamed from ndb/test/ndbapi/old_dirs/testDict/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testGrep/Makefile (renamed from ndb/test/ndbapi/old_dirs/testGrep/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testGrep/verify/Makefile (renamed from ndb/test/ndbapi/old_dirs/testGrep/verify/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testIndex/Makefile (renamed from ndb/test/ndbapi/old_dirs/testIndex/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testInterpreter/Makefile (renamed from ndb/test/ndbapi/old_dirs/testInterpreter/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testMgm/Makefile (renamed from ndb/test/ndbapi/old_dirs/testMgm/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testNdbApi/Makefile (renamed from ndb/test/ndbapi/old_dirs/testNdbApi/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testNodeRestart/Makefile (renamed from ndb/test/ndbapi/old_dirs/testNodeRestart/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testOIBasic/Makefile (renamed from ndb/test/ndbapi/old_dirs/testOIBasic/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testOIBasic/times.txt (renamed from ndb/test/ndbapi/old_dirs/testOIBasic/times.txt)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testOperations/Makefile (renamed from ndb/test/ndbapi/old_dirs/testOperations/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testOrderedIndex/Makefile (renamed from ndb/test/ndbapi/old_dirs/testOrderedIndex/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testRestartGci/Makefile (renamed from ndb/test/ndbapi/old_dirs/testRestartGci/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testScan/Makefile (renamed from ndb/test/ndbapi/old_dirs/testScan/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testScanInterpreter/Makefile (renamed from ndb/test/ndbapi/old_dirs/testScanInterpreter/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testSystemRestart/Makefile (renamed from ndb/test/ndbapi/old_dirs/testSystemRestart/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testTimeout/Makefile (renamed from ndb/test/ndbapi/old_dirs/testTimeout/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/testTransactions/Makefile (renamed from ndb/test/ndbapi/old_dirs/testTransactions/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/test_event/Makefile (renamed from ndb/test/ndbapi/old_dirs/test_event/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/vw_test/Makefile (renamed from ndb/test/ndbapi/old_dirs/vw_test/Makefile)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/vw_test/bcd.h (renamed from ndb/test/ndbapi/old_dirs/vw_test/bcd.h)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/vw_test/script/client_start (renamed from ndb/test/ndbapi/old_dirs/vw_test/script/client_start)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/vw_test/utv.h (renamed from ndb/test/ndbapi/old_dirs/vw_test/utv.h)0
-rw-r--r--storage/ndb/test/ndbapi/old_dirs/vw_test/vcdrfunc.h (renamed from ndb/test/ndbapi/old_dirs/vw_test/vcdrfunc.h)0
-rw-r--r--storage/ndb/test/ndbapi/restarter.cpp (renamed from ndb/test/ndbapi/restarter.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/restarter2.cpp (renamed from ndb/test/ndbapi/restarter2.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/restarts.cpp (renamed from ndb/test/ndbapi/restarts.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/size.cpp (renamed from ndb/test/ndbapi/size.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/slow_select.cpp (renamed from ndb/test/ndbapi/slow_select.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testBackup.cpp (renamed from ndb/test/ndbapi/testBackup.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testBasic.cpp (renamed from ndb/test/ndbapi/testBasic.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testBasicAsynch.cpp (renamed from ndb/test/ndbapi/testBasicAsynch.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testBitfield.cpp (renamed from ndb/test/ndbapi/testBitfield.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testBlobs.cpp (renamed from ndb/test/ndbapi/testBlobs.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testDataBuffers.cpp (renamed from ndb/test/ndbapi/testDataBuffers.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testDeadlock.cpp (renamed from ndb/test/ndbapi/testDeadlock.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testDict.cpp (renamed from ndb/test/ndbapi/testDict.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testGrep.cpp (renamed from ndb/test/ndbapi/testGrep.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testGrepVerify.cpp (renamed from ndb/test/ndbapi/testGrepVerify.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testIndex.cpp (renamed from ndb/test/ndbapi/testIndex.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testInterpreter.cpp (renamed from ndb/test/ndbapi/testInterpreter.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testLcp.cpp (renamed from ndb/test/ndbapi/testLcp.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testMgm.cpp (renamed from ndb/test/ndbapi/testMgm.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testNdbApi.cpp (renamed from ndb/test/ndbapi/testNdbApi.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testNodeRestart.cpp (renamed from ndb/test/ndbapi/testNodeRestart.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testOIBasic.cpp (renamed from ndb/test/ndbapi/testOIBasic.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testOperations.cpp (renamed from ndb/test/ndbapi/testOperations.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testOrderedIndex.cpp (renamed from ndb/test/ndbapi/testOrderedIndex.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testPartitioning.cpp (renamed from ndb/test/ndbapi/testPartitioning.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testReadPerf.cpp (renamed from ndb/test/ndbapi/testReadPerf.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testRestartGci.cpp (renamed from ndb/test/ndbapi/testRestartGci.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testScan.cpp (renamed from ndb/test/ndbapi/testScan.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testScanInterpreter.cpp (renamed from ndb/test/ndbapi/testScanInterpreter.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testScanPerf.cpp (renamed from ndb/test/ndbapi/testScanPerf.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testSystemRestart.cpp (renamed from ndb/test/ndbapi/testSystemRestart.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testTimeout.cpp (renamed from ndb/test/ndbapi/testTimeout.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/testTransactions.cpp (renamed from ndb/test/ndbapi/testTransactions.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/test_event.cpp (renamed from ndb/test/ndbapi/test_event.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/test_event_multi_table.cpp (renamed from ndb/test/ndbapi/test_event_multi_table.cpp)0
-rw-r--r--storage/ndb/test/ndbapi/userInterface.cpp (renamed from ndb/test/ndbapi/userInterface.cpp)0
-rw-r--r--storage/ndb/test/ndbnet/test.run (renamed from ndb/test/ndbnet/test.run)0
-rw-r--r--storage/ndb/test/ndbnet/testError.run (renamed from ndb/test/ndbnet/testError.run)0
-rw-r--r--storage/ndb/test/ndbnet/testMNF.run (renamed from ndb/test/ndbnet/testMNF.run)0
-rw-r--r--storage/ndb/test/ndbnet/testNR.run (renamed from ndb/test/ndbnet/testNR.run)0
-rw-r--r--storage/ndb/test/ndbnet/testNR1.run (renamed from ndb/test/ndbnet/testNR1.run)0
-rw-r--r--storage/ndb/test/ndbnet/testNR4.run (renamed from ndb/test/ndbnet/testNR4.run)0
-rw-r--r--storage/ndb/test/ndbnet/testSRhang.run (renamed from ndb/test/ndbnet/testSRhang.run)0
-rw-r--r--storage/ndb/test/ndbnet/testTR295.run (renamed from ndb/test/ndbnet/testTR295.run)0
-rw-r--r--storage/ndb/test/newtonapi/basic_test/Makefile (renamed from ndb/test/newtonapi/basic_test/Makefile)0
-rw-r--r--storage/ndb/test/newtonapi/basic_test/basic/Makefile (renamed from ndb/test/newtonapi/basic_test/basic/Makefile)0
-rw-r--r--storage/ndb/test/newtonapi/basic_test/basic/basic.cpp (renamed from ndb/test/newtonapi/basic_test/basic/basic.cpp)0
-rw-r--r--storage/ndb/test/newtonapi/basic_test/bulk_read/Makefile (renamed from ndb/test/newtonapi/basic_test/bulk_read/Makefile)0
-rw-r--r--storage/ndb/test/newtonapi/basic_test/bulk_read/br_test.cpp (renamed from ndb/test/newtonapi/basic_test/bulk_read/br_test.cpp)0
-rw-r--r--storage/ndb/test/newtonapi/basic_test/common.cpp (renamed from ndb/test/newtonapi/basic_test/common.cpp)0
-rw-r--r--storage/ndb/test/newtonapi/basic_test/common.hpp (renamed from ndb/test/newtonapi/basic_test/common.hpp)0
-rw-r--r--storage/ndb/test/newtonapi/basic_test/ptr_binding/Makefile (renamed from ndb/test/newtonapi/basic_test/ptr_binding/Makefile)0
-rw-r--r--storage/ndb/test/newtonapi/basic_test/ptr_binding/ptr_binding_test.cpp (renamed from ndb/test/newtonapi/basic_test/ptr_binding/ptr_binding_test.cpp)0
-rw-r--r--storage/ndb/test/newtonapi/basic_test/too_basic.cpp (renamed from ndb/test/newtonapi/basic_test/too_basic.cpp)0
-rw-r--r--storage/ndb/test/newtonapi/perf_test/Makefile (renamed from ndb/test/newtonapi/perf_test/Makefile)0
-rw-r--r--storage/ndb/test/newtonapi/perf_test/perf.cpp (renamed from ndb/test/newtonapi/perf_test/perf.cpp)0
-rw-r--r--storage/ndb/test/odbc/SQL99_test/Makefile (renamed from ndb/test/odbc/SQL99_test/Makefile)0
-rw-r--r--storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp (renamed from ndb/test/odbc/SQL99_test/SQL99_test.cpp)0
-rw-r--r--storage/ndb/test/odbc/SQL99_test/SQL99_test.h (renamed from ndb/test/odbc/SQL99_test/SQL99_test.h)0
-rw-r--r--storage/ndb/test/odbc/client/Makefile (renamed from ndb/test/odbc/client/Makefile)0
-rw-r--r--storage/ndb/test/odbc/client/NDBT_ALLOCHANDLE.cpp (renamed from ndb/test/odbc/client/NDBT_ALLOCHANDLE.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/NDBT_ALLOCHANDLE_HDBC.cpp (renamed from ndb/test/odbc/client/NDBT_ALLOCHANDLE_HDBC.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/NDBT_SQLConnect.cpp (renamed from ndb/test/odbc/client/NDBT_SQLConnect.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/NDBT_SQLPrepare.cpp (renamed from ndb/test/odbc/client/NDBT_SQLPrepare.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLAllocEnvTest.cpp (renamed from ndb/test/odbc/client/SQLAllocEnvTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLAllocHandleTest.cpp (renamed from ndb/test/odbc/client/SQLAllocHandleTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLAllocHandleTest_bf.cpp (renamed from ndb/test/odbc/client/SQLAllocHandleTest_bf.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLBindColTest.cpp (renamed from ndb/test/odbc/client/SQLBindColTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLBindParameterTest.cpp (renamed from ndb/test/odbc/client/SQLBindParameterTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLCancelTest.cpp (renamed from ndb/test/odbc/client/SQLCancelTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLCloseCursorTest.cpp (renamed from ndb/test/odbc/client/SQLCloseCursorTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLColAttributeTest.cpp (renamed from ndb/test/odbc/client/SQLColAttributeTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLColAttributeTest1.cpp (renamed from ndb/test/odbc/client/SQLColAttributeTest1.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLColAttributeTest2.cpp (renamed from ndb/test/odbc/client/SQLColAttributeTest2.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLColAttributeTest3.cpp (renamed from ndb/test/odbc/client/SQLColAttributeTest3.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLConnectTest.cpp (renamed from ndb/test/odbc/client/SQLConnectTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLCopyDescTest.cpp (renamed from ndb/test/odbc/client/SQLCopyDescTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLDescribeColTest.cpp (renamed from ndb/test/odbc/client/SQLDescribeColTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLDisconnectTest.cpp (renamed from ndb/test/odbc/client/SQLDisconnectTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLDriverConnectTest.cpp (renamed from ndb/test/odbc/client/SQLDriverConnectTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLEndTranTest.cpp (renamed from ndb/test/odbc/client/SQLEndTranTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLErrorTest.cpp (renamed from ndb/test/odbc/client/SQLErrorTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLExecDirectTest.cpp (renamed from ndb/test/odbc/client/SQLExecDirectTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLExecuteTest.cpp (renamed from ndb/test/odbc/client/SQLExecuteTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLFetchScrollTest.cpp (renamed from ndb/test/odbc/client/SQLFetchScrollTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLFetchTest.cpp (renamed from ndb/test/odbc/client/SQLFetchTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLFreeHandleTest.cpp (renamed from ndb/test/odbc/client/SQLFreeHandleTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLFreeStmtTest.cpp (renamed from ndb/test/odbc/client/SQLFreeStmtTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLGetConnectAttrTest.cpp (renamed from ndb/test/odbc/client/SQLGetConnectAttrTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLGetCursorNameTest.cpp (renamed from ndb/test/odbc/client/SQLGetCursorNameTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLGetDataTest.cpp (renamed from ndb/test/odbc/client/SQLGetDataTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLGetDescFieldTest.cpp (renamed from ndb/test/odbc/client/SQLGetDescFieldTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLGetDescRecTest.cpp (renamed from ndb/test/odbc/client/SQLGetDescRecTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLGetDiagFieldTest.cpp (renamed from ndb/test/odbc/client/SQLGetDiagFieldTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLGetDiagRecSimpleTest.cpp (renamed from ndb/test/odbc/client/SQLGetDiagRecSimpleTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLGetDiagRecTest.cpp (renamed from ndb/test/odbc/client/SQLGetDiagRecTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLGetEnvAttrTest.cpp (renamed from ndb/test/odbc/client/SQLGetEnvAttrTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLGetFunctionsTest.cpp (renamed from ndb/test/odbc/client/SQLGetFunctionsTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLGetInfoTest.cpp (renamed from ndb/test/odbc/client/SQLGetInfoTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLGetStmtAttrTest.cpp (renamed from ndb/test/odbc/client/SQLGetStmtAttrTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLGetTypeInfoTest.cpp (renamed from ndb/test/odbc/client/SQLGetTypeInfoTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLMoreResultsTest.cpp (renamed from ndb/test/odbc/client/SQLMoreResultsTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLNumResultColsTest.cpp (renamed from ndb/test/odbc/client/SQLNumResultColsTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLParamDataTest.cpp (renamed from ndb/test/odbc/client/SQLParamDataTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLPrepareTest.cpp (renamed from ndb/test/odbc/client/SQLPrepareTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLPutDataTest.cpp (renamed from ndb/test/odbc/client/SQLPutDataTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLRowCountTest.cpp (renamed from ndb/test/odbc/client/SQLRowCountTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLSetConnectAttrTest.cpp (renamed from ndb/test/odbc/client/SQLSetConnectAttrTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLSetCursorNameTest.cpp (renamed from ndb/test/odbc/client/SQLSetCursorNameTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLSetDescFieldTest.cpp (renamed from ndb/test/odbc/client/SQLSetDescFieldTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLSetDescRecTest.cpp (renamed from ndb/test/odbc/client/SQLSetDescRecTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLSetEnvAttrTest.cpp (renamed from ndb/test/odbc/client/SQLSetEnvAttrTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLSetStmtAttrTest.cpp (renamed from ndb/test/odbc/client/SQLSetStmtAttrTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLTablesTest.cpp (renamed from ndb/test/odbc/client/SQLTablesTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/SQLTransactTest.cpp (renamed from ndb/test/odbc/client/SQLTransactTest.cpp)0
-rw-r--r--storage/ndb/test/odbc/client/common.hpp (renamed from ndb/test/odbc/client/common.hpp)0
-rw-r--r--storage/ndb/test/odbc/client/main.cpp (renamed from ndb/test/odbc/client/main.cpp)0
-rw-r--r--storage/ndb/test/odbc/dm-iodbc/Makefile (renamed from ndb/test/odbc/dm-iodbc/Makefile)0
-rw-r--r--storage/ndb/test/odbc/dm-unixodbc/Makefile (renamed from ndb/test/odbc/dm-unixodbc/Makefile)0
-rw-r--r--storage/ndb/test/odbc/driver/Makefile (renamed from ndb/test/odbc/driver/Makefile)0
-rw-r--r--storage/ndb/test/odbc/driver/testOdbcDriver.cpp (renamed from ndb/test/odbc/driver/testOdbcDriver.cpp)0
-rw-r--r--storage/ndb/test/odbc/test_compiler/Makefile (renamed from ndb/test/odbc/test_compiler/Makefile)0
-rw-r--r--storage/ndb/test/odbc/test_compiler/test_compiler.cpp (renamed from ndb/test/odbc/test_compiler/test_compiler.cpp)0
-rw-r--r--storage/ndb/test/run-test/16node-tests.txt (renamed from ndb/test/run-test/16node-tests.txt)0
-rw-r--r--storage/ndb/test/run-test/Makefile.am34
-rw-r--r--storage/ndb/test/run-test/README43
-rw-r--r--storage/ndb/test/run-test/README.ATRT (renamed from ndb/test/run-test/README.ATRT)0
-rwxr-xr-xstorage/ndb/test/run-test/atrt-analyze-result.sh (renamed from ndb/test/run-test/atrt-analyze-result.sh)0
-rwxr-xr-xstorage/ndb/test/run-test/atrt-clear-result.sh (renamed from ndb/test/run-test/atrt-clear-result.sh)0
-rw-r--r--storage/ndb/test/run-test/atrt-example.tgz (renamed from ndb/test/run-test/atrt-example.tgz)bin2196 -> 2196 bytes
-rwxr-xr-xstorage/ndb/test/run-test/atrt-gather-result.sh (renamed from ndb/test/run-test/atrt-gather-result.sh)0
-rwxr-xr-xstorage/ndb/test/run-test/atrt-mysql-test-run (renamed from ndb/test/run-test/atrt-mysql-test-run)0
-rwxr-xr-xstorage/ndb/test/run-test/atrt-setup.sh (renamed from ndb/test/run-test/atrt-setup.sh)0
-rwxr-xr-xstorage/ndb/test/run-test/atrt-testBackup (renamed from ndb/test/run-test/atrt-testBackup)0
-rw-r--r--storage/ndb/test/run-test/basic.txt (renamed from ndb/test/run-test/basic.txt)0
-rw-r--r--storage/ndb/test/run-test/conf-daily-basic-dl145a.txt (renamed from ndb/test/run-test/conf-daily-basic-dl145a.txt)0
-rw-r--r--storage/ndb/test/run-test/conf-daily-basic-ndbmaster.txt (renamed from ndb/test/run-test/conf-daily-basic-ndbmaster.txt)0
-rw-r--r--storage/ndb/test/run-test/conf-daily-basic-shark.txt (renamed from ndb/test/run-test/conf-daily-basic-shark.txt)0
-rw-r--r--storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt (renamed from ndb/test/run-test/conf-daily-devel-ndbmaster.txt)0
-rw-r--r--storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt (renamed from ndb/test/run-test/conf-daily-sql-ndbmaster.txt)0
-rw-r--r--storage/ndb/test/run-test/daily-basic-tests.txt (renamed from ndb/test/run-test/daily-basic-tests.txt)0
-rw-r--r--storage/ndb/test/run-test/daily-devel-tests.txt (renamed from ndb/test/run-test/daily-devel-tests.txt)0
-rw-r--r--storage/ndb/test/run-test/example.conf (renamed from ndb/test/run-test/example.conf)0
-rw-r--r--storage/ndb/test/run-test/main.cpp (renamed from ndb/test/run-test/main.cpp)0
-rwxr-xr-xstorage/ndb/test/run-test/make-config.sh (renamed from ndb/test/run-test/make-config.sh)0
-rwxr-xr-xstorage/ndb/test/run-test/make-html-reports.sh (renamed from ndb/test/run-test/make-html-reports.sh)0
-rwxr-xr-xstorage/ndb/test/run-test/make-index.sh (renamed from ndb/test/run-test/make-index.sh)0
-rwxr-xr-xstorage/ndb/test/run-test/ndb-autotest.sh (renamed from ndb/test/run-test/ndb-autotest.sh)0
-rw-r--r--storage/ndb/test/run-test/run-test.hpp (renamed from ndb/test/run-test/run-test.hpp)0
-rw-r--r--storage/ndb/test/src/CpcClient.cpp (renamed from ndb/test/src/CpcClient.cpp)0
-rw-r--r--storage/ndb/test/src/HugoAsynchTransactions.cpp (renamed from ndb/test/src/HugoAsynchTransactions.cpp)0
-rw-r--r--storage/ndb/test/src/HugoCalculator.cpp (renamed from ndb/test/src/HugoCalculator.cpp)0
-rw-r--r--storage/ndb/test/src/HugoOperations.cpp (renamed from ndb/test/src/HugoOperations.cpp)0
-rw-r--r--storage/ndb/test/src/HugoTransactions.cpp (renamed from ndb/test/src/HugoTransactions.cpp)0
-rw-r--r--storage/ndb/test/src/Makefile.am35
-rw-r--r--storage/ndb/test/src/NDBT_Error.cpp (renamed from ndb/test/src/NDBT_Error.cpp)0
-rw-r--r--storage/ndb/test/src/NDBT_Output.cpp (renamed from ndb/test/src/NDBT_Output.cpp)0
-rw-r--r--storage/ndb/test/src/NDBT_ResultRow.cpp (renamed from ndb/test/src/NDBT_ResultRow.cpp)0
-rw-r--r--storage/ndb/test/src/NDBT_ReturnCodes.cpp (renamed from ndb/test/src/NDBT_ReturnCodes.cpp)0
-rw-r--r--storage/ndb/test/src/NDBT_Table.cpp (renamed from ndb/test/src/NDBT_Table.cpp)0
-rw-r--r--storage/ndb/test/src/NDBT_Tables.cpp (renamed from ndb/test/src/NDBT_Tables.cpp)0
-rw-r--r--storage/ndb/test/src/NDBT_Test.cpp (renamed from ndb/test/src/NDBT_Test.cpp)0
-rw-r--r--storage/ndb/test/src/NdbBackup.cpp (renamed from ndb/test/src/NdbBackup.cpp)0
-rw-r--r--storage/ndb/test/src/NdbConfig.cpp (renamed from ndb/test/src/NdbConfig.cpp)0
-rw-r--r--storage/ndb/test/src/NdbGrep.cpp (renamed from ndb/test/src/NdbGrep.cpp)0
-rw-r--r--storage/ndb/test/src/NdbRestarter.cpp (renamed from ndb/test/src/NdbRestarter.cpp)0
-rw-r--r--storage/ndb/test/src/NdbRestarts.cpp (renamed from ndb/test/src/NdbRestarts.cpp)0
-rw-r--r--storage/ndb/test/src/NdbSchemaCon.cpp (renamed from ndb/test/src/NdbSchemaCon.cpp)0
-rw-r--r--storage/ndb/test/src/NdbSchemaOp.cpp (renamed from ndb/test/src/NdbSchemaOp.cpp)0
-rw-r--r--storage/ndb/test/src/UtilTransactions.cpp (renamed from ndb/test/src/UtilTransactions.cpp)0
-rw-r--r--storage/ndb/test/src/getarg.c (renamed from ndb/test/src/getarg.c)0
-rw-r--r--storage/ndb/test/tools/Makefile.am30
-rw-r--r--storage/ndb/test/tools/copy_tab.cpp (renamed from ndb/test/tools/copy_tab.cpp)0
-rw-r--r--storage/ndb/test/tools/cpcc.cpp (renamed from ndb/test/tools/cpcc.cpp)0
-rw-r--r--storage/ndb/test/tools/create_index.cpp (renamed from ndb/test/tools/create_index.cpp)0
-rw-r--r--storage/ndb/test/tools/hugoCalculator.cpp (renamed from ndb/test/tools/hugoCalculator.cpp)0
-rw-r--r--storage/ndb/test/tools/hugoFill.cpp (renamed from ndb/test/tools/hugoFill.cpp)0
-rw-r--r--storage/ndb/test/tools/hugoLoad.cpp (renamed from ndb/test/tools/hugoLoad.cpp)0
-rw-r--r--storage/ndb/test/tools/hugoLockRecords.cpp (renamed from ndb/test/tools/hugoLockRecords.cpp)0
-rw-r--r--storage/ndb/test/tools/hugoPkDelete.cpp (renamed from ndb/test/tools/hugoPkDelete.cpp)0
-rw-r--r--storage/ndb/test/tools/hugoPkRead.cpp (renamed from ndb/test/tools/hugoPkRead.cpp)0
-rw-r--r--storage/ndb/test/tools/hugoPkReadRecord.cpp (renamed from ndb/test/tools/hugoPkReadRecord.cpp)0
-rw-r--r--storage/ndb/test/tools/hugoPkUpdate.cpp (renamed from ndb/test/tools/hugoPkUpdate.cpp)0
-rw-r--r--storage/ndb/test/tools/hugoScanRead.cpp (renamed from ndb/test/tools/hugoScanRead.cpp)0
-rw-r--r--storage/ndb/test/tools/hugoScanUpdate.cpp (renamed from ndb/test/tools/hugoScanUpdate.cpp)0
-rw-r--r--storage/ndb/test/tools/old_dirs/hugoCalculator/Makefile (renamed from ndb/test/tools/old_dirs/hugoCalculator/Makefile)0
-rw-r--r--storage/ndb/test/tools/old_dirs/hugoFill/Makefile (renamed from ndb/test/tools/old_dirs/hugoFill/Makefile)0
-rw-r--r--storage/ndb/test/tools/old_dirs/hugoLoad/Makefile (renamed from ndb/test/tools/old_dirs/hugoLoad/Makefile)0
-rw-r--r--storage/ndb/test/tools/old_dirs/hugoLockRecords/Makefile (renamed from ndb/test/tools/old_dirs/hugoLockRecords/Makefile)0
-rw-r--r--storage/ndb/test/tools/old_dirs/hugoPkDelete/Makefile (renamed from ndb/test/tools/old_dirs/hugoPkDelete/Makefile)0
-rw-r--r--storage/ndb/test/tools/old_dirs/hugoPkRead/Makefile (renamed from ndb/test/tools/old_dirs/hugoPkRead/Makefile)0
-rw-r--r--storage/ndb/test/tools/old_dirs/hugoPkReadRecord/Makefile (renamed from ndb/test/tools/old_dirs/hugoPkReadRecord/Makefile)0
-rw-r--r--storage/ndb/test/tools/old_dirs/hugoPkUpdate/Makefile (renamed from ndb/test/tools/old_dirs/hugoPkUpdate/Makefile)0
-rw-r--r--storage/ndb/test/tools/old_dirs/hugoScanRead/Makefile (renamed from ndb/test/tools/old_dirs/hugoScanRead/Makefile)0
-rw-r--r--storage/ndb/test/tools/old_dirs/hugoScanUpdate/Makefile (renamed from ndb/test/tools/old_dirs/hugoScanUpdate/Makefile)0
-rw-r--r--storage/ndb/test/tools/old_dirs/restart/Makefile (renamed from ndb/test/tools/old_dirs/restart/Makefile)0
-rw-r--r--storage/ndb/test/tools/old_dirs/transproxy/Makefile (renamed from ndb/test/tools/old_dirs/transproxy/Makefile)0
-rw-r--r--storage/ndb/test/tools/old_dirs/verify_index/Makefile (renamed from ndb/test/tools/old_dirs/verify_index/Makefile)0
-rw-r--r--storage/ndb/test/tools/old_dirs/waiter/waiter.cpp (renamed from ndb/test/tools/old_dirs/waiter/waiter.cpp)0
-rw-r--r--storage/ndb/test/tools/restart.cpp (renamed from ndb/test/tools/restart.cpp)0
-rw-r--r--storage/ndb/test/tools/transproxy.cpp (renamed from ndb/test/tools/transproxy.cpp)0
-rw-r--r--storage/ndb/test/tools/verify_index.cpp (renamed from ndb/test/tools/verify_index.cpp)0
-rw-r--r--storage/ndb/tools/Makefile.am157
-rwxr-xr-xstorage/ndb/tools/clean-links.sh (renamed from ndb/tools/clean-links.sh)0
-rw-r--r--storage/ndb/tools/delete_all.cpp (renamed from ndb/tools/delete_all.cpp)0
-rw-r--r--storage/ndb/tools/desc.cpp (renamed from ndb/tools/desc.cpp)0
-rw-r--r--storage/ndb/tools/drop_index.cpp (renamed from ndb/tools/drop_index.cpp)0
-rw-r--r--storage/ndb/tools/drop_tab.cpp (renamed from ndb/tools/drop_tab.cpp)0
-rw-r--r--storage/ndb/tools/listTables.cpp (renamed from ndb/tools/listTables.cpp)0
-rw-r--r--storage/ndb/tools/make-errors.pl (renamed from ndb/tools/make-errors.pl)0
-rwxr-xr-xstorage/ndb/tools/make-links.sh (renamed from ndb/tools/make-links.sh)0
-rw-r--r--storage/ndb/tools/ndb_test_platform.cpp (renamed from ndb/tools/ndb_test_platform.cpp)0
-rw-r--r--storage/ndb/tools/ndbsql.cpp (renamed from ndb/tools/ndbsql.cpp)0
-rw-r--r--storage/ndb/tools/old_dirs/copy_tab/Makefile (renamed from ndb/tools/old_dirs/copy_tab/Makefile)0
-rw-r--r--storage/ndb/tools/old_dirs/cpcc/Makefile (renamed from ndb/tools/old_dirs/cpcc/Makefile)0
-rw-r--r--storage/ndb/tools/old_dirs/create_index/Makefile (renamed from ndb/tools/old_dirs/create_index/Makefile)0
-rw-r--r--storage/ndb/tools/old_dirs/delete_all/Makefile (renamed from ndb/tools/old_dirs/delete_all/Makefile)0
-rw-r--r--storage/ndb/tools/old_dirs/desc/Makefile (renamed from ndb/tools/old_dirs/desc/Makefile)0
-rw-r--r--storage/ndb/tools/old_dirs/drop_index/Makefile (renamed from ndb/tools/old_dirs/drop_index/Makefile)0
-rw-r--r--storage/ndb/tools/old_dirs/drop_tab/Makefile (renamed from ndb/tools/old_dirs/drop_tab/Makefile)0
-rw-r--r--storage/ndb/tools/old_dirs/list_tables/Makefile (renamed from ndb/tools/old_dirs/list_tables/Makefile)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/Makefile.PL (renamed from ndb/tools/old_dirs/ndbnet/Makefile.PL)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Net.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Base.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Base.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Client.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Client.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Command.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Command.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Config.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Config.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Database.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Database.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Env.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Env.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Node.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Node.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeApi.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeApi.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeDb.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeDb.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeMgmt.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeMgmt.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Server.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Server.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerINET.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerINET.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerUNIX.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerUNIX.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Run.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Base.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Base.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Database.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Database.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Env.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Env.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Node.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Node.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Util.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Base.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Base.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Dir.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Dir.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Event.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Event.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/File.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Util/File.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/IO.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Util/IO.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Lock.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Lock.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Log.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Log.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Socket.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Socket.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketINET.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketINET.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketUNIX.pm (renamed from ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketUNIX.pm)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/ndbnet.pl (renamed from ndb/tools/old_dirs/ndbnet/ndbnet.pl)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/ndbnetd.pl (renamed from ndb/tools/old_dirs/ndbnet/ndbnetd.pl)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbnet/ndbrun (renamed from ndb/tools/old_dirs/ndbnet/ndbrun)0
-rw-r--r--storage/ndb/tools/old_dirs/ndbsql/Makefile (renamed from ndb/tools/old_dirs/ndbsql/Makefile)0
-rw-r--r--storage/ndb/tools/old_dirs/select_all/Makefile (renamed from ndb/tools/old_dirs/select_all/Makefile)0
-rw-r--r--storage/ndb/tools/old_dirs/select_count/Makefile (renamed from ndb/tools/old_dirs/select_count/Makefile)0
-rw-r--r--storage/ndb/tools/old_dirs/src/counterviewer/CounterViewer.java (renamed from ndb/tools/old_dirs/src/counterviewer/CounterViewer.java)0
-rw-r--r--storage/ndb/tools/restore/Restore.cpp941
-rw-r--r--storage/ndb/tools/restore/Restore.hpp (renamed from ndb/tools/restore/Restore.hpp)0
-rw-r--r--storage/ndb/tools/restore/consumer.cpp (renamed from ndb/tools/restore/consumer.cpp)0
-rw-r--r--storage/ndb/tools/restore/consumer.hpp (renamed from ndb/tools/restore/consumer.hpp)0
-rw-r--r--storage/ndb/tools/restore/consumer_printer.cpp (renamed from ndb/tools/restore/consumer_printer.cpp)0
-rw-r--r--storage/ndb/tools/restore/consumer_printer.hpp (renamed from ndb/tools/restore/consumer_printer.hpp)0
-rw-r--r--storage/ndb/tools/restore/consumer_restore.cpp (renamed from ndb/tools/restore/consumer_restore.cpp)0
-rw-r--r--storage/ndb/tools/restore/consumer_restore.hpp (renamed from ndb/tools/restore/consumer_restore.hpp)0
-rw-r--r--storage/ndb/tools/restore/consumer_restorem.cpp (renamed from ndb/tools/restore/consumer_restorem.cpp)0
-rw-r--r--storage/ndb/tools/restore/restore_main.cpp (renamed from ndb/tools/restore/restore_main.cpp)0
-rwxr-xr-xstorage/ndb/tools/rgrep (renamed from ndb/tools/rgrep)0
-rw-r--r--storage/ndb/tools/select_all.cpp (renamed from ndb/tools/select_all.cpp)0
-rw-r--r--storage/ndb/tools/select_count.cpp (renamed from ndb/tools/select_count.cpp)0
-rw-r--r--storage/ndb/tools/waiter.cpp (renamed from ndb/tools/waiter.cpp)0
-rw-r--r--support-files/mysql.spec.sh5
3473 files changed, 116899 insertions, 101525 deletions
diff --git a/BUILD/FINISH.sh b/BUILD/FINISH.sh
index 8cd78bf4165..6ee8b27e472 100644
--- a/BUILD/FINISH.sh
+++ b/BUILD/FINISH.sh
@@ -5,8 +5,7 @@ configure="./configure $base_configs $extra_configs"
commands="\
$make -k distclean || true
-/bin/rm -rf */.deps/*.P config.cache innobase/config.cache bdb/build_unix/config.cache bdb/dist/autom4te.cache autom4te.cache innobase/autom4te.cache;
-
+/bin/rm -rf */.deps/*.P config.cache storage/innobase/config.cache storage/bdb/build_unix/config.cache bdb/dist/autom4te.cache autom4te.cache innobase/autom4te.cache;
path=`dirname $0`
. \"$path/autorun.sh\""
diff --git a/BUILD/autorun.sh b/BUILD/autorun.sh
index 47a80a709a8..82fd1722bd0 100755
--- a/BUILD/autorun.sh
+++ b/BUILD/autorun.sh
@@ -17,5 +17,5 @@ fi
# and --force to overwrite them if they already exist
automake --add-missing --force || die "Can't execute automake"
autoconf || die "Can't execute autoconf"
-(cd bdb/dist && sh s_all)
-(cd innobase && aclocal && autoheader && aclocal && automake && autoconf)
+(cd storage/bdb/dist && sh s_all)
+(cd storage/innobase && aclocal && autoheader && aclocal && automake && autoconf)
diff --git a/BUILD/compile-alpha-cxx b/BUILD/compile-alpha-cxx
index c49846fd964..610d358ef82 100755
--- a/BUILD/compile-alpha-cxx
+++ b/BUILD/compile-alpha-cxx
@@ -1,7 +1,7 @@
/bin/rm -f */.deps/*.P */*.o
make -k clean
/bin/rm -f */.deps/*.P */*.o
-/bin/rm -f */.deps/*.P config.cache innobase/config.cache bdb/build_unix/config.cache mysql-*.tar.gz
+/bin/rm -f */.deps/*.P config.cache storage/innobase/config.cache storage/bdb/build_unix/config.cache mysql-*.tar.gz
path=`dirname $0`
. "$path/autorun.sh"
diff --git a/BUILD/compile-alpha-debug b/BUILD/compile-alpha-debug
index 113c2151461..8beffa65cb5 100755
--- a/BUILD/compile-alpha-debug
+++ b/BUILD/compile-alpha-debug
@@ -1,7 +1,7 @@
/bin/rm -f */.deps/*.P */*.o
make -k clean
/bin/rm -f */.deps/*.P */*.o
-/bin/rm -f */.deps/*.P config.cache innobase/config.cache bdb/build_unix/config.cache mysql-*.tar.gz
+/bin/rm -f */.deps/*.P config.cache storage/innobase/config.cache storage/bdb/build_unix/config.cache mysql-*.tar.gz
path=`dirname $0`
. "$path/autorun.sh"
diff --git a/BUILD/compile-dist b/BUILD/compile-dist
index 39095f59ffa..a8d180eace1 100755
--- a/BUILD/compile-dist
+++ b/BUILD/compile-dist
@@ -12,8 +12,8 @@ autoheader
libtoolize --automake --force --copy
automake --force --add-missing --copy
autoconf
-(cd bdb/dist && sh s_all)
-(cd innobase && aclocal && autoheader && aclocal && automake && autoconf)
+(cd storage/bdb/dist && sh s_all)
+(cd storage/innobase && aclocal && autoheader && aclocal && automake && autoconf)
# Default to gcc for CC and CXX
if test -z "$CXX" ; then
diff --git a/BUILD/compile-ia64-debug-max b/BUILD/compile-ia64-debug-max
index 5082844f088..d5f931c8d51 100755
--- a/BUILD/compile-ia64-debug-max
+++ b/BUILD/compile-ia64-debug-max
@@ -1,5 +1,5 @@
gmake -k clean || true
-/bin/rm -f */.deps/*.P config.cache innobase/config.cache bdb/build_unix/config.cache
+/bin/rm -f */.deps/*.P config.cache storage/innobase/config.cache storage/bdb/build_unix/config.cache
path=`dirname $0`
. "$path/autorun.sh"
diff --git a/BUILD/compile-pentium-pgcc b/BUILD/compile-pentium-pgcc
index 639f108bb2b..411241451cf 100755
--- a/BUILD/compile-pentium-pgcc
+++ b/BUILD/compile-pentium-pgcc
@@ -2,7 +2,6 @@ AM_MAKEFLAGS="-j 2"
gmake -k clean || true
/bin/rm -f */.deps/*.P config.cache
-
path=`dirname $0`
. "$path/autorun.sh"
diff --git a/BitKeeper/etc/gone b/BitKeeper/etc/gone
index 7c9741f7e79..5964232680e 100644
--- a/BitKeeper/etc/gone
+++ b/BitKeeper/etc/gone
@@ -1184,9 +1184,11 @@ mwagner@evoq.home.mwagner.org|mysql-test/xml/xsl/README|20001013051514|26509|cd4
mwagner@evoq.home.mwagner.org|mysql-test/xml/xsl/mysqltest.xsl|20001013051514|27425|1b8f6ec4f1b5f634
mwagner@work.mysql.com|mysql-test/r/3.23/sel000001.result|20001010091454|28284|383913ae4505ec86
mwagner@work.mysql.com|mysql-test/r/3.23/sel000002.result|20001010091454|29230|d1787e6fd5dbc1cc
+mysql-test/t/reserved_win_names-master.opt
ndb/src/client/Makefile
nick@nick.leippe.com|mysql-test/r/rpl_empty_master_crash.result|20020531235552|47718|615f521be2132141
nick@nick.leippe.com|mysql-test/t/rpl_empty_master_crash.test|20020531235552|52328|99464e737639ccc6
+reggie@mdk10.(none)|BitKeeper/deleted/.del-reserved_win_names-master.opt~e56da049a7ce9a5b|20050523193219|41081
reggie@mdk10.(none)|mysql-test/t/reserved_win_names-master.opt|20050520210356|14878|e56da049a7ce9a5b
sasha@mysql.sashanet.com|BitKeeper/etc/logging_ok|20000801000905|12967|5b7d847a2158554
sasha@mysql.sashanet.com|build-tags|20011125054855|05181|7afb7e785b80f97
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index e9cc35b4c7e..416ab8dbbde 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -18,6 +18,7 @@ antony@ltantony.dsl-verizon.net
antony@ltantony.mysql.com
antony@ltantony.rdg.cyberkinetica.com
antony@ltantony.rdg.cyberkinetica.homeunix.net
+antony@ltantony.xiphis.org
arjen@bitbike.com
arjen@co3064164-a.bitbike.com
arjen@fred.bitbike.com
@@ -139,6 +140,7 @@ magnus@msdesk.mysql.com
magnus@neptunus.(none)
magnus@shellback.(none)
marko@hundin.mysql.fi
+marty@flipper.bredbandsbolaget.se
marty@linux.site
marty@shark.
mats@mysql.com
@@ -208,6 +210,7 @@ papa@gbichot.local
patg@krsna.
patg@krsna.patg.net
patg@patrick-galbraiths-computer.local
+patg@patrick.local
patg@pc248.lfp.kcls.org
patg@radha.local
paul@central.snake.net
diff --git a/BitKeeper/triggers/post-commit b/BitKeeper/triggers/post-commit
index fe263b79325..e3f86fa995a 100755
--- a/BitKeeper/triggers/post-commit
+++ b/BitKeeper/triggers/post-commit
@@ -5,7 +5,7 @@ FROM=$USER@mysql.com
INTERNALS=internals@lists.mysql.com
DOCS=docs-commit@mysql.com
LIMIT=10000
-VERSION="5.0"
+VERSION="5.1"
if [ "$REAL_EMAIL" = "" ]
then
diff --git a/Makefile.am b/Makefile.am
index 6eb7421880e..830c68df3f5 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -30,9 +30,11 @@ SUBDIRS = . include @docs_dirs@ @zlib_dir@ @yassl_dir@ \
DIST_SUBDIRS = . include @docs_dirs@ zlib \
@readline_topdir@ sql-common \
@thread_dirs@ pstack \
- @sql_union_dirs@ scripts @man_dirs@ tests SSL\
- BUILD netware os2 @libmysqld_dirs@ \
- @bench_dirs@ support-files @tools_dirs@
+ strings mysys dbug extra regex storage \
+ vio sql libmysql_r libmysql client scripts \
+ @man_dirs@ tests SSL\
+ BUILD netware os2 @libmysqld_dirs@\
+ @bench_dirs@ support-files server-tools tools
# Relink after clean
linked_sources = linked_client_sources linked_server_sources \
diff --git a/config/ac-macros/ha_berkeley.m4 b/config/ac-macros/ha_berkeley.m4
index 732c7730816..5463f10ca98 100644
--- a/config/ac-macros/ha_berkeley.m4
+++ b/config/ac-macros/ha_berkeley.m4
@@ -187,7 +187,7 @@ AC_DEFUN([MYSQL_SEARCH_FOR_BDB], [
dnl echo ["MYSQL_SEARCH_FOR_BDB"]
bdb_dir_ok="no BerkeleyDB found"
- for test_dir in $srcdir/bdb $srcdir/db-*.*.* /usr/local/BerkeleyDB*; do
+ for test_dir in $srcdir/storage/bdb $srcdir/db-*.*.* /usr/local/BerkeleyDB*; do
dnl echo "-----------> Looking at ($test_dir; `cd $test_dir && pwd`)"
MYSQL_CHECK_BDB_DIR([$test_dir])
if test X"$bdb_dir_ok" = Xsource || test X"$bdb_dir_ok" = Xinstalled; then
diff --git a/config/ac-macros/ha_innodb.m4 b/config/ac-macros/ha_innodb.m4
index 17f0fab3e90..287b77c8851 100644
--- a/config/ac-macros/ha_innodb.m4
+++ b/config/ac-macros/ha_innodb.m4
@@ -20,45 +20,45 @@ AC_DEFUN([MYSQL_CHECK_INNODB], [
AC_MSG_RESULT([Using Innodb])
AC_DEFINE([HAVE_INNOBASE_DB], [1], [Using Innobase DB])
have_innodb="yes"
- innodb_includes="-I../innobase/include"
+ innodb_includes="-I\$(top_builddir)/innobase/include"
innodb_system_libs=""
dnl Some libs are listed several times, in order for gcc to sort out
dnl circular references.
innodb_libs="\
- \$(top_builddir)/innobase/usr/libusr.a\
- \$(top_builddir)/innobase/srv/libsrv.a\
- \$(top_builddir)/innobase/dict/libdict.a\
- \$(top_builddir)/innobase/que/libque.a\
- \$(top_builddir)/innobase/srv/libsrv.a\
- \$(top_builddir)/innobase/ibuf/libibuf.a\
- \$(top_builddir)/innobase/row/librow.a\
- \$(top_builddir)/innobase/pars/libpars.a\
- \$(top_builddir)/innobase/btr/libbtr.a\
- \$(top_builddir)/innobase/trx/libtrx.a\
- \$(top_builddir)/innobase/read/libread.a\
- \$(top_builddir)/innobase/usr/libusr.a\
- \$(top_builddir)/innobase/buf/libbuf.a\
- \$(top_builddir)/innobase/ibuf/libibuf.a\
- \$(top_builddir)/innobase/eval/libeval.a\
- \$(top_builddir)/innobase/log/liblog.a\
- \$(top_builddir)/innobase/fsp/libfsp.a\
- \$(top_builddir)/innobase/fut/libfut.a\
- \$(top_builddir)/innobase/fil/libfil.a\
- \$(top_builddir)/innobase/lock/liblock.a\
- \$(top_builddir)/innobase/mtr/libmtr.a\
- \$(top_builddir)/innobase/page/libpage.a\
- \$(top_builddir)/innobase/rem/librem.a\
- \$(top_builddir)/innobase/thr/libthr.a\
- \$(top_builddir)/innobase/sync/libsync.a\
- \$(top_builddir)/innobase/data/libdata.a\
- \$(top_builddir)/innobase/mach/libmach.a\
- \$(top_builddir)/innobase/ha/libha.a\
- \$(top_builddir)/innobase/dyn/libdyn.a\
- \$(top_builddir)/innobase/mem/libmem.a\
- \$(top_builddir)/innobase/sync/libsync.a\
- \$(top_builddir)/innobase/ut/libut.a\
- \$(top_builddir)/innobase/os/libos.a\
- \$(top_builddir)/innobase/ut/libut.a"
+ \$(top_builddir)/storage/innobase/usr/libusr.a\
+ \$(top_builddir)/storage/innobase/srv/libsrv.a\
+ \$(top_builddir)/storage/innobase/dict/libdict.a\
+ \$(top_builddir)/storage/innobase/que/libque.a\
+ \$(top_builddir)/storage/innobase/srv/libsrv.a\
+ \$(top_builddir)/storage/innobase/ibuf/libibuf.a\
+ \$(top_builddir)/storage/innobase/row/librow.a\
+ \$(top_builddir)/storage/innobase/pars/libpars.a\
+ \$(top_builddir)/storage/innobase/btr/libbtr.a\
+ \$(top_builddir)/storage/innobase/trx/libtrx.a\
+ \$(top_builddir)/storage/innobase/read/libread.a\
+ \$(top_builddir)/storage/innobase/usr/libusr.a\
+ \$(top_builddir)/storage/innobase/buf/libbuf.a\
+ \$(top_builddir)/storage/innobase/ibuf/libibuf.a\
+ \$(top_builddir)/storage/innobase/eval/libeval.a\
+ \$(top_builddir)/storage/innobase/log/liblog.a\
+ \$(top_builddir)/storage/innobase/fsp/libfsp.a\
+ \$(top_builddir)/storage/innobase/fut/libfut.a\
+ \$(top_builddir)/storage/innobase/fil/libfil.a\
+ \$(top_builddir)/storage/innobase/lock/liblock.a\
+ \$(top_builddir)/storage/innobase/mtr/libmtr.a\
+ \$(top_builddir)/storage/innobase/page/libpage.a\
+ \$(top_builddir)/storage/innobase/rem/librem.a\
+ \$(top_builddir)/storage/innobase/thr/libthr.a\
+ \$(top_builddir)/storage/innobase/sync/libsync.a\
+ \$(top_builddir)/storage/innobase/data/libdata.a\
+ \$(top_builddir)/storage/innobase/mach/libmach.a\
+ \$(top_builddir)/storage/innobase/ha/libha.a\
+ \$(top_builddir)/storage/innobase/dyn/libdyn.a\
+ \$(top_builddir)/storage/innobase/mem/libmem.a\
+ \$(top_builddir)/storage/innobase/sync/libsync.a\
+ \$(top_builddir)/storage/innobase/ut/libut.a\
+ \$(top_builddir)/storage/innobase/os/libos.a\
+ \$(top_builddir)/storage/innobase/ut/libut.a"
AC_CHECK_LIB(rt, aio_read, [innodb_system_libs="-lrt"])
;;
diff --git a/config/ac-macros/ha_ndbcluster.m4 b/config/ac-macros/ha_ndbcluster.m4
index dc5e0e73558..28b41b522fc 100644
--- a/config/ac-macros/ha_ndbcluster.m4
+++ b/config/ac-macros/ha_ndbcluster.m4
@@ -128,13 +128,14 @@ AC_DEFUN([MYSQL_CHECK_NDBCLUSTER], [
ndb_mgmclient_libs=
case "$ndbcluster" in
yes )
- AC_MSG_RESULT([Using NDB Cluster])
+ AC_MSG_RESULT([Using NDB Cluster and Partitioning])
AC_DEFINE([HAVE_NDBCLUSTER_DB], [1], [Using Ndb Cluster DB])
+ AC_DEFINE([HAVE_PARTITION_DB], [1], [Builds Partition DB])
have_ndbcluster="yes"
- ndbcluster_includes="-I../ndb/include -I../ndb/include/ndbapi"
- ndbcluster_libs="\$(top_builddir)/ndb/src/.libs/libndbclient.a"
+ ndbcluster_includes="-I\$(top_builddir)/storage/ndb/include -I\$(top_builddir)/storage/ndb/include/ndbapi"
+ ndbcluster_libs="\$(top_builddir)/storage/ndb/src/.libs/libndbclient.a"
ndbcluster_system_libs=""
- ndb_mgmclient_libs="\$(top_builddir)/ndb/src/mgmclient/libndbmgmclient.la"
+ ndb_mgmclient_libs="\$(top_builddir)/storage/ndb/src/mgmclient/libndbmgmclient.la"
MYSQL_CHECK_NDB_OPTIONS
;;
* )
diff --git a/config/ac-macros/ha_partition.m4 b/config/ac-macros/ha_partition.m4
new file mode 100644
index 00000000000..1cfb5135771
--- /dev/null
+++ b/config/ac-macros/ha_partition.m4
@@ -0,0 +1,30 @@
+dnl ---------------------------------------------------------------------------
+dnl Macro: MYSQL_CHECK_PARTITIONDB
+dnl Sets HAVE_PARTITION_DB if --with-partition is used
+dnl ---------------------------------------------------------------------------
+AC_DEFUN([MYSQL_CHECK_PARTITIONDB], [
+ AC_ARG_WITH([partition],
+ [
+ --with-partition
+ Enable the Partition Storage Engine],
+ [partitiondb="$withval"],
+ [partitiondb=no])
+ AC_MSG_CHECKING([for partition])
+
+ case "$partitiondb" in
+ yes )
+ AC_DEFINE([HAVE_PARTITION_DB], [1], [Builds Partition DB])
+ AC_MSG_RESULT([yes])
+ [partitiondb=yes]
+ ;;
+ * )
+ AC_MSG_RESULT([no])
+ [partitiondb=no]
+ ;;
+ esac
+
+])
+dnl ---------------------------------------------------------------------------
+dnl END OF MYSQL_CHECK_PARTITIONDB SECTION
+dnl ---------------------------------------------------------------------------
+
diff --git a/configure.in b/configure.in
index 12293ecf5ec..684e6331c27 100644
--- a/configure.in
+++ b/configure.in
@@ -5,8 +5,9 @@ AC_PREREQ(2.57)dnl Minimum Autoconf version required.
AC_INIT(sql/mysqld.cc)
AC_CANONICAL_SYSTEM
+# The Docs Makefile.am parses this line!
# Don't forget to also update the NDB lines below.
-AM_INIT_AUTOMAKE(mysql, 5.0.10-beta)
+AM_INIT_AUTOMAKE(mysql, 5.1.0-alpha)
AM_CONFIG_HEADER(config.h)
PROTOCOL_VERSION=10
@@ -16,9 +17,9 @@ SHARED_LIB_VERSION=15:0:0
# ndb version
NDB_VERSION_MAJOR=5
-NDB_VERSION_MINOR=0
-NDB_VERSION_BUILD=10
-NDB_VERSION_STATUS="beta"
+NDB_VERSION_MINOR=1
+NDB_VERSION_BUILD=0
+NDB_VERSION_STATUS="alpha"
# Set all version vars based on $VERSION. How do we do this more elegant ?
# Remember that regexps needs to quote [ and ] since this is run through m4
@@ -2436,6 +2437,7 @@ MYSQL_CHECK_CSVDB
MYSQL_CHECK_BLACKHOLEDB
MYSQL_CHECK_NDBCLUSTER
MYSQL_CHECK_FEDERATED
+MYSQL_CHECK_PARTITIONDB
# If we have threads generate some library functions and test programs
sql_server_dirs=
@@ -2496,7 +2498,7 @@ then
if test X"$have_berkeley_db" != Xyes; then
# we must build berkeley db from source
sql_server_dirs="$sql_server_dirs $have_berkeley_db"
- AC_CONFIG_FILES(bdb/Makefile)
+ AC_CONFIG_FILES(storage/bdb/Makefile)
echo "CONFIGURING FOR BERKELEY DB"
bdb_conf_flags="--disable-shared"
@@ -2522,8 +2524,10 @@ then
rel_srcdir=
case "$srcdir" in
/* ) rel_srcdir="$srcdir" ;;
- * ) rel_srcdir="../../$srcdir" ;;
+ * ) rel_srcdir="../../../$srcdir" ;;
esac
+ echo $bdb/build_unix
+ echo $rel_srcdir/$bdb/dist/configure
(cd $bdb/build_unix && \
sh $rel_srcdir/$bdb/dist/configure $bdb_conf_flags) || \
AC_MSG_ERROR([could not configure Berkeley DB])
@@ -2536,12 +2540,12 @@ dnl echo "bdb = '$bdb'; inc = '$bdb_includes', lib = '$bdb_libs'"
else
if test -d bdb; then :
else
- mkdir bdb && mkdir bdb/build_unix
+ mkdir storage/bdb && mkdir storage/bdb/build_unix
fi
- if test -r bdb/build_unix/db.h; then :
+ if test -r storage/bdb/build_unix/db.h; then :
else
- cat <<EOF > bdb/build_unix/db.h
+ cat <<EOF > storage/bdb/build_unix/db.h
This file is a placeholder to fool make. The way that automake
and GNU make work together causes some files to depend on this
@@ -2564,8 +2568,8 @@ EOF
if test X"$have_innodb" = Xyes
then
innodb_conf_flags=""
- sql_server_dirs="$sql_server_dirs innobase"
- AC_CONFIG_SUBDIRS(innobase)
+ sql_server_dirs="$sql_server_dirs storage/innobase"
+ AC_CONFIG_SUBDIRS(storage/innobase)
fi
case $SYSTEM_TYPE-$MACHINE_TYPE-$ac_cv_prog_gcc-$have_ndbcluster in
@@ -2587,12 +2591,12 @@ esac
echo
exit 1
fi
- sql_server_dirs="$sql_server_dirs ndb"
+ sql_server_dirs="$sql_server_dirs storage/ndb"
fi
#
# END of configuration for optional table handlers
#
- sql_server_dirs="$sql_server_dirs myisam myisammrg heap vio sql"
+ sql_server_dirs="$sql_server_dirs storage/myisam storage/myisammrg storage/heap vio sql"
fi
@@ -2725,51 +2729,53 @@ AC_SUBST([NDB_SIZEOF_INT])
AC_SUBST([NDB_SIZEOF_LONG])
AC_SUBST([NDB_SIZEOF_LONG_LONG])
-AC_CONFIG_FILES(ndb/Makefile ndb/include/Makefile dnl
- ndb/src/Makefile ndb/src/common/Makefile dnl
- ndb/docs/Makefile dnl
- ndb/tools/Makefile dnl
- ndb/src/common/debugger/Makefile dnl
- ndb/src/common/debugger/signaldata/Makefile dnl
- ndb/src/common/portlib/Makefile dnl
- ndb/src/common/util/Makefile dnl
- ndb/src/common/logger/Makefile dnl
- ndb/src/common/transporter/Makefile dnl
- ndb/src/common/mgmcommon/Makefile dnl
- ndb/src/kernel/Makefile dnl
- ndb/src/kernel/error/Makefile dnl
- ndb/src/kernel/blocks/Makefile dnl
- ndb/src/kernel/blocks/cmvmi/Makefile dnl
- ndb/src/kernel/blocks/dbacc/Makefile dnl
- ndb/src/kernel/blocks/dbdict/Makefile dnl
- ndb/src/kernel/blocks/dbdih/Makefile dnl
- ndb/src/kernel/blocks/dblqh/Makefile dnl
- ndb/src/kernel/blocks/dbtc/Makefile dnl
- ndb/src/kernel/blocks/dbtup/Makefile dnl
- ndb/src/kernel/blocks/ndbfs/Makefile dnl
- ndb/src/kernel/blocks/ndbcntr/Makefile dnl
- ndb/src/kernel/blocks/qmgr/Makefile dnl
- ndb/src/kernel/blocks/trix/Makefile dnl
- ndb/src/kernel/blocks/backup/Makefile dnl
- ndb/src/kernel/blocks/dbutil/Makefile dnl
- ndb/src/kernel/blocks/suma/Makefile dnl
- ndb/src/kernel/blocks/grep/Makefile dnl
- ndb/src/kernel/blocks/dbtux/Makefile dnl
- ndb/src/kernel/vm/Makefile dnl
- ndb/src/mgmapi/Makefile dnl
- ndb/src/ndbapi/Makefile dnl
- ndb/src/mgmsrv/Makefile dnl
- ndb/src/mgmclient/Makefile dnl
- ndb/src/cw/Makefile dnl
- ndb/src/cw/cpcd/Makefile dnl
- ndb/test/Makefile dnl
- ndb/test/src/Makefile dnl
- ndb/test/ndbapi/Makefile dnl
- ndb/test/ndbapi/bank/Makefile dnl
- ndb/test/tools/Makefile dnl
- ndb/test/run-test/Makefile mysql-test/ndb/Makefile dnl
- ndb/include/ndb_version.h ndb/include/ndb_global.h dnl
- ndb/include/ndb_types.h dnl
+AC_CONFIG_FILES(storage/ndb/Makefile storage/ndb/include/Makefile dnl
+ storage/ndb/src/Makefile storage/ndb/src/common/Makefile dnl
+ storage/ndb/docs/Makefile dnl
+ storage/ndb/tools/Makefile dnl
+ storage/ndb/src/common/debugger/Makefile dnl
+ storage/ndb/src/common/debugger/signaldata/Makefile dnl
+ storage/ndb/src/common/portlib/Makefile dnl
+ storage/ndb/src/common/util/Makefile dnl
+ storage/ndb/src/common/logger/Makefile dnl
+ storage/ndb/src/common/transporter/Makefile dnl
+ storage/ndb/src/common/mgmcommon/Makefile dnl
+ storage/ndb/src/kernel/Makefile dnl
+ storage/ndb/src/kernel/error/Makefile dnl
+ storage/ndb/src/kernel/blocks/Makefile dnl
+ storage/ndb/src/kernel/blocks/cmvmi/Makefile dnl
+ storage/ndb/src/kernel/blocks/dbacc/Makefile dnl
+ storage/ndb/src/kernel/blocks/dbdict/Makefile dnl
+ storage/ndb/src/kernel/blocks/dbdih/Makefile dnl
+ storage/ndb/src/kernel/blocks/dblqh/Makefile dnl
+ storage/ndb/src/kernel/blocks/dbtc/Makefile dnl
+ storage/ndb/src/kernel/blocks/dbtup/Makefile dnl
+ storage/ndb/src/kernel/blocks/ndbfs/Makefile dnl
+ storage/ndb/src/kernel/blocks/ndbcntr/Makefile dnl
+ storage/ndb/src/kernel/blocks/qmgr/Makefile dnl
+ storage/ndb/src/kernel/blocks/trix/Makefile dnl
+ storage/ndb/src/kernel/blocks/backup/Makefile dnl
+ storage/ndb/src/kernel/blocks/dbutil/Makefile dnl
+ storage/ndb/src/kernel/blocks/suma/Makefile dnl
+ storage/ndb/src/kernel/blocks/grep/Makefile dnl
+ storage/ndb/src/kernel/blocks/dbtux/Makefile dnl
+ storage/ndb/src/kernel/vm/Makefile dnl
+ storage/ndb/src/mgmapi/Makefile dnl
+ storage/ndb/src/ndbapi/Makefile dnl
+ storage/ndb/src/mgmsrv/Makefile dnl
+ storage/ndb/src/mgmclient/Makefile dnl
+ storage/ndb/src/cw/Makefile dnl
+ storage/ndb/src/cw/cpcd/Makefile dnl
+ storage/ndb/test/Makefile dnl
+ storage/ndb/test/src/Makefile dnl
+ storage/ndb/test/ndbapi/Makefile dnl
+ storage/ndb/test/ndbapi/bank/Makefile dnl
+ storage/ndb/test/tools/Makefile dnl
+ storage/ndb/test/run-test/Makefile dnl
+ storage/ndb/include/ndb_version.h dnl
+ storage/ndb/include/ndb_global.h dnl
+ storage/ndb/include/ndb_types.h dnl
+ mysql-test/ndb/Makefile dnl
)
fi
@@ -2777,8 +2783,8 @@ AC_SUBST(MAKE_BINARY_DISTRIBUTION_OPTIONS)
# Output results
AC_CONFIG_FILES(Makefile extra/Makefile mysys/Makefile dnl
- strings/Makefile regex/Makefile heap/Makefile dnl
- myisam/Makefile myisammrg/Makefile dnl
+ strings/Makefile regex/Makefile storage/Makefile storage/heap/Makefile dnl
+ storage/myisam/Makefile storage/myisammrg/Makefile dnl
os2/Makefile os2/include/Makefile os2/include/sys/Makefile dnl
man/Makefile BUILD/Makefile vio/Makefile dnl
libmysql_r/Makefile libmysqld/Makefile libmysqld/examples/Makefile dnl
diff --git a/extra/perror.c b/extra/perror.c
index dedd558e4cf..551dc3096d4 100644
--- a/extra/perror.c
+++ b/extra/perror.c
@@ -24,7 +24,7 @@
#include <errno.h>
#include <my_getopt.h>
#ifdef HAVE_NDBCLUSTER_DB
-#include "../ndb/src/ndbapi/ndberror.c"
+#include "../storage/ndb/src/ndbapi/ndberror.c"
#endif
static my_bool verbose, print_all_codes;
diff --git a/heap/Makefile.am b/heap/Makefile.am
deleted file mode 100644
index 567c7774751..00000000000
--- a/heap/Makefile.am
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include
-LDADD = libheap.a ../mysys/libmysys.a ../dbug/libdbug.a \
- ../strings/libmystrings.a
-pkglib_LIBRARIES = libheap.a
-noinst_PROGRAMS = hp_test1 hp_test2
-hp_test1_LDFLAGS = @NOINST_LDFLAGS@
-hp_test2_LDFLAGS = @NOINST_LDFLAGS@
-noinst_HEADERS = heapdef.h
-libheap_a_SOURCES = hp_open.c hp_extra.c hp_close.c hp_panic.c hp_info.c \
- hp_rrnd.c hp_scan.c hp_update.c hp_write.c hp_delete.c \
- hp_rsame.c hp_create.c hp_rename.c hp_rfirst.c \
- hp_rnext.c hp_rlast.c hp_rprev.c hp_clear.c \
- hp_rkey.c hp_block.c \
- hp_hash.c _check.c _rectest.c hp_static.c
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/include/Makefile.am b/include/Makefile.am
index 5f426843950..53b83bd7d88 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -28,7 +28,7 @@ noinst_HEADERS = config-win.h config-os2.h config-netware.h \
myisam.h myisampack.h myisammrg.h ft_global.h\
mysys_err.h my_base.h help_start.h help_end.h \
my_nosys.h my_alarm.h queues.h rijndael.h sha1.h \
- my_aes.h my_tree.h hash.h thr_alarm.h \
+ my_aes.h my_tree.h my_trie.h hash.h thr_alarm.h \
thr_lock.h t_ctype.h violite.h md5.h \
mysql_version.h.in my_handler.h my_time.h decimal.h
diff --git a/include/my_base.h b/include/my_base.h
index c76cf8c604e..0a2f5c2393e 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -388,6 +388,7 @@ enum data_file_type {
#define EQ_RANGE 32
#define NULL_RANGE 64
#define GEOM_FLAG 128
+#define SKIP_RANGE 256
typedef struct st_key_range
{
diff --git a/include/my_bitmap.h b/include/my_bitmap.h
index f4fe28266e4..98dd49a1228 100644
--- a/include/my_bitmap.h
+++ b/include/my_bitmap.h
@@ -21,10 +21,13 @@
#define MY_BIT_NONE (~(uint) 0)
+
typedef struct st_bitmap
{
- uchar *bitmap;
- uint bitmap_size; /* number of bytes occupied by the above */
+ uint32 *bitmap;
+ uint n_bits; /* number of bits in the bitmap */
+ uint32 last_word_mask;
+ uint32 *last_word_ptr;
/*
mutex will be acquired for the duration of each bitmap operation if
thread_safe flag in bitmap_init was set. Otherwise, we optimize by not
@@ -38,33 +41,98 @@ typedef struct st_bitmap
#ifdef __cplusplus
extern "C" {
#endif
-extern my_bool bitmap_cmp(const MY_BITMAP *map1, const MY_BITMAP *map2);
-extern my_bool bitmap_init(MY_BITMAP *map, uchar *buf, uint bitmap_size, my_bool thread_safe);
+extern my_bool bitmap_init(MY_BITMAP *map, uint32 *buf, uint n_bits, my_bool thread_safe);
extern my_bool bitmap_is_clear_all(const MY_BITMAP *map);
extern my_bool bitmap_is_prefix(const MY_BITMAP *map, uint prefix_size);
-extern my_bool bitmap_is_set(const MY_BITMAP *map, uint bitmap_bit);
extern my_bool bitmap_is_set_all(const MY_BITMAP *map);
extern my_bool bitmap_is_subset(const MY_BITMAP *map1, const MY_BITMAP *map2);
extern my_bool bitmap_test_and_set(MY_BITMAP *map, uint bitmap_bit);
extern my_bool bitmap_fast_test_and_set(MY_BITMAP *map, uint bitmap_bit);
extern uint bitmap_set_next(MY_BITMAP *map);
extern uint bitmap_get_first(const MY_BITMAP *map);
+extern uint bitmap_get_first_set(const MY_BITMAP *map);
extern uint bitmap_bits_set(const MY_BITMAP *map);
-extern void bitmap_clear_all(MY_BITMAP *map);
-extern void bitmap_clear_bit(MY_BITMAP *map, uint bitmap_bit);
extern void bitmap_free(MY_BITMAP *map);
-extern void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2);
extern void bitmap_set_above(MY_BITMAP *map, uint from_byte, uint use_bit);
-extern void bitmap_set_all(MY_BITMAP *map);
-extern void bitmap_set_bit(MY_BITMAP *map, uint bitmap_bit);
extern void bitmap_set_prefix(MY_BITMAP *map, uint prefix_size);
+extern void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2);
extern void bitmap_subtract(MY_BITMAP *map, const MY_BITMAP *map2);
extern void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2);
+extern uint bitmap_lock_set_next(MY_BITMAP *map);
+extern void bitmap_lock_clear_bit(MY_BITMAP *map, uint bitmap_bit);
+#ifdef NOT_USED
+extern uint bitmap_lock_bits_set(const MY_BITMAP *map);
+extern my_bool bitmap_lock_is_set_all(const MY_BITMAP *map);
+extern uint bitmap_lock_get_first(const MY_BITMAP *map);
+extern uint bitmap_lock_get_first_set(const MY_BITMAP *map);
+extern my_bool bitmap_lock_is_subset(const MY_BITMAP *map1,
+ const MY_BITMAP *map2);
+extern my_bool bitmap_lock_is_prefix(const MY_BITMAP *map, uint prefix_size);
+extern my_bool bitmap_lock_is_set(const MY_BITMAP *map, uint bitmap_bit);
+extern my_bool bitmap_lock_is_clear_all(const MY_BITMAP *map);
+extern my_bool bitmap_lock_cmp(const MY_BITMAP *map1, const MY_BITMAP *map2);
+extern void bitmap_lock_set_all(MY_BITMAP *map);
+extern void bitmap_lock_clear_all(MY_BITMAP *map);
+extern void bitmap_lock_set_bit(MY_BITMAP *map, uint bitmap_bit);
+extern void bitmap_lock_flip_bit(MY_BITMAP *map, uint bitmap_bit);
+extern void bitmap_lock_set_prefix(MY_BITMAP *map, uint prefix_size);
+extern void bitmap_lock_intersect(MY_BITMAP *map, const MY_BITMAP *map2);
+extern void bitmap_lock_subtract(MY_BITMAP *map, const MY_BITMAP *map2);
+extern void bitmap_lock_union(MY_BITMAP *map, const MY_BITMAP *map2);
+extern void bitmap_lock_xor(MY_BITMAP *map, const MY_BITMAP *map2);
+extern void bitmap_lock_invert(MY_BITMAP *map);
+#endif
/* Fast, not thread safe, bitmap functions */
-#define bitmap_fast_set_bit(MAP, BIT) (MAP)->bitmap[(BIT) / 8] |= (1 << ((BIT) & 7))
-#define bitmap_fast_clear_bit(MAP, BIT) (MAP)->bitmap[(BIT) / 8] &= ~ (1 << ((BIT) & 7))
-#define bitmap_fast_is_set(MAP, BIT) (MAP)->bitmap[(BIT) / 8] & (1 << ((BIT) & 7))
+#define no_bytes_in_map(map) (((map)->n_bits + 7)/8)
+#define no_words_in_map(map) (((map)->n_bits + 31)/32)
+#define bytes_word_aligned(bytes) (4*((bytes + 3)/4))
+#define _bitmap_set_bit(MAP, BIT) (((uchar*)(MAP)->bitmap)[(BIT) / 8] \
+ |= (1 << ((BIT) & 7)))
+#define _bitmap_flip_bit(MAP, BIT) (((uchar*)(MAP)->bitmap)[(BIT) / 8] \
+ ^= (1 << ((BIT) & 7)))
+#define _bitmap_clear_bit(MAP, BIT) (((uchar*)(MAP)->bitmap)[(BIT) / 8] \
+ &= ~ (1 << ((BIT) & 7)))
+#define _bitmap_is_set(MAP, BIT) (((uchar*)(MAP)->bitmap)[(BIT) / 8] \
+ & (1 << ((BIT) & 7)))
+#ifndef DBUG_OFF
+inline uint32
+bitmap_set_bit(MY_BITMAP *map,uint bit)
+{
+ DBUG_ASSERT(bit < (map)->n_bits);
+ return _bitmap_set_bit(map,bit);
+}
+inline uint32
+bitmap_flip_bit(MY_BITMAP *map,uint bit)
+{
+ DBUG_ASSERT(bit < (map)->n_bits);
+ return _bitmap_flip_bit(map,bit);
+}
+inline uint32
+bitmap_clear_bit(MY_BITMAP *map,uint bit)
+{
+ DBUG_ASSERT(bit < (map)->n_bits);
+ return _bitmap_clear_bit(map,bit);
+}
+inline uint32
+bitmap_is_set(const MY_BITMAP *map,uint bit)
+{
+ DBUG_ASSERT(bit < (map)->n_bits);
+ return _bitmap_is_set(map,bit);
+}
+#else
+#define bitmap_set_bit(MAP, BIT) _bitmap_set_bit(MAP, BIT)
+#define bitmap_flip_bit(MAP, BIT) _bitmap_flip_bit(MAP, BIT)
+#define bitmap_clear_bit(MAP, BIT) _bitmap_clear_bit(MAP, BIT)
+#define bitmap_is_set(MAP, BIT) _bitmap_is_set(MAP, BIT)
+#endif
+#define bitmap_cmp(MAP1, MAP2) \
+ (memcmp((MAP1)->bitmap, (MAP2)->bitmap, 4*no_words_in_map((MAP1)))==0)
+#define bitmap_clear_all(MAP) \
+ { memset((MAP)->bitmap, 0, 4*no_words_in_map((MAP))); \
+ *(MAP)->last_word_ptr|= (MAP)->last_word_mask; }
+#define bitmap_set_all(MAP) \
+ (memset((MAP)->bitmap, 0xFF, 4*no_words_in_map((MAP))))
#ifdef __cplusplus
}
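
The my_bitmap.h rework above switches the bitmap to 32-bit words, stores the number of bits (n_bits) instead of a byte count, and turns the bit accessors into DBUG_ASSERT-checked inlines in debug builds. A minimal usage sketch of the revised API follows; it assumes the code is compiled inside the server tree, and it assumes bitmap_init() still allocates the buffer itself when passed NULL (behaviour carried over from the old implementation). Note from the macros above that bitmap_clear_all() zeroes the words and then ORs last_word_mask back into the last word, so the unused tail bits stay set.

#include "my_global.h"   /* assumed: compiled inside the MySQL source tree */
#include "my_bitmap.h"

/* Exercise the reworked bitmap API: n_bits instead of a byte count, and
   bounds-checked bit accessors in debug builds. */
static int bitmap_example(void)
{
  MY_BITMAP map;

  /* NULL buffer: bitmap_init() is assumed to allocate enough 32-bit
     words for the 40 requested bits. */
  if (bitmap_init(&map, NULL, 40, FALSE))
    return 1;

  bitmap_clear_all(&map);
  bitmap_set_bit(&map, 7);         /* DBUG_ASSERT(7 < map.n_bits) in debug builds */
  bitmap_flip_bit(&map, 8);
  if (!bitmap_is_set(&map, 7) || !bitmap_is_set(&map, 8))
  {
    bitmap_free(&map);
    return 1;
  }
  bitmap_clear_bit(&map, 8);

  bitmap_free(&map);
  return 0;
}
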
diff --git a/include/my_global.h b/include/my_global.h
index 95763f64e55..f81216e3add 100644
--- a/include/my_global.h
+++ b/include/my_global.h
@@ -101,6 +101,94 @@
#define unlikely(x) __builtin_expect((x),0)
+/*
+  The macros below are useful for optimising places where it has been
+  discovered that cache misses stall the process and where a prefetch
+  of the cache line can improve matters. They are available in GCC 3.1.1
+  and later versions.
+  PREFETCH_READ says that addr is going to be used for reading and that
+  it should be kept in the caches for a while if possible.
+  PREFETCH_WRITE additionally says that the cached item is likely to be
+  updated.
+  The *LOCALITY variants are available mostly for experimentation and
+  should only be used if they are verified to improve matters.
+  For more details see the GCC manual (GCC 3.1.1 and later).
+*/
+
+#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR > 10)
+#define PREFETCH_READ(addr) __builtin_prefetch(addr, 0, 3)
+#define PREFETCH_WRITE(addr) \
+ __builtin_prefetch(addr, 1, 3)
+#define PREFETCH_READ_LOCALITY(addr, locality) \
+ __builtin_prefetch(addr, 0, locality)
+#define PREFETCH_WRITE_LOCALITY(addr, locality) \
+ __builtin_prefetch(addr, 1, locality)
+#else
+#define PREFETCH_READ(addr)
+#define PREFETCH_READ_LOCALITY(addr, locality)
+#define PREFETCH_WRITE(addr)
+#define PREFETCH_WRITE_LOCALITY(addr, locality)
+#endif
+
+/*
+  The following macro is used to ensure that code used in most SQL
+  statements, and definitely in core parts of SQL processing, is kept
+  in a code segment of its own. This reduces the risk of frequently
+  executed code overlapping in the CPU caches, which can otherwise be
+  a cause of big performance problems.
+  Routines should be put in this category with care; when they are,
+  one should also strive to move as much as possible of the error
+  handling (and other uncommon code of the routine) into a separate
+  function, to avoid pulling too much code into this code segment.
+
+  It is very easy to use: simply add HOT_METHOD at the end of the
+  function declaration.
+  For more details see the GCC manual (GCC 2.95 and later).
+*/
+
+#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR > 94)
+#define HOT_METHOD \
+ __attribute__ ((section ("hot_code_section")))
+#else
+#define HOT_METHOD
+#endif
+
+/*
+  The following macro is used to ensure that popular global variables
+  are placed next to each other in a dedicated data section, so that
+  they do not contend for cache lines with other data.
+
+  It is very easy to use: simply add HOT_DATA at the end of the
+  declaration of the variable. The variable must be initialised,
+  because of the way the linker works, so a declaration using HOT_DATA
+  should look like:
+  uint global_hot_data HOT_DATA = 0;
+  For more details see the GCC manual (GCC 2.95 and later).
+*/
+
+#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR > 94)
+#define HOT_DATA \
+ __attribute__ ((section ("hot_data_section")))
+#else
+#define HOT_DATA
+#endif
+
+
+/*
+  The following macros give somewhat finer control over inlining than
+  usual: they ensure that inlining always or never occurs, independent
+  of the compilation mode.
+  For more details see the GCC manual (GCC 3.1.1 and later).
+*/
+
+#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR > 10)
+#define ALWAYS_INLINE __attribute__ ((always_inline))
+#define NEVER_INLINE __attribute__ ((noinline))
+#else
+#define ALWAYS_INLINE
+#define NEVER_INLINE
+#endif
+
+
/* Fix problem with S_ISLNK() on Linux */
#if defined(TARGET_OS_LINUX)
#undef _GNU_SOURCE
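
To make the new my_global.h macros above concrete, here is a hedged sketch of how they attach to declarations. It assumes compilation inside the server tree with a GCC recent enough for the attributes to expand; on older compilers every macro expands to nothing and the code builds unchanged. The function and variable names are illustrative, not part of the patch.

#include "my_global.h"   /* assumed: compiled inside the MySQL source tree */

/* Popular global placed in the hot data section; it must be initialised,
   as the comment in my_global.h explains. */
uint global_hot_counter HOT_DATA = 0;

/* Frequently executed routine kept in the hot code section: HOT_METHOD is
   appended to the declaration. */
static uint sum_slots(const uint *slots, uint count) HOT_METHOD;

/* Uncommon error path kept out of line so it does not crowd the hot
   code segment. */
NEVER_INLINE static uint report_overflow(void)
{
  return (uint) ~0;
}

static uint sum_slots(const uint *slots, uint count)
{
  uint i, total= 0;
  for (i= 0; i < count; i++)
  {
    if (i + 8 < count)
      PREFETCH_READ(&slots[i + 8]); /* hint: this line will be read soon */
    if (total + slots[i] < total)   /* unsigned wrap check */
      return report_overflow();
    total+= slots[i];
  }
  return total;
}

ALWAYS_INLINE is used the same way on the declaration of an inline helper; it is omitted here only to keep the sketch short.
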
diff --git a/include/my_trie.h b/include/my_trie.h
new file mode 100644
index 00000000000..a8534cb11c1
--- /dev/null
+++ b/include/my_trie.h
@@ -0,0 +1,142 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef _trie_h
+#define _trie_h
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct st_trie_node
+{
+ uint16 leaf; /* Depth from root node if match, 0 else */
+ byte c; /* Label on this edge */
+ struct st_trie_node *next; /* Next label */
+ struct st_trie_node *links; /* Array of edges leaving this node */
+ struct st_trie_node *fail; /* AC failure function */
+} TRIE_NODE;
+
+typedef struct st_trie
+{
+ TRIE_NODE root;
+ MEM_ROOT mem_root;
+ CHARSET_INFO *charset;
+ uint32 nnodes;
+ uint32 nwords;
+} TRIE;
+
+typedef struct st_ac_trie_state
+{
+ TRIE *trie;
+ TRIE_NODE *node;
+} AC_TRIE_STATE;
+
+extern TRIE *trie_init (TRIE *trie, CHARSET_INFO *charset);
+extern void trie_free (TRIE *trie);
+extern my_bool trie_insert (TRIE *trie, const byte *key, uint keylen);
+extern my_bool ac_trie_prepare (TRIE *trie);
+extern void ac_trie_init (TRIE *trie, AC_TRIE_STATE *state);
+
+
+/* `trie_goto' is an internal function and shouldn't be used directly. */
+
+static inline TRIE_NODE *trie_goto (TRIE_NODE *root, TRIE_NODE *node, byte c)
+{
+ TRIE_NODE *next;
+ DBUG_ENTER("trie_goto");
+ for (next= node->links; next; next= next->next)
+ if (next->c == c)
+ DBUG_RETURN(next);
+ if (root == node)
+ DBUG_RETURN(root);
+ DBUG_RETURN(NULL);
+}
+
+
+/*
+ SYNOPSIS
+ int ac_trie_next (AC_TRIE_STATE *state, byte *c);
+ state - valid pointer to `AC_TRIE_STATE'
+ c - character to lookup
+
+ DESCRIPTION
+ Implementation of search using Aho-Corasick automaton.
+ Performs char-by-char search.
+
+ RETURN VALUE
+ `ac_trie_next' returns length of matched word or 0.
+*/
+
+static inline int ac_trie_next (AC_TRIE_STATE *state, byte *c)
+{
+ TRIE_NODE *root, *node;
+ DBUG_ENTER("ac_trie_next");
+ DBUG_ASSERT(state && c);
+ root= &state->trie->root;
+ node= state->node;
+ while (! (state->node= trie_goto(root, node, *c)))
+ node= node->fail;
+ DBUG_RETURN(state->node->leaf);
+}
+
+
+/*
+ SYNOPSIS
+ my_bool trie_search (TRIE *trie, const byte *key, uint keylen);
+ trie - valid pointer to `TRIE'
+ key - valid pointer to key to insert
+ keylen - non-0 key length
+
+ DESCRIPTION
+ Performs key lookup in trie.
+
+ RETURN VALUE
+ `trie_search' returns `true' if key is in `trie'. Otherwise,
+ `false' is returned.
+
+ NOTES
+    Sequential search here is "best by test": the arrays are very short,
+    so binary search or hashing would add complexity that outweighs any
+    speed gain, especially because the compiler can optimize a simple
+    sequential loop better (tested).
+*/
+
+static inline my_bool trie_search (TRIE *trie, const byte *key, uint keylen)
+{
+ TRIE_NODE *node;
+ uint k;
+ DBUG_ENTER("trie_search");
+ DBUG_ASSERT(trie && key && keylen);
+ node= &trie->root;
+
+ for (k= 0; k < keylen; k++)
+ {
+ byte p;
+ if (! (node= node->links))
+ DBUG_RETURN(FALSE);
+ p= key[k];
+ while (p != node->c)
+ if (! (node= node->next))
+ DBUG_RETURN(FALSE);
+ }
+
+ DBUG_RETURN(node->leaf > 0);
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif
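
A short sketch of how the new my_trie.h API above fits together, combining an exact trie_search() lookup with an Aho-Corasick scan via ac_trie_prepare()/ac_trie_next(). It assumes the usual server include paths, uses default_charset_info from m_ctype.h for the charset, and assumes that trie_init() allocates the TRIE itself when its first argument is NULL and that the my_bool functions return non-zero on failure (mysys conventions, not stated in the header).

#include "my_global.h"   /* assumed: compiled inside the MySQL source tree */
#include "my_sys.h"      /* assumed to provide MEM_ROOT and the DBUG macros
                            that the my_trie.h inlines rely on */
#include "m_ctype.h"     /* CHARSET_INFO, default_charset_info */
#include "my_trie.h"

/* Build a small trie, do an exact lookup, then scan a buffer with the
   Aho-Corasick automaton.  Returns the number of matches, -1 on error. */
static int trie_example(byte *text, uint text_len)
{
  TRIE *trie;
  AC_TRIE_STATE state;
  uint i, hits= 0;

  /* Assumption: a NULL first argument makes trie_init() allocate the TRIE. */
  if (!(trie= trie_init(NULL, default_charset_info)))
    return -1;
  trie_insert(trie, (const byte*) "sql", 3);
  trie_insert(trie, (const byte*) "ndb", 3);

  if (trie_search(trie, (const byte*) "sql", 3))
    hits++;                          /* exact-match lookup */

  /* ac_trie_next() returns the length of a word ending at the current
     character, or 0 if nothing matches there. */
  if (ac_trie_prepare(trie))         /* assumed non-zero on failure */
  {
    trie_free(trie);
    return -1;
  }
  ac_trie_init(trie, &state);
  for (i= 0; i < text_len; i++)
    if (ac_trie_next(&state, &text[i]))
      hits++;
  trie_free(trie);
  return (int) hits;
}
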
diff --git a/include/mysql_com.h b/include/mysql_com.h
index 969fba4a433..8dcc50e15ec 100644
--- a/include/mysql_com.h
+++ b/include/mysql_com.h
@@ -88,6 +88,8 @@ enum enum_server_command
#define GROUP_FLAG 32768 /* Intern: Group field */
#define UNIQUE_FLAG 65536 /* Intern: Used by sql_yacc */
#define BINCMP_FLAG 131072 /* Intern: Used by sql_yacc */
+#define GET_FIXED_FIELDS_FLAG (1 << 18) /* Used to get fields in item tree */
+#define FIELD_IN_PART_FUNC_FLAG (1 << 19)/* Field part of partition func */
#define REFRESH_GRANT 1 /* Refresh grant tables */
#define REFRESH_LOG 2 /* Start on new log file */
diff --git a/include/queues.h b/include/queues.h
index 02ab768198e..a8b676b763c 100644
--- a/include/queues.h
+++ b/include/queues.h
@@ -41,6 +41,9 @@ typedef struct st_queue {
#define queue_element(queue,index) ((queue)->root[index+1])
#define queue_end(queue) ((queue)->root[(queue)->elements])
#define queue_replaced(queue) _downheap(queue,1)
+#define queue_set_cmp_arg(queue, set_arg) (queue)->first_cmp_arg= set_arg
+#define queue_set_max_at_top(queue, set_arg) \
+ (queue)->max_at_top= set_arg ? (-1 ^ 1) : 0
typedef int (*queue_compare)(void *,byte *, byte *);
int init_queue(QUEUE *queue,uint max_elements,uint offset_to_key,
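
The two queue macros added above are plain field setters on the QUEUE structure; a minimal sketch of their intended use follows. The QUEUE is assumed to have been set up elsewhere with init_queue(), and the names here are illustrative.

#include "my_global.h"   /* assumed: compiled inside the MySQL source tree */
#include "queues.h"

/* Attach a per-comparison argument and ask for max-at-top ordering on an
   already initialised queue. */
static void tune_queue(QUEUE *queue, my_off_t *current_doc)
{
  /* Pointer handed to the compare callback as its first argument. */
  queue_set_cmp_arg(queue, (void*) current_doc);

  /* A non-zero argument stores -2 (-1 ^ 1) in max_at_top, which the queue
     code uses to keep the maximum element at the top, as the name says. */
  queue_set_max_at_top(queue, 1);
}

For comparison, the removed myisam/ft_boolean_search.c further down assigns queue.first_cmp_arg directly in ft_boolean_read_next(); the new queue_set_cmp_arg macro wraps that pattern.
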
diff --git a/innobase/include/Makefile.i b/innobase/include/Makefile.i
deleted file mode 100644
index f3e3fbe989e..00000000000
--- a/innobase/include/Makefile.i
+++ /dev/null
@@ -1,6 +0,0 @@
-# Makefile included in Makefile.am in every subdirectory
-
-INCLUDES = -I$(srcdir)/../include -I$(srcdir)/../../include -I../../include
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/libmysqld/Makefile.am b/libmysqld/Makefile.am
index 9aef03f20d2..9c322478ad9 100644
--- a/libmysqld/Makefile.am
+++ b/libmysqld/Makefile.am
@@ -62,7 +62,7 @@ sqlsources = derror.cc field.cc field_conv.cc strfunc.cc filesort.cc \
unireg.cc uniques.cc stacktrace.c sql_union.cc hash_filo.cc \
spatial.cc gstream.cc sql_help.cc tztime.cc protocol_cursor.cc \
sp_head.cc sp_pcontext.cc sp.cc sp_cache.cc sp_rcontext.cc \
- parse_file.cc sql_view.cc sql_trigger.cc my_decimal.cc \
+ parse_file.cc rpl_filter.cc sql_view.cc sql_trigger.cc my_decimal.cc \
ha_blackhole.cc
libmysqld_int_a_SOURCES= $(libmysqld_sources) $(libmysqlsources) $(sqlsources) $(sqlexamplessources)
@@ -73,9 +73,9 @@ sql_yacc.cc sql_yacc.h: $(top_srcdir)/sql/sql_yacc.yy
# The following libraries should be included in libmysqld.a
INC_LIB= $(top_builddir)/regex/libregex.a \
- $(top_builddir)/myisam/libmyisam.a \
- $(top_builddir)/myisammrg/libmyisammrg.a \
- $(top_builddir)/heap/libheap.a \
+ $(top_builddir)/storage/myisam/libmyisam.a \
+ $(top_builddir)/storage/myisammrg/libmyisammrg.a \
+ $(top_builddir)/storage/heap/libheap.a \
@innodb_libs@ @bdb_libs_with_path@ \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/strings/libmystrings.a \
diff --git a/myisam/ft_boolean_search.c b/myisam/ft_boolean_search.c
deleted file mode 100644
index 35c41b7d2d6..00000000000
--- a/myisam/ft_boolean_search.c
+++ /dev/null
@@ -1,735 +0,0 @@
-/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/* Written by Sergei A. Golubchik, who has a shared copyright to this code */
-
-/* TODO: add caching - pre-read several index entries at once */
-
-#define FT_CORE
-#include "ftdefs.h"
-
-/* search with boolean queries */
-
-static double _wghts[11]=
-{
- 0.131687242798354,
- 0.197530864197531,
- 0.296296296296296,
- 0.444444444444444,
- 0.666666666666667,
- 1.000000000000000,
- 1.500000000000000,
- 2.250000000000000,
- 3.375000000000000,
- 5.062500000000000,
- 7.593750000000000};
-static double *wghts=_wghts+5; /* wghts[i] = 1.5**i */
-
-static double _nwghts[11]=
-{
- -0.065843621399177,
- -0.098765432098766,
- -0.148148148148148,
- -0.222222222222222,
- -0.333333333333334,
- -0.500000000000000,
- -0.750000000000000,
- -1.125000000000000,
- -1.687500000000000,
- -2.531250000000000,
- -3.796875000000000};
-static double *nwghts=_nwghts+5; /* nwghts[i] = -0.5*1.5**i */
-
-#define FTB_FLAG_TRUNC 1
-/* At most one of the following flags can be set */
-#define FTB_FLAG_YES 2
-#define FTB_FLAG_NO 4
-#define FTB_FLAG_WONLY 8
-
-typedef struct st_ftb_expr FTB_EXPR;
-struct st_ftb_expr
-{
- FTB_EXPR *up;
- uint flags;
-/* ^^^^^^^^^^^^^^^^^^ FTB_{EXPR,WORD} common section */
- my_off_t docid[2];
- float weight;
- float cur_weight;
- LIST *phrase; /* phrase words */
- uint yesses; /* number of "yes" words matched */
- uint nos; /* number of "no" words matched */
- uint ythresh; /* number of "yes" words in expr */
- uint yweaks; /* number of "yes" words for scan only */
-};
-
-typedef struct st_ftb_word
-{
- FTB_EXPR *up;
- uint flags;
-/* ^^^^^^^^^^^^^^^^^^ FTB_{EXPR,WORD} common section */
- my_off_t docid[2]; /* for index search and for scan */
- my_off_t key_root;
- MI_KEYDEF *keyinfo;
- float weight;
- uint ndepth;
- uint len;
- uchar off;
- byte word[1];
-} FTB_WORD;
-
-typedef struct st_ft_info
-{
- struct _ft_vft *please;
- MI_INFO *info;
- CHARSET_INFO *charset;
- FTB_EXPR *root;
- FTB_WORD **list;
- MEM_ROOT mem_root;
- QUEUE queue;
- TREE no_dupes;
- my_off_t lastpos;
- uint keynr;
- uchar with_scan;
- enum { UNINITIALIZED, READY, INDEX_SEARCH, INDEX_DONE } state;
-} FTB;
-
-static int FTB_WORD_cmp(my_off_t *v, FTB_WORD *a, FTB_WORD *b)
-{
- int i;
-
- /* if a==curdoc, take it as a < b */
- if (v && a->docid[0] == *v)
- return -1;
-
- /* ORDER BY docid, ndepth DESC */
- i=CMP_NUM(a->docid[0], b->docid[0]);
- if (!i)
- i=CMP_NUM(b->ndepth,a->ndepth);
- return i;
-}
-
-static int FTB_WORD_cmp_list(CHARSET_INFO *cs, FTB_WORD **a, FTB_WORD **b)
-{
- /* ORDER BY word DESC, ndepth DESC */
- int i= mi_compare_text(cs, (uchar*) (*b)->word+1,(*b)->len-1,
- (uchar*) (*a)->word+1,(*a)->len-1,0,0);
- if (!i)
- i=CMP_NUM((*b)->ndepth,(*a)->ndepth);
- return i;
-}
-
-static void _ftb_parse_query(FTB *ftb, byte **start, byte *end,
- FTB_EXPR *up, uint depth, byte *up_quot)
-{
- byte res;
- FTB_PARAM param;
- FT_WORD w;
- FTB_WORD *ftbw;
- FTB_EXPR *ftbe;
- FT_WORD *phrase_word;
- LIST *phrase_list;
- uint extra=HA_FT_WLEN+ftb->info->s->rec_reflength; /* just a shortcut */
-
- if (ftb->state != UNINITIALIZED)
- return;
-
- param.prev=' ';
- param.quot= up_quot;
- while ((res=ft_get_word(ftb->charset,start,end,&w,&param)))
- {
- int r=param.plusminus;
- float weight= (float) (param.pmsign ? nwghts : wghts)[(r>5)?5:((r<-5)?-5:r)];
- switch (res) {
- case 1: /* word found */
- ftbw=(FTB_WORD *)alloc_root(&ftb->mem_root,
- sizeof(FTB_WORD) +
- (param.trunc ? MI_MAX_KEY_BUFF :
- w.len*ftb->charset->mbmaxlen+extra));
- ftbw->len=w.len+1;
- ftbw->flags=0;
- ftbw->off=0;
- if (param.yesno>0) ftbw->flags|=FTB_FLAG_YES;
- if (param.yesno<0) ftbw->flags|=FTB_FLAG_NO;
- if (param.trunc) ftbw->flags|=FTB_FLAG_TRUNC;
- ftbw->weight=weight;
- ftbw->up=up;
- ftbw->docid[0]=ftbw->docid[1]=HA_OFFSET_ERROR;
- ftbw->ndepth= (param.yesno<0) + depth;
- ftbw->key_root=HA_OFFSET_ERROR;
- memcpy(ftbw->word+1, w.pos, w.len);
- ftbw->word[0]=w.len;
- if (param.yesno > 0) up->ythresh++;
- queue_insert(& ftb->queue, (byte *)ftbw);
- ftb->with_scan|=(param.trunc & FTB_FLAG_TRUNC);
- case 4: /* not indexed word (stopword or too short/long) */
- if (! up_quot) break;
- phrase_word= (FT_WORD *)alloc_root(&ftb->mem_root, sizeof(FT_WORD));
- phrase_list= (LIST *)alloc_root(&ftb->mem_root, sizeof(LIST));
- phrase_word->pos= w.pos;
- phrase_word->len= w.len;
- phrase_list->data= (void *)phrase_word;
- up->phrase= list_add(up->phrase, phrase_list);
- break;
- case 2: /* left bracket */
- ftbe=(FTB_EXPR *)alloc_root(&ftb->mem_root, sizeof(FTB_EXPR));
- ftbe->flags=0;
- if (param.yesno>0) ftbe->flags|=FTB_FLAG_YES;
- if (param.yesno<0) ftbe->flags|=FTB_FLAG_NO;
- ftbe->weight=weight;
- ftbe->up=up;
- ftbe->ythresh=ftbe->yweaks=0;
- ftbe->docid[0]=ftbe->docid[1]=HA_OFFSET_ERROR;
- ftbe->phrase= NULL;
- if (param.quot) ftb->with_scan|=2;
- if (param.yesno > 0) up->ythresh++;
- _ftb_parse_query(ftb, start, end, ftbe, depth+1, param.quot);
- param.quot=0;
- break;
- case 3: /* right bracket */
- if (up_quot) up->phrase= list_reverse(up->phrase);
- return;
- }
- }
- return;
-}
-
-static int _ftb_no_dupes_cmp(void* not_used __attribute__((unused)),
- const void *a,const void *b)
-{
- return CMP_NUM((*((my_off_t*)a)), (*((my_off_t*)b)));
-}
-
-/* returns 1 if the search was finished (must-word wasn't found) */
-static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search)
-{
- int r;
- int subkeys=1;
- my_bool can_go_down;
- MI_INFO *info=ftb->info;
- uint off, extra=HA_FT_WLEN+info->s->base.rec_reflength;
- byte *lastkey_buf=ftbw->word+ftbw->off;
-
- LINT_INIT(off);
- if (ftbw->flags & FTB_FLAG_TRUNC)
- lastkey_buf+=ftbw->len;
-
- if (init_search)
- {
- ftbw->key_root=info->s->state.key_root[ftb->keynr];
- ftbw->keyinfo=info->s->keyinfo+ftb->keynr;
-
- r=_mi_search(info, ftbw->keyinfo, (uchar*) ftbw->word, ftbw->len,
- SEARCH_FIND | SEARCH_BIGGER, ftbw->key_root);
- }
- else
- {
- r=_mi_search(info, ftbw->keyinfo, (uchar*) lastkey_buf,
- USE_WHOLE_KEY, SEARCH_BIGGER, ftbw->key_root);
- }
-
- can_go_down=(!ftbw->off && (init_search || (ftbw->flags & FTB_FLAG_TRUNC)));
- /* Skip rows inserted by concurrent insert */
- while (!r)
- {
- if (can_go_down)
- {
- /* going down ? */
- off=info->lastkey_length-extra;
- subkeys=ft_sintXkorr(info->lastkey+off);
- }
- if (subkeys<0 || info->lastpos < info->state->data_file_length)
- break;
- r= _mi_search_next(info, ftbw->keyinfo, info->lastkey,
- info->lastkey_length,
- SEARCH_BIGGER, ftbw->key_root);
- }
-
- if (!r && !ftbw->off)
- {
- r= mi_compare_text(ftb->charset,
- info->lastkey+1,
- info->lastkey_length-extra-1,
- (uchar*) ftbw->word+1,
- ftbw->len-1,
- (my_bool) (ftbw->flags & FTB_FLAG_TRUNC),0);
- }
-
- if (r) /* not found */
- {
- if (!ftbw->off || !(ftbw->flags & FTB_FLAG_TRUNC))
- {
- ftbw->docid[0]=HA_OFFSET_ERROR;
- if ((ftbw->flags & FTB_FLAG_YES) && ftbw->up->up==0)
- {
- /*
- This word MUST BE present in every document returned,
- so we can stop the search right now
- */
- ftb->state=INDEX_DONE;
- return 1; /* search is done */
- }
- else
- return 0;
- }
-
- /* going up to the first-level tree to continue search there */
- _mi_dpointer(info, (uchar*) (lastkey_buf+HA_FT_WLEN), ftbw->key_root);
- ftbw->key_root=info->s->state.key_root[ftb->keynr];
- ftbw->keyinfo=info->s->keyinfo+ftb->keynr;
- ftbw->off=0;
- return _ft2_search(ftb, ftbw, 0);
- }
-
- /* matching key found */
- memcpy(lastkey_buf, info->lastkey, info->lastkey_length);
- if (lastkey_buf == ftbw->word)
- ftbw->len=info->lastkey_length-extra;
-
- /* going down ? */
- if (subkeys<0)
- {
- /*
- yep, going down, to the second-level tree
- TODO here: subkey-based optimization
- */
- ftbw->off=off;
- ftbw->key_root=info->lastpos;
- ftbw->keyinfo=& info->s->ft2_keyinfo;
- r=_mi_search_first(info, ftbw->keyinfo, ftbw->key_root);
- DBUG_ASSERT(r==0); /* found something */
- memcpy(lastkey_buf+off, info->lastkey, info->lastkey_length);
- }
- ftbw->docid[0]=info->lastpos;
- return 0;
-}
-
-static void _ftb_init_index_search(FT_INFO *ftb)
-{
- int i;
- FTB_WORD *ftbw;
-
- if ((ftb->state != READY && ftb->state !=INDEX_DONE) ||
- ftb->keynr == NO_SUCH_KEY)
- return;
- ftb->state=INDEX_SEARCH;
-
- for (i=ftb->queue.elements; i; i--)
- {
- ftbw=(FTB_WORD *)(ftb->queue.root[i]);
-
- if (ftbw->flags & FTB_FLAG_TRUNC)
- {
- /*
- special treatment for truncation operator
- 1. there are some (besides this) +words
- | no need to search in the index, it can never ADD new rows
- | to the result, and to remove half-matched rows we do scan anyway
- 2. -trunc*
- | same as 1.
- 3. in 1 and 2, +/- need not be on the same expr. level,
- but can be on any upper level, as in +word +(trunc1* trunc2*)
- 4. otherwise
- | We have to index-search for this prefix.
- | It may cause duplicates, as in the index (sorted by <word,docid>)
- | <aaaa,row1>
- | <aabb,row2>
- | <aacc,row1>
- | Searching for "aa*" will find row1 twice...
- */
- FTB_EXPR *ftbe;
- for (ftbe=(FTB_EXPR*)ftbw;
- ftbe->up && !(ftbe->up->flags & FTB_FLAG_TRUNC);
- ftbe->up->flags|= FTB_FLAG_TRUNC, ftbe=ftbe->up)
- {
- if (ftbe->flags & FTB_FLAG_NO || /* 2 */
- ftbe->up->ythresh - ftbe->up->yweaks >1) /* 1 */
- {
- FTB_EXPR *top_ftbe=ftbe->up;
- ftbw->docid[0]=HA_OFFSET_ERROR;
- for (ftbe=(FTB_EXPR *)ftbw;
- ftbe != top_ftbe && !(ftbe->flags & FTB_FLAG_NO);
- ftbe=ftbe->up)
- ftbe->up->yweaks++;
- ftbe=0;
- break;
- }
- }
- if (!ftbe)
- continue;
- /* 4 */
- if (!is_tree_inited(& ftb->no_dupes))
- init_tree(& ftb->no_dupes,0,0,sizeof(my_off_t),
- _ftb_no_dupes_cmp,0,0,0);
- else
- reset_tree(& ftb->no_dupes);
- }
-
- ftbw->off=0; /* in case of reinit */
- if (_ft2_search(ftb, ftbw, 1))
- return;
- }
- queue_fix(& ftb->queue);
-}
-
-
-FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query,
- uint query_len, CHARSET_INFO *cs)
-{
- FTB *ftb;
- FTB_EXPR *ftbe;
- uint res;
-
- if (!(ftb=(FTB *)my_malloc(sizeof(FTB), MYF(MY_WME))))
- return 0;
- ftb->please= (struct _ft_vft *) & _ft_vft_boolean;
- ftb->state=UNINITIALIZED;
- ftb->info=info;
- ftb->keynr=keynr;
- ftb->charset=cs;
- DBUG_ASSERT(keynr==NO_SUCH_KEY || cs == info->s->keyinfo[keynr].seg->charset);
- ftb->with_scan=0;
- ftb->lastpos=HA_OFFSET_ERROR;
- bzero(& ftb->no_dupes, sizeof(TREE));
-
- init_alloc_root(&ftb->mem_root, 1024, 1024);
-
- /*
- Hack: instead of init_queue, we'll use reinit queue to be able
- to alloc queue with alloc_root()
- */
- res=ftb->queue.max_elements=1+query_len/2;
- if (!(ftb->queue.root=
- (byte **)alloc_root(&ftb->mem_root, (res+1)*sizeof(void*))))
- goto err;
- reinit_queue(& ftb->queue, res, 0, 0,
- (int (*)(void*,byte*,byte*))FTB_WORD_cmp, 0);
- if (!(ftbe=(FTB_EXPR *)alloc_root(&ftb->mem_root, sizeof(FTB_EXPR))))
- goto err;
- ftbe->weight=1;
- ftbe->flags=FTB_FLAG_YES;
- ftbe->nos=1;
- ftbe->up=0;
- ftbe->ythresh=ftbe->yweaks=0;
- ftbe->docid[0]=ftbe->docid[1]=HA_OFFSET_ERROR;
- ftbe->phrase= NULL;
- ftb->root=ftbe;
- _ftb_parse_query(ftb, &query, query+query_len, ftbe, 0, NULL);
- ftb->list=(FTB_WORD **)alloc_root(&ftb->mem_root,
- sizeof(FTB_WORD *)*ftb->queue.elements);
- memcpy(ftb->list, ftb->queue.root+1, sizeof(FTB_WORD *)*ftb->queue.elements);
- qsort2(ftb->list, ftb->queue.elements, sizeof(FTB_WORD *),
- (qsort2_cmp)FTB_WORD_cmp_list, ftb->charset);
- if (ftb->queue.elements<2) ftb->with_scan &= ~FTB_FLAG_TRUNC;
- ftb->state=READY;
- return ftb;
-err:
- free_root(& ftb->mem_root, MYF(0));
- my_free((gptr)ftb,MYF(0));
- return 0;
-}
-
-
-/*
- Checks if given buffer matches phrase list.
-
- SYNOPSIS
- _ftb_check_phrase()
- s0 start of buffer
- e0 end of buffer
- phrase broken into list phrase
- cs charset info
-
- RETURN VALUE
- 1 is returned if phrase found, 0 else.
-*/
-
-static int _ftb_check_phrase(const byte *s0, const byte *e0,
- LIST *phrase, CHARSET_INFO *cs)
-{
- FT_WORD h_word;
- const byte *h_start= s0;
- DBUG_ENTER("_ftb_strstr");
- DBUG_ASSERT(phrase);
-
- while (ft_simple_get_word(cs, (byte **)&h_start, e0, &h_word, FALSE))
- {
- FT_WORD *n_word;
- LIST *phrase_element= phrase;
- const byte *h_start1= h_start;
- for (;;)
- {
- n_word= (FT_WORD *)phrase_element->data;
- if (my_strnncoll(cs, h_word.pos, h_word.len, n_word->pos, n_word->len))
- break;
- if (! (phrase_element= phrase_element->next))
- DBUG_RETURN(1);
- if (! ft_simple_get_word(cs, (byte **)&h_start1, e0, &h_word, FALSE))
- DBUG_RETURN(0);
- }
- }
- DBUG_RETURN(0);
-}
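
As a reference for the loop above, here is a simplified, self-contained sketch of the same phrase-matching idea: walk the buffer word by word and, at each position, try to match the whole phrase consecutively. The whitespace tokenizer and plain strncmp() comparison merely stand in for ft_simple_get_word() and my_strnncoll(); they are assumptions for the example, not the charset-aware routines used by MyISAM.

/* Simplified, standalone phrase check (not the MyISAM implementation). */
#include <stdio.h>
#include <string.h>

static int next_word(const char **p, const char *end,
                     const char **w, size_t *wlen)
{
  while (*p < end && **p == ' ') (*p)++;      /* skip separators */
  if (*p >= end) return 0;
  *w = *p;
  while (*p < end && **p != ' ') (*p)++;      /* advance to end of word */
  *wlen = (size_t)(*p - *w);
  return 1;
}

static int check_phrase(const char *s, const char *e,
                        const char **phrase, size_t n)
{
  const char *p = s, *w;
  size_t wlen;
  while (next_word(&p, e, &w, &wlen))         /* each buffer position */
  {
    const char *q = p;
    size_t i = 0;
    while (wlen == strlen(phrase[i]) && !strncmp(w, phrase[i], wlen))
    {
      if (++i == n) return 1;                 /* whole phrase matched */
      if (!next_word(&q, e, &w, &wlen)) return 0;   /* buffer exhausted */
    }
  }
  return 0;
}

int main(void)
{
  const char *buf = "quick brown fox jumps";
  const char *phrase[] = { "brown", "fox" };
  printf("%d\n", check_phrase(buf, buf + strlen(buf), phrase, 2));  /* 1 */
  return 0;
}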
-
-
-static void _ftb_climb_the_tree(FTB *ftb, FTB_WORD *ftbw, FT_SEG_ITERATOR *ftsi_orig)
-{
- FT_SEG_ITERATOR ftsi;
- FTB_EXPR *ftbe;
- float weight=ftbw->weight;
- int yn=ftbw->flags, ythresh, mode=(ftsi_orig != 0);
- my_off_t curdoc=ftbw->docid[mode];
-
- for (ftbe=ftbw->up; ftbe; ftbe=ftbe->up)
- {
- ythresh = ftbe->ythresh - (mode ? 0 : ftbe->yweaks);
- if (ftbe->docid[mode] != curdoc)
- {
- ftbe->cur_weight=0;
- ftbe->yesses=ftbe->nos=0;
- ftbe->docid[mode]=curdoc;
- }
- if (ftbe->nos)
- break;
- if (yn & FTB_FLAG_YES)
- {
- weight /= ftbe->ythresh;
- ftbe->cur_weight += weight;
- if ((int) ++ftbe->yesses == ythresh)
- {
- yn=ftbe->flags;
- weight=ftbe->cur_weight*ftbe->weight;
- if (mode && ftbe->phrase)
- {
- int not_found=1;
-
- memcpy(&ftsi, ftsi_orig, sizeof(ftsi));
- while (_mi_ft_segiterator(&ftsi) && not_found)
- {
- if (!ftsi.pos)
- continue;
- not_found = ! _ftb_check_phrase(ftsi.pos, ftsi.pos+ftsi.len,
- ftbe->phrase, ftb->charset);
- }
- if (not_found) break;
-        } /* ftbe->phrase */
- }
- else
- break;
- }
- else
- if (yn & FTB_FLAG_NO)
- {
- /*
-        NOTE: the special sort function of the queue ensures that, for
-        every particular subexpression, all (yn & FTB_FLAG_NO) != 0
-        events "auto-magically" happen BEFORE all
-        (yn & FTB_FLAG_YES) != 0 events, so an already matched
-        expression can never become unmatched again.
- */
- ++ftbe->nos;
- break;
- }
- else
- {
- if (ftbe->ythresh)
- weight/=3;
- ftbe->cur_weight += weight;
- if ((int) ftbe->yesses < ythresh)
- break;
- if (!(yn & FTB_FLAG_WONLY))
- yn= ((int) ftbe->yesses++ == ythresh) ? ftbe->flags : FTB_FLAG_WONLY ;
- weight*= ftbe->weight;
- }
- }
-}
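
The NOTE inside the function above relies on the queue's comparison function ordering, within one and the same docid, all NO-flagged words ahead of the YES-flagged ones. The tiny comparator sketch below only demonstrates that ordering property; the struct, the flag bit, and the qsort() call are assumptions for illustration and do not reproduce the real FTB_WORD_cmp used by the priority queue.

/* Standalone sketch: order entries by docid, and within one docid put the
   NO-flagged entries first, so "no" votes are seen before any "yes" votes. */
#include <stdio.h>
#include <stdlib.h>

#define FLAG_NO 1

struct wentry { long docid; int flags; const char *word; };

static int wcmp(const void *a, const void *b)
{
  const struct wentry *x = a, *y = b;
  if (x->docid != y->docid)
    return x->docid < y->docid ? -1 : 1;      /* primary key: docid */
  /* same docid: NO-flagged entries sort first */
  return (y->flags & FLAG_NO) - (x->flags & FLAG_NO);
}

int main(void)
{
  struct wentry w[] = {
    { 7, 0,       "apple"  },
    { 7, FLAG_NO, "banana" },                 /* -banana: must come first */
    { 3, 0,       "cherry" },
  };
  size_t i, n = sizeof(w) / sizeof(w[0]);
  qsort(w, n, sizeof(w[0]), wcmp);
  for (i = 0; i < n; i++)
    printf("docid=%ld flags=%d word=%s\n", w[i].docid, w[i].flags, w[i].word);
  return 0;
}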
-
-
-int ft_boolean_read_next(FT_INFO *ftb, char *record)
-{
- FTB_EXPR *ftbe;
- FTB_WORD *ftbw;
- MI_INFO *info=ftb->info;
- my_off_t curdoc;
-
- if (ftb->state != INDEX_SEARCH && ftb->state != INDEX_DONE)
- return -1;
-
- /* black magic ON */
- if ((int) _mi_check_index(info, ftb->keynr) < 0)
- return my_errno;
- if (_mi_readinfo(info, F_RDLCK, 1))
- return my_errno;
- /* black magic OFF */
-
- if (!ftb->queue.elements)
- return my_errno=HA_ERR_END_OF_FILE;
-
-  /*
-    Attention: the address of a local variable is stored here and is
-    cleared again at the err: label before this function returns.
-  */
- ftb->queue.first_cmp_arg=(void *)&curdoc;
-
- while (ftb->state == INDEX_SEARCH &&
- (curdoc=((FTB_WORD *)queue_top(& ftb->queue))->docid[0]) !=
- HA_OFFSET_ERROR)
- {
- while (curdoc == (ftbw=(FTB_WORD *)queue_top(& ftb->queue))->docid[0])
- {
- _ftb_climb_the_tree(ftb, ftbw, 0);
-
- /* update queue */
- _ft2_search(ftb, ftbw, 0);
- queue_replaced(& ftb->queue);
- }
-
- ftbe=ftb->root;
- if (ftbe->docid[0]==curdoc && ftbe->cur_weight>0 &&
- ftbe->yesses>=(ftbe->ythresh-ftbe->yweaks) && !ftbe->nos)
- {
- /* curdoc matched ! */
- if (is_tree_inited(&ftb->no_dupes) &&
- tree_insert(&ftb->no_dupes, &curdoc, 0,
- ftb->no_dupes.custom_arg)->count >1)
-        /* but it has already managed to get past this line once */
- continue;
-
- info->lastpos=curdoc;
- /* Clear all states, except that the table was updated */
- info->update&= (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
-
- if (!(*info->read_record)(info,curdoc,record))
- {
- info->update|= HA_STATE_AKTIV; /* Record is read */
- if (ftb->with_scan && ft_boolean_find_relevance(ftb,record,0)==0)
- continue; /* no match */
- my_errno=0;
- goto err;
- }
- goto err;
- }
- }
- ftb->state=INDEX_DONE;
- my_errno=HA_ERR_END_OF_FILE;
-err:
- ftb->queue.first_cmp_arg=(void *)0;
- return my_errno;
-}
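
The "Attention" comment in the function above flags that queue.first_cmp_arg temporarily holds the address of the stack variable curdoc and is cleared again at the err: label before returning. A minimal sketch of that discipline follows, with an invented context struct standing in for the queue; it illustrates the pattern only and is not the actual queue API.

/* Standalone sketch: a longer-lived structure temporarily stores the address
   of a stack variable, so it must be cleared before the function returns. */
#include <stdio.h>

struct context { const long *current_doc; };  /* outlives the call below */

static long scan(struct context *ctx)
{
  long curdoc = 42;               /* local: valid only inside this frame */
  ctx->current_doc = &curdoc;     /* like queue.first_cmp_arg = &curdoc  */

  long result = *ctx->current_doc;    /* ... work that uses the pointer ... */

  ctx->current_doc = NULL;        /* err:-style cleanup before returning; */
  return result;                  /* leaving the pointer set would dangle */
}

int main(void)
{
  struct context ctx = { NULL };
  printf("%ld\n", scan(&ctx));    /* prints 42; ctx.current_doc is NULL again */
  return 0;
}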
-
-
-float ft_boolean_find_relevance(FT_INFO *ftb, byte *record, uint length)
-{
- FT_WORD word;
- FTB_WORD *ftbw;
- FTB_EXPR *ftbe;
- FT_SEG_ITERATOR ftsi, ftsi2;
- const byte *end;
- my_off_t docid=ftb->info->lastpos;
-
- if (docid == HA_OFFSET_ERROR)
- return -2.0;
- if (!ftb->queue.elements)
- return 0;
-
- if (ftb->state != INDEX_SEARCH && docid <= ftb->lastpos)
- {
- FTB_EXPR *x;
- uint i;
-
- for (i=0; i < ftb->queue.elements; i++)
- {
- ftb->list[i]->docid[1]=HA_OFFSET_ERROR;
- for (x=ftb->list[i]->up; x; x=x->up)
- x->docid[1]=HA_OFFSET_ERROR;
- }
- }
-
- ftb->lastpos=docid;
-
- if (ftb->keynr==NO_SUCH_KEY)
- _mi_ft_segiterator_dummy_init(record, length, &ftsi);
- else
- _mi_ft_segiterator_init(ftb->info, ftb->keynr, record, &ftsi);
- memcpy(&ftsi2, &ftsi, sizeof(ftsi));
-
- while (_mi_ft_segiterator(&ftsi))
- {
- if (!ftsi.pos)
- continue;
-
- end=ftsi.pos+ftsi.len;
- while (ft_simple_get_word(ftb->charset, (byte **) &ftsi.pos,
- (byte *) end, &word, TRUE))
- {
- int a, b, c;
- for (a=0, b=ftb->queue.elements, c=(a+b)/2; b-a>1; c=(a+b)/2)
- {
- ftbw=ftb->list[c];
- if (mi_compare_text(ftb->charset, (uchar*) word.pos, word.len,
- (uchar*) ftbw->word+1, ftbw->len-1,
- (my_bool) (ftbw->flags&FTB_FLAG_TRUNC),0) >0)
- b=c;
- else
- a=c;
- }
- for (; c>=0; c--)
- {
- ftbw=ftb->list[c];
- if (mi_compare_text(ftb->charset, (uchar*) word.pos, word.len,
- (uchar*) ftbw->word+1,ftbw->len-1,
- (my_bool) (ftbw->flags&FTB_FLAG_TRUNC),0))
- break;
- if (ftbw->docid[1] == docid)
- continue;
- ftbw->docid[1]=docid;
- _ftb_climb_the_tree(ftb, ftbw, &ftsi2);
- }
- }
- }
-
- ftbe=ftb->root;
- if (ftbe->docid[1]==docid && ftbe->cur_weight>0 &&
- ftbe->yesses>=ftbe->ythresh && !ftbe->nos)
- { /* row matched ! */
- return ftbe->cur_weight;
- }
- else
- { /* match failed ! */
- return 0.0;
- }
-}
-
-
-void ft_boolean_close_search(FT_INFO *ftb)
-{
- if (is_tree_inited(& ftb->no_dupes))
- {
- delete_tree(& ftb->no_dupes);
- }
- free_root(& ftb->mem_root, MYF(0));
- my_free((gptr)ftb,MYF(0));
-}
-
-
-float ft_boolean_get_relevance(FT_INFO *ftb)
-{
- return ftb->root->cur_weight;
-}
-
-
-void ft_boolean_reinit_search(FT_INFO *ftb)
-{
- _ftb_init_index_search(ftb);
-}
-
diff --git a/mysql-test/include/have_partition.inc b/mysql-test/include/have_partition.inc
new file mode 100644
index 00000000000..4b663c71c19
--- /dev/null
+++ b/mysql-test/include/have_partition.inc
@@ -0,0 +1,4 @@
+-- require r/have_partition.require
+disable_query_log;
+show variables like "have_partition_engine";
+enable_query_log;
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 0bc32c9eaeb..b70379d8597 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -816,8 +816,8 @@ sub executable_setup () {
$exe_mysql= mtr_exe_exists("$path_client_bindir/mysql");
$exe_mysql_fix_system_tables=
mtr_script_exists("$glob_basedir/scripts/mysql_fix_privilege_tables");
- $path_ndb_tools_dir= mtr_path_exists("$glob_basedir/ndb/tools");
- $exe_ndb_mgm= "$glob_basedir/ndb/src/mgmclient/ndb_mgm";
+ $path_ndb_tools_dir= mtr_path_exists("$glob_basedir/storage/ndb/tools");
+ $exe_ndb_mgm= "$glob_basedir/storage/ndb/src/mgmclient/ndb_mgm";
}
else
{
diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh
index 081fec21563..4ca591452d2 100644
--- a/mysql-test/mysql-test-run.sh
+++ b/mysql-test/mysql-test-run.sh
@@ -583,8 +583,8 @@ if [ x$SOURCE_DIST = x1 ] ; then
CHARSETSDIR="$BASEDIR/sql/share/charsets"
INSTALL_DB="./install_test_db"
MYSQL_FIX_SYSTEM_TABLES="$BASEDIR/scripts/mysql_fix_privilege_tables"
- NDB_TOOLS_DIR="$BASEDIR/ndb/tools"
- NDB_MGM="$BASEDIR/ndb/src/mgmclient/ndb_mgm"
+ NDB_TOOLS_DIR="$BASEDIR/storage/ndb/tools"
+ NDB_MGM="$BASEDIR/storage/ndb/src/mgmclient/ndb_mgm"
if [ -n "$USE_PURIFY" ] ; then
PSUP="$MYSQL_TEST_DIR/suppress.purify"
diff --git a/mysql-test/ndb/ndbcluster.sh b/mysql-test/ndb/ndbcluster.sh
index c09c013552e..e80c3594ee8 100644
--- a/mysql-test/ndb/ndbcluster.sh
+++ b/mysql-test/ndb/ndbcluster.sh
@@ -18,7 +18,7 @@ cd $CWD
# Are we using a source or a binary distribution?
if [ -d ../sql ] ; then
SOURCE_DIST=1
- ndbtop=$BASEDIR/ndb
+ ndbtop=$BASEDIR/storage/ndb
exec_ndb=$ndbtop/src/kernel/ndbd
exec_mgmtsrvr=$ndbtop/src/mgmsrv/ndb_mgmd
exec_waiter=$ndbtop/tools/ndb_waiter
diff --git a/mysql-test/r/have_partition.require b/mysql-test/r/have_partition.require
new file mode 100644
index 00000000000..8fdd98032c2
--- /dev/null
+++ b/mysql-test/r/have_partition.require
@@ -0,0 +1,2 @@
+Variable_name Value
+have_partition_engine YES
diff --git a/mysql-test/r/ndb_partition_key.result b/mysql-test/r/ndb_partition_key.result
new file mode 100644
index 00000000000..5893859fd72
--- /dev/null
+++ b/mysql-test/r/ndb_partition_key.result
@@ -0,0 +1,71 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a int, b int, c int, d int, PRIMARY KEY(a,b,c))
+ENGINE = NDB
+PARTITION BY KEY (a,b);
+insert into t1 values (1,1,1,1);
+select * from t1;
+a b c d
+1 1 1 1
+update t1 set d = 2 where a = 1 and b = 1 and c = 1;
+select * from t1;
+a b c d
+1 1 1 2
+delete from t1;
+select * from t1;
+a b c d
+drop table t1;
+CREATE TABLE t1 (a int, b int, c int, d int, PRIMARY KEY(a,b))
+ENGINE = NDB
+PARTITION BY KEY (c);
+ERROR HY000: A PRIMARY KEY need to include all fields in the partition function
+CREATE TABLE t1 (a int, b int, c int, PRIMARY KEY(a,b))
+ENGINE = NDB
+PARTITION BY KEY (a);
+insert into t1 values
+(1,1,3),(1,2,3),(1,3,3),(1,4,3),(1,5,3),(1,6,3),
+(1,7,3),(1,8,3),(1,9,3),(1,10,3),(1,11,3),(1,12,3);
+select * from t1 order by b;
+a b c
+1 1 3
+1 2 3
+1 3 3
+1 4 3
+1 5 3
+1 6 3
+1 7 3
+1 8 3
+1 9 3
+1 10 3
+1 11 3
+1 12 3
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(10) COLLATE latin1_bin, c INT, d INT,
+PRIMARY KEY USING HASH (a,b,c))
+ENGINE=NDB
+DEFAULT CHARSET=latin1
+PARTITION BY KEY (b);
+insert into t1 values (1,"a",1,1),(2,"a",1,1),(3,"a",1,1);
+-- t1 --
+
+Fragment type: 5
+K Value: 6
+Min load factor: 78
+Max load factor: 80
+Temporary table: no
+Number of attributes: 4
+Number of primary keys: 3
+Length of frm data: 301
+TableStatus: Retrieved
+-- Attributes --
+a Int PRIMARY KEY
+b Char(10;latin1_bin) PRIMARY KEY DISTRIBUTION KEY
+c Int PRIMARY KEY
+d Int NULL
+
+-- Indexes --
+PRIMARY KEY(a, b, c) - UniqueHashIndex
+
+
+NDBT_ProgramExit: 0 - OK
+
+DROP TABLE t1;
diff --git a/mysql-test/r/ndb_partition_range.result b/mysql-test/r/ndb_partition_range.result
new file mode 100644
index 00000000000..63bd89e1d1c
--- /dev/null
+++ b/mysql-test/r/ndb_partition_range.result
@@ -0,0 +1,105 @@
+drop table if exists t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b),
+index (a))
+engine = ndb
+partition by range (a)
+partitions 3
+(partition x1 values less than (5),
+partition x2 values less than (10),
+partition x3 values less than (20));
+INSERT into t1 values (1, 1, 1);
+INSERT into t1 values (6, 1, 1);
+INSERT into t1 values (10, 1, 1);
+INSERT into t1 values (15, 1, 1);
+select * from t1 order by a;
+a b c
+1 1 1
+6 1 1
+10 1 1
+15 1 1
+select * from t1 where a=1 order by a;
+a b c
+1 1 1
+select * from t1 where a=15 and b=1 order by a;
+a b c
+15 1 1
+select * from t1 where a=21 and b=1 order by a;
+a b c
+select * from t1 where a=21 order by a;
+a b c
+select * from t1 where a in (1,6,10,21) order by a;
+a b c
+1 1 1
+6 1 1
+10 1 1
+select * from t1 where b=1 and a in (1,6,10,21) order by a;
+a b c
+1 1 1
+6 1 1
+10 1 1
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(b),
+unique (a))
+engine = ndb
+partition by range (b)
+partitions 3
+(partition x1 values less than (5),
+partition x2 values less than (10),
+partition x3 values less than (20));
+INSERT into t1 values (1, 1, 1);
+INSERT into t1 values (2, 6, 1);
+INSERT into t1 values (3, 10, 1);
+INSERT into t1 values (4, 15, 1);
+select * from t1 order by a;
+a b c
+1 1 1
+2 6 1
+3 10 1
+4 15 1
+UPDATE t1 set a = 5 WHERE b = 15;
+select * from t1 order by a;
+a b c
+1 1 1
+2 6 1
+3 10 1
+5 15 1
+UPDATE t1 set a = 6 WHERE a = 5;
+select * from t1 order by a;
+a b c
+1 1 1
+2 6 1
+3 10 1
+6 15 1
+select * from t1 where b=1 order by b;
+a b c
+1 1 1
+select * from t1 where b=15 and a=1 order by b;
+a b c
+select * from t1 where b=21 and a=1 order by b;
+a b c
+select * from t1 where b=21 order by b;
+a b c
+select * from t1 where b in (1,6,10,21) order by b;
+a b c
+1 1 1
+2 6 1
+3 10 1
+select * from t1 where a in (1,2,5,6) order by b;
+a b c
+1 1 1
+2 6 1
+6 15 1
+select * from t1 where a=1 and b in (1,6,10,21) order by b;
+a b c
+1 1 1
+DELETE from t1 WHERE b = 6;
+DELETE from t1 WHERE a = 6;
+drop table t1;
diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result
new file mode 100644
index 00000000000..2220fc69602
--- /dev/null
+++ b/mysql-test/r/partition.result
@@ -0,0 +1,355 @@
+drop table if exists t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a);
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a, b);
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a)
+partitions 3
+(partition x1, partition x2, partition x3);
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a)
+partitions 3
+(partition x1 nodegroup 0,
+partition x2 nodegroup 1,
+partition x3 nodegroup 2);
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a)
+partitions 3
+(partition x1 engine myisam,
+partition x2 engine myisam,
+partition x3 engine myisam);
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a)
+partitions 3
+(partition x1 tablespace ts1,
+partition x2 tablespace ts2,
+partition x3 tablespace ts3);
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 3
+(partition x1 values in (1,2,9,4) tablespace ts1,
+partition x2 values in (3, 11, 5, 7) tablespace ts2,
+partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (b*a)
+partitions 3
+(partition x1 values in (1,2,9,4) tablespace ts1,
+partition x2 values in (3, 11, 5, 7) tablespace ts2,
+partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (b*a)
+(partition x1 values in (1) tablespace ts1,
+partition x2 values in (3, 11, 5, 7) tablespace ts2,
+partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
+drop table t1;
+partition by list (a)
+partitions 3
+(partition x1 values in (1,2,9,4) tablespace ts1,
+partition x2 values in (3, 11, 5, 7) tablespace ts2,
+partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
+ERROR 42000: Partitioning can not be used stand-alone in query near 'partition by list (a)
+partitions 3
+(partition x1 values in (1,2,9,4) tablespace ' at line 1
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2;
+ERROR HY000: For LIST partitions each partition must be defined
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (sin(a))
+partitions 3
+(partition x1 values in (1,2,9,4) tablespace ts1,
+partition x2 values in (3, 11, 5, 7) tablespace ts2,
+partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
+ERROR HY000: The PARTITION function returns the wrong type
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a+2)
+partitions 3
+(partition x1 tablespace ts1,
+partition x2 tablespace ts2,
+partition x3 tablespace ts3);
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '+2)
+partitions 3
+(partition x1 tablespace ts1,
+partition x2 tablespace ts2,
+part' at line 6
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a)
+partitions 3
+(partition tablespace ts1,
+partition x2 tablespace ts2,
+partition x3 tablespace ts3);
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ts1,
+partition x2 tablespace ts2,
+partition x3 tablespace ts3)' at line 8
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a,d)
+partitions 3
+(partition x1 tablespace ts1,
+partition x2 tablespace ts2,
+partition x3 tablespace ts3);
+ERROR HY000: Field in list of fields for partition function not found in table
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (a + d)
+partitions 3
+(partition x1 tablespace ts1,
+partition x2 tablespace ts2,
+partition x3 tablespace ts3);
+ERROR 42S22: Unknown column 'd' in 'partition function'
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (sin(a))
+partitions 3
+(partition x1 tablespace ts1,
+partition x2 tablespace ts2,
+partition x3 tablespace ts3);
+ERROR HY000: The PARTITION function returns the wrong type
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a)
+partitions 3
+(partition x1, partition x2);
+ERROR 42000: Wrong number of partitions defined, mismatch with previous setting near ')' at line 8
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (rand(a))
+partitions 2
+(partition x1, partition x2);
+ERROR 42000: Constant/Random expression in (sub)partitioning function is not allowed near ')
+partitions 2
+(partition x1, partition x2)' at line 6
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (rand(a))
+partitions 2
+(partition x1 values less than (0), partition x2 values less than (2));
+ERROR 42000: Constant/Random expression in (sub)partitioning function is not allowed near ')
+partitions 2
+(partition x1 values less than (0), partition x2 values less than' at line 6
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (rand(a))
+partitions 2
+(partition x1 values in (1), partition x2 values in (2));
+ERROR 42000: Constant/Random expression in (sub)partitioning function is not allowed near ')
+partitions 2
+(partition x1 values in (1), partition x2 values in (2))' at line 6
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (a)
+partitions 2
+(partition x1 values less than (4),
+partition x2 values less than (5));
+ERROR HY000: Only RANGE PARTITIONING can use VALUES LESS THAN in partition definition
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (a)
+partitions 2
+(partition x1 values in (4),
+partition x2 values in (5));
+ERROR HY000: Only LIST PARTITIONING can use VALUES IN in partition definition
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (a)
+partitions 2
+(partition x1 values in (4,6),
+partition x2 values in (5,7));
+ERROR HY000: Only LIST PARTITIONING can use VALUES IN in partition definition
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by key (b);
+ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by key (a, b);
+ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by hash (a+b);
+ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by key (b);
+ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by key (a, b);
+ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by hash (a+b);
+ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by hash (rand(a+b));
+ERROR 42000: Constant/Random expression in (sub)partitioning function is not allowed near ')' at line 7
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by hash (sin(a+b))
+(partition x1 (subpartition x11, subpartition x12),
+partition x2 (subpartition x21, subpartition x22));
+ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by key (a+b)
+(partition x1 values less than (1) (subpartition x11, subpartition x12),
+partition x2 values less than (2) (subpartition x21, subpartition x22));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '+b)
+(partition x1 values less than (1) (subpartition x11, subpartition x12),
+par' at line 7
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by key (a,d)
+(partition x1 values less than (1) (subpartition x11, subpartition x12),
+partition x2 values less than (2) (subpartition x21, subpartition x22));
+ERROR HY000: Field in list of fields for partition function not found in table
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by hash (3+4);
+ERROR HY000: It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+d)
+(partition x1 values less than (1) (subpartition x11, subpartition x12),
+partition x2 values less than (2) (subpartition x21, subpartition x22));
+ERROR 42S22: Unknown column 'd' in 'partition function'
diff --git a/mysql-test/r/partition_hash.result b/mysql-test/r/partition_hash.result
new file mode 100644
index 00000000000..2165630e4fb
--- /dev/null
+++ b/mysql-test/r/partition_hash.result
@@ -0,0 +1,66 @@
+drop table if exists t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (a + 2)
+partitions 3
+(partition x1 tablespace ts1,
+partition x2 tablespace ts2,
+partition x3 tablespace ts3);
+insert into t1 values (1,1,1);
+insert into t1 values (2,1,1);
+insert into t1 values (3,1,1);
+insert into t1 values (4,1,1);
+insert into t1 values (5,1,1);
+select * from t1;
+a b c
+1 1 1
+4 1 1
+2 1 1
+5 1 1
+3 1 1
+update t1 set c=3 where b=1;
+select * from t1;
+a b c
+1 1 3
+4 1 3
+2 1 3
+5 1 3
+3 1 3
+select b from t1 where a=3;
+b
+1
+select b,c from t1 where a=1 AND b=1;
+b c
+1 3
+delete from t1 where a=1;
+delete from t1 where c=3;
+select * from t1;
+a b c
+ALTER TABLE t1
+partition by hash (a + 3)
+partitions 3
+(partition x1 tablespace ts1,
+partition x2 tablespace ts2,
+partition x3 tablespace ts3);
+select * from t1;
+a b c
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (a)
+(partition x1);
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a)
+(partition x1);
+drop table t1;
diff --git a/mysql-test/r/partition_list.result b/mysql-test/r/partition_list.result
new file mode 100644
index 00000000000..40cdfd8399c
--- /dev/null
+++ b/mysql-test/r/partition_list.result
@@ -0,0 +1,342 @@
+drop table if exists t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null)
+partition by list(a)
+partitions 2
+(partition x123 values in (1,5,6),
+partition x234 values in (4,7,8));
+INSERT into t1 VALUES (1,1,1);
+INSERT into t1 VALUES (2,1,1);
+ERROR HY000: Got error 1 from storage engine
+INSERT into t1 VALUES (3,1,1);
+ERROR HY000: Got error 1 from storage engine
+INSERT into t1 VALUES (4,1,1);
+INSERT into t1 VALUES (5,1,1);
+INSERT into t1 VALUES (6,1,1);
+INSERT into t1 VALUES (7,1,1);
+INSERT into t1 VALUES (8,1,1);
+INSERT into t1 VALUES (9,1,1);
+ERROR HY000: Got error 1 from storage engine
+INSERT into t1 VALUES (1,2,1);
+INSERT into t1 VALUES (1,3,1);
+INSERT into t1 VALUES (1,4,1);
+INSERT into t1 VALUES (7,2,1);
+INSERT into t1 VALUES (7,3,1);
+INSERT into t1 VALUES (7,4,1);
+SELECT * from t1;
+a b c
+1 1 1
+5 1 1
+6 1 1
+1 2 1
+1 3 1
+1 4 1
+4 1 1
+7 1 1
+8 1 1
+7 2 1
+7 3 1
+7 4 1
+SELECT * from t1 WHERE a=1;
+a b c
+1 1 1
+1 2 1
+1 3 1
+1 4 1
+SELECT * from t1 WHERE a=7;
+a b c
+7 1 1
+7 2 1
+7 3 1
+7 4 1
+SELECT * from t1 WHERE b=2;
+a b c
+1 2 1
+7 2 1
+UPDATE t1 SET a=8 WHERE a=7 AND b=3;
+SELECT * from t1;
+a b c
+1 1 1
+5 1 1
+6 1 1
+1 2 1
+1 3 1
+1 4 1
+4 1 1
+7 1 1
+8 1 1
+7 2 1
+8 3 1
+7 4 1
+UPDATE t1 SET a=8 WHERE a=5 AND b=1;
+SELECT * from t1;
+a b c
+1 1 1
+6 1 1
+1 2 1
+1 3 1
+1 4 1
+4 1 1
+7 1 1
+8 1 1
+7 2 1
+8 3 1
+7 4 1
+8 1 1
+DELETE from t1 WHERE a=8;
+SELECT * from t1;
+a b c
+1 1 1
+6 1 1
+1 2 1
+1 3 1
+1 4 1
+4 1 1
+7 1 1
+7 2 1
+7 4 1
+DELETE from t1 WHERE a=2;
+SELECT * from t1;
+a b c
+1 1 1
+6 1 1
+1 2 1
+1 3 1
+1 4 1
+4 1 1
+7 1 1
+7 2 1
+7 4 1
+DELETE from t1 WHERE a=5 OR a=6;
+SELECT * from t1;
+a b c
+1 1 1
+1 2 1
+1 3 1
+1 4 1
+4 1 1
+7 1 1
+7 2 1
+7 4 1
+ALTER TABLE t1
+partition by list(a)
+partitions 2
+(partition x123 values in (1,5,6),
+partition x234 values in (4,7,8));
+SELECT * from t1;
+a b c
+1 1 1
+1 2 1
+1 3 1
+1 4 1
+4 1 1
+7 1 1
+7 2 1
+7 4 1
+INSERT into t1 VALUES (6,2,1);
+INSERT into t1 VALUES (2,2,1);
+ERROR HY000: Got error 1 from storage engine
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by list (a)
+subpartition by hash (a+b)
+( partition x1 values in (1,2,3)
+( subpartition x11 nodegroup 0,
+subpartition x12 nodegroup 1),
+partition x2 values in (4,5,6)
+( subpartition x21 nodegroup 0,
+subpartition x22 nodegroup 1)
+);
+INSERT into t1 VALUES (1,1,1);
+INSERT into t1 VALUES (4,1,1);
+INSERT into t1 VALUES (7,1,1);
+ERROR HY000: Got error 1 from storage engine
+UPDATE t1 SET a=5 WHERE a=1;
+SELECT * from t1;
+a b c
+5 1 1
+4 1 1
+UPDATE t1 SET a=6 WHERE a=4;
+SELECT * from t1;
+a b c
+5 1 1
+6 1 1
+DELETE from t1 WHERE a=6;
+SELECT * from t1;
+a b c
+5 1 1
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by list (a)
+subpartition by hash (a+b)
+subpartitions 3
+( partition x1 values in (1,2,4)
+( subpartition x11 nodegroup 0,
+subpartition x12 nodegroup 1),
+partition x2 values in (3,5,6)
+( subpartition x21 nodegroup 0,
+subpartition x22 nodegroup 1)
+);
+ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '),
+partition x2 values in (3,5,6)
+( subpartition x21 nodegroup 0,
+subpartition x' at line 11
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by list (a)
+subpartition by hash (a+b)
+( partition x1 values in (1)
+( subpartition x11 nodegroup 0,
+subpartition xextra,
+subpartition x12 nodegroup 1),
+partition x2 values in (2)
+( subpartition x21 nodegroup 0,
+subpartition x22 nodegroup 1)
+);
+ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near ')
+)' at line 14
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by list (a+b)
+( partition x1
+( subpartition x11 engine myisam,
+subpartition x12 engine myisam),
+partition x2
+( subpartition x21 engine myisam,
+subpartition x22 engine myisam)
+);
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'list (a+b)
+( partition x1
+( subpartition x11 engine myisam,
+subpartition x12 eng' at line 7
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by list (a+b)
+( partition x1
+( subpartition x11 engine myisam values in (0),
+subpartition x12 engine myisam values in (1)),
+partition x2
+( subpartition x21 engine myisam values in (0),
+subpartition x22 engine myisam values in (1))
+);
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'list (a+b)
+( partition x1
+( subpartition x11 engine myisam values in (0),
+subpar' at line 7
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+(partition x1 values in (1,2,9,4) tablespace ts1);
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a);
+ERROR HY000: For LIST partitions each partition must be defined
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (3+4)
+partitions 2
+(partition x1 values in (4) tablespace ts1,
+partition x2 values in (8) tablespace ts2);
+ERROR HY000: Constant/Random expression in (sub)partitioning function is not allowed
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a+d)
+partitions 2
+(partition x1 values in (4) tablespace ts1,
+partition x2 values in (8) tablespace ts2);
+ERROR 42S22: Unknown column 'd' in 'partition function'
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2
+(partition x1 values in (4),
+partition x2);
+ERROR HY000: LIST PARTITIONING requires definition of VALUES IN for each partition
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2
+(partition x1 values in (4),
+partition x2 values less than (5));
+ERROR HY000: Only RANGE PARTITIONING can use VALUES LESS THAN in partition definition
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2
+(partition x1 values in (4,6),
+partition x2);
+ERROR HY000: LIST PARTITIONING requires definition of VALUES IN for each partition
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2
+(partition x1 values in (4, 12+9),
+partition x2 values in (3, 21));
+ERROR HY000: Multiple definition of same constant in list partitioning
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2
+(partition x1 values in (4.0, 12+8),
+partition x2 values in (3, 21));
+ERROR HY000: VALUES IN value must be of same type as partition function
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2
+(partition x1 values in 4,
+partition x2 values in (5));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '4,
+partition x2 values in (5))' at line 8
diff --git a/mysql-test/r/partition_order.result b/mysql-test/r/partition_order.result
new file mode 100644
index 00000000000..7a1ab1d6dc8
--- /dev/null
+++ b/mysql-test/r/partition_order.result
@@ -0,0 +1,733 @@
+drop table if exists t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b tinyint not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b tinyint unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b smallint not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b smallint unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b mediumint not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b mediumint unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b bigint unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b bigint not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b bigint not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b float not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b double not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b double unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b float unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b double precision not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b double precision unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b decimal not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b char(10) not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+select * from t1 force index (b) where b > 0 order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b varchar(10) not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+select * from t1 force index (b) where b > '0' order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b varchar(10) not null,
+primary key(a),
+index (b(5)))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+select * from t1 force index (b) where b > '0' order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b varchar(10) binary not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+select * from t1 force index (b) where b > '0' order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b tinytext not null,
+primary key(a),
+index (b(10)))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+select * from t1 force index (b) where b > '0' order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b text not null,
+primary key(a),
+index (b(10)))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+select * from t1 force index (b) where b > '0' order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b mediumtext not null,
+primary key(a),
+index (b(10)))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+select * from t1 force index (b) where b > '0' order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b longtext not null,
+primary key(a),
+index (b(10)))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+select * from t1 force index (b) where b > '0' order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b enum('1','2', '4', '5') not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+select * from t1 force index (b) where b >= '1' order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b set('1','2', '4', '5') not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+select * from t1 force index (b) where b >= '1' order by b;
+a b
+1 1
+35 2
+30 4
+2 5
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b date not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, '2001-01-01');
+INSERT into t1 values (2, '2005-01-01');
+INSERT into t1 values (30, '2004-01-01');
+INSERT into t1 values (35, '2002-01-01');
+select * from t1 force index (b) where b > '2000-01-01' order by b;
+a b
+1 2001-01-01
+35 2002-01-01
+30 2004-01-01
+2 2005-01-01
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b datetime not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, '2001-01-01 00:00:00');
+INSERT into t1 values (2, '2005-01-01 00:00:00');
+INSERT into t1 values (30, '2004-01-01 00:00:00');
+INSERT into t1 values (35, '2002-01-01 00:00:00');
+select * from t1 force index (b) where b > '2000-01-01 00:00:00' order by b;
+a b
+1 2001-01-01 00:00:00
+35 2002-01-01 00:00:00
+30 2004-01-01 00:00:00
+2 2005-01-01 00:00:00
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b timestamp not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, '2001-01-01 00:00:00');
+INSERT into t1 values (2, '2005-01-01 00:00:00');
+INSERT into t1 values (30, '2004-01-01 00:00:00');
+INSERT into t1 values (35, '2002-01-01 00:00:00');
+select * from t1 force index (b) where b > '2000-01-01 00:00:00' order by b;
+a b
+1 2001-01-01 00:00:00
+35 2002-01-01 00:00:00
+30 2004-01-01 00:00:00
+2 2005-01-01 00:00:00
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b time not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, '01:00:00');
+INSERT into t1 values (2, '05:00:00');
+INSERT into t1 values (30, '04:00:00');
+INSERT into t1 values (35, '02:00:00');
+select * from t1 force index (b) where b > '00:00:00' order by b;
+a b
+1 01:00:00
+35 02:00:00
+30 04:00:00
+2 05:00:00
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b year not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 2001);
+INSERT into t1 values (2, 2005);
+INSERT into t1 values (30, 2004);
+INSERT into t1 values (35, 2002);
+select * from t1 force index (b) where b > 2000 order by b;
+a b
+1 2001
+35 2002
+30 2004
+2 2005
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b bit(5) not null,
+c int,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, b'00001', NULL);
+INSERT into t1 values (2, b'00101', 2);
+INSERT into t1 values (30, b'00100', 2);
+INSERT into t1 values (35, b'00010', NULL);
+select a from t1 force index (b) where b > b'00000' order by b;
+a
+1
+35
+30
+2
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b bit(15) not null,
+c int,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, b'000000000000001', NULL);
+INSERT into t1 values (2, b'001010000000101', 2);
+INSERT into t1 values (30, b'001000000000100', 2);
+INSERT into t1 values (35, b'000100000000010', NULL);
+select a from t1 force index (b) where b > b'000000000000000' order by b;
+a
+1
+35
+30
+2
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+partition x2 values less than (100));
+INSERT into t1 values (1, 1);
+INSERT into t1 values (5, NULL);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+INSERT into t1 values (40, NULL);
+select * from t1 force index (b) where b < 10 OR b IS NULL order by b;
+a b
+5 NULL
+40 NULL
+1 1
+35 2
+30 4
+2 5
+drop table t1;
diff --git a/mysql-test/r/partition_range.result b/mysql-test/r/partition_range.result
new file mode 100644
index 00000000000..fc2924c6357
--- /dev/null
+++ b/mysql-test/r/partition_range.result
@@ -0,0 +1,455 @@
+drop table if exists t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 3
+(partition x1 values less than (5) tablespace ts1,
+partition x2 values less than (10) tablespace ts2,
+partition x3 values less than maxvalue tablespace ts3);
+INSERT into t1 values (1, 1, 1);
+INSERT into t1 values (6, 1, 1);
+INSERT into t1 values (10, 1, 1);
+INSERT into t1 values (15, 1, 1);
+select * from t1;
+a b c
+1 1 1
+6 1 1
+10 1 1
+15 1 1
+ALTER TABLE t1
+partition by range (a)
+partitions 3
+(partition x1 values less than (5) tablespace ts1,
+partition x2 values less than (10) tablespace ts2,
+partition x3 values less than maxvalue tablespace ts3);
+select * from t1;
+a b c
+1 1 1
+6 1 1
+10 1 1
+15 1 1
+drop table if exists t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null)
+partition by range (a)
+partitions 3
+(partition x1 values less than (5) tablespace ts1,
+partition x2 values less than (10) tablespace ts2,
+partition x3 values less than maxvalue tablespace ts3);
+INSERT into t1 values (1, 1, 1);
+INSERT into t1 values (6, 1, 1);
+INSERT into t1 values (10, 1, 1);
+INSERT into t1 values (15, 1, 1);
+select * from t1;
+a b c
+1 1 1
+6 1 1
+10 1 1
+15 1 1
+ALTER TABLE t1
+partition by range (a)
+partitions 3
+(partition x1 values less than (5) tablespace ts1,
+partition x2 values less than (10) tablespace ts2,
+partition x3 values less than maxvalue tablespace ts3);
+select * from t1;
+a b c
+1 1 1
+6 1 1
+10 1 1
+15 1 1
+drop table if exists t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 3
+(partition x1 values less than (5) tablespace ts1,
+partition x2 values less than (10) tablespace ts2,
+partition x3 values less than (15) tablespace ts3);
+INSERT into t1 values (1, 1, 1);
+INSERT into t1 values (6, 1, 1);
+INSERT into t1 values (10, 1, 1);
+INSERT into t1 values (15, 1, 1);
+ERROR HY000: Got error 1 from storage engine
+select * from t1;
+a b c
+1 1 1
+6 1 1
+10 1 1
+ALTER TABLE t1
+partition by range (a)
+partitions 3
+(partition x1 values less than (5) tablespace ts1,
+partition x2 values less than (10) tablespace ts2,
+partition x3 values less than (15) tablespace ts3);
+select * from t1;
+a b c
+1 1 1
+6 1 1
+10 1 1
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+(partition x1 values less than (1));
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a);
+ERROR HY000: For RANGE partitions each partition must be defined
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a+d)
+partitions 2
+(partition x1 values less than (4) tablespace ts1,
+partition x2 values less than (8) tablespace ts2);
+ERROR 42S22: Unknown column 'd' in 'partition function'
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (4.0) tablespace ts1,
+partition x2 values less than (8) tablespace ts2);
+ERROR HY000: VALUES LESS THAN value must be of same type as partition function
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (3+4)
+partitions 2
+(partition x1 values less than (4) tablespace ts1,
+partition x2 values less than (8) tablespace ts2);
+ERROR HY000: Constant/Random expression in (sub)partitioning function is not allowed
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (4),
+partition x2);
+ERROR HY000: RANGE PARTITIONING requires definition of VALUES LESS THAN for each partition
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 2
+(partition x1 values in (4),
+partition x2);
+ERROR HY000: Only LIST PARTITIONING can use VALUES IN in partition definition
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 2
+(partition x1 values in (4),
+partition x2 values less than (5));
+ERROR HY000: Only LIST PARTITIONING can use VALUES IN in partition definition
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2
+(partition x1 values less than 4,
+partition x2 values less than (5));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '4,
+partition x2 values less than (5))' at line 8
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 2
+(partition x1 values less than maxvalue,
+partition x2 values less than (5));
+ERROR 42000: MAXVALUE can only be used in last partition definition near '))' at line 9
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 2
+(partition x1 values less than maxvalue,
+partition x2 values less than maxvalue);
+ERROR 42000: MAXVALUE can only be used in last partition definition near 'maxvalue)' at line 9
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (4),
+partition x2 values less than (3));
+ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (sin(a))
+partitions 2
+(partition x1 values less than (4),
+partition x2 values less than (5));
+ERROR HY000: The PARTITION function returns the wrong type
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+( subpartition x11,
+subpartition x12),
+partition x2 values less than (5)
+( subpartition x21,
+subpartition x22)
+);
+SELECT * from t1;
+a b c
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+( subpartition x11 tablespace t1 engine myisam nodegroup 0,
+subpartition x12 tablespace t2 engine myisam nodegroup 1),
+partition x2 values less than (5)
+( subpartition x21 tablespace t1 engine myisam nodegroup 0,
+subpartition x22 tablespace t2 engine myisam nodegroup 1)
+);
+SELECT * from t1;
+a b c
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+( subpartition x11 tablespace t1 nodegroup 0,
+subpartition x12 tablespace t2 nodegroup 1),
+partition x2 values less than (5)
+( subpartition x21 tablespace t1 nodegroup 0,
+subpartition x22 tablespace t2 nodegroup 1)
+);
+SELECT * from t1;
+a b c
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+( subpartition x11 engine myisam nodegroup 0,
+subpartition x12 engine myisam nodegroup 1),
+partition x2 values less than (5)
+( subpartition x21 engine myisam nodegroup 0,
+subpartition x22 engine myisam nodegroup 1)
+);
+INSERT into t1 VALUES (1,1,1);
+INSERT into t1 VALUES (4,1,1);
+INSERT into t1 VALUES (5,1,1);
+ERROR HY000: Got error 1 from storage engine
+SELECT * from t1;
+a b c
+1 1 1
+4 1 1
+ALTER TABLE t1
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+( subpartition x11 engine myisam nodegroup 0,
+subpartition x12 engine myisam nodegroup 1),
+partition x2 values less than (5)
+( subpartition x21 engine myisam nodegroup 0,
+subpartition x22 engine myisam nodegroup 1)
+);
+SELECT * from t1;
+a b c
+1 1 1
+4 1 1
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+( subpartition x11 tablespace t1 engine myisam,
+subpartition x12 tablespace t2 engine myisam),
+partition x2 values less than (5)
+( subpartition x21 tablespace t1 engine myisam,
+subpartition x22 tablespace t2 engine myisam)
+);
+INSERT into t1 VALUES (1,1,1);
+INSERT into t1 VALUES (4,1,1);
+INSERT into t1 VALUES (5,1,1);
+ERROR HY000: Got error 1 from storage engine
+SELECT * from t1;
+a b c
+1 1 1
+4 1 1
+ALTER TABLE t1
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+( subpartition x11 tablespace t1 engine myisam,
+subpartition x12 tablespace t2 engine myisam),
+partition x2 values less than (5)
+( subpartition x21 tablespace t1 engine myisam,
+subpartition x22 tablespace t2 engine myisam)
+);
+SELECT * from t1;
+a b c
+1 1 1
+4 1 1
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+( subpartition x11 tablespace t1,
+subpartition x12 tablespace t2),
+partition x2 values less than (5)
+( subpartition x21 tablespace t1,
+subpartition x22 tablespace t2)
+);
+INSERT into t1 VALUES (1,1,1);
+INSERT into t1 VALUES (4,1,1);
+INSERT into t1 VALUES (5,1,1);
+ERROR HY000: Got error 1 from storage engine
+SELECT * from t1;
+a b c
+1 1 1
+4 1 1
+ALTER TABLE t1
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+( subpartition x11 tablespace t1 engine myisam,
+subpartition x12 tablespace t2 engine myisam),
+partition x2 values less than (5)
+( subpartition x21 tablespace t1 engine myisam,
+subpartition x22 tablespace t2 engine myisam)
+);
+SELECT * from t1;
+a b c
+1 1 1
+4 1 1
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+( subpartition x11 engine myisam,
+subpartition x12 engine myisam),
+partition x2 values less than (5)
+( subpartition x21 engine myisam,
+subpartition x22 engine myisam)
+);
+INSERT into t1 VALUES (1,1,1);
+INSERT into t1 VALUES (4,1,1);
+INSERT into t1 VALUES (5,1,1);
+ERROR HY000: Got error 1 from storage engine
+SELECT * from t1;
+a b c
+1 1 1
+4 1 1
+ALTER TABLE t1
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+( subpartition x11 engine myisam,
+subpartition x12 engine myisam),
+partition x2 values less than (5)
+( subpartition x21 engine myisam,
+subpartition x22 engine myisam)
+);
+SELECT * from t1;
+a b c
+1 1 1
+4 1 1
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a+b)
+subpartition by key (a)
+( partition x1
+( subpartition x11 engine myisam,
+subpartition x12 engine myisam),
+partition x2
+( subpartition x21 engine myisam,
+subpartition x22 engine myisam)
+);
+ERROR HY000: RANGE PARTITIONING requires definition of VALUES LESS THAN for each partition
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by range (a+b)
+( partition x1
+( subpartition x11 engine myisam values less than (0),
+subpartition x12 engine myisam values less than (1)),
+partition x2
+( subpartition x21 engine myisam values less than (0),
+subpartition x22 engine myisam values less than (1))
+);
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'range (a+b)
+( partition x1
+( subpartition x11 engine myisam values less than (0)' at line 7
diff --git a/mysql-test/t/ndb_partition_key.test b/mysql-test/t/ndb_partition_key.test
new file mode 100644
index 00000000000..31d3b63122d
--- /dev/null
+++ b/mysql-test/t/ndb_partition_key.test
@@ -0,0 +1,58 @@
+-- source include/have_ndb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+#
+# Basic syntax test
+#
+
+# Support for partition key verified
+CREATE TABLE t1 (a int, b int, c int, d int, PRIMARY KEY(a,b,c))
+ ENGINE = NDB
+ PARTITION BY KEY (a,b);
+
+insert into t1 values (1,1,1,1);
+select * from t1;
+update t1 set d = 2 where a = 1 and b = 1 and c = 1;
+select * from t1;
+delete from t1;
+select * from t1;
+
+drop table t1;
+
+# partition key is only supported on primary key columns
+--error 1453
+CREATE TABLE t1 (a int, b int, c int, d int, PRIMARY KEY(a,b))
+ ENGINE = NDB
+ PARTITION BY KEY (c);
+
+CREATE TABLE t1 (a int, b int, c int, PRIMARY KEY(a,b))
+ ENGINE = NDB
+ PARTITION BY KEY (a);
+
+insert into t1 values
+ (1,1,3),(1,2,3),(1,3,3),(1,4,3),(1,5,3),(1,6,3),
+ (1,7,3),(1,8,3),(1,9,3),(1,10,3),(1,11,3),(1,12,3);
+
+select * from t1 order by b;
+
+DROP TABLE t1;
+
+#
+# Test partition and char support
+#
+
+CREATE TABLE t1 (a INT, b CHAR(10) COLLATE latin1_bin, c INT, d INT,
+ PRIMARY KEY USING HASH (a,b,c))
+ ENGINE=NDB
+ DEFAULT CHARSET=latin1
+ PARTITION BY KEY (b);
+
+insert into t1 values (1,"a",1,1),(2,"a",1,1),(3,"a",1,1);
+
+# should show only one attribute with DISTRIBUTION KEY
+--exec $NDB_TOOLS_DIR/ndb_desc --no-defaults -d test t1 | sed 's/Version: [0-9]*//'
+
+DROP TABLE t1;
diff --git a/mysql-test/t/ndb_partition_range.test b/mysql-test/t/ndb_partition_range.test
new file mode 100644
index 00000000000..35d2d33a722
--- /dev/null
+++ b/mysql-test/t/ndb_partition_range.test
@@ -0,0 +1,86 @@
+-- source include/have_ndb.inc
+#--disable_abort_on_error
+#
+# Simple test for the partition storage engine
+# Focuses on range partitioning tests
+#
+#-- source include/have_partition.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+#
+# Partition by range, basic
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b),
+index (a))
+engine = ndb
+partition by range (a)
+partitions 3
+(partition x1 values less than (5),
+ partition x2 values less than (10),
+ partition x3 values less than (20));
+
+# Simple insert and verify test
+INSERT into t1 values (1, 1, 1);
+INSERT into t1 values (6, 1, 1);
+INSERT into t1 values (10, 1, 1);
+INSERT into t1 values (15, 1, 1);
+
+select * from t1 order by a;
+
+select * from t1 where a=1 order by a;
+select * from t1 where a=15 and b=1 order by a;
+select * from t1 where a=21 and b=1 order by a;
+select * from t1 where a=21 order by a;
+select * from t1 where a in (1,6,10,21) order by a;
+select * from t1 where b=1 and a in (1,6,10,21) order by a;
+
+drop table t1;
+
+#
+# Partition by range, basic
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(b),
+unique (a))
+engine = ndb
+partition by range (b)
+partitions 3
+(partition x1 values less than (5),
+ partition x2 values less than (10),
+ partition x3 values less than (20));
+
+# Simple insert and verify test
+INSERT into t1 values (1, 1, 1);
+INSERT into t1 values (2, 6, 1);
+INSERT into t1 values (3, 10, 1);
+INSERT into t1 values (4, 15, 1);
+
+select * from t1 order by a;
+UPDATE t1 set a = 5 WHERE b = 15;
+select * from t1 order by a;
+UPDATE t1 set a = 6 WHERE a = 5;
+select * from t1 order by a;
+
+select * from t1 where b=1 order by b;
+select * from t1 where b=15 and a=1 order by b;
+select * from t1 where b=21 and a=1 order by b;
+select * from t1 where b=21 order by b;
+select * from t1 where b in (1,6,10,21) order by b;
+select * from t1 where a in (1,2,5,6) order by b;
+select * from t1 where a=1 and b in (1,6,10,21) order by b;
+
+DELETE from t1 WHERE b = 6;
+DELETE from t1 WHERE a = 6;
+
+drop table t1;
+
diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test
new file mode 100644
index 00000000000..49a938fa25d
--- /dev/null
+++ b/mysql-test/t/partition.test
@@ -0,0 +1,494 @@
+#--disable_abort_on_error
+#
+# Simple test for the partition storage engine
+# Taken from the select test
+#
+-- source include/have_partition.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+#
+# Partition by key, no partitions defined => OK
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a);
+
+drop table t1;
+#
+# Partition by key, no partitions defined, list of fields
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a, b);
+
+drop table t1;
+#
+# Partition by key specified 3 partitions and defined 3 => ok
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a)
+partitions 3
+(partition x1, partition x2, partition x3);
+
+drop table t1;
+#
+# Partition by key specifying nodegroup
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a)
+partitions 3
+(partition x1 nodegroup 0,
+ partition x2 nodegroup 1,
+ partition x3 nodegroup 2);
+
+drop table t1;
+#
+# Partition by key specifying engine
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a)
+partitions 3
+(partition x1 engine myisam,
+ partition x2 engine myisam,
+ partition x3 engine myisam);
+
+drop table t1;
+#
+# Partition by key specifying tablespace
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a)
+partitions 3
+(partition x1 tablespace ts1,
+ partition x2 tablespace ts2,
+ partition x3 tablespace ts3);
+
+drop table t1;
+
+#
+# Partition by list, basic
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 3
+(partition x1 values in (1,2,9,4) tablespace ts1,
+ partition x2 values in (3, 11, 5, 7) tablespace ts2,
+ partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
+
+drop table t1;
+#
+# Partition by list, partition function is an expression
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (b*a)
+partitions 3
+(partition x1 values in (1,2,9,4) tablespace ts1,
+ partition x2 values in (3, 11, 5, 7) tablespace ts2,
+ partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
+
+drop table t1;
+
+#
+# Partition by list, partition function is an expression, no number of partitions specified
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (b*a)
+(partition x1 values in (1) tablespace ts1,
+ partition x2 values in (3, 11, 5, 7) tablespace ts2,
+ partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
+
+drop table t1;
+
+#
+# Stand-alone partition clause without CREATE TABLE => syntax error
+#
+--error 1064
+partition by list (a)
+partitions 3
+(partition x1 values in (1,2,9,4) tablespace ts1,
+ partition x2 values in (3, 11, 5, 7) tablespace ts2,
+ partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
+
+#
+# Partition by list, number of partitions specified but no partitions defined
+#
+--error 1441
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2;
+
+#
+# Partition by list, wrong result type of partition function
+#
+--error 1440
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (sin(a))
+partitions 3
+(partition x1 values in (1,2,9,4) tablespace ts1,
+ partition x2 values in (3, 11, 5, 7) tablespace ts2,
+ partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
+
+#
+# Partition by key, partition function not allowed
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a+2)
+partitions 3
+(partition x1 tablespace ts1,
+ partition x2 tablespace ts2,
+ partition x3 tablespace ts3);
+
+#
+# Partition by key, no partition name
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a)
+partitions 3
+(partition tablespace ts1,
+ partition x2 tablespace ts2,
+ partition x3 tablespace ts3);
+
+#
+# Partition by key, invalid field in field list
+#
+--error 1437
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a,d)
+partitions 3
+(partition x1 tablespace ts1,
+ partition x2 tablespace ts2,
+ partition x3 tablespace ts3);
+#
+# Partition by hash, invalid field in function
+#
+--error 1054
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (a + d)
+partitions 3
+(partition x1 tablespace ts1,
+ partition x2 tablespace ts2,
+ partition x3 tablespace ts3);
+
+#
+# Partition by hash, invalid result type
+#
+--error 1440
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (sin(a))
+partitions 3
+(partition x1 tablespace ts1,
+ partition x2 tablespace ts2,
+ partition x3 tablespace ts3);
+
+#
+# Partition by key specified 3 partitions but only defined 2 => error
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a)
+partitions 3
+(partition x1, partition x2);
+
+#
+# Partition by hash, rand() not allowed in partition function => error
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (rand(a))
+partitions 2
+(partition x1, partition x2);
+
+#
+# Partition by range, rand() not allowed in partition function => error
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (rand(a))
+partitions 2
+(partition x1 values less than (0), partition x2 values less than (2));
+
+#
+# Partition by list, rand() not allowed in partition function => error
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (rand(a))
+partitions 2
+(partition x1 values in (1), partition x2 values in (2));
+
+#
+# Partition by hash, values less than error
+#
+--error 1430
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (a)
+partitions 2
+(partition x1 values less than (4),
+ partition x2 values less than (5));
+
+#
+# Partition by hash, values in error
+#
+--error 1430
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (a)
+partitions 2
+(partition x1 values in (4),
+ partition x2 values in (5));
+
+#
+# Partition by hash, values in error
+#
+--error 1430
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (a)
+partitions 2
+(partition x1 values in (4,6),
+ partition x2 values in (5,7));
+
+#
+# Subpartition by key, no partitions defined, single field
+#
+--error 1449
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by key (b);
+
+#
+# Subpartition by key, no partitions defined, list of fields
+#
+--error 1449
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by key (a, b);
+
+#
+# Subpartition by hash, no partitions defined
+#
+--error 1449
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by hash (a+b);
+
+#
+# Subpartition by key, no partitions defined, single field
+#
+--error 1449
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by key (b);
+
+#
+# Subpartition by key, no partitions defined, list of fields
+#
+--error 1449
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by key (a, b);
+
+#
+# Subpartition by hash, no partitions defined
+#
+--error 1449
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by hash (a+b);
+
+#
+# Subpartition by hash, no partitions defined, wrong subpartition function
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by hash (rand(a+b));
+
+#
+# Subpartition by hash, wrong subpartition function
+#
+--error 1449
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by hash (sin(a+b))
+(partition x1 (subpartition x11, subpartition x12),
+ partition x2 (subpartition x21, subpartition x22));
+
+#
+# Subpartition by key with an expression instead of a field list => syntax error
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by key (a+b)
+(partition x1 values less than (1) (subpartition x11, subpartition x12),
+ partition x2 values less than (2) (subpartition x21, subpartition x22));
+
+#
+# Subpartition by key, invalid field in field list
+#
+--error 1437
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by key (a,d)
+(partition x1 values less than (1) (subpartition x11, subpartition x12),
+ partition x2 values less than (2) (subpartition x21, subpartition x22));
+
+#
+# Subpartition by hash, no partitions defined, constant subpartition function
+#
+--error 1449
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by hash (3+4);
+
+#
+# Subpartition by hash, invalid field in subpartition function
+#
+--error 1054
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+d)
+(partition x1 values less than (1) (subpartition x11, subpartition x12),
+ partition x2 values less than (2) (subpartition x21, subpartition x22));
+
diff --git a/mysql-test/t/partition_hash.test b/mysql-test/t/partition_hash.test
new file mode 100644
index 00000000000..aa1acfe891f
--- /dev/null
+++ b/mysql-test/t/partition_hash.test
@@ -0,0 +1,77 @@
+#--disable_abort_on_error
+#
+# Simple test for the partition storage engine
+# Taken from the select test
+#
+-- source include/have_partition.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+#
+# Partition by hash, basic
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (a + 2)
+partitions 3
+(partition x1 tablespace ts1,
+ partition x2 tablespace ts2,
+ partition x3 tablespace ts3);
+
+insert into t1 values (1,1,1);
+insert into t1 values (2,1,1);
+insert into t1 values (3,1,1);
+insert into t1 values (4,1,1);
+insert into t1 values (5,1,1);
+
+select * from t1;
+
+update t1 set c=3 where b=1;
+select * from t1;
+
+select b from t1 where a=3;
+select b,c from t1 where a=1 AND b=1;
+
+delete from t1 where a=1;
+delete from t1 where c=3;
+
+select * from t1;
+
+ALTER TABLE t1
+partition by hash (a + 3)
+partitions 3
+(partition x1 tablespace ts1,
+ partition x2 tablespace ts2,
+ partition x3 tablespace ts3);
+select * from t1;
+drop table t1;
+
+#
+# Partition by hash, only one partition
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by hash (a)
+(partition x1);
+
+drop table t1;
+#
+# Partition by key, only one partition
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by key (a)
+(partition x1);
+
+drop table t1;
diff --git a/mysql-test/t/partition_list.test b/mysql-test/t/partition_list.test
new file mode 100644
index 00000000000..6432b8eb747
--- /dev/null
+++ b/mysql-test/t/partition_list.test
@@ -0,0 +1,316 @@
+#--disable_abort_on_error
+#
+# Simple test for the partition storage engine
+# testing list partitioning
+#
+-- source include/have_partition.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+#
+# Test ordinary list partitioning that it works ok
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null)
+partition by list(a)
+partitions 2
+(partition x123 values in (1,5,6),
+ partition x234 values in (4,7,8));
+
+INSERT into t1 VALUES (1,1,1);
+--error 1030
+INSERT into t1 VALUES (2,1,1);
+--error 1030
+INSERT into t1 VALUES (3,1,1);
+INSERT into t1 VALUES (4,1,1);
+INSERT into t1 VALUES (5,1,1);
+INSERT into t1 VALUES (6,1,1);
+INSERT into t1 VALUES (7,1,1);
+INSERT into t1 VALUES (8,1,1);
+--error 1030
+INSERT into t1 VALUES (9,1,1);
+INSERT into t1 VALUES (1,2,1);
+INSERT into t1 VALUES (1,3,1);
+INSERT into t1 VALUES (1,4,1);
+INSERT into t1 VALUES (7,2,1);
+INSERT into t1 VALUES (7,3,1);
+INSERT into t1 VALUES (7,4,1);
+
+SELECT * from t1;
+SELECT * from t1 WHERE a=1;
+SELECT * from t1 WHERE a=7;
+SELECT * from t1 WHERE b=2;
+
+UPDATE t1 SET a=8 WHERE a=7 AND b=3;
+SELECT * from t1;
+UPDATE t1 SET a=8 WHERE a=5 AND b=1;
+SELECT * from t1;
+
+DELETE from t1 WHERE a=8;
+SELECT * from t1;
+DELETE from t1 WHERE a=2;
+SELECT * from t1;
+DELETE from t1 WHERE a=5 OR a=6;
+SELECT * from t1;
+
+ALTER TABLE t1
+partition by list(a)
+partitions 2
+(partition x123 values in (1,5,6),
+ partition x234 values in (4,7,8));
+SELECT * from t1;
+INSERT into t1 VALUES (6,2,1);
+--error 1030
+INSERT into t1 VALUES (2,2,1);
+
+drop table t1;
+#
+# Subpartition by hash, two partitions and two subpartitions
+# Defined node group
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by list (a)
+subpartition by hash (a+b)
+( partition x1 values in (1,2,3)
+ ( subpartition x11 nodegroup 0,
+ subpartition x12 nodegroup 1),
+ partition x2 values in (4,5,6)
+ ( subpartition x21 nodegroup 0,
+ subpartition x22 nodegroup 1)
+);
+
+INSERT into t1 VALUES (1,1,1);
+INSERT into t1 VALUES (4,1,1);
+--error 1030
+INSERT into t1 VALUES (7,1,1);
+UPDATE t1 SET a=5 WHERE a=1;
+SELECT * from t1;
+UPDATE t1 SET a=6 WHERE a=4;
+SELECT * from t1;
+DELETE from t1 WHERE a=6;
+SELECT * from t1;
+
+drop table t1;
+
+#
+# Subpartition by hash, wrong number of subpartitions
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by list (a)
+subpartition by hash (a+b)
+subpartitions 3
+( partition x1 values in (1,2,4)
+ ( subpartition x11 nodegroup 0,
+ subpartition x12 nodegroup 1),
+ partition x2 values in (3,5,6)
+ ( subpartition x21 nodegroup 0,
+ subpartition x22 nodegroup 1)
+);
+
+#
+# Subpartition by hash, inconsistent number of subpartitions across partitions
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by list (a)
+subpartition by hash (a+b)
+( partition x1 values in (1)
+ ( subpartition x11 nodegroup 0,
+ subpartition xextra,
+ subpartition x12 nodegroup 1),
+ partition x2 values in (2)
+ ( subpartition x21 nodegroup 0,
+ subpartition x22 nodegroup 1)
+);
+
+#
+# Subpartition by list => error
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by list (a+b)
+( partition x1
+ ( subpartition x11 engine myisam,
+ subpartition x12 engine myisam),
+ partition x2
+ ( subpartition x21 engine myisam,
+ subpartition x22 engine myisam)
+);
+
+#
+# Subpartition by list => error
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by list (a+b)
+( partition x1
+ ( subpartition x11 engine myisam values in (0),
+ subpartition x12 engine myisam values in (1)),
+ partition x2
+ ( subpartition x21 engine myisam values in (0),
+ subpartition x22 engine myisam values in (1))
+);
+
+#
+# Partition by list, only one partition => ok
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+(partition x1 values in (1,2,9,4) tablespace ts1);
+
+drop table t1;
+#
+# Partition by list, no partition => error
+#
+--error 1441
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a);
+
+#
+# Partition by list, constant partition function not allowed
+#
+--error 1435
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (3+4)
+partitions 2
+(partition x1 values in (4) tablespace ts1,
+ partition x2 values in (8) tablespace ts2);
+
+#
+# Partition by list, invalid field in function
+#
+--error 1054
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a+d)
+partitions 2
+(partition x1 values in (4) tablespace ts1,
+ partition x2 values in (8) tablespace ts2);
+
+#
+# Partition by list, no values in definition
+#
+--error 1429
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2
+(partition x1 values in (4),
+ partition x2);
+
+#
+# Partition by list, values less than error
+#
+--error 1430
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2
+(partition x1 values in (4),
+ partition x2 values less than (5));
+
+#
+# Partition by list, no values in definition
+#
+--error 1429
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2
+(partition x1 values in (4,6),
+ partition x2);
+
+#
+# Partition by list, duplicate values
+#
+--error 1444
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2
+(partition x1 values in (4, 12+9),
+ partition x2 values in (3, 21));
+
+#
+# Partition by list, wrong constant result type (not INT)
+#
+--error 1443
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2
+(partition x1 values in (4.0, 12+8),
+ partition x2 values in (3, 21));
+
+#
+# Partition by list, missing parenthesis
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2
+(partition x1 values in 4,
+ partition x2 values in (5));
+
+
diff --git a/mysql-test/t/partition_order.test b/mysql-test/t/partition_order.test
new file mode 100644
index 00000000000..1e1b3339d64
--- /dev/null
+++ b/mysql-test/t/partition_order.test
@@ -0,0 +1,828 @@
+#--disable_abort_on_error
+#
+# Simple test for the partition storage engine
+# Focuses on tests of ordered index read
+#
+-- source include/have_partition.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+#
+# Ordered index read, int type
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 order by b;
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, unsigned int type
+#
+CREATE TABLE t1 (
+a int not null,
+b int unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, tiny int type
+#
+CREATE TABLE t1 (
+a int not null,
+b tinyint not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, unsigned tinyint type
+#
+CREATE TABLE t1 (
+a int not null,
+b tinyint unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, smallint type
+#
+CREATE TABLE t1 (
+a int not null,
+b smallint not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, unsigned smallint type
+#
+CREATE TABLE t1 (
+a int not null,
+b smallint unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+#
+# Ordered index read, mediumint type
+#
+CREATE TABLE t1 (
+a int not null,
+b mediumint not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, unsigned mediumint type
+#
+CREATE TABLE t1 (
+a int not null,
+b mediumint unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, unsigned bigint type
+#
+CREATE TABLE t1 (
+a int not null,
+b bigint unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, bigint type
+#
+CREATE TABLE t1 (
+a int not null,
+b bigint not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+#
+# Ordered index read, bigint type
+#
+CREATE TABLE t1 (
+a int not null,
+b bigint not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, float type
+#
+CREATE TABLE t1 (
+a int not null,
+b float not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, double type
+#
+CREATE TABLE t1 (
+a int not null,
+b double not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, unsigned double type
+#
+CREATE TABLE t1 (
+a int not null,
+b double unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, unsigned float type
+#
+CREATE TABLE t1 (
+a int not null,
+b float unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, double precision type
+#
+CREATE TABLE t1 (
+a int not null,
+b double precision not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+#
+# Ordered index read, unsigned double precision type
+#
+CREATE TABLE t1 (
+a int not null,
+b double precision unsigned not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, decimal type
+#
+CREATE TABLE t1 (
+a int not null,
+b decimal not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+#
+# Ordered index read, char type
+#
+CREATE TABLE t1 (
+a int not null,
+b char(10) not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+
+select * from t1 force index (b) where b > 0 order by b;
+
+drop table t1;
+
+#
+# Ordered index read, varchar type
+#
+CREATE TABLE t1 (
+a int not null,
+b varchar(10) not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+
+select * from t1 force index (b) where b > '0' order by b;
+
+drop table t1;
+#
+# Ordered index read, varchar type limited index size
+#
+CREATE TABLE t1 (
+a int not null,
+b varchar(10) not null,
+primary key(a),
+index (b(5)))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+
+select * from t1 force index (b) where b > '0' order by b;
+
+drop table t1;
+
+#
+# Ordered index read, varchar binary type
+#
+CREATE TABLE t1 (
+a int not null,
+b varchar(10) binary not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+
+select * from t1 force index (b) where b > '0' order by b;
+
+drop table t1;
+
+#
+# Ordered index read, tinytext type
+#
+CREATE TABLE t1 (
+a int not null,
+b tinytext not null,
+primary key(a),
+index (b(10)))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+
+select * from t1 force index (b) where b > '0' order by b;
+
+drop table t1;
+#
+# Ordered index read, text type
+#
+CREATE TABLE t1 (
+a int not null,
+b text not null,
+primary key(a),
+index (b(10)))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+
+select * from t1 force index (b) where b > '0' order by b;
+
+drop table t1;
+
+#
+# Ordered index read, mediumtext type
+#
+CREATE TABLE t1 (
+a int not null,
+b mediumtext not null,
+primary key(a),
+index (b(10)))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+
+select * from t1 force index (b) where b > '0' order by b;
+
+drop table t1;
+#
+# Ordered index read, longtext type
+#
+CREATE TABLE t1 (
+a int not null,
+b longtext not null,
+primary key(a),
+index (b(10)))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+
+select * from t1 force index (b) where b > '0' order by b;
+
+drop table t1;
+#
+# Ordered index read, enum type
+#
+CREATE TABLE t1 (
+a int not null,
+b enum('1','2', '4', '5') not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+
+select * from t1 force index (b) where b >= '1' order by b;
+
+drop table t1;
+#
+# Ordered index read, set type
+#
+CREATE TABLE t1 (
+a int not null,
+b set('1','2', '4', '5') not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, '1');
+INSERT into t1 values (2, '5');
+INSERT into t1 values (30, '4');
+INSERT into t1 values (35, '2');
+
+select * from t1 force index (b) where b >= '1' order by b;
+
+drop table t1;
+#
+# Ordered index read, date type
+#
+CREATE TABLE t1 (
+a int not null,
+b date not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, '2001-01-01');
+INSERT into t1 values (2, '2005-01-01');
+INSERT into t1 values (30, '2004-01-01');
+INSERT into t1 values (35, '2002-01-01');
+
+select * from t1 force index (b) where b > '2000-01-01' order by b;
+
+drop table t1;
+#
+# Ordered index read, datetime type
+#
+CREATE TABLE t1 (
+a int not null,
+b datetime not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, '2001-01-01 00:00:00');
+INSERT into t1 values (2, '2005-01-01 00:00:00');
+INSERT into t1 values (30, '2004-01-01 00:00:00');
+INSERT into t1 values (35, '2002-01-01 00:00:00');
+
+select * from t1 force index (b) where b > '2000-01-01 00:00:00' order by b;
+
+drop table t1;
+#
+# Ordered index read, timestamp type
+#
+CREATE TABLE t1 (
+a int not null,
+b timestamp not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, '2001-01-01 00:00:00');
+INSERT into t1 values (2, '2005-01-01 00:00:00');
+INSERT into t1 values (30, '2004-01-01 00:00:00');
+INSERT into t1 values (35, '2002-01-01 00:00:00');
+
+select * from t1 force index (b) where b > '2000-01-01 00:00:00' order by b;
+
+drop table t1;
+#
+# Ordered index read, time type
+#
+CREATE TABLE t1 (
+a int not null,
+b time not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, '01:00:00');
+INSERT into t1 values (2, '05:00:00');
+INSERT into t1 values (30, '04:00:00');
+INSERT into t1 values (35, '02:00:00');
+
+select * from t1 force index (b) where b > '00:00:00' order by b;
+
+drop table t1;
+#
+# Ordered index read, year type
+#
+CREATE TABLE t1 (
+a int not null,
+b year not null,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 2001);
+INSERT into t1 values (2, 2005);
+INSERT into t1 values (30, 2004);
+INSERT into t1 values (35, 2002);
+
+select * from t1 force index (b) where b > 2000 order by b;
+
+drop table t1;
+#
+# Ordered index read, bit(5) type
+#
+CREATE TABLE t1 (
+a int not null,
+b bit(5) not null,
+c int,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, b'00001', NULL);
+INSERT into t1 values (2, b'00101', 2);
+INSERT into t1 values (30, b'00100', 2);
+INSERT into t1 values (35, b'00010', NULL);
+
+select a from t1 force index (b) where b > b'00000' order by b;
+
+drop table t1;
+#
+# Ordered index read, bit(15) type
+#
+CREATE TABLE t1 (
+a int not null,
+b bit(15) not null,
+c int,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, b'000000000000001', NULL);
+INSERT into t1 values (2, b'001010000000101', 2);
+INSERT into t1 values (30, b'001000000000100', 2);
+INSERT into t1 values (35, b'000100000000010', NULL);
+
+select a from t1 force index (b) where b > b'000000000000000' order by b;
+
+drop table t1;
+
+#
+# Ordered index read, NULL values
+#
+CREATE TABLE t1 (
+a int not null,
+b int,
+primary key(a),
+index (b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (25),
+ partition x2 values less than (100));
+
+# Insert a couple of tuples
+INSERT into t1 values (1, 1);
+INSERT into t1 values (5, NULL);
+INSERT into t1 values (2, 5);
+INSERT into t1 values (30, 4);
+INSERT into t1 values (35, 2);
+INSERT into t1 values (40, NULL);
+
+select * from t1 force index (b) where b < 10 OR b IS NULL order by b;
+
+drop table t1;
diff --git a/mysql-test/t/partition_range.test b/mysql-test/t/partition_range.test
new file mode 100644
index 00000000000..e5c1ff795e6
--- /dev/null
+++ b/mysql-test/t/partition_range.test
@@ -0,0 +1,560 @@
+#--disable_abort_on_error
+#
+# Simple test for the partition storage engine
+# Focuses on range partitioning tests
+#
+-- source include/have_partition.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+#
+# Partition by range, basic
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 3
+(partition x1 values less than (5) tablespace ts1,
+ partition x2 values less than (10) tablespace ts2,
+ partition x3 values less than maxvalue tablespace ts3);
+
+# Simple insert and verify test
+INSERT into t1 values (1, 1, 1);
+INSERT into t1 values (6, 1, 1);
+INSERT into t1 values (10, 1, 1);
+INSERT into t1 values (15, 1, 1);
+
+select * from t1;
+
+ALTER TABLE t1
+partition by range (a)
+partitions 3
+(partition x1 values less than (5) tablespace ts1,
+ partition x2 values less than (10) tablespace ts2,
+ partition x3 values less than maxvalue tablespace ts3);
+
+select * from t1;
+
+drop table if exists t1;
+
+#
+# Partition by range, basic
+# No primary key
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null)
+partition by range (a)
+partitions 3
+(partition x1 values less than (5) tablespace ts1,
+ partition x2 values less than (10) tablespace ts2,
+ partition x3 values less than maxvalue tablespace ts3);
+
+# Simple insert and verify test
+INSERT into t1 values (1, 1, 1);
+INSERT into t1 values (6, 1, 1);
+INSERT into t1 values (10, 1, 1);
+INSERT into t1 values (15, 1, 1);
+
+select * from t1;
+
+ALTER TABLE t1
+partition by range (a)
+partitions 3
+(partition x1 values less than (5) tablespace ts1,
+ partition x2 values less than (10) tablespace ts2,
+ partition x3 values less than maxvalue tablespace ts3);
+
+select * from t1;
+
+drop table if exists t1;
+
+#
+# Partition by range, basic
+# No max value used
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 3
+(partition x1 values less than (5) tablespace ts1,
+ partition x2 values less than (10) tablespace ts2,
+ partition x3 values less than (15) tablespace ts3);
+
+
+# Simple insert and verify test
+INSERT into t1 values (1, 1, 1);
+INSERT into t1 values (6, 1, 1);
+INSERT into t1 values (10, 1, 1);
+--error 1030
+INSERT into t1 values (15, 1, 1);
+
+select * from t1;
+
+ALTER TABLE t1
+partition by range (a)
+partitions 3
+(partition x1 values less than (5) tablespace ts1,
+ partition x2 values less than (10) tablespace ts2,
+ partition x3 values less than (15) tablespace ts3);
+
+select * from t1;
+
+drop table t1;
+
+#
+# Partition by range, only one partition
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+(partition x1 values less than (1));
+
+drop table t1;
+#
+# Partition by range, no partition => error
+#
+--error 1441
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a);
+
+#
+# Partition by range, invalid field in function
+#
+--error 1054
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a+d)
+partitions 2
+(partition x1 values less than (4) tablespace ts1,
+ partition x2 values less than (8) tablespace ts2);
+
+#
+# Partition by range, inconsistent partition function and constants
+#
+--error 1443
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (4.0) tablespace ts1,
+ partition x2 values less than (8) tablespace ts2);
+
+#
+# Partition by range, constant partition function not allowed
+#
+--error 1435
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (3+4)
+partitions 2
+(partition x1 values less than (4) tablespace ts1,
+ partition x2 values less than (8) tablespace ts2);
+
+#
+# Partition by range, no values less than definition
+#
+--error 1429
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (4),
+ partition x2);
+
+#
+# Partition by range, no values in definition allowed
+#
+--error 1430
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 2
+(partition x1 values in (4),
+ partition x2);
+
+#
+# Partition by range, values in error
+#
+--error 1430
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 2
+(partition x1 values in (4),
+ partition x2 values less than (5));
+
+#
+# Missing parenthesis in values less than => syntax error
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by list (a)
+partitions 2
+(partition x1 values less than 4,
+ partition x2 values less than (5));
+
+#
+# Partition by range, maxvalue in wrong place
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 2
+(partition x1 values less than maxvalue,
+ partition x2 values less than (5));
+
+#
+# Partition by range, maxvalue in several places
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 2
+(partition x1 values less than maxvalue,
+ partition x2 values less than maxvalue);
+
+#
+# Partition by range, not increasing ranges
+#
+--error 1442
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (a)
+partitions 2
+(partition x1 values less than (4),
+ partition x2 values less than (3));
+
+#
+# Partition by range, wrong result type of partition function
+#
+--error 1440
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b))
+partition by range (sin(a))
+partitions 2
+(partition x1 values less than (4),
+ partition x2 values less than (5));
+
+#
+# Subpartition by hash, two partitions and two subpartitions
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+ ( subpartition x11,
+ subpartition x12),
+ partition x2 values less than (5)
+ ( subpartition x21,
+ subpartition x22)
+);
+
+SELECT * from t1;
+
+drop table t1;
+
+#
+# Subpartition by hash, two partitions and two subpartitions
+# Defined tablespace, engine and node group
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+ ( subpartition x11 tablespace t1 engine myisam nodegroup 0,
+ subpartition x12 tablespace t2 engine myisam nodegroup 1),
+ partition x2 values less than (5)
+ ( subpartition x21 tablespace t1 engine myisam nodegroup 0,
+ subpartition x22 tablespace t2 engine myisam nodegroup 1)
+);
+
+SELECT * from t1;
+
+drop table t1;
+
+#
+# Subpartition by hash, two partitions and two subpartitions
+# Defined tablespace, node group
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+ ( subpartition x11 tablespace t1 nodegroup 0,
+ subpartition x12 tablespace t2 nodegroup 1),
+ partition x2 values less than (5)
+ ( subpartition x21 tablespace t1 nodegroup 0,
+ subpartition x22 tablespace t2 nodegroup 1)
+);
+
+SELECT * from t1;
+
+drop table t1;
+
+#
+# Subpartition by hash, two partitions and two subpartitions
+# Defined engine and node group
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+ ( subpartition x11 engine myisam nodegroup 0,
+ subpartition x12 engine myisam nodegroup 1),
+ partition x2 values less than (5)
+ ( subpartition x21 engine myisam nodegroup 0,
+ subpartition x22 engine myisam nodegroup 1)
+);
+
+INSERT into t1 VALUES (1,1,1);
+INSERT into t1 VALUES (4,1,1);
+--error 1030
+INSERT into t1 VALUES (5,1,1);
+
+SELECT * from t1;
+
+ALTER TABLE t1
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+ ( subpartition x11 engine myisam nodegroup 0,
+ subpartition x12 engine myisam nodegroup 1),
+ partition x2 values less than (5)
+ ( subpartition x21 engine myisam nodegroup 0,
+ subpartition x22 engine myisam nodegroup 1)
+);
+
+SELECT * from t1;
+
+drop table t1;
+
+#
+# Subpartition by hash, two partitions and two subpartitions
+# Defined tablespace, engine
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+ ( subpartition x11 tablespace t1 engine myisam,
+ subpartition x12 tablespace t2 engine myisam),
+ partition x2 values less than (5)
+ ( subpartition x21 tablespace t1 engine myisam,
+ subpartition x22 tablespace t2 engine myisam)
+);
+
+INSERT into t1 VALUES (1,1,1);
+INSERT into t1 VALUES (4,1,1);
+--error 1030
+INSERT into t1 VALUES (5,1,1);
+
+SELECT * from t1;
+
+ALTER TABLE t1
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+ ( subpartition x11 tablespace t1 engine myisam,
+ subpartition x12 tablespace t2 engine myisam),
+ partition x2 values less than (5)
+ ( subpartition x21 tablespace t1 engine myisam,
+ subpartition x22 tablespace t2 engine myisam)
+);
+
+SELECT * from t1;
+
+drop table t1;
+
+#
+# Subpartition by hash, two partitions and two subpartitions
+# Defined tablespace
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+ ( subpartition x11 tablespace t1,
+ subpartition x12 tablespace t2),
+ partition x2 values less than (5)
+ ( subpartition x21 tablespace t1,
+ subpartition x22 tablespace t2)
+);
+
+INSERT into t1 VALUES (1,1,1);
+INSERT into t1 VALUES (4,1,1);
+--error 1030
+INSERT into t1 VALUES (5,1,1);
+
+SELECT * from t1;
+
+ALTER TABLE t1
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+ ( subpartition x11 tablespace t1 engine myisam,
+ subpartition x12 tablespace t2 engine myisam),
+ partition x2 values less than (5)
+ ( subpartition x21 tablespace t1 engine myisam,
+ subpartition x22 tablespace t2 engine myisam)
+);
+
+SELECT * from t1;
+
+drop table t1;
+
+#
+# Subpartition by hash, two partitions and two subpartitions
+# Defined engine
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+ ( subpartition x11 engine myisam,
+ subpartition x12 engine myisam),
+ partition x2 values less than (5)
+ ( subpartition x21 engine myisam,
+ subpartition x22 engine myisam)
+);
+
+INSERT into t1 VALUES (1,1,1);
+INSERT into t1 VALUES (4,1,1);
+--error 1030
+INSERT into t1 VALUES (5,1,1);
+
+SELECT * from t1;
+
+ALTER TABLE t1
+partition by range (a)
+subpartition by hash (a+b)
+( partition x1 values less than (1)
+ ( subpartition x11 engine myisam,
+ subpartition x12 engine myisam),
+ partition x2 values less than (5)
+ ( subpartition x21 engine myisam,
+ subpartition x22 engine myisam)
+);
+
+SELECT * from t1;
+
+drop table t1;
+
+#
+# Partition by range without VALUES LESS THAN => error
+#
+--error 1429
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by range (a+b)
+subpartition by key (a)
+( partition x1
+ ( subpartition x11 engine myisam,
+ subpartition x12 engine myisam),
+ partition x2
+ ( subpartition x21 engine myisam,
+ subpartition x22 engine myisam)
+);
+
+#
+# Subpartition with range => error
+#
+--error 1064
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key (a,b))
+partition by key (a)
+subpartition by range (a+b)
+( partition x1
+ ( subpartition x11 engine myisam values less than (0),
+ subpartition x12 engine myisam values less than (1)),
+ partition x2
+ ( subpartition x21 engine myisam values less than (0),
+ subpartition x22 engine myisam values less than (1))
+);
+
diff --git a/mysys/Makefile.am b/mysys/Makefile.am
index 9c58c18cf59..6b983dc38dd 100644
--- a/mysys/Makefile.am
+++ b/mysys/Makefile.am
@@ -43,7 +43,7 @@ libmysys_a_SOURCES = my_init.c my_getwd.c mf_getdate.c my_mmap.c \
mf_wcomp.c mf_wfile.c my_gethwaddr.c \
mf_qsort.c mf_qsort2.c mf_sort.c \
ptr_cmp.c mf_radix.c queues.c \
- tree.c list.c hash.c array.c string.c typelib.c \
+ tree.c trie.c list.c hash.c array.c string.c typelib.c \
my_copy.c my_append.c my_lib.c \
my_delete.c my_rename.c my_redel.c \
my_chsize.c my_lread.c my_lwrite.c my_clock.c \
@@ -82,6 +82,12 @@ FLAGS=$(DEFS) $(INCLUDES) $(CPPFLAGS) $(CFLAGS) @NOINST_LDFLAGS@
# which automaticly removes the object files you use to compile a final program
#
+test_bitmap$(EXEEXT): my_bitmap.c $(LIBRARIES)
+ $(LINK) $(FLAGS) -DMAIN ./my_bitmap.c $(LDADD) $(LIBS)
+
+test_priority_queue$(EXEEXT): queues.c $(LIBRARIES)
+ $(LINK) $(FLAGS) -DMAIN ./queues.c $(LDADD) $(LIBS)
+
test_thr_alarm$(EXEEXT): thr_alarm.c $(LIBRARIES)
$(CP) $(srcdir)/thr_alarm.c ./test_thr_alarm.c
$(LINK) $(FLAGS) -DMAIN ./test_thr_alarm.c $(LDADD) $(LIBS)
diff --git a/mysys/my_bitmap.c b/mysys/my_bitmap.c
index 4a917fc8287..3c25321b715 100644
--- a/mysys/my_bitmap.c
+++ b/mysys/my_bitmap.c
@@ -20,26 +20,70 @@
API limitations (or, rather asserted safety assumptions,
to encourage correct programming)
- * the size of the used bitmap is less than ~(uint) 0
- * it's a multiple of 8 (for efficiency reasons)
- * when arguments are a bitmap and a bit number, the number
- must be within bitmap size
- * bitmap_set_prefix() is an exception - one can use ~0 to set all bits
- * when both arguments are bitmaps, they must be of the same size
- * bitmap_intersect() is an exception :)
- (for for Bitmap::intersect(ulonglong map2buff))
-
- If THREAD is defined all bitmap operations except bitmap_init/bitmap_free
- are thread-safe.
+  * the internal storage is an array of 32 bit words
+  * the number of bits specified in creation can be any number > 0
+  * there are THREAD safe versions of most calls, named bitmap_lock_*;
+    many of these are not used and not normally compiled, but the code
+    for them already exists in an #ifdef:ed part. They can only be used
+    if thread_safe was specified in bitmap_init (requires THREAD)
TODO:
Make assembler THREAD safe versions of these using test-and-set instructions
+
+ Original version created by Sergei Golubchik 2001 - 2004.
+  A new version with a test program and some interface changes was written
+  by Mikael Ronström in 2005, with the assistance of Tomas Ulin and Mats
+  Kindahl.
*/
#include "mysys_priv.h"
#include <my_bitmap.h>
#include <m_string.h>
+void create_last_word_mask(MY_BITMAP *map)
+{
+ /* Get the number of used bits (1..8) in the last byte */
+ unsigned int const used= 1U + ((map->n_bits-1U) & 0x7U);
+
+ /*
+ * Create a mask with the upper 'unused' bits set and the lower 'used'
+   * bits clear. The bits within each byte are stored in big-endian order.
+ */
+ unsigned char const mask= (~((1 << used) - 1)) & 255;
+
+ /*
+    The leading bytes of the last word are set to zero since they represent
+    real bits in the bit vector. The trailing bytes are set to 0xFF since
+    they lie beyond the bit vector, and the byte on the boundary gets the
+    mask computed above.
+ */
+ unsigned char *ptr= (unsigned char*)&map->last_word_mask;
+
+ map->last_word_ptr= map->bitmap + no_words_in_map(map)-1;
+ switch (no_bytes_in_map(map)&3)
+ {
+ case 1:
+ map->last_word_mask= ~0U;
+ ptr[0]= mask;
+ return;
+
+ case 2:
+ map->last_word_mask= ~0U;
+ ptr[0]= 0;
+ ptr[1]= mask;
+ return;
+ case 3:
+ map->last_word_mask= 0U;
+ ptr[2]= mask;
+ ptr[3]= 0xFFU;
+ return;
+ case 0:
+ map->last_word_mask= 0U;
+ ptr[3]= mask;
+ return;
+ }
+}
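+
+/*
+  Worked example of the masking scheme above, assuming no_bytes_in_map()
+  and no_words_in_map() round n_bits up to whole bytes and 32 bit words:
+
+    n_bits = 35:  used = 1 + ((35-1) & 7) = 3
+                  mask = ~((1 << 3) - 1) & 255 = 0xF8
+                  no_bytes_in_map = 5, 5 & 3 = 1  ->  case 1
+                  last_word_ptr   = bitmap + 1 (second and last word)
+                  last_word_mask  = bytes {0xF8, 0xFF, 0xFF, 0xFF}
+
+  The three used bits (32..34) of the last word are clear in the mask and
+  the 29 padding bits are set, so OR-ing the mask into the last word sets
+  exactly the bits that are not part of the bitmap.
+*/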
+
static inline void bitmap_lock(MY_BITMAP *map)
{
#ifdef THREAD
@@ -57,29 +101,39 @@ static inline void bitmap_unlock(MY_BITMAP *map)
}
-my_bool bitmap_init(MY_BITMAP *map, uchar *buf, uint bitmap_size,
+my_bool bitmap_init(MY_BITMAP *map, uint32 *buf, uint n_bits,
my_bool thread_safe)
{
DBUG_ENTER("bitmap_init");
-
- DBUG_ASSERT((bitmap_size & 7) == 0);
- bitmap_size/=8;
- if (!(map->bitmap=buf) &&
- !(map->bitmap= (uchar*) my_malloc(bitmap_size +
- (thread_safe ?
- sizeof(pthread_mutex_t) : 0),
- MYF(MY_WME | MY_ZEROFILL))))
- return 1;
- map->bitmap_size=bitmap_size;
+ DBUG_ASSERT(n_bits > 0);
+ if (!buf)
+ {
+ uint size_in_bytes= ((n_bits+31)/32)*4
+#ifdef THREAD
+ +(thread_safe ? sizeof(pthread_mutex_t) : 0)
+#endif
+ ;
+ if (!(buf= (uint32*) my_malloc(size_in_bytes, MYF(MY_WME))))
+ return 1;
+ }
+#ifdef THREAD
+ else
+ DBUG_ASSERT(thread_safe == 0);
+#endif
#ifdef THREAD
if (thread_safe)
{
- map->mutex=(pthread_mutex_t *)(map->bitmap+bitmap_size);
+ map->mutex=(pthread_mutex_t *)buf;
pthread_mutex_init(map->mutex, MY_MUTEX_INIT_FAST);
+ buf+= sizeof(pthread_mutex_t)/4;
}
else
map->mutex=0;
#endif
+ map->bitmap= buf;
+ map->n_bits=n_bits;
+ create_last_word_mask(map);
+ bitmap_clear_all(map);
DBUG_RETURN(0);
}
@@ -90,25 +144,21 @@ void bitmap_free(MY_BITMAP *map)
if (map->bitmap)
{
#ifdef THREAD
- if (map->mutex)
+ char *buf= (char *)map->mutex;
+ if (buf)
pthread_mutex_destroy(map->mutex);
-#endif
+ else
+ buf=(char*) map->bitmap;
+ my_free(buf, MYF(0));
+#else
my_free((char*) map->bitmap, MYF(0));
+#endif
map->bitmap=0;
}
DBUG_VOID_RETURN;
}
-void bitmap_set_bit(MY_BITMAP *map, uint bitmap_bit)
-{
- DBUG_ASSERT(map->bitmap && bitmap_bit < map->bitmap_size*8);
- bitmap_lock(map);
- bitmap_fast_set_bit(map, bitmap_bit);
- bitmap_unlock(map);
-}
-
-
/*
test if bit already set and set it if it was not (thread unsafe method)
@@ -124,7 +174,7 @@ void bitmap_set_bit(MY_BITMAP *map, uint bitmap_bit)
my_bool bitmap_fast_test_and_set(MY_BITMAP *map, uint bitmap_bit)
{
- uchar *byte= map->bitmap + (bitmap_bit / 8);
+ uchar *byte= (uchar*)map->bitmap + (bitmap_bit / 8);
uchar bit= 1 << ((bitmap_bit) & 7);
uchar res= (*byte) & bit;
*byte|= bit;
@@ -148,7 +198,7 @@ my_bool bitmap_fast_test_and_set(MY_BITMAP *map, uint bitmap_bit)
my_bool bitmap_test_and_set(MY_BITMAP *map, uint bitmap_bit)
{
my_bool res;
- DBUG_ASSERT(map->bitmap && bitmap_bit < map->bitmap_size*8);
+ DBUG_ASSERT(map->bitmap && bitmap_bit < map->n_bits);
bitmap_lock(map);
res= bitmap_fast_test_and_set(map, bitmap_bit);
bitmap_unlock(map);
@@ -157,173 +207,113 @@ my_bool bitmap_test_and_set(MY_BITMAP *map, uint bitmap_bit)
uint bitmap_set_next(MY_BITMAP *map)
{
- uchar *bitmap=map->bitmap;
- uint bit_found = MY_BIT_NONE;
- uint bitmap_size=map->bitmap_size*8;
- uint i;
-
+ uint bit_found;
DBUG_ASSERT(map->bitmap);
- bitmap_lock(map);
- for (i=0; i < bitmap_size ; i++, bitmap++)
- {
- if (*bitmap != 0xff)
- { /* Found slot with free bit */
- uint b;
- for (b=0; ; b++)
- {
- if (!(*bitmap & (1 << b)))
- {
- *bitmap |= 1<<b;
- bit_found = (i*8)+b;
- break;
- }
- }
- break; /* Found bit */
- }
- }
- bitmap_unlock(map);
+ if ((bit_found= bitmap_get_first(map)) != MY_BIT_NONE)
+ bitmap_set_bit(map, bit_found);
return bit_found;
}
-void bitmap_clear_bit(MY_BITMAP *map, uint bitmap_bit)
-{
- DBUG_ASSERT(map->bitmap && bitmap_bit < map->bitmap_size*8);
- bitmap_lock(map);
- bitmap_fast_clear_bit(map, bitmap_bit);
- bitmap_unlock(map);
-}
-
-
void bitmap_set_prefix(MY_BITMAP *map, uint prefix_size)
{
- uint prefix_bytes, prefix_bits;
+ uint prefix_bytes, prefix_bits, d;
+ uchar *m= (uchar *)map->bitmap;
DBUG_ASSERT(map->bitmap &&
- (prefix_size <= map->bitmap_size*8 || prefix_size == (uint) ~0));
- bitmap_lock(map);
- set_if_smaller(prefix_size, map->bitmap_size*8);
+ (prefix_size <= map->n_bits || prefix_size == (uint) ~0));
+ set_if_smaller(prefix_size, map->n_bits);
if ((prefix_bytes= prefix_size / 8))
- memset(map->bitmap, 0xff, prefix_bytes);
+ memset(m, 0xff, prefix_bytes);
+ m+= prefix_bytes;
if ((prefix_bits= prefix_size & 7))
- map->bitmap[prefix_bytes++]= (1 << prefix_bits)-1;
- if (prefix_bytes < map->bitmap_size)
- bzero(map->bitmap+prefix_bytes, map->bitmap_size-prefix_bytes);
- bitmap_unlock(map);
-}
-
-
-void bitmap_clear_all(MY_BITMAP *map)
-{
- bitmap_set_prefix(map, 0);
-}
-
-
-void bitmap_set_all(MY_BITMAP *map)
-{
- bitmap_set_prefix(map, ~0);
+ *m++= (1 << prefix_bits)-1;
+ if ((d= no_bytes_in_map(map)-prefix_bytes))
+ bzero(m, d);
+ *map->last_word_ptr|= map->last_word_mask; /*Set last bits*/
}
my_bool bitmap_is_prefix(const MY_BITMAP *map, uint prefix_size)
{
- uint prefix_bits= prefix_size & 7, res= 0;
- uchar *m= map->bitmap, *end_prefix= map->bitmap+prefix_size/8,
- *end= map->bitmap+map->bitmap_size;
-
- DBUG_ASSERT(map->bitmap && prefix_size <= map->bitmap_size*8);
+ uint prefix_bits= prefix_size & 0x7, res;
+ uchar *m= (uchar*)map->bitmap;
+ uchar *end_prefix= m+prefix_size/8;
+ uchar *end;
+ DBUG_ASSERT(m && prefix_size <= map->n_bits);
+ end= m+no_bytes_in_map(map);
- bitmap_lock((MY_BITMAP *)map);
while (m < end_prefix)
if (*m++ != 0xff)
- goto ret;
+ return 0;
+ *map->last_word_ptr^= map->last_word_mask; /*Clear bits*/
+ res= 0;
if (prefix_bits && *m++ != (1 << prefix_bits)-1)
goto ret;
while (m < end)
if (*m++ != 0)
goto ret;
-
- res=1;
+ res= 1;
ret:
- bitmap_unlock((MY_BITMAP *)map);
- return res;
+ *map->last_word_ptr|= map->last_word_mask; /*Set bits again*/
+ return res;
}
-my_bool bitmap_is_clear_all(const MY_BITMAP *map)
-{
- return bitmap_is_prefix(map, 0);
-}
-
my_bool bitmap_is_set_all(const MY_BITMAP *map)
{
- return bitmap_is_prefix(map, map->bitmap_size*8);
+ uint32 *data_ptr= map->bitmap;
+ uint32 *end= map->last_word_ptr;
+ for (; data_ptr <= end; data_ptr++)
+ if (*data_ptr != 0xFFFFFFFF)
+ return FALSE;
+ return TRUE;
}
-my_bool bitmap_is_set(const MY_BITMAP *map, uint bitmap_bit)
+my_bool bitmap_is_clear_all(const MY_BITMAP *map)
{
- DBUG_ASSERT(map->bitmap && bitmap_bit < map->bitmap_size*8);
- return bitmap_fast_is_set(map, bitmap_bit);
+ uint32 *data_ptr= map->bitmap;
+ uint32 *end;
+ if (*map->last_word_ptr != map->last_word_mask)
+ return FALSE;
+ end= map->last_word_ptr;
+ for (; data_ptr < end; data_ptr++)
+ if (*data_ptr)
+ return FALSE;
+ return TRUE;
}
my_bool bitmap_is_subset(const MY_BITMAP *map1, const MY_BITMAP *map2)
{
- uint res=0;
- uchar *m1=map1->bitmap, *m2=map2->bitmap, *end;
+ uint32 *m1= map1->bitmap, *m2= map2->bitmap, *end;
DBUG_ASSERT(map1->bitmap && map2->bitmap &&
- map1->bitmap_size==map2->bitmap_size);
- bitmap_lock((MY_BITMAP *)map1);
- bitmap_lock((MY_BITMAP *)map2);
+ map1->n_bits==map2->n_bits);
- end= m1+map1->bitmap_size;
+ end= map1->last_word_ptr;
- while (m1 < end)
+ while (m1 <= end)
{
if ((*m1++) & ~(*m2++))
- goto ret;
+ return 0;
}
-
- res=1;
-ret:
- bitmap_unlock((MY_BITMAP *)map2);
- bitmap_unlock((MY_BITMAP *)map1);
- return res;
-}
-
-
-my_bool bitmap_cmp(const MY_BITMAP *map1, const MY_BITMAP *map2)
-{
- uint res;
-
- DBUG_ASSERT(map1->bitmap && map2->bitmap &&
- map1->bitmap_size==map2->bitmap_size);
- bitmap_lock((MY_BITMAP *)map1);
- bitmap_lock((MY_BITMAP *)map2);
-
- res= memcmp(map1->bitmap, map2->bitmap, map1->bitmap_size)==0;
-
- bitmap_unlock((MY_BITMAP *)map2);
- bitmap_unlock((MY_BITMAP *)map1);
- return res;
+ return 1;
}
void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2)
{
- uchar *to=map->bitmap, *from=map2->bitmap, *end;
- uint len=map->bitmap_size, len2=map2->bitmap_size;
+ uint32 *to= map->bitmap, *from= map2->bitmap, *end;
+ uint len= no_words_in_map(map), len2 = no_words_in_map(map2);
DBUG_ASSERT(map->bitmap && map2->bitmap);
- bitmap_lock(map);
- bitmap_lock((MY_BITMAP *)map2);
end= to+min(len,len2);
-
+ *map2->last_word_ptr^= map2->last_word_mask; /*Clear last bits in map2*/
while (to < end)
*to++ &= *from++;
@@ -333,9 +323,8 @@ void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2)
while (to < end)
*to++=0;
}
-
- bitmap_unlock((MY_BITMAP *)map2);
- bitmap_unlock(map);
+  *map2->last_word_ptr|= map2->last_word_mask; /* Set last bits in map2 again */
+  *map->last_word_ptr|= map->last_word_mask;   /* Set last bits in map */
}
@@ -362,47 +351,291 @@ void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2)
void bitmap_set_above(MY_BITMAP *map, uint from_byte, uint use_bit)
{
uchar use_byte= use_bit ? 0xff : 0;
- uchar *to= map->bitmap + from_byte;
- uchar *end= map->bitmap + map->bitmap_size;
+ uchar *to= (uchar *)map->bitmap + from_byte;
+ uchar *end= (uchar *)map->bitmap + (map->n_bits+7)/8;
while (to < end)
*to++= use_byte;
+ *map->last_word_ptr|= map->last_word_mask; /*Set last bits again*/
}
void bitmap_subtract(MY_BITMAP *map, const MY_BITMAP *map2)
{
- uchar *to=map->bitmap, *from=map2->bitmap, *end;
+ uint32 *to= map->bitmap, *from= map2->bitmap, *end;
+ DBUG_ASSERT(map->bitmap && map2->bitmap &&
+ map->n_bits==map2->n_bits);
+
+ end= map->last_word_ptr;
+
+ while (to <= end)
+ *to++ &= ~(*from++);
+ *map->last_word_ptr|= map->last_word_mask; /*Set last bits again*/
+}
+
+
+void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2)
+{
+ uint32 *to= map->bitmap, *from= map2->bitmap, *end;
+
+ DBUG_ASSERT(map->bitmap && map2->bitmap &&
+ map->n_bits==map2->n_bits);
+ end= map->last_word_ptr;
+ while (to <= end)
+ *to++ |= *from++;
+}
+
+
+void bitmap_xor(MY_BITMAP *map, const MY_BITMAP *map2)
+{
+ uint32 *to= map->bitmap, *from= map2->bitmap, *end= map->last_word_ptr;
DBUG_ASSERT(map->bitmap && map2->bitmap &&
- map->bitmap_size==map2->bitmap_size);
+ map->n_bits==map2->n_bits);
+ while (to <= end)
+ *to++ ^= *from++;
+ *map->last_word_ptr|= map->last_word_mask; /*Set last bits again*/
+}
+
+
+void bitmap_invert(MY_BITMAP *map)
+{
+ uint32 *to= map->bitmap, *end;
+
+ DBUG_ASSERT(map->bitmap);
+ end= map->last_word_ptr;
+
+ while (to <= end)
+ *to++ ^= 0xFFFFFFFF;
+ *map->last_word_ptr|= map->last_word_mask; /*Set last bits again*/
+}
+
+
+uint bitmap_bits_set(const MY_BITMAP *map)
+{
+ uchar *m= (uchar*)map->bitmap;
+ uchar *end= m + no_bytes_in_map(map);
+ uint res= 0;
+
+ DBUG_ASSERT(map->bitmap);
+ *map->last_word_ptr^=map->last_word_mask; /*Reset last bits to zero*/
+ while (m < end)
+ res+= my_count_bits_ushort(*m++);
+ *map->last_word_ptr^=map->last_word_mask; /*Set last bits to one again*/
+ return res;
+}
+
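+/*
+  Return the index of the first set bit, or MY_BIT_NONE if no bit is set.
+  The scan narrows word by word, then byte by byte, then bit by bit.
+  Because the unused bits of the last word are kept set, the first set
+  padding bit is bit n_bits, which is translated to MY_BIT_NONE below.
+*/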
+uint bitmap_get_first_set(const MY_BITMAP *map)
+{
+ uchar *byte_ptr;
+ uint bit_found,i,j,k;
+ uint32 *data_ptr, *end= map->last_word_ptr;
+
+ DBUG_ASSERT(map->bitmap);
+ data_ptr= map->bitmap;
+ for (i=0; data_ptr <= end; data_ptr++, i++)
+ {
+ if (*data_ptr)
+ {
+ byte_ptr= (uchar*)data_ptr;
+ for (j=0; ; j++, byte_ptr++)
+ {
+ if (*byte_ptr)
+ {
+ for (k=0; ; k++)
+ {
+ if (*byte_ptr & (1 << k))
+ {
+ bit_found= (i*32) + (j*8) + k;
+ if (bit_found == map->n_bits)
+ return MY_BIT_NONE;
+ return bit_found;
+ }
+ }
+          DBUG_ASSERT(0);    /* Not reached: a non-zero byte has a set bit */
+        }
+      }
+      DBUG_ASSERT(0);        /* Not reached: a non-zero word has a set bit */
+ }
+ }
+ return MY_BIT_NONE;
+}
+
+
+uint bitmap_get_first(const MY_BITMAP *map)
+{
+ uchar *byte_ptr;
+ uint bit_found= MY_BIT_NONE, i,j,k;
+ uint32 *data_ptr, *end= map->last_word_ptr;
+
+ DBUG_ASSERT(map->bitmap);
+ data_ptr= map->bitmap;
+ for (i=0; data_ptr <= end; data_ptr++, i++)
+ {
+ if (*data_ptr != 0xFFFFFFFF)
+ {
+ byte_ptr= (uchar*)data_ptr;
+ for (j=0; ; j++, byte_ptr++)
+ {
+ if (*byte_ptr != 0xFF)
+ {
+ for (k=0; ; k++)
+ {
+ if (!(*byte_ptr & (1 << k)))
+ {
+ bit_found= (i*32) + (j*8) + k;
+ if (bit_found == map->n_bits)
+ return MY_BIT_NONE;
+ return bit_found;
+ }
+ }
+          DBUG_ASSERT(0);    /* Not reached: a byte != 0xFF has a clear bit */
+        }
+      }
+      DBUG_ASSERT(0);        /* Not reached: a word != 0xFFFFFFFF has a clear bit */
+ }
+ }
+ return MY_BIT_NONE;
+}
+
+
+uint bitmap_lock_set_next(MY_BITMAP *map)
+{
+ uint bit_found;
bitmap_lock(map);
- bitmap_lock((MY_BITMAP *)map2);
+ bit_found= bitmap_set_next(map);
+ bitmap_unlock(map);
+ return bit_found;
+}
- end= to+map->bitmap_size;
- while (to < end)
- *to++ &= ~(*from++);
+void bitmap_lock_clear_bit(MY_BITMAP *map, uint bitmap_bit)
+{
+ bitmap_lock(map);
+ DBUG_ASSERT(map->bitmap && bitmap_bit < map->n_bits);
+ bitmap_clear_bit(map, bitmap_bit);
+ bitmap_unlock(map);
+}
- bitmap_unlock((MY_BITMAP *)map2);
+
+#ifdef NOT_USED
+my_bool bitmap_lock_is_prefix(const MY_BITMAP *map, uint prefix_size)
+{
+ my_bool res;
+ bitmap_lock((MY_BITMAP *)map);
+ res= bitmap_is_prefix(map, prefix_size);
+ bitmap_unlock((MY_BITMAP *)map);
+ return res;
+}
+
+
+void bitmap_lock_set_all(MY_BITMAP *map)
+{
+ bitmap_lock(map);
+ bitmap_set_all(map);
bitmap_unlock(map);
}
-void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2)
+void bitmap_lock_clear_all(MY_BITMAP *map)
{
- uchar *to=map->bitmap, *from=map2->bitmap, *end;
+ bitmap_lock(map);
+ bitmap_clear_all(map);
+ bitmap_unlock(map);
+}
- DBUG_ASSERT(map->bitmap && map2->bitmap &&
- map->bitmap_size==map2->bitmap_size);
+
+void bitmap_lock_set_prefix(MY_BITMAP *map, uint prefix_size)
+{
bitmap_lock(map);
+ bitmap_set_prefix(map, prefix_size);
+ bitmap_unlock(map);
+}
+
+
+my_bool bitmap_lock_is_clear_all(const MY_BITMAP *map)
+{
+ uint res;
+ bitmap_lock((MY_BITMAP *)map);
+ res= bitmap_is_clear_all(map);
+ bitmap_unlock((MY_BITMAP *)map);
+ return res;
+}
+
+
+my_bool bitmap_lock_is_set_all(const MY_BITMAP *map)
+{
+ uint res;
+ bitmap_lock((MY_BITMAP *)map);
+ res= bitmap_is_set_all(map);
+ bitmap_unlock((MY_BITMAP *)map);
+ return res;
+}
+
+
+my_bool bitmap_lock_is_set(const MY_BITMAP *map, uint bitmap_bit)
+{
+ my_bool res;
+ DBUG_ASSERT(map->bitmap && bitmap_bit < map->n_bits);
+ bitmap_lock((MY_BITMAP *)map);
+ res= bitmap_is_set(map, bitmap_bit);
+ bitmap_unlock((MY_BITMAP *)map);
+ return res;
+}
+
+
+my_bool bitmap_lock_is_subset(const MY_BITMAP *map1, const MY_BITMAP *map2)
+{
+ uint res;
+ bitmap_lock((MY_BITMAP *)map1);
bitmap_lock((MY_BITMAP *)map2);
+ res= bitmap_is_subset(map1, map2);
+ bitmap_unlock((MY_BITMAP *)map2);
+ bitmap_unlock((MY_BITMAP *)map1);
+ return res;
+}
- end= to+map->bitmap_size;
- while (to < end)
- *to++ |= *from++;
+my_bool bitmap_lock_cmp(const MY_BITMAP *map1, const MY_BITMAP *map2)
+{
+ uint res;
+
+ DBUG_ASSERT(map1->bitmap && map2->bitmap &&
+ map1->n_bits==map2->n_bits);
+ bitmap_lock((MY_BITMAP *)map1);
+ bitmap_lock((MY_BITMAP *)map2);
+ res= bitmap_cmp(map1, map2);
+ bitmap_unlock((MY_BITMAP *)map2);
+ bitmap_unlock((MY_BITMAP *)map1);
+ return res;
+}
+
+
+void bitmap_lock_intersect(MY_BITMAP *map, const MY_BITMAP *map2)
+{
+ bitmap_lock(map);
+ bitmap_lock((MY_BITMAP *)map2);
+ bitmap_intersect(map, map2);
+ bitmap_unlock((MY_BITMAP *)map2);
+ bitmap_unlock(map);
+}
+
+void bitmap_lock_subtract(MY_BITMAP *map, const MY_BITMAP *map2)
+{
+ bitmap_lock(map);
+ bitmap_lock((MY_BITMAP *)map2);
+ bitmap_subtract(map, map2);
+ bitmap_unlock((MY_BITMAP *)map2);
+ bitmap_unlock(map);
+}
+
+
+void bitmap_lock_union(MY_BITMAP *map, const MY_BITMAP *map2)
+{
+ bitmap_lock(map);
+ bitmap_lock((MY_BITMAP *)map2);
+ bitmap_union(map, map2);
bitmap_unlock((MY_BITMAP *)map2);
bitmap_unlock(map);
}
@@ -415,19 +648,12 @@ void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2)
RETURN
Number of set bits in the bitmap.
*/
-
-uint bitmap_bits_set(const MY_BITMAP *map)
-{
- uchar *m= map->bitmap;
- uchar *end= m + map->bitmap_size;
- uint res= 0;
-
- DBUG_ASSERT(map->bitmap);
+uint bitmap_lock_bits_set(const MY_BITMAP *map)
+{
+ uint res;
bitmap_lock((MY_BITMAP *)map);
- while (m < end)
- {
- res+= my_count_bits_ushort(*m++);
- }
+ DBUG_ASSERT(map->bitmap);
+ res= bitmap_bits_set(map);
bitmap_unlock((MY_BITMAP *)map);
return res;
}
@@ -440,33 +666,421 @@ uint bitmap_bits_set(const MY_BITMAP *map)
RETURN
Number of first unset bit in the bitmap or MY_BIT_NONE if all bits are set.
*/
+uint bitmap_lock_get_first(const MY_BITMAP *map)
+{
+ uint res;
+ bitmap_lock((MY_BITMAP*)map);
+ res= bitmap_get_first(map);
+ bitmap_unlock((MY_BITMAP*)map);
+ return res;
+}
-uint bitmap_get_first(const MY_BITMAP *map)
+
+uint bitmap_lock_get_first_set(const MY_BITMAP *map)
+{
+ uint res;
+ bitmap_lock((MY_BITMAP*)map);
+ res= bitmap_get_first_set(map);
+ bitmap_unlock((MY_BITMAP*)map);
+ return res;
+}
+
+
+void bitmap_lock_set_bit(MY_BITMAP *map, uint bitmap_bit)
+{
+ DBUG_ASSERT(map->bitmap && bitmap_bit < map->n_bits);
+ bitmap_lock(map);
+ bitmap_set_bit(map, bitmap_bit);
+ bitmap_unlock(map);
+}
+
+
+void bitmap_lock_flip_bit(MY_BITMAP *map, uint bitmap_bit)
+{
+ DBUG_ASSERT(map->bitmap && bitmap_bit < map->n_bits);
+ bitmap_lock(map);
+ bitmap_flip_bit(map, bitmap_bit);
+ bitmap_unlock(map);
+}
+#endif
+#ifdef MAIN
+
+static void bitmap_print(MY_BITMAP *map)
+{
+ uint32 *to= map->bitmap, *end= map->last_word_ptr;
+ while (to <= end)
+ {
+ fprintf(stderr,"0x%x ", *to++);
+ }
+ fprintf(stderr,"\n");
+}
+
+uint get_rand_bit(uint bitsize)
+{
+ return (rand() % bitsize);
+}
+
+bool test_set_get_clear_bit(MY_BITMAP *map, uint bitsize)
+{
+ uint i, test_bit;
+ uint no_loops= bitsize > 128 ? 128 : bitsize;
+ for (i=0; i < no_loops; i++)
+ {
+ test_bit= get_rand_bit(bitsize);
+ bitmap_set_bit(map, test_bit);
+ if (!bitmap_is_set(map, test_bit))
+ goto error1;
+ bitmap_clear_bit(map, test_bit);
+ if (bitmap_is_set(map, test_bit))
+ goto error2;
+ }
+ return FALSE;
+error1:
+ printf("Error in set bit, bit %u, bitsize = %u", test_bit, bitsize);
+ return TRUE;
+error2:
+ printf("Error in clear bit, bit %u, bitsize = %u", test_bit, bitsize);
+ return TRUE;
+}
+
+bool test_flip_bit(MY_BITMAP *map, uint bitsize)
+{
+ uint i, test_bit;
+ uint no_loops= bitsize > 128 ? 128 : bitsize;
+ for (i=0; i < no_loops; i++)
+ {
+ test_bit= get_rand_bit(bitsize);
+ bitmap_flip_bit(map, test_bit);
+ if (!bitmap_is_set(map, test_bit))
+ goto error1;
+ bitmap_flip_bit(map, test_bit);
+ if (bitmap_is_set(map, test_bit))
+ goto error2;
+ }
+ return FALSE;
+error1:
+ printf("Error in flip bit 1, bit %u, bitsize = %u", test_bit, bitsize);
+ return TRUE;
+error2:
+ printf("Error in flip bit 2, bit %u, bitsize = %u", test_bit, bitsize);
+ return TRUE;
+}
+
+bool test_operators(MY_BITMAP *map, uint bitsize)
+{
+ return FALSE;
+}
+
+bool test_get_all_bits(MY_BITMAP *map, uint bitsize)
{
- uchar *bitmap=map->bitmap;
- uint bit_found = MY_BIT_NONE;
- uint bitmap_size=map->bitmap_size*8;
uint i;
+ bitmap_set_all(map);
+ if (!bitmap_is_set_all(map))
+ goto error1;
+ if (!bitmap_is_prefix(map, bitsize))
+ goto error5;
+ bitmap_clear_all(map);
+ if (!bitmap_is_clear_all(map))
+ goto error2;
+ if (!bitmap_is_prefix(map, 0))
+ goto error6;
+ for (i=0; i<bitsize;i++)
+ bitmap_set_bit(map, i);
+ if (!bitmap_is_set_all(map))
+ goto error3;
+ for (i=0; i<bitsize;i++)
+ bitmap_clear_bit(map, i);
+ if (!bitmap_is_clear_all(map))
+ goto error4;
+ return FALSE;
+error1:
+ printf("Error in set_all, bitsize = %u", bitsize);
+ return TRUE;
+error2:
+ printf("Error in clear_all, bitsize = %u", bitsize);
+ return TRUE;
+error3:
+ printf("Error in bitmap_is_set_all, bitsize = %u", bitsize);
+ return TRUE;
+error4:
+ printf("Error in bitmap_is_clear_all, bitsize = %u", bitsize);
+ return TRUE;
+error5:
+ printf("Error in set_all through set_prefix, bitsize = %u", bitsize);
+ return TRUE;
+error6:
+ printf("Error in clear_all through set_prefix, bitsize = %u", bitsize);
+ return TRUE;
+}
- DBUG_ASSERT(map->bitmap);
- bitmap_lock((MY_BITMAP *)map);
- for (i=0; i < bitmap_size ; i++, bitmap++)
+bool test_compare_operators(MY_BITMAP *map, uint bitsize)
+{
+ uint i, j, test_bit1, test_bit2, test_bit3,test_bit4;
+ uint no_loops= bitsize > 128 ? 128 : bitsize;
+ MY_BITMAP map2_obj, map3_obj;
+ MY_BITMAP *map2= &map2_obj, *map3= &map3_obj;
+ uint32 map2buf[1024];
+ uint32 map3buf[1024];
+ bitmap_init(&map2_obj, map2buf, bitsize, FALSE);
+ bitmap_init(&map3_obj, map3buf, bitsize, FALSE);
+ bitmap_clear_all(map2);
+ bitmap_clear_all(map3);
+ for (i=0; i < no_loops; i++)
{
- if (*bitmap != 0xff)
- { /* Found slot with free bit */
- uint b;
- for (b=0; ; b++)
- {
- if (!(*bitmap & (1 << b)))
- {
- bit_found = (i*8)+b;
- break;
- }
- }
- break; /* Found bit */
+ test_bit1=get_rand_bit(bitsize);
+ bitmap_set_prefix(map, test_bit1);
+ test_bit2=get_rand_bit(bitsize);
+ bitmap_set_prefix(map2, test_bit2);
+ bitmap_intersect(map, map2);
+ test_bit3= test_bit2 < test_bit1 ? test_bit2 : test_bit1;
+ bitmap_set_prefix(map3, test_bit3);
+ if (!bitmap_cmp(map, map3))
+ goto error1;
+ bitmap_clear_all(map);
+ bitmap_clear_all(map2);
+ bitmap_clear_all(map3);
+ test_bit1=get_rand_bit(bitsize);
+ test_bit2=get_rand_bit(bitsize);
+ test_bit3=get_rand_bit(bitsize);
+ bitmap_set_prefix(map, test_bit1);
+ bitmap_set_prefix(map2, test_bit2);
+ test_bit3= test_bit2 > test_bit1 ? test_bit2 : test_bit1;
+ bitmap_set_prefix(map3, test_bit3);
+ bitmap_union(map, map2);
+ if (!bitmap_cmp(map, map3))
+ goto error2;
+ bitmap_clear_all(map);
+ bitmap_clear_all(map2);
+ bitmap_clear_all(map3);
+ test_bit1=get_rand_bit(bitsize);
+ test_bit2=get_rand_bit(bitsize);
+ test_bit3=get_rand_bit(bitsize);
+ bitmap_set_prefix(map, test_bit1);
+ bitmap_set_prefix(map2, test_bit2);
+ bitmap_xor(map, map2);
+ test_bit3= test_bit2 > test_bit1 ? test_bit2 : test_bit1;
+ test_bit4= test_bit2 < test_bit1 ? test_bit2 : test_bit1;
+ bitmap_set_prefix(map3, test_bit3);
+ for (j=0; j < test_bit4; j++)
+ bitmap_clear_bit(map3, j);
+ if (!bitmap_cmp(map, map3))
+ goto error3;
+ bitmap_clear_all(map);
+ bitmap_clear_all(map2);
+ bitmap_clear_all(map3);
+ test_bit1=get_rand_bit(bitsize);
+ test_bit2=get_rand_bit(bitsize);
+ test_bit3=get_rand_bit(bitsize);
+ bitmap_set_prefix(map, test_bit1);
+ bitmap_set_prefix(map2, test_bit2);
+ bitmap_subtract(map, map2);
+ if (test_bit2 < test_bit1)
+ {
+ bitmap_set_prefix(map3, test_bit1);
+ for (j=0; j < test_bit2; j++)
+ bitmap_clear_bit(map3, j);
}
+ if (!bitmap_cmp(map, map3))
+ goto error4;
+ bitmap_clear_all(map);
+ bitmap_clear_all(map2);
+ bitmap_clear_all(map3);
+ test_bit1=get_rand_bit(bitsize);
+ bitmap_set_prefix(map, test_bit1);
+ bitmap_invert(map);
+ bitmap_set_all(map3);
+ for (j=0; j < test_bit1; j++)
+ bitmap_clear_bit(map3, j);
+ if (!bitmap_cmp(map, map3))
+ goto error5;
+ bitmap_clear_all(map);
+ bitmap_clear_all(map3);
}
- bitmap_unlock((MY_BITMAP *)map);
- return bit_found;
+ return FALSE;
+error1:
+ printf("intersect error bitsize=%u,size1=%u,size2=%u", bitsize,
+ test_bit1,test_bit2);
+ return TRUE;
+error2:
+ printf("union error bitsize=%u,size1=%u,size2=%u", bitsize,
+ test_bit1,test_bit2);
+ return TRUE;
+error3:
+ printf("xor error bitsize=%u,size1=%u,size2=%u", bitsize,
+ test_bit1,test_bit2);
+ return TRUE;
+error4:
+ printf("subtract error bitsize=%u,size1=%u,size2=%u", bitsize,
+ test_bit1,test_bit2);
+ return TRUE;
+error5:
+ printf("invert error bitsize=%u,size=%u", bitsize,
+ test_bit1);
+ return TRUE;
+}
+
+bool test_count_bits_set(MY_BITMAP *map, uint bitsize)
+{
+ uint i, bit_count=0, test_bit;
+ uint no_loops= bitsize > 128 ? 128 : bitsize;
+ for (i=0; i < no_loops; i++)
+ {
+ test_bit=get_rand_bit(bitsize);
+ if (!bitmap_is_set(map, test_bit))
+ {
+ bitmap_set_bit(map, test_bit);
+ bit_count++;
+ }
+ }
+ if (bit_count==0 && bitsize > 0)
+ goto error1;
+ if (bitmap_bits_set(map) != bit_count)
+ goto error2;
+ return FALSE;
+error1:
+ printf("No bits set bitsize = %u", bitsize);
+ return TRUE;
+error2:
+ printf("Wrong count of bits set, bitsize = %u", bitsize);
+ return TRUE;
+}
+
+bool test_get_first_bit(MY_BITMAP *map, uint bitsize)
+{
+ uint i, j, test_bit;
+ uint no_loops= bitsize > 128 ? 128 : bitsize;
+ for (i=0; i < no_loops; i++)
+ {
+ test_bit=get_rand_bit(bitsize);
+ bitmap_set_bit(map, test_bit);
+ if (bitmap_get_first_set(map) != test_bit)
+ goto error1;
+ bitmap_set_all(map);
+ bitmap_clear_bit(map, test_bit);
+ if (bitmap_get_first(map) != test_bit)
+ goto error2;
+ bitmap_clear_all(map);
+ }
+ return FALSE;
+error1:
+ printf("get_first_set error bitsize=%u,prefix_size=%u",bitsize,test_bit);
+ return TRUE;
+error2:
+ printf("get_first error bitsize= %u, prefix_size= %u",bitsize,test_bit);
+ return TRUE;
+}
+
+bool test_get_next_bit(MY_BITMAP *map, uint bitsize)
+{
+ uint i, j, test_bit;
+ uint no_loops= bitsize > 128 ? 128 : bitsize;
+ for (i=0; i < no_loops; i++)
+ {
+ test_bit=get_rand_bit(bitsize);
+ for (j=0; j < test_bit; j++)
+ bitmap_set_next(map);
+ if (!bitmap_is_prefix(map, test_bit))
+ goto error1;
+ bitmap_clear_all(map);
+ }
+ return FALSE;
+error1:
+ printf("get_next error bitsize= %u, prefix_size= %u", bitsize,test_bit);
+ return TRUE;
+}
+
+bool test_prefix(MY_BITMAP *map, uint bitsize)
+{
+ uint i, j, test_bit;
+ uint no_loops= bitsize > 128 ? 128 : bitsize;
+ for (i=0; i < no_loops; i++)
+ {
+ test_bit=get_rand_bit(bitsize);
+ bitmap_set_prefix(map, test_bit);
+ if (!bitmap_is_prefix(map, test_bit))
+ goto error1;
+ bitmap_clear_all(map);
+ for (j=0; j < test_bit; j++)
+ bitmap_set_bit(map, j);
+ if (!bitmap_is_prefix(map, test_bit))
+ goto error2;
+ bitmap_set_all(map);
+ for (j=bitsize - 1; ~(j-test_bit); j--)
+ bitmap_clear_bit(map, j);
+ if (!bitmap_is_prefix(map, test_bit))
+ goto error3;
+ bitmap_clear_all(map);
+ }
+ return FALSE;
+error1:
+ printf("prefix1 error bitsize = %u, prefix_size = %u", bitsize,test_bit);
+ return TRUE;
+error2:
+ printf("prefix2 error bitsize = %u, prefix_size = %u", bitsize,test_bit);
+ return TRUE;
+error3:
+ printf("prefix3 error bitsize = %u, prefix_size = %u", bitsize,test_bit);
+ return TRUE;
}
+
+bool do_test(uint bitsize)
+{
+ MY_BITMAP map;
+ uint32 buf[1024];
+ if (bitmap_init(&map, buf, bitsize, FALSE))
+ {
+ printf("init error for bitsize %d", bitsize);
+ goto error;
+ }
+ if (test_set_get_clear_bit(&map,bitsize))
+ goto error;
+ bitmap_clear_all(&map);
+ if (test_flip_bit(&map,bitsize))
+ goto error;
+ bitmap_clear_all(&map);
+ if (test_operators(&map,bitsize))
+ goto error;
+ bitmap_clear_all(&map);
+ if (test_get_all_bits(&map, bitsize))
+ goto error;
+ bitmap_clear_all(&map);
+ if (test_compare_operators(&map,bitsize))
+ goto error;
+ bitmap_clear_all(&map);
+ if (test_count_bits_set(&map,bitsize))
+ goto error;
+ bitmap_clear_all(&map);
+ if (test_get_first_bit(&map,bitsize))
+ goto error;
+ bitmap_clear_all(&map);
+ if (test_get_next_bit(&map,bitsize))
+ goto error;
+ if (test_prefix(&map,bitsize))
+ goto error;
+ return FALSE;
+error:
+ printf("\n");
+ return TRUE;
+}
+
+int main()
+{
+ int i;
+ for (i= 1; i < 4096; i++)
+ {
+ printf("Start test for bitsize=%u\n",i);
+ if (do_test(i))
+ return -1;
+ }
+ printf("OK\n");
+ return 0;
+}
+
+/*
+ In directory mysys:
+ make test_bitmap
+ will build the bitmap tests and ./test_bitmap will execute it
+*/
+
+#endif
diff --git a/mysys/queues.c b/mysys/queues.c
index ecf1058af41..0e4e251f7e7 100644
--- a/mysys/queues.c
+++ b/mysys/queues.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000 MySQL AB
+/* Copyright (C) 2000, 2005 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -17,6 +17,10 @@
/*
Code for generell handling of priority Queues.
Implemention of queues from "Algoritms in C" by Robert Sedgewick.
+ An optimisation of _downheap suggested in Exercise 7.51 in "Data
+ Structures & Algorithms in C++" by Mark Allen Weiss, Second Edition
+ was implemented by Mikael Ronström 2005. Also the O(N) algorithm
+ of queue_fix was implemented.
*/
#include "mysys_priv.h"
@@ -214,8 +218,64 @@ void queue_replaced(QUEUE *queue)
}
#endif
- /* Fix heap when index have changed */
+#ifndef OLD_VERSION
+
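+/*
+  Optimised version of _downheap, following the idea in Exercise 7.51 of
+  Weiss: except for the first level, the hole is moved all the way down
+  towards the leaves without comparing the displaced element at each
+  level, and the element is then bubbled up again from the bottom.  This
+  can save a substantial share of the key comparisons.
+*/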
+void _downheap(register QUEUE *queue, uint idx)
+{
+ byte *element;
+ uint elements,half_queue,offset_to_key, next_index;
+ bool first= TRUE;
+ uint start_idx= idx;
+
+ offset_to_key=queue->offset_to_key;
+ element=queue->root[idx];
+ half_queue=(elements=queue->elements) >> 1;
+
+ while (idx <= half_queue)
+ {
+ int cmp;
+ next_index=idx+idx;
+ if (next_index < elements &&
+ (queue->compare(queue->first_cmp_arg,
+ queue->root[next_index]+offset_to_key,
+ queue->root[next_index+1]+offset_to_key) ^
+ queue->max_at_top) > 0)
+ next_index++;
+ if (first &&
+ (((cmp=queue->compare(queue->first_cmp_arg,
+ queue->root[next_index]+offset_to_key,
+ element+offset_to_key)) == 0) ||
+ ((cmp ^ queue->max_at_top) > 0)))
+ {
+ queue->root[idx]= element;
+ return;
+ }
+ queue->root[idx]=queue->root[next_index];
+ idx=next_index;
+ first= FALSE;
+ }
+
+ next_index= idx >> 1;
+ while (next_index > start_idx)
+ {
+ if ((queue->compare(queue->first_cmp_arg,
+ queue->root[next_index]+offset_to_key,
+ element+offset_to_key) ^
+ queue->max_at_top) < 0)
+ break;
+ queue->root[idx]=queue->root[next_index];
+ idx=next_index;
+ next_index= idx >> 1;
+ }
+ queue->root[idx]=element;
+}
+#else
+ /*
+    The old _downheap version is kept for comparisons with the benchmark
+    suite or any new benchmarks anyone wants to run.
+ */
+ /* Fix heap when index have changed */
void _downheap(register QUEUE *queue, uint idx)
{
byte *element;
@@ -247,20 +307,336 @@ void _downheap(register QUEUE *queue, uint idx)
}
-static int queue_fix_cmp(QUEUE *queue, void **a, void **b)
-{
- return queue->compare(queue->first_cmp_arg,
- (byte*) (*a)+queue->offset_to_key,
- (byte*) (*b)+queue->offset_to_key);
-}
+#endif
/*
- Fix heap when every element was changed,
- actually, it can be done better, in linear time, not in n*log(n)
+ Fix heap when every element was changed.
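+  Uses bottom-up (Floyd) heap construction: _downheap is applied to every
+  non-leaf position from the middle of the array down to the root, which
+  is O(N) in total (the per-level work is the number of nodes times their
+  height, and that sum is linear), unlike the O(N*log N) qsort2() based
+  version it replaces.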
*/
void queue_fix(QUEUE *queue)
{
- qsort2(queue->root+1,queue->elements, sizeof(void *),
- (qsort2_cmp)queue_fix_cmp, queue);
+ uint i;
+ for (i= queue->elements >> 1; i > 0; i--)
+ _downheap(queue, i);
+}
+
+#ifdef MAIN
+ /*
+ A test program for the priority queue implementation.
+ It can also be used to benchmark changes of the implementation
+ Build by doing the following in the directory mysys
+ make test_priority_queue
+ ./test_priority_queue
+
+ Written by Mikael Ronström, 2005
+ */
+
+static uint num_array[1025];
+static uint tot_no_parts= 0;
+static uint tot_no_loops= 0;
+static uint expected_part= 0;
+static uint expected_num= 0;
+static bool max_ind= 0;
+static bool fix_used= 0;
+static ulonglong start_time= 0;
+
+static bool is_divisible_by(uint num, uint divisor)
+{
+ uint quotient= num / divisor;
+ if (quotient * divisor == num)
+ return TRUE;
+ return FALSE;
+}
+
+void calculate_next()
+{
+ uint part= expected_part, num= expected_num;
+ uint no_parts= tot_no_parts;
+ if (max_ind)
+ {
+ do
+ {
+ while (++part <= no_parts)
+ {
+ if (is_divisible_by(num, part) &&
+ (num <= ((1 << 21) + part)))
+ {
+ expected_part= part;
+ expected_num= num;
+ return;
+ }
+ }
+ part= 0;
+ } while (--num);
+ }
+ else
+ {
+ do
+ {
+ while (--part > 0)
+ {
+ if (is_divisible_by(num, part))
+ {
+ expected_part= part;
+ expected_num= num;
+ return;
+ }
+ }
+ part= no_parts + 1;
+ } while (++num);
+ }
+}
+
+void calculate_end_next(uint part)
+{
+ uint no_parts= tot_no_parts, num;
+ num_array[part]= 0;
+ if (max_ind)
+ {
+ expected_num= 0;
+ for (part= no_parts; part > 0 ; part--)
+ {
+ if (num_array[part])
+ {
+ num= num_array[part] & 0x3FFFFF;
+ if (num >= expected_num)
+ {
+ expected_num= num;
+ expected_part= part;
+ }
+ }
+ }
+ if (expected_num == 0)
+ expected_part= 0;
+ }
+ else
+ {
+ expected_num= 0xFFFFFFFF;
+ for (part= 1; part <= no_parts; part++)
+ {
+ if (num_array[part])
+ {
+ num= num_array[part] & 0x3FFFFF;
+ if (num <= expected_num)
+ {
+ expected_num= num;
+ expected_part= part;
+ }
+ }
+ }
+ if (expected_num == 0xFFFFFFFF)
+ expected_part= 0;
+ }
+ return;
+}
+static int test_compare(void *null_arg, byte *a, byte *b)
+{
+ uint a_num= (*(uint*)a) & 0x3FFFFF;
+ uint b_num= (*(uint*)b) & 0x3FFFFF;
+ uint a_part, b_part;
+ if (a_num > b_num)
+ return +1;
+ if (a_num < b_num)
+ return -1;
+ a_part= (*(uint*)a) >> 22;
+ b_part= (*(uint*)b) >> 22;
+ if (a_part < b_part)
+ return +1;
+ if (a_part > b_part)
+ return -1;
+ return 0;
+}
+
+bool check_num(uint num_part)
+{
+ uint part= num_part >> 22;
+ uint num= num_part & 0x3FFFFF;
+ if (part == expected_part)
+ if (num == expected_num)
+ return FALSE;
+ printf("Expect part %u Expect num 0x%x got part %u num 0x%x max_ind %u fix_used %u \n",
+ expected_part, expected_num, part, num, max_ind, fix_used);
+ return TRUE;
+}
+
+
+void perform_insert(QUEUE *queue)
+{
+ uint i= 1, no_parts= tot_no_parts;
+ uint backward_start= 0;
+
+ expected_part= 1;
+ expected_num= 1;
+
+ if (max_ind)
+ backward_start= 1 << 21;
+
+ do
+ {
+ uint num= (i + backward_start);
+ if (max_ind)
+ {
+ while (!is_divisible_by(num, i))
+ num--;
+ if (max_ind && (num > expected_num ||
+ (num == expected_num && i < expected_part)))
+ {
+ expected_num= num;
+ expected_part= i;
+ }
+ }
+ num_array[i]= num + (i << 22);
+ if (fix_used)
+ queue_element(queue, i-1)= (byte*)&num_array[i];
+ else
+ queue_insert(queue, (byte*)&num_array[i]);
+ } while (++i <= no_parts);
+ if (fix_used)
+ {
+ queue->elements= no_parts;
+ queue_fix(queue);
+ }
+}
+
+bool perform_ins_del(QUEUE *queue, bool max_ind)
+{
+ uint i= 0, no_loops= tot_no_loops, j= tot_no_parts;
+ do
+ {
+ uint num_part= *(uint*)queue_top(queue);
+ uint part= num_part >> 22;
+ if (check_num(num_part))
+ return TRUE;
+ if (j++ >= no_loops)
+ {
+ calculate_end_next(part);
+ queue_remove(queue, (uint) 0);
+ }
+ else
+ {
+ calculate_next();
+ if (max_ind)
+ num_array[part]-= part;
+ else
+ num_array[part]+= part;
+ queue_top(queue)= (byte*)&num_array[part];
+ queue_replaced(queue);
+ }
+ } while (++i < no_loops);
+ return FALSE;
+}
+
+bool do_test(uint no_parts, uint l_max_ind, bool l_fix_used)
+{
+ QUEUE queue;
+ bool result;
+ max_ind= l_max_ind;
+ fix_used= l_fix_used;
+ init_queue(&queue, no_parts, 0, max_ind, test_compare, NULL);
+ tot_no_parts= no_parts;
+ tot_no_loops= 1024;
+ perform_insert(&queue);
+  result= perform_ins_del(&queue, max_ind);
+  delete_queue(&queue);
+ if (result)
+ {
+ printf("Error\n");
+ return TRUE;
+ }
+ return FALSE;
+}
+
+static void start_measurement()
+{
+ start_time= my_getsystime();
+}
+
+static void stop_measurement()
+{
+ ulonglong stop_time= my_getsystime();
+ uint time_in_micros;
+ stop_time-= start_time;
+ stop_time/= 10; /* Convert to microseconds */
+ time_in_micros= (uint)stop_time;
+ printf("Time expired is %u microseconds \n", time_in_micros);
+}
+
+static void benchmark_test()
+{
+ QUEUE queue_real;
+ QUEUE *queue= &queue_real;
+ uint i, add;
+ fix_used= TRUE;
+ max_ind= FALSE;
+ tot_no_parts= 1024;
+ init_queue(queue, tot_no_parts, 0, max_ind, test_compare, NULL);
+ /*
+    First benchmark whether queue_fix is faster than using queue_insert
+    for partition counts ranging from 2 up to 128.
+ */
+ for (tot_no_parts= 2, add=2; tot_no_parts < 128;
+ tot_no_parts+= add, add++)
+ {
+ printf("Start benchmark queue_fix, tot_no_parts= %u \n", tot_no_parts);
+ start_measurement();
+ for (i= 0; i < 128; i++)
+ {
+ perform_insert(queue);
+ queue_remove_all(queue);
+ }
+ stop_measurement();
+
+ fix_used= FALSE;
+ printf("Start benchmark queue_insert\n");
+ start_measurement();
+ for (i= 0; i < 128; i++)
+ {
+ perform_insert(queue);
+ queue_remove_all(queue);
+ }
+ stop_measurement();
+ }
+ /*
+ Now benchmark insertion and deletion of 16400 elements.
+ Used in consecutive runs this shows whether the optimised _downheap
+ is faster than the standard implementation.
+ */
+ printf("Start benchmarking _downheap \n");
+ start_measurement();
+ perform_insert(queue);
+ for (i= 0; i < 65536; i++)
+ {
+ uint num, part;
+ num= *(uint*)queue_top(queue);
+ num+= 16;
+ part= num >> 22;
+ num_array[part]= num;
+ queue_top(queue)= (byte*)&num_array[part];
+ queue_replaced(queue);
+ }
+ for (i= 0; i < 16; i++)
+ queue_remove(queue, (uint) 0);
+ queue_remove_all(queue);
+ stop_measurement();
+}
+
+int main()
+{
+ int i, add= 1;
+ for (i= 1; i < 1024; i+=add, add++)
+ {
+ printf("Start test for priority queue of size %u\n", i);
+ if (do_test(i, 0, 1))
+ return -1;
+ if (do_test(i, 1, 1))
+ return -1;
+ if (do_test(i, 0, 0))
+ return -1;
+ if (do_test(i, 1, 0))
+ return -1;
+ }
+ benchmark_test();
+ printf("OK\n");
+ return 0;
}
+#endif
diff --git a/mysys/trie.c b/mysys/trie.c
new file mode 100644
index 00000000000..1f638f8f732
--- /dev/null
+++ b/mysys/trie.c
@@ -0,0 +1,237 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ Implementation of trie and Aho-Corasick automaton.
+ Supports only charsets that can be compared byte-wise.
+
+ TODO:
+ Add character frequencies. Can increase lookup speed
+ up to 30%.
+    Implement character-wise comparison.
+*/
+
+
+#include "mysys_priv.h"
+#include <m_string.h>
+#include <my_trie.h>
+#include <my_base.h>
+
+
+/*
+ SYNOPSIS
+ TRIE *trie_init (TRIE *trie, CHARSET_INFO *charset);
+
+ DESCRIPTION
+ Allocates or initializes a `TRIE' object. If `trie' is a `NULL'
+ pointer, the function allocates, initializes, and returns a new
+ object. Otherwise, the object is initialized and the address of
+ the object is returned. If `trie_init()' allocates a new object,
+ it will be freed when `trie_free()' is called.
+
+ RETURN VALUE
+ An initialized `TRIE*' object. `NULL' if there was insufficient
+ memory to allocate a new object.
+*/
+
+TRIE *trie_init (TRIE *trie, CHARSET_INFO *charset)
+{
+ MEM_ROOT mem_root;
+ DBUG_ENTER("trie_init");
+ DBUG_ASSERT(charset);
+ init_alloc_root(&mem_root,
+ (sizeof(TRIE_NODE) * 128) + ALLOC_ROOT_MIN_BLOCK_SIZE,
+ sizeof(TRIE_NODE) * 128);
+ if (! trie)
+ {
+ if (! (trie= (TRIE *)alloc_root(&mem_root, sizeof(TRIE))))
+ {
+ free_root(&mem_root, MYF(0));
+ DBUG_RETURN(NULL);
+ }
+ }
+
+ memcpy(&trie->mem_root, &mem_root, sizeof(MEM_ROOT));
+ trie->root.leaf= 0;
+ trie->root.c= 0;
+ trie->root.next= NULL;
+ trie->root.links= NULL;
+ trie->root.fail= NULL;
+ trie->charset= charset;
+ trie->nnodes= 0;
+ trie->nwords= 0;
+ DBUG_RETURN(trie);
+}
+
+
+/*
+ SYNOPSIS
+ void trie_free (TRIE *trie);
+ trie - valid pointer to `TRIE'
+
+ DESCRIPTION
+ Frees the memory allocated for a `trie'.
+
+ RETURN VALUE
+ None.
+*/
+
+void trie_free (TRIE *trie)
+{
+ MEM_ROOT mem_root;
+ DBUG_ENTER("trie_free");
+ DBUG_ASSERT(trie);
+ memcpy(&mem_root, &trie->mem_root, sizeof(MEM_ROOT));
+ free_root(&mem_root, MYF(0));
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ SYNOPSIS
+ my_bool trie_insert (TRIE *trie, const byte *key, uint keylen);
+ trie - valid pointer to `TRIE'
+ key - valid pointer to key to insert
+ keylen - non-0 key length
+
+ DESCRIPTION
+ Inserts new key into trie.
+
+ RETURN VALUE
+ Upon successful completion, `trie_insert' returns `FALSE'. Otherwise
+ `TRUE' is returned.
+
+ NOTES
+ If this function fails you must assume `trie' is broken.
+ However it can be freed with trie_free().
+*/
+
+my_bool trie_insert (TRIE *trie, const byte *key, uint keylen)
+{
+ TRIE_NODE *node;
+ TRIE_NODE *next;
+ byte p;
+ uint k;
+ DBUG_ENTER("trie_insert");
+ DBUG_ASSERT(trie && key && keylen);
+ node= &trie->root;
+ trie->root.fail= NULL;
+ for (k= 0; k < keylen; k++)
+ {
+ p= key[k];
+ for (next= node->links; next; next= next->next)
+ if (next->c == p)
+ break;
+
+ if (! next)
+ {
+ TRIE_NODE *tmp= (TRIE_NODE *)alloc_root(&trie->mem_root,
+ sizeof(TRIE_NODE));
+ if (! tmp)
+ DBUG_RETURN(TRUE);
+ tmp->leaf= 0;
+ tmp->c= p;
+ tmp->links= tmp->fail= tmp->next= NULL;
+ trie->nnodes++;
+ if (! node->links)
+ {
+ node->links= tmp;
+ }
+ else
+ {
+ for (next= node->links; next->next; next= next->next) /* no-op */;
+ next->next= tmp;
+ }
+ node= tmp;
+ }
+ else
+ {
+ node= next;
+ }
+ }
+ node->leaf= keylen;
+ trie->nwords++;
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ SYNOPSIS
+    my_bool ac_trie_prepare (TRIE *trie);
+ trie - valid pointer to `TRIE'
+
+ DESCRIPTION
+ Constructs Aho-Corasick automaton.
+
+ RETURN VALUE
+    Upon successful completion, `ac_trie_prepare' returns `FALSE'. Otherwise
+ `TRUE' is returned.
+*/
+
+my_bool ac_trie_prepare (TRIE *trie)
+{
+ TRIE_NODE **tmp_nodes;
+ TRIE_NODE *node;
+ uint32 fnode= 0;
+ uint32 lnode= 0;
+  DBUG_ENTER("ac_trie_prepare");
+ DBUG_ASSERT(trie);
+
+ tmp_nodes= (TRIE_NODE **)my_malloc(trie->nnodes * sizeof(TRIE_NODE *), MYF(0));
+ if (! tmp_nodes)
+ DBUG_RETURN(TRUE);
+
+ trie->root.fail= &trie->root;
+ for (node= trie->root.links; node; node= node->next)
+ {
+ node->fail= &trie->root;
+ tmp_nodes[lnode++]= node;
+ }
+
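+  /*
+    Breadth-first traversal of the trie: tmp_nodes acts as a FIFO queue
+    (fnode is the head index, lnode the tail) of nodes whose children
+    still need failure links.  For each child the parent's failure chain
+    is followed until trie_goto() (declared in my_trie.h, not shown here)
+    returns a non-NULL target; the chain ends at the root, where a target
+    is expected to always be found.
+  */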
+ while (fnode < lnode)
+ {
+ TRIE_NODE *current= (TRIE_NODE *)tmp_nodes[fnode++];
+ for (node= current->links; node; node= node->next)
+ {
+ TRIE_NODE *fail= current->fail;
+ tmp_nodes[lnode++]= node;
+ while (! (node->fail= trie_goto(&trie->root, fail, node->c)))
+ fail= fail->fail;
+ }
+ }
+ my_free((gptr)tmp_nodes, MYF(0));
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ SYNOPSIS
+ void ac_trie_init (TRIE *trie, AC_TRIE_STATE *state);
+ trie - valid pointer to `TRIE'
+    state - valid pointer to `AC_TRIE_STATE'
+
+ DESCRIPTION
+ Initializes `AC_TRIE_STATE' object.
+*/
+
+void ac_trie_init (TRIE *trie, AC_TRIE_STATE *state)
+{
+ DBUG_ENTER("ac_trie_init");
+ DBUG_ASSERT(trie && state);
+ state->trie= trie;
+ state->node= &trie->root;
+ DBUG_VOID_RETURN;
+}
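+
+
+/*
+  Usage sketch (illustrative only): build the trie with trie_init() and
+  trie_insert(), turn it into an Aho-Corasick automaton with
+  ac_trie_prepare() and start matching from a fresh AC_TRIE_STATE.  The
+  matching helpers themselves live in include/my_trie.h and are not part
+  of this file; default_charset_info is assumed to come from m_ctype.h.
+
+    TRIE *trie;
+    AC_TRIE_STATE state;
+
+    if (!(trie= trie_init(NULL, default_charset_info)))
+      return 1;
+    if (trie_insert(trie, (const byte*) "he",   2) ||
+        trie_insert(trie, (const byte*) "she",  3) ||
+        trie_insert(trie, (const byte*) "hers", 4))
+      return 1;
+    if (ac_trie_prepare(trie))
+      return 1;
+    ac_trie_init(trie, &state);
+    ...
+    trie_free(trie);
+*/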
diff --git a/ndb/Makefile.am b/ndb/Makefile.am
deleted file mode 100644
index 3aac54b38ee..00000000000
--- a/ndb/Makefile.am
+++ /dev/null
@@ -1,30 +0,0 @@
-SUBDIRS = src tools . include @ndb_opt_subdirs@
-DIST_SUBDIRS = src tools include test docs
-EXTRA_DIST = config ndbapi-examples
-
-include $(top_srcdir)/ndb/config/common.mk.am
-
-dist-hook:
- -rm -rf `find $(distdir) -type d -name SCCS`
- -rm -rf `find $(distdir) -type d -name old_files`
- -rm -rf `find $(distdir)/ndbapi-examples -name '*.o'`
- list='$(SUBDIRS)'; for subdir in $$list; do \
- if test "$$subdir" != "." -a "$$subdir" != "include"; then \
- files="`find $$subdir -name '*\.h'` `find $$subdir -name '*\.hpp'`"; \
- for f in $$files; do \
- if test -d "$(distdir)/`dirname $$f`" -a ! -e "$(distdir)/$$f"; then \
- cp $$f $(distdir)/$$f; \
- fi; \
- done; \
- fi; \
- done
-
-windoze:
- for i in `find . -name 'Makefile.am'`; do make -C `dirname $$i` windoze-dsp; done
-
-windoze-dsp:
-
-all-windoze-dsp: windoze
- find . -name '*.dsp' | grep -v SCCS | xargs unix2dos
- $(top_srcdir)/ndb/config/make-win-dsw.sh | unix2dos > ndb.dsw
- tar cvfz ndb-win-dsp.tar.gz ndb.dsw `find . -name '*.dsp' | grep -v SCCS`
diff --git a/ndb/config/common.mk.am b/ndb/config/common.mk.am
deleted file mode 100644
index 869e2fae91d..00000000000
--- a/ndb/config/common.mk.am
+++ /dev/null
@@ -1,12 +0,0 @@
-ndbbindir = "$(libexecdir)"
-ndbtoolsdir = "$(bindir)"
-ndbtestdir = "$(bindir)"
-ndblibdir = "$(pkglibdir)"
-ndbincludedir = "$(pkgincludedir)/ndb"
-ndbapiincludedir = "$(pkgincludedir)/ndb/ndbapi"
-mgmapiincludedir = "$(pkgincludedir)/ndb/mgmapi"
-
-INCLUDES = $(INCLUDES_LOC)
-LDADD = $(top_srcdir)/ndb/src/common/portlib/gcc.cpp $(LDADD_LOC)
-DEFS = @DEFS@ @NDB_DEFS@ $(DEFS_LOC) $(NDB_EXTRA_FLAGS)
-NDB_CXXFLAGS=@ndb_cxxflags_fix@ $(NDB_CXXFLAGS_LOC)
diff --git a/ndb/config/type_kernel.mk.am b/ndb/config/type_kernel.mk.am
deleted file mode 100644
index 703876ee2e9..00000000000
--- a/ndb/config/type_kernel.mk.am
+++ /dev/null
@@ -1,18 +0,0 @@
-
-INCLUDES += \
- -I$(srcdir) -I$(top_srcdir)/include \
- -I$(top_srcdir)/ndb/include \
- -I$(top_srcdir)/ndb/src/kernel/vm \
- -I$(top_srcdir)/ndb/src/kernel/error \
- -I$(top_srcdir)/ndb/src/kernel \
- -I$(top_srcdir)/ndb/include/kernel \
- -I$(top_srcdir)/ndb/include/transporter \
- -I$(top_srcdir)/ndb/include/debugger \
- -I$(top_srcdir)/ndb/include/mgmapi \
- -I$(top_srcdir)/ndb/include/mgmcommon \
- -I$(top_srcdir)/ndb/include/ndbapi \
- -I$(top_srcdir)/ndb/include/util \
- -I$(top_srcdir)/ndb/include/portlib \
- -I$(top_srcdir)/ndb/include/logger
-
-#AM_LDFLAGS = @ndb_ldflags@
diff --git a/ndb/config/type_mgmapiclient.mk.am b/ndb/config/type_mgmapiclient.mk.am
deleted file mode 100644
index 1ef4a81d67e..00000000000
--- a/ndb/config/type_mgmapiclient.mk.am
+++ /dev/null
@@ -1,2 +0,0 @@
-
-INCLUDES += -I$(top_srcdir)/ndb/include/mgmapi
diff --git a/ndb/config/type_ndbapi.mk.am b/ndb/config/type_ndbapi.mk.am
deleted file mode 100644
index ed648273aea..00000000000
--- a/ndb/config/type_ndbapi.mk.am
+++ /dev/null
@@ -1,12 +0,0 @@
-
-INCLUDES += \
- -I$(srcdir) -I$(top_srcdir)/include -I$(top_srcdir)/ndb/include \
- -I$(top_srcdir)/ndb/include/kernel \
- -I$(top_srcdir)/ndb/include/transporter \
- -I$(top_srcdir)/ndb/include/debugger \
- -I$(top_srcdir)/ndb/include/mgmapi \
- -I$(top_srcdir)/ndb/include/mgmcommon \
- -I$(top_srcdir)/ndb/include/ndbapi \
- -I$(top_srcdir)/ndb/include/util \
- -I$(top_srcdir)/ndb/include/portlib \
- -I$(top_srcdir)/ndb/include/logger
diff --git a/ndb/config/type_ndbapiclient.mk.am b/ndb/config/type_ndbapiclient.mk.am
deleted file mode 100644
index 88b57e49e19..00000000000
--- a/ndb/config/type_ndbapiclient.mk.am
+++ /dev/null
@@ -1,2 +0,0 @@
-
-INCLUDES += -I$(top_srcdir)/ndb/include/ndbapi
diff --git a/ndb/config/type_ndbapitest.mk.am b/ndb/config/type_ndbapitest.mk.am
deleted file mode 100644
index 392c4e9fc70..00000000000
--- a/ndb/config/type_ndbapitest.mk.am
+++ /dev/null
@@ -1,14 +0,0 @@
-
-LDADD += $(top_builddir)/ndb/test/src/libNDBT.a \
- $(top_builddir)/ndb/src/libndbclient.la \
- $(top_builddir)/dbug/libdbug.a \
- $(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
-
-INCLUDES += -I$(top_srcdir) -I$(top_srcdir)/include \
- -I$(top_srcdir)/ndb/include \
- -I$(top_srcdir)/ndb/include/ndbapi \
- -I$(top_srcdir)/ndb/include/util \
- -I$(top_srcdir)/ndb/include/portlib \
- -I$(top_srcdir)/ndb/test/include \
- -I$(top_srcdir)/ndb/include/mgmapi
diff --git a/ndb/config/type_ndbapitools.mk.am b/ndb/config/type_ndbapitools.mk.am
deleted file mode 100644
index d4eb090112d..00000000000
--- a/ndb/config/type_ndbapitools.mk.am
+++ /dev/null
@@ -1,15 +0,0 @@
-
-LDADD += \
- $(top_builddir)/ndb/src/libndbclient.la \
- $(top_builddir)/dbug/libdbug.a \
- $(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
-
-INCLUDES += -I$(srcdir) -I$(top_srcdir)/include \
- -I$(top_srcdir)/ndb/include \
- -I$(top_srcdir)/ndb/include/ndbapi \
- -I$(top_srcdir)/ndb/include/util \
- -I$(top_srcdir)/ndb/include/portlib \
- -I$(top_srcdir)/ndb/test/include \
- -I$(top_srcdir)/ndb/include/mgmapi \
- -I$(top_srcdir)/ndb/include/kernel
diff --git a/ndb/config/type_util.mk.am b/ndb/config/type_util.mk.am
deleted file mode 100644
index 0dfa77b7a7c..00000000000
--- a/ndb/config/type_util.mk.am
+++ /dev/null
@@ -1,6 +0,0 @@
-
-INCLUDES += -I$(srcdir) -I$(top_srcdir)/include \
- -I$(top_srcdir)/ndb/include \
- -I$(top_srcdir)/ndb/include/util \
- -I$(top_srcdir)/ndb/include/portlib \
- -I$(top_srcdir)/ndb/include/logger
diff --git a/ndb/docs/Makefile.am b/ndb/docs/Makefile.am
deleted file mode 100644
index afa91857771..00000000000
--- a/ndb/docs/Makefile.am
+++ /dev/null
@@ -1,114 +0,0 @@
-DOXYDIR = doxygen
-noinst_HEADERS = $(DOXYDIR)/predoxy.pl $(DOXYDIR)/postdoxy.pl $(DOXYDIR)/Doxyfile.ndbapi $(DOXYDIR)/Doxyfile.mgmapi $(DOXYDIR)/header.ndbapi.tex $(DOXYDIR)/header.mgmapi.tex
-
-all: do-check-html ndbapidoc-html mgmapidoc-html
-all-pdf: do-check-pdf ndbapidoc-pdf mgmapidoc-pdf
-
-DOXYTMP = .doxytmp
-DOXYOUT = .doxyout
-
-NDB_RELEASE = @NDB_VERSION_MAJOR@.@NDB_VERSION_MINOR@.@NDB_VERSION_BUILD@-@NDB_VERSION_STATUS@
-
-clean-local:
- rm -rf ndbapi.pdf ndbapi.html mgmapi.pdf mgmapi.html
- rm -rf $(DOXYTMP) $(DOXYOUT)
-
-do-check-html:
- @set -x; \
- if test @PERL@ = no ; then \
- echo "Perl needed to make docs"; \
- exit 1; \
- fi; \
- if test @DOXYGEN@ = no ; then \
- echo "Doxygen needed to make docs"; \
- exit 1; \
- fi;
-
-do-check-pdf: do-check-html
- if test @PDFLATEX@ = no ; then \
- echo "Pdflatex needed to make docs"; \
- exit 1; \
- fi; \
- if test @MAKEINDEX@ = no ; then \
- echo "Makeindex needed to make docs"; \
- exit 1; \
- fi;
-
-###
-#
-# NDB API Programmer's Guide
-#
-ndbapidoc-html: ndbapi.html
-ndbapidoc-pdf: ndbapi.pdf
-
-ndbapi.html: $(noinst_HEADERS)
- @set -x; \
- export NDB_RELEASE=$(NDB_RELEASE); \
- @RM@ -f ndbapi.pdf ndbapi.html; \
- @RM@ -rf $(DOXYTMP) $(DOXYOUT); \
- mkdir -p $(DOXYTMP) $(DOXYOUT); \
- @CP@ $(top_srcdir)/ndb/include/ndbapi/* $(DOXYTMP); \
- @CP@ $(top_srcdir)/ndb/ndbapi-examples/*/*.[ch]pp $(DOXYTMP); \
- @PERL@ $(DOXYDIR)/predoxy.pl; \
- mv footer.html $(DOXYTMP); \
- (cd $(DOXYTMP) ; @DOXYGEN@ ../$(DOXYDIR)/Doxyfile.ndbapi); \
- @PERL@ $(DOXYDIR)/postdoxy.pl $(DOXYOUT)/ndbapi.latex "MySQL Cluster NDB API Programmer Guide"; \
- (cd $(DOXYOUT) && \
- find ndbapi.html -print | cpio -pdm ..);
-
-ndbapi.pdf: ndbapi.html
- (cd $(DOXYOUT)/ndbapi.latex && \
- @PDFLATEX@ refman.tex && @MAKEINDEX@ refman && @PDFLATEX@ refman.tex && \
- cp -p refman.pdf ../../ndbapi.pdf);
-
-###
-#
-# MGM API Guide
-#
-mgmapidoc-html: mgmapi.html
-mgmapidoc-pdf: mgmapi.pdf
-
-mgmapi.html: $(noinst_HEADERS)
- @set -x; \
- export NDB_RELEASE=$(NDB_RELEASE); \
- @RM@ -f mgmapi.pdf mgmapi.html; \
- @RM@ -rf $(DOXYTMP) $(DOXYOUT); \
- mkdir -p $(DOXYTMP) $(DOXYOUT); \
- @CP@ $(top_srcdir)/ndb/include/mgmapi/* $(DOXYTMP); \
- @PERL@ $(DOXYDIR)/predoxy.pl; \
- mv footer.html $(DOXYTMP); \
- (cd $(DOXYTMP) ; @DOXYGEN@ ../$(DOXYDIR)/Doxyfile.mgmapi); \
- @PERL@ $(DOXYDIR)/postdoxy.pl $(DOXYOUT)/mgmapi.latex "MySQL Cluster MGM API Guide"; \
- (cd $(DOXYOUT) && \
- find mgmapi.html -print | cpio -pdm ..);
-
-mgmapi.pdf: mgmapi.html
- (cd $(DOXYOUT)/mgmapi.latex && \
- @PDFLATEX@ refman.tex && @MAKEINDEX@ refman && @PDFLATEX@ refman.tex && \
- cp -p refman.pdf ../../mgmapi.pdf);
-
-###
-#
-# Complete Source Browser except for
-# ndbapi odbc test tools win32 lib examples docs CVS config bin
-# include/ndbapi
-# include/newtonapi src/newtonapi
-# include/mgmapi src/mgmapi
-# src/client
-ndbdoc: DUMMY
- mkdir -p $(OUTDIR)
- cd $(top_srcdir)/ndb ; $(DOXYGEN) $(DOXYDIR)/Doxyfile.ndb
-
-###
-#
-# odbcdoc - Complete Source Browser for NDB ODBC (src/client/odbc)
-
-odbcdoc: DUMMY
- mkdir -p $(OUTDIR)
- cd $(top_srcdir)/ndb ; $(DOXYGEN) $(DOXYDIR)/Doxyfile.odbc
-
-testdoc: DUMMY
- mkdir -p $(OUTDIR)
- cd $(top_srcdir)/ndb ; $(DOXYGEN) $(DOXYDIR)/Doxyfile.test
-
-windoze-dsp:
diff --git a/ndb/include/Makefile.am b/ndb/include/Makefile.am
deleted file mode 100644
index 10f297492e9..00000000000
--- a/ndb/include/Makefile.am
+++ /dev/null
@@ -1,51 +0,0 @@
-
-include $(top_srcdir)/ndb/config/common.mk.am
-
-ndbinclude_HEADERS = \
-ndb_constants.h \
-ndb_init.h \
-ndb_types.h \
-ndb_version.h
-
-ndbapiinclude_HEADERS = \
-ndbapi/ndbapi_limits.h \
-ndbapi/ndb_opt_defaults.h \
-ndbapi/Ndb.hpp \
-ndbapi/NdbApi.hpp \
-ndbapi/NdbTransaction.hpp \
-ndbapi/NdbDictionary.hpp \
-ndbapi/NdbError.hpp \
-ndbapi/NdbEventOperation.hpp \
-ndbapi/NdbIndexOperation.hpp \
-ndbapi/NdbOperation.hpp \
-ndbapi/ndb_cluster_connection.hpp \
-ndbapi/NdbBlob.hpp \
-ndbapi/NdbPool.hpp \
-ndbapi/NdbRecAttr.hpp \
-ndbapi/NdbReceiver.hpp \
-ndbapi/NdbScanFilter.hpp \
-ndbapi/NdbScanOperation.hpp \
-ndbapi/NdbIndexScanOperation.hpp \
-ndbapi/ndberror.h
-
-mgmapiinclude_HEADERS = \
-mgmapi/mgmapi.h \
-mgmapi/mgmapi_debug.h \
-mgmapi/mgmapi_config_parameters.h \
-mgmapi/mgmapi_config_parameters_debug.h \
-mgmapi/ndb_logevent.h
-
-noinst_HEADERS = \
-ndb_global.h \
-ndb_net.h
-
-EXTRA_DIST = debugger editline kernel logger mgmcommon \
-portlib transporter util
-
-dist-hook:
- -rm -rf `find $(distdir) -type d -name SCCS`
-
-windoze-dsp:
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/ndb/include/kernel/AttributeHeader.hpp b/ndb/include/kernel/AttributeHeader.hpp
deleted file mode 100644
index 3cb432067eb..00000000000
--- a/ndb/include/kernel/AttributeHeader.hpp
+++ /dev/null
@@ -1,215 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef ATTRIBUTE_HEADER
-#define ATTRIBUTE_HEADER
-
-/**
- * @class AttributeHeader
- * @brief Header passed in front of every attribute value in AttrInfo signal
- */
-class AttributeHeader {
- friend class Dbtup;
- friend class Backup;
- friend class NdbOperation;
- friend class DbUtil;
- friend class Suma;
-
-public:
- /**
- * Pseudo columns
- */
- STATIC_CONST( PSUEDO = 0x8000 );
- STATIC_CONST( FRAGMENT = 0xFFFE ); // Read fragment no
- STATIC_CONST( ROW_COUNT = 0xFFFD ); // Read row count (committed)
- STATIC_CONST( COMMIT_COUNT = 0xFFFC ); // Read commit count
- STATIC_CONST( RANGE_NO = 0xFFFB ); // Read range no (when batched ranges)
-
- STATIC_CONST( ROW_SIZE = 0xFFFA );
- STATIC_CONST( FRAGMENT_MEMORY= 0xFFF9 );
-
- /** Initialize AttributeHeader at location aHeaderPtr */
- static AttributeHeader& init(void* aHeaderPtr, Uint32 anAttributeId,
- Uint32 aDataSize);
-
- /** Returns size of AttributeHeader (usually one or two words) */
- Uint32 getHeaderSize() const; // In 32-bit words
-
- /** Store AttributeHeader in location given as argument */
- void insertHeader(Uint32*);
-
- /** Get next attribute header (if there is one) */
- AttributeHeader* getNext() const;
-
- /** Get location of attribute value */
- Uint32* getDataPtr() const;
-
- /** Getters and Setters */
- Uint32 getAttributeId() const;
- void setAttributeId(Uint32);
- Uint32 getDataSize() const; // In 32-bit words
- void setDataSize(Uint32);
- bool isNULL() const;
- void setNULL();
-
- /** Print **/
- //void print(NdbOut&);
- void print(FILE*);
-
- static Uint32 getDataSize(Uint32);
-
-public:
- AttributeHeader(Uint32 = 0);
- AttributeHeader(Uint32 anAttributeId, Uint32 aDataSize);
- ~AttributeHeader();
-
- Uint32 m_value;
-};
-
-/**
- * 1111111111222222222233
- * 01234567890123456789012345678901
- * ssssssssssssss eiiiiiiiiiiiiiiii
- *
- * i = Attribute Id
- * s = Size of current "chunk" - 14 Bits -> 16384 (words) = 65k
- * Including optional extra word(s).
- * e - Element data/Blob, read element of array
- * If == 0 next data word contains attribute value.
- * If == 1 next data word contains:
- * For Array of Fixed size Elements
- * Start Index (16 bit), Stop Index(16 bit)
- * For Blob
- * Start offset (32 bit) (length is defined in previous word)
- *
- * An attribute value equal to "null" is represented by setting s == 0.
- *
- * Bit 14 is not yet used.
- */
-
-inline
-AttributeHeader& AttributeHeader::init(void* aHeaderPtr, Uint32 anAttributeId,
- Uint32 aDataSize)
-{
- return * new (aHeaderPtr) AttributeHeader(anAttributeId, aDataSize);
-}
-
-inline
-AttributeHeader::AttributeHeader(Uint32 aHeader)
-{
- m_value = aHeader;
-}
-
-inline
-AttributeHeader::AttributeHeader(Uint32 anAttributeId, Uint32 aDataSize)
-{
- m_value = 0;
- this->setAttributeId(anAttributeId);
- this->setDataSize(aDataSize);
-}
-
-inline
-AttributeHeader::~AttributeHeader()
-{}
-
-inline
-Uint32 AttributeHeader::getHeaderSize() const
-{
- // Should check 'e' bit here
- return 1;
-}
-
-inline
-Uint32 AttributeHeader::getAttributeId() const
-{
- return (m_value & 0xFFFF0000) >> 16;
-}
-
-inline
-void AttributeHeader::setAttributeId(Uint32 anAttributeId)
-{
- m_value &= 0x0000FFFF; // Clear attribute id
- m_value |= (anAttributeId << 16);
-}
-
-inline
-Uint32 AttributeHeader::getDataSize() const
-{
- return (m_value & 0x3FFF);
-}
-
-inline
-void AttributeHeader::setDataSize(Uint32 aDataSize)
-{
- m_value &= (~0x3FFF);
- m_value |= aDataSize;
-}
-
-inline
-bool AttributeHeader::isNULL() const
-{
- return (getDataSize() == 0);
-}
-
-inline
-void AttributeHeader::setNULL()
-{
- setDataSize(0);
-}
-
-inline
-Uint32* AttributeHeader::getDataPtr() const
-{
- return (Uint32*)&m_value + getHeaderSize();
-}
-
-inline
-void AttributeHeader::insertHeader(Uint32* target)
-{
- *target = m_value;
-}
-
-inline
-AttributeHeader*
-AttributeHeader::getNext() const {
- return (AttributeHeader*)(getDataPtr() + getDataSize());
-}
-
-inline
-void
-//AttributeHeader::print(NdbOut& output) {
-AttributeHeader::print(FILE* output) {
- fprintf(output, "AttributeId: H\'%.8x (D\'%d), DataSize: H\'%.8x (D\'%d), "
- "isNULL: %d\n",
- getAttributeId(), getAttributeId(),
- getDataSize(), getDataSize(),
- isNULL());
-}
-
-inline
-Uint32
-AttributeHeader::getDataSize(Uint32 m_value){
- return (m_value & 0x3FFF);
-}
-
-#endif
-
-
-
-
-
-
-
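The bit layout documented in AttributeHeader.hpp above can be exercised on its own; the following standalone sketch (illustrative values only, no kernel headers required) packs and unpacks a header word with the same masks and shifts as the inline setters and getters:

#include <cstdio>

int main() {
  // Mirror of AttributeHeader::m_value: bits 16-31 = attribute id,
  // bits 0-13 = data size in words, size 0 means NULL.
  unsigned value = 0;
  unsigned attributeId = 7;        // arbitrary example id
  unsigned dataSizeInWords = 2;    // arbitrary example size

  value &= 0x0000FFFF;             // clear id, as setAttributeId() does
  value |= (attributeId << 16);
  value &= ~0x3FFFu;               // clear size, as setDataSize() does
  value |= dataSizeInWords;

  printf("header word   = 0x%08x\n", value);
  printf("attribute id  = %u\n", (value & 0xFFFF0000) >> 16);
  printf("size in words = %u\n", value & 0x3FFF);
  printf("isNULL        = %d\n", (value & 0x3FFF) == 0);
  return 0;
}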
diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h
deleted file mode 100644
index 9413f4ef56a..00000000000
--- a/ndb/include/kernel/GlobalSignalNumbers.h
+++ /dev/null
@@ -1,949 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef GLOBAL_SIGNAL_NUMBERS_H
-#define GLOBAL_SIGNAL_NUMBERS_H
-
-#include <kernel_types.h>
-/**
- * NOTE
- *
- * When adding a new signal, remember to update MAX_GSN and SignalNames.cpp
- */
-const GlobalSignalNumber MAX_GSN = 712;
-
-struct GsnName {
- GlobalSignalNumber gsn;
- const char * name;
-};
-
-extern const GsnName SignalNames[];
-extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
-
-/**
- * These are used by API and kernel
- */
-#define GSN_API_REGCONF 1
-#define GSN_API_REGREF 2
-#define GSN_API_REGREQ 3
-
-#define GSN_ATTRINFO 4
-#define GSN_TRANSID_AI 5
-#define GSN_KEYINFO 6
-#define GSN_READCONF 7
-
-#define GSN_TCKEY_FAILCONF 8
-#define GSN_TCKEY_FAILREF 9
-#define GSN_TCKEYCONF 10
-#define GSN_TCKEYREF 11
-#define GSN_TCKEYREQ 12
-
-#define GSN_TCROLLBACKCONF 13
-#define GSN_TCROLLBACKREF 14
-#define GSN_TCROLLBACKREQ 15
-#define GSN_TCROLLBACKREP 16
-
-#define GSN_TC_COMMITCONF 17
-#define GSN_TC_COMMITREF 18
-#define GSN_TC_COMMITREQ 19
-#define GSN_TC_HBREP 20
-
-#define GSN_TRANSID_AI_R 21
-#define GSN_KEYINFO20_R 22
-
-#define GSN_GET_TABINFOREF 23
-#define GSN_GET_TABINFOREQ 24
-#define GSN_GET_TABINFO_CONF 190
-
-#define GSN_GET_TABLEID_REQ 683
-#define GSN_GET_TABLEID_REF 684
-#define GSN_GET_TABLEID_CONF 685
-
-#define GSN_DIHNDBTAMPER 25
-#define GSN_NODE_FAILREP 26
-#define GSN_NF_COMPLETEREP 27
-
-#define GSN_SCAN_NEXTREQ 28
-#define GSN_SCAN_TABCONF 29
-/* 30 unused */
-#define GSN_SCAN_TABREF 31
-#define GSN_SCAN_TABREQ 32
-#define GSN_KEYINFO20 33
-
-#define GSN_TCRELEASECONF 34
-#define GSN_TCRELEASEREF 35
-#define GSN_TCRELEASEREQ 36
-
-#define GSN_TCSEIZECONF 37
-#define GSN_TCSEIZEREF 38
-#define GSN_TCSEIZEREQ 39
-
-/* 40 unused */
-/* 41 unused */
-/* 42 unused */
-/* 43 unused */
-/* 44 unused */
-/* 45 unused */
-/* 46 unused */
-/* 47 unused */
-/* 48 unused */
-/* 49 unused */
-/* 50 unused */
-/* 51 unused */
-/* 52 unused */
-/* 53 unused */
-/* 54 unused */
-/* 55 unused */
-/* 56 unused */
-/* 57 unused */
-/* 58 unused */
-/* 59 unused */
-/* 60 unused */
-/* 61 unused */
-/* 62 unused */
-/* 63 unused */
-/* 64 unused */
-/* 65 unused */
-/* 66 unused */
-
-/**
- * These are used only by kernel
- */
-
-#define GSN_ACC_ABORTCONF 67
-/* 68 unused */
-/* 69 unused */
-/* 70 unused */
-#define GSN_ACC_ABORTREQ 71
-#define GSN_ACC_CHECK_SCAN 72
-#define GSN_ACC_COMMITCONF 73
-#define GSN_ACC_COMMITREQ 74
-#define GSN_ACC_CONTOPCONF 75
-#define GSN_ACC_CONTOPREQ 76
-#define GSN_ACC_LCPCONF 77
-#define GSN_ACC_LCPREF 78
-#define GSN_ACC_LCPREQ 79
-#define GSN_ACC_LCPSTARTED 80
-#define GSN_ACC_OVER_REC 81
-
-#define GSN_ACC_SAVE_PAGES 83
-#define GSN_ACC_SCAN_INFO 84
-#define GSN_ACC_SCAN_INFO24 85
-#define GSN_ACC_SCANCONF 86
-#define GSN_ACC_SCANREF 87
-#define GSN_ACC_SCANREQ 88
-#define GSN_ACC_SRCONF 89
-#define GSN_ACC_SRREF 90
-#define GSN_ACC_SRREQ 91
-#define GSN_ACC_TO_CONF 92
-#define GSN_ACC_TO_REF 93
-#define GSN_ACC_TO_REQ 94
-#define GSN_ACCFRAGCONF 95
-#define GSN_ACCFRAGREF 96
-#define GSN_ACCFRAGREQ 97
-#define GSN_ACCKEYCONF 98
-#define GSN_ACCKEYREF 99
-#define GSN_ACCKEYREQ 100
-#define GSN_ACCMINUPDATE 101
-#define GSN_ACCSEIZECONF 103
-#define GSN_ACCSEIZEREF 104
-#define GSN_ACCSEIZEREQ 105
-#define GSN_ACCUPDATECONF 106
-#define GSN_ACCUPDATEKEY 107
-#define GSN_ACCUPDATEREF 108
-
-#define GSN_ADD_FRAGCONF 109
-#define GSN_ADD_FRAGREF 110
-#define GSN_ADD_FRAGREQ 111
-
-#define GSN_API_FAILCONF 113
-#define GSN_API_FAILREQ 114
-#define GSN_CNTR_START_REQ 115
-/* 116 not unused */
-#define GSN_CNTR_START_REF 117
-#define GSN_CNTR_START_CONF 118
-#define GSN_CNTR_START_REP 119
-/* 120 unused */
-/* 121 unused */
-/* 122 unused */
-/* 123 unused */
-/* 124 unused */
-#define GSN_CHECK_LCP_STOP 125
-#define GSN_CLOSE_COMCONF 126 /* local */
-#define GSN_CLOSE_COMREQ 127 /* local */
-#define GSN_CM_ACKADD 128 /* distr. */
-/* 129 unused */
-#define GSN_CM_ADD 130 /* distr. */
-/* 131 unused */
-/* 132 not unused */
-/* 133 not unused */
-#define GSN_CM_HEARTBEAT 134 /* distr. */
-/* 135 unused */
-/* 136 unused */
-/* 137 unused */
-#define GSN_CM_NODEINFOCONF 138 /* distr. */
-#define GSN_CM_NODEINFOREF 139 /* distr. */
-#define GSN_CM_NODEINFOREQ 140 /* distr. */
-#define GSN_CM_REGCONF 141 /* distr. */
-#define GSN_CM_REGREF 142 /* distr. */
-#define GSN_CM_REGREQ 143 /* distr. */
-/* 144 unused */
-/* 145 unused */
-/* 146 unused */
-#define GSN_CM_ADD_REP 147 /* local */
-/* 148 unused */
-/* 149 unused */
-/* 150 unused */
-#define GSN_CNTR_WAITREP 151 /* distr. */
-#define GSN_COMMIT 152
-#define GSN_COMMIT_FAILCONF 153
-#define GSN_COMMIT_FAILREQ 154
-#define GSN_COMMITCONF 155
-#define GSN_COMMITREQ 156
-#define GSN_COMMITTED 157
-#define GSN_COMPLETE 159
-#define GSN_COMPLETECONF 160
-#define GSN_COMPLETED 161
-#define GSN_COMPLETEREQ 162
-#define GSN_CONNECT_REP 163
-#define GSN_CONTINUEB 164
-/* 165 not unused */
-#define GSN_COPY_ACTIVECONF 166
-#define GSN_COPY_ACTIVEREF 167
-#define GSN_COPY_ACTIVEREQ 168
-#define GSN_COPY_FRAGCONF 169
-#define GSN_COPY_FRAGREF 170
-#define GSN_COPY_FRAGREQ 171
-#define GSN_COPY_GCICONF 172
-#define GSN_COPY_GCIREQ 173
-#define GSN_COPY_STATECONF 174
-#define GSN_COPY_STATEREQ 175
-#define GSN_COPY_TABCONF 176
-#define GSN_COPY_TABREQ 177
-#define GSN_CREATE_FRAGCONF 178
-#define GSN_CREATE_FRAGREF 179
-#define GSN_CREATE_FRAGREQ 180
-#define GSN_DEBUG_SIG 181
-#define GSN_DI_FCOUNTCONF 182
-#define GSN_DI_FCOUNTREF 183
-#define GSN_DI_FCOUNTREQ 184
-#define GSN_DIADDTABCONF 185
-#define GSN_DIADDTABREF 186
-#define GSN_DIADDTABREQ 187
-/* 188 not unused */
-/* 189 not unused */
-/* 190 not unused */
-#define GSN_DICTSTARTCONF 191
-#define GSN_DICTSTARTREQ 192
-
-#define GSN_LIST_TABLES_REQ 193
-#define GSN_LIST_TABLES_CONF 194
-
-#define GSN_ABORT 195
-#define GSN_ABORTCONF 196
-#define GSN_ABORTED 197
-#define GSN_ABORTREQ 198
-
-/******************************************
- * DROP TABLE
- *
- */
-
-/**
- * This is drop table's public interface
- */
-#define GSN_DROP_TABLE_REQ 82
-#define GSN_DROP_TABLE_REF 102
-#define GSN_DROP_TABLE_CONF 112
-
-/**
- * This is used for implementing drop table
- */
-#define GSN_PREP_DROP_TAB_REQ 199
-#define GSN_PREP_DROP_TAB_REF 200
-#define GSN_PREP_DROP_TAB_CONF 201
-
-#define GSN_DROP_TAB_REQ 202
-#define GSN_DROP_TAB_REF 203
-#define GSN_DROP_TAB_CONF 204
-
-#define GSN_WAIT_DROP_TAB_REQ 208
-#define GSN_WAIT_DROP_TAB_REF 209
-#define GSN_WAIT_DROP_TAB_CONF 216
-
-/*****************************************/
-
-#define GSN_UPDATE_TOCONF 205
-#define GSN_UPDATE_TOREF 206
-#define GSN_UPDATE_TOREQ 207
-
-#define GSN_DIGETNODESCONF 210
-#define GSN_DIGETNODESREF 211
-#define GSN_DIGETNODESREQ 212
-#define GSN_DIGETPRIMCONF 213
-#define GSN_DIGETPRIMREF 214
-#define GSN_DIGETPRIMREQ 215
-
-#define GSN_DIH_RESTARTCONF 217
-#define GSN_DIH_RESTARTREF 218
-#define GSN_DIH_RESTARTREQ 219
-
-/* 220 not unused */
-/* 221 not unused */
-/* 222 not unused */
-
-#define GSN_EMPTY_LCP_REQ 223
-#define GSN_EMPTY_LCP_CONF 224
-
-#define GSN_SCHEMA_INFO 225
-#define GSN_SCHEMA_INFOCONF 226
-
-#define GSN_MASTER_GCPCONF 227
-#define GSN_MASTER_GCPREF 228
-#define GSN_MASTER_GCPREQ 229
-
-/* 230 not unused */
-/* 231 not unused */
-
-#define GSN_DIRELEASECONF 232
-#define GSN_DIRELEASEREF 233
-#define GSN_DIRELEASEREQ 234
-#define GSN_DISCONNECT_REP 235
-#define GSN_DISEIZECONF 236
-#define GSN_DISEIZEREF 237
-#define GSN_DISEIZEREQ 238
-#define GSN_DIVERIFYCONF 239
-#define GSN_DIVERIFYREF 240
-#define GSN_DIVERIFYREQ 241
-#define GSN_ENABLE_COMORD 242
-#define GSN_END_LCPCONF 243
-#define GSN_END_LCPREQ 244
-#define GSN_END_TOCONF 245
-#define GSN_END_TOREQ 246
-#define GSN_EVENT_REP 247
-#define GSN_EXEC_FRAGCONF 248
-#define GSN_EXEC_FRAGREF 249
-#define GSN_EXEC_FRAGREQ 250
-#define GSN_EXEC_SRCONF 251
-#define GSN_EXEC_SRREQ 252
-#define GSN_EXPANDCHECK2 253
-#define GSN_FAIL_REP 254
-#define GSN_FSCLOSECONF 255
-#define GSN_FSCLOSEREF 256
-#define GSN_FSCLOSEREQ 257
-#define GSN_FSAPPENDCONF 258
-#define GSN_FSOPENCONF 259
-#define GSN_FSOPENREF 260
-#define GSN_FSOPENREQ 261
-#define GSN_FSREADCONF 262
-#define GSN_FSREADREF 263
-#define GSN_FSREADREQ 264
-#define GSN_FSSYNCCONF 265
-#define GSN_FSSYNCREF 266
-#define GSN_FSSYNCREQ 267
-#define GSN_FSAPPENDREQ 268
-#define GSN_FSAPPENDREF 269
-#define GSN_FSWRITECONF 270
-#define GSN_FSWRITEREF 271
-#define GSN_FSWRITEREQ 272
-#define GSN_GCP_ABORT 273
-#define GSN_GCP_ABORTED 274
-#define GSN_GCP_COMMIT 275
-#define GSN_GCP_NODEFINISH 276
-#define GSN_GCP_NOMORETRANS 277
-#define GSN_GCP_PREPARE 278
-#define GSN_GCP_PREPARECONF 279
-#define GSN_GCP_PREPAREREF 280
-#define GSN_GCP_SAVECONF 281
-#define GSN_GCP_SAVEREF 282
-#define GSN_GCP_SAVEREQ 283
-#define GSN_GCP_TCFINISHED 284
-#define GSN_SR_FRAGIDCONF 285
-#define GSN_SR_FRAGIDREF 286
-#define GSN_SR_FRAGIDREQ 287
-#define GSN_GETGCICONF 288
-#define GSN_GETGCIREQ 289
-#define GSN_HOT_SPAREREP 290
-#define GSN_INCL_NODECONF 291
-#define GSN_INCL_NODEREF 292
-#define GSN_INCL_NODEREQ 293
-#define GSN_LCP_FRAGIDCONF 294
-#define GSN_LCP_FRAGIDREF 295
-#define GSN_LCP_FRAGIDREQ 296
-#define GSN_LCP_HOLDOPCONF 297
-#define GSN_LCP_HOLDOPREF 298
-#define GSN_LCP_HOLDOPREQ 299
-#define GSN_SHRINKCHECK2 301
-#define GSN_GET_SCHEMA_INFOREQ 302
-/* 303 not unused */
-/* 304 not unused */
-#define GSN_LQH_RESTART_OP 305
-#define GSN_LQH_TRANSCONF 306
-#define GSN_LQH_TRANSREQ 307
-#define GSN_LQHADDATTCONF 308
-#define GSN_LQHADDATTREF 309
-#define GSN_LQHADDATTREQ 310
-#define GSN_LQHFRAGCONF 311
-#define GSN_LQHFRAGREF 312
-#define GSN_LQHFRAGREQ 313
-#define GSN_LQHKEYCONF 314
-#define GSN_LQHKEYREF 315
-#define GSN_LQHKEYREQ 316
-
-#define GSN_MASTER_LCPCONF 318
-#define GSN_MASTER_LCPREF 319
-#define GSN_MASTER_LCPREQ 320
-
-#define GSN_MEMCHECKCONF 321
-#define GSN_MEMCHECKREQ 322
-#define GSN_NDB_FAILCONF 323
-#define GSN_NDB_STARTCONF 324
-#define GSN_NDB_STARTREF 325
-#define GSN_NDB_STARTREQ 326
-#define GSN_NDB_STTOR 327
-#define GSN_NDB_STTORRY 328
-#define GSN_NDB_TAMPER 329
-#define GSN_NEXT_SCANCONF 330
-#define GSN_NEXT_SCANREF 331
-#define GSN_NEXT_SCANREQ 332
-#define GSN_NEXTOPERATION 333
-
-#define GSN_READ_CONFIG_REQ 334 /* new name for sizealt, local */
-#define GSN_READ_CONFIG_CONF 335 /* new name for sizealt, local */
-
-/* 336 unused */
-/* 337 unused */
-/* 338 unused */
-#define GSN_OPEN_COMCONF 339
-#define GSN_OPEN_COMREF 340
-#define GSN_OPEN_COMREQ 341
-#define GSN_PACKED_SIGNAL 342
-#define GSN_PREP_FAILCONF 343
-#define GSN_PREP_FAILREF 344
-#define GSN_PREP_FAILREQ 345
-#define GSN_PRES_TOCONF 346
-#define GSN_PRES_TOREQ 347
-#define GSN_READ_NODESCONF 348
-#define GSN_READ_NODESREF 349
-#define GSN_READ_NODESREQ 350
-#define GSN_SCAN_FRAGCONF 351
-#define GSN_SCAN_FRAGREF 352
-#define GSN_SCAN_FRAGREQ 353
-#define GSN_SCAN_HBREP 354
-#define GSN_SCAN_PROCCONF 355
-#define GSN_SCAN_PROCREQ 356
-#define GSN_SEND_PACKED 357
-#define GSN_SET_LOGLEVELORD 358
-
-#define GSN_LQH_ALLOCREQ 359
-#define GSN_TUP_ALLOCREQ 360
-#define GSN_TUP_DEALLOCREQ 361
-
-/* 362 not unused */
-
-#define GSN_TUP_WRITELOG_REQ 363
-#define GSN_LQH_WRITELOG_REQ 364
-
-#define GSN_LCP_FRAG_REP 300
-#define GSN_LCP_FRAG_ORD 365
-#define GSN_LCP_COMPLETE_REP 158
-
-#define GSN_START_LCP_REQ 317
-#define GSN_START_LCP_CONF 366
-
-#define GSN_UNBLO_DICTCONF 367
-#define GSN_UNBLO_DICTREQ 368
-#define GSN_START_COPYCONF 369
-#define GSN_START_COPYREF 370
-#define GSN_START_COPYREQ 371
-#define GSN_START_EXEC_SR 372
-#define GSN_START_FRAGCONF 373
-#define GSN_START_FRAGREF 374
-#define GSN_START_FRAGREQ 375
-#define GSN_START_LCP_REF 376
-#define GSN_START_LCP_ROUND 377
-#define GSN_START_MECONF 378
-#define GSN_START_MEREF 379
-#define GSN_START_MEREQ 380
-#define GSN_START_PERMCONF 381
-#define GSN_START_PERMREF 382
-#define GSN_START_PERMREQ 383
-#define GSN_START_RECCONF 384
-#define GSN_START_RECREF 385
-#define GSN_START_RECREQ 386
-#define GSN_START_TOCONF 387
-#define GSN_START_TOREQ 388
-#define GSN_STORED_PROCCONF 389
-#define GSN_STORED_PROCREF 390
-#define GSN_STORED_PROCREQ 391
-#define GSN_STTOR 392
-#define GSN_STTORRY 393
-#define GSN_BACKUP_TRIG_REQ 394
-#define GSN_SYSTEM_ERROR 395
-#define GSN_TAB_COMMITCONF 396
-#define GSN_TAB_COMMITREF 397
-#define GSN_TAB_COMMITREQ 398
-#define GSN_TAKE_OVERTCCONF 399
-#define GSN_TAKE_OVERTCREQ 400
-#define GSN_TC_CLOPSIZECONF 401
-#define GSN_TC_CLOPSIZEREQ 402
-#define GSN_TC_SCHVERCONF 403
-#define GSN_TC_SCHVERREQ 404
-#define GSN_TCGETOPSIZECONF 405
-#define GSN_TCGETOPSIZEREQ 406
-#define GSN_TEST_ORD 407
-#define GSN_TESTSIG 408
-#define GSN_TIME_SIGNAL 409
-/* 410 unused */
-/* 411 unused */
-/* 412 unused */
-#define GSN_TUP_ABORTREQ 414
-#define GSN_TUP_ADD_ATTCONF 415
-#define GSN_TUP_ADD_ATTRREF 416
-#define GSN_TUP_ADD_ATTRREQ 417
-#define GSN_TUP_ATTRINFO 418
-#define GSN_TUP_COMMITREQ 419
-/* 420 unused */
-#define GSN_TUP_LCPCONF 421
-#define GSN_TUP_LCPREF 422
-#define GSN_TUP_LCPREQ 423
-#define GSN_TUP_LCPSTARTED 424
-#define GSN_TUP_PREPLCPCONF 425
-#define GSN_TUP_PREPLCPREF 426
-#define GSN_TUP_PREPLCPREQ 427
-#define GSN_TUP_SRCONF 428
-#define GSN_TUP_SRREF 429
-#define GSN_TUP_SRREQ 430
-#define GSN_TUPFRAGCONF 431
-#define GSN_TUPFRAGREF 432
-#define GSN_TUPFRAGREQ 433
-#define GSN_TUPKEYCONF 434
-#define GSN_TUPKEYREF 435
-#define GSN_TUPKEYREQ 436
-#define GSN_TUPRELEASECONF 437
-#define GSN_TUPRELEASEREF 438
-#define GSN_TUPRELEASEREQ 439
-#define GSN_TUPSEIZECONF 440
-#define GSN_TUPSEIZEREF 441
-#define GSN_TUPSEIZEREQ 442
-
-#define GSN_ABORT_ALL_REQ 445
-#define GSN_ABORT_ALL_REF 446
-#define GSN_ABORT_ALL_CONF 447
-
-#define GSN_STATISTICS_REQ 448
-#define GSN_STOP_ORD 449
-#define GSN_TAMPER_ORD 450
-#define GSN_SET_VAR_REQ 451
-#define GSN_SET_VAR_CONF 452
-#define GSN_SET_VAR_REF 453
-#define GSN_STATISTICS_CONF 454
-
-#define GSN_START_ORD 455
-/* 456 unused */
-/* 457 unused */
-
-#define GSN_EVENT_SUBSCRIBE_REQ 458
-#define GSN_EVENT_SUBSCRIBE_CONF 459
-#define GSN_EVENT_SUBSCRIBE_REF 460
-#define GSN_ACC_COM_BLOCK 461
-#define GSN_ACC_COM_UNBLOCK 462
-#define GSN_TUP_COM_BLOCK 463
-#define GSN_TUP_COM_UNBLOCK 464
-
-#define GSN_DUMP_STATE_ORD 465
-
-#define GSN_START_INFOREQ 466
-#define GSN_START_INFOREF 467
-#define GSN_START_INFOCONF 468
-
-#define GSN_TC_COMMIT_ACK 469
-#define GSN_REMOVE_MARKER_ORD 470
-
-#define GSN_CHECKNODEGROUPSREQ 471
-#define GSN_CHECKNODEGROUPSCONF 472
-
-/* 473 unused */
-#define GSN_ARBIT_PREPREQ 474
-#define GSN_ARBIT_PREPCONF 475
-#define GSN_ARBIT_PREPREF 476
-#define GSN_ARBIT_STARTREQ 477
-#define GSN_ARBIT_STARTCONF 478
-#define GSN_ARBIT_STARTREF 479
-#define GSN_ARBIT_CHOOSEREQ 480
-#define GSN_ARBIT_CHOOSECONF 481
-#define GSN_ARBIT_CHOOSEREF 482
-#define GSN_ARBIT_STOPORD 483
-#define GSN_ARBIT_STOPREP 484
-
-#define GSN_BLOCK_COMMIT_ORD 485
-#define GSN_UNBLOCK_COMMIT_ORD 486
-
-#define GSN_NODE_STATE_REP 487
-#define GSN_CHANGE_NODE_STATE_REQ 488
-#define GSN_CHANGE_NODE_STATE_CONF 489
-
-#define GSN_DIH_SWITCH_REPLICA_REQ 490
-#define GSN_DIH_SWITCH_REPLICA_CONF 491
-#define GSN_DIH_SWITCH_REPLICA_REF 492
-
-#define GSN_STOP_PERM_REQ 493
-#define GSN_STOP_PERM_REF 494
-#define GSN_STOP_PERM_CONF 495
-
-#define GSN_STOP_ME_REQ 496
-#define GSN_STOP_ME_REF 497
-#define GSN_STOP_ME_CONF 498
-
-#define GSN_WAIT_GCP_REQ 499
-#define GSN_WAIT_GCP_REF 500
-#define GSN_WAIT_GCP_CONF 501
-
-/* 502 not used */
-
-/**
- * Trigger and index signals
- */
-
-/**
- * These are used by API and kernel
- */
-#define GSN_TRIG_ATTRINFO 503
-#define GSN_CREATE_TRIG_REQ 504
-#define GSN_CREATE_TRIG_CONF 505
-#define GSN_CREATE_TRIG_REF 506
-#define GSN_ALTER_TRIG_REQ 507
-#define GSN_ALTER_TRIG_CONF 508
-#define GSN_ALTER_TRIG_REF 509
-#define GSN_CREATE_INDX_REQ 510
-#define GSN_CREATE_INDX_CONF 511
-#define GSN_CREATE_INDX_REF 512
-#define GSN_DROP_TRIG_REQ 513
-#define GSN_DROP_TRIG_CONF 514
-#define GSN_DROP_TRIG_REF 515
-#define GSN_DROP_INDX_REQ 516
-#define GSN_DROP_INDX_CONF 517
-#define GSN_DROP_INDX_REF 518
-#define GSN_TCINDXREQ 519
-#define GSN_TCINDXCONF 520
-#define GSN_TCINDXREF 521
-#define GSN_INDXKEYINFO 522
-#define GSN_INDXATTRINFO 523
-#define GSN_TCINDXNEXTREQ 524
-#define GSN_TCINDXNEXTCONF 525
-#define GSN_TCINDXNEXREF 526
-#define GSN_FIRE_TRIG_ORD 527
-
-/**
- * These are used only by kernel
- */
-#define GSN_BUILDINDXREQ 528
-#define GSN_BUILDINDXCONF 529
-#define GSN_BUILDINDXREF 530
-
-/**
- * Backup interface
- */
-#define GSN_BACKUP_REQ 531
-#define GSN_BACKUP_DATA 532
-#define GSN_BACKUP_REF 533
-#define GSN_BACKUP_CONF 534
-
-#define GSN_ABORT_BACKUP_ORD 535
-
-#define GSN_BACKUP_ABORT_REP 536
-#define GSN_BACKUP_COMPLETE_REP 537
-#define GSN_BACKUP_NF_COMPLETE_REP 538
-
-/**
- * Internal backup signals
- */
-#define GSN_DEFINE_BACKUP_REQ 539
-#define GSN_DEFINE_BACKUP_REF 540
-#define GSN_DEFINE_BACKUP_CONF 541
-
-#define GSN_START_BACKUP_REQ 542
-#define GSN_START_BACKUP_REF 543
-#define GSN_START_BACKUP_CONF 544
-
-#define GSN_BACKUP_FRAGMENT_REQ 545
-#define GSN_BACKUP_FRAGMENT_REF 546
-#define GSN_BACKUP_FRAGMENT_CONF 547
-
-#define GSN_STOP_BACKUP_REQ 548
-#define GSN_STOP_BACKUP_REF 549
-#define GSN_STOP_BACKUP_CONF 550
-
-/**
- * Used for master take-over / API status request
- */
-#define GSN_BACKUP_STATUS_REQ 551
-#define GSN_BACKUP_STATUS_REF 116
-#define GSN_BACKUP_STATUS_CONF 165
-
-/**
- * Db sequence signals
- */
-#define GSN_UTIL_SEQUENCE_REQ 552
-#define GSN_UTIL_SEQUENCE_REF 553
-#define GSN_UTIL_SEQUENCE_CONF 554
-
-#define GSN_FSREMOVEREQ 555
-#define GSN_FSREMOVEREF 556
-#define GSN_FSREMOVECONF 557
-
-#define GSN_UTIL_PREPARE_REQ 558
-#define GSN_UTIL_PREPARE_CONF 559
-#define GSN_UTIL_PREPARE_REF 560
-
-#define GSN_UTIL_EXECUTE_REQ 561
-#define GSN_UTIL_EXECUTE_CONF 562
-#define GSN_UTIL_EXECUTE_REF 563
-
-#define GSN_UTIL_RELEASE_REQ 564
-#define GSN_UTIL_RELEASE_CONF 565
-#define GSN_UTIL_RELEASE_REF 566
-
-/**
- * When dropping a long signal due to lack of memory resources
- */
-#define GSN_SIGNAL_DROPPED_REP 567
-#define GSN_CONTINUE_FRAGMENTED 568
-
-/**
- * Suma participant interface
- */
-#define GSN_SUB_REMOVE_REQ 569
-#define GSN_SUB_REMOVE_REF 570
-#define GSN_SUB_REMOVE_CONF 571
-#define GSN_SUB_STOP_REQ 572
-#define GSN_SUB_STOP_REF 573
-#define GSN_SUB_STOP_CONF 574
-/* 575 unused */
-#define GSN_SUB_CREATE_REQ 576
-#define GSN_SUB_CREATE_REF 577
-#define GSN_SUB_CREATE_CONF 578
-#define GSN_SUB_START_REQ 579
-#define GSN_SUB_START_REF 580
-#define GSN_SUB_START_CONF 581
-#define GSN_SUB_SYNC_REQ 582
-#define GSN_SUB_SYNC_REF 583
-#define GSN_SUB_SYNC_CONF 584
-#define GSN_SUB_META_DATA 585
-#define GSN_SUB_TABLE_DATA 586
-
-#define GSN_CREATE_TABLE_REQ 587
-#define GSN_CREATE_TABLE_REF 588
-#define GSN_CREATE_TABLE_CONF 589
-
-#define GSN_ALTER_TABLE_REQ 624
-#define GSN_ALTER_TABLE_REF 625
-#define GSN_ALTER_TABLE_CONF 626
-
-#define GSN_SUB_SYNC_CONTINUE_REQ 590
-#define GSN_SUB_SYNC_CONTINUE_REF 591
-#define GSN_SUB_SYNC_CONTINUE_CONF 592
-#define GSN_SUB_GCP_COMPLETE_REP 593
-
-#define GSN_CREATE_FRAGMENTATION_REQ 594
-#define GSN_CREATE_FRAGMENTATION_REF 595
-#define GSN_CREATE_FRAGMENTATION_CONF 596
-
-#define GSN_CREATE_TAB_REQ 597
-#define GSN_CREATE_TAB_REF 598
-#define GSN_CREATE_TAB_CONF 599
-
-#define GSN_ALTER_TAB_REQ 600
-#define GSN_ALTER_TAB_REF 601
-#define GSN_ALTER_TAB_CONF 602
-
-#define GSN_ALTER_INDX_REQ 603
-#define GSN_ALTER_INDX_REF 604
-#define GSN_ALTER_INDX_CONF 605
-
-/**
- * Grep signals
- */
-#define GSN_GREP_SUB_CREATE_REQ 606
-#define GSN_GREP_SUB_CREATE_REF 607
-#define GSN_GREP_SUB_CREATE_CONF 608
-#define GSN_GREP_CREATE_REQ 609
-#define GSN_GREP_CREATE_REF 610
-#define GSN_GREP_CREATE_CONF 611
-
-#define GSN_GREP_SUB_START_REQ 612
-#define GSN_GREP_SUB_START_REF 613
-#define GSN_GREP_SUB_START_CONF 614
-#define GSN_GREP_START_REQ 615
-#define GSN_GREP_START_REF 616
-#define GSN_GREP_START_CONF 617
-
-#define GSN_GREP_SUB_SYNC_REQ 618
-#define GSN_GREP_SUB_SYNC_REF 619
-#define GSN_GREP_SUB_SYNC_CONF 620
-#define GSN_GREP_SYNC_REQ 621
-#define GSN_GREP_SYNC_REF 622
-#define GSN_GREP_SYNC_CONF 623
-
-/**
- * REP signals
- */
-#define GSN_REP_WAITGCP_REQ 627
-#define GSN_REP_WAITGCP_REF 628
-#define GSN_REP_WAITGCP_CONF 629
-#define GSN_GREP_WAITGCP_REQ 630
-#define GSN_GREP_WAITGCP_REF 631
-#define GSN_GREP_WAITGCP_CONF 632
-#define GSN_REP_GET_GCI_REQ 633
-#define GSN_REP_GET_GCI_REF 634
-#define GSN_REP_GET_GCI_CONF 635
-#define GSN_REP_GET_GCIBUFFER_REQ 636
-#define GSN_REP_GET_GCIBUFFER_REF 637
-#define GSN_REP_GET_GCIBUFFER_CONF 638
-#define GSN_REP_INSERT_GCIBUFFER_REQ 639
-#define GSN_REP_INSERT_GCIBUFFER_REF 640
-#define GSN_REP_INSERT_GCIBUFFER_CONF 641
-#define GSN_REP_CLEAR_PS_GCIBUFFER_REQ 642
-#define GSN_REP_CLEAR_PS_GCIBUFFER_REF 643
-#define GSN_REP_CLEAR_PS_GCIBUFFER_CONF 644
-#define GSN_REP_CLEAR_SS_GCIBUFFER_REQ 645
-#define GSN_REP_CLEAR_SS_GCIBUFFER_REF 646
-#define GSN_REP_CLEAR_SS_GCIBUFFER_CONF 647
-#define GSN_REP_DATA_PAGE 648
-#define GSN_REP_GCIBUFFER_ACC_REP 649
-
-#define GSN_GREP_SUB_REMOVE_REQ 650
-#define GSN_GREP_SUB_REMOVE_REF 651
-#define GSN_GREP_SUB_REMOVE_CONF 652
-#define GSN_GREP_REMOVE_REQ 653
-#define GSN_GREP_REMOVE_REF 654
-#define GSN_GREP_REMOVE_CONF 655
-
-/* Start Global Replication */
-#define GSN_GREP_REQ 656
-
-/**
- * Management server
- */
-#define GSN_MGM_LOCK_CONFIG_REQ 657
-#define GSN_MGM_LOCK_CONFIG_REP 658
-#define GSN_MGM_UNLOCK_CONFIG_REQ 659
-#define GSN_MGM_UNLOCK_CONFIG_REP 660
-
-#define GSN_UTIL_CREATE_LOCK_REQ 132
-#define GSN_UTIL_CREATE_LOCK_REF 133
-#define GSN_UTIL_CREATE_LOCK_CONF 188
-
-#define GSN_UTIL_DESTROY_LOCK_REQ 189
-#define GSN_UTIL_DESTROY_LOCK_REF 220
-#define GSN_UTIL_DESTROY_LOCK_CONF 221
-
-#define GSN_UTIL_LOCK_REQ 222
-#define GSN_UTIL_LOCK_REF 230
-#define GSN_UTIL_LOCK_CONF 231
-
-#define GSN_UTIL_UNLOCK_REQ 303
-#define GSN_UTIL_UNLOCK_REF 304
-#define GSN_UTIL_UNLOCK_CONF 362
-
-/* SUMA */
-#define GSN_CREATE_SUBID_REQ 661
-#define GSN_CREATE_SUBID_REF 662
-#define GSN_CREATE_SUBID_CONF 663
-
-/* GREP */
-#define GSN_GREP_CREATE_SUBID_REQ 664
-#define GSN_GREP_CREATE_SUBID_REF 665
-#define GSN_GREP_CREATE_SUBID_CONF 666
-#define GSN_REP_DROP_TABLE_REQ 667
-#define GSN_REP_DROP_TABLE_REF 668
-#define GSN_REP_DROP_TABLE_CONF 669
-
-/*
- * TUX
- */
-#define GSN_TUXFRAGREQ 670
-#define GSN_TUXFRAGCONF 671
-#define GSN_TUXFRAGREF 672
-#define GSN_TUX_ADD_ATTRREQ 673
-#define GSN_TUX_ADD_ATTRCONF 674
-#define GSN_TUX_ADD_ATTRREF 675
-
-/*
- * REP
- */
-#define GSN_REP_DISCONNECT_REP 676
-
-#define GSN_TUX_MAINT_REQ 677
-#define GSN_TUX_MAINT_CONF 678
-#define GSN_TUX_MAINT_REF 679
-
-/* not used 680 */
-/* not used 681 */
-
-/**
- * from mgmtsrvr to NDBCNTR
- */
-#define GSN_RESUME_REQ 682
-#define GSN_STOP_REQ 443
-#define GSN_STOP_REF 444
-#define GSN_API_VERSION_REQ 697
-#define GSN_API_VERSION_CONF 698
-
-/* not used 686 */
-/* not used 687 */
-/* not used 689 */
-/* not used 690 */
-
-/**
- * SUMA restart protocol
- */
-#define GSN_SUMA_START_ME 691
-#define GSN_SUMA_HANDOVER_REQ 692
-#define GSN_SUMA_HANDOVER_CONF 693
-
-/* not used 694 */
-/* not used 695 */
-/* not used 696 */
-
-/**
- * GREP restart protocol
- */
-#define GSN_GREP_START_ME 706
-#define GSN_GREP_ADD_SUB_REQ 707
-#define GSN_GREP_ADD_SUB_REF 708
-#define GSN_GREP_ADD_SUB_CONF 709
-
-
-/*
- * EVENT Signals
- */
-#define GSN_SUB_GCP_COMPLETE_ACC 699
-
-#define GSN_CREATE_EVNT_REQ 700
-#define GSN_CREATE_EVNT_CONF 701
-#define GSN_CREATE_EVNT_REF 702
-
-#define GSN_DROP_EVNT_REQ 703
-#define GSN_DROP_EVNT_CONF 704
-#define GSN_DROP_EVNT_REF 705
-
-#define GSN_TUX_BOUND_INFO 710
-
-#define GSN_ACC_LOCKREQ 711
-#define GSN_READ_PSUEDO_REQ 712
-
-#endif
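The GsnName table declared above is defined elsewhere (SignalNames.cpp), not in this header. As a hedged illustration of how the number-to-name mapping is typically consumed, here is a self-contained sketch with a hypothetical three-entry table and a simple linear lookup; the typedef and table contents are stand-ins, not the real definitions:

#include <cstdio>

typedef unsigned short GlobalSignalNumber;   // stand-in for kernel_types.h

struct GsnName {
  GlobalSignalNumber gsn;
  const char * name;
};

// Hypothetical excerpt; the real table must cover every name up to MAX_GSN.
static const GsnName SignalNames[] = {
  { 1, "API_REGCONF" },
  { 2, "API_REGREF"  },
  { 3, "API_REGREQ"  }
};
static const GlobalSignalNumber NO_OF_SIGNAL_NAMES =
  sizeof(SignalNames) / sizeof(SignalNames[0]);

// Linear lookup of a signal name by number; returns 0 if unknown.
static const char * lookupGsnName(GlobalSignalNumber gsn) {
  for (GlobalSignalNumber i = 0; i < NO_OF_SIGNAL_NAMES; i++)
    if (SignalNames[i].gsn == gsn)
      return SignalNames[i].name;
  return 0;
}

int main() {
  printf("GSN 3 = %s\n", lookupGsnName(3));   // prints API_REGREQ
  return 0;
}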
diff --git a/ndb/include/kernel/ndb_limits.h b/ndb/include/kernel/ndb_limits.h
deleted file mode 100644
index e60153e60ec..00000000000
--- a/ndb/include/kernel/ndb_limits.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef NDB_LIMITS_H
-#define NDB_LIMITS_H
-
-#include <mysql.h>
-
-#define RNIL 0xffffff00
-
-/**
- * Note that actual value = MAX_NODES - 1,
- * since NodeId = 0 can not be used
- */
-#define MAX_NDB_NODES 49
-#define MAX_NODES 64
-
-/**
- * MAX_API_NODES = MAX_NODES - No of NDB Nodes in use
- */
-
-/**
- * The maximum number of replicas in the system
- */
-#define MAX_REPLICAS 4
-
-/**
- * The maximum number of local checkpoints stored at a time
- */
-#define MAX_LCP_STORED 3
-
-/**
- * The maximum number of log execution rounds at system restart
- */
-#define MAX_LOG_EXEC 4
-
-/**
- * The maximum number of tuples per page
- **/
-#define MAX_TUPLES_PER_PAGE 8191
-#define MAX_TUPLES_BITS 13 /* 13 bits = 8191 tuples per page */
-#define MAX_TABLES 20320 /* SchemaFile.hpp */
-#define MAX_TAB_NAME_SIZE 128
-#define MAX_ATTR_NAME_SIZE NAME_LEN /* From mysql_com.h */
-#define MAX_ATTR_DEFAULT_VALUE_SIZE 128
-#define MAX_ATTRIBUTES_IN_TABLE 128
-#define MAX_ATTRIBUTES_IN_INDEX 32
-#define MAX_TUPLE_SIZE_IN_WORDS 2013
-#define MAX_KEY_SIZE_IN_WORDS 1023
-#define MAX_FRM_DATA_SIZE 6000
-#define MAX_NULL_BITS 4096
-#define MAX_FRAGMENT_DATA_BYTES (4+(2 * 8 * MAX_REPLICAS * MAX_NDB_NODES))
-
-#define MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE/24) + 1)
-/*
- * Max Number of Records to fetch per SCAN_NEXTREQ in a scan in LQH. The
- * API can order a multiple of this number of records at a time since
- * fragments can be scanned in parallel.
- */
-#define MAX_PARALLEL_OP_PER_SCAN 992
-/*
-* The default batch size. Configurable parameter.
-*/
-#define DEF_BATCH_SIZE 64
-/*
-* When calculating the number of records sent from LQH in each batch
-* one uses SCAN_BATCH_SIZE divided by the expected size of signals
-* per row. This gives the batch size used for the scan. The NDB API
-* will receive one batch from each node at a time, so some care must
-* also be taken that the NDB API is not overloaded with
-* signals.
-* This parameter is configurable, this is the default value.
-*/
-#define SCAN_BATCH_SIZE 32768
-/*
-* To protect the NDB API from overload we also define a maximum total
-* batch size from all nodes. This parameter should most likely be
-* configurable, or dependent on sendBufferSize.
-* This parameter is configurable, this is the default value.
-*/
-#define MAX_SCAN_BATCH_SIZE 262144
-/*
- * Maximum number of Parallel Scan queries on one hash index fragment
- */
-#define MAX_PARALLEL_SCANS_PER_FRAG 12
-/*
- * Maximum parallel ordered index scans per primary table fragment.
- * Implementation limit is (256 minus 12).
- */
-#define MAX_PARALLEL_INDEX_SCANS_PER_FRAG 32
-
-/**
- * Computed defines
- */
-#define MAXNROFATTRIBUTESINWORDS (MAX_ATTRIBUTES_IN_TABLE / 32)
-
-/*
- * Ordered index constants. Make configurable per index later.
- */
-#define MAX_TTREE_NODE_SIZE 64 /* total words in node */
-#define MAX_TTREE_PREF_SIZE 4 /* words in min prefix */
-#define MAX_TTREE_NODE_SLACK 2 /* diff between max and min occupancy */
-
-/*
- * Blobs.
- */
-#define NDB_BLOB_HEAD_SIZE 2 /* sizeof(NdbBlob::Head) >> 2 */
-
-/*
- * Character sets.
- */
-#define MAX_XFRM_MULTIPLY 8 /* max expansion when normalizing */
-
-/*
- * Long signals
- */
-#define NDB_SECTION_SEGMENT_SZ 60
-
-#endif
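The comments above state that the number of rows LQH sends per batch is bounded both by the configured row count and by SCAN_BATCH_SIZE divided by the expected bytes per row, with MAX_SCAN_BATCH_SIZE capping the total across nodes. A minimal sketch of that arithmetic (the helper function and its name are illustrative, not part of the header):

#include <algorithm>
#include <cstdio>

// Defaults copied from ndb_limits.h above.
static const unsigned DEF_BATCH_SIZE      = 64;       // rows
static const unsigned SCAN_BATCH_SIZE     = 32768;    // bytes per fragment batch
static const unsigned MAX_SCAN_BATCH_SIZE = 262144;   // bytes across all nodes

// Rows per batch: the smaller of the configured row count and the
// byte budget divided by the expected signal bytes per row.
static unsigned rowsPerBatch(unsigned expectedBytesPerRow) {
  return std::min(DEF_BATCH_SIZE, SCAN_BATCH_SIZE / expectedBytesPerRow);
}

int main() {
  // With ~1 KiB rows the byte budget (32 rows) limits the batch,
  // not the default row count of 64.
  printf("rows per batch = %u\n", rowsPerBatch(1024));
  // MAX_SCAN_BATCH_SIZE additionally caps how many full batches can be
  // outstanding at once when many nodes send simultaneously.
  printf("full batches within total cap = %u\n",
         MAX_SCAN_BATCH_SIZE / SCAN_BATCH_SIZE);
  return 0;
}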
diff --git a/ndb/include/kernel/signaldata/CreateFragmentation.hpp b/ndb/include/kernel/signaldata/CreateFragmentation.hpp
deleted file mode 100644
index 7d53dd91154..00000000000
--- a/ndb/include/kernel/signaldata/CreateFragmentation.hpp
+++ /dev/null
@@ -1,101 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef CREATE_FRAGMENTATION_REQ_HPP
-#define CREATE_FRAGMENTATION_REQ_HPP
-
-#include "SignalData.hpp"
-
-class CreateFragmentationReq {
- /**
- * Sender(s)
- */
- friend class Dbdict;
-
- /**
- * Receiver(s)
- */
- friend class Dbdih;
-
- friend bool printCREATE_FRAGMENTATION_REQ(FILE *,
- const Uint32 *, Uint32, Uint16);
-public:
- STATIC_CONST( SignalLength = 6 );
-
-private:
- Uint32 senderRef;
- Uint32 senderData;
- Uint32 fragmentationType;
- Uint32 noOfFragments;
- Uint32 fragmentNode;
- Uint32 primaryTableId; // use same fragmentation as this table if not RNIL
-};
-
-class CreateFragmentationRef {
- /**
- * Sender(s)
- */
- friend class Dbdih;
-
- /**
- * Receiver(s)
- */
- friend class Dbdict;
-
- friend bool printCREATE_FRAGMENTATION_REF(FILE *,
- const Uint32 *, Uint32, Uint16);
-public:
- STATIC_CONST( SignalLength = 3 );
-
- enum ErrorCode {
- OK = 0
- ,InvalidFragmentationType = 1
- ,InvalidNodeId = 2
- ,InvalidNodeType = 3
- ,InvalidPrimaryTable = 4
- };
-
-private:
- Uint32 senderRef;
- Uint32 senderData;
- Uint32 errorCode;
-};
-
-class CreateFragmentationConf {
- /**
- * Sender(s)
- */
- friend class Dbdih;
-
- /**
- * Receiver(s)
- */
- friend class Dbdict;
-
- friend bool printCREATE_FRAGMENTATION_CONF(FILE *,
- const Uint32 *, Uint32, Uint16);
-public:
- STATIC_CONST( SignalLength = 4 );
- SECTION( FRAGMENTS = 0 );
-
-private:
- Uint32 senderRef;
- Uint32 senderData;
- Uint32 noOfReplicas;
- Uint32 noOfFragments;
-};
-
-#endif
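As a small illustration of how a receiver of CREATE_FRAGMENTATION_REF might act on the error codes above, here is a self-contained sketch; the enum is re-declared locally so the snippet compiles on its own, and the helper function is hypothetical, not part of the signal class:

#include <cstdio>

// Local mirror of CreateFragmentationRef::ErrorCode.
enum ErrorCode {
  OK = 0,
  InvalidFragmentationType = 1,
  InvalidNodeId = 2,
  InvalidNodeType = 3,
  InvalidPrimaryTable = 4
};

// Hypothetical helper turning a REF error code into a readable string.
static const char * errorText(ErrorCode code) {
  switch (code) {
  case OK:                       return "ok";
  case InvalidFragmentationType: return "invalid fragmentation type";
  case InvalidNodeId:            return "invalid node id";
  case InvalidNodeType:          return "invalid node type";
  case InvalidPrimaryTable:      return "invalid primary table";
  }
  return "unknown error";
}

int main() {
  printf("%s\n", errorText(InvalidPrimaryTable));
  return 0;
}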
diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp
deleted file mode 100644
index 09b00cf8993..00000000000
--- a/ndb/include/kernel/signaldata/DictTabInfo.hpp
+++ /dev/null
@@ -1,515 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef DICT_TAB_INFO_HPP
-#define DICT_TAB_INFO_HPP
-
-#include "SignalData.hpp"
-#include <AttributeDescriptor.hpp>
-#include <SimpleProperties.hpp>
-#include <ndb_limits.h>
-#include <trigger_definitions.h>
-#include <NdbSqlUtil.hpp>
-
-#ifndef my_decimal_h
-
-// sql/my_decimal.h requires many more sql/*.h headers that are new to ndb;
-// for now, copy the bit we need. TODO: proper fix
-
-#define DECIMAL_MAX_LENGTH ((8 * 9) - 8)
-
-#ifndef NOT_FIXED_DEC
-#define NOT_FIXED_DEC 31
-#endif
-
-C_MODE_START
-extern int decimal_bin_size(int, int);
-C_MODE_END
-
-inline int my_decimal_get_binary_size(uint precision, uint scale)
-{
- return decimal_bin_size((int)precision, (int)scale);
-}
-
-#endif
-
-#define DTIMAP(x, y, z) \
- { DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 }
-
-#define DTIMAP2(x, y, z, u, v) \
- { DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 }
-
-#define DTIMAPS(x, y, z, u, v) \
- { DictTabInfo::y, offsetof(x, z), SimpleProperties::StringValue, u, v, 0 }
-
-#define DTIMAPB(x, y, z, u, v, l) \
- { DictTabInfo::y, offsetof(x, z), SimpleProperties::BinaryValue, u, v, \
- offsetof(x, l) }
-
-#define DTIBREAK(x) \
- { DictTabInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 }
-
-class DictTabInfo {
- /**
- * Sender(s) / Receiver(s)
- */
- // Blocks
- friend class Backup;
- friend class Dbdict;
- friend class Ndbcntr;
- friend class Trix;
- friend class DbUtil;
- // API
- friend class NdbSchemaOp;
-
- /**
- * For printing
- */
- friend bool printDICTTABINFO(FILE * output,
- const Uint32 * theData,
- Uint32 len,
- Uint16 receiverBlockNo);
-
-public:
- enum RequestType {
- CreateTableFromAPI = 1,
- AddTableFromDict = 2, // Between DICT's
- CopyTable = 3, // Between DICT's
- ReadTableFromDiskSR = 4, // Local in DICT
- GetTabInfoConf = 5,
- AlterTableFromAPI = 6
- };
-
- enum KeyValues {
- TableName = 1, // String, Mandatory
- TableId = 2, //Mandatory between DICT's otherwise not allowed
- TableVersion = 3, //Mandatory between DICT's otherwise not allowed
- TableLoggedFlag = 4, //Default Logged
- NoOfKeyAttr = 5, //Default 1
- NoOfAttributes = 6, //Mandatory
- NoOfNullable = 7, //Default 0
- NoOfVariable = 8, //Default 0
- TableKValue = 9, //Default 6
- MinLoadFactor = 10, //Default 70
- MaxLoadFactor = 11, //Default 80
- KeyLength = 12, //Default 1 (No of words in primary key)
- FragmentTypeVal = 13, //Default AllNodesSmallTable
- TableTypeVal = 18, //Default TableType::UserTable
- PrimaryTable = 19, //Mandatory for index otherwise RNIL
- PrimaryTableId = 20, //ditto
- IndexState = 21,
- InsertTriggerId = 22,
- UpdateTriggerId = 23,
- DeleteTriggerId = 24,
- CustomTriggerId = 25,
- FrmLen = 26,
- FrmData = 27,
- FragmentCount = 128, // No of fragments in table (not fragment replicas)
- FragmentDataLen = 129,
- FragmentData = 130, // CREATE_FRAGMENTATION reply
- TableEnd = 999,
-
- AttributeName = 1000, // String, Mandatory
- AttributeId = 1001, //Mandatory between DICT's otherwise not allowed
- AttributeType = 1002, //for osu 4.1->5.0.x
- AttributeSize = 1003, //Default DictTabInfo::a32Bit
- AttributeArraySize = 1005, //Default 1
- AttributeKeyFlag = 1006, //Default noKey
- AttributeStorage = 1007, //Default MainMemory
- AttributeNullableFlag = 1008, //Default NotNullable
- AttributeDKey = 1010, //Default NotDKey
- AttributeExtType = 1013, //Default ExtUnsigned
- AttributeExtPrecision = 1014, //Default 0
- AttributeExtScale = 1015, //Default 0
- AttributeExtLength = 1016, //Default 0
- AttributeAutoIncrement = 1017, //Default false
- AttributeDefaultValue = 1018, //Default value (printable string)
- AttributeEnd = 1999 //
- };
- // ----------------------------------------------------------------------
- // Part of the protocol is that we only transfer parameters which do not
- // have a default value. Thus the default values are part of the protocol.
- // ----------------------------------------------------------------------
-
-
-
- // FragmentType constants
- enum FragmentType {
- AllNodesSmallTable = 0,
- AllNodesMediumTable = 1,
- AllNodesLargeTable = 2,
- SingleFragment = 3
- };
-
- // TableType constants + objects
- enum TableType {
- UndefTableType = 0,
- SystemTable = 1,
- UserTable = 2,
- UniqueHashIndex = 3,
- HashIndex = 4,
- UniqueOrderedIndex = 5,
- OrderedIndex = 6,
- // constant 10 hardcoded in Dbdict.cpp
- HashIndexTrigger = 10 + TriggerType::SECONDARY_INDEX,
- SubscriptionTrigger = 10 + TriggerType::SUBSCRIPTION,
- ReadOnlyConstraint = 10 + TriggerType::READ_ONLY_CONSTRAINT,
- IndexTrigger = 10 + TriggerType::ORDERED_INDEX
- };
- static inline bool
- isTable(int tableType) {
- return
- tableType == SystemTable ||
- tableType == UserTable;
- }
- static inline bool
- isIndex(int tableType) {
- return
- tableType == UniqueHashIndex ||
- tableType == HashIndex ||
- tableType == UniqueOrderedIndex ||
- tableType == OrderedIndex;
- }
- static inline bool
- isUniqueIndex(int tableType) {
- return
- tableType == UniqueHashIndex ||
- tableType == UniqueOrderedIndex;
- }
- static inline bool
- isNonUniqueIndex(int tableType) {
- return
- tableType == HashIndex ||
- tableType == OrderedIndex;
- }
- static inline bool
- isHashIndex(int tableType) {
- return
- tableType == UniqueHashIndex ||
- tableType == HashIndex;
- }
- static inline bool
- isOrderedIndex(int tableType) {
- return
- tableType == UniqueOrderedIndex ||
- tableType == OrderedIndex;
- }
-
- // Object state for translating from/to API
- enum ObjectState {
- StateUndefined = 0,
- StateOffline = 1,
- StateBuilding = 2,
- StateDropping = 3,
- StateOnline = 4,
- StateBroken = 9
- };
-
- // Object store for translating from/to API
- enum ObjectStore {
- StoreUndefined = 0,
- StoreTemporary = 1,
- StorePermanent = 2
- };
-
- // AttributeSize constants
- STATIC_CONST( aBit = 0 );
- STATIC_CONST( an8Bit = 3 );
- STATIC_CONST( a16Bit = 4 );
- STATIC_CONST( a32Bit = 5 );
- STATIC_CONST( a64Bit = 6 );
- STATIC_CONST( a128Bit = 7 );
-
- // Table data interpretation
- struct Table {
- char TableName[MAX_TAB_NAME_SIZE];
- Uint32 TableId;
- char PrimaryTable[MAX_TAB_NAME_SIZE]; // Only used when "index"
- Uint32 PrimaryTableId;
- Uint32 TableLoggedFlag;
- Uint32 NoOfKeyAttr;
- Uint32 NoOfAttributes;
- Uint32 NoOfNullable;
- Uint32 NoOfVariable;
- Uint32 TableKValue;
- Uint32 MinLoadFactor;
- Uint32 MaxLoadFactor;
- Uint32 KeyLength;
- Uint32 FragmentType;
- Uint32 TableStorage;
- Uint32 TableType;
- Uint32 TableVersion;
- Uint32 IndexState;
- Uint32 InsertTriggerId;
- Uint32 UpdateTriggerId;
- Uint32 DeleteTriggerId;
- Uint32 CustomTriggerId;
- Uint32 FrmLen;
- char FrmData[MAX_FRM_DATA_SIZE];
- Uint32 FragmentCount;
- Uint32 FragmentDataLen;
- Uint16 FragmentData[(MAX_FRAGMENT_DATA_BYTES+1)/2];
-
- void init();
- };
-
- static const
- SimpleProperties::SP2StructMapping TableMapping[];
-
- static const Uint32 TableMappingSize;
-
- // AttributeExtType values
- enum ExtType {
- ExtUndefined = NdbSqlUtil::Type::Undefined,
- ExtTinyint = NdbSqlUtil::Type::Tinyint,
- ExtTinyunsigned = NdbSqlUtil::Type::Tinyunsigned,
- ExtSmallint = NdbSqlUtil::Type::Smallint,
- ExtSmallunsigned = NdbSqlUtil::Type::Smallunsigned,
- ExtMediumint = NdbSqlUtil::Type::Mediumint,
- ExtMediumunsigned = NdbSqlUtil::Type::Mediumunsigned,
- ExtInt = NdbSqlUtil::Type::Int,
- ExtUnsigned = NdbSqlUtil::Type::Unsigned,
- ExtBigint = NdbSqlUtil::Type::Bigint,
- ExtBigunsigned = NdbSqlUtil::Type::Bigunsigned,
- ExtFloat = NdbSqlUtil::Type::Float,
- ExtDouble = NdbSqlUtil::Type::Double,
- ExtOlddecimal = NdbSqlUtil::Type::Olddecimal,
- ExtOlddecimalunsigned = NdbSqlUtil::Type::Olddecimalunsigned,
- ExtDecimal = NdbSqlUtil::Type::Decimal,
- ExtDecimalunsigned = NdbSqlUtil::Type::Decimalunsigned,
- ExtChar = NdbSqlUtil::Type::Char,
- ExtVarchar = NdbSqlUtil::Type::Varchar,
- ExtBinary = NdbSqlUtil::Type::Binary,
- ExtVarbinary = NdbSqlUtil::Type::Varbinary,
- ExtDatetime = NdbSqlUtil::Type::Datetime,
- ExtDate = NdbSqlUtil::Type::Date,
- ExtBlob = NdbSqlUtil::Type::Blob,
- ExtText = NdbSqlUtil::Type::Text,
- ExtBit = NdbSqlUtil::Type::Bit,
- ExtLongvarchar = NdbSqlUtil::Type::Longvarchar,
- ExtLongvarbinary = NdbSqlUtil::Type::Longvarbinary,
- ExtTime = NdbSqlUtil::Type::Time,
- ExtYear = NdbSqlUtil::Type::Year,
- ExtTimestamp = NdbSqlUtil::Type::Timestamp
- };
-
- // Attribute data interpretation
- struct Attribute {
- char AttributeName[MAX_TAB_NAME_SIZE];
- Uint32 AttributeId;
- Uint32 AttributeType; // for osu 4.1->5.0.x
- Uint32 AttributeSize;
- Uint32 AttributeArraySize;
- Uint32 AttributeKeyFlag;
- Uint32 AttributeNullableFlag;
- Uint32 AttributeDKey;
- Uint32 AttributeExtType;
- Uint32 AttributeExtPrecision;
- Uint32 AttributeExtScale;
- Uint32 AttributeExtLength;
- Uint32 AttributeAutoIncrement;
- char AttributeDefaultValue[MAX_ATTR_DEFAULT_VALUE_SIZE];
-
- void init();
-
- inline
- Uint32 sizeInWords()
- {
- return ((1 << AttributeSize) * AttributeArraySize + 31) >> 5;
- }
-
- // compute old-style attribute size and array size
- inline bool
- translateExtType() {
- switch (AttributeExtType) {
- case DictTabInfo::ExtUndefined:
- return false;
- case DictTabInfo::ExtTinyint:
- case DictTabInfo::ExtTinyunsigned:
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize = AttributeExtLength;
- break;
- case DictTabInfo::ExtSmallint:
- case DictTabInfo::ExtSmallunsigned:
- AttributeSize = DictTabInfo::a16Bit;
- AttributeArraySize = AttributeExtLength;
- break;
- case DictTabInfo::ExtMediumint:
- case DictTabInfo::ExtMediumunsigned:
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize = 3 * AttributeExtLength;
- break;
- case DictTabInfo::ExtInt:
- case DictTabInfo::ExtUnsigned:
- AttributeSize = DictTabInfo::a32Bit;
- AttributeArraySize = AttributeExtLength;
- break;
- case DictTabInfo::ExtBigint:
- case DictTabInfo::ExtBigunsigned:
- AttributeSize = DictTabInfo::a64Bit;
- AttributeArraySize = AttributeExtLength;
- break;
- case DictTabInfo::ExtFloat:
- AttributeSize = DictTabInfo::a32Bit;
- AttributeArraySize = AttributeExtLength;
- break;
- case DictTabInfo::ExtDouble:
- AttributeSize = DictTabInfo::a64Bit;
- AttributeArraySize = AttributeExtLength;
- break;
- case DictTabInfo::ExtOlddecimal:
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize =
- (1 + AttributeExtPrecision + (int(AttributeExtScale) > 0)) *
- AttributeExtLength;
- break;
- case DictTabInfo::ExtOlddecimalunsigned:
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize =
- (0 + AttributeExtPrecision + (int(AttributeExtScale) > 0)) *
- AttributeExtLength;
- break;
- case DictTabInfo::ExtDecimal:
- case DictTabInfo::ExtDecimalunsigned:
- {
- // copy from Field_new_decimal ctor
- uint precision = AttributeExtPrecision;
- uint scale = AttributeExtScale;
- if (precision > DECIMAL_MAX_LENGTH || scale >= NOT_FIXED_DEC)
- precision = DECIMAL_MAX_LENGTH;
- uint bin_size = my_decimal_get_binary_size(precision, scale);
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize = bin_size * AttributeExtLength;
- }
- break;
- case DictTabInfo::ExtChar:
- case DictTabInfo::ExtBinary:
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize = AttributeExtLength;
- break;
- case DictTabInfo::ExtVarchar:
- case DictTabInfo::ExtVarbinary:
- if (AttributeExtLength > 0xff)
- return false;
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize = AttributeExtLength + 1;
- break;
- case DictTabInfo::ExtDatetime:
- // to fix
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize = 8 * AttributeExtLength;
- break;
- case DictTabInfo::ExtDate:
- // to fix
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize = 3 * AttributeExtLength;
- break;
- case DictTabInfo::ExtBlob:
- case DictTabInfo::ExtText:
- AttributeSize = DictTabInfo::an8Bit;
- // head + inline part (length in precision lower half)
- AttributeArraySize = (NDB_BLOB_HEAD_SIZE << 2) + (AttributeExtPrecision & 0xFFFF);
- break;
- case DictTabInfo::ExtBit:
- AttributeSize = DictTabInfo::aBit;
- AttributeArraySize = AttributeExtLength;
- break;
- case DictTabInfo::ExtLongvarchar:
- case DictTabInfo::ExtLongvarbinary:
- if (AttributeExtLength > 0xffff)
- return false;
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize = AttributeExtLength + 2;
- break;
- case DictTabInfo::ExtTime:
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize = 3 * AttributeExtLength;
- break;
- case DictTabInfo::ExtYear:
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize = 1 * AttributeExtLength;
- break;
- case DictTabInfo::ExtTimestamp:
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize = 4 * AttributeExtLength;
- break;
- default:
- return false;
- };
- return true;
- }
-
- inline void print(FILE *out) {
- fprintf(out, "AttributeId = %d\n", AttributeId);
- fprintf(out, "AttributeType = %d\n", AttributeType);
- fprintf(out, "AttributeSize = %d\n", AttributeSize);
- fprintf(out, "AttributeArraySize = %d\n", AttributeArraySize);
- fprintf(out, "AttributeKeyFlag = %d\n", AttributeKeyFlag);
- fprintf(out, "AttributeStorage = %d\n", AttributeStorage);
- fprintf(out, "AttributeNullableFlag = %d\n", AttributeNullableFlag);
- fprintf(out, "AttributeDKey = %d\n", AttributeDKey);
- fprintf(out, "AttributeGroup = %d\n", AttributeGroup);
- fprintf(out, "AttributeAutoIncrement = %d\n", AttributeAutoIncrement);
- fprintf(out, "AttributeExtType = %d\n", AttributeExtType);
- fprintf(out, "AttributeExtPrecision = %d\n", AttributeExtPrecision);
- fprintf(out, "AttributeExtScale = %d\n", AttributeExtScale);
- fprintf(out, "AttributeExtLength = %d\n", AttributeExtLength);
- fprintf(out, "AttributeDefaultValue = \"%s\"\n",
- AttributeDefaultValue ? AttributeDefaultValue : "");
- }
- };
-
- static const
- SimpleProperties::SP2StructMapping AttributeMapping[];
-
- static const Uint32 AttributeMappingSize;
-
- // Signal constants
- STATIC_CONST( DataLength = 20 );
- STATIC_CONST( HeaderLength = 5 );
-
-private:
- Uint32 senderRef;
- Uint32 senderData;
- Uint32 requestType;
- Uint32 totalLen;
- Uint32 offset;
-
- /**
- * Length of this data = signal->length() - HeaderLength
- * Sender block ref = signal->senderBlockRef()
- */
-
- Uint32 tabInfoData[DataLength];
-
-public:
- enum Depricated
- {
- AttributeDGroup = 1009, //Default NotDGroup
- AttributeStoredInd = 1011, //Default NotStored
- SecondTableId = 17, //Mandatory between DICT's otherwise not allowed
- FragmentKeyTypeVal = 16 //Default PrimaryKey
- };
-
- enum Unimplemented
- {
- TableStorageVal = 14, //Default StorageType::MainMemory
- ScanOptimised = 15, //Default updateOptimised
- AttributeGroup = 1012 //Default 0
- };
-};
-
-#endif
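translateExtType() and sizeInWords() above together determine the old-style storage size of a column. A worked example for an assumed VARCHAR(20) column; the arithmetic mirrors the header, the concrete numbers are illustrative:

#include <cstdio>

// AttributeSize constants from DictTabInfo: the value is log2 of the
// element size in bits, so an8Bit = 3 means 8-bit elements.
static const unsigned an8Bit = 3;

int main() {
  // ExtVarchar with AttributeExtLength = 20: translateExtType() picks
  // 8-bit elements and adds one byte for the stored length.
  unsigned attributeExtLength = 20;
  unsigned attributeSize      = an8Bit;
  unsigned attributeArraySize = attributeExtLength + 1;   // 21 bytes

  // Same formula as DictTabInfo::Attribute::sizeInWords():
  // round the total bit count up to whole 32-bit words.
  unsigned sizeInWords =
    ((1u << attributeSize) * attributeArraySize + 31) >> 5;

  printf("VARCHAR(20) occupies %u words\n", sizeInWords);  // prints 6
  return 0;
}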
diff --git a/ndb/include/kernel/signaldata/FireTrigOrd.hpp b/ndb/include/kernel/signaldata/FireTrigOrd.hpp
deleted file mode 100644
index 20a0a863094..00000000000
--- a/ndb/include/kernel/signaldata/FireTrigOrd.hpp
+++ /dev/null
@@ -1,200 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef FIRE_TRIG_ORD_HPP
-#define FIRE_TRIG_ORD_HPP
-
-#include "SignalData.hpp"
-#include <NodeBitmask.hpp>
-#include <trigger_definitions.h>
-#include <string.h>
-
-/**
- * FireTrigOrd
- *
- * This signal is sent by TUP to report
- * that a trigger has fired
- */
-class FireTrigOrd {
- /**
- * Sender(s)
- */
- // API
-
- /**
- * Sender(s) / Receiver(s)
- */
- friend class Dbtup;
-
- /**
- * Receiver(s)
- */
- friend class Dbtc;
- friend class Backup;
- friend class SumaParticipant;
-
- /**
- * For printing
- */
- friend bool printFIRE_TRIG_ORD(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo);
-
-public:
- STATIC_CONST( SignalLength = 7 );
- STATIC_CONST( SignalWithGCILength = 8 );
- STATIC_CONST( SignalWithHashValueLength = 9 );
-
-private:
- Uint32 m_connectionPtr;
- Uint32 m_userRef;
- Uint32 m_triggerId;
- TriggerEvent::Value m_triggerEvent;
- Uint32 m_noPrimKeyWords;
- Uint32 m_noBeforeValueWords;
- Uint32 m_noAfterValueWords;
- Uint32 m_gci;
- Uint32 m_hashValue;
- // Public methods
-public:
- Uint32 getConnectionPtr() const;
- void setConnectionPtr(Uint32);
- Uint32 getUserRef() const;
- void setUserRef(Uint32);
- Uint32 getTriggerId() const;
- void setTriggerId(Uint32 anIndxId);
- TriggerEvent::Value getTriggerEvent() const;
- void setTriggerEvent(TriggerEvent::Value);
- Uint32 getNoOfPrimaryKeyWords() const;
- void setNoOfPrimaryKeyWords(Uint32);
- Uint32 getNoOfBeforeValueWords() const;
- void setNoOfBeforeValueWords(Uint32);
- Uint32 getNoOfAfterValueWords() const;
- void setNoOfAfterValueWords(Uint32);
- Uint32 getGCI() const;
- void setGCI(Uint32);
- Uint32 getHashValue() const;
- void setHashValue(Uint32);
-};
-
-inline
-Uint32 FireTrigOrd::getConnectionPtr() const
-{
- return m_connectionPtr;
-}
-
-inline
-void FireTrigOrd::setConnectionPtr(Uint32 aConnectionPtr)
-{
- m_connectionPtr = aConnectionPtr;
-}
-
-inline
-Uint32 FireTrigOrd::getUserRef() const
-{
- return m_userRef;
-}
-
-inline
-void FireTrigOrd::setUserRef(Uint32 aUserRef)
-{
- m_userRef = aUserRef;
-}
-
-inline
-Uint32 FireTrigOrd::getTriggerId() const
-{
- return m_triggerId;
-}
-
-inline
-void FireTrigOrd::setTriggerId(Uint32 aTriggerId)
-{
- m_triggerId = aTriggerId;
-}
-
-inline
-TriggerEvent::Value FireTrigOrd::getTriggerEvent() const
-{
- return m_triggerEvent;
-}
-
-inline
-void FireTrigOrd::setTriggerEvent(TriggerEvent::Value aTriggerEvent)
-{
- m_triggerEvent = aTriggerEvent;
-}
-
-inline
-Uint32 FireTrigOrd::getNoOfPrimaryKeyWords() const
-{
- return m_noPrimKeyWords;
-}
-
-inline
-void FireTrigOrd::setNoOfPrimaryKeyWords(Uint32 noPrim)
-{
- m_noPrimKeyWords = noPrim;
-}
-
-inline
-Uint32 FireTrigOrd::getNoOfBeforeValueWords() const
-{
- return m_noBeforeValueWords;
-}
-
-inline
-void FireTrigOrd::setNoOfBeforeValueWords(Uint32 noBefore)
-{
- m_noBeforeValueWords = noBefore;
-}
-
-inline
-Uint32 FireTrigOrd::getNoOfAfterValueWords() const
-{
- return m_noAfterValueWords;
-}
-
-inline
-void FireTrigOrd::setNoOfAfterValueWords(Uint32 noAfter)
-{
- m_noAfterValueWords = noAfter;
-}
-
-inline
-Uint32 FireTrigOrd::getGCI() const
-{
- return m_gci;
-}
-
-inline
-void FireTrigOrd::setGCI(Uint32 aGCI)
-{
- m_gci = aGCI;
-}
-
-inline
-Uint32 FireTrigOrd::getHashValue() const
-{
- return m_hashValue;
-}
-
-inline
-void FireTrigOrd::setHashValue(Uint32 flag)
-{
- m_hashValue = flag;
-}
-
-
-#endif
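The three length constants above encode which optional trailing words were sent with FIRE_TRIG_ORD. A hedged receiver-side sketch (standalone; the function is illustrative, not part of the class) showing how the received signal length indicates whether the GCI and hash value fields are present:

#include <cstdio>

// Lengths copied from FireTrigOrd above.
static const unsigned SignalLength              = 7;  // base fields only
static const unsigned SignalWithGCILength       = 8;  // ... plus m_gci
static const unsigned SignalWithHashValueLength = 9;  // ... plus m_hashValue

static void describe(unsigned receivedLength) {
  bool hasGci  = (receivedLength >= SignalWithGCILength);
  bool hasHash = (receivedLength >= SignalWithHashValueLength);
  printf("length %u: gci %s, hash value %s\n", receivedLength,
         hasGci ? "present" : "absent", hasHash ? "present" : "absent");
}

int main() {
  describe(SignalLength);                // shortest form
  describe(SignalWithGCILength);
  describe(SignalWithHashValueLength);   // longest form
  return 0;
}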
diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp
deleted file mode 100644
index 86130be4c4b..00000000000
--- a/ndb/include/ndbapi/NdbDictionary.hpp
+++ /dev/null
@@ -1,1334 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef NdbDictionary_H
-#define NdbDictionary_H
-
-#include <ndb_types.h>
-
-class Ndb;
-struct charset_info_st;
-typedef struct charset_info_st CHARSET_INFO;
-
-/**
- * @class NdbDictionary
- * @brief Data dictionary class
- *
- * The preferred and supported way to create and drop tables and indexes
- * in ndb is through the
- * MySQL Server (see MySQL reference Manual, section MySQL Cluster).
- *
- * Tables and indexes that are created directly through the
- * NdbDictionary class
- * can not be viewed from the MySQL Server.
- * Dropping indexes directly via the NdbApi will cause inconsistencies
- * if they were originally created from a MySQL Cluster.
- *
- * This class supports schema data enquiries such as:
- * -# Enquiries about tables
- * (Dictionary::getTable, Table::getNoOfColumns,
- * Table::getPrimaryKey, and Table::getNoOfPrimaryKeys)
- * -# Enquiries about indexes
- * (Dictionary::getIndex, Index::getNoOfColumns,
- * and Index::getColumn)
- *
- * This class supports schema data definition such as:
- * -# Creating tables (Dictionary::createTable) and table columns
- * -# Dropping tables (Dictionary::dropTable)
- * -# Creating secondary indexes (Dictionary::createIndex)
- * -# Dropping secondary indexes (Dictionary::dropIndex)
- *
- * NdbDictionary has several helper (inner) classes to support this:
- * -# NdbDictionary::Dictionary the dictionary handling dictionary objects
- * -# NdbDictionary::Table for creating tables
- * -# NdbDictionary::Column for creating table columns
- * -# NdbDictionary::Index for creating secondary indexes
- *
- * See @ref ndbapi_simple_index.cpp for details of usage.
- */
-class NdbDictionary {
-public:
- /**
- * @class Object
- * @brief Meta information about a database object (a table, index, etc)
- */
- class Object {
- public:
- /**
- * Status of object
- */
- enum Status {
- New, ///< The object only exists in memory and
- ///< has not been created in the NDB Kernel
- Changed, ///< The object has been modified in memory
- ///< and has to be committed in the NDB Kernel for
- ///< changes to take effect
- Retrieved, ///< The object exists and has been read
- ///< into main memory from the NDB Kernel
- Invalid ///< The object has been invalidated
- ///< and should not be used
-
- };
-
- /**
- * Get status of object
- */
- virtual Status getObjectStatus() const = 0;
-
- /**
- * Get version of object
- */
- virtual int getObjectVersion() const = 0;
-
- /**
- * Object type
- */
- enum Type {
- TypeUndefined = 0, ///< Undefined
- SystemTable = 1, ///< System table
- UserTable = 2, ///< User table (may be temporary)
- UniqueHashIndex = 3, ///< Unique un-ordered hash index
- OrderedIndex = 6, ///< Non-unique ordered index
- HashIndexTrigger = 7, ///< Index maintenance, internal
- IndexTrigger = 8, ///< Index maintenance, internal
- SubscriptionTrigger = 9,///< Backup or replication, internal
- ReadOnlyConstraint = 10 ///< Trigger, internal
- };
-
- /**
- * Object state
- */
- enum State {
- StateUndefined = 0, ///< Undefined
- StateOffline = 1, ///< Offline, not usable
- StateBuilding = 2, ///< Building, not yet usable
- StateDropping = 3, ///< Offlining or dropping, not usable
- StateOnline = 4, ///< Online, usable
- StateBroken = 9 ///< Broken, should be dropped and re-created
- };
-
- /**
- * Object store
- */
- enum Store {
- StoreUndefined = 0, ///< Undefined
- StoreTemporary = 1, ///< Object or data deleted on system restart
- StorePermanent = 2 ///< Permanent, logged to disk
- };
-
- /**
- * Type of fragmentation.
- *
- * This parameter specifies how data in the table or index will
- * be distributed among the db nodes in the cluster.<br>
- * The larger the table, the more fragments should be used.
- * Note that all replicas count as the same "fragment".<br>
- * For a table, default is FragAllMedium. For a unique hash index,
- * default is taken from underlying table and cannot currently
- * be changed.
- */
- enum FragmentType {
- FragUndefined = 0, ///< Fragmentation type undefined or default
- FragSingle = 1, ///< Only one fragment
- FragAllSmall = 2, ///< One fragment per node, default
- FragAllMedium = 3, ///< Two fragments per node
- FragAllLarge = 4 ///< Four fragments per node
- };
- };
-
- class Table; // forward declaration
-
- /**
- * @class Column
- * @brief Represents a column in an NDB Cluster table
- *
- * Each column has a type. The type of a column is determined by a number
- * of type specifiers.
- * The type specifiers are:
- * - Builtin type
- * - Array length or max length
- * - Precision and scale (not used yet)
- * - Character set for string types
- * - Inline and part sizes for blobs
- *
- * Types in general correspond to MySQL types and their variants.
- * Data formats are the same as in MySQL. NDB API provides no support for
- * constructing such formats. NDB kernel checks them however.
- */
- class Column {
- public:
- /**
- * The builtin column types
- */
- enum Type {
- Undefined = NDB_TYPE_UNDEFINED, ///< Undefined
- Tinyint = NDB_TYPE_TINYINT, ///< 8 bit. 1 byte signed integer, can be used in array
- Tinyunsigned = NDB_TYPE_TINYUNSIGNED, ///< 8 bit. 1 byte unsigned integer, can be used in array
- Smallint = NDB_TYPE_SMALLINT, ///< 16 bit. 2 byte signed integer, can be used in array
- Smallunsigned = NDB_TYPE_SMALLUNSIGNED, ///< 16 bit. 2 byte unsigned integer, can be used in array
- Mediumint = NDB_TYPE_MEDIUMINT, ///< 24 bit. 3 byte signed integer, can be used in array
- Mediumunsigned = NDB_TYPE_MEDIUMUNSIGNED,///< 24 bit. 3 byte unsigned integer, can be used in array
- Int = NDB_TYPE_INT, ///< 32 bit. 4 byte signed integer, can be used in array
- Unsigned = NDB_TYPE_UNSIGNED, ///< 32 bit. 4 byte unsigned integer, can be used in array
- Bigint = NDB_TYPE_BIGINT, ///< 64 bit. 8 byte signed integer, can be used in array
- Bigunsigned = NDB_TYPE_BIGUNSIGNED, ///< 64 bit. 8 byte unsigned integer, can be used in array
- Float = NDB_TYPE_FLOAT, ///< 32-bit float. 4 bytes float, can be used in array
- Double = NDB_TYPE_DOUBLE, ///< 64-bit float. 8 byte float, can be used in array
- Olddecimal = NDB_TYPE_OLDDECIMAL, ///< MySQL < 5.0 signed decimal, Precision, Scale
- Olddecimalunsigned = NDB_TYPE_OLDDECIMALUNSIGNED,
- Decimal = NDB_TYPE_DECIMAL, ///< MySQL >= 5.0 signed decimal, Precision, Scale
- Decimalunsigned = NDB_TYPE_DECIMALUNSIGNED,
- Char = NDB_TYPE_CHAR, ///< Len. A fixed array of 1-byte chars
- Varchar = NDB_TYPE_VARCHAR, ///< Length bytes: 1, Max: 255
- Binary = NDB_TYPE_BINARY, ///< Len
- Varbinary = NDB_TYPE_VARBINARY, ///< Length bytes: 1, Max: 255
- Datetime = NDB_TYPE_DATETIME, ///< Precision down to 1 sec (sizeof(Datetime) == 8 bytes )
- Date = NDB_TYPE_DATE, ///< Precision down to 1 day (sizeof(Date) == 4 bytes )
- Blob = NDB_TYPE_BLOB, ///< Binary large object (see NdbBlob)
- Text = NDB_TYPE_TEXT, ///< Text blob
- Bit = NDB_TYPE_BIT, ///< Bit, length specifies no of bits
- Longvarchar = NDB_TYPE_LONGVARCHAR, ///< Length bytes: 2, little-endian
- Longvarbinary = NDB_TYPE_LONGVARBINARY, ///< Length bytes: 2, little-endian
- Time = NDB_TYPE_TIME, ///< Time without date
- Year = NDB_TYPE_YEAR, ///< Year 1901-2155 (1 byte)
- Timestamp = NDB_TYPE_TIMESTAMP ///< Unix time
- };
-
- /**
- * @name General
- * @{
- */
-
- /**
- * Get name of column
- * @return Name of the column
- */
- const char* getName() const;
-
- /**
- * Get if the column is nullable or not
- */
- bool getNullable() const;
-
- /**
- * Check if column is part of primary key
- */
- bool getPrimaryKey() const;
-
- /**
- * Get number of column (horizontal position within table)
- */
- int getColumnNo() const;
-
- /**
- * Check if column is equal to some other column
- * @param column Column to compare with
- * @return true if the columns are equal, otherwise false.
- */
- bool equal(const Column& column) const;
-
-
- /** @} *******************************************************************/
- /**
- * @name Get Type Specifiers
- * @{
- */
-
- /**
- * Get type of column
- */
- Type getType() const;
-
- /**
- * Get precision of column.
- * @note Only applicable for decimal types
- */
- int getPrecision() const;
-
- /**
- * Get scale of column.
- * @note Only applicable for decimal types
- */
- int getScale() const;
-
- /**
- * Get length for column
- * Array length for column or max length for variable length arrays.
- */
- int getLength() const;
-
- /**
- * For Char or Varchar or Text, get MySQL CHARSET_INFO. This
- * specifies both character set and collation. See get_charset()
- * etc in MySQL. (The cs is not "const" in MySQL).
- */
- CHARSET_INFO* getCharset() const;
-
-
- /**
- * For blob, get "inline size" i.e. number of initial bytes
- * to store in table's blob attribute. This part is normally in
- * main memory and can be indexed and interpreted.
- */
- int getInlineSize() const;
-
- /**
- * For blob, get "part size" i.e. number of bytes to store in
- * each tuple of the "blob table". Can be set to zero to omit parts
- * and to allow only inline bytes ("tinyblob").
- */
- int getPartSize() const;
-
- /**
- * For blob, set or get "stripe size" i.e. number of consecutive
- * <em>parts</em> to store in each node group.
- */
- int getStripeSize() const;
-
- /**
- * Get size of element
- */
- int getSize() const;
-
- /**
- * Check if column is part of partition key
- *
- * A <em>partition key</em> is a set of attributes which are used
- * to distribute the tuples onto the NDB nodes.
- * The partition key uses the NDB Cluster hashing function.
- *
- * An example where this is useful is TPC-C where it might be
- * good to use the warehouse id and district id as the partition key.
- * This would place all data for a specific district and warehouse
- * in the same database node.
- *
- * Locally in the fragments the full primary key
- * will still be used with the hashing algorithm.
- *
- * @return true if the column is part of
- * the partition key.
- */
- bool getPartitionKey() const;
-#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
- inline bool getDistributionKey() const { return getPartitionKey(); };
-#endif
-
- /** @} *******************************************************************/
-
-
- /**
- * @name Column creation
- * @{
- *
- * These operations should normally not be performed in an NdbApi program
- * as results will not be visible in the MySQL Server
- *
- */
-
- /**
- * Constructor
- * @param name Name of column
- */
- Column(const char * name = "");
- /**
- * Copy constructor
- * @param column Column to be copied
- */
- Column(const Column& column);
- ~Column();
-
- /**
- * Set name of column
- * @param name Name of the column
- */
- void setName(const char * name);
-
- /**
- * Set whether column is nullable or not
- */
- void setNullable(bool);
-
- /**
- * Set that column is part of primary key
- */
- void setPrimaryKey(bool);
-
- /**
- * Set type of column
- * @param type Type of column
- *
- * @note setType resets <em>all</em> column attributes
- * to (type dependent) defaults and should be the first
- * method to call. Default type is Unsigned.
- */
- void setType(Type type);
-
- /**
- * Set precision of column.
- * @note Only applicable for decimal types
- */
- void setPrecision(int);
-
- /**
- * Set scale of column.
- * @note Only applicable for decimal types
- */
- void setScale(int);
-
- /**
- * Set length for column
- * Array length for column or max length for variable length arrays.
- */
- void setLength(int length);
-
- /**
- * For Char or Varchar or Text, set MySQL CHARSET_INFO. This
- * specifies both character set and collation. See get_charset()
- * etc in MySQL. (The cs is not "const" in MySQL).
- */
- void setCharset(CHARSET_INFO* cs);
-
- /**
- * For blob, get "inline size" i.e. number of initial bytes
- * to store in table's blob attribute. This part is normally in
- * main memory and can be indexed and interpreted.
- */
- void setInlineSize(int size);
-
- /**
- * For blob, get "part size" i.e. number of bytes to store in
- * each tuple of the "blob table". Can be set to zero to omit parts
- * and to allow only inline bytes ("tinyblob").
- */
- void setPartSize(int size);
-
- /**
- * For blob, get "stripe size" i.e. number of consecutive
- * <em>parts</em> to store in each node group.
- */
- void setStripeSize(int size);
-
- /**
- * Set partition key
- * @see getPartitionKey
- *
- * @param enable If set to true, then the column will be part of
- * the partition key.
- */
- void setPartitionKey(bool enable);
-#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
- inline void setDistributionKey(bool enable)
- { setPartitionKey(enable); };
-#endif
-
- /** @} *******************************************************************/
-
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- const Table * getBlobTable() const;
-
- void setAutoIncrement(bool);
- bool getAutoIncrement() const;
- void setAutoIncrementInitialValue(Uint64 val);
- void setDefaultValue(const char*);
- const char* getDefaultValue() const;
-
- static const Column * FRAGMENT;
- static const Column * FRAGMENT_MEMORY;
- static const Column * ROW_COUNT;
- static const Column * COMMIT_COUNT;
- static const Column * ROW_SIZE;
- static const Column * RANGE_NO;
-
- int getSizeInBytes() const;
-#endif
-
- private:
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- friend class NdbRecAttr;
- friend class NdbColumnImpl;
-#endif
- class NdbColumnImpl & m_impl;
- Column(NdbColumnImpl&);
- Column& operator=(const Column&);
- };
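The column-creation section above is easiest to read next to a concrete definition. A minimal sketch, assuming the header is available as <NdbDictionary.hpp> and using made-up column names and lengths; setType() is called first because, as documented above, it resets all other attributes:

    #include <NdbDictionary.hpp>

    // Illustrative column definitions (names and lengths are made up).
    NdbDictionary::Column colId("ID");
    colId.setType(NdbDictionary::Column::Unsigned);  // call setType first: it resets other attributes
    colId.setPrimaryKey(true);
    colId.setNullable(false);

    NdbDictionary::Column colName("NAME");
    colName.setType(NdbDictionary::Column::Char);
    colName.setLength(20);                           // fixed array of 20 1-byte chars
    colName.setNullable(true);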
-
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- /**
- * ???
- */
- typedef Column Attribute;
-#endif
-
- /**
- * @brief Represents a table in NDB Cluster
- *
- * <em>TableSize</em><br>
- * When calculating the data storage one should add the size of all
- * attributes (each attribute consumes at least 4 bytes) and also an overhead
- * of 12 bytes. Variable size attributes (not supported yet) will have a
- * size of 12 bytes plus the actual data storage parts where there is an
- * additional overhead based on the size of the variable part.<br>
- * An example table with 5 attributes:
- * one 64 bit attribute, one 32 bit attribute,
- * two 16 bit attributes and one array of 64 8 bits.
- * This table will consume
- * 12 (overhead) + 8 + 4 + 2*4 (4 is minimum) + 64 = 96 bytes per record.
- * Additionally, an overhead of about 2 % for page headers and waste should
- * be allocated. Thus, 1 million records should consume 96 MBytes
- * plus the 2 MBytes of overhead, rounded up to 100 000 kBytes.<br>
- *
- */
- class Table : public Object {
- public:
- /**
- * @name General
- * @{
- */
-
- /**
- * Get table name
- */
- const char * getName() const;
-
- /**
- * Get table id
- */
- int getTableId() const;
-
- /**
- * Get column definition via name.
- * @return null if no column with the given name exists
- */
- const Column* getColumn(const char * name) const;
-
- /**
- * Get column definition via index in table.
- * @return null if no column with the given attribute id exists
- */
- Column* getColumn(const int attributeId);
-
- /**
- * Get column definition via name.
- * @return null if no column with the given name exists
- */
- Column* getColumn(const char * name);
-
- /**
- * Get column definition via index in table.
- * @return null if no column with the given attribute id exists
- */
- const Column* getColumn(const int attributeId) const;
-
- /** @} *******************************************************************/
- /**
- * @name Storage
- * @{
- */
-
- /**
- * If set to false, then the table is a temporary
- * table and is not logged to disk.
- *
- * In case of a system restart the table will still
- * be defined and exist but will be empty.
- * Thus no checkpointing and no logging is performed on the table.
- *
- * The default value is true and indicates a normal table
- * with full checkpointing and logging activated.
- */
- bool getLogging() const;
-
- /**
- * Get fragmentation type
- */
- FragmentType getFragmentType() const;
-
- /**
- * Get KValue (Hash parameter.)
- * Only allowed value is 6.
- * Later implementations might add flexibility in this parameter.
- */
- int getKValue() const;
-
- /**
- * Get MinLoadFactor (Hash parameter.)
- * This value specifies the load factor when starting to shrink
- * the hash table.
- * It must be smaller than MaxLoadFactor.
- * Both these factors are given in percentage.
- */
- int getMinLoadFactor() const;
-
- /**
- * Get MaxLoadFactor (Hash parameter.)
- * This value specifies the load factor when starting to split
- * the containers in the local hash tables.
- * 100 is the maximum which will optimize memory usage.
- * A lower figure will store less information in each container and thus
- * find the key faster but consume more memory.
- */
- int getMaxLoadFactor() const;
-
- /** @} *******************************************************************/
- /**
- * @name Other
- * @{
- */
-
- /**
- * Get number of columns in the table
- */
- int getNoOfColumns() const;
-
- /**
- * Get number of primary keys in the table
- */
- int getNoOfPrimaryKeys() const;
-
- /**
- * Get name of primary key
- */
- const char* getPrimaryKey(int no) const;
-
- /**
- * Check if table is equal to some other table
- */
- bool equal(const Table&) const;
-
- /**
- * Get frm file stored with this table
- */
- const void* getFrmData() const;
- Uint32 getFrmLength() const;
-
- /** @} *******************************************************************/
-
- /**
- * @name Table creation
- * @{
- *
- * These methods should normally not be used in an application as
- * the result is not accessible from the MySQL Server
- *
- */
-
- /**
- * Constructor
- * @param name Name of table
- */
- Table(const char * name = "");
-
- /**
- * Copy constructor
- * @param table Table to be copied
- */
- Table(const Table& table);
- virtual ~Table();
-
- /**
- * Assignment operator, deep copy
- * @param table Table to be copied
- */
- Table& operator=(const Table& table);
-
- /**
- * Name of table
- * @param name Name of table
- */
- void setName(const char * name);
-
- /**
- * Add a column definition to a table
- * @note creates a copy
- */
- void addColumn(const Column &);
-
- /**
- * @see NdbDictionary::Table::getLogging.
- */
- void setLogging(bool);
-
- /**
- * Set fragmentation type
- */
- void setFragmentType(FragmentType);
-
- /**
- * Set KValue (Hash parameter.)
- * Only allowed value is 6.
- * Later implementations might add flexibility in this parameter.
- */
- void setKValue(int kValue);
-
- /**
- * Set MinLoadFactor (Hash parameter.)
- * This value specifies the load factor when starting to shrink
- * the hash table.
- * It must be smaller than MaxLoadFactor.
- * Both these factors are given in percentage.
- */
- void setMinLoadFactor(int);
-
- /**
- * Set MaxLoadFactor (Hash parameter.)
- * This value specifies the load factor when starting to split
- * the containers in the local hash tables.
- * 100 is the maximum which will optimize memory usage.
- * A lower figure will store less information in each container and thus
- * find the key faster but consume more memory.
- */
- void setMaxLoadFactor(int);
-
- /**
- * Get table object type
- */
- Object::Type getObjectType() const;
-
- /**
- * Get object status
- */
- virtual Object::Status getObjectStatus() const;
-
- /**
- * Get object version
- */
- virtual int getObjectVersion() const;
-
- /**
- * Set frm file to store with this table
- */
- void setFrm(const void* data, Uint32 len);
-
- /**
- * Set table object type
- */
- void setObjectType(Object::Type type);
-
- /** @} *******************************************************************/
-
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- void setStoredTable(bool x) { setLogging(x); }
- bool getStoredTable() const { return getLogging(); }
-
- int getRowSizeInBytes() const ;
- int createTableInDb(Ndb*, bool existingEqualIsOk = true) const ;
-
- int getReplicaCount() const ;
-#endif
-
- private:
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- friend class NdbTableImpl;
-#endif
- class NdbTableImpl & m_impl;
- Table(NdbTableImpl&);
- };
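Continuing the sketch from the Column section, a table definition is assembled from those columns; the table name is made up and the fragmentation choice is only an example:

    // Assemble a table definition from the columns sketched earlier.
    NdbDictionary::Table tab("EXAMPLE_TAB");
    tab.addColumn(colId);      // addColumn() stores a copy of the definition
    tab.addColumn(colName);
    tab.setLogging(true);      // normal table: checkpointed and logged
    tab.setFragmentType(NdbDictionary::Object::FragAllMedium);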
-
- /**
- * @class Index
- * @brief Represents an index in an NDB Cluster
- */
- class Index : public Object {
- public:
-
- /**
- * @name Getting Index properties
- * @{
- */
-
- /**
- * Get the name of an index
- */
- const char * getName() const;
-
- /**
- * Get the name of the table being indexed
- */
- const char * getTable() const;
-
- /**
- * Get the number of columns in the index
- */
- unsigned getNoOfColumns() const;
-
-#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
- /**
- * Get the number of columns in the index
- * Deprecated, use getNoOfColumns instead.
- */
- int getNoOfIndexColumns() const;
-#endif
-
- /**
- * Get a specific column in the index
- */
- const Column * getColumn(unsigned no) const ;
-
-#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
- /**
- * Get a specific column name in the index
- * Deprecated, use getColumn instead.
- */
- const char * getIndexColumn(int no) const ;
-#endif
-
- /**
- * Represents type of index
- */
- enum Type {
- Undefined = 0, ///< Undefined object type (initial value)
- UniqueHashIndex = 3, ///< Unique un-ordered hash index
- ///< (only one currently supported)
- OrderedIndex = 6 ///< Non-unique ordered index
- };
-
- /**
- * Get index type of the index
- */
- Type getType() const;
-
- /**
- * Check if index is set to be stored on disk
- *
- * @return true if logging is enabled
- *
- * @note Non-logged indexes are rebuilt at system restart.
- * @note Ordered index does not currently support logging.
- */
- bool getLogging() const;
-
- /**
- * Get object status
- */
- virtual Object::Status getObjectStatus() const;
-
- /**
- * Get object version
- */
- virtual int getObjectVersion() const;
-
- /** @} *******************************************************************/
-
- /**
- * @name Index creation
- * @{
- *
- * These methods should normally not be used in an application as
- * the result will not be visible from the MySQL Server
- *
- */
-
- /**
- * Constructor
- * @param name Name of index
- */
- Index(const char * name = "");
- virtual ~Index();
-
- /**
- * Set the name of an index
- */
- void setName(const char * name);
-
- /**
- * Define the name of the table to be indexed
- */
- void setTable(const char * name);
-
- /**
- * Add a column to the index definition
- * Note that the order of columns will be in
- * the order they are added (only matters for ordered indexes).
- */
- void addColumn(const Column & c);
-
- /**
- * Add a column name to the index definition
- * Note that the order of columns will be in
- * the order they are added (only matters for ordered indexes).
- */
- void addColumnName(const char * name);
-
-#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
- /**
- * Add a column name to the index definition
- * Note that the order of columns will be in
- * the order they are added (only matters for ordered indexes).
- * Deprecated, use addColumnName instead.
- */
- void addIndexColumn(const char * name);
-#endif
-
- /**
- * Add several column names to the index definition
- * Note that the order of columns will be in
- * the order they are added (only matters for ordered indexes).
- */
- void addColumnNames(unsigned noOfNames, const char ** names);
-
-#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
- /**
- * Add several column names to the index definition
- * Note that the order of columns will be in
- * the order they are added (only matters for ordered indexes).
- * Deprecated, use addColumnNames instead.
- */
- void addIndexColumns(int noOfNames, const char ** names);
-#endif
-
- /**
- * Set index type of the index
- */
- void setType(Type type);
-
- /**
- * Enable/Disable index storage on disk
- *
- * @param enable If enable is set to true, then logging becomes enabled
- *
- * @see NdbDictionary::Index::getLogging
- */
- void setLogging(bool enable);
-
-#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
- void setStoredIndex(bool x) { setLogging(x); }
- bool getStoredIndex() const { return getLogging(); }
-#endif
-
- /** @} *******************************************************************/
-
- private:
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- friend class NdbIndexImpl;
-#endif
- class NdbIndexImpl & m_impl;
- Index(NdbIndexImpl&);
- };
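A matching sketch for the Index class, reusing the made-up names from the earlier snippets; logging is switched off because, as noted in the header above, ordered indexes do not currently support it:

    // Ordered index over one column of the hypothetical table.
    NdbDictionary::Index idx("EXAMPLE_IDX");
    idx.setTable("EXAMPLE_TAB");
    idx.setType(NdbDictionary::Index::OrderedIndex);
    idx.addColumnName("NAME");   // column order only matters for ordered indexes
    idx.setLogging(false);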
-
- /**
- * @brief Represents an Event in NDB Cluster
- *
- */
- class Event : public Object {
- public:
- /**
- * Specifies the type of database operations an Event listens to
- */
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- /** TableEvent must match 1 << TriggerEvent */
-#endif
- enum TableEvent {
- TE_INSERT=1, ///< Insert event on table
- TE_DELETE=2, ///< Delete event on table
- TE_UPDATE=4, ///< Update event on table
- TE_ALL=7 ///< Any/all event on table (not relevant when
- ///< events are received)
- };
- /**
- * Specifies the durability of an event
- * (future versions may supply other types)
- */
- enum EventDurability {
- ED_UNDEFINED
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- = 0
-#endif
-#if 0 // not supported
- ,ED_SESSION = 1,
- // Only this API can use it
- // and it's deleted after api has disconnected or ndb has restarted
-
- ED_TEMPORARY = 2
- // All API's can use it,
- // But it's removed when ndb is restarted
-#endif
- ,ED_PERMANENT ///< All API's can use it.
- ///< It's still defined after a cluster system restart
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- = 3
-#endif
- };
-
- /**
- * Constructor
- * @param name Name of event
- */
- Event(const char *name);
- /**
- * Constructor
- * @param name Name of event
- * @param table Reference retrieved from NdbDictionary
- */
- Event(const char *name, const NdbDictionary::Table& table);
- virtual ~Event();
- /**
- * Set unique identifier for the event
- */
- void setName(const char *name);
- /**
- * Get unique identifier for the event
- */
- const char *getName() const;
- /**
- * Define table on which events should be detected
- *
- * @note calling this method will default to detection
- * of events on all columns. Subsequent
- * addEventColumn calls will override this.
- *
- * @param table reference retrieved from NdbDictionary
- */
- void setTable(const NdbDictionary::Table& table);
- /**
- * Set table for which events should be detected
- *
- * @note preferred way is using setTable(const NdbDictionary::Table&)
- * or constructor with table object parameter
- */
- void setTable(const char *tableName);
- /**
- * Get table name for events
- *
- * @return table name
- */
- const char* getTableName() const;
- /**
- * Add type of event that should be detected
- */
- void addTableEvent(const TableEvent te);
- /**
- * Set durability of the event
- */
- void setDurability(EventDurability);
- /**
- * Get durability of the event
- */
- EventDurability getDurability() const;
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- void addColumn(const Column &c);
-#endif
- /**
- * Add a column on which events should be detected
- *
- * @param attrId Column id
- *
- * @note errors will not be detected until createEvent() is called
- */
- void addEventColumn(unsigned attrId);
- /**
- * Add a column on which events should be detected
- *
- * @param columnName Column name
- *
- * @note errors will not be detected until createEvent() is called
- */
- void addEventColumn(const char * columnName);
- /**
- * Add several columns on which events should be detected
- *
- * @param n Number of columns
- * @param columnNames Column names
- *
- * @note errors will not be detected until
- * NdbDictionary::Dictionary::createEvent() is called
- */
- void addEventColumns(int n, const char ** columnNames);
-
- /**
- * Get number of columns defined in an Event
- *
- * @return Number of columns, -1 on error
- */
- int getNoOfEventColumns() const;
-
- /**
- * Get object status
- */
- virtual Object::Status getObjectStatus() const;
-
- /**
- * Get object version
- */
- virtual int getObjectVersion() const;
-
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- void print();
-#endif
-
- private:
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- friend class NdbEventImpl;
- friend class NdbEventOperationImpl;
-#endif
- class NdbEventImpl & m_impl;
- Event(NdbEventImpl&);
- };
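And a sketch for the Event class, reusing the hypothetical table object from the earlier snippets; the event name and the monitored column are illustrative:

    // Subscribe to inserts and deletes on the hypothetical table.
    NdbDictionary::Event evt("EXAMPLE_EVT", tab);
    evt.addTableEvent(NdbDictionary::Event::TE_INSERT);
    evt.addTableEvent(NdbDictionary::Event::TE_DELETE);
    evt.addEventColumn("NAME");  // overrides the default of all columns
    evt.setDurability(NdbDictionary::Event::ED_PERMANENT);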
-
- /**
- * @class Dictionary
- * @brief Dictionary for defining and retrieving meta data
- */
- class Dictionary {
- public:
- /**
- * @class List
- * @brief Structure for retrieving lists of object names
- */
- struct List {
- /**
- * @struct Element
- * @brief Object to be stored in an NdbDictionary::Dictionary::List
- */
- struct Element {
- unsigned id; ///< Id of object
- Object::Type type; ///< Type of object
- Object::State state; ///< State of object
- Object::Store store; ///< How object is stored
- char * database; ///< In what database the object resides
- char * schema; ///< What schema the object is defined in
- char * name; ///< Name of object
- Element() :
- id(0),
- type(Object::TypeUndefined),
- state(Object::StateUndefined),
- store(Object::StoreUndefined),
- database(0),
- schema(0),
- name(0) {
- }
- };
- unsigned count; ///< Number of elements in list
- Element * elements; ///< Pointer to array of elements
- List() : count(0), elements(0) {}
- ~List() {
- if (elements != 0) {
- for (unsigned i = 0; i < count; i++) {
- delete[] elements[i].database;
- delete[] elements[i].schema;
- delete[] elements[i].name;
- elements[i].name = 0;
- }
- delete[] elements;
- count = 0;
- elements = 0;
- }
- }
- };
-
- /**
- * @name General
- * @{
- */
-
- /**
- * Fetch list of all objects, optionally restricted to given type.
- *
- * @param list List of objects returned in the dictionary
- * @param type Restrict returned list to only contain objects of
- * this type
- *
- * @return -1 if error.
- *
- */
- int listObjects(List & list, Object::Type type = Object::TypeUndefined);
- int listObjects(List & list,
- Object::Type type = Object::TypeUndefined) const;
-
- /**
- * Get the latest error
- *
- * @return Error object.
- */
- const struct NdbError & getNdbError() const;
-
- /** @} *******************************************************************/
-
- /**
- * @name Retrieving references to Tables and Indexes
- * @{
- */
-
- /**
- * Get table with given name, NULL if undefined
- * @param name Name of table to get
- * @return table if successful otherwise NULL.
- */
- const Table * getTable(const char * name) const;
-
- /**
- * Get index with given name, NULL if undefined
- * @param indexName Name of index to get.
- * @param tableName Name of table that index belongs to.
- * @return index if successful, otherwise 0.
- */
- const Index * getIndex(const char * indexName,
- const char * tableName) const;
-
- /**
- * Fetch list of indexes of given table.
- * @param list Reference to list where to store the listed indexes
- * @param tableName Name of table that index belongs to.
- * @return 0 if successful, otherwise -1
- */
- int listIndexes(List & list, const char * tableName);
- int listIndexes(List & list, const char * tableName) const;
-
- /** @} *******************************************************************/
- /**
- * @name Events
- * @{
- */
-
- /**
- * Create event given defined Event instance
- * @param event Event to create
- * @return 0 if successful otherwise -1.
- */
- int createEvent(const Event &event);
-
- /**
- * Drop event with given name
- * @param eventName Name of event to drop.
- * @return 0 if successful otherwise -1.
- */
- int dropEvent(const char * eventName);
-
- /**
- * Get event with given name.
- * @param eventName Name of event to get.
- * @return an Event if successful, otherwise NULL.
- */
- const Event * getEvent(const char * eventName);
-
- /** @} *******************************************************************/
-
- /**
- * @name Table creation
- * @{
- *
- * These methods should normally not be used in an application as
- * the result will not be visible from the MySQL Server
- */
-
- /**
- * Create defined table given defined Table instance
- * @param table Table to create
- * @return 0 if successful otherwise -1.
- */
- int createTable(const Table &table);
-
- /**
- * Drop table given retrieved Table instance
- * @param table Table to drop
- * @return 0 if successful otherwise -1.
- */
- int dropTable(Table & table);
-
- /**
- * Drop table given table name
- * @param name Name of table to drop
- * @return 0 if successful otherwise -1.
- */
- int dropTable(const char * name);
-
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- /**
- * Alter defined table given defined Table instance
- * @param table Table to alter
- * @return -2 (incompatible version) <br>
- * -1 general error <br>
- * 0 success
- */
- int alterTable(const Table &table);
-
- /**
- * Invalidate cached table object
- * @param name Name of table to invalidate
- */
- void invalidateTable(const char * name);
-#endif
-
- /**
- * Remove table from local cache
- */
- void removeCachedTable(const char * table);
- /**
- * Remove index from local cache
- */
- void removeCachedIndex(const char * index, const char * table);
-
-
- /** @} *******************************************************************/
- /**
- * @name Index creation
- * @{
- *
- * These methods should normally not be used in an application as
- * the result will not be visible from the MySQL Server
- *
- */
-
- /**
- * Create index given defined Index instance
- * @param index Index to create
- * @return 0 if successful otherwise -1.
- */
- int createIndex(const Index &index);
-
- /**
- * Drop index with given name
- * @param indexName Name of index to drop.
- * @param tableName Name of table that index belongs to.
- * @return 0 if successful otherwise -1.
- */
- int dropIndex(const char * indexName,
- const char * tableName);
-
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- /**
- * Invalidate cached index object
- */
- void invalidateIndex(const char * indexName,
- const char * tableName);
-#endif
-
- /** @} *******************************************************************/
-
- protected:
- Dictionary(Ndb & ndb);
- ~Dictionary();
-
- private:
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- friend class NdbDictionaryImpl;
- friend class UtilTransactions;
- friend class NdbBlob;
-#endif
- class NdbDictionaryImpl & m_impl;
- Dictionary(NdbDictionaryImpl&);
- const Table * getIndexTable(const char * indexName,
- const char * tableName) const;
- public:
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- const Table * getTable(const char * name, void **data) const;
- void set_local_table_data_size(unsigned sz);
-#endif
- };
-};
-
-class NdbOut& operator <<(class NdbOut& out, const NdbDictionary::Column& col);
-
-#endif
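Tying the earlier sketches together, the Dictionary methods are driven roughly as below. How the Dictionary pointer is obtained is an assumption here (the accessor lives on the Ndb class, which this header only forward-declares), and error handling is reduced to the return-code checks documented above:

    // dict is assumed to come from the application's Ndb object.
    static void defineSchema(NdbDictionary::Dictionary * dict,
                             const NdbDictionary::Table & tab,
                             const NdbDictionary::Index & idx,
                             const NdbDictionary::Event & evt)
    {
      if (dict->createTable(tab) != 0)   // 0 on success, -1 on failure
        return;                          // inspect dict->getNdbError() for the cause
      if (dict->createIndex(idx) != 0)
        return;
      if (dict->createEvent(evt) != 0)
        return;

      // Retrieved objects are owned by the dictionary cache.
      const NdbDictionary::Table * t = dict->getTable("EXAMPLE_TAB");
      if (t != 0) {
        for (int i = 0; i < t->getNoOfColumns(); i++)
          (void) t->getColumn(i)->getName();   // e.g. enumerate column names
      }
    }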
diff --git a/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile b/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile
deleted file mode 100644
index c1ca32dfe17..00000000000
--- a/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-TARGET = mgmapi_logevent
-SRCS = $(TARGET).cpp
-OBJS = $(TARGET).o
-CXX = g++
-CFLAGS = -c -Wall -fno-rtti -fno-exceptions
-CXXFLAGS =
-DEBUG =
-LFLAGS = -Wall
-TOP_SRCDIR = ../../..
-INCLUDE_DIR = $(TOP_SRCDIR)
-LIB_DIR = -L$(TOP_SRCDIR)/ndb/src/.libs \
- -L$(TOP_SRCDIR)/libmysql_r/.libs \
- -L$(TOP_SRCDIR)/mysys
-SYS_LIB =
-
-$(TARGET): $(OBJS)
- $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lz $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/ndb/include -I$(INCLUDE_DIR)/ndb/include/mgmapi -I$(INCLUDE_DIR)/ndb/include/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_async_example/Makefile b/ndb/ndbapi-examples/ndbapi_async_example/Makefile
deleted file mode 100644
index 55e4a13343f..00000000000
--- a/ndb/ndbapi-examples/ndbapi_async_example/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-TARGET = ndbapi_async
-SRCS = $(TARGET).cpp
-OBJS = $(TARGET).o
-CXX = g++
-CFLAGS = -g -c -Wall -fno-rtti -fno-exceptions
-CXXFLAGS = -g
-DEBUG =
-LFLAGS = -Wall
-TOP_SRCDIR = ../../..
-INCLUDE_DIR = $(TOP_SRCDIR)
-LIB_DIR = -L$(TOP_SRCDIR)/ndb/src/.libs \
- -L$(TOP_SRCDIR)/libmysql_r/.libs \
- -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
-SYS_LIB =
-
-$(TARGET): $(OBJS)
- $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/extra -I$(INCLUDE_DIR)/ndb/include -I$(INCLUDE_DIR)/ndb/include/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_async_example1/Makefile b/ndb/ndbapi-examples/ndbapi_async_example1/Makefile
deleted file mode 100644
index 7f6ea0b4d25..00000000000
--- a/ndb/ndbapi-examples/ndbapi_async_example1/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-TARGET = ndbapi_async1
-SRCS = ndbapi_async1.cpp
-OBJS = ndbapi_async1.o
-CXX = g++
-CFLAGS = -c -Wall -fno-rtti -fno-exceptions
-DEBUG =
-LFLAGS = -Wall
-INCLUDE_DIR = ../../include
-LIB_DIR = -L../../src/.libs \
- -L../../../libmysql_r/.libs \
- -L../../../mysys -L../../../strings
-SYS_LIB =
-
-$(TARGET): $(OBJS)
- $(CXX) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_event_example/Makefile b/ndb/ndbapi-examples/ndbapi_event_example/Makefile
deleted file mode 100644
index 12e109c654f..00000000000
--- a/ndb/ndbapi-examples/ndbapi_event_example/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-TARGET = ndbapi_event
-SRCS = ndbapi_event.cpp
-OBJS = ndbapi_event.o
-CXX = g++
-CFLAGS = -c -Wall -fno-rtti -fno-exceptions
-CXXFLAGS =
-DEBUG =
-LFLAGS = -Wall
-TOP_SRCDIR = ../../..
-INCLUDE_DIR = $(TOP_SRCDIR)/ndb/include
-LIB_DIR = -L$(TOP_SRCDIR)/ndb/src/.libs \
- -L$(TOP_SRCDIR)/libmysql_r/.libs \
- -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
-SYS_LIB =
-
-$(TARGET): $(OBJS)
- $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_retries_example/Makefile b/ndb/ndbapi-examples/ndbapi_retries_example/Makefile
deleted file mode 100644
index 829a7009031..00000000000
--- a/ndb/ndbapi-examples/ndbapi_retries_example/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-TARGET = ndbapi_retries
-SRCS = ndbapi_retries.cpp
-OBJS = ndbapi_retries.o
-CXX = g++
-CFLAGS = -c -Wall -fno-rtti -fno-exceptions
-DEBUG =
-LFLAGS = -Wall
-INCLUDE_DIR = ../../include
-LIB_DIR = -L../../src/.libs \
- -L../../../libmysql_r/.libs \
- -L../../../mysys -L../../../strings
-SYS_LIB =
-
-$(TARGET): $(OBJS)
- $(CXX) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_scan_example/Makefile b/ndb/ndbapi-examples/ndbapi_scan_example/Makefile
deleted file mode 100644
index 31886b02bf1..00000000000
--- a/ndb/ndbapi-examples/ndbapi_scan_example/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-TARGET = ndbapi_scan
-SRCS = $(TARGET).cpp
-OBJS = $(TARGET).o
-CXX = g++
-CFLAGS = -g -c -Wall -fno-rtti -fno-exceptions
-CXXFLAGS = -g
-DEBUG =
-LFLAGS = -Wall
-TOP_SRCDIR = ../../..
-INCLUDE_DIR = $(TOP_SRCDIR)
-LIB_DIR = -L$(TOP_SRCDIR)/ndb/src/.libs \
- -L$(TOP_SRCDIR)/libmysql_r/.libs \
- -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
-SYS_LIB =
-
-$(TARGET): $(OBJS)
- $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/extra -I$(INCLUDE_DIR)/ndb/include -I$(INCLUDE_DIR)/ndb/include/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_simple_example/Makefile b/ndb/ndbapi-examples/ndbapi_simple_example/Makefile
deleted file mode 100644
index 0a59584fb66..00000000000
--- a/ndb/ndbapi-examples/ndbapi_simple_example/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-TARGET = ndbapi_simple
-SRCS = $(TARGET).cpp
-OBJS = $(TARGET).o
-CXX = g++
-CFLAGS = -c -Wall -fno-rtti -fno-exceptions
-CXXFLAGS =
-DEBUG =
-LFLAGS = -Wall
-TOP_SRCDIR = ../../..
-INCLUDE_DIR = $(TOP_SRCDIR)
-LIB_DIR = -L$(TOP_SRCDIR)/ndb/src/.libs \
- -L$(TOP_SRCDIR)/libmysql_r/.libs \
- -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
-SYS_LIB =
-
-$(TARGET): $(OBJS)
- $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/ndb/include -I$(INCLUDE_DIR)/ndb/include/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile b/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile
deleted file mode 100644
index d4356055935..00000000000
--- a/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-TARGET = ndbapi_simple_index
-SRCS = $(TARGET).cpp
-OBJS = $(TARGET).o
-CXX = g++
-CFLAGS = -c -Wall -fno-rtti -fno-exceptions
-CXXFLAGS =
-DEBUG =
-LFLAGS = -Wall
-TOP_SRCDIR = ../../..
-INCLUDE_DIR = $(TOP_SRCDIR)
-LIB_DIR = -L$(TOP_SRCDIR)/ndb/src/.libs \
- -L$(TOP_SRCDIR)/libmysql_r/.libs \
- -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
-SYS_LIB =
-
-$(TARGET): $(OBJS)
- $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/ndb/include -I$(INCLUDE_DIR)/ndb/include/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/src/Makefile.am b/ndb/src/Makefile.am
deleted file mode 100644
index d35790a2e43..00000000000
--- a/ndb/src/Makefile.am
+++ /dev/null
@@ -1,33 +0,0 @@
-SUBDIRS = common mgmapi ndbapi . kernel mgmclient mgmsrv cw
-
-include $(top_srcdir)/ndb/config/common.mk.am
-
-ndblib_LTLIBRARIES = libndbclient.la
-
-libndbclient_la_SOURCES =
-
-libndbclient_la_LIBADD = \
- ndbapi/libndbapi.la \
- common/transporter/libtransporter.la \
- common/debugger/libtrace.la \
- common/debugger/signaldata/libsignaldataprint.la \
- mgmapi/libmgmapi.la \
- common/mgmcommon/libmgmsrvcommon.la \
- common/logger/liblogger.la \
- common/portlib/libportlib.la \
- common/util/libgeneral.la
-
-windoze-dsp: libndbclient.dsp
-
-libndbclient.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(ndblib_LTLIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ dummy.cpp
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(libndbclient_la_LIBADD)
- @touch dummy.cpp
diff --git a/ndb/src/common/debugger/Makefile.am b/ndb/src/common/debugger/Makefile.am
deleted file mode 100644
index e25a11c9bee..00000000000
--- a/ndb/src/common/debugger/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-SUBDIRS = signaldata
-
-noinst_LTLIBRARIES = libtrace.la
-
-libtrace_la_SOURCES = SignalLoggerManager.cpp DebuggerNames.cpp BlockNames.cpp EventLogger.cpp GrepError.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libtrace.dsp
-
-libtrace.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libtrace_la_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp b/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp
deleted file mode 100644
index 027f743b5ea..00000000000
--- a/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include <signaldata/CreateFragmentation.hpp>
-
-bool
-printCREATE_FRAGMENTATION_REQ(FILE * output, const Uint32 * theData,
- Uint32 len, Uint16 receiverBlockNo) {
- const CreateFragmentationReq * const sig = (CreateFragmentationReq *)theData;
- fprintf(output, " senderRef: %x\n", sig->senderRef);
- fprintf(output, " senderData: %x\n", sig->senderData);
- fprintf(output, " fragmentationType: %x\n", sig->fragmentationType);
- fprintf(output, " noOfFragments: %x\n", sig->noOfFragments);
- fprintf(output, " fragmentNode: %x\n", sig->fragmentNode);
- if (sig->primaryTableId == RNIL)
- fprintf(output, " primaryTableId: none\n");
- else
- fprintf(output, " primaryTableId: %x\n", sig->primaryTableId);
- return true;
-}
-
-bool
-printCREATE_FRAGMENTATION_REF(FILE * output, const Uint32 * theData,
- Uint32 len, Uint16 receiverBlockNo) {
- const CreateFragmentationRef * const sig = (CreateFragmentationRef *)theData;
- fprintf(output, " senderRef: %x\n", sig->senderRef);
- fprintf(output, " senderData: %x\n", sig->senderData);
- fprintf(output, " errorCode: %x\n", sig->errorCode);
- return true;
-}
-
-bool
-printCREATE_FRAGMENTATION_CONF(FILE * output, const Uint32 * theData,
- Uint32 len, Uint16 receiverBlockNo) {
- const CreateFragmentationConf * const sig =
- (CreateFragmentationConf *)theData;
- fprintf(output, " senderRef: %x\n", sig->senderRef);
- fprintf(output, " senderData: %x\n", sig->senderData);
- fprintf(output, " noOfReplicas: %x\n", sig->noOfReplicas);
- fprintf(output, " noOfFragments: %x\n", sig->noOfFragments);
- return true;
-}
-
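These printers are invoked by the signal-logging framework with a raw word buffer. The sketch below fakes such a buffer for the REF variant; the values, their ordering, and the assumption that the print prototype is visible via the debugger headers are all illustrative.

    #include <cstdio>
    #include <signaldata/CreateFragmentation.hpp>

    int main()
    {
      // Hand-built three-word payload; real payloads come off the wire and are
      // interpreted by the printer through the CreateFragmentationRef layout.
      Uint32 words[3] = { 0x00fa0002, 42, 625 };
      printCREATE_FRAGMENTATION_REF(stdout, words, 3, 0);
      return 0;
    }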
diff --git a/ndb/src/common/debugger/signaldata/Makefile.am b/ndb/src/common/debugger/signaldata/Makefile.am
deleted file mode 100644
index 9146d552568..00000000000
--- a/ndb/src/common/debugger/signaldata/Makefile.am
+++ /dev/null
@@ -1,47 +0,0 @@
-
-noinst_LTLIBRARIES = libsignaldataprint.la
-
-libsignaldataprint_la_SOURCES = \
- TcKeyReq.cpp TcKeyConf.cpp TcKeyRef.cpp \
- TcRollbackRep.cpp \
- TupKey.cpp TupCommit.cpp LqhKey.cpp \
- FsOpenReq.cpp FsCloseReq.cpp FsRef.cpp FsConf.cpp FsReadWriteReq.cpp\
- SignalDataPrint.cpp SignalNames.cpp \
- ContinueB.cpp DihContinueB.cpp NdbfsContinueB.cpp \
- CloseComReqConf.cpp PackedSignal.cpp PrepFailReqRef.cpp \
- GCPSave.cpp DictTabInfo.cpp \
- AlterTable.cpp AlterTab.cpp \
- CreateTrig.cpp AlterTrig.cpp DropTrig.cpp \
- FireTrigOrd.cpp TrigAttrInfo.cpp \
- CreateIndx.cpp AlterIndx.cpp DropIndx.cpp TcIndx.cpp \
- IndxKeyInfo.cpp IndxAttrInfo.cpp \
- FsAppendReq.cpp ScanTab.cpp \
- BackupImpl.cpp BackupSignalData.cpp \
- UtilSequence.cpp UtilPrepare.cpp UtilDelete.cpp UtilExecute.cpp \
- LqhFrag.cpp DropTab.cpp PrepDropTab.cpp LCP.cpp MasterLCP.cpp \
- CopyGCI.cpp SystemError.cpp StartRec.cpp NFCompleteRep.cpp \
- FailRep.cpp DisconnectRep.cpp SignalDroppedRep.cpp \
- SumaImpl.cpp NdbSttor.cpp CreateFragmentation.cpp \
- UtilLock.cpp TuxMaint.cpp AccLock.cpp \
- LqhTrans.cpp ReadNodesConf.cpp CntrStart.cpp \
- ScanFrag.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_ndbapi.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libsignaldataprint.dsp
-
-libsignaldataprint.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libsignaldataprint_la_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/common/logger/Makefile.am b/ndb/src/common/logger/Makefile.am
deleted file mode 100644
index 0af21f9fbde..00000000000
--- a/ndb/src/common/logger/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-
-noinst_LTLIBRARIES = liblogger.la
-
-SOURCE_WIN = Logger.cpp LogHandlerList.cpp LogHandler.cpp \
- ConsoleLogHandler.cpp FileLogHandler.cpp
-liblogger_la_SOURCES = $(SOURCE_WIN) SysLogHandler.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_ndbapi.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-windoze-dsp: liblogger.dsp
-
-liblogger.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(SOURCE_WIN)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/common/mgmcommon/Makefile.am b/ndb/src/common/mgmcommon/Makefile.am
deleted file mode 100644
index 104bf0b29f2..00000000000
--- a/ndb/src/common/mgmcommon/Makefile.am
+++ /dev/null
@@ -1,28 +0,0 @@
-noinst_LTLIBRARIES = libmgmsrvcommon.la
-
-libmgmsrvcommon_la_SOURCES = \
- ConfigRetriever.cpp \
- IPCConfig.cpp
-
-INCLUDES_LOC = -I$(top_srcdir)/ndb/src/mgmapi -I$(top_srcdir)/ndb/src/mgmsrv
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_ndbapi.mk.am
-include $(top_srcdir)/ndb/config/type_mgmapiclient.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libmgmsrvcommon.dsp
-
-libmgmsrvcommon.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libmgmsrvcommon_la_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/common/portlib/Makefile.am b/ndb/src/common/portlib/Makefile.am
deleted file mode 100644
index 99138a7414e..00000000000
--- a/ndb/src/common/portlib/Makefile.am
+++ /dev/null
@@ -1,43 +0,0 @@
-noinst_HEADERS = gcc.cpp
-
-noinst_LTLIBRARIES = libportlib.la
-
-libportlib_la_SOURCES = \
- NdbCondition.c NdbMutex.c NdbSleep.c NdbTick.c \
- NdbEnv.c NdbThread.c NdbHost.c NdbTCP.cpp \
- NdbDaemon.c NdbMem.c \
- NdbConfig.c
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_util.mk.am
-
-EXTRA_PROGRAMS = memtest PortLibTest munmaptest
-
-PortLibTest_SOURCES = NdbPortLibTest.cpp
-munmaptest_SOURCES = munmaptest.cpp
-
-# Don't update the files from bitkeeper
-WIN_src = win32/NdbCondition.c \
- win32/NdbDaemon.c \
- win32/NdbEnv.c \
- win32/NdbHost.c \
- win32/NdbMem.c \
- win32/NdbMutex.c \
- win32/NdbSleep.c \
- win32/NdbTCP.c \
- win32/NdbThread.c \
- win32/NdbTick.c
-
-windoze-dsp: libportlib.dsp
-
-libportlib.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(WIN_src)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/common/transporter/Makefile.am b/ndb/src/common/transporter/Makefile.am
deleted file mode 100644
index 4c277097a91..00000000000
--- a/ndb/src/common/transporter/Makefile.am
+++ /dev/null
@@ -1,36 +0,0 @@
-
-noinst_LTLIBRARIES = libtransporter.la
-
-libtransporter_la_SOURCES = \
- Transporter.cpp \
- SendBuffer.cpp \
- TCP_Transporter.cpp \
- TransporterRegistry.cpp \
- Packer.cpp
-
-EXTRA_libtransporter_la_SOURCES = SHM_Transporter.cpp SHM_Transporter.unix.cpp SCI_Transporter.cpp
-
-libtransporter_la_LIBADD = @ndb_transporter_opt_objs@
-libtransporter_la_DEPENDENCIES = @ndb_transporter_opt_objs@
-
-INCLUDES_LOC = -I$(top_srcdir)/ndb/include/mgmapi -I$(top_srcdir)/ndb/src/mgmapi -I$(top_srcdir)/ndb/include/debugger -I$(top_srcdir)/ndb/include/kernel -I$(top_srcdir)/ndb/include/transporter @NDB_SCI_INCLUDES@
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_util.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libtransporter.dsp
-
-libtransporter.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libtransporter_la_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/common/util/Makefile.am b/ndb/src/common/util/Makefile.am
deleted file mode 100644
index 2719d14ee92..00000000000
--- a/ndb/src/common/util/Makefile.am
+++ /dev/null
@@ -1,49 +0,0 @@
-
-noinst_LTLIBRARIES = libgeneral.la
-
-libgeneral_la_SOURCES = \
- File.cpp md5_hash.cpp Properties.cpp socket_io.cpp \
- SimpleProperties.cpp Parser.cpp InputStream.cpp \
- SocketServer.cpp SocketClient.cpp SocketAuthenticator.cpp\
- OutputStream.cpp NdbOut.cpp BaseString.cpp Base64.cpp \
- NdbSqlUtil.cpp new.cpp \
- uucode.c random.c version.c \
- strdup.c \
- ConfigValues.cpp ndb_init.c basestring_vsnprintf.c \
- Bitmask.cpp
-
-EXTRA_PROGRAMS = testBitmask
-testBitmask_SOURCES = testBitmask.cpp
-testBitmask_LDFLAGS = @ndb_bin_am_ldflags@ \
- $(top_builddir)/ndb/src/libndbclient.la \
- $(top_builddir)/dbug/libdbug.a \
- $(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a
-
-testBitmask.cpp : Bitmask.cpp
- rm -f testBitmask.cpp
- @LN_CP_F@ Bitmask.cpp testBitmask.cpp
-
-testBitmask.o: $(testBitmask_SOURCES)
- $(CXXCOMPILE) -c $(INCLUDES) -D__TEST_BITMASK__ $<
-
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_util.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libgeneral.dsp
-
-libgeneral.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libgeneral_la_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/common/util/version.c b/ndb/src/common/util/version.c
deleted file mode 100644
index a7e103c3dab..00000000000
--- a/ndb/src/common/util/version.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include <ndb_global.h>
-#include <ndb_version.h>
-#include <version.h>
-#include <basestring_vsnprintf.h>
-#include <NdbEnv.h>
-#include <NdbOut.hpp>
-
-Uint32 getMajor(Uint32 version) {
- return (version >> 16) & 0xFF;
-}
-
-Uint32 getMinor(Uint32 version) {
- return (version >> 8) & 0xFF;
-}
-
-Uint32 getBuild(Uint32 version) {
- return (version >> 0) & 0xFF;
-}
-
-Uint32 makeVersion(Uint32 major, Uint32 minor, Uint32 build) {
- return MAKE_VERSION(major, minor, build);
-
-}
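The accessors above imply a packed version word of the form major<<16 | minor<<8 | build; the following stand-alone sketch only restates that packing (the Uint32 typedef and <cassert> are stand-ins, not part of the deleted file):

#include <cassert>
typedef unsigned int Uint32;                      // stand-in for the NDB typedef
int main() {
  const Uint32 v = (5u << 16) | (0u << 8) | 3u;   // corresponds to "5.0.3"
  assert(((v >> 16) & 0xFF) == 5);                // what getMajor() extracts
  assert(((v >>  8) & 0xFF) == 0);                // what getMinor() extracts
  assert(((v >>  0) & 0xFF) == 3);                // what getBuild() extracts
  return 0;
}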
-
-char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ];
-const char * getVersionString(Uint32 version, const char * status,
- char *buf, unsigned sz)
-{
- if (status && status[0] != 0)
- basestring_snprintf(buf, sz,
- "Version %d.%d.%d (%s)",
- getMajor(version),
- getMinor(version),
- getBuild(version),
- status);
- else
- basestring_snprintf(buf, sz,
- "Version %d.%d.%d",
- getMajor(version),
- getMinor(version),
- getBuild(version));
- return buf;
-}
-
-typedef enum {
- UG_Null,
- UG_Range,
- UG_Exact
-} UG_MatchType;
-
-struct NdbUpGradeCompatible {
- Uint32 ownVersion;
- Uint32 otherVersion;
- UG_MatchType matchType;
-};
-
-/*#define TEST_VERSION*/
-
-#define HAVE_NDB_SETVERSION
-#ifdef HAVE_NDB_SETVERSION
-Uint32 ndbOwnVersionTesting = 0;
-void
-ndbSetOwnVersion() {
- char buf[256];
- if (NdbEnv_GetEnv("NDB_SETVERSION", buf, sizeof(buf))) {
- Uint32 _v1,_v2,_v3;
- if (sscanf(buf, "%u.%u.%u", &_v1, &_v2, &_v3) == 3) {
- ndbOwnVersionTesting = MAKE_VERSION(_v1,_v2,_v3);
- ndbout_c("Testing: Version set to 0x%x", ndbOwnVersionTesting);
- }
- }
-}
-#else
-void ndbSetOwnVersion() {}
-#endif
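For testing, the block above lets the NDB_SETVERSION environment variable override the compiled-in version, parsed as "major.minor.build" via sscanf. A hedged sketch of driving it (setenv is POSIX; the ndb version headers and Uint32 are assumed to be on the include path):

#include <cstdlib>                     // setenv
void exercise_setversion_override() {
  setenv("NDB_SETVERSION", "4.1.9", 1 /*overwrite*/);
  ndbSetOwnVersion();                  // reads "%u.%u.%u" from the env var
  Uint32 v = ndbGetOwnVersion();       // now the override instead of NDB_VERSION_D
  (void)v;
}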
-
-#ifndef TEST_VERSION
-struct NdbUpGradeCompatible ndbCompatibleTable_full[] = {
- { MAKE_VERSION(5,0,NDB_VERSION_BUILD), MAKE_VERSION(5,0,3), UG_Range},
- { MAKE_VERSION(5,0,3), MAKE_VERSION(5,0,2), UG_Exact },
- { MAKE_VERSION(4,1,12), MAKE_VERSION(4,1,10), UG_Range },
- { MAKE_VERSION(4,1,10), MAKE_VERSION(4,1,9), UG_Exact },
- { MAKE_VERSION(4,1,9), MAKE_VERSION(4,1,8), UG_Exact },
- { MAKE_VERSION(3,5,2), MAKE_VERSION(3,5,1), UG_Exact },
- { 0, 0, UG_Null }
-};
-
-struct NdbUpGradeCompatible ndbCompatibleTable_upgrade[] = {
- { MAKE_VERSION(5,0,2), MAKE_VERSION(4,1,8), UG_Exact },
- { MAKE_VERSION(3,5,4), MAKE_VERSION(3,5,3), UG_Exact },
- { 0, 0, UG_Null }
-};
-
-#else /* testing purposes */
-
-struct NdbUpGradeCompatible ndbCompatibleTable_full[] = {
- { MAKE_VERSION(4,1,5), MAKE_VERSION(4,1,0), UG_Range },
- { MAKE_VERSION(3,6,9), MAKE_VERSION(3,6,1), UG_Range },
- { MAKE_VERSION(3,6,2), MAKE_VERSION(3,6,1), UG_Range },
- { MAKE_VERSION(3,5,7), MAKE_VERSION(3,5,0), UG_Range },
- { MAKE_VERSION(3,5,1), MAKE_VERSION(3,5,0), UG_Range },
- { NDB_VERSION_D , MAKE_VERSION(NDB_VERSION_MAJOR,NDB_VERSION_MINOR,2), UG_Range },
- { 0, 0, UG_Null }
-};
-
-struct NdbUpGradeCompatible ndbCompatibleTable_upgrade[] = {
- { MAKE_VERSION(4,1,5), MAKE_VERSION(3,6,9), UG_Exact },
- { MAKE_VERSION(3,6,2), MAKE_VERSION(3,5,7), UG_Exact },
- { MAKE_VERSION(3,5,1), NDB_VERSION_D , UG_Exact },
- { 0, 0, UG_Null }
-};
-
-
-#endif
-
-void ndbPrintVersion()
-{
- printf("Version: %u.%u.%u\n",
- getMajor(ndbGetOwnVersion()),
- getMinor(ndbGetOwnVersion()),
- getBuild(ndbGetOwnVersion()));
-}
-
-Uint32
-ndbGetOwnVersion()
-{
-#ifdef HAVE_NDB_SETVERSION
- if (ndbOwnVersionTesting == 0)
- return NDB_VERSION_D;
- else
- return ndbOwnVersionTesting;
-#else
- return NDB_VERSION_D;
-#endif
-}
-
-int
-ndbSearchUpgradeCompatibleTable(Uint32 ownVersion, Uint32 otherVersion,
- struct NdbUpGradeCompatible table[])
-{
- int i;
- for (i = 0; table[i].ownVersion != 0 && table[i].otherVersion != 0; i++) {
- if (table[i].ownVersion == ownVersion ||
- table[i].ownVersion == (Uint32) ~0) {
- switch (table[i].matchType) {
- case UG_Range:
- if (otherVersion >= table[i].otherVersion){
- return 1;
- }
- break;
- case UG_Exact:
- if (otherVersion == table[i].otherVersion){
- return 1;
- }
- break;
- default:
- break;
- }
- }
- }
- return 0;
-}
-
-int
-ndbCompatible(Uint32 ownVersion, Uint32 otherVersion, struct NdbUpGradeCompatible table[])
-{
- if (otherVersion >= ownVersion) {
- return 1;
- }
- return ndbSearchUpgradeCompatibleTable(ownVersion, otherVersion, table);
-}
-
-int
-ndbCompatible_full(Uint32 ownVersion, Uint32 otherVersion)
-{
- return ndbCompatible(ownVersion, otherVersion, ndbCompatibleTable_full);
-}
-
-int
-ndbCompatible_upgrade(Uint32 ownVersion, Uint32 otherVersion)
-{
- if (ndbCompatible_full(ownVersion, otherVersion))
- return 1;
- return ndbCompatible(ownVersion, otherVersion, ndbCompatibleTable_upgrade);
-}
-
-int
-ndbCompatible_mgmt_ndb(Uint32 ownVersion, Uint32 otherVersion)
-{
- return ndbCompatible_upgrade(ownVersion, otherVersion);
-}
-
-int
-ndbCompatible_mgmt_api(Uint32 ownVersion, Uint32 otherVersion)
-{
- return ndbCompatible_upgrade(ownVersion, otherVersion);
-}
-
-int
-ndbCompatible_ndb_mgmt(Uint32 ownVersion, Uint32 otherVersion)
-{
- return ndbCompatible_full(ownVersion, otherVersion);
-}
-
-int
-ndbCompatible_api_mgmt(Uint32 ownVersion, Uint32 otherVersion)
-{
- return ndbCompatible_full(ownVersion, otherVersion);
-}
-
-int
-ndbCompatible_api_ndb(Uint32 ownVersion, Uint32 otherVersion)
-{
- return ndbCompatible_full(ownVersion, otherVersion);
-}
-
-int
-ndbCompatible_ndb_api(Uint32 ownVersion, Uint32 otherVersion)
-{
- return ndbCompatible_upgrade(ownVersion, otherVersion);
-}
-
-int
-ndbCompatible_ndb_ndb(Uint32 ownVersion, Uint32 otherVersion)
-{
- return ndbCompatible_upgrade(ownVersion, otherVersion);
-}
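Reading the tables and helpers together: a peer at the same or newer version is always accepted, a UG_Range entry accepts anything at or above its otherVersion, and a UG_Exact entry accepts only that exact version. A brief walkthrough using entries from ndbCompatibleTable_full above (the assert calls are illustrative only):

// own 5.0.3  vs other 5.0.2  -> UG_Exact entry {5.0.3, 5.0.2}       -> compatible
assert(ndbCompatible_full(MAKE_VERSION(5,0,3),  MAKE_VERSION(5,0,2))  == 1);
// own 4.1.12 vs other 4.1.11 -> UG_Range entry {4.1.12, >= 4.1.10}  -> compatible
assert(ndbCompatible_full(MAKE_VERSION(4,1,12), MAKE_VERSION(4,1,11)) == 1);
// own 4.1.12 vs other 4.1.7  -> below every matching table entry    -> rejected
assert(ndbCompatible_full(MAKE_VERSION(4,1,12), MAKE_VERSION(4,1,7))  == 0);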
diff --git a/ndb/src/cw/cpcd/Makefile.am b/ndb/src/cw/cpcd/Makefile.am
deleted file mode 100644
index 75f557b2af7..00000000000
--- a/ndb/src/cw/cpcd/Makefile.am
+++ /dev/null
@@ -1,20 +0,0 @@
-
-ndbbin_PROGRAMS = ndb_cpcd
-
-ndb_cpcd_SOURCES = main.cpp CPCD.cpp Process.cpp APIService.cpp Monitor.cpp common.cpp
-
-LDADD_LOC = \
- $(top_builddir)/ndb/src/libndbclient.la \
- $(top_builddir)/dbug/libdbug.a \
- $(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_util.mk.am
-
-ndb_cpcd_LDFLAGS = @ndb_bin_am_ldflags@
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp:
diff --git a/ndb/src/kernel/Makefile.am b/ndb/src/kernel/Makefile.am
deleted file mode 100644
index 55d3c5a578f..00000000000
--- a/ndb/src/kernel/Makefile.am
+++ /dev/null
@@ -1,75 +0,0 @@
-SUBDIRS = error blocks vm
-
-include $(top_srcdir)/ndb/config/common.mk.am
-
-ndbbin_PROGRAMS = ndbd
-
-ndbd_SOURCES = main.cpp SimBlockList.cpp
-
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-INCLUDES += \
- -Iblocks/cmvmi \
- -Iblocks/dbacc \
- -Iblocks/dbdict \
- -Iblocks/dbdih \
- -Iblocks/dblqh \
- -Iblocks/dbtc \
- -Iblocks/dbtup \
- -Iblocks/ndbfs \
- -Iblocks/ndbcntr \
- -Iblocks/qmgr \
- -Iblocks/trix \
- -Iblocks/backup \
- -Iblocks/dbutil \
- -Iblocks/suma \
- -Iblocks/grep \
- -Iblocks/dbtux
-
-LDADD += \
- blocks/cmvmi/libcmvmi.a \
- blocks/dbacc/libdbacc.a \
- blocks/dbdict/libdbdict.a \
- blocks/dbdih/libdbdih.a \
- blocks/dblqh/libdblqh.a \
- blocks/dbtc/libdbtc.a \
- blocks/dbtup/libdbtup.a \
- blocks/ndbfs/libndbfs.a \
- blocks/ndbcntr/libndbcntr.a \
- blocks/qmgr/libqmgr.a \
- blocks/trix/libtrix.a \
- blocks/backup/libbackup.a \
- blocks/dbutil/libdbutil.a \
- blocks/suma/libsuma.a \
- blocks/grep/libgrep.a \
- blocks/dbtux/libdbtux.a \
- vm/libkernel.a \
- error/liberror.a \
- $(top_builddir)/ndb/src/common/transporter/libtransporter.la \
- $(top_builddir)/ndb/src/common/debugger/libtrace.la \
- $(top_builddir)/ndb/src/common/debugger/signaldata/libsignaldataprint.la \
- $(top_builddir)/ndb/src/common/logger/liblogger.la \
- $(top_builddir)/ndb/src/common/mgmcommon/libmgmsrvcommon.la \
- $(top_builddir)/ndb/src/mgmapi/libmgmapi.la \
- $(top_builddir)/ndb/src/common/portlib/libportlib.la \
- $(top_builddir)/ndb/src/common/util/libgeneral.la \
- $(top_builddir)/dbug/libdbug.a \
- $(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: ndbd.dsp
-
-ndbd.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(ndbbin_PROGRAMS)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(ndbd_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
diff --git a/ndb/src/kernel/blocks/backup/Makefile.am b/ndb/src/kernel/blocks/backup/Makefile.am
deleted file mode 100644
index c8f44f31292..00000000000
--- a/ndb/src/kernel/blocks/backup/Makefile.am
+++ /dev/null
@@ -1,24 +0,0 @@
-
-noinst_LIBRARIES = libbackup.a
-
-libbackup_a_SOURCES = Backup.cpp BackupInit.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libbackup.dsp
-
-libbackup.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libbackup_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/cmvmi/Makefile.am b/ndb/src/kernel/blocks/cmvmi/Makefile.am
deleted file mode 100644
index dc2e12746fd..00000000000
--- a/ndb/src/kernel/blocks/cmvmi/Makefile.am
+++ /dev/null
@@ -1,24 +0,0 @@
-
-noinst_LIBRARIES = libcmvmi.a
-
-libcmvmi_a_SOURCES = Cmvmi.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libcmvmi.dsp
-
-libcmvmi.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libcmvmi_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
deleted file mode 100644
index a2d6fe4d64a..00000000000
--- a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
+++ /dev/null
@@ -1,1470 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef DBACC_H
-#define DBACC_H
-
-
-
-#include <pc.hpp>
-#include <SimulatedBlock.hpp>
-
-// primary key is stored in TUP
-#include <Dbtup.hpp>
-
-#ifdef DBACC_C
-// Debug Macros
-#define dbgWord32(ptr, ind, val)
-
-/*
-#define dbgWord32(ptr, ind, val) \
-if(debug_jan){ \
-tmp_val = val; \
-switch(ind){ \
-case 1: strcpy(tmp_string, "ZPOS_PAGE_TYPE "); \
-break; \
-case 2: strcpy(tmp_string, "ZPOS_NO_ELEM_IN_PAGE"); \
-break; \
-case 3: strcpy(tmp_string, "ZPOS_CHECKSUM "); \
-break; \
-case 4: strcpy(tmp_string, "ZPOS_OVERFLOWREC "); \
-break; \
-case 5: strcpy(tmp_string, "ZPOS_FREE_AREA_IN_PAGE"); \
-break; \
-case 6: strcpy(tmp_string, "ZPOS_LAST_INDEX "); \
-break; \
-case 7: strcpy(tmp_string, "ZPOS_INSERT_INDEX "); \
-break; \
-case 8: strcpy(tmp_string, "ZPOS_ARRAY_POS "); \
-break; \
-case 9: strcpy(tmp_string, "ZPOS_NEXT_FREE_INDEX"); \
-break; \
-case 10: strcpy(tmp_string, "ZPOS_NEXT_PAGE "); \
-break; \
-case 11: strcpy(tmp_string, "ZPOS_PREV_PAGE "); \
-break; \
-default: sprintf(tmp_string, "%-20d", ind);\
-} \
-ndbout << "Ptr: " << ptr.p->word32 << " \tIndex: " << tmp_string << " \tValue: " << tmp_val << " \tLINE: " << __LINE__ << endl; \
-}\
-*/
-
-#define dbgUndoword(ptr, ind, val)
-
-// Constants
-/** ------------------------------------------------------------------------
- * THESE ARE CONSTANTS THAT ARE USED FOR DEFINING THE SIZE OF BUFFERS, THE
- * SIZE OF PAGE HEADERS, THE NUMBER OF BUFFERS IN A PAGE AND A NUMBER OF
- * OTHER CONSTANTS WHICH ARE CHANGED WHEN THE BUFFER SIZE IS CHANGED.
- * ----------------------------------------------------------------------- */
-#define ZHEAD_SIZE 32
-#define ZCON_HEAD_SIZE 2
-#define ZBUF_SIZE 28
-#define ZEMPTYLIST 72
-#define ZUP_LIMIT 14
-#define ZDOWN_LIMIT 12
-#define ZSHIFT_PLUS 5
-#define ZSHIFT_MINUS 2
-#define ZFREE_LIMIT 65
-#define ZNO_CONTAINERS 64
-#define ZELEM_HEAD_SIZE 1
-/* ------------------------------------------------------------------------- */
-/* THESE CONSTANTS DEFINE THE USE OF THE PAGE HEADER IN THE INDEX PAGES. */
-/* ------------------------------------------------------------------------- */
-#define ZPOS_PAGE_ID 0
-#define ZPOS_PAGE_TYPE 1
-#define ZPOS_PAGE_TYPE_BIT 14
-#define ZPOS_EMPTY_LIST 1
-#define ZPOS_ALLOC_CONTAINERS 2
-#define ZPOS_CHECKSUM 3
-#define ZPOS_OVERFLOWREC 4
-#define ZPOS_NO_ELEM_IN_PAGE 2
-#define ZPOS_FREE_AREA_IN_PAGE 5
-#define ZPOS_LAST_INDEX 6
-#define ZPOS_INSERT_INDEX 7
-#define ZPOS_ARRAY_POS 8
-#define ZPOS_NEXT_FREE_INDEX 9
-#define ZPOS_NEXT_PAGE 10
-#define ZPOS_PREV_PAGE 11
-#define ZNORMAL_PAGE_TYPE 0
-#define ZOVERFLOW_PAGE_TYPE 1
-#define ZDEFAULT_LIST 3
-#define ZWORDS_IN_PAGE 2048
-/* --------------------------------------------------------------------------------- */
-/* CONSTANTS FOR THE ZERO PAGES */
-/* --------------------------------------------------------------------------------- */
-#define ZPAGEZERO_PREV_UNDOP 8
-#define ZPAGEZERO_NO_OVER_PAGE 9
-#define ZPAGEZERO_TABID 10
-#define ZPAGEZERO_FRAGID0 11
-#define ZPAGEZERO_FRAGID1 12
-#define ZPAGEZERO_HASH_CHECK 13
-#define ZPAGEZERO_DIRSIZE 14
-#define ZPAGEZERO_EXPCOUNTER 15
-#define ZPAGEZERO_NEXT_UNDO_FILE 16
-#define ZPAGEZERO_SLACK 17
-#define ZPAGEZERO_NO_PAGES 18
-#define ZPAGEZERO_HASHCHECKBIT 19
-#define ZPAGEZERO_K 20
-#define ZPAGEZERO_LHFRAGBITS 21
-#define ZPAGEZERO_LHDIRBITS 22
-#define ZPAGEZERO_LOCALKEYLEN 23
-#define ZPAGEZERO_MAXP 24
-#define ZPAGEZERO_MAXLOADFACTOR 25
-#define ZPAGEZERO_MINLOADFACTOR 26
-#define ZPAGEZERO_MYFID 27
-#define ZPAGEZERO_LAST_OVER_INDEX 28
-#define ZPAGEZERO_P 29
-#define ZPAGEZERO_NO_OF_ELEMENTS 30
-#define ZPAGEZERO_ELEMENT_LENGTH 31
-#define ZPAGEZERO_KEY_LENGTH 32
-#define ZPAGEZERO_NODETYPE 33
-#define ZPAGEZERO_SLACK_CHECK 34
-/* --------------------------------------------------------------------------------- */
-/* CONSTANTS IN ALPHABETICAL ORDER */
-/* --------------------------------------------------------------------------------- */
-#define ZADDFRAG 0
-#define ZCOPY_NEXT 1
-#define ZCOPY_NEXT_COMMIT 2
-#define ZCOPY_COMMIT 3
-#define ZCOPY_REPEAT 4
-#define ZCOPY_ABORT 5
-#define ZCOPY_CLOSE 6
-#define ZDIRARRAY 68
-#define ZDIRRANGESIZE 65
-//#define ZEMPTY_FRAGMENT 0
-#define ZFRAGMENTSIZE 64
-#define ZFIRSTTIME 1
-#define ZFS_CONNECTSIZE 300
-#define ZFS_OPSIZE 100
-#define ZKEYINKEYREQ 4
-#define ZLCP_CONNECTSIZE 30
-#define ZLEFT 1
-#define ZLOCALLOGFILE 2
-#define ZLOCKED 0
-#define ZMAXSCANSIGNALLEN 20
-#define ZMAINKEYLEN 8
-#define ZMAX_UNDO_VERSION 4
-#define ZNO_OF_DISK_VERSION 3
-#define ZNO_OF_OP_PER_SIGNAL 20
-//#define ZNOT_EMPTY_FRAGMENT 1
-#define ZNR_OF_UNDO_PAGE_GROUP 16
-#define ZOP_HEAD_INFO_LN 3
-#define ZOPRECSIZE 740
-#define ZOVERFLOWRECSIZE 5
-#define ZPAGE8_BASE_ADD 1
-#define ZPAGESIZE 128
-#define ZPARALLEL_QUEUE 1
-#define ZPDIRECTORY 1
-#define ZSCAN_MAX_LOCK 4
-#define ZSERIAL_QUEUE 2
-#define ZSPH1 1
-#define ZSPH2 2
-#define ZSPH3 3
-#define ZSPH6 6
-#define ZREADLOCK 0
-#define ZRIGHT 2
-#define ZROOTFRAGMENTSIZE 32
-#define ZSCAN_LOCK_ALL 3
-#define ZSCAN_OP 5
-#define ZSCAN_REC_SIZE 256
-#define ZSR_VERSION_REC_SIZE 16
-#define ZSTAND_BY 2
-#define ZTABLESIZE 16
-#define ZTABMAXINDEX 3
-#define ZUNDEFINED_OP 6
-#define ZUNDOHEADSIZE 7
-#define ZUNLOCKED 1
-#define ZUNDOPAGE_BASE_ADD 2
-#define ZUNDOPAGEINDEXBITS 13
-#define ZUNDOPAGEINDEX_MASK 0x1fff
-#define ZWRITEPAGESIZE 8
-#define ZWRITE_UNDOPAGESIZE 2
-#define ZMIN_UNDO_PAGES_AT_COMMIT 4
-#define ZMIN_UNDO_PAGES_AT_OPERATION 10
-#define ZMIN_UNDO_PAGES_AT_EXPAND 16
-
-/* --------------------------------------------------------------------------------- */
-/* CONTINUEB CODES */
-/* --------------------------------------------------------------------------------- */
-#define ZLOAD_BAL_LCP_TIMER 0
-#define ZINITIALISE_RECORDS 1
-#define ZSR_READ_PAGES_ALLOC 2
-#define ZSTART_UNDO 3
-#define ZSEND_SCAN_HBREP 4
-#define ZREL_ROOT_FRAG 5
-#define ZREL_FRAG 6
-#define ZREL_DIR 7
-#define ZREPORT_MEMORY_USAGE 8
-#define ZLCP_OP_WRITE_RT_BREAK 9
-
-/* ------------------------------------------------------------------------- */
-/* ERROR CODES */
-/* ------------------------------------------------------------------------- */
-#define ZLIMIT_OF_ERROR 600 // Limit check for error codes
-#define ZCHECKROOT_ERROR 601 // Delete fragment error code
-#define ZCONNECT_SIZE_ERROR 602 // ACC_SEIZEREF
-#define ZDIR_RANGE_ERROR 603 // Add fragment error code
-#define ZFULL_FRAGRECORD_ERROR 604 // Add fragment error code
-#define ZFULL_ROOTFRAGRECORD_ERROR 605 // Add fragment error code
-#define ZROOTFRAG_STATE_ERROR 606 // Add fragment
-#define ZOVERTAB_REC_ERROR 607 // Add fragment
-
-#define ZSCAN_REFACC_CONNECT_ERROR 608 // ACC_SCANREF
-#define ZFOUR_ACTIVE_SCAN_ERROR 609 // ACC_SCANREF
-#define ZNULL_SCAN_REC_ERROR 610 // ACC_SCANREF
-
-#define ZDIRSIZE_ERROR 623
-#define ZOVER_REC_ERROR 624 // Insufficient Space
-#define ZPAGESIZE_ERROR 625
-#define ZTUPLE_DELETED_ERROR 626
-#define ZREAD_ERROR 626
-#define ZWRITE_ERROR 630
-#define ZTO_OP_STATE_ERROR 631
-#define ZTOO_EARLY_ACCESS_ERROR 632
-#define ZTEMPORARY_ACC_UNDO_FAILURE 677
-#endif
-
-class ElementHeader {
- /**
- *
- * l = Locked -- when locked, the word holds an operation ptr I; otherwise scan bits + hash value
- * s = Scan bits
- * h = Hash value
- * o = Operation ptr I
- *
- * 1111111111222222222233
- * 01234567890123456789012345678901
- * lssssssssssss hhhhhhhhhhhhhhhh
- * ooooooooooooooooooooooooooooooo
- */
-public:
- STATIC_CONST( HASH_VALUE_PART_MASK = 0xFFFF );
-
- static bool getLocked(Uint32 data);
- static bool getUnlocked(Uint32 data);
- static Uint32 getScanBits(Uint32 data);
- static Uint32 getHashValuePart(Uint32 data);
- static Uint32 getOpPtrI(Uint32 data);
-
- static Uint32 setLocked(Uint32 opPtrI);
- static Uint32 setUnlocked(Uint32 hashValuePart, Uint32 scanBits);
- static Uint32 setScanBit(Uint32 header, Uint32 scanBit);
- static Uint32 clearScanBit(Uint32 header, Uint32 scanBit);
-};
-
-inline
-bool
-ElementHeader::getLocked(Uint32 data){
- return (data & 1) == 0;
-}
-
-inline
-bool
-ElementHeader::getUnlocked(Uint32 data){
- return (data & 1) == 1;
-}
-
-inline
-Uint32
-ElementHeader::getScanBits(Uint32 data){
- assert(getUnlocked(data));
- return (data >> 1) & ((1 << MAX_PARALLEL_SCANS_PER_FRAG) - 1);
-}
-
-inline
-Uint32
-ElementHeader::getHashValuePart(Uint32 data){
- assert(getUnlocked(data));
- return data >> 16;
-}
-
-inline
-Uint32
-ElementHeader::getOpPtrI(Uint32 data){
- assert(getLocked(data));
- return data >> 1;
-}
-
-inline
-Uint32
-ElementHeader::setLocked(Uint32 opPtrI){
- return (opPtrI << 1) + 0;
-}
-inline
-Uint32
-ElementHeader::setUnlocked(Uint32 hashValue, Uint32 scanBits){
- return (hashValue << 16) + (scanBits << 1) + 1;
-}
-
-inline
-Uint32
-ElementHeader::setScanBit(Uint32 header, Uint32 scanBit){
- assert(getUnlocked(header));
- return header | (scanBit << 1);
-}
-
-inline
-Uint32
-ElementHeader::clearScanBit(Uint32 header, Uint32 scanBit){
- assert(getUnlocked(header));
- return header & (~(scanBit << 1));
-}
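A short worked example of the header encoding documented above (Uint32 and <cassert> assumed; the concrete hash part 0x1234 and scan-bit value 0x2 are only illustrative):

// Unlocked word: bit 0 = 1, scan bits from bit 1, 16-bit hash part in the top half.
Uint32 u = ElementHeader::setUnlocked(0x1234 /*hash part*/, 0x2 /*scan bits*/);
assert(ElementHeader::getUnlocked(u));
assert(ElementHeader::getHashValuePart(u) == 0x1234);
assert(ElementHeader::getScanBits(u) == 0x2);
// Locked word: bit 0 = 0, the remaining bits hold the operation record pointer.
Uint32 l = ElementHeader::setLocked(17 /*opPtrI*/);
assert(ElementHeader::getLocked(l) && ElementHeader::getOpPtrI(l) == 17);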
-
-
-class Dbacc: public SimulatedBlock {
-public:
-// State values
-enum State {
- FREEFRAG = 0,
- ACTIVEFRAG = 1,
- SEND_QUE_OP = 2,
- WAIT_ACC_LCPREQ = 3,
- LCP_SEND_PAGES = 4,
- LCP_SEND_OVER_PAGES = 5,
- LCP_SEND_ZERO_PAGE = 6,
- SR_READ_PAGES = 7,
- SR_READ_OVER_PAGES = 8,
- WAIT_ZERO_PAGE_STORED = 9,
- WAIT_NOTHING = 10,
- WAIT_OPEN_UNDO_LCP = 11,
- WAIT_OPEN_UNDO_LCP_NEXT = 12,
- WAIT_OPEN_DATA_FILE_FOR_READ = 13,
- WAIT_OPEN_DATA_FILE_FOR_WRITE = 14,
- OPEN_UNDO_FILE_SR = 15,
- READ_UNDO_PAGE = 16,
- READ_UNDO_PAGE_AND_CLOSE = 17,
- WAIT_READ_DATA = 18,
- WAIT_READ_PAGE_ZERO = 19,
- WAIT_WRITE_DATA = 20,
- WAIT_WRITE_UNDO = 21,
- WAIT_WRITE_UNDO_EXIT = 22,
- WAIT_CLOSE_UNDO = 23,
- LCP_CLOSE_DATA = 24,
- SR_CLOSE_DATA = 25,
- WAIT_ONE_CONF = 26,
- WAIT_TWO_CONF = 27,
- LCP_FREE = 28,
- LCP_ACTIVE = 29,
- FREE_OP = 30,
- WAIT_EXE_OP = 32,
- WAIT_IN_QUEUE = 34,
- EXE_OP = 35,
- SCAN_ACTIVE = 36,
- SCAN_WAIT_IN_QUEUE = 37,
- IDLE = 39,
- ACTIVE = 40,
- WAIT_COMMIT_ABORT = 41,
- ABORT = 42,
- ABORTADDFRAG = 43,
- REFUSEADDFRAG = 44,
- DELETEFRAG = 45,
- DELETETABLE = 46,
- UNDEFINEDROOT = 47,
- ADDFIRSTFRAG = 48,
- ADDSECONDFRAG = 49,
- DELETEFIRSTFRAG = 50,
- DELETESECONDFRAG = 51,
- ACTIVEROOT = 52,
- LCP_CREATION = 53
-};
-
-// Records
-
-/* --------------------------------------------------------------------------------- */
-/* UNDO HEADER RECORD */
-/* --------------------------------------------------------------------------------- */
-
- struct UndoHeader {
- enum UndoHeaderType{
- ZPAGE_INFO = 0,
- ZOVER_PAGE_INFO = 1,
- ZOP_INFO = 2,
- ZNO_UNDORECORD_TYPES = 3
- };
- UintR tableId;
- UintR rootFragId;
- UintR localFragId;
- UintR variousInfo;
- UintR logicalPageId;
- UintR prevUndoAddressForThisFrag;
- UintR prevUndoAddress;
- };
-
-/* --------------------------------------------------------------------------------- */
-/* DIRECTORY RANGE */
-/* --------------------------------------------------------------------------------- */
- struct DirRange {
- Uint32 dirArray[256];
- }; /* p2c: size = 1024 bytes */
-
- typedef Ptr<DirRange> DirRangePtr;
-
-/* --------------------------------------------------------------------------------- */
-/* DIRECTORYARRAY */
-/* --------------------------------------------------------------------------------- */
-struct Directoryarray {
- Uint32 pagep[256];
-}; /* p2c: size = 1024 bytes */
-
- typedef Ptr<Directoryarray> DirectoryarrayPtr;
-
-/* --------------------------------------------------------------------------------- */
-/* FRAGMENTREC. ALL INFORMATION ABOUT THE FRAGMENT AND ITS HASH TABLE IS SAVED IN     */
-/* THE FRAGMENT REC. A POINTER TO THE FRAGMENT RECORD IS SAVED IN ROOTFRAGMENTREC.    */
-/* --------------------------------------------------------------------------------- */
-struct Fragmentrec {
-//-----------------------------------------------------------------------------
-// References to long key pages with free area. Some type of buddy structure
-// where references at higher indexes have more free space.
-//-----------------------------------------------------------------------------
- Uint32 longKeyPageArray[4];
-
-//-----------------------------------------------------------------------------
-// These variables keep track of allocated pages, the number of them and the
-// start file page of them. Used during local checkpoints.
-//-----------------------------------------------------------------------------
- Uint32 datapages[8];
- Uint32 activeDataPage;
- Uint32 activeDataFilePage;
-
-//-----------------------------------------------------------------------------
-// Temporary variables used during shrink and expand process.
-//-----------------------------------------------------------------------------
- Uint32 expReceivePageptr;
- Uint32 expReceiveIndex;
- Uint32 expReceiveForward;
- Uint32 expSenderDirIndex;
- Uint32 expSenderDirptr;
- Uint32 expSenderIndex;
- Uint32 expSenderPageptr;
-
-//-----------------------------------------------------------------------------
-// List of lock owners and list of lock waiters to support LCP handling
-//-----------------------------------------------------------------------------
- Uint32 lockOwnersList;
- Uint32 firstWaitInQueOp;
- Uint32 lastWaitInQueOp;
- Uint32 sentWaitInQueOp;
-
-//-----------------------------------------------------------------------------
-// References to Directory Ranges (which in turn references directories, which
-// in its turn references the pages) for the bucket pages and the overflow
-// bucket pages.
-//-----------------------------------------------------------------------------
- Uint32 directory;
- Uint32 dirsize;
- Uint32 overflowdir;
- Uint32 lastOverIndex;
-
-//-----------------------------------------------------------------------------
-// These variables are used to support LCP and Restore from disk.
-// lcpDirIndex: used during LCP as the frag page id currently stored.
-// lcpMaxDirIndex: The dirsize at start of LCP.
-// lcpMaxOverDirIndex: The xx at start of LCP
-// During a LCP one writes the minimum of the number of pages in the directory
-// and the number of pages at the start of the LCP.
-// noStoredPages: Number of bucket pages written in LCP used at restore
-// noOfOverStoredPages: Number of overflow pages written in LCP used at restore
-// This variable is also used during LCP to calculate this number.
-//-----------------------------------------------------------------------------
- Uint32 lcpDirIndex;
- Uint32 lcpMaxDirIndex;
- Uint32 lcpMaxOverDirIndex;
- Uint32 noStoredPages;
- Uint32 noOfStoredOverPages;
-
-//-----------------------------------------------------------------------------
-// We have a list of overflow pages with free areas. We have a special record,
-// the overflow record representing these pages. The reason is that the
-// same record is also used to represent pages in the directory array that have
-// been released since they were empty (there were however higher indexes with
-// data in them). These are put in the firstFreeDirIndexRec-list.
-// An overflow record representing a page can only be in one of these lists.
-//-----------------------------------------------------------------------------
- Uint32 firstOverflowRec;
- Uint32 lastOverflowRec;
- Uint32 firstFreeDirindexRec;
-
-//-----------------------------------------------------------------------------
-// localCheckpId is used during execution of UNDO log to ensure that we only
-// apply UNDO log records from the restored LCP of the fragment.
-// lcpLqhPtr keeps track of LQH record for this fragment to checkpoint
-//-----------------------------------------------------------------------------
- Uint32 localCheckpId;
- Uint32 lcpLqhPtr;
-
-//-----------------------------------------------------------------------------
-// Counter keeping track of how many times we have expanded. We need to ensure
-// that we do not shrink so many times that this variable becomes negative.
-//-----------------------------------------------------------------------------
- Uint32 expandCounter;
-//-----------------------------------------------------------------------------
-// Reference to record for open file at LCP and restore
-//-----------------------------------------------------------------------------
- Uint32 fsConnPtr;
-
-//-----------------------------------------------------------------------------
-// These variables are important for the linear hashing algorithm.
-// localkeylen is the size of the local key (values 1 and 2 are currently supported)
-// maxloadfactor is the factor specifying when to expand
-// minloadfactor is the factor specifying when to shrink (hysteresis model)
-// maxp and p
-// maxp and p are the variables most central to linear hashing. p + maxp + 1 is the
-// current number of buckets. maxp is the largest value of the type 2**n - 1
-// which is smaller than the number of buckets. These values are used to find
-// correct bucket with the aid of the hash value.
-//
-// slack is the variable keeping track of whether we have inserted more or fewer
-// elements than the current size is suitable for. Slack together with the boundaries
-// set by maxloadfactor and minloadfactor decides when to expand/shrink.
-// slackCheck: when slack goes over this value it is time to expand.
-// slackCheck = (maxp + p + 1)*(maxloadfactor - minloadfactor) or
-// bucketSize * hysteresis
-//-----------------------------------------------------------------------------
- Uint32 localkeylen;
- Uint32 maxp;
- Uint32 maxloadfactor;
- Uint32 minloadfactor;
- Uint32 p;
- Uint32 slack;
- Uint32 slackCheck;
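The comment block above describes classical linear hashing (p + maxp + 1 buckets, maxp of the form 2**n - 1). A hedged sketch of the textbook bucket-addressing step those fields support, with hashValue and fragPtr as assumed locals; the actual Dbacc routines may differ in detail:

// Textbook linear-hash addressing using the fields above (sketch only).
Uint32 bucket = hashValue & fragPtr.p->maxp;             // low n bits
if (bucket < fragPtr.p->p) {
  // This bucket has already been split in the current round: use n+1 bits.
  bucket = hashValue & ((fragPtr.p->maxp << 1) | 1);
}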
-
-//-----------------------------------------------------------------------------
-// myfid is the fragment id of the fragment
-// myroot is the reference to the root fragment record
-// nextfreefrag is the next free fragment if linked into a free list
-//-----------------------------------------------------------------------------
- Uint32 myfid;
- Uint32 myroot;
- Uint32 myTableId;
- Uint32 nextfreefrag;
-
-//-----------------------------------------------------------------------------
-// This variable is used during restore to keep track of page id of read pages.
-// During read of bucket pages this is used to calculate the page id and also
-// to verify that the page id of the read page is correct. During read of over-
-// flow pages it is only used to keep track of the number of pages read.
-//-----------------------------------------------------------------------------
- Uint32 nextAllocPage;
-
-//-----------------------------------------------------------------------------
-// Keeps track of undo position for fragment during LCP and restore.
-//-----------------------------------------------------------------------------
- Uint32 prevUndoposition;
-
-//-----------------------------------------------------------------------------
-// Page reference during LCP and restore of page zero where fragment data is
-// saved
-//-----------------------------------------------------------------------------
- Uint32 zeroPagePtr;
-
-//-----------------------------------------------------------------------------
-// Number of pages read from file during restore
-//-----------------------------------------------------------------------------
- Uint32 noOfExpectedPages;
-
-//-----------------------------------------------------------------------------
-// Fragment State, mostly applicable during LCP and restore
-//-----------------------------------------------------------------------------
- State fragState;
-
-//-----------------------------------------------------------------------------
-// Keep track of number of outstanding writes of UNDO log records to ensure that
-// we have saved all UNDO info before concluding local checkpoint.
-//-----------------------------------------------------------------------------
- Uint32 nrWaitWriteUndoExit;
-
-//-----------------------------------------------------------------------------
-// lastUndoIsStored is used to handle parallel writes of UNDO log and pages to
-// know when LCP is completed
-//-----------------------------------------------------------------------------
- Uint8 lastUndoIsStored;
-
-//-----------------------------------------------------------------------------
-// Set to ZTRUE when local checkpoint freeze occurs and set to ZFALSE when
-// local checkpoint concludes.
-//-----------------------------------------------------------------------------
- Uint8 createLcp;
-
-//-----------------------------------------------------------------------------
-// Flag indicating whether we are in the load phase of restore still.
-//-----------------------------------------------------------------------------
- Uint8 loadingFlag;
-
-//-----------------------------------------------------------------------------
-// elementLength: Length of element in bucket and overflow pages
-// keyLength: Length of key
-//-----------------------------------------------------------------------------
- Uint8 elementLength;
- Uint16 keyLength;
-
-//-----------------------------------------------------------------------------
-// This flag is used to avoid sending a big number of expand or shrink signals
-// when simultaneously committing many inserts or deletes.
-//-----------------------------------------------------------------------------
- Uint8 expandFlag;
-
-//-----------------------------------------------------------------------------
-// hashcheckbit is the bit to check whether to send element to split bucket or not
-// k (== 6) is the number of buckets per page
-// lhfragbits is the number of bits used to calculate the fragment id
-// lhdirbits is the number of bits used to calculate the page id
-//-----------------------------------------------------------------------------
- Uint8 hashcheckbit;
- Uint8 k;
- Uint8 lhfragbits;
- Uint8 lhdirbits;
-
-//-----------------------------------------------------------------------------
-// nodetype can only be STORED in this release. Is currently only set, never read
-// stopQueOp is an indicator that locked operations will not start until the LCP has
-// released the lock on the fragment
-//-----------------------------------------------------------------------------
- Uint8 nodetype;
- Uint8 stopQueOp;
-
-//-----------------------------------------------------------------------------
-// flag to avoid accessing table record if no char attributes
-//-----------------------------------------------------------------------------
- Uint8 hasCharAttr;
-};
-
- typedef Ptr<Fragmentrec> FragmentrecPtr;
-
-/* --------------------------------------------------------------------------------- */
-/* FS_CONNECTREC */
-/* --------------------------------------------------------------------------------- */
-struct FsConnectrec {
- Uint32 fsNext;
- Uint32 fsPrev;
- Uint32 fragrecPtr;
- Uint32 fsPtr;
- State fsState;
- Uint8 activeFragId;
- Uint8 fsPart;
-}; /* p2c: size = 24 bytes */
-
- typedef Ptr<FsConnectrec> FsConnectrecPtr;
-
-/* --------------------------------------------------------------------------------- */
-/* FS_OPREC */
-/* --------------------------------------------------------------------------------- */
-struct FsOprec {
- Uint32 fsOpnext;
- Uint32 fsOpfragrecPtr;
- Uint32 fsConptr;
- State fsOpstate;
- Uint16 fsOpMemPage;
-}; /* p2c: size = 20 bytes */
-
- typedef Ptr<FsOprec> FsOprecPtr;
-
-/* --------------------------------------------------------------------------------- */
-/* LCP_CONNECTREC */
-/* --------------------------------------------------------------------------------- */
-struct LcpConnectrec {
- Uint32 nextLcpConn;
- Uint32 lcpUserptr;
- Uint32 rootrecptr;
- State syncUndopageState;
- State lcpstate;
- Uint32 lcpUserblockref;
- Uint16 localCheckPid;
- Uint8 noOfLcpConf;
-};
- typedef Ptr<LcpConnectrec> LcpConnectrecPtr;
-
-/* --------------------------------------------------------------------------------- */
-/* OPERATIONREC */
-/* --------------------------------------------------------------------------------- */
-struct Operationrec {
- Uint32 keydata[8];
- Uint32 localdata[2];
- Uint32 elementIsforward;
- Uint32 elementPage;
- Uint32 elementPointer;
- Uint32 fid;
- Uint32 fragptr;
- Uint32 hashvaluePart;
- Uint32 hashValue;
- Uint32 insertDeleteLen;
- Uint32 keyinfoPage;
- Uint32 nextLockOwnerOp;
- Uint32 nextOp;
- Uint32 nextParallelQue;
- Uint32 nextQueOp;
- Uint32 nextSerialQue;
- Uint32 prevOp;
- Uint32 prevLockOwnerOp;
- Uint32 prevParallelQue;
- Uint32 prevQueOp;
- Uint32 prevSerialQue;
- Uint32 scanRecPtr;
- Uint32 transId1;
- Uint32 transId2;
- Uint32 longPagePtr;
- Uint32 longKeyPageIndex;
- State opState;
- Uint32 userptr;
- State transactionstate;
- Uint16 elementContainer;
- Uint16 tupkeylen;
- Uint32 xfrmtupkeylen;
- Uint32 userblockref;
- Uint32 scanBits;
- Uint8 elementIsDisappeared;
- Uint8 insertIsDone;
- Uint8 lockMode;
- Uint8 lockOwner;
- Uint8 nodeType;
- Uint8 operation;
- Uint8 opSimple;
- Uint8 dirtyRead;
- Uint8 commitDeleteCheckFlag;
- Uint8 isAccLockReq;
- Uint8 isUndoLogReq;
-}; /* p2c: size = 168 bytes */
-
- typedef Ptr<Operationrec> OperationrecPtr;
-
-/* --------------------------------------------------------------------------------- */
-/* OVERFLOW_RECORD */
-/* --------------------------------------------------------------------------------- */
-struct OverflowRecord {
- Uint32 dirindex;
- Uint32 nextOverRec;
- Uint32 nextOverList;
- Uint32 prevOverRec;
- Uint32 prevOverList;
- Uint32 overpage;
- Uint32 nextfreeoverrec;
-};
-
- typedef Ptr<OverflowRecord> OverflowRecordPtr;
-
-/* --------------------------------------------------------------------------------- */
-/* PAGE8 */
-/* --------------------------------------------------------------------------------- */
-struct Page8 {
- Uint32 word32[2048];
-}; /* p2c: size = 8192 bytes */
-
- typedef Ptr<Page8> Page8Ptr;
-
-/* --------------------------------------------------------------------------------- */
-/* ROOTFRAGMENTREC */
-/* DURING THE EXPAND FRAGMENT PROCESS, EACH FRAGMENT WILL BE EXPANDED INTO TWO NEW  */
-/* FRAGMENTS. TO MAKE THIS PROCESS EASIER, THE NEXT FRAGMENT IDENTITIES ARE         */
-/* CALCULATED DURING THE ADD FRAGMENT PROCESS, AND TWO FRAGMENTS ARE ADDED IN       */
-/* (NDBACC). THEREBY THE EXPANSION OF A FRAGMENT CAN BE PERFORMED QUICKLY AND       */
-/* EASILY. THE NEW FRAGMENT ID IS SENT TO THE TUP MANAGER FOR ALL OPERATION PROCESSING. */
-/* --------------------------------------------------------------------------------- */
-struct Rootfragmentrec {
- Uint32 scan[MAX_PARALLEL_SCANS_PER_FRAG];
- Uint32 fragmentptr[2];
- Uint32 fragmentid[2];
- Uint32 lcpPtr;
- Uint32 mytabptr;
- Uint32 nextroot;
- Uint32 roothashcheck;
- Uint32 noOfElements;
- Uint32 m_commit_count;
- State rootState;
-}; /* p2c: size = 72 bytes */
-
- typedef Ptr<Rootfragmentrec> RootfragmentrecPtr;
-
-/* --------------------------------------------------------------------------------- */
-/* SCAN_REC */
-/* --------------------------------------------------------------------------------- */
-struct ScanRec {
- enum ScanState {
- WAIT_NEXT,
- SCAN_DISCONNECT
- };
- enum ScanBucketState {
- FIRST_LAP,
- SECOND_LAP,
- SCAN_COMPLETED
- };
- Uint32 activeLocalFrag;
- Uint32 rootPtr;
- Uint32 nextBucketIndex;
- Uint32 scanNextfreerec;
- Uint32 scanFirstActiveOp;
- Uint32 scanFirstLockedOp;
- Uint32 scanLastLockedOp;
- Uint32 scanFirstQueuedOp;
- Uint32 scanLastQueuedOp;
- Uint32 scanUserptr;
- Uint32 scanTrid1;
- Uint32 scanTrid2;
- Uint32 startNoOfBuckets;
- Uint32 minBucketIndexToRescan;
- Uint32 maxBucketIndexToRescan;
- Uint32 scanOpsAllocated;
- ScanBucketState scanBucketState;
- ScanState scanState;
- Uint16 scanLockHeld;
- Uint32 scanUserblockref;
- Uint32 scanMask;
- Uint8 scanLockMode;
- Uint8 scanKeyinfoFlag;
- Uint8 scanTimer;
- Uint8 scanContinuebCounter;
- Uint8 scanReadCommittedFlag;
-};
-
- typedef Ptr<ScanRec> ScanRecPtr;
-
-/* --------------------------------------------------------------------------------- */
-/* SR_VERSION_REC */
-/* --------------------------------------------------------------------------------- */
-struct SrVersionRec {
- Uint32 nextFreeSr;
- Uint32 checkPointId;
- Uint32 prevAddress;
- Uint32 srUnused; /* p2c: Not used */
-}; /* p2c: size = 16 bytes */
-
- typedef Ptr<SrVersionRec> SrVersionRecPtr;
-
-/* --------------------------------------------------------------------------------- */
-/* TABREC */
-/* --------------------------------------------------------------------------------- */
-struct Tabrec {
- Uint32 fragholder[MAX_FRAG_PER_NODE];
- Uint32 fragptrholder[MAX_FRAG_PER_NODE];
- Uint32 tabUserPtr;
- BlockReference tabUserRef;
-
- Uint8 noOfKeyAttr;
- Uint8 hasCharAttr;
- struct KeyAttr {
- Uint32 attributeDescriptor;
- CHARSET_INFO* charsetInfo;
- } keyAttr[MAX_ATTRIBUTES_IN_INDEX];
-};
- typedef Ptr<Tabrec> TabrecPtr;
-
-/* --------------------------------------------------------------------------------- */
-/* UNDOPAGE */
-/* --------------------------------------------------------------------------------- */
-struct Undopage {
- Uint32 undoword[8192];
-}; /* p2c: size = 32768 bytes */
-
- typedef Ptr<Undopage> UndopagePtr;
-
-public:
- Dbacc(const class Configuration &);
- virtual ~Dbacc();
-
- // pointer to TUP instance in this thread
- Dbtup* c_tup;
-
-private:
- BLOCK_DEFINES(Dbacc);
-
- // Transit signals
- void execDEBUG_SIG(Signal* signal);
- void execCONTINUEB(Signal* signal);
- void execACC_CHECK_SCAN(Signal* signal);
- void execEXPANDCHECK2(Signal* signal);
- void execSHRINKCHECK2(Signal* signal);
- void execACC_OVER_REC(Signal* signal);
- void execACC_SAVE_PAGES(Signal* signal);
- void execNEXTOPERATION(Signal* signal);
- void execREAD_PSUEDO_REQ(Signal* signal);
-
- // Received signals
- void execSTTOR(Signal* signal);
- void execSR_FRAGIDREQ(Signal* signal);
- void execLCP_FRAGIDREQ(Signal* signal);
- void execLCP_HOLDOPREQ(Signal* signal);
- void execEND_LCPREQ(Signal* signal);
- void execACC_LCPREQ(Signal* signal);
- void execSTART_RECREQ(Signal* signal);
- void execACC_CONTOPREQ(Signal* signal);
- void execACCKEYREQ(Signal* signal);
- void execACCSEIZEREQ(Signal* signal);
- void execACCFRAGREQ(Signal* signal);
- void execTC_SCHVERREQ(Signal* signal);
- void execACC_SRREQ(Signal* signal);
- void execNEXT_SCANREQ(Signal* signal);
- void execACC_ABORTREQ(Signal* signal);
- void execACC_SCANREQ(Signal* signal);
- void execACCMINUPDATE(Signal* signal);
- void execACC_COMMITREQ(Signal* signal);
- void execACC_TO_REQ(Signal* signal);
- void execACC_LOCKREQ(Signal* signal);
- void execFSOPENCONF(Signal* signal);
- void execFSOPENREF(Signal* signal);
- void execFSCLOSECONF(Signal* signal);
- void execFSCLOSEREF(Signal* signal);
- void execFSWRITECONF(Signal* signal);
- void execFSWRITEREF(Signal* signal);
- void execFSREADCONF(Signal* signal);
- void execFSREADREF(Signal* signal);
- void execNDB_STTOR(Signal* signal);
- void execDROP_TAB_REQ(Signal* signal);
- void execFSREMOVECONF(Signal* signal);
- void execFSREMOVEREF(Signal* signal);
- void execREAD_CONFIG_REQ(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
- void execDUMP_STATE_ORD(Signal* signal);
-
- // Statement blocks
- void ACCKEY_error(Uint32 fromWhere);
-
- void commitDeleteCheck();
-
- void initRootFragPageZero(RootfragmentrecPtr, Page8Ptr);
- void initRootFragSr(RootfragmentrecPtr, Page8Ptr);
- void initFragAdd(Signal*, Uint32 rootFragIndex, Uint32 rootIndex, FragmentrecPtr);
- void initFragPageZero(FragmentrecPtr, Page8Ptr);
- void initFragSr(FragmentrecPtr, Page8Ptr);
- void initFragGeneral(FragmentrecPtr);
- void verifyFragCorrect(FragmentrecPtr regFragPtr);
- void sendFSREMOVEREQ(Signal* signal, Uint32 tableId);
- void releaseFragResources(Signal* signal, Uint32 fragIndex);
- void releaseRootFragRecord(Signal* signal, RootfragmentrecPtr rootPtr);
- void releaseRootFragResources(Signal* signal, Uint32 tableId);
- void releaseDirResources(Signal* signal,
- Uint32 fragIndex,
- Uint32 dirIndex,
- Uint32 startIndex);
- void releaseDirectoryResources(Signal* signal,
- Uint32 fragIndex,
- Uint32 dirIndex,
- Uint32 startIndex,
- Uint32 directoryIndex);
- void releaseOverflowResources(Signal* signal, FragmentrecPtr regFragPtr);
- void releaseDirIndexResources(Signal* signal, FragmentrecPtr regFragPtr);
- void releaseFragRecord(Signal* signal, FragmentrecPtr regFragPtr);
- Uint32 remainingUndoPages();
- void updateLastUndoPageIdWritten(Signal* signal, Uint32 aNewValue);
- void updateUndoPositionPage(Signal* signal, Uint32 aNewValue);
- void srCheckPage(Signal* signal);
- void srCheckContainer(Signal* signal);
- void initScanFragmentPart(Signal* signal);
- Uint32 checkScanExpand(Signal* signal);
- Uint32 checkScanShrink(Signal* signal);
- void initialiseDirRec(Signal* signal);
- void initialiseDirRangeRec(Signal* signal);
- void initialiseFragRec(Signal* signal);
- void initialiseFsConnectionRec(Signal* signal);
- void initialiseFsOpRec(Signal* signal);
- void initialiseLcpConnectionRec(Signal* signal);
- void initialiseOperationRec(Signal* signal);
- void initialiseOverflowRec(Signal* signal);
- void initialisePageRec(Signal* signal);
- void initialiseLcpPages(Signal* signal);
- void initialiseRootfragRec(Signal* signal);
- void initialiseScanRec(Signal* signal);
- void initialiseSrVerRec(Signal* signal);
- void initialiseTableRec(Signal* signal);
- bool addfragtotab(Signal* signal, Uint32 rootIndex, Uint32 fragId);
- void initOpRec(Signal* signal);
- void sendAcckeyconf(Signal* signal);
- Uint32 placeReadInLockQueue(Signal* signal);
- void placeSerialQueueRead(Signal* signal);
- void checkOnlyReadEntry(Signal* signal);
- Uint32 getNoParallelTransaction(const Operationrec*);
- void moveLastParallelQueue(Signal* signal);
- void moveLastParallelQueueWrite(Signal* signal);
- Uint32 placeWriteInLockQueue(Signal* signal);
- void placeSerialQueueWrite(Signal* signal);
- void expandcontainer(Signal* signal);
- void shrinkcontainer(Signal* signal);
- void nextcontainerinfoExp(Signal* signal);
- void lcpCopyPage(Signal* signal);
- void lcpUpdatePage(Signal* signal);
- void checkUndoPages(Signal* signal);
- void undoWritingProcess(Signal* signal);
- void writeUndoDataInfo(Signal* signal);
- void writeUndoHeader(Signal* signal,
- Uint32 logicalPageId,
- UndoHeader::UndoHeaderType pageType);
- void writeUndoOpInfo(Signal* signal);
- void checksumControl(Signal* signal, Uint32 checkPage);
- void startActiveUndo(Signal* signal);
- void releaseAndCommitActiveOps(Signal* signal);
- void releaseAndCommitQueuedOps(Signal* signal);
- void releaseAndAbortLockedOps(Signal* signal);
- void containerinfo(Signal* signal);
- bool getScanElement(Signal* signal);
- void initScanOpRec(Signal* signal);
- void nextcontainerinfo(Signal* signal);
- void putActiveScanOp(Signal* signal);
- void putOpScanLockQue();
- void putReadyScanQueue(Signal* signal, Uint32 scanRecIndex);
- void releaseScanBucket(Signal* signal);
- void releaseScanContainer(Signal* signal);
- void releaseScanRec(Signal* signal);
- bool searchScanContainer(Signal* signal);
- void sendNextScanConf(Signal* signal);
- void setlock(Signal* signal);
- void takeOutActiveScanOp(Signal* signal);
- void takeOutScanLockQueue(Uint32 scanRecIndex);
- void takeOutReadyScanQueue(Signal* signal);
- void insertElement(Signal* signal);
- void insertContainer(Signal* signal);
- void addnewcontainer(Signal* signal);
- void getfreelist(Signal* signal);
- void increaselistcont(Signal* signal);
- void seizeLeftlist(Signal* signal);
- void seizeRightlist(Signal* signal);
- Uint32 readTablePk(Uint32 localkey1);
- void getElement(Signal* signal);
- void getdirindex(Signal* signal);
- void commitdelete(Signal* signal, bool systemRestart);
- void deleteElement(Signal* signal);
- void getLastAndRemove(Signal* signal);
- void releaseLeftlist(Signal* signal);
- void releaseRightlist(Signal* signal);
- void checkoverfreelist(Signal* signal);
- void abortOperation(Signal* signal);
- void accAbortReqLab(Signal* signal, bool sendConf);
- void commitOperation(Signal* signal);
- void copyOpInfo(Signal* signal);
- Uint32 executeNextOperation(Signal* signal);
- void releaselock(Signal* signal);
- void takeOutFragWaitQue(Signal* signal);
- void check_lock_upgrade(Signal* signal, OperationrecPtr lock_owner,
- OperationrecPtr release_op);
- void allocOverflowPage(Signal* signal);
- bool getrootfragmentrec(Signal* signal, RootfragmentrecPtr&, Uint32 fragId);
- void insertLockOwnersList(Signal* signal, const OperationrecPtr&);
- void takeOutLockOwnersList(Signal* signal, const OperationrecPtr&);
- void initFsOpRec(Signal* signal);
- void initLcpConnRec(Signal* signal);
- void initOverpage(Signal* signal);
- void initPage(Signal* signal);
- void initRootfragrec(Signal* signal);
- void putOpInFragWaitQue(Signal* signal);
- void putOverflowRecInFrag(Signal* signal);
- void putRecInFreeOverdir(Signal* signal);
- void releaseDirectory(Signal* signal);
- void releaseDirrange(Signal* signal);
- void releaseFsConnRec(Signal* signal);
- void releaseFsOpRec(Signal* signal);
- void releaseLcpConnectRec(Signal* signal);
- void releaseOpRec(Signal* signal);
- void releaseOverflowRec(Signal* signal);
- void releaseOverpage(Signal* signal);
- void releasePage(Signal* signal);
- void releaseLcpPage(Signal* signal);
- void releaseSrRec(Signal* signal);
- void releaseLogicalPage(Fragmentrec * fragP, Uint32 logicalPageId);
- void seizeDirectory(Signal* signal);
- void seizeDirrange(Signal* signal);
- void seizeFragrec(Signal* signal);
- void seizeFsConnectRec(Signal* signal);
- void seizeFsOpRec(Signal* signal);
- void seizeLcpConnectRec(Signal* signal);
- void seizeOpRec(Signal* signal);
- void seizeOverRec(Signal* signal);
- void seizePage(Signal* signal);
- void seizeLcpPage(Page8Ptr&);
- void seizeRootfragrec(Signal* signal);
- void seizeScanRec(Signal* signal);
- void seizeSrVerRec(Signal* signal);
- void sendSystemerror(Signal* signal);
- void takeRecOutOfFreeOverdir(Signal* signal);
- void takeRecOutOfFreeOverpage(Signal* signal);
- void sendScanHbRep(Signal* signal, Uint32);
-
- void addFragRefuse(Signal* signal, Uint32 errorCode);
- void ndbsttorryLab(Signal* signal);
- void srCloseDataFileLab(Signal* signal);
- void acckeyref1Lab(Signal* signal, Uint32 result_code);
- void insertelementLab(Signal* signal);
- void startUndoLab(Signal* signal);
- void checkNextFragmentLab(Signal* signal);
- void endofexpLab(Signal* signal);
- void endofshrinkbucketLab(Signal* signal);
- void srStartUndoLab(Signal* signal);
- void senddatapagesLab(Signal* signal);
- void undoNext2Lab(Signal* signal);
- void sttorrysignalLab(Signal* signal);
- void sendholdconfsignalLab(Signal* signal);
- void accIsLockedLab(Signal* signal);
- void insertExistElemLab(Signal* signal);
- void refaccConnectLab(Signal* signal);
- void srReadOverPagesLab(Signal* signal);
- void releaseScanLab(Signal* signal);
- void lcpOpenUndofileConfLab(Signal* signal);
- void srFsOpenConfLab(Signal* signal);
- void checkSyncUndoPagesLab(Signal* signal);
- void sendaccSrconfLab(Signal* signal);
- void checkSendLcpConfLab(Signal* signal);
- void endsaveoverpageLab(Signal* signal);
- void lcpCloseDataFileLab(Signal* signal);
- void srOpenDataFileLoopLab(Signal* signal);
- void srReadPagesLab(Signal* signal);
- void srDoUndoLab(Signal* signal);
- void ndbrestart1Lab(Signal* signal);
- void initialiseRecordsLab(Signal* signal, Uint32 ref, Uint32 data);
- void srReadPagesAllocLab(Signal* signal);
- void checkNextBucketLab(Signal* signal);
- void endsavepageLab(Signal* signal);
- void saveZeroPageLab(Signal* signal);
- void srAllocPage0011Lab(Signal* signal);
- void sendLcpFragidconfLab(Signal* signal);
- void savepagesLab(Signal* signal);
- void saveOverPagesLab(Signal* signal);
- void srReadPageZeroLab(Signal* signal);
- void storeDataPageInDirectoryLab(Signal* signal);
- void lcpFsOpenConfLab(Signal* signal);
-
- void zpagesize_error(const char* where);
-
- void reportMemoryUsage(Signal* signal, int gth);
- void lcp_write_op_to_undolog(Signal* signal);
- void reenable_expand_after_redo_log_exection_complete(Signal*);
-
- // charsets
- void xfrmKeyData(Signal* signal);
-
- // Initialisation
- void initData();
- void initRecords();
-
- // Variables
-/* --------------------------------------------------------------------------------- */
-/* DIRECTORY RANGE */
-/* --------------------------------------------------------------------------------- */
- DirRange *dirRange;
- DirRangePtr expDirRangePtr;
- DirRangePtr gnsDirRangePtr;
- DirRangePtr newDirRangePtr;
- DirRangePtr rdDirRangePtr;
- DirRangePtr nciOverflowrangeptr;
- Uint32 cdirrangesize;
- Uint32 cfirstfreeDirrange;
-/* --------------------------------------------------------------------------------- */
-/* DIRECTORYARRAY */
-/* --------------------------------------------------------------------------------- */
- Directoryarray *directoryarray;
- DirectoryarrayPtr expDirptr;
- DirectoryarrayPtr rdDirptr;
- DirectoryarrayPtr sdDirptr;
- DirectoryarrayPtr nciOverflowDirptr;
- Uint32 cdirarraysize;
- Uint32 cdirmemory;
- Uint32 cfirstfreedir;
-/* --------------------------------------------------------------------------------- */
-/* FRAGMENTREC. ALL INFORMATION ABOUT THE FRAGMENT AND ITS HASH TABLE IS SAVED IN     */
-/* THE FRAGMENT REC. A POINTER TO THE FRAGMENT RECORD IS SAVED IN ROOTFRAGMENTREC.    */
-/* --------------------------------------------------------------------------------- */
- Fragmentrec *fragmentrec;
- FragmentrecPtr fragrecptr;
- Uint32 cfirstfreefrag;
- Uint32 cfragmentsize;
-/* --------------------------------------------------------------------------------- */
-/* FS_CONNECTREC */
-/* --------------------------------------------------------------------------------- */
- FsConnectrec *fsConnectrec;
- FsConnectrecPtr fsConnectptr;
- Uint32 cfsConnectsize;
- Uint32 cfsFirstfreeconnect;
-/* --------------------------------------------------------------------------------- */
-/* FS_OPREC */
-/* --------------------------------------------------------------------------------- */
- FsOprec *fsOprec;
- FsOprecPtr fsOpptr;
- Uint32 cfsOpsize;
- Uint32 cfsFirstfreeop;
-/* --------------------------------------------------------------------------------- */
-/* LCP_CONNECTREC */
-/* --------------------------------------------------------------------------------- */
- LcpConnectrec *lcpConnectrec;
- LcpConnectrecPtr lcpConnectptr;
- Uint32 clcpConnectsize;
- Uint32 cfirstfreelcpConnect;
-/* --------------------------------------------------------------------------------- */
-/* OPERATIONREC */
-/* --------------------------------------------------------------------------------- */
- Operationrec *operationrec;
- OperationrecPtr operationRecPtr;
- OperationrecPtr idrOperationRecPtr;
- OperationrecPtr copyInOperPtr;
- OperationrecPtr copyOperPtr;
- OperationrecPtr mlpqOperPtr;
- OperationrecPtr queOperPtr;
- OperationrecPtr readWriteOpPtr;
- Uint32 cfreeopRec;
- Uint32 coprecsize;
-/* --------------------------------------------------------------------------------- */
-/* OVERFLOW_RECORD */
-/* --------------------------------------------------------------------------------- */
- OverflowRecord *overflowRecord;
- OverflowRecordPtr iopOverflowRecPtr;
- OverflowRecordPtr tfoOverflowRecPtr;
- OverflowRecordPtr porOverflowRecPtr;
- OverflowRecordPtr priOverflowRecPtr;
- OverflowRecordPtr rorOverflowRecPtr;
- OverflowRecordPtr sorOverflowRecPtr;
- OverflowRecordPtr troOverflowRecPtr;
- Uint32 cfirstfreeoverrec;
- Uint32 coverflowrecsize;
-
-/* --------------------------------------------------------------------------------- */
-/* PAGE8 */
-/* --------------------------------------------------------------------------------- */
- Page8 *page8;
- /* 8 KB PAGE */
- Page8Ptr ancPageptr;
- Page8Ptr colPageptr;
- Page8Ptr ccoPageptr;
- Page8Ptr datapageptr;
- Page8Ptr delPageptr;
- Page8Ptr excPageptr;
- Page8Ptr expPageptr;
- Page8Ptr gdiPageptr;
- Page8Ptr gePageptr;
- Page8Ptr gflPageptr;
- Page8Ptr idrPageptr;
- Page8Ptr ilcPageptr;
- Page8Ptr inpPageptr;
- Page8Ptr iopPageptr;
- Page8Ptr lastPageptr;
- Page8Ptr lastPrevpageptr;
- Page8Ptr lcnPageptr;
- Page8Ptr lcnCopyPageptr;
- Page8Ptr lupPageptr;
- Page8Ptr priPageptr;
- Page8Ptr pwiPageptr;
- Page8Ptr ciPageidptr;
- Page8Ptr gsePageidptr;
- Page8Ptr isoPageptr;
- Page8Ptr nciPageidptr;
- Page8Ptr rsbPageidptr;
- Page8Ptr rscPageidptr;
- Page8Ptr slPageidptr;
- Page8Ptr sscPageidptr;
- Page8Ptr rlPageptr;
- Page8Ptr rlpPageptr;
- Page8Ptr ropPageptr;
- Page8Ptr rpPageptr;
- Page8Ptr slPageptr;
- Page8Ptr spPageptr;
- Uint32 cfirstfreepage;
- Uint32 cfreepage;
- Uint32 cpagesize;
- Uint32 cfirstfreeLcpPage;
- Uint32 cnoOfAllocatedPages;
- Uint32 cnoLcpPages;
-/* --------------------------------------------------------------------------------- */
-/* ROOTFRAGMENTREC */
-/* DURING THE EXPAND FRAGMENT PROCESS, EACH FRAGMENT WILL BE EXPANDED INTO TWO NEW  */
-/* FRAGMENTS. TO MAKE THIS PROCESS EASIER, THE NEXT FRAGMENT IDENTITIES ARE         */
-/* CALCULATED DURING THE ADD FRAGMENT PROCESS, AND TWO FRAGMENTS ARE ADDED IN       */
-/* (NDBACC). THEREBY THE EXPANSION OF A FRAGMENT CAN BE PERFORMED QUICKLY AND       */
-/* EASILY. THE NEW FRAGMENT ID IS SENT TO THE TUP MANAGER FOR ALL OPERATION PROCESSING. */
-/* --------------------------------------------------------------------------------- */
- Rootfragmentrec *rootfragmentrec;
- RootfragmentrecPtr rootfragrecptr;
- Uint32 crootfragmentsize;
- Uint32 cfirstfreerootfrag;
-/* --------------------------------------------------------------------------------- */
-/* SCAN_REC */
-/* --------------------------------------------------------------------------------- */
- ScanRec *scanRec;
- ScanRecPtr scanPtr;
- Uint32 cscanRecSize;
- Uint32 cfirstFreeScanRec;
-/* --------------------------------------------------------------------------------- */
-/* SR_VERSION_REC */
-/* --------------------------------------------------------------------------------- */
- SrVersionRec *srVersionRec;
- SrVersionRecPtr srVersionPtr;
- Uint32 csrVersionRecSize;
- Uint32 cfirstFreeSrVersionRec;
-/* --------------------------------------------------------------------------------- */
-/* TABREC */
-/* --------------------------------------------------------------------------------- */
- Tabrec *tabrec;
- TabrecPtr tabptr;
- Uint32 ctablesize;
-/* --------------------------------------------------------------------------------- */
-/* UNDOPAGE */
-/* --------------------------------------------------------------------------------- */
- Undopage *undopage;
- /* 32 KB PAGE */
- UndopagePtr undopageptr;
- Uint32 tpwiElementptr;
- Uint32 tpriElementptr;
- Uint32 tgseElementptr;
- Uint32 tgseContainerptr;
- Uint32 trlHead;
- Uint32 trlRelCon;
- Uint32 trlNextused;
- Uint32 trlPrevused;
- Uint32 tlcnChecksum;
- Uint32 tlupElemIndex;
- Uint32 tlupIndex;
- Uint32 tlupForward;
- Uint32 tancNext;
- Uint32 tancBufType;
- Uint32 tancContainerptr;
- Uint32 tancPageindex;
- Uint32 tancPageid;
- Uint32 tidrResult;
- Uint32 tidrElemhead;
- Uint32 tidrForward;
- Uint32 tidrPageindex;
- Uint32 tidrContainerptr;
- Uint32 tidrContainerhead;
- Uint32 tlastForward;
- Uint32 tlastPageindex;
- Uint32 tlastContainerlen;
- Uint32 tlastElementptr;
- Uint32 tlastContainerptr;
- Uint32 tlastContainerhead;
- Uint32 trlPageindex;
- Uint32 tdelContainerptr;
- Uint32 tdelElementptr;
- Uint32 tdelForward;
- Uint32 tiopPageId;
- Uint32 tipPageId;
- Uint32 tgeLocked;
- Uint32 tgeResult;
- Uint32 tgeContainerptr;
- Uint32 tgeElementptr;
- Uint32 tgeForward;
- Uint32 tundoElemIndex;
- Uint32 texpReceivedBucket;
- Uint32 texpDirInd;
- Uint32 texpDirRangeIndex;
- Uint32 texpDirPageIndex;
- Uint32 tdata0;
- Uint32 tcheckpointid;
- Uint32 tciContainerptr;
- Uint32 tnciContainerptr;
- Uint32 tisoContainerptr;
- Uint32 trscContainerptr;
- Uint32 tsscContainerptr;
- Uint32 tciContainerlen;
- Uint32 trscContainerlen;
- Uint32 tsscContainerlen;
- Uint32 tciContainerhead;
- Uint32 tnciContainerhead;
- Uint32 tslElementptr;
- Uint32 tisoElementptr;
- Uint32 tsscElementptr;
- Uint32 tfid;
- Uint32 tscanFlag;
- Uint32 theadundoindex;
- Uint32 tgflBufType;
- Uint32 tgseIsforward;
- Uint32 tsscIsforward;
- Uint32 trscIsforward;
- Uint32 tciIsforward;
- Uint32 tnciIsforward;
- Uint32 tisoIsforward;
- Uint32 tgseIsLocked;
- Uint32 tsscIsLocked;
- Uint32 tkeylen;
- Uint32 tmp;
- Uint32 tmpP;
- Uint32 tmpP2;
- Uint32 tmp1;
- Uint32 tmp2;
- Uint32 tgflPageindex;
- Uint32 tmpindex;
- Uint32 tslNextfree;
- Uint32 tslPageindex;
- Uint32 tgsePageindex;
- Uint32 tnciNextSamePage;
- Uint32 tslPrevfree;
- Uint32 tciPageindex;
- Uint32 trsbPageindex;
- Uint32 tnciPageindex;
- Uint32 tlastPrevconptr;
- Uint32 tresult;
- Uint32 tslUpdateHeader;
- Uint32 tuserptr;
- BlockReference tuserblockref;
- Uint32 tundoindex;
- Uint32 tlqhPointer;
- Uint32 tholdSentOp;
- Uint32 tholdMore;
- Uint32 tlcpLqhCheckV;
- Uint32 tgdiPageindex;
- Uint32 tiopIndex;
- Uint32 tnciTmp;
- Uint32 tullIndex;
- Uint32 turlIndex;
- Uint32 tlfrTmp1;
- Uint32 tlfrTmp2;
- Uint32 tscanTrid1;
- Uint32 tscanTrid2;
-
- Uint16 clastUndoPageIdWritten;
- Uint32 cactiveCheckpId;
- Uint32 cactiveRootfrag;
- Uint32 cactiveSrFsPtr;
- Uint32 cactiveUndoFilePage;
- Uint32 cactiveOpenUndoFsPtr;
- Uint32 cactiveSrUndoPage;
- Uint32 cprevUndoaddress;
- Uint32 creadyUndoaddress;
- Uint32 ctest;
- Uint32 cundoLogActive;
- Uint32 clqhPtr;
- BlockReference clqhBlockRef;
- Uint32 cminusOne;
- NodeId cmynodeid;
- Uint32 cactiveUndoFileVersion;
- BlockReference cownBlockref;
- BlockReference cndbcntrRef;
- Uint16 csignalkey;
- Uint32 cundopagesize;
- Uint32 cundoposition;
- Uint32 cundoElemIndex;
- Uint32 cundoinfolength;
- Uint32 czero;
- Uint32 csrVersList[16];
- Uint32 clblPageCounter;
- Uint32 clblPageOver;
- Uint32 clblPagesPerTick;
- Uint32 clblPagesPerTickAfterSr;
- Uint32 csystemRestart;
- Uint32 cexcForward;
- Uint32 cexcPageindex;
- Uint32 cexcContainerptr;
- Uint32 cexcContainerhead;
- Uint32 cexcContainerlen;
- Uint32 cexcElementptr;
- Uint32 cexcPrevconptr;
- Uint32 cexcMovedLen;
- Uint32 cexcPrevpageptr;
- Uint32 cexcPrevpageindex;
- Uint32 cexcPrevforward;
- Uint32 clocalkey[32];
- union {
- Uint32 ckeys[2048];
- Uint64 ckeys_align;
- };
-
- Uint32 c_errorInsert3000_TableId;
- Uint32 cSrUndoRecords[UndoHeader::ZNO_UNDORECORD_TYPES];
-};
-
-#endif
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
deleted file mode 100644
index 90839163a72..00000000000
--- a/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
+++ /dev/null
@@ -1,343 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-
-#define DBACC_C
-#include "Dbacc.hpp"
-
-#define DEBUG(x) { ndbout << "ACC::" << x << endl; }
-
-void Dbacc::initData()
-{
- cdirarraysize = ZDIRARRAY;
- coprecsize = ZOPRECSIZE;
- cpagesize = ZPAGESIZE;
- clcpConnectsize = ZLCP_CONNECTSIZE;
- ctablesize = ZTABLESIZE;
- cfragmentsize = ZFRAGMENTSIZE;
- crootfragmentsize = ZROOTFRAGMENTSIZE;
- cdirrangesize = ZDIRRANGESIZE;
- coverflowrecsize = ZOVERFLOWRECSIZE;
- cfsConnectsize = ZFS_CONNECTSIZE;
- cfsOpsize = ZFS_OPSIZE;
- cscanRecSize = ZSCAN_REC_SIZE;
- csrVersionRecSize = ZSR_VERSION_REC_SIZE;
-
-
- dirRange = 0;
- directoryarray = 0;
- fragmentrec = 0;
- fsConnectrec = 0;
- fsOprec = 0;
- lcpConnectrec = 0;
- operationrec = 0;
- overflowRecord = 0;
- page8 = 0;
- rootfragmentrec = 0;
- scanRec = 0;
- srVersionRec = 0;
- tabrec = 0;
- undopage = 0;
-
- // Records with constant sizes
-}//Dbacc::initData()
-
-void Dbacc::initRecords()
-{
- // Records with dynamic sizes
- dirRange = (DirRange*)allocRecord("DirRange",
- sizeof(DirRange),
- cdirrangesize);
-
- directoryarray = (Directoryarray*)allocRecord("Directoryarray",
- sizeof(Directoryarray),
- cdirarraysize);
-
- fragmentrec = (Fragmentrec*)allocRecord("Fragmentrec",
- sizeof(Fragmentrec),
- cfragmentsize);
-
- fsConnectrec = (FsConnectrec*)allocRecord("FsConnectrec",
- sizeof(FsConnectrec),
- cfsConnectsize);
-
- fsOprec = (FsOprec*)allocRecord("FsOprec",
- sizeof(FsOprec),
- cfsOpsize);
-
- lcpConnectrec = (LcpConnectrec*)allocRecord("LcpConnectrec",
- sizeof(LcpConnectrec),
- clcpConnectsize);
-
- operationrec = (Operationrec*)allocRecord("Operationrec",
- sizeof(Operationrec),
- coprecsize);
-
- overflowRecord = (OverflowRecord*)allocRecord("OverflowRecord",
- sizeof(OverflowRecord),
- coverflowrecsize);
-
- page8 = (Page8*)allocRecord("Page8",
- sizeof(Page8),
- cpagesize,
- false);
-
- rootfragmentrec = (Rootfragmentrec*)allocRecord("Rootfragmentrec",
- sizeof(Rootfragmentrec),
- crootfragmentsize);
-
- scanRec = (ScanRec*)allocRecord("ScanRec",
- sizeof(ScanRec),
- cscanRecSize);
-
- srVersionRec = (SrVersionRec*)allocRecord("SrVersionRec",
- sizeof(SrVersionRec),
- csrVersionRecSize);
-
- tabrec = (Tabrec*)allocRecord("Tabrec",
- sizeof(Tabrec),
- ctablesize);
-
- undopage = (Undopage*)allocRecord("Undopage",
- sizeof(Undopage),
- cundopagesize,
- false);
-
- // Initialize BAT for interface to file system
-
- NewVARIABLE* bat = allocateBat(3);
- bat[1].WA = &page8->word32[0];
- bat[1].nrr = cpagesize;
- bat[1].ClusterSize = sizeof(Page8);
- bat[1].bits.q = 11;
- bat[1].bits.v = 5;
- bat[2].WA = &undopage->undoword[0];
- bat[2].nrr = cundopagesize;
- bat[2].ClusterSize = sizeof(Undopage);
- bat[2].bits.q = 13;
- bat[2].bits.v = 5;
-}//Dbacc::initRecords()
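
The BAT entries configured above appear to describe the two page arrays in units of 32-bit words: bits.q = 11 matches 2^11 words (8 KB) per Page8 and bits.q = 13 matches 2^13 words (32 KB) per Undopage. A minimal standalone sketch (not NDB code) of that arithmetic, assuming 4-byte words:

// Standalone sketch: check that the BAT "bits.q" values above are
// consistent with the page sizes, assuming 32-bit (4-byte) words.
#include <cstdio>

int main()
{
  const unsigned wordSize   = 4;          // sizeof(Uint32)
  const unsigned page8Words = 1u << 11;   // bits.q = 11 for Page8
  const unsigned undoWords  = 1u << 13;   // bits.q = 13 for Undopage
  std::printf("Page8    = %u bytes (expected 8 KB)\n",  page8Words * wordSize);
  std::printf("Undopage = %u bytes (expected 32 KB)\n", undoWords  * wordSize);
  return 0;
}
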
-
-Dbacc::Dbacc(const class Configuration & conf):
- SimulatedBlock(DBACC, conf),
- c_tup(0)
-{
- Uint32 log_page_size= 0;
- BLOCK_CONSTRUCTOR(Dbacc);
-
- const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
- ndbrequire(p != 0);
-
- ndb_mgm_get_int_parameter(p, CFG_DB_UNDO_INDEX_BUFFER,
- &log_page_size);
-
- /**
- * Always set page size in half MBytes
- */
- cundopagesize= (log_page_size / sizeof(Undopage));
- Uint32 mega_byte_part= cundopagesize & 15;
- if (mega_byte_part != 0) {
- jam();
- cundopagesize+= (16 - mega_byte_part);
- }
-
- // Transit signals
- addRecSignal(GSN_DUMP_STATE_ORD, &Dbacc::execDUMP_STATE_ORD);
- addRecSignal(GSN_DEBUG_SIG, &Dbacc::execDEBUG_SIG);
- addRecSignal(GSN_CONTINUEB, &Dbacc::execCONTINUEB);
- addRecSignal(GSN_ACC_CHECK_SCAN, &Dbacc::execACC_CHECK_SCAN);
- addRecSignal(GSN_EXPANDCHECK2, &Dbacc::execEXPANDCHECK2);
- addRecSignal(GSN_SHRINKCHECK2, &Dbacc::execSHRINKCHECK2);
- addRecSignal(GSN_ACC_OVER_REC, &Dbacc::execACC_OVER_REC);
- addRecSignal(GSN_ACC_SAVE_PAGES, &Dbacc::execACC_SAVE_PAGES);
- addRecSignal(GSN_NEXTOPERATION, &Dbacc::execNEXTOPERATION);
- addRecSignal(GSN_READ_PSUEDO_REQ, &Dbacc::execREAD_PSUEDO_REQ);
-
- // Received signals
- addRecSignal(GSN_STTOR, &Dbacc::execSTTOR);
- addRecSignal(GSN_SR_FRAGIDREQ, &Dbacc::execSR_FRAGIDREQ);
- addRecSignal(GSN_LCP_FRAGIDREQ, &Dbacc::execLCP_FRAGIDREQ);
- addRecSignal(GSN_LCP_HOLDOPREQ, &Dbacc::execLCP_HOLDOPREQ);
- addRecSignal(GSN_END_LCPREQ, &Dbacc::execEND_LCPREQ);
- addRecSignal(GSN_ACC_LCPREQ, &Dbacc::execACC_LCPREQ);
- addRecSignal(GSN_START_RECREQ, &Dbacc::execSTART_RECREQ);
- addRecSignal(GSN_ACC_CONTOPREQ, &Dbacc::execACC_CONTOPREQ);
- addRecSignal(GSN_ACCKEYREQ, &Dbacc::execACCKEYREQ);
- addRecSignal(GSN_ACCSEIZEREQ, &Dbacc::execACCSEIZEREQ);
- addRecSignal(GSN_ACCFRAGREQ, &Dbacc::execACCFRAGREQ);
- addRecSignal(GSN_TC_SCHVERREQ, &Dbacc::execTC_SCHVERREQ);
- addRecSignal(GSN_ACC_SRREQ, &Dbacc::execACC_SRREQ);
- addRecSignal(GSN_NEXT_SCANREQ, &Dbacc::execNEXT_SCANREQ);
- addRecSignal(GSN_ACC_ABORTREQ, &Dbacc::execACC_ABORTREQ);
- addRecSignal(GSN_ACC_SCANREQ, &Dbacc::execACC_SCANREQ);
- addRecSignal(GSN_ACCMINUPDATE, &Dbacc::execACCMINUPDATE);
- addRecSignal(GSN_ACC_COMMITREQ, &Dbacc::execACC_COMMITREQ);
- addRecSignal(GSN_ACC_TO_REQ, &Dbacc::execACC_TO_REQ);
- addRecSignal(GSN_ACC_LOCKREQ, &Dbacc::execACC_LOCKREQ);
- addRecSignal(GSN_FSOPENCONF, &Dbacc::execFSOPENCONF);
- addRecSignal(GSN_FSOPENREF, &Dbacc::execFSOPENREF);
- addRecSignal(GSN_FSCLOSECONF, &Dbacc::execFSCLOSECONF);
- addRecSignal(GSN_FSCLOSEREF, &Dbacc::execFSCLOSEREF);
- addRecSignal(GSN_FSWRITECONF, &Dbacc::execFSWRITECONF);
- addRecSignal(GSN_FSWRITEREF, &Dbacc::execFSWRITEREF);
- addRecSignal(GSN_FSREADCONF, &Dbacc::execFSREADCONF);
- addRecSignal(GSN_FSREADREF, &Dbacc::execFSREADREF);
- addRecSignal(GSN_NDB_STTOR, &Dbacc::execNDB_STTOR);
- addRecSignal(GSN_DROP_TAB_REQ, &Dbacc::execDROP_TAB_REQ);
- addRecSignal(GSN_FSREMOVECONF, &Dbacc::execFSREMOVECONF);
- addRecSignal(GSN_FSREMOVEREF, &Dbacc::execFSREMOVEREF);
- addRecSignal(GSN_READ_CONFIG_REQ, &Dbacc::execREAD_CONFIG_REQ, true);
- addRecSignal(GSN_SET_VAR_REQ, &Dbacc::execSET_VAR_REQ);
-
- initData();
-
-#ifdef VM_TRACE
- {
- void* tmp[] = { &expDirRangePtr,
- &gnsDirRangePtr,
- &newDirRangePtr,
- &rdDirRangePtr,
- &nciOverflowrangeptr,
- &expDirptr,
- &rdDirptr,
- &sdDirptr,
- &nciOverflowDirptr,
- &fragrecptr,
- &fsConnectptr,
- &fsOpptr,
- &lcpConnectptr,
- &operationRecPtr,
- &idrOperationRecPtr,
- &copyInOperPtr,
- &copyOperPtr,
- &mlpqOperPtr,
- &queOperPtr,
- &readWriteOpPtr,
- &iopOverflowRecPtr,
- &tfoOverflowRecPtr,
- &porOverflowRecPtr,
- &priOverflowRecPtr,
- &rorOverflowRecPtr,
- &sorOverflowRecPtr,
- &troOverflowRecPtr,
- &ancPageptr,
- &colPageptr,
- &ccoPageptr,
- &datapageptr,
- &delPageptr,
- &excPageptr,
- &expPageptr,
- &gdiPageptr,
- &gePageptr,
- &gflPageptr,
- &idrPageptr,
- &ilcPageptr,
- &inpPageptr,
- &iopPageptr,
- &lastPageptr,
- &lastPrevpageptr,
- &lcnPageptr,
- &lcnCopyPageptr,
- &lupPageptr,
- &priPageptr,
- &pwiPageptr,
- &ciPageidptr,
- &gsePageidptr,
- &isoPageptr,
- &nciPageidptr,
- &rsbPageidptr,
- &rscPageidptr,
- &slPageidptr,
- &sscPageidptr,
- &rlPageptr,
- &rlpPageptr,
- &ropPageptr,
- &rpPageptr,
- &slPageptr,
- &spPageptr,
- &rootfragrecptr,
- &scanPtr,
- &srVersionPtr,
- &tabptr,
- &undopageptr
- };
- init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0]));
- }
-#endif
-}//Dbacc::Dbacc()
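
The constructor above sizes the undo index buffer in half-MByte units: with 32 KB undo pages, 16 pages make 512 KB, so the computed page count is rounded up to the next multiple of 16. A minimal standalone sketch (not NDB code) of that rounding; the helper name is hypothetical:

// Standalone sketch of the rounding done in the constructor above:
// round a page count up to a multiple of 16 pages (half-MByte chunks
// when each Undopage is 32 KB).
#include <cstdio>

static unsigned roundUpToHalfMbyte(unsigned pages)
{
  unsigned rest = pages & 15;             // pages modulo 16
  return (rest != 0) ? pages + (16 - rest) : pages;
}

int main()
{
  std::printf("17 pages -> %u pages\n", roundUpToHalfMbyte(17));  // 32
  std::printf("32 pages -> %u pages\n", roundUpToHalfMbyte(32));  // 32
  return 0;
}
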
-
-Dbacc::~Dbacc()
-{
- deallocRecord((void **)&dirRange, "DirRange",
- sizeof(DirRange),
- cdirrangesize);
-
- deallocRecord((void **)&directoryarray, "Directoryarray",
- sizeof(Directoryarray),
- cdirarraysize);
-
- deallocRecord((void **)&fragmentrec, "Fragmentrec",
- sizeof(Fragmentrec),
- cfragmentsize);
-
- deallocRecord((void **)&fsConnectrec, "FsConnectrec",
- sizeof(FsConnectrec),
- cfsConnectsize);
-
- deallocRecord((void **)&fsOprec, "FsOprec",
- sizeof(FsOprec),
- cfsOpsize);
-
- deallocRecord((void **)&lcpConnectrec, "LcpConnectrec",
- sizeof(LcpConnectrec),
- clcpConnectsize);
-
- deallocRecord((void **)&operationrec, "Operationrec",
- sizeof(Operationrec),
- coprecsize);
-
- deallocRecord((void **)&overflowRecord, "OverflowRecord",
- sizeof(OverflowRecord),
- coverflowrecsize);
-
- deallocRecord((void **)&page8, "Page8",
- sizeof(Page8),
- cpagesize);
-
- deallocRecord((void **)&rootfragmentrec, "Rootfragmentrec",
- sizeof(Rootfragmentrec),
- crootfragmentsize);
-
- deallocRecord((void **)&scanRec, "ScanRec",
- sizeof(ScanRec),
- cscanRecSize);
-
- deallocRecord((void **)&srVersionRec, "SrVersionRec",
- sizeof(SrVersionRec),
- csrVersionRecSize);
-
- deallocRecord((void **)&tabrec, "Tabrec",
- sizeof(Tabrec),
- ctablesize);
-
- deallocRecord((void **)&undopage, "Undopage",
- sizeof(Undopage),
- cundopagesize);
-
-}//Dbacc::~Dbacc()
-
-BLOCK_FUNCTIONS(Dbacc)
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
deleted file mode 100644
index 24f9715c8b4..00000000000
--- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ /dev/null
@@ -1,11817 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#define DBACC_C
-#include "Dbacc.hpp"
-#include <my_sys.h>
-
-#include <AttributeHeader.hpp>
-#include <signaldata/AccFrag.hpp>
-#include <signaldata/AccScan.hpp>
-#include <signaldata/AccLock.hpp>
-#include <signaldata/EventReport.hpp>
-#include <signaldata/FsConf.hpp>
-#include <signaldata/FsRef.hpp>
-#include <signaldata/FsRemoveReq.hpp>
-#include <signaldata/DropTab.hpp>
-#include <signaldata/DumpStateOrd.hpp>
-#include <SectionReader.hpp>
-
-// TO_DO_RONM is a label for comments on what needs to be improved in future versions
-// when more time is given.
-
-#ifdef VM_TRACE
-#define DEBUG(x) ndbout << "DBACC: "<< x << endl;
-#else
-#define DEBUG(x)
-#endif
-
-
-Uint32
-Dbacc::remainingUndoPages(){
- Uint32 HeadPage = cundoposition >> ZUNDOPAGEINDEXBITS;
- Uint32 TailPage = clastUndoPageIdWritten;
-
- // Head must be larger or same as tail
- ndbrequire(HeadPage>=TailPage);
-
- Uint32 UsedPages = HeadPage - TailPage;
- Int32 Remaining = cundopagesize - UsedPages;
-
- // There can not be more than cundopagesize remaining
- if (Remaining <= 0){
- // No more undolog, crash node
- progError(__LINE__,
- ERR_NO_MORE_UNDOLOG,
- "There are more than 1Mbyte undolog writes outstanding");
- }
- return Remaining;
-}
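
remainingUndoPages() models the undo log as a window between a head (the current write position, counted in pages) and a tail (the last page confirmed written); the pages in between are in flight, and running out of headroom is treated as fatal. A standalone sketch (not NDB code) of the same accounting, with hypothetical names:

// Standalone sketch of the head/tail accounting in remainingUndoPages():
// used = head - tail, remaining = total - used, and a non-positive
// remaining count is a fatal condition.
#include <cassert>
#include <cstdio>

struct UndoLogWindow
{
  unsigned totalPages;   // size of the undo buffer, in pages
  unsigned headPage;     // current write position
  unsigned tailPage;     // last page confirmed written to disk

  int remaining() const
  {
    assert(headPage >= tailPage);   // the head may never trail the tail
    return (int)totalPages - (int)(headPage - tailPage);
  }
};

int main()
{
  UndoLogWindow w = { 32, 40, 20 };   // 20 pages currently in flight
  std::printf("remaining = %d pages\n", w.remaining());   // 12
  return 0;
}
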
-
-void
-Dbacc::updateLastUndoPageIdWritten(Signal* signal, Uint32 aNewValue){
- if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_COMMIT) {
- clastUndoPageIdWritten = aNewValue;
- if (remainingUndoPages() >= ZMIN_UNDO_PAGES_AT_COMMIT) {
- jam();
- EXECUTE_DIRECT(DBLQH, GSN_ACC_COM_UNBLOCK, signal, 1);
- jamEntry();
- }//if
- } else {
- clastUndoPageIdWritten = aNewValue;
- }//if
-}//Dbacc::updateLastUndoPageIdWritten()
-
-void
-Dbacc::updateUndoPositionPage(Signal* signal, Uint32 aNewValue){
- if (remainingUndoPages() >= ZMIN_UNDO_PAGES_AT_COMMIT) {
- cundoposition = aNewValue;
- if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_COMMIT) {
- jam();
- EXECUTE_DIRECT(DBLQH, GSN_ACC_COM_BLOCK, signal, 1);
- jamEntry();
- }//if
- } else {
- cundoposition = aNewValue;
- }//if
-}//Dbacc::updateUndoPositionPage()
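
Together, updateLastUndoPageIdWritten() and updateUndoPositionPage() form a hysteresis around ZMIN_UNDO_PAGES_AT_COMMIT: commits in DBLQH are blocked when the remaining undo pages fall below the limit and unblocked once the writer catches up, and the direct signal is sent only when the limit is actually crossed. A standalone sketch (not NDB code) of that crossing detection; all names below are hypothetical:

// Standalone sketch of the block/unblock hysteresis above: the callback
// fires only when the remaining-page count crosses the low-water mark,
// not on every update.
#include <cstdio>
#include <functional>

class CommitThrottle
{
public:
  CommitThrottle(unsigned lowWater, std::function<void(bool)> onChange)
    : m_lowWater(lowWater), m_onChange(onChange) {}

  // remainingBefore/remainingAfter mirror the two remainingUndoPages()
  // calls made around the member update in the code above.
  void update(unsigned remainingBefore, unsigned remainingAfter)
  {
    if (remainingBefore >= m_lowWater && remainingAfter < m_lowWater)
      m_onChange(true);    // crossed downwards: block commits
    else if (remainingBefore < m_lowWater && remainingAfter >= m_lowWater)
      m_onChange(false);   // crossed upwards: unblock commits
  }

private:
  unsigned m_lowWater;
  std::function<void(bool)> m_onChange;
};

int main()
{
  CommitThrottle throttle(8, [](bool blocked) {
    std::printf("%s\n", blocked ? "block commits" : "unblock commits");
  });
  throttle.update(10, 7);   // prints "block commits"
  throttle.update(7, 9);    // prints "unblock commits"
  return 0;
}
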
-
-// Signal entries and statement blocks
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* COMMON SIGNAL RECEPTION MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-
-/* --------------------------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* CONTINUEB CONTINUE SIGNAL */
-/* ******************------------------------------+ */
-/* SENDER: ACC, LEVEL B */
-void Dbacc::execCONTINUEB(Signal* signal)
-{
- Uint32 tcase;
-
- jamEntry();
- tcase = signal->theData[0];
- tdata0 = signal->theData[1];
- tresult = 0;
- switch (tcase) {
- case ZLOAD_BAL_LCP_TIMER:
- if (clblPageOver == 0) {
- jam();
- clblPageCounter = clblPagesPerTick;
- } else {
- if (clblPageOver > clblPagesPerTick) {
- jam();
- clblPageOver = clblPageOver - clblPagesPerTick;
- } else {
- jam();
- clblPageOver = 0;
- clblPageCounter = clblPagesPerTick - clblPageOver;
- }//if
- }//if
- signal->theData[0] = ZLOAD_BAL_LCP_TIMER;
- sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 100, 1);
- return;
- break;
- case ZINITIALISE_RECORDS:
- jam();
- initialiseRecordsLab(signal, signal->theData[3], signal->theData[4]);
- return;
- break;
- case ZSR_READ_PAGES_ALLOC:
- jam();
- fragrecptr.i = tdata0;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- srReadPagesAllocLab(signal);
- return;
- break;
- case ZSTART_UNDO:
- jam();
- startUndoLab(signal);
- return;
- break;
- case ZSEND_SCAN_HBREP:
- jam();
- sendScanHbRep(signal, tdata0);
- break;
- case ZREL_ROOT_FRAG:
- {
- jam();
- Uint32 tableId = signal->theData[1];
- releaseRootFragResources(signal, tableId);
- break;
- }
- case ZREL_FRAG:
- {
- jam();
- Uint32 fragIndex = signal->theData[1];
- releaseFragResources(signal, fragIndex);
- break;
- }
- case ZREL_DIR:
- {
- jam();
- Uint32 fragIndex = signal->theData[1];
- Uint32 dirIndex = signal->theData[2];
- Uint32 startIndex = signal->theData[3];
- releaseDirResources(signal, fragIndex, dirIndex, startIndex);
- break;
- }
- case ZREPORT_MEMORY_USAGE:{
- jam();
- static int c_currentMemUsed = 0;
- int now = (cnoOfAllocatedPages * 100)/cpagesize;
- const int thresholds[] = { 99, 90, 80, 0};
-
- Uint32 i = 0;
- const Uint32 sz = sizeof(thresholds)/sizeof(thresholds[0]);
- for(i = 0; i<sz; i++){
- if(now >= thresholds[i]){
- now = thresholds[i];
- break;
- }
- }
-
- if(now != c_currentMemUsed){
- reportMemoryUsage(signal, now > c_currentMemUsed ? 1 : -1);
- }
-
- c_currentMemUsed = now;
-
- signal->theData[0] = ZREPORT_MEMORY_USAGE;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 2000, 1);
- return;
- }
-
- case ZLCP_OP_WRITE_RT_BREAK:
- {
- operationRecPtr.i= signal->theData[1];
- fragrecptr.i= signal->theData[2];
- lcpConnectptr.i= signal->theData[3];
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- lcp_write_op_to_undolog(signal);
- return;
- }
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dbacc::execCONTINUEB()
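
The ZREPORT_MEMORY_USAGE case above snaps the current page usage (in percent) down to the nearest of the thresholds 99, 90, 80 and 0, and sends a report only when that bucket changes from one 2-second tick to the next. A standalone sketch (not NDB code) of the bucketing:

// Standalone sketch of the threshold bucketing in ZREPORT_MEMORY_USAGE:
// usage is snapped down to the nearest threshold and a report is issued
// only when the bucket changes.
#include <cstdio>

static int usageBucket(int percentUsed)
{
  const int thresholds[] = { 99, 90, 80, 0 };
  for (int t : thresholds)
    if (percentUsed >= t)
      return t;
  return 0;
}

int main()
{
  int previous = usageBucket(75);   // bucket 0
  int current  = usageBucket(83);   // bucket 80
  if (current != previous)
    std::printf("report memory usage (%s)\n",
                current > previous ? "rising" : "falling");
  return 0;
}
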
-
-/* ******************--------------------------------------------------------------- */
-/* FSCLOSECONF CLOSE FILE CONF */
-/* ******************------------------------------+ */
-/* SENDER: FS, LEVEL B */
-void Dbacc::execFSCLOSECONF(Signal* signal)
-{
- jamEntry();
- fsConnectptr.i = signal->theData[0];
- ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
- tresult = 0;
- switch (fsConnectptr.p->fsState) {
- case WAIT_CLOSE_UNDO:
- jam();
- releaseFsConnRec(signal);
- break;
- case LCP_CLOSE_DATA:
- jam();
- checkSyncUndoPagesLab(signal);
- return;
- break;
- case SR_CLOSE_DATA:
- jam();
- sendaccSrconfLab(signal);
- return;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dbacc::execFSCLOSECONF()
-
-/* ******************--------------------------------------------------------------- */
-/* FSCLOSEREF                                        CLOSE FILE REF              */
-/* ******************------------------------------+ */
-/* SENDER: FS, LEVEL B */
-void Dbacc::execFSCLOSEREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dbacc::execFSCLOSEREF()
-
-/* ******************--------------------------------------------------------------- */
-/* FSOPENCONF OPENFILE CONF */
-/* ******************------------------------------+ */
-/* SENDER: FS, LEVEL B */
-void Dbacc::execFSOPENCONF(Signal* signal)
-{
- jamEntry();
- fsConnectptr.i = signal->theData[0];
- ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
- tuserptr = signal->theData[1];
- tresult = 0; /* RESULT CHECK VALUE */
- switch (fsConnectptr.p->fsState) {
- case WAIT_OPEN_UNDO_LCP:
- jam();
- lcpOpenUndofileConfLab(signal);
- return;
- break;
- case WAIT_OPEN_UNDO_LCP_NEXT:
- jam();
- fsConnectptr.p->fsPtr = tuserptr;
- return;
- break;
- case OPEN_UNDO_FILE_SR:
- jam();
- fsConnectptr.p->fsPtr = tuserptr;
- srStartUndoLab(signal);
- return;
- break;
- case WAIT_OPEN_DATA_FILE_FOR_WRITE:
- jam();
- lcpFsOpenConfLab(signal);
- return;
- break;
- case WAIT_OPEN_DATA_FILE_FOR_READ:
- jam();
- fsConnectptr.p->fsPtr = tuserptr;
- srFsOpenConfLab(signal);
- return;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dbacc::execFSOPENCONF()
-
-/* ******************--------------------------------------------------------------- */
-/* FSOPENREF OPENFILE REF */
-/* ******************------------------------------+ */
-/* SENDER: FS, LEVEL B */
-void Dbacc::execFSOPENREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dbacc::execFSOPENREF()
-
-/* ******************--------------------------------------------------------------- */
-/* FSREADCONF                                        READ FILE CONF              */
-/* ******************------------------------------+ */
-/* SENDER: FS, LEVEL B */
-void Dbacc::execFSREADCONF(Signal* signal)
-{
- jamEntry();
- fsConnectptr.i = signal->theData[0];
- ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
- tresult = 0; /* RESULT CHECK VALUE */
- switch (fsConnectptr.p->fsState) {
- case WAIT_READ_PAGE_ZERO:
- jam();
- fragrecptr.i = fsConnectptr.p->fragrecPtr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- srReadPageZeroLab(signal);
- return;
- break;
- case WAIT_READ_DATA:
- jam();
- fragrecptr.i = fsConnectptr.p->fragrecPtr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- storeDataPageInDirectoryLab(signal);
- return;
- break;
- case READ_UNDO_PAGE:
- jam();
- srDoUndoLab(signal);
- return;
- break;
- case READ_UNDO_PAGE_AND_CLOSE:
- jam();
- fsConnectptr.p->fsState = WAIT_CLOSE_UNDO;
- /* ************************ */
- /* FSCLOSEREQ */
- /* ************************ */
- signal->theData[0] = fsConnectptr.p->fsPtr;
- signal->theData[1] = cownBlockref;
- signal->theData[2] = fsConnectptr.i;
- signal->theData[3] = 0;
- sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
- /* FLAG = DO NOT DELETE FILE */
- srDoUndoLab(signal);
- return;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dbacc::execFSREADCONF()
-
-/* ******************--------------------------------------------------------------- */
-/* FSREADREF                                         READ FILE REF               */
-/* ******************------------------------------+ */
-/* SENDER: FS, LEVEL B */
-void Dbacc::execFSREADREF(Signal* signal)
-{
- jamEntry();
-  progError(__LINE__, 0, "Read of file refused");
- return;
-}//Dbacc::execFSREADREF()
-
-/* ******************--------------------------------------------------------------- */
-/* FSWRITECONF                                       WRITE FILE CONF             */
-/* ******************------------------------------+ */
-/* SENDER: FS, LEVEL B */
-void Dbacc::execFSWRITECONF(Signal* signal)
-{
- jamEntry();
- fsOpptr.i = signal->theData[0];
- ptrCheckGuard(fsOpptr, cfsOpsize, fsOprec);
- /* FS_OPERATION PTR */
- tresult = 0; /* RESULT CHECK VALUE */
- fsConnectptr.i = fsOpptr.p->fsConptr;
- ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
- fragrecptr.i = fsOpptr.p->fsOpfragrecPtr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- switch (fsOpptr.p->fsOpstate) {
- case WAIT_WRITE_UNDO:
- jam();
- updateLastUndoPageIdWritten(signal, fsOpptr.p->fsOpMemPage);
- releaseFsOpRec(signal);
- if (fragrecptr.p->nrWaitWriteUndoExit == 0) {
- jam();
- checkSendLcpConfLab(signal);
- return;
- } else {
- jam();
- fragrecptr.p->lastUndoIsStored = ZTRUE;
- }//if
- return;
- break;
- case WAIT_WRITE_UNDO_EXIT:
- jam();
- updateLastUndoPageIdWritten(signal, fsOpptr.p->fsOpMemPage);
- releaseFsOpRec(signal);
- if (fragrecptr.p->nrWaitWriteUndoExit > 0) {
- jam();
- fragrecptr.p->nrWaitWriteUndoExit--;
- }//if
- if (fsConnectptr.p->fsState == WAIT_CLOSE_UNDO) {
- jam();
- /* ************************ */
- /* FSCLOSEREQ */
- /* ************************ */
- signal->theData[0] = fsConnectptr.p->fsPtr;
- signal->theData[1] = cownBlockref;
- signal->theData[2] = fsConnectptr.i;
- signal->theData[3] = ZFALSE;
- sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
- }//if
- if (fragrecptr.p->nrWaitWriteUndoExit == 0) {
- if (fragrecptr.p->lastUndoIsStored == ZTRUE) {
- jam();
- fragrecptr.p->lastUndoIsStored = ZFALSE;
- checkSendLcpConfLab(signal);
- return;
- }//if
- }//if
- return;
- break;
- case WAIT_WRITE_DATA:
- jam();
- releaseFsOpRec(signal);
- fragrecptr.p->activeDataFilePage += ZWRITEPAGESIZE;
- fragrecptr.p->activeDataPage = 0;
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- switch (fragrecptr.p->fragState) {
- case LCP_SEND_PAGES:
- jam();
- savepagesLab(signal);
- return;
- break;
- case LCP_SEND_OVER_PAGES:
- jam();
- saveOverPagesLab(signal);
- return;
- break;
- case LCP_SEND_ZERO_PAGE:
- jam();
- saveZeroPageLab(signal);
- return;
- break;
- case WAIT_ZERO_PAGE_STORED:
- jam();
- lcpCloseDataFileLab(signal);
- return;
- break;
- default:
- ndbrequire(false);
- return;
- break;
- }//switch
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dbacc::execFSWRITECONF()
-
-/* ******************--------------------------------------------------------------- */
-/* FSWRITEREF                                        WRITE FILE REF              */
-/* ******************------------------------------+ */
-/* SENDER: FS, LEVEL B */
-void Dbacc::execFSWRITEREF(Signal* signal)
-{
- jamEntry();
-  progError(__LINE__, 0, "Write to file refused");
- return;
-}//Dbacc::execFSWRITEREF()
-
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* */
-/* END OF COMMON SIGNAL RECEPTION MODULE */
-/* */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* */
-/* SYSTEM RESTART MODULE */
-/* */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-void Dbacc::execNDB_STTOR(Signal* signal)
-{
- Uint32 tstartphase;
- Uint32 tStartType;
-
- jamEntry();
- cndbcntrRef = signal->theData[0];
- cmynodeid = signal->theData[1];
- tstartphase = signal->theData[2];
- tStartType = signal->theData[3];
- switch (tstartphase) {
- case ZSPH1:
- jam();
- ndbsttorryLab(signal);
- return;
- break;
- case ZSPH2:
- cnoLcpPages = 2 * (ZWRITEPAGESIZE + 1);
- initialiseLcpPages(signal);
- ndbsttorryLab(signal);
- return;
- break;
- case ZSPH3:
- if ((tStartType == NodeState::ST_NODE_RESTART) ||
- (tStartType == NodeState::ST_INITIAL_NODE_RESTART)) {
- jam();
- //---------------------------------------------
- // csystemRestart is used to check what is needed
- // during log execution. When starting a node it
-      // is not a log execution but rather a normal
- // execution. Thus we reset the variable here to
- // avoid unnecessary system crashes.
- //---------------------------------------------
- csystemRestart = ZFALSE;
- }//if
-
- signal->theData[0] = ZLOAD_BAL_LCP_TIMER;
- sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 100, 1);
- break;
- case ZSPH6:
- jam();
- clblPagesPerTick = clblPagesPerTickAfterSr;
- csystemRestart = ZFALSE;
-
- signal->theData[0] = ZREPORT_MEMORY_USAGE;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 2000, 1);
- break;
- default:
- jam();
- /*empty*/;
- break;
- }//switch
- ndbsttorryLab(signal);
- return;
-}//Dbacc::execNDB_STTOR()
-
-/* ******************--------------------------------------------------------------- */
-/* STTOR START / RESTART */
-/* ******************------------------------------+ */
-/* SENDER: ANY, LEVEL B */
-void Dbacc::execSTTOR(Signal* signal)
-{
- jamEntry();
- Uint32 tstartphase = signal->theData[1];
- switch (tstartphase) {
- case 1:
- jam();
- c_tup = (Dbtup*)globalData.getBlock(DBTUP);
- ndbrequire(c_tup != 0);
- break;
- }
- tuserblockref = signal->theData[3];
- csignalkey = signal->theData[6];
- sttorrysignalLab(signal);
- return;
-}//Dbacc::execSTTOR()
-
-/* --------------------------------------------------------------------------------- */
-/* ZSPH1 */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::ndbrestart1Lab(Signal* signal)
-{
- cmynodeid = globalData.ownId;
- cownBlockref = numberToRef(DBACC, cmynodeid);
- czero = 0;
- cminusOne = czero - 1;
- ctest = 0;
- cundoLogActive = ZFALSE;
- csystemRestart = ZTRUE;
- clblPageOver = 0;
- clblPageCounter = 0;
- cactiveUndoFilePage = 0;
- cprevUndoaddress = cminusOne;
- cundoposition = 0;
- clastUndoPageIdWritten = 0;
- cactiveUndoFileVersion = RNIL;
- cactiveOpenUndoFsPtr = RNIL;
- for (Uint32 tmp = 0; tmp < ZMAX_UNDO_VERSION; tmp++) {
- csrVersList[tmp] = RNIL;
- }//for
- return;
-}//Dbacc::ndbrestart1Lab()
-
-void Dbacc::initialiseRecordsLab(Signal* signal, Uint32 ref, Uint32 data)
-{
- switch (tdata0) {
- case 0:
- jam();
- initialiseTableRec(signal);
- break;
- case 1:
- jam();
- initialiseFsConnectionRec(signal);
- break;
- case 2:
- jam();
- initialiseFsOpRec(signal);
- break;
- case 3:
- jam();
- initialiseLcpConnectionRec(signal);
- break;
- case 4:
- jam();
- initialiseDirRec(signal);
- break;
- case 5:
- jam();
- initialiseDirRangeRec(signal);
- break;
- case 6:
- jam();
- initialiseFragRec(signal);
- break;
- case 7:
- jam();
- initialiseOverflowRec(signal);
- break;
- case 8:
- jam();
- initialiseOperationRec(signal);
- break;
- case 9:
- jam();
- initialisePageRec(signal);
- break;
- case 10:
- jam();
- initialiseRootfragRec(signal);
- break;
- case 11:
- jam();
- initialiseScanRec(signal);
- break;
- case 12:
- jam();
- initialiseSrVerRec(signal);
-
- {
- ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = data;
- sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
- ReadConfigConf::SignalLength, JBB);
- }
- return;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
-
- signal->theData[0] = ZINITIALISE_RECORDS;
- signal->theData[1] = tdata0 + 1;
- signal->theData[2] = 0;
- signal->theData[3] = ref;
- signal->theData[4] = data;
- sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
- return;
-}//Dbacc::initialiseRecordsLab()
-
-/* *********************************<< */
-/* NDB_STTORRY */
-/* *********************************<< */
-void Dbacc::ndbsttorryLab(Signal* signal)
-{
- signal->theData[0] = cownBlockref;
- sendSignal(cndbcntrRef, GSN_NDB_STTORRY, signal, 1, JBB);
- return;
-}//Dbacc::ndbsttorryLab()
-
-/* *********************************<< */
-/* SIZEALT_REP SIZE ALTERATION */
-/* *********************************<< */
-void Dbacc::execREAD_CONFIG_REQ(Signal* signal)
-{
- const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
- Uint32 ref = req->senderRef;
- Uint32 senderData = req->senderData;
- ndbrequire(req->noOfParameters == 0);
-
- jamEntry();
-
- const ndb_mgm_configuration_iterator * p =
- theConfiguration.getOwnConfigIterator();
- ndbrequire(p != 0);
-
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_DIR_RANGE, &cdirrangesize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_DIR_ARRAY, &cdirarraysize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_FRAGMENT, &cfragmentsize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_OP_RECS, &coprecsize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_OVERFLOW_RECS,
- &coverflowrecsize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_PAGE8, &cpagesize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_ROOT_FRAG,
- &crootfragmentsize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_TABLE, &ctablesize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_SCAN, &cscanRecSize));
- initRecords();
- ndbrestart1Lab(signal);
-
- clblPagesPerTick = 50;
- //ndb_mgm_get_int_parameter(p, CFG_DB_, &clblPagesPerTick);
-
- clblPagesPerTickAfterSr = 50;
- //ndb_mgm_get_int_parameter(p, CFG_DB_, &clblPagesPerTickAfterSr);
-
- tdata0 = 0;
- initialiseRecordsLab(signal, ref, senderData);
- return;
-}//Dbacc::execREAD_CONFIG_REQ()
-
-/* *********************************<< */
-/* STTORRY */
-/* *********************************<< */
-void Dbacc::sttorrysignalLab(Signal* signal)
-{
- signal->theData[0] = csignalkey;
- signal->theData[1] = 3;
- /* BLOCK CATEGORY */
- signal->theData[2] = 2;
- /* SIGNAL VERSION NUMBER */
- signal->theData[3] = ZSPH1;
- signal->theData[4] = 255;
- sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
- /* END OF START PHASES */
- return;
-}//Dbacc::sttorrysignalLab()
-
-/* --------------------------------------------------------------------------------- */
-/* INITIALISE_DIR_REC */
-/*               INITIALISES THE DIRECTORY RECORDS.                                 */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initialiseDirRec(Signal* signal)
-{
- DirectoryarrayPtr idrDirptr;
- ndbrequire(cdirarraysize > 0);
- for (idrDirptr.i = 0; idrDirptr.i < cdirarraysize; idrDirptr.i++) {
- refresh_watch_dog();
- ptrAss(idrDirptr, directoryarray);
- for (Uint32 i = 0; i <= 255; i++) {
- idrDirptr.p->pagep[i] = RNIL;
- }//for
- }//for
- cdirmemory = 0;
- cfirstfreedir = RNIL;
-}//Dbacc::initialiseDirRec()
-
-/* --------------------------------------------------------------------------------- */
-/* INITIALISE_DIR_RANGE_REC */
-/*               INITIALISES THE DIR_RANGE RECORDS.                                 */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initialiseDirRangeRec(Signal* signal)
-{
- DirRangePtr idrDirRangePtr;
-
- ndbrequire(cdirrangesize > 0);
- for (idrDirRangePtr.i = 0; idrDirRangePtr.i < cdirrangesize; idrDirRangePtr.i++) {
- refresh_watch_dog();
- ptrAss(idrDirRangePtr, dirRange);
- idrDirRangePtr.p->dirArray[0] = idrDirRangePtr.i + 1;
- for (Uint32 i = 1; i < 256; i++) {
- idrDirRangePtr.p->dirArray[i] = RNIL;
- }//for
- }//for
- idrDirRangePtr.i = cdirrangesize - 1;
- ptrAss(idrDirRangePtr, dirRange);
- idrDirRangePtr.p->dirArray[0] = RNIL;
- cfirstfreeDirrange = 0;
-}//Dbacc::initialiseDirRangeRec()
-
-/* --------------------------------------------------------------------------------- */
-/* INITIALISE_FRAG_REC */
-/*               INITIALISES THE FRAGMENT RECORDS.                                  */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initialiseFragRec(Signal* signal)
-{
- FragmentrecPtr regFragPtr;
- ndbrequire(cfragmentsize > 0);
- for (regFragPtr.i = 0; regFragPtr.i < cfragmentsize; regFragPtr.i++) {
- jam();
- refresh_watch_dog();
- ptrAss(regFragPtr, fragmentrec);
- initFragGeneral(regFragPtr);
- regFragPtr.p->nextfreefrag = regFragPtr.i + 1;
- }//for
- regFragPtr.i = cfragmentsize - 1;
- ptrAss(regFragPtr, fragmentrec);
- regFragPtr.p->nextfreefrag = RNIL;
- cfirstfreefrag = 0;
-}//Dbacc::initialiseFragRec()
-
-/* --------------------------------------------------------------------------------- */
-/* INITIALISE_FS_CONNECTION_REC */
-/*               INITIALISES THE FS_CONNECTION RECORDS                              */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initialiseFsConnectionRec(Signal* signal)
-{
- ndbrequire(cfsConnectsize > 0);
- for (fsConnectptr.i = 0; fsConnectptr.i < cfsConnectsize; fsConnectptr.i++) {
- ptrAss(fsConnectptr, fsConnectrec);
- fsConnectptr.p->fsNext = fsConnectptr.i + 1;
- fsConnectptr.p->fsPrev = RNIL;
- fsConnectptr.p->fragrecPtr = RNIL;
- fsConnectptr.p->fsState = WAIT_NOTHING;
- }//for
- fsConnectptr.i = cfsConnectsize - 1;
- ptrAss(fsConnectptr, fsConnectrec);
-  fsConnectptr.p->fsNext = RNIL;  /* INITIALISES THE LAST CONNECT RECORD */
- cfsFirstfreeconnect = 0; /* INITIATES THE FIRST FREE CONNECT RECORD */
-}//Dbacc::initialiseFsConnectionRec()
-
-/* --------------------------------------------------------------------------------- */
-/* INITIALISE_FS_OP_REC */
-/*               INITIALISES THE FS_OP RECORDS                                      */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initialiseFsOpRec(Signal* signal)
-{
- ndbrequire(cfsOpsize > 0);
- for (fsOpptr.i = 0; fsOpptr.i < cfsOpsize; fsOpptr.i++) {
- ptrAss(fsOpptr, fsOprec);
- fsOpptr.p->fsOpnext = fsOpptr.i + 1;
- fsOpptr.p->fsOpfragrecPtr = RNIL;
- fsOpptr.p->fsConptr = RNIL;
- fsOpptr.p->fsOpstate = WAIT_NOTHING;
- }//for
- fsOpptr.i = cfsOpsize - 1;
- ptrAss(fsOpptr, fsOprec);
- fsOpptr.p->fsOpnext = RNIL;
- cfsFirstfreeop = 0;
-}//Dbacc::initialiseFsOpRec()
-
-/* --------------------------------------------------------------------------------- */
-/* INITIALISE_LCP_CONNECTION_REC */
-/*               INITIALISES THE LCP_CONNECTION RECORDS                             */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initialiseLcpConnectionRec(Signal* signal)
-{
- ndbrequire(clcpConnectsize > 0);
- for (lcpConnectptr.i = 0; lcpConnectptr.i < clcpConnectsize; lcpConnectptr.i++) {
- ptrAss(lcpConnectptr, lcpConnectrec);
- lcpConnectptr.p->nextLcpConn = lcpConnectptr.i + 1;
- lcpConnectptr.p->lcpUserptr = RNIL;
- lcpConnectptr.p->rootrecptr = RNIL;
- lcpConnectptr.p->lcpstate = LCP_FREE;
- }//for
- lcpConnectptr.i = clcpConnectsize - 1;
- ptrAss(lcpConnectptr, lcpConnectrec);
- lcpConnectptr.p->nextLcpConn = RNIL;
- cfirstfreelcpConnect = 0;
-}//Dbacc::initialiseLcpConnectionRec()
-
-/* --------------------------------------------------------------------------------- */
-/* INITIALISE_OPERATION_REC */
-/*               INITIALISES THE OPERATION RECORDS.                                 */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initialiseOperationRec(Signal* signal)
-{
- ndbrequire(coprecsize > 0);
- for (operationRecPtr.i = 0; operationRecPtr.i < coprecsize; operationRecPtr.i++) {
- refresh_watch_dog();
- ptrAss(operationRecPtr, operationrec);
- operationRecPtr.p->transactionstate = IDLE;
- operationRecPtr.p->operation = ZUNDEFINED_OP;
- operationRecPtr.p->opState = FREE_OP;
- operationRecPtr.p->nextOp = operationRecPtr.i + 1;
- }//for
- operationRecPtr.i = coprecsize - 1;
- ptrAss(operationRecPtr, operationrec);
- operationRecPtr.p->nextOp = RNIL;
- cfreeopRec = 0;
-}//Dbacc::initialiseOperationRec()
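
The initialise* routines above (and the seize/release routines later in the block) all use the same intrusive free-list layout: each record's next field is chained to the following slot, the last slot is terminated with RNIL, and the head of the free list starts at slot 0; seizing pops the head and releasing pushes a record back. A standalone sketch (not NDB code) of the pattern; the RNIL constant mirrors NDB's 0xffffff00 null-record marker:

// Standalone sketch of the free-list pattern used by the initialise*,
// seize* and release* routines in this block.
#include <cstdio>
#include <vector>

static const unsigned RNIL = 0xffffff00;   // "no record" marker, as in NDB

struct Rec { unsigned next; };

int main()
{
  std::vector<Rec> pool(4);
  for (unsigned i = 0; i < pool.size(); i++)
    pool[i].next = i + 1;                  // chain each slot to its successor
  pool.back().next = RNIL;                 // terminate the list
  unsigned firstFree = 0;                  // head of the free list

  // Seize: pop the head of the free list.
  unsigned seized = firstFree;
  firstFree = pool[seized].next;

  // Release: push the record back onto the free list.
  pool[seized].next = firstFree;
  firstFree = seized;

  std::printf("slot %u is again the head of the free list\n", firstFree);
  return 0;
}
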
-
-/* --------------------------------------------------------------------------------- */
-/* INITIALISE_OVERFLOW_REC */
-/*               INITIALISES THE OVERFLOW RECORDS                                   */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initialiseOverflowRec(Signal* signal)
-{
- OverflowRecordPtr iorOverflowRecPtr;
-
- ndbrequire(coverflowrecsize > 0);
- for (iorOverflowRecPtr.i = 0; iorOverflowRecPtr.i < coverflowrecsize; iorOverflowRecPtr.i++) {
- refresh_watch_dog();
- ptrAss(iorOverflowRecPtr, overflowRecord);
- iorOverflowRecPtr.p->nextfreeoverrec = iorOverflowRecPtr.i + 1;
- }//for
- iorOverflowRecPtr.i = coverflowrecsize - 1;
- ptrAss(iorOverflowRecPtr, overflowRecord);
- iorOverflowRecPtr.p->nextfreeoverrec = RNIL;
- cfirstfreeoverrec = 0;
-}//Dbacc::initialiseOverflowRec()
-
-/* --------------------------------------------------------------------------------- */
-/* INITIALISE_PAGE_REC */
-/*               INITIALISES THE PAGE RECORDS.                                      */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initialisePageRec(Signal* signal)
-{
- ndbrequire(cpagesize > 0);
- cfreepage = 0;
- cfirstfreepage = RNIL;
- cnoOfAllocatedPages = 0;
-}//Dbacc::initialisePageRec()
-
-/* --------------------------------------------------------------------------------- */
-/* INITIALISE_LCP_PAGES */
-/*               INITIALISES THE LCP PAGE RECORDS.                                  */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initialiseLcpPages(Signal* signal)
-{
- Uint32 tilpIndex;
-
- ndbrequire(cnoLcpPages >= (2 * (ZWRITEPAGESIZE + 1)));
- /* --------------------------------------------------------------------------------- */
- /* AN ABSOLUTE MINIMUM IS THAT WE HAVE 16 LCP PAGES TO HANDLE TWO CONCURRENT */
- /* LCP'S ON LOCAL FRAGMENTS. */
- /* --------------------------------------------------------------------------------- */
- ndbrequire(cpagesize >= (cnoLcpPages + 8));
- /* --------------------------------------------------------------------------------- */
- /* THE NUMBER OF PAGES MUST BE AT LEAST 8 PLUS THE NUMBER OF PAGES REQUIRED BY */
- /* THE LOCAL CHECKPOINT PROCESS. THIS NUMBER IS 8 TIMES THE PARALLELISM OF */
- /* LOCAL CHECKPOINTS. */
- /* --------------------------------------------------------------------------------- */
- /* --------------------------------------------------------------------------------- */
- /* WE SET UP A LINKED LIST OF PAGES FOR EXCLUSIVE USE BY LOCAL CHECKPOINTS. */
- /* --------------------------------------------------------------------------------- */
- cfirstfreeLcpPage = RNIL;
- for (tilpIndex = 0; tilpIndex < cnoLcpPages; tilpIndex++) {
- jam();
- seizePage(signal);
- rlpPageptr = spPageptr;
- releaseLcpPage(signal);
- }//for
-}//Dbacc::initialiseLcpPages()
-
-/* --------------------------------------------------------------------------------- */
-/* INITIALISE_ROOTFRAG_REC */
-/*               INITIALISES THE ROOTFRAG RECORDS.                                  */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initialiseRootfragRec(Signal* signal)
-{
- ndbrequire(crootfragmentsize > 0);
- for (rootfragrecptr.i = 0; rootfragrecptr.i < crootfragmentsize; rootfragrecptr.i++) {
- refresh_watch_dog();
- ptrAss(rootfragrecptr, rootfragmentrec);
- rootfragrecptr.p->nextroot = rootfragrecptr.i + 1;
- rootfragrecptr.p->fragmentptr[0] = RNIL;
- rootfragrecptr.p->fragmentptr[1] = RNIL;
- }//for
- rootfragrecptr.i = crootfragmentsize - 1;
- ptrAss(rootfragrecptr, rootfragmentrec);
- rootfragrecptr.p->nextroot = RNIL;
- cfirstfreerootfrag = 0;
-}//Dbacc::initialiseRootfragRec()
-
-/* --------------------------------------------------------------------------------- */
-/* INITIALISE_SCAN_REC */
-/*               INITIALISES THE QUE_SCAN RECORDS.                                  */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initialiseScanRec(Signal* signal)
-{
- ndbrequire(cscanRecSize > 0);
- for (scanPtr.i = 0; scanPtr.i < cscanRecSize; scanPtr.i++) {
- ptrAss(scanPtr, scanRec);
- scanPtr.p->scanNextfreerec = scanPtr.i + 1;
- scanPtr.p->scanState = ScanRec::SCAN_DISCONNECT;
- scanPtr.p->scanTimer = 0;
- scanPtr.p->scanContinuebCounter = 0;
- }//for
- scanPtr.i = cscanRecSize - 1;
- ptrAss(scanPtr, scanRec);
- scanPtr.p->scanNextfreerec = RNIL;
- cfirstFreeScanRec = 0;
-}//Dbacc::initialiseScanRec()
-
-/* --------------------------------------------------------------------------------- */
-/* INITIALISE_SR_VER_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initialiseSrVerRec(Signal* signal)
-{
- ndbrequire(csrVersionRecSize > 0);
- for (srVersionPtr.i = 0; srVersionPtr.i < csrVersionRecSize; srVersionPtr.i++) {
- ptrAss(srVersionPtr, srVersionRec);
- srVersionPtr.p->nextFreeSr = srVersionPtr.i + 1;
- }//for
- srVersionPtr.i = csrVersionRecSize - 1;
- ptrAss(srVersionPtr, srVersionRec);
- srVersionPtr.p->nextFreeSr = RNIL;
- cfirstFreeSrVersionRec = 0;
-}//Dbacc::initialiseSrVerRec()
-
-/* --------------------------------------------------------------------------------- */
-/* INITIALISE_TABLE_REC */
-/*               INITIALISES THE TABLE RECORDS.                                     */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initialiseTableRec(Signal* signal)
-{
- ndbrequire(ctablesize > 0);
- for (tabptr.i = 0; tabptr.i < ctablesize; tabptr.i++) {
- refresh_watch_dog();
- ptrAss(tabptr, tabrec);
- for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
- tabptr.p->fragholder[i] = RNIL;
- tabptr.p->fragptrholder[i] = RNIL;
- }//for
- tabptr.p->noOfKeyAttr = 0;
- tabptr.p->hasCharAttr = 0;
- for (Uint32 k = 0; k < MAX_ATTRIBUTES_IN_INDEX; k++) {
- tabptr.p->keyAttr[k].attributeDescriptor = 0;
- tabptr.p->keyAttr[k].charsetInfo = 0;
- }
- }//for
-}//Dbacc::initialiseTableRec()
-
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* END OF SYSTEM RESTART MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* ADD/DELETE FRAGMENT MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-
-void Dbacc::initRootfragrec(Signal* signal)
-{
- const AccFragReq * const req = (AccFragReq*)&signal->theData[0];
- rootfragrecptr.p->mytabptr = req->tableId;
- rootfragrecptr.p->roothashcheck = req->kValue + req->lhFragBits;
- rootfragrecptr.p->noOfElements = 0;
- rootfragrecptr.p->m_commit_count = 0;
- for (Uint32 i = 0; i < MAX_PARALLEL_SCANS_PER_FRAG; i++) {
- rootfragrecptr.p->scan[i] = RNIL;
- }//for
-}//Dbacc::initRootfragrec()
-
-void Dbacc::execACCFRAGREQ(Signal* signal)
-{
- const AccFragReq * const req = (AccFragReq*)&signal->theData[0];
- jamEntry();
- if (ERROR_INSERTED(3001)) {
- jam();
- addFragRefuse(signal, 1);
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }
- tabptr.i = req->tableId;
-#ifndef VM_TRACE
- // config mismatch - do not crash if release compiled
- if (tabptr.i >= ctablesize) {
- jam();
- addFragRefuse(signal, 640);
- return;
- }
-#endif
- ptrCheckGuard(tabptr, ctablesize, tabrec);
- ndbrequire((req->reqInfo & 0xF) == ZADDFRAG);
- ndbrequire(!getrootfragmentrec(signal, rootfragrecptr, req->fragId));
- if (cfirstfreerootfrag == RNIL) {
- jam();
- addFragRefuse(signal, ZFULL_ROOTFRAGRECORD_ERROR);
- return;
- }//if
- seizeRootfragrec(signal);
- if (!addfragtotab(signal, rootfragrecptr.i, req->fragId)) {
- jam();
- releaseRootFragRecord(signal, rootfragrecptr);
- addFragRefuse(signal, ZFULL_ROOTFRAGRECORD_ERROR);
- return;
- }//if
- initRootfragrec(signal);
- for (Uint32 i = 0; i < 2; i++) {
- jam();
- if (cfirstfreefrag == RNIL) {
- jam();
- addFragRefuse(signal, ZFULL_FRAGRECORD_ERROR);
- return;
- }//if
- seizeFragrec(signal);
- initFragGeneral(fragrecptr);
- initFragAdd(signal, i, rootfragrecptr.i, fragrecptr);
- rootfragrecptr.p->fragmentptr[i] = fragrecptr.i;
- rootfragrecptr.p->fragmentid[i] = fragrecptr.p->myfid;
- if (cfirstfreeDirrange == RNIL) {
- jam();
- addFragRefuse(signal, ZDIR_RANGE_ERROR);
- return;
- } else {
- jam();
- seizeDirrange(signal);
- }//if
- fragrecptr.p->directory = newDirRangePtr.i;
- seizeDirectory(signal);
- if (tresult < ZLIMIT_OF_ERROR) {
- jam();
- newDirRangePtr.p->dirArray[0] = sdDirptr.i;
- } else {
- jam();
- addFragRefuse(signal, tresult);
- return;
- }//if
- seizePage(signal);
- if (tresult > ZLIMIT_OF_ERROR) {
- jam();
- addFragRefuse(signal, tresult);
- return;
- }//if
- sdDirptr.p->pagep[0] = spPageptr.i;
- tipPageId = 0;
- inpPageptr = spPageptr;
- initPage(signal);
- if (cfirstfreeDirrange == RNIL) {
- jam();
- addFragRefuse(signal, ZDIR_RANGE_ERROR);
- return;
- } else {
- jam();
- seizeDirrange(signal);
- }//if
- fragrecptr.p->overflowdir = newDirRangePtr.i;
- seizeDirectory(signal);
- if (tresult < ZLIMIT_OF_ERROR) {
- jam();
- newDirRangePtr.p->dirArray[0] = sdDirptr.i;
- } else {
- jam();
- addFragRefuse(signal, tresult);
- return;
- }//if
- }//for
- Uint32 userPtr = req->userPtr;
- BlockReference retRef = req->userRef;
- rootfragrecptr.p->rootState = ACTIVEROOT;
- AccFragConf * const conf = (AccFragConf*)&signal->theData[0];
-
- conf->userPtr = userPtr;
- conf->rootFragPtr = rootfragrecptr.i;
- conf->fragId[0] = rootfragrecptr.p->fragmentid[0];
- conf->fragId[1] = rootfragrecptr.p->fragmentid[1];
- conf->fragPtr[0] = rootfragrecptr.p->fragmentptr[0];
- conf->fragPtr[1] = rootfragrecptr.p->fragmentptr[1];
- conf->rootHashCheck = rootfragrecptr.p->roothashcheck;
- sendSignal(retRef, GSN_ACCFRAGCONF, signal, AccFragConf::SignalLength, JBB);
-}//Dbacc::execACCFRAGREQ()
-
-void Dbacc::addFragRefuse(Signal* signal, Uint32 errorCode)
-{
- const AccFragReq * const req = (AccFragReq*)&signal->theData[0];
- AccFragRef * const ref = (AccFragRef*)&signal->theData[0];
- Uint32 userPtr = req->userPtr;
- BlockReference retRef = req->userRef;
-
- ref->userPtr = userPtr;
- ref->errorCode = errorCode;
- sendSignal(retRef, GSN_ACCFRAGREF, signal, AccFragRef::SignalLength, JBB);
- return;
-}//Dbacc::addFragRefuse()
-
-void
-Dbacc::execTC_SCHVERREQ(Signal* signal)
-{
- jamEntry();
- if (! assembleFragments(signal)) {
- jam();
- return;
- }
- tabptr.i = signal->theData[0];
- ptrCheckGuard(tabptr, ctablesize, tabrec);
- Uint32 noOfKeyAttr = signal->theData[6];
- ndbrequire(noOfKeyAttr <= MAX_ATTRIBUTES_IN_INDEX);
- Uint32 hasCharAttr = 0;
-
- SegmentedSectionPtr s0Ptr;
- signal->getSection(s0Ptr, 0);
- SectionReader r0(s0Ptr, getSectionSegmentPool());
- Uint32 i = 0;
- while (i < noOfKeyAttr) {
- jam();
- Uint32 attributeDescriptor = ~0;
- Uint32 csNumber = ~0;
- if (! r0.getWord(&attributeDescriptor) ||
- ! r0.getWord(&csNumber)) {
- jam();
- break;
- }
- CHARSET_INFO* cs = 0;
- if (csNumber != 0) {
- cs = all_charsets[csNumber];
- ndbrequire(cs != 0);
- hasCharAttr = 1;
- }
- tabptr.p->keyAttr[i].attributeDescriptor = attributeDescriptor;
- tabptr.p->keyAttr[i].charsetInfo = cs;
- i++;
- }
- ndbrequire(i == noOfKeyAttr);
- releaseSections(signal);
-
- tabptr.p->noOfKeyAttr = noOfKeyAttr;
- tabptr.p->hasCharAttr = hasCharAttr;
-
- // copy char attr flag to each fragment
- for (Uint32 i1 = 0; i1 < MAX_FRAG_PER_NODE; i1++) {
- jam();
- if (tabptr.p->fragptrholder[i1] != RNIL) {
- rootfragrecptr.i = tabptr.p->fragptrholder[i1];
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- for (Uint32 i2 = 0; i2 < 2; i2++) {
- fragrecptr.i = rootfragrecptr.p->fragmentptr[i2];
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- fragrecptr.p->hasCharAttr = hasCharAttr;
- }
- }
- }
-
- // no reply to DICT
-}
-
-void
-Dbacc::execDROP_TAB_REQ(Signal* signal){
- jamEntry();
- DropTabReq* req = (DropTabReq*)signal->getDataPtr();
-
- TabrecPtr tabPtr;
- tabPtr.i = req->tableId;
- ptrCheckGuard(tabPtr, ctablesize, tabrec);
-
- tabPtr.p->tabUserRef = req->senderRef;
- tabPtr.p->tabUserPtr = req->senderData;
-
- signal->theData[0] = ZREL_ROOT_FRAG;
- signal->theData[1] = tabPtr.i;
- sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
-}
-
-void Dbacc::releaseRootFragResources(Signal* signal, Uint32 tableId)
-{
- RootfragmentrecPtr rootPtr;
- TabrecPtr tabPtr;
- tabPtr.i = tableId;
- ptrCheckGuard(tabPtr, ctablesize, tabrec);
- for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
- jam();
- if (tabPtr.p->fragholder[i] != RNIL) {
- jam();
- Uint32 fragIndex;
- rootPtr.i = tabPtr.p->fragptrholder[i];
- ptrCheckGuard(rootPtr, crootfragmentsize, rootfragmentrec);
- if (rootPtr.p->fragmentptr[0] != RNIL) {
- jam();
- fragIndex = rootPtr.p->fragmentptr[0];
- rootPtr.p->fragmentptr[0] = RNIL;
- } else if (rootPtr.p->fragmentptr[1] != RNIL) {
- jam();
- fragIndex = rootPtr.p->fragmentptr[1];
- rootPtr.p->fragmentptr[1] = RNIL;
- } else {
- jam();
- releaseRootFragRecord(signal, rootPtr);
- tabPtr.p->fragholder[i] = RNIL;
- tabPtr.p->fragptrholder[i] = RNIL;
- continue;
- }//if
- releaseFragResources(signal, fragIndex);
- return;
- }//if
- }//for
-
- /**
- * Finished...
- */
- sendFSREMOVEREQ(signal, tableId);
-}//Dbacc::releaseRootFragResources()
-
-void Dbacc::releaseRootFragRecord(Signal* signal, RootfragmentrecPtr rootPtr)
-{
- rootPtr.p->nextroot = cfirstfreerootfrag;
- cfirstfreerootfrag = rootPtr.i;
-}//Dbacc::releaseRootFragRecord()
-
-void Dbacc::releaseFragResources(Signal* signal, Uint32 fragIndex)
-{
- FragmentrecPtr regFragPtr;
- regFragPtr.i = fragIndex;
- ptrCheckGuard(regFragPtr, cfragmentsize, fragmentrec);
- verifyFragCorrect(regFragPtr);
- if (regFragPtr.p->directory != RNIL) {
- jam();
- releaseDirResources(signal, regFragPtr.i, regFragPtr.p->directory, 0);
- regFragPtr.p->directory = RNIL;
- } else if (regFragPtr.p->overflowdir != RNIL) {
- jam();
- releaseDirResources(signal, regFragPtr.i, regFragPtr.p->overflowdir, 0);
- regFragPtr.p->overflowdir = RNIL;
- } else if (regFragPtr.p->firstOverflowRec != RNIL) {
- jam();
- releaseOverflowResources(signal, regFragPtr);
- } else if (regFragPtr.p->firstFreeDirindexRec != RNIL) {
- jam();
- releaseDirIndexResources(signal, regFragPtr);
- } else {
- RootfragmentrecPtr rootPtr;
- jam();
- rootPtr.i = regFragPtr.p->myroot;
- ptrCheckGuard(rootPtr, crootfragmentsize, rootfragmentrec);
- releaseFragRecord(signal, regFragPtr);
- signal->theData[0] = ZREL_ROOT_FRAG;
- signal->theData[1] = rootPtr.p->mytabptr;
- sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
- }//if
-}//Dbacc::releaseFragResources()
-
-void Dbacc::verifyFragCorrect(FragmentrecPtr regFragPtr)
-{
- for (Uint32 i = 0; i < ZWRITEPAGESIZE; i++) {
- jam();
- ndbrequire(regFragPtr.p->datapages[i] == RNIL);
- }//for
- ndbrequire(regFragPtr.p->lockOwnersList == RNIL);
- ndbrequire(regFragPtr.p->firstWaitInQueOp == RNIL);
- ndbrequire(regFragPtr.p->lastWaitInQueOp == RNIL);
- ndbrequire(regFragPtr.p->sentWaitInQueOp == RNIL);
- //ndbrequire(regFragPtr.p->fsConnPtr == RNIL);
- ndbrequire(regFragPtr.p->zeroPagePtr == RNIL);
- ndbrequire(regFragPtr.p->nrWaitWriteUndoExit == 0);
- ndbrequire(regFragPtr.p->sentWaitInQueOp == RNIL);
-}//Dbacc::verifyFragCorrect()
-
-void Dbacc::releaseDirResources(Signal* signal,
- Uint32 fragIndex,
- Uint32 dirIndex,
- Uint32 startIndex)
-{
- DirRangePtr regDirRangePtr;
- regDirRangePtr.i = dirIndex;
- ptrCheckGuard(regDirRangePtr, cdirrangesize, dirRange);
- for (Uint32 i = startIndex; i < 256; i++) {
- jam();
- if (regDirRangePtr.p->dirArray[i] != RNIL) {
- jam();
- Uint32 directoryIndex = regDirRangePtr.p->dirArray[i];
- regDirRangePtr.p->dirArray[i] = RNIL;
- releaseDirectoryResources(signal, fragIndex, dirIndex, (i + 1), directoryIndex);
- return;
- }//if
- }//for
- rdDirRangePtr = regDirRangePtr;
- releaseDirrange(signal);
- signal->theData[0] = ZREL_FRAG;
- signal->theData[1] = fragIndex;
- sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
-}//Dbacc::releaseDirResources()
-
-void Dbacc::releaseDirectoryResources(Signal* signal,
- Uint32 fragIndex,
- Uint32 dirIndex,
- Uint32 startIndex,
- Uint32 directoryIndex)
-{
- DirectoryarrayPtr regDirPtr;
- regDirPtr.i = directoryIndex;
- ptrCheckGuard(regDirPtr, cdirarraysize, directoryarray);
- for (Uint32 i = 0; i < 256; i++) {
- jam();
- if (regDirPtr.p->pagep[i] != RNIL) {
- jam();
- rpPageptr.i = regDirPtr.p->pagep[i];
- ptrCheckGuard(rpPageptr, cpagesize, page8);
- releasePage(signal);
- regDirPtr.p->pagep[i] = RNIL;
- }//if
- }//for
- rdDirptr = regDirPtr;
- releaseDirectory(signal);
- signal->theData[0] = ZREL_DIR;
- signal->theData[1] = fragIndex;
- signal->theData[2] = dirIndex;
- signal->theData[3] = startIndex;
- sendSignal(cownBlockref, GSN_CONTINUEB, signal, 4, JBB);
-}//Dbacc::releaseDirectoryResources()
-
-void Dbacc::releaseOverflowResources(Signal* signal, FragmentrecPtr regFragPtr)
-{
- Uint32 loopCount = 0;
- OverflowRecordPtr regOverflowRecPtr;
- while ((regFragPtr.p->firstOverflowRec != RNIL) &&
- (loopCount < 1)) {
- jam();
- regOverflowRecPtr.i = regFragPtr.p->firstOverflowRec;
- ptrCheckGuard(regOverflowRecPtr, coverflowrecsize, overflowRecord);
- regFragPtr.p->firstOverflowRec = regOverflowRecPtr.p->nextOverRec;
- rorOverflowRecPtr = regOverflowRecPtr;
- releaseOverflowRec(signal);
- loopCount++;
- }//while
- signal->theData[0] = ZREL_FRAG;
- signal->theData[1] = regFragPtr.i;
- sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
-}//Dbacc::releaseOverflowResources()
-
-void Dbacc::releaseDirIndexResources(Signal* signal, FragmentrecPtr regFragPtr)
-{
- Uint32 loopCount = 0;
- OverflowRecordPtr regOverflowRecPtr;
- while ((regFragPtr.p->firstFreeDirindexRec != RNIL) &&
- (loopCount < 1)) {
- jam();
- regOverflowRecPtr.i = regFragPtr.p->firstFreeDirindexRec;
- ptrCheckGuard(regOverflowRecPtr, coverflowrecsize, overflowRecord);
- regFragPtr.p->firstFreeDirindexRec = regOverflowRecPtr.p->nextOverList;
- rorOverflowRecPtr = regOverflowRecPtr;
- releaseOverflowRec(signal);
- loopCount++;
- }//while
- signal->theData[0] = ZREL_FRAG;
- signal->theData[1] = regFragPtr.i;
- sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
-}//Dbacc::releaseDirIndexResources()
-
-void Dbacc::releaseFragRecord(Signal* signal, FragmentrecPtr regFragPtr)
-{
- regFragPtr.p->nextfreefrag = cfirstfreefrag;
- cfirstfreefrag = regFragPtr.i;
- initFragGeneral(regFragPtr);
-}//Dbacc::releaseFragRecord()
-
-void Dbacc::sendFSREMOVEREQ(Signal* signal, Uint32 tableId)
-{
- FsRemoveReq * const fsReq = (FsRemoveReq *)signal->getDataPtrSend();
- fsReq->userReference = cownBlockref;
- fsReq->userPointer = tableId;
- fsReq->fileNumber[0] = tableId;
- fsReq->fileNumber[1] = (Uint32)-1; // Remove all fragments
- fsReq->fileNumber[2] = (Uint32)-1; // Remove all data files within fragment
- fsReq->fileNumber[3] = 255 | // No P-value used here
- (3 << 8) | // Data-files in D3
- (0 << 16) | // Data-files
- (1 << 24); // Version 1 of fileNumber
- fsReq->directory = 1;
- fsReq->ownDirectory = 1;
- sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, FsRemoveReq::SignalLength, JBA);
-}//Dbacc::sendFSREMOVEREQ()
-
-void Dbacc::execFSREMOVECONF(Signal* signal)
-{
- FsConf * const fsConf = (FsConf *)signal->getDataPtrSend();
- TabrecPtr tabPtr;
- tabPtr.i = fsConf->userPointer;
- ptrCheckGuard(tabPtr, ctablesize, tabrec);
-
- DropTabConf * const dropConf = (DropTabConf *)signal->getDataPtrSend();
- dropConf->senderRef = reference();
- dropConf->senderData = tabPtr.p->tabUserPtr;
- dropConf->tableId = tabPtr.i;
- sendSignal(tabPtr.p->tabUserRef, GSN_DROP_TAB_CONF,
- signal, DropTabConf::SignalLength, JBB);
-
- tabPtr.p->tabUserPtr = RNIL;
- tabPtr.p->tabUserRef = 0;
-}//Dbacc::execFSREMOVECONF()
-
-void Dbacc::execFSREMOVEREF(Signal* signal)
-{
- ndbrequire(false);
-}//Dbacc::execFSREMOVEREF()
-
-/* -------------------------------------------------------------------------- */
-/* ADDFRAGTOTAB */
-/* DESCRIPTION: PUTS A FRAGMENT ID AND A POINTER TO ITS RECORD INTO */
-/* THE TABLE ARRAY OF THE TABLE RECORD. */
-/* -------------------------------------------------------------------------- */
-bool Dbacc::addfragtotab(Signal* signal, Uint32 rootIndex, Uint32 fid)
-{
- for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
- jam();
- if (tabptr.p->fragholder[i] == RNIL) {
- jam();
- tabptr.p->fragholder[i] = fid;
- tabptr.p->fragptrholder[i] = rootIndex;
- return true;
- }//if
- }//for
- return false;
-}//Dbacc::addfragtotab()
-
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* END OF ADD/DELETE FRAGMENT MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* CONNECTION MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* ACCSEIZEREQ SEIZE REQ */
-/* SENDER: LQH, LEVEL B */
-/* ENTER ACCSEIZEREQ WITH */
-/* TUSERPTR , CONNECTION PTR OF LQH */
-/* TUSERBLOCKREF BLOCK REFERENCE OF LQH */
-/* ******************--------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* ACCSEIZEREQ SEIZE REQ */
-/* ******************------------------------------+ */
-/* SENDER: LQH, LEVEL B */
-void Dbacc::execACCSEIZEREQ(Signal* signal)
-{
- jamEntry();
- tuserptr = signal->theData[0];
- /* CONNECTION PTR OF LQH */
- tuserblockref = signal->theData[1];
- /* BLOCK REFERENCE OF LQH */
- tresult = 0;
- if (cfreeopRec == RNIL) {
- jam();
- refaccConnectLab(signal);
- return;
- }//if
- seizeOpRec(signal);
- ptrGuard(operationRecPtr);
- operationRecPtr.p->userptr = tuserptr;
- operationRecPtr.p->userblockref = tuserblockref;
- operationRecPtr.p->operation = ZUNDEFINED_OP;
- operationRecPtr.p->transactionstate = IDLE;
- /* ******************************< */
- /* ACCSEIZECONF */
- /* ******************************< */
- signal->theData[0] = tuserptr;
- signal->theData[1] = operationRecPtr.i;
- sendSignal(tuserblockref, GSN_ACCSEIZECONF, signal, 2, JBB);
- return;
-}//Dbacc::execACCSEIZEREQ()
-
-void Dbacc::refaccConnectLab(Signal* signal)
-{
- tresult = ZCONNECT_SIZE_ERROR;
- /* ******************************< */
- /* ACCSEIZEREF */
- /* ******************************< */
- signal->theData[0] = tuserptr;
- signal->theData[1] = tresult;
- sendSignal(tuserblockref, GSN_ACCSEIZEREF, signal, 2, JBB);
- return;
-}//Dbacc::refaccConnectLab()
-
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* END OF CONNECTION MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* EXECUTE OPERATION MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* INIT_OP_REC */
-/* INFORMATION WHICH IS RECEIVED BY ACCKEYREQ WILL BE SAVED */
-/* IN THE OPERATION RECORD. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initOpRec(Signal* signal)
-{
- register Uint32 Treqinfo;
-
- Treqinfo = signal->theData[2];
-
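- /* --------------------------------------------------------------------------------- */
- /* TREQINFO IS DECODED BELOW. JUDGING FROM THE SHIFTS USED, ITS LAYOUT IS: */
- /* BITS 0-2 OPERATION, BIT 3 SIMPLE FLAG (UNUSED HERE), BITS 4-5 LOCK MODE, */
- /* BIT 6 DIRTY FLAG, BITS 7-8 NODE TYPE AND BIT 31 THE ACC LOCK REQUEST MARKER. */
- /* --------------------------------------------------------------------------------- */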
- operationRecPtr.p->hashValue = signal->theData[3];
- operationRecPtr.p->tupkeylen = signal->theData[4];
- operationRecPtr.p->xfrmtupkeylen = signal->theData[4];
- operationRecPtr.p->transId1 = signal->theData[5];
- operationRecPtr.p->transId2 = signal->theData[6];
- operationRecPtr.p->transactionstate = ACTIVE;
- operationRecPtr.p->commitDeleteCheckFlag = ZFALSE;
- operationRecPtr.p->operation = Treqinfo & 0x7;
- /* --------------------------------------------------------------------------------- */
- // opSimple is not used in this version. Is needed for deadlock handling later on.
- /* --------------------------------------------------------------------------------- */
- // operationRecPtr.p->opSimple = (Treqinfo >> 3) & 0x1;
-
- operationRecPtr.p->lockMode = (Treqinfo >> 4) & 0x3;
-
- Uint32 readFlag = (((Treqinfo >> 4) & 0x3) == 0); // Only 1 if Read
- Uint32 dirtyFlag = (((Treqinfo >> 6) & 0x1) == 1); // Only 1 if Dirty
- Uint32 dirtyReadFlag = readFlag & dirtyFlag;
- operationRecPtr.p->dirtyRead = dirtyReadFlag;
-
- operationRecPtr.p->nodeType = (Treqinfo >> 7) & 0x3;
- operationRecPtr.p->fid = fragrecptr.p->myfid;
- operationRecPtr.p->fragptr = fragrecptr.i;
- operationRecPtr.p->nextParallelQue = RNIL;
- operationRecPtr.p->prevParallelQue = RNIL;
- operationRecPtr.p->prevQueOp = RNIL;
- operationRecPtr.p->nextQueOp = RNIL;
- operationRecPtr.p->nextSerialQue = RNIL;
- operationRecPtr.p->prevSerialQue = RNIL;
- operationRecPtr.p->elementPage = RNIL;
- operationRecPtr.p->keyinfoPage = RNIL;
- operationRecPtr.p->lockOwner = ZFALSE;
- operationRecPtr.p->insertIsDone = ZFALSE;
- operationRecPtr.p->elementIsDisappeared = ZFALSE;
- operationRecPtr.p->insertDeleteLen = fragrecptr.p->elementLength;
- operationRecPtr.p->longPagePtr = RNIL;
- operationRecPtr.p->longKeyPageIndex = RNIL;
- operationRecPtr.p->scanRecPtr = RNIL;
-
- // bit to mark lock operation
- operationRecPtr.p->isAccLockReq = (Treqinfo >> 31) & 0x1;
-
- // undo log is not run via ACCKEYREQ
- operationRecPtr.p->isUndoLogReq = 0;
-}//Dbacc::initOpRec()
-
-/* --------------------------------------------------------------------------------- */
-/* SEND_ACCKEYCONF */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::sendAcckeyconf(Signal* signal)
-{
- signal->theData[0] = operationRecPtr.p->userptr;
- signal->theData[1] = operationRecPtr.p->operation;
- signal->theData[2] = operationRecPtr.p->fid;
- signal->theData[3] = operationRecPtr.p->localdata[0];
- signal->theData[4] = operationRecPtr.p->localdata[1];
- signal->theData[5] = fragrecptr.p->localkeylen;
-}//Dbacc::sendAcckeyconf()
-
-
-void Dbacc::ACCKEY_error(Uint32 fromWhere)
-{
- switch(fromWhere) {
- case 0:
- ndbrequire(false);
- case 1:
- ndbrequire(false);
- case 2:
- ndbrequire(false);
- case 3:
- ndbrequire(false);
- case 4:
- ndbrequire(false);
- case 5:
- ndbrequire(false);
- case 6:
- ndbrequire(false);
- case 7:
- ndbrequire(false);
- case 8:
- ndbrequire(false);
- case 9:
- ndbrequire(false);
- default:
- ndbrequire(false);
- }//switch
-}//Dbacc::ACCKEY_error()
-
-/* ******************--------------------------------------------------------------- */
-/* ACCKEYREQ REQUEST FOR INSERT, DELETE, */
-/* READ AND UPDATE A TUPLE. */
-/* SENDER: LQH, LEVEL B */
-/* SIGNAL DATA: OPERATION_REC_PTR, CONNECTION PTR */
-/* TABPTR, TABLE ID = TABLE RECORD POINTER */
-/* TREQINFO, */
-/* THASHVALUE, HASH VALUE OF THE TUPLE */
-/* TKEYLEN, LENGTH OF THE PRIMARY KEYS */
-/* TKEY1, PRIMARY KEY 1 */
-/* TKEY2, PRIMARY KEY 2 */
-/* TKEY3, PRIMARY KEY 3 */
-/* TKEY4, PRIMARY KEY 4 */
-/* ******************--------------------------------------------------------------- */
-void Dbacc::execACCKEYREQ(Signal* signal)
-{
- jamEntry();
- operationRecPtr.i = signal->theData[0]; /* CONNECTION PTR */
- fragrecptr.i = signal->theData[1]; /* FRAGMENT RECORD POINTER */
- if (!((operationRecPtr.i < coprecsize) ||
- (fragrecptr.i < cfragmentsize))) {
- ACCKEY_error(0);
- return;
- }//if
- ptrAss(operationRecPtr, operationrec);
- ptrAss(fragrecptr, fragmentrec);
- ndbrequire(operationRecPtr.p->transactionstate == IDLE);
-
- initOpRec(signal);
- // normalize key if any char attr
- if (! operationRecPtr.p->isAccLockReq && fragrecptr.p->hasCharAttr)
- xfrmKeyData(signal);
-
- /*---------------------------------------------------------------*/
- /* */
- /* WE WILL USE THE HASH VALUE TO LOOK UP THE PROPER MEMORY */
- /* PAGE AND MEMORY PAGE INDEX TO START THE SEARCH WITHIN. */
- /* WE REMEMBER THESE ADDRESSES IN CASE WE LATER NEED TO INSERT */
- /* THE ITEM AFTER NOT FINDING IT. */
- /*---------------------------------------------------------------*/
- getElement(signal);
-
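- /*---------------------------------------------------------------*/
- /* TGE_RESULT FROM GET_ELEMENT: ZTRUE MEANS THE ELEMENT WAS */
- /* FOUND, ZFALSE MEANS IT WAS NOT FOUND, AND ANY OTHER VALUE IS */
- /* AN ERROR CODE THAT IS PASSED ON TO ACCKEYREF BELOW. */
- /*---------------------------------------------------------------*/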
- if (tgeResult == ZTRUE) {
- switch (operationRecPtr.p->operation) {
- case ZREAD:
- case ZUPDATE:
- case ZDELETE:
- case ZWRITE:
- case ZSCAN_OP:
- if (!tgeLocked){
- if(operationRecPtr.p->operation == ZWRITE)
- {
- jam();
- operationRecPtr.p->operation = ZUPDATE;
- }
- sendAcckeyconf(signal);
- if (operationRecPtr.p->dirtyRead == ZFALSE) {
- /*---------------------------------------------------------------*/
- // It is not a dirty read. We proceed by locking and continue with
- // the operation.
- /*---------------------------------------------------------------*/
- Uint32 eh = gePageptr.p->word32[tgeElementptr];
- operationRecPtr.p->scanBits = ElementHeader::getScanBits(eh);
- operationRecPtr.p->hashvaluePart = ElementHeader::getHashValuePart(eh);
- operationRecPtr.p->elementPage = gePageptr.i;
- operationRecPtr.p->elementContainer = tgeContainerptr;
- operationRecPtr.p->elementPointer = tgeElementptr;
- operationRecPtr.p->elementIsforward = tgeForward;
-
- eh = ElementHeader::setLocked(operationRecPtr.i);
- dbgWord32(gePageptr, tgeElementptr, eh);
- gePageptr.p->word32[tgeElementptr] = eh;
-
- insertLockOwnersList(signal , operationRecPtr);
- return;
- } else {
- jam();
- /*---------------------------------------------------------------*/
- // It is a dirty read. We do not lock anything. Set state to
- // IDLE since no COMMIT call will come.
- /*---------------------------------------------------------------*/
- operationRecPtr.p->transactionstate = IDLE;
- operationRecPtr.p->operation = ZUNDEFINED_OP;
- return;
- }//if
- } else {
- jam();
- accIsLockedLab(signal);
- return;
- }//if
- break;
- case ZINSERT:
- jam();
- insertExistElemLab(signal);
- return;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- } else if (tgeResult == ZFALSE) {
- switch (operationRecPtr.p->operation) {
- case ZINSERT:
- case ZWRITE:
- jam();
- // If a write operation makes an insert we switch operation to ZINSERT so
- // that the commit-method knows an insert has been made and updates noOfElements.
- operationRecPtr.p->operation = ZINSERT;
- operationRecPtr.p->insertIsDone = ZTRUE;
- insertelementLab(signal);
- return;
- break;
- case ZREAD:
- case ZUPDATE:
- case ZDELETE:
- case ZSCAN_OP:
- jam();
- acckeyref1Lab(signal, ZREAD_ERROR);
- return;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- } else {
- jam();
- acckeyref1Lab(signal, tgeResult);
- return;
- }//if
- return;
-}//Dbacc::execACCKEYREQ()
-
-void
-Dbacc::xfrmKeyData(Signal* signal)
-{
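- /*
- * Normalise the key for hashing and comparison: key attributes without a
- * charset are copied as-is, while character attributes are run through
- * strnxfrm and zero-padded to a whole number of words. The transformed key
- * replaces the original in signal->theData[7..] and its word length is
- * stored in xfrmtupkeylen.
- */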
- tabptr.i = fragrecptr.p->myTableId;
- ptrCheckGuard(tabptr, ctablesize, tabrec);
-
- Uint32 dst[1024 * MAX_XFRM_MULTIPLY];
- Uint32 dstSize = (sizeof(dst) >> 2);
- Uint32* src = &signal->theData[7];
- const Uint32 noOfKeyAttr = tabptr.p->noOfKeyAttr;
- Uint32 dstPos = 0;
- Uint32 srcPos = 0;
- Uint32 i = 0;
-
- while (i < noOfKeyAttr) {
- const Tabrec::KeyAttr& keyAttr = tabptr.p->keyAttr[i];
-
- Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(keyAttr.attributeDescriptor);
- Uint32 srcWords = (srcBytes + 3) / 4;
- Uint32 dstWords = ~0;
- uchar* dstPtr = (uchar*)&dst[dstPos];
- const uchar* srcPtr = (const uchar*)&src[srcPos];
- CHARSET_INFO* cs = keyAttr.charsetInfo;
-
- if (cs == 0) {
- jam();
- memcpy(dstPtr, srcPtr, srcWords << 2);
- dstWords = srcWords;
- } else {
- jam();
- Uint32 typeId = AttributeDescriptor::getType(keyAttr.attributeDescriptor);
- Uint32 lb, len;
- bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
- ndbrequire(ok);
- Uint32 xmul = cs->strxfrm_multiply;
- if (xmul == 0)
- xmul = 1;
- // see comment in DbtcMain.cpp
- Uint32 dstLen = xmul * (srcBytes - lb);
- ndbrequire(dstLen <= ((dstSize - dstPos) << 2));
- int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
- ndbrequire(n != -1);
- while ((n & 3) != 0)
- dstPtr[n++] = 0;
- dstWords = (n >> 2);
- }
- dstPos += dstWords;
- srcPos += srcWords;
- i++;
- }
- memcpy(src, dst, dstPos << 2);
- operationRecPtr.p->xfrmtupkeylen = dstPos;
-}
-
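-/* --------------------------------------------------------------------------------- */
-/* THE ELEMENT WAS FOUND LOCKED. UNLESS THE OPERATION IS A DIRTY READ IT TRIES TO */
-/* JOIN THE LOCK QUEUES: ZPARALLEL_QUEUE MEANS IT MAY PROCEED AT ONCE, ZSERIAL_QUEUE */
-/* MEANS IT MUST WAIT (THEDATA[0] = RNIL TELLS THE CALLER THE OPERATION IS BLOCKED) */
-/* AND ZWRITE_ERROR ABORTS THE OPERATION WITH ACCKEYREF. */
-/* --------------------------------------------------------------------------------- */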
-void Dbacc::accIsLockedLab(Signal* signal)
-{
- ndbrequire(csystemRestart == ZFALSE);
- queOperPtr.i = ElementHeader::getOpPtrI(gePageptr.p->word32[tgeElementptr]);
- ptrCheckGuard(queOperPtr, coprecsize, operationrec);
- if (operationRecPtr.p->dirtyRead == ZFALSE) {
- Uint32 return_result;
- if (operationRecPtr.p->lockMode == ZREADLOCK) {
- jam();
- priPageptr = gePageptr;
- tpriElementptr = tgeElementptr;
- return_result = placeReadInLockQueue(signal);
- } else {
- jam();
- pwiPageptr = gePageptr;
- tpwiElementptr = tgeElementptr;
- return_result = placeWriteInLockQueue(signal);
- }//if
- if (return_result == ZPARALLEL_QUEUE) {
- jam();
- sendAcckeyconf(signal);
- return;
- } else if (return_result == ZSERIAL_QUEUE) {
- jam();
- signal->theData[0] = RNIL;
- return;
- } else if (return_result == ZWRITE_ERROR) {
- jam();
- acckeyref1Lab(signal, return_result);
- return;
- }//if
- ndbrequire(false);
- } else {
- if (queOperPtr.p->elementIsDisappeared == ZFALSE) {
- jam();
- /*---------------------------------------------------------------*/
- // It is a dirty read. We do not lock anything. Set state to
- // IDLE since no COMMIT call will arrive.
- /*---------------------------------------------------------------*/
- sendAcckeyconf(signal);
- operationRecPtr.p->transactionstate = IDLE;
- operationRecPtr.p->operation = ZUNDEFINED_OP;
- return;
- } else {
- jam();
- /*---------------------------------------------------------------*/
- // The tuple does not exist in the committed world currently.
- // Report read error.
- /*---------------------------------------------------------------*/
- acckeyref1Lab(signal, ZREAD_ERROR);
- return;
- }//if
- }//if
-}//Dbacc::accIsLockedLab()
-
-/* --------------------------------------------------------------------------------- */
-/* I N S E R T E X I S T E L E M E N T */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::insertExistElemLab(Signal* signal)
-{
- if (!tgeLocked){
- jam();
- acckeyref1Lab(signal, ZWRITE_ERROR);/* THE ELEMENT ALREADY EXIST */
- return;
- }//if
- accIsLockedLab(signal);
-}//Dbacc::insertExistElemLab()
-
-/* --------------------------------------------------------------------------------- */
-/* INSERTELEMENT */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::insertelementLab(Signal* signal)
-{
- if (fragrecptr.p->createLcp == ZTRUE) {
- if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_OPERATION) {
- jam();
- acckeyref1Lab(signal, ZTEMPORARY_ACC_UNDO_FAILURE);
- return;
- }//if
- }//if
- if (fragrecptr.p->firstOverflowRec == RNIL) {
- jam();
- allocOverflowPage(signal);
- if (tresult > ZLIMIT_OF_ERROR) {
- jam();
- acckeyref1Lab(signal, tresult);
- return;
- }//if
- }//if
- if (fragrecptr.p->keyLength != operationRecPtr.p->tupkeylen) {
- // historical
- ndbrequire(fragrecptr.p->keyLength == 0);
- }//if
-
- signal->theData[0] = operationRecPtr.p->userptr;
- Uint32 blockNo = refToBlock(operationRecPtr.p->userblockref);
- EXECUTE_DIRECT(blockNo, GSN_LQH_ALLOCREQ, signal, 1);
- jamEntry();
- if (signal->theData[0] != 0) {
- jam();
- Uint32 result_code = signal->theData[0];
- acckeyref1Lab(signal, result_code);
- return;
- }//if
- Uint32 localKey = (signal->theData[1] << MAX_TUPLES_BITS) + signal->theData[2];
-
- insertLockOwnersList(signal, operationRecPtr);
-
- const Uint32 tmp = fragrecptr.p->k + fragrecptr.p->lhfragbits;
- operationRecPtr.p->hashvaluePart =
- (operationRecPtr.p->hashValue >> tmp) & 0xFFFF;
- operationRecPtr.p->scanBits = 0; /* NOT ANY ACTIVE SCAN */
- tidrElemhead = ElementHeader::setLocked(operationRecPtr.i);
- idrPageptr = gdiPageptr;
- tidrPageindex = tgdiPageindex;
- tidrForward = ZTRUE;
- idrOperationRecPtr = operationRecPtr;
- clocalkey[0] = localKey;
- operationRecPtr.p->localdata[0] = localKey;
- /* --------------------------------------------------------------------------------- */
- /* WE SET THE LOCAL KEY TO MINUS ONE TO INDICATE IT IS NOT YET VALID. */
- /* --------------------------------------------------------------------------------- */
- insertElement(signal);
- sendAcckeyconf(signal);
- return;
-}//Dbacc::insertelementLab()
-
-/* --------------------------------------------------------------------------------- */
-/* PLACE_READ_IN_LOCK_QUEUE */
-/* INPUT: OPERATION_REC_PTR OUR OPERATION POINTER */
-/* QUE_OPER_PTR LOCK QUEUE OWNER OPERATION POINTER */
-/* PRI_PAGEPTR PAGE POINTER OF ELEMENT */
-/* TPRI_ELEMENTPTR ELEMENT POINTER OF ELEMENT */
-/* OUTPUT TRESULT = */
-/* ZPARALLEL_QUEUE OPERATION PLACED IN PARALLEL QUEUE */
-/* OPERATION CAN PROCEED NOW. */
-/* ZSERIAL_QUEUE OPERATION PLACED IN SERIAL QUEUE */
-/* ERROR CODE OPERATION NEEDS ABORTING */
-/* THE ELEMENT WAS LOCKED AND WE WANT TO READ THE TUPLE. WE WILL CHECK THE LOCK */
-/* QUEUES TO PERFORM THE PROPER ACTION. */
-/* */
-/* IN THE CODE BELOW THAT HANDLES A LOCKED TUPLE WE ASSUME THAT NEXT_PARALLEL_QUEUE */
-/* AND NEXT_SERIAL_QUEUE ON OPERATION_REC_PTR HAVE ALREADY BEEN INITIALISED TO RNIL. */
-/* WE THEREFORE DO NOT SET THEM AGAIN HERE, EVEN THOUGH DOING SO WOULD AID */
-/* READABILITY. */
-/* --------------------------------------------------------------------------------- */
-Uint32 Dbacc::placeReadInLockQueue(Signal* signal)
-{
- if (getNoParallelTransaction(queOperPtr.p) == 1) {
- if ((queOperPtr.p->transId1 == operationRecPtr.p->transId1) &&
- (queOperPtr.p->transId2 == operationRecPtr.p->transId2)) {
- /* --------------------------------------------------------------------------------- */
- /* WE ARE PERFORMING A READ OPERATION AND THIS TRANSACTION ALREADY OWNS THE LOCK */
- /* ALONE. PUT THE OPERATION LAST IN THE PARALLEL QUEUE. */
- /* --------------------------------------------------------------------------------- */
- jam();
- mlpqOperPtr = queOperPtr;
- moveLastParallelQueue(signal);
- operationRecPtr.p->localdata[0] = queOperPtr.p->localdata[0];
- operationRecPtr.p->localdata[1] = queOperPtr.p->localdata[1];
- operationRecPtr.p->prevParallelQue = mlpqOperPtr.i;
- mlpqOperPtr.p->nextParallelQue = operationRecPtr.i;
- switch (queOperPtr.p->lockMode) {
- case ZREADLOCK:
- jam();
- /*empty*/;
- break;
- default:
- jam();
- /* --------------------------------------------------------------------------------- */
- /* IF THE TRANSACTION PREVIOUSLY SET A WRITE LOCK WE MUST ENSURE THAT ALL */
- /* OPERATIONS IN THE PARALLEL QUEUE HAVE WRITE LOCK MODE TO AVOID STRANGE BUGS.*/
- /* --------------------------------------------------------------------------------- */
- operationRecPtr.p->lockMode = queOperPtr.p->lockMode;
- break;
- }//switch
- return ZPARALLEL_QUEUE;
- }//if
- }//if
- if (queOperPtr.p->nextSerialQue == RNIL) {
- /* --------------------------------------------------------------------------------- */
- /* WE ARE PERFORMING A READ OPERATION AND THERE IS NO SERIAL QUEUE. IF THERE IS NO */
- /* WRITE OPERATION THAT OWNS THE LOCK OR ANY WRITE OPERATION IN THE PARALLEL QUEUE */
- /* IT IS ENOUGH TO CHECK THE LOCK MODE OF THE LEADER IN THE PARALLEL QUEUE. IF IT IS */
- /* A READ LOCK THEN WE PLACE OURSELVES IN THE PARALLEL QUEUE OTHERWISE WE GO ON TO */
- /* PLACE OURSELVES IN THE SERIAL QUEUE. */
- /* --------------------------------------------------------------------------------- */
- switch (queOperPtr.p->lockMode) {
- case ZREADLOCK:
- jam();
- mlpqOperPtr = queOperPtr;
- moveLastParallelQueue(signal);
- operationRecPtr.p->prevParallelQue = mlpqOperPtr.i;
- mlpqOperPtr.p->nextParallelQue = operationRecPtr.i;
- operationRecPtr.p->localdata[0] = queOperPtr.p->localdata[0];
- operationRecPtr.p->localdata[1] = queOperPtr.p->localdata[1];
- return ZPARALLEL_QUEUE;
- default:
- jam();
- queOperPtr.p->nextSerialQue = operationRecPtr.i;
- operationRecPtr.p->prevSerialQue = queOperPtr.i;
- putOpInFragWaitQue(signal);
- break;
- }//switch
- } else {
- jam();
- placeSerialQueueRead(signal);
- }//if
- return ZSERIAL_QUEUE;
-}//Dbacc::placeReadInLockQueue()
-
-/* --------------------------------------------------------------------------------- */
-/* WE WILL CHECK WHETHER THIS TRANSACTION ALREADY OWNS A SPOT IN THE SERIAL QUEUE */
-/* WHOSE PARALLEL QUEUE CONTAINS NO OPERATIONS FROM OTHER TRANSACTIONS. IF SO WE */
-/* WILL INSERT THE OPERATION INTO THAT PARALLEL QUEUE. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::placeSerialQueueRead(Signal* signal)
-{
- readWriteOpPtr.i = queOperPtr.p->nextSerialQue;
- ptrCheckGuard(readWriteOpPtr, coprecsize, operationrec);
- PSQR_LOOP:
- jam();
- if (readWriteOpPtr.p->nextSerialQue == RNIL) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THERE WAS NO PREVIOUS OPERATION IN THIS TRANSACTION WHICH WE COULD PUT IT */
- /* IN THE PARALLEL QUEUE TOGETHER WITH. */
- /* --------------------------------------------------------------------------------- */
- checkOnlyReadEntry(signal);
- return;
- }//if
- if (getNoParallelTransaction(readWriteOpPtr.p) == 1) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THERE WAS ONLY ONE TRANSACTION INVOLVED IN THE PARALLEL QUEUE. IF THIS IS OUR */
- /* TRANSACTION WE CAN STILL GET HOLD OF THE LOCK. */
- /* --------------------------------------------------------------------------------- */
- if ((readWriteOpPtr.p->transId1 == operationRecPtr.p->transId1) &&
- (readWriteOpPtr.p->transId2 == operationRecPtr.p->transId2)) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WE ARE PERFORMING A READ IN THE SAME TRANSACTION IN WHICH WE HAVE ALREADY */
- /* EXECUTED AN OPERATION. INSERT-DELETE, READ-UPDATE, READ-READ, */
- /* UPDATE-UPDATE, UPDATE-DELETE, READ-DELETE, INSERT-READ, INSERT-UPDATE ARE ALLOWED */
- /* COMBINATIONS. A NEW INSERT AFTER A DELETE IS NOT ALLOWED AND SUCH AN INSERT WILL */
- /* GO TO THE SERIAL LOCK QUEUE WHICH IT WILL NOT LEAVE UNTIL A TIME-OUT AND THE */
- /* TRANSACTION IS ABORTED. READS AND UPDATES AFTER DELETES ARE ALSO NOT ALLOWED. */
- /* --------------------------------------------------------------------------------- */
- mlpqOperPtr = readWriteOpPtr;
- moveLastParallelQueue(signal);
- readWriteOpPtr = mlpqOperPtr;
- operationRecPtr.p->prevParallelQue = readWriteOpPtr.i;
- readWriteOpPtr.p->nextParallelQue = operationRecPtr.i;
- operationRecPtr.p->localdata[0] = readWriteOpPtr.p->localdata[0];
- operationRecPtr.p->localdata[1] = readWriteOpPtr.p->localdata[1];
- switch (readWriteOpPtr.p->lockMode) {
- case ZREADLOCK:
- jam();
- /*empty*/;
- break;
- default:
- jam();
- /* --------------------------------------------------------------------------------- */
- /* IF THE TRANSACTION PREVIOUSLY SET A WRITE LOCK WE MUST ENSURE THAT ALL */
- /* OPERATIONS IN THE PARALLEL QUEUE HAVE WRITE LOCK MODE TO AVOID STRANGE BUGS.*/
- /* --------------------------------------------------------------------------------- */
- operationRecPtr.p->lockMode = readWriteOpPtr.p->lockMode;
- break;
- }//switch
- putOpInFragWaitQue(signal);
- return;
- }//if
- }//if
- readWriteOpPtr.i = readWriteOpPtr.p->nextSerialQue;
- ptrCheckGuard(readWriteOpPtr, coprecsize, operationrec);
- goto PSQR_LOOP;
-}//Dbacc::placeSerialQueueRead()
-
-/* --------------------------------------------------------------------------------- */
-/* WE WILL CHECK IF THE LAST ENTRY IN THE SERIAL QUEUE CONTAINS ONLY READ */
-/* OPERATIONS. IF SO WE WILL INSERT IT IN THAT PARALLEL QUEUE. OTHERWISE WE */
-/* WILL PLACE IT AT THE END OF THE SERIAL QUEUE. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::checkOnlyReadEntry(Signal* signal)
-{
- switch (readWriteOpPtr.p->lockMode) {
- case ZREADLOCK:
- jam();
- /* --------------------------------------------------------------------------------- */
- /* SINCE THIS LAST QUEUE ONLY CONTAINS READ LOCKS WE CAN JOIN THE PARALLEL QUEUE AT */
- /* THE END. */
- /* --------------------------------------------------------------------------------- */
- mlpqOperPtr = readWriteOpPtr;
- moveLastParallelQueue(signal);
- readWriteOpPtr = mlpqOperPtr;
- operationRecPtr.p->prevParallelQue = readWriteOpPtr.i;
- readWriteOpPtr.p->nextParallelQue = operationRecPtr.i;
- operationRecPtr.p->localdata[0] = readWriteOpPtr.p->localdata[0];
- operationRecPtr.p->localdata[1] = readWriteOpPtr.p->localdata[1];
- break;
- default:
- jam(); /* PUT THE OPERATION RECORD IN THE SERIAL QUEUE */
- readWriteOpPtr.p->nextSerialQue = operationRecPtr.i;
- operationRecPtr.p->prevSerialQue = readWriteOpPtr.i;
- break;
- }//switch
- putOpInFragWaitQue(signal);
-}//Dbacc::checkOnlyReadEntry()
-
-/* --------------------------------------------------------------------------------- */
-/* GET_NO_PARALLEL_TRANSACTION */
-/* --------------------------------------------------------------------------------- */
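-/* RETURNS 1 IF ALL OPERATIONS IN THE PARALLEL QUEUE STARTING AT OP BELONG TO THE */
-/* SAME TRANSACTION AS OP, OTHERWISE 2. IT IS A SAME-TRANSACTION CHECK RATHER THAN */
-/* A FULL COUNT OF THE TRANSACTIONS INVOLVED. */
-/* --------------------------------------------------------------------------------- */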
-Uint32
-Dbacc::getNoParallelTransaction(const Operationrec * op)
-{
- OperationrecPtr tmp;
-
- tmp.i= op->nextParallelQue;
- Uint32 transId[2] = { op->transId1, op->transId2 };
- while (tmp.i != RNIL)
- {
- jam();
- ptrCheckGuard(tmp, coprecsize, operationrec);
- if (tmp.p->transId1 == transId[0] && tmp.p->transId2 == transId[1])
- tmp.i = tmp.p->nextParallelQue;
- else
- return 2;
- }
- return 1;
-}//Dbacc::getNoParallelTransaction()
-
-void Dbacc::moveLastParallelQueue(Signal* signal)
-{
- while (mlpqOperPtr.p->nextParallelQue != RNIL) {
- jam();
- mlpqOperPtr.i = mlpqOperPtr.p->nextParallelQue;
- ptrCheckGuard(mlpqOperPtr, coprecsize, operationrec);
- }//while
-}//Dbacc::moveLastParallelQueue()
-
-void Dbacc::moveLastParallelQueueWrite(Signal* signal)
-{
- /* --------------------------------------------------------------------------------- */
- /* ENSURE THAT ALL OPERATIONS HAVE LOCK MODE SET TO WRITE SINCE WE INSERT A */
- /* WRITE LOCK INTO THE PARALLEL QUEUE. */
- /* --------------------------------------------------------------------------------- */
- while (mlpqOperPtr.p->nextParallelQue != RNIL) {
- jam();
- mlpqOperPtr.p->lockMode = operationRecPtr.p->lockMode;
- mlpqOperPtr.i = mlpqOperPtr.p->nextParallelQue;
- ptrCheckGuard(mlpqOperPtr, coprecsize, operationrec);
- }//while
- mlpqOperPtr.p->lockMode = operationRecPtr.p->lockMode;
-}//Dbacc::moveLastParallelQueueWrite()
-
-/* --------------------------------------------------------------------------------- */
-/* PLACE_WRITE_IN_LOCK_QUEUE */
-/* INPUT: OPERATION_REC_PTR OUR OPERATION POINTER */
-/* QUE_OPER_PTR LOCK QUEUE OWNER OPERATION POINTER */
-/* PWI_PAGEPTR PAGE POINTER OF ELEMENT */
-/* TPWI_ELEMENTPTR ELEMENT POINTER OF ELEMENT */
-/* OUTPUT TRESULT = */
-/* ZPARALLEL_QUEUE OPERATION PLACED IN PARALLEL QUEUE */
-/* OPERATION CAN PROCEED NOW. */
-/* ZSERIAL_QUEUE OPERATION PLACED IN SERIAL QUEUE */
-/* ERROR CODE OPERATION NEEDS ABORTING */
-/* --------------------------------------------------------------------------------- */
-Uint32 Dbacc::placeWriteInLockQueue(Signal* signal)
-{
- if (!((getNoParallelTransaction(queOperPtr.p) == 1) &&
- (queOperPtr.p->transId1 == operationRecPtr.p->transId1) &&
- (queOperPtr.p->transId2 == operationRecPtr.p->transId2))) {
- jam();
- placeSerialQueueWrite(signal);
- return ZSERIAL_QUEUE;
- }//if
-
- /*
- WE ARE PERFORMING AN EXCLUSIVE READ, INSERT, UPDATE OR DELETE IN THE SAME
- TRANSACTION IN WHICH WE HAVE PREVIOUSLY EXECUTED AN OPERATION.
- Read-All, Update-All, Insert-All and Delete-Insert are allowed
- combinations.
- Delete-Read, Delete-Update and Delete-Delete are not allowed
- combinations and will result in a tuple-not-found error.
- */
- mlpqOperPtr = queOperPtr;
- moveLastParallelQueueWrite(signal);
-
- if (operationRecPtr.p->operation == ZINSERT &&
- mlpqOperPtr.p->operation != ZDELETE){
- jam();
- return ZWRITE_ERROR;
- }//if
-
- if(operationRecPtr.p->operation == ZWRITE)
- {
- operationRecPtr.p->operation =
- (mlpqOperPtr.p->operation == ZDELETE) ? ZINSERT : ZUPDATE;
- }
-
- operationRecPtr.p->localdata[0] = queOperPtr.p->localdata[0];
- operationRecPtr.p->localdata[1] = queOperPtr.p->localdata[1];
- operationRecPtr.p->prevParallelQue = mlpqOperPtr.i;
- mlpqOperPtr.p->nextParallelQue = operationRecPtr.i;
- return ZPARALLEL_QUEUE;
-}//Dbacc::placeWriteInLockQueue()
-
-/* --------------------------------------------------------------------------------- */
-/* WE HAVE TO PLACE IT SOMEWHERE IN THE SERIAL QUEUE INSTEAD. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::placeSerialQueueWrite(Signal* signal)
-{
- readWriteOpPtr = queOperPtr;
- PSQW_LOOP:
- if (readWriteOpPtr.p->nextSerialQue == RNIL) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WE COULD NOT PUT IN ANY PARALLEL QUEUE. WE MUST PUT IT LAST IN THE SERIAL QUEUE. */
- /* --------------------------------------------------------------------------------- */
- readWriteOpPtr.p->nextSerialQue = operationRecPtr.i;
- operationRecPtr.p->prevSerialQue = readWriteOpPtr.i;
- putOpInFragWaitQue(signal);
- return;
- }//if
- readWriteOpPtr.i = readWriteOpPtr.p->nextSerialQue;
- ptrCheckGuard(readWriteOpPtr, coprecsize, operationrec);
- if (getNoParallelTransaction(readWriteOpPtr.p) == 1) {
- /* --------------------------------------------------------------------------------- */
- /* THERE WAS ONLY ONE TRANSACTION INVOLVED IN THE PARALLEL QUEUE. IF THIS IS OUR */
- /* TRANSACTION WE CAN STILL GET HOLD OF THE LOCK. */
- /* --------------------------------------------------------------------------------- */
- if ((readWriteOpPtr.p->transId1 == operationRecPtr.p->transId1) &&
- (readWriteOpPtr.p->transId2 == operationRecPtr.p->transId2)) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WE ARE PERFORMING AN UPDATE OR DELETE IN THE SAME TRANSACTION IN WHICH WE HAVE */
- /* ALREADY EXECUTED AN OPERATION. INSERT-DELETE, READ-UPDATE, READ-READ, */
- /* UPDATE-UPDATE, UPDATE-DELETE, READ-DELETE, INSERT-READ, INSERT-UPDATE ARE ALLOWED */
- /* COMBINATIONS. A NEW INSERT AFTER A DELETE IS NOT ALLOWED AND SUCH AN INSERT WILL */
- /* GO TO THE SERIAL LOCK QUEUE WHICH IT WILL NOT LEAVE UNTIL A TIME-OUT AND THE */
- /* TRANSACTION IS ABORTED. READS AND UPDATES AFTER DELETES ARE ALSO NOT ALLOWED. */
- /* --------------------------------------------------------------------------------- */
- mlpqOperPtr = readWriteOpPtr;
- moveLastParallelQueueWrite(signal);
- readWriteOpPtr = mlpqOperPtr;
- operationRecPtr.p->prevParallelQue = readWriteOpPtr.i;
- readWriteOpPtr.p->nextParallelQue = operationRecPtr.i;
- operationRecPtr.p->localdata[0] = readWriteOpPtr.p->localdata[0];
- operationRecPtr.p->localdata[1] = readWriteOpPtr.p->localdata[1];
- putOpInFragWaitQue(signal);
- return;
- }//if
- }//if
- goto PSQW_LOOP;
-}//Dbacc::placeSerialQueueWrite()
-
-/* ------------------------------------------------------------------------- */
-/* ACC KEYREQ END */
-/* ------------------------------------------------------------------------- */
-void Dbacc::acckeyref1Lab(Signal* signal, Uint32 result_code)
-{
- if (operationRecPtr.p->keyinfoPage != RNIL) {
- jam();
- rpPageptr.i = operationRecPtr.p->keyinfoPage;
- ptrCheckGuard(rpPageptr, cpagesize, page8);
- releasePage(signal);
- operationRecPtr.p->keyinfoPage = RNIL;
- }//if
- operationRecPtr.p->transactionstate = WAIT_COMMIT_ABORT;
- /* ************************<< */
- /* ACCKEYREF */
- /* ************************<< */
- signal->theData[0] = cminusOne;
- signal->theData[1] = result_code;
- return;
-}//Dbacc::acckeyref1Lab()
-
-/* ******************--------------------------------------------------------------- */
-/* ACCMINUPDATE UPDATE LOCAL KEY REQ */
-/* DESCRIPTION: UPDATES THE LOCAL KEY OF AN ELEMENT IN THE HASH TABLE */
-/* THIS SIGNAL IS EXPECTED AFTER ANY INSERT REQ */
-/* ENTER ACCMINUPDATE WITH SENDER: LQH, LEVEL B */
-/* OPERATION_REC_PTR, OPERATION RECORD PTR */
-/* CLOCALKEY(0), LOCAL KEY 1 */
-/* CLOCALKEY(1) LOCAL KEY 2 */
-/* ******************--------------------------------------------------------------- */
-void Dbacc::execACCMINUPDATE(Signal* signal)
-{
- Page8Ptr ulkPageidptr;
- Uint32 tulkLocalPtr;
- Uint32 tlocalkey1, tlocalkey2;
- Uint32 TlogStart;
-
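- //----------------------------------------------------------
- // The local key words lie next to the element header in the
- // direction given by elementIsforward, so elementPointer +
- // elementIsforward addresses the first local key word that
- // is updated below.
- //----------------------------------------------------------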
- jamEntry();
- operationRecPtr.i = signal->theData[0];
- tlocalkey1 = signal->theData[1];
- tlocalkey2 = signal->theData[2];
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
- if (operationRecPtr.p->transactionstate == ACTIVE) {
- fragrecptr.i = operationRecPtr.p->fragptr;
- ulkPageidptr.i = operationRecPtr.p->elementPage;
- tulkLocalPtr = operationRecPtr.p->elementPointer + operationRecPtr.p->elementIsforward;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- ptrCheckGuard(ulkPageidptr, cpagesize, page8);
- if (fragrecptr.p->createLcp == ZTRUE) {
- //----------------------------------------------------------
- // To avoid undo log the element header we take care to only
- // undo log the local key part.
- //----------------------------------------------------------
- if (operationRecPtr.p->elementIsforward == 1) {
- jam();
- TlogStart = tulkLocalPtr;
- } else {
- jam();
- TlogStart = tulkLocalPtr - fragrecptr.p->localkeylen + 1;
- }//if
- datapageptr.p = ulkPageidptr.p;
- cundoinfolength = fragrecptr.p->localkeylen;
- cundoElemIndex = TlogStart;
- undoWritingProcess(signal);
- }//if
- dbgWord32(ulkPageidptr, tulkLocalPtr, tlocalkey1);
- arrGuard(tulkLocalPtr, 2048);
- ulkPageidptr.p->word32[tulkLocalPtr] = tlocalkey1;
- operationRecPtr.p->localdata[0] = tlocalkey1;
- if (fragrecptr.p->localkeylen == 1) {
- return;
- } else if (fragrecptr.p->localkeylen == 2) {
- jam();
- tulkLocalPtr = tulkLocalPtr + operationRecPtr.p->elementIsforward;
- operationRecPtr.p->localdata[1] = tlocalkey2;
- dbgWord32(ulkPageidptr, tulkLocalPtr, tlocalkey2);
- arrGuard(tulkLocalPtr, 2048);
- ulkPageidptr.p->word32[tulkLocalPtr] = tlocalkey2;
- return;
- } else {
- jam();
- }//if
- }//if
- ndbrequire(false);
-}//Dbacc::execACCMINUPDATE()
-
-/* ******************--------------------------------------------------------------- */
-/* ACC_COMMITREQ COMMIT TRANSACTION */
-/* SENDER: LQH, LEVEL B */
-/* INPUT: OPERATION_REC_PTR , */
-/* ******************--------------------------------------------------------------- */
-void Dbacc::execACC_COMMITREQ(Signal* signal)
-{
- Uint8 Toperation;
- jamEntry();
- operationRecPtr.i = signal->theData[0];
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
- ndbrequire(operationRecPtr.p->transactionstate == ACTIVE);
- fragrecptr.i = operationRecPtr.p->fragptr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- commitOperation(signal);
- Toperation = operationRecPtr.p->operation;
- operationRecPtr.p->transactionstate = IDLE;
- operationRecPtr.p->operation = ZUNDEFINED_OP;
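- /*------------------------------------------------------------------------*/
- /* FOR OPERATIONS OTHER THAN READ WE UPDATE THE COMMIT COUNT, THE ELEMENT */
- /* COUNT AND THE SLACK OF THE FRAGMENT: A DELETE MAY TRIGGER A JOIN */
- /* (SHRINK) CHECK AND AN INSERT MAY TRIGGER AN EXPAND CHECK OF THE BUCKETS.*/
- /*------------------------------------------------------------------------*/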
- if(Toperation != ZREAD){
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- rootfragrecptr.p->m_commit_count++;
- if (Toperation != ZINSERT) {
- if (Toperation != ZDELETE) {
- return;
- } else {
- jam();
- rootfragrecptr.p->noOfElements--;
- fragrecptr.p->slack += operationRecPtr.p->insertDeleteLen;
- if (fragrecptr.p->slack > fragrecptr.p->slackCheck) {
- /* TIME FOR JOIN BUCKETS PROCESS */
- if (fragrecptr.p->expandCounter > 0) {
- if (fragrecptr.p->expandFlag < 2) {
- jam();
- signal->theData[0] = fragrecptr.i;
- signal->theData[1] = fragrecptr.p->p;
- signal->theData[2] = fragrecptr.p->maxp;
- signal->theData[3] = fragrecptr.p->expandFlag;
- fragrecptr.p->expandFlag = 2;
- sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 4, JBB);
- }//if
- }//if
- }//if
- }//if
- } else {
- jam(); /* EXPAND PROCESS HANDLING */
- rootfragrecptr.p->noOfElements++;
- fragrecptr.p->slack -= operationRecPtr.p->insertDeleteLen;
- if (fragrecptr.p->slack >= (1u << 31)) {
- /* THIS MEANS THAT SLACK HAS BECOME NEGATIVE (UNSIGNED WRAP-AROUND) */
- if (fragrecptr.p->expandFlag == 0) {
- jam();
- fragrecptr.p->expandFlag = 2;
- signal->theData[0] = fragrecptr.i;
- signal->theData[1] = fragrecptr.p->p;
- signal->theData[2] = fragrecptr.p->maxp;
- sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB);
- }//if
- }//if
- }//if
- }
- return;
-}//Dbacc::execACC_COMMITREQ()
-
-/* ******************--------------------------------------------------------------- */
-/* ACC ABORT REQ ABORT ALL OPERATION OF THE TRANSACTION */
-/* ******************------------------------------+ */
-/* SENDER: LQH, LEVEL B */
-/* ******************--------------------------------------------------------------- */
-/* ACC ABORT REQ ABORT TRANSACTION */
-/* ******************------------------------------+ */
-/* SENDER: LQH, LEVEL B */
-void Dbacc::execACC_ABORTREQ(Signal* signal)
-{
- jamEntry();
- accAbortReqLab(signal, true);
-}//Dbacc::execACC_ABORTREQ()
-
-void Dbacc::accAbortReqLab(Signal* signal, bool sendConf)
-{
- operationRecPtr.i = signal->theData[0];
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
- tresult = 0; /* ZFALSE */
- if ((operationRecPtr.p->transactionstate == ACTIVE) ||
- (operationRecPtr.p->transactionstate == WAIT_COMMIT_ABORT)) {
- jam();
- fragrecptr.i = operationRecPtr.p->fragptr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- operationRecPtr.p->transactionstate = ABORT;
- abortOperation(signal);
- } else {
- ndbrequire(operationRecPtr.p->transactionstate == IDLE);
- jam();
- }//if
- operationRecPtr.p->transactionstate = IDLE;
- operationRecPtr.p->operation = ZUNDEFINED_OP;
- if (! sendConf)
- return;
- signal->theData[0] = operationRecPtr.p->userptr;
- sendSignal(operationRecPtr.p->userblockref, GSN_ACC_ABORTCONF, signal, 1, JBB);
- return;
-}//Dbacc::accAbortReqLab()
-
-/*
- * Lock or unlock tuple.
- */
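-/*
- * For lock requests an operation record is seized and a read with lock is
- * executed via ACCKEYREQ (ZSCAN_OP, with the local key passed in place of
- * the primary key). The result is translated below: theData[0] < RNIL
- * means success, == RNIL means the lock is blocked and == (UintR)-1 means
- * the request was refused.
- */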
-void Dbacc::execACC_LOCKREQ(Signal* signal)
-{
- jamEntry();
- AccLockReq* sig = (AccLockReq*)signal->getDataPtrSend();
- AccLockReq reqCopy = *sig;
- AccLockReq* const req = &reqCopy;
- Uint32 lockOp = (req->requestInfo & 0xFF);
- if (lockOp == AccLockReq::LockShared ||
- lockOp == AccLockReq::LockExclusive) {
- jam();
- // find table
- tabptr.i = req->tableId;
- ptrCheckGuard(tabptr, ctablesize, tabrec);
- // find fragment (TUX will know it)
- if (req->fragPtrI == RNIL) {
- for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
- jam();
- if (tabptr.p->fragptrholder[i] != RNIL) {
- rootfragrecptr.i = tabptr.p->fragptrholder[i];
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- if (rootfragrecptr.p->fragmentid[0] == req->fragId) {
- jam();
- req->fragPtrI = rootfragrecptr.p->fragmentptr[0];
- break;
- }
- if (rootfragrecptr.p->fragmentid[1] == req->fragId) {
- jam();
- req->fragPtrI = rootfragrecptr.p->fragmentptr[1];
- break;
- }
- }
- }
- }
- fragrecptr.i = req->fragPtrI;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- ndbrequire(req->fragId == fragrecptr.p->myfid);
- // caller must be explicit here
- ndbrequire(req->accOpPtr == RNIL);
- // seize operation to hold the lock
- if (cfreeopRec != RNIL) {
- jam();
- seizeOpRec(signal);
- // init as in ACCSEIZEREQ
- operationRecPtr.p->userptr = req->userPtr;
- operationRecPtr.p->userblockref = req->userRef;
- operationRecPtr.p->operation = ZUNDEFINED_OP;
- operationRecPtr.p->transactionstate = IDLE;
- // do read with lock via ACCKEYREQ
- Uint32 lockMode = (lockOp == AccLockReq::LockShared) ? 0 : 1;
- Uint32 opCode = ZSCAN_OP;
- signal->theData[0] = operationRecPtr.i;
- signal->theData[1] = fragrecptr.i;
- signal->theData[2] = opCode | (lockMode << 4) | (1u << 31);
- signal->theData[3] = req->hashValue;
- signal->theData[4] = 1; // fake primKeyLen
- signal->theData[5] = req->transId1;
- signal->theData[6] = req->transId2;
- // enter local key in place of PK
- signal->theData[7] = req->tupAddr;
- EXECUTE_DIRECT(DBACC, GSN_ACCKEYREQ, signal, 8);
- // translate the result
- if (signal->theData[0] < RNIL) {
- jam();
- req->returnCode = AccLockReq::Success;
- req->accOpPtr = operationRecPtr.i;
- } else if (signal->theData[0] == RNIL) {
- jam();
- req->returnCode = AccLockReq::IsBlocked;
- req->accOpPtr = operationRecPtr.i;
- } else {
- ndbrequire(signal->theData[0] == (UintR)-1);
- releaseOpRec(signal);
- req->returnCode = AccLockReq::Refused;
- req->accOpPtr = RNIL;
- }
- } else {
- jam();
- req->returnCode = AccLockReq::NoFreeOp;
- }
- *sig = *req;
- return;
- }
- if (lockOp == AccLockReq::Unlock) {
- jam();
- // do unlock via ACC_COMMITREQ (immediate)
- signal->theData[0] = req->accOpPtr;
- EXECUTE_DIRECT(DBACC, GSN_ACC_COMMITREQ, signal, 1);
- releaseOpRec(signal);
- req->returnCode = AccLockReq::Success;
- *sig = *req;
- return;
- }
- if (lockOp == AccLockReq::Abort) {
- jam();
- // do abort via ACC_ABORTREQ (immediate)
- signal->theData[0] = req->accOpPtr;
- accAbortReqLab(signal, false);
- releaseOpRec(signal);
- req->returnCode = AccLockReq::Success;
- *sig = *req;
- return;
- }
- if (lockOp == AccLockReq::AbortWithConf) {
- jam();
- // do abort via ACC_ABORTREQ (with conf signal)
- signal->theData[0] = req->accOpPtr;
- accAbortReqLab(signal, true);
- releaseOpRec(signal);
- req->returnCode = AccLockReq::Success;
- *sig = *req;
- return;
- }
- ndbrequire(false);
-}
-
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* END OF EXECUTE OPERATION MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* MODULE: INSERT */
-/* THE FOLLOWING SUBROUTINES ARE ONLY USED BY INSERT_ELEMENT. THIS */
-/* ROUTINE IS THE SOLE INTERFACE TO INSERT ELEMENTS INTO THE INDEX. */
-/* CURRENT USERS ARE INSERT REQUESTS, EXPAND CONTAINER AND SHRINK */
-/* CONTAINER. */
-/* */
-/* THE FOLLOWING SUBROUTINES ARE INCLUDED IN THIS MODULE: */
-/* INSERT_ELEMENT */
-/* INSERT_CONTAINER */
-/* ADDNEWCONTAINER */
-/* GETFREELIST */
-/* INCREASELISTCONT */
-/* SEIZE_LEFTLIST */
-/* SEIZE_RIGHTLIST */
-/* */
-/* THESE ROUTINES ARE ONLY USED BY THIS MODULE AND BY NO ONE ELSE. */
-/* ALSO THE ROUTINES MAKE NO USE OF ROUTINES IN OTHER MODULES. */
-/* TAKE_REC_OUT_OF_FREE_OVERPAGE AND RELEASE_OVERFLOW_REC ARE */
-/* EXCEPTIONS TO THIS RULE. */
-/* */
-/* THE ONLY SHORT-LIVED VARIABLES USED IN OTHER PARTS OF THE BLOCK ARE */
-/* THOSE DEFINED AS INPUT AND OUTPUT IN INSERT_ELEMENT */
-/* SHORT-LIVED VARIABLES INCLUDE TEMPORARY VARIABLES, COMMON VARIABLES */
-/* AND POINTER VARIABLES. */
-/* THE ONLY EXCEPTION TO THIS RULE IS FRAGRECPTR WHICH POINTS TO THE */
-/* FRAGMENT RECORD. THIS IS MORE OR LESS STATIC DURING A SIGNAL */
-/* EXECUTION. */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* INSERT_ELEMENT */
-/* INPUT: */
-/* IDR_PAGEPTR (POINTER TO THE ACTIVE PAGE REC) */
-/* TIDR_PAGEINDEX (INDEX OF THE CONTAINER) */
-/* TIDR_FORWARD (DIRECTION FORWARD OR BACKWARD) */
-/* TIDR_ELEMHEAD (HEADER OF ELEMENT TO BE INSERTED */
-/* CIDR_KEYS(ARRAY OF TUPLE KEYS) */
-/* CLOCALKEY(ARRAY OF LOCAL KEYS). */
-/* FRAGRECPTR */
-/* IDR_OPERATION_REC_PTR */
-/* TIDR_KEY_LEN */
-/* */
-/* OUTPUT: */
-/* TIDR_PAGEINDEX (PAGE INDEX OF INSERTED ELEMENT) */
-/* IDR_PAGEPTR (PAGE POINTER OF INSERTED ELEMENT) */
-/* TIDR_FORWARD (CONTAINER DIRECTION OF INSERTED ELEMENT) */
-/* NONE */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::insertElement(Signal* signal)
-{
- DirRangePtr inrOverflowrangeptr;
- DirectoryarrayPtr inrOverflowDirptr;
- OverflowRecordPtr inrOverflowRecPtr;
- Page8Ptr inrNewPageptr;
- Uint32 tinrNextSamePage;
- Uint32 tinrTmp;
-
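- /* --------------------------------------------------------------------------------- */
- /* THE CONTAINER HEADER IS DECODED BELOW AS FOLLOWS: BITS 7-8 GIVE THE TYPE OF THE */
- /* NEXT CONTAINER (0 = NONE, OTHERWISE ZLEFT OR ZRIGHT), BIT 9 SAYS WHETHER THE */
- /* NEXT CONTAINER IS ON THE SAME PAGE AND BITS 0-6 GIVE ITS PAGE INDEX. */
- /* --------------------------------------------------------------------------------- */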
- do {
- insertContainer(signal);
- if (tidrResult != ZFALSE) {
- jam();
- return;
- /* INSERTION IS DONE, OR */
- /* AN ERROR IS DETECTED */
- }//if
- if (((tidrContainerhead >> 7) & 0x3) != 0) {
- tinrNextSamePage = (tidrContainerhead >> 9) & 0x1; /* CHECK BIT FOR CHECKING WHERE */
- /* THE NEXT CONTAINER IS IN THE SAME PAGE */
- tidrPageindex = tidrContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */
- if (((tidrContainerhead >> 7) & 3) == ZLEFT) {
- jam();
- tidrForward = ZTRUE;
- } else if (((tidrContainerhead >> 7) & 3) == ZRIGHT) {
- jam();
- tidrForward = cminusOne;
- } else {
- ndbrequire(false);
- return;
- }//if
- if (tinrNextSamePage == ZFALSE) {
- jam(); /* NEXT CONTAINER IS IN AN OVERFLOW PAGE */
- tinrTmp = idrPageptr.p->word32[tidrContainerptr + 1];
- inrOverflowrangeptr.i = fragrecptr.p->overflowdir;
- ptrCheckGuard(inrOverflowrangeptr, cdirrangesize, dirRange);
- arrGuard((tinrTmp >> 8), 256);
- inrOverflowDirptr.i = inrOverflowrangeptr.p->dirArray[tinrTmp >> 8];
- ptrCheckGuard(inrOverflowDirptr, cdirarraysize, directoryarray);
- idrPageptr.i = inrOverflowDirptr.p->pagep[tinrTmp & 0xff];
- ptrCheckGuard(idrPageptr, cpagesize, page8);
- }//if
- ndbrequire(tidrPageindex < ZEMPTYLIST);
- } else {
- break;
- }//if
- } while (1);
- gflPageptr.p = idrPageptr.p;
- getfreelist(signal);
- if (tgflPageindex == ZEMPTYLIST) {
- jam();
- /* NO FREE BUFFER IS FOUND */
- if (fragrecptr.p->firstOverflowRec == RNIL) {
- jam();
- allocOverflowPage(signal);
- ndbrequire(tresult <= ZLIMIT_OF_ERROR);
- }//if
- inrOverflowRecPtr.i = fragrecptr.p->firstOverflowRec;
- ptrCheckGuard(inrOverflowRecPtr, coverflowrecsize, overflowRecord);
- inrNewPageptr.i = inrOverflowRecPtr.p->overpage;
- ptrCheckGuard(inrNewPageptr, cpagesize, page8);
- gflPageptr.p = inrNewPageptr.p;
- getfreelist(signal);
- ndbrequire(tgflPageindex != ZEMPTYLIST);
- tancNext = 0;
- } else {
- jam();
- inrNewPageptr = idrPageptr;
- tancNext = 1;
- }//if
- tslUpdateHeader = ZTRUE;
- tslPageindex = tgflPageindex;
- slPageptr.p = inrNewPageptr.p;
- if (tgflBufType == ZLEFT) {
- seizeLeftlist(signal);
- tidrForward = ZTRUE;
- } else {
- seizeRightlist(signal);
- tidrForward = cminusOne;
- }//if
- tancPageindex = tgflPageindex;
- tancPageid = inrNewPageptr.p->word32[ZPOS_PAGE_ID];
- tancBufType = tgflBufType;
- tancContainerptr = tidrContainerptr;
- ancPageptr.p = idrPageptr.p;
- addnewcontainer(signal);
-
- idrPageptr = inrNewPageptr;
- tidrPageindex = tgflPageindex;
- insertContainer(signal);
- ndbrequire(tidrResult == ZTRUE);
-}//Dbacc::insertElement()
-
-/* --------------------------------------------------------------------------------- */
-/* INSERT_CONTAINER */
-/* INPUT: */
-/* IDR_PAGEPTR (POINTER TO THE ACTIVE PAGE REC) */
-/* TIDR_PAGEINDEX (INDEX OF THE CONTAINER) */
-/* TIDR_FORWARD (DIRECTION FORWARD OR BACKWARD) */
-/* TIDR_ELEMHEAD (HEADER OF ELEMENT TO BE INSERTED */
-/* CKEYS(ARRAY OF TUPLE KEYS) */
-/* CLOCALKEY(ARRAY OF LOCAL KEYS). */
-/* TIDR_KEY_LEN */
-/* FRAGRECPTR */
-/* IDR_OPERATION_REC_PTR */
-/* OUTPUT: */
-/* TIDR_RESULT (ZTRUE FOR SUCCESS AND ZFALSE OTHERWISE) */
-/* TIDR_CONTAINERHEAD (HEADER OF CONTAINER) */
-/* TIDR_CONTAINERPTR (POINTER TO CONTAINER HEADER) */
-/* */
-/* DESCRIPTION: */
-/* THE FREE AREA OF THE CONTAINER WILL BE CALCULATED. IF IT IS */
-/* LARGER THAN OR EQUAL TO THE ELEMENT LENGTH, THE ELEMENT WILL BE */
-/* INSERTED IN THE CONTAINER AND THE CONTAINER HEADER WILL BE UPDATED. */
-/* THIS ROUTINE ALWAYS DEALS WITH ONLY ONE CONTAINER AND NEVER */
-/* TOUCHES ANYTHING OUTSIDE OF THIS CONTAINER. */
-/* */
-/* SHORT FORM: IDR */
-/* --------------------------------------------------------------------------------- */
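-/* AS USED BELOW, BITS 26-31 OF THE CONTAINER HEADER HOLD THE CONTAINER LENGTH IN */
-/* WORDS AND BIT 10 MARKS THAT THE CONTAINER HAS EXPANDED OVER THE WHOLE BUFFER, */
-/* I.E. THAT THE OTHER SIDE OF THE BUFFER IS NO LONGER PART OF A FREE LIST. */
-/* --------------------------------------------------------------------------------- */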
-void Dbacc::insertContainer(Signal* signal)
-{
- Uint32 tidrContainerlen;
- Uint32 tidrConfreelen;
- Uint32 tidrNextSide;
- Uint32 tidrNextConLen;
- Uint32 tidrIndex;
- Uint32 tidrInputIndex;
- Uint32 tidrContLen;
- Uint32 guard26;
-
- tidrResult = ZFALSE;
- tidrContainerptr = (tidrPageindex << ZSHIFT_PLUS) - (tidrPageindex << ZSHIFT_MINUS);
- tidrContainerptr = tidrContainerptr + ZHEAD_SIZE;
- /* --------------------------------------------------------------------------------- */
- /* CALCULATE THE POINTER TO THE ELEMENT TO BE INSERTED AND THE POINTER TO THE */
- /* CONTAINER HEADER OF THE OTHER SIDE OF THE BUFFER. */
- /* --------------------------------------------------------------------------------- */
- if (tidrForward == ZTRUE) {
- jam();
- tidrNextSide = tidrContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
- arrGuard(tidrNextSide + 1, 2048);
- tidrContainerhead = idrPageptr.p->word32[tidrContainerptr];
- tidrContainerlen = tidrContainerhead >> 26;
- tidrIndex = tidrContainerptr + tidrContainerlen;
- } else {
- jam();
- tidrNextSide = tidrContainerptr;
- tidrContainerptr = tidrContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
- arrGuard(tidrContainerptr + 1, 2048);
- tidrContainerhead = idrPageptr.p->word32[tidrContainerptr];
- tidrContainerlen = tidrContainerhead >> 26;
- tidrIndex = (tidrContainerptr - tidrContainerlen) + (ZCON_HEAD_SIZE - 1);
- }//if
- if (tidrContainerlen > (ZBUF_SIZE - 3)) {
- return;
- }//if
- tidrConfreelen = ZBUF_SIZE - tidrContainerlen;
- /* --------------------------------------------------------------------------------- */
- /* WE CALCULATE THE TOTAL LENGTH THE CONTAINER CAN EXPAND TO */
- /* THIS INCLUDES THE OTHER SIDE OF THE BUFFER IF POSSIBLE TO EXPAND THERE. */
- /* --------------------------------------------------------------------------------- */
- if (((tidrContainerhead >> 10) & 1) == 0) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WE HAVE NOT EXPANDED TO THE ENTIRE BUFFER YET. WE CAN THUS READ THE OTHER */
- /* SIDE'S CONTAINER HEADER TO READ ITS LENGTH. */
- /* --------------------------------------------------------------------------------- */
- tidrNextConLen = idrPageptr.p->word32[tidrNextSide] >> 26;
- tidrConfreelen = tidrConfreelen - tidrNextConLen;
- if (tidrConfreelen > ZBUF_SIZE) {
- ndbrequire(false);
- /* --------------------------------------------------------------------------------- */
- /* THE BUFFERS ARE PLACED ON TOP OF EACH OTHER. THIS SHOULD NEVER OCCUR. */
- /* --------------------------------------------------------------------------------- */
- return;
- }//if
- } else {
- jam();
- tidrNextConLen = 1; /* INDICATE OTHER SIDE IS NOT PART OF FREE LIST */
- }//if
- if (tidrConfreelen < fragrecptr.p->elementLength) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THE CONTAINER COULD NOT BE EXPANDED TO FIT THE NEW ELEMENT. WE HAVE TO */
- /* RETURN AND FIND A NEW CONTAINER TO INSERT IT INTO. */
- /* --------------------------------------------------------------------------------- */
- return;
- }//if
- tidrContainerlen = tidrContainerlen + fragrecptr.p->elementLength;
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- datapageptr.p = idrPageptr.p;
- cundoElemIndex = tidrContainerptr;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- if (tidrNextConLen == 0) {
- /* EACH SIDE OF THE BUFFER THAT BELONGS TO A FREE */
- /* LIST HAS ZERO LENGTH. */
- if (tidrContainerlen > ZUP_LIMIT) {
- dbgWord32(idrPageptr, tidrContainerptr, idrPageptr.p->word32[tidrContainerptr] | (1 << 10));
- idrPageptr.p->word32[tidrContainerptr] = idrPageptr.p->word32[tidrContainerptr] | (1 << 10);
- tslUpdateHeader = ZFALSE;
- tslPageindex = tidrPageindex;
- slPageptr.p = idrPageptr.p;
- if (tidrForward == ZTRUE) {
- jam();
- seizeRightlist(signal); /* REMOVE THE RIGHT SIDE OF THE BUFFER FROM THE LIST */
- } else {
- jam();
- /* OF THE FREE CONTAINERS */
- seizeLeftlist(signal); /* REMOVE THE LEFT SIDE OF THE BUFFER FROM THE LIST */
- }//if
- }//if
- }//if
- /* OF THE FREE CONTAINERS */
- /* --------------------------------------------------------------------------------- */
- /* WE HAVE NOW FOUND A FREE SPOT IN THE CURRENT CONTAINER. WE INSERT THE */
- /* ELEMENT HERE. THE ELEMENT CONTAINS A HEADER, A LOCAL KEY AND A TUPLE KEY. */
- /* BEFORE INSERTING THE ELEMENT WE WILL UPDATE THE OPERATION RECORD WITH THE */
- /* DATA CONCERNING WHERE WE INSERTED THE ELEMENT. THIS MAKES IT EASY TO FIND */
- /* THIS INFORMATION WHEN WE RETURN TO UPDATE THE LOCAL KEY OR RETURN TO COMMIT */
-/* OR ABORT THE INSERT. IF NO OPERATION RECORD EXISTS IT MEANS THAT WE ARE */
-/* PERFORMING THIS AS PART OF THE EXPAND OR SHRINK PROCESS. */
- /* --------------------------------------------------------------------------------- */
- if (idrOperationRecPtr.i != RNIL) {
- jam();
- idrOperationRecPtr.p->elementIsforward = tidrForward;
- idrOperationRecPtr.p->elementPage = idrPageptr.i;
- idrOperationRecPtr.p->elementContainer = tidrContainerptr;
- idrOperationRecPtr.p->elementPointer = tidrIndex;
- }//if
- /* --------------------------------------------------------------------------------- */
- /* WE CHOOSE TO UNDO LOG INSERTS BY WRITING THE BEFORE VALUE TO THE UNDO LOG. */
- /* WE COULD ALSO HAVE DONE THIS BY WRITING THIS BEFORE VALUE WHEN DELETING */
- /* ELEMENTS. WE CHOOSE TO PUT IT HERE SINCE WE THEREBY ENSURE THAT WE ALWAYS */
- /* UNDO LOG ALL WRITES TO PAGE MEMORY. IT SHOULD BE EASIER TO MAINTAIN SUCH A */
- /* STRUCTURE. IT IS RATHER DIFFICULT TO MAINTAIN A LOGICAL STRUCTURE WHERE */
- /* DELETES ARE INSERTS AND INSERTS ARE PURELY DELETES. */
- /* --------------------------------------------------------------------------------- */
- if (fragrecptr.p->createLcp == ZTRUE) {
- if (tidrForward == ZTRUE) {
- cundoElemIndex = tidrIndex;
- } else {
- cundoElemIndex = (tidrIndex + 1) - fragrecptr.p->elementLength;
- }//if
- cundoinfolength = fragrecptr.p->elementLength;
- undoWritingProcess(signal);
- }//if
- dbgWord32(idrPageptr, tidrIndex, tidrElemhead);
- idrPageptr.p->word32[tidrIndex] = tidrElemhead; /* INSERTS THE HEAD OF THE ELEMENT */
- tidrIndex += tidrForward;
- guard26 = fragrecptr.p->localkeylen - 1;
- arrGuard(guard26, 2);
- for (tidrInputIndex = 0; tidrInputIndex <= guard26; tidrInputIndex++) {
- dbgWord32(idrPageptr, tidrIndex, clocalkey[tidrInputIndex]);
- arrGuard(tidrIndex, 2048);
- idrPageptr.p->word32[tidrIndex] = clocalkey[tidrInputIndex]; /* INSERTS LOCALKEY */
- tidrIndex += tidrForward;
- }//for
- tidrContLen = idrPageptr.p->word32[tidrContainerptr] << 6;
- tidrContLen = tidrContLen >> 6;
- dbgWord32(idrPageptr, tidrContainerptr, (tidrContainerlen << 26) | tidrContLen);
- idrPageptr.p->word32[tidrContainerptr] = (tidrContainerlen << 26) | tidrContLen;
- tidrResult = ZTRUE;
-}//Dbacc::insertContainer()
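-
-/* --------------------------------------------------------------------------------- */
-/* ILLUSTRATIVE SKETCH (standalone, not part of the original routine): the two ideas */
-/* used above are that element words are written in either direction (forward = +1, */
-/* backward = (Uint32)-1, as tidrForward/tgeForward are used) and that the container */
-/* length lives in bits 26-31 of the container header word. The typedef and helper */
-/* names below are assumptions made only to keep the sketch self-contained. */
-/* --------------------------------------------------------------------------------- */
-#include <cstdint>
-typedef std::uint32_t Uint32;
-
-/* Replace the length field (bits 26-31) of a container header, keeping bits 0-25. */
-static Uint32 setContainerLen(Uint32 header, Uint32 newLen)
-{
-  return (newLen << 26) | (header & 0x03ffffffu);   /* same effect as (header << 6) >> 6 */
-}
-
-/* Write an element (head word + key words) starting at 'index', stepping by 'forward'. */
-static void writeElement(Uint32* page, Uint32 index, Uint32 forward,
-                         Uint32 elemHead, const Uint32* key, Uint32 keyLen)
-{
-  page[index] = elemHead;
-  index += forward;                                 /* +1 or (Uint32)-1 */
-  for (Uint32 i = 0; i < keyLen; i++) {
-    page[index] = key[i];
-    index += forward;
-  }
-}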
-
-/* --------------------------------------------------------------------------------- */
-/* ADDNEWCONTAINER */
-/* INPUT: */
-/* TANC_CONTAINERPTR */
-/* ANC_PAGEPTR */
-/* TANC_NEXT */
-/* TANC_PAGEINDEX */
-/* TANC_BUF_TYPE */
-/* TANC_PAGEID */
-/* OUTPUT: */
-/* NONE */
-/* */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::addnewcontainer(Signal* signal)
-{
- Uint32 tancTmp1;
-
- if (fragrecptr.p->createLcp == ZTRUE) {
- cundoElemIndex = tancContainerptr;
- datapageptr.p = ancPageptr.p;
- cundoinfolength = 2;
- undoWritingProcess(signal); /* WHEN UNDO PROCESS HAS STARTED, */
- }//if
- /* THE OLD DATA IS STORED ON AN UNDO PAGE */
- /* --------------------------------------------------------------------------------- */
- /* KEEP LENGTH INFORMATION IN BIT 26-31. */
-/* SET BIT 9, INDICATING WHETHER THE NEXT BUFFER IS IN THE SAME PAGE, USING TANC_NEXT. */
- /* SET TYPE OF NEXT CONTAINER IN BIT 7-8. */
- /* SET PAGE INDEX OF NEXT CONTAINER IN BIT 0-6. */
- /* KEEP INDICATOR OF OWNING OTHER SIDE OF BUFFER IN BIT 10. */
- /* --------------------------------------------------------------------------------- */
- tancTmp1 = ancPageptr.p->word32[tancContainerptr] >> 10;
- tancTmp1 = tancTmp1 << 1;
- tancTmp1 = tancTmp1 | tancNext;
- tancTmp1 = tancTmp1 << 2;
- tancTmp1 = tancTmp1 | tancBufType; /* TYPE OF THE NEXT CONTAINER */
- tancTmp1 = tancTmp1 << 7;
- tancTmp1 = tancTmp1 | tancPageindex;
- dbgWord32(ancPageptr, tancContainerptr, tancTmp1);
- ancPageptr.p->word32[tancContainerptr] = tancTmp1; /* HEAD OF THE CONTAINER IS UPDATED */
- dbgWord32(ancPageptr, tancContainerptr + 1, tancPageid);
- ancPageptr.p->word32[tancContainerptr + 1] = tancPageid;
-}//Dbacc::addnewcontainer()
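-
-/* --------------------------------------------------------------------------------- */
-/* ILLUSTRATIVE SKETCH (standalone, not part of the original routine): the container */
-/* header layout described above, i.e. length in bits 26-31, "owns the other side of */
-/* the buffer" in bit 10, "next container is in the same page" in bit 9, type of the */
-/* next container in bits 7-8 and page index of the next container in bits 0-6. The */
-/* helper names are inventions for this sketch only. */
-/* --------------------------------------------------------------------------------- */
-#include <cstdint>
-typedef std::uint32_t Uint32;
-
-/* Rebuild the next-container reference, keeping bits 10 and above (ownership bit, */
-/* used-list links and length), mirroring the shift sequence in addnewcontainer(). */
-static Uint32 packNextRef(Uint32 oldHeader, Uint32 samePage,
-                          Uint32 bufType, Uint32 pageIndex)
-{
-  Uint32 h = oldHeader >> 10;              /* keep bits 10-31                */
-  h = (h << 1) | (samePage & 1);           /* bit 9: next is in same page    */
-  h = (h << 2) | (bufType & 3);            /* bits 7-8: type of next         */
-  h = (h << 7) | (pageIndex & 0x7f);       /* bits 0-6: page index of next   */
-  return h;
-}
-
-static Uint32 containerLen(Uint32 header)   { return header >> 26; }
-static Uint32 ownsOtherSide(Uint32 header)  { return (header >> 10) & 1; }
-static Uint32 nextInSamePage(Uint32 header) { return (header >> 9) & 1; }
-static Uint32 nextBufType(Uint32 header)    { return (header >> 7) & 3; }
-static Uint32 nextPageIndex(Uint32 header)  { return header & 0x7f; }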
-
-/* --------------------------------------------------------------------------------- */
-/* GETFREELIST */
-/* INPUT: */
-/* GFL_PAGEPTR (POINTER TO A PAGE RECORD). */
-/* OUTPUT: */
-/* TGFL_PAGEINDEX (POINTER TO A FREE BUFFER IN THE FREEPAGE), AND */
-/* TGFL_BUF_TYPE (TYPE OF THE FREE BUFFER). */
-/* DESCRIPTION: SEARCHES THE FREE LIST OF FREE BUFFERS IN THE PAGE HEAD */
-/* (WORD32(1)), AND RETURNS THE ADDRESS OF A FREE BUFFER OR NIL. */
-/* THE FREE BUFFER CAN BE A RIGHT CONTAINER OR A LEFT ONE. */
-/* THE KIND OF CONTAINER IS NOTED BY TGFL_BUF_TYPE. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::getfreelist(Signal* signal)
-{
- Uint32 tgflTmp;
-
- tgflTmp = gflPageptr.p->word32[ZPOS_EMPTY_LIST];
- tgflPageindex = (tgflTmp >> 7) & 0x7f; /* LEFT FREE LIST */
- tgflBufType = ZLEFT;
- if (tgflPageindex == ZEMPTYLIST) {
- jam();
- tgflPageindex = tgflTmp & 0x7f; /* RIGHT FREE LIST */
- tgflBufType = ZRIGHT;
- }//if
- ndbrequire(tgflPageindex <= ZEMPTYLIST);
-}//Dbacc::getfreelist()
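-
-/* --------------------------------------------------------------------------------- */
-/* ILLUSTRATIVE SKETCH (standalone, not part of the original routine): getfreelist() */
-/* reads the page's empty-list word, which keeps the head of the left free list in */
-/* bits 7-13 and the head of the right free list in bits 0-6, with ZEMPTYLIST as the */
-/* "list is empty" marker. The sentinel value and struct below are assumptions for */
-/* this sketch; they are not the Dbacc definitions. */
-/* --------------------------------------------------------------------------------- */
-#include <cstdint>
-typedef std::uint32_t Uint32;
-
-static const Uint32 EMPTY_SENTINEL = 127;  /* assumed 7-bit "no free buffer" marker */
-
-struct FreeBuf { Uint32 pageIndex; bool isLeft; bool found; };
-
-static FreeBuf pickFreeBuffer(Uint32 emptyListWord)
-{
-  FreeBuf r;
-  r.pageIndex = (emptyListWord >> 7) & 0x7f;   /* left free list head  */
-  r.isLeft = true;
-  if (r.pageIndex == EMPTY_SENTINEL) {
-    r.pageIndex = emptyListWord & 0x7f;        /* right free list head */
-    r.isLeft = false;
-  }
-  r.found = (r.pageIndex != EMPTY_SENTINEL);
-  return r;
-}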
-
-/* --------------------------------------------------------------------------------- */
-/* INCREASELISTCONT */
-/* INPUT: */
-/* ILC_PAGEPTR PAGE POINTER OF THE PAGE TO INCREASE THE NUMBER OF CONTAINERS IN. */
-/* WHEN A CONTAINER OF AN OVERFLOW PAGE (FREEPAGEPTR) IS ALLOCATED, THE NUMBER OF */
-/* ALLOCATED CONTAINERS HAS TO BE INCREASED BY ONE. */
-/* IF THE NUMBER OF ALLOCATED CONTAINERS IS ABOVE THE FREE LIMIT WE WILL */
-/* REMOVE THE PAGE FROM THE FREE LIST. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::increaselistcont(Signal* signal)
-{
- OverflowRecordPtr ilcOverflowRecPtr;
-
- dbgWord32(ilcPageptr, ZPOS_ALLOC_CONTAINERS, ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] + 1);
- ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] + 1;
- if (ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] > ZFREE_LIMIT) {
- if (ilcPageptr.p->word32[ZPOS_OVERFLOWREC] != RNIL) {
- jam();
- ilcOverflowRecPtr.i = ilcPageptr.p->word32[ZPOS_OVERFLOWREC];
- dbgWord32(ilcPageptr, ZPOS_OVERFLOWREC, RNIL);
- ilcPageptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
- ptrCheckGuard(ilcOverflowRecPtr, coverflowrecsize, overflowRecord);
- tfoOverflowRecPtr = ilcOverflowRecPtr;
- takeRecOutOfFreeOverpage(signal);
- rorOverflowRecPtr = ilcOverflowRecPtr;
- releaseOverflowRec(signal);
- }//if
- }//if
-}//Dbacc::increaselistcont()
-
-/* --------------------------------------------------------------------------------- */
-/* SEIZE_LEFTLIST */
-/* INPUT: */
-/* TSL_PAGEINDEX PAGE INDEX OF CONTAINER TO SEIZE */
-/* SL_PAGEPTR PAGE POINTER OF CONTAINER TO SEIZE */
-/* TSL_UPDATE_HEADER SHOULD WE UPDATE THE CONTAINER HEADER */
-/* */
-/* OUTPUT: */
-/* NONE */
-/* DESCRIPTION: THE BUFFER NOTED BY TSL_PAGEINDEX WILL BE REMOVED FROM THE */
-/* LIST OF LEFT FREE CONTAINERS, KEPT IN THE HEADER OF THE PAGE */
-/* (FREEPAGEPTR). THE PREVIOUS AND NEXT BUFFERS OF THE REMOVED */
-/* BUFFER WILL BE UPDATED. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizeLeftlist(Signal* signal)
-{
- Uint32 tsllTmp1;
- Uint32 tsllNewHead;
- Uint32 tsllHeadIndex;
- Uint32 tsllTmp;
-
- tsllHeadIndex = ((tslPageindex << ZSHIFT_PLUS) - (tslPageindex << ZSHIFT_MINUS)) + ZHEAD_SIZE;
- arrGuard(tsllHeadIndex + 1, 2048);
- tslNextfree = slPageptr.p->word32[tsllHeadIndex];
- tslPrevfree = slPageptr.p->word32[tsllHeadIndex + 1];
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- datapageptr.p = slPageptr.p;
- cundoElemIndex = tsllHeadIndex;
- cundoinfolength = 2;
- undoWritingProcess(signal);
- }//if
- if (fragrecptr.p->createLcp == ZTRUE) {
- cundoElemIndex = ZPOS_EMPTY_LIST;
- cundoinfolength = 2;
- undoWritingProcess(signal);
- }//if
- if (tslPrevfree == ZEMPTYLIST) {
- jam();
- /* UPDATE FREE LIST OF LEFT CONTAINER IN PAGE HEAD */
- tsllTmp1 = slPageptr.p->word32[ZPOS_EMPTY_LIST];
- tsllTmp = tsllTmp1 & 0x7f;
- tsllTmp1 = (tsllTmp1 >> 14) << 14;
- tsllTmp1 = (tsllTmp1 | (tslNextfree << 7)) | tsllTmp;
- dbgWord32(slPageptr, ZPOS_EMPTY_LIST, tsllTmp1);
- slPageptr.p->word32[ZPOS_EMPTY_LIST] = tsllTmp1;
- } else {
- ndbrequire(tslPrevfree < ZEMPTYLIST);
- jam();
- tsllTmp = ((tslPrevfree << ZSHIFT_PLUS) - (tslPrevfree << ZSHIFT_MINUS)) + ZHEAD_SIZE;
- if (fragrecptr.p->createLcp == ZTRUE) {
- cundoElemIndex = tsllTmp;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- dbgWord32(slPageptr, tsllTmp, tslNextfree);
- slPageptr.p->word32[tsllTmp] = tslNextfree;
- }//if
- if (tslNextfree < ZEMPTYLIST) {
- jam();
- tsllTmp = (((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ZHEAD_SIZE) + 1;
- if (fragrecptr.p->createLcp == ZTRUE) {
- cundoElemIndex = tsllTmp;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- dbgWord32(slPageptr, tsllTmp, tslPrevfree);
- slPageptr.p->word32[tsllTmp] = tslPrevfree;
- } else {
- ndbrequire(tslNextfree == ZEMPTYLIST);
- jam();
- }//if
- /* --------------------------------------------------------------------------------- */
- /* IF WE ARE UPDATING THE HEADER WE ARE CREATING A NEW CONTAINER IN THE PAGE. */
- /* TO BE ABLE TO FIND ALL LOCKED ELEMENTS WE KEEP ALL CONTAINERS IN LINKED */
- /* LISTS IN THE PAGE. */
- /* */
- /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 16-22 THAT REFERS TO THE */
- /* FIRST CONTAINER IN A LIST OF USED RIGHT CONTAINERS IN THE PAGE. */
- /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 23-29 THAT REFERS TO THE */
- /* FIRST CONTAINER IN A LIST OF USED LEFT CONTAINERS IN THE PAGE. */
- /* EACH CONTAINER IN THE LIST CONTAINS A NEXT POINTER IN BIT 11-17 AND IT */
- /* CONTAINS A PREVIOUS POINTER IN BIT 18-24. */
- /* WE ALSO SET BIT 25 TO INDICATE THAT IT IS A CONTAINER HEADER. */
- /* --------------------------------------------------------------------------------- */
- if (tslUpdateHeader == ZTRUE) {
- jam();
- tslNextfree = (slPageptr.p->word32[ZPOS_EMPTY_LIST] >> 23) & 0x7f;
- tsllNewHead = ZCON_HEAD_SIZE;
- tsllNewHead = ((tsllNewHead << 8) + ZEMPTYLIST) + (1 << 7);
- tsllNewHead = (tsllNewHead << 7) + tslNextfree;
- tsllNewHead = tsllNewHead << 11;
- dbgWord32(slPageptr, tsllHeadIndex, tsllNewHead);
- slPageptr.p->word32[tsllHeadIndex] = tsllNewHead;
- tsllTmp = slPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xc07fffff;
- tsllTmp = tsllTmp | (tslPageindex << 23);
- dbgWord32(slPageptr, ZPOS_EMPTY_LIST, tsllTmp);
- slPageptr.p->word32[ZPOS_EMPTY_LIST] = tsllTmp;
- if (tslNextfree < ZEMPTYLIST) {
- jam();
- tsllTmp = ((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ZHEAD_SIZE;
- if (fragrecptr.p->createLcp == ZTRUE) {
- cundoElemIndex = tsllTmp;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- tsllTmp1 = slPageptr.p->word32[tsllTmp] & 0xfe03ffff;
- tsllTmp1 = tsllTmp1 | (tslPageindex << 18);
- dbgWord32(slPageptr, tsllTmp, tsllTmp1);
- slPageptr.p->word32[tsllTmp] = tsllTmp1;
- } else {
- ndbrequire(tslNextfree == ZEMPTYLIST);
- jam();
- }//if
- }//if
- ilcPageptr.p = slPageptr.p;
- increaselistcont(signal);
-}//Dbacc::seizeLeftlist()
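-
-/* --------------------------------------------------------------------------------- */
-/* ILLUSTRATIVE SKETCH (standalone, not part of the original routine): the in-page */
-/* bookkeeping of used containers described in the comment above. The empty-list word */
-/* keeps the head of the used-right list in bits 16-22 and the head of the used-left */
-/* list in bits 23-29; a used container header carries its next pointer in bits 11-17, */
-/* its previous pointer in bits 18-24 and a "container header" flag in bit 25. The */
-/* helper names are inventions for this sketch only. */
-/* --------------------------------------------------------------------------------- */
-#include <cstdint>
-typedef std::uint32_t Uint32;
-
-static Uint32 usedRightHead(Uint32 emptyListWord) { return (emptyListWord >> 16) & 0x7f; }
-static Uint32 usedLeftHead (Uint32 emptyListWord) { return (emptyListWord >> 23) & 0x7f; }
-
-static Uint32 usedNext(Uint32 containerHeader) { return (containerHeader >> 11) & 0x7f; }
-static Uint32 usedPrev(Uint32 containerHeader) { return (containerHeader >> 18) & 0x7f; }
-static bool   isContainerHead(Uint32 containerHeader) { return ((containerHeader >> 25) & 1) != 0; }
-
-/* Make 'pageIndex' the new head of the used-left list (bits 23-29), mirroring the */
-/* 0xc07fffff mask used in seizeLeftlist(). */
-static Uint32 setUsedLeftHead(Uint32 emptyListWord, Uint32 pageIndex)
-{
-  return (emptyListWord & 0xc07fffffu) | (pageIndex << 23);
-}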
-
-/* --------------------------------------------------------------------------------- */
-/* SEIZE_RIGHTLIST */
-/* DESCRIPTION: THE BUFFER NOTED BY TSL_PAGEINDEX WILL BE REMOVED FROM THE */
-/* LIST OF RIGHT FREE CONTAINERS, KEPT IN THE HEADER OF THE PAGE */
-/* (SL_PAGEPTR). THE PREVIOUS AND NEXT BUFFERS OF THE REMOVED */
-/* BUFFER WILL BE UPDATED. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizeRightlist(Signal* signal)
-{
- Uint32 tsrlTmp1;
- Uint32 tsrlNewHead;
- Uint32 tsrlHeadIndex;
- Uint32 tsrlTmp;
-
- tsrlHeadIndex = ((tslPageindex << ZSHIFT_PLUS) - (tslPageindex << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
- arrGuard(tsrlHeadIndex + 1, 2048);
- tslNextfree = slPageptr.p->word32[tsrlHeadIndex];
- tslPrevfree = slPageptr.p->word32[tsrlHeadIndex + 1];
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- datapageptr.p = slPageptr.p;
- cundoElemIndex = tsrlHeadIndex;
- cundoinfolength = 2;
- undoWritingProcess(signal);
- }//if
- if (fragrecptr.p->createLcp == ZTRUE) {
- cundoElemIndex = ZPOS_EMPTY_LIST;
- cundoinfolength = 2;
- undoWritingProcess(signal);
- }//if
- if (tslPrevfree == ZEMPTYLIST) {
- jam();
- tsrlTmp = slPageptr.p->word32[ZPOS_EMPTY_LIST];
- dbgWord32(slPageptr, ZPOS_EMPTY_LIST, ((tsrlTmp >> 7) << 7) | tslNextfree);
- slPageptr.p->word32[ZPOS_EMPTY_LIST] = ((tsrlTmp >> 7) << 7) | tslNextfree;
- } else {
- ndbrequire(tslPrevfree < ZEMPTYLIST);
- jam();
- tsrlTmp = ((tslPrevfree << ZSHIFT_PLUS) - (tslPrevfree << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
- if (fragrecptr.p->createLcp == ZTRUE) {
- cundoElemIndex = tsrlTmp;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- dbgWord32(slPageptr, tsrlTmp, tslNextfree);
- slPageptr.p->word32[tsrlTmp] = tslNextfree;
- }//if
- if (tslNextfree < ZEMPTYLIST) {
- jam();
- tsrlTmp = ((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - (ZCON_HEAD_SIZE - 1));
- if (fragrecptr.p->createLcp == ZTRUE) {
- cundoElemIndex = tsrlTmp;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- dbgWord32(slPageptr, tsrlTmp, tslPrevfree);
- slPageptr.p->word32[tsrlTmp] = tslPrevfree;
- } else {
- ndbrequire(tslNextfree == ZEMPTYLIST);
- jam();
- }//if
- /* --------------------------------------------------------------------------------- */
- /* IF WE ARE UPDATING THE HEADER WE ARE CREATING A NEW CONTAINER IN THE PAGE. */
- /* TO BE ABLE TO FIND ALL LOCKED ELEMENTS WE KEEP ALL CONTAINERS IN LINKED */
- /* LISTS IN THE PAGE. */
- /* */
- /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 16-22 THAT REFERS TO THE */
- /* FIRST CONTAINER IN A LIST OF USED RIGHT CONTAINERS IN THE PAGE. */
- /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 23-29 THAT REFERS TO THE */
- /* FIRST CONTAINER IN A LIST OF USED LEFT CONTAINERS IN THE PAGE. */
- /* EACH CONTAINER IN THE LIST CONTAINS A NEXT POINTER IN BIT 11-17 AND IT */
- /* CONTAINS A PREVIOUS POINTER IN BIT 18-24. */
- /* --------------------------------------------------------------------------------- */
- if (tslUpdateHeader == ZTRUE) {
- jam();
- tslNextfree = (slPageptr.p->word32[ZPOS_EMPTY_LIST] >> 16) & 0x7f;
- tsrlNewHead = ZCON_HEAD_SIZE;
- tsrlNewHead = ((tsrlNewHead << 8) + ZEMPTYLIST) + (1 << 7);
- tsrlNewHead = (tsrlNewHead << 7) + tslNextfree;
- tsrlNewHead = tsrlNewHead << 11;
- dbgWord32(slPageptr, tsrlHeadIndex, tsrlNewHead);
- slPageptr.p->word32[tsrlHeadIndex] = tsrlNewHead;
- tsrlTmp = slPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xff80ffff;
- dbgWord32(slPageptr, ZPOS_EMPTY_LIST, tsrlTmp | (tslPageindex << 16));
- slPageptr.p->word32[ZPOS_EMPTY_LIST] = tsrlTmp | (tslPageindex << 16);
- if (tslNextfree < ZEMPTYLIST) {
- jam();
- tsrlTmp = ((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- cundoElemIndex = tsrlTmp;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- tsrlTmp1 = slPageptr.p->word32[tsrlTmp] & 0xfe03ffff;
- dbgWord32(slPageptr, tsrlTmp, tsrlTmp1 | (tslPageindex << 18));
- slPageptr.p->word32[tsrlTmp] = tsrlTmp1 | (tslPageindex << 18);
- } else {
- ndbrequire(tslNextfree == ZEMPTYLIST);
- jam();
- }//if
- }//if
- ilcPageptr.p = slPageptr.p;
- increaselistcont(signal);
-}//Dbacc::seizeRightlist()
-
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* END OF INSERT_ELEMENT MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* MODULE: GET_ELEMENT */
-/* THE FOLLOWING SUBROUTINES ARE ONLY USED BY GET_ELEMENT AND */
-/* GETDIRINDEX. THIS ROUTINE IS THE SOLE INTERFACE TO GET ELEMENTS */
-/* FROM THE INDEX. CURRENT USERS ARE ALL REQUESTS AND THE EXECUTION OF THE UNDO LOG. */
-/* */
-/* THE FOLLOWING SUBROUTINES ARE INCLUDED IN THIS MODULE: */
-/* GET_ELEMENT */
-/* GET_DIRINDEX */
-/* SEARCH_LONG_KEY */
-/* */
-/* THESE ROUTINES ARE ONLY USED BY THIS MODULE AND BY NO ONE ELSE. */
-/* ALSO THE ROUTINES MAKE NO USE OF ROUTINES IN OTHER MODULES. */
-/* THE ONLY SHORT-LIVED VARIABLES USED IN OTHER PARTS OF THE BLOCK ARE */
-/* THOSE DEFINED AS INPUT AND OUTPUT IN GET_ELEMENT AND GETDIRINDEX */
-/* SHORT-LIVED VARIABLES INCLUDE TEMPORARY VARIABLES, COMMON VARIABLES */
-/* AND POINTER VARIABLES. */
-/* THE ONLY EXCEPTION TO THIS RULE IS FRAGRECPTR WHICH POINTS TO THE */
-/* FRAGMENT RECORD. THIS IS MORE OR LESS STATIC DURING THE EXECUTION OF A */
-/* SIGNAL. */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* GETDIRINDEX */
-/* SUPPORT ROUTINE FOR INSERT ELEMENT, GET ELEMENT AND COMMITDELETE */
-/* INPUT:FRAGRECPTR ( POINTER TO THE ACTIVE FRAGMENT REC) */
-/* OPERATION_REC_PTR (POINTER TO THE OPERATION REC). */
-/* */
-/* OUTPUT:GDI_PAGEPTR ( POINTER TO THE PAGE OF THE ELEMENT) */
-/* TGDI_PAGEINDEX ( INDEX OF THE ELEMENT IN THE PAGE). */
-/* */
-/* DESCRIPTION: CHECK THE HASH VALUE OF THE OPERATION REC AND CALCULATE */
-/* THE ADDRESS OF THE ELEMENT IN THE HASH TABLE (GDI_PAGEPTR, */
-/* TGDI_PAGEINDEX) ACCORDING TO LH3. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::getdirindex(Signal* signal)
-{
- DirRangePtr gdiDirRangePtr;
- DirectoryarrayPtr gdiDirptr;
- Uint32 tgdiTmp;
- Uint32 tgdiAddress;
-
- tgdiTmp = fragrecptr.p->k + fragrecptr.p->lhfragbits; /* OBS K = 6 */
- tgdiPageindex = operationRecPtr.p->hashValue & ((1 << fragrecptr.p->k) - 1);
- tgdiTmp = operationRecPtr.p->hashValue >> tgdiTmp;
- tgdiTmp = (tgdiTmp << fragrecptr.p->k) | tgdiPageindex;
- tgdiAddress = tgdiTmp & fragrecptr.p->maxp;
- gdiDirRangePtr.i = fragrecptr.p->directory;
- ptrCheckGuard(gdiDirRangePtr, cdirrangesize, dirRange);
- if (tgdiAddress < fragrecptr.p->p) {
- jam();
- tgdiAddress = tgdiTmp & ((fragrecptr.p->maxp << 1) | 1);
- }//if
- tgdiTmp = tgdiAddress >> fragrecptr.p->k;
- arrGuard((tgdiTmp >> 8), 256);
- gdiDirptr.i = gdiDirRangePtr.p->dirArray[tgdiTmp >> 8];
- ptrCheckGuard(gdiDirptr, cdirarraysize, directoryarray);
- gdiPageptr.i = gdiDirptr.p->pagep[tgdiTmp & 0xff]; /* DIRECTORY INDEX OF SEND BUCKET PAGE */
- ptrCheckGuard(gdiPageptr, cpagesize, page8);
-}//Dbacc::getdirindex()
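-
-/* --------------------------------------------------------------------------------- */
-/* ILLUSTRATIVE SKETCH (standalone, not part of the original routine): the linear */
-/* hashing (LH) address step performed by getdirindex(): mask the hash with the */
-/* current mask MAXP; buckets below the split pointer P have already been split in */
-/* this round, so they are re-hashed with the doubled mask. The directory lookup that */
-/* follows in the routine is omitted here. */
-/* --------------------------------------------------------------------------------- */
-#include <cstdint>
-typedef std::uint32_t Uint32;
-
-static Uint32 lhBucketAddress(Uint32 hashValue, Uint32 maxp, Uint32 p)
-{
-  Uint32 address = hashValue & maxp;               /* mask with 2^level - 1 */
-  if (address < p) {
-    /* Already split in the current round: use one more bit of the hash value. */
-    address = hashValue & ((maxp << 1) | 1);
-  }
-  return address;
-}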
-
-Uint32
-Dbacc::readTablePk(Uint32 localkey1)
-{
- Uint32 tableId = fragrecptr.p->myTableId;
- Uint32 fragId = fragrecptr.p->myfid;
- Uint32 fragPageId = localkey1 >> MAX_TUPLES_BITS;
- Uint32 pageIndex = localkey1 & ((1 << MAX_TUPLES_BITS ) - 1);
-#ifdef VM_TRACE
- memset(ckeys, 0x1f, (fragrecptr.p->keyLength * MAX_XFRM_MULTIPLY) << 2);
-#endif
- int ret = c_tup->accReadPk(tableId, fragId, fragPageId, pageIndex, ckeys, true);
- ndbrequire(ret > 0);
- return ret;
-}
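-
-/* --------------------------------------------------------------------------------- */
-/* ILLUSTRATIVE SKETCH (standalone, not part of the original routine): how the */
-/* one-word local key is split here and again in commitdelete(): the upper bits give */
-/* the fragment page id and the lowest MAX_TUPLES_BITS bits give the index within */
-/* that page. The bit count is passed as a parameter instead of using the real */
-/* MAX_TUPLES_BITS constant. */
-/* --------------------------------------------------------------------------------- */
-#include <cstdint>
-typedef std::uint32_t Uint32;
-
-struct LocalKeyParts { Uint32 fragPageId; Uint32 pageIndex; };
-
-static LocalKeyParts splitLocalKey(Uint32 localKey, Uint32 tupleBits)
-{
-  LocalKeyParts parts;
-  parts.fragPageId = localKey >> tupleBits;
-  parts.pageIndex  = localKey & ((1u << tupleBits) - 1);
-  return parts;
-}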
-
-/* --------------------------------------------------------------------------------- */
-/* GET_ELEMENT */
-/* INPUT: */
-/* OPERATION_REC_PTR */
-/* FRAGRECPTR */
-/* OUTPUT: */
-/* TGE_RESULT RESULT SUCCESS = ZTRUE OTHERWISE ZFALSE */
-/* TGE_LOCKED LOCK INFORMATION IF SUCCESSFUL RESULT */
-/* GE_PAGEPTR PAGE POINTER OF FOUND ELEMENT */
-/* TGE_CONTAINERPTR CONTAINER INDEX OF FOUND ELEMENT */
-/* TGE_ELEMENTPTR ELEMENT INDEX OF FOUND ELEMENT */
-/* TGE_FORWARD DIRECTION OF CONTAINER WHERE ELEMENT FOUND */
-/* */
-/* DESCRIPTION: THE SUBROUTINE GOES THROUGH ALL CONTAINERS OF THE ACTIVE */
-/* BUCKET AND SEARCHES FOR THE ELEMENT. THE PRIMARY KEY THAT IS SAVED */
-/* IN THE OPERATION REC IS THE CHECK ITEM IN THE SEARCH. */
-/* --------------------------------------------------------------------------------- */
-
-#if __ia64 == 1
-#if __INTEL_COMPILER == 810
-int ndb_acc_ia64_icc810_dummy_var = 0;
-void ndb_acc_ia64_icc810_dummy_func()
-{
- ndb_acc_ia64_icc810_dummy_var++;
-}
-#endif
-#endif
-
-void Dbacc::getElement(Signal* signal)
-{
- DirRangePtr geOverflowrangeptr;
- DirectoryarrayPtr geOverflowDirptr;
- OperationrecPtr geTmpOperationRecPtr;
- Uint32 tgeElementHeader;
- Uint32 tgeElemStep;
- Uint32 tgeContainerhead;
- Uint32 tgePageindex;
- Uint32 tgeActivePageDir;
- Uint32 tgeNextptrtype;
- register Uint32 tgeKeyptr;
- register Uint32 tgeRemLen;
- register Uint32 TelemLen = fragrecptr.p->elementLength;
- register Uint32* Tkeydata = (Uint32*)&signal->theData[7];
-
- getdirindex(signal);
- tgePageindex = tgdiPageindex;
- gePageptr = gdiPageptr;
- tgeResult = ZFALSE;
- /*
- * The value searched for is
- * - table key for ACCKEYREQ, stored in TUP
- * - local key (1 word) for ACC_LOCKREQ and UNDO, stored in ACC
- */
- const bool searchLocalKey =
- operationRecPtr.p->isAccLockReq || operationRecPtr.p->isUndoLogReq;
-
- ndbrequire(TelemLen == ZELEM_HEAD_SIZE + fragrecptr.p->localkeylen);
- tgeNextptrtype = ZLEFT;
- tgeLocked = 0;
-
- const Uint32 tmp = fragrecptr.p->k + fragrecptr.p->lhfragbits;
- const Uint32 opHashValuePart = (operationRecPtr.p->hashValue >> tmp) &0xFFFF;
- do {
- tgeContainerptr = (tgePageindex << ZSHIFT_PLUS) - (tgePageindex << ZSHIFT_MINUS);
- if (tgeNextptrtype == ZLEFT) {
- jam();
- tgeContainerptr = tgeContainerptr + ZHEAD_SIZE;
- tgeElementptr = tgeContainerptr + ZCON_HEAD_SIZE;
- tgeKeyptr = (tgeElementptr + ZELEM_HEAD_SIZE) + fragrecptr.p->localkeylen;
- tgeElemStep = TelemLen;
- tgeForward = 1;
- if (tgeContainerptr >= 2048) { ACCKEY_error(4); return;}
- tgeRemLen = gePageptr.p->word32[tgeContainerptr] >> 26;
- if ((tgeContainerptr + tgeRemLen - 1) >= 2048) { ACCKEY_error(5); return;}
- } else if (tgeNextptrtype == ZRIGHT) {
- jam();
- tgeContainerptr = tgeContainerptr + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
- tgeElementptr = tgeContainerptr - 1;
- tgeKeyptr = (tgeElementptr - ZELEM_HEAD_SIZE) - fragrecptr.p->localkeylen;
- tgeElemStep = 0 - TelemLen;
- tgeForward = (Uint32)-1;
- if (tgeContainerptr >= 2048) { ACCKEY_error(4); return;}
- tgeRemLen = gePageptr.p->word32[tgeContainerptr] >> 26;
- if ((tgeContainerptr - tgeRemLen) >= 2048) { ACCKEY_error(5); return;}
- } else {
- ACCKEY_error(6); return;
- }//if
- if (tgeRemLen >= ZCON_HEAD_SIZE + TelemLen) {
- if (tgeRemLen > ZBUF_SIZE) {
- ACCKEY_error(7); return;
- }//if
- /* --------------------------------------------------------------------------------- */
- // There is at least one element in this container. Check if it is the element
- // searched for.
- /* --------------------------------------------------------------------------------- */
- do {
- tgeElementHeader = gePageptr.p->word32[tgeElementptr];
- tgeRemLen = tgeRemLen - TelemLen;
- Uint32 hashValuePart;
- if (ElementHeader::getLocked(tgeElementHeader)) {
- jam();
- geTmpOperationRecPtr.i = ElementHeader::getOpPtrI(tgeElementHeader);
- ptrCheckGuard(geTmpOperationRecPtr, coprecsize, operationrec);
- hashValuePart = geTmpOperationRecPtr.p->hashvaluePart;
- } else {
- jam();
- hashValuePart = ElementHeader::getHashValuePart(tgeElementHeader);
- }
- if (hashValuePart == opHashValuePart) {
- jam();
- Uint32 localkey1 = gePageptr.p->word32[tgeElementptr + tgeForward];
- Uint32 localkey2 = 0;
- bool found;
- if (! searchLocalKey) {
- Uint32 len = readTablePk(localkey1);
- found = (len == operationRecPtr.p->xfrmtupkeylen) &&
- (memcmp(Tkeydata, ckeys, len << 2) == 0);
- } else {
- jam();
- found = (localkey1 == Tkeydata[0]);
- }
- if (found) {
- jam();
- tgeLocked = ElementHeader::getLocked(tgeElementHeader);
- tgeResult = ZTRUE;
- operationRecPtr.p->localdata[0] = localkey1;
- operationRecPtr.p->localdata[1] = localkey2;
- return;
- }
- }
- if (tgeRemLen <= ZCON_HEAD_SIZE) {
- break;
- }
- tgeElementptr = tgeElementptr + tgeElemStep;
- } while (true);
- }//if
- if (tgeRemLen != ZCON_HEAD_SIZE) {
- ACCKEY_error(8); return;
- }//if
- tgeContainerhead = gePageptr.p->word32[tgeContainerptr];
- tgeNextptrtype = (tgeContainerhead >> 7) & 0x3;
- if (tgeNextptrtype == 0) {
- jam();
- return; /* NO MORE CONTAINER */
- }//if
- tgePageindex = tgeContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */
- if (tgePageindex > ZEMPTYLIST) {
- ACCKEY_error(9); return;
- }//if
- if (((tgeContainerhead >> 9) & 1) == ZFALSE) {
- jam();
- tgeActivePageDir = gePageptr.p->word32[tgeContainerptr + 1]; /* NEXT PAGE ID */
- geOverflowrangeptr.i = fragrecptr.p->overflowdir;
- ptrCheckGuard(geOverflowrangeptr, cdirrangesize, dirRange);
- arrGuard((tgeActivePageDir >> 8), 256);
- geOverflowDirptr.i = geOverflowrangeptr.p->dirArray[tgeActivePageDir >> 8];
- ptrCheckGuard(geOverflowDirptr, cdirarraysize, directoryarray);
- gePageptr.i = geOverflowDirptr.p->pagep[tgeActivePageDir & 0xff];
- ptrCheckGuard(gePageptr, cpagesize, page8);
- }//if
- } while (1);
- return;
-}//Dbacc::getElement()
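-
-/* --------------------------------------------------------------------------------- */
-/* ILLUSTRATIVE SKETCH (standalone, not part of the original routine): the search */
-/* shortcut used in the loop above. A 16-bit part of the element's hash value is */
-/* available cheaply (from the element header, or from the lock owner's operation */
-/* record when locked), so the full key is fetched and compared only when that cheap */
-/* 16-bit compare already matches. The word-wise compare below stands in for the */
-/* readTablePk()/memcmp() pair. */
-/* --------------------------------------------------------------------------------- */
-#include <cstdint>
-#include <cstring>
-typedef std::uint32_t Uint32;
-
-static bool elementMatches(Uint32 elemHashPart, Uint32 searchHashPart,
-                           const Uint32* storedKey, const Uint32* searchKey,
-                           Uint32 keyWords)
-{
-  if (elemHashPart != searchHashPart) {
-    return false;                          /* cheap reject, no key fetch needed */
-  }
-  /* Only now pay for the full key comparison (len << 2 bytes, as in the routine). */
-  return std::memcmp(storedKey, searchKey, keyWords * sizeof(Uint32)) == 0;
-}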
-
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* END OF GET_ELEMENT MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* MODULE: DELETE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* COMMITDELETE */
-/* INPUT: OPERATION_REC_PTR, PTR TO AN OPERATION RECORD. */
-/* FRAGRECPTR, PTR TO A FRAGMENT RECORD */
-/* */
-/* OUTPUT: */
-/* NONE */
-/* DESCRIPTION: DELETE OPERATIONS ARE COMPLETED AT THE COMMIT OF THE */
-/* TRANSACTION. THIS SUBROUTINE SEARCHES FOR THE ELEMENT AND DELETES IT. IT */
-/* DOES SO BY REPLACING IT WITH THE LAST ELEMENT IN THE BUCKET. IF THE DELETED */
-/* ELEMENT IS ALSO THE LAST ELEMENT THEN IT IS ONLY NECESSARY TO REMOVE IT. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::commitdelete(Signal* signal, bool systemRestart)
-{
- if (!systemRestart) {
- jam();
- signal->theData[0] = fragrecptr.p->myfid;
- signal->theData[1] = fragrecptr.p->myTableId;
- signal->theData[2] = operationRecPtr.p->localdata[0];
- Uint32 localKey = operationRecPtr.p->localdata[0];
- Uint32 pageId = localKey >> MAX_TUPLES_BITS;
- Uint32 pageIndex = localKey & ((1 << MAX_TUPLES_BITS) - 1);
- signal->theData[2] = pageId;
- signal->theData[3] = pageIndex;
- EXECUTE_DIRECT(DBTUP, GSN_TUP_DEALLOCREQ, signal, 4);
- jamEntry();
- }//if
- getdirindex(signal);
- tlastPageindex = tgdiPageindex;
- lastPageptr.i = gdiPageptr.i;
- lastPageptr.p = gdiPageptr.p;
- tlastForward = ZTRUE;
- tlastContainerptr = (tlastPageindex << ZSHIFT_PLUS) - (tlastPageindex << ZSHIFT_MINUS);
- tlastContainerptr = tlastContainerptr + ZHEAD_SIZE;
- arrGuard(tlastContainerptr, 2048);
- tlastContainerhead = lastPageptr.p->word32[tlastContainerptr];
- tlastContainerlen = tlastContainerhead >> 26;
- lastPrevpageptr.i = RNIL;
- ptrNull(lastPrevpageptr);
- tlastPrevconptr = 0;
- getLastAndRemove(signal);
-
- delPageptr.i = operationRecPtr.p->elementPage;
- ptrCheckGuard(delPageptr, cpagesize, page8);
- tdelElementptr = operationRecPtr.p->elementPointer;
- /* --------------------------------------------------------------------------------- */
- // Here we have to take extreme care since we do not want locks to end up after the
- // log execution. Thus it is necessary to put back the element in unlocked shape.
- // We thus update the element header to ensure we log an unlocked element. We do not
- // need to restore it later since it is deleted immediately anyway.
- /* --------------------------------------------------------------------------------- */
- const Uint32 hv = operationRecPtr.p->hashvaluePart;
- const Uint32 eh = ElementHeader::setUnlocked(hv, 0);
- delPageptr.p->word32[tdelElementptr] = eh;
- if (operationRecPtr.p->elementPage == lastPageptr.i) {
- if (operationRecPtr.p->elementPointer == tlastElementptr) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THE LAST ELEMENT WAS THE ELEMENT TO BE DELETED. WE NEED NOT COPY IT. */
- /* --------------------------------------------------------------------------------- */
- return;
- }//if
- }//if
- /* --------------------------------------------------------------------------------- */
- /* THE DELETED ELEMENT IS NOT THE LAST. WE READ THE LAST ELEMENT AND OVERWRITE THE */
- /* DELETED ELEMENT. */
- /* --------------------------------------------------------------------------------- */
- tdelContainerptr = operationRecPtr.p->elementContainer;
- tdelForward = operationRecPtr.p->elementIsforward;
- deleteElement(signal);
-}//Dbacc::commitdelete()
-
-/* --------------------------------------------------------------------------------- */
-/* DELETE_ELEMENT */
-/* INPUT: FRAGRECPTR, POINTER TO A FRAGMENT RECORD */
-/* LAST_PAGEPTR, POINTER TO THE PAGE OF THE LAST ELEMENT */
-/* DEL_PAGEPTR, POINTER TO THE PAGE OF THE DELETED ELEMENT */
-/* TLAST_ELEMENTPTR, ELEMENT POINTER OF THE LAST ELEMENT */
-/* TDEL_ELEMENTPTR, ELEMENT POINTER OF THE DELETED ELEMENT */
-/* TLAST_FORWARD, DIRECTION OF LAST ELEMENT */
-/* TDEL_FORWARD, DIRECTION OF DELETED ELEMENT */
-/* TDEL_CONTAINERPTR, CONTAINER POINTER OF DELETED ELEMENT */
-/* DESCRIPTION: COPY LAST ELEMENT TO DELETED ELEMENT AND UPDATE UNDO LOG AND */
-/* UPDATE ANY ACTIVE OPERATION ON THE MOVED ELEMENT. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::deleteElement(Signal* signal)
-{
- OperationrecPtr deOperationRecPtr;
- Uint32 tdeIndex;
- Uint32 tlastMoveElemptr;
- Uint32 tdelMoveElemptr;
- Uint32 guard31;
-
- if (tlastElementptr >= 2048)
- goto deleteElement_index_error1;
- {
- const Uint32 tdeElemhead = lastPageptr.p->word32[tlastElementptr];
- if (fragrecptr.p->createLcp == ZTRUE) {
- datapageptr.p = delPageptr.p;
- cundoinfolength = fragrecptr.p->elementLength;
- if (tdelForward == ZTRUE) {
- jam();
- cundoElemIndex = tdelElementptr;
- } else {
- jam();
- cundoElemIndex = (tdelElementptr + 1) - fragrecptr.p->elementLength;
- }//if
- undoWritingProcess(signal);
- }//if
- tlastMoveElemptr = tlastElementptr;
- tdelMoveElemptr = tdelElementptr;
- guard31 = fragrecptr.p->elementLength - 1;
- for (tdeIndex = 0; tdeIndex <= guard31; tdeIndex++) {
- dbgWord32(delPageptr, tdelMoveElemptr, lastPageptr.p->word32[tlastMoveElemptr]);
- if ((tlastMoveElemptr >= 2048) ||
- (tdelMoveElemptr >= 2048))
- goto deleteElement_index_error2;
- delPageptr.p->word32[tdelMoveElemptr] = lastPageptr.p->word32[tlastMoveElemptr];
- tdelMoveElemptr = tdelMoveElemptr + tdelForward;
- tlastMoveElemptr = tlastMoveElemptr + tlastForward;
- }//for
- if (ElementHeader::getLocked(tdeElemhead)) {
- /* --------------------------------------------------------------------------------- */
- /* THE LAST ELEMENT IS LOCKED AND IS THUS REFERENCED BY AN OPERATION RECORD. WE NEED */
- /* TO UPDATE THE OPERATION RECORD WITH THE NEW REFERENCE TO THE ELEMENT. */
- /* --------------------------------------------------------------------------------- */
- deOperationRecPtr.i = ElementHeader::getOpPtrI(tdeElemhead);
- ptrCheckGuard(deOperationRecPtr, coprecsize, operationrec);
- if (cundoLogActive == ZFALSE) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WE DO NOT BOTHER WITH THIS INFORMATION DURING EXECUTION OF THE UNDO LOG. */
- /* --------------------------------------------------------------------------------- */
- deOperationRecPtr.p->elementPage = delPageptr.i;
- deOperationRecPtr.p->elementContainer = tdelContainerptr;
- deOperationRecPtr.p->elementPointer = tdelElementptr;
- deOperationRecPtr.p->elementIsforward = tdelForward;
- }//if
- /* --------------------------------------------------------------------------------- */
- // We need to take extreme care to not install locked records after system restart.
- // An undo of the delete will reinstall the moved record. We have to ensure that the
- // lock is removed to ensure that no such thing happens.
- /* --------------------------------------------------------------------------------- */
- Uint32 eh = ElementHeader::setUnlocked(deOperationRecPtr.p->hashvaluePart,
- 0);
- lastPageptr.p->word32[tlastElementptr] = eh;
- }//if
- return;
- }
-
- deleteElement_index_error1:
- arrGuard(tlastElementptr, 2048);
- return;
-
- deleteElement_index_error2:
- arrGuard(tdelMoveElemptr + guard31, 2048);
- arrGuard(tlastMoveElemptr, 2048);
- return;
-
-}//Dbacc::deleteElement()
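-
-/* --------------------------------------------------------------------------------- */
-/* ILLUSTRATIVE SKETCH (standalone, not part of the original routine): the "replace */
-/* with the last element" delete used by commitdelete()/deleteElement(). Instead of */
-/* compacting the whole bucket, the last element is copied over the deleted one and */
-/* the container is shortened by one element. A flat array stands in for the page */
-/* words; element direction and undo logging are left out on purpose. */
-/* --------------------------------------------------------------------------------- */
-#include <cstdint>
-typedef std::uint32_t Uint32;
-
-/* Delete the element at 'delPos' from a bucket of 'count' elements of 'elemLen' words */
-/* each, and return the new element count. */
-static Uint32 deleteBySwapWithLast(Uint32* bucket, Uint32 count,
-                                   Uint32 elemLen, Uint32 delPos)
-{
-  const Uint32 lastPos = count - 1;
-  if (delPos != lastPos) {
-    for (Uint32 i = 0; i < elemLen; i++) {
-      bucket[delPos * elemLen + i] = bucket[lastPos * elemLen + i];
-    }
-  }
-  return count - 1;   /* the caller shortens the container accordingly */
-}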
-
-/* --------------------------------------------------------------------------------- */
-/* GET_LAST_AND_REMOVE */
-/* INPUT: */
-/* LAST_PAGEPTR PAGE POINTER OF FIRST CONTAINER IN SEARCH OF LAST*/
-/* TLAST_CONTAINERPTR CONTAINER INDEX OF THE SAME */
-/* TLAST_CONTAINERHEAD CONTAINER HEADER OF THE SAME */
-/* TLAST_PAGEINDEX PAGE INDEX OF THE SAME */
-/* TLAST_FORWARD CONTAINER DIRECTION OF THE SAME */
-/* TLAST_CONTAINERLEN CONTAINER LENGTH OF THE SAME */
-/* LAST_PREVPAGEPTR PAGE POINTER OF PREVIOUS CONTAINER OF THE SAME */
-/* TLAST_PREVCONPTR CONTAINER INDEX OF PREVIOUS CONTAINER OF THE SAME*/
-/* */
-/* OUTPUT: */
-/* ALL VARIABLES FROM INPUT BUT NOW CONTAINING INFO ABOUT LAST */
-/* CONTAINER. */
-/* TLAST_ELEMENTPTR LAST ELEMENT POINTER IN LAST CONTAINER */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::getLastAndRemove(Signal* signal)
-{
- DirRangePtr glrOverflowrangeptr;
- DirectoryarrayPtr glrOverflowDirptr;
- Uint32 tglrHead;
- Uint32 tglrTmp;
-
- GLR_LOOP_10:
- if (((tlastContainerhead >> 7) & 0x3) != 0) {
- jam();
- lastPrevpageptr.i = lastPageptr.i;
- lastPrevpageptr.p = lastPageptr.p;
- tlastPrevconptr = tlastContainerptr;
- tlastPageindex = tlastContainerhead & 0x7f;
- if (((tlastContainerhead >> 9) & 0x1) == ZFALSE) {
- jam();
- arrGuard(tlastContainerptr + 1, 2048);
- tglrTmp = lastPageptr.p->word32[tlastContainerptr + 1];
- glrOverflowrangeptr.i = fragrecptr.p->overflowdir;
- ptrCheckGuard(glrOverflowrangeptr, cdirrangesize, dirRange);
- arrGuard((tglrTmp >> 8), 256);
- glrOverflowDirptr.i = glrOverflowrangeptr.p->dirArray[tglrTmp >> 8];
- ptrCheckGuard(glrOverflowDirptr, cdirarraysize, directoryarray);
- lastPageptr.i = glrOverflowDirptr.p->pagep[tglrTmp & 0xff];
- ptrCheckGuard(lastPageptr, cpagesize, page8);
- }//if
- tlastContainerptr = (tlastPageindex << ZSHIFT_PLUS) - (tlastPageindex << ZSHIFT_MINUS);
- if (((tlastContainerhead >> 7) & 3) == ZLEFT) {
- jam();
- tlastForward = ZTRUE;
- tlastContainerptr = tlastContainerptr + ZHEAD_SIZE;
- } else if (((tlastContainerhead >> 7) & 3) == ZRIGHT) {
- jam();
- tlastForward = cminusOne;
- tlastContainerptr = ((tlastContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE;
- } else {
- ndbrequire(false);
- return;
- }//if
- arrGuard(tlastContainerptr, 2048);
- tlastContainerhead = lastPageptr.p->word32[tlastContainerptr];
- tlastContainerlen = tlastContainerhead >> 26;
- ndbrequire(tlastContainerlen >= ((Uint32)ZCON_HEAD_SIZE + fragrecptr.p->elementLength));
- goto GLR_LOOP_10;
- }//if
- tlastContainerlen = tlastContainerlen - fragrecptr.p->elementLength;
- if (tlastForward == ZTRUE) {
- jam();
- tlastElementptr = tlastContainerptr + tlastContainerlen;
- } else {
- jam();
- tlastElementptr = (tlastContainerptr + (ZCON_HEAD_SIZE - 1)) - tlastContainerlen;
- }//if
- rlPageptr.i = lastPageptr.i;
- rlPageptr.p = lastPageptr.p;
- trlPageindex = tlastPageindex;
- if (((tlastContainerhead >> 10) & 1) == 1) {
- /* --------------------------------------------------------------------------------- */
- /* WE HAVE OWNERSHIP OF BOTH PARTS OF THE CONTAINER ENDS. */
- /* --------------------------------------------------------------------------------- */
- if (tlastContainerlen < ZDOWN_LIMIT) {
- /* --------------------------------------------------------------------------------- */
- /* WE HAVE DECREASED THE SIZE BELOW THE DOWN LIMIT, WE MUST GIVE UP THE OTHER */
- /* SIDE OF THE BUFFER. */
- /* --------------------------------------------------------------------------------- */
- tlastContainerhead = tlastContainerhead ^ (1 << 10);
- trlRelCon = ZFALSE;
- if (tlastForward == ZTRUE) {
- jam();
- turlIndex = tlastContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
- releaseRightlist(signal);
- } else {
- jam();
- tullIndex = tlastContainerptr - (ZBUF_SIZE - ZCON_HEAD_SIZE);
- releaseLeftlist(signal);
- }//if
- }//if
- }//if
- if (tlastContainerlen <= 2) {
- ndbrequire(tlastContainerlen == 2);
- if (lastPrevpageptr.i != RNIL) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THE LAST CONTAINER IS EMPTY AND IS NOT THE FIRST CONTAINER (THE FIRST IS NEVER REMOVED). */
- /* DELETE THE LAST CONTAINER AND UPDATE THE PREVIOUS CONTAINER. ALSO PUT THIS */
- /* CONTAINER IN FREE CONTAINER LIST OF THE PAGE. */
- /* --------------------------------------------------------------------------------- */
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- datapageptr.p = lastPrevpageptr.p;
- cundoElemIndex = tlastPrevconptr;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- ndbrequire(tlastPrevconptr < 2048);
- tglrTmp = lastPrevpageptr.p->word32[tlastPrevconptr] >> 9;
- dbgWord32(lastPrevpageptr, tlastPrevconptr, tglrTmp << 9);
- lastPrevpageptr.p->word32[tlastPrevconptr] = tglrTmp << 9;
- trlRelCon = ZTRUE;
- if (tlastForward == ZTRUE) {
- jam();
- tullIndex = tlastContainerptr;
- releaseLeftlist(signal);
- } else {
- jam();
- turlIndex = tlastContainerptr;
- releaseRightlist(signal);
- }//if
- return;
- }//if
- }//if
- tglrHead = tlastContainerhead << 6;
- tglrHead = tglrHead >> 6;
- tglrHead = tglrHead | (tlastContainerlen << 26);
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- datapageptr.p = lastPageptr.p;
- cundoElemIndex = tlastContainerptr;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- dbgWord32(lastPageptr, tlastContainerptr, tglrHead);
- arrGuard(tlastContainerptr, 2048);
- lastPageptr.p->word32[tlastContainerptr] = tglrHead;
-}//Dbacc::getLastAndRemove()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_LEFTLIST */
-/* INPUT: */
-/* RL_PAGEPTR PAGE POINTER OF CONTAINER TO BE RELEASED */
-/* TRL_PAGEINDEX PAGE INDEX OF CONTAINER TO BE RELEASED */
-/* TULL_INDEX INDEX OF CONTAINER TO BE RELEASED */
-/* TRL_REL_CON TRUE IF CONTAINER RELEASED OTHERWISE ONLY */
-/* A PART IS RELEASED. */
-/* */
-/* OUTPUT: */
-/* NONE */
-/* */
-/* THE FREE LIST OF LEFT FREE BUFFERS IN THE PAGE WILL BE UPDATED. */
-/* TULL_INDEX IS THE INDEX OF THE FIRST WORD IN THE LEFT SIDE OF THE BUFFER. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseLeftlist(Signal* signal)
-{
- Uint32 tullTmp;
- Uint32 tullTmp1;
-
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- datapageptr.p = rlPageptr.p;
- cundoElemIndex = tullIndex;
- cundoinfolength = 2;
- undoWritingProcess(signal);
- }//if
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- cundoElemIndex = ZPOS_EMPTY_LIST;
- cundoinfolength = 2;
- undoWritingProcess(signal);
- }//if
- /* --------------------------------------------------------------------------------- */
- /* IF A CONTAINER IS RELEASED AND NOT ONLY A PART THEN WE HAVE TO REMOVE IT */
- /* FROM THE LIST OF USED CONTAINERS IN THE PAGE. THIS IN ORDER TO ENSURE THAT */
- /* WE CAN FIND ALL LOCKED ELEMENTS DURING LOCAL CHECKPOINT. */
- /* --------------------------------------------------------------------------------- */
- if (trlRelCon == ZTRUE) {
- arrGuard(tullIndex, 2048);
- trlHead = rlPageptr.p->word32[tullIndex];
- trlNextused = (trlHead >> 11) & 0x7f;
- trlPrevused = (trlHead >> 18) & 0x7f;
- if (trlNextused < ZEMPTYLIST) {
- jam();
- tullTmp1 = (trlNextused << ZSHIFT_PLUS) - (trlNextused << ZSHIFT_MINUS);
- tullTmp1 = tullTmp1 + ZHEAD_SIZE;
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- cundoElemIndex = tullTmp1;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- tullTmp = rlPageptr.p->word32[tullTmp1] & 0xfe03ffff;
- dbgWord32(rlPageptr, tullTmp1, tullTmp | (trlPrevused << 18));
- rlPageptr.p->word32[tullTmp1] = tullTmp | (trlPrevused << 18);
- } else {
- ndbrequire(trlNextused == ZEMPTYLIST);
- jam();
- }//if
- if (trlPrevused < ZEMPTYLIST) {
- jam();
- tullTmp1 = (trlPrevused << ZSHIFT_PLUS) - (trlPrevused << ZSHIFT_MINUS);
- tullTmp1 = tullTmp1 + ZHEAD_SIZE;
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- cundoElemIndex = tullTmp1;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- tullTmp = rlPageptr.p->word32[tullTmp1] & 0xfffc07ff;
- dbgWord32(rlPageptr, tullTmp1, tullTmp | (trlNextused << 11));
- rlPageptr.p->word32[tullTmp1] = tullTmp | (trlNextused << 11);
- } else {
- ndbrequire(trlPrevused == ZEMPTYLIST);
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WE ARE FIRST IN THE LIST AND THUS WE NEED TO UPDATE THE FIRST POINTER. */
- /* --------------------------------------------------------------------------------- */
- tullTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xc07fffff;
- dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, tullTmp | (trlNextused << 23));
- rlPageptr.p->word32[ZPOS_EMPTY_LIST] = tullTmp | (trlNextused << 23);
- }//if
- }//if
- dbgWord32(rlPageptr, tullIndex + 1, ZEMPTYLIST);
- arrGuard(tullIndex + 1, 2048);
- rlPageptr.p->word32[tullIndex + 1] = ZEMPTYLIST;
- tullTmp1 = (rlPageptr.p->word32[ZPOS_EMPTY_LIST] >> 7) & 0x7f;
- dbgWord32(rlPageptr, tullIndex, tullTmp1);
- arrGuard(tullIndex, 2048);
- rlPageptr.p->word32[tullIndex] = tullTmp1;
- if (tullTmp1 < ZEMPTYLIST) {
- jam();
- tullTmp1 = (tullTmp1 << ZSHIFT_PLUS) - (tullTmp1 << ZSHIFT_MINUS);
- tullTmp1 = (tullTmp1 + ZHEAD_SIZE) + 1;
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- cundoElemIndex = tullTmp1;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- dbgWord32(rlPageptr, tullTmp1, trlPageindex);
- rlPageptr.p->word32[tullTmp1] = trlPageindex; /* UPDATES PREV POINTER IN THE NEXT FREE */
- } else {
- ndbrequire(tullTmp1 == ZEMPTYLIST);
- }//if
- tullTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST];
- tullTmp = (((tullTmp >> 14) << 14) | (trlPageindex << 7)) | (tullTmp & 0x7f);
- dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, tullTmp);
- rlPageptr.p->word32[ZPOS_EMPTY_LIST] = tullTmp;
- dbgWord32(rlPageptr, ZPOS_ALLOC_CONTAINERS, rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1);
- rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1;
- ndbrequire(rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] <= ZNIL);
- if (((rlPageptr.p->word32[ZPOS_EMPTY_LIST] >> ZPOS_PAGE_TYPE_BIT) & 3) == 1) {
- jam();
- colPageptr.i = rlPageptr.i;
- colPageptr.p = rlPageptr.p;
- ptrCheck(colPageptr, cpagesize, page8);
- checkoverfreelist(signal);
- }//if
-}//Dbacc::releaseLeftlist()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_RIGHTLIST */
-/* INPUT: */
-/* RL_PAGEPTR PAGE POINTER OF CONTAINER TO BE RELEASED */
-/* TRL_PAGEINDEX PAGE INDEX OF CONTAINER TO BE RELEASED */
-/* TURL_INDEX INDEX OF CONTAINER TO BE RELEASED */
-/* TRL_REL_CON TRUE IF CONTAINER RELEASED OTHERWISE ONLY */
-/* A PART IS RELEASED. */
-/* */
-/* OUTPUT: */
-/* NONE */
-/* */
-/* THE FREE LIST OF RIGHT FREE BUFFERS IN THE PAGE WILL BE UPDATED. */
-/* TURL_INDEX IS THE INDEX OF THE FIRST WORD IN THE RIGHT SIDE OF */
-/* THE BUFFER, WHICH IS THE LAST WORD IN THE BUFFER. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseRightlist(Signal* signal)
-{
- Uint32 turlTmp1;
- Uint32 turlTmp;
-
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- datapageptr.p = rlPageptr.p;
- cundoElemIndex = turlIndex;
- cundoinfolength = 2;
- undoWritingProcess(signal);
- }//if
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- cundoElemIndex = ZPOS_EMPTY_LIST;
- cundoinfolength = 2;
- undoWritingProcess(signal);
- }//if
- /* --------------------------------------------------------------------------------- */
- /* IF A CONTAINER IS RELEASED AND NOT ONLY A PART THEN WE HAVE TO REMOVE IT */
- /* FROM THE LIST OF USED CONTAINERS IN THE PAGE. THIS IN ORDER TO ENSURE THAT */
- /* WE CAN FIND ALL LOCKED ELEMENTS DURING LOCAL CHECKPOINT. */
- /* --------------------------------------------------------------------------------- */
- if (trlRelCon == ZTRUE) {
- jam();
- arrGuard(turlIndex, 2048);
- trlHead = rlPageptr.p->word32[turlIndex];
- trlNextused = (trlHead >> 11) & 0x7f;
- trlPrevused = (trlHead >> 18) & 0x7f;
- if (trlNextused < ZEMPTYLIST) {
- jam();
- turlTmp1 = (trlNextused << ZSHIFT_PLUS) - (trlNextused << ZSHIFT_MINUS);
- turlTmp1 = turlTmp1 + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- cundoElemIndex = turlTmp1;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- turlTmp = rlPageptr.p->word32[turlTmp1] & 0xfe03ffff;
- dbgWord32(rlPageptr, turlTmp1, turlTmp | (trlPrevused << 18));
- rlPageptr.p->word32[turlTmp1] = turlTmp | (trlPrevused << 18);
- } else {
- ndbrequire(trlNextused == ZEMPTYLIST);
- jam();
- }//if
- if (trlPrevused < ZEMPTYLIST) {
- jam();
- turlTmp1 = (trlPrevused << ZSHIFT_PLUS) - (trlPrevused << ZSHIFT_MINUS);
- turlTmp1 = turlTmp1 + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- cundoElemIndex = turlTmp1;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- turlTmp = rlPageptr.p->word32[turlTmp1] & 0xfffc07ff;
- dbgWord32(rlPageptr, turlTmp1, turlTmp | (trlNextused << 11));
- rlPageptr.p->word32[turlTmp1] = turlTmp | (trlNextused << 11);
- } else {
- ndbrequire(trlPrevused == ZEMPTYLIST);
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WE ARE FIRST IN THE LIST AND THUS WE NEED TO UPDATE THE FIRST POINTER */
- /* OF THE RIGHT CONTAINER LIST. */
- /* --------------------------------------------------------------------------------- */
- turlTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xff80ffff;
- dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, turlTmp | (trlNextused << 16));
- rlPageptr.p->word32[ZPOS_EMPTY_LIST] = turlTmp | (trlNextused << 16);
- }//if
- }//if
- dbgWord32(rlPageptr, turlIndex + 1, ZEMPTYLIST);
- arrGuard(turlIndex + 1, 2048);
- rlPageptr.p->word32[turlIndex + 1] = ZEMPTYLIST;
- turlTmp1 = rlPageptr.p->word32[ZPOS_EMPTY_LIST] & 0x7f;
- dbgWord32(rlPageptr, turlIndex, turlTmp1);
- arrGuard(turlIndex, 2048);
- rlPageptr.p->word32[turlIndex] = turlTmp1;
- if (turlTmp1 < ZEMPTYLIST) {
- jam();
- turlTmp = (turlTmp1 << ZSHIFT_PLUS) - (turlTmp1 << ZSHIFT_MINUS);
- turlTmp = turlTmp + ((ZHEAD_SIZE + ZBUF_SIZE) - (ZCON_HEAD_SIZE - 1));
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- cundoElemIndex = turlTmp;
- cundoinfolength = 1;
- undoWritingProcess(signal);
- }//if
- dbgWord32(rlPageptr, turlTmp, trlPageindex);
- rlPageptr.p->word32[turlTmp] = trlPageindex; /* UPDATES PREV POINTER IN THE NEXT FREE */
- } else {
- ndbrequire(turlTmp1 == ZEMPTYLIST);
- }//if
- turlTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST];
- dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, ((turlTmp >> 7) << 7) | trlPageindex);
- rlPageptr.p->word32[ZPOS_EMPTY_LIST] = ((turlTmp >> 7) << 7) | trlPageindex;
- dbgWord32(rlPageptr, ZPOS_ALLOC_CONTAINERS, rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1);
- rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1;
- ndbrequire(rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] <= ZNIL);
- if (((rlPageptr.p->word32[ZPOS_EMPTY_LIST] >> ZPOS_PAGE_TYPE_BIT) & 3) == 1) {
- jam();
- colPageptr.i = rlPageptr.i;
- colPageptr.p = rlPageptr.p;
- checkoverfreelist(signal);
- }//if
-}//Dbacc::releaseRightlist()
-
-/* --------------------------------------------------------------------------------- */
-/* CHECKOVERFREELIST */
-/* INPUT: COL_PAGEPTR, POINTER OF AN OVERFLOW PAGE RECORD. */
-/* DESCRIPTION: CHECKS IF THE PAGE HAS TO BE PUT IN THE FREE LIST OF OVERFLOW */
-/* PAGES. WHEN IT DOES, AN OVERFLOW REC PTR WILL BE ALLOCATED */
-/* TO KEEP INFORMATION ABOUT THE PAGE. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::checkoverfreelist(Signal* signal)
-{
- Uint32 tcolTmp;
-
- if (fragrecptr.p->loadingFlag == ZFALSE) {
- tcolTmp = colPageptr.p->word32[ZPOS_ALLOC_CONTAINERS];
- if (tcolTmp <= ZFREE_LIMIT) {
- if (tcolTmp == 0) {
- jam();
- ropPageptr = colPageptr;
- releaseOverpage(signal);
- } else {
- jam();
- if (colPageptr.p->word32[ZPOS_OVERFLOWREC] == RNIL) {
- ndbrequire(cfirstfreeoverrec != RNIL);
- jam();
- seizeOverRec(signal);
- sorOverflowRecPtr.p->dirindex = colPageptr.p->word32[ZPOS_PAGE_ID];
- sorOverflowRecPtr.p->overpage = colPageptr.i;
- dbgWord32(colPageptr, ZPOS_OVERFLOWREC, sorOverflowRecPtr.i);
- colPageptr.p->word32[ZPOS_OVERFLOWREC] = sorOverflowRecPtr.i;
- porOverflowRecPtr = sorOverflowRecPtr;
- putOverflowRecInFrag(signal);
- }//if
- }//if
- }//if
- }//if
-}//Dbacc::checkoverfreelist()
-
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* */
-/* END OF DELETE MODULE */
-/* */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* */
-/* COMMIT AND ABORT MODULE */
-/* */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* ABORT_OPERATION */
-/*DESCRIPTION: AN OPERATION RECORD CAN BE IN THE LOCK QUEUE OF AN ELEMENT OR */
-/*OWN THE LOCK. THIS SUBROUTINE CHECKS THE LOCK STATE OF THE OPERATION. */
-/*THE OPERATION RECORD WILL BE REMOVED FROM THE QUEUE IF IT BELONGED TO */
-/*ONE, OTHERWISE THE ELEMENT HEAD WILL BE UPDATED. */
-/* ------------------------------------------------------------------------- */
-void Dbacc::abortOperation(Signal* signal)
-{
- OperationrecPtr aboOperRecPtr;
- OperationrecPtr TaboOperRecPtr;
- Page8Ptr aboPageidptr;
- Uint32 taboElementptr;
- Uint32 tmp2Olq;
-
- if (operationRecPtr.p->lockOwner == ZTRUE) {
- takeOutLockOwnersList(signal, operationRecPtr);
- if (operationRecPtr.p->insertIsDone == ZTRUE) {
- jam();
- operationRecPtr.p->elementIsDisappeared = ZTRUE;
- }//if
- if ((operationRecPtr.p->nextParallelQue != RNIL) ||
- (operationRecPtr.p->nextSerialQue != RNIL)) {
- jam();
- releaselock(signal);
- } else {
- /* --------------------------------------------------------------------------------- */
- /* WE ARE OWNER OF THE LOCK AND NO OTHER OPERATIONS ARE QUEUED. IF INSERT OR STANDBY */
- /* WE DELETE THE ELEMENT OTHERWISE WE REMOVE THE LOCK FROM THE ELEMENT. */
- /* --------------------------------------------------------------------------------- */
- if (operationRecPtr.p->elementIsDisappeared == ZFALSE) {
- jam();
- taboElementptr = operationRecPtr.p->elementPointer;
- aboPageidptr.i = operationRecPtr.p->elementPage;
- tmp2Olq = ElementHeader::setUnlocked(operationRecPtr.p->hashvaluePart,
- operationRecPtr.p->scanBits);
- ptrCheckGuard(aboPageidptr, cpagesize, page8);
- dbgWord32(aboPageidptr, taboElementptr, tmp2Olq);
- arrGuard(taboElementptr, 2048);
- aboPageidptr.p->word32[taboElementptr] = tmp2Olq;
- return;
- } else {
- jam();
- commitdelete(signal, false);
- }//if
- }//if
- } else {
- /* --------------------------------------------------------------- */
- // We are not the lock owner.
- /* --------------------------------------------------------------- */
- jam();
- takeOutFragWaitQue(signal);
- if (operationRecPtr.p->prevParallelQue != RNIL) {
- jam();
- /* ---------------------------------------------------------------------------------- */
- /* SINCE WE ARE NOT QUEUE LEADER WE NEED NOT CONSIDER IF THE ELEMENT IS TO BE DELETED.*/
- /* We will simply remove it from the parallel list without any other rearrangements. */
- /* ---------------------------------------------------------------------------------- */
- aboOperRecPtr.i = operationRecPtr.p->prevParallelQue;
- ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
- aboOperRecPtr.p->nextParallelQue = operationRecPtr.p->nextParallelQue;
- if (operationRecPtr.p->nextParallelQue != RNIL) {
- jam();
- aboOperRecPtr.i = operationRecPtr.p->nextParallelQue;
- ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
- aboOperRecPtr.p->prevParallelQue = operationRecPtr.p->prevParallelQue;
- }//if
- } else if (operationRecPtr.p->prevSerialQue != RNIL) {
- /* ------------------------------------------------------------------------- */
- // We are not in the parallel queue owning the lock. Thus we are in another parallel
- // queue further down in the serial queue. We are however first since prevParallelQue
- // == RNIL.
- /* ------------------------------------------------------------------------- */
- if (operationRecPtr.p->nextParallelQue != RNIL) {
- jam();
- /* ------------------------------------------------------------------------- */
- // We have an operation in the queue after us. We simply rearrange this parallel queue.
- // The new leader of this parallel queue will be operation in the serial queue.
- /* ------------------------------------------------------------------------- */
- aboOperRecPtr.i = operationRecPtr.p->nextParallelQue;
- ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
- aboOperRecPtr.p->nextSerialQue = operationRecPtr.p->nextSerialQue;
- aboOperRecPtr.p->prevSerialQue = operationRecPtr.p->prevSerialQue;
- aboOperRecPtr.p->prevParallelQue = RNIL; // Queue Leader
- if (operationRecPtr.p->nextSerialQue != RNIL) {
- jam();
- TaboOperRecPtr.i = operationRecPtr.p->nextSerialQue;
- ptrCheckGuard(TaboOperRecPtr, coprecsize, operationrec);
- TaboOperRecPtr.p->prevSerialQue = aboOperRecPtr.i;
- }//if
- TaboOperRecPtr.i = operationRecPtr.p->prevSerialQue;
- ptrCheckGuard(TaboOperRecPtr, coprecsize, operationrec);
- TaboOperRecPtr.p->nextSerialQue = aboOperRecPtr.i;
- } else {
- jam();
- /* ------------------------------------------------------------------------- */
- // We are the only operation in this parallel queue. We will thus shrink the serial
- // queue.
- /* ------------------------------------------------------------------------- */
- aboOperRecPtr.i = operationRecPtr.p->prevSerialQue;
- ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
- aboOperRecPtr.p->nextSerialQue = operationRecPtr.p->nextSerialQue;
- if (operationRecPtr.p->nextSerialQue != RNIL) {
- jam();
- aboOperRecPtr.i = operationRecPtr.p->nextSerialQue;
- ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
- aboOperRecPtr.p->prevSerialQue = operationRecPtr.p->prevSerialQue;
- }//if
- }//if
- }//if
- }//if
- /* ------------------------------------------------------------------------- */
- // If prevParallelQue = RNIL and prevSerialQue = RNIL and we are not owner of the
- // lock then we cannot be in any lock queue at all.
- /* ------------------------------------------------------------------------- */
-}//Dbacc::abortOperation()
-
-void Dbacc::commitDeleteCheck()
-{
- OperationrecPtr opPtr;
- OperationrecPtr lastOpPtr;
- OperationrecPtr deleteOpPtr;
- bool elementDeleted = false;
- bool deleteCheckOngoing = true;
- Uint32 hashValue = 0;
- lastOpPtr = operationRecPtr;
- opPtr.i = operationRecPtr.p->nextParallelQue;
- while (opPtr.i != RNIL) {
- jam();
- ptrCheckGuard(opPtr, coprecsize, operationrec);
- lastOpPtr = opPtr;
- opPtr.i = opPtr.p->nextParallelQue;
- }//while
- deleteOpPtr = lastOpPtr;
- do {
- if (deleteOpPtr.p->operation == ZDELETE) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* IF THE CURRENT OPERATION TO BE COMMITTED IS A DELETE OPERATION DUE TO A */
- /* SCAN-TAKEOVER THE ACTUAL DELETE WILL BE PERFORMED BY THE PREVIOUS OPERATION (SCAN)*/
-      /* IN THE PARALLEL QUEUE WHICH OWNS THE LOCK. THE PROBLEM IS THAT THE SCAN OPERATION  */
- /* DOES NOT HAVE A HASH VALUE ASSIGNED TO IT SO WE COPY IT FROM THIS OPERATION. */
- /* */
- /* WE ASSUME THAT THIS SOLUTION WILL WORK BECAUSE THE ONLY WAY A SCAN CAN PERFORM */
- /* A DELETE IS BY BEING FOLLOWED BY A NORMAL DELETE-OPERATION THAT HAS A HASH VALUE. */
- /* --------------------------------------------------------------------------------- */
- hashValue = deleteOpPtr.p->hashValue;
- elementDeleted = true;
- deleteCheckOngoing = false;
- } else if ((deleteOpPtr.p->operation == ZREAD) ||
- (deleteOpPtr.p->operation == ZSCAN_OP)) {
- /* --------------------------------------------------------------------------------- */
- /* We are trying to find out whether the commit will in the end delete the tuple. */
-      /* Normally the delete will be the last operation in the list of operations on this   */
-      /* element.                                                                           */
- /* It is however possible to issue reads and scans in the same savepoint as the */
- /* delete operation was issued and these can end up after the delete in the list of */
- /* operations in the parallel queue. Thus if we discover a read or a scan we have to */
- /* continue scanning the list looking for a delete operation. */
- /* --------------------------------------------------------------------------------- */
- deleteOpPtr.i = deleteOpPtr.p->prevParallelQue;
- if (deleteOpPtr.i == RNIL) {
- jam();
- deleteCheckOngoing = false;
- } else {
- jam();
- ptrCheckGuard(deleteOpPtr, coprecsize, operationrec);
- }//if
- } else {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* Finding an UPDATE or INSERT before finding a DELETE means we cannot be deleting */
- /* as the end result of this transaction. */
- /* --------------------------------------------------------------------------------- */
- deleteCheckOngoing = false;
- }//if
- } while (deleteCheckOngoing);
- opPtr = lastOpPtr;
- do {
- jam();
- opPtr.p->commitDeleteCheckFlag = ZTRUE;
- if (elementDeleted) {
- jam();
- opPtr.p->elementIsDisappeared = ZTRUE;
- opPtr.p->hashValue = hashValue;
- }//if
- opPtr.i = opPtr.p->prevParallelQue;
- if (opPtr.i == RNIL) {
- jam();
- break;
- }//if
- ptrCheckGuard(opPtr, coprecsize, operationrec);
- } while (true);
-}//Dbacc::commitDeleteCheck()
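/* A minimal standalone sketch of the backwards scan performed by
 * commitDeleteCheck() above, using a plain std::vector of hypothetical Op
 * records instead of the block's linked operation records; the op kinds and
 * the return value are illustrative assumptions, not the real types.
 */
#include <vector>

namespace sketch {

enum class OpKind { Read, Scan, Insert, Update, Delete };

struct Op {
  OpKind kind;
  unsigned hashValue;
};

/* Walk from the newest queued operation towards the lock owner.  Reads and
 * scans are transparent; the first delete found means the net effect is a
 * delete (and supplies the hash value), while the first insert/update found
 * means the element survives the transaction.
 */
inline bool netEffectIsDelete(const std::vector<Op>& parallelQueue,
                              unsigned& hashValueOut)
{
  for (auto it = parallelQueue.rbegin(); it != parallelQueue.rend(); ++it) {
    if (it->kind == OpKind::Delete) {
      hashValueOut = it->hashValue;
      return true;
    }
    if (it->kind != OpKind::Read && it->kind != OpKind::Scan)
      return false;            // insert/update before any delete
  }
  return false;                // only reads/scans in the queue
}

} // namespace sketch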
-
-/* ------------------------------------------------------------------------- */
-/* COMMIT_OPERATION */
-/* INPUT: OPERATION_REC_PTR, POINTER TO AN OPERATION RECORD */
-/*         DESCRIPTION: THE OPERATION RECORD WILL BE TAKEN OUT OF ANY LOCK QUEUE.   */
-/*         IF IT OWNS THE ELEMENT LOCK, THE HEADER OF THE ELEMENT WILL BE UPDATED.  */
-/* ------------------------------------------------------------------------- */
-void Dbacc::commitOperation(Signal* signal)
-{
- OperationrecPtr tolqTmpPtr;
- Page8Ptr coPageidptr;
- Uint32 tcoElementptr;
- Uint32 tmp2Olq;
-
- if ((operationRecPtr.p->commitDeleteCheckFlag == ZFALSE) &&
- (operationRecPtr.p->operation != ZSCAN_OP) &&
- (operationRecPtr.p->operation != ZREAD)) {
- jam();
- /* This method is used to check whether the end result of the transaction
-       will be to delete the tuple. In this case all operations will be marked
- with elementIsDisappeared = true to ensure that the last operation
- committed will remove the tuple. We only run this once per transaction
- (commitDeleteCheckFlag = true if performed earlier) and we don't
- execute this code when committing a scan operation since committing
- a scan operation only means that the scan is continuing and the scan
- lock is released.
- */
- commitDeleteCheck();
- }//if
- if (operationRecPtr.p->lockOwner == ZTRUE) {
- takeOutLockOwnersList(signal, operationRecPtr);
- if ((operationRecPtr.p->nextParallelQue == RNIL) &&
- (operationRecPtr.p->nextSerialQue == RNIL) &&
- (operationRecPtr.p->elementIsDisappeared == ZFALSE)) {
- /*
-	This is the normal commit path for an operation that owns the lock,
-	has no queued operations behind it and is not deleting the element.
- */
- coPageidptr.i = operationRecPtr.p->elementPage;
- tcoElementptr = operationRecPtr.p->elementPointer;
- tmp2Olq = ElementHeader::setUnlocked(operationRecPtr.p->hashvaluePart,
- operationRecPtr.p->scanBits);
- ptrCheckGuard(coPageidptr, cpagesize, page8);
- dbgWord32(coPageidptr, tcoElementptr, tmp2Olq);
- arrGuard(tcoElementptr, 2048);
- coPageidptr.p->word32[tcoElementptr] = tmp2Olq;
- return;
- } else if ((operationRecPtr.p->nextParallelQue != RNIL) ||
- (operationRecPtr.p->nextSerialQue != RNIL)) {
- jam();
- /*
-	There is a queue of operations waiting for the lock.
-	Release the lock and pass it on to the next operation in line.
- */
- releaselock(signal);
- return;
- } else {
- jam();
- /*
- No queue and elementIsDisappeared is true. We perform the actual delete
- operation.
- */
- commitdelete(signal, false);
- return;
- }//if
- } else {
- /*
- THE OPERATION DOES NOT OWN THE LOCK. IT MUST BE IN A LOCK QUEUE OF THE
- ELEMENT.
- */
- ndbrequire(operationRecPtr.p->prevParallelQue != RNIL);
- jam();
- tolqTmpPtr.i = operationRecPtr.p->prevParallelQue;
- ptrCheckGuard(tolqTmpPtr, coprecsize, operationrec);
- tolqTmpPtr.p->nextParallelQue = operationRecPtr.p->nextParallelQue;
- if (operationRecPtr.p->nextParallelQue != RNIL) {
- jam();
- tolqTmpPtr.i = operationRecPtr.p->nextParallelQue;
- ptrCheckGuard(tolqTmpPtr, coprecsize, operationrec);
- tolqTmpPtr.p->prevParallelQue = operationRecPtr.p->prevParallelQue;
- }//if
-
- /**
- * Check possible lock upgrade
- * 1) Find lock owner
-     * 2) Count transactions in parallel queue
- * 3) If count == 1 and TRANSID(next serial) == TRANSID(lock owner)
- * upgrade next serial
- */
- if(operationRecPtr.p->lockMode)
- {
- jam();
- /**
-       * Committing a non-shared operation can't lead to a lock upgrade
- */
- return;
- }
-
- OperationrecPtr lock_owner;
- lock_owner.i = operationRecPtr.p->prevParallelQue;
- ptrCheckGuard(lock_owner, coprecsize, operationrec);
- Uint32 transid[2] = { lock_owner.p->transId1,
- lock_owner.p->transId2 };
-
-
- while(lock_owner.p->prevParallelQue != RNIL)
- {
- lock_owner.i = lock_owner.p->prevParallelQue;
- ptrCheckGuard(lock_owner, coprecsize, operationrec);
-
- if(lock_owner.p->transId1 != transid[0] ||
- lock_owner.p->transId2 != transid[1])
- {
- jam();
- /**
-	 * If more than one transaction is in the lock queue -> no lock upgrade
- */
- return;
- }
- }
-
- check_lock_upgrade(signal, lock_owner, operationRecPtr);
- }
-}//Dbacc::commitOperation()
-
-void
-Dbacc::check_lock_upgrade(Signal* signal,
- OperationrecPtr lock_owner,
- OperationrecPtr release_op)
-{
- if((lock_owner.p->transId1 == release_op.p->transId1 &&
- lock_owner.p->transId2 == release_op.p->transId2) ||
- release_op.p->lockMode ||
- lock_owner.p->nextSerialQue == RNIL)
- {
- jam();
- /**
-     * No lock upgrade if the released operation belongs to the same transaction,
-     * is non-shared, or the lock owner has no serial queue
- */
- return;
- }
-
- OperationrecPtr next;
- next.i = lock_owner.p->nextSerialQue;
- ptrCheckGuard(next, coprecsize, operationrec);
-
- if(lock_owner.p->transId1 != next.p->transId1 ||
- lock_owner.p->transId2 != next.p->transId2)
- {
- jam();
- /**
-     * No lock upgrade if the head of the serial queue is not the same transaction
- */
- return;
- }
-
- if (getNoParallelTransaction(lock_owner.p) > 1)
- {
- jam();
- /**
-     * No lock upgrade if more than 1 transaction in the parallel queue
- */
- return;
- }
-
- if (getNoParallelTransaction(next.p) > 1)
- {
- jam();
- /**
-     * No lock upgrade if more than 1 transaction in next's parallel queue
- */
- return;
- }
-
- OperationrecPtr tmp;
- tmp.i = lock_owner.p->nextSerialQue = next.p->nextSerialQue;
- if(tmp.i != RNIL)
- {
- ptrCheckGuard(tmp, coprecsize, operationrec);
- ndbassert(tmp.p->prevSerialQue == next.i);
- tmp.p->prevSerialQue = lock_owner.i;
- }
- next.p->nextSerialQue = next.p->prevSerialQue = RNIL;
-
-  // Find end of parallel queue
- tmp = lock_owner;
- Uint32 lockMode = next.p->lockMode > lock_owner.p->lockMode ?
- next.p->lockMode : lock_owner.p->lockMode;
- while(tmp.p->nextParallelQue != RNIL)
- {
- jam();
- tmp.i = tmp.p->nextParallelQue;
- tmp.p->lockMode = lockMode;
- ptrCheckGuard(tmp, coprecsize, operationrec);
- }
- tmp.p->lockMode = lockMode;
-
- next.p->prevParallelQue = tmp.i;
- tmp.p->nextParallelQue = next.i;
-
- OperationrecPtr save = operationRecPtr;
-
- Uint32 localdata[2];
- localdata[0] = lock_owner.p->localdata[0];
- localdata[1] = lock_owner.p->localdata[1];
- do {
- next.p->localdata[0] = localdata[0];
- next.p->localdata[1] = localdata[1];
- next.p->lockMode = lockMode;
-
- operationRecPtr = next;
- executeNextOperation(signal);
- if (next.p->nextParallelQue != RNIL)
- {
- jam();
- next.i = next.p->nextParallelQue;
- ptrCheckGuard(next, coprecsize, operationrec);
- } else {
- jam();
- break;
- }//if
- } while (1);
-
- operationRecPtr = save;
-
-}
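/* A minimal standalone sketch of the core condition that check_lock_upgrade()
 * verifies above before moving the first serial waiter into the parallel
 * queue.  The flat parameters and names here are hypothetical simplifications
 * of the block's operation records.
 */
#include <cstdint>

namespace sketch {

struct TransId { uint32_t id1, id2; };

inline bool sameTrans(const TransId& a, const TransId& b)
{
  return a.id1 == b.id1 && a.id2 == b.id2;
}

/* Upgrade the head of the serial queue only when exactly one transaction
 * holds the parallel queue, the waiter's own parallel queue also contains a
 * single transaction, and that transaction is the lock owner's transaction.
 */
inline bool mayUpgrade(const TransId& lockOwner,
                       unsigned transactionsInOwnerParallelQueue,
                       const TransId& firstSerialWaiter,
                       unsigned transactionsInWaiterParallelQueue)
{
  return transactionsInOwnerParallelQueue == 1 &&
         transactionsInWaiterParallelQueue == 1 &&
         sameTrans(lockOwner, firstSerialWaiter);
}

} // namespace sketch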
-
-/* ------------------------------------------------------------------------- */
-/* RELEASELOCK */
-/* RESETS LOCK OF AN ELEMENT. */
-/* INFORMATION ABOUT THE ELEMENT IS SAVED IN THE OPERATION RECORD */
-/*          THIS INFORMATION IS USED TO UPDATE THE HEADER OF THE ELEMENT      */
-/* ------------------------------------------------------------------------- */
-void Dbacc::releaselock(Signal* signal)
-{
- OperationrecPtr rloOperPtr;
- OperationrecPtr trlOperPtr;
- OperationrecPtr trlTmpOperPtr;
- Uint32 TelementIsDisappeared;
-
- trlOperPtr.i = RNIL;
- if (operationRecPtr.p->nextParallelQue != RNIL) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* NEXT OPERATION TAKES OVER THE LOCK. We will simply move the info from the leader */
- // to the new queue leader.
- /* --------------------------------------------------------------------------------- */
- trlOperPtr.i = operationRecPtr.p->nextParallelQue;
- ptrCheckGuard(trlOperPtr, coprecsize, operationrec);
- copyInOperPtr = trlOperPtr;
- copyOperPtr = operationRecPtr;
- copyOpInfo(signal);
- trlOperPtr.p->prevParallelQue = RNIL;
- if (operationRecPtr.p->nextSerialQue != RNIL) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THERE IS A SERIAL QUEUE. MOVE IT FROM RELEASED OP REC TO THE NEW LOCK OWNER. */
- /* --------------------------------------------------------------------------------- */
- trlOperPtr.p->nextSerialQue = operationRecPtr.p->nextSerialQue;
- trlTmpOperPtr.i = trlOperPtr.p->nextSerialQue;
- ptrCheckGuard(trlTmpOperPtr, coprecsize, operationrec);
- trlTmpOperPtr.p->prevSerialQue = trlOperPtr.i;
- }//if
-
- check_lock_upgrade(signal, copyInOperPtr, operationRecPtr);
- /* --------------------------------------------------------------------------------- */
- /* SINCE THERE ARE STILL ITEMS IN THE PARALLEL QUEUE WE NEED NOT WORRY ABOUT */
- /* STARTING QUEUED OPERATIONS. THUS WE CAN END HERE. */
- /* --------------------------------------------------------------------------------- */
- } else {
- ndbrequire(operationRecPtr.p->nextSerialQue != RNIL);
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THE PARALLEL QUEUE IS EMPTY AND THE SERIAL QUEUE IS NOT EMPTY. WE NEED TO */
- /* REARRANGE LISTS AND START A NUMBER OF OPERATIONS. */
- /* --------------------------------------------------------------------------------- */
- trlOperPtr.i = operationRecPtr.p->nextSerialQue;
- ptrCheckGuard(trlOperPtr, coprecsize, operationrec);
- copyOperPtr = operationRecPtr;
- copyInOperPtr = trlOperPtr;
- copyOpInfo(signal);
- trlOperPtr.p->prevSerialQue = RNIL;
- ndbrequire(trlOperPtr.p->prevParallelQue == RNIL);
- /* --------------------------------------------------------------------------------- */
- /* WE HAVE MOVED TO THE NEXT PARALLEL QUEUE. WE MUST START ALL OF THOSE */
- /* OPERATIONS WHICH UP TILL NOW HAVE BEEN QUEUED WAITING FOR THE LOCK. */
- /* --------------------------------------------------------------------------------- */
- rloOperPtr = operationRecPtr;
- trlTmpOperPtr = trlOperPtr;
- TelementIsDisappeared = trlOperPtr.p->elementIsDisappeared;
- Uint32 ThashValue = trlOperPtr.p->hashValue;
- do {
- /* --------------------------------------------------------------------------------- */
-      // Ensure that all operations in the queue have elementIsDisappeared set
- // to ensure that the element is removed after a previous delete. An insert does
- // however revert this decision since the element is put back again. Local checkpoints
- // complicate life here since they do not execute the next operation but simply change
-      // the state on the operation. We need to set up the variable elementIsDisappeared
- // properly even when local checkpoints and inserts/writes after deletes occur.
- /* --------------------------------------------------------------------------------- */
- trlTmpOperPtr.p->elementIsDisappeared = TelementIsDisappeared;
- if (TelementIsDisappeared == ZTRUE) {
- /* --------------------------------------------------------------------------------- */
- // If the elementIsDisappeared is set then we know that the hashValue is also set
-      // since it always originates from a committing delete or an aborting insert. Scans
- // do not initialise the hashValue and must have this value initialised if they are
- // to successfully commit the delete.
- /* --------------------------------------------------------------------------------- */
- jam();
- trlTmpOperPtr.p->hashValue = ThashValue;
- }//if
- trlTmpOperPtr.p->localdata[0] = trlOperPtr.p->localdata[0];
- trlTmpOperPtr.p->localdata[1] = trlOperPtr.p->localdata[1];
- /* --------------------------------------------------------------------------------- */
- // Restart the queued operation.
- /* --------------------------------------------------------------------------------- */
- operationRecPtr = trlTmpOperPtr;
- TelementIsDisappeared = executeNextOperation(signal);
- ThashValue = operationRecPtr.p->hashValue;
- if (trlTmpOperPtr.p->nextParallelQue != RNIL) {
- jam();
- /* --------------------------------------------------------------------------------- */
- // We will continue with the next operation in the parallel queue and start this as
- // well.
- /* --------------------------------------------------------------------------------- */
- trlTmpOperPtr.i = trlTmpOperPtr.p->nextParallelQue;
- ptrCheckGuard(trlTmpOperPtr, coprecsize, operationrec);
- } else {
- jam();
- break;
- }//if
- } while (1);
- operationRecPtr = rloOperPtr;
- }//if
-
- // Insert the next op into the lock owner list
- insertLockOwnersList(signal, trlOperPtr);
- return;
-}//Dbacc::releaselock()
-
-/* --------------------------------------------------------------------------------- */
-/* COPY_OP_INFO */
-/* INPUT: COPY_IN_OPER_PTR AND COPY_OPER_PTR. */
-/*       DESCRIPTION: INFORMATION ABOUT THE ELEMENT WILL BE MOVED FROM THE OPERATION  */
-/*                    RECORD TO THE QUEUED OP RECORD. THE QUEUED OP RECORD TAKES OVER */
-/*                    THE LOCK.                                                       */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::copyOpInfo(Signal* signal)
-{
- Page8Ptr coiPageidptr;
-
- copyInOperPtr.p->elementPage = copyOperPtr.p->elementPage;
- copyInOperPtr.p->elementIsforward = copyOperPtr.p->elementIsforward;
- copyInOperPtr.p->elementContainer = copyOperPtr.p->elementContainer;
- copyInOperPtr.p->elementPointer = copyOperPtr.p->elementPointer;
- copyInOperPtr.p->scanBits = copyOperPtr.p->scanBits;
- copyInOperPtr.p->hashvaluePart = copyOperPtr.p->hashvaluePart;
- copyInOperPtr.p->elementIsDisappeared = copyOperPtr.p->elementIsDisappeared;
- if (copyInOperPtr.p->elementIsDisappeared == ZTRUE) {
- /* --------------------------------------------------------------------------------- */
- // If the elementIsDisappeared is set then we know that the hashValue is also set
-      // since it always originates from a committing delete or an aborting insert. Scans
- // do not initialise the hashValue and must have this value initialised if they are
- // to successfully commit the delete.
- /* --------------------------------------------------------------------------------- */
- jam();
- copyInOperPtr.p->hashValue = copyOperPtr.p->hashValue;
- }//if
- coiPageidptr.i = copyOperPtr.p->elementPage;
- ptrCheckGuard(coiPageidptr, cpagesize, page8);
- const Uint32 tmp = ElementHeader::setLocked(copyInOperPtr.i);
- dbgWord32(coiPageidptr, copyOperPtr.p->elementPointer, tmp);
- arrGuard(copyOperPtr.p->elementPointer, 2048);
- coiPageidptr.p->word32[copyOperPtr.p->elementPointer] = tmp;
- copyInOperPtr.p->localdata[0] = copyOperPtr.p->localdata[0];
- copyInOperPtr.p->localdata[1] = copyOperPtr.p->localdata[1];
-}//Dbacc::copyOpInfo()
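/* A sketch of the element-header hand-over performed by copyOpInfo() above:
 * an element header either carries the operation record index of the lock
 * owner (locked) or the hash value part plus scan bits (unlocked).  The
 * representation below is a hypothetical tagged struct for illustration, not
 * the real ElementHeader bit layout.
 */
#include <cstdint>

namespace sketch {

struct ElementHeaderSketch {
  bool     locked;
  uint32_t opRecIndex;     // valid when locked: new lock owner's record index
  uint32_t hashValuePart;  // valid when unlocked
  uint32_t scanBits;       // valid when unlocked
};

inline ElementHeaderSketch makeLocked(uint32_t newOwnerOpRec)
{
  return ElementHeaderSketch{ true, newOwnerOpRec, 0, 0 };
}

inline ElementHeaderSketch makeUnlocked(uint32_t hashValuePart,
                                        uint32_t scanBits)
{
  return ElementHeaderSketch{ false, 0, hashValuePart, scanBits };
}

} // namespace sketch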
-
-/* ******************--------------------------------------------------------------- */
-/* EXECUTE NEXT OPERATION */
-/* NEXT OPERATION IN A LOCK QUEUE WILL BE EXECUTED. */
-/* --------------------------------------------------------------------------------- */
-Uint32 Dbacc::executeNextOperation(Signal* signal)
-{
- ndbrequire(operationRecPtr.p->transactionstate == ACTIVE);
- if (fragrecptr.p->stopQueOp == ZTRUE) {
- Uint32 TelemDisappeared;
- jam();
- TelemDisappeared = operationRecPtr.p->elementIsDisappeared;
- if ((operationRecPtr.p->elementIsDisappeared == ZTRUE) &&
- (operationRecPtr.p->prevParallelQue == RNIL) &&
- ((operationRecPtr.p->operation == ZINSERT) ||
- (operationRecPtr.p->operation == ZWRITE))) {
- jam();
- /* --------------------------------------------------------------------------------- */
- // In this case we do not wish to change the elementIsDisappeared since that would
- // create an error the next time this method is called for this operation after local
- // checkpoint starts up operations again. We must however ensure that operations
- // that follow in the queue do not get the value ZTRUE when actually an INSERT/WRITE
- // precedes them (only if the INSERT/WRITE is the first operation).
- /* --------------------------------------------------------------------------------- */
- TelemDisappeared = ZFALSE;
- }//if
- /* --------------------------------------------------------------------------------- */
- /* A LOCAL CHECKPOINT HAS STOPPED OPERATIONS. WE MUST NOT START THE OPERATION */
- /* AT THIS TIME. WE SET THE STATE TO INDICATE THAT WE ARE READY TO START AS */
- /* SOON AS WE ARE ALLOWED. */
- /* --------------------------------------------------------------------------------- */
- operationRecPtr.p->opState = WAIT_EXE_OP;
- return TelemDisappeared;
- }//if
- takeOutFragWaitQue(signal);
- if (operationRecPtr.p->elementIsDisappeared == ZTRUE) {
- /* --------------------------------------------------------------------------------- */
-    /*       PREVIOUS OPERATION WAS A DELETE OPERATION AND THE ELEMENT IS ALREADY DELETED.      */
- /* --------------------------------------------------------------------------------- */
- if (((operationRecPtr.p->operation != ZINSERT) &&
- (operationRecPtr.p->operation != ZWRITE)) ||
- (operationRecPtr.p->prevParallelQue != RNIL)) {
- if (operationRecPtr.p->operation != ZSCAN_OP ||
- operationRecPtr.p->isAccLockReq) {
- jam();
- /* --------------------------------------------------------------------------------- */
-	// Updates and reads after a previous delete simply abort with a read error indicating
-	// that the tuple did not exist. The same applies to inserts and writes that are not
-	// the first operation.
- /* --------------------------------------------------------------------------------- */
- operationRecPtr.p->transactionstate = WAIT_COMMIT_ABORT;
- signal->theData[0] = operationRecPtr.p->userptr;
- signal->theData[1] = ZREAD_ERROR;
- sendSignal(operationRecPtr.p->userblockref, GSN_ACCKEYREF, signal, 2, JBB);
- return operationRecPtr.p->elementIsDisappeared;
- } else {
- /* --------------------------------------------------------------------------------- */
- /* ABORT OF OPERATION NEEDED BUT THE OPERATION IS A SCAN => SPECIAL TREATMENT. */
- /* IF THE SCAN WAITS IN QUEUE THEN WE MUST REMOVE THE OPERATION FROM THE SCAN */
- /* LOCK QUEUE AND IF NO MORE OPERATIONS ARE QUEUED THEN WE SHOULD RESTART THE */
- /* SCAN PROCESS. OTHERWISE WE SIMPLY RELEASE THE OPERATION AND DECREASE THE */
- /* NUMBER OF LOCKS HELD. */
- /* --------------------------------------------------------------------------------- */
- takeOutScanLockQueue(operationRecPtr.p->scanRecPtr);
- putReadyScanQueue(signal, operationRecPtr.p->scanRecPtr);
- return operationRecPtr.p->elementIsDisappeared;
- }//if
- }//if
- /* --------------------------------------------------------------------------------- */
-    // Inserts and writes can continue but need to be converted to inserts.
- /* --------------------------------------------------------------------------------- */
- jam();
- operationRecPtr.p->elementIsDisappeared = ZFALSE;
- operationRecPtr.p->operation = ZINSERT;
- operationRecPtr.p->insertIsDone = ZTRUE;
- } else if (operationRecPtr.p->operation == ZINSERT) {
- bool abortFlag = true;
- if (operationRecPtr.p->prevParallelQue != RNIL) {
- OperationrecPtr prevOpPtr;
- jam();
- prevOpPtr.i = operationRecPtr.p->prevParallelQue;
- ptrCheckGuard(prevOpPtr, coprecsize, operationrec);
- if (prevOpPtr.p->operation == ZDELETE) {
- jam();
- abortFlag = false;
- }//if
- }//if
- if (abortFlag) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* ELEMENT STILL REMAINS AND WE ARE TRYING TO INSERT IT AGAIN. THIS IS CLEARLY */
- /* NOT A GOOD IDEA. */
- /* --------------------------------------------------------------------------------- */
- operationRecPtr.p->transactionstate = WAIT_COMMIT_ABORT;
- signal->theData[0] = operationRecPtr.p->userptr;
- signal->theData[1] = ZWRITE_ERROR;
- sendSignal(operationRecPtr.p->userblockref, GSN_ACCKEYREF, signal, 2, JBB);
- return operationRecPtr.p->elementIsDisappeared;
- }//if
- }//if
- if (operationRecPtr.p->operation == ZSCAN_OP &&
- ! operationRecPtr.p->isAccLockReq) {
- jam();
- takeOutScanLockQueue(operationRecPtr.p->scanRecPtr);
- putReadyScanQueue(signal, operationRecPtr.p->scanRecPtr);
- } else {
- jam();
- sendAcckeyconf(signal);
- sendSignal(operationRecPtr.p->userblockref, GSN_ACCKEYCONF, signal, 6, JBB);
- }//if
- return operationRecPtr.p->elementIsDisappeared;
-}//Dbacc::executeNextOperation()
-
-/* --------------------------------------------------------------------------------- */
-/* TAKE_OUT_FRAG_WAIT_QUE */
-/*       DESCRIPTION: AN OPERATION WHICH OWNS A LOCK ON AN ELEMENT IS IN A LIST    */
-/*               OF THE FRAGMENT. THIS LIST IS USED TO STOP THE QUEUED OPERATIONS  */
-/*               DURING THE CREATE CHECKPOINT PROCESS, FOR STOPPING AND RESTARTING */
-/*               THE OPERATIONS. THIS SUBROUTINE TAKES AN OPERATION RECORD OUT OF  */
-/*               THE LIST.                                                         */
-/* -------------------------------------------------------------------------------- */
-void Dbacc::takeOutFragWaitQue(Signal* signal)
-{
- OperationrecPtr tofwqOperRecPtr;
-
- if (operationRecPtr.p->opState == WAIT_IN_QUEUE) {
- if (fragrecptr.p->sentWaitInQueOp == operationRecPtr.i) {
- jam();
- fragrecptr.p->sentWaitInQueOp = operationRecPtr.p->nextQueOp;
- }//if
- if (operationRecPtr.p->prevQueOp != RNIL) {
- jam();
- tofwqOperRecPtr.i = operationRecPtr.p->prevQueOp;
- ptrCheckGuard(tofwqOperRecPtr, coprecsize, operationrec);
- tofwqOperRecPtr.p->nextQueOp = operationRecPtr.p->nextQueOp;
- } else {
- jam();
- fragrecptr.p->firstWaitInQueOp = operationRecPtr.p->nextQueOp;
- }//if
- if (operationRecPtr.p->nextQueOp != RNIL) {
- jam();
- tofwqOperRecPtr.i = operationRecPtr.p->nextQueOp;
- ptrCheckGuard(tofwqOperRecPtr, coprecsize, operationrec);
- tofwqOperRecPtr.p->prevQueOp = operationRecPtr.p->prevQueOp;
- } else {
- jam();
- fragrecptr.p->lastWaitInQueOp = operationRecPtr.p->prevQueOp;
- }//if
- operationRecPtr.p->opState = FREE_OP;
- return;
- } else {
- ndbrequire(operationRecPtr.p->opState == FREE_OP);
- }//if
-}//Dbacc::takeOutFragWaitQue()
-
-/**
- * takeOutLockOwnersList
- *
- * Description: Take out an operation from the doubly linked
- * lock owners list on the fragment.
- *
- */
-void Dbacc::takeOutLockOwnersList(Signal* signal,
- const OperationrecPtr& outOperPtr)
-{
- const Uint32 Tprev = outOperPtr.p->prevLockOwnerOp;
- const Uint32 Tnext = outOperPtr.p->nextLockOwnerOp;
-
-#ifdef VM_TRACE
- // Check that operation is already in the list
- OperationrecPtr tmpOperPtr;
- bool inList = false;
- tmpOperPtr.i = fragrecptr.p->lockOwnersList;
- while (tmpOperPtr.i != RNIL){
- ptrCheckGuard(tmpOperPtr, coprecsize, operationrec);
- if (tmpOperPtr.i == outOperPtr.i)
- inList = true;
- tmpOperPtr.i = tmpOperPtr.p->nextLockOwnerOp;
- }
- ndbrequire(inList == true);
-#endif
-
- ndbrequire(outOperPtr.p->lockOwner == ZTRUE);
- outOperPtr.p->lockOwner = ZFALSE;
-
- // Fast path through the code for the common case.
- if ((Tprev == RNIL) && (Tnext == RNIL)) {
- ndbrequire(fragrecptr.p->lockOwnersList == outOperPtr.i);
- fragrecptr.p->lockOwnersList = RNIL;
- return;
- }
-
- // Check previous operation
- if (Tprev != RNIL) {
- jam();
- arrGuard(Tprev, coprecsize);
- operationrec[Tprev].nextLockOwnerOp = Tnext;
- } else {
- fragrecptr.p->lockOwnersList = Tnext;
- }//if
-
- // Check next operation
- if (Tnext == RNIL) {
- return;
- } else {
- jam();
- arrGuard(Tnext, coprecsize);
- operationrec[Tnext].prevLockOwnerOp = Tprev;
- }//if
-
- return;
-}//Dbacc::takeOutLockOwnersList()
-
-/**
- * insertLockOwnersList
- *
- * Description: Insert an operation first in the doubly linked lock owners
- * list on the fragment.
- *
- */
-void Dbacc::insertLockOwnersList(Signal* signal,
- const OperationrecPtr& insOperPtr)
-{
- OperationrecPtr tmpOperPtr;
-
-#ifdef VM_TRACE
- // Check that operation is not already in list
- tmpOperPtr.i = fragrecptr.p->lockOwnersList;
- while(tmpOperPtr.i != RNIL){
- ptrCheckGuard(tmpOperPtr, coprecsize, operationrec);
- ndbrequire(tmpOperPtr.i != insOperPtr.i);
- tmpOperPtr.i = tmpOperPtr.p->nextLockOwnerOp;
- }
-#endif
-
- ndbrequire(insOperPtr.p->lockOwner == ZFALSE);
-
- insOperPtr.p->lockOwner = ZTRUE;
- insOperPtr.p->prevLockOwnerOp = RNIL;
- tmpOperPtr.i = fragrecptr.p->lockOwnersList;
- fragrecptr.p->lockOwnersList = insOperPtr.i;
- insOperPtr.p->nextLockOwnerOp = tmpOperPtr.i;
- if (tmpOperPtr.i == RNIL) {
- return;
- } else {
- jam();
- ptrCheckGuard(tmpOperPtr, coprecsize, operationrec);
- tmpOperPtr.p->prevLockOwnerOp = insOperPtr.i;
- }//if
-}//Dbacc::insertLockOwnersList()
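/* A minimal standalone sketch of the index-linked (intrusive) lock-owner
 * list maintained by takeOutLockOwnersList() and insertLockOwnersList()
 * above, using a std::vector as the record pool and a hypothetical NIL
 * constant in place of RNIL.
 */
#include <cstdint>
#include <vector>

namespace sketch {

constexpr uint32_t NIL = 0xffffffff;

struct Rec {
  uint32_t prev = NIL;
  uint32_t next = NIL;
};

struct OwnerList {
  uint32_t head = NIL;
  std::vector<Rec> pool;

  // Insert record i at the head of the list, as insertLockOwnersList() does.
  void insertFirst(uint32_t i)
  {
    pool[i].prev = NIL;
    pool[i].next = head;
    if (head != NIL)
      pool[head].prev = i;
    head = i;
  }

  // Unlink record i from anywhere in the list, as takeOutLockOwnersList() does.
  void remove(uint32_t i)
  {
    const uint32_t prev = pool[i].prev;
    const uint32_t next = pool[i].next;
    if (prev != NIL) pool[prev].next = next; else head = next;
    if (next != NIL) pool[next].prev = prev;
  }
};

} // namespace sketch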
-
-
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* END OF COMMIT AND ABORT MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* ALLOC_OVERFLOW_PAGE */
-/* DESCRIPTION: */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::allocOverflowPage(Signal* signal)
-{
- DirRangePtr aopDirRangePtr;
- DirectoryarrayPtr aopOverflowDirptr;
- OverflowRecordPtr aopOverflowRecPtr;
- Uint32 taopTmp1;
- Uint32 taopTmp2;
- Uint32 taopTmp3;
-
- tresult = 0;
- if ((cfirstfreepage == RNIL) &&
- (cfreepage >= cpagesize)) {
- jam();
- zpagesize_error("Dbacc::allocOverflowPage");
- tresult = ZPAGESIZE_ERROR;
- return;
- }//if
- if (fragrecptr.p->firstFreeDirindexRec != RNIL) {
- jam();
- /* FRAGRECPTR:FIRST_FREE_DIRINDEX_REC POINTS */
- /* TO THE FIRST ELEMENT IN A FREE LIST OF THE */
-    /* DIRECTORY INDEXES WHICH HAVE NULL AS PAGE  */
- aopOverflowRecPtr.i = fragrecptr.p->firstFreeDirindexRec;
- ptrCheckGuard(aopOverflowRecPtr, coverflowrecsize, overflowRecord);
- troOverflowRecPtr.p = aopOverflowRecPtr.p;
- takeRecOutOfFreeOverdir(signal);
- } else if (cfirstfreeoverrec == RNIL) {
- jam();
- tresult = ZOVER_REC_ERROR;
- return;
- } else if ((cfirstfreedir == RNIL) &&
- (cdirarraysize <= cdirmemory)) {
- jam();
- tresult = ZDIRSIZE_ERROR;
- return;
- } else {
- jam();
- seizeOverRec(signal);
- aopOverflowRecPtr = sorOverflowRecPtr;
- aopOverflowRecPtr.p->dirindex = fragrecptr.p->lastOverIndex;
- }//if
- aopOverflowRecPtr.p->nextOverRec = RNIL;
- aopOverflowRecPtr.p->prevOverRec = RNIL;
- fragrecptr.p->firstOverflowRec = aopOverflowRecPtr.i;
- fragrecptr.p->lastOverflowRec = aopOverflowRecPtr.i;
- taopTmp1 = aopOverflowRecPtr.p->dirindex;
- aopDirRangePtr.i = fragrecptr.p->overflowdir;
- taopTmp2 = taopTmp1 >> 8;
- taopTmp3 = taopTmp1 & 0xff;
- ptrCheckGuard(aopDirRangePtr, cdirrangesize, dirRange);
- arrGuard(taopTmp2, 256);
- if (aopDirRangePtr.p->dirArray[taopTmp2] == RNIL) {
- jam();
- seizeDirectory(signal);
- ndbrequire(tresult <= ZLIMIT_OF_ERROR);
- aopDirRangePtr.p->dirArray[taopTmp2] = sdDirptr.i;
- }//if
- aopOverflowDirptr.i = aopDirRangePtr.p->dirArray[taopTmp2];
- seizePage(signal);
- ndbrequire(tresult <= ZLIMIT_OF_ERROR);
- ptrCheckGuard(aopOverflowDirptr, cdirarraysize, directoryarray);
- aopOverflowDirptr.p->pagep[taopTmp3] = spPageptr.i;
- tiopPageId = aopOverflowRecPtr.p->dirindex;
- iopOverflowRecPtr = aopOverflowRecPtr;
- iopPageptr = spPageptr;
- initOverpage(signal);
- aopOverflowRecPtr.p->overpage = spPageptr.i;
- if (fragrecptr.p->lastOverIndex <= aopOverflowRecPtr.p->dirindex) {
- jam();
- ndbrequire(fragrecptr.p->lastOverIndex == aopOverflowRecPtr.p->dirindex);
- fragrecptr.p->lastOverIndex++;
- }//if
-}//Dbacc::allocOverflowPage()
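/* A sketch of the two-level directory addressing used in allocOverflowPage()
 * above: the overflow directory index selects a 256-entry directory array by
 * its high bits and a page slot within that array by its low 8 bits.  Plain
 * integers stand in for the block's DirRange/Directoryarray records.
 */
#include <cstdint>

namespace sketch {

struct OverflowDirSlot {
  uint32_t dirArrayIndex;  // slot in the DirRange's dirArray (taopTmp2 above)
  uint32_t pageSlot;       // slot in that directory's pagep   (taopTmp3 above)
};

inline OverflowDirSlot splitOverflowDirIndex(uint32_t dirindex)
{
  OverflowDirSlot slot;
  slot.dirArrayIndex = dirindex >> 8;
  slot.pageSlot      = dirindex & 0xff;
  return slot;
}

} // namespace sketch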
-
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* EXPAND/SHRINK MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/*EXPANDCHECK EXPAND BUCKET ORD */
-/* SENDER: ACC, LEVEL B */
-/* INPUT: FRAGRECPTR, POINTS TO A FRAGMENT RECORD. */
-/*  DESCRIPTION: A BUCKET OF A FRAGMENT PAGE WILL BE EXPANDED INTO TWO BUCKETS      */
-/* ACCORDING TO LH3. */
-/* ******************--------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* EXPANDCHECK EXPAND BUCKET ORD */
-/* ******************------------------------------+ */
-/* SENDER: ACC, LEVEL B */
-/* A BUCKET OF THE FRAGMENT WILL */
-/*   BE EXPANDED ACCORDING TO LH3,        */
-/*   AND THE COMMIT TRANSACTION PROCESS   */
-/* WILL BE CONTINUED */
-Uint32 Dbacc::checkScanExpand(Signal* signal)
-{
- Uint32 Ti;
- Uint32 TreturnCode = 0;
- Uint32 TPageIndex;
- Uint32 TDirInd;
- Uint32 TSplit;
- Uint32 TreleaseInd = 0;
- Uint32 TreleaseScanBucket;
- Uint32 TreleaseScanIndicator[4];
- DirectoryarrayPtr TDirptr;
- DirRangePtr TDirRangePtr;
- Page8Ptr TPageptr;
- ScanRecPtr TscanPtr;
- RootfragmentrecPtr Trootfragrecptr;
-
- Trootfragrecptr.i = fragrecptr.p->myroot;
- TSplit = fragrecptr.p->p;
- ptrCheckGuard(Trootfragrecptr, crootfragmentsize, rootfragmentrec);
- for (Ti = 0; Ti < 4; Ti++) {
- TreleaseScanIndicator[Ti] = 0;
- if (Trootfragrecptr.p->scan[Ti] != RNIL) {
- //-------------------------------------------------------------
- // A scan is ongoing on this particular local fragment. We have
- // to check its current state.
- //-------------------------------------------------------------
- TscanPtr.i = Trootfragrecptr.p->scan[Ti];
- ptrCheckGuard(TscanPtr, cscanRecSize, scanRec);
- if (TscanPtr.p->activeLocalFrag == fragrecptr.i) {
- if (TscanPtr.p->scanBucketState == ScanRec::FIRST_LAP) {
- if (TSplit == TscanPtr.p->nextBucketIndex) {
- jam();
- //-------------------------------------------------------------
- // We are currently scanning this bucket. We cannot split it
- // simultaneously with the scan. We have to pass this offer for
- // splitting the bucket.
- //-------------------------------------------------------------
- TreturnCode = 1;
- return TreturnCode;
- } else if (TSplit > TscanPtr.p->nextBucketIndex) {
- jam();
- //-------------------------------------------------------------
- // This bucket has not yet been scanned. We must reset the scanned
- // bit indicator for this scan on this bucket.
- //-------------------------------------------------------------
- TreleaseScanIndicator[Ti] = 1;
- TreleaseInd = 1;
- } else {
- jam();
- }//if
- } else if (TscanPtr.p->scanBucketState == ScanRec::SECOND_LAP) {
- jam();
- //-------------------------------------------------------------
-	  // We are performing a second lap to handle buckets that were
- // merged during the first lap of scanning. During this second
- // lap we do not allow any splits or merges.
- //-------------------------------------------------------------
- TreturnCode = 1;
- return TreturnCode;
- } else {
- ndbrequire(TscanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED);
- jam();
- //-------------------------------------------------------------
- // The scan is completed and we can thus go ahead and perform
- // the split.
- //-------------------------------------------------------------
- }//if
- }//if
- }//if
- }//for
- if (TreleaseInd == 1) {
- TreleaseScanBucket = TSplit;
- TDirRangePtr.i = fragrecptr.p->directory;
- TPageIndex = TreleaseScanBucket & ((1 << fragrecptr.p->k) - 1); /* PAGE INDEX OBS K = 6 */
- TDirInd = TreleaseScanBucket >> fragrecptr.p->k; /* DIRECTORY INDEX OBS K = 6 */
- ptrCheckGuard(TDirRangePtr, cdirrangesize, dirRange);
- arrGuard((TDirInd >> 8), 256);
- TDirptr.i = TDirRangePtr.p->dirArray[TDirInd >> 8];
- ptrCheckGuard(TDirptr, cdirarraysize, directoryarray);
- TPageptr.i = TDirptr.p->pagep[TDirInd & 0xff];
- ptrCheckGuard(TPageptr, cpagesize, page8);
- for (Ti = 0; Ti < 4; Ti++) {
- if (TreleaseScanIndicator[Ti] == 1) {
- jam();
- scanPtr.i = Trootfragrecptr.p->scan[Ti];
- ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
- rsbPageidptr = TPageptr;
- trsbPageindex = TPageIndex;
- releaseScanBucket(signal);
- }//if
- }//for
- }//if
- return TreturnCode;
-}//Dbacc::checkScanExpand()
-
-void Dbacc::execEXPANDCHECK2(Signal* signal)
-{
- jamEntry();
-
- if(refToBlock(signal->getSendersBlockRef()) == DBLQH){
- jam();
- reenable_expand_after_redo_log_exection_complete(signal);
- return;
- }
-
- DirectoryarrayPtr newDirptr;
-
- fragrecptr.i = signal->theData[0];
- tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
- Uint32 tmp = 1;
- tmp = tmp << 31;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- fragrecptr.p->expandFlag = 0;
- if (fragrecptr.p->slack < tmp) {
- jam();
-    /* IT MEANS THAT SLACK > ZERO   */
- /*--------------------------------------------------------------*/
- /* THE SLACK HAS IMPROVED AND IS NOW ACCEPTABLE AND WE */
- /* CAN FORGET ABOUT THE EXPAND PROCESS. */
- /*--------------------------------------------------------------*/
- return;
- }//if
- if (fragrecptr.p->firstOverflowRec == RNIL) {
- jam();
- allocOverflowPage(signal);
- if (tresult > ZLIMIT_OF_ERROR) {
- jam();
- /*--------------------------------------------------------------*/
- /* WE COULD NOT ALLOCATE ANY OVERFLOW PAGE. THUS WE HAVE TO STOP*/
- /* THE EXPAND SINCE WE CANNOT GUARANTEE ITS COMPLETION. */
- /*--------------------------------------------------------------*/
- return;
- }//if
- }//if
- if (cfirstfreepage == RNIL) {
- if (cfreepage >= cpagesize) {
- jam();
- /*--------------------------------------------------------------*/
- /* WE HAVE TO STOP THE EXPAND PROCESS SINCE THERE ARE NO FREE */
- /* PAGES. THIS MEANS THAT WE COULD BE FORCED TO CRASH SINCE WE */
- /* CANNOT COMPLETE THE EXPAND. TO AVOID THE CRASH WE EXIT HERE. */
- /*--------------------------------------------------------------*/
- return;
- }//if
- }//if
- if (checkScanExpand(signal) == 1) {
- jam();
- /*--------------------------------------------------------------*/
- // A scan state was inconsistent with performing an expand
- // operation.
- /*--------------------------------------------------------------*/
- return;
- }//if
- if (fragrecptr.p->createLcp == ZTRUE) {
- if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_EXPAND) {
- jam();
- /*--------------------------------------------------------------*/
- // We did not have enough undo log buffers to start up an
- // expand operation
- /*--------------------------------------------------------------*/
- return;
- }//if
- }//if
-
- /*--------------------------------------------------------------------------*/
- /* WE START BY FINDING THE PAGE, THE PAGE INDEX AND THE PAGE DIRECTORY*/
-  /*       OF THE NEW BUCKET WHICH SHALL RECEIVE THE ELEMENTS WHICH HAVE A 1 IN*/
- /* THE NEXT HASH BIT. THIS BIT IS USED IN THE SPLIT MECHANISM TO */
- /* DECIDE WHICH ELEMENT GOES WHERE. */
- /*--------------------------------------------------------------------------*/
- expDirRangePtr.i = fragrecptr.p->directory;
- texpReceivedBucket = (fragrecptr.p->maxp + fragrecptr.p->p) + 1; /* RECEIVED BUCKET */
- texpDirInd = texpReceivedBucket >> fragrecptr.p->k;
- newDirptr.i = RNIL;
- ptrNull(newDirptr);
- texpDirRangeIndex = texpDirInd >> 8;
- ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
- arrGuard(texpDirRangeIndex, 256);
- expDirptr.i = expDirRangePtr.p->dirArray[texpDirRangeIndex];
- if (expDirptr.i == RNIL) {
- jam();
- seizeDirectory(signal);
- if (tresult > ZLIMIT_OF_ERROR) {
- jam();
- return;
- } else {
- jam();
- newDirptr = sdDirptr;
- expDirptr = sdDirptr;
- expDirRangePtr.p->dirArray[texpDirRangeIndex] = sdDirptr.i;
- }//if
- } else {
- ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
- }//if
- texpDirPageIndex = texpDirInd & 0xff;
- expPageptr.i = expDirptr.p->pagep[texpDirPageIndex];
- if (expPageptr.i == RNIL) {
- jam();
- seizePage(signal);
- if (tresult > ZLIMIT_OF_ERROR) {
- jam();
- if (newDirptr.i != RNIL) {
- jam();
- rdDirptr.i = newDirptr.i;
- releaseDirectory(signal);
- }//if
- return;
- }//if
- expDirptr.p->pagep[texpDirPageIndex] = spPageptr.i;
- tipPageId = texpDirInd;
- inpPageptr = spPageptr;
- initPage(signal);
- fragrecptr.p->dirsize++;
- expPageptr = spPageptr;
- } else {
- ptrCheckGuard(expPageptr, cpagesize, page8);
- }//if
-
- fragrecptr.p->expReceivePageptr = expPageptr.i;
- fragrecptr.p->expReceiveIndex = texpReceivedBucket & ((1 << fragrecptr.p->k) - 1);
- /*--------------------------------------------------------------------------*/
- /* THE NEXT ACTION IS TO FIND THE PAGE, THE PAGE INDEX AND THE PAGE */
- /* DIRECTORY OF THE BUCKET TO BE SPLIT. */
- /*--------------------------------------------------------------------------*/
- expDirRangePtr.i = fragrecptr.p->directory;
- cexcPageindex = fragrecptr.p->p & ((1 << fragrecptr.p->k) - 1); /* PAGE INDEX OBS K = 6 */
- texpDirInd = fragrecptr.p->p >> fragrecptr.p->k; /* DIRECTORY INDEX OBS K = 6 */
- ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
- arrGuard((texpDirInd >> 8), 256);
- expDirptr.i = expDirRangePtr.p->dirArray[texpDirInd >> 8];
- ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
- excPageptr.i = expDirptr.p->pagep[texpDirInd & 0xff];
- fragrecptr.p->expSenderIndex = cexcPageindex;
- fragrecptr.p->expSenderPageptr = excPageptr.i;
- if (excPageptr.i == RNIL) {
- jam();
- endofexpLab(signal); /* EMPTY BUCKET */
- return;
- }//if
- fragrecptr.p->expReceiveForward = ZTRUE;
- ptrCheckGuard(excPageptr, cpagesize, page8);
- expandcontainer(signal);
- endofexpLab(signal);
- return;
-}//Dbacc::execEXPANDCHECK2()
-
-void Dbacc::endofexpLab(Signal* signal)
-{
- fragrecptr.p->p++;
- fragrecptr.p->slack += fragrecptr.p->maxloadfactor;
- fragrecptr.p->expandCounter++;
- if (fragrecptr.p->p > fragrecptr.p->maxp) {
- jam();
- fragrecptr.p->maxp = (fragrecptr.p->maxp << 1) | 1;
- fragrecptr.p->lhdirbits++;
- fragrecptr.p->hashcheckbit++;
- fragrecptr.p->p = 0;
- }//if
- Uint32 noOfBuckets = (fragrecptr.p->maxp + 1) + fragrecptr.p->p;
- Uint32 Thysteres = fragrecptr.p->maxloadfactor - fragrecptr.p->minloadfactor;
- fragrecptr.p->slackCheck = noOfBuckets * Thysteres;
- if (fragrecptr.p->slack > (1u << 31)) {
- jam();
-    /* IT MEANS THAT SLACK < ZERO   */
- /* --------------------------------------------------------------------------------- */
- /* IT IS STILL NECESSARY TO EXPAND THE FRAGMENT EVEN MORE. START IT FROM HERE */
- /* WITHOUT WAITING FOR NEXT COMMIT ON THE FRAGMENT. */
- /* --------------------------------------------------------------------------------- */
- fragrecptr.p->expandFlag = 2;
- signal->theData[0] = fragrecptr.i;
- signal->theData[1] = fragrecptr.p->p;
- signal->theData[2] = fragrecptr.p->maxp;
- sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB);
- }//if
- return;
-}//Dbacc::endofexpLab()
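/* A sketch of the linear-hashing (LH3) bookkeeping performed in endofexpLab()
 * above: the split pointer p advances one bucket per split, and when it passes
 * maxp the table has doubled, so maxp grows and one more hash bit is used.
 * The struct and function names are illustrative, not the block's fragment
 * record.
 */
#include <cstdint>

namespace sketch {

struct LhState {
  uint32_t maxp;          // highest bucket number of the "full" table
  uint32_t p;             // next bucket to split
  uint32_t hashcheckbit;  // hash bit that decides the destination on a split
};

inline void advanceAfterSplit(LhState& s)
{
  s.p++;
  if (s.p > s.maxp) {
    s.maxp = (s.maxp << 1) | 1;  // table size doubled
    s.hashcheckbit++;            // one more hash bit distinguishes buckets
    s.p = 0;                     // start a new round of splits
  }
}

/* Number of addressable buckets after the update, as used for slackCheck. */
inline uint32_t bucketCount(const LhState& s)
{
  return (s.maxp + 1) + s.p;
}

} // namespace sketch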
-
-void Dbacc::reenable_expand_after_redo_log_exection_complete(Signal* signal){
-
- tabptr.i = signal->theData[0];
- Uint32 fragId = signal->theData[1];
-
- ptrCheckGuard(tabptr, ctablesize, tabrec);
- ndbrequire(getrootfragmentrec(signal, rootfragrecptr, fragId));
-#if 0
- ndbout_c("reenable expand check for table %d fragment: %d",
- tabptr.i, fragId);
-#endif
-
- for (Uint32 i = 0; i < 2; i++) {
- fragrecptr.i = rootfragrecptr.p->fragmentptr[i];
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- switch(fragrecptr.p->expandFlag){
- case 0:
- /**
-       * Hmm... this means that it has already been re-enabled...
- */
- ndbassert(false);
- continue;
- case 1:
- /**
-       * Nothing is going on, start the expand check
- */
- case 2:
- /**
- * A shrink is running, do expand check anyway
- * (to reset expandFlag)
- */
- fragrecptr.p->expandFlag = 2;
- signal->theData[0] = fragrecptr.i;
- signal->theData[1] = fragrecptr.p->p;
- signal->theData[2] = fragrecptr.p->maxp;
- sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB);
- break;
- }
- }
-}
-
-void Dbacc::execDEBUG_SIG(Signal* signal)
-{
- jamEntry();
- expPageptr.i = signal->theData[0];
-
- progError(__LINE__,
- ERR_SR_UNDOLOG);
- return;
-}//Dbacc::execDEBUG_SIG()
-
-/* --------------------------------------------------------------------------------- */
-/* EXPANDCONTAINER */
-/* INPUT: EXC_PAGEPTR (POINTER TO THE ACTIVE PAGE RECORD) */
-/* CEXC_PAGEINDEX (INDEX OF THE BUCKET). */
-/* */
-/* DESCRIPTION: THE HASH VALUE OF ALL ELEMENTS IN THE CONTAINER WILL BE */
-/*                  CHECKED. SOME OF THESE ELEMENTS HAVE TO MOVE TO THE NEW CONTAINER.      */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::expandcontainer(Signal* signal)
-{
- Uint32 texcHashvalue;
- Uint32 texcTmp;
- Uint32 texcIndex;
- Uint32 guard20;
-
- cexcPrevpageptr = RNIL;
- cexcPrevconptr = 0;
- cexcForward = ZTRUE;
- EXP_CONTAINER_LOOP:
- cexcContainerptr = (cexcPageindex << ZSHIFT_PLUS) - (cexcPageindex << ZSHIFT_MINUS);
- if (cexcForward == ZTRUE) {
- jam();
- cexcContainerptr = cexcContainerptr + ZHEAD_SIZE;
- cexcElementptr = cexcContainerptr + ZCON_HEAD_SIZE;
- } else {
- jam();
- cexcContainerptr = ((cexcContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE;
- cexcElementptr = cexcContainerptr - 1;
- }//if
- arrGuard(cexcContainerptr, 2048);
- cexcContainerhead = excPageptr.p->word32[cexcContainerptr];
- cexcContainerlen = cexcContainerhead >> 26;
- cexcMovedLen = ZCON_HEAD_SIZE;
- if (cexcContainerlen <= ZCON_HEAD_SIZE) {
- ndbrequire(cexcContainerlen >= ZCON_HEAD_SIZE);
- jam();
- goto NEXT_ELEMENT;
- }//if
- NEXT_ELEMENT_LOOP:
- idrOperationRecPtr.i = RNIL;
- ptrNull(idrOperationRecPtr);
- /* --------------------------------------------------------------------------------- */
- /* CEXC_PAGEINDEX PAGE INDEX OF CURRENT CONTAINER BEING EXAMINED. */
- /* CEXC_CONTAINERPTR INDEX OF CURRENT CONTAINER BEING EXAMINED. */
- /* CEXC_ELEMENTPTR INDEX OF CURRENT ELEMENT BEING EXAMINED. */
- /* EXC_PAGEPTR PAGE WHERE CURRENT ELEMENT RESIDES. */
- /* CEXC_PREVPAGEPTR PAGE OF PREVIOUS CONTAINER. */
- /* CEXC_PREVCONPTR INDEX OF PREVIOUS CONTAINER */
- /* CEXC_FORWARD DIRECTION OF CURRENT CONTAINER */
- /* --------------------------------------------------------------------------------- */
- arrGuard(cexcElementptr, 2048);
- tidrElemhead = excPageptr.p->word32[cexcElementptr];
- if (ElementHeader::getUnlocked(tidrElemhead)){
- jam();
- texcHashvalue = ElementHeader::getHashValuePart(tidrElemhead);
- } else {
- jam();
- idrOperationRecPtr.i = ElementHeader::getOpPtrI(tidrElemhead);
- ptrCheckGuard(idrOperationRecPtr, coprecsize, operationrec);
- texcHashvalue = idrOperationRecPtr.p->hashvaluePart;
- if ((fragrecptr.p->createLcp == ZTRUE) &&
- (((texcHashvalue >> fragrecptr.p->hashcheckbit) & 1) != 0)) {
- jam();
- /* --------------------------------------------------------------------------------- */
- // During local checkpoints we must ensure that we restore the element header in
-      // the unlocked state, with the hash value part present and the tuple status zeroed.
- // Otherwise a later insert over the same element will write an UNDO log that will
- // ensure that the now removed element is restored together with its locked element
- // header and without the hash value part.
- /* --------------------------------------------------------------------------------- */
- const Uint32 hv = idrOperationRecPtr.p->hashvaluePart;
- const Uint32 eh = ElementHeader::setUnlocked(hv, 0);
- excPageptr.p->word32[cexcElementptr] = eh;
- }//if
- }//if
- if (((texcHashvalue >> fragrecptr.p->hashcheckbit) & 1) == 0) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THIS ELEMENT IS NOT TO BE MOVED. WE CALCULATE THE WHEREABOUTS OF THE NEXT */
- /* ELEMENT AND PROCEED WITH THAT OR END THE SEARCH IF THERE ARE NO MORE */
- /* ELEMENTS IN THIS CONTAINER. */
- /* --------------------------------------------------------------------------------- */
- goto NEXT_ELEMENT;
- }//if
- /* --------------------------------------------------------------------------------- */
- /* THE HASH BIT WAS SET AND WE SHALL MOVE THIS ELEMENT TO THE NEW BUCKET. */
- /* WE START BY READING THE ELEMENT TO BE ABLE TO INSERT IT INTO THE NEW BUCKET.*/
- /* THEN WE INSERT THE ELEMENT INTO THE NEW BUCKET. THE NEXT STEP IS TO DELETE */
- /* THE ELEMENT FROM THIS BUCKET. THIS IS PERFORMED BY REPLACING IT WITH THE */
- /* LAST ELEMENT IN THE BUCKET. IF THIS ELEMENT IS TO BE MOVED WE MOVE IT AND */
- /* GET THE LAST ELEMENT AGAIN UNTIL WE EITHER FIND ONE THAT STAYS OR THIS */
- /* ELEMENT IS THE LAST ELEMENT. */
- /* --------------------------------------------------------------------------------- */
- texcTmp = cexcElementptr + cexcForward;
- guard20 = fragrecptr.p->localkeylen - 1;
- for (texcIndex = 0; texcIndex <= guard20; texcIndex++) {
- arrGuard(texcIndex, 2);
- arrGuard(texcTmp, 2048);
- clocalkey[texcIndex] = excPageptr.p->word32[texcTmp];
- texcTmp = texcTmp + cexcForward;
- }//for
- tidrPageindex = fragrecptr.p->expReceiveIndex;
- idrPageptr.i = fragrecptr.p->expReceivePageptr;
- ptrCheckGuard(idrPageptr, cpagesize, page8);
- tidrForward = fragrecptr.p->expReceiveForward;
- insertElement(signal);
- fragrecptr.p->expReceiveIndex = tidrPageindex;
- fragrecptr.p->expReceivePageptr = idrPageptr.i;
- fragrecptr.p->expReceiveForward = tidrForward;
- REMOVE_LAST_LOOP:
- jam();
- lastPageptr.i = excPageptr.i;
- lastPageptr.p = excPageptr.p;
- tlastContainerptr = cexcContainerptr;
- lastPrevpageptr.i = cexcPrevpageptr;
- ptrCheck(lastPrevpageptr, cpagesize, page8);
- tlastPrevconptr = cexcPrevconptr;
- arrGuard(tlastContainerptr, 2048);
- tlastContainerhead = lastPageptr.p->word32[tlastContainerptr];
- tlastContainerlen = tlastContainerhead >> 26;
- tlastForward = cexcForward;
- tlastPageindex = cexcPageindex;
- getLastAndRemove(signal);
- if (excPageptr.i == lastPageptr.i) {
- if (cexcElementptr == tlastElementptr) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THE CURRENT ELEMENT WAS ALSO THE LAST ELEMENT. */
- /* --------------------------------------------------------------------------------- */
- return;
- }//if
- }//if
- /* --------------------------------------------------------------------------------- */
- /* THE CURRENT ELEMENT WAS NOT THE LAST ELEMENT. IF THE LAST ELEMENT SHOULD */
- /* STAY WE COPY IT TO THE POSITION OF THE CURRENT ELEMENT, OTHERWISE WE INSERT */
-  /*       IT INTO THE NEW BUCKET, REMOVE IT AND TRY AGAIN WITH THE NEW LAST ELEMENT.          */
- /* --------------------------------------------------------------------------------- */
- idrOperationRecPtr.i = RNIL;
- ptrNull(idrOperationRecPtr);
- arrGuard(tlastElementptr, 2048);
- tidrElemhead = lastPageptr.p->word32[tlastElementptr];
- if (ElementHeader::getUnlocked(tidrElemhead)) {
- jam();
- texcHashvalue = ElementHeader::getHashValuePart(tidrElemhead);
- } else {
- jam();
- idrOperationRecPtr.i = ElementHeader::getOpPtrI(tidrElemhead);
- ptrCheckGuard(idrOperationRecPtr, coprecsize, operationrec);
- texcHashvalue = idrOperationRecPtr.p->hashvaluePart;
- if ((fragrecptr.p->createLcp == ZTRUE) &&
- (((texcHashvalue >> fragrecptr.p->hashcheckbit) & 1) != 0)) {
- jam();
- /* --------------------------------------------------------------------------------- */
- // During local checkpoints we must ensure that we restore the element header in
-      // the unlocked state, with the hash value part present and the tuple status zeroed.
- // Otherwise a later insert over the same element will write an UNDO log that will
- // ensure that the now removed element is restored together with its locked element
- // header and without the hash value part.
- /* --------------------------------------------------------------------------------- */
- const Uint32 hv = idrOperationRecPtr.p->hashvaluePart;
- const Uint32 eh = ElementHeader::setUnlocked(hv, 0);
- lastPageptr.p->word32[tlastElementptr] = eh;
- }//if
- }//if
- if (((texcHashvalue >> fragrecptr.p->hashcheckbit) & 1) == 0) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THE LAST ELEMENT IS NOT TO BE MOVED. WE COPY IT TO THE CURRENT ELEMENT. */
- /* --------------------------------------------------------------------------------- */
- delPageptr = excPageptr;
- tdelContainerptr = cexcContainerptr;
- tdelForward = cexcForward;
- tdelElementptr = cexcElementptr;
- deleteElement(signal);
- } else {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THE LAST ELEMENT IS ALSO TO BE MOVED. */
- /* --------------------------------------------------------------------------------- */
- texcTmp = tlastElementptr + tlastForward;
- for (texcIndex = 0; texcIndex < fragrecptr.p->localkeylen; texcIndex++) {
- arrGuard(texcIndex, 2);
- arrGuard(texcTmp, 2048);
- clocalkey[texcIndex] = lastPageptr.p->word32[texcTmp];
- texcTmp = texcTmp + tlastForward;
- }//for
- tidrPageindex = fragrecptr.p->expReceiveIndex;
- idrPageptr.i = fragrecptr.p->expReceivePageptr;
- ptrCheckGuard(idrPageptr, cpagesize, page8);
- tidrForward = fragrecptr.p->expReceiveForward;
- insertElement(signal);
- fragrecptr.p->expReceiveIndex = tidrPageindex;
- fragrecptr.p->expReceivePageptr = idrPageptr.i;
- fragrecptr.p->expReceiveForward = tidrForward;
- goto REMOVE_LAST_LOOP;
- }//if
- NEXT_ELEMENT:
- arrGuard(cexcContainerptr, 2048);
- cexcContainerhead = excPageptr.p->word32[cexcContainerptr];
- cexcMovedLen = cexcMovedLen + fragrecptr.p->elementLength;
- if ((cexcContainerhead >> 26) > cexcMovedLen) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WE HAVE NOT YET MOVED THE COMPLETE CONTAINER. WE PROCEED WITH THE NEXT */
- /* ELEMENT IN THE CONTAINER. IT IS IMPORTANT TO READ THE CONTAINER LENGTH */
- /* FROM THE CONTAINER HEADER SINCE IT MIGHT CHANGE BY REMOVING THE LAST */
- /* ELEMENT IN THE BUCKET. */
- /* --------------------------------------------------------------------------------- */
- cexcElementptr = cexcElementptr + (cexcForward * fragrecptr.p->elementLength);
- goto NEXT_ELEMENT_LOOP;
- }//if
- if (((cexcContainerhead >> 7) & 3) != 0) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WE PROCEED TO THE NEXT CONTAINER IN THE BUCKET. */
- /* --------------------------------------------------------------------------------- */
- cexcPrevpageptr = excPageptr.i;
- cexcPrevconptr = cexcContainerptr;
- nextcontainerinfoExp(signal);
- goto EXP_CONTAINER_LOOP;
- }//if
-}//Dbacc::expandcontainer()
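/* A sketch of the split decision applied to every element in
 * expandcontainer() above: the element stays in the old bucket when the hash
 * bit selected by hashcheckbit is 0 and moves to the newly created bucket
 * when it is 1.  Plain integers stand in for the element header / operation
 * record lookup that supplies the hash value part.
 */
#include <cstdint>

namespace sketch {

inline bool movesToNewBucket(uint32_t hashValuePart, uint32_t hashcheckbit)
{
  return ((hashValuePart >> hashcheckbit) & 1) != 0;
}

} // namespace sketch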
-
-/* ******************--------------------------------------------------------------- */
-/* SHRINKCHECK JOIN BUCKET ORD */
-/* SENDER: ACC, LEVEL B */
-/* INPUT: FRAGRECPTR, POINTS TO A FRAGMENT RECORD. */
-/*  DESCRIPTION: TWO BUCKETS OF A FRAGMENT PAGE WILL BE JOINED TOGETHER             */
-/* ACCORDING TO LH3. */
-/* ******************--------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* SHRINKCHECK JOIN BUCKET ORD */
-/* ******************------------------------------+ */
-/* SENDER: ACC, LEVEL B */
-/* TWO BUCKETS OF THE FRAGMENT */
-/*   WILL BE JOINED ACCORDING TO LH3      */
-/*   AND THE COMMIT TRANSACTION PROCESS   */
-/* WILL BE CONTINUED */
-Uint32 Dbacc::checkScanShrink(Signal* signal)
-{
- Uint32 Ti;
- Uint32 TreturnCode = 0;
- Uint32 TPageIndex;
- Uint32 TDirInd;
- Uint32 TmergeDest;
- Uint32 TmergeSource;
- Uint32 TreleaseScanBucket;
- Uint32 TreleaseInd = 0;
- Uint32 TreleaseScanIndicator[4];
- DirectoryarrayPtr TDirptr;
- DirRangePtr TDirRangePtr;
- Page8Ptr TPageptr;
- ScanRecPtr TscanPtr;
- RootfragmentrecPtr Trootfragrecptr;
-
- Trootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(Trootfragrecptr, crootfragmentsize, rootfragmentrec);
- if (fragrecptr.p->p == 0) {
- jam();
- TmergeDest = fragrecptr.p->maxp >> 1;
- } else {
- jam();
- TmergeDest = fragrecptr.p->p - 1;
- }//if
- TmergeSource = fragrecptr.p->maxp + fragrecptr.p->p;
- for (Ti = 0; Ti < 4; Ti++) {
- TreleaseScanIndicator[Ti] = 0;
- if (Trootfragrecptr.p->scan[Ti] != RNIL) {
- TscanPtr.i = Trootfragrecptr.p->scan[Ti];
- ptrCheckGuard(TscanPtr, cscanRecSize, scanRec);
- if (TscanPtr.p->activeLocalFrag == fragrecptr.i) {
- //-------------------------------------------------------------
- // A scan is ongoing on this particular local fragment. We have
- // to check its current state.
- //-------------------------------------------------------------
- if (TscanPtr.p->scanBucketState == ScanRec::FIRST_LAP) {
- jam();
- if ((TmergeDest == TscanPtr.p->nextBucketIndex) ||
- (TmergeSource == TscanPtr.p->nextBucketIndex)) {
- jam();
- //-------------------------------------------------------------
- // We are currently scanning one of the buckets involved in the
- // merge. We cannot merge while simultaneously performing a scan.
- // We have to pass this offer for merging the buckets.
- //-------------------------------------------------------------
- TreturnCode = 1;
- return TreturnCode;
- } else if (TmergeDest < TscanPtr.p->nextBucketIndex) {
- jam();
- TreleaseScanIndicator[Ti] = 1;
- TreleaseInd = 1;
- }//if
- } else if (TscanPtr.p->scanBucketState == ScanRec::SECOND_LAP) {
- jam();
- //-------------------------------------------------------------
-        // We are performing a second lap to handle buckets that were
- // merged during the first lap of scanning. During this second
- // lap we do not allow any splits or merges.
- //-------------------------------------------------------------
- TreturnCode = 1;
- return TreturnCode;
- } else if (TscanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) {
- jam();
- //-------------------------------------------------------------
- // The scan is completed and we can thus go ahead and perform
- // the split.
- //-------------------------------------------------------------
- } else {
- jam();
- sendSystemerror(signal);
- return TreturnCode;
- }//if
- }//if
- }//if
- }//for
- if (TreleaseInd == 1) {
- jam();
- TreleaseScanBucket = TmergeSource;
- TDirRangePtr.i = fragrecptr.p->directory;
- TPageIndex = TreleaseScanBucket & ((1 << fragrecptr.p->k) - 1); /* PAGE INDEX OBS K = 6 */
- TDirInd = TreleaseScanBucket >> fragrecptr.p->k; /* DIRECTORY INDEX OBS K = 6 */
- ptrCheckGuard(TDirRangePtr, cdirrangesize, dirRange);
- arrGuard((TDirInd >> 8), 256);
- TDirptr.i = TDirRangePtr.p->dirArray[TDirInd >> 8];
- ptrCheckGuard(TDirptr, cdirarraysize, directoryarray);
- TPageptr.i = TDirptr.p->pagep[TDirInd & 0xff];
- ptrCheckGuard(TPageptr, cpagesize, page8);
- for (Ti = 0; Ti < 4; Ti++) {
- if (TreleaseScanIndicator[Ti] == 1) {
- jam();
- scanPtr.i = Trootfragrecptr.p->scan[Ti];
- ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
- rsbPageidptr.i = TPageptr.i;
- rsbPageidptr.p = TPageptr.p;
- trsbPageindex = TPageIndex;
- releaseScanBucket(signal);
- if (TmergeDest < scanPtr.p->minBucketIndexToRescan) {
- jam();
- //-------------------------------------------------------------
- // We have to keep track of the starting bucket to Rescan in the
- // second lap.
- //-------------------------------------------------------------
- scanPtr.p->minBucketIndexToRescan = TmergeDest;
- }//if
- if (TmergeDest > scanPtr.p->maxBucketIndexToRescan) {
- jam();
- //-------------------------------------------------------------
- // We have to keep track of the ending bucket to Rescan in the
- // second lap.
- //-------------------------------------------------------------
- scanPtr.p->maxBucketIndexToRescan = TmergeDest;
- }//if
- }//if
- }//for
- }//if
- return TreturnCode;
-}//Dbacc::checkScanShrink()
-
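The bucket pair examined by checkScanShrink() follows the LH3 linear-hashing addressing used throughout this block: the source bucket to be emptied is maxp + p, and the destination is p - 1, or maxp >> 1 when p is zero and the table is about to halve. A minimal standalone sketch of that index arithmetic, with illustrative names that are not part of the block:

#include <cstdint>

// Compute the two buckets joined by one shrink step under LH3,
// mirroring the arithmetic in checkScanShrink()/execSHRINKCHECK2().
struct MergePair { uint32_t dest; uint32_t source; };

static MergePair lh3ShrinkPair(uint32_t maxp, uint32_t p)
{
  MergePair m;
  m.source = maxp + p;               // bucket that will be emptied
  m.dest   = (p == 0) ? (maxp >> 1)  // table is about to halve
                      : (p - 1);     // normal case
  return m;
}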
-void Dbacc::execSHRINKCHECK2(Signal* signal)
-{
- Uint32 tshrTmp1;
-
- jamEntry();
- fragrecptr.i = signal->theData[0];
- Uint32 oldFlag = signal->theData[3];
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- fragrecptr.p->expandFlag = oldFlag;
- tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
- if (fragrecptr.p->slack <= fragrecptr.p->slackCheck) {
- jam();
- /* TIME FOR JOIN BUCKETS PROCESS */
- /*--------------------------------------------------------------*/
- /* NO LONGER NECESSARY TO SHRINK THE FRAGMENT. */
- /*--------------------------------------------------------------*/
- return;
- }//if
- if (fragrecptr.p->slack > (1u << 31)) {
- jam();
- /*--------------------------------------------------------------*/
- /* THE SLACK IS NEGATIVE, IN THIS CASE WE WILL NOT NEED ANY */
- /* SHRINK. */
- /*--------------------------------------------------------------*/
- return;
- }//if
- texpDirInd = (fragrecptr.p->maxp + fragrecptr.p->p) >> fragrecptr.p->k;
- if (((fragrecptr.p->maxp + fragrecptr.p->p) & ((1 << fragrecptr.p->k) - 1)) == 0) {
- if (fragrecptr.p->createLcp == ZTRUE) {
- if (fragrecptr.p->fragState == LCP_SEND_PAGES) {
- if (fragrecptr.p->lcpMaxDirIndex > texpDirInd) {
- if (fragrecptr.p->lcpDirIndex <= texpDirInd) {
- jam();
- /*--------------------------------------------------------------*/
- /* WE DO NOT ALLOW ANY SHRINKS THAT REMOVE PAGES THAT ARE */
- /* NEEDED AS PART OF THE LOCAL CHECKPOINT. */
- /*--------------------------------------------------------------*/
- return;
- }//if
- }//if
- }//if
- }//if
- }//if
- if (fragrecptr.p->firstOverflowRec == RNIL) {
- jam();
- allocOverflowPage(signal);
- if (tresult > ZLIMIT_OF_ERROR) {
- jam();
- return;
- }//if
- }//if
- if (cfirstfreepage == RNIL) {
- if (cfreepage >= cpagesize) {
- jam();
- /*--------------------------------------------------------------*/
- /* WE HAVE TO STOP THE SHRINK PROCESS SINCE THERE ARE NO FREE */
- /* PAGES. THIS MEANS THAT WE COULD BE FORCED TO CRASH SINCE WE */
- /* CANNOT COMPLETE THE SHRINK. TO AVOID THE CRASH WE EXIT HERE. */
- /*--------------------------------------------------------------*/
- return;
- }//if
- }//if
- if (checkScanShrink(signal) == 1) {
- jam();
- /*--------------------------------------------------------------*/
- // A scan state was inconsistent with performing a shrink
- // operation.
- /*--------------------------------------------------------------*/
- return;
- }//if
- if (fragrecptr.p->createLcp == ZTRUE) {
- if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_EXPAND) {
- jam();
- /*--------------------------------------------------------------*/
-      // We did not have enough undo log buffers to start up a
- // shrink operation
- /*--------------------------------------------------------------*/
- return;
- }//if
- }//if
- if (fragrecptr.p->p == 0) {
- jam();
- fragrecptr.p->maxp = fragrecptr.p->maxp >> 1;
- fragrecptr.p->p = fragrecptr.p->maxp;
- fragrecptr.p->lhdirbits--;
- fragrecptr.p->hashcheckbit--;
- } else {
- jam();
- fragrecptr.p->p--;
- }//if
-
- /*--------------------------------------------------------------------------*/
- /* WE START BY FINDING THE NECESSARY INFORMATION OF THE BUCKET TO BE */
- /* REMOVED WHICH WILL SEND ITS ELEMENTS TO THE RECEIVING BUCKET. */
- /*--------------------------------------------------------------------------*/
- expDirRangePtr.i = fragrecptr.p->directory;
- cexcPageindex = ((fragrecptr.p->maxp + fragrecptr.p->p) + 1) & ((1 << fragrecptr.p->k) - 1);
- texpDirInd = ((fragrecptr.p->maxp + fragrecptr.p->p) + 1) >> fragrecptr.p->k;
- texpDirRangeIndex = texpDirInd >> 8;
- texpDirPageIndex = texpDirInd & 0xff;
- ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
- arrGuard(texpDirRangeIndex, 256);
- expDirptr.i = expDirRangePtr.p->dirArray[texpDirRangeIndex];
- ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
- excPageptr.i = expDirptr.p->pagep[texpDirPageIndex];
- fragrecptr.p->expSenderDirptr = expDirptr.i;
- fragrecptr.p->expSenderIndex = cexcPageindex;
- fragrecptr.p->expSenderPageptr = excPageptr.i;
- fragrecptr.p->expSenderDirIndex = texpDirInd;
- /*--------------------------------------------------------------------------*/
- /* WE NOW PROCEED BY FINDING THE NECESSARY INFORMATION ABOUT THE */
- /* RECEIVING BUCKET. */
- /*--------------------------------------------------------------------------*/
- expDirRangePtr.i = fragrecptr.p->directory;
- texpReceivedBucket = fragrecptr.p->p >> fragrecptr.p->k;
- ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
- arrGuard((texpReceivedBucket >> 8), 256);
- expDirptr.i = expDirRangePtr.p->dirArray[texpReceivedBucket >> 8];
- ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
- fragrecptr.p->expReceivePageptr = expDirptr.p->pagep[texpReceivedBucket & 0xff];
- fragrecptr.p->expReceiveIndex = fragrecptr.p->p & ((1 << fragrecptr.p->k) - 1);
- fragrecptr.p->expReceiveForward = ZTRUE;
- if (excPageptr.i == RNIL) {
- jam();
- endofshrinkbucketLab(signal); /* EMPTY BUCKET */
- return;
- }//if
- /*--------------------------------------------------------------------------*/
- /* INITIALISE THE VARIABLES FOR THE SHRINK PROCESS. */
- /*--------------------------------------------------------------------------*/
- ptrCheckGuard(excPageptr, cpagesize, page8);
- cexcForward = ZTRUE;
- cexcContainerptr = (cexcPageindex << ZSHIFT_PLUS) - (cexcPageindex << ZSHIFT_MINUS);
- cexcContainerptr = cexcContainerptr + ZHEAD_SIZE;
- arrGuard(cexcContainerptr, 2048);
- cexcContainerhead = excPageptr.p->word32[cexcContainerptr];
- cexcContainerlen = cexcContainerhead >> 26;
- if (cexcContainerlen <= ZCON_HEAD_SIZE) {
- ndbrequire(cexcContainerlen == ZCON_HEAD_SIZE);
- } else {
- jam();
- shrinkcontainer(signal);
- }//if
- /*--------------------------------------------------------------------------*/
- /* THIS CONTAINER IS NOT YET EMPTY AND WE REMOVE ALL THE ELEMENTS. */
- /*--------------------------------------------------------------------------*/
- if (((cexcContainerhead >> 10) & 1) == 1) {
- jam();
- rlPageptr = excPageptr;
- trlPageindex = cexcPageindex;
- trlRelCon = ZFALSE;
- turlIndex = cexcContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
- releaseRightlist(signal);
- }//if
- tshrTmp1 = ZCON_HEAD_SIZE;
- tshrTmp1 = tshrTmp1 << 26;
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- datapageptr.p = excPageptr.p;
- cundoinfolength = 1;
- cundoElemIndex = cexcContainerptr;
- undoWritingProcess(signal);
- }//if
- dbgWord32(excPageptr, cexcContainerptr, tshrTmp1);
- arrGuard(cexcContainerptr, 2048);
- excPageptr.p->word32[cexcContainerptr] = tshrTmp1;
- if (((cexcContainerhead >> 7) & 0x3) == 0) {
- jam();
- endofshrinkbucketLab(signal);
- return;
- }//if
- nextcontainerinfoExp(signal);
- do {
- cexcContainerptr = (cexcPageindex << ZSHIFT_PLUS) - (cexcPageindex << ZSHIFT_MINUS);
- if (cexcForward == ZTRUE) {
- jam();
- cexcContainerptr = cexcContainerptr + ZHEAD_SIZE;
- } else {
- jam();
- cexcContainerptr = ((cexcContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE;
- }//if
- arrGuard(cexcContainerptr, 2048);
- cexcContainerhead = excPageptr.p->word32[cexcContainerptr];
- cexcContainerlen = cexcContainerhead >> 26;
- ndbrequire(cexcContainerlen > ZCON_HEAD_SIZE);
- /*--------------------------------------------------------------------------*/
- /* THIS CONTAINER IS NOT YET EMPTY AND WE REMOVE ALL THE ELEMENTS. */
- /*--------------------------------------------------------------------------*/
- shrinkcontainer(signal);
- cexcPrevpageptr = excPageptr.i;
- cexcPrevpageindex = cexcPageindex;
- cexcPrevforward = cexcForward;
- if (((cexcContainerhead >> 7) & 0x3) != 0) {
- jam();
- /*--------------------------------------------------------------------------*/
- /* WE MUST CALL THE NEXT CONTAINER INFO ROUTINE BEFORE WE RELEASE THE */
- /* CONTAINER SINCE THE RELEASE WILL OVERWRITE THE NEXT POINTER. */
- /*--------------------------------------------------------------------------*/
- nextcontainerinfoExp(signal);
- }//if
- rlPageptr.i = cexcPrevpageptr;
- ptrCheckGuard(rlPageptr, cpagesize, page8);
- trlPageindex = cexcPrevpageindex;
- if (cexcPrevforward == ZTRUE) {
- jam();
- if (((cexcContainerhead >> 10) & 1) == 1) {
- jam();
- trlRelCon = ZFALSE;
- turlIndex = cexcContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
- releaseRightlist(signal);
- }//if
- trlRelCon = ZTRUE;
- tullIndex = cexcContainerptr;
- releaseLeftlist(signal);
- } else {
- jam();
- if (((cexcContainerhead >> 10) & 1) == 1) {
- jam();
- trlRelCon = ZFALSE;
- tullIndex = cexcContainerptr - (ZBUF_SIZE - ZCON_HEAD_SIZE);
- releaseLeftlist(signal);
- }//if
- trlRelCon = ZTRUE;
- turlIndex = cexcContainerptr;
- releaseRightlist(signal);
- }//if
- } while (((cexcContainerhead >> 7) & 0x3) != 0);
- endofshrinkbucketLab(signal);
- return;
-}//Dbacc::execSHRINKCHECK2()
-
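execSHRINKCHECK2() keeps slack in an unsigned word but treats it as a signed quantity: the test slack > (1u << 31) above detects a negative slack, and a shrink only proceeds when the slack is non-negative and larger than slackCheck. A small sketch of that convention, with illustrative function names:

#include <cstdint>

// Same sign test as in execSHRINKCHECK2(): slack is stored as Uint32
// but values above 1u << 31 are interpreted as negative.
static bool slackIsNegative(uint32_t slack)
{
  return slack > (1u << 31);
}

static bool shrinkStillWanted(uint32_t slack, uint32_t slackCheck)
{
  // execSHRINKCHECK2() gives up when slack <= slackCheck or when the
  // slack is negative; otherwise the join-bucket process continues.
  return !slackIsNegative(slack) && slack > slackCheck;
}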
-void Dbacc::endofshrinkbucketLab(Signal* signal)
-{
- fragrecptr.p->expandCounter--;
- fragrecptr.p->slack -= fragrecptr.p->maxloadfactor;
- if (fragrecptr.p->expSenderIndex == 0) {
- jam();
- fragrecptr.p->dirsize--;
- if (fragrecptr.p->expSenderPageptr != RNIL) {
- jam();
- rpPageptr.i = fragrecptr.p->expSenderPageptr;
- ptrCheckGuard(rpPageptr, cpagesize, page8);
- releasePage(signal);
- expDirptr.i = fragrecptr.p->expSenderDirptr;
- ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
- expDirptr.p->pagep[fragrecptr.p->expSenderDirIndex & 0xff] = RNIL;
- }//if
- if (((((fragrecptr.p->p + fragrecptr.p->maxp) + 1) >> fragrecptr.p->k) & 0xff) == 0) {
- jam();
- rdDirptr.i = fragrecptr.p->expSenderDirptr;
- releaseDirectory(signal);
- expDirRangePtr.i = fragrecptr.p->directory;
- ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
- arrGuard((fragrecptr.p->expSenderDirIndex >> 8), 256);
- expDirRangePtr.p->dirArray[fragrecptr.p->expSenderDirIndex >> 8] = RNIL;
- }//if
- }//if
- if (fragrecptr.p->slack < (1u << 31)) {
- jam();
- /*--------------------------------------------------------------*/
- /* THE SLACK IS POSITIVE, IN THIS CASE WE WILL CHECK WHETHER */
- /* WE WILL CONTINUE PERFORM ANOTHER SHRINK. */
- /*--------------------------------------------------------------*/
- Uint32 noOfBuckets = (fragrecptr.p->maxp + 1) + fragrecptr.p->p;
- Uint32 Thysteresis = fragrecptr.p->maxloadfactor - fragrecptr.p->minloadfactor;
- fragrecptr.p->slackCheck = noOfBuckets * Thysteresis;
- if (fragrecptr.p->slack > Thysteresis) {
- /*--------------------------------------------------------------*/
- /* IT IS STILL NECESSARY TO SHRINK THE FRAGMENT MORE. THIS*/
- /* CAN HAPPEN WHEN A NUMBER OF SHRINKS GET REJECTED */
- /* DURING A LOCAL CHECKPOINT. WE START A NEW SHRINK */
- /* IMMEDIATELY FROM HERE WITHOUT WAITING FOR A COMMIT TO */
- /* START IT. */
- /*--------------------------------------------------------------*/
- if (fragrecptr.p->expandCounter > 0) {
- jam();
- /*--------------------------------------------------------------*/
- /* IT IS VERY IMPORTANT TO NOT TRY TO SHRINK MORE THAN */
- /* WAS EXPANDED. IF MAXP IS SET TO A VALUE BELOW 63 THEN */
- /* WE WILL LOSE RECORDS SINCE GETDIRINDEX CANNOT HANDLE */
- /* SHRINKING BELOW 2^K - 1 (NOW 63). THIS WAS A BUG THAT */
- /* WAS REMOVED 2000-05-12. */
- /*--------------------------------------------------------------*/
- signal->theData[0] = fragrecptr.i;
- signal->theData[1] = fragrecptr.p->p;
- signal->theData[2] = fragrecptr.p->maxp;
- signal->theData[3] = fragrecptr.p->expandFlag;
- ndbrequire(fragrecptr.p->expandFlag < 2);
- fragrecptr.p->expandFlag = 2;
- sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 4, JBB);
- }//if
- }//if
- }//if
- ndbrequire(fragrecptr.p->maxp >= (Uint32)((1 << fragrecptr.p->k) - 1));
- return;
-}//Dbacc::endofshrinkbucketLab()
-
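endofshrinkbucketLab() recomputes slackCheck as the current number of buckets times the hysteresis between the maximum and minimum load factors, and fires another SHRINKCHECK2 only when the remaining slack exceeds that hysteresis and expandCounter is still positive. A compact sketch of the decision, with illustrative parameter names:

#include <cstdint>

// Recompute slackCheck and decide whether another shrink should start
// immediately, following the arithmetic in endofshrinkbucketLab().
static bool wantAnotherShrink(uint32_t maxp, uint32_t p,
                              uint32_t maxLoadFactor, uint32_t minLoadFactor,
                              uint32_t slack, uint32_t expandCounter,
                              uint32_t& slackCheckOut)
{
  const uint32_t noOfBuckets = (maxp + 1) + p;
  const uint32_t hysteresis  = maxLoadFactor - minLoadFactor;
  slackCheckOut = noOfBuckets * hysteresis;   // new slackCheck
  // Shrink again only while the positive slack exceeds the hysteresis and
  // we never shrink more than was previously expanded.
  return slack > hysteresis && expandCounter > 0;
}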
-/* --------------------------------------------------------------------------------- */
-/* SHRINKCONTAINER */
-/* INPUT: EXC_PAGEPTR (POINTER TO THE ACTIVE PAGE RECORD) */
-/* CEXC_CONTAINERLEN (LENGTH OF THE CONTAINER). */
-/* CEXC_CONTAINERPTR (ARRAY INDEX OF THE CONTAINER). */
-/* CEXC_FORWARD (CONTAINER FORWARD (+1) OR BACKWARD (-1)) */
-/* */
-/* DESCRIPTION: ALL ELEMENTS OF THE ACTIVE CONTAINER HAVE TO MOVE TO THE NEW */
-/* CONTAINER. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::shrinkcontainer(Signal* signal)
-{
- Uint32 tshrElementptr;
- Uint32 tshrRemLen;
- Uint32 tshrInc;
- Uint32 tshrTmp;
- Uint32 tshrIndex;
- Uint32 guard21;
-
- tshrRemLen = cexcContainerlen - ZCON_HEAD_SIZE;
- tshrInc = fragrecptr.p->elementLength;
- if (cexcForward == ZTRUE) {
- jam();
- tshrElementptr = cexcContainerptr + ZCON_HEAD_SIZE;
- } else {
- jam();
- tshrElementptr = cexcContainerptr - 1;
- }//if
- SHR_LOOP:
- idrOperationRecPtr.i = RNIL;
- ptrNull(idrOperationRecPtr);
- /* --------------------------------------------------------------------------------- */
- /* THE CODE BELOW IS ALL USED TO PREPARE FOR THE CALL TO INSERT_ELEMENT AND */
- /* HANDLE THE RESULT FROM INSERT_ELEMENT. INSERT_ELEMENT INSERTS THE ELEMENT */
- /* INTO ANOTHER BUCKET. */
- /* --------------------------------------------------------------------------------- */
- arrGuard(tshrElementptr, 2048);
- tidrElemhead = excPageptr.p->word32[tshrElementptr];
- if (ElementHeader::getLocked(tidrElemhead)) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* IF THE ELEMENT IS LOCKED WE MUST UPDATE THE ELEMENT INFO IN THE OPERATION */
- /* RECORD OWNING THE LOCK. WE DO THIS BY READING THE OPERATION RECORD POINTER */
- /* FROM THE ELEMENT HEADER. */
- /* --------------------------------------------------------------------------------- */
- idrOperationRecPtr.i = ElementHeader::getOpPtrI(tidrElemhead);
- ptrCheckGuard(idrOperationRecPtr, coprecsize, operationrec);
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- /* --------------------------------------------------------------------------------- */
-      // During local checkpoints we must ensure that we restore the element header in
-      // the unlocked state, with the hash value part present and the tuple status zeroed.
-      // Otherwise a later insert over the same element would write an UNDO log entry
-      // that restores the now removed element together with its locked element header
-      // and without the hash value part.
- /* --------------------------------------------------------------------------------- */
- const Uint32 hv = idrOperationRecPtr.p->hashvaluePart;
- const Uint32 eh = ElementHeader::setUnlocked(hv, 0);
- excPageptr.p->word32[tshrElementptr] = eh;
- }//if
- }//if
- tshrTmp = tshrElementptr + cexcForward;
- guard21 = fragrecptr.p->localkeylen - 1;
- for (tshrIndex = 0; tshrIndex <= guard21; tshrIndex++) {
- arrGuard(tshrIndex, 2);
- arrGuard(tshrTmp, 2048);
- clocalkey[tshrIndex] = excPageptr.p->word32[tshrTmp];
- tshrTmp = tshrTmp + cexcForward;
- }//for
- tidrPageindex = fragrecptr.p->expReceiveIndex;
- idrPageptr.i = fragrecptr.p->expReceivePageptr;
- ptrCheckGuard(idrPageptr, cpagesize, page8);
- tidrForward = fragrecptr.p->expReceiveForward;
- insertElement(signal);
- /* --------------------------------------------------------------------------------- */
- /* TAKE CARE OF RESULT FROM INSERT_ELEMENT. */
- /* --------------------------------------------------------------------------------- */
- fragrecptr.p->expReceiveIndex = tidrPageindex;
- fragrecptr.p->expReceivePageptr = idrPageptr.i;
- fragrecptr.p->expReceiveForward = tidrForward;
- if (tshrRemLen < tshrInc) {
- jam();
- sendSystemerror(signal);
- }//if
- tshrRemLen = tshrRemLen - tshrInc;
- if (tshrRemLen != 0) {
- jam();
- tshrElementptr = tshrTmp;
- goto SHR_LOOP;
- }//if
-}//Dbacc::shrinkcontainer()
-
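shrinkcontainer() walks a container's elements in either direction: cexcForward is +1 for a forward (left) container and -1 for a backward (right) one, and each element starts with a one-word header followed by localkeylen words of local key. A stripped-down sketch of that walk, with a plain array standing in for the page and all names illustrative:

#include <cstdint>
#include <vector>

// Visit every element of a container and copy out its local key words,
// in the spirit of the loop in shrinkcontainer(). 'forward' is +1 or -1.
static void walkContainer(const uint32_t* page, uint32_t firstElemHeader,
                          int forward, uint32_t localKeyLen,
                          uint32_t elementCount, std::vector<uint32_t>& keysOut)
{
  uint32_t elem = firstElemHeader;
  for (uint32_t e = 0; e < elementCount; e++) {
    uint32_t idx = elem + forward;            // step past the element header
    for (uint32_t k = 0; k < localKeyLen; k++) {
      keysOut.push_back(page[idx]);           // copy one local key word
      idx += forward;
    }
    elem = idx;                               // header of the next element
  }
}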
-/* --------------------------------------------------------------------------------- */
-/* NEXTCONTAINERINFO_EXP */
-/* DESCRIPTION:THE CONTAINER HEAD WILL BE CHECKED TO CALCULATE INFORMATION */
-/* ABOUT NEXT CONTAINER IN THE BUCKET. */
-/* INPUT: CEXC_CONTAINERHEAD */
-/* CEXC_CONTAINERPTR */
-/* EXC_PAGEPTR */
-/* OUTPUT: */
-/* CEXC_PAGEINDEX (INDEX FROM WHICH PAGE INDEX CAN BE CALCULATED). */
-/* EXC_PAGEPTR (PAGE REFERENCE OF NEXT CONTAINER) */
-/* CEXC_FORWARD */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::nextcontainerinfoExp(Signal* signal)
-{
- tnciNextSamePage = (cexcContainerhead >> 9) & 0x1; /* CHECK BIT FOR CHECKING WHERE */
- /* THE NEXT CONTAINER IS IN THE SAME PAGE */
- cexcPageindex = cexcContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */
- if (((cexcContainerhead >> 7) & 3) == ZLEFT) {
- jam();
- cexcForward = ZTRUE;
- } else if (((cexcContainerhead >> 7) & 3) == ZRIGHT) {
- jam();
- cexcForward = cminusOne;
- } else {
- jam();
- sendSystemerror(signal);
- cexcForward = 0; /* DUMMY FOR COMPILER */
- }//if
- if (tnciNextSamePage == ZFALSE) {
- jam();
- /* NEXT CONTAINER IS IN AN OVERFLOW PAGE */
- arrGuard(cexcContainerptr + 1, 2048);
- tnciTmp = excPageptr.p->word32[cexcContainerptr + 1];
- nciOverflowrangeptr.i = fragrecptr.p->overflowdir;
- ptrCheckGuard(nciOverflowrangeptr, cdirrangesize, dirRange);
- arrGuard((tnciTmp >> 8), 256);
- nciOverflowDirptr.i = nciOverflowrangeptr.p->dirArray[tnciTmp >> 8];
- ptrCheckGuard(nciOverflowDirptr, cdirarraysize, directoryarray);
- excPageptr.i = nciOverflowDirptr.p->pagep[tnciTmp & 0xff];
- ptrCheckGuard(excPageptr, cpagesize, page8);
- }//if
-}//Dbacc::nextcontainerinfoExp()
-
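The shifts applied to the container header throughout this module give its layout as used here: bits 26 and up hold the container length, bits 7-8 the next-container side code compared against ZLEFT/ZRIGHT (zero meaning last container in the bucket), bit 9 the next-on-same-page flag, bit 10 the flag tested before releasing the opposite buffer half, and bits 0-6 the page index of the next container. A small decoding sketch derived from those shifts, with illustrative field and function names:

#include <cstdint>

// Decode the container header fields exactly as the expand/shrink code
// above does (>>26, >>7 & 3, >>9 & 1, >>10 & 1, and the low 7 bits).
struct ContainerHead {
  uint32_t length;           // words used in the container, incl. header
  uint32_t nextSideCode;     // compared against ZLEFT/ZRIGHT; 0 = last
  bool     nextOnSamePage;   // next container resides on this page
  bool     releaseOtherSide; // bit 10: tested before releasing the opposite buffer half (assumed meaning)
  uint32_t nextPageIndex;    // page index of the next container
};

static ContainerHead decodeContainerHead(uint32_t head)
{
  ContainerHead h;
  h.length           = head >> 26;
  h.nextSideCode     = (head >> 7) & 0x3;
  h.nextOnSamePage   = ((head >> 9) & 0x1) != 0;
  h.releaseOtherSide = ((head >> 10) & 0x1) != 0;
  h.nextPageIndex    = head & 0x7f;
  return h;
}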
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* END OF EXPAND/SHRINK MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* LOCAL CHECKPOINT MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* LCP_FRAGIDREQ */
-/* SENDER: LQH, LEVEL B */
-/* ENTER LCP_FRAGIDREQ WITH */
-/* TUSERPTR LQH CONNECTION PTR */
-/* TUSERBLOCKREF, LQH BLOCK REFERENCE */
-/* TCHECKPOINTID, THE CHECKPOINT NUMBER TO USE */
-/* (E.G. 1,2 OR 3) */
-/* TABPTR, TABLE ID = TABLE RECORD POINTER */
-/* TFID ROOT FRAGMENT ID */
-/* CACTIVE_UNDO_FILE_VERSION UNDO FILE VERSION 0,1,2 OR 3. */
-/* ******************--------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* LCP_FRAGIDREQ REQUEST TO START A LOCAL CHECKPOINT */
-/* ******************------------------------------+ */
-/* SENDER: LQH, LEVEL B */
-void Dbacc::execLCP_FRAGIDREQ(Signal* signal)
-{
- jamEntry();
- tuserptr = signal->theData[0]; /* LQH CONNECTION PTR */
- tuserblockref = signal->theData[1]; /* LQH BLOCK REFERENCE */
- tcheckpointid = signal->theData[2]; /* THE CHECKPOINT NUMBER TO USE */
- /* (E.G. 1,2 OR 3) */
- tabptr.i = signal->theData[3]; /* TABLE ID = TABLE RECORD POINTER */
- ptrCheck(tabptr, ctablesize, tabrec);
- tfid = signal->theData[4]; /* ROOT FRAGMENT ID */
- cactiveUndoFileVersion = signal->theData[5]; /* UNDO FILE VERSION 0,1,2 OR 3. */
- tresult = 0;
- ndbrequire(getrootfragmentrec(signal, rootfragrecptr, tfid));
- ndbrequire(rootfragrecptr.p->rootState == ACTIVEROOT);
- seizeLcpConnectRec(signal);
- initLcpConnRec(signal);
- lcpConnectptr.p->rootrecptr = rootfragrecptr.i;
- rootfragrecptr.p->lcpPtr = lcpConnectptr.i;
- lcpConnectptr.p->localCheckPid = tcheckpointid;
- lcpConnectptr.p->lcpstate = LCP_ACTIVE;
- rootfragrecptr.p->rootState = LCP_CREATION;
- fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
- /* D6 AT FSOPENREQ =#010003FF. */
- tlfrTmp1 = 0x010003ff; /* FILE TYPE = .DATA ,VERSION OF FILENAME = 1 */
- tlfrTmp2 = 0x301; /* D7 CREATE, WRITE ONLY, TRUNCATE TO ZERO */
- ndbrequire(cfsFirstfreeconnect != RNIL);
- seizeFsConnectRec(signal);
- fsConnectptr.p->fragrecPtr = fragrecptr.i;
- fsConnectptr.p->fsState = WAIT_OPEN_DATA_FILE_FOR_WRITE;
- /* ----------- FILENAME (FILESYSTEM)/D3/DBACC/"T"TABID/"F"FRAGID/"S"VERSIONID.DATA ------------ */
- /* ************************ */
- /* FSOPENREQ */
- /* ************************ */
- signal->theData[0] = cownBlockref;
- signal->theData[1] = fsConnectptr.i;
- signal->theData[2] = tabptr.i; /* TABLE IDENTITY */
- signal->theData[3] = rootfragrecptr.p->fragmentid[0]; /* FRAGMENT IDENTITY */
- signal->theData[4] = lcpConnectptr.p->localCheckPid; /* CHECKPOINT ID */
- signal->theData[5] = tlfrTmp1;
- signal->theData[6] = tlfrTmp2;
- sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
- return;
-}//Dbacc::execLCP_FRAGIDREQ()
-
-/* ******************--------------------------------------------------------------- */
-/* FSOPENCONF OPENFILE CONF */
-/* SENDER: FS, LEVEL B */
-/* ENTER FSOPENCONF WITH */
-/* FS_CONNECTPTR, FS_CONNECTION PTR */
-/* TUSERPOINTER, FILE POINTER */
-/* ******************--------------------------------------------------------------- */
-void Dbacc::lcpFsOpenConfLab(Signal* signal)
-{
- fsConnectptr.p->fsPtr = tuserptr;
- fragrecptr.i = fsConnectptr.p->fragrecPtr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- rootfragrecptr.i = fragrecptr.p->myroot;
- fragrecptr.p->activeDataFilePage = 1; /* ZERO IS KEPT FOR PAGE_ZERO */
- fragrecptr.p->fsConnPtr = fsConnectptr.i;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- if (rootfragrecptr.p->fragmentptr[0] == fragrecptr.i) {
- jam();
- fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
- ptrCheck(fragrecptr, cfragmentsize, fragmentrec);
- /* ----------- FILENAME (FILESYSTEM)/D3/DBACC/"T"TABID/"F"FRAGID/"S"VERSIONID.DATA ------------ */
- /* D6 AT FSOPENREQ =#010003FF. */
- tlfrTmp1 = 0x010003ff; /* FILE TYPE = .DATA ,VERSION OF FILENAME = 1 */
- tlfrTmp2 = 0x301; /* D7 CREATE, WRITE ONLY, TRUNCATE TO ZERO */
- ndbrequire(cfsFirstfreeconnect != RNIL);
- seizeFsConnectRec(signal);
- fsConnectptr.p->fragrecPtr = fragrecptr.i;
- fsConnectptr.p->fsState = WAIT_OPEN_DATA_FILE_FOR_WRITE;
- /* ************************ */
- /* FSOPENREQ */
- /* ************************ */
- signal->theData[0] = cownBlockref;
- signal->theData[1] = fsConnectptr.i;
- signal->theData[2] = rootfragrecptr.p->mytabptr; /* TABLE IDENTITY */
- signal->theData[3] = rootfragrecptr.p->fragmentid[1]; /* FRAGMENT IDENTITY */
- signal->theData[4] = lcpConnectptr.p->localCheckPid; /* CHECKPOINT ID */
- signal->theData[5] = tlfrTmp1;
- signal->theData[6] = tlfrTmp2;
- sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
- return;
- } else {
- ndbrequire(rootfragrecptr.p->fragmentptr[1] == fragrecptr.i);
- }//if
- /*---- BOTH DATA FILES ARE OPEN------*/
- /* ----IF THE UNDO FILE IS CLOSED , OPEN IT.----- */
- if (cactiveOpenUndoFsPtr != RNIL) {
- jam();
- sendLcpFragidconfLab(signal);
- return;
- }//if
- cactiveUndoFilePage = 0;
- cprevUndoaddress = cminusOne;
- cundoposition = 0;
- clastUndoPageIdWritten = 0;
- ndbrequire(cfsFirstfreeconnect != RNIL);
- seizeFsConnectRec(signal);
- fsConnectptr.p->fsState = WAIT_OPEN_UNDO_LCP;
- fsConnectptr.p->fsPart = 0; /* FILE INDEX, SECOND FILE IN THE DIRECTORY */
- cactiveOpenUndoFsPtr = fsConnectptr.i;
- cactiveRootfrag = rootfragrecptr.i;
- tlfrTmp1 = 1; /* FILE VERSION */
- tlfrTmp1 = (tlfrTmp1 << 8) + ZLOCALLOGFILE; /* .LOCLOG = 2 */
- tlfrTmp1 = (tlfrTmp1 << 8) + 4; /* ROOT DIRECTORY = D4 */
- tlfrTmp1 = (tlfrTmp1 << 8) + fsConnectptr.p->fsPart; /* P2 */
- tlfrTmp2 = 0x302; /* D7 CREATE , READ / WRITE , TRUNCATE TO ZERO */
- /* ---FILE NAME "D4"/"DBACC"/LCP_CONNECTPTR:LOCAL_CHECK_PID/FS_CONNECTPTR:FS_PART".LOCLOG-- */
- /* ************************ */
- /* FSOPENREQ */
- /* ************************ */
- signal->theData[0] = cownBlockref;
- signal->theData[1] = fsConnectptr.i;
- signal->theData[2] = cminusOne; /* #FFFFFFFF */
- signal->theData[3] = cminusOne; /* #FFFFFFFF */
- signal->theData[4] = cactiveUndoFileVersion;
- /* A GROUP OF UNDO FILES WHICH ARE UPDATED */
- signal->theData[5] = tlfrTmp1;
- signal->theData[6] = tlfrTmp2;
- sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
- return;
-}//Dbacc::lcpFsOpenConfLab()
-
-void Dbacc::lcpOpenUndofileConfLab(Signal* signal)
-{
- ptrGuard(fsConnectptr);
- fsConnectptr.p->fsState = WAIT_NOTHING;
- rootfragrecptr.i = cactiveRootfrag;
- ptrCheck(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- fsConnectptr.p->fsPtr = tuserptr;
- sendLcpFragidconfLab(signal);
- return;
-}//Dbacc::lcpOpenUndofileConfLab()
-
-void Dbacc::sendLcpFragidconfLab(Signal* signal)
-{
- ptrGuard(rootfragrecptr);
- lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- /* ************************ */
- /* LCP_FRAGIDCONF */
- /* ************************ */
- signal->theData[0] = lcpConnectptr.p->lcpUserptr;
- signal->theData[1] = lcpConnectptr.i;
- signal->theData[2] = 2;
- /* NO OF LOCAL FRAGMENTS */
- signal->theData[3] = rootfragrecptr.p->fragmentid[0];
- signal->theData[4] = rootfragrecptr.p->fragmentid[1];
- signal->theData[5] = RNIL;
- signal->theData[6] = RNIL;
- sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_LCP_FRAGIDCONF, signal, 7, JBB);
- return;
-}//Dbacc::sendLcpFragidconfLab()
-
-/* ******************--------------------------------------------------------------- */
-/* LCP_HOLDOPERATION REQUEST FOR LIST OF STOPPED OPERATIONS */
-/* SENDER: LQH, LEVEL B */
-/* ENTER LCP_HOLDOPREQ WITH */
-/* LCP_CONNECTPTR CONNECTION POINTER */
-/* TFID, LOCAL FRAGMENT ID */
-/* THOLD_PREV_SENT_OP NR OF SENT OPERATIONS AT */
-/* PREVIOUS SIGNALS */
-/* TLQH_POINTER LQH USER POINTER */
-/* ******************--------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* LCP_HOLDOPERATION REQUEST FOR LIST OF STOPPED OPERATIONS */
-/* ******************------------------------------+ */
-/* SENDER: LQH, LEVEL B */
-void Dbacc::execLCP_HOLDOPREQ(Signal* signal)
-{
- Uint32 tholdPrevSentOp;
-
- jamEntry();
- lcpConnectptr.i = signal->theData[0]; /* CONNECTION POINTER */
- tfid = signal->theData[1]; /* LOCAL FRAGMENT ID */
- tholdPrevSentOp = signal->theData[2]; /* NR OF SENT OPERATIONS AT */
- /* PREVIOUS SIGNALS */
- tlqhPointer = signal->theData[3]; /* LQH USER POINTER */
-
- tresult = 0;
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
- rootfragrecptr.i = lcpConnectptr.p->rootrecptr;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- if (rootfragrecptr.p->fragmentid[0] == tfid) {
- jam();
- fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- } else {
- ndbrequire(rootfragrecptr.p->fragmentid[1] == tfid);
- jam();
- fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
- }//if
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- fragrecptr.p->lcpLqhPtr = tlqhPointer;
- if (tholdPrevSentOp != 0) {
- ndbrequire(fragrecptr.p->fragState == SEND_QUE_OP);
- } else if (tholdPrevSentOp == 0) {
- jam();
- fragrecptr.p->fragState = SEND_QUE_OP;
- fragrecptr.p->stopQueOp = ZTRUE;
- fragrecptr.p->sentWaitInQueOp = fragrecptr.p->firstWaitInQueOp;
- }//if
-  tholdSentOp = 0; /* NR OF OPERATIONS WHICH ARE SENT THIS TIME */
- operationRecPtr.i = fragrecptr.p->sentWaitInQueOp;
-
- /* --------------------------------------------- */
-  /*       GO THROUGH ALL OPERATIONS IN THE WAIT   */
- /* LIST AND SEND THE LQH CONNECTION PTR OF THE */
-  /* OPERATIONS TO THE LQH BLOCK. MAX 23 OPERATIONS */
- /* PER SIGNAL */
- /* --------------------------------------------- */
- while (operationRecPtr.i != RNIL) {
- jam();
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
- ckeys[tholdSentOp] = operationRecPtr.p->userptr;
- operationRecPtr.i = operationRecPtr.p->nextQueOp;
- tholdSentOp++;
- if ((tholdSentOp >= 23) &&
- (operationRecPtr.i != RNIL)) {
- jam();
- /* ----------------------------------------------- */
-      /* THERE ARE MORE THAN 23 WAITING OPERATIONS. WE  */
-      /* HAVE TO SEND THESE 23 AND WAIT FOR THE NEXT SIGNAL */
- /* ----------------------------------------------- */
-      tholdMore = ZTRUE; /* SECOND DATA AT THE CONF SIGNAL, = MORE */
- fragrecptr.p->sentWaitInQueOp = operationRecPtr.i;
- sendholdconfsignalLab(signal);
- return;
- }//if
- }//while
- /* ----------------------------------------------- */
- /* OPERATION_REC_PTR = RNIL */
-  /* THERE ARE NO MORE WAITING OPERATIONS, THE STATE OF */
-  /* THE FRAGMENT RECORD IS CHANGED AND THE RETURN      */
- /* SIGNAL IS SENT */
- /* ----------------------------------------------- */
- fragrecptr.p->sentWaitInQueOp = RNIL;
- tholdMore = ZFALSE; /* SECOND DATA AT THE CONF SIGNAL , = NOT MORE */
- fragrecptr.p->fragState = WAIT_ACC_LCPREQ;
- sendholdconfsignalLab(signal);
- return;
-}//Dbacc::execLCP_HOLDOPREQ()
-
-void Dbacc::sendholdconfsignalLab(Signal* signal)
-{
- tholdMore = (tholdMore << 16) + tholdSentOp;
- /* SECOND SIGNAL DATA, LENGTH + MORE */
- /* ************************ */
- /* LCP_HOLDOPCONF */
- /* ************************ */
- signal->theData[0] = fragrecptr.p->lcpLqhPtr;
- signal->theData[1] = tholdMore;
- signal->theData[2] = ckeys[0];
- signal->theData[3] = ckeys[1];
- signal->theData[4] = ckeys[2];
- signal->theData[5] = ckeys[3];
- signal->theData[6] = ckeys[4];
- signal->theData[7] = ckeys[5];
- signal->theData[8] = ckeys[6];
- signal->theData[9] = ckeys[7];
- signal->theData[10] = ckeys[8];
- signal->theData[11] = ckeys[9];
- signal->theData[12] = ckeys[10];
- signal->theData[13] = ckeys[11];
- signal->theData[14] = ckeys[12];
- signal->theData[15] = ckeys[13];
- signal->theData[16] = ckeys[14];
- signal->theData[17] = ckeys[15];
- signal->theData[18] = ckeys[16];
- signal->theData[19] = ckeys[17];
- signal->theData[20] = ckeys[18];
- signal->theData[21] = ckeys[19];
- signal->theData[22] = ckeys[20];
- signal->theData[23] = ckeys[21];
- signal->theData[24] = ckeys[22];
- sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_LCP_HOLDOPCONF, signal, 25, JBA);
- return;
-}//Dbacc::sendholdconfsignalLab()
-
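sendholdconfsignalLab() packs the more-operations-follow flag and the number of returned operation keys into the second signal word: the flag goes into the upper 16 bits and the count into the lower 16. A tiny sketch of that packing, with a symmetric unpack assumed for the receiving side:

#include <cstdint>

// Pack the "more operations follow" flag and the operation count the
// way sendholdconfsignalLab() does: flag in the high 16 bits, count low.
static uint32_t packHoldOpWord(uint32_t moreFlag, uint32_t sentOps)
{
  return (moreFlag << 16) + sentOps;
}

// Assumed symmetric unpacking; the receiving block is not shown here.
static void unpackHoldOpWord(uint32_t word, uint32_t& moreFlag, uint32_t& sentOps)
{
  moreFlag = word >> 16;
  sentOps  = word & 0xffff;
}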
-/**
- * execACC_LCPREQ
- * Perform local checkpoint of a fragment
- *
- * SENDER: LQH, LEVEL B
- * ENTER ACC_LCPREQ WITH
- * LCP_CONNECTPTR, OPERATION RECORD PTR
- * TLCP_LQH_CHECK_V, LQH'S LOCAL FRAG CHECK VALUE
- * TLCP_LOCAL_FRAG_ID, LOCAL FRAG ID
- *
- */
-void Dbacc::execACC_LCPREQ(Signal* signal)
-{
- Uint32 tlcpLocalFragId;
- Uint32 tlcpLqhCheckV;
-
- jamEntry();
- lcpConnectptr.i = signal->theData[0]; // CONNECTION PTR
- tlcpLqhCheckV = signal->theData[1]; // LQH'S LOCAL FRAG CHECK VALUE
- tlcpLocalFragId = signal->theData[2]; // LOCAL FRAG ID
- tresult = 0;
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
-
- rootfragrecptr.i = lcpConnectptr.p->rootrecptr;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- if (rootfragrecptr.p->fragmentid[0] == tlcpLocalFragId) {
- jam();
- fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
- } else {
- ndbrequire(rootfragrecptr.p->fragmentid[1] == tlcpLocalFragId);
- jam();
- fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
- }//if
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- ndbrequire(fragrecptr.p->fragState == WAIT_ACC_LCPREQ);
- fragrecptr.p->lcpLqhPtr = tlcpLqhCheckV;
-
- Page8Ptr zeroPagePtr;
- seizeLcpPage(zeroPagePtr);
- fragrecptr.p->zeroPagePtr = zeroPagePtr.i;
- fragrecptr.p->prevUndoposition = cminusOne;
- initRootFragPageZero(rootfragrecptr, zeroPagePtr);
- initFragPageZero(fragrecptr, zeroPagePtr);
- /*-----------------------------------------------------------------*/
- /* SEIZE ZERO PAGE FIRST AND THEN SEIZE DATA PAGES IN */
- /* BACKWARDS ORDER. THIS IS TO ENSURE THAT WE GET THE PAGES */
- /* IN ORDER. ON WINDOWS NT THIS WILL BE A BENEFIT SINCE WE */
- /* CAN THEN DO 1 WRITE_FILE INSTEAD OF 8. */
- /* WHEN WE RELEASE THE PAGES WE RELEASE THEM IN THE OPPOSITE */
- /* ORDER. */
- /*-----------------------------------------------------------------*/
- for (Uint32 taspTmp = ZWRITEPAGESIZE - 1; (Uint32)~taspTmp; taspTmp--) {
- Page8Ptr dataPagePtr;
- jam();
- ndbrequire(fragrecptr.p->datapages[taspTmp] == RNIL);
- seizeLcpPage(dataPagePtr);
- fragrecptr.p->datapages[taspTmp] = dataPagePtr.i;
- }//for
- fragrecptr.p->lcpMaxDirIndex = fragrecptr.p->dirsize;
- fragrecptr.p->lcpMaxOverDirIndex = fragrecptr.p->lastOverIndex;
- fragrecptr.p->createLcp = ZTRUE;
- operationRecPtr.i = fragrecptr.p->lockOwnersList;
- lcp_write_op_to_undolog(signal);
-}
-
-void
-Dbacc::lcp_write_op_to_undolog(Signal* signal)
-{
- bool delay_continueb= false;
- Uint32 i, j;
- for (i= 0; i < 16; i++) {
- jam();
- if (remainingUndoPages() <= ZMIN_UNDO_PAGES_AT_COMMIT) {
- jam();
- delay_continueb= true;
- break;
- }
- for (j= 0; j < 32; j++) {
- if (operationRecPtr.i == RNIL) {
- jam();
- break;
- }
- jam();
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
-
- if ((operationRecPtr.p->operation == ZINSERT) ||
- (operationRecPtr.p->elementIsDisappeared == ZTRUE)){
- /*******************************************************************
-         * Only log inserts and elements that are marked as disappeared.
- * All other operations update the element header and that is handled
- * when pages are written to disk
- ********************************************************************/
- undopageptr.i = (cundoposition>>ZUNDOPAGEINDEXBITS) & (cundopagesize-1);
- ptrAss(undopageptr, undopage);
- theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
- tundoindex = theadundoindex + ZUNDOHEADSIZE;
-
- writeUndoOpInfo(signal);/* THE INFORMATION ABOUT ELEMENT HEADER, STORED*/
- /* IN OP REC, IS WRITTEN AT UNDO PAGES */
-        cundoElemIndex = 0;/* DEFAULT VALUE USED BY WRITE_UNDO_HEADER SUBROUTINE */
- writeUndoHeader(signal, RNIL, UndoHeader::ZOP_INFO); /* WRITE THE HEAD OF THE UNDO ELEMENT */
- checkUndoPages(signal); /* SEND UNDO PAGE TO DISK WHEN A GROUP OF */
- /* UNDO PAGES,CURRENTLY 8, IS FILLED */
- }
- operationRecPtr.i = operationRecPtr.p->nextLockOwnerOp;
- }
- if (operationRecPtr.i == RNIL) {
- jam();
- break;
- }
- }
- if (operationRecPtr.i != RNIL) {
- jam();
- signal->theData[0]= ZLCP_OP_WRITE_RT_BREAK;
- signal->theData[1]= operationRecPtr.i;
- signal->theData[2]= fragrecptr.i;
- signal->theData[3]= lcpConnectptr.i;
- if (delay_continueb) {
- jam();
- sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 10, 4);
- } else {
- jam();
- sendSignal(cownBlockref, GSN_CONTINUEB, signal, 4, JBB);
- }
- return;
- }
-
- signal->theData[0] = fragrecptr.p->lcpLqhPtr;
- sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_LCPSTARTED,
- signal, 1, JBA);
-
- fragrecptr.p->activeDataPage = 0;
- fragrecptr.p->lcpDirIndex = 0;
- fragrecptr.p->fragState = LCP_SEND_PAGES;
-
- signal->theData[0] = lcpConnectptr.i;
- signal->theData[1] = fragrecptr.i;
- sendSignal(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 2, JBB);
-}
-
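lcp_write_op_to_undolog() bounds the work done per invocation: at most 16 batches of 32 lock-owner operations are written to the undo log before the routine reschedules itself with CONTINUEB, using a delayed signal when remainingUndoPages() has dropped to ZMIN_UNDO_PAGES_AT_COMMIT or below. A schematic of that real-time-break pattern, with generic callbacks standing in for the block's signal machinery (all names are illustrative):

#include <cstdint>
#include <functional>

// Process a linked list of work items in bounded batches and reschedule
// the remainder, in the spirit of lcp_write_op_to_undolog().
static void processBatched(uint32_t head, uint32_t nil,
                           const std::function<uint32_t(uint32_t)>& logAndNext,
                           const std::function<bool()>& lowOnUndoPages,
                           const std::function<void(uint32_t, bool)>& reschedule,
                           const std::function<void()>& done)
{
  bool delayContinue = false;
  for (uint32_t outer = 0; outer < 16 && head != nil; outer++) {
    if (lowOnUndoPages()) { delayContinue = true; break; }
    for (uint32_t inner = 0; inner < 32 && head != nil; inner++)
      head = logAndNext(head);         // write one undo record, follow list
  }
  if (head != nil)
    reschedule(head, delayContinue);   // CONTINUEB, delayed if low on pages
  else
    done();                            // ACC_LCPSTARTED + ACC_SAVE_PAGES
}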
-/* ******************--------------------------------------------------------------- */
-/* ACC_SAVE_PAGES A GROUP OF PAGES IS ALLOCATED. THE PAGES AND OVERFLOW */
-/* PAGES OF THE FRAGMENT ARE COPIED INTO THEM AND SENT TO */
-/* THE DATA FILE OF THE CHECKPOINT. */
-/* SENDER: ACC, LEVEL B */
-/* ENTER ACC_SAVE_PAGES WITH */
-/* LCP_CONNECTPTR, CONNECTION RECORD PTR */
-/* FRAGRECPTR FRAGMENT RECORD PTR */
-/* ******************--------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* ACC_SAVE_PAGES REQUEST TO SEND THE PAGE TO DISK */
-/* ******************------------------------------+ UNDO PAGES */
-/* SENDER: ACC, LEVEL B */
-void Dbacc::execACC_SAVE_PAGES(Signal* signal)
-{
- jamEntry();
- lcpConnectptr.i = signal->theData[0];
- /* CONNECTION RECORD PTR */
- fragrecptr.i = signal->theData[1];
- /* FRAGMENT RECORD PTR */
- tresult = 0;
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- if (lcpConnectptr.p->lcpstate != LCP_ACTIVE) {
- jam();
- sendSystemerror(signal);
- return;
- }//if
- if (ERROR_INSERTED(3000)) {
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- if (rootfragrecptr.p->mytabptr == c_errorInsert3000_TableId){
- ndbout << "Delay writing of datapages" << endl;
- // Delay writing of pages
- jam();
- sendSignalWithDelay(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 1000, 2);
- return;
- }
- }
- if (clblPageCounter == 0) {
- jam();
- signal->theData[0] = lcpConnectptr.i;
- signal->theData[1] = fragrecptr.i;
- sendSignalWithDelay(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 100, 2);
- return;
- } else {
- jam();
- clblPageCounter = clblPageCounter - 1;
- }//if
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- if (fragrecptr.p->fragState == LCP_SEND_PAGES) {
- jam();
- savepagesLab(signal);
- return;
- } else {
- if (fragrecptr.p->fragState == LCP_SEND_OVER_PAGES) {
- jam();
- saveOverPagesLab(signal);
- return;
- } else {
- ndbrequire(fragrecptr.p->fragState == LCP_SEND_ZERO_PAGE);
- jam();
- saveZeroPageLab(signal);
- return;
- }//if
- }//if
-}//Dbacc::execACC_SAVE_PAGES()
-
-void Dbacc::savepagesLab(Signal* signal)
-{
- DirRangePtr spDirRangePtr;
- DirectoryarrayPtr spDirptr;
- Page8Ptr aspPageptr;
- Page8Ptr aspCopyPageptr;
- Uint32 taspDirindex;
- Uint32 taspDirIndex;
- Uint32 taspIndex;
-
- if ((fragrecptr.p->lcpDirIndex >= fragrecptr.p->dirsize) ||
- (fragrecptr.p->lcpDirIndex >= fragrecptr.p->lcpMaxDirIndex)) {
- jam();
- endsavepageLab(signal);
- return;
- }//if
- /* SOME EXPAND PROCESSES HAVE BEEN PERFORMED. */
-  /* THE ADDED PAGES ARE NOT SENT TO DISK */
- arrGuard(fragrecptr.p->activeDataPage, 8);
- aspCopyPageptr.i = fragrecptr.p->datapages[fragrecptr.p->activeDataPage];
- ptrCheckGuard(aspCopyPageptr, cpagesize, page8);
- taspDirindex = fragrecptr.p->lcpDirIndex; /* DIRECTORY OF ACTIVE PAGE */
- spDirRangePtr.i = fragrecptr.p->directory;
- taspDirIndex = taspDirindex >> 8;
- taspIndex = taspDirindex & 0xff;
- ptrCheckGuard(spDirRangePtr, cdirrangesize, dirRange);
- arrGuard(taspDirIndex, 256);
- spDirptr.i = spDirRangePtr.p->dirArray[taspDirIndex];
- ptrCheckGuard(spDirptr, cdirarraysize, directoryarray);
- aspPageptr.i = spDirptr.p->pagep[taspIndex];
- ptrCheckGuard(aspPageptr, cpagesize, page8);
- ndbrequire(aspPageptr.p->word32[ZPOS_PAGE_ID] == fragrecptr.p->lcpDirIndex);
- lcnPageptr = aspPageptr;
- lcnCopyPageptr = aspCopyPageptr;
- lcpCopyPage(signal);
- fragrecptr.p->lcpDirIndex++;
- fragrecptr.p->activeDataPage++;
- if (fragrecptr.p->activeDataPage < ZWRITEPAGESIZE) {
- jam();
- signal->theData[0] = lcpConnectptr.i;
- signal->theData[1] = fragrecptr.i;
- sendSignal(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 2, JBB);
- return;
- }//if
- senddatapagesLab(signal);
- return;
-}//Dbacc::savepagesLab()
-
-/* FRAGRECPTR:ACTIVE_DATA_PAGE = ZWRITEPAGESIZE */
-/* SEND A GROUP OF PAGES TO DISK */
-void Dbacc::senddatapagesLab(Signal* signal)
-{
- fsConnectptr.i = fragrecptr.p->fsConnPtr;
- ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
- seizeFsOpRec(signal);
- initFsOpRec(signal);
- fsOpptr.p->fsOpstate = WAIT_WRITE_DATA;
- ndbrequire(fragrecptr.p->activeDataPage <= 8);
- for (Uint32 i = 0; i < fragrecptr.p->activeDataPage; i++) {
- signal->theData[i + 6] = fragrecptr.p->datapages[i];
- }//for
- signal->theData[fragrecptr.p->activeDataPage + 6] = fragrecptr.p->activeDataFilePage;
- signal->theData[0] = fsConnectptr.p->fsPtr;
- signal->theData[1] = cownBlockref;
- signal->theData[2] = fsOpptr.i;
- signal->theData[3] = 0x2;
- signal->theData[4] = ZPAGE8_BASE_ADD;
- signal->theData[5] = fragrecptr.p->activeDataPage;
- sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 15, JBA);
- return;
-}//Dbacc::senddatapagesLab()
-
-void Dbacc::endsavepageLab(Signal* signal)
-{
- Page8Ptr espPageidptr;
-
- espPageidptr.i = fragrecptr.p->zeroPagePtr;
- ptrCheckGuard(espPageidptr, cpagesize, page8);
- dbgWord32(espPageidptr, ZPAGEZERO_NO_PAGES, fragrecptr.p->lcpDirIndex);
- espPageidptr.p->word32[ZPAGEZERO_NO_PAGES] = fragrecptr.p->lcpDirIndex;
- fragrecptr.p->fragState = LCP_SEND_OVER_PAGES;
- fragrecptr.p->noOfStoredOverPages = 0;
- fragrecptr.p->lcpDirIndex = 0;
- saveOverPagesLab(signal);
- return;
-}//Dbacc::endsavepageLab()
-
-/* ******************--------------------------------------------------------------- */
-/* ACC_SAVE_OVER_PAGES CONTINUE SAVING THE REMAINING OVERFLOW PAGES. */
-/* ******************--------------------------------------------------------------- */
-void Dbacc::saveOverPagesLab(Signal* signal)
-{
- DirRangePtr sopDirRangePtr;
- DirectoryarrayPtr sopOverflowDirptr;
- Page8Ptr sopPageptr;
- Page8Ptr sopCopyPageptr;
- Uint32 tsopDirindex;
- Uint32 tsopDirInd;
- Uint32 tsopIndex;
-
- if ((fragrecptr.p->lcpDirIndex >= fragrecptr.p->lastOverIndex) ||
- (fragrecptr.p->lcpDirIndex >= fragrecptr.p->lcpMaxOverDirIndex)) {
- jam();
- endsaveoverpageLab(signal);
- return;
- }//if
- arrGuard(fragrecptr.p->activeDataPage, 8);
- sopCopyPageptr.i = fragrecptr.p->datapages[fragrecptr.p->activeDataPage];
- ptrCheckGuard(sopCopyPageptr, cpagesize, page8);
- tsopDirindex = fragrecptr.p->lcpDirIndex;
- sopDirRangePtr.i = fragrecptr.p->overflowdir;
- tsopDirInd = tsopDirindex >> 8;
- tsopIndex = tsopDirindex & 0xff;
- ptrCheckGuard(sopDirRangePtr, cdirrangesize, dirRange);
- arrGuard(tsopDirInd, 256);
- sopOverflowDirptr.i = sopDirRangePtr.p->dirArray[tsopDirInd];
- ptrCheckGuard(sopOverflowDirptr, cdirarraysize, directoryarray);
- sopPageptr.i = sopOverflowDirptr.p->pagep[tsopIndex];
- fragrecptr.p->lcpDirIndex++;
- if (sopPageptr.i != RNIL) {
- jam();
- ptrCheckGuard(sopPageptr, cpagesize, page8);
- ndbrequire(sopPageptr.p->word32[ZPOS_PAGE_ID] == tsopDirindex);
- ndbrequire(((sopPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) != ZNORMAL_PAGE_TYPE);
- lcnPageptr = sopPageptr;
- lcnCopyPageptr = sopCopyPageptr;
- lcpCopyPage(signal);
- fragrecptr.p->noOfStoredOverPages++;
- fragrecptr.p->activeDataPage++;
- if ((sopPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] == 0)) {
- //ndbrequire(((sopPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == ZOVERFLOW_PAGE_TYPE);
- if (((sopPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) ==
- ZOVERFLOW_PAGE_TYPE) {
- /*--------------------------------------------------------------------------------*/
- /* THE PAGE IS EMPTY AND WAITING TO BE RELEASED. IT COULD NOT BE RELEASED */
- /* EARLIER SINCE IT WAS PART OF A LOCAL CHECKPOINT. */
- /*--------------------------------------------------------------------------------*/
- jam();
- ropPageptr = sopPageptr;
- releaseOverpage(signal);
- } else {
- jam();
- sendSystemerror(signal);
- }
- }//if
- }
- if (fragrecptr.p->activeDataPage == ZWRITEPAGESIZE) {
- jam();
- senddatapagesLab(signal);
- return;
- }//if
- signal->theData[0] = lcpConnectptr.i;
- signal->theData[1] = fragrecptr.i;
- sendSignal(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 2, JBB);
- return;
-}//Dbacc::saveOverPagesLab()
-
-void Dbacc::endsaveoverpageLab(Signal* signal)
-{
- Page8Ptr esoPageidptr;
-
- esoPageidptr.i = fragrecptr.p->zeroPagePtr;
- ptrCheckGuard(esoPageidptr, cpagesize, page8);
- dbgWord32(esoPageidptr, ZPAGEZERO_NO_OVER_PAGE, fragrecptr.p->noOfStoredOverPages);
- esoPageidptr.p->word32[ZPAGEZERO_NO_OVER_PAGE] = fragrecptr.p->noOfStoredOverPages;
- fragrecptr.p->fragState = LCP_SEND_ZERO_PAGE;
- if (fragrecptr.p->activeDataPage != 0) {
- jam();
-    senddatapagesLab(signal); /* SEND REMAINING PAGES TO DISK */
- return;
- }//if
- saveZeroPageLab(signal);
- return;
-}//Dbacc::endsaveoverpageLab()
-
-/* ******************--------------------------------------------------------------- */
-/* ACC_SAVE_ZERO_PAGE PAGE ZERO IS SENT TO DISK. IT IS THE LAST STAGE OF THE */
-/* LCP CREATION. ACC_LCPCONF IS RETURNED. */
-/* ******************--------------------------------------------------------------- */
-void Dbacc::saveZeroPageLab(Signal* signal)
-{
- Page8Ptr szpPageidptr;
- Uint32 Tchs;
- Uint32 Ti;
-
- fragrecptr.p->createLcp = ZFALSE;
- fsConnectptr.i = fragrecptr.p->fsConnPtr;
- ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
- szpPageidptr.i = fragrecptr.p->zeroPagePtr;
- ptrCheckGuard(szpPageidptr, cpagesize, page8);
- dbgWord32(szpPageidptr, ZPAGEZERO_PREV_UNDOP, fragrecptr.p->prevUndoposition);
- szpPageidptr.p->word32[ZPAGEZERO_PREV_UNDOP] = fragrecptr.p->prevUndoposition;
- dbgWord32(szpPageidptr, ZPAGEZERO_NEXT_UNDO_FILE, cactiveUndoFileVersion);
- szpPageidptr.p->word32[ZPAGEZERO_NEXT_UNDO_FILE] = cactiveUndoFileVersion;
- fragrecptr.p->fragState = WAIT_ZERO_PAGE_STORED;
-
- /* --------------------------------------------------------------------------------- */
- // Calculate the checksum and store it for the zero page of the fragment.
- /* --------------------------------------------------------------------------------- */
- szpPageidptr.p->word32[ZPOS_CHECKSUM] = 0;
- Tchs = 0;
- for (Ti = 0; Ti < 2048; Ti++) {
- Tchs = Tchs ^ szpPageidptr.p->word32[Ti];
- }//for
- szpPageidptr.p->word32[ZPOS_CHECKSUM] = Tchs;
- dbgWord32(szpPageidptr, ZPOS_CHECKSUM, Tchs);
-
- seizeFsOpRec(signal);
- initFsOpRec(signal);
- fsOpptr.p->fsOpstate = WAIT_WRITE_DATA;
- if (clblPageCounter > 0) {
- jam();
- clblPageCounter = clblPageCounter - 1;
- } else {
- jam();
- clblPageOver = clblPageOver + 1;
- }//if
- /* ************************ */
- /* FSWRITEREQ */
- /* ************************ */
- signal->theData[0] = fsConnectptr.p->fsPtr;
- signal->theData[1] = cownBlockref;
- signal->theData[2] = fsOpptr.i;
- signal->theData[3] = 0x10;
- /* FLAG = LIST MEM PAGES, LIST FILE PAGES */
- /* SYNC FILE AFTER WRITING */
- signal->theData[4] = ZPAGE8_BASE_ADD;
- signal->theData[5] = 1;
- /* NO OF PAGES */
- signal->theData[6] = fragrecptr.p->zeroPagePtr;
- /* ZERO PAGE */
- signal->theData[7] = 0;
- sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
- /* ZERO PAGE AT DATA FILE */
- return;
-}//Dbacc::saveZeroPageLab()
-
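The checksum written into page zero here, like the one computed in lcpCopyPage() below, is a plain XOR over all 2048 words of the page, taken with the checksum word itself cleared first. A minimal sketch of that computation (the checksum position is passed in rather than hard-coded to ZPOS_CHECKSUM):

#include <cstdint>

// XOR checksum over a page of 2048 32-bit words, as in saveZeroPageLab()
// and lcpCopyPage(): clear the checksum word, fold in every word, store.
static uint32_t pageChecksum(uint32_t* word32, uint32_t checksumPos)
{
  word32[checksumPos] = 0;
  uint32_t chs = 0;
  for (uint32_t i = 0; i < 2048; i++)
    chs ^= word32[i];
  word32[checksumPos] = chs;
  return chs;
}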
-/* ******************--------------------------------------------------------------- */
-/* FSWRITECONF WRITE FILE CONF */
-/* ENTER FSWRITECONF WITH SENDER: FS, LEVEL B */
-/* FS_OPPTR FS_CONNECTION PTR */
-/* ******************--------------------------------------------------------------- */
-void Dbacc::lcpCloseDataFileLab(Signal* signal)
-{
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- fsConnectptr.p->fsState = LCP_CLOSE_DATA;
- /* ************************ */
- /* FSCLOSEREQ */
- /* ************************ */
- /* CLOSE DATA FILE */
- signal->theData[0] = fsConnectptr.p->fsPtr;
- signal->theData[1] = cownBlockref;
- signal->theData[2] = fsConnectptr.i;
- signal->theData[3] = ZFALSE;
- sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
- /* FLAG = 0, DO NOT DELETE FILE */
- return;
-}//Dbacc::lcpCloseDataFileLab()
-
-void Dbacc::checkSyncUndoPagesLab(Signal* signal)
-{
- fragrecptr.i = fsConnectptr.p->fragrecPtr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- releaseFsConnRec(signal);
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- switch (lcpConnectptr.p->syncUndopageState) {
- case WAIT_NOTHING:
- jam();
- lcpConnectptr.p->syncUndopageState = WAIT_ONE_CONF;
- break;
- case WAIT_ONE_CONF:
- jam();
- lcpConnectptr.p->syncUndopageState = WAIT_TWO_CONF;
- break;
- default:
- jam();
- sendSystemerror(signal);
- return;
- break;
- }//switch
-
- /* ACTIVE UNDO PAGE ID */
- Uint32 tundoPageId = cundoposition >> ZUNDOPAGEINDEXBITS;
- tmp1 = tundoPageId - (tundoPageId & (ZWRITE_UNDOPAGESIZE - 1));
- /* START PAGE OF THE LAST UNDO PAGES GROUP */
- tmp2 = (tundoPageId - tmp1) + 1; /* NO OF LEFT UNDO PAGES */
- tmp1 = tmp1 & (cundopagesize - 1); /* 1 MBYTE PAGE WINDOW IN MEMORY */
- fsConnectptr.i = cactiveOpenUndoFsPtr;
- ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
- seizeFsOpRec(signal);
- initFsOpRec(signal);
- fsOpptr.p->fsOpstate = WAIT_WRITE_UNDO;
- fsOpptr.p->fsOpMemPage = tundoPageId; /* RECORD MEMORY PAGE WRITTEN */
- if (clblPageCounter >= (4 * tmp2)) {
- jam();
- clblPageCounter = clblPageCounter - (4 * tmp2);
- } else {
- jam();
- clblPageOver = clblPageOver + ((4 * tmp2) - clblPageCounter);
- clblPageCounter = 0;
- }//if
- /* ************************ */
- /* FSWRITEREQ */
- /* ************************ */
- signal->theData[0] = fsConnectptr.p->fsPtr;
- signal->theData[1] = cownBlockref;
- signal->theData[2] = fsOpptr.i;
- /* FLAG = START MEM PAGES, START FILE PAGES */
- /* SYNC FILE AFTER WRITING */
- signal->theData[3] = 0x11;
- signal->theData[4] = ZUNDOPAGE_BASE_ADD;
- /* NO OF UNDO PAGES */
- signal->theData[5] = tmp2;
- /* FIRST MEMORY PAGE */
- signal->theData[6] = tmp1;
- /* ACTIVE PAGE AT UNDO FILE */
- signal->theData[7] = cactiveUndoFilePage;
- sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
-
- return;
-}//Dbacc::checkSyncUndoPagesLab()
-
-void Dbacc::checkSendLcpConfLab(Signal* signal)
-{
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
- switch (lcpConnectptr.p->syncUndopageState) {
- case WAIT_ONE_CONF:
- jam();
- lcpConnectptr.p->syncUndopageState = WAIT_NOTHING;
- break;
- case WAIT_TWO_CONF:
- jam();
- lcpConnectptr.p->syncUndopageState = WAIT_ONE_CONF;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- lcpConnectptr.p->noOfLcpConf++;
- ndbrequire(lcpConnectptr.p->noOfLcpConf <= 4);
- fragrecptr.p->fragState = ACTIVEFRAG;
- rlpPageptr.i = fragrecptr.p->zeroPagePtr;
- ptrCheckGuard(rlpPageptr, cpagesize, page8);
- releaseLcpPage(signal);
- fragrecptr.p->zeroPagePtr = RNIL;
- for (Uint32 i = 0; i < ZWRITEPAGESIZE; i++) {
- jam();
- if (fragrecptr.p->datapages[i] != RNIL) {
- jam();
- rlpPageptr.i = fragrecptr.p->datapages[i];
- ptrCheckGuard(rlpPageptr, cpagesize, page8);
- releaseLcpPage(signal);
- fragrecptr.p->datapages[i] = RNIL;
- }//if
- }//for
- signal->theData[0] = fragrecptr.p->lcpLqhPtr;
- sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_LCPCONF, signal, 1, JBB);
- if (lcpConnectptr.p->noOfLcpConf == 4) {
- jam();
- releaseLcpConnectRec(signal);
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- rootfragrecptr.p->rootState = ACTIVEROOT;
- }//if
-}//Dbacc::checkSendLcpConfLab()
-
-/* ******************--------------------------------------------------------------- */
-/* ACC_CONTOPREQ */
-/* SENDER: LQH, LEVEL B */
-/* ENTER ACC_CONTOPREQ WITH */
-/* LCP_CONNECTPTR */
-/* TMP1 LOCAL FRAG ID */
-/* ******************--------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* ACC_CONTOPREQ COMMIT TRANSACTION */
-/* ******************------------------------------+ */
-/* SENDER: LQH, LEVEL B */
-void Dbacc::execACC_CONTOPREQ(Signal* signal)
-{
- Uint32 tcorLocalFrag;
-
- jamEntry();
- lcpConnectptr.i = signal->theData[0];
- /* CONNECTION PTR */
- tcorLocalFrag = signal->theData[1];
- /* LOCAL FRAG ID */
- tresult = 0;
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- if(ERROR_INSERTED(3002) && lcpConnectptr.p->noOfLcpConf < 2)
- {
- sendSignalWithDelay(cownBlockref, GSN_ACC_CONTOPREQ, signal, 300,
- signal->getLength());
- return;
- }
-
- ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
- rootfragrecptr.i = lcpConnectptr.p->rootrecptr;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- if (rootfragrecptr.p->fragmentid[0] == tcorLocalFrag) {
- jam();
- fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- } else {
- ndbrequire(rootfragrecptr.p->fragmentid[1] == tcorLocalFrag);
- jam();
- fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- }//if
- operationRecPtr.i = fragrecptr.p->firstWaitInQueOp;
- fragrecptr.p->sentWaitInQueOp = RNIL;
- fragrecptr.p->stopQueOp = ZFALSE;
- while (operationRecPtr.i != RNIL) {
- jam();
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
- if (operationRecPtr.p->opState == WAIT_EXE_OP) {
- jam();
- //------------------------------------------------------------
- // Indicate that we are now a normal waiter in the queue. We
- // will remove the operation from the queue as part of starting
- // operation again.
- //------------------------------------------------------------
- operationRecPtr.p->opState = WAIT_IN_QUEUE;
- executeNextOperation(signal);
- }//if
- operationRecPtr.i = operationRecPtr.p->nextQueOp;
- }//while
- signal->theData[0] = fragrecptr.p->lcpLqhPtr;
- sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_CONTOPCONF, signal, 1, JBA);
-
- lcpConnectptr.p->noOfLcpConf++;
- if (lcpConnectptr.p->noOfLcpConf == 4) {
- jam();
- releaseLcpConnectRec(signal);
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- rootfragrecptr.p->rootState = ACTIVEROOT;
- }//if
-  return; /* ALL QUEUED OPERATIONS ARE RESTARTED IF NEEDED. */
-}//Dbacc::execACC_CONTOPREQ()
-
-/* ******************--------------------------------------------------------------- */
-/* END_LCPREQ END OF LOCAL CHECK POINT */
-/* ENTER END_LCPREQ WITH SENDER: LQH, LEVEL B */
-/* CLQH_PTR, LQH PTR */
-/* CLQH_BLOCK_REF LQH BLOCK REF */
-/* ******************--------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* END_LCPREQ PERFORM A LOCAL CHECK POINT */
-/* ******************------------------------------+ */
-/* SENDER: LQH, LEVEL B */
-void Dbacc::execEND_LCPREQ(Signal* signal)
-{
- jamEntry();
- clqhPtr = signal->theData[0];
- /* LQH PTR */
- clqhBlockRef = signal->theData[1];
- /* LQH BLOCK REF */
- tresult = 0;
- fsConnectptr.i = cactiveOpenUndoFsPtr;
- ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
- fsConnectptr.p->fsState = WAIT_CLOSE_UNDO; /* CLOSE FILE AFTER WRITING */
- /* ************************ */
- /* FSCLOSEREQ */
- /* ************************ */
- signal->theData[0] = fsConnectptr.p->fsPtr;
- signal->theData[1] = cownBlockref;
- signal->theData[2] = fsConnectptr.i;
- signal->theData[3] = ZFALSE;
- sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
- /* FLAG = 0, DO NOT DELETE FILE */
- cactiveUndoFileVersion = RNIL;
- cactiveOpenUndoFsPtr = RNIL;
- /* ************************ */
- /* END_LCPCONF */
- /* ************************ */
- signal->theData[0] = clqhPtr;
- sendSignal(clqhBlockRef, GSN_END_LCPCONF, signal, 1, JBB);
- return;
-}//Dbacc::execEND_LCPREQ()
-
-/*-----------------------------------------------------------------*/
-/* WHEN WE COPY THE PAGE WE ALSO WRITE THE ELEMENT HEADER AS */
-/* UNLOCKED IF THEY ARE CURRENTLY LOCKED. */
-/*-----------------------------------------------------------------*/
-void Dbacc::lcpCopyPage(Signal* signal)
-{
- Uint32 tlcnNextContainer;
- Uint32 tlcnTmp;
- Uint32 tlcnConIndex;
- Uint32 tlcnIndex;
- Uint32 Tmp1;
- Uint32 Tmp2;
- Uint32 Tmp3;
- Uint32 Tmp4;
- Uint32 Ti;
- Uint32 Tchs;
- Uint32 Tlimit;
-
- Tchs = 0;
- lupPageptr.p = lcnCopyPageptr.p;
- lcnPageptr.p->word32[ZPOS_CHECKSUM] = Tchs;
- for (Ti = 0; Ti < 32 ; Ti++) {
- Tlimit = 16 + (Ti << 6);
- for (tlcnTmp = (Ti << 6); tlcnTmp < Tlimit; tlcnTmp ++) {
- Tmp1 = lcnPageptr.p->word32[tlcnTmp];
- Tmp2 = lcnPageptr.p->word32[tlcnTmp + 16];
- Tmp3 = lcnPageptr.p->word32[tlcnTmp + 32];
- Tmp4 = lcnPageptr.p->word32[tlcnTmp + 48];
-
- lcnCopyPageptr.p->word32[tlcnTmp] = Tmp1;
- lcnCopyPageptr.p->word32[tlcnTmp + 16] = Tmp2;
- lcnCopyPageptr.p->word32[tlcnTmp + 32] = Tmp3;
- lcnCopyPageptr.p->word32[tlcnTmp + 48] = Tmp4;
-
- Tchs = Tchs ^ Tmp1;
- Tchs = Tchs ^ Tmp2;
- Tchs = Tchs ^ Tmp3;
- Tchs = Tchs ^ Tmp4;
- }//for
- }//for
- tlcnChecksum = Tchs;
- if (((lcnCopyPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == ZNORMAL_PAGE_TYPE) {
- jam();
- /*-----------------------------------------------------------------*/
- /* TAKE CARE OF ALL 64 BUFFERS ADDRESSED BY ALGORITHM IN */
- /* FIRST PAGE. IF THEY ARE EMPTY THEY STILL HAVE A CONTAINER */
- /* HEADER OF 2 WORDS. */
- /*-----------------------------------------------------------------*/
- tlcnConIndex = ZHEAD_SIZE;
- tlupForward = 1;
- for (tlcnIndex = 0; tlcnIndex <= ZNO_CONTAINERS - 1; tlcnIndex++) {
- tlupIndex = tlcnConIndex;
- tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
- lcpUpdatePage(signal);
- tlcnConIndex = tlcnConIndex + ZBUF_SIZE;
- }//for
- }//if
- /*-----------------------------------------------------------------*/
- /* TAKE CARE OF ALL USED BUFFERS ON THE LEFT SIDE. */
- /*-----------------------------------------------------------------*/
- tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 23) & 0x7f;
- while (tlcnNextContainer < ZEMPTYLIST) {
- tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
- tlcnConIndex = tlcnConIndex + ZHEAD_SIZE;
- tlupIndex = tlcnConIndex;
- tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
- tlupForward = 1;
- lcpUpdatePage(signal);
- tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
- }//while
- if (tlcnNextContainer == ZEMPTYLIST) {
- jam();
- /*empty*/;
- } else {
- jam();
- sendSystemerror(signal);
- return;
- }//if
- /*-----------------------------------------------------------------*/
- /* TAKE CARE OF ALL USED BUFFERS ON THE RIGHT SIDE. */
- /*-----------------------------------------------------------------*/
- tlupForward = cminusOne;
- tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 16) & 0x7f;
- while (tlcnNextContainer < ZEMPTYLIST) {
- tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
- tlcnConIndex = tlcnConIndex + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
- tlupIndex = tlcnConIndex;
- tlupElemIndex = tlcnConIndex - 1;
- lcpUpdatePage(signal);
- tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
- }//while
- if (tlcnNextContainer == ZEMPTYLIST) {
- jam();
- /*empty*/;
- } else {
- jam();
- sendSystemerror(signal);
- return;
- }//if
- lcnCopyPageptr.p->word32[ZPOS_CHECKSUM] = tlcnChecksum;
-}//Dbacc::lcpCopyPage()
-
-/* --------------------------------------------------------------------------------- */
- /* THIS SUBROUTINE GOES THROUGH ONE CONTAINER, CHECKS FOR LOCKED ELEMENTS AND */
- /* UPDATES THEM TO ENSURE THAT ALL ELEMENTS ARE UNLOCKED ON DISK. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::lcpUpdatePage(Signal* signal)
-{
- OperationrecPtr lupOperationRecPtr;
- Uint32 tlupElemHead;
- Uint32 tlupElemLen;
- Uint32 tlupElemStep;
- Uint32 tlupConLen;
-
- tlupConLen = lupPageptr.p->word32[tlupIndex] >> 26;
- tlupElemLen = fragrecptr.p->elementLength;
- tlupElemStep = tlupForward * tlupElemLen;
- while (tlupConLen > ZCON_HEAD_SIZE) {
- jam();
- tlupElemHead = lupPageptr.p->word32[tlupElemIndex];
- if (ElementHeader::getLocked(tlupElemHead)) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WHEN CHANGING THE ELEMENT HEADER WE ALSO HAVE TO UPDATE THE CHECKSUM. IN */
- /* DOING THIS WE USE THE FORMULA (A XOR B) XOR B = A WHICH MEANS THAT IF WE */
- /* XOR SOMETHING TWICE WITH THE SAME OPERAND THEN WE RETURN TO THE ORIGINAL */
- /* VALUE. THEN WE ALSO HAVE TO USE THE NEW ELEMENT HEADER IN THE CHECKSUM */
- /* CALCULATION. */
- /* --------------------------------------------------------------------------------- */
- tlcnChecksum = tlcnChecksum ^ tlupElemHead;
- lupOperationRecPtr.i = ElementHeader::getOpPtrI(tlupElemHead);
- ptrCheckGuard(lupOperationRecPtr, coprecsize, operationrec);
- const Uint32 hv = lupOperationRecPtr.p->hashvaluePart;
- tlupElemHead = ElementHeader::setUnlocked(hv , 0);
- arrGuard(tlupElemIndex, 2048);
- lupPageptr.p->word32[tlupElemIndex] = tlupElemHead;
- tlcnChecksum = tlcnChecksum ^ tlupElemHead;
- }//if
- tlupConLen = tlupConLen - tlupElemLen;
- tlupElemIndex = tlupElemIndex + tlupElemStep;
- }//while
- if (tlupConLen < ZCON_HEAD_SIZE) {
- jam();
- sendSystemerror(signal);
- }//if
-}//Dbacc::lcpUpdatePage()
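-
- /* --------------------------------------------------------------------------------- */
- /* ILLUSTRATIVE SKETCH, NOT PART OF THE ORIGINAL BLOCK. THE CHECKSUM UPDATE ABOVE */
- /* RELIES ON (A XOR B) XOR B = A: XOR'ING OUT THE OLD ELEMENT HEADER AND XOR'ING IN */
- /* THE NEW ONE KEEPS THE PAGE CHECKSUM CONSISTENT WITHOUT RESCANNING THE PAGE. THE */
- /* HELPER BELOW RESTATES THE IDEA ON A PLAIN WORD ARRAY; THE NAME IS HYPOTHETICAL. */
- /* --------------------------------------------------------------------------------- */
- static inline void
- sketchUpdateWordKeepChecksum(Uint32* words, Uint32& checksum,
-                              Uint32 index, Uint32 newValue)
- {
-   checksum = checksum ^ words[index];  // remove the old word from the checksum
-   words[index] = newValue;             // overwrite the word
-   checksum = checksum ^ newValue;      // add the new word to the checksum
- }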
-
-/*-----------------------------------------------------------------*/
- // At a system restart we check that the page does not contain any
- // locks that would hinder the system restart procedure.
-/*-----------------------------------------------------------------*/
-void Dbacc::srCheckPage(Signal* signal)
-{
- Uint32 tlcnNextContainer;
- Uint32 tlcnConIndex;
- Uint32 tlcnIndex;
-
- lupPageptr.p = lcnCopyPageptr.p;
- if (((lcnCopyPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == ZNORMAL_PAGE_TYPE) {
- jam();
- /*-----------------------------------------------------------------*/
- /* TAKE CARE OF ALL 64 BUFFERS ADDRESSED BY ALGORITHM IN */
- /* FIRST PAGE. IF THEY ARE EMPTY THEY STILL HAVE A CONTAINER */
- /* HEADER OF 2 WORDS. */
- /*-----------------------------------------------------------------*/
- tlcnConIndex = ZHEAD_SIZE;
- tlupForward = 1;
- for (tlcnIndex = 0; tlcnIndex <= ZNO_CONTAINERS - 1; tlcnIndex++) {
- tlupIndex = tlcnConIndex;
- tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
- srCheckContainer(signal);
- if (tresult != 0) {
- jam();
- return;
- }//if
- tlcnConIndex = tlcnConIndex + ZBUF_SIZE;
- }//for
- }//if
- /*-----------------------------------------------------------------*/
- /* TAKE CARE OF ALL USED BUFFERS ON THE LEFT SIDE. */
- /*-----------------------------------------------------------------*/
- tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 23) & 0x7f;
- while (tlcnNextContainer < ZEMPTYLIST) {
- tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
- tlcnConIndex = tlcnConIndex + ZHEAD_SIZE;
- tlupIndex = tlcnConIndex;
- tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
- tlupForward = 1;
- srCheckContainer(signal);
- if (tresult != 0) {
- jam();
- return;
- }//if
- tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
- }//while
- if (tlcnNextContainer == ZEMPTYLIST) {
- jam();
- /*empty*/;
- } else {
- jam();
- tresult = 4;
- return;
- }//if
- /*-----------------------------------------------------------------*/
- /* TAKE CARE OF ALL USED BUFFERS ON THE RIGHT SIDE. */
- /*-----------------------------------------------------------------*/
- tlupForward = cminusOne;
- tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 16) & 0x7f;
- while (tlcnNextContainer < ZEMPTYLIST) {
- tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
- tlcnConIndex = tlcnConIndex + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
- tlupIndex = tlcnConIndex;
- tlupElemIndex = tlcnConIndex - 1;
- srCheckContainer(signal);
- if (tresult != 0) {
- jam();
- return;
- }//if
- tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
- }//while
- if (tlcnNextContainer == ZEMPTYLIST) {
- jam();
- /*empty*/;
- } else {
- jam();
- tresult = 4;
- return;
- }//if
-}//Dbacc::srCheckPage()
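-
- /*-----------------------------------------------------------------*/
- // Illustrative sketch, not part of the original block. The container
- // address arithmetic above, (n << ZSHIFT_PLUS) - (n << ZSHIFT_MINUS),
- // is a shift-based multiplication of the container number by a fixed
- // container stride in words. The helper below restates it with the
- // shift constants passed in as parameters; the name is hypothetical.
- /*-----------------------------------------------------------------*/
- static inline Uint32
- sketchContainerWordOffset(Uint32 containerNumber,
-                           Uint32 shiftPlus, Uint32 shiftMinus)
- {
-   // equals containerNumber * ((1 << shiftPlus) - (1 << shiftMinus))
-   return (containerNumber << shiftPlus) - (containerNumber << shiftMinus);
- }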
-
-/* --------------------------------------------------------------------------------- */
- /* THIS SUBROUTINE GOES THROUGH ONE CONTAINER AND CHECKS FOR LOCKED ELEMENTS. */
- /* ANY LOCK THAT REMAINS AT SYSTEM RESTART IS REPORTED AS AN ERROR. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::srCheckContainer(Signal* signal)
-{
- Uint32 tlupElemLen;
- Uint32 tlupElemStep;
- Uint32 tlupConLen;
-
- tlupConLen = lupPageptr.p->word32[tlupIndex] >> 26;
- tlupElemLen = fragrecptr.p->elementLength;
- tlupElemStep = tlupForward * tlupElemLen;
- while (tlupConLen > ZCON_HEAD_SIZE) {
- jam();
- const Uint32 tlupElemHead = lupPageptr.p->word32[tlupElemIndex];
- if (ElementHeader::getLocked(tlupElemHead)){
- jam();
- //-------------------------------------------------------
- // This is absolutely undesirable. We have a lock remaining
- // after the system restart. We send a crash signal that will
- // enter the trace file.
- //-------------------------------------------------------
- tresult = 2;
- return;
- }//if
- tlupConLen = tlupConLen - tlupElemLen;
- tlupElemIndex = tlupElemIndex + tlupElemStep;
- }//while
- if (tlupConLen < ZCON_HEAD_SIZE) {
- jam();
- tresult = 3;
- }//if
- return;
-}//Dbacc::srCheckContainer()
-
-/* ------------------------------------------------------------------------- */
-/* CHECK_UNDO_PAGES */
- /* DESCRIPTION: CHECKS WHETHER A PAGE OR A GROUP OF UNDO PAGES IS FILLED. WHEN */
- /*              A PAGE IS FILLED, CUNDOPOSITION IS UPDATED; THE NEW */
- /*              POSITION IS THE BEGINNING OF THE NEXT UNDO PAGE. */
- /*              IF A GROUP IS FILLED, THE PAGES ARE SENT TO DISK, */
-/* AND A NEW GROUP IS CHOSEN. */
-/* ------------------------------------------------------------------------- */
-void Dbacc::checkUndoPages(Signal* signal)
-{
-
- fragrecptr.p->prevUndoposition = cundoposition;
- cprevUndoaddress = cundoposition;
-
- // Calculate active undo page id
- Uint32 tundoPageId = cundoposition >> ZUNDOPAGEINDEXBITS;
-
- /**
- * WE WILL WRITE UNTIL WE HAVE ABOUT 8 KBYTE REMAINING ON THE 32 KBYTE
- * PAGE. THIS IS TO ENSURE THAT WE DO NOT HAVE ANY UNDO LOG RECORDS THAT CROSS
- * A PAGE BOUNDARY. THIS SIMPLIFIES THE CODE AT THE COST OF SOME INEFFICIENCY.
- */
- static const Uint32 ZMAXUNDOPAGEINDEX = 7100;
- if (tundoindex < ZMAXUNDOPAGEINDEX) {
- jam();
- cundoposition = (tundoPageId << ZUNDOPAGEINDEXBITS) + tundoindex;
- return;
- }//if
-
- /**
- * WE CHECK WHETHER MORE THAN 1 MBYTE OF WRITES ARE OUTSTANDING TO THE UNDO FILE.
- * IF SO WE HAVE TO CRASH, SINCE WE HAVE NO MORE SPACE IN WHICH TO WRITE UNDO
- * LOG RECORDS.
- */
- Uint16 nextUndoPageId = tundoPageId + 1;
- updateUndoPositionPage(signal, nextUndoPageId << ZUNDOPAGEINDEXBITS);
-
- if ((tundoPageId & (ZWRITE_UNDOPAGESIZE - 1)) == (ZWRITE_UNDOPAGESIZE - 1)) {
- jam();
- /* ---------- SEND A GROUP OF UNDO PAGES TO DISK --------- */
- fsConnectptr.i = cactiveOpenUndoFsPtr;
- ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
- Uint32 tcupTmp1 = (tundoPageId - ZWRITE_UNDOPAGESIZE) + 1;
- tcupTmp1 = tcupTmp1 & (cundopagesize - 1); /* 1 MBYTE PAGE WINDOW */
- seizeFsOpRec(signal);
- initFsOpRec(signal);
- fsOpptr.p->fsOpstate = WAIT_WRITE_UNDO_EXIT;
- fsOpptr.p->fsOpMemPage = tundoPageId;
- fragrecptr.p->nrWaitWriteUndoExit++;
- if (clblPageCounter >= 8) {
- jam();
- clblPageCounter = clblPageCounter - 8;
- } else {
- jam();
- clblPageOver = clblPageOver + (8 - clblPageCounter);
- clblPageCounter = 0;
- }//if
- /* ************************ */
- /* FSWRITEREQ */
- /* ************************ */
- signal->theData[0] = fsConnectptr.p->fsPtr;
- signal->theData[1] = cownBlockref;
- signal->theData[2] = fsOpptr.i;
- signal->theData[3] = 0x1;
- /* FLAG = START MEM PAGES, START FILE PAGES */
- signal->theData[4] = ZUNDOPAGE_BASE_ADD;
- signal->theData[5] = ZWRITE_UNDOPAGESIZE;
- signal->theData[6] = tcupTmp1;
- signal->theData[7] = cactiveUndoFilePage;
- sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
- cactiveUndoFilePage = cactiveUndoFilePage + ZWRITE_UNDOPAGESIZE;
- }//if
-}//Dbacc::checkUndoPages()
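-
- /* ------------------------------------------------------------------------- */
- /* ILLUSTRATIVE SKETCH, NOT PART OF THE ORIGINAL BLOCK. AN UNDO POSITION IS A */
- /* SINGLE WORD WITH THE UNDO PAGE ID IN THE HIGH BITS AND THE PAGE INDEX IN */
- /* THE LOW ZUNDOPAGEINDEXBITS BITS, AS USED ABOVE. THE HELPERS BELOW RESTATE */
- /* THAT ENCODING WITH THE BIT COUNT PASSED IN; THE NAMES ARE HYPOTHETICAL. */
- /* ------------------------------------------------------------------------- */
- static inline Uint32
- sketchMakeUndoPosition(Uint32 pageId, Uint32 pageIndex, Uint32 indexBits)
- {
-   return (pageId << indexBits) + pageIndex;
- }
- static inline Uint32
- sketchUndoPageId(Uint32 undoPosition, Uint32 indexBits)
- {
-   return undoPosition >> indexBits;  // as in cundoposition >> ZUNDOPAGEINDEXBITS
- }
- static inline Uint32
- sketchUndoPageIndex(Uint32 undoPosition, Uint32 indexBits)
- {
-   return undoPosition & ((1u << indexBits) - 1);  // as in cundoposition & ZUNDOPAGEINDEX_MASK
- }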
-
-/* --------------------------------------------------------------------------------- */
-/* UNDO_WRITING_PROCESS */
-/* INPUT: FRAGRECPTR, CUNDO_ELEM_INDEX, DATAPAGEPTR, CUNDOINFOLENGTH */
- /* DESCRIPTION: CALLED WHEN THE PROCESS OF CREATING A LOCAL CHECKPOINT HAS */
- /* STARTED. IF THE ACTIVE PAGE HAS NOT ALREADY BEEN SENT TO DISK, THE */
- /* OLD VALUE OF THE ITEM THAT IS ABOUT TO BE CHANGED IS STORED ON */
- /* THE ACTIVE UNDO PAGE. INFORMATION ABOUT THE UNDO PROCESS IN THE */
- /* BLOCK AND IN THE FRAGMENT IS UPDATED. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::undoWritingProcess(Signal* signal)
-{
- const Uint32 tactivePageDir = datapageptr.p->word32[ZPOS_PAGE_ID];
- const Uint32 tpageType = (datapageptr.p->word32[ZPOS_EMPTY_LIST] >> ZPOS_PAGE_TYPE_BIT) & 3;
- if (fragrecptr.p->fragState == LCP_SEND_PAGES) {
- if (tpageType == ZNORMAL_PAGE_TYPE) {
- /* --------------------------------------------------------------------------- */
- /* HANDLING OF LOG OF NORMAL PAGES DURING WRITE OF NORMAL PAGES. */
- /* --------------------------------------------------------------------------- */
- if (tactivePageDir < fragrecptr.p->lcpDirIndex) {
- jam();
- /* ------------------------------------------------------------------- */
- /* THIS PAGE HAS ALREADY BEEN WRITTEN IN THE LOCAL CHECKPOINT. */
- /* ------------------------------------------------------------------- */
- /*empty*/;
- } else {
- if (tactivePageDir >= fragrecptr.p->lcpMaxDirIndex) {
- jam();
- /* --------------------------------------------------------------------------- */
- /* OBVIOUSLY THE FRAGMENT HAS EXPANDED SINCE THE START OF THE LOCAL CHECKPOINT.*/
- /* WE NEED NOT LOG ANY UPDATES OF PAGES THAT DID NOT EXIST AT START OF LCP. */
- /* --------------------------------------------------------------------------- */
- /*empty*/;
- } else {
- jam();
- /* --------------------------------------------------------------------------- */
- /* IN ALL OTHER CASES WE HAVE TO WRITE TO THE UNDO LOG. */
- /* --------------------------------------------------------------------------- */
- undopageptr.i = (cundoposition >> ZUNDOPAGEINDEXBITS) & (cundopagesize - 1);
- ptrAss(undopageptr, undopage);
- theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
- tundoindex = theadundoindex + ZUNDOHEADSIZE;
- writeUndoHeader(signal, tactivePageDir, UndoHeader::ZPAGE_INFO);
- tundoElemIndex = cundoElemIndex;
- writeUndoDataInfo(signal);
- checkUndoPages(signal);
- }//if
- }//if
- } else if (tpageType == ZOVERFLOW_PAGE_TYPE) {
- /* --------------------------------------------------------------------------------- */
- /* OVERFLOW PAGE HANDLING DURING WRITE OF NORMAL PAGES. */
- /* --------------------------------------------------------------------------------- */
- if (tactivePageDir >= fragrecptr.p->lcpMaxOverDirIndex) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* OBVIOUSLY THE FRAGMENT HAS EXPANDED THE NUMBER OF OVERFLOW PAGES SINCE THE */
- /* START OF THE LOCAL CHECKPOINT. WE NEED NOT LOG ANY UPDATES OF PAGES THAT DID*/
- /* NOT EXIST AT START OF LCP. */
- /* --------------------------------------------------------------------------------- */
- /*empty*/;
- } else {
- jam();
- undopageptr.i = (cundoposition >> ZUNDOPAGEINDEXBITS) & (cundopagesize - 1);
- ptrAss(undopageptr, undopage);
- theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
- tundoindex = theadundoindex + ZUNDOHEADSIZE;
- writeUndoHeader(signal, tactivePageDir, UndoHeader::ZOVER_PAGE_INFO);
- tundoElemIndex = cundoElemIndex;
- writeUndoDataInfo(signal);
- checkUndoPages(signal);
- }//if
- } else {
- jam();
- /* --------------------------------------------------------------------------- */
- /* ONLY PAGE INFO AND OVERFLOW PAGE INFO CAN BE LOGGED BY THIS ROUTINE. */
- /* ANYTHING ELSE IS A SERIOUS ERROR. */
- /* --------------------------------------------------------------------------- */
- sendSystemerror(signal);
- }
- } else {
- if (fragrecptr.p->fragState == LCP_SEND_OVER_PAGES) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* DURING WRITE OF OVERFLOW PAGES WE NEED NOT WORRY ANYMORE ABOUT NORMAL PAGES.*/
- /* --------------------------------------------------------------------------------- */
- if (tpageType == ZOVERFLOW_PAGE_TYPE) {
- if (tactivePageDir < fragrecptr.p->lcpDirIndex) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THIS PAGE HAS ALREADY BEEN WRITTEN IN THE LOCAL CHECKPOINT. */
- /* --------------------------------------------------------------------------------- */
- /*empty*/;
- } else {
- if (tactivePageDir >= fragrecptr.p->lcpMaxOverDirIndex) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* OBVIOUSLY THE FRAGMENT HAS EXPANDED THE NUMBER OF OVERFLOW PAGES SINCE THE */
- /* START OF THE LOCAL CHECKPOINT. WE NEED NOT LOG ANY UPDATES OF PAGES THAT DID*/
- /* NOT EXIST AT START OF LCP. */
- /* --------------------------------------------------------------------------------- */
- /*empty*/;
- } else {
- jam();
- undopageptr.i = (cundoposition >> ZUNDOPAGEINDEXBITS) & (cundopagesize - 1);
- ptrAss(undopageptr, undopage);
- theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
- tundoindex = theadundoindex + ZUNDOHEADSIZE;
- writeUndoHeader(signal, tactivePageDir, UndoHeader::ZOVER_PAGE_INFO);
- tundoElemIndex = cundoElemIndex;
- writeUndoDataInfo(signal);
- checkUndoPages(signal);
- }//if
- }//if
- }
- }//if
- }//if
-}//Dbacc::undoWritingProcess()
-
-/* --------------------------------------------------------------------------------- */
- /* OTHER STATES MEAN THAT WE HAVE ALREADY WRITTEN ALL PAGES BUT NOT YET RESET */
-/* THE CREATE_LCP FLAG. */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* WRITE_UNDO_DATA_INFO */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::writeUndoDataInfo(Signal* signal)
-{
- Uint32 twudiIndex;
- Uint32 guard22;
-
- guard22 = cundoinfolength;
- arrGuard((tundoindex + guard22 - 1), 8192);
- arrGuard((tundoElemIndex + guard22 - 1), 2048);
- for (twudiIndex = 1; twudiIndex <= guard22; twudiIndex++) {
- undopageptr.p->undoword[tundoindex] = datapageptr.p->word32[tundoElemIndex];
- tundoindex++;
- tundoElemIndex++;
- }//for
-}//Dbacc::writeUndoDataInfo()
-
-/* --------------------------------------------------------------------------------- */
-/* WRITE_UNDO_HEADER */
- /* THE HEADER OF AN UNDO ELEMENT IS 28 BYTES (7 WORDS) AND CONTAINS THE FOLLOWING: */
-/* TABLE IDENTITY 32 BITS */
-/* ROOT FRAGMENT IDENTITY 32 BITS */
-/* LOCAL FRAGMENT IDENTITY 32 BITS */
- /* LENGTH OF ELEMENT INFO (BIT 31 - 18) 14 BITS */
-/* INFO TYPE (BIT 17 - 14) 4 BITS */
-/* PAGE INDEX OF THE FIRST FIELD IN THE FRAGMENT (BIT 13 - 0) 14 BITS */
-/* DIRECTORY INDEX OF THE PAGE IN THE FRAGMENT 32 BITS */
- /* ADDRESS OF THE PREVIOUS ELEMENT OF THE FRAGMENT 32 BITS */
- /* ADDRESS OF THE PREVIOUS ELEMENT IN THE UNDO PAGES 32 BITS */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::writeUndoHeader(Signal* signal,
- Uint32 logicalPageId,
- UndoHeader::UndoHeaderType pageType)
-{
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- arrGuard(theadundoindex + 6, 8192);
-
- // Set the struct pointer to point at the undo page at the right address.
- UndoHeader * const & undoHeaderPtr =
- (UndoHeader *) &undopageptr.p->undoword[theadundoindex];
-
- undoHeaderPtr->tableId = rootfragrecptr.p->mytabptr;
- undoHeaderPtr->rootFragId = rootfragrecptr.p->fragmentid[0] >> 1;
- undoHeaderPtr->localFragId = fragrecptr.p->myfid;
- ndbrequire((undoHeaderPtr->localFragId >> 1) == undoHeaderPtr->rootFragId);
- Uint32 Ttmp = cundoinfolength;
- Ttmp = (Ttmp << 4) + pageType;
- Ttmp = Ttmp << 14;
- undoHeaderPtr->variousInfo = Ttmp + cundoElemIndex;
- undoHeaderPtr->logicalPageId = logicalPageId;
- undoHeaderPtr->prevUndoAddressForThisFrag = fragrecptr.p->prevUndoposition;
- undoHeaderPtr->prevUndoAddress = cprevUndoaddress;
-}//Dbacc::writeUndoHeader()
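-
- /* --------------------------------------------------------------------------------- */
- /* ILLUSTRATIVE SKETCH, NOT PART OF THE ORIGINAL BLOCK. THE VARIOUS_INFO WORD */
- /* WRITTEN ABOVE PACKS THREE FIELDS: INFO LENGTH (BITS 31-18), INFO TYPE */
- /* (BITS 17-14) AND PAGE INDEX (BITS 13-0). THE HELPERS BELOW RESTATE THE PACKING */
- /* AND THE MATCHING UNPACKING; THE NAMES ARE HYPOTHETICAL. */
- /* --------------------------------------------------------------------------------- */
- static inline Uint32
- sketchPackVariousInfo(Uint32 infoLength, Uint32 infoType, Uint32 pageIndex)
- {
-   return (((infoLength << 4) + infoType) << 14) + pageIndex;
- }
- static inline Uint32 sketchInfoLength(Uint32 w) { return w >> 18; }
- static inline Uint32 sketchInfoType(Uint32 w)   { return (w >> 14) & 0xF; }
- static inline Uint32 sketchPageIndex(Uint32 w)  { return w & 0x3FFF; }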
-
-/* --------------------------------------------------------------------------------- */
-/* WRITE_UNDO_OP_INFO */
- /* FOR A LOCKED ELEMENT, THE OPERATION TYPE, THE UNDO OF THE ELEMENT HEADER AND */
- /* THE LENGTH OF THE TUPLE KEY HAVE TO BE SAVED IN THE UNDO PAGES. IN THIS CASE */
- /* AN UNDO ELEMENT INCLUDES THE FOLLOWING ITEMS. */
-/* OPERATION TYPE 32 BITS */
-/* HASH VALUE 32 BITS */
-/* LENGTH OF THE TUPLE = N 32 BITS */
-/* TUPLE KEYS N * 32 BITS */
-/* */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::writeUndoOpInfo(Signal* signal)
-{
- Page8Ptr locPageptr;
-
- arrGuard((tundoindex + 3), 8192);
- undopageptr.p->undoword[tundoindex] = operationRecPtr.p->operation;
- undopageptr.p->undoword[tundoindex + 1] = operationRecPtr.p->hashValue;
- undopageptr.p->undoword[tundoindex + 2] = operationRecPtr.p->tupkeylen;
- tundoindex = tundoindex + 3;
- // log localkey1
- locPageptr.i = operationRecPtr.p->elementPage;
- ptrCheckGuard(locPageptr, cpagesize, page8);
- Uint32 Tforward = operationRecPtr.p->elementIsforward;
- Uint32 TelemPtr = operationRecPtr.p->elementPointer;
- TelemPtr += Tforward; // ZELEM_HEAD_SIZE
- arrGuard(tundoindex+1, 8192);
- undopageptr.p->undoword[tundoindex] = locPageptr.p->word32[TelemPtr];
- tundoindex++;
- cundoinfolength = ZOP_HEAD_INFO_LN + 1;
-}//Dbacc::writeUndoOpInfo()
-
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* END OF LOCAL CHECKPOINT MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* SYSTEM RESTART MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* SR_FRAGIDREQ REQUEST FOR RESTART OF A FRAGMENT */
-/* SENDER: LQH, LEVEL B */
-/* ENTER SR_FRAGIDREQ WITH */
-/* TUSERPTR, LQH CONNECTION PTR */
-/* TUSERBLOCKREF, LQH BLOCK REFERENCE */
-/* TCHECKPOINTID, THE CHECKPOINT NUMBER TO USE */
-/* (E.G. 1,2 OR 3) */
-/* TABPTR, TABLE ID = TABLE RECORD POINTER */
-/* TFID, ROOT FRAGMENT ID */
-/* ******************--------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
- /* SR_FRAGIDREQ REQUEST FOR LIST OF STOPPED OPERATIONS */
-/* ******************------------------------------+ */
-/* SENDER: LQH, LEVEL B */
-void Dbacc::execSR_FRAGIDREQ(Signal* signal)
-{
- jamEntry();
- tuserptr = signal->theData[0]; /* LQH CONNECTION PTR */
- tuserblockref = signal->theData[1]; /* LQH BLOCK REFERENCE */
- tcheckpointid = signal->theData[2]; /* THE CHECKPOINT NUMBER TO USE */
- /* (E.G. 1,2 OR 3) */
- tabptr.i = signal->theData[3];
- ptrCheckGuard(tabptr, ctablesize, tabrec);
- /* TABLE ID = TABLE RECORD POINTER */
- tfid = signal->theData[4]; /* ROOT FRAGMENT ID */
- tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
- seizeLcpConnectRec(signal);
- initLcpConnRec(signal);
-
- ndbrequire(getrootfragmentrec(signal, rootfragrecptr, tfid));
- rootfragrecptr.p->lcpPtr = lcpConnectptr.i;
- lcpConnectptr.p->rootrecptr = rootfragrecptr.i;
- lcpConnectptr.p->localCheckPid = tcheckpointid;
- for (Uint32 i = 0; i < 2; i++) {
- Page8Ptr zeroPagePtr;
- jam();
- fragrecptr.i = rootfragrecptr.p->fragmentptr[i];
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- seizeLcpPage(zeroPagePtr);
- fragrecptr.p->zeroPagePtr = zeroPagePtr.i;
- }//for
-
- /* ---------------------------OPEN THE DATA FILE WHICH BELONGS TO TFID AND TCHECKPOINTID ---- */
- fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- tfid = rootfragrecptr.p->fragmentid[0];
- tmp = 0;
- srOpenDataFileLoopLab(signal);
-
- return;
-}//Dbacc::execSR_FRAGIDREQ()
-
-void Dbacc::srOpenDataFileLoopLab(Signal* signal)
-{
- /* D6 AT FSOPENREQ. FILE TYPE = .DATA */
- tmp1 = 0x010003ff; /* VERSION OF FILENAME = 1 */
- tmp2 = 0x0; /* D7 DON'T CREATE, READ ONLY */
- ndbrequire(cfsFirstfreeconnect != RNIL);
- seizeFsConnectRec(signal);
-
- fragrecptr.p->fsConnPtr = fsConnectptr.i;
- fsConnectptr.p->fragrecPtr = fragrecptr.i;
- fsConnectptr.p->fsState = WAIT_OPEN_DATA_FILE_FOR_READ;
- fsConnectptr.p->activeFragId = tmp; /* LOCAL FRAG INDEX */
- /* ************************ */
- /* FSOPENREQ */
- /* ************************ */
- signal->theData[0] = cownBlockref;
- signal->theData[1] = fsConnectptr.i;
- signal->theData[2] = rootfragrecptr.p->mytabptr; /* TABLE IDENTITY */
- signal->theData[3] = tfid; /* FRAGMENT IDENTITY */
- signal->theData[4] = lcpConnectptr.p->localCheckPid; /* CHECKPOINT ID */
- signal->theData[5] = tmp1;
- signal->theData[6] = tmp2;
- sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
- return;
-}//Dbacc::srOpenDataFileLoopLab()
-
-void Dbacc::srFsOpenConfLab(Signal* signal)
-{
- fsConnectptr.p->fsState = WAIT_READ_PAGE_ZERO;
- /* ------------------------ READ ZERO PAGE ---------- */
- fragrecptr.i = fsConnectptr.p->fragrecPtr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- signal->theData[0] = fsConnectptr.p->fsPtr;
- signal->theData[1] = cownBlockref;
- signal->theData[2] = fsConnectptr.i;
- signal->theData[3] = 0x0;
- /* FLAG = LIST MEM PAGES, LIST FILE PAGES */
- signal->theData[4] = ZPAGE8_BASE_ADD;
- signal->theData[5] = 1; /* NO OF PAGES */
- signal->theData[6] = fragrecptr.p->zeroPagePtr; /* ZERO PAGE */
- signal->theData[7] = 0; /* PAGE ZERO OF THE DATA FILE */
- sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
- return;
-}//Dbacc::srFsOpenConfLab()
-
-void Dbacc::srReadPageZeroLab(Signal* signal)
-{
- Page8Ptr srzPageptr;
-
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- fragrecptr.p->activeDataFilePage = 1;
- srzPageptr.i = fragrecptr.p->zeroPagePtr;
- ptrCheckGuard(srzPageptr, cpagesize, page8);
- /* --------------------------------------------------------------------------------- */
- // Check that the checksum of the zero page is ok.
- /* --------------------------------------------------------------------------------- */
- ccoPageptr.p = srzPageptr.p;
- checksumControl(signal, (Uint32)0);
- if (tresult > 0) {
- jam();
- return; // We will crash through a DEBUG_SIG
- }//if
-
- ndbrequire(srzPageptr.p->word32[ZPAGEZERO_FRAGID0] == rootfragrecptr.p->fragmentid[0]);
- lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- if (fsConnectptr.p->activeFragId == 0) {
- jam();
- rootfragrecptr.p->fragmentid[1] = srzPageptr.p->word32[ZPAGEZERO_FRAGID1];
- /* ---------------------------OPEN THE DATA FILE FOR NEXT LOCAL FRAGMENT ----------- ---- */
- tfid = rootfragrecptr.p->fragmentid[1];
- tmp = 1; /* LOCAL FRAG INDEX */
- fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- srOpenDataFileLoopLab(signal);
- return;
- } else {
- jam();
- lcpConnectptr.p->lcpstate = LCP_ACTIVE;
- signal->theData[0] = lcpConnectptr.p->lcpUserptr;
- signal->theData[1] = lcpConnectptr.i;
- signal->theData[2] = 2; /* NO OF LOCAL FRAGMENTS */
- signal->theData[3] = srzPageptr.p->word32[ZPAGEZERO_FRAGID0];
- /* ROOTFRAGRECPTR:FRAGMENTID(0) */
- signal->theData[4] = srzPageptr.p->word32[ZPAGEZERO_FRAGID1];
- /* ROOTFRAGRECPTR:FRAGMENTID(1) */
- signal->theData[5] = RNIL;
- signal->theData[6] = RNIL;
- signal->theData[7] = rootfragrecptr.p->fragmentptr[0];
- signal->theData[8] = rootfragrecptr.p->fragmentptr[1];
- signal->theData[9] = srzPageptr.p->word32[ZPAGEZERO_HASH_CHECK];
- sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_SR_FRAGIDCONF, signal, 10, JBB);
- }//if
- return;
-}//Dbacc::srReadPageZeroLab()
-
-void Dbacc::initFragAdd(Signal* signal,
- Uint32 rootFragIndex,
- Uint32 rootIndex,
- FragmentrecPtr regFragPtr)
-{
- const AccFragReq * const req = (AccFragReq*)&signal->theData[0];
- Uint32 lhFragBits = req->lhFragBits + 1;
- Uint32 minLoadFactor = (req->minLoadFactor * ZBUF_SIZE) / 100;
- Uint32 maxLoadFactor = (req->maxLoadFactor * ZBUF_SIZE) / 100;
- if (minLoadFactor >= maxLoadFactor) {
- jam();
- minLoadFactor = maxLoadFactor - 1;
- }//if
- regFragPtr.p->fragState = ACTIVEFRAG;
- // NOTE: next line must match calculation in Dblqh::execLQHFRAGREQ
- regFragPtr.p->myfid = (req->fragId << 1) | rootFragIndex;
- regFragPtr.p->myroot = rootIndex;
- regFragPtr.p->myTableId = req->tableId;
- ndbrequire(req->kValue == 6);
- regFragPtr.p->k = req->kValue; /* TK_SIZE = 6 IN THIS VERSION */
- regFragPtr.p->expandCounter = 0;
-
- /**
- * Only allow shrink during SR
- * - to make sure we don't run out of pages during REDO log execution
- *
- * Is later restored to 0 by LQH at end of REDO log execution
- */
- regFragPtr.p->expandFlag = (getNodeState().getSystemRestartInProgress()?1:0);
- regFragPtr.p->p = 0;
- regFragPtr.p->maxp = (1 << req->kValue) - 1;
- regFragPtr.p->minloadfactor = minLoadFactor;
- regFragPtr.p->maxloadfactor = maxLoadFactor;
- regFragPtr.p->slack = (regFragPtr.p->maxp + 1) * maxLoadFactor;
- regFragPtr.p->lhfragbits = lhFragBits;
- regFragPtr.p->lhdirbits = 0;
- regFragPtr.p->hashcheckbit = 0; //lhFragBits;
- regFragPtr.p->localkeylen = req->localKeyLen;
- regFragPtr.p->nodetype = (req->reqInfo >> 4) & 0x3;
- regFragPtr.p->lastOverIndex = 0;
- regFragPtr.p->dirsize = 1;
- regFragPtr.p->loadingFlag = ZFALSE;
- regFragPtr.p->keyLength = req->keyLength;
- ndbrequire(req->keyLength != 0);
- regFragPtr.p->elementLength = ZELEM_HEAD_SIZE + regFragPtr.p->localkeylen;
- Uint32 Tmp1 = (regFragPtr.p->maxp + 1) + regFragPtr.p->p;
- Uint32 Tmp2 = regFragPtr.p->maxloadfactor - regFragPtr.p->minloadfactor;
- Tmp2 = Tmp1 * Tmp2;
- regFragPtr.p->slackCheck = Tmp2;
-}//Dbacc::initFragAdd()
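-
- /* --------------------------------------------------------------------------------- */
- /* ILLUSTRATIVE SKETCH, NOT PART OF THE ORIGINAL BLOCK. THE LOAD FACTORS ABOVE ARE */
- /* SCALED FROM PERCENTAGES TO WORDS PER BUFFER, AND THE INITIAL SLACK IS THE MAXIMUM */
- /* LOAD OF THE (MAXP + 1) = 2^K INITIAL BUCKETS. THE HELPER BELOW RESTATES THAT */
- /* ARITHMETIC WITH THE BUFFER SIZE (ZBUF_SIZE IN THE CODE ABOVE) PASSED IN AS A */
- /* PARAMETER; THE NAME IS HYPOTHETICAL. */
- /* --------------------------------------------------------------------------------- */
- static inline Uint32
- sketchInitialSlack(Uint32 kValue, Uint32 maxLoadFactorPercent, Uint32 bufSizeWords)
- {
-   const Uint32 maxLoadFactor = (maxLoadFactorPercent * bufSizeWords) / 100;
-   const Uint32 buckets = (1u << kValue);   // maxp + 1
-   return buckets * maxLoadFactor;          // slack at fragment creation
- }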
-
-void Dbacc::initFragGeneral(FragmentrecPtr regFragPtr)
-{
- regFragPtr.p->directory = RNIL;
- regFragPtr.p->overflowdir = RNIL;
- regFragPtr.p->fsConnPtr = RNIL;
- regFragPtr.p->firstOverflowRec = RNIL;
- regFragPtr.p->lastOverflowRec = RNIL;
- regFragPtr.p->firstWaitInQueOp = RNIL;
- regFragPtr.p->lastWaitInQueOp = RNIL;
- regFragPtr.p->sentWaitInQueOp = RNIL;
- regFragPtr.p->lockOwnersList = RNIL;
- regFragPtr.p->firstFreeDirindexRec = RNIL;
- regFragPtr.p->zeroPagePtr = RNIL;
-
- regFragPtr.p->activeDataPage = 0;
- regFragPtr.p->createLcp = ZFALSE;
- regFragPtr.p->stopQueOp = ZFALSE;
- regFragPtr.p->hasCharAttr = ZFALSE;
- regFragPtr.p->nextAllocPage = 0;
- regFragPtr.p->nrWaitWriteUndoExit = 0;
- regFragPtr.p->lastUndoIsStored = ZFALSE;
- regFragPtr.p->loadingFlag = ZFALSE;
- regFragPtr.p->fragState = FREEFRAG;
- for (Uint32 i = 0; i < ZWRITEPAGESIZE; i++) {
- regFragPtr.p->datapages[i] = RNIL;
- }//for
- for (Uint32 j = 0; j < 4; j++) {
- regFragPtr.p->longKeyPageArray[j] = RNIL;
- }//for
-}//Dbacc::initFragGeneral()
-
-void Dbacc::initFragSr(FragmentrecPtr regFragPtr, Page8Ptr regPagePtr)
-{
- regFragPtr.p->prevUndoposition = regPagePtr.p->word32[ZPAGEZERO_PREV_UNDOP];
- regFragPtr.p->noOfStoredOverPages = regPagePtr.p->word32[ZPAGEZERO_NO_OVER_PAGE];
- regFragPtr.p->noStoredPages = regPagePtr.p->word32[ZPAGEZERO_NO_PAGES];
- regFragPtr.p->dirsize = regPagePtr.p->word32[ZPAGEZERO_DIRSIZE];
- regFragPtr.p->expandCounter = regPagePtr.p->word32[ZPAGEZERO_EXPCOUNTER];
- regFragPtr.p->slack = regPagePtr.p->word32[ZPAGEZERO_SLACK];
- regFragPtr.p->hashcheckbit = regPagePtr.p->word32[ZPAGEZERO_HASHCHECKBIT];
- regFragPtr.p->k = regPagePtr.p->word32[ZPAGEZERO_K];
- regFragPtr.p->lhfragbits = regPagePtr.p->word32[ZPAGEZERO_LHFRAGBITS];
- regFragPtr.p->lhdirbits = regPagePtr.p->word32[ZPAGEZERO_LHDIRBITS];
- regFragPtr.p->localkeylen = regPagePtr.p->word32[ZPAGEZERO_LOCALKEYLEN];
- regFragPtr.p->maxp = regPagePtr.p->word32[ZPAGEZERO_MAXP];
- regFragPtr.p->maxloadfactor = regPagePtr.p->word32[ZPAGEZERO_MAXLOADFACTOR];
- regFragPtr.p->minloadfactor = regPagePtr.p->word32[ZPAGEZERO_MINLOADFACTOR];
- regFragPtr.p->myfid = regPagePtr.p->word32[ZPAGEZERO_MYFID];
- regFragPtr.p->lastOverIndex = regPagePtr.p->word32[ZPAGEZERO_LAST_OVER_INDEX];
- regFragPtr.p->nodetype = regPagePtr.p->word32[ZPAGEZERO_NODETYPE];
- regFragPtr.p->p = regPagePtr.p->word32[ZPAGEZERO_P];
- regFragPtr.p->elementLength = regPagePtr.p->word32[ZPAGEZERO_ELEMENT_LENGTH];
- regFragPtr.p->keyLength = regPagePtr.p->word32[ZPAGEZERO_KEY_LENGTH];
- regFragPtr.p->slackCheck = regPagePtr.p->word32[ZPAGEZERO_SLACK_CHECK];
-
- regFragPtr.p->loadingFlag = ZTRUE;
-
-}//Dbacc::initFragSr()
-
-void Dbacc::initFragPageZero(FragmentrecPtr regFragPtr, Page8Ptr regPagePtr)
-{
- //------------------------------------------------------------------
- // PREV_UNDOP, NEXT_UNDO_FILE, NO_OVER_PAGE, NO_PAGES
- // is set at end of copy phase
- //------------------------------------------------------------------
- regPagePtr.p->word32[ZPAGEZERO_DIRSIZE] = regFragPtr.p->dirsize;
- regPagePtr.p->word32[ZPAGEZERO_EXPCOUNTER] = regFragPtr.p->expandCounter;
- regPagePtr.p->word32[ZPAGEZERO_SLACK] = regFragPtr.p->slack;
- regPagePtr.p->word32[ZPAGEZERO_HASHCHECKBIT] = regFragPtr.p->hashcheckbit;
- regPagePtr.p->word32[ZPAGEZERO_K] = regFragPtr.p->k;
- regPagePtr.p->word32[ZPAGEZERO_LHFRAGBITS] = regFragPtr.p->lhfragbits;
- regPagePtr.p->word32[ZPAGEZERO_LHDIRBITS] = regFragPtr.p->lhdirbits;
- regPagePtr.p->word32[ZPAGEZERO_LOCALKEYLEN] = regFragPtr.p->localkeylen;
- regPagePtr.p->word32[ZPAGEZERO_MAXP] = regFragPtr.p->maxp;
- regPagePtr.p->word32[ZPAGEZERO_MAXLOADFACTOR] = regFragPtr.p->maxloadfactor;
- regPagePtr.p->word32[ZPAGEZERO_MINLOADFACTOR] = regFragPtr.p->minloadfactor;
- regPagePtr.p->word32[ZPAGEZERO_MYFID] = regFragPtr.p->myfid;
- regPagePtr.p->word32[ZPAGEZERO_LAST_OVER_INDEX] = regFragPtr.p->lastOverIndex;
- regPagePtr.p->word32[ZPAGEZERO_NODETYPE] = regFragPtr.p->nodetype;
- regPagePtr.p->word32[ZPAGEZERO_P] = regFragPtr.p->p;
- regPagePtr.p->word32[ZPAGEZERO_ELEMENT_LENGTH] = regFragPtr.p->elementLength;
- regPagePtr.p->word32[ZPAGEZERO_KEY_LENGTH] = regFragPtr.p->keyLength;
- regPagePtr.p->word32[ZPAGEZERO_SLACK_CHECK] = regFragPtr.p->slackCheck;
-}//Dbacc::initFragPageZero()
-
-void Dbacc::initRootFragPageZero(RootfragmentrecPtr rootPtr, Page8Ptr regPagePtr)
-{
- regPagePtr.p->word32[ZPAGEZERO_TABID] = rootPtr.p->mytabptr;
- regPagePtr.p->word32[ZPAGEZERO_FRAGID0] = rootPtr.p->fragmentid[0];
- regPagePtr.p->word32[ZPAGEZERO_FRAGID1] = rootPtr.p->fragmentid[1];
- regPagePtr.p->word32[ZPAGEZERO_HASH_CHECK] = rootPtr.p->roothashcheck;
- regPagePtr.p->word32[ZPAGEZERO_NO_OF_ELEMENTS] = rootPtr.p->noOfElements;
-}//Dbacc::initRootFragPageZero()
-
-void Dbacc::initRootFragSr(RootfragmentrecPtr rootPtr, Page8Ptr regPagePtr)
-{
- rootPtr.p->roothashcheck = regPagePtr.p->word32[ZPAGEZERO_HASH_CHECK];
- rootPtr.p->noOfElements = regPagePtr.p->word32[ZPAGEZERO_NO_OF_ELEMENTS];
-}//Dbacc::initRootFragSr()
-
-/* ******************--------------------------------------------------------------- */
-/* ACC_SRREQ SYSTEM RESTART OF A LOCAL CHECK POINT */
-/* SENDER: LQH, LEVEL B */
-/* ENTER ACC_SRREQ WITH */
-/* LCP_CONNECTPTR, OPERATION RECORD PTR */
-/* TMP2, LQH'S LOCAL FRAG CHECK VALUE */
-/* TFID, LOCAL FRAG ID */
-/* TMP1, LOCAL CHECKPOINT ID */
-/* ******************--------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* ACC_SRREQ PERFORM A LOCAL CHECK POINT */
-/* ******************------------------------------+ */
-/* SENDER: LQH, LEVEL B */
-void Dbacc::execACC_SRREQ(Signal* signal)
-{
- Page8Ptr asrPageidptr;
- jamEntry();
- lcpConnectptr.i = signal->theData[0];
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- Uint32 lqhPtr = signal->theData[1];
- Uint32 fragId = signal->theData[2];
- Uint32 lcpId = signal->theData[3];
- tresult = 0;
- ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
- rootfragrecptr.i = lcpConnectptr.p->rootrecptr;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- if (rootfragrecptr.p->fragmentid[0] == fragId) {
- jam();
- fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
- } else {
- ndbrequire(rootfragrecptr.p->fragmentid[1] == fragId);
- jam();
- fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
- }//if
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- fragrecptr.p->lcpLqhPtr = lqhPtr;
- fragrecptr.p->localCheckpId = lcpId;
- asrPageidptr.i = fragrecptr.p->zeroPagePtr;
- ptrCheckGuard(asrPageidptr, cpagesize, page8);
- ndbrequire(asrPageidptr.p->word32[ZPAGEZERO_TABID] == rootfragrecptr.p->mytabptr);
- ndbrequire(asrPageidptr.p->word32[ZPAGEZERO_FRAGID0] == rootfragrecptr.p->fragmentid[0]);
- ndbrequire(asrPageidptr.p->word32[ZPAGEZERO_FRAGID1] == rootfragrecptr.p->fragmentid[1]);
- initRootFragSr(rootfragrecptr, asrPageidptr);
- initFragSr(fragrecptr, asrPageidptr);
- for (Uint32 i = 0; i < ZMAX_UNDO_VERSION; i++) {
- jam();
- if (csrVersList[i] != RNIL) {
- jam();
- srVersionPtr.i = csrVersList[i];
- ptrCheckGuard(srVersionPtr, csrVersionRecSize, srVersionRec);
- if (fragrecptr.p->localCheckpId == srVersionPtr.p->checkPointId) {
- jam();
- ndbrequire(srVersionPtr.p->checkPointId == asrPageidptr.p->word32[ZPAGEZERO_NEXT_UNDO_FILE]);
- /*--------------------------------------------------------------------------------*/
- /* SINCE -1 IS THE END-OF-LOG CODE, WE MUST TREAT IT WITH CARE. WHEN */
- /* COMPARING, IT IS LARGER THAN EVERYTHING ELSE BUT SHOULD BE TREATED AS THE */
- /* SMALLEST POSSIBLE VALUE, MEANING EMPTY. */
- /*--------------------------------------------------------------------------------*/
- if (fragrecptr.p->prevUndoposition != cminusOne) {
- if (srVersionPtr.p->prevAddress < fragrecptr.p->prevUndoposition) {
- jam();
- srVersionPtr.p->prevAddress = fragrecptr.p->prevUndoposition;
- } else if (srVersionPtr.p->prevAddress == cminusOne) {
- jam();
- srVersionPtr.p->prevAddress = fragrecptr.p->prevUndoposition;
- }//if
- }//if
- srAllocPage0011Lab(signal);
- return;
- }//if
- } else {
- jam();
- seizeSrVerRec(signal);
- srVersionPtr.p->checkPointId = fragrecptr.p->localCheckpId;
- srVersionPtr.p->prevAddress = fragrecptr.p->prevUndoposition;
- csrVersList[i] = srVersionPtr.i;
- srAllocPage0011Lab(signal);
- return;
- }//if
- }//for
- ndbrequire(false);
-}//Dbacc::execACC_SRREQ()
-
-void
-Dbacc::releaseLogicalPage(Fragmentrec * fragP, Uint32 logicalPageId){
- Ptr<struct DirRange> dirRangePtr;
- dirRangePtr.i = fragP->directory;
- ptrCheckGuard(dirRangePtr, cdirrangesize, dirRange);
-
- const Uint32 lp1 = logicalPageId >> 8;
- const Uint32 lp2 = logicalPageId & 0xFF;
- ndbrequire(lp1 < 256);
-
- Ptr<struct Directoryarray> dirArrPtr;
- dirArrPtr.i = dirRangePtr.p->dirArray[lp1];
- ptrCheckGuard(dirArrPtr, cdirarraysize, directoryarray);
-
- const Uint32 physicalPageId = dirArrPtr.p->pagep[lp2];
-
- rpPageptr.i = physicalPageId;
- ptrCheckGuard(rpPageptr, cpagesize, page8);
- releasePage(0);
-
- dirArrPtr.p->pagep[lp2] = RNIL;
-}
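-
- /* --------------------------------------------------------------------------------- */
- /* ILLUSTRATIVE SKETCH, NOT PART OF THE ORIGINAL BLOCK. A LOGICAL PAGE ID IS */
- /* RESOLVED THROUGH A TWO-LEVEL DIRECTORY: THE HIGH BITS (ID >> 8) SELECT A */
- /* Directoryarray INSIDE THE DirRange AND THE LOW 8 BITS SELECT THE PHYSICAL PAGE */
- /* SLOT, AS IN THE FUNCTION ABOVE. THE HELPER BELOW ONLY RESTATES THE INDEX SPLIT; */
- /* THE NAME IS HYPOTHETICAL. */
- /* --------------------------------------------------------------------------------- */
- static inline void
- sketchSplitLogicalPageId(Uint32 logicalPageId, Uint32& dirIndex, Uint32& pageSlot)
- {
-   dirIndex = logicalPageId >> 8;    // selects dirArray[dirIndex] in the DirRange
-   pageSlot = logicalPageId & 0xFF;  // selects pagep[pageSlot] in that Directoryarray
- }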
-
-void Dbacc::srAllocPage0011Lab(Signal* signal)
-{
- releaseLogicalPage(fragrecptr.p, 0);
-
-#if JONAS
- ndbrequire(cfirstfreeDirrange != RNIL);
- seizeDirrange(signal);
- fragrecptr.p->directory = newDirRangePtr.i;
- ndbrequire(cfirstfreeDirrange != RNIL);
- seizeDirrange(signal);
- fragrecptr.p->overflowdir = newDirRangePtr.i;
- seizeDirectory(signal);
- ndbrequire(tresult < ZLIMIT_OF_ERROR);
- newDirRangePtr.p->dirArray[0] = sdDirptr.i;
-#endif
-
- fragrecptr.p->nextAllocPage = 0;
- fragrecptr.p->fragState = SR_READ_PAGES;
- srReadPagesLab(signal);
- return;
-}//Dbacc::srAllocPage0011Lab()
-
-void Dbacc::srReadPagesLab(Signal* signal)
-{
- if (fragrecptr.p->nextAllocPage >= fragrecptr.p->noStoredPages) {
- /*--------------------------------------------------------------------------------*/
- /* WE HAVE NOW READ ALL NORMAL PAGES FROM THE FILE. */
- /*--------------------------------------------------------------------------------*/
- if (fragrecptr.p->nextAllocPage == fragrecptr.p->dirsize) {
- jam();
- /*--------------------------------------------------------------------------------*/
- /* WE HAVE NOW READ ALL NORMAL PAGES AND ALLOCATED ALL THE NEEDED PAGES. */
- /*--------------------------------------------------------------------------------*/
- fragrecptr.p->nextAllocPage = 0; /* THE NEXT OVER FLOW PAGE WHICH WILL BE READ */
- fragrecptr.p->fragState = SR_READ_OVER_PAGES;
- srReadOverPagesLab(signal);
- } else {
- ndbrequire(fragrecptr.p->nextAllocPage < fragrecptr.p->dirsize);
- jam();
- /*--------------------------------------------------------------------------------*/
- /* WE NEEDED TO ALLOCATE PAGES THAT WERE DEALLOCATED DURING THE LOCAL */
- /* CHECKPOINT. */
- /* ALLOCATE THE PAGE AND INITIALISE IT. THEN WE INSERT A REAL-TIME BREAK. */
- /*--------------------------------------------------------------------------------*/
- seizePage(signal);
- ndbrequire(tresult <= ZLIMIT_OF_ERROR);
- tipPageId = fragrecptr.p->nextAllocPage;
- inpPageptr.i = spPageptr.i;
- ptrCheckGuard(inpPageptr, cpagesize, page8);
- initPage(signal);
- fragrecptr.p->noOfExpectedPages = 1;
- fragrecptr.p->datapages[0] = spPageptr.i;
- signal->theData[0] = ZSR_READ_PAGES_ALLOC;
- signal->theData[1] = fragrecptr.i;
- sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
- }//if
- return;
- }//if
- Uint32 limitLoop;
- if ((fragrecptr.p->noStoredPages - fragrecptr.p->nextAllocPage) < ZWRITEPAGESIZE) {
- jam();
- limitLoop = fragrecptr.p->noStoredPages - fragrecptr.p->nextAllocPage;
- } else {
- jam();
- limitLoop = ZWRITEPAGESIZE;
- }//if
- ndbrequire(limitLoop <= 8);
- for (Uint32 i = 0; i < limitLoop; i++) {
- jam();
- seizePage(signal);
- ndbrequire(tresult <= ZLIMIT_OF_ERROR);
- fragrecptr.p->datapages[i] = spPageptr.i;
- signal->theData[i + 6] = spPageptr.i;
- }//for
- signal->theData[limitLoop + 6] = fragrecptr.p->activeDataFilePage;
- fragrecptr.p->noOfExpectedPages = limitLoop;
- /* -----------------SEND READ PAGES SIGNAL TO THE FILE MANAGER --------- */
- fsConnectptr.i = fragrecptr.p->fsConnPtr;
- ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
- fsConnectptr.p->fsState = WAIT_READ_DATA;
- signal->theData[0] = fsConnectptr.p->fsPtr;
- signal->theData[1] = cownBlockref;
- signal->theData[2] = fsConnectptr.i;
- signal->theData[3] = 2;
- /* FLAG = LIST MEM PAGES, RANGE OF FILE PAGES */
- signal->theData[4] = ZPAGE8_BASE_ADD;
- signal->theData[5] = fragrecptr.p->noOfExpectedPages;
- sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 15, JBA);
- return;
-}//Dbacc::srReadPagesLab()
-
-void Dbacc::storeDataPageInDirectoryLab(Signal* signal)
-{
- fragrecptr.p->activeDataFilePage += fragrecptr.p->noOfExpectedPages;
- srReadPagesAllocLab(signal);
- return;
-}//Dbacc::storeDataPageInDirectoryLab()
-
-void Dbacc::srReadPagesAllocLab(Signal* signal)
-{
- DirRangePtr srpDirRangePtr;
- DirectoryarrayPtr srpDirptr;
- DirectoryarrayPtr srpOverflowDirptr;
- Page8Ptr srpPageidptr;
-
- if (fragrecptr.p->fragState == SR_READ_PAGES) {
- jam();
- for (Uint32 i = 0; i < fragrecptr.p->noOfExpectedPages; i++) {
- jam();
- tmpP = fragrecptr.p->nextAllocPage;
- srpDirRangePtr.i = fragrecptr.p->directory;
- tmpP2 = tmpP >> 8;
- tmp = tmpP & 0xff;
- ptrCheckGuard(srpDirRangePtr, cdirrangesize, dirRange);
- arrGuard(tmpP2, 256);
- if (srpDirRangePtr.p->dirArray[tmpP2] == RNIL) {
- seizeDirectory(signal);
- ndbrequire(tresult <= ZLIMIT_OF_ERROR);
- srpDirptr.i = sdDirptr.i;
- srpDirRangePtr.p->dirArray[tmpP2] = srpDirptr.i;
- } else {
- jam();
- srpDirptr.i = srpDirRangePtr.p->dirArray[tmpP2];
- }//if
- ptrCheckGuard(srpDirptr, cdirarraysize, directoryarray);
- arrGuard(i, 8);
- srpDirptr.p->pagep[tmp] = fragrecptr.p->datapages[i];
- srpPageidptr.i = fragrecptr.p->datapages[i];
- ptrCheckGuard(srpPageidptr, cpagesize, page8);
- ndbrequire(srpPageidptr.p->word32[ZPOS_PAGE_ID] == fragrecptr.p->nextAllocPage);
- ndbrequire(((srpPageidptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == 0);
- ccoPageptr.p = srpPageidptr.p;
- checksumControl(signal, (Uint32)1);
- if (tresult > 0) {
- jam();
- return; // We will crash through a DEBUG_SIG
- }//if
- dbgWord32(srpPageidptr, ZPOS_OVERFLOWREC, RNIL);
- srpPageidptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
- fragrecptr.p->datapages[i] = RNIL;
- fragrecptr.p->nextAllocPage++;
- }//for
- srReadPagesLab(signal);
- return;
- } else {
- ndbrequire(fragrecptr.p->fragState == SR_READ_OVER_PAGES);
- for (Uint32 i = 0; i < fragrecptr.p->noOfExpectedPages; i++) {
- jam();
- arrGuard(i, 8);
- srpPageidptr.i = fragrecptr.p->datapages[i];
- ptrCheckGuard(srpPageidptr, cpagesize, page8);
- tmpP = srpPageidptr.p->word32[ZPOS_PAGE_ID]; /* DIR INDEX OF THE OVERFLOW PAGE */
- /*--------------------------------------------------------------------------------*/
- /* IT IS POSSIBLE THAT WE HAVE LOGICAL PAGES WHICH ARE NOT PART OF THE LOCAL*/
- /* CHECKPOINT. THUS WE USE THE LOGICAL PAGE ID FROM THE PAGE HERE. */
- /*--------------------------------------------------------------------------------*/
- srpDirRangePtr.i = fragrecptr.p->overflowdir;
- tmpP2 = tmpP >> 8;
- tmpP = tmpP & 0xff;
- ptrCheckGuard(srpDirRangePtr, cdirrangesize, dirRange);
- arrGuard(tmpP2, 256);
- if (srpDirRangePtr.p->dirArray[tmpP2] == RNIL) {
- jam();
- seizeDirectory(signal);
- ndbrequire(tresult <= ZLIMIT_OF_ERROR);
- srpDirRangePtr.p->dirArray[tmpP2] = sdDirptr.i;
- }//if
- srpOverflowDirptr.i = srpDirRangePtr.p->dirArray[tmpP2];
- ndbrequire(((srpPageidptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) != 0);
- ndbrequire(((srpPageidptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) != 3);
- ptrCheckGuard(srpOverflowDirptr, cdirarraysize, directoryarray);
- ndbrequire(srpOverflowDirptr.p->pagep[tmpP] == RNIL);
- srpOverflowDirptr.p->pagep[tmpP] = srpPageidptr.i;
- ccoPageptr.p = srpPageidptr.p;
- checksumControl(signal, (Uint32)1);
- ndbrequire(tresult == 0);
- dbgWord32(srpPageidptr, ZPOS_OVERFLOWREC, RNIL);
- srpPageidptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
- fragrecptr.p->nextAllocPage++;
- }//for
- srReadOverPagesLab(signal);
- return;
- }//if
-}//Dbacc::srReadPagesAllocLab()
-
-void Dbacc::srReadOverPagesLab(Signal* signal)
-{
- if (fragrecptr.p->nextAllocPage >= fragrecptr.p->noOfStoredOverPages) {
- fragrecptr.p->nextAllocPage = 0;
- if (fragrecptr.p->prevUndoposition == cminusOne) {
- jam();
- /* ************************ */
- /* ACC_OVER_REC */
- /* ************************ */
- /*--------------------------------------------------------------------------------*/
- /* UPDATE FREE LIST OF OVERFLOW PAGES AS PART OF SYSTEM RESTART AFTER */
- /* READING PAGES AND EXECUTING THE UNDO LOG. */
- /*--------------------------------------------------------------------------------*/
- signal->theData[0] = fragrecptr.i;
- sendSignal(cownBlockref, GSN_ACC_OVER_REC, signal, 1, JBB);
- } else {
- jam();
- srCloseDataFileLab(signal);
- }//if
- return;
- }//if
- Uint32 limitLoop;
- if ((fragrecptr.p->noOfStoredOverPages - fragrecptr.p->nextAllocPage) < ZWRITEPAGESIZE) {
- jam();
- limitLoop = fragrecptr.p->noOfStoredOverPages - fragrecptr.p->nextAllocPage;
- } else {
- jam();
- limitLoop = ZWRITEPAGESIZE;
- }//if
- ndbrequire(limitLoop <= 8);
- for (Uint32 i = 0; i < limitLoop; i++) {
- jam();
- seizePage(signal);
- ndbrequire(tresult <= ZLIMIT_OF_ERROR);
- fragrecptr.p->datapages[i] = spPageptr.i;
- signal->theData[i + 6] = spPageptr.i;
- }//for
- fragrecptr.p->noOfExpectedPages = limitLoop;
- signal->theData[limitLoop + 6] = fragrecptr.p->activeDataFilePage;
- /* -----------------SEND READ PAGES SIGNAL TO THE FILE MANAGER --------- */
- fsConnectptr.i = fragrecptr.p->fsConnPtr;
- ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
- fsConnectptr.p->fsState = WAIT_READ_DATA;
- signal->theData[0] = fsConnectptr.p->fsPtr;
- signal->theData[1] = cownBlockref;
- signal->theData[2] = fsConnectptr.i;
- signal->theData[3] = 2;
- signal->theData[4] = ZPAGE8_BASE_ADD;
- signal->theData[5] = fragrecptr.p->noOfExpectedPages;
- sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 15, JBA);
- return;
-}//Dbacc::srReadOverPagesLab()
-
-void Dbacc::srCloseDataFileLab(Signal* signal)
-{
- fsConnectptr.i = fragrecptr.p->fsConnPtr;
- ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
- fsConnectptr.p->fsState = SR_CLOSE_DATA;
- /* ************************ */
- /* FSCLOSEREQ */
- /* ************************ */
- signal->theData[0] = fsConnectptr.p->fsPtr;
- signal->theData[1] = cownBlockref;
- signal->theData[2] = fsConnectptr.i;
- signal->theData[3] = 0;
- sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
- return;
-}//Dbacc::srCloseDataFileLab()
-
-/* ************************ */
-/* ACC_SRCONF */
-/* ************************ */
-void Dbacc::sendaccSrconfLab(Signal* signal)
-{
- fragrecptr.i = fsConnectptr.p->fragrecPtr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- releaseFsConnRec(signal);
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- fragrecptr.p->fragState = ACTIVEFRAG;
- fragrecptr.p->fsConnPtr = RNIL;
- for (Uint32 i = 0; i < ZWRITEPAGESIZE; i++) {
- fragrecptr.p->datapages[i] = RNIL;
- }//for
- rlpPageptr.i = fragrecptr.p->zeroPagePtr;
- ptrCheckGuard(rlpPageptr, cpagesize, page8);
- releaseLcpPage(signal);
- fragrecptr.p->zeroPagePtr = RNIL;
- signal->theData[0] = fragrecptr.p->lcpLqhPtr;
- sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_SRCONF, signal, 1, JBB);
- lcpConnectptr.p->noOfLcpConf++;
- if (lcpConnectptr.p->noOfLcpConf == 2) {
- jam();
- releaseLcpConnectRec(signal);
- rootfragrecptr.p->lcpPtr = RNIL;
- rootfragrecptr.p->rootState = ACTIVEROOT;
- }//if
- return;
-}//Dbacc::sendaccSrconfLab()
-
-/* --------------------------------------------------------------------------------- */
-/* CHECKSUM_CONTROL */
-/* INPUT: CCO_PAGEPTR */
-/* OUTPUT: TRESULT */
-/* */
- /* CHECK THAT THE CHECKSUM OF THE PAGE IS CORRECT TO ENSURE THAT NO ONE HAS */
- /* CORRUPTED THE PAGE INFORMATION. THE STORED CHECKSUM IS THE XOR OF ALL OTHER */
- /* WORDS ON THE PAGE (THE CHECKSUM WORD IS ZERO WHILE IT IS CALCULATED), SO */
- /* XOR'ING OVER THE WHOLE PAGE, CHECKSUM WORD INCLUDED, YIELDS ZERO WHEN INTACT. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::checksumControl(Signal* signal, Uint32 checkPage)
-{
- Uint32 Tchs;
- Uint32 tccoIndex;
- Uint32 Ti;
- Uint32 Tmp1;
- Uint32 Tmp2;
- Uint32 Tmp3;
- Uint32 Tmp4;
- Uint32 Tlimit;
-
- Tchs = 0;
- for (Ti = 0; Ti < 32 ; Ti++) {
- Tlimit = 16 + (Ti << 6);
- for (tccoIndex = (Ti << 6); tccoIndex < Tlimit; tccoIndex ++) {
- Tmp1 = ccoPageptr.p->word32[tccoIndex];
- Tmp2 = ccoPageptr.p->word32[tccoIndex + 16];
- Tmp3 = ccoPageptr.p->word32[tccoIndex + 32];
- Tmp4 = ccoPageptr.p->word32[tccoIndex + 48];
-
- Tchs = Tchs ^ Tmp1;
- Tchs = Tchs ^ Tmp2;
- Tchs = Tchs ^ Tmp3;
- Tchs = Tchs ^ Tmp4;
- }//for
- }//for
- if (Tchs == 0) {
- tresult = 0;
- if (checkPage != 0) {
- jam();
- lcnCopyPageptr.p = ccoPageptr.p;
- srCheckPage(signal);
- }//if
- } else {
- tresult = 1;
- }//if
- if (tresult != 0) {
- jam();
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- signal->theData[0] = RNIL;
- signal->theData[1] = rootfragrecptr.p->mytabptr;
- signal->theData[2] = fragrecptr.p->myfid;
- signal->theData[3] = ccoPageptr.p->word32[ZPOS_PAGE_ID];
- signal->theData[4] = tlupElemIndex;
- signal->theData[5] = ccoPageptr.p->word32[ZPOS_PAGE_TYPE];
- signal->theData[6] = tresult;
- sendSignal(cownBlockref, GSN_DEBUG_SIG, signal, 7, JBA);
- }//if
-}//Dbacc::checksumControl()
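-
- /* --------------------------------------------------------------------------------- */
- /* ILLUSTRATIVE SKETCH, NOT PART OF THE ORIGINAL BLOCK. SINCE THE STORED CHECKSUM IS */
- /* THE XOR OF ALL OTHER WORDS ON THE PAGE, XOR'ING OVER THE WHOLE PAGE, INCLUDING */
- /* THE CHECKSUM WORD, MUST YIELD ZERO FOR AN INTACT PAGE; THAT IS WHAT THE UNROLLED */
- /* LOOP ABOVE COMPUTES. A STRAIGHTFORWARD EQUIVALENT WITH A HYPOTHETICAL NAME, */
- /* ASSUMING THE 2048-WORD PAGE SIZE IMPLIED BY THE LOOP BOUNDS ABOVE: */
- /* --------------------------------------------------------------------------------- */
- static inline bool
- sketchPageChecksumOk(const Uint32* word32)
- {
-   Uint32 chs = 0;
-   for (Uint32 i = 0; i < 2048; i++) {
-     chs ^= word32[i];               // includes the stored checksum word
-   }
-   return chs == 0;                  // zero <=> page consistent
- }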
-
-/* ******************--------------------------------------------------------------- */
-/* START_RECREQ REQUEST TO START UNDO PROCESS */
-/* SENDER: LQH, LEVEL B */
-/* ENTER START_RECREQ WITH */
-/* CLQH_PTR, LQH CONNECTION PTR */
-/* CLQH_BLOCK_REF, LQH BLOCK REFERENCE */
-/* ******************--------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* START_RECREQ REQUEST TO START UNDO PROCESS */
-/* ******************------------------------------+ */
-/* SENDER: LQH, LEVEL B */
-void Dbacc::execSTART_RECREQ(Signal* signal)
-{
- jamEntry();
- clqhPtr = signal->theData[0]; /* LQH CONNECTION PTR */
- clqhBlockRef = signal->theData[1]; /* LQH BLOCK REFERENCE */
- tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
- for (int i = 0; i < UndoHeader::ZNO_UNDORECORD_TYPES; i++)
- cSrUndoRecords[i] = 0;
- startUndoLab(signal);
- return;
-}//Dbacc::execSTART_RECREQ()
-
-void Dbacc::startUndoLab(Signal* signal)
-{
- cundoLogActive = ZTRUE;
- /* ----- OPEN UNDO FILES --------- */
- for (tmp = 0; tmp <= ZMAX_UNDO_VERSION - 1; tmp++) {
- jam();
- if (csrVersList[tmp] != RNIL) {
- jam();
- /*---------------------------------------------------------------------------*/
- /* SELECT THE NEXT SYSTEM RESTART RECORD WHICH CONTAINS AN UNDO LOG */
- /* THAT NEEDS TO BE EXECUTED AND SET UP THE DATA TO EXECUTE IT. */
- /*---------------------------------------------------------------------------*/
- srVersionPtr.i = csrVersList[tmp];
- csrVersList[tmp] = RNIL;
- ptrCheckGuard(srVersionPtr, csrVersionRecSize, srVersionRec);
- cactiveUndoFilePage = srVersionPtr.p->prevAddress >> 13;
- cprevUndoaddress = srVersionPtr.p->prevAddress;
- cactiveCheckpId = srVersionPtr.p->checkPointId;
-
- releaseSrRec(signal);
- startActiveUndo(signal);
- return;
- }//if
- }//for
-
- // Send report of how many undo log records were executed
- signal->theData[0] = NDB_LE_UNDORecordsExecuted;
- signal->theData[1] = DBACC; // From block
- signal->theData[2] = 0; // Total records executed
- for (int i = 0; i < 10; i++){
- if (i < UndoHeader::ZNO_UNDORECORD_TYPES){
- signal->theData[i+3] = cSrUndoRecords[i];
- signal->theData[2] += cSrUndoRecords[i];
- }else{
- signal->theData[i+3] = 0;
- }
- }
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 12, JBB);
-
- /* ******************************< */
- /* START_RECCONF */
- /* ******************************< */
- /*---------------------------------------------------------------------------*/
- /* REPORT COMPLETION OF UNDO LOG EXECUTION. */
- /*---------------------------------------------------------------------------*/
- cundoLogActive = ZFALSE;
- signal->theData[0] = clqhPtr;
- sendSignal(clqhBlockRef, GSN_START_RECCONF, signal, 1, JBB);
- /* LQH CONNECTION PTR */
- return;
-}//Dbacc::startUndoLab()
-
-/*---------------------------------------------------------------------------*/
-/* START THE UNDO OF AN UNDO LOG FILE BY OPENING THE UNDO LOG FILE. */
-/*---------------------------------------------------------------------------*/
-void Dbacc::startActiveUndo(Signal* signal)
-{
- if (cprevUndoaddress == cminusOne) {
- jam();
- /*---------------------------------------------------------------------------*/
- /* THERE WAS NO UNDO LOG INFORMATION IN THIS LOG FILE. WE GET THE NEXT ONE */
- /* OR REPORT COMPLETION. */
- /*---------------------------------------------------------------------------*/
- signal->theData[0] = ZSTART_UNDO;
- sendSignal(cownBlockref, GSN_CONTINUEB, signal, 1, JBB);
- } else {
- jam();
- /*---------------------------------------------------------------------------*/
- /* OPEN THE LOG FILE PERTAINING TO THIS UNDO LOG. */
- /*---------------------------------------------------------------------------*/
- if (cfsFirstfreeconnect == RNIL) {
- jam();
- sendSystemerror(signal);
- }//if
- seizeFsConnectRec(signal);
- cactiveSrFsPtr = fsConnectptr.i;
- fsConnectptr.p->fsState = OPEN_UNDO_FILE_SR;
- fsConnectptr.p->fsPart = 0;
- tmp1 = 1; /* FILE VERSION ? */
- tmp1 = (tmp1 << 8) + ZLOCALLOGFILE; /* .LOCLOG = 2 */
- tmp1 = (tmp1 << 8) + 4; /* ROOT DIRECTORY = D4 */
- tmp1 = (tmp1 << 8) + fsConnectptr.p->fsPart; /* P2 */
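- /* The shifts above pack one word, from the most significant byte down:          */
- /* <file version = 1> <file type = ZLOCALLOGFILE> <directory = 4> <fsPart>, i.e. */
- /* tmp1 == (1 << 24) + (ZLOCALLOGFILE << 16) + (4 << 8) + fsConnectptr.p->fsPart */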
- tmp2 = 0x0; /* D7 DON'T CREATE , READ ONLY */
- /* DON'T TRUNCATE TO ZERO */
- /* ---FILE NAME "D4"/"DBACC"/LCP_CONNECTPTR:LOCAL_CHECK_PID/FS_CONNECTPTR:FS_PART".LOCLOG-- */
- /* ************************ */
- /* FSOPENREQ */
- /* ************************ */
- signal->theData[0] = cownBlockref;
- signal->theData[1] = fsConnectptr.i;
- signal->theData[2] = cminusOne; /* #FFFFFFFF */
- signal->theData[3] = cminusOne; /* #FFFFFFFF */
- signal->theData[4] = cactiveCheckpId; /* CHECKPOINT VERSION */
- signal->theData[5] = tmp1;
- signal->theData[6] = tmp2;
- sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
- }//if
-}//Dbacc::startActiveUndo()
-
-/* ------- READ A GROUP OF UNDO PAGES --------------- */
-void Dbacc::srStartUndoLab(Signal* signal)
-{
- /*---------------------------------------------------------------------------*/
- /* ALL LOG FILES HAVE BEEN OPENED. WE CAN NOW READ DATA FROM THE LAST */
- /* PAGE IN THE LAST LOG FILE AND BACKWARDS UNTIL WE REACH THE VERY */
- /* FIRST UNDO LOG RECORD. */
- /*---------------------------------------------------------------------------*/
- if (cactiveUndoFilePage >= ZWRITE_UNDOPAGESIZE) {
- jam();
- tmp1 = ZWRITE_UNDOPAGESIZE; /* NO OF READ UNDO PAGES */
- cactiveSrUndoPage = ZWRITE_UNDOPAGESIZE - 1; /* LAST PAGE */
- } else {
- jam();
- tmp1 = cactiveUndoFilePage + 1; /* NO OF READ UNDO PAGES */
- cactiveSrUndoPage = cactiveUndoFilePage;
- }//if
- fsConnectptr.i = cactiveSrFsPtr;
- ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
- signal->theData[0] = fsConnectptr.p->fsPtr;
- signal->theData[1] = cownBlockref;
- signal->theData[2] = fsConnectptr.i;
- signal->theData[3] = 0;
- /* FLAG = LIST MEM PAGES, LIST FILE PAGES */
- signal->theData[4] = ZUNDOPAGE_BASE_ADD;
- signal->theData[5] = tmp1;
- signal->theData[6] = 0;
- signal->theData[7] = (cactiveUndoFilePage - tmp1) + 1;
- signal->theData[8] = 1;
- signal->theData[9] = cactiveUndoFilePage;
-
- sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 10, JBA);
- if (tmp1 > cactiveUndoFilePage) {
- jam();
- /*---------------------------------------------------------------------------*/
- /* THIS IS THE LAST READ IN THIS LOG FILE. WE SET THE ACTIVE FILE */
- /* POINTER. IF IT IS THE FIRST WE SHOULD NEVER ATTEMPT ANY MORE READS */
- /* SINCE WE SHOULD ENCOUNTER A FIRST LOG RECORD WITH PREVIOUS PAGE ID */
- /* EQUAL TO RNIL. */
- /*---------------------------------------------------------------------------*/
- cactiveSrFsPtr = RNIL;
- fsConnectptr.p->fsState = READ_UNDO_PAGE_AND_CLOSE;
- } else {
- jam();
- /*---------------------------------------------------------------------------*/
- /* WE STILL HAVE MORE INFORMATION IN THIS LOG FILE. WE ONLY MOVE BACK */
- /* THE FILE PAGE. */
- /*---------------------------------------------------------------------------*/
- cactiveUndoFilePage = cactiveUndoFilePage - tmp1;
- fsConnectptr.p->fsState = READ_UNDO_PAGE;
- }//if
- return;
-}//Dbacc::srStartUndoLab()
-
-/* ------- DO UNDO ---------------------------*/
-/* ******************--------------------------------------------------------------- */
-/* NEXTOPERATION ORD FOR EXECUTION OF NEXT OP */
-/* ******************------------------------------+ */
-/* SENDER: ACC, LEVEL B */
-void Dbacc::execNEXTOPERATION(Signal* signal)
-{
- jamEntry();
- tresult = 0;
- srDoUndoLab(signal);
- return;
-}//Dbacc::execNEXTOPERATION()
-
-void Dbacc::srDoUndoLab(Signal* signal)
-{
- DirRangePtr souDirRangePtr;
- DirectoryarrayPtr souDirptr;
- Page8Ptr souPageidptr;
- Uint32 tundoPageindex;
- UndoHeader *undoHeaderPtr;
- Uint32 tmpindex;
-
- jam();
- undopageptr.i = cactiveSrUndoPage;
- ptrCheckGuard(undopageptr, cundopagesize, undopage);
- /*---------------------------------------------------------------------------*/
- /* LAYOUT OF AN UNDO LOG RECORD: */
- /* ***************************** */
- /* */
- /* |----------------------------------------------------| */
- /* | TABLE ID | */
- /* |----------------------------------------------------| */
- /* | ROOT FRAGMENT ID | */
- /* |----------------------------------------------------| */
- /* | LOCAL FRAGMENT ID | */
- /* |----------------------------------------------------| */
- /* | UNDO INFO LEN 14 b | TYPE 4 b | PAGE INDEX 14 b | */
- /* |----------------------------------------------------| */
- /* | INDEX INTO PAGE DIRECTORY (LOGICAL PAGE ID) | */
- /* |----------------------------------------------------| */
- /* | PREVIOUS UNDO LOG RECORD FOR THE FRAGMENT | */
- /* |----------------------------------------------------| */
- /* | PREVIOUS UNDO LOG RECORD FOR ALL FRAGMENTS | */
- /* |----------------------------------------------------| */
- /* | TYPE SPECIFIC PART | */
- /* |----------------------------------------------------| */
- /*---------------------------------------------------------------------------*/
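- /*---------------------------------------------------------------------------*/
- /* FOR ORIENTATION ONLY, A SKETCH OF HOW THE WORDS ABOVE MAP ONTO THE */
- /* UNDO_HEADER MEMBERS READ FURTHER DOWN (THE STRUCT NAME AND THE */
- /* WORD-PER-ROW ASSUMPTION ARE ILLUSTRATIVE, NOT TAKEN FROM THE SOURCE): */
- /* */
- /* struct UndoHeaderSketch { */
- /* Uint32 tableId; // TABLE ID */
- /* Uint32 rootFragId; // ROOT FRAGMENT ID */
- /* Uint32 localFragId; // LOCAL FRAGMENT ID */
- /* Uint32 variousInfo; // LEN 14 b | TYPE 4 b | PAGE INDEX 14 b */
- /* Uint32 logicalPageId; // INDEX INTO PAGE DIRECTORY */
- /* Uint32 prevUndoAddressForThisFrag; // PREV RECORD FOR THE FRAGMENT */
- /* Uint32 prevUndoAddress; // PREV RECORD FOR ALL FRAGMENTS */
- /* }; // FOLLOWED BY THE TYPE SPECIFIC PART */
- /*---------------------------------------------------------------------------*/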
- /*---------------------------------------------------------------------------*/
- /* SET THE PAGE POINTER. WE ONLY WORK WITH TWO PAGES IN THIS RESTART */
- /* ACTIVITY. GET THE PAGE POINTER AND THE PAGE INDEX TO READ FROM. */
- /*---------------------------------------------------------------------------*/
- tundoindex = cprevUndoaddress & ZUNDOPAGEINDEX_MASK; //0x1fff, 13 bits.
- undoHeaderPtr = (UndoHeader *) &undopageptr.p->undoword[tundoindex];
- tundoindex = tundoindex + ZUNDOHEADSIZE;
-
- /*------------------------------------------------------------------------*/
- /* READ TABLE ID AND ROOT FRAGMENT ID AND USE THIS TO GET ROOT RECORD. */
- /*------------------------------------------------------------------------*/
- arrGuard((tundoindex + 6), 8192);
-
- // TABLE ID
- tabptr.i = undoHeaderPtr->tableId;
- ptrCheckGuard(tabptr, ctablesize, tabrec);
-
- // ROOT FRAGMENT ID
- tfid = undoHeaderPtr->rootFragId;
- ndbrequire((undoHeaderPtr->localFragId >> 1) == undoHeaderPtr->rootFragId);
- if (!getrootfragmentrec(signal, rootfragrecptr, tfid)) {
- jam();
- /*---------------------------------------------------------------------*/
- /* THE ROOT RECORD WAS NOT FOUND. OBVIOUSLY WE ARE NOT RESTARTING THIS */
- /* FRAGMENT. WE THUS IGNORE THIS LOG RECORD AND PROCEED WITH THE NEXT. */
- /*---------------------------------------------------------------------*/
- creadyUndoaddress = cprevUndoaddress;
- // PREVIOUS UNDO LOG RECORD FOR ALL FRAGMENTS
- cprevUndoaddress = undoHeaderPtr->prevUndoAddress;
- undoNext2Lab(signal);
-#ifdef VM_TRACE
- ndbout_c("ignoring root fid %d", (int)tfid);
-#endif
- return;
- }//if
- /*-----------------------------------------------------------------------*/
- /* READ THE LOCAL FRAGMENT ID AND VERIFY THAT IT IS CORRECT. */
- /*-----------------------------------------------------------------------*/
- if (rootfragrecptr.p->fragmentid[0] == undoHeaderPtr->localFragId) {
- jam();
- fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- } else {
- if (rootfragrecptr.p->fragmentid[1] == undoHeaderPtr->localFragId) {
- jam();
- fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- } else {
- jam();
- progError(__LINE__, 0, "Invalid local fragment id in undo log");
- return;
- }//if
- }//if
- /*------------------------------------------------------------------------*/
- /* READ UNDO INFO LENGTH, TYPE OF LOG RECORD AND PAGE INDEX WHERE TO */
- /* APPLY THIS LOG RECORD. ALSO STEP INDEX TO PREPARE READ OF LOGICAL */
- /* PAGE ID. SET TMPINDEX TO INDEX THE FIRST WORD IN THE TYPE SPECIFIC */
- /* PART. */
- /*------------------------------------------------------------------------*/
- // UNDO INFO LENGTH 14 b | TYPE 4 b | PAGE INDEX 14 b
- const Uint32 tmp1 = undoHeaderPtr->variousInfo;
- cundoinfolength = tmp1 >> 18;
- const Uint32 tpageType = (tmp1 >> 14) & 0xf;
- tundoPageindex = tmp1 & 0x3fff;
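- // i.e. variousInfo == (cundoinfolength << 18) | (tpageType << 14) | tundoPageindex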
-
- // INDEX INTO PAGE DIRECTORY (LOGICAL PAGE ID)
- tmpP = undoHeaderPtr->logicalPageId ;
- tmpindex = tundoindex;
- arrGuard((tmpindex + cundoinfolength - 1), 8192);
- if (fragrecptr.p->localCheckpId != cactiveCheckpId) {
- jam();
- /*-----------------------------------------------------------------------*/
- /* THE FRAGMENT DID EXIST BUT IS NOT AFFECTED BY THIS UNDO LOG */
- /* EXECUTION. EITHER IT BELONGS TO ANOTHER CHECKPOINT OR IT IS NEWLY CREATED AND ONLY IN */
- /* NEED OF EXECUTION OF REDO LOG RECORDS FROM LQH. */
- /*-----------------------------------------------------------------------*/
- creadyUndoaddress = cprevUndoaddress;
- // PREVIOUS UNDO LOG RECORD FOR ALL FRAGMENTS
- cprevUndoaddress = undoHeaderPtr->prevUndoAddress;
-
- undoNext2Lab(signal);
- return;
- }//if
- /*-----------------------------------------------------------------------*/
- /* VERIFY CONSISTENCY OF UNDO LOG RECORDS. */
- /*-----------------------------------------------------------------------*/
- ndbrequire(fragrecptr.p->prevUndoposition == cprevUndoaddress);
- cSrUndoRecords[tpageType]++;
- switch(tpageType){
-
- case UndoHeader::ZPAGE_INFO:{
- jam();
- /*----------------------------------------------------------------------*/
- /* WE HAVE TO UNDO UPDATES IN A NORMAL PAGE. GET THE PAGE POINTER BY */
- /* USING THE LOGICAL PAGE ID. THEN RESET THE OLD VALUE IN THE PAGE BY */
- /* USING THE OLD DATA WHICH IS STORED IN THIS UNDO LOG RECORD. */
- /*----------------------------------------------------------------------*/
- souDirRangePtr.i = fragrecptr.p->directory;
- tmpP2 = tmpP >> 8;
- tmpP = tmpP & 0xff;
- ptrCheckGuard(souDirRangePtr, cdirrangesize, dirRange);
- arrGuard(tmpP2, 256);
- souDirptr.i = souDirRangePtr.p->dirArray[tmpP2];
- ptrCheckGuard(souDirptr, cdirarraysize, directoryarray);
- souPageidptr.i = souDirptr.p->pagep[tmpP];
- ptrCheckGuard(souPageidptr, cpagesize, page8);
- Uint32 loopLimit = tundoPageindex + cundoinfolength;
- ndbrequire(loopLimit <= 2048);
- for (Uint32 tmp = tundoPageindex; tmp < loopLimit; tmp++) {
- dbgWord32(souPageidptr, tmp, undopageptr.p->undoword[tmpindex]);
- souPageidptr.p->word32[tmp] = undopageptr.p->undoword[tmpindex];
- tmpindex = tmpindex + 1;
- }//for
- break;
- }
-
- case UndoHeader::ZOVER_PAGE_INFO:{
- jam();
- /*----------------------------------------------------------------------*/
- /* WE HAVE TO UNDO UPDATES IN AN OVERFLOW PAGE. GET THE PAGE POINTER BY*/
- /* USING THE LOGICAL PAGE ID. THEN RESET THE OLD VALUE IN THE PAGE BY */
- /* USING THE OLD DATA WHICH IS STORED IN THIS UNDO LOG RECORD. */
- /*----------------------------------------------------------------------*/
- souDirRangePtr.i = fragrecptr.p->overflowdir;
- tmpP2 = tmpP >> 8;
- tmpP = tmpP & 0xff;
- ptrCheckGuard(souDirRangePtr, cdirrangesize, dirRange);
- arrGuard(tmpP2, 256);
- souDirptr.i = souDirRangePtr.p->dirArray[tmpP2];
- ptrCheckGuard(souDirptr, cdirarraysize, directoryarray);
- souPageidptr.i = souDirptr.p->pagep[tmpP];
- ptrCheckGuard(souPageidptr, cpagesize, page8);
- Uint32 loopLimit = tundoPageindex + cundoinfolength;
- ndbrequire(loopLimit <= 2048);
- for (Uint32 tmp = tundoPageindex; tmp < loopLimit; tmp++) {
- dbgWord32(souPageidptr, tmp, undopageptr.p->undoword[tmpindex]);
- souPageidptr.p->word32[tmp] = undopageptr.p->undoword[tmpindex];
- tmpindex = tmpindex + 1;
- }//for
- break;
- }
-
- case UndoHeader::ZOP_INFO: {
- jam();
- /*---------------------------------------------------------------------*/
- /* AN OPERATION WAS ACTIVE WHEN THE LOCAL CHECKPOINT WAS EXECUTED. WE NEED */
- /* TO RESET THE LOCKS IT HAS SET. IF THE OPERATION WAS AN INSERT OR */
- /* THE ELEMENT WAS MARKED AS DISAPPEARED IT WILL ALSO BE REMOVED */
- /* FROM THE PAGE. */
- /* */
- /* BEGIN BY SEARCHING FOR THE ELEMENT. WHEN FOUND, UNDO THE */
- /* CHANGES ON THE ELEMENT HEADER. IF IT WAS AN INSERT OPERATION OR */
- /* MARKED AS DISAPPEARED, PROCEED BY REMOVING THE ELEMENT. */
- /*---------------------------------------------------------------------*/
- seizeOpRec(signal);
- // Initialise the opRec
- operationRecPtr.p->transId1 = 0;
- operationRecPtr.p->transId2 = RNIL;
- operationRecPtr.p->transactionstate = ACTIVE;
- operationRecPtr.p->commitDeleteCheckFlag = ZFALSE;
- operationRecPtr.p->lockMode = 0;
- operationRecPtr.p->dirtyRead = 0;
- operationRecPtr.p->nodeType = 0;
- operationRecPtr.p->fid = fragrecptr.p->myfid;
- operationRecPtr.p->nextParallelQue = RNIL;
- operationRecPtr.p->prevParallelQue = RNIL;
- operationRecPtr.p->nextQueOp = RNIL;
- operationRecPtr.p->prevQueOp = RNIL;
- operationRecPtr.p->nextSerialQue = RNIL;
- operationRecPtr.p->prevSerialQue = RNIL;
- operationRecPtr.p->elementPage = RNIL;
- operationRecPtr.p->keyinfoPage = RNIL;
- operationRecPtr.p->insertIsDone = ZFALSE;
- operationRecPtr.p->lockOwner = ZFALSE;
- operationRecPtr.p->elementIsDisappeared = ZFALSE;
- operationRecPtr.p->insertDeleteLen = fragrecptr.p->elementLength;
- operationRecPtr.p->longPagePtr = RNIL;
- operationRecPtr.p->longKeyPageIndex = RNIL;
- operationRecPtr.p->scanRecPtr = RNIL;
- operationRecPtr.p->isAccLockReq = ZFALSE;
- operationRecPtr.p->isUndoLogReq = ZTRUE;
-
- // Read operation values from undo page
- operationRecPtr.p->operation = undopageptr.p->undoword[tmpindex];
- tmpindex++;
- operationRecPtr.p->hashValue = undopageptr.p->undoword[tmpindex];
- tmpindex++;
- const Uint32 tkeylen = undopageptr.p->undoword[tmpindex];
- tmpindex++;
- operationRecPtr.p->tupkeylen = tkeylen;
- operationRecPtr.p->xfrmtupkeylen = 0; // not used
- operationRecPtr.p->fragptr = fragrecptr.i;
-
- ndbrequire(fragrecptr.p->keyLength != 0 &&
- fragrecptr.p->keyLength == tkeylen);
-
- // Read localkey1 from undo page
- signal->theData[7 + 0] = undopageptr.p->undoword[tmpindex];
- tmpindex = tmpindex + 1;
- arrGuard((tmpindex - 1), 8192);
- getElement(signal);
- if (tgeResult != ZTRUE) {
- jam();
- signal->theData[0] = RNIL;
- signal->theData[1] = tabptr.i;
- signal->theData[2] = cactiveCheckpId;
- signal->theData[3] = cprevUndoaddress;
- signal->theData[4] = operationRecPtr.p->operation;
- signal->theData[5] = operationRecPtr.p->hashValue;
- signal->theData[6] = operationRecPtr.p->tupkeylen;
- sendSignal(cownBlockref, GSN_DEBUG_SIG, signal, 11, JBA);
- return;
- }//if
-
- operationRecPtr.p->elementPage = gePageptr.i;
- operationRecPtr.p->elementContainer = tgeContainerptr;
- operationRecPtr.p->elementPointer = tgeElementptr;
- operationRecPtr.p->elementIsforward = tgeForward;
-
- commitdelete(signal, true);
- releaseOpRec(signal);
- break;
- }
-
- default:
- jam();
- progError(__LINE__, 0, "Invalid pagetype in undo log");
- break;
-
- }//switch(tpageType)
-
- /*----------------------------------------------------------------------*/
- /* READ THE PAGE ID AND THE PAGE INDEX OF THE PREVIOUS UNDO LOG RECORD */
- /* FOR THIS FRAGMENT. */
- /*----------------------------------------------------------------------*/
- fragrecptr.p->prevUndoposition = undoHeaderPtr->prevUndoAddressForThisFrag;
- /*----------------------------------------------------------------------*/
- /* READ THE PAGE ID AND THE PAGE INDEX OF THE PREVIOUS UNDO LOG RECORD */
- /* FOR THIS UNDO LOG. */
- /*----------------------------------------------------------------------*/
- creadyUndoaddress = cprevUndoaddress;
- cprevUndoaddress = undoHeaderPtr->prevUndoAddress;
-
- if (fragrecptr.p->prevUndoposition == cminusOne) {
- jam();
- /*---------------------------------------------------------------------*/
- /* WE HAVE NOW EXECUTED ALL UNDO LOG RECORDS FOR THIS FRAGMENT. WE */
- /* NOW NEED TO UPDATE THE FREE LIST OF OVERFLOW PAGES. */
- /*---------------------------------------------------------------------*/
- ndbrequire(fragrecptr.p->nextAllocPage == 0);
-
- signal->theData[0] = fragrecptr.i;
- sendSignal(cownBlockref, GSN_ACC_OVER_REC, signal, 1, JBB);
- return;
- }//if
- undoNext2Lab(signal);
- return;
-}//Dbacc::srDoUndoLab()
-
-void Dbacc::undoNext2Lab(Signal* signal)
-{
- /*---------------------------------------------------------------------------*/
- /* EXECUTE NEXT UNDO LOG RECORD. */
- /*---------------------------------------------------------------------------*/
- if (cprevUndoaddress == cminusOne) {
- jam();
- /*---------------------------------------------------------------------------*/
- /* WE HAVE EXECUTED THIS UNDO LOG TO COMPLETION. IT IS NOW TIME TO TAKE */
- /* CARE OF THE NEXT UNDO LOG OR REPORT COMPLETION OF UNDO LOG EXECUTION. */
- /*---------------------------------------------------------------------------*/
- signal->theData[0] = ZSTART_UNDO;
- sendSignal(cownBlockref, GSN_CONTINUEB, signal, 1, JBB);
- return;
- }//if
- if ((creadyUndoaddress >> 13) != (cprevUndoaddress >> 13)) {
- /*---------------------------------------------------------------------------*/
- /* WE ARE CHANGING PAGE. */
- /*---------------------------------------------------------------------------*/
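- /* (AN UNDO ADDRESS PACKS THE UNDO FILE PAGE NUMBER IN ITS UPPER BITS AND A */
- /* 13-BIT IN-PAGE INDEX IN THE LOWER BITS, CF. ZUNDOPAGEINDEX_MASK ABOVE, SO */
- /* DIFFERING UPPER PARTS MEAN THE PREVIOUS RECORD LIES ON ANOTHER PAGE.) */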
- if (cactiveSrUndoPage == 0) {
- jam();
- /*---------------------------------------------------------------------------*/
- /* WE HAVE READ AND EXECUTED ALL UNDO LOG INFORMATION IN THE CURRENTLY */
- /* READ PAGES. WE STILL HAVE MORE INFORMATION TO READ FROM FILE SINCE */
- /* WE HAVEN'T FOUND THE FIRST LOG RECORD IN THE LOG FILE YET. */
- /*---------------------------------------------------------------------------*/
- srStartUndoLab(signal);
- return;
- } else {
- jam();
- /*---------------------------------------------------------------------------*/
- /* WE HAVE ANOTHER PAGE READ THAT WE NEED TO EXECUTE. */
- /*---------------------------------------------------------------------------*/
- cactiveSrUndoPage = cactiveSrUndoPage - 1;
- }//if
- }//if
- /*---------------------------------------------------------------------------*/
- /* REAL-TIME BREAK */
- /*---------------------------------------------------------------------------*/
- /* ******************************< */
- /* NEXTOPERATION */
- /* ******************************< */
- sendSignal(cownBlockref, GSN_NEXTOPERATION, signal, 1, JBB);
- return;
-}//Dbacc::undoNext2Lab()
-
-/*-----------------------------------------------------------------------------------*/
-/* AFTER COMPLETING THE READING OF DATA PAGES FROM DISK AND EXECUTING THE UNDO */
-/* LOG WE ARE READY TO UPDATE THE FREE LIST OF OVERFLOW PAGES. THIS LIST MUST */
-/* BE BUILT AGAIN SINCE IT IS NOT CHECKPOINTED. WHEN THE PAGES ARE ALLOCATED */
-/* THEY ARE NOT PART OF ANY LIST. PAGES CAN EITHER BE PUT IN FREE LIST, NOT */
-/* IN FREE LIST OR BE PUT INTO LIST OF LONG KEY PAGES. */
-/*-----------------------------------------------------------------------------------*/
-void Dbacc::execACC_OVER_REC(Signal* signal)
-{
- DirRangePtr pnoDirRangePtr;
- DirectoryarrayPtr pnoOverflowDirptr;
- Page8Ptr pnoPageidptr;
- Uint32 tpnoPageType;
- Uint32 toverPageCheck;
-
- jamEntry();
- fragrecptr.i = signal->theData[0];
- toverPageCheck = 0;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- ndbrequire((fragrecptr.p->nextAllocPage != 0) ||
- (fragrecptr.p->firstOverflowRec == RNIL));
- /*-----------------------------------------------------------------------------------*/
- /* WHO HAS PUT SOMETHING INTO THE LIST BEFORE WE EVEN STARTED PUTTING THINGS */
- /* THERE. */
- /*-----------------------------------------------------------------------------------*/
- ndbrequire(fragrecptr.p->loadingFlag == ZTRUE);
- /*---------------------------------------------------------------------------*/
- /* LOADING HAS STOPPED BEFORE WE HAVE LOADED, SYSTEM ERROR. */
- /*---------------------------------------------------------------------------*/
- while (toverPageCheck < ZNO_OF_OP_PER_SIGNAL) {
- jam();
- if (fragrecptr.p->nextAllocPage >= fragrecptr.p->lastOverIndex) {
- jam();
- fragrecptr.p->loadingFlag = ZFALSE;
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- if (rootfragrecptr.p->lcpPtr != RNIL) {
- jam();
- srCloseDataFileLab(signal);
- } else {
- jam();
- undoNext2Lab(signal);
- }//if
- return;
- }//if
- tmpP = fragrecptr.p->nextAllocPage;
- pnoDirRangePtr.i = fragrecptr.p->overflowdir;
- tmpP2 = tmpP >> 8;
- tmpP = tmpP & 0xff;
- arrGuard(tmpP2, 256);
- ptrCheckGuard(pnoDirRangePtr, cdirrangesize, dirRange);
- if (pnoDirRangePtr.p->dirArray[tmpP2] == RNIL) {
- jam();
- pnoPageidptr.i = RNIL;
- } else {
- pnoOverflowDirptr.i = pnoDirRangePtr.p->dirArray[tmpP2];
- if (pnoOverflowDirptr.i == RNIL) {
- jam();
- pnoPageidptr.i = RNIL;
- } else {
- jam();
- ptrCheckGuard(pnoOverflowDirptr, cdirarraysize, directoryarray);
- pnoPageidptr.i = pnoOverflowDirptr.p->pagep[tmpP];
- }//if
- }//if
- if (pnoPageidptr.i == RNIL) {
- jam();
- seizeOverRec(signal);
- sorOverflowRecPtr.p->dirindex = fragrecptr.p->nextAllocPage;
- sorOverflowRecPtr.p->overpage = RNIL;
- priOverflowRecPtr = sorOverflowRecPtr;
- putRecInFreeOverdir(signal);
- } else {
- ptrCheckGuard(pnoPageidptr, cpagesize, page8);
- tpnoPageType = pnoPageidptr.p->word32[ZPOS_PAGE_TYPE];
- tpnoPageType = (tpnoPageType >> ZPOS_PAGE_TYPE_BIT) & 3;
- if (pnoPageidptr.p->word32[ZPOS_ALLOC_CONTAINERS] > ZFREE_LIMIT) {
- jam();
- dbgWord32(pnoPageidptr, ZPOS_OVERFLOWREC, RNIL);
- pnoPageidptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
- ndbrequire(pnoPageidptr.p->word32[ZPOS_PAGE_ID] == fragrecptr.p->nextAllocPage);
- } else {
- jam();
- seizeOverRec(signal);
- sorOverflowRecPtr.p->dirindex = pnoPageidptr.p->word32[ZPOS_PAGE_ID];
- ndbrequire(sorOverflowRecPtr.p->dirindex == fragrecptr.p->nextAllocPage);
- dbgWord32(pnoPageidptr, ZPOS_OVERFLOWREC, sorOverflowRecPtr.i);
- pnoPageidptr.p->word32[ZPOS_OVERFLOWREC] = sorOverflowRecPtr.i;
- sorOverflowRecPtr.p->overpage = pnoPageidptr.i;
- porOverflowRecPtr = sorOverflowRecPtr;
- putOverflowRecInFrag(signal);
- if (pnoPageidptr.p->word32[ZPOS_ALLOC_CONTAINERS] == 0) {
- jam();
- ropPageptr = pnoPageidptr;
- releaseOverpage(signal);
- }//if
- }//if
- }//if
- fragrecptr.p->nextAllocPage++;
- toverPageCheck++;
- }//while
- signal->theData[0] = fragrecptr.i;
- sendSignal(cownBlockref, GSN_ACC_OVER_REC, signal, 1, JBB);
-}//Dbacc::execACC_OVER_REC()
-
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* END OF SYSTEM RESTART MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* SCAN MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* ACC_SCANREQ START OF A SCAN PROCESS */
-/* SENDER: LQH, LEVEL B */
-/* ENTER ACC_SCANREQ WITH */
-/* TUSERPTR, LQH SCAN_CONNECT POINTER */
-/* TUSERBLOCKREF, LQH BLOCK REFERENCE */
-/* TABPTR, TABLE IDENTITY AND PTR */
-/* TFID ROOT FRAGMENT IDENTITY */
-/* TSCAN_FLAG , = ZCOPY, ZSCAN, ZSCAN_LOCK_ALL */
-/* ZREADLOCK, ZWRITELOCK */
-/* TSCAN_TRID1 , TRANSACTION ID PART 1 */
-/* TSCAN_TRID2 TRANSACTION ID PART 2 */
-/* ******************--------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* ACC_SCANREQ START OF A SCAN PROCESS */
-/* ******************------------------------------+ */
-/* SENDER: LQH, LEVEL B */
-void Dbacc::execACC_SCANREQ(Signal* signal)
-{
- jamEntry();
- AccScanReq * req = (AccScanReq*)&signal->theData[0];
- tuserptr = req->senderData;
- tuserblockref = req->senderRef;
- tabptr.i = req->tableId;
- tfid = req->fragmentNo;
- tscanFlag = req->requestInfo;
- tscanTrid1 = req->transId1;
- tscanTrid2 = req->transId2;
-
- tresult = 0;
- ptrCheckGuard(tabptr, ctablesize, tabrec);
- ndbrequire(getrootfragmentrec(signal,rootfragrecptr, tfid));
-
- Uint32 i;
- for (i = 0; i < MAX_PARALLEL_SCANS_PER_FRAG; i++) {
- jam();
- if (rootfragrecptr.p->scan[i] == RNIL) {
- jam();
- break;
- }
- }
- ndbrequire(i != MAX_PARALLEL_SCANS_PER_FRAG);
- ndbrequire(cfirstFreeScanRec != RNIL);
- seizeScanRec(signal);
-
- rootfragrecptr.p->scan[i] = scanPtr.i;
- scanPtr.p->scanBucketState = ScanRec::FIRST_LAP;
- scanPtr.p->scanLockMode = AccScanReq::getLockMode(tscanFlag);
- scanPtr.p->scanReadCommittedFlag = AccScanReq::getReadCommittedFlag(tscanFlag);
-
- /* TWELVE BITS OF THE ELEMENT HEAD ARE SCAN */
- /* CHECK BITS. THE MASK NOTES WHICH BIT IS */
- /* ALLOCATED FOR THE ACTIVE SCAN */
- scanPtr.p->scanMask = 1 << i;
- scanPtr.p->scanUserptr = tuserptr;
- scanPtr.p->scanUserblockref = tuserblockref;
- scanPtr.p->scanTrid1 = tscanTrid1;
- scanPtr.p->scanTrid2 = tscanTrid2;
- scanPtr.p->rootPtr = rootfragrecptr.i;
- scanPtr.p->scanLockHeld = 0;
- scanPtr.p->scanOpsAllocated = 0;
- scanPtr.p->scanFirstActiveOp = RNIL;
- scanPtr.p->scanFirstQueuedOp = RNIL;
- scanPtr.p->scanLastQueuedOp = RNIL;
- scanPtr.p->scanFirstLockedOp = RNIL;
- scanPtr.p->scanLastLockedOp = RNIL;
- scanPtr.p->scanState = ScanRec::WAIT_NEXT;
- fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- initScanFragmentPart(signal);
-
- /*------------------------------------------------------*/
- /* We start the timeout loop for the scan process here. */
- /*------------------------------------------------------*/
- ndbrequire(scanPtr.p->scanTimer == 0);
- if (scanPtr.p->scanContinuebCounter == 0) {
- jam();
- scanPtr.p->scanContinuebCounter = 1;
- signal->theData[0] = ZSEND_SCAN_HBREP;
- signal->theData[1] = scanPtr.i;
- sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 100, 2);
- }//if
- scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter;
- /* ************************ */
- /* ACC_SCANCONF */
- /* ************************ */
- signal->theData[0] = scanPtr.p->scanUserptr;
- signal->theData[1] = scanPtr.i;
- signal->theData[2] = 2;
- /* NR OF LOCAL FRAGMENT */
- signal->theData[3] = rootfragrecptr.p->fragmentid[0];
- signal->theData[4] = rootfragrecptr.p->fragmentid[1];
- signal->theData[7] = AccScanConf::ZNOT_EMPTY_FRAGMENT;
- sendSignal(scanPtr.p->scanUserblockref, GSN_ACC_SCANCONF, signal, 8, JBB);
- /* NOT EMPTY FRAGMENT */
- return;
-}//Dbacc::execACC_SCANREQ()
-
-/* ******************--------------------------------------------------------------- */
-/* NEXT_SCANREQ REQUEST FOR NEXT ELEMENT OF */
-/* ******************------------------------------+ A FRAGMENT. */
-/* SENDER: LQH, LEVEL B */
-void Dbacc::execNEXT_SCANREQ(Signal* signal)
-{
- Uint32 tscanNextFlag;
- jamEntry();
- scanPtr.i = signal->theData[0];
- operationRecPtr.i = signal->theData[1];
- tscanNextFlag = signal->theData[2];
- /* ------------------------------------------ */
- /* 1 = ZCOPY_NEXT GET NEXT ELEMENT */
- /* 2 = ZCOPY_NEXT_COMMIT COMMIT THE */
- /* ACTIVE ELEMENT AND GET THE NEXT ONE */
- /* 3 = ZCOPY_COMMIT COMMIT THE ACTIVE ELEMENT */
- /* 4 = ZCOPY_REPEAT GET THE ACTIVE ELEMENT */
- /* 5 = ZCOPY_ABORT RELOCK THE ACTIVE ELEMENT */
- /* 6 = ZCOPY_CLOSE THE SCAN PROCESS IS READY */
- /* ------------------------------------------ */
- tresult = 0;
- ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
- ndbrequire(scanPtr.p->scanState == ScanRec::WAIT_NEXT);
-
- scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter;
- switch (tscanNextFlag) {
- case ZCOPY_NEXT:
- jam();
- /*empty*/;
- break;
- case ZCOPY_NEXT_COMMIT:
- case ZCOPY_COMMIT:
- jam();
- /* --------------------------------------------------------------------------------- */
- /* COMMIT ACTIVE OPERATION. SEND NEXT SCAN ELEMENT IF IT IS ZCOPY_NEXT_COMMIT. */
- /* --------------------------------------------------------------------------------- */
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
- fragrecptr.i = operationRecPtr.p->fragptr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- if (!scanPtr.p->scanReadCommittedFlag) {
- if (fragrecptr.p->createLcp == ZTRUE) {
- if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_COMMIT) {
- jam();
- /*--------------------------------------------------------------*/
- // We did not have enough undo log buffers to safely commit an
- // operation. Try again in 10 milliseconds.
- /*--------------------------------------------------------------*/
- sendSignalWithDelay(cownBlockref, GSN_NEXT_SCANREQ, signal, 10, 3);
- return;
- }//if
- }//if
- commitOperation(signal);
- }//if
- takeOutActiveScanOp(signal);
- releaseOpRec(signal);
- scanPtr.p->scanOpsAllocated--;
- if (tscanNextFlag == ZCOPY_COMMIT) {
- jam();
- signal->theData[0] = scanPtr.p->scanUserptr;
- Uint32 blockNo = refToBlock(scanPtr.p->scanUserblockref);
- EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 1);
- return;
- }//if
- break;
- case ZCOPY_CLOSE:
- jam();
- fragrecptr.i = scanPtr.p->activeLocalFrag;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- if (!scanPtr.p->scanReadCommittedFlag) {
- if (fragrecptr.p->createLcp == ZTRUE) {
- if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_OPERATION) {
- jam();
- /*--------------------------------------------------------------*/
- // We did not have enough undo log buffers to commit a set of
- // operations. Try again in 10 milliseconds.
- /*--------------------------------------------------------------*/
- sendSignalWithDelay(cownBlockref, GSN_NEXT_SCANREQ, signal, 10, 3);
- return;
- }//if
- }//if
- }//if
- /* --------------------------------------------------------------------------------- */
- /* THE SCAN PROCESS IS FINISHED. UNLOCK ALL LOCKED ELEMENTS. RELEASE ALL INVOLVED RECORDS. */
- /* --------------------------------------------------------------------------------- */
- releaseScanLab(signal);
- return;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- signal->theData[0] = scanPtr.i;
- signal->theData[1] = AccCheckScan::ZNOT_CHECK_LCP_STOP;
- execACC_CHECK_SCAN(signal);
- return;
-}//Dbacc::execNEXT_SCANREQ()
-
-void Dbacc::checkNextBucketLab(Signal* signal)
-{
- DirRangePtr cscDirRangePtr;
- DirectoryarrayPtr cscDirptr;
- DirectoryarrayPtr tnsDirptr;
- Page8Ptr nsPageptr;
- Page8Ptr cscPageidptr;
- Page8Ptr gnsPageidptr;
- Page8Ptr tnsPageidptr;
- Uint32 tnsElementptr;
- Uint32 tnsContainerptr;
- Uint32 tnsIsLocked;
- Uint32 tnsTmp1;
- Uint32 tnsTmp2;
- Uint32 tnsCopyIndex1;
- Uint32 tnsCopyIndex2;
- Uint32 tnsCopyDir;
-
- tnsCopyDir = scanPtr.p->nextBucketIndex >> fragrecptr.p->k;
- tnsCopyIndex1 = tnsCopyDir >> 8;
- tnsCopyIndex2 = tnsCopyDir & 0xff;
- arrGuard(tnsCopyIndex1, 256);
- tnsDirptr.i = gnsDirRangePtr.p->dirArray[tnsCopyIndex1];
- ptrCheckGuard(tnsDirptr, cdirarraysize, directoryarray);
- tnsPageidptr.i = tnsDirptr.p->pagep[tnsCopyIndex2];
- ptrCheckGuard(tnsPageidptr, cpagesize, page8);
- gnsPageidptr.i = tnsPageidptr.i;
- gnsPageidptr.p = tnsPageidptr.p;
- tnsTmp1 = (1 << fragrecptr.p->k) - 1;
- tgsePageindex = scanPtr.p->nextBucketIndex & tnsTmp1;
- gsePageidptr.i = gnsPageidptr.i;
- gsePageidptr.p = gnsPageidptr.p;
- if (!getScanElement(signal)) {
- scanPtr.p->nextBucketIndex++;
- if (scanPtr.p->scanBucketState == ScanRec::SECOND_LAP) {
- if (scanPtr.p->nextBucketIndex > scanPtr.p->maxBucketIndexToRescan) {
- /* --------------------------------------------------------------------------------- */
- // We have finished the rescan phase. We are ready to proceed with the next fragment part.
- /* --------------------------------------------------------------------------------- */
- jam();
- checkNextFragmentLab(signal);
- return;
- }//if
- } else if (scanPtr.p->scanBucketState == ScanRec::FIRST_LAP) {
- if ((fragrecptr.p->p + fragrecptr.p->maxp) < scanPtr.p->nextBucketIndex) {
- /* --------------------------------------------------------------------------------- */
- // All buckets have been scanned a first time.
- /* --------------------------------------------------------------------------------- */
- if (scanPtr.p->minBucketIndexToRescan == 0xFFFFFFFF) {
- jam();
- /* --------------------------------------------------------------------------------- */
- // We have not had any merges behind the scan. Thus it is not necessary to
- // rescan any buckets and we can proceed immediately with the next fragment part.
- /* --------------------------------------------------------------------------------- */
- checkNextFragmentLab(signal);
- return;
- } else {
- jam();
- /* --------------------------------------------------------------------------------- */
- // Some buckets are in need of rescanning due to merges that have moved records
- // from in front of the scan to behind the scan. During the merges we kept track of
- // which buckets need a rescan. We start with the minimum and end with the maximum.
- /* --------------------------------------------------------------------------------- */
- scanPtr.p->nextBucketIndex = scanPtr.p->minBucketIndexToRescan;
- scanPtr.p->scanBucketState = ScanRec::SECOND_LAP;
- if (scanPtr.p->maxBucketIndexToRescan > (fragrecptr.p->p + fragrecptr.p->maxp)) {
- jam();
- /* --------------------------------------------------------------------------------- */
- // If we have had so many merges that the maximum is bigger than the number of buckets,
- // then we simply settle for scanning to the end. This can only happen
- // after bringing the total number of buckets down to less than half, and the minimum
- // should then be 0; otherwise there is some problem.
- /* --------------------------------------------------------------------------------- */
- if (scanPtr.p->minBucketIndexToRescan != 0) {
- jam();
- sendSystemerror(signal);
- return;
- }//if
- scanPtr.p->maxBucketIndexToRescan = fragrecptr.p->p + fragrecptr.p->maxp;
- }//if
- }//if
- }//if
- }//if
- if ((scanPtr.p->scanBucketState == ScanRec::FIRST_LAP) &&
- (scanPtr.p->nextBucketIndex <= scanPtr.p->startNoOfBuckets)) {
- /* --------------------------------------------------------------------------------- */
- // We will only reset the scan indicator on the buckets that existed at the start of the
- // scan. The others will be handled by the split and merge code.
- /* --------------------------------------------------------------------------------- */
- tnsTmp2 = (1 << fragrecptr.p->k) - 1;
- trsbPageindex = scanPtr.p->nextBucketIndex & tnsTmp2;
- if (trsbPageindex != 0) {
- jam();
- rsbPageidptr.i = gnsPageidptr.i;
- rsbPageidptr.p = gnsPageidptr.p;
- } else {
- jam();
- cscDirRangePtr.i = fragrecptr.p->directory;
- tmpP = scanPtr.p->nextBucketIndex >> fragrecptr.p->k;
- tmpP2 = tmpP >> 8;
- tmpP = tmpP & 0xff;
- ptrCheckGuard(cscDirRangePtr, cdirrangesize, dirRange);
- arrGuard(tmpP2, 256);
- cscDirptr.i = cscDirRangePtr.p->dirArray[tmpP2];
- ptrCheckGuard(cscDirptr, cdirarraysize, directoryarray);
- cscPageidptr.i = cscDirptr.p->pagep[tmpP];
- ptrCheckGuard(cscPageidptr, cpagesize, page8);
- tmp1 = (1 << fragrecptr.p->k) - 1;
- trsbPageindex = scanPtr.p->nextBucketIndex & tmp1;
- rsbPageidptr.i = cscPageidptr.i;
- rsbPageidptr.p = cscPageidptr.p;
- }//if
- releaseScanBucket(signal);
- }//if
- signal->theData[0] = scanPtr.i;
- signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
- sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
- return;
- }//if
- /* ----------------------------------------------------------------------- */
- /* AN ELEMENT WHICH HAS NOT BEEN SCANNED WAS FOUND. WE WILL PREPARE IT */
- /* TO BE SENT TO THE LQH BLOCK FOR FURTHER PROCESSING. */
- /* WE ASSUME THERE ARE OPERATION RECORDS AVAILABLE SINCE LQH SHOULD HAVE*/
- /* GUARANTEED THAT THROUGH EARLY BOOKING. */
- /* ----------------------------------------------------------------------- */
- tnsIsLocked = tgseIsLocked;
- tnsElementptr = tgseElementptr;
- tnsContainerptr = tgseContainerptr;
- nsPageptr.i = gsePageidptr.i;
- nsPageptr.p = gsePageidptr.p;
- seizeOpRec(signal);
- tisoIsforward = tgseIsforward;
- tisoContainerptr = tnsContainerptr;
- tisoElementptr = tnsElementptr;
- isoPageptr.i = nsPageptr.i;
- isoPageptr.p = nsPageptr.p;
- initScanOpRec(signal);
-
- if (!tnsIsLocked){
- if (!scanPtr.p->scanReadCommittedFlag) {
- jam();
- slPageidptr = nsPageptr;
- tslElementptr = tnsElementptr;
- setlock(signal);
- insertLockOwnersList(signal, operationRecPtr);
- }//if
- } else {
- arrGuard(tnsElementptr, 2048);
- queOperPtr.i =
- ElementHeader::getOpPtrI(nsPageptr.p->word32[tnsElementptr]);
- ptrCheckGuard(queOperPtr, coprecsize, operationrec);
- if (queOperPtr.p->elementIsDisappeared == ZTRUE) {
- jam();
- /* --------------------------------------------------------------------------------- */
- // If the lock owner indicates that the element has disappeared then we will not report this
- // tuple. We will continue with the next tuple.
- /* --------------------------------------------------------------------------------- */
- releaseOpRec(signal);
- scanPtr.p->scanOpsAllocated--;
- signal->theData[0] = scanPtr.i;
- signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
- sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
- return;
- }//if
- if (!scanPtr.p->scanReadCommittedFlag) {
- Uint32 return_result;
- if (scanPtr.p->scanLockMode == ZREADLOCK) {
- jam();
- priPageptr = nsPageptr;
- tpriElementptr = tnsElementptr;
- return_result = placeReadInLockQueue(signal);
- } else {
- jam();
- pwiPageptr = nsPageptr;
- tpwiElementptr = tnsElementptr;
- return_result = placeWriteInLockQueue(signal);
- }//if
- if (return_result == ZSERIAL_QUEUE) {
- /* --------------------------------------------------------------------------------- */
- /* WE PLACED THE OPERATION INTO A SERIAL QUEUE AND THUS WE HAVE TO WAIT FOR */
- /* THE LOCK TO BE RELEASED. WE CONTINUE WITH THE NEXT ELEMENT. */
- /* --------------------------------------------------------------------------------- */
- putOpScanLockQue(); /* PUT THE OP IN A QUE IN THE SCAN REC */
- signal->theData[0] = scanPtr.i;
- signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
- sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
- return;
- } else if (return_result == ZWRITE_ERROR) {
- jam();
- /* --------------------------------------------------------------------------------- */
- // The tuple is either not committed yet or a delete in the same transaction (not
- // possible here since we are a scan). Thus we simply continue with the next tuple.
- /* --------------------------------------------------------------------------------- */
- releaseOpRec(signal);
- scanPtr.p->scanOpsAllocated--;
- signal->theData[0] = scanPtr.i;
- signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
- sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
- return;
- }//if
- ndbassert(return_result == ZPARALLEL_QUEUE);
- }//if
- }//if
- /* --------------------------------------------------------------------------------- */
- // Committed reads proceed immediately down here without caring about locks, except when
- // the tuple was deleted permanently and no new operation has inserted it again.
- /* --------------------------------------------------------------------------------- */
- putActiveScanOp(signal);
- sendNextScanConf(signal);
- return;
-}//Dbacc::checkNextBucketLab()
-
-
-void Dbacc::checkNextFragmentLab(Signal* signal)
-{
- RootfragmentrecPtr cnfRootfragrecptr;
-
- cnfRootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(cnfRootfragrecptr, crootfragmentsize, rootfragmentrec);
- if (scanPtr.p->activeLocalFrag == cnfRootfragrecptr.p->fragmentptr[0]) {
- jam();
- fragrecptr.i = cnfRootfragrecptr.p->fragmentptr[1];
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- initScanFragmentPart(signal);
- signal->theData[0] = scanPtr.i;
- signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
- sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
- return;
- } else {
- if (scanPtr.p->activeLocalFrag == cnfRootfragrecptr.p->fragmentptr[1]) {
- jam();
- /* --------------------------------------------------------------------------------- */
- // Both fragments have completed their scan part and we can indicate that the scan is
- // now completed.
- /* --------------------------------------------------------------------------------- */
- scanPtr.p->scanBucketState = ScanRec::SCAN_COMPLETED;
- /*empty*/;
- } else {
- jam();
- /* ALL ELEMENTS ARE SENT */
- sendSystemerror(signal);
- }//if
- }//if
- /* --------------------------------------------------------------------------------- */
- // The scan is completed. ACC_CHECK_SCAN will perform all the necessary checks to see
- // what the next step is.
- /* --------------------------------------------------------------------------------- */
- signal->theData[0] = scanPtr.i;
- signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
- execACC_CHECK_SCAN(signal);
- return;
-}//Dbacc::checkNextFragmentLab()
-
-void Dbacc::initScanFragmentPart(Signal* signal)
-{
- DirRangePtr cnfDirRangePtr;
- DirectoryarrayPtr cnfDirptr;
- Page8Ptr cnfPageidptr;
- /* --------------------------------------------------------------------------------- */
- // Set the active fragment part.
- // Set the current bucket scanned to the first.
- // Start with the first lap.
- // Remember the number of buckets at start of the scan.
- // Set the minimum and maximum to values that will always be smaller and larger than.
- // Reset the scan indicator on the first bucket.
- /* --------------------------------------------------------------------------------- */
- scanPtr.p->activeLocalFrag = fragrecptr.i;
- scanPtr.p->nextBucketIndex = 0; /* INDEX OF SCAN BUCKET */
- scanPtr.p->scanBucketState = ScanRec::FIRST_LAP;
- scanPtr.p->startNoOfBuckets = fragrecptr.p->p + fragrecptr.p->maxp;
- scanPtr.p->minBucketIndexToRescan = 0xFFFFFFFF;
- scanPtr.p->maxBucketIndexToRescan = 0;
- cnfDirRangePtr.i = fragrecptr.p->directory;
- ptrCheckGuard(cnfDirRangePtr, cdirrangesize, dirRange);
- cnfDirptr.i = cnfDirRangePtr.p->dirArray[0];
- ptrCheckGuard(cnfDirptr, cdirarraysize, directoryarray);
- cnfPageidptr.i = cnfDirptr.p->pagep[0];
- ptrCheckGuard(cnfPageidptr, cpagesize, page8);
- trsbPageindex = scanPtr.p->nextBucketIndex & ((1 << fragrecptr.p->k) - 1);
- rsbPageidptr.i = cnfPageidptr.i;
- rsbPageidptr.p = cnfPageidptr.p;
- releaseScanBucket(signal);
-}//Dbacc::initScanFragmentPart()
-
-/* --------------------------------------------------------------------------------- */
- /* FLAG = 6 = ZCOPY_CLOSE THE SCAN PROCESS IS READY OR ABORTED. ALL OPERATIONS IN THE */
-/* ACTIVE OR WAIT QUEUE ARE RELEASED, SCAN FLAG OF ROOT FRAG IS RESET AND THE SCAN */
-/* RECORD IS RELEASED. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseScanLab(Signal* signal)
-{
- releaseAndCommitActiveOps(signal);
- releaseAndCommitQueuedOps(signal);
- releaseAndAbortLockedOps(signal);
-
- rootfragrecptr.i = scanPtr.p->rootPtr;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- for (tmp = 0; tmp < MAX_PARALLEL_SCANS_PER_FRAG; tmp++) {
- jam();
- if (rootfragrecptr.p->scan[tmp] == scanPtr.i) {
- jam();
- rootfragrecptr.p->scan[tmp] = RNIL;
- }//if
- }//for
- // Stops the heartbeat.
- scanPtr.p->scanTimer = 0;
- signal->theData[0] = scanPtr.p->scanUserptr;
- signal->theData[1] = RNIL;
- signal->theData[2] = RNIL;
- sendSignal(scanPtr.p->scanUserblockref, GSN_NEXT_SCANCONF, signal, 3, JBB);
- releaseScanRec(signal);
- return;
-}//Dbacc::releaseScanLab()
-
-
-void Dbacc::releaseAndCommitActiveOps(Signal* signal)
-{
- OperationrecPtr trsoOperPtr;
- operationRecPtr.i = scanPtr.p->scanFirstActiveOp;
- while (operationRecPtr.i != RNIL) {
- jam();
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
- trsoOperPtr.i = operationRecPtr.p->nextOp;
- fragrecptr.i = operationRecPtr.p->fragptr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- if (!scanPtr.p->scanReadCommittedFlag) {
- jam();
- commitOperation(signal);
- }//if
- takeOutActiveScanOp(signal);
- releaseOpRec(signal);
- scanPtr.p->scanOpsAllocated--;
- operationRecPtr.i = trsoOperPtr.i;
- }//if
-}//Dbacc::releaseAndCommitActiveOps()
-
-
-void Dbacc::releaseAndCommitQueuedOps(Signal* signal)
-{
- OperationrecPtr trsoOperPtr;
- operationRecPtr.i = scanPtr.p->scanFirstQueuedOp;
- while (operationRecPtr.i != RNIL) {
- jam();
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
- trsoOperPtr.i = operationRecPtr.p->nextOp;
- fragrecptr.i = operationRecPtr.p->fragptr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- if (!scanPtr.p->scanReadCommittedFlag) {
- jam();
- commitOperation(signal);
- }//if
- takeOutReadyScanQueue(signal);
- releaseOpRec(signal);
- scanPtr.p->scanOpsAllocated--;
- operationRecPtr.i = trsoOperPtr.i;
- }//if
-}//Dbacc::releaseAndCommitQueuedOps()
-
-void Dbacc::releaseAndAbortLockedOps(Signal* signal) {
-
- OperationrecPtr trsoOperPtr;
- operationRecPtr.i = scanPtr.p->scanFirstLockedOp;
- while (operationRecPtr.i != RNIL) {
- jam();
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
- trsoOperPtr.i = operationRecPtr.p->nextOp;
- fragrecptr.i = operationRecPtr.p->fragptr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- if (!scanPtr.p->scanReadCommittedFlag) {
- jam();
- abortOperation(signal);
- }//if
- takeOutScanLockQueue(scanPtr.i);
- releaseOpRec(signal);
- scanPtr.p->scanOpsAllocated--;
- operationRecPtr.i = trsoOperPtr.i;
- }//if
-}//Dbacc::releaseAndAbortLockedOps()
-
-/* 3.18.3 ACC_CHECK_SCAN */
-/* ******************--------------------------------------------------------------- */
-/* ACC_CHECK_SCAN */
-/* ENTER ACC_CHECK_SCAN WITH */
-/* SCAN_PTR */
-/* ******************--------------------------------------------------------------- */
-/* ******************--------------------------------------------------------------- */
-/* ACC_CHECK_SCAN */
-/* ******************------------------------------+ */
-void Dbacc::execACC_CHECK_SCAN(Signal* signal)
-{
- Uint32 TcheckLcpStop;
- jamEntry();
- scanPtr.i = signal->theData[0];
- TcheckLcpStop = signal->theData[1];
- ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
- while (scanPtr.p->scanFirstQueuedOp != RNIL) {
- jam();
- //----------------------------------------------------------------------------
- // An operation has been released from the lock queue. We are in the parallel
- // queue of this tuple. We are ready to report the tuple now.
- //----------------------------------------------------------------------------
- operationRecPtr.i = scanPtr.p->scanFirstQueuedOp;
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
- takeOutReadyScanQueue(signal);
- fragrecptr.i = operationRecPtr.p->fragptr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- if (operationRecPtr.p->elementIsDisappeared == ZTRUE) {
- jam();
- if (fragrecptr.p->createLcp == ZTRUE) {
- if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_COMMIT) {
- jam();
- /*--------------------------------------------------------------*/
- // We did not have enough undo log buffers to safely abort an
- // operation. Try again in 10 milliseconds.
- /*--------------------------------------------------------------*/
- sendSignalWithDelay(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 10, 2);
- return;
- }//if
- }//if
- abortOperation(signal);
- releaseOpRec(signal);
- scanPtr.p->scanOpsAllocated--;
- continue;
- }//if
- putActiveScanOp(signal);
- sendNextScanConf(signal);
- return;
- }//while
-
-
- if ((scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) &&
- (scanPtr.p->scanLockHeld == 0)) {
- jam();
- //----------------------------------------------------------------------------
- // The scan is now completed and there are no more locks outstanding. Thus we
- // will report the scan as completed to LQH.
- //----------------------------------------------------------------------------
- signal->theData[0] = scanPtr.p->scanUserptr;
- signal->theData[1] = RNIL;
- signal->theData[2] = RNIL;
- sendSignal(scanPtr.p->scanUserblockref, GSN_NEXT_SCANCONF, signal, 3, JBB);
- return;
- }//if
- if (TcheckLcpStop == AccCheckScan::ZCHECK_LCP_STOP) {
- //---------------------------------------------------------------------------
- // To ensure that the block of the fragment occurring at the start of a local
- // checkpoint is not held for too long, we insert a release and reacquisition of
- // that lock here. This is performed in LQH. If we are blocked or if we have
- // requested a sleep then we will receive RNIL in the returning signal word.
- //---------------------------------------------------------------------------
- signal->theData[0] = scanPtr.p->scanUserptr;
- signal->theData[1] =
- ((scanPtr.p->scanLockHeld >= ZSCAN_MAX_LOCK) ||
- (scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED));
- EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
- jamEntry();
- if (signal->theData[0] == RNIL) {
- jam();
- return;
- }//if
- }//if
- /**
- * If we have more than max locks held OR
- * scan is completed AND at least one lock held
- * - Inform LQH about this condition
- */
- if ((scanPtr.p->scanLockHeld >= ZSCAN_MAX_LOCK) ||
- (cfreeopRec == RNIL) ||
- ((scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) &&
- (scanPtr.p->scanLockHeld > 0))) {
- jam();
- signal->theData[0] = scanPtr.p->scanUserptr;
- signal->theData[1] = RNIL; // No operation is returned
- signal->theData[2] = 512; // MASV
- sendSignal(scanPtr.p->scanUserblockref, GSN_NEXT_SCANCONF, signal, 3, JBB);
- return;
- }
- if (scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) {
- jam();
- signal->theData[0] = scanPtr.i;
- signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
- execACC_CHECK_SCAN(signal);
- return;
- }//if
-
- scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter;
-
- fragrecptr.i = scanPtr.p->activeLocalFrag;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- gnsDirRangePtr.i = fragrecptr.p->directory;
- ptrCheckGuard(gnsDirRangePtr, cdirrangesize, dirRange);
- checkNextBucketLab(signal);
- return;
-}//Dbacc::execACC_CHECK_SCAN()
-
-/* ******************---------------------------------------------------- */
-/* ACC_TO_REQ PERFORM A TAKE OVER */
-/* ******************-------------------+ */
-/* SENDER: LQH, LEVEL B */
-void Dbacc::execACC_TO_REQ(Signal* signal)
-{
- OperationrecPtr tatrOpPtr;
-
- jamEntry();
- tatrOpPtr.i = signal->theData[1]; /* OPER PTR OF ACC */
- ptrCheckGuard(tatrOpPtr, coprecsize, operationrec);
- if (tatrOpPtr.p->operation == ZSCAN_OP) {
- tatrOpPtr.p->transId1 = signal->theData[2];
- tatrOpPtr.p->transId2 = signal->theData[3];
- } else {
- jam();
- signal->theData[0] = cminusOne;
- signal->theData[1] = ZTO_OP_STATE_ERROR;
- }//if
- return;
-}//Dbacc::execACC_TO_REQ()
-
-/* --------------------------------------------------------------------------------- */
-/* CONTAINERINFO */
-/* INPUT: */
-/* CI_PAGEIDPTR (PAGE POINTER WHERE CONTAINER RESIDES) */
-/* TCI_PAGEINDEX (INDEX OF CONTAINER, USED TO CALCULATE PAGE INDEX) */
-/* TCI_ISFORWARD (DIRECTION OF CONTAINER FORWARD OR BACKWARD) */
-/* */
-/* OUTPUT: */
-/* TCI_CONTAINERPTR (A POINTER TO THE HEAD OF THE CONTAINER) */
- /* TCI_CONTAINERLEN (LENGTH OF THE CONTAINER) */
-/* TCI_CONTAINERHEAD (THE HEADER OF THE CONTAINER) */
-/* */
-/* DESCRIPTION: THE ADDRESS OF THE CONTAINER WILL BE CALCULATED AND */
-/* ALL INFORMATION ABOUT THE CONTAINER WILL BE READ */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::containerinfo(Signal* signal)
-{
- tciContainerptr = (tciPageindex << ZSHIFT_PLUS) - (tciPageindex << ZSHIFT_MINUS);
- if (tciIsforward == ZTRUE) {
- jam();
- tciContainerptr = tciContainerptr + ZHEAD_SIZE;
- } else {
- jam();
- tciContainerptr = ((tciContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE;
- }//if
- arrGuard(tciContainerptr, 2048);
- tciContainerhead = ciPageidptr.p->word32[tciContainerptr];
- tciContainerlen = tciContainerhead >> 26;
-}//Dbacc::containerinfo()
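-
- /* --------------------------------------------------------------------------------- */
- /* A short descriptive note on the arithmetic above: the shift pair computes */
- /* tciPageindex * ((1 << ZSHIFT_PLUS) - (1 << ZSHIFT_MINUS)), i.e. the page index */
- /* times the distance between the starts of consecutive container buffers. A forward */
- /* container is then addressed from the start of its buffer (offset + ZHEAD_SIZE), a */
- /* backward container from the end of its buffer (offset + ZHEAD_SIZE + ZBUF_SIZE - */
- /* ZCON_HEAD_SIZE), and the container length sits in the top six bits of the header */
- /* word (header >> 26). */
- /* --------------------------------------------------------------------------------- */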
-
-/* --------------------------------------------------------------------------------- */
-/* GET_SCAN_ELEMENT */
-/* INPUT: GSE_PAGEIDPTR */
-/* TGSE_PAGEINDEX */
-/* OUTPUT: TGSE_IS_LOCKED (IF TRESULT /= ZFALSE) */
-/* GSE_PAGEIDPTR */
-/* TGSE_PAGEINDEX */
-/* --------------------------------------------------------------------------------- */
-bool Dbacc::getScanElement(Signal* signal)
-{
- tgseIsforward = ZTRUE;
- NEXTSEARCH_SCAN_LOOP:
- ciPageidptr.i = gsePageidptr.i;
- ciPageidptr.p = gsePageidptr.p;
- tciPageindex = tgsePageindex;
- tciIsforward = tgseIsforward;
- containerinfo(signal);
- sscPageidptr.i = gsePageidptr.i;
- sscPageidptr.p = gsePageidptr.p;
- tsscContainerlen = tciContainerlen;
- tsscContainerptr = tciContainerptr;
- tsscIsforward = tciIsforward;
- if (searchScanContainer(signal)) {
- jam();
- tgseIsLocked = tsscIsLocked;
- tgseElementptr = tsscElementptr;
- tgseContainerptr = tsscContainerptr;
- return true;
- }//if
- if (((tciContainerhead >> 7) & 0x3) != 0) {
- jam();
- nciPageidptr.i = gsePageidptr.i;
- nciPageidptr.p = gsePageidptr.p;
- tnciContainerhead = tciContainerhead;
- tnciContainerptr = tciContainerptr;
- nextcontainerinfo(signal);
- tgsePageindex = tnciPageindex;
- gsePageidptr.i = nciPageidptr.i;
- gsePageidptr.p = nciPageidptr.p;
- tgseIsforward = tnciIsforward;
- goto NEXTSEARCH_SCAN_LOOP;
- }//if
- return false;
-}//Dbacc::getScanElement()
-
-/* --------------------------------------------------------------------------------- */
-/* INIT_SCAN_OP_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initScanOpRec(Signal* signal)
-{
- Uint32 tisoTmp;
- Uint32 tisoLocalPtr;
- Uint32 guard24;
-
- scanPtr.p->scanOpsAllocated++;
-
- operationRecPtr.p->scanRecPtr = scanPtr.i;
- operationRecPtr.p->operation = ZSCAN_OP;
- operationRecPtr.p->transactionstate = ACTIVE;
- operationRecPtr.p->commitDeleteCheckFlag = ZFALSE;
- operationRecPtr.p->lockMode = scanPtr.p->scanLockMode;
- operationRecPtr.p->fid = fragrecptr.p->myfid;
- operationRecPtr.p->fragptr = fragrecptr.i;
- operationRecPtr.p->elementIsDisappeared = ZFALSE;
- operationRecPtr.p->nextParallelQue = RNIL;
- operationRecPtr.p->prevParallelQue = RNIL;
- operationRecPtr.p->nextSerialQue = RNIL;
- operationRecPtr.p->prevSerialQue = RNIL;
- operationRecPtr.p->prevQueOp = RNIL;
- operationRecPtr.p->nextQueOp = RNIL;
- operationRecPtr.p->keyinfoPage = RNIL; // Safety precaution
- operationRecPtr.p->transId1 = scanPtr.p->scanTrid1;
- operationRecPtr.p->transId2 = scanPtr.p->scanTrid2;
- operationRecPtr.p->lockOwner = ZFALSE;
- operationRecPtr.p->dirtyRead = 0;
- operationRecPtr.p->nodeType = 0; // Not a stand-by node
- operationRecPtr.p->elementIsforward = tisoIsforward;
- operationRecPtr.p->elementContainer = tisoContainerptr;
- operationRecPtr.p->elementPointer = tisoElementptr;
- operationRecPtr.p->elementPage = isoPageptr.i;
- operationRecPtr.p->isAccLockReq = ZFALSE;
- operationRecPtr.p->isUndoLogReq = ZFALSE;
- tisoLocalPtr = tisoElementptr + tisoIsforward;
- guard24 = fragrecptr.p->localkeylen - 1;
- for (tisoTmp = 0; tisoTmp <= guard24; tisoTmp++) {
- arrGuard(tisoTmp, 2);
- arrGuard(tisoLocalPtr, 2048);
- operationRecPtr.p->localdata[tisoTmp] = isoPageptr.p->word32[tisoLocalPtr];
- tisoLocalPtr = tisoLocalPtr + tisoIsforward;
- }//for
- arrGuard(tisoLocalPtr, 2048);
- operationRecPtr.p->keydata[0] = isoPageptr.p->word32[tisoLocalPtr];
- operationRecPtr.p->tupkeylen = fragrecptr.p->keyLength;
- operationRecPtr.p->xfrmtupkeylen = 0; // not used
-}//Dbacc::initScanOpRec()
-
-/* --------------------------------------------------------------------------------- */
-/* NEXTCONTAINERINFO */
-/*       DESCRIPTION: THE CONTAINER HEAD WILL BE CHECKED TO CALCULATE INFORMATION   */
-/*                    ABOUT THE NEXT CONTAINER IN THE BUCKET.                       */
-/* INPUT: TNCI_CONTAINERHEAD */
-/* NCI_PAGEIDPTR */
-/* TNCI_CONTAINERPTR */
-/* OUTPUT: */
-/* TNCI_PAGEINDEX (INDEX FROM WHICH PAGE INDEX CAN BE CALCULATED). */
-/*         TNCI_ISFORWARD (IS THE NEXT CONTAINER FORWARD (+1) OR BACKWARD (-1))     */
-/* NCI_PAGEIDPTR (PAGE REFERENCE OF NEXT CONTAINER) */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::nextcontainerinfo(Signal* signal)
-{
- tnciNextSamePage = (tnciContainerhead >> 9) & 0x1; /* CHECK BIT FOR CHECKING WHERE */
- /* THE NEXT CONTAINER IS IN THE SAME PAGE */
- tnciPageindex = tnciContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */
- if (((tnciContainerhead >> 7) & 3) == ZLEFT) {
- jam();
- tnciIsforward = ZTRUE;
- } else {
- jam();
- tnciIsforward = cminusOne;
- }//if
- if (tnciNextSamePage == ZFALSE) {
- jam();
- /* NEXT CONTAINER IS IN AN OVERFLOW PAGE */
- arrGuard(tnciContainerptr + 1, 2048);
- tnciTmp = nciPageidptr.p->word32[tnciContainerptr + 1];
- nciOverflowrangeptr.i = fragrecptr.p->overflowdir;
- ptrCheckGuard(nciOverflowrangeptr, cdirrangesize, dirRange);
- arrGuard((tnciTmp >> 8), 256);
- nciOverflowDirptr.i = nciOverflowrangeptr.p->dirArray[tnciTmp >> 8];
- ptrCheckGuard(nciOverflowDirptr, cdirarraysize, directoryarray);
- nciPageidptr.i = nciOverflowDirptr.p->pagep[tnciTmp & 0xff];
- ptrCheckGuard(nciPageidptr, cpagesize, page8);
- }//if
-}//Dbacc::nextcontainerinfo()
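For readability, here is a minimal self-contained sketch of how a container header word can be decoded, based only on the shifts and masks used in containerinfo() and nextcontainerinfo() above; the struct and helper names are hypothetical.

#include <cstdint>

// Illustrative decode of a Dbacc container header word (Uint32 in the block).
// Bit positions are taken from the expressions above: length in the top 6
// bits, a 2-bit next-container indicator at bits 7-8, a same-page flag at
// bit 9 and a 7-bit next page index in the low bits.
struct ContainerHeaderInfo {
  uint32_t length;         // tciContainerhead >> 26
  uint32_t nextIndicator;  // (head >> 7) & 0x3, zero means no next container
  bool     nextSamePage;   // (head >> 9) & 0x1
  uint32_t nextPageIndex;  // head & 0x7f
};

inline ContainerHeaderInfo decodeContainerHeader(uint32_t head)
{
  ContainerHeaderInfo info;
  info.length        = head >> 26;
  info.nextIndicator = (head >> 7) & 0x3;
  info.nextSamePage  = ((head >> 9) & 0x1) != 0;
  info.nextPageIndex = head & 0x7f;
  return info;
}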
-
-/* --------------------------------------------------------------------------------- */
-/* PUT_ACTIVE_SCAN_OP */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::putActiveScanOp(Signal* signal)
-{
- OperationrecPtr pasOperationRecPtr;
- pasOperationRecPtr.i = scanPtr.p->scanFirstActiveOp;
- if (pasOperationRecPtr.i != RNIL) {
- jam();
- ptrCheckGuard(pasOperationRecPtr, coprecsize, operationrec);
- pasOperationRecPtr.p->prevOp = operationRecPtr.i;
- }//if
- operationRecPtr.p->nextOp = pasOperationRecPtr.i;
- operationRecPtr.p->prevOp = RNIL;
- scanPtr.p->scanFirstActiveOp = operationRecPtr.i;
-}//Dbacc::putActiveScanOp()
-
-/**
- * putOpScanLockQueue
- *
- * Description: Put an operation in the doubly linked
- * lock list on a scan record. The list is used to
- * keep track of which operations belonging
- * to the scan are put in serial lock list of another
- * operation
- *
- * @note Use takeOutScanLockQueue to remove an operation
- * from the list
- *
- */
-void Dbacc::putOpScanLockQue()
-{
-
-#ifdef VM_TRACE
- // DEBUG CODE
- // Check that there are as many operations in the lockqueue as
- // scanLockHeld indicates
- OperationrecPtr tmpOp;
- int numLockedOpsBefore = 0;
- tmpOp.i = scanPtr.p->scanFirstLockedOp;
- while(tmpOp.i != RNIL){
- numLockedOpsBefore++;
- ptrCheckGuard(tmpOp, coprecsize, operationrec);
- if (tmpOp.p->nextOp == RNIL)
- ndbrequire(tmpOp.i == scanPtr.p->scanLastLockedOp);
- tmpOp.i = tmpOp.p->nextOp;
- }
- ndbrequire(numLockedOpsBefore==scanPtr.p->scanLockHeld);
-#endif
-
- OperationrecPtr pslOperationRecPtr;
- ScanRec theScanRec;
- theScanRec = *scanPtr.p;
-
- pslOperationRecPtr.i = scanPtr.p->scanLastLockedOp;
- operationRecPtr.p->prevOp = pslOperationRecPtr.i;
- operationRecPtr.p->nextOp = RNIL;
- if (pslOperationRecPtr.i != RNIL) {
- jam();
- ptrCheckGuard(pslOperationRecPtr, coprecsize, operationrec);
- pslOperationRecPtr.p->nextOp = operationRecPtr.i;
- } else {
- jam();
- scanPtr.p->scanFirstLockedOp = operationRecPtr.i;
- }//if
- scanPtr.p->scanLastLockedOp = operationRecPtr.i;
- scanPtr.p->scanLockHeld++;
-
-}//Dbacc::putOpScanLockQue()
-
-/* --------------------------------------------------------------------------------- */
-/* PUT_READY_SCAN_QUEUE */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::putReadyScanQueue(Signal* signal, Uint32 scanRecIndex)
-{
- OperationrecPtr prsOperationRecPtr;
- ScanRecPtr TscanPtr;
-
- TscanPtr.i = scanRecIndex;
- ptrCheckGuard(TscanPtr, cscanRecSize, scanRec);
-
- prsOperationRecPtr.i = TscanPtr.p->scanLastQueuedOp;
- operationRecPtr.p->prevOp = prsOperationRecPtr.i;
- operationRecPtr.p->nextOp = RNIL;
- TscanPtr.p->scanLastQueuedOp = operationRecPtr.i;
- if (prsOperationRecPtr.i != RNIL) {
- jam();
- ptrCheckGuard(prsOperationRecPtr, coprecsize, operationrec);
- prsOperationRecPtr.p->nextOp = operationRecPtr.i;
- } else {
- jam();
- TscanPtr.p->scanFirstQueuedOp = operationRecPtr.i;
- }//if
-}//Dbacc::putReadyScanQueue()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_SCAN_BUCKET */
-// Input:
-// rsbPageidptr.i Index to page where buckets starts
-// rsbPageidptr.p Pointer to page where bucket starts
-// trsbPageindex Page index of starting container in bucket
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseScanBucket(Signal* signal)
-{
- Uint32 trsbIsforward;
-
- trsbIsforward = ZTRUE;
- NEXTRELEASESCANLOOP:
- ciPageidptr.i = rsbPageidptr.i;
- ciPageidptr.p = rsbPageidptr.p;
- tciPageindex = trsbPageindex;
- tciIsforward = trsbIsforward;
- containerinfo(signal);
- rscPageidptr.i = rsbPageidptr.i;
- rscPageidptr.p = rsbPageidptr.p;
- trscContainerlen = tciContainerlen;
- trscContainerptr = tciContainerptr;
- trscIsforward = trsbIsforward;
- releaseScanContainer(signal);
- if (((tciContainerhead >> 7) & 0x3) != 0) {
- jam();
- nciPageidptr.i = rsbPageidptr.i;
- nciPageidptr.p = rsbPageidptr.p;
- tnciContainerhead = tciContainerhead;
- tnciContainerptr = tciContainerptr;
- nextcontainerinfo(signal);
- rsbPageidptr.i = nciPageidptr.i;
- rsbPageidptr.p = nciPageidptr.p;
- trsbPageindex = tnciPageindex;
- trsbIsforward = tnciIsforward;
- goto NEXTRELEASESCANLOOP;
- }//if
-}//Dbacc::releaseScanBucket()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_SCAN_CONTAINER */
-/* INPUT: TRSC_CONTAINERLEN */
-/* RSC_PAGEIDPTR */
-/* TRSC_CONTAINERPTR */
-/* TRSC_ISFORWARD */
-/* SCAN_PTR */
-/* */
-/*               DESCRIPTION: SEARCHES A CONTAINER AND RESETS THE SCAN BIT OF THE   */
-/*               ELEMENTS OF THE CONTAINER.                                         */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseScanContainer(Signal* signal)
-{
- OperationrecPtr rscOperPtr;
- Uint32 trscElemStep;
- Uint32 trscElementptr;
- Uint32 trscElemlens;
- Uint32 trscElemlen;
-
- if (trscContainerlen < 4) {
- if (trscContainerlen != ZCON_HEAD_SIZE) {
- jam();
- sendSystemerror(signal);
- }//if
- return; /* 2 IS THE MINIMUM SIZE OF THE ELEMENT */
- }//if
- trscElemlens = trscContainerlen - ZCON_HEAD_SIZE;
- trscElemlen = fragrecptr.p->elementLength;
- if (trscIsforward == 1) {
- jam();
- trscElementptr = trscContainerptr + ZCON_HEAD_SIZE;
- trscElemStep = trscElemlen;
- } else {
- jam();
- trscElementptr = trscContainerptr - 1;
- trscElemStep = 0 - trscElemlen;
- }//if
- do {
- arrGuard(trscElementptr, 2048);
- const Uint32 eh = rscPageidptr.p->word32[trscElementptr];
- const Uint32 scanMask = scanPtr.p->scanMask;
- if (ElementHeader::getUnlocked(eh)) {
- jam();
- const Uint32 tmp = ElementHeader::clearScanBit(eh, scanMask);
- dbgWord32(rscPageidptr, trscElementptr, tmp);
- rscPageidptr.p->word32[trscElementptr] = tmp;
- } else {
- jam();
- rscOperPtr.i = ElementHeader::getOpPtrI(eh);
- ptrCheckGuard(rscOperPtr, coprecsize, operationrec);
- rscOperPtr.p->scanBits &= ~scanMask;
- }//if
- trscElemlens = trscElemlens - trscElemlen;
- trscElementptr = trscElementptr + trscElemStep;
- } while (trscElemlens > 1);
- if (trscElemlens != 0) {
- jam();
- sendSystemerror(signal);
- }//if
-}//Dbacc::releaseScanContainer()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_SCAN_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseScanRec(Signal* signal)
-{
- // Check that all ops this scan has allocated have been
- // released
- ndbrequire(scanPtr.p->scanOpsAllocated==0);
-
- // Check that all locks this scan might have aquired
- // have been properly released
- ndbrequire(scanPtr.p->scanLockHeld == 0);
- ndbrequire(scanPtr.p->scanFirstLockedOp == RNIL);
- ndbrequire(scanPtr.p->scanLastLockedOp == RNIL);
-
- // Check that all active operations have been
- // properly released
- ndbrequire(scanPtr.p->scanFirstActiveOp == RNIL);
-
- // Check that all queued operations have been
- // properly released
- ndbrequire(scanPtr.p->scanFirstQueuedOp == RNIL);
- ndbrequire(scanPtr.p->scanLastQueuedOp == RNIL);
-
- // Put scan record in free list
- scanPtr.p->scanNextfreerec = cfirstFreeScanRec;
- scanPtr.p->scanState = ScanRec::SCAN_DISCONNECT;
- cfirstFreeScanRec = scanPtr.i;
-
-}//Dbacc::releaseScanRec()
-
-/* --------------------------------------------------------------------------------- */
-/* SEARCH_SCAN_CONTAINER */
-/* INPUT: TSSC_CONTAINERLEN */
-/* TSSC_CONTAINERPTR */
-/* TSSC_ISFORWARD */
-/* SSC_PAGEIDPTR */
-/* SCAN_PTR */
-/* OUTPUT: TSSC_IS_LOCKED */
-/* */
-/* DESCRIPTION: SEARCH IN A CONTAINER TO FIND THE NEXT SCAN ELEMENT. */
-/* TO DO THIS THE SCAN BIT OF THE ELEMENT HEADER IS CHECKED. IF */
-/* THIS BIT IS ZERO, IT IS SET TO ONE AND THE ELEMENT IS RETURNED.*/
-/* --------------------------------------------------------------------------------- */
-bool Dbacc::searchScanContainer(Signal* signal)
-{
- OperationrecPtr sscOperPtr;
- Uint32 tsscScanBits;
- Uint32 tsscElemlens;
- Uint32 tsscElemlen;
- Uint32 tsscElemStep;
-
- if (tsscContainerlen < 4) {
- jam();
- return false; /* 2 IS THE MINIMUM SIZE OF THE ELEMENT */
- }//if
- tsscElemlens = tsscContainerlen - ZCON_HEAD_SIZE;
- tsscElemlen = fragrecptr.p->elementLength;
- /* LENGTH OF THE ELEMENT */
- if (tsscIsforward == 1) {
- jam();
- tsscElementptr = tsscContainerptr + ZCON_HEAD_SIZE;
- tsscElemStep = tsscElemlen;
- } else {
- jam();
- tsscElementptr = tsscContainerptr - 1;
- tsscElemStep = 0 - tsscElemlen;
- }//if
- SCANELEMENTLOOP001:
- arrGuard(tsscElementptr, 2048);
- const Uint32 eh = sscPageidptr.p->word32[tsscElementptr];
- tsscIsLocked = ElementHeader::getLocked(eh);
- if (!tsscIsLocked){
- jam();
- tsscScanBits = ElementHeader::getScanBits(eh);
- if ((scanPtr.p->scanMask & tsscScanBits) == 0) {
- jam();
- const Uint32 tmp = ElementHeader::setScanBit(eh, scanPtr.p->scanMask);
- dbgWord32(sscPageidptr, tsscElementptr, tmp);
- sscPageidptr.p->word32[tsscElementptr] = tmp;
- return true;
- }//if
- } else {
- jam();
- sscOperPtr.i = ElementHeader::getOpPtrI(eh);
- ptrCheckGuard(sscOperPtr, coprecsize, operationrec);
- if ((sscOperPtr.p->scanBits & scanPtr.p->scanMask) == 0) {
- jam();
- sscOperPtr.p->scanBits |= scanPtr.p->scanMask;
- return true;
- }//if
- }//if
- /* THE ELEMENT IS ALREADY SENT. */
- /* SEARCH FOR NEXT ONE */
- tsscElemlens = tsscElemlens - tsscElemlen;
- if (tsscElemlens > 1) {
- jam();
- tsscElementptr = tsscElementptr + tsscElemStep;
- goto SCANELEMENTLOOP001;
- }//if
- return false;
-}//Dbacc::searchScanContainer()
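The scan-bit bookkeeping that searchScanContainer() and releaseScanContainer() implement can be summarised by a small sketch (hypothetical helpers, not the block's ElementHeader interface): each concurrent scan owns one bit of a mask, a set bit means the element has already been handed to that scan, and releasing the bucket clears the bit again.

#include <cstdint>

// Illustrative scan-mask handling; scanMask holds exactly one bit per scan,
// as scanPtr.p->scanMask does above.
inline bool alreadyScanned(uint32_t scanBits, uint32_t scanMask)
{
  return (scanBits & scanMask) != 0;   // element already delivered to this scan
}

inline uint32_t markScanned(uint32_t scanBits, uint32_t scanMask)
{
  return scanBits | scanMask;          // set when the element is returned
}

inline uint32_t clearScanned(uint32_t scanBits, uint32_t scanMask)
{
  return scanBits & ~scanMask;         // cleared when the bucket is released
}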
-
-/* --------------------------------------------------------------------------------- */
-/* SEND THE RESPONSE NEXT_SCANCONF AND POSSIBLE KEYINFO SIGNALS AS WELL. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::sendNextScanConf(Signal* signal)
-{
- scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter;
- Uint32 blockNo = refToBlock(scanPtr.p->scanUserblockref);
- jam();
- /** ---------------------------------------------------------------------
-   * LQH WILL NOT HAVE ANY USE OF THE TUPLE KEY LENGTH IN THIS CASE AND
-   * SO WE DO NOT PROVIDE IT; THOSE VALUES ARE THEREFORE UNDEFINED.
- * ---------------------------------------------------------------------- */
- signal->theData[0] = scanPtr.p->scanUserptr;
- signal->theData[1] = operationRecPtr.i;
- signal->theData[2] = operationRecPtr.p->fid;
- signal->theData[3] = operationRecPtr.p->localdata[0];
- signal->theData[4] = operationRecPtr.p->localdata[1];
- signal->theData[5] = fragrecptr.p->localkeylen;
- EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 6);
- return;
-}//Dbacc::sendNextScanConf()
-
-/*---------------------------------------------------------------------------
- * sendScanHbRep
- * Description: Uses Dispatcher::execute() to send a heartbeat to DBTC
- *     via DBLQH, telling it that the scan is still alive. We reuse
- *     sendScanHbRep() in DBLQH; this needs to be done here in DBACC
- *     since it can take a while before LQH receives an answer from
- *     ACC the normal way.
- *--------------------------------------------------------------------------*/
-void Dbacc::sendScanHbRep(Signal* signal, Uint32 scanPtrIndex)
-{
- scanPtr.i = scanPtrIndex;
- ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
-
-  // If the timer status is on we continue the loop with a new CONTINUEB after
-  // 100 ms, else the loop stops and we will not send a new CONTINUEB
- if (scanPtr.p->scanTimer != 0){
- if (scanPtr.p->scanTimer == scanPtr.p->scanContinuebCounter){
- jam();
- ndbrequire(scanPtr.p->scanState != ScanRec::SCAN_DISCONNECT);
-
- signal->theData[0] = scanPtr.p->scanUserptr;
- signal->theData[1] = scanPtr.p->scanTrid1;
- signal->theData[2] = scanPtr.p->scanTrid2;
- EXECUTE_DIRECT(DBLQH, GSN_SCAN_HBREP, signal, 3);
- jamEntry();
- }//if
- scanPtr.p->scanContinuebCounter++;
- signal->theData[0] = ZSEND_SCAN_HBREP;
- signal->theData[1] = scanPtr.i;
- sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 100, 2);
- } else {
- jam();
- scanPtr.p->scanContinuebCounter = 0;
- }//if
-}//Dbacc::sendScanHbRep()
-
-/* --------------------------------------------------------------------------------- */
-/* SETLOCK */
-/* DESCRIPTION:SETS LOCK ON AN ELEMENT. INFORMATION ABOUT THE ELEMENT IS */
-/* SAVED IN THE ELEMENT HEAD.A COPY OF THIS INFORMATION WILL */
-/* BE PUT IN THE OPERATION RECORD. A FIELD IN THE HEADER OF */
-/* THE ELEMENT POINTS TO THE OPERATION RECORD. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::setlock(Signal* signal)
-{
- Uint32 tselTmp1;
-
- arrGuard(tslElementptr, 2048);
- tselTmp1 = slPageidptr.p->word32[tslElementptr];
- operationRecPtr.p->scanBits = ElementHeader::getScanBits(tselTmp1);
- operationRecPtr.p->hashvaluePart = ElementHeader::getHashValuePart(tselTmp1);
-
- tselTmp1 = ElementHeader::setLocked(operationRecPtr.i);
- dbgWord32(slPageidptr, tslElementptr, tselTmp1);
- slPageidptr.p->word32[tslElementptr] = tselTmp1;
-}//Dbacc::setlock()
-
-/* --------------------------------------------------------------------------------- */
-/* TAKE_OUT_ACTIVE_SCAN_OP */
-/*       DESCRIPTION: AN ACTIVE SCAN OPERATION BELONGS TO THE ACTIVE LIST OF THE    */
-/*                    SCAN RECORD. THIS SUBROUTINE UPDATES THAT LIST.               */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::takeOutActiveScanOp(Signal* signal)
-{
- OperationrecPtr tasOperationRecPtr;
-
- if (operationRecPtr.p->prevOp != RNIL) {
- jam();
- tasOperationRecPtr.i = operationRecPtr.p->prevOp;
- ptrCheckGuard(tasOperationRecPtr, coprecsize, operationrec);
- tasOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp;
- } else {
- jam();
- scanPtr.p->scanFirstActiveOp = operationRecPtr.p->nextOp;
- }//if
- if (operationRecPtr.p->nextOp != RNIL) {
- jam();
- tasOperationRecPtr.i = operationRecPtr.p->nextOp;
- ptrCheckGuard(tasOperationRecPtr, coprecsize, operationrec);
- tasOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp;
- }//if
-}//Dbacc::takeOutActiveScanOp()
-
-/**
- * takeOutScanLockQueue
- *
- * Description: Take out an operation from the doubly linked
- * lock list on a scan record.
- *
- * @note Use putOpScanLockQue to insert an operation into
- * the list
- *
- */
-void Dbacc::takeOutScanLockQueue(Uint32 scanRecIndex)
-{
- OperationrecPtr tslOperationRecPtr;
- ScanRecPtr TscanPtr;
-
- TscanPtr.i = scanRecIndex;
- ptrCheckGuard(TscanPtr, cscanRecSize, scanRec);
-
- if (operationRecPtr.p->prevOp != RNIL) {
- jam();
- tslOperationRecPtr.i = operationRecPtr.p->prevOp;
- ptrCheckGuard(tslOperationRecPtr, coprecsize, operationrec);
- tslOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp;
- } else {
- jam();
-    // Check that first is pointing at the operation to take out
- ndbrequire(TscanPtr.p->scanFirstLockedOp==operationRecPtr.i);
- TscanPtr.p->scanFirstLockedOp = operationRecPtr.p->nextOp;
- }//if
- if (operationRecPtr.p->nextOp != RNIL) {
- jam();
- tslOperationRecPtr.i = operationRecPtr.p->nextOp;
- ptrCheckGuard(tslOperationRecPtr, coprecsize, operationrec);
- tslOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp;
- } else {
- jam();
-    // Check that last is pointing at the operation to take out
- ndbrequire(TscanPtr.p->scanLastLockedOp==operationRecPtr.i);
- TscanPtr.p->scanLastLockedOp = operationRecPtr.p->prevOp;
- }//if
- TscanPtr.p->scanLockHeld--;
-
-#ifdef VM_TRACE
- // DEBUG CODE
- // Check that there are as many operations in the lockqueue as
- // scanLockHeld indicates
- OperationrecPtr tmpOp;
- int numLockedOps = 0;
- tmpOp.i = TscanPtr.p->scanFirstLockedOp;
- while(tmpOp.i != RNIL){
- numLockedOps++;
- ptrCheckGuard(tmpOp, coprecsize, operationrec);
- if (tmpOp.p->nextOp == RNIL)
- ndbrequire(tmpOp.i == TscanPtr.p->scanLastLockedOp);
- tmpOp.i = tmpOp.p->nextOp;
- }
- ndbrequire(numLockedOps==TscanPtr.p->scanLockHeld);
-#endif
-}//Dbacc::takeOutScanLockQueue()
-
-/* --------------------------------------------------------------------------------- */
-/* TAKE_OUT_READY_SCAN_QUEUE */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::takeOutReadyScanQueue(Signal* signal)
-{
- OperationrecPtr trsOperationRecPtr;
-
- if (operationRecPtr.p->prevOp != RNIL) {
- jam();
- trsOperationRecPtr.i = operationRecPtr.p->prevOp;
- ptrCheckGuard(trsOperationRecPtr, coprecsize, operationrec);
- trsOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp;
- } else {
- jam();
- scanPtr.p->scanFirstQueuedOp = operationRecPtr.p->nextOp;
- }//if
- if (operationRecPtr.p->nextOp != RNIL) {
- jam();
- trsOperationRecPtr.i = operationRecPtr.p->nextOp;
- ptrCheckGuard(trsOperationRecPtr, coprecsize, operationrec);
- trsOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp;
- } else {
- jam();
- scanPtr.p->scanLastQueuedOp = operationRecPtr.p->nextOp;
- }//if
-}//Dbacc::takeOutReadyScanQueue()
-
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-/* */
-/* END OF SCAN MODULE */
-/* */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-
-bool Dbacc::getrootfragmentrec(Signal* signal, RootfragmentrecPtr& rootPtr, Uint32 fid)
-{
- for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
- jam();
- if (tabptr.p->fragholder[i] == fid) {
- jam();
- rootPtr.i = tabptr.p->fragptrholder[i];
- ptrCheckGuard(rootPtr, crootfragmentsize, rootfragmentrec);
- return true;
- }//if
- }//for
- return false;
-}//Dbacc::getrootfragmentrec()
-
-/* --------------------------------------------------------------------------------- */
-/* INIT_FS_OP_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initFsOpRec(Signal* signal)
-{
- fsOpptr.p->fsOpfragrecPtr = fragrecptr.i;
- fsOpptr.p->fsConptr = fsConnectptr.i;
-}//Dbacc::initFsOpRec()
-
-/* --------------------------------------------------------------------------------- */
-/* INIT_LCP_CONN_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initLcpConnRec(Signal* signal)
-{
- lcpConnectptr.p->lcpUserblockref = tuserblockref;
- lcpConnectptr.p->lcpUserptr = tuserptr;
-  lcpConnectptr.p->noOfLcpConf = 0;	/* NO OF RETURNED CONF SIGNALS */
- lcpConnectptr.p->syncUndopageState = WAIT_NOTHING;
-}//Dbacc::initLcpConnRec()
-
-/* --------------------------------------------------------------------------------- */
-/* INIT_OVERPAGE */
-/* INPUT. IOP_PAGEPTR, POINTER TO AN OVERFLOW PAGE RECORD */
-/*         DESCRIPTION: THE CONTAINERS AND FREE LISTS OF THE PAGE GET THEIR INITIAL */
-/*               VALUES ACCORDING TO LH3 AND THE PAGE STRUCTURE DESCRIPTION OF THE  */
-/*               NDBACC BLOCK.                                                      */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initOverpage(Signal* signal)
-{
- Uint32 tiopTmp;
- Uint32 tiopPrevFree;
- Uint32 tiopNextFree;
-
- for (tiopIndex = 0; tiopIndex <= 2047; tiopIndex++) {
- iopPageptr.p->word32[tiopIndex] = 0;
- }//for
- iopPageptr.p->word32[ZPOS_OVERFLOWREC] = iopOverflowRecPtr.i;
- iopPageptr.p->word32[ZPOS_CHECKSUM] = 0;
- iopPageptr.p->word32[ZPOS_PAGE_ID] = tiopPageId;
- iopPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = 0;
- tiopTmp = ZEMPTYLIST;
- tiopTmp = (tiopTmp << 16) + (tiopTmp << 23);
- iopPageptr.p->word32[ZPOS_EMPTY_LIST] = tiopTmp + (1 << ZPOS_PAGE_TYPE_BIT);
- /* --------------------------------------------------------------------------------- */
- /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */
- /* --------------------------------------------------------------------------------- */
- tiopIndex = ZHEAD_SIZE + 1;
- iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST;
- for (tiopPrevFree = 0; tiopPrevFree <= ZEMPTYLIST - 2; tiopPrevFree++) {
- tiopIndex = tiopIndex + ZBUF_SIZE;
- iopPageptr.p->word32[tiopIndex] = tiopPrevFree;
- }//for
- /* --------------------------------------------------------------------------------- */
- /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */
- /* --------------------------------------------------------------------------------- */
- tiopIndex = ZHEAD_SIZE;
- for (tiopNextFree = 1; tiopNextFree <= ZEMPTYLIST - 1; tiopNextFree++) {
- iopPageptr.p->word32[tiopIndex] = tiopNextFree;
- tiopIndex = tiopIndex + ZBUF_SIZE;
- }//for
- iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST; /* LEFT_LIST IS UPDATED */
- /* --------------------------------------------------------------------------------- */
- /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */
- /* --------------------------------------------------------------------------------- */
- tiopIndex = (ZBUF_SIZE + ZHEAD_SIZE) - 1;
- iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST;
- for (tiopPrevFree = 0; tiopPrevFree <= ZEMPTYLIST - 2; tiopPrevFree++) {
- tiopIndex = tiopIndex + ZBUF_SIZE;
- iopPageptr.p->word32[tiopIndex] = tiopPrevFree;
- }//for
- /* --------------------------------------------------------------------------------- */
- /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */
- /* --------------------------------------------------------------------------------- */
- tiopIndex = (ZBUF_SIZE + ZHEAD_SIZE) - 2;
- for (tiopNextFree = 1; tiopNextFree <= ZEMPTYLIST - 1; tiopNextFree++) {
- iopPageptr.p->word32[tiopIndex] = tiopNextFree;
- tiopIndex = tiopIndex + ZBUF_SIZE;
- }//for
- iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST; /* RIGHT_LIST IS UPDATED */
-}//Dbacc::initOverpage()
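As a reading aid for the initialisation loops above, here is a minimal sketch of how the on-page free-container list can be walked. The constant values below are assumptions chosen only to make the example self-contained; the layout itself (next link at word ZHEAD_SIZE + n*ZBUF_SIZE of container n, prev link one word later, ZEMPTYLIST as the end marker) follows the loops in initOverpage().

#include <cstdint>

// Assumed stand-ins for the block's constants; only the layout matters here.
static const uint32_t ZHEAD_SIZE = 32;   // assumed page header size in words
static const uint32_t ZBUF_SIZE  = 28;   // assumed container buffer size in words
static const uint32_t ZEMPTYLIST = 72;   // assumed end-of-list marker

// Count the free left-hand containers on a page by following the next links
// written by the initialisation loops above.
inline uint32_t countFreeLeftContainers(const uint32_t* word32, uint32_t firstFree)
{
  uint32_t count = 0;
  for (uint32_t idx = firstFree; idx != ZEMPTYLIST;
       idx = word32[ZHEAD_SIZE + idx * ZBUF_SIZE]) {  // next-free link of container idx
    count++;
  }
  return count;
}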
-
-/* --------------------------------------------------------------------------------- */
-/* INIT_PAGE */
-/* INPUT. INP_PAGEPTR, POINTER TO A PAGE RECORD */
-/*         DESCRIPTION: THE CONTAINERS AND FREE LISTS OF THE PAGE GET THEIR INITIAL */
-/*               VALUES ACCORDING TO LH3 AND THE PAGE STRUCTURE DESCRIPTION OF THE  */
-/*               NDBACC BLOCK.                                                      */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initPage(Signal* signal)
-{
- Uint32 tinpTmp1;
- Uint32 tinpIndex;
- Uint32 tinpTmp;
- Uint32 tinpPrevFree;
- Uint32 tinpNextFree;
-
- for (tiopIndex = 0; tiopIndex <= 2047; tiopIndex++) {
- inpPageptr.p->word32[tiopIndex] = 0;
- }//for
- /* --------------------------------------------------------------------------------- */
- /* SET PAGE ID FOR USE OF CHECKPOINTER. */
- /* PREPARE CONTAINER HEADERS INDICATING EMPTY CONTAINERS WITHOUT NEXT. */
- /* --------------------------------------------------------------------------------- */
- inpPageptr.p->word32[ZPOS_PAGE_ID] = tipPageId;
- tinpTmp1 = ZCON_HEAD_SIZE;
- tinpTmp1 = tinpTmp1 << 26;
- /* --------------------------------------------------------------------------------- */
- /* INITIALISE ZNO_CONTAINERS PREDEFINED HEADERS ON LEFT SIZE. */
- /* --------------------------------------------------------------------------------- */
- tinpIndex = ZHEAD_SIZE;
- for (tinpTmp = 0; tinpTmp <= ZNO_CONTAINERS - 1; tinpTmp++) {
- inpPageptr.p->word32[tinpIndex] = tinpTmp1;
- tinpIndex = tinpIndex + ZBUF_SIZE;
- }//for
- /* WORD32(ZPOS_EMPTY_LIST) DATA STRUCTURE:*/
- /*--------------------------------------- */
- /*| PAGE TYPE|LEFT FREE|RIGHT FREE */
- /*| 1 | LIST | LIST */
- /*| BIT | 7 BITS | 7 BITS */
- /*--------------------------------------- */
- /* --------------------------------------------------------------------------------- */
- /* INITIALISE FIRST POINTER TO DOUBLY LINKED LIST OF FREE CONTAINERS. */
- /* INITIALISE EMPTY LISTS OF USED CONTAINERS. */
- /* INITIALISE LEFT FREE LIST TO 64 AND RIGHT FREE LIST TO ZERO. */
- /* ALSO INITIALISE PAGE TYPE TO NOT OVERFLOW PAGE. */
- /* --------------------------------------------------------------------------------- */
- tinpTmp = ZEMPTYLIST;
- tinpTmp = (tinpTmp << 16) + (tinpTmp << 23);
- tinpTmp = tinpTmp + (ZNO_CONTAINERS << 7);
- inpPageptr.p->word32[ZPOS_EMPTY_LIST] = tinpTmp;
- /* --------------------------------------------------------------------------------- */
- /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */
- /* --------------------------------------------------------------------------------- */
- tinpIndex = (ZHEAD_SIZE + ZBUF_SIZE) - 1;
- inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST;
- for (tinpPrevFree = 0; tinpPrevFree <= ZEMPTYLIST - 2; tinpPrevFree++) {
- tinpIndex = tinpIndex + ZBUF_SIZE;
- inpPageptr.p->word32[tinpIndex] = tinpPrevFree;
- }//for
- /* --------------------------------------------------------------------------------- */
- /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */
- /* --------------------------------------------------------------------------------- */
- tinpIndex = (ZHEAD_SIZE + ZBUF_SIZE) - 2;
- for (tinpNextFree = 1; tinpNextFree <= ZEMPTYLIST - 1; tinpNextFree++) {
- inpPageptr.p->word32[tinpIndex] = tinpNextFree;
- tinpIndex = tinpIndex + ZBUF_SIZE;
- }//for
- inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST;
- /* --------------------------------------------------------------------------------- */
- /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */
- /* THE FIRST ZNO_CONTAINERS ARE NOT PUT INTO FREE LIST SINCE THEY ARE */
- /* PREDEFINED AS OCCUPIED. */
- /* --------------------------------------------------------------------------------- */
- tinpIndex = (ZNO_CONTAINERS * ZBUF_SIZE) + ZHEAD_SIZE;
- for (tinpNextFree = ZNO_CONTAINERS + 1; tinpNextFree <= ZEMPTYLIST - 1; tinpNextFree++) {
- inpPageptr.p->word32[tinpIndex] = tinpNextFree;
- tinpIndex = tinpIndex + ZBUF_SIZE;
- }//for
- inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST;
- /* --------------------------------------------------------------------------------- */
- /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */
- /* THE FIRST ZNO_CONTAINERS ARE NOT PUT INTO FREE LIST SINCE THEY ARE */
- /* PREDEFINED AS OCCUPIED. */
- /* --------------------------------------------------------------------------------- */
- tinpIndex = ((ZNO_CONTAINERS * ZBUF_SIZE) + ZHEAD_SIZE) + 1;
- inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST;
- for (tinpPrevFree = ZNO_CONTAINERS; tinpPrevFree <= ZEMPTYLIST - 2; tinpPrevFree++) {
- tinpIndex = tinpIndex + ZBUF_SIZE;
- inpPageptr.p->word32[tinpIndex] = tinpPrevFree;
- }//for
- /* --------------------------------------------------------------------------------- */
- /* INITIALISE HEADER POSITIONS NOT CURRENTLY USED AND ENSURE USE OF OVERFLOW */
- /* RECORD POINTER ON THIS PAGE LEADS TO ERROR. */
- /* --------------------------------------------------------------------------------- */
- inpPageptr.p->word32[ZPOS_CHECKSUM] = 0;
- inpPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = 0;
- inpPageptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
-}//Dbacc::initPage()
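A compact reading of the ZPOS_EMPTY_LIST word documented above: the right free-list head occupies the low 7 bits, the left free-list head bits 7-13, the page-type flag sits above them, and the two fields written at shifts 16 and 23 are the (initially empty) lists of used containers. Below is a hedged sketch with hypothetical helper names; the exact bit position of the page-type flag is an assumption.

#include <cstdint>

static const uint32_t ZPOS_PAGE_TYPE_BIT = 14;  // assumed value of the block's constant

inline uint32_t rightFreeListHead(uint32_t emptyListWord)
{
  return emptyListWord & 0x7f;            // right free list head, 7 bits
}

inline uint32_t leftFreeListHead(uint32_t emptyListWord)
{
  return (emptyListWord >> 7) & 0x7f;     // left free list head, 7 bits
}

inline bool isOverflowPage(uint32_t emptyListWord)
{
  // initOverpage() sets this bit, initPage() leaves it cleared
  return ((emptyListWord >> ZPOS_PAGE_TYPE_BIT) & 0x1) != 0;
}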
-
-/* --------------------------------------------------------------------------------- */
-/* PUT_OP_IN_FRAG_WAIT_QUE */
-/*       DESCRIPTION: AN OPERATION WHICH OWNS A LOCK ON AN ELEMENT IS PUT IN A      */
-/*                    LIST OF THE FRAGMENT. THIS LIST IS USED TO STOP AND RESTART   */
-/*                    THE QUEUED OPERATIONS DURING THE CREATE CHECKPOINT PROCESS.   */
-/*                                                                                  */
-/* IF CONTINUEB SIGNALS ARE INTRODUCED AFTER STARTING TO EXECUTE ACCKEYREQ WE MUST  */
-/* PUT THE OPERATION IN THIS LIST BEFORE EXITING, TO ENSURE THAT WE ARE NOT LEFT    */
-/* LOCKED AFTER LQH HAS RECEIVED ALL LCP_HOLDOP'S; OTHERWISE THE LCP WILL NEVER     */
-/* PROCEED. WE ALSO PUT IT INTO THIS LIST WHEN WAITING FOR LONG KEYS. THIS IS       */
-/* ONLY NEEDED IF SIGNALS CAN ENTER BETWEEN THE KEYDATA-CARRYING SIGNALS.           */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::putOpInFragWaitQue(Signal* signal)
-{
- OperationrecPtr tpiwOperRecPtr;
-
- if (operationRecPtr.p->operation != ZSCAN_OP) {
- if (fragrecptr.p->firstWaitInQueOp == RNIL) {
- jam();
- fragrecptr.p->firstWaitInQueOp = operationRecPtr.i;
- } else {
- jam();
- tpiwOperRecPtr.i = fragrecptr.p->lastWaitInQueOp;
- ptrCheckGuard(tpiwOperRecPtr, coprecsize, operationrec);
- tpiwOperRecPtr.p->nextQueOp = operationRecPtr.i;
- }//if
- operationRecPtr.p->opState = WAIT_IN_QUEUE;
- operationRecPtr.p->nextQueOp = RNIL;
- operationRecPtr.p->prevQueOp = fragrecptr.p->lastWaitInQueOp;
- fragrecptr.p->lastWaitInQueOp = operationRecPtr.i;
- }//if
-}//Dbacc::putOpInFragWaitQue()
-
-/* --------------------------------------------------------------------------------- */
-/* PUT_OVERFLOW_REC_IN_FRAG */
-/*       DESCRIPTION: AN OVERFLOW RECORD, WHICH IS USED TO KEEP INFORMATION ABOUT   */
-/*                    AN OVERFLOW PAGE, WILL BE PUT IN A LIST OF OVERFLOW RECORDS   */
-/*                    IN THE FRAGMENT RECORD.                                       */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::putOverflowRecInFrag(Signal* signal)
-{
- OverflowRecordPtr tpifNextOverrecPtr;
- OverflowRecordPtr tpifPrevOverrecPtr;
-
- tpifNextOverrecPtr.i = fragrecptr.p->firstOverflowRec;
- tpifPrevOverrecPtr.i = RNIL;
- while (tpifNextOverrecPtr.i != RNIL) {
- ptrCheckGuard(tpifNextOverrecPtr, coverflowrecsize, overflowRecord);
- if (tpifNextOverrecPtr.p->dirindex < porOverflowRecPtr.p->dirindex) {
- jam();
- /* --------------------------------------------------------------------------------- */
-      /*       PROCEED TO THE NEXT ENTRY IN THE LIST SINCE THIS ENTRY HAD A LOWER PAGE ID.   */
- /* WE WANT TO ENSURE THAT LOWER PAGE ID'S ARE KEPT FULL RATHER THAN THE */
- /* OPPOSITE TO ENSURE THAT HIGH PAGE ID'S CAN BE REMOVED WHEN SHRINKS ARE */
- /* PERFORMED. */
- /* --------------------------------------------------------------------------------- */
- tpifPrevOverrecPtr = tpifNextOverrecPtr;
- tpifNextOverrecPtr.i = tpifNextOverrecPtr.p->nextOverRec;
- } else {
- jam();
- ndbrequire(tpifNextOverrecPtr.p->dirindex != porOverflowRecPtr.p->dirindex);
- /* --------------------------------------------------------------------------------- */
- /* TRYING TO INSERT THE SAME PAGE TWICE. SYSTEM ERROR. */
- /* --------------------------------------------------------------------------------- */
- break;
- }//if
- }//while
- if (tpifNextOverrecPtr.i == RNIL) {
- jam();
- fragrecptr.p->lastOverflowRec = porOverflowRecPtr.i;
- } else {
- jam();
- tpifNextOverrecPtr.p->prevOverRec = porOverflowRecPtr.i;
- }//if
- if (tpifPrevOverrecPtr.i == RNIL) {
- jam();
- fragrecptr.p->firstOverflowRec = porOverflowRecPtr.i;
- } else {
- jam();
- tpifPrevOverrecPtr.p->nextOverRec = porOverflowRecPtr.i;
- }//if
- porOverflowRecPtr.p->prevOverRec = tpifPrevOverrecPtr.i;
- porOverflowRecPtr.p->nextOverRec = tpifNextOverrecPtr.i;
-}//Dbacc::putOverflowRecInFrag()
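The ordering rule described in the comments above (keep the overflow records sorted on ascending directory index so low page ids fill up first and high page ids can be released when the fragment shrinks) is an ordinary sorted insert into a doubly linked list. A minimal generic sketch with hypothetical node and field names:

#include <cstddef>
#include <cstdint>

// Hypothetical node mirroring the dirindex/prev/next fields used above.
struct OverflowNode {
  uint32_t      dirindex;
  OverflowNode* prev;
  OverflowNode* next;
};

// Insert 'node' so the list stays sorted on ascending dirindex.
// 'first' and 'last' play the role of firstOverflowRec/lastOverflowRec.
inline void sortedInsert(OverflowNode*& first, OverflowNode*& last,
                         OverflowNode* node)
{
  OverflowNode* nextNode = first;
  OverflowNode* prevNode = NULL;
  while (nextNode != NULL && nextNode->dirindex < node->dirindex) {
    prevNode = nextNode;                  // keep lower page ids in front
    nextNode = nextNode->next;
  }
  node->prev = prevNode;
  node->next = nextNode;
  if (prevNode != NULL) prevNode->next = node; else first = node;
  if (nextNode != NULL) nextNode->prev = node; else last  = node;
}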
-
-/* --------------------------------------------------------------------------------- */
-/* PUT_REC_IN_FREE_OVERDIR */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::putRecInFreeOverdir(Signal* signal)
-{
- OverflowRecordPtr tpfoNextOverrecPtr;
- OverflowRecordPtr tpfoPrevOverrecPtr;
-
- tpfoNextOverrecPtr.i = fragrecptr.p->firstFreeDirindexRec;
- tpfoPrevOverrecPtr.i = RNIL;
- while (tpfoNextOverrecPtr.i != RNIL) {
- ptrCheckGuard(tpfoNextOverrecPtr, coverflowrecsize, overflowRecord);
- if (tpfoNextOverrecPtr.p->dirindex < priOverflowRecPtr.p->dirindex) {
- jam();
- /* --------------------------------------------------------------------------------- */
-      /*       PROCEED TO THE NEXT ENTRY IN THE LIST SINCE THIS ENTRY HAD A LOWER PAGE ID.   */
- /* WE WANT TO ENSURE THAT LOWER PAGE ID'S ARE KEPT FULL RATHER THAN THE */
- /* OPPOSITE TO ENSURE THAT HIGH PAGE ID'S CAN BE REMOVED WHEN SHRINKS ARE */
- /* PERFORMED. */
- /* --------------------------------------------------------------------------------- */
- tpfoPrevOverrecPtr = tpfoNextOverrecPtr;
- tpfoNextOverrecPtr.i = tpfoNextOverrecPtr.p->nextOverList;
- } else {
- jam();
- ndbrequire(tpfoNextOverrecPtr.p->dirindex != priOverflowRecPtr.p->dirindex);
- /* --------------------------------------------------------------------------------- */
- /* ENSURE WE ARE NOT TRYING TO INSERT THE SAME PAGE TWICE. */
- /* --------------------------------------------------------------------------------- */
- break;
- }//if
- }//while
- if (tpfoNextOverrecPtr.i != RNIL) {
- jam();
- tpfoNextOverrecPtr.p->prevOverList = priOverflowRecPtr.i;
- }//if
- if (tpfoPrevOverrecPtr.i == RNIL) {
- jam();
- fragrecptr.p->firstFreeDirindexRec = priOverflowRecPtr.i;
- } else {
- jam();
- tpfoPrevOverrecPtr.p->nextOverList = priOverflowRecPtr.i;
- }//if
- priOverflowRecPtr.p->prevOverList = tpfoPrevOverrecPtr.i;
- priOverflowRecPtr.p->nextOverList = tpfoNextOverrecPtr.i;
-}//Dbacc::putRecInFreeOverdir()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_DIRECTORY */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseDirectory(Signal* signal)
-{
- ptrCheckGuard(rdDirptr, cdirarraysize, directoryarray);
- rdDirptr.p->pagep[0] = cfirstfreedir;
- cfirstfreedir = rdDirptr.i;
-}//Dbacc::releaseDirectory()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_DIRRANGE */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseDirrange(Signal* signal)
-{
- ptrCheckGuard(rdDirRangePtr, cdirrangesize, dirRange);
- rdDirRangePtr.p->dirArray[0] = cfirstfreeDirrange;
- cfirstfreeDirrange = rdDirRangePtr.i;
-}//Dbacc::releaseDirrange()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_FS_CONN_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseFsConnRec(Signal* signal)
-{
- fsConnectptr.p->fsNext = cfsFirstfreeconnect;
- cfsFirstfreeconnect = fsConnectptr.i;
- fsConnectptr.p->fsState = WAIT_NOTHING;
-}//Dbacc::releaseFsConnRec()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_FS_OP_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseFsOpRec(Signal* signal)
-{
- fsOpptr.p->fsOpnext = cfsFirstfreeop;
- cfsFirstfreeop = fsOpptr.i;
- fsOpptr.p->fsOpstate = WAIT_NOTHING;
-}//Dbacc::releaseFsOpRec()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_LCP_CONNECT_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseLcpConnectRec(Signal* signal)
-{
-  lcpConnectptr.p->lcpstate = LCP_FREE;
-  lcpConnectptr.p->nextLcpConn = cfirstfreelcpConnect;
-  cfirstfreelcpConnect = lcpConnectptr.i;
-}//Dbacc::releaseLcpConnectRec()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE OP RECORD */
-/* PUT A FREE OPERATION IN A FREE LIST OF THE OPERATIONS */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseOpRec(Signal* signal)
-{
-#if 0
- // DEBUG CODE
- // Check that the operation to be released isn't
- // already in the list of free operations
-  // Since this code loops through the entire list of free operations
-  // it is kept disabled (#if 0) and must be enabled by hand when needed
- OperationrecPtr opRecPtr;
- bool opInList = false;
- opRecPtr.i = cfreeopRec;
- while (opRecPtr.i != RNIL){
- if (opRecPtr.i == operationRecPtr.i){
- opInList = true;
- break;
- }
- ptrCheckGuard(opRecPtr, coprecsize, operationrec);
- opRecPtr.i = opRecPtr.p->nextOp;
- }
- ndbrequire(opInList == false);
-#endif
- ndbrequire(operationRecPtr.p->lockOwner == ZFALSE);
-
- operationRecPtr.p->nextOp = cfreeopRec;
- cfreeopRec = operationRecPtr.i; /* UPDATE FREE LIST OF OP RECORDS */
- operationRecPtr.p->prevOp = RNIL;
- operationRecPtr.p->opState = FREE_OP;
- operationRecPtr.p->transactionstate = IDLE;
- operationRecPtr.p->operation = ZUNDEFINED_OP;
-}//Dbacc::releaseOpRec()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_OVERFLOW_REC */
-/* PUT A FREE OVERFLOW REC IN A FREE LIST OF THE OVERFLOW RECORDS */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseOverflowRec(Signal* signal)
-{
- rorOverflowRecPtr.p->nextfreeoverrec = cfirstfreeoverrec;
- cfirstfreeoverrec = rorOverflowRecPtr.i;
-}//Dbacc::releaseOverflowRec()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_OVERPAGE */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseOverpage(Signal* signal)
-{
- DirRangePtr ropOverflowrangeptr;
- DirectoryarrayPtr ropOverflowDirptr;
- OverflowRecordPtr ropOverflowRecPtr;
- OverflowRecordPtr tuodOverflowRecPtr;
- Uint32 tropTmp;
- Uint32 tropTmp1;
- Uint32 tropTmp2;
-
- ropOverflowRecPtr.i = ropPageptr.p->word32[ZPOS_OVERFLOWREC];
- ndbrequire(ropOverflowRecPtr.i != RNIL);
-  /* THE OVERFLOW REC WILL BE TAKEN OUT OF THE  */
-  /* FREE LIST OF OVERFLOW PAGES WITH FREE      */
-  /* CONTAINERS AND WILL BE PUT IN THE FREE     */
-  /* LIST OF THE FREE DIRECTORY INDEXES.        */
- if ((fragrecptr.p->lastOverflowRec == ropOverflowRecPtr.i) &&
- (fragrecptr.p->firstOverflowRec == ropOverflowRecPtr.i)) {
- jam();
- return; /* THERE IS ONLY ONE OVERFLOW PAGE */
- }//if
- if ((fragrecptr.p->createLcp == ZTRUE) &&
- (fragrecptr.p->lcpMaxOverDirIndex > ropPageptr.p->word32[ZPOS_PAGE_ID])) {
- /* --------------------------------------------------------------------------------- */
- /* THE PAGE PARTICIPATES IN THE LOCAL CHECKPOINT. */
- /* --------------------------------------------------------------------------------- */
- if (fragrecptr.p->fragState == LCP_SEND_PAGES) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THE PAGE PARTICIPATES IN THE LOCAL CHECKPOINT AND THE WRITE TO DISK HAS NOT */
- /* YET BEEN COMPLETED. WE MUST KEEP IT A WHILE LONGER SINCE AN EMPTY PAGE IS */
- /* NOT EQUIVALENT TO AN INITIALISED PAGE SINCE THE FREE LISTS CAN DIFFER. */
- /* --------------------------------------------------------------------------------- */
- return;
- } else {
- if ((fragrecptr.p->fragState == LCP_SEND_OVER_PAGES) &&
- (fragrecptr.p->lcpDirIndex <= ropPageptr.p->word32[ZPOS_PAGE_ID])) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* SEE COMMENT ABOVE */
- /* --------------------------------------------------------------------------------- */
- return;
- }//if
- }//if
- }//if
-#if kalle
- logicalPage = 0;
-
- i = fragrecptr.p->directory;
- p = dirRange.getPtr(i);
-
- i1 = logicalPage >> 8;
- i2 = logicalPage & 0xFF;
-
- ndbrequire(i1 < 256);
-
- i = p->dirArray[i1];
- p = directoryarray.getPtr(i);
-
- physicPageId = p->pagep[i2];
- physicPageP = page8.getPtr(physicPageId);
-
- p->pagep[i2] = RNIL;
- rpPageptr = { physicPageId, physicPageP };
- releasePage(signal);
-
-#endif
-
- /* --------------------------------------------------------------------------------- */
- /* IT WAS OK TO RELEASE THE PAGE. */
- /* --------------------------------------------------------------------------------- */
- ptrCheckGuard(ropOverflowRecPtr, coverflowrecsize, overflowRecord);
- tfoOverflowRecPtr = ropOverflowRecPtr;
- takeRecOutOfFreeOverpage(signal);
- ropOverflowRecPtr.p->overpage = RNIL;
- priOverflowRecPtr = ropOverflowRecPtr;
- putRecInFreeOverdir(signal);
- tropTmp = ropPageptr.p->word32[ZPOS_PAGE_ID];
- ropOverflowrangeptr.i = fragrecptr.p->overflowdir;
- tropTmp1 = tropTmp >> 8;
- tropTmp2 = tropTmp & 0xff;
- ptrCheckGuard(ropOverflowrangeptr, cdirrangesize, dirRange);
- arrGuard(tropTmp1, 256);
- ropOverflowDirptr.i = ropOverflowrangeptr.p->dirArray[tropTmp1];
- ptrCheckGuard(ropOverflowDirptr, cdirarraysize, directoryarray);
- ropOverflowDirptr.p->pagep[tropTmp2] = RNIL;
- rpPageptr = ropPageptr;
- releasePage(signal);
- if (ropOverflowRecPtr.p->dirindex != (fragrecptr.p->lastOverIndex - 1)) {
- jam();
- return;
- }//if
- /* --------------------------------------------------------------------------------- */
- /* THE LAST PAGE IN THE DIRECTORY WAS RELEASED IT IS NOW NECESSARY TO REMOVE */
- /* ALL RELEASED OVERFLOW DIRECTORIES AT THE END OF THE LIST. */
- /* --------------------------------------------------------------------------------- */
- do {
- fragrecptr.p->lastOverIndex--;
- if (tropTmp2 == 0) {
- jam();
- ndbrequire(tropTmp1 != 0);
- ropOverflowrangeptr.p->dirArray[tropTmp1] = RNIL;
- rdDirptr.i = ropOverflowDirptr.i;
- releaseDirectory(signal);
- tropTmp1--;
- tropTmp2 = 255;
- } else {
- jam();
- tropTmp2--;
- }//if
- ropOverflowDirptr.i = ropOverflowrangeptr.p->dirArray[tropTmp1];
- ptrCheckGuard(ropOverflowDirptr, cdirarraysize, directoryarray);
- } while (ropOverflowDirptr.p->pagep[tropTmp2] == RNIL);
- /* --------------------------------------------------------------------------------- */
- /* RELEASE ANY OVERFLOW RECORDS THAT ARE PART OF THE FREE INDEX LIST WHICH */
- /* DIRECTORY INDEX NOW HAS BEEN RELEASED. */
- /* --------------------------------------------------------------------------------- */
- tuodOverflowRecPtr.i = fragrecptr.p->firstFreeDirindexRec;
- jam();
- while (tuodOverflowRecPtr.i != RNIL) {
- jam();
- ptrCheckGuard(tuodOverflowRecPtr, coverflowrecsize, overflowRecord);
- if (tuodOverflowRecPtr.p->dirindex >= fragrecptr.p->lastOverIndex) {
- jam();
- rorOverflowRecPtr = tuodOverflowRecPtr;
- troOverflowRecPtr.p = tuodOverflowRecPtr.p;
- tuodOverflowRecPtr.i = troOverflowRecPtr.p->nextOverList;
- takeRecOutOfFreeOverdir(signal);
- releaseOverflowRec(signal);
- } else {
- jam();
- tuodOverflowRecPtr.i = tuodOverflowRecPtr.p->nextOverList;
- }//if
- }//while
-}//Dbacc::releaseOverpage()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_PAGE */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releasePage(Signal* signal)
-{
-#ifdef VM_TRACE
- bool inList = false;
- Uint32 numInList = 0;
- Page8Ptr tmpPagePtr;
- tmpPagePtr.i = cfirstfreepage;
- while (tmpPagePtr.i != RNIL){
- ptrCheckGuard(tmpPagePtr, cpagesize, page8);
- if (tmpPagePtr.i == rpPageptr.i){
- jam(); inList = true;
- break;
- }
- numInList++;
- tmpPagePtr.i = tmpPagePtr.p->word32[0];
- }
- ndbrequire(inList == false);
- // ndbrequire(numInList == cnoOfAllocatedPages);
-#endif
- rpPageptr.p->word32[0] = cfirstfreepage;
- cfirstfreepage = rpPageptr.i;
- cnoOfAllocatedPages--;
-}//Dbacc::releasePage()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_LCP_PAGE */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseLcpPage(Signal* signal)
-{
- rlpPageptr.p->word32[0] = cfirstfreeLcpPage;
- cfirstfreeLcpPage = rlpPageptr.i;
-}//Dbacc::releaseLcpPage()
-
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_SR_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseSrRec(Signal* signal)
-{
- srVersionPtr.p->nextFreeSr = cfirstFreeSrVersionRec;
- cfirstFreeSrVersionRec = srVersionPtr.i;
-}//Dbacc::releaseSrRec()
-
-/* --------------------------------------------------------------------------------- */
-/* SEIZE_DIRECTORY */
-/*               DESCRIPTION: A DIRECTORY BLOCK (ZDIRBLOCKSIZE NUMBER OF DIRECTORY  */
-/*               RECORDS) WILL BE ALLOCATED AND RETURNED.                           */
-/*               A DIRECTORY SIZE ERROR CODE (ZDIRSIZE_ERROR) WILL BE RETURNED IF   */
-/*               THERE IS NO FREE BLOCK.                                            */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizeDirectory(Signal* signal)
-{
- Uint32 tsdyIndex;
-
- if (cfirstfreedir == RNIL) {
- jam();
- if (cdirarraysize <= cdirmemory) {
- jam();
- tresult = ZDIRSIZE_ERROR;
- return;
- } else {
- jam();
- sdDirptr.i = cdirmemory;
- ptrCheckGuard(sdDirptr, cdirarraysize, directoryarray);
- cdirmemory = cdirmemory + 1;
- }//if
- } else {
- jam();
- sdDirptr.i = cfirstfreedir;
- ptrCheckGuard(sdDirptr, cdirarraysize, directoryarray);
- cfirstfreedir = sdDirptr.p->pagep[0];
- sdDirptr.p->pagep[0] = RNIL;
- }//if
- for (tsdyIndex = 0; tsdyIndex <= 255; tsdyIndex++) {
- sdDirptr.p->pagep[tsdyIndex] = RNIL;
- }//for
-}//Dbacc::seizeDirectory()
-
-/* --------------------------------------------------------------------------------- */
-/* SEIZE_DIRRANGE */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizeDirrange(Signal* signal)
-{
- Uint32 tsdeIndex;
-
- newDirRangePtr.i = cfirstfreeDirrange;
- ptrCheckGuard(newDirRangePtr, cdirrangesize, dirRange);
- cfirstfreeDirrange = newDirRangePtr.p->dirArray[0];
- for (tsdeIndex = 0; tsdeIndex <= 255; tsdeIndex++) {
- newDirRangePtr.p->dirArray[tsdeIndex] = RNIL;
- }//for
-}//Dbacc::seizeDirrange()
-
-/* --------------------------------------------------------------------------------- */
-/* SEIZE FRAGREC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizeFragrec(Signal* signal)
-{
- fragrecptr.i = cfirstfreefrag;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- cfirstfreefrag = fragrecptr.p->nextfreefrag;
- fragrecptr.p->nextfreefrag = RNIL;
-}//Dbacc::seizeFragrec()
-
-/* --------------------------------------------------------------------------------- */
-/* SEIZE_FS_CONNECT_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizeFsConnectRec(Signal* signal)
-{
- fsConnectptr.i = cfsFirstfreeconnect;
- ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
- cfsFirstfreeconnect = fsConnectptr.p->fsNext;
- fsConnectptr.p->fsNext = RNIL;
- fsConnectptr.p->fsState = WAIT_NOTHING;
-}//Dbacc::seizeFsConnectRec()
-
-/* --------------------------------------------------------------------------------- */
-/* SEIZE_FS_OP_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizeFsOpRec(Signal* signal)
-{
- fsOpptr.i = cfsFirstfreeop;
- ptrCheckGuard(fsOpptr, cfsOpsize, fsOprec);
- cfsFirstfreeop = fsOpptr.p->fsOpnext;
- fsOpptr.p->fsOpnext = RNIL;
-}//Dbacc::seizeFsOpRec()
-
-/* --------------------------------------------------------------------------------- */
-/* SEIZE_LCP_CONNECT_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizeLcpConnectRec(Signal* signal)
-{
- lcpConnectptr.i = cfirstfreelcpConnect;
- ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
- cfirstfreelcpConnect = lcpConnectptr.p->nextLcpConn;
- lcpConnectptr.p->nextLcpConn = RNIL;
-}//Dbacc::seizeLcpConnectRec()
-
-/* --------------------------------------------------------------------------------- */
-/* SEIZE_OP_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizeOpRec(Signal* signal)
-{
- operationRecPtr.i = cfreeopRec;
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
- cfreeopRec = operationRecPtr.p->nextOp; /* UPDATE FREE LIST OF OP RECORDS */
-  /* PUTS OPERATION RECORD PTR IN THE LIST  */
- /* OF OPERATION IN CONNECTION RECORD */
- operationRecPtr.p->nextOp = RNIL;
-}//Dbacc::seizeOpRec()
-
-/* --------------------------------------------------------------------------------- */
-/* SEIZE OVERFLOW RECORD */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizeOverRec(Signal* signal) {
- sorOverflowRecPtr.i = cfirstfreeoverrec;
- ptrCheckGuard(sorOverflowRecPtr, coverflowrecsize, overflowRecord);
- cfirstfreeoverrec = sorOverflowRecPtr.p->nextfreeoverrec;
- sorOverflowRecPtr.p->nextfreeoverrec = RNIL;
- sorOverflowRecPtr.p->prevOverRec = RNIL;
- sorOverflowRecPtr.p->nextOverRec = RNIL;
-}//Dbacc::seizeOverRec()
-
-
-/**
- * A ZPAGESIZE_ERROR has occurred: out of index pages.
- * Print some debug info if compiled with debug support.
- */
-void Dbacc::zpagesize_error(const char* where){
- DEBUG(where << endl
- << " ZPAGESIZE_ERROR" << endl
- << " cfirstfreepage=" << cfirstfreepage << endl
- << " cfreepage=" <<cfreepage<<endl
- << " cpagesize=" <<cpagesize<<endl
- << " cnoOfAllocatedPages="<<cnoOfAllocatedPages);
-}
-
-
-/* --------------------------------------------------------------------------------- */
-/* SEIZE_PAGE */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizePage(Signal* signal)
-{
- tresult = 0;
- if (cfirstfreepage == RNIL) {
- if (cfreepage < cpagesize) {
- jam();
- spPageptr.i = cfreepage;
- ptrCheckGuard(spPageptr, cpagesize, page8);
- cfreepage++;
- cnoOfAllocatedPages++;
- } else {
- jam();
- zpagesize_error("Dbacc::seizePage");
- tresult = ZPAGESIZE_ERROR;
- }//if
- } else {
- jam();
- spPageptr.i = cfirstfreepage;
- ptrCheckGuard(spPageptr, cpagesize, page8);
- cfirstfreepage = spPageptr.p->word32[0];
- cnoOfAllocatedPages++;
- }//if
-}//Dbacc::seizePage()
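seizePage()/releasePage() follow the same pattern as the other seize/release pairs in this block: an intrusive free list threaded through the records themselves (here via word32[0]) plus a bump index (cfreepage) for pages that have never been handed out. A minimal generic sketch of that pattern, with hypothetical names:

#include <cstdint>

static const uint32_t NIL = 0xffffffff;  // stand-in for RNIL

struct PagePool {
  uint32_t* link;          // link[i] plays the role of page i's word32[0]
  uint32_t  poolSize;      // cpagesize
  uint32_t  firstFree;     // cfirstfreepage: head of the intrusive free list
  uint32_t  nextNeverUsed; // cfreepage: first page never handed out
  uint32_t  allocated;     // cnoOfAllocatedPages
};

// Returns a page index, or NIL when the pool is exhausted (ZPAGESIZE_ERROR case).
inline uint32_t seize(PagePool& pool)
{
  if (pool.firstFree != NIL) {               // reuse a previously released page
    const uint32_t i = pool.firstFree;
    pool.firstFree = pool.link[i];           // unlink it from the free list
    pool.allocated++;
    return i;
  }
  if (pool.nextNeverUsed < pool.poolSize) {  // otherwise take a fresh page
    pool.allocated++;
    return pool.nextNeverUsed++;
  }
  return NIL;
}

inline void release(PagePool& pool, uint32_t i)
{
  pool.link[i] = pool.firstFree;             // link the page into the free list
  pool.firstFree = i;
  pool.allocated--;
}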
-
-/* --------------------------------------------------------------------------------- */
-/*                                     SEIZE_LCP_PAGE                               */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizeLcpPage(Page8Ptr& regPagePtr)
-{
- regPagePtr.i = cfirstfreeLcpPage;
- ptrCheckGuard(regPagePtr, cpagesize, page8);
- cfirstfreeLcpPage = regPagePtr.p->word32[0];
-}//Dbacc::seizeLcpPage()
-
-/* --------------------------------------------------------------------------------- */
-/* SEIZE_ROOTFRAGREC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizeRootfragrec(Signal* signal)
-{
- rootfragrecptr.i = cfirstfreerootfrag;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- cfirstfreerootfrag = rootfragrecptr.p->nextroot;
- rootfragrecptr.p->nextroot = RNIL;
-}//Dbacc::seizeRootfragrec()
-
-/* --------------------------------------------------------------------------------- */
-/* SEIZE_SCAN_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizeScanRec(Signal* signal)
-{
- scanPtr.i = cfirstFreeScanRec;
- ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
- ndbrequire(scanPtr.p->scanState == ScanRec::SCAN_DISCONNECT);
- cfirstFreeScanRec = scanPtr.p->scanNextfreerec;
-}//Dbacc::seizeScanRec()
-
-/* --------------------------------------------------------------------------------- */
-/* SEIZE_SR_VERSION_REC */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::seizeSrVerRec(Signal* signal)
-{
- srVersionPtr.i = cfirstFreeSrVersionRec;
- ptrCheckGuard(srVersionPtr, csrVersionRecSize, srVersionRec);
- cfirstFreeSrVersionRec = srVersionPtr.p->nextFreeSr;
-}//Dbacc::seizeSrVerRec()
-
-/* --------------------------------------------------------------------------------- */
-/* SEND_SYSTEMERROR */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::sendSystemerror(Signal* signal)
-{
- progError(0, 0);
-}//Dbacc::sendSystemerror()
-
-/* --------------------------------------------------------------------------------- */
-/* TAKE_REC_OUT_OF_FREE_OVERDIR */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::takeRecOutOfFreeOverdir(Signal* signal)
-{
- OverflowRecordPtr tofoOverrecPtr;
- if (troOverflowRecPtr.p->nextOverList != RNIL) {
- jam();
- tofoOverrecPtr.i = troOverflowRecPtr.p->nextOverList;
- ptrCheckGuard(tofoOverrecPtr, coverflowrecsize, overflowRecord);
- tofoOverrecPtr.p->prevOverList = troOverflowRecPtr.p->prevOverList;
- }//if
- if (troOverflowRecPtr.p->prevOverList != RNIL) {
- jam();
- tofoOverrecPtr.i = troOverflowRecPtr.p->prevOverList;
- ptrCheckGuard(tofoOverrecPtr, coverflowrecsize, overflowRecord);
- tofoOverrecPtr.p->nextOverList = troOverflowRecPtr.p->nextOverList;
- } else {
- jam();
- fragrecptr.p->firstFreeDirindexRec = troOverflowRecPtr.p->nextOverList;
- }//if
-}//Dbacc::takeRecOutOfFreeOverdir()
-
-/* --------------------------------------------------------------------------------- */
-/* TAKE_REC_OUT_OF_FREE_OVERPAGE */
-/*       DESCRIPTION: AN OVERFLOW PAGE WHICH IS EMPTY HAS TO BE TAKEN OUT OF THE    */
-/*                    FREE LIST OF OVERFLOW PAGES. THIS SUBROUTINE UPDATES          */
-/*                    THAT LIST.                                                    */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::takeRecOutOfFreeOverpage(Signal* signal)
-{
- OverflowRecordPtr tfoNextOverflowRecPtr;
- OverflowRecordPtr tfoPrevOverflowRecPtr;
-
- if (tfoOverflowRecPtr.p->nextOverRec != RNIL) {
- jam();
- tfoNextOverflowRecPtr.i = tfoOverflowRecPtr.p->nextOverRec;
- ptrCheckGuard(tfoNextOverflowRecPtr, coverflowrecsize, overflowRecord);
- tfoNextOverflowRecPtr.p->prevOverRec = tfoOverflowRecPtr.p->prevOverRec;
- } else {
- ndbrequire(fragrecptr.p->lastOverflowRec == tfoOverflowRecPtr.i);
- jam();
- fragrecptr.p->lastOverflowRec = tfoOverflowRecPtr.p->prevOverRec;
- }//if
- if (tfoOverflowRecPtr.p->prevOverRec != RNIL) {
- jam();
- tfoPrevOverflowRecPtr.i = tfoOverflowRecPtr.p->prevOverRec;
- ptrCheckGuard(tfoPrevOverflowRecPtr, coverflowrecsize, overflowRecord);
- tfoPrevOverflowRecPtr.p->nextOverRec = tfoOverflowRecPtr.p->nextOverRec;
- } else {
- ndbrequire(fragrecptr.p->firstOverflowRec == tfoOverflowRecPtr.i);
- jam();
- fragrecptr.p->firstOverflowRec = tfoOverflowRecPtr.p->nextOverRec;
- }//if
-}//Dbacc::takeRecOutOfFreeOverpage()
-
-void
-Dbacc::reportMemoryUsage(Signal* signal, int gth){
- signal->theData[0] = NDB_LE_MemoryUsage;
- signal->theData[1] = gth;
- signal->theData[2] = sizeof(* rpPageptr.p);
- signal->theData[3] = cnoOfAllocatedPages;
- signal->theData[4] = cpagesize;
- signal->theData[5] = DBACC;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 6, JBB);
-}
-
-void
-Dbacc::execDUMP_STATE_ORD(Signal* signal)
-{
- DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0];
- if (dumpState->args[0] == DumpStateOrd::AccDumpOneScanRec){
- Uint32 recordNo = RNIL;
- if (signal->length() == 2)
- recordNo = dumpState->args[1];
- else
- return;
-
- if (recordNo >= cscanRecSize)
- return;
-
- scanPtr.i = recordNo;
- ptrAss(scanPtr, scanRec);
- infoEvent("Dbacc::ScanRec[%d]: state=%d, transid(0x%x, 0x%x)",
- scanPtr.i, scanPtr.p->scanState,scanPtr.p->scanTrid1,
- scanPtr.p->scanTrid2);
- infoEvent(" timer=%d, continueBCount=%d, "
- "activeLocalFrag=%d, root=%d, nextBucketIndex=%d",
- scanPtr.p->scanTimer,
- scanPtr.p->scanContinuebCounter,
- scanPtr.p->activeLocalFrag,
- scanPtr.p->rootPtr,
- scanPtr.p->nextBucketIndex);
- infoEvent(" scanNextfreerec=%d firstActOp=%d firstLockedOp=%d, "
- "scanLastLockedOp=%d firstQOp=%d lastQOp=%d",
- scanPtr.p->scanNextfreerec,
- scanPtr.p->scanFirstActiveOp,
- scanPtr.p->scanFirstLockedOp,
- scanPtr.p->scanLastLockedOp,
- scanPtr.p->scanFirstQueuedOp,
- scanPtr.p->scanLastQueuedOp);
- infoEvent(" scanUserP=%d, startNoBuck=%d, minBucketIndexToRescan=%d, "
- "maxBucketIndexToRescan=%d",
- scanPtr.p->scanUserptr,
- scanPtr.p->startNoOfBuckets,
- scanPtr.p->minBucketIndexToRescan,
- scanPtr.p->maxBucketIndexToRescan);
- infoEvent(" scanBucketState=%d, scanLockHeld=%d, userBlockRef=%d, "
- "scanMask=%d scanLockMode=%d",
- scanPtr.p->scanBucketState,
- scanPtr.p->scanLockHeld,
- scanPtr.p->scanUserblockref,
- scanPtr.p->scanMask,
- scanPtr.p->scanLockMode);
- return;
- }
-
- // Dump all ScanRec(ords)
- if (dumpState->args[0] == DumpStateOrd::AccDumpAllScanRec){
- Uint32 recordNo = 0;
- if (signal->length() == 1)
- infoEvent("ACC: Dump all ScanRec - size: %d",
- cscanRecSize);
- else if (signal->length() == 2)
- recordNo = dumpState->args[1];
- else
- return;
-
- dumpState->args[0] = DumpStateOrd::AccDumpOneScanRec;
- dumpState->args[1] = recordNo;
- execDUMP_STATE_ORD(signal);
-
- if (recordNo < cscanRecSize-1){
- dumpState->args[0] = DumpStateOrd::AccDumpAllScanRec;
- dumpState->args[1] = recordNo+1;
- sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
- }
- return;
- }
-
- // Dump all active ScanRec(ords)
- if (dumpState->args[0] == DumpStateOrd::AccDumpAllActiveScanRec){
- Uint32 recordNo = 0;
- if (signal->length() == 1)
- infoEvent("ACC: Dump active ScanRec - size: %d",
- cscanRecSize);
- else if (signal->length() == 2)
- recordNo = dumpState->args[1];
- else
- return;
-
- ScanRecPtr sp;
- sp.i = recordNo;
- ptrAss(sp, scanRec);
- if (sp.p->scanState != ScanRec::SCAN_DISCONNECT){
- dumpState->args[0] = DumpStateOrd::AccDumpOneScanRec;
- dumpState->args[1] = recordNo;
- execDUMP_STATE_ORD(signal);
- }
-
- if (recordNo < cscanRecSize-1){
- dumpState->args[0] = DumpStateOrd::AccDumpAllActiveScanRec;
- dumpState->args[1] = recordNo+1;
- sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
- }
- return;
- }
-
- if(dumpState->args[0] == DumpStateOrd::DumpPageMemory){
- reportMemoryUsage(signal, 0);
- return;
- }
-
- if(dumpState->args[0] == DumpStateOrd::EnableUndoDelayDataWrite){
- ndbout << "Dbacc:: delay write of datapages for table = "
- << dumpState->args[1]<< endl;
- c_errorInsert3000_TableId = dumpState->args[1];
- SET_ERROR_INSERT_VALUE(3000);
- return;
- }
-
- if(dumpState->args[0] == DumpStateOrd::AccDumpOneOperationRec){
- Uint32 recordNo = RNIL;
- if (signal->length() == 2)
- recordNo = dumpState->args[1];
- else
- return;
-
- if (recordNo >= coprecsize)
- return;
-
- OperationrecPtr tmpOpPtr;
- tmpOpPtr.i = recordNo;
- ptrAss(tmpOpPtr, operationrec);
- infoEvent("Dbacc::operationrec[%d]: opState=%d, transid(0x%x, 0x%x)",
- tmpOpPtr.i, tmpOpPtr.p->opState, tmpOpPtr.p->transId1,
- tmpOpPtr.p->transId2);
- infoEvent("elementIsforward=%d, elementPage=%d, elementPointer=%d ",
- tmpOpPtr.p->elementIsforward, tmpOpPtr.p->elementPage,
- tmpOpPtr.p->elementPointer);
- infoEvent("fid=%d, fragptr=%d, hashvaluePart=%d ",
- tmpOpPtr.p->fid, tmpOpPtr.p->fragptr,
- tmpOpPtr.p->hashvaluePart);
- infoEvent("hashValue=%d, insertDeleteLen=%d, keyinfoPage=%d ",
- tmpOpPtr.p->hashValue, tmpOpPtr.p->insertDeleteLen,
- tmpOpPtr.p->keyinfoPage);
- infoEvent("nextLockOwnerOp=%d, nextOp=%d, nextParallelQue=%d ",
- tmpOpPtr.p->nextLockOwnerOp, tmpOpPtr.p->nextOp,
- tmpOpPtr.p->nextParallelQue);
- infoEvent("nextQueOp=%d, nextSerialQue=%d, prevOp=%d ",
- tmpOpPtr.p->nextQueOp, tmpOpPtr.p->nextSerialQue,
- tmpOpPtr.p->prevOp);
- infoEvent("prevLockOwnerOp=%d, prevParallelQue=%d, prevQueOp=%d ",
- tmpOpPtr.p->prevLockOwnerOp, tmpOpPtr.p->prevParallelQue,
- tmpOpPtr.p->prevQueOp);
- infoEvent("prevSerialQue=%d, scanRecPtr=%d, longPagePtr=%d ",
- tmpOpPtr.p->prevSerialQue, tmpOpPtr.p->scanRecPtr,
- tmpOpPtr.p->longPagePtr);
- infoEvent("transactionstate=%d, elementIsDisappeared=%d, insertIsDone=%d ",
- tmpOpPtr.p->transactionstate, tmpOpPtr.p->elementIsDisappeared,
- tmpOpPtr.p->insertIsDone);
- infoEvent("lockMode=%d, lockOwner=%d, nodeType=%d ",
- tmpOpPtr.p->lockMode, tmpOpPtr.p->lockOwner,
- tmpOpPtr.p->nodeType);
- infoEvent("operation=%d, opSimple=%d, dirtyRead=%d,scanBits=%d ",
- tmpOpPtr.p->operation, tmpOpPtr.p->opSimple,
- tmpOpPtr.p->dirtyRead, tmpOpPtr.p->scanBits);
- return;
- }
-
- if(dumpState->args[0] == DumpStateOrd::AccDumpNumOpRecs){
-
- Uint32 freeOpRecs = 0;
- OperationrecPtr opRecPtr;
- opRecPtr.i = cfreeopRec;
- while (opRecPtr.i != RNIL){
- freeOpRecs++;
- ptrCheckGuard(opRecPtr, coprecsize, operationrec);
- opRecPtr.i = opRecPtr.p->nextOp;
- }
-
- infoEvent("Dbacc::OperationRecords: num=%d, free=%d",
- coprecsize, freeOpRecs);
-
- return;
- }
- if(dumpState->args[0] == DumpStateOrd::AccDumpFreeOpRecs){
-
- OperationrecPtr opRecPtr;
- opRecPtr.i = cfreeopRec;
- while (opRecPtr.i != RNIL){
-
- dumpState->args[0] = DumpStateOrd::AccDumpOneOperationRec;
- dumpState->args[1] = opRecPtr.i;
- execDUMP_STATE_ORD(signal);
-
- ptrCheckGuard(opRecPtr, coprecsize, operationrec);
- opRecPtr.i = opRecPtr.p->nextOp;
- }
-
-
- return;
- }
-
- if(dumpState->args[0] == DumpStateOrd::AccDumpNotFreeOpRecs){
- Uint32 recordStart = RNIL;
- if (signal->length() == 2)
- recordStart = dumpState->args[1];
- else
- return;
-
- if (recordStart >= coprecsize)
- return;
-
- for (Uint32 i = recordStart; i < coprecsize; i++){
-
- bool inFreeList = false;
- OperationrecPtr opRecPtr;
- opRecPtr.i = cfreeopRec;
- while (opRecPtr.i != RNIL){
- if (opRecPtr.i == i){
- inFreeList = true;
- break;
- }
- ptrCheckGuard(opRecPtr, coprecsize, operationrec);
- opRecPtr.i = opRecPtr.p->nextOp;
- }
- if (inFreeList == false){
- dumpState->args[0] = DumpStateOrd::AccDumpOneOperationRec;
- dumpState->args[1] = i;
- execDUMP_STATE_ORD(signal);
- }
- }
- return;
- }
-
-#if 0
- if (type == 100) {
- RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend();
- req->primaryTableId = 2;
- req->secondaryTableId = RNIL;
- req->userPtr = 2;
- req->userRef = DBDICT_REF;
- sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal,
- RelTabMemReq::SignalLength, JBB);
- return;
- }//if
- if (type == 101) {
- RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend();
- req->primaryTableId = 4;
- req->secondaryTableId = 5;
- req->userPtr = 4;
- req->userRef = DBDICT_REF;
- sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal,
- RelTabMemReq::SignalLength, JBB);
- return;
- }//if
- if (type == 102) {
- RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend();
- req->primaryTableId = 6;
- req->secondaryTableId = 8;
- req->userPtr = 6;
- req->userRef = DBDICT_REF;
- sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal,
- RelTabMemReq::SignalLength, JBB);
- return;
- }//if
- if (type == 103) {
- DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend();
- req->primaryTableId = 2;
- req->secondaryTableId = RNIL;
- req->userPtr = 2;
- req->userRef = DBDICT_REF;
- sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal,
- DropTabFileReq::SignalLength, JBB);
- return;
- }//if
- if (type == 104) {
- DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend();
- req->primaryTableId = 4;
- req->secondaryTableId = 5;
- req->userPtr = 4;
- req->userRef = DBDICT_REF;
- sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal,
- DropTabFileReq::SignalLength, JBB);
- return;
- }//if
- if (type == 105) {
- DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend();
- req->primaryTableId = 6;
- req->secondaryTableId = 8;
- req->userPtr = 6;
- req->userRef = DBDICT_REF;
- sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal,
- DropTabFileReq::SignalLength, JBB);
- return;
- }//if
-#endif
-}//Dbacc::execDUMP_STATE_ORD()
-
-void Dbacc::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
-
- switch (var) {
-
- case NoOfDiskPagesToDiskAfterRestartACC:
- clblPagesPerTick = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfDiskPagesToDiskDuringRestartACC:
- // Valid only during start so value not set.
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-
-}//execSET_VAR_REQ()
-
-void
-Dbacc::execREAD_PSUEDO_REQ(Signal* signal){
- jamEntry();
- fragrecptr.i = signal->theData[0];
- Uint32 attrId = signal->theData[1];
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- Uint64 tmp;
- switch(attrId){
- case AttributeHeader::ROW_COUNT:
- tmp = rootfragrecptr.p->noOfElements;
- break;
- case AttributeHeader::COMMIT_COUNT:
- tmp = rootfragrecptr.p->m_commit_count;
- break;
- default:
- tmp = 0;
- }
- memcpy(signal->theData, &tmp, 8); /* must be memcpy, gives strange results on
- * Itanium gcc (GCC) 3.4.1 smp linux 2.4
- * otherwise
- */
- // Uint32 * src = (Uint32*)&tmp;
- // signal->theData[0] = src[0];
- // signal->theData[1] = src[1];
-}
-
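// Illustrative sketch only (not NDB code): execREAD_PSUEDO_REQ() above copies
// a Uint64 result into the Uint32 signal buffer with memcpy. A direct 64-bit
// store such as *(Uint64*)signal->theData = tmp may be misaligned and breaks
// strict aliasing, which is presumably what produced the "strange results"
// the comment mentions on Itanium. Minimal pattern, with hypothetical names:
//
//   #include <string.h>
//
//   typedef unsigned int       Uint32;
//   typedef unsigned long long Uint64;
//
//   void store64(Uint32* buf, Uint64 v)
//   {
//     memcpy(buf, &v, sizeof(v));   // safe regardless of alignment/aliasing
//   }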
diff --git a/ndb/src/kernel/blocks/dbacc/Makefile.am b/ndb/src/kernel/blocks/dbacc/Makefile.am
deleted file mode 100644
index ca1b1efac37..00000000000
--- a/ndb/src/kernel/blocks/dbacc/Makefile.am
+++ /dev/null
@@ -1,26 +0,0 @@
-
-noinst_LIBRARIES = libdbacc.a
-
-libdbacc_a_SOURCES = DbaccInit.cpp DbaccMain.cpp
-
-INCLUDES_LOC = -I$(top_srcdir)/ndb/src/kernel/blocks/dbtup
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libdbacc.dsp
-
-libdbacc.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libdbacc_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
deleted file mode 100644
index d51f9537154..00000000000
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ /dev/null
@@ -1,12104 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include <ndb_global.h>
-#include <my_sys.h>
-
-#define DBDICT_C
-#include "Dbdict.hpp"
-
-#include <ndb_limits.h>
-#include <NdbOut.hpp>
-#include <Properties.hpp>
-#include <Configuration.hpp>
-#include <SectionReader.hpp>
-#include <SimpleProperties.hpp>
-#include <AttributeHeader.hpp>
-#include <signaldata/DictSchemaInfo.hpp>
-#include <signaldata/DictTabInfo.hpp>
-#include <signaldata/DropTabFile.hpp>
-
-#include <signaldata/EventReport.hpp>
-#include <signaldata/FsCloseReq.hpp>
-#include <signaldata/FsConf.hpp>
-#include <signaldata/FsOpenReq.hpp>
-#include <signaldata/FsReadWriteReq.hpp>
-#include <signaldata/FsRef.hpp>
-#include <signaldata/GetTabInfo.hpp>
-#include <signaldata/GetTableId.hpp>
-#include <signaldata/HotSpareRep.hpp>
-#include <signaldata/NFCompleteRep.hpp>
-#include <signaldata/NodeFailRep.hpp>
-#include <signaldata/ReadNodesConf.hpp>
-#include <signaldata/RelTabMem.hpp>
-#include <signaldata/WaitGCP.hpp>
-#include <signaldata/ListTables.hpp>
-
-#include <signaldata/CreateTrig.hpp>
-#include <signaldata/AlterTrig.hpp>
-#include <signaldata/DropTrig.hpp>
-#include <signaldata/CreateIndx.hpp>
-#include <signaldata/DropIndx.hpp>
-#include <signaldata/BuildIndx.hpp>
-
-#include <signaldata/CreateEvnt.hpp>
-#include <signaldata/UtilPrepare.hpp>
-#include <signaldata/UtilExecute.hpp>
-#include <signaldata/UtilRelease.hpp>
-#include <signaldata/SumaImpl.hpp>
-#include <GrepError.hpp>
-//#include <signaldata/DropEvnt.hpp>
-
-#include <signaldata/LqhFrag.hpp>
-
-#include <signaldata/DiAddTab.hpp>
-#include <signaldata/DihStartTab.hpp>
-
-#include <signaldata/DropTable.hpp>
-#include <signaldata/DropTab.hpp>
-#include <signaldata/PrepDropTab.hpp>
-
-#include <signaldata/CreateTable.hpp>
-#include <signaldata/AlterTable.hpp>
-#include <signaldata/AlterTab.hpp>
-#include <signaldata/CreateFragmentation.hpp>
-#include <signaldata/CreateTab.hpp>
-#include <NdbSleep.h>
-
-#define ZNOT_FOUND 626
-#define ZALREADYEXIST 630
-
-//#define EVENT_PH2_DEBUG
-//#define EVENT_PH3_DEBUG
-//#define EVENT_DEBUG
-
-#define EVENT_TRACE \
-// ndbout_c("Event debug trace: File: %s Line: %u", __FILE__, __LINE__)
-
-#define DIV(x,y) (((x)+(y)-1)/(y))
-#include <ndb_version.h>
-
-/* **************************************************************** */
-/* ---------------------------------------------------------------- */
-/* MODULE: GENERAL MODULE -------------------------------- */
-/* ---------------------------------------------------------------- */
-/* */
-/* This module contains general functionality: mostly debug signals */
-/* and general signals that are routed to a specific module after */
-/* checking a state variable, as well as general subroutines used */
-/* by many other modules. */
-/* ---------------------------------------------------------------- */
-/* **************************************************************** */
-
-/* ---------------------------------------------------------------- */
-// This signal is used to dump states of various variables in the
-// block by command.
-/* ---------------------------------------------------------------- */
-void
-Dbdict::execDUMP_STATE_ORD(Signal* signal)
-{
- jamEntry();
-
-#ifdef VM_TRACE
- if(signal->theData[0] == 1222){
- const Uint32 tab = signal->theData[1];
- PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
- req->senderRef = reference();
- req->senderData = 1222;
- req->tableId = tab;
- sendSignal(DBLQH_REF, GSN_PREP_DROP_TAB_REQ, signal,
- PrepDropTabReq::SignalLength, JBB);
- }
-
- if(signal->theData[0] == 1223){
- const Uint32 tab = signal->theData[1];
- PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
- req->senderRef = reference();
- req->senderData = 1222;
- req->tableId = tab;
- sendSignal(DBTC_REF, GSN_PREP_DROP_TAB_REQ, signal,
- PrepDropTabReq::SignalLength, JBB);
- }
-
- if(signal->theData[0] == 1224){
- const Uint32 tab = signal->theData[1];
- PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
- req->senderRef = reference();
- req->senderData = 1222;
- req->tableId = tab;
- sendSignal(DBDIH_REF, GSN_PREP_DROP_TAB_REQ, signal,
- PrepDropTabReq::SignalLength, JBB);
- }
-
- if(signal->theData[0] == 1225){
- const Uint32 tab = signal->theData[1];
- const Uint32 ver = signal->theData[2];
- TableRecordPtr tabRecPtr;
- c_tableRecordPool.getPtr(tabRecPtr, tab);
- DropTableReq * req = (DropTableReq*)signal->getDataPtr();
- req->senderData = 1225;
- req->senderRef = numberToRef(1,1);
- req->tableId = tab;
- req->tableVersion = tabRecPtr.p->tableVersion + ver;
- sendSignal(DBDICT_REF, GSN_DROP_TABLE_REQ, signal,
- DropTableReq::SignalLength, JBB);
- }
-#endif
-
- return;
-}//Dbdict::execDUMP_STATE_ORD()
-
-/* ---------------------------------------------------------------- */
-/* ---------------------------------------------------------------- */
-// CONTINUEB is used when a real-time break is needed for long
-// processes.
-/* ---------------------------------------------------------------- */
-/* ---------------------------------------------------------------- */
-void Dbdict::execCONTINUEB(Signal* signal)
-{
- jamEntry();
- switch (signal->theData[0]) {
- case ZPACK_TABLE_INTO_PAGES :
- jam();
- packTableIntoPages(signal, signal->theData[1], signal->theData[2]);
- break;
-
- case ZSEND_GET_TAB_RESPONSE :
- jam();
- sendGetTabResponse(signal);
- break;
-
- default :
- ndbrequire(false);
- break;
- }//switch
- return;
-}//execCONTINUEB()
-
-/* ---------------------------------------------------------------- */
-/* ---------------------------------------------------------------- */
-// Routine to handle pack table into pages.
-/* ---------------------------------------------------------------- */
-/* ---------------------------------------------------------------- */
-
-void Dbdict::packTableIntoPages(Signal* signal, Uint32 tableId, Uint32 pageId)
-{
-
- PageRecordPtr pagePtr;
- TableRecordPtr tablePtr;
- c_pageRecordArray.getPtr(pagePtr, pageId);
-
- memset(&pagePtr.p->word[0], 0, 4 * ZPAGE_HEADER_SIZE);
- c_tableRecordPool.getPtr(tablePtr, tableId);
- LinearWriter w(&pagePtr.p->word[ZPAGE_HEADER_SIZE],
- 8 * ZSIZE_OF_PAGES_IN_WORDS);
-
- w.first();
- packTableIntoPagesImpl(w, tablePtr, signal);
-
- Uint32 wordsOfTable = w.getWordsUsed();
- Uint32 pagesUsed =
- DIV(wordsOfTable + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
- pagePtr.p->word[ZPOS_CHECKSUM] =
- computeChecksum(&pagePtr.p->word[0], pagesUsed * ZSIZE_OF_PAGES_IN_WORDS);
-
- switch (c_packTable.m_state) {
- case PackTable::PTS_IDLE:
- case PackTable::PTS_ADD_TABLE_MASTER:
- case PackTable::PTS_ADD_TABLE_SLAVE:
- case PackTable::PTS_RESTART:
- ndbrequire(false);
- break;
- case PackTable::PTS_GET_TAB:
- jam();
- c_retrieveRecord.retrievedNoOfPages = pagesUsed;
- c_retrieveRecord.retrievedNoOfWords = wordsOfTable;
- sendGetTabResponse(signal);
- return;
- break;
- }//switch
- ndbrequire(false);
- return;
-}//packTableIntoPages()
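// Illustrative note (not NDB code): DIV(x,y), defined earlier in this file as
// (((x)+(y)-1)/(y)), is ceiling division for positive integers, so
// packTableIntoPages() above rounds the packed table description up to whole
// pages before checksumming. For example:
//
//   DIV(2048, 2048) == 1
//   DIV(2049, 2048) == 2   // one extra word forces an additional page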
-
-void
-Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
- TableRecordPtr tablePtr,
- Signal* signal){
-
- w.add(DictTabInfo::TableName, tablePtr.p->tableName);
- w.add(DictTabInfo::TableId, tablePtr.i);
-#ifdef HAVE_TABLE_REORG
- w.add(DictTabInfo::SecondTableId, tablePtr.p->secondTable);
-#else
- w.add(DictTabInfo::SecondTableId, (Uint32)0);
-#endif
- w.add(DictTabInfo::TableVersion, tablePtr.p->tableVersion);
- w.add(DictTabInfo::NoOfKeyAttr, tablePtr.p->noOfPrimkey);
- w.add(DictTabInfo::NoOfAttributes, tablePtr.p->noOfAttributes);
- w.add(DictTabInfo::NoOfNullable, tablePtr.p->noOfNullAttr);
- w.add(DictTabInfo::NoOfVariable, (Uint32)0);
- w.add(DictTabInfo::KeyLength, tablePtr.p->tupKeyLength);
-
- w.add(DictTabInfo::TableLoggedFlag, tablePtr.p->storedTable);
- w.add(DictTabInfo::MinLoadFactor, tablePtr.p->minLoadFactor);
- w.add(DictTabInfo::MaxLoadFactor, tablePtr.p->maxLoadFactor);
- w.add(DictTabInfo::TableKValue, tablePtr.p->kValue);
- w.add(DictTabInfo::FragmentTypeVal, tablePtr.p->fragmentType);
- w.add(DictTabInfo::TableTypeVal, tablePtr.p->tableType);
-
- if(!signal)
- {
- w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount);
- }
- else
- {
- Uint32 * theData = signal->getDataPtrSend();
- CreateFragmentationReq * const req = (CreateFragmentationReq*)theData;
- req->senderRef = 0;
- req->senderData = RNIL;
- req->fragmentationType = tablePtr.p->fragmentType;
- req->noOfFragments = 0;
- req->fragmentNode = 0;
- req->primaryTableId = tablePtr.i;
- EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal,
- CreateFragmentationReq::SignalLength);
- if(signal->theData[0] == 0)
- {
- Uint16 *data = (Uint16*)&signal->theData[25];
- Uint32 count = 2 + data[0] * data[1];
- w.add(DictTabInfo::FragmentDataLen, 2*count);
- w.add(DictTabInfo::FragmentData, data, 2*count);
- }
- }
-
- if (tablePtr.p->primaryTableId != RNIL){
- TableRecordPtr primTab;
- c_tableRecordPool.getPtr(primTab, tablePtr.p->primaryTableId);
- w.add(DictTabInfo::PrimaryTable, primTab.p->tableName);
- w.add(DictTabInfo::PrimaryTableId, tablePtr.p->primaryTableId);
- w.add(DictTabInfo::IndexState, tablePtr.p->indexState);
- w.add(DictTabInfo::InsertTriggerId, tablePtr.p->insertTriggerId);
- w.add(DictTabInfo::UpdateTriggerId, tablePtr.p->updateTriggerId);
- w.add(DictTabInfo::DeleteTriggerId, tablePtr.p->deleteTriggerId);
- w.add(DictTabInfo::CustomTriggerId, tablePtr.p->customTriggerId);
- }
- w.add(DictTabInfo::FrmLen, tablePtr.p->frmLen);
- w.add(DictTabInfo::FrmData, tablePtr.p->frmData, tablePtr.p->frmLen);
-
- Uint32 nextAttribute = tablePtr.p->firstAttribute;
- AttributeRecordPtr attrPtr;
- do {
- jam();
- c_attributeRecordPool.getPtr(attrPtr, nextAttribute);
-
- w.add(DictTabInfo::AttributeName, attrPtr.p->attributeName);
- w.add(DictTabInfo::AttributeId, attrPtr.p->attributeId);
- w.add(DictTabInfo::AttributeKeyFlag, attrPtr.p->tupleKey > 0);
-
- const Uint32 desc = attrPtr.p->attributeDescriptor;
- const Uint32 attrType = AttributeDescriptor::getType(desc);
- const Uint32 attrSize = AttributeDescriptor::getSize(desc);
- const Uint32 arraySize = AttributeDescriptor::getArraySize(desc);
- const Uint32 nullable = AttributeDescriptor::getNullable(desc);
- const Uint32 DKey = AttributeDescriptor::getDKey(desc);
-
- // AttributeType deprecated
- w.add(DictTabInfo::AttributeSize, attrSize);
- w.add(DictTabInfo::AttributeArraySize, arraySize);
- w.add(DictTabInfo::AttributeNullableFlag, nullable);
- w.add(DictTabInfo::AttributeDKey, DKey);
- w.add(DictTabInfo::AttributeExtType, attrType);
- w.add(DictTabInfo::AttributeExtPrecision, attrPtr.p->extPrecision);
- w.add(DictTabInfo::AttributeExtScale, attrPtr.p->extScale);
- w.add(DictTabInfo::AttributeExtLength, attrPtr.p->extLength);
- w.add(DictTabInfo::AttributeAutoIncrement,
- (Uint32)attrPtr.p->autoIncrement);
- w.add(DictTabInfo::AttributeDefaultValue, attrPtr.p->defaultValue);
-
- w.add(DictTabInfo::AttributeEnd, 1);
- nextAttribute = attrPtr.p->nextAttrInTable;
- } while (nextAttribute != RNIL);
-
- w.add(DictTabInfo::TableEnd, 1);
-}
-
-/* ---------------------------------------------------------------- */
-/* ---------------------------------------------------------------- */
-// The routines to handle responses from file system.
-/* ---------------------------------------------------------------- */
-/* ---------------------------------------------------------------- */
-
-/* ---------------------------------------------------------------- */
-// A file was successfully closed.
-/* ---------------------------------------------------------------- */
-void Dbdict::execFSCLOSECONF(Signal* signal)
-{
- FsConnectRecordPtr fsPtr;
- FsConf * const fsConf = (FsConf *)&signal->theData[0];
- jamEntry();
- c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer);
- switch (fsPtr.p->fsState) {
- case FsConnectRecord::CLOSE_WRITE_SCHEMA:
- jam();
- closeWriteSchemaConf(signal, fsPtr);
- break;
- case FsConnectRecord::CLOSE_READ_SCHEMA:
- jam();
- closeReadSchemaConf(signal, fsPtr);
- break;
- case FsConnectRecord::CLOSE_READ_TAB_FILE:
- jam();
- closeReadTableConf(signal, fsPtr);
- break;
- case FsConnectRecord::CLOSE_WRITE_TAB_FILE:
- jam();
- closeWriteTableConf(signal, fsPtr);
- break;
- default:
- jamLine((fsPtr.p->fsState & 0xFFF));
- ndbrequire(false);
- break;
- }//switch
-}//execFSCLOSECONF()
-
-/* ---------------------------------------------------------------- */
-// A close file was refused.
-/* ---------------------------------------------------------------- */
-void Dbdict::execFSCLOSEREF(Signal* signal)
-{
- jamEntry();
- progError(0, 0);
-}//execFSCLOSEREF()
-
-/* ---------------------------------------------------------------- */
-// A file was successfully opened.
-/* ---------------------------------------------------------------- */
-void Dbdict::execFSOPENCONF(Signal* signal)
-{
- FsConnectRecordPtr fsPtr;
- jamEntry();
- FsConf * const fsConf = (FsConf *)&signal->theData[0];
- c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer);
-
- Uint32 filePointer = fsConf->filePointer;
- fsPtr.p->filePtr = filePointer;
- switch (fsPtr.p->fsState) {
- case FsConnectRecord::OPEN_WRITE_SCHEMA:
- jam();
- fsPtr.p->fsState = FsConnectRecord::WRITE_SCHEMA;
- writeSchemaFile(signal, filePointer, fsPtr.i);
- break;
- case FsConnectRecord::OPEN_READ_SCHEMA1:
- jam();
- fsPtr.p->fsState = FsConnectRecord::READ_SCHEMA1;
- readSchemaFile(signal, filePointer, fsPtr.i);
- break;
- case FsConnectRecord::OPEN_READ_SCHEMA2:
- jam();
- fsPtr.p->fsState = FsConnectRecord::READ_SCHEMA2;
- readSchemaFile(signal, filePointer, fsPtr.i);
- break;
- case FsConnectRecord::OPEN_READ_TAB_FILE1:
- jam();
- fsPtr.p->fsState = FsConnectRecord::READ_TAB_FILE1;
- readTableFile(signal, filePointer, fsPtr.i);
- break;
- case FsConnectRecord::OPEN_READ_TAB_FILE2:
- jam();
- fsPtr.p->fsState = FsConnectRecord::READ_TAB_FILE2;
- readTableFile(signal, filePointer, fsPtr.i);
- break;
- case FsConnectRecord::OPEN_WRITE_TAB_FILE:
- jam();
- fsPtr.p->fsState = FsConnectRecord::WRITE_TAB_FILE;
- writeTableFile(signal, filePointer, fsPtr.i);
- break;
- default:
- jamLine((fsPtr.p->fsState & 0xFFF));
- ndbrequire(false);
- break;
- }//switch
-}//execFSOPENCONF()
-
-/* ---------------------------------------------------------------- */
-// An open file was refused.
-/* ---------------------------------------------------------------- */
-void Dbdict::execFSOPENREF(Signal* signal)
-{
- jamEntry();
- FsRef * const fsRef = (FsRef *)&signal->theData[0];
- FsConnectRecordPtr fsPtr;
- c_fsConnectRecordPool.getPtr(fsPtr, fsRef->userPointer);
- switch (fsPtr.p->fsState) {
- case FsConnectRecord::OPEN_READ_SCHEMA1:
- openReadSchemaRef(signal, fsPtr);
- break;
- case FsConnectRecord::OPEN_READ_TAB_FILE1:
- jam();
- openReadTableRef(signal, fsPtr);
- break;
- default:
- jamLine((fsPtr.p->fsState & 0xFFF));
- ndbrequire(false);
- break;
- }//switch
-}//execFSOPENREF()
-
-/* ---------------------------------------------------------------- */
-// A file was successfully read.
-/* ---------------------------------------------------------------- */
-void Dbdict::execFSREADCONF(Signal* signal)
-{
- jamEntry();
- FsConf * const fsConf = (FsConf *)&signal->theData[0];
- FsConnectRecordPtr fsPtr;
- c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer);
- switch (fsPtr.p->fsState) {
- case FsConnectRecord::READ_SCHEMA1:
- case FsConnectRecord::READ_SCHEMA2:
- readSchemaConf(signal, fsPtr);
- break;
- case FsConnectRecord::READ_TAB_FILE1:
- case FsConnectRecord::READ_TAB_FILE2:
- jam();
- readTableConf(signal, fsPtr);
- break;
- default:
- jamLine((fsPtr.p->fsState & 0xFFF));
- ndbrequire(false);
- break;
- }//switch
-}//execFSREADCONF()
-
-/* ---------------------------------------------------------------- */
-// A read file was refused.
-/* ---------------------------------------------------------------- */
-void Dbdict::execFSREADREF(Signal* signal)
-{
- jamEntry();
- FsRef * const fsRef = (FsRef *)&signal->theData[0];
- FsConnectRecordPtr fsPtr;
- c_fsConnectRecordPool.getPtr(fsPtr, fsRef->userPointer);
- switch (fsPtr.p->fsState) {
- case FsConnectRecord::READ_SCHEMA1:
- readSchemaRef(signal, fsPtr);
- break;
- case FsConnectRecord::READ_TAB_FILE1:
- jam();
- readTableRef(signal, fsPtr);
- break;
- default:
- jamLine((fsPtr.p->fsState & 0xFFF));
- ndbrequire(false);
- break;
- }//switch
-}//execFSREADREF()
-
-/* ---------------------------------------------------------------- */
-// A file was successfully written.
-/* ---------------------------------------------------------------- */
-void Dbdict::execFSWRITECONF(Signal* signal)
-{
- FsConf * const fsConf = (FsConf *)&signal->theData[0];
- FsConnectRecordPtr fsPtr;
- jamEntry();
- c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer);
- switch (fsPtr.p->fsState) {
- case FsConnectRecord::WRITE_TAB_FILE:
- writeTableConf(signal, fsPtr);
- break;
- case FsConnectRecord::WRITE_SCHEMA:
- jam();
- writeSchemaConf(signal, fsPtr);
- break;
- default:
- jamLine((fsPtr.p->fsState & 0xFFF));
- ndbrequire(false);
- break;
- }//switch
-}//execFSWRITECONF()
-
-/* ---------------------------------------------------------------- */
-// A write file was refused.
-/* ---------------------------------------------------------------- */
-void Dbdict::execFSWRITEREF(Signal* signal)
-{
- jamEntry();
- progError(0, 0);
-}//execFSWRITEREF()
-
-/* ---------------------------------------------------------------- */
-// Routines to handle Read/Write of Table Files
-/* ---------------------------------------------------------------- */
-void
-Dbdict::writeTableFile(Signal* signal, Uint32 tableId,
- SegmentedSectionPtr tabInfoPtr, Callback* callback){
-
- ndbrequire(c_writeTableRecord.tableWriteState == WriteTableRecord::IDLE);
-
- Uint32 sz = tabInfoPtr.sz + ZPAGE_HEADER_SIZE;
-
- c_writeTableRecord.noOfPages = DIV(sz, ZSIZE_OF_PAGES_IN_WORDS);
- c_writeTableRecord.tableWriteState = WriteTableRecord::TWR_CALLBACK;
- c_writeTableRecord.m_callback = * callback;
-
- c_writeTableRecord.pageId = 0;
- ndbrequire(c_writeTableRecord.noOfPages < 8);
-
- PageRecordPtr pageRecPtr;
- c_pageRecordArray.getPtr(pageRecPtr, c_writeTableRecord.pageId);
- copy(&pageRecPtr.p->word[ZPAGE_HEADER_SIZE], tabInfoPtr);
-
- memset(&pageRecPtr.p->word[0], 0, 4 * ZPAGE_HEADER_SIZE);
- pageRecPtr.p->word[ZPOS_CHECKSUM] =
- computeChecksum(&pageRecPtr.p->word[0],
- c_writeTableRecord.noOfPages * ZSIZE_OF_PAGES_IN_WORDS);
-
- startWriteTableFile(signal, tableId);
-
-}
-
-void Dbdict::startWriteTableFile(Signal* signal, Uint32 tableId)
-{
- FsConnectRecordPtr fsPtr;
- c_writeTableRecord.tableId = tableId;
- c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
- fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_TAB_FILE;
- openTableFile(signal, 0, fsPtr.i, tableId, true);
- c_writeTableRecord.noOfTableFilesHandled = 0;
-}//Dbdict::startWriteTableFile()
-
-void Dbdict::openTableFile(Signal* signal,
- Uint32 fileNo,
- Uint32 fsConPtr,
- Uint32 tableId,
- bool writeFlag)
-{
- TableRecordPtr tablePtr;
- FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0];
- c_tableRecordPool.getPtr(tablePtr, tableId);
-
- fsOpenReq->userReference = reference();
- fsOpenReq->userPointer = fsConPtr;
- if (writeFlag) {
- jam();
- fsOpenReq->fileFlags =
- FsOpenReq::OM_WRITEONLY |
- FsOpenReq::OM_TRUNCATE |
- FsOpenReq::OM_CREATE |
- FsOpenReq::OM_SYNC;
- } else {
- jam();
- fsOpenReq->fileFlags = FsOpenReq::OM_READONLY;
- }//if
- ndbrequire(tablePtr.p->tableVersion < ZNIL);
- fsOpenReq->fileNumber[3] = 0; // Initialise before byte changes
- FsOpenReq::setVersion(fsOpenReq->fileNumber, 1);
- FsOpenReq::setSuffix(fsOpenReq->fileNumber, FsOpenReq::S_TABLELIST);
- FsOpenReq::v1_setDisk(fsOpenReq->fileNumber, (fileNo + 1));
- FsOpenReq::v1_setTable(fsOpenReq->fileNumber, tableId);
- FsOpenReq::v1_setFragment(fsOpenReq->fileNumber, (Uint32)-1);
- FsOpenReq::v1_setS(fsOpenReq->fileNumber, tablePtr.p->tableVersion);
- FsOpenReq::v1_setP(fsOpenReq->fileNumber, 255);
-/* ---------------------------------------------------------------- */
-// File name : D1/DBDICT/T0/S1.TableList
-// D1 means Disk 1 (set by fileNo + 1)
-// T0 means table id = 0
-// S1 means tableVersion 1
-// TableList indicates that this is a file for a table description.
-/* ---------------------------------------------------------------- */
- sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
-}//openTableFile()
-
-void Dbdict::writeTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
-{
- FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
-
- fsRWReq->filePointer = filePtr;
- fsRWReq->userReference = reference();
- fsRWReq->userPointer = fsConPtr;
- fsRWReq->operationFlag = 0; // Initialise before bit changes
- FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1);
- FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
- FsReadWriteReq::fsFormatArrayOfPages);
- fsRWReq->varIndex = ZBAT_TABLE_FILE;
- fsRWReq->numberOfPages = c_writeTableRecord.noOfPages;
- fsRWReq->data.arrayOfPages.varIndex = c_writeTableRecord.pageId;
- fsRWReq->data.arrayOfPages.fileOffset = 0; // Write to file page 0
- sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
-}//writeTableFile()
-
-void Dbdict::writeTableConf(Signal* signal,
- FsConnectRecordPtr fsPtr)
-{
- fsPtr.p->fsState = FsConnectRecord::CLOSE_WRITE_TAB_FILE;
- closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
- return;
-}//Dbdict::writeTableConf()
-
-void Dbdict::closeWriteTableConf(Signal* signal,
- FsConnectRecordPtr fsPtr)
-{
- c_writeTableRecord.noOfTableFilesHandled++;
- if (c_writeTableRecord.noOfTableFilesHandled < 2) {
- jam();
- fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_TAB_FILE;
- openTableFile(signal, 1, fsPtr.i, c_writeTableRecord.tableId, true);
- return;
- }
- ndbrequire(c_writeTableRecord.noOfTableFilesHandled == 2);
- c_fsConnectRecordPool.release(fsPtr);
- WriteTableRecord::TableWriteState state = c_writeTableRecord.tableWriteState;
- c_writeTableRecord.tableWriteState = WriteTableRecord::IDLE;
- switch (state) {
- case WriteTableRecord::IDLE:
- case WriteTableRecord::WRITE_ADD_TABLE_MASTER :
- case WriteTableRecord::WRITE_ADD_TABLE_SLAVE :
- case WriteTableRecord::WRITE_RESTART_FROM_MASTER :
- case WriteTableRecord::WRITE_RESTART_FROM_OWN :
- ndbrequire(false);
- break;
- case WriteTableRecord::TWR_CALLBACK:
- jam();
- execute(signal, c_writeTableRecord.m_callback, 0);
- return;
- }
- ndbrequire(false);
-}//Dbdict::closeWriteTableConf()
-
-void Dbdict::startReadTableFile(Signal* signal, Uint32 tableId)
-{
- //globalSignalLoggers.log(number(), "startReadTableFile");
- ndbrequire(!c_readTableRecord.inUse);
-
- FsConnectRecordPtr fsPtr;
- c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
- c_readTableRecord.inUse = true;
- c_readTableRecord.tableId = tableId;
- fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE1;
- openTableFile(signal, 0, fsPtr.i, tableId, false);
-}//Dbdict::startReadTableFile()
-
-void Dbdict::openReadTableRef(Signal* signal,
- FsConnectRecordPtr fsPtr)
-{
- fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2;
- openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false);
- return;
-}//Dbdict::openReadTableRef()
-
-void Dbdict::readTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
-{
- FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
-
- fsRWReq->filePointer = filePtr;
- fsRWReq->userReference = reference();
- fsRWReq->userPointer = fsConPtr;
- fsRWReq->operationFlag = 0; // Initialise before bit changes
- FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0);
- FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
- FsReadWriteReq::fsFormatArrayOfPages);
- fsRWReq->varIndex = ZBAT_TABLE_FILE;
- fsRWReq->numberOfPages = c_readTableRecord.noOfPages;
- fsRWReq->data.arrayOfPages.varIndex = c_readTableRecord.pageId;
- fsRWReq->data.arrayOfPages.fileOffset = 0; // Read from file page 0
- sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
-}//readTableFile()
-
-void Dbdict::readTableConf(Signal* signal,
- FsConnectRecordPtr fsPtr)
-{
- /* ---------------------------------------------------------------- */
- // Verify the data read from disk
- /* ---------------------------------------------------------------- */
- bool crashInd;
- if (fsPtr.p->fsState == FsConnectRecord::READ_TAB_FILE1) {
- jam();
- crashInd = false;
- } else {
- jam();
- crashInd = true;
- }//if
-
- PageRecordPtr tmpPagePtr;
- c_pageRecordArray.getPtr(tmpPagePtr, c_readTableRecord.pageId);
- Uint32 sz = c_readTableRecord.noOfPages * ZSIZE_OF_PAGES_IN_WORDS;
- Uint32 chk = computeChecksum((const Uint32*)tmpPagePtr.p, sz);
-
- ndbrequire((chk == 0) || !crashInd);
- if(chk != 0){
- jam();
- ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_TAB_FILE1);
- readTableRef(signal, fsPtr);
- return;
- }//if
-
- fsPtr.p->fsState = FsConnectRecord::CLOSE_READ_TAB_FILE;
- closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
- return;
-}//Dbdict::readTableConf()
-
-void Dbdict::readTableRef(Signal* signal,
- FsConnectRecordPtr fsPtr)
-{
- fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2;
- openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false);
- return;
-}//Dbdict::readTableRef()
-
-void Dbdict::closeReadTableConf(Signal* signal,
- FsConnectRecordPtr fsPtr)
-{
- c_fsConnectRecordPool.release(fsPtr);
- c_readTableRecord.inUse = false;
-
- execute(signal, c_readTableRecord.m_callback, 0);
- return;
-}//Dbdict::closeReadTableConf()
-
-/* ---------------------------------------------------------------- */
-// Routines to handle Read/Write of Schema Files
-/* ---------------------------------------------------------------- */
-void
-Dbdict::updateSchemaState(Signal* signal, Uint32 tableId,
- SchemaFile::TableEntry* te, Callback* callback){
-
- jam();
- ndbrequire(tableId < c_tableRecordPool.getSize());
- XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
- SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tableId);
-
- SchemaFile::TableState newState =
- (SchemaFile::TableState)te->m_tableState;
- SchemaFile::TableState oldState =
- (SchemaFile::TableState)tableEntry->m_tableState;
-
- Uint32 newVersion = te->m_tableVersion;
- Uint32 oldVersion = tableEntry->m_tableVersion;
-
- bool ok = false;
- switch(newState){
- case SchemaFile::ADD_STARTED:
- jam();
- ok = true;
- ndbrequire((oldVersion + 1) == newVersion);
- ndbrequire(oldState == SchemaFile::INIT ||
- oldState == SchemaFile::DROP_TABLE_COMMITTED);
- break;
- case SchemaFile::TABLE_ADD_COMMITTED:
- jam();
- ok = true;
- ndbrequire(newVersion == oldVersion);
- ndbrequire(oldState == SchemaFile::ADD_STARTED);
- break;
- case SchemaFile::ALTER_TABLE_COMMITTED:
- jam();
- ok = true;
- ndbrequire((oldVersion + 1) == newVersion);
- ndbrequire(oldState == SchemaFile::TABLE_ADD_COMMITTED ||
- oldState == SchemaFile::ALTER_TABLE_COMMITTED);
- break;
- case SchemaFile::DROP_TABLE_STARTED:
- jam();
- case SchemaFile::DROP_TABLE_COMMITTED:
- jam();
- ok = true;
- ndbrequire(false);
- break;
- case SchemaFile::INIT:
- jam();
- ok = true;
- ndbrequire((oldState == SchemaFile::ADD_STARTED));
- }//switch
- ndbrequire(ok);
-
- * tableEntry = * te;
- computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES);
-
- ndbrequire(c_writeSchemaRecord.inUse == false);
- c_writeSchemaRecord.inUse = true;
-
- c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
- c_writeSchemaRecord.newFile = false;
- c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES;
- c_writeSchemaRecord.noOfPages = 1;
- c_writeSchemaRecord.m_callback = * callback;
-
- startWriteSchemaFile(signal);
-}
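// Illustrative sketch only (hypothetical helper, not NDB code): the schema
// state transitions that updateSchemaState() above enforces with ndbrequire(),
// restated as a standalone predicate. State names mirror the
// SchemaFile::TableState values used in the code.
//
//   enum State { INIT, ADD_STARTED, TABLE_ADD_COMMITTED,
//                ALTER_TABLE_COMMITTED, DROP_TABLE_STARTED,
//                DROP_TABLE_COMMITTED };
//
//   bool transitionAllowed(State oldSt, State newSt,
//                          unsigned oldVer, unsigned newVer)
//   {
//     switch (newSt) {
//     case ADD_STARTED:
//       return newVer == oldVer + 1 &&
//              (oldSt == INIT || oldSt == DROP_TABLE_COMMITTED);
//     case TABLE_ADD_COMMITTED:
//       return newVer == oldVer && oldSt == ADD_STARTED;
//     case ALTER_TABLE_COMMITTED:
//       return newVer == oldVer + 1 &&
//              (oldSt == TABLE_ADD_COMMITTED || oldSt == ALTER_TABLE_COMMITTED);
//     case INIT:                    // rolling back an interrupted add
//       return oldSt == ADD_STARTED;
//     default:                      // DROP_TABLE_* is rejected here
//       return false;
//     }
//   }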
-
-void Dbdict::startWriteSchemaFile(Signal* signal)
-{
- FsConnectRecordPtr fsPtr;
- c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
- fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_SCHEMA;
- openSchemaFile(signal, 0, fsPtr.i, true, c_writeSchemaRecord.newFile);
- c_writeSchemaRecord.noOfSchemaFilesHandled = 0;
-}//Dbdict::startWriteSchemaFile()
-
-void Dbdict::openSchemaFile(Signal* signal,
- Uint32 fileNo,
- Uint32 fsConPtr,
- bool writeFlag,
- bool newFile)
-{
- FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0];
- fsOpenReq->userReference = reference();
- fsOpenReq->userPointer = fsConPtr;
- if (writeFlag) {
- jam();
- fsOpenReq->fileFlags =
- FsOpenReq::OM_WRITEONLY |
- FsOpenReq::OM_SYNC;
- if (newFile)
- fsOpenReq->fileFlags |=
- FsOpenReq::OM_TRUNCATE |
- FsOpenReq::OM_CREATE;
- } else {
- jam();
- fsOpenReq->fileFlags = FsOpenReq::OM_READONLY;
- }//if
- fsOpenReq->fileNumber[3] = 0; // Initialise before byte changes
- FsOpenReq::setVersion(fsOpenReq->fileNumber, 1);
- FsOpenReq::setSuffix(fsOpenReq->fileNumber, FsOpenReq::S_SCHEMALOG);
- FsOpenReq::v1_setDisk(fsOpenReq->fileNumber, (fileNo + 1));
- FsOpenReq::v1_setTable(fsOpenReq->fileNumber, (Uint32)-1);
- FsOpenReq::v1_setFragment(fsOpenReq->fileNumber, (Uint32)-1);
- FsOpenReq::v1_setS(fsOpenReq->fileNumber, (Uint32)-1);
- FsOpenReq::v1_setP(fsOpenReq->fileNumber, 0);
-/* ---------------------------------------------------------------- */
-// File name : D1/DBDICT/P0.SchemaLog
-// D1 means Disk 1 (set by fileNo + 1). Writes to both D1 and D2
-// SchemaLog indicates that this is a file giving a list of current tables.
-/* ---------------------------------------------------------------- */
- sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
-}//openSchemaFile()
-
-void Dbdict::writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
-{
- FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
-
- // check write record
- WriteSchemaRecord & wr = c_writeSchemaRecord;
- ndbrequire(wr.pageId == (wr.pageId != 0) * NDB_SF_MAX_PAGES);
- ndbrequire(wr.noOfPages != 0);
- ndbrequire(wr.firstPage + wr.noOfPages <= NDB_SF_MAX_PAGES);
-
- fsRWReq->filePointer = filePtr;
- fsRWReq->userReference = reference();
- fsRWReq->userPointer = fsConPtr;
- fsRWReq->operationFlag = 0; // Initialise before bit changes
- FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1);
- FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
- FsReadWriteReq::fsFormatArrayOfPages);
- fsRWReq->varIndex = ZBAT_SCHEMA_FILE;
- fsRWReq->numberOfPages = wr.noOfPages;
- // Write from memory page
- fsRWReq->data.arrayOfPages.varIndex = wr.pageId + wr.firstPage;
- fsRWReq->data.arrayOfPages.fileOffset = wr.firstPage;
- sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
-}//writeSchemaFile()
-
-void Dbdict::writeSchemaConf(Signal* signal,
- FsConnectRecordPtr fsPtr)
-{
- fsPtr.p->fsState = FsConnectRecord::CLOSE_WRITE_SCHEMA;
- closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
- return;
-}//Dbdict::writeSchemaConf()
-
-void Dbdict::closeFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
-{
- FsCloseReq * const fsCloseReq = (FsCloseReq *)&signal->theData[0];
- fsCloseReq->filePointer = filePtr;
- fsCloseReq->userReference = reference();
- fsCloseReq->userPointer = fsConPtr;
- FsCloseReq::setRemoveFileFlag(fsCloseReq->fileFlag, false);
- sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, FsCloseReq::SignalLength, JBA);
- return;
-}//closeFile()
-
-void Dbdict::closeWriteSchemaConf(Signal* signal,
- FsConnectRecordPtr fsPtr)
-{
- c_writeSchemaRecord.noOfSchemaFilesHandled++;
- if (c_writeSchemaRecord.noOfSchemaFilesHandled < 2) {
- jam();
- fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_SCHEMA;
- openSchemaFile(signal, 1, fsPtr.i, true, c_writeSchemaRecord.newFile);
- return;
- }
- ndbrequire(c_writeSchemaRecord.noOfSchemaFilesHandled == 2);
-
- c_fsConnectRecordPool.release(fsPtr);
-
- c_writeSchemaRecord.inUse = false;
- execute(signal, c_writeSchemaRecord.m_callback, 0);
- return;
-}//Dbdict::closeWriteSchemaConf()
-
-void Dbdict::startReadSchemaFile(Signal* signal)
-{
- //globalSignalLoggers.log(number(), "startReadSchemaFile");
- FsConnectRecordPtr fsPtr;
- c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
- fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA1;
- openSchemaFile(signal, 0, fsPtr.i, false, false);
-}//Dbdict::startReadSchemaFile()
-
-void Dbdict::openReadSchemaRef(Signal* signal,
- FsConnectRecordPtr fsPtr)
-{
- fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA2;
- openSchemaFile(signal, 1, fsPtr.i, false, false);
-}//Dbdict::openReadSchemaRef()
-
-void Dbdict::readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
-{
- FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
-
- // check read record
- ReadSchemaRecord & rr = c_readSchemaRecord;
- ndbrequire(rr.pageId == (rr.pageId != 0) * NDB_SF_MAX_PAGES);
- ndbrequire(rr.noOfPages != 0);
- ndbrequire(rr.firstPage + rr.noOfPages <= NDB_SF_MAX_PAGES);
-
- fsRWReq->filePointer = filePtr;
- fsRWReq->userReference = reference();
- fsRWReq->userPointer = fsConPtr;
- fsRWReq->operationFlag = 0; // Initialise before bit changes
- FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0);
- FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
- FsReadWriteReq::fsFormatArrayOfPages);
- fsRWReq->varIndex = ZBAT_SCHEMA_FILE;
- fsRWReq->numberOfPages = rr.noOfPages;
- fsRWReq->data.arrayOfPages.varIndex = rr.pageId + rr.firstPage;
- fsRWReq->data.arrayOfPages.fileOffset = rr.firstPage;
- sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
-}//readSchemaFile()
-
-void Dbdict::readSchemaConf(Signal* signal,
- FsConnectRecordPtr fsPtr)
-{
-/* ---------------------------------------------------------------- */
-// Verify the data read from disk
-/* ---------------------------------------------------------------- */
- bool crashInd;
- if (fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1) {
- jam();
- crashInd = false;
- } else {
- jam();
- crashInd = true;
- }//if
-
- ReadSchemaRecord & rr = c_readSchemaRecord;
- XSchemaFile * xsf = &c_schemaFile[rr.pageId != 0];
-
- if (rr.schemaReadState == ReadSchemaRecord::INITIAL_READ_HEAD) {
- jam();
- ndbrequire(rr.firstPage == 0);
- SchemaFile * sf = &xsf->schemaPage[0];
- Uint32 noOfPages;
- if (sf->NdbVersion < NDB_SF_VERSION_5_0_6) {
- jam();
- const Uint32 pageSize_old = 32 * 1024;
- noOfPages = pageSize_old / NDB_SF_PAGE_SIZE - 1;
- } else {
- noOfPages = sf->FileSize / NDB_SF_PAGE_SIZE - 1;
- }
- rr.schemaReadState = ReadSchemaRecord::INITIAL_READ;
- if (noOfPages != 0) {
- rr.firstPage = 1;
- rr.noOfPages = noOfPages;
- readSchemaFile(signal, fsPtr.p->filePtr, fsPtr.i);
- return;
- }
- }
-
- SchemaFile * sf0 = &xsf->schemaPage[0];
- xsf->noOfPages = sf0->FileSize / NDB_SF_PAGE_SIZE;
-
- if (sf0->NdbVersion < NDB_SF_VERSION_5_0_6 &&
- ! convertSchemaFileTo_5_0_6(xsf)) {
- jam();
- ndbrequire(! crashInd);
- ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1);
- readSchemaRef(signal, fsPtr);
- return;
- }
-
- for (Uint32 n = 0; n < xsf->noOfPages; n++) {
- SchemaFile * sf = &xsf->schemaPage[n];
- bool ok =
- memcmp(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)) == 0 &&
- sf->FileSize != 0 &&
- sf->FileSize % NDB_SF_PAGE_SIZE == 0 &&
- sf->FileSize == sf0->FileSize &&
- sf->PageNumber == n &&
- computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS) == 0;
- ndbrequire(ok || !crashInd);
- if (! ok) {
- jam();
- ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1);
- readSchemaRef(signal, fsPtr);
- return;
- }
- }
-
- fsPtr.p->fsState = FsConnectRecord::CLOSE_READ_SCHEMA;
- closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
- return;
-}//Dbdict::readSchemaConf()
-
-void Dbdict::readSchemaRef(Signal* signal,
- FsConnectRecordPtr fsPtr)
-{
- fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA2;
- openSchemaFile(signal, 1, fsPtr.i, false, false);
- return;
-}//Dbdict::readSchemaRef()
-
-void Dbdict::closeReadSchemaConf(Signal* signal,
- FsConnectRecordPtr fsPtr)
-{
- c_fsConnectRecordPool.release(fsPtr);
- ReadSchemaRecord::SchemaReadState state = c_readSchemaRecord.schemaReadState;
- c_readSchemaRecord.schemaReadState = ReadSchemaRecord::IDLE;
-
- switch(state) {
- case ReadSchemaRecord::INITIAL_READ :
- jam();
- {
- // write back both copies
-
- ndbrequire(c_writeSchemaRecord.inUse == false);
- XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0 ];
- Uint32 noOfPages =
- (c_tableRecordPool.getSize() + NDB_SF_PAGE_ENTRIES - 1) /
- NDB_SF_PAGE_ENTRIES;
- resizeSchemaFile(xsf, noOfPages);
-
- c_writeSchemaRecord.inUse = true;
- c_writeSchemaRecord.pageId = c_schemaRecord.oldSchemaPage;
- c_writeSchemaRecord.newFile = true;
- c_writeSchemaRecord.firstPage = 0;
- c_writeSchemaRecord.noOfPages = xsf->noOfPages;
-
- c_writeSchemaRecord.m_callback.m_callbackFunction =
- safe_cast(&Dbdict::initSchemaFile_conf);
-
- startWriteSchemaFile(signal);
- }
- break;
-
- default :
- ndbrequire(false);
- break;
-
- }//switch
-}//Dbdict::closeReadSchemaConf()
-
-bool
-Dbdict::convertSchemaFileTo_5_0_6(XSchemaFile * xsf)
-{
- const Uint32 pageSize_old = 32 * 1024;
- Uint32 page_old[pageSize_old >> 2];
- SchemaFile * sf_old = (SchemaFile *)page_old;
-
- if (xsf->noOfPages * NDB_SF_PAGE_SIZE != pageSize_old)
- return false;
- SchemaFile * sf0 = &xsf->schemaPage[0];
- memcpy(sf_old, sf0, pageSize_old);
-
- // init max number new pages needed
- xsf->noOfPages = (sf_old->NoOfTableEntries + NDB_SF_PAGE_ENTRIES - 1) /
- NDB_SF_PAGE_ENTRIES;
- initSchemaFile(xsf, 0, xsf->noOfPages, true);
-
- Uint32 noOfPages = 1;
- Uint32 n, i, j;
- for (n = 0; n < xsf->noOfPages; n++) {
- jam();
- for (i = 0; i < NDB_SF_PAGE_ENTRIES; i++) {
- j = n * NDB_SF_PAGE_ENTRIES + i;
- if (j >= sf_old->NoOfTableEntries)
- continue;
- const SchemaFile::TableEntry_old & te_old = sf_old->TableEntries_old[j];
- if (te_old.m_tableState == SchemaFile::INIT ||
- te_old.m_tableState == SchemaFile::DROP_TABLE_COMMITTED ||
- te_old.m_noOfPages == 0)
- continue;
- SchemaFile * sf = &xsf->schemaPage[n];
- SchemaFile::TableEntry & te = sf->TableEntries[i];
- te.m_tableState = te_old.m_tableState;
- te.m_tableVersion = te_old.m_tableVersion;
- te.m_tableType = te_old.m_tableType;
- te.m_info_words = te_old.m_noOfPages * ZSIZE_OF_PAGES_IN_WORDS -
- ZPAGE_HEADER_SIZE;
- te.m_gcp = te_old.m_gcp;
- if (noOfPages < n)
- noOfPages = n;
- }
- }
- xsf->noOfPages = noOfPages;
- initSchemaFile(xsf, 0, xsf->noOfPages, false);
-
- return true;
-}
-
-/* **************************************************************** */
-/* ---------------------------------------------------------------- */
-/* MODULE: INITIALISATION MODULE ------------------------- */
-/* ---------------------------------------------------------------- */
-/* */
-/* This module contains initialisation of data at start/restart. */
-/* ---------------------------------------------------------------- */
-/* **************************************************************** */
-
-Dbdict::Dbdict(const class Configuration & conf):
- SimulatedBlock(DBDICT, conf),
- c_tableRecordHash(c_tableRecordPool),
- c_attributeRecordHash(c_attributeRecordPool),
- c_triggerRecordHash(c_triggerRecordPool),
- c_opCreateTable(c_opRecordPool),
- c_opDropTable(c_opRecordPool),
- c_opCreateIndex(c_opRecordPool),
- c_opDropIndex(c_opRecordPool),
- c_opAlterIndex(c_opRecordPool),
- c_opBuildIndex(c_opRecordPool),
- c_opCreateEvent(c_opRecordPool),
- c_opSubEvent(c_opRecordPool),
- c_opDropEvent(c_opRecordPool),
- c_opSignalUtil(c_opRecordPool),
- c_opCreateTrigger(c_opRecordPool),
- c_opDropTrigger(c_opRecordPool),
- c_opAlterTrigger(c_opRecordPool),
- c_opRecordSequence(0)
-{
- BLOCK_CONSTRUCTOR(Dbdict);
-
- const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
- ndbrequire(p != 0);
-
- ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS, &c_maxNoOfTriggers);
- // Transit signals
- addRecSignal(GSN_DUMP_STATE_ORD, &Dbdict::execDUMP_STATE_ORD);
- addRecSignal(GSN_GET_TABINFOREQ, &Dbdict::execGET_TABINFOREQ);
- addRecSignal(GSN_GET_TABLEID_REQ, &Dbdict::execGET_TABLEDID_REQ);
- addRecSignal(GSN_GET_TABINFO_CONF, &Dbdict::execGET_TABINFO_CONF);
- addRecSignal(GSN_CONTINUEB, &Dbdict::execCONTINUEB);
-
- addRecSignal(GSN_CREATE_TABLE_REQ, &Dbdict::execCREATE_TABLE_REQ);
- addRecSignal(GSN_CREATE_TAB_REQ, &Dbdict::execCREATE_TAB_REQ);
- addRecSignal(GSN_CREATE_TAB_REF, &Dbdict::execCREATE_TAB_REF);
- addRecSignal(GSN_CREATE_TAB_CONF, &Dbdict::execCREATE_TAB_CONF);
- addRecSignal(GSN_CREATE_FRAGMENTATION_REF, &Dbdict::execCREATE_FRAGMENTATION_REF);
- addRecSignal(GSN_CREATE_FRAGMENTATION_CONF, &Dbdict::execCREATE_FRAGMENTATION_CONF);
- addRecSignal(GSN_DIADDTABCONF, &Dbdict::execDIADDTABCONF);
- addRecSignal(GSN_DIADDTABREF, &Dbdict::execDIADDTABREF);
- addRecSignal(GSN_ADD_FRAGREQ, &Dbdict::execADD_FRAGREQ);
- addRecSignal(GSN_TAB_COMMITCONF, &Dbdict::execTAB_COMMITCONF);
- addRecSignal(GSN_TAB_COMMITREF, &Dbdict::execTAB_COMMITREF);
- addRecSignal(GSN_ALTER_TABLE_REQ, &Dbdict::execALTER_TABLE_REQ);
- addRecSignal(GSN_ALTER_TAB_REQ, &Dbdict::execALTER_TAB_REQ);
- addRecSignal(GSN_ALTER_TAB_REF, &Dbdict::execALTER_TAB_REF);
- addRecSignal(GSN_ALTER_TAB_CONF, &Dbdict::execALTER_TAB_CONF);
-
- // Index signals
- addRecSignal(GSN_CREATE_INDX_REQ, &Dbdict::execCREATE_INDX_REQ);
- addRecSignal(GSN_CREATE_INDX_CONF, &Dbdict::execCREATE_INDX_CONF);
- addRecSignal(GSN_CREATE_INDX_REF, &Dbdict::execCREATE_INDX_REF);
-
- addRecSignal(GSN_ALTER_INDX_REQ, &Dbdict::execALTER_INDX_REQ);
- addRecSignal(GSN_ALTER_INDX_CONF, &Dbdict::execALTER_INDX_CONF);
- addRecSignal(GSN_ALTER_INDX_REF, &Dbdict::execALTER_INDX_REF);
-
- addRecSignal(GSN_CREATE_TABLE_CONF, &Dbdict::execCREATE_TABLE_CONF);
- addRecSignal(GSN_CREATE_TABLE_REF, &Dbdict::execCREATE_TABLE_REF);
-
- addRecSignal(GSN_DROP_INDX_REQ, &Dbdict::execDROP_INDX_REQ);
- addRecSignal(GSN_DROP_INDX_CONF, &Dbdict::execDROP_INDX_CONF);
- addRecSignal(GSN_DROP_INDX_REF, &Dbdict::execDROP_INDX_REF);
-
- addRecSignal(GSN_DROP_TABLE_CONF, &Dbdict::execDROP_TABLE_CONF);
- addRecSignal(GSN_DROP_TABLE_REF, &Dbdict::execDROP_TABLE_REF);
-
- addRecSignal(GSN_BUILDINDXREQ, &Dbdict::execBUILDINDXREQ);
- addRecSignal(GSN_BUILDINDXCONF, &Dbdict::execBUILDINDXCONF);
- addRecSignal(GSN_BUILDINDXREF, &Dbdict::execBUILDINDXREF);
-
- // Util signals
- addRecSignal(GSN_UTIL_PREPARE_CONF, &Dbdict::execUTIL_PREPARE_CONF);
- addRecSignal(GSN_UTIL_PREPARE_REF, &Dbdict::execUTIL_PREPARE_REF);
-
- addRecSignal(GSN_UTIL_EXECUTE_CONF, &Dbdict::execUTIL_EXECUTE_CONF);
- addRecSignal(GSN_UTIL_EXECUTE_REF, &Dbdict::execUTIL_EXECUTE_REF);
-
- addRecSignal(GSN_UTIL_RELEASE_CONF, &Dbdict::execUTIL_RELEASE_CONF);
- addRecSignal(GSN_UTIL_RELEASE_REF, &Dbdict::execUTIL_RELEASE_REF);
-
- // Event signals
- addRecSignal(GSN_CREATE_EVNT_REQ, &Dbdict::execCREATE_EVNT_REQ);
- addRecSignal(GSN_CREATE_EVNT_CONF, &Dbdict::execCREATE_EVNT_CONF);
- addRecSignal(GSN_CREATE_EVNT_REF, &Dbdict::execCREATE_EVNT_REF);
-
- addRecSignal(GSN_CREATE_SUBID_CONF, &Dbdict::execCREATE_SUBID_CONF);
- addRecSignal(GSN_CREATE_SUBID_REF, &Dbdict::execCREATE_SUBID_REF);
-
- addRecSignal(GSN_SUB_CREATE_CONF, &Dbdict::execSUB_CREATE_CONF);
- addRecSignal(GSN_SUB_CREATE_REF, &Dbdict::execSUB_CREATE_REF);
-
- addRecSignal(GSN_SUB_START_REQ, &Dbdict::execSUB_START_REQ);
- addRecSignal(GSN_SUB_START_CONF, &Dbdict::execSUB_START_CONF);
- addRecSignal(GSN_SUB_START_REF, &Dbdict::execSUB_START_REF);
-
- addRecSignal(GSN_SUB_STOP_REQ, &Dbdict::execSUB_STOP_REQ);
- addRecSignal(GSN_SUB_STOP_CONF, &Dbdict::execSUB_STOP_CONF);
- addRecSignal(GSN_SUB_STOP_REF, &Dbdict::execSUB_STOP_REF);
-
- addRecSignal(GSN_SUB_SYNC_CONF, &Dbdict::execSUB_SYNC_CONF);
- addRecSignal(GSN_SUB_SYNC_REF, &Dbdict::execSUB_SYNC_REF);
-
- addRecSignal(GSN_DROP_EVNT_REQ, &Dbdict::execDROP_EVNT_REQ);
-
- addRecSignal(GSN_SUB_REMOVE_REQ, &Dbdict::execSUB_REMOVE_REQ);
- addRecSignal(GSN_SUB_REMOVE_CONF, &Dbdict::execSUB_REMOVE_CONF);
- addRecSignal(GSN_SUB_REMOVE_REF, &Dbdict::execSUB_REMOVE_REF);
-
- // Trigger signals
- addRecSignal(GSN_CREATE_TRIG_REQ, &Dbdict::execCREATE_TRIG_REQ);
- addRecSignal(GSN_CREATE_TRIG_CONF, &Dbdict::execCREATE_TRIG_CONF);
- addRecSignal(GSN_CREATE_TRIG_REF, &Dbdict::execCREATE_TRIG_REF);
- addRecSignal(GSN_ALTER_TRIG_REQ, &Dbdict::execALTER_TRIG_REQ);
- addRecSignal(GSN_ALTER_TRIG_CONF, &Dbdict::execALTER_TRIG_CONF);
- addRecSignal(GSN_ALTER_TRIG_REF, &Dbdict::execALTER_TRIG_REF);
- addRecSignal(GSN_DROP_TRIG_REQ, &Dbdict::execDROP_TRIG_REQ);
- addRecSignal(GSN_DROP_TRIG_CONF, &Dbdict::execDROP_TRIG_CONF);
- addRecSignal(GSN_DROP_TRIG_REF, &Dbdict::execDROP_TRIG_REF);
-
- // Received signals
- addRecSignal(GSN_HOT_SPAREREP, &Dbdict::execHOT_SPAREREP);
- addRecSignal(GSN_GET_SCHEMA_INFOREQ, &Dbdict::execGET_SCHEMA_INFOREQ);
- addRecSignal(GSN_SCHEMA_INFO, &Dbdict::execSCHEMA_INFO);
- addRecSignal(GSN_SCHEMA_INFOCONF, &Dbdict::execSCHEMA_INFOCONF);
- addRecSignal(GSN_DICTSTARTREQ, &Dbdict::execDICTSTARTREQ);
- addRecSignal(GSN_READ_NODESCONF, &Dbdict::execREAD_NODESCONF);
- addRecSignal(GSN_FSOPENCONF, &Dbdict::execFSOPENCONF);
- addRecSignal(GSN_FSOPENREF, &Dbdict::execFSOPENREF);
- addRecSignal(GSN_FSCLOSECONF, &Dbdict::execFSCLOSECONF);
- addRecSignal(GSN_FSCLOSEREF, &Dbdict::execFSCLOSEREF);
- addRecSignal(GSN_FSWRITECONF, &Dbdict::execFSWRITECONF);
- addRecSignal(GSN_FSWRITEREF, &Dbdict::execFSWRITEREF);
- addRecSignal(GSN_FSREADCONF, &Dbdict::execFSREADCONF);
- addRecSignal(GSN_FSREADREF, &Dbdict::execFSREADREF);
- addRecSignal(GSN_LQHFRAGCONF, &Dbdict::execLQHFRAGCONF);
- addRecSignal(GSN_LQHADDATTCONF, &Dbdict::execLQHADDATTCONF);
- addRecSignal(GSN_LQHADDATTREF, &Dbdict::execLQHADDATTREF);
- addRecSignal(GSN_LQHFRAGREF, &Dbdict::execLQHFRAGREF);
- addRecSignal(GSN_NDB_STTOR, &Dbdict::execNDB_STTOR);
- addRecSignal(GSN_READ_CONFIG_REQ, &Dbdict::execREAD_CONFIG_REQ, true);
- addRecSignal(GSN_STTOR, &Dbdict::execSTTOR);
- addRecSignal(GSN_TC_SCHVERCONF, &Dbdict::execTC_SCHVERCONF);
- addRecSignal(GSN_NODE_FAILREP, &Dbdict::execNODE_FAILREP);
- addRecSignal(GSN_INCL_NODEREQ, &Dbdict::execINCL_NODEREQ);
- addRecSignal(GSN_API_FAILREQ, &Dbdict::execAPI_FAILREQ);
-
- addRecSignal(GSN_WAIT_GCP_REF, &Dbdict::execWAIT_GCP_REF);
- addRecSignal(GSN_WAIT_GCP_CONF, &Dbdict::execWAIT_GCP_CONF);
-
- addRecSignal(GSN_LIST_TABLES_REQ, &Dbdict::execLIST_TABLES_REQ);
-
- addRecSignal(GSN_DROP_TABLE_REQ, &Dbdict::execDROP_TABLE_REQ);
-
- addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dbdict::execPREP_DROP_TAB_REQ);
- addRecSignal(GSN_PREP_DROP_TAB_REF, &Dbdict::execPREP_DROP_TAB_REF);
- addRecSignal(GSN_PREP_DROP_TAB_CONF, &Dbdict::execPREP_DROP_TAB_CONF);
-
- addRecSignal(GSN_DROP_TAB_REQ, &Dbdict::execDROP_TAB_REQ);
- addRecSignal(GSN_DROP_TAB_REF, &Dbdict::execDROP_TAB_REF);
- addRecSignal(GSN_DROP_TAB_CONF, &Dbdict::execDROP_TAB_CONF);
-}//Dbdict::Dbdict()
-
-Dbdict::~Dbdict()
-{
-}//Dbdict::~Dbdict()
-
-BLOCK_FUNCTIONS(Dbdict)
-
-void Dbdict::initCommonData()
-{
-/* ---------------------------------------------------------------- */
-// Initialise all common variables.
-/* ---------------------------------------------------------------- */
- initRetrieveRecord(0, 0, 0);
- initSchemaRecord();
- initRestartRecord();
- initSendSchemaRecord();
- initReadTableRecord();
- initWriteTableRecord();
- initReadSchemaRecord();
- initWriteSchemaRecord();
-
- c_masterNodeId = ZNIL;
- c_numberNode = 0;
- c_noNodesFailed = 0;
- c_failureNr = 0;
- c_blockState = BS_IDLE;
- c_packTable.m_state = PackTable::PTS_IDLE;
- c_startPhase = 0;
- c_restartType = 255; // invalid value; must be set before use
- c_tabinfoReceived = 0;
- c_initialStart = false;
- c_systemRestart = false;
- c_initialNodeRestart = false;
- c_nodeRestart = false;
-}//Dbdict::initCommonData()
-
-void Dbdict::initRecords()
-{
- initNodeRecords();
- initPageRecords();
- initTableRecords();
- initTriggerRecords();
-}//Dbdict::initRecords()
-
-void Dbdict::initSendSchemaRecord()
-{
- c_sendSchemaRecord.noOfWords = (Uint32)-1;
- c_sendSchemaRecord.pageId = RNIL;
- c_sendSchemaRecord.noOfWordsCurrentlySent = 0;
- c_sendSchemaRecord.noOfSignalsSentSinceDelay = 0;
- c_sendSchemaRecord.inUse = false;
- //c_sendSchemaRecord.sendSchemaState = SendSchemaRecord::IDLE;
-}//initSendSchemaRecord()
-
-void Dbdict::initReadTableRecord()
-{
- c_readTableRecord.noOfPages = (Uint32)-1;
- c_readTableRecord.pageId = RNIL;
- c_readTableRecord.tableId = ZNIL;
- c_readTableRecord.inUse = false;
-}//initReadTableRecord()
-
-void Dbdict::initWriteTableRecord()
-{
- c_writeTableRecord.noOfPages = (Uint32)-1;
- c_writeTableRecord.pageId = RNIL;
- c_writeTableRecord.noOfTableFilesHandled = 3;
- c_writeTableRecord.tableId = ZNIL;
- c_writeTableRecord.tableWriteState = WriteTableRecord::IDLE;
-}//initWriteTableRecord()
-
-void Dbdict::initReadSchemaRecord()
-{
- c_readSchemaRecord.pageId = RNIL;
- c_readSchemaRecord.schemaReadState = ReadSchemaRecord::IDLE;
-}//initReadSchemaRecord()
-
-void Dbdict::initWriteSchemaRecord()
-{
- c_writeSchemaRecord.inUse = false;
- c_writeSchemaRecord.pageId = RNIL;
- c_writeSchemaRecord.noOfSchemaFilesHandled = 3;
-}//initWriteSchemaRecord()
-
-void Dbdict::initRetrieveRecord(Signal* signal, Uint32 i, Uint32 returnCode)
-{
- c_retrieveRecord.busyState = false;
- c_retrieveRecord.blockRef = 0;
- c_retrieveRecord.m_senderData = RNIL;
- c_retrieveRecord.tableId = RNIL;
- c_retrieveRecord.currentSent = 0;
- c_retrieveRecord.retrievedNoOfPages = 0;
- c_retrieveRecord.retrievedNoOfWords = 0;
- c_retrieveRecord.m_useLongSig = false;
-}//initRetrieveRecord()
-
-void Dbdict::initSchemaRecord()
-{
- c_schemaRecord.schemaPage = RNIL;
- c_schemaRecord.oldSchemaPage = RNIL;
-}//Dbdict::initSchemaRecord()
-
-void Dbdict::initRestartRecord()
-{
- c_restartRecord.gciToRestart = 0;
- c_restartRecord.activeTable = ZNIL;
-}//Dbdict::initRestartRecord()
-
-void Dbdict::initNodeRecords()
-{
- jam();
- for (unsigned i = 1; i < MAX_NODES; i++) {
- NodeRecordPtr nodePtr;
- c_nodes.getPtr(nodePtr, i);
- nodePtr.p->hotSpare = false;
- nodePtr.p->nodeState = NodeRecord::API_NODE;
- }//for
-}//Dbdict::initNodeRecords()
-
-void Dbdict::initPageRecords()
-{
- c_retrieveRecord.retrievePage = ZMAX_PAGES_OF_TABLE_DEFINITION;
- ndbrequire(ZNUMBER_OF_PAGES >= (ZMAX_PAGES_OF_TABLE_DEFINITION + 1));
- c_schemaRecord.schemaPage = 0;
- c_schemaRecord.oldSchemaPage = NDB_SF_MAX_PAGES;
-}//Dbdict::initPageRecords()
-
-void Dbdict::initTableRecords()
-{
- TableRecordPtr tablePtr;
- while (1) {
- jam();
- refresh_watch_dog();
- c_tableRecordPool.seize(tablePtr);
- if (tablePtr.i == RNIL) {
- jam();
- break;
- }//if
- initialiseTableRecord(tablePtr);
- }//while
-}//Dbdict::initTableRecords()
-
-void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
-{
- tablePtr.p->activePage = RNIL;
- tablePtr.p->filePtr[0] = RNIL;
- tablePtr.p->filePtr[1] = RNIL;
- tablePtr.p->firstAttribute = RNIL;
- tablePtr.p->firstPage = RNIL;
- tablePtr.p->lastAttribute = RNIL;
- tablePtr.p->tableId = tablePtr.i;
- tablePtr.p->tableVersion = (Uint32)-1;
- tablePtr.p->tabState = TableRecord::NOT_DEFINED;
- tablePtr.p->tabReturnState = TableRecord::TRS_IDLE;
- tablePtr.p->myConnect = RNIL;
- tablePtr.p->fragmentType = DictTabInfo::AllNodesSmallTable;
- memset(tablePtr.p->tableName, 0, sizeof(tablePtr.p->tableName));
- tablePtr.p->gciTableCreated = 0;
- tablePtr.p->noOfAttributes = ZNIL;
- tablePtr.p->noOfNullAttr = 0;
- tablePtr.p->frmLen = 0;
- memset(tablePtr.p->frmData, 0, sizeof(tablePtr.p->frmData));
- /*
- tablePtr.p->lh3PageIndexBits = 0;
- tablePtr.p->lh3DistrBits = 0;
- tablePtr.p->lh3PageBits = 6;
- */
- tablePtr.p->kValue = 6;
- tablePtr.p->localKeyLen = 1;
- tablePtr.p->maxLoadFactor = 80;
- tablePtr.p->minLoadFactor = 70;
- tablePtr.p->noOfPrimkey = 1;
- tablePtr.p->tupKeyLength = 1;
- tablePtr.p->storedTable = true;
- tablePtr.p->tableType = DictTabInfo::UserTable;
- tablePtr.p->primaryTableId = RNIL;
- // volatile elements
- tablePtr.p->indexState = TableRecord::IS_UNDEFINED;
- tablePtr.p->insertTriggerId = RNIL;
- tablePtr.p->updateTriggerId = RNIL;
- tablePtr.p->deleteTriggerId = RNIL;
- tablePtr.p->customTriggerId = RNIL;
- tablePtr.p->buildTriggerId = RNIL;
- tablePtr.p->indexLocal = 0;
-}//Dbdict::initialiseTableRecord()
-
-void Dbdict::initTriggerRecords()
-{
- TriggerRecordPtr triggerPtr;
- while (1) {
- jam();
- refresh_watch_dog();
- c_triggerRecordPool.seize(triggerPtr);
- if (triggerPtr.i == RNIL) {
- jam();
- break;
- }//if
- initialiseTriggerRecord(triggerPtr);
- }//while
-}
-
-void Dbdict::initialiseTriggerRecord(TriggerRecordPtr triggerPtr)
-{
- triggerPtr.p->triggerState = TriggerRecord::TS_NOT_DEFINED;
- triggerPtr.p->triggerLocal = 0;
- memset(triggerPtr.p->triggerName, 0, sizeof(triggerPtr.p->triggerName));
- triggerPtr.p->triggerId = RNIL;
- triggerPtr.p->tableId = RNIL;
- triggerPtr.p->triggerType = (TriggerType::Value)~0;
- triggerPtr.p->triggerActionTime = (TriggerActionTime::Value)~0;
- triggerPtr.p->triggerEvent = (TriggerEvent::Value)~0;
- triggerPtr.p->monitorReplicas = false;
- triggerPtr.p->monitorAllAttributes = false;
- triggerPtr.p->attributeMask.clear();
- triggerPtr.p->indexId = RNIL;
-}
-
-Uint32 Dbdict::getFsConnRecord()
-{
- FsConnectRecordPtr fsPtr;
- c_fsConnectRecordPool.seize(fsPtr);
- ndbrequire(fsPtr.i != RNIL);
- fsPtr.p->filePtr = (Uint32)-1;
- fsPtr.p->ownerPtr = RNIL;
- fsPtr.p->fsState = FsConnectRecord::IDLE;
- return fsPtr.i;
-}//Dbdict::getFsConnRecord()
-
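-/* ---------------------------------------------------------------- */
-// Scan the table record pool for a free (NOT_DEFINED) slot, starting
-// at primaryTableId + 1 when a primary table id is given so that the
-// returned id lies above the base table's. With HAVE_TABLE_REORG a
-// second free slot is reserved as well and the two records are
-// cross-linked through secondTable.
-/* ---------------------------------------------------------------- */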
-Uint32 Dbdict::getFreeTableRecord(Uint32 primaryTableId)
-{
- Uint32 minId = (primaryTableId == RNIL ? 0 : primaryTableId + 1);
- TableRecordPtr tablePtr;
- TableRecordPtr firstTablePtr;
- bool firstFound = false;
- Uint32 tabSize = c_tableRecordPool.getSize();
- for (tablePtr.i = minId; tablePtr.i < tabSize ; tablePtr.i++) {
- jam();
- c_tableRecordPool.getPtr(tablePtr);
- if (tablePtr.p->tabState == TableRecord::NOT_DEFINED) {
- jam();
- initialiseTableRecord(tablePtr);
- tablePtr.p->tabState = TableRecord::DEFINING;
- firstFound = true;
- firstTablePtr.i = tablePtr.i;
- firstTablePtr.p = tablePtr.p;
- break;
- }//if
- }//for
- if (!firstFound) {
- jam();
- return RNIL;
- }//if
-#ifdef HAVE_TABLE_REORG
- bool secondFound = false;
- for (tablePtr.i = firstTablePtr.i + 1; tablePtr.i < tabSize ; tablePtr.i++) {
- jam();
- c_tableRecordPool.getPtr(tablePtr);
- if (tablePtr.p->tabState == TableRecord::NOT_DEFINED) {
- jam();
- initialiseTableRecord(tablePtr);
- tablePtr.p->tabState = TableRecord::REORG_TABLE_PREPARED;
- tablePtr.p->secondTable = firstTablePtr.i;
- firstTablePtr.p->secondTable = tablePtr.i;
- secondFound = true;
- break;
- }//if
- }//for
- if (!secondFound) {
- jam();
- firstTablePtr.p->tabState = TableRecord::NOT_DEFINED;
- return RNIL;
- }//if
-#endif
- return firstTablePtr.i;
-}//Dbdict::getFreeTableRecord()
-
-Uint32 Dbdict::getFreeTriggerRecord()
-{
- const Uint32 size = c_triggerRecordPool.getSize();
- TriggerRecordPtr triggerPtr;
- for (triggerPtr.i = 0; triggerPtr.i < size; triggerPtr.i++) {
- jam();
- c_triggerRecordPool.getPtr(triggerPtr);
- if (triggerPtr.p->triggerState == TriggerRecord::TS_NOT_DEFINED) {
- jam();
- initialiseTriggerRecord(triggerPtr);
- return triggerPtr.i;
- }
- }
- return RNIL;
-}
-
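-/* ---------------------------------------------------------------- */
-// Seize an attribute record, give it default contents and append it
-// to the table's singly linked attribute list (firstAttribute /
-// lastAttribute, linked through nextAttrInTable). Returns false if
-// the attribute record pool is exhausted.
-/* ---------------------------------------------------------------- */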
-bool
-Dbdict::getNewAttributeRecord(TableRecordPtr tablePtr,
- AttributeRecordPtr & attrPtr)
-{
- c_attributeRecordPool.seize(attrPtr);
- if(attrPtr.i == RNIL){
- return false;
- }
-
- memset(attrPtr.p->attributeName, 0, sizeof(attrPtr.p->attributeName));
- attrPtr.p->attributeDescriptor = 0x00012255; //Default value
- attrPtr.p->attributeId = ZNIL;
- attrPtr.p->nextAttrInTable = RNIL;
- attrPtr.p->tupleKey = 0;
- memset(attrPtr.p->defaultValue, 0, sizeof(attrPtr.p->defaultValue));
-
- /* ---------------------------------------------------------------- */
- // A free attribute record has been acquired. We will now link it
- // to the table record.
- /* ---------------------------------------------------------------- */
- if (tablePtr.p->lastAttribute == RNIL) {
- jam();
- tablePtr.p->firstAttribute = attrPtr.i;
- } else {
- jam();
- AttributeRecordPtr lastAttrPtr;
- c_attributeRecordPool.getPtr(lastAttrPtr, tablePtr.p->lastAttribute);
- lastAttrPtr.p->nextAttrInTable = attrPtr.i;
- }//if
- tablePtr.p->lastAttribute = attrPtr.i;
- return true;
-}//Dbdict::getNewAttributeRecord()
-
-/* **************************************************************** */
-/* ---------------------------------------------------------------- */
-/* MODULE: START/RESTART HANDLING ------------------------ */
-/* ---------------------------------------------------------------- */
-/* */
-/* This module contains the code that is common for all */
-/* start/restart types. */
-/* ---------------------------------------------------------------- */
-/* **************************************************************** */
-
-/* ---------------------------------------------------------------- */
-// This is sent as the first signal during start/restart.
-/* ---------------------------------------------------------------- */
-void Dbdict::execSTTOR(Signal* signal)
-{
- jamEntry();
- c_startPhase = signal->theData[1];
- switch (c_startPhase) {
- case 1:
- break;
- case 3:
- c_restartType = signal->theData[7]; /* valid if 3 */
- ndbrequire(c_restartType == NodeState::ST_INITIAL_START ||
- c_restartType == NodeState::ST_SYSTEM_RESTART ||
- c_restartType == NodeState::ST_INITIAL_NODE_RESTART ||
- c_restartType == NodeState::ST_NODE_RESTART);
- break;
- }
- sendSTTORRY(signal);
-}//execSTTOR()
-
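-/* ---------------------------------------------------------------- */
-// STTORRY reports back to NDBCNTR the start phases in which DICT
-// wants to be invoked again (phase 1 and phase 3, where phase 3 also
-// delivers the type of start); the list is terminated by
-// ZNOMOREPHASES.
-/* ---------------------------------------------------------------- */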
-void Dbdict::sendSTTORRY(Signal* signal)
-{
- signal->theData[0] = 0; /* garbage SIGNAL KEY */
- signal->theData[1] = 0; /* garbage SIGNAL VERSION NUMBER */
- signal->theData[2] = 0; /* garbage */
- signal->theData[3] = 1; /* first wanted start phase */
- signal->theData[4] = 3; /* get type of start */
- signal->theData[5] = ZNOMOREPHASES;
- sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 6, JBB);
-}
-
-/* ---------------------------------------------------------------- */
-// We receive information about sizes of records.
-/* ---------------------------------------------------------------- */
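-/* ---------------------------------------------------------------- */
-// The record pool sizes are taken from the DICT configuration
-// parameters, two in-memory copies of the schema file are prepared,
-// and a BAT with two variables is registered so that the file system
-// interface can address the schema pages and the table definition
-// pages.
-/* ---------------------------------------------------------------- */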
-void Dbdict::execREAD_CONFIG_REQ(Signal* signal)
-{
- const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
- Uint32 ref = req->senderRef;
- Uint32 senderData = req->senderData;
- ndbrequire(req->noOfParameters == 0);
-
- jamEntry();
-
- const ndb_mgm_configuration_iterator * p =
- theConfiguration.getOwnConfigIterator();
- ndbrequire(p != 0);
-
- Uint32 attributesize, tablerecSize;
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_ATTRIBUTE,&attributesize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &tablerecSize));
-
- c_attributeRecordPool.setSize(attributesize);
- c_attributeRecordHash.setSize(64);
- c_fsConnectRecordPool.setSize(ZFS_CONNECT_SIZE);
- c_nodes.setSize(MAX_NODES);
- c_pageRecordArray.setSize(ZNUMBER_OF_PAGES);
- c_schemaPageRecordArray.setSize(2 * NDB_SF_MAX_PAGES);
- c_tableRecordPool.setSize(tablerecSize);
- c_tableRecordHash.setSize(tablerecSize);
- c_triggerRecordPool.setSize(c_maxNoOfTriggers);
- c_triggerRecordHash.setSize(c_maxNoOfTriggers);
- c_opRecordPool.setSize(256); // XXX need config params
- c_opCreateTable.setSize(8);
- c_opDropTable.setSize(8);
- c_opCreateIndex.setSize(8);
- c_opCreateEvent.setSize(8);
- c_opSubEvent.setSize(8);
- c_opDropEvent.setSize(8);
- c_opSignalUtil.setSize(8);
- c_opDropIndex.setSize(8);
- c_opAlterIndex.setSize(8);
- c_opBuildIndex.setSize(8);
- c_opCreateTrigger.setSize(8);
- c_opDropTrigger.setSize(8);
- c_opAlterTrigger.setSize(8);
-
- // Initialize schema file copies
- c_schemaFile[0].schemaPage =
- (SchemaFile*)c_schemaPageRecordArray.getPtr(0 * NDB_SF_MAX_PAGES);
- c_schemaFile[0].noOfPages = 0;
- c_schemaFile[1].schemaPage =
- (SchemaFile*)c_schemaPageRecordArray.getPtr(1 * NDB_SF_MAX_PAGES);
- c_schemaFile[1].noOfPages = 0;
-
- // Initialize BAT for interface to file system
- NewVARIABLE* bat = allocateBat(2);
- bat[0].WA = &c_schemaPageRecordArray.getPtr(0)->word[0];
- bat[0].nrr = 2 * NDB_SF_MAX_PAGES;
- bat[0].ClusterSize = NDB_SF_PAGE_SIZE;
- bat[0].bits.q = NDB_SF_PAGE_SIZE_IN_WORDS_LOG2;
- bat[0].bits.v = 5; // 32 bits per element
- bat[1].WA = &c_pageRecordArray.getPtr(0)->word[0];
- bat[1].nrr = ZNUMBER_OF_PAGES;
- bat[1].ClusterSize = ZSIZE_OF_PAGES_IN_WORDS * 4;
- bat[1].bits.q = ZLOG_SIZE_OF_PAGES_IN_WORDS; // 2**13 = 8192 elements
- bat[1].bits.v = 5; // 32 bits per element
-
- initCommonData();
- initRecords();
-
- ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = senderData;
- sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
- ReadConfigConf::SignalLength, JBB);
-}//execREAD_CONFIG_REQ()
-
-/* ---------------------------------------------------------------- */
-// Start phase signals sent by CNTR. We reply with NDB_STTORRY when
-// we have completed this phase.
-/* ---------------------------------------------------------------- */
-void Dbdict::execNDB_STTOR(Signal* signal)
-{
- jamEntry();
- c_startPhase = signal->theData[2];
- const Uint32 restartType = signal->theData[3];
- if (restartType == NodeState::ST_INITIAL_START) {
- jam();
- c_initialStart = true;
- } else if (restartType == NodeState::ST_SYSTEM_RESTART) {
- jam();
- c_systemRestart = true;
- } else if (restartType == NodeState::ST_INITIAL_NODE_RESTART) {
- jam();
- c_initialNodeRestart = true;
- } else if (restartType == NodeState::ST_NODE_RESTART) {
- jam();
- c_nodeRestart = true;
- } else {
- ndbrequire(false);
- }//if
- switch (c_startPhase) {
- case 1:
- jam();
- initSchemaFile(signal);
- break;
- case 3:
- jam();
- signal->theData[0] = reference();
- sendSignal(NDBCNTR_REF, GSN_READ_NODESREQ, signal, 1, JBB);
- break;
- case 6:
- jam();
- c_initialStart = false;
- c_systemRestart = false;
- c_initialNodeRestart = false;
- c_nodeRestart = false;
- sendNDB_STTORRY(signal);
- break;
- case 7:
- // uses c_restartType
- if(restartType == NodeState::ST_SYSTEM_RESTART &&
- c_masterNodeId == getOwnNodeId()){
- rebuildIndexes(signal, 0);
- return;
- }
- sendNDB_STTORRY(signal);
- break;
- default:
- jam();
- sendNDB_STTORRY(signal);
- break;
- }//switch
-}//execNDB_STTOR()
-
-void Dbdict::sendNDB_STTORRY(Signal* signal)
-{
- signal->theData[0] = reference();
- sendSignal(NDBCNTR_REF, GSN_NDB_STTORRY, signal, 1, JBB);
- return;
-}//sendNDB_STTORRY()
-
-/* ---------------------------------------------------------------- */
-// We receive information about which nodes are up and down.
-/* ---------------------------------------------------------------- */
-void Dbdict::execREAD_NODESCONF(Signal* signal)
-{
- jamEntry();
-
- ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
- c_numberNode = readNodes->noOfNodes;
- c_masterNodeId = readNodes->masterNodeId;
-
- c_noNodesFailed = 0;
- c_aliveNodes.clear();
- for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
- jam();
- NodeRecordPtr nodePtr;
- c_nodes.getPtr(nodePtr, i);
-
- if (NodeBitmask::get(readNodes->allNodes, i)) {
- jam();
- nodePtr.p->nodeState = NodeRecord::NDB_NODE_ALIVE;
- if (NodeBitmask::get(readNodes->inactiveNodes, i)) {
- jam();
- /**-------------------------------------------------------------------
- *
- * THIS NODE IS DEFINED IN THE CLUSTER BUT IS NOT ALIVE CURRENTLY.
- * WE ADD THE NODE TO THE SET OF FAILED NODES AND ALSO SET THE
- * BLOCKSTATE TO BUSY TO AVOID ADDING TABLES WHILE NOT ALL NODES ARE
- * ALIVE.
- *------------------------------------------------------------------*/
- nodePtr.p->nodeState = NodeRecord::NDB_NODE_DEAD;
- c_noNodesFailed++;
- } else {
- c_aliveNodes.set(i);
- }
- }//if
- }//for
- sendNDB_STTORRY(signal);
-}//execREAD_NODESCONF()
-
-/* ---------------------------------------------------------------- */
-// HOT_SPAREREP informs DBDICT about which nodes have become
-// hot spare nodes.
-/* ---------------------------------------------------------------- */
-void Dbdict::execHOT_SPAREREP(Signal* signal)
-{
- Uint32 hotSpareNodes = 0;
- jamEntry();
- HotSpareRep * const hotSpare = (HotSpareRep*)&signal->theData[0];
- for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
- if (NodeBitmask::get(hotSpare->theHotSpareNodes, i)) {
- NodeRecordPtr nodePtr;
- c_nodes.getPtr(nodePtr, i);
- nodePtr.p->hotSpare = true;
- hotSpareNodes++;
- }//if
- }//for
- ndbrequire(hotSpareNodes == hotSpare->noHotSpareNodes);
- c_noHotSpareNodes = hotSpareNodes;
- return;
-}//execHOT_SPAREREP()
-
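-/* ---------------------------------------------------------------- */
-// Prepare the schema file for the restart. On an initial start or an
-// initial node restart a fresh schema file is written to disk; on a
-// system restart or node restart the header of the old schema file
-// is read first.
-/* ---------------------------------------------------------------- */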
-void Dbdict::initSchemaFile(Signal* signal)
-{
- XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
- xsf->noOfPages = (c_tableRecordPool.getSize() + NDB_SF_PAGE_ENTRIES - 1)
- / NDB_SF_PAGE_ENTRIES;
- initSchemaFile(xsf, 0, xsf->noOfPages, true);
- // init alt copy too for INR
- XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
- oldxsf->noOfPages = xsf->noOfPages;
- memcpy(&oldxsf->schemaPage[0], &xsf->schemaPage[0], xsf->schemaPage[0].FileSize);
-
- if (c_initialStart || c_initialNodeRestart) {
- jam();
- ndbrequire(c_writeSchemaRecord.inUse == false);
- c_writeSchemaRecord.inUse = true;
- c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
- c_writeSchemaRecord.newFile = true;
- c_writeSchemaRecord.firstPage = 0;
- c_writeSchemaRecord.noOfPages = xsf->noOfPages;
-
- c_writeSchemaRecord.m_callback.m_callbackFunction =
- safe_cast(&Dbdict::initSchemaFile_conf);
-
- startWriteSchemaFile(signal);
- } else if (c_systemRestart || c_nodeRestart) {
- jam();
- ndbrequire(c_readSchemaRecord.schemaReadState == ReadSchemaRecord::IDLE);
- c_readSchemaRecord.pageId = c_schemaRecord.oldSchemaPage;
- c_readSchemaRecord.firstPage = 0;
- c_readSchemaRecord.noOfPages = 1;
- c_readSchemaRecord.schemaReadState = ReadSchemaRecord::INITIAL_READ_HEAD;
- startReadSchemaFile(signal);
- } else {
- ndbrequire(false);
- }//if
-}//Dbdict::initSchemaFile()
-
-void
-Dbdict::initSchemaFile_conf(Signal* signal, Uint32 callbackData, Uint32 rv){
- jam();
- sendNDB_STTORRY(signal);
-}
-
-void
-Dbdict::activateIndexes(Signal* signal, Uint32 i)
-{
- AlterIndxReq* req = (AlterIndxReq*)signal->getDataPtrSend();
- TableRecordPtr tablePtr;
- for (; i < c_tableRecordPool.getSize(); i++) {
- tablePtr.i = i;
- c_tableRecordPool.getPtr(tablePtr);
- if (tablePtr.p->tabState != TableRecord::DEFINED)
- continue;
- if (! tablePtr.p->isIndex())
- continue;
- jam();
- req->setUserRef(reference());
- req->setConnectionPtr(i);
- req->setTableId(tablePtr.p->primaryTableId);
- req->setIndexId(tablePtr.i);
- req->setIndexVersion(tablePtr.p->tableVersion);
- req->setOnline(true);
- if (c_restartType == NodeState::ST_SYSTEM_RESTART) {
- if (c_masterNodeId != getOwnNodeId())
- continue;
- // index state is not stored in the schema file, so it is undefined here
- req->setRequestType(AlterIndxReq::RT_SYSTEMRESTART);
- req->addRequestFlag((Uint32)RequestFlag::RF_NOBUILD);
- }
- else if (
- c_restartType == NodeState::ST_NODE_RESTART ||
- c_restartType == NodeState::ST_INITIAL_NODE_RESTART) {
- // from master index must be online
- if (tablePtr.p->indexState != TableRecord::IS_ONLINE)
- continue;
- req->setRequestType(AlterIndxReq::RT_NODERESTART);
- // activate locally, rebuild not needed
- req->addRequestFlag((Uint32)RequestFlag::RF_LOCAL);
- req->addRequestFlag((Uint32)RequestFlag::RF_NOBUILD);
- } else {
- ndbrequire(false);
- }
- sendSignal(reference(), GSN_ALTER_INDX_REQ,
- signal, AlterIndxReq::SignalLength, JBB);
- return;
- }
- signal->theData[0] = reference();
- sendSignal(c_restartRecord.returnBlockRef, GSN_DICTSTARTCONF,
- signal, 1, JBB);
-}
-
-void
-Dbdict::rebuildIndexes(Signal* signal, Uint32 i){
- BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
-
- TableRecordPtr indexPtr;
- for (; i < c_tableRecordPool.getSize(); i++) {
- indexPtr.i = i;
- c_tableRecordPool.getPtr(indexPtr);
- if (indexPtr.p->tabState != TableRecord::DEFINED)
- continue;
- if (! indexPtr.p->isIndex())
- continue;
-
- jam();
-
- req->setUserRef(reference());
- req->setConnectionPtr(i);
- req->setRequestType(BuildIndxReq::RT_SYSTEMRESTART);
- req->setBuildId(0); // not used
- req->setBuildKey(0); // not used
- req->setIndexType(indexPtr.p->tableType);
- req->setIndexId(indexPtr.i);
- req->setTableId(indexPtr.p->primaryTableId);
- req->setParallelism(16);
-
- // index state is not stored in the schema file, so it is undefined here
- if (indexPtr.p->storedTable) {
- // rebuild not needed
- req->addRequestFlag((Uint32)RequestFlag::RF_NOBUILD);
- }
-
- // send
- sendSignal(reference(), GSN_BUILDINDXREQ,
- signal, BuildIndxReq::SignalLength, JBB);
- return;
- }
- sendNDB_STTORRY(signal);
-}
-
-
-/* **************************************************************** */
-/* ---------------------------------------------------------------- */
-/* MODULE: SYSTEM RESTART MODULE ------------------------- */
-/* ---------------------------------------------------------------- */
-/* */
-/* This module contains code specific for system restart */
-/* ---------------------------------------------------------------- */
-/* **************************************************************** */
-
-/* ---------------------------------------------------------------- */
-// DIH asks DICT to read in table data from disk during system
-// restart. DIH also asks DICT to send information about which
-// tables should be started as part of this system restart.
-// DICT will also activate the tables in TC as part of this process.
-/* ---------------------------------------------------------------- */
-void Dbdict::execDICTSTARTREQ(Signal* signal)
-{
- jamEntry();
- c_restartRecord.gciToRestart = signal->theData[0];
- c_restartRecord.returnBlockRef = signal->theData[1];
- if (c_nodeRestart || c_initialNodeRestart) {
- jam();
-
- CRASH_INSERTION(6000);
-
- BlockReference dictRef = calcDictBlockRef(c_masterNodeId);
- signal->theData[0] = getOwnNodeId();
- sendSignal(dictRef, GSN_GET_SCHEMA_INFOREQ, signal, 1, JBB);
- return;
- }
- ndbrequire(c_systemRestart);
- ndbrequire(c_masterNodeId == getOwnNodeId());
-
- c_schemaRecord.m_callback.m_callbackData = 0;
- c_schemaRecord.m_callback.m_callbackFunction =
- safe_cast(&Dbdict::masterRestart_checkSchemaStatusComplete);
-
- c_restartRecord.activeTable = 0;
- c_schemaRecord.schemaPage = c_schemaRecord.oldSchemaPage; // ugly
- checkSchemaStatus(signal);
-}//execDICTSTARTREQ()
-
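-/* ---------------------------------------------------------------- */
-// The master has checked its own schema file. The schema pages are
-// now distributed to all other alive DICT instances as a fragmented
-// SCHEMA_INFO signal, the old copy is copied into the new schema
-// file copy, and SCHEMA_INFOCONF is sent to ourselves so that the
-// master is counted in the SCHEMAINFO counter as well.
-/* ---------------------------------------------------------------- */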
-void
-Dbdict::masterRestart_checkSchemaStatusComplete(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode){
-
- c_schemaRecord.schemaPage = 0; // ugly
- XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
- ndbrequire(oldxsf->noOfPages != 0);
-
- LinearSectionPtr ptr[3];
- ptr[0].p = (Uint32*)&oldxsf->schemaPage[0];
- ptr[0].sz = oldxsf->noOfPages * NDB_SF_PAGE_SIZE_IN_WORDS;
-
- c_sendSchemaRecord.m_SCHEMAINFO_Counter = c_aliveNodes;
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
-
- rg.m_nodes.clear(getOwnNodeId());
- Callback c = { 0, 0 };
- sendFragmentedSignal(rg,
- GSN_SCHEMA_INFO,
- signal,
- 1, //SchemaInfo::SignalLength,
- JBB,
- ptr,
- 1,
- c);
-
- XSchemaFile * newxsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
- newxsf->noOfPages = oldxsf->noOfPages;
- memcpy(&newxsf->schemaPage[0], &oldxsf->schemaPage[0],
- oldxsf->noOfPages * NDB_SF_PAGE_SIZE);
-
- signal->theData[0] = getOwnNodeId();
- sendSignal(reference(), GSN_SCHEMA_INFOCONF, signal, 1, JBB);
-}
-
-void
-Dbdict::execGET_SCHEMA_INFOREQ(Signal* signal){
-
- const Uint32 ref = signal->getSendersBlockRef();
- //const Uint32 senderData = signal->theData[0];
-
- ndbrequire(c_sendSchemaRecord.inUse == false);
- c_sendSchemaRecord.inUse = true;
-
- LinearSectionPtr ptr[3];
-
- XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
- ndbrequire(xsf->noOfPages != 0);
-
- ptr[0].p = (Uint32*)&xsf->schemaPage[0];
- ptr[0].sz = xsf->noOfPages * NDB_SF_PAGE_SIZE_IN_WORDS;
-
- Callback c = { safe_cast(&Dbdict::sendSchemaComplete), 0 };
- sendFragmentedSignal(ref,
- GSN_SCHEMA_INFO,
- signal,
- 1, //GetSchemaInfoConf::SignalLength,
- JBB,
- ptr,
- 1,
- c);
-}//Dbdict::execGET_SCHEMA_INFOREQ()
-
-void
-Dbdict::sendSchemaComplete(Signal * signal,
- Uint32 callbackData,
- Uint32 returnCode){
- ndbrequire(c_sendSchemaRecord.inUse == true);
- c_sendSchemaRecord.inUse = false;
-
-}
-
-
-/* ---------------------------------------------------------------- */
-// We receive the schema info from the master as part of all restarts
-// except the initial start, where no tables exist.
-/* ---------------------------------------------------------------- */
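-/* ---------------------------------------------------------------- */
-// The received pages become the new schema file copy. They are
-// converted to the 5.0.6 format if necessary, checksum validated and
-// resized to match the old copy before checkSchemaStatus compares
-// the two copies entry by entry.
-/* ---------------------------------------------------------------- */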
-void Dbdict::execSCHEMA_INFO(Signal* signal)
-{
- jamEntry();
- if(!assembleFragments(signal)){
- jam();
- return;
- }
-
- if(getNodeState().getNodeRestartInProgress()){
- CRASH_INSERTION(6001);
- }
-
- SegmentedSectionPtr schemaDataPtr;
- signal->getSection(schemaDataPtr, 0);
-
- XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
- ndbrequire(schemaDataPtr.sz % NDB_SF_PAGE_SIZE_IN_WORDS == 0);
- xsf->noOfPages = schemaDataPtr.sz / NDB_SF_PAGE_SIZE_IN_WORDS;
- copy((Uint32*)&xsf->schemaPage[0], schemaDataPtr);
- releaseSections(signal);
-
- SchemaFile * sf0 = &xsf->schemaPage[0];
- if (sf0->NdbVersion < NDB_SF_VERSION_5_0_6) {
- bool ok = convertSchemaFileTo_5_0_6(xsf);
- ndbrequire(ok);
- }
-
- validateChecksum(xsf);
-
- XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
- resizeSchemaFile(xsf, oldxsf->noOfPages);
-
- ndbrequire(signal->getSendersBlockRef() != reference());
-
- /* ---------------------------------------------------------------- */
- // Synchronise our view of the data with the other nodes in the cluster.
- // This is an important part of restart handling where we handle
- // cases where a table has been added but only partially, where
- // tables have been deleted but the deletion has not yet completed,
- // and other scenarios needing synchronisation.
- /* ---------------------------------------------------------------- */
- c_schemaRecord.m_callback.m_callbackData = 0;
- c_schemaRecord.m_callback.m_callbackFunction =
- safe_cast(&Dbdict::restart_checkSchemaStatusComplete);
- c_restartRecord.activeTable = 0;
- checkSchemaStatus(signal);
-}//execSCHEMA_INFO()
-
-void
-Dbdict::restart_checkSchemaStatusComplete(Signal * signal,
- Uint32 callbackData,
- Uint32 returnCode){
-
- ndbrequire(c_writeSchemaRecord.inUse == false);
- c_writeSchemaRecord.inUse = true;
- XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
- c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
- c_writeSchemaRecord.newFile = true;
- c_writeSchemaRecord.firstPage = 0;
- c_writeSchemaRecord.noOfPages = xsf->noOfPages;
- c_writeSchemaRecord.m_callback.m_callbackData = 0;
- c_writeSchemaRecord.m_callback.m_callbackFunction =
- safe_cast(&Dbdict::restart_writeSchemaConf);
-
- startWriteSchemaFile(signal);
-}
-
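-/* ---------------------------------------------------------------- */
-// The schema file received during the restart has been written to
-// disk. A system restart participant now reports SCHEMA_INFOCONF
-// back to the master, while a node restart or initial node restart
-// continues directly with activating the indexes locally.
-/* ---------------------------------------------------------------- */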
-void
-Dbdict::restart_writeSchemaConf(Signal * signal,
- Uint32 callbackData,
- Uint32 returnCode){
-
- if(c_systemRestart){
- jam();
- signal->theData[0] = getOwnNodeId();
- sendSignal(calcDictBlockRef(c_masterNodeId), GSN_SCHEMA_INFOCONF,
- signal, 1, JBB);
- return;
- }
-
- ndbrequire(c_nodeRestart || c_initialNodeRestart);
- c_blockState = BS_IDLE;
- activateIndexes(signal, 0);
- return;
-}
-
-void Dbdict::execSCHEMA_INFOCONF(Signal* signal)
-{
- jamEntry();
- ndbrequire(signal->getNoOfSections() == 0);
-
-/* ---------------------------------------------------------------- */
-// This signal is received in the master as part of system restart
-// from all nodes (including the master) after they have synchronised
-// their data with the master node's schema information.
-/* ---------------------------------------------------------------- */
- const Uint32 nodeId = signal->theData[0];
- c_sendSchemaRecord.m_SCHEMAINFO_Counter.clearWaitingFor(nodeId);
-
- if (!c_sendSchemaRecord.m_SCHEMAINFO_Counter.done()){
- jam();
- return;
- }//if
- activateIndexes(signal, 0);
-}//execSCHEMA_INFOCONF()
-
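-/* ---------------------------------------------------------------- */
-// Walk through all table entries and compare the current schema file
-// copy with the old copy read from our own disk. Tables with
-// partially completed add or drop operations are dropped again,
-// tables committed only in the current copy are fetched from the
-// master, and tables committed in both copies are read back from the
-// local table file (except that on a node restart indexes are
-// fetched from the master, since index state is not stored on file).
-/* ---------------------------------------------------------------- */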
-void Dbdict::checkSchemaStatus(Signal* signal)
-{
- XSchemaFile * newxsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
- XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
- ndbrequire(newxsf->noOfPages == oldxsf->noOfPages);
- const Uint32 noOfEntries = newxsf->noOfPages * NDB_SF_PAGE_ENTRIES;
-
- for (; c_restartRecord.activeTable < noOfEntries;
- c_restartRecord.activeTable++) {
- jam();
-
- Uint32 tableId = c_restartRecord.activeTable;
- SchemaFile::TableEntry *newEntry = getTableEntry(newxsf, tableId);
- SchemaFile::TableEntry *oldEntry = getTableEntry(oldxsf, tableId);
- SchemaFile::TableState schemaState =
- (SchemaFile::TableState)newEntry->m_tableState;
- SchemaFile::TableState oldSchemaState =
- (SchemaFile::TableState)oldEntry->m_tableState;
-
- if (c_restartRecord.activeTable >= c_tableRecordPool.getSize()) {
- jam();
- ndbrequire(schemaState == SchemaFile::INIT);
- ndbrequire(oldSchemaState == SchemaFile::INIT);
- continue;
- }//if
-
- switch(schemaState){
- case SchemaFile::INIT:{
- jam();
- bool ok = false;
- switch(oldSchemaState) {
- case SchemaFile::INIT:
- jam();
- case SchemaFile::DROP_TABLE_COMMITTED:
- jam();
- ok = true;
- jam();
- break;
-
- case SchemaFile::ADD_STARTED:
- jam();
- case SchemaFile::TABLE_ADD_COMMITTED:
- jam();
- case SchemaFile::DROP_TABLE_STARTED:
- jam();
- case SchemaFile::ALTER_TABLE_COMMITTED:
- jam();
- ok = true;
- jam();
- newEntry->m_tableState = SchemaFile::INIT;
- restartDropTab(signal, tableId);
- return;
- }//switch
- ndbrequire(ok);
- break;
- }
- case SchemaFile::ADD_STARTED:{
- jam();
- bool ok = false;
- switch(oldSchemaState) {
- case SchemaFile::INIT:
- jam();
- case SchemaFile::DROP_TABLE_COMMITTED:
- jam();
- ok = true;
- break;
- case SchemaFile::ADD_STARTED:
- jam();
- case SchemaFile::DROP_TABLE_STARTED:
- jam();
- case SchemaFile::TABLE_ADD_COMMITTED:
- jam();
- case SchemaFile::ALTER_TABLE_COMMITTED:
- jam();
- ok = true;
- //------------------------------------------------------------------
- // Add Table was started but not completed. Will be dropped in all
- // nodes. Update schema information (restore table version).
- //------------------------------------------------------------------
- newEntry->m_tableState = SchemaFile::INIT;
- restartDropTab(signal, tableId);
- return;
- }
- ndbrequire(ok);
- break;
- }
- case SchemaFile::TABLE_ADD_COMMITTED:{
- jam();
- bool ok = false;
- switch(oldSchemaState) {
- case SchemaFile::INIT:
- jam();
- case SchemaFile::ADD_STARTED:
- jam();
- case SchemaFile::DROP_TABLE_STARTED:
- jam();
- case SchemaFile::DROP_TABLE_COMMITTED:
- jam();
- ok = true;
- //------------------------------------------------------------------
- // Table was added in the master node but not in our node. We can
- // retrieve the table definition from the master.
- //------------------------------------------------------------------
- restartCreateTab(signal, tableId, oldEntry, false);
- return;
- break;
- case SchemaFile::TABLE_ADD_COMMITTED:
- jam();
- case SchemaFile::ALTER_TABLE_COMMITTED:
- jam();
- ok = true;
- //------------------------------------------------------------------
- // Table was added in both our node and the master node. We can
- // retrieve the table definition from our own disk.
- //------------------------------------------------------------------
- if(* newEntry == * oldEntry){
- jam();
-
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, tableId);
- tablePtr.p->tableVersion = oldEntry->m_tableVersion;
- tablePtr.p->tableType = (DictTabInfo::TableType)oldEntry->m_tableType;
-
- // On NR get index from master because index state is not on file
- const bool file = c_systemRestart || tablePtr.p->isTable();
- restartCreateTab(signal, tableId, oldEntry, file);
-
- return;
- } else {
- //------------------------------------------------------------------
- // Must be a new version of the table if anything differs. Both table
- // version and global checkpoint must be different.
- // This should not happen for the master node. This can happen after
- // drop table followed by add table or after change table.
- // Not supported in this version.
- //------------------------------------------------------------------
- ndbrequire(c_masterNodeId != getOwnNodeId());
- ndbrequire(newEntry->m_tableVersion != oldEntry->m_tableVersion);
- jam();
-
- restartCreateTab(signal, tableId, oldEntry, false);
- return;
- }//if
- }
- ndbrequire(ok);
- break;
- }
- case SchemaFile::DROP_TABLE_STARTED:
- jam();
- case SchemaFile::DROP_TABLE_COMMITTED:{
- jam();
- bool ok = false;
- switch(oldSchemaState){
- case SchemaFile::INIT:
- jam();
- case SchemaFile::DROP_TABLE_COMMITTED:
- jam();
- ok = true;
- break;
- case SchemaFile::ADD_STARTED:
- jam();
- case SchemaFile::TABLE_ADD_COMMITTED:
- jam();
- case SchemaFile::DROP_TABLE_STARTED:
- jam();
- case SchemaFile::ALTER_TABLE_COMMITTED:
- jam();
- newEntry->m_tableState = SchemaFile::INIT;
- restartDropTab(signal, tableId);
- return;
- }
- ndbrequire(ok);
- break;
- }
- case SchemaFile::ALTER_TABLE_COMMITTED: {
- jam();
- bool ok = false;
- switch(oldSchemaState) {
- case SchemaFile::INIT:
- jam();
- case SchemaFile::ADD_STARTED:
- jam();
- case SchemaFile::DROP_TABLE_STARTED:
- jam();
- case SchemaFile::DROP_TABLE_COMMITTED:
- jam();
- case SchemaFile::TABLE_ADD_COMMITTED:
- jam();
- ok = true;
- //------------------------------------------------------------------
- // Table was altered in the master node but not in our node. We can
- // retrieve the altered table definition from the master.
- //------------------------------------------------------------------
- restartCreateTab(signal, tableId, oldEntry, false);
- return;
- break;
- case SchemaFile::ALTER_TABLE_COMMITTED:
- jam();
- ok = true;
-
- //------------------------------------------------------------------
- // Table was altered in both our node and the master node. We can
- // retrieve the table definition from our own disk.
- //------------------------------------------------------------------
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, tableId);
- tablePtr.p->tableVersion = oldEntry->m_tableVersion;
- tablePtr.p->tableType = (DictTabInfo::TableType)oldEntry->m_tableType;
-
- // On NR get index from master because index state is not on file
- const bool file = c_systemRestart || tablePtr.p->isTable();
- restartCreateTab(signal, tableId, oldEntry, file);
-
- return;
- }
- ndbrequire(ok);
- break;
- }
- }
- }
-
- execute(signal, c_schemaRecord.m_callback, 0);
-}//checkSchemaStatus()
-
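-/* ---------------------------------------------------------------- */
-// Recreate one table during restart. The table definition is either
-// read back from the local table file or, when file is false,
-// fetched from the master with GET_TABINFOREQ; in both cases it is
-// rewritten to disk, added in DIH and finally activated before the
-// schema scan continues with the next table.
-/* ---------------------------------------------------------------- */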
-void
-Dbdict::restartCreateTab(Signal* signal, Uint32 tableId,
- const SchemaFile::TableEntry * te, bool file){
- jam();
-
- CreateTableRecordPtr createTabPtr;
- c_opCreateTable.seize(createTabPtr);
- ndbrequire(!createTabPtr.isNull());
-
- createTabPtr.p->key = ++c_opRecordSequence;
- c_opCreateTable.add(createTabPtr);
-
- createTabPtr.p->m_errorCode = 0;
- createTabPtr.p->m_tablePtrI = tableId;
- createTabPtr.p->m_coordinatorRef = reference();
- createTabPtr.p->m_senderRef = 0;
- createTabPtr.p->m_senderData = RNIL;
- createTabPtr.p->m_tabInfoPtrI = RNIL;
- createTabPtr.p->m_dihAddFragPtr = RNIL;
-
- if(file && !ERROR_INSERTED(6002)){
- jam();
-
- c_readTableRecord.noOfPages =
- DIV(te->m_info_words + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
- c_readTableRecord.pageId = 0;
- c_readTableRecord.m_callback.m_callbackData = createTabPtr.p->key;
- c_readTableRecord.m_callback.m_callbackFunction =
- safe_cast(&Dbdict::restartCreateTab_readTableConf);
-
- startReadTableFile(signal, tableId);
- return;
- } else {
-
- ndbrequire(c_masterNodeId != getOwnNodeId());
-
- /**
- * Get from master
- */
- GetTabInfoReq * const req = (GetTabInfoReq *)&signal->theData[0];
- req->senderRef = reference();
- req->senderData = createTabPtr.p->key;
- req->requestType = GetTabInfoReq::RequestById |
- GetTabInfoReq::LongSignalConf;
- req->tableId = tableId;
- sendSignal(calcDictBlockRef(c_masterNodeId), GSN_GET_TABINFOREQ, signal,
- GetTabInfoReq::SignalLength, JBB);
-
- if(ERROR_INSERTED(6002)){
- NdbSleep_MilliSleep(10);
- CRASH_INSERTION(6002);
- }
- }
-}
-
-void
-Dbdict::restartCreateTab_readTableConf(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode){
- jam();
-
- PageRecordPtr pageRecPtr;
- c_pageRecordArray.getPtr(pageRecPtr, c_readTableRecord.pageId);
-
- ParseDictTabInfoRecord parseRecord;
- parseRecord.requestType = DictTabInfo::GetTabInfoConf;
- parseRecord.errorCode = 0;
-
- Uint32 sz = c_readTableRecord.noOfPages * ZSIZE_OF_PAGES_IN_WORDS;
- SimplePropertiesLinearReader r(&pageRecPtr.p->word[0], sz);
- handleTabInfoInit(r, &parseRecord);
- ndbrequire(parseRecord.errorCode == 0);
-
- /* ---------------------------------------------------------------- */
- // We have read the table description from disk as part of system restart.
- // We will also write it back again to ensure that both copies are ok.
- /* ---------------------------------------------------------------- */
- ndbrequire(c_writeTableRecord.tableWriteState == WriteTableRecord::IDLE);
- c_writeTableRecord.noOfPages = c_readTableRecord.noOfPages;
- c_writeTableRecord.pageId = c_readTableRecord.pageId;
- c_writeTableRecord.tableWriteState = WriteTableRecord::TWR_CALLBACK;
- c_writeTableRecord.m_callback.m_callbackData = callbackData;
- c_writeTableRecord.m_callback.m_callbackFunction =
- safe_cast(&Dbdict::restartCreateTab_writeTableConf);
- startWriteTableFile(signal, c_readTableRecord.tableId);
-}
-
-void
-Dbdict::execGET_TABINFO_CONF(Signal* signal){
- jamEntry();
-
- if(!assembleFragments(signal)){
- jam();
- return;
- }
-
- GetTabInfoConf * const conf = (GetTabInfoConf*)signal->getDataPtr();
-
- const Uint32 tableId = conf->tableId;
- const Uint32 senderData = conf->senderData;
-
- SegmentedSectionPtr tabInfoPtr;
- signal->getSection(tabInfoPtr, GetTabInfoConf::DICT_TAB_INFO);
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, senderData));
- ndbrequire(!createTabPtr.isNull());
- ndbrequire(createTabPtr.p->m_tablePtrI == tableId);
-
- /**
- * Put data into table record
- */
- ParseDictTabInfoRecord parseRecord;
- parseRecord.requestType = DictTabInfo::GetTabInfoConf;
- parseRecord.errorCode = 0;
-
- SimplePropertiesSectionReader r(tabInfoPtr, getSectionSegmentPool());
- handleTabInfoInit(r, &parseRecord);
- ndbrequire(parseRecord.errorCode == 0);
-
- Callback callback;
- callback.m_callbackData = createTabPtr.p->key;
- callback.m_callbackFunction =
- safe_cast(&Dbdict::restartCreateTab_writeTableConf);
-
- signal->header.m_noOfSections = 0;
- writeTableFile(signal, createTabPtr.p->m_tablePtrI, tabInfoPtr, &callback);
- signal->setSection(tabInfoPtr, 0);
- releaseSections(signal);
-}
-
-void
-Dbdict::restartCreateTab_writeTableConf(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode){
- jam();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
-
- Callback callback;
- callback.m_callbackData = callbackData;
- callback.m_callbackFunction =
- safe_cast(&Dbdict::restartCreateTab_dihComplete);
-
- SegmentedSectionPtr fragDataPtr;
- fragDataPtr.sz = 0;
- fragDataPtr.setNull();
- createTab_dih(signal, createTabPtr, fragDataPtr, &callback);
-}
-
-void
-Dbdict::restartCreateTab_dihComplete(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode){
- jam();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
-
- //@todo check error
- ndbrequire(createTabPtr.p->m_errorCode == 0);
-
- Callback callback;
- callback.m_callbackData = callbackData;
- callback.m_callbackFunction =
- safe_cast(&Dbdict::restartCreateTab_activateComplete);
-
- alterTab_activate(signal, createTabPtr, &callback);
-}
-
-void
-Dbdict::restartCreateTab_activateComplete(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode){
- jam();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
-
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
- tabPtr.p->tabState = TableRecord::DEFINED;
-
- c_opCreateTable.release(createTabPtr);
-
- c_restartRecord.activeTable++;
- checkSchemaStatus(signal);
-}
-
-void
-Dbdict::restartDropTab(Signal* signal, Uint32 tableId){
-
- const Uint32 key = ++c_opRecordSequence;
-
- DropTableRecordPtr dropTabPtr;
- ndbrequire(c_opDropTable.seize(dropTabPtr));
-
- dropTabPtr.p->key = key;
- c_opDropTable.add(dropTabPtr);
-
- dropTabPtr.p->m_errorCode = 0;
- dropTabPtr.p->m_request.tableId = tableId;
- dropTabPtr.p->m_coordinatorRef = 0;
- dropTabPtr.p->m_requestType = DropTabReq::RestartDropTab;
- dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_REQ;
-
-
- dropTabPtr.p->m_participantData.m_block = 0;
- dropTabPtr.p->m_participantData.m_callback.m_callbackData = key;
- dropTabPtr.p->m_participantData.m_callback.m_callbackFunction =
- safe_cast(&Dbdict::restartDropTab_complete);
- dropTab_nextStep(signal, dropTabPtr);
-}
-
-void
-Dbdict::restartDropTab_complete(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode){
- jam();
-
- DropTableRecordPtr dropTabPtr;
- ndbrequire(c_opDropTable.find(dropTabPtr, callbackData));
-
- //@todo check error
-
- c_opDropTable.release(dropTabPtr);
-
- c_restartRecord.activeTable++;
- checkSchemaStatus(signal);
-}
-
-/* **************************************************************** */
-/* ---------------------------------------------------------------- */
-/* MODULE: NODE FAILURE HANDLING ------------------------- */
-/* ---------------------------------------------------------------- */
-/* */
-/* This module contains the code that is used when nodes */
-/* (kernel/api) fail. */
-/* ---------------------------------------------------------------- */
-/* **************************************************************** */
-
-/* ---------------------------------------------------------------- */
-// We receive a report of an API that failed.
-/* ---------------------------------------------------------------- */
-void Dbdict::execAPI_FAILREQ(Signal* signal)
-{
- jamEntry();
- Uint32 failedApiNode = signal->theData[0];
- BlockReference retRef = signal->theData[1];
-
-#if 0
- Uint32 userNode = refToNode(c_connRecord.userBlockRef);
- if (userNode == failedApiNode) {
- jam();
- c_connRecord.userBlockRef = (Uint32)-1;
- }//if
-#endif
-
- signal->theData[0] = failedApiNode;
- signal->theData[1] = reference();
- sendSignal(retRef, GSN_API_FAILCONF, signal, 2, JBB);
-}//execAPI_FAILREQ()
-
-/* ---------------------------------------------------------------- */
-// We receive a report of one or more node failures of kernel nodes.
-/* ---------------------------------------------------------------- */
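-/* ---------------------------------------------------------------- */
-// Each failed kernel node is marked dead, NF_COMPLETEREP is sent to
-// DBDIH on its behalf and it is removed from c_aliveNodes. If schema
-// operations are outstanding, or the master fails during a create
-// table, the block state is switched to BS_NODE_FAILURE.
-/* ---------------------------------------------------------------- */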
-void Dbdict::execNODE_FAILREP(Signal* signal)
-{
- jamEntry();
- NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
-
- c_failureNr = nodeFail->failNo;
- const Uint32 numberOfFailedNodes = nodeFail->noOfNodes;
- const bool masterFailed = (c_masterNodeId != nodeFail->masterNodeId);
- c_masterNodeId = nodeFail->masterNodeId;
-
- c_noNodesFailed += numberOfFailedNodes;
- Uint32 theFailedNodes[NodeBitmask::Size];
- memcpy(theFailedNodes, nodeFail->theNodes, sizeof(theFailedNodes));
-
- c_counterMgr.execNODE_FAILREP(signal);
-
- bool ok = false;
- switch(c_blockState){
- case BS_IDLE:
- jam();
- ok = true;
- if(c_opRecordPool.getSize() != c_opRecordPool.getNoOfFree()){
- jam();
- c_blockState = BS_NODE_FAILURE;
- }
- break;
- case BS_CREATE_TAB:
- jam();
- ok = true;
- if(!masterFailed)
- break;
- // fall through
- case BS_BUSY:
- case BS_NODE_FAILURE:
- jam();
- c_blockState = BS_NODE_FAILURE;
- ok = true;
- break;
- }
- ndbrequire(ok);
-
- for(unsigned i = 1; i < MAX_NDB_NODES; i++) {
- jam();
- if(NodeBitmask::get(theFailedNodes, i)) {
- jam();
- NodeRecordPtr nodePtr;
- c_nodes.getPtr(nodePtr, i);
-
- nodePtr.p->nodeState = NodeRecord::NDB_NODE_DEAD;
- NFCompleteRep * const nfCompRep = (NFCompleteRep *)&signal->theData[0];
- nfCompRep->blockNo = DBDICT;
- nfCompRep->nodeId = getOwnNodeId();
- nfCompRep->failedNodeId = nodePtr.i;
- sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP, signal,
- NFCompleteRep::SignalLength, JBB);
-
- c_aliveNodes.clear(i);
- }//if
- }//for
-
-}//execNODE_FAILREP()
-
-
-/* **************************************************************** */
-/* ---------------------------------------------------------------- */
-/* MODULE: NODE START HANDLING --------------------------- */
-/* ---------------------------------------------------------------- */
-/* */
-/* This module contains the code that is used when kernel nodes */
-/* start. */
-/* ---------------------------------------------------------------- */
-/* **************************************************************** */
-
-/* ---------------------------------------------------------------- */
-// Include a starting node in the list of nodes that take part in
-// adding and dropping tables.
-/* ---------------------------------------------------------------- */
-void Dbdict::execINCL_NODEREQ(Signal* signal)
-{
- jamEntry();
- NodeRecordPtr nodePtr;
- BlockReference retRef = signal->theData[0];
- nodePtr.i = signal->theData[1];
-
- ndbrequire(c_noNodesFailed > 0);
- c_noNodesFailed--;
-
- c_nodes.getPtr(nodePtr);
- ndbrequire(nodePtr.p->nodeState == NodeRecord::NDB_NODE_DEAD);
- nodePtr.p->nodeState = NodeRecord::NDB_NODE_ALIVE;
- signal->theData[0] = reference();
- sendSignal(retRef, GSN_INCL_NODECONF, signal, 1, JBB);
-
- c_aliveNodes.set(nodePtr.i);
-}//execINCL_NODEREQ()
-
-/* **************************************************************** */
-/* ---------------------------------------------------------------- */
-/* MODULE: ADD TABLE HANDLING ---------------------------- */
-/* ---------------------------------------------------------------- */
-/* */
-/* This module contains the code that is used when adding a table. */
-/* ---------------------------------------------------------------- */
-/* **************************************************************** */
-
-/* ---------------------------------------------------------------- */
-// This signal carries information about a table from either the
-// API, Ndbcntr or another DICT.
-/* ---------------------------------------------------------------- */
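-/* ---------------------------------------------------------------- */
-// Only the master accepts the request, and only while the block is
-// idle. The DICT_TAB_INFO section is parsed into a table record,
-// DBDIH is asked to compute the fragmentation (an ordered index
-// reuses the fragmentation of its primary table), and the block
-// enters the BS_CREATE_TAB state while the operation is running.
-/* ---------------------------------------------------------------- */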
-void
-Dbdict::execCREATE_TABLE_REQ(Signal* signal){
- jamEntry();
- if(!assembleFragments(signal)){
- return;
- }
-
- CreateTableReq* const req = (CreateTableReq*)signal->getDataPtr();
- const Uint32 senderRef = req->senderRef;
- const Uint32 senderData = req->senderData;
-
- ParseDictTabInfoRecord parseRecord;
- do {
- if(getOwnNodeId() != c_masterNodeId){
- jam();
- parseRecord.errorCode = CreateTableRef::NotMaster;
- break;
- }
-
- if (c_blockState != BS_IDLE){
- jam();
- parseRecord.errorCode = CreateTableRef::Busy;
- break;
- }
-
- CreateTableRecordPtr createTabPtr;
- c_opCreateTable.seize(createTabPtr);
-
- if(createTabPtr.isNull()){
- jam();
- parseRecord.errorCode = CreateTableRef::Busy;
- break;
- }
-
- parseRecord.requestType = DictTabInfo::CreateTableFromAPI;
- parseRecord.errorCode = 0;
-
- SegmentedSectionPtr ptr;
- signal->getSection(ptr, CreateTableReq::DICT_TAB_INFO);
- SimplePropertiesSectionReader r(ptr, getSectionSegmentPool());
-
- handleTabInfoInit(r, &parseRecord);
- releaseSections(signal);
-
- if(parseRecord.errorCode != 0){
- jam();
- c_opCreateTable.release(createTabPtr);
- break;
- }
-
- createTabPtr.p->key = ++c_opRecordSequence;
- c_opCreateTable.add(createTabPtr);
- createTabPtr.p->m_errorCode = 0;
- createTabPtr.p->m_senderRef = senderRef;
- createTabPtr.p->m_senderData = senderData;
- createTabPtr.p->m_tablePtrI = parseRecord.tablePtr.i;
- createTabPtr.p->m_coordinatorRef = reference();
- createTabPtr.p->m_fragmentsPtrI = RNIL;
- createTabPtr.p->m_dihAddFragPtr = RNIL;
-
- Uint32 * theData = signal->getDataPtrSend();
- CreateFragmentationReq * const req = (CreateFragmentationReq*)theData;
- req->senderRef = reference();
- req->senderData = createTabPtr.p->key;
- req->fragmentationType = parseRecord.tablePtr.p->fragmentType;
- req->noOfFragments = 0;
- req->fragmentNode = 0;
- req->primaryTableId = RNIL;
- if (parseRecord.tablePtr.p->isOrderedIndex()) {
- // ordered index has same fragmentation as the table
- const Uint32 primaryTableId = parseRecord.tablePtr.p->primaryTableId;
- TableRecordPtr primaryTablePtr;
- c_tableRecordPool.getPtr(primaryTablePtr, primaryTableId);
- // fragmentationType must be consistent
- req->fragmentationType = primaryTablePtr.p->fragmentType;
- req->primaryTableId = primaryTableId;
- }
- sendSignal(DBDIH_REF, GSN_CREATE_FRAGMENTATION_REQ, signal,
- CreateFragmentationReq::SignalLength, JBB);
-
- c_blockState = BS_CREATE_TAB;
- return;
- } while(0);
-
- /**
- * Something went wrong
- */
- releaseSections(signal);
-
- CreateTableRef * ref = (CreateTableRef*)signal->getDataPtrSend();
- ref->senderData = senderData;
- ref->senderRef = reference();
- ref->masterNodeId = c_masterNodeId;
- ref->errorCode = parseRecord.errorCode;
- ref->errorLine = parseRecord.errorLine;
- ref->errorKey = parseRecord.errorKey;
- ref->status = parseRecord.status;
- sendSignal(senderRef, GSN_CREATE_TABLE_REF, signal,
- CreateTableRef::SignalLength, JBB);
-}
-
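-/* ---------------------------------------------------------------- */
-// ALTER TABLE, received by the master. The table state and version
-// are validated, the new table definition is parsed (without being
-// saved), repacked, and broadcast to all alive DICT instances as an
-// ALTER_TAB_REQ with request type AlterTablePrepare; a SafeCounter
-// tracks the replies.
-/* ---------------------------------------------------------------- */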
-void
-Dbdict::execALTER_TABLE_REQ(Signal* signal)
-{
- // Received by master
- jamEntry();
- if(!assembleFragments(signal)){
- return;
- }
- AlterTableReq* const req = (AlterTableReq*)signal->getDataPtr();
- const Uint32 senderRef = req->senderRef;
- const Uint32 senderData = req->senderData;
- const Uint32 changeMask = req->changeMask;
- const Uint32 tableId = req->tableId;
- const Uint32 tableVersion = req->tableVersion;
- ParseDictTabInfoRecord* aParseRecord;
-
- // Get table definition
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, tableId, false);
- if(tablePtr.isNull()){
- jam();
- alterTableRef(signal, req, AlterTableRef::NoSuchTable);
- return;
- }
-
- if(getOwnNodeId() != c_masterNodeId){
- jam();
- alterTableRef(signal, req, AlterTableRef::NotMaster);
- return;
- }
-
- if(c_blockState != BS_IDLE){
- jam();
- alterTableRef(signal, req, AlterTableRef::Busy);
- return;
- }
-
- const TableRecord::TabState tabState = tablePtr.p->tabState;
- bool ok = false;
- switch(tabState){
- case TableRecord::NOT_DEFINED:
- case TableRecord::REORG_TABLE_PREPARED:
- case TableRecord::DEFINING:
- case TableRecord::CHECKED:
- jam();
- alterTableRef(signal, req, AlterTableRef::NoSuchTable);
- return;
- case TableRecord::DEFINED:
- ok = true;
- jam();
- break;
- case TableRecord::PREPARE_DROPPING:
- case TableRecord::DROPPING:
- jam();
- alterTableRef(signal, req, AlterTableRef::DropInProgress);
- return;
- }
- ndbrequire(ok);
-
- if(tablePtr.p->tableVersion != tableVersion){
- jam();
- alterTableRef(signal, req, AlterTableRef::InvalidTableVersion);
- return;
- }
- // Parse new table definition
- ParseDictTabInfoRecord parseRecord;
- aParseRecord = &parseRecord;
-
- CreateTableRecordPtr alterTabPtr; // Reuse create table records
- c_opCreateTable.seize(alterTabPtr);
- CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
-
- if(alterTabPtr.isNull()){
- jam();
- alterTableRef(signal, req, AlterTableRef::Busy);
- return;
- }
-
- regAlterTabPtr->m_changeMask = changeMask;
- parseRecord.requestType = DictTabInfo::AlterTableFromAPI;
- parseRecord.errorCode = 0;
-
- SegmentedSectionPtr ptr;
- signal->getSection(ptr, AlterTableReq::DICT_TAB_INFO);
- SimplePropertiesSectionReader r(ptr, getSectionSegmentPool());
-
- handleTabInfoInit(r, &parseRecord, false); // Will not save info
-
- if(parseRecord.errorCode != 0){
- jam();
- c_opCreateTable.release(alterTabPtr);
- alterTableRef(signal, req,
- (AlterTableRef::ErrorCode) parseRecord.errorCode,
- aParseRecord);
- return;
- }
-
- releaseSections(signal);
- regAlterTabPtr->key = ++c_opRecordSequence;
- c_opCreateTable.add(alterTabPtr);
- ndbrequire(c_opCreateTable.find(alterTabPtr, regAlterTabPtr->key));
- regAlterTabPtr->m_errorCode = 0;
- regAlterTabPtr->m_senderRef = senderRef;
- regAlterTabPtr->m_senderData = senderData;
- regAlterTabPtr->m_tablePtrI = parseRecord.tablePtr.i;
- regAlterTabPtr->m_alterTableFailed = false;
- regAlterTabPtr->m_coordinatorRef = reference();
- regAlterTabPtr->m_fragmentsPtrI = RNIL;
- regAlterTabPtr->m_dihAddFragPtr = RNIL;
-
- // Alter table on all nodes
- c_blockState = BS_BUSY;
-
- // Send prepare request to all alive nodes
- SimplePropertiesSectionWriter w(getSectionSegmentPool());
- packTableIntoPagesImpl(w, parseRecord.tablePtr);
-
- SegmentedSectionPtr tabInfoPtr;
- w.getPtr(tabInfoPtr);
- signal->setSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO);
-
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- regAlterTabPtr->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ;
- SafeCounter safeCounter(c_counterMgr, regAlterTabPtr->m_coordinatorData.m_counter);
- safeCounter.init<AlterTabRef>(rg, regAlterTabPtr->key);
-
- AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend();
- lreq->senderRef = reference();
- lreq->senderData = regAlterTabPtr->key;
- lreq->clientRef = regAlterTabPtr->m_senderRef;
- lreq->clientData = regAlterTabPtr->m_senderData;
- lreq->changeMask = changeMask;
- lreq->tableId = tableId;
- lreq->tableVersion = tableVersion + 1;
- lreq->gci = tablePtr.p->gciTableCreated;
- lreq->requestType = AlterTabReq::AlterTablePrepare;
-
- sendSignal(rg, GSN_ALTER_TAB_REQ, signal,
- AlterTabReq::SignalLength, JBB);
-
-}
-
-void Dbdict::alterTableRef(Signal * signal,
- AlterTableReq * req,
- AlterTableRef::ErrorCode errCode,
- ParseDictTabInfoRecord* parseRecord)
-{
- jam();
- releaseSections(signal);
- AlterTableRef * ref = (AlterTableRef*)signal->getDataPtrSend();
- Uint32 senderRef = req->senderRef;
- ref->senderData = req->senderData;
- ref->senderRef = reference();
- ref->masterNodeId = c_masterNodeId;
- if (parseRecord) {
- ref->errorCode = parseRecord->errorCode;
- ref->errorLine = parseRecord->errorLine;
- ref->errorKey = parseRecord->errorKey;
- ref->status = parseRecord->status;
- }
- else {
- ref->errorCode = errCode;
- ref->errorLine = 0;
- ref->errorKey = 0;
- ref->status = 0;
- }
- sendSignal(senderRef, GSN_ALTER_TABLE_REF, signal,
- AlterTableRef::SignalLength, JBB);
-}
-
-void
-Dbdict::execALTER_TAB_REQ(Signal * signal)
-{
- // Received in all nodes to handle change locally
- jamEntry();
-
- if(!assembleFragments(signal)){
- return;
- }
- AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr();
- const Uint32 senderRef = req->senderRef;
- const Uint32 senderData = req->senderData;
- const Uint32 changeMask = req->changeMask;
- const Uint32 tableId = req->tableId;
- const Uint32 tableVersion = req->tableVersion;
- const Uint32 gci = req->gci;
- AlterTabReq::RequestType requestType =
- (AlterTabReq::RequestType) req->requestType;
-
- SegmentedSectionPtr tabInfoPtr;
- signal->getSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO);
-
- CreateTableRecordPtr alterTabPtr; // Reuse create table records
-
- if (senderRef != reference()) {
- jam();
- c_blockState = BS_BUSY;
- }
- if ((requestType == AlterTabReq::AlterTablePrepare)
- && (senderRef != reference())) {
- jam();
- c_opCreateTable.seize(alterTabPtr);
- if(!alterTabPtr.isNull())
- alterTabPtr.p->m_changeMask = changeMask;
- }
- else {
- jam();
- ndbrequire(c_opCreateTable.find(alterTabPtr, senderData));
- }
- if(alterTabPtr.isNull()){
- jam();
- alterTabRef(signal, req, AlterTableRef::Busy);
- return;
- }
- CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
- regAlterTabPtr->m_alterTableId = tableId;
- regAlterTabPtr->m_coordinatorRef = senderRef;
-
- // Get table definition
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, tableId, false);
- if(tablePtr.isNull()){
- jam();
- alterTabRef(signal, req, AlterTableRef::NoSuchTable);
- return;
- }
-
- switch(requestType) {
- case(AlterTabReq::AlterTablePrepare): {
- ParseDictTabInfoRecord* aParseRecord;
-
- const TableRecord::TabState tabState = tablePtr.p->tabState;
- bool ok = false;
- switch(tabState){
- case TableRecord::NOT_DEFINED:
- case TableRecord::REORG_TABLE_PREPARED:
- case TableRecord::DEFINING:
- case TableRecord::CHECKED:
- jam();
- alterTabRef(signal, req, AlterTableRef::NoSuchTable);
- return;
- case TableRecord::DEFINED:
- ok = true;
- jam();
- break;
- case TableRecord::PREPARE_DROPPING:
- case TableRecord::DROPPING:
- jam();
- alterTabRef(signal, req, AlterTableRef::DropInProgress);
- return;
- }
- ndbrequire(ok);
-
- if(tablePtr.p->tableVersion + 1 != tableVersion){
- jam();
- alterTabRef(signal, req, AlterTableRef::InvalidTableVersion);
- return;
- }
- TableRecordPtr newTablePtr;
- if (senderRef != reference()) {
- jam();
- // Parse altered table definition
- ParseDictTabInfoRecord parseRecord;
- aParseRecord = &parseRecord;
-
- parseRecord.requestType = DictTabInfo::AlterTableFromAPI;
- parseRecord.errorCode = 0;
-
- SimplePropertiesSectionReader r(tabInfoPtr, getSectionSegmentPool());
-
- handleTabInfoInit(r, &parseRecord, false); // Will not save info
-
- if(parseRecord.errorCode != 0){
- jam();
- c_opCreateTable.release(alterTabPtr);
- alterTabRef(signal, req,
- (AlterTableRef::ErrorCode) parseRecord.errorCode,
- aParseRecord);
- return;
- }
- regAlterTabPtr->key = senderData;
- c_opCreateTable.add(alterTabPtr);
- regAlterTabPtr->m_errorCode = 0;
- regAlterTabPtr->m_senderRef = senderRef;
- regAlterTabPtr->m_senderData = senderData;
- regAlterTabPtr->m_tablePtrI = parseRecord.tablePtr.i;
- regAlterTabPtr->m_fragmentsPtrI = RNIL;
- regAlterTabPtr->m_dihAddFragPtr = RNIL;
- newTablePtr = parseRecord.tablePtr;
- newTablePtr.p->tableVersion = tableVersion;
- }
- else { // (req->senderRef == reference())
- jam();
- c_tableRecordPool.getPtr(newTablePtr, regAlterTabPtr->m_tablePtrI);
- newTablePtr.p->tableVersion = tableVersion;
- }
- if (handleAlterTab(req, regAlterTabPtr, tablePtr, newTablePtr) == -1) {
- jam();
- c_opCreateTable.release(alterTabPtr);
- alterTabRef(signal, req, AlterTableRef::UnsupportedChange);
- return;
- }
- releaseSections(signal);
- // Propagate alter table to other local blocks
- AlterTabReq * req = (AlterTabReq*)signal->getDataPtrSend();
- req->senderRef = reference();
- req->senderData = senderData;
- req->changeMask = changeMask;
- req->tableId = tableId;
- req->tableVersion = tableVersion;
- req->gci = gci;
- req->requestType = requestType;
- sendSignal(DBLQH_REF, GSN_ALTER_TAB_REQ, signal,
- AlterTabReq::SignalLength, JBB);
- return;
- }
- case(AlterTabReq::AlterTableCommit): {
- jam();
- // Write schema for altered table to disk
- SegmentedSectionPtr tabInfoPtr;
- signal->getSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO);
- regAlterTabPtr->m_tabInfoPtrI = tabInfoPtr.i;
-
- signal->header.m_noOfSections = 0;
-
- // Update table record
- tablePtr.p->packedSize = tabInfoPtr.sz;
- tablePtr.p->tableVersion = tableVersion;
- tablePtr.p->gciTableCreated = gci;
-
- SchemaFile::TableEntry tabEntry;
- tabEntry.m_tableVersion = tableVersion;
- tabEntry.m_tableType = tablePtr.p->tableType;
- tabEntry.m_tableState = SchemaFile::ALTER_TABLE_COMMITTED;
- tabEntry.m_gcp = gci;
- tabEntry.m_info_words = tabInfoPtr.sz;
- memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused));
-
- Callback callback;
- callback.m_callbackData = senderData;
- callback.m_callbackFunction =
- safe_cast(&Dbdict::alterTab_writeSchemaConf);
-
- updateSchemaState(signal, tableId, &tabEntry, &callback);
- break;
- }
- case(AlterTabReq::AlterTableRevert): {
- jam();
- // Revert failed alter table
- revertAlterTable(signal, changeMask, tableId, regAlterTabPtr);
- // Acknowledge the reverted alter table
- AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = senderData;
- conf->changeMask = changeMask;
- conf->tableId = tableId;
- conf->tableVersion = tableVersion;
- conf->gci = gci;
- conf->requestType = requestType;
- sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal,
- AlterTabConf::SignalLength, JBB);
- break;
- }
- default: ndbrequire(false);
- }
-}
-
-void Dbdict::alterTabRef(Signal * signal,
- AlterTabReq * req,
- AlterTableRef::ErrorCode errCode,
- ParseDictTabInfoRecord* parseRecord)
-{
- jam();
- releaseSections(signal);
- AlterTabRef * ref = (AlterTabRef*)signal->getDataPtrSend();
- Uint32 senderRef = req->senderRef;
- ref->senderData = req->senderData;
- ref->senderRef = reference();
- if (parseRecord) {
- jam();
- ref->errorCode = parseRecord->errorCode;
- ref->errorLine = parseRecord->errorLine;
- ref->errorKey = parseRecord->errorKey;
- ref->errorStatus = parseRecord->status;
- }
- else {
- jam();
- ref->errorCode = errCode;
- ref->errorLine = 0;
- ref->errorKey = 0;
- ref->errorStatus = 0;
- }
- sendSignal(senderRef, GSN_ALTER_TAB_REF, signal,
- AlterTabRef::SignalLength, JBB);
-
- c_blockState = BS_IDLE;
-}
-
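-// A participant rejected an ALTER_TAB_REQ. For a failed prepare the
-// coordinator starts a revert round once all replies have arrived (otherwise
-// it just flags the failure); for a failed commit or revert it forwards the
-// error to the API when the last reply is in.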
-void Dbdict::execALTER_TAB_REF(Signal * signal){
- jamEntry();
-
- AlterTabRef * ref = (AlterTabRef*)signal->getDataPtr();
-
- Uint32 senderRef = ref->senderRef;
- Uint32 senderData = ref->senderData;
- Uint32 errorCode = ref->errorCode;
- Uint32 errorLine = ref->errorLine;
- Uint32 errorKey = ref->errorKey;
- Uint32 errorStatus = ref->errorStatus;
- AlterTabReq::RequestType requestType =
- (AlterTabReq::RequestType) ref->requestType;
- CreateTableRecordPtr alterTabPtr;
- ndbrequire(c_opCreateTable.find(alterTabPtr, senderData));
- CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
- Uint32 changeMask = regAlterTabPtr->m_changeMask;
- SafeCounter safeCounter(c_counterMgr, regAlterTabPtr->m_coordinatorData.m_counter);
- safeCounter.clearWaitingFor(refToNode(senderRef));
- switch (requestType) {
- case(AlterTabReq::AlterTablePrepare): {
- if (safeCounter.done()) {
- jam();
- // Send revert request to all alive nodes
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, regAlterTabPtr->m_alterTableId);
- Uint32 tableId = tablePtr.p->tableId;
- Uint32 tableVersion = tablePtr.p->tableVersion;
- Uint32 gci = tablePtr.p->gciTableCreated;
- SimplePropertiesSectionWriter w(getSectionSegmentPool());
- packTableIntoPagesImpl(w, tablePtr);
- SegmentedSectionPtr spDataPtr;
- w.getPtr(spDataPtr);
- signal->setSection(spDataPtr, AlterTabReq::DICT_TAB_INFO);
-
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- regAlterTabPtr->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ;
- safeCounter.init<AlterTabRef>(rg, regAlterTabPtr->key);
-
- AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend();
- lreq->senderRef = reference();
- lreq->senderData = regAlterTabPtr->key;
- lreq->clientRef = regAlterTabPtr->m_senderRef;
- lreq->clientData = regAlterTabPtr->m_senderData;
- lreq->changeMask = changeMask;
- lreq->tableId = tableId;
- lreq->tableVersion = tableVersion;
- lreq->gci = gci;
- lreq->requestType = AlterTabReq::AlterTableRevert;
-
- sendSignal(rg, GSN_ALTER_TAB_REQ, signal,
- AlterTabReq::SignalLength, JBB);
- }
- else {
- jam();
- regAlterTabPtr->m_alterTableFailed = true;
- }
- break;
- }
- case(AlterTabReq::AlterTableCommit):
- jam();
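- // Fall through: commit and revert failures are reported to the API the same way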
- case(AlterTabReq::AlterTableRevert): {
- AlterTableRef * apiRef = (AlterTableRef*)signal->getDataPtrSend();
-
- apiRef->senderData = senderData;
- apiRef->senderRef = reference();
- apiRef->masterNodeId = c_masterNodeId;
- apiRef->errorCode = errorCode;
- apiRef->errorLine = errorLine;
- apiRef->errorKey = errorKey;
- apiRef->status = errorStatus;
- if (safeCounter.done()) {
- jam();
- sendSignal(senderRef, GSN_ALTER_TABLE_REF, signal,
- AlterTableRef::SignalLength, JBB);
- c_blockState = BS_IDLE;
- }
- else {
- jam();
- regAlterTabPtr->m_alterTableFailed = true;
- regAlterTabPtr->m_alterTableRef = *apiRef;
- }
- break;
- }
- default: ndbrequire(false);
- }
-}
-
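-// Confirmations arrive both from the local block chain (DBLQH -> DBDIH ->
-// DBTC during prepare) and from the participating DICT blocks. Once all
-// participants have answered, the coordinator starts the commit round, or
-// the revert round if any participant reported a failure.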
-void
-Dbdict::execALTER_TAB_CONF(Signal * signal){
- jamEntry();
- AlterTabConf * const conf = (AlterTabConf*)signal->getDataPtr();
- Uint32 senderRef = conf->senderRef;
- Uint32 senderData = conf->senderData;
- Uint32 changeMask = conf->changeMask;
- Uint32 tableId = conf->tableId;
- Uint32 tableVersion = conf->tableVersion;
- Uint32 gci = conf->gci;
- AlterTabReq::RequestType requestType =
- (AlterTabReq::RequestType) conf->requestType;
- CreateTableRecordPtr alterTabPtr;
- ndbrequire(c_opCreateTable.find(alterTabPtr, senderData));
- CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
-
- switch (requestType) {
- case(AlterTabReq::AlterTablePrepare): {
- switch(refToBlock(signal->getSendersBlockRef())) {
- case DBLQH: {
- jam();
- AlterTabReq * req = (AlterTabReq*)signal->getDataPtrSend();
- req->senderRef = reference();
- req->senderData = senderData;
- req->changeMask = changeMask;
- req->tableId = tableId;
- req->tableVersion = tableVersion;
- req->gci = gci;
- req->requestType = requestType;
- sendSignal(DBDIH_REF, GSN_ALTER_TAB_REQ, signal,
- AlterTabReq::SignalLength, JBB);
- return;
- }
- case DBDIH: {
- jam();
- AlterTabReq * req = (AlterTabReq*)signal->getDataPtrSend();
- req->senderRef = reference();
- req->senderData = senderData;
- req->changeMask = changeMask;
- req->tableId = tableId;
- req->tableVersion = tableVersion;
- req->gci = gci;
- req->requestType = requestType;
- sendSignal(DBTC_REF, GSN_ALTER_TAB_REQ, signal,
- AlterTabReq::SignalLength, JBB);
- return;
- }
- case DBTC: {
- jam();
- // Participant is done with prepare phase, send conf to coordinator
- AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = senderData;
- conf->changeMask = changeMask;
- conf->tableId = tableId;
- conf->tableVersion = tableVersion;
- conf->gci = gci;
- conf->requestType = requestType;
- sendSignal(regAlterTabPtr->m_coordinatorRef, GSN_ALTER_TAB_CONF, signal,
- AlterTabConf::SignalLength, JBB);
- return;
- }
- default: break;
- }
- // Coordinator only
- SafeCounter safeCounter(c_counterMgr, regAlterTabPtr->m_coordinatorData.m_counter);
- safeCounter.clearWaitingFor(refToNode(senderRef));
- if (safeCounter.done()) {
- jam();
- // We have received all local confirmations
- if (regAlterTabPtr->m_alterTableFailed) {
- jam();
- // Send revert request to all alive nodes
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, regAlterTabPtr->m_alterTableId);
- Uint32 tableId = tablePtr.p->tableId;
- Uint32 tableVersion = tablePtr.p->tableVersion;
- Uint32 gci = tablePtr.p->gciTableCreated;
- SimplePropertiesSectionWriter w(getSectionSegmentPool());
- packTableIntoPagesImpl(w, tablePtr);
- SegmentedSectionPtr spDataPtr;
- w.getPtr(spDataPtr);
- signal->setSection(spDataPtr, AlterTabReq::DICT_TAB_INFO);
-
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- regAlterTabPtr->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ;
- safeCounter.init<AlterTabRef>(rg, regAlterTabPtr->key);
-
- AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend();
- lreq->senderRef = reference();
- lreq->senderData = regAlterTabPtr->key;
- lreq->clientRef = regAlterTabPtr->m_senderRef;
- lreq->clientData = regAlterTabPtr->m_senderData;
- lreq->changeMask = changeMask;
- lreq->tableId = tableId;
- lreq->tableVersion = tableVersion;
- lreq->gci = gci;
- lreq->requestType = AlterTabReq::AlterTableRevert;
-
- sendSignal(rg, GSN_ALTER_TAB_REQ, signal,
- AlterTabReq::SignalLength, JBB);
- }
- else {
- jam();
- // Send commit request to all alive nodes
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, tableId);
- SimplePropertiesSectionWriter w(getSectionSegmentPool());
- packTableIntoPagesImpl(w, tablePtr);
- SegmentedSectionPtr spDataPtr;
- w.getPtr(spDataPtr);
- signal->setSection(spDataPtr, AlterTabReq::DICT_TAB_INFO);
-
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- regAlterTabPtr->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ;
- safeCounter.init<AlterTabRef>(rg, regAlterTabPtr->key);
-
- AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend();
- lreq->senderRef = reference();
- lreq->senderData = regAlterTabPtr->key;
- lreq->clientRef = regAlterTabPtr->m_senderRef;
- lreq->clientData = regAlterTabPtr->m_senderData;
- lreq->changeMask = changeMask;
- lreq->tableId = tableId;
- lreq->tableVersion = tableVersion;
- lreq->gci = gci;
- lreq->requestType = AlterTabReq::AlterTableCommit;
-
- sendSignal(rg, GSN_ALTER_TAB_REQ, signal,
- AlterTabReq::SignalLength, JBB);
- }
- }
- else {
- // (!safeCounter.done())
- jam();
- }
- break;
- }
- case(AlterTabReq::AlterTableRevert):
- jam();
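- // Fall through: revert and commit confirmations are handled identically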
- case(AlterTabReq::AlterTableCommit): {
- SafeCounter safeCounter(c_counterMgr, regAlterTabPtr->m_coordinatorData.m_counter);
- safeCounter.clearWaitingFor(refToNode(senderRef));
- if (safeCounter.done()) {
- jam();
- // We have received all local confirmations
- releaseSections(signal);
- if (regAlterTabPtr->m_alterTableFailed) {
- jam();
- AlterTableRef * apiRef =
- (AlterTableRef*)signal->getDataPtrSend();
- *apiRef = regAlterTabPtr->m_alterTableRef;
- sendSignal(regAlterTabPtr->m_senderRef, GSN_ALTER_TABLE_REF, signal,
- AlterTableRef::SignalLength, JBB);
- }
- else {
- jam();
- // Alter table completed, inform API
- AlterTableConf * const apiConf =
- (AlterTableConf*)signal->getDataPtrSend();
- apiConf->senderRef = reference();
- apiConf->senderData = regAlterTabPtr->m_senderData;
- apiConf->tableId = tableId;
- apiConf->tableVersion = tableVersion;
-
- //@todo check api failed
- sendSignal(regAlterTabPtr->m_senderRef, GSN_ALTER_TABLE_CONF, signal,
- AlterTableConf::SignalLength, JBB);
- }
-
- // Release resources
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_tablePtrI);
- releaseTableObject(tabPtr.i, false);
- c_opCreateTable.release(alterTabPtr);
- c_blockState = BS_IDLE;
- }
- else {
- // (!safeCounter.done())
- jam();
- }
- break;
- }
- default: ndbrequire(false);
- }
-}
-
-// For debugging
-inline
-void Dbdict::printTables()
-{
- DLHashTable<TableRecord>::Iterator iter;
- bool moreTables = c_tableRecordHash.first(iter);
- printf("TABLES IN DICT:\n");
- while (moreTables) {
- TableRecordPtr tablePtr = iter.curr;
- printf("%s ", tablePtr.p->tableName);
- moreTables = c_tableRecordHash.next(iter);
- }
- printf("\n");
-}
-
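-// Apply the prepared change locally. Only a table rename (name flag in
-// changeMask) is supported; any other change makes the caller reply with
-// AlterTableRef::UnsupportedChange.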
-int Dbdict::handleAlterTab(AlterTabReq * req,
- CreateTableRecord * regAlterTabPtr,
- TableRecordPtr origTablePtr,
- TableRecordPtr newTablePtr)
-{
- Uint32 changeMask = req->changeMask;
-
- if (AlterTableReq::getNameFlag(changeMask)) {
- jam();
- // Table rename
- // Remove from hashtable
-#ifdef VM_TRACE
- TableRecordPtr tmp;
- ndbrequire(c_tableRecordHash.find(tmp, *origTablePtr.p));
-#endif
- c_tableRecordHash.remove(origTablePtr);
- strcpy(regAlterTabPtr->previousTableName, origTablePtr.p->tableName);
- strcpy(origTablePtr.p->tableName, newTablePtr.p->tableName);
- // Set new schema version
- origTablePtr.p->tableVersion = newTablePtr.p->tableVersion;
- // Put it back
-#ifdef VM_TRACE
- ndbrequire(!c_tableRecordHash.find(tmp, *origTablePtr.p));
-#endif
- c_tableRecordHash.add(origTablePtr);
-
- return 0;
- }
- jam();
- return -1;
-}
-
-void Dbdict::revertAlterTable(Signal * signal,
- Uint32 changeMask,
- Uint32 tableId,
- CreateTableRecord * regAlterTabPtr)
-{
- if (AlterTableReq::getNameFlag(changeMask)) {
- jam();
- // Table rename
- // Restore previous name
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, tableId);
- // Remove from hashtable
-#ifdef VM_TRACE
- TableRecordPtr tmp;
- ndbrequire(c_tableRecordHash.find(tmp, * tablePtr.p));
-#endif
- c_tableRecordHash.remove(tablePtr);
- // Restore name
- strcpy(tablePtr.p->tableName, regAlterTabPtr->previousTableName);
- // Revert schema version
- tablePtr.p->tableVersion = tablePtr.p->tableVersion - 1;
- // Put it back
-#ifdef VM_TRACE
- ndbrequire(!c_tableRecordHash.find(tmp, * tablePtr.p));
-#endif
- c_tableRecordHash.add(tablePtr);
-
- return;
- }
-
- ndbrequire(false);
-}
-
-void
-Dbdict::alterTab_writeSchemaConf(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode)
-{
- jam();
- Uint32 key = callbackData;
- CreateTableRecordPtr alterTabPtr;
- ndbrequire(c_opCreateTable.find(alterTabPtr, key));
- CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
- Uint32 tableId = regAlterTabPtr->m_alterTableId;
-
- Callback callback;
- callback.m_callbackData = regAlterTabPtr->key;
- callback.m_callbackFunction =
- safe_cast(&Dbdict::alterTab_writeTableConf);
-
- SegmentedSectionPtr tabInfoPtr;
- getSection(tabInfoPtr, regAlterTabPtr->m_tabInfoPtrI);
-
- writeTableFile(signal, tableId, tabInfoPtr, &callback);
-
- signal->setSection(tabInfoPtr, 0);
- releaseSections(signal);
-}
-
-void
-Dbdict::alterTab_writeTableConf(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode)
-{
- jam();
- CreateTableRecordPtr alterTabPtr;
- ndbrequire(c_opCreateTable.find(alterTabPtr, callbackData));
- CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
- Uint32 coordinatorRef = regAlterTabPtr->m_coordinatorRef;
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_alterTableId);
-
- // Alter table commit request handled successfully
- AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = callbackData;
- conf->tableId = tabPtr.p->tableId;
- conf->tableVersion = tabPtr.p->tableVersion;
- conf->gci = tabPtr.p->gciTableCreated;
- conf->requestType = AlterTabReq::AlterTableCommit;
- sendSignal(coordinatorRef, GSN_ALTER_TAB_CONF, signal,
- AlterTabConf::SignalLength, JBB);
- if(coordinatorRef != reference()) {
- jam();
- // Release resources
- c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_tablePtrI);
- releaseTableObject(tabPtr.i, false);
- c_opCreateTable.release(alterTabPtr);
- c_blockState = BS_IDLE;
- }
-}
-
-void
-Dbdict::execCREATE_FRAGMENTATION_REF(Signal * signal){
- jamEntry();
- const Uint32 * theData = signal->getDataPtr();
- CreateFragmentationRef * const ref = (CreateFragmentationRef*)theData;
- (void)ref;
- ndbrequire(false);
-}
-
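-// DBDIH has computed the fragmentation for the new table. Save the fragment
-// count, bump the table version, pack the table definition and send the
-// prepare round of CREATE_TAB_REQ to all alive DICT nodes.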
-void
-Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){
- jamEntry();
- const Uint32 * theData = signal->getDataPtr();
- CreateFragmentationConf * const conf = (CreateFragmentationConf*)theData;
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
-
- ndbrequire(signal->getNoOfSections() == 1);
-
- SegmentedSectionPtr fragDataPtr;
- signal->getSection(fragDataPtr, CreateFragmentationConf::FRAGMENTS);
- signal->header.m_noOfSections = 0;
-
- /**
- * Get table
- */
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
-
- /**
- * Save fragment count
- */
- tabPtr.p->fragmentCount = conf->noOfFragments;
-
- /**
- * Update table version
- */
- XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
- SchemaFile::TableEntry * tabEntry = getTableEntry(xsf, tabPtr.i);
-
- tabPtr.p->tableVersion = tabEntry->m_tableVersion + 1;
-
- /**
- * Pack
- */
- SimplePropertiesSectionWriter w(getSectionSegmentPool());
- packTableIntoPagesImpl(w, tabPtr);
-
- SegmentedSectionPtr spDataPtr;
- w.getPtr(spDataPtr);
-
- signal->setSection(spDataPtr, CreateTabReq::DICT_TAB_INFO);
- signal->setSection(fragDataPtr, CreateTabReq::FRAGMENTATION);
-
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter);
- createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ;
- createTabPtr.p->m_coordinatorData.m_requestType = CreateTabReq::CreateTablePrepare;
- tmp.init<CreateTabRef>(rg, GSN_CREATE_TAB_REF, createTabPtr.p->key);
-
- CreateTabReq * const req = (CreateTabReq*)theData;
- req->senderRef = reference();
- req->senderData = createTabPtr.p->key;
- req->clientRef = createTabPtr.p->m_senderRef;
- req->clientData = createTabPtr.p->m_senderData;
- req->requestType = CreateTabReq::CreateTablePrepare;
-
- req->gci = 0;
- req->tableId = tabPtr.i;
- req->tableVersion = tabEntry->m_tableVersion + 1;
-
- sendFragmentedSignal(rg, GSN_CREATE_TAB_REQ, signal,
- CreateTabReq::SignalLength, JBB);
-
- return;
-}
-
-void
-Dbdict::execCREATE_TAB_REF(Signal* signal){
- jamEntry();
-
- CreateTabRef * const ref = (CreateTabRef*)signal->getDataPtr();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData));
-
- ndbrequire(createTabPtr.p->m_coordinatorRef == reference());
- ndbrequire(createTabPtr.p->m_coordinatorData.m_gsn == GSN_CREATE_TAB_REQ);
-
- if(ref->errorCode != CreateTabRef::NF_FakeErrorREF){
- createTabPtr.p->setErrorCode(ref->errorCode);
- }
- createTab_reply(signal, createTabPtr, refToNode(ref->senderRef));
-}
-
-void
-Dbdict::execCREATE_TAB_CONF(Signal* signal){
- jamEntry();
-
- ndbrequire(signal->getNoOfSections() == 0);
-
- CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
-
- ndbrequire(createTabPtr.p->m_coordinatorRef == reference());
- ndbrequire(createTabPtr.p->m_coordinatorData.m_gsn == GSN_CREATE_TAB_REQ);
-
- createTab_reply(signal, createTabPtr, refToNode(conf->senderRef));
-}
-
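-// The coordinator collects one CREATE_TAB reply per alive node. After prepare
-// it either starts a drop round (on failure) or locks the start-LCP mutex and
-// sends the commit round; after commit it unlocks the mutex and confirms to
-// the API; after a drop round it reports the failure to the API.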
-void
-Dbdict::createTab_reply(Signal* signal,
- CreateTableRecordPtr createTabPtr,
- Uint32 nodeId)
-{
-
- SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter);
- if(!tmp.clearWaitingFor(nodeId)){
- jam();
- return;
- }
-
- switch(createTabPtr.p->m_coordinatorData.m_requestType){
- case CreateTabReq::CreateTablePrepare:{
-
- if(createTabPtr.p->m_errorCode != 0){
- jam();
- /**
- * Failed to prepare on at least one node -> abort on all
- */
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ;
- createTabPtr.p->m_coordinatorData.m_requestType = CreateTabReq::CreateTableDrop;
- ndbrequire(tmp.init<CreateTabRef>(rg, createTabPtr.p->key));
-
- CreateTabReq * const req = (CreateTabReq*)signal->getDataPtrSend();
- req->senderRef = reference();
- req->senderData = createTabPtr.p->key;
- req->requestType = CreateTabReq::CreateTableDrop;
-
- sendSignal(rg, GSN_CREATE_TAB_REQ, signal,
- CreateTabReq::SignalLength, JBB);
- return;
- }
-
- /**
- * Lock mutex before committing table
- */
- Mutex mutex(signal, c_mutexMgr, createTabPtr.p->m_startLcpMutex);
- Callback c = { safe_cast(&Dbdict::createTab_startLcpMutex_locked),
- createTabPtr.p->key};
-
- ndbrequire(mutex.lock(c));
- return;
- }
- case CreateTabReq::CreateTableCommit:{
- jam();
- ndbrequire(createTabPtr.p->m_errorCode == 0);
-
- /**
- * Unlock mutex after committing table
- */
- Mutex mutex(signal, c_mutexMgr, createTabPtr.p->m_startLcpMutex);
- Callback c = { safe_cast(&Dbdict::createTab_startLcpMutex_unlocked),
- createTabPtr.p->key};
- mutex.unlock(c);
- return;
- }
- case CreateTabReq::CreateTableDrop:{
- jam();
- CreateTableRef * const ref = (CreateTableRef*)signal->getDataPtr();
- ref->senderRef = reference();
- ref->senderData = createTabPtr.p->m_senderData;
- ref->errorCode = createTabPtr.p->m_errorCode;
- ref->masterNodeId = c_masterNodeId;
- ref->status = 0;
- ref->errorKey = 0;
- ref->errorLine = 0;
-
- //@todo check api failed
- sendSignal(createTabPtr.p->m_senderRef, GSN_CREATE_TABLE_REF, signal,
- CreateTableRef::SignalLength, JBB);
- c_opCreateTable.release(createTabPtr);
- c_blockState = BS_IDLE;
- return;
- }
- }
- ndbrequire(false);
-}
-
-void
-Dbdict::createTab_startLcpMutex_locked(Signal* signal,
- Uint32 callbackData,
- Uint32 retValue){
- jamEntry();
-
- ndbrequire(retValue == 0);
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
-
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ;
- createTabPtr.p->m_coordinatorData.m_requestType = CreateTabReq::CreateTableCommit;
- SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter);
- tmp.init<CreateTabRef>(rg, GSN_CREATE_TAB_REF, createTabPtr.p->key);
-
- CreateTabReq * const req = (CreateTabReq*)signal->getDataPtrSend();
- req->senderRef = reference();
- req->senderData = createTabPtr.p->key;
- req->requestType = CreateTabReq::CreateTableCommit;
-
- sendSignal(rg, GSN_CREATE_TAB_REQ, signal,
- CreateTabReq::SignalLength, JBB);
-}
-
-void
-Dbdict::createTab_startLcpMutex_unlocked(Signal* signal,
- Uint32 callbackData,
- Uint32 retValue){
- jamEntry();
-
- ndbrequire(retValue == 0);
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
-
- createTabPtr.p->m_startLcpMutex.release(c_mutexMgr);
-
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
-
- CreateTableConf * const conf = (CreateTableConf*)signal->getDataPtr();
- conf->senderRef = reference();
- conf->senderData = createTabPtr.p->m_senderData;
- conf->tableId = createTabPtr.p->m_tablePtrI;
- conf->tableVersion = tabPtr.p->tableVersion;
-
- //@todo check api failed
- sendSignal(createTabPtr.p->m_senderRef, GSN_CREATE_TABLE_CONF, signal,
- CreateTableConf::SignalLength, JBB);
- c_opCreateTable.release(createTabPtr);
- c_blockState = BS_IDLE;
- return;
-}
-
-/***********************************************************
- * CreateTable participant code
- **********************************************************/
-void
-Dbdict::execCREATE_TAB_REQ(Signal* signal){
- jamEntry();
-
- if(!assembleFragments(signal)){
- jam();
- return;
- }
-
- CreateTabReq * const req = (CreateTabReq*)signal->getDataPtr();
-
- CreateTabReq::RequestType rt = (CreateTabReq::RequestType)req->requestType;
- switch(rt){
- case CreateTabReq::CreateTablePrepare:
- CRASH_INSERTION2(6003, getOwnNodeId() != c_masterNodeId);
- createTab_prepare(signal, req);
- return;
- case CreateTabReq::CreateTableCommit:
- CRASH_INSERTION2(6004, getOwnNodeId() != c_masterNodeId);
- createTab_commit(signal, req);
- return;
- case CreateTabReq::CreateTableDrop:
- CRASH_INSERTION2(6005, getOwnNodeId() != c_masterNodeId);
- createTab_drop(signal, req);
- return;
- }
- ndbrequire(false);
-}
-
-void
-Dbdict::createTab_prepare(Signal* signal, CreateTabReq * req){
-
- const Uint32 gci = req->gci;
- const Uint32 tableId = req->tableId;
- const Uint32 tableVersion = req->tableVersion;
-
- SegmentedSectionPtr tabInfoPtr;
- signal->getSection(tabInfoPtr, CreateTabReq::DICT_TAB_INFO);
-
- CreateTableRecordPtr createTabPtr;
- if(req->senderRef == reference()){
- jam();
- ndbrequire(c_opCreateTable.find(createTabPtr, req->senderData));
- } else {
- jam();
- c_opCreateTable.seize(createTabPtr);
-
- ndbrequire(!createTabPtr.isNull());
-
- createTabPtr.p->key = req->senderData;
- c_opCreateTable.add(createTabPtr);
- createTabPtr.p->m_errorCode = 0;
- createTabPtr.p->m_tablePtrI = tableId;
- createTabPtr.p->m_coordinatorRef = req->senderRef;
- createTabPtr.p->m_senderRef = req->clientRef;
- createTabPtr.p->m_senderData = req->clientData;
- createTabPtr.p->m_dihAddFragPtr = RNIL;
-
- /**
- * Put data into table record
- */
- ParseDictTabInfoRecord parseRecord;
- parseRecord.requestType = DictTabInfo::AddTableFromDict;
- parseRecord.errorCode = 0;
-
- SimplePropertiesSectionReader r(tabInfoPtr, getSectionSegmentPool());
-
- handleTabInfoInit(r, &parseRecord);
-
- ndbrequire(parseRecord.errorCode == 0);
- }
-
- ndbrequire(!createTabPtr.isNull());
-
- SegmentedSectionPtr fragPtr;
- signal->getSection(fragPtr, CreateTabReq::FRAGMENTATION);
-
- createTabPtr.p->m_tabInfoPtrI = tabInfoPtr.i;
- createTabPtr.p->m_fragmentsPtrI = fragPtr.i;
-
- signal->header.m_noOfSections = 0;
-
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, tableId);
- tabPtr.p->packedSize = tabInfoPtr.sz;
- tabPtr.p->tableVersion = tableVersion;
- tabPtr.p->gciTableCreated = gci;
-
- SchemaFile::TableEntry tabEntry;
- tabEntry.m_tableVersion = tableVersion;
- tabEntry.m_tableType = tabPtr.p->tableType;
- tabEntry.m_tableState = SchemaFile::ADD_STARTED;
- tabEntry.m_gcp = gci;
- tabEntry.m_info_words = tabInfoPtr.sz;
- memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused));
-
- Callback callback;
- callback.m_callbackData = createTabPtr.p->key;
- callback.m_callbackFunction =
- safe_cast(&Dbdict::createTab_writeSchemaConf1);
-
- updateSchemaState(signal, tableId, &tabEntry, &callback);
-}
-
-void getSection(SegmentedSectionPtr & ptr, Uint32 i);
-
-void
-Dbdict::createTab_writeSchemaConf1(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode){
- jam();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
-
- Callback callback;
- callback.m_callbackData = createTabPtr.p->key;
- callback.m_callbackFunction =
- safe_cast(&Dbdict::createTab_writeTableConf);
-
- SegmentedSectionPtr tabInfoPtr;
- getSection(tabInfoPtr, createTabPtr.p->m_tabInfoPtrI);
- writeTableFile(signal, createTabPtr.p->m_tablePtrI, tabInfoPtr, &callback);
-
- createTabPtr.p->m_tabInfoPtrI = RNIL;
- signal->setSection(tabInfoPtr, 0);
- releaseSections(signal);
-}
-
-void
-Dbdict::createTab_writeTableConf(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode){
- jam();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
-
- SegmentedSectionPtr fragDataPtr;
- getSection(fragDataPtr, createTabPtr.p->m_fragmentsPtrI);
-
- Callback callback;
- callback.m_callbackData = callbackData;
- callback.m_callbackFunction =
- safe_cast(&Dbdict::createTab_dihComplete);
-
- createTab_dih(signal, createTabPtr, fragDataPtr, &callback);
-}
-
-void
-Dbdict::createTab_dih(Signal* signal,
- CreateTableRecordPtr createTabPtr,
- SegmentedSectionPtr fragDataPtr,
- Callback * c){
- jam();
-
- createTabPtr.p->m_callback = * c;
-
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
-
- DiAddTabReq * req = (DiAddTabReq*)signal->getDataPtrSend();
- req->connectPtr = createTabPtr.p->key;
- req->tableId = tabPtr.i;
- req->fragType = tabPtr.p->fragmentType;
- req->kValue = tabPtr.p->kValue;
- req->noOfReplicas = 0;
- req->storedTable = tabPtr.p->storedTable;
- req->tableType = tabPtr.p->tableType;
- req->schemaVersion = tabPtr.p->tableVersion;
- req->primaryTableId = tabPtr.p->primaryTableId;
-
- if(!fragDataPtr.isNull()){
- signal->setSection(fragDataPtr, DiAddTabReq::FRAGMENTATION);
- }
-
- sendSignal(DBDIH_REF, GSN_DIADDTABREQ, signal,
- DiAddTabReq::SignalLength, JBB);
-}
-
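-// Compute the number of linear hash distribution bits needed to address
-// totalFragments fragments; the page bits are currently always zero.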
-static
-void
-calcLHbits(Uint32 * lhPageBits, Uint32 * lhDistrBits,
- Uint32 fid, Uint32 totalFragments)
-{
- Uint32 distrBits = 0;
- Uint32 pageBits = 0;
-
- Uint32 tmp = 1;
- while (tmp < totalFragments) {
- jam();
- tmp <<= 1;
- distrBits++;
- }//while
-#ifdef ndb_classical_lhdistrbits
- if (tmp != totalFragments) {
- tmp >>= 1;
- if ((fid >= (totalFragments - tmp)) && (fid < (tmp - 1))) {
- distrBits--;
- }//if
- }//if
-#endif
- * lhPageBits = pageBits;
- * lhDistrBits = distrBits;
-
-}//calcLHbits()
-
-
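-// DBDIH asks DICT to add one fragment of the table on this node; the request
-// is translated into an LQHFRAGREQ carrying the table's storage parameters.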
-void
-Dbdict::execADD_FRAGREQ(Signal* signal) {
- jamEntry();
-
- AddFragReq * const req = (AddFragReq*)signal->getDataPtr();
-
- Uint32 dihPtr = req->dihPtr;
- Uint32 senderData = req->senderData;
- Uint32 tableId = req->tableId;
- Uint32 fragId = req->fragmentId;
- Uint32 node = req->nodeId;
- Uint32 lcpNo = req->nextLCP;
- Uint32 fragCount = req->totalFragments;
- Uint32 requestInfo = req->requestInfo;
- Uint32 startGci = req->startGci;
-
- ndbrequire(node == getOwnNodeId());
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, senderData));
-
- createTabPtr.p->m_dihAddFragPtr = dihPtr;
-
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, tableId);
-
-#if 0
- tabPtr.p->gciTableCreated = (startGci > tabPtr.p->gciTableCreated ? startGci:
- tabPtr.p->gciTableCreated);
-#endif
-
- /**
- * Calc lh3PageBits
- */
- Uint32 lhDistrBits = 0;
- Uint32 lhPageBits = 0;
- ::calcLHbits(&lhPageBits, &lhDistrBits, fragId, fragCount);
-
- {
- LqhFragReq* req = (LqhFragReq*)signal->getDataPtrSend();
- req->senderData = senderData;
- req->senderRef = reference();
- req->fragmentId = fragId;
- req->requestInfo = requestInfo;
- req->tableId = tableId;
- req->localKeyLength = tabPtr.p->localKeyLen;
- req->maxLoadFactor = tabPtr.p->maxLoadFactor;
- req->minLoadFactor = tabPtr.p->minLoadFactor;
- req->kValue = tabPtr.p->kValue;
- req->lh3DistrBits = 0; //lhDistrBits;
- req->lh3PageBits = 0; //lhPageBits;
- req->noOfAttributes = tabPtr.p->noOfAttributes;
- req->noOfNullAttributes = tabPtr.p->noOfNullBits;
- req->noOfPagesToPreAllocate = 0;
- req->schemaVersion = tabPtr.p->tableVersion;
- Uint32 keyLen = tabPtr.p->tupKeyLength;
- req->keyLength = keyLen; // wl-2066 no more "long keys"
- req->nextLCP = lcpNo;
-
- req->noOfKeyAttr = tabPtr.p->noOfPrimkey;
- req->noOfNewAttr = 0;
- // noOfCharsets passed to TUP in upper half
- req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16);
- req->checksumIndicator = 1;
- req->noOfAttributeGroups = 1;
- req->GCPIndicator = 0;
- req->startGci = startGci;
- req->tableType = tabPtr.p->tableType;
- req->primaryTableId = tabPtr.p->primaryTableId;
- sendSignal(DBLQH_REF, GSN_LQHFRAGREQ, signal,
- LqhFragReq::SignalLength, JBB);
- }
-}
-
-void
-Dbdict::execLQHFRAGREF(Signal * signal){
- jamEntry();
- LqhFragRef * const ref = (LqhFragRef*)signal->getDataPtr();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData));
-
- createTabPtr.p->setErrorCode(ref->errorCode);
-
- {
- AddFragRef * const ref = (AddFragRef*)signal->getDataPtr();
- ref->dihPtr = createTabPtr.p->m_dihAddFragPtr;
- sendSignal(DBDIH_REF, GSN_ADD_FRAGREF, signal,
- AddFragRef::SignalLength, JBB);
- }
-}
-
-void
-Dbdict::execLQHFRAGCONF(Signal * signal){
- jamEntry();
- LqhFragConf * const conf = (LqhFragConf*)signal->getDataPtr();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
-
- createTabPtr.p->m_lqhFragPtr = conf->lqhFragPtr;
-
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
- sendLQHADDATTRREQ(signal, createTabPtr, tabPtr.p->firstAttribute);
-}
-
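-// Send the table's attributes to DBLQH in batches of at most
-// LqhAddAttrReq::MAX_ATTRIBUTES; execLQHADDATTCONF requests the next batch
-// until the attribute list is exhausted.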
-void
-Dbdict::sendLQHADDATTRREQ(Signal* signal,
- CreateTableRecordPtr createTabPtr,
- Uint32 attributePtrI){
- jam();
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
- LqhAddAttrReq * const req = (LqhAddAttrReq*)signal->getDataPtrSend();
- Uint32 i = 0;
- for(i = 0; i<LqhAddAttrReq::MAX_ATTRIBUTES && attributePtrI != RNIL; i++){
- jam();
- AttributeRecordPtr attrPtr;
- c_attributeRecordPool.getPtr(attrPtr, attributePtrI);
- LqhAddAttrReq::Entry& entry = req->attributes[i];
- entry.attrId = attrPtr.p->attributeId;
- entry.attrDescriptor = attrPtr.p->attributeDescriptor;
- entry.extTypeInfo = 0;
- // charset number passed to TUP, TUX in upper half
- entry.extTypeInfo |= (attrPtr.p->extPrecision & ~0xFFFF);
- if (tabPtr.p->isIndex()) {
- Uint32 primaryAttrId;
- if (attrPtr.p->nextAttrInTable != RNIL) {
- getIndexAttr(tabPtr, attributePtrI, &primaryAttrId);
- } else {
- primaryAttrId = ZNIL;
- if (tabPtr.p->isOrderedIndex())
- entry.attrId = 0; // attribute goes to TUP
- }
- entry.attrId |= (primaryAttrId << 16);
- }
- attributePtrI = attrPtr.p->nextAttrInTable;
- }
- req->lqhFragPtr = createTabPtr.p->m_lqhFragPtr;
- req->senderData = createTabPtr.p->key;
- req->senderAttrPtr = attributePtrI;
- req->noOfAttributes = i;
-
- sendSignal(DBLQH_REF, GSN_LQHADDATTREQ, signal,
- LqhAddAttrReq::HeaderLength + LqhAddAttrReq::EntryLength * i, JBB);
-}
-
-void
-Dbdict::execLQHADDATTREF(Signal * signal){
- jamEntry();
- LqhAddAttrRef * const ref = (LqhAddAttrRef*)signal->getDataPtr();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData));
-
- createTabPtr.p->setErrorCode(ref->errorCode);
-
- {
- AddFragRef * const ref = (AddFragRef*)signal->getDataPtr();
- ref->dihPtr = createTabPtr.p->m_dihAddFragPtr;
- sendSignal(DBDIH_REF, GSN_ADD_FRAGREF, signal,
- AddFragRef::SignalLength, JBB);
- }
-
-}
-
-void
-Dbdict::execLQHADDATTCONF(Signal * signal){
- jamEntry();
- LqhAddAttrConf * const conf = (LqhAddAttrConf*)signal->getDataPtr();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
-
- const Uint32 fragId = conf->fragId;
- const Uint32 nextAttrPtr = conf->senderAttrPtr;
- if(nextAttrPtr != RNIL){
- jam();
- sendLQHADDATTRREQ(signal, createTabPtr, nextAttrPtr);
- return;
- }
-
- {
- AddFragConf * const conf = (AddFragConf*)signal->getDataPtr();
- conf->dihPtr = createTabPtr.p->m_dihAddFragPtr;
- conf->fragId = fragId;
- sendSignal(DBDIH_REF, GSN_ADD_FRAGCONF, signal,
- AddFragConf::SignalLength, JBB);
- }
-}
-
-void
-Dbdict::execDIADDTABREF(Signal* signal){
- jam();
-
- DiAddTabRef * const ref = (DiAddTabRef*)signal->getDataPtr();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData));
-
- createTabPtr.p->setErrorCode(ref->errorCode);
- execute(signal, createTabPtr.p->m_callback, 0);
-}
-
-void
-Dbdict::execDIADDTABCONF(Signal* signal){
- jam();
-
- DiAddTabConf * const conf = (DiAddTabConf*)signal->getDataPtr();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
-
- signal->theData[0] = createTabPtr.p->key;
- signal->theData[1] = reference();
- signal->theData[2] = createTabPtr.p->m_tablePtrI;
-
- if(createTabPtr.p->m_dihAddFragPtr != RNIL){
- jam();
-
- /**
- * We did perform at least one LQHFRAGREQ
- */
- sendSignal(DBLQH_REF, GSN_TAB_COMMITREQ, signal, 3, JBB);
- return;
- } else {
- /**
- * No local fragment (i.e. no LQHFRAGREQ)
- */
- execute(signal, createTabPtr.p->m_callback, 0);
- return;
- //sendSignal(DBDIH_REF, GSN_TAB_COMMITREQ, signal, 3, JBB);
- }
-}
-
-void
-Dbdict::execTAB_COMMITREF(Signal* signal) {
- jamEntry();
- ndbrequire(false);
-}//execTAB_COMMITREF()
-
-void
-Dbdict::execTAB_COMMITCONF(Signal* signal){
- jamEntry();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, signal->theData[0]));
-
- if(refToBlock(signal->getSendersBlockRef()) == DBLQH){
-
- execute(signal, createTabPtr.p->m_callback, 0);
- return;
- }
-
- if(refToBlock(signal->getSendersBlockRef()) == DBDIH){
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
-
- signal->theData[0] = tabPtr.i;
- signal->theData[1] = tabPtr.p->tableVersion;
- signal->theData[2] = (Uint32)tabPtr.p->storedTable;
- signal->theData[3] = reference();
- signal->theData[4] = (Uint32)tabPtr.p->tableType;
- signal->theData[5] = createTabPtr.p->key;
- signal->theData[6] = (Uint32)tabPtr.p->noOfPrimkey;
-
- Uint32 buf[2 * MAX_ATTRIBUTES_IN_INDEX];
- Uint32 sz = 0;
- Uint32 tAttr = tabPtr.p->firstAttribute;
- while (tAttr != RNIL) {
- jam();
- AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
- if (aRec->tupleKey) {
- buf[sz++] = aRec->attributeDescriptor;
- buf[sz++] = (aRec->extPrecision >> 16); // charset number
- }
- tAttr = aRec->nextAttrInTable;
- }
- ndbrequire((int)sz == 2 * tabPtr.p->noOfPrimkey);
-
- LinearSectionPtr lsPtr[3];
- lsPtr[0].p = buf;
- lsPtr[0].sz = sz;
- // note: ACC does not reply
- if (tabPtr.p->isTable() || tabPtr.p->isHashIndex())
- sendSignal(DBACC_REF, GSN_TC_SCHVERREQ, signal, 7, JBB, lsPtr, 1);
- sendSignal(DBTC_REF, GSN_TC_SCHVERREQ, signal, 7, JBB, lsPtr, 1);
- return;
- }
-
- ndbrequire(false);
-}
-
-void
-Dbdict::createTab_dihComplete(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode){
- jam();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
-
- //@todo check for master failed
-
- if(createTabPtr.p->m_errorCode == 0){
- jam();
-
- CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr();
- conf->senderRef = reference();
- conf->senderData = createTabPtr.p->key;
- sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_CONF,
- signal, CreateTabConf::SignalLength, JBB);
- return;
- }
-
- CreateTabRef * const ref = (CreateTabRef*)signal->getDataPtr();
- ref->senderRef = reference();
- ref->senderData = createTabPtr.p->key;
- ref->errorCode = createTabPtr.p->m_errorCode;
- ref->errorLine = 0;
- ref->errorKey = 0;
- ref->errorStatus = 0;
-
- sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_REF,
- signal, CreateTabRef::SignalLength, JBB);
-}
-
-void
-Dbdict::createTab_commit(Signal * signal, CreateTabReq * req){
- jam();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, req->senderData));
-
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
-
- SchemaFile::TableEntry tabEntry;
- tabEntry.m_tableVersion = tabPtr.p->tableVersion;
- tabEntry.m_tableType = tabPtr.p->tableType;
- tabEntry.m_tableState = SchemaFile::TABLE_ADD_COMMITTED;
- tabEntry.m_gcp = tabPtr.p->gciTableCreated;
- tabEntry.m_info_words = tabPtr.p->packedSize;
- memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused));
-
- Callback callback;
- callback.m_callbackData = createTabPtr.p->key;
- callback.m_callbackFunction =
- safe_cast(&Dbdict::createTab_writeSchemaConf2);
-
- updateSchemaState(signal, tabPtr.i, &tabEntry, &callback);
-}
-
-void
-Dbdict::createTab_writeSchemaConf2(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode){
- jam();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
-
- Callback c;
- c.m_callbackData = callbackData;
- c.m_callbackFunction = safe_cast(&Dbdict::createTab_alterComplete);
- alterTab_activate(signal, createTabPtr, &c);
-}
-
-void
-Dbdict::createTab_alterComplete(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode){
- jam();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
-
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
- tabPtr.p->tabState = TableRecord::DEFINED;
-
- //@todo check error
- //@todo check master failed
-
- CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr();
- conf->senderRef = reference();
- conf->senderData = createTabPtr.p->key;
- sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_CONF,
- signal, CreateTabConf::SignalLength, JBB);
-
- if(createTabPtr.p->m_coordinatorRef != reference()){
- jam();
- c_opCreateTable.release(createTabPtr);
- }
-}
-
-void
-Dbdict::createTab_drop(Signal* signal, CreateTabReq * req){
- jam();
-
- const Uint32 key = req->senderData;
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, key));
-
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
- tabPtr.p->tabState = TableRecord::DROPPING;
-
- DropTableRecordPtr dropTabPtr;
- ndbrequire(c_opDropTable.seize(dropTabPtr));
-
- dropTabPtr.p->key = key;
- c_opDropTable.add(dropTabPtr);
-
- dropTabPtr.p->m_errorCode = 0;
- dropTabPtr.p->m_request.tableId = createTabPtr.p->m_tablePtrI;
- dropTabPtr.p->m_requestType = DropTabReq::CreateTabDrop;
- dropTabPtr.p->m_coordinatorRef = createTabPtr.p->m_coordinatorRef;
- dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_REQ;
-
- dropTabPtr.p->m_participantData.m_block = 0;
- dropTabPtr.p->m_participantData.m_callback.m_callbackData = req->senderData;
- dropTabPtr.p->m_participantData.m_callback.m_callbackFunction =
- safe_cast(&Dbdict::createTab_dropComplete);
- dropTab_nextStep(signal, dropTabPtr);
-}
-
-void
-Dbdict::createTab_dropComplete(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode){
- jam();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
-
- DropTableRecordPtr dropTabPtr;
- ndbrequire(c_opDropTable.find(dropTabPtr, callbackData));
-
- TableRecordPtr tabPtr;
- c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
-
- releaseTableObject(tabPtr.i);
-
- XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
- SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tabPtr.i);
- tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED;
-
- //@todo check error
- //@todo check master failed
-
- CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr();
- conf->senderRef = reference();
- conf->senderData = createTabPtr.p->key;
- sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_CONF,
- signal, CreateTabConf::SignalLength, JBB);
-
- if(createTabPtr.p->m_coordinatorRef != reference()){
- jam();
- c_opCreateTable.release(createTabPtr);
- }
-
- c_opDropTable.release(dropTabPtr);
-}
-
-void
-Dbdict::alterTab_activate(Signal* signal, CreateTableRecordPtr createTabPtr,
- Callback * c){
-
- createTabPtr.p->m_callback = * c;
-
- signal->theData[0] = createTabPtr.p->key;
- signal->theData[1] = reference();
- signal->theData[2] = createTabPtr.p->m_tablePtrI;
- sendSignal(DBDIH_REF, GSN_TAB_COMMITREQ, signal, 3, JBB);
-}
-
-void
-Dbdict::execTC_SCHVERCONF(Signal* signal){
- jamEntry();
-
- CreateTableRecordPtr createTabPtr;
- ndbrequire(c_opCreateTable.find(createTabPtr, signal->theData[1]));
-
- execute(signal, createTabPtr.p->m_callback, 0);
-}
-
-#define tabRequire(cond, error) \
- if (!(cond)) { \
- jam(); \
- parseP->errorCode = error; parseP->errorLine = __LINE__; \
- parseP->errorKey = it.getKey(); \
- return; \
- }//if
-
-// handleAddTableFailure(signal, __LINE__, allocatedTable);
-
-void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
- ParseDictTabInfoRecord * parseP,
- bool checkExist)
-{
-/* ---------------------------------------------------------------- */
-// We always start by handling table name since this must be the first
-// item in the list. Through the table name we can derive if it is a
-// correct name, a new name or an already existing table.
-/* ---------------------------------------------------------------- */
-
- it.first();
-
- SimpleProperties::UnpackStatus status;
- DictTabInfo::Table tableDesc; tableDesc.init();
- status = SimpleProperties::unpack(it, &tableDesc,
- DictTabInfo::TableMapping,
- DictTabInfo::TableMappingSize,
- true, true);
-
- if(status != SimpleProperties::Break){
- parseP->errorCode = CreateTableRef::InvalidFormat;
- parseP->status = status;
- parseP->errorKey = it.getKey();
- parseP->errorLine = __LINE__;
- return;
- }
-
- if(parseP->requestType == DictTabInfo::AlterTableFromAPI)
- {
- ndbrequire(!checkExist);
- }
- if(!checkExist)
- {
- ndbrequire(parseP->requestType == DictTabInfo::AlterTableFromAPI);
- }
-
- /* ---------------------------------------------------------------- */
- // Verify that table name is an allowed table name.
- // TODO
- /* ---------------------------------------------------------------- */
- const Uint32 tableNameLength = strlen(tableDesc.TableName) + 1;
-
- TableRecord keyRecord;
- tabRequire(tableNameLength <= sizeof(keyRecord.tableName),
- CreateTableRef::TableNameTooLong);
- strcpy(keyRecord.tableName, tableDesc.TableName);
-
- TableRecordPtr tablePtr;
- c_tableRecordHash.find(tablePtr, keyRecord);
-
- if (checkExist){
- jam();
- /* ---------------------------------------------------------------- */
- // Check if table already existed.
- /* ---------------------------------------------------------------- */
- tabRequire(tablePtr.i == RNIL, CreateTableRef::TableAlreadyExist);
- }
-
- switch (parseP->requestType) {
- case DictTabInfo::CreateTableFromAPI: {
- jam();
- }
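- // Fall through: a new table record is allocated for both request types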
- case DictTabInfo::AlterTableFromAPI:{
- jam();
- tablePtr.i = getFreeTableRecord(tableDesc.PrimaryTableId);
- /* ---------------------------------------------------------------- */
- // Check if no free tables existed.
- /* ---------------------------------------------------------------- */
- tabRequire(tablePtr.i != RNIL, CreateTableRef::NoMoreTableRecords);
-
- c_tableRecordPool.getPtr(tablePtr);
- break;
- }
- case DictTabInfo::AddTableFromDict:
- case DictTabInfo::ReadTableFromDiskSR:
- case DictTabInfo::GetTabInfoConf:
- {
-/* ---------------------------------------------------------------- */
-// Get table id and check that table doesn't already exist
-/* ---------------------------------------------------------------- */
- tablePtr.i = tableDesc.TableId;
-
- if (parseP->requestType == DictTabInfo::ReadTableFromDiskSR) {
- ndbrequire(tablePtr.i == c_restartRecord.activeTable);
- }//if
- if (parseP->requestType == DictTabInfo::GetTabInfoConf) {
- ndbrequire(tablePtr.i == c_restartRecord.activeTable);
- }//if
-
- c_tableRecordPool.getPtr(tablePtr);
- ndbrequire(tablePtr.p->tabState == TableRecord::NOT_DEFINED);
-
- //Uint32 oldTableVersion = tablePtr.p->tableVersion;
- initialiseTableRecord(tablePtr);
- if (parseP->requestType == DictTabInfo::AddTableFromDict) {
- jam();
- tablePtr.p->tabState = TableRecord::DEFINING;
- }//if
-#ifdef HAVE_TABLE_REORG
-/* ---------------------------------------------------------------- */
-// Get the id of the second table and check that the table doesn't already exist
-// and set up links between first and second table.
-/* ---------------------------------------------------------------- */
- TableRecordPtr secondTablePtr;
- secondTablePtr.i = tableDesc.SecondTableId;
- c_tableRecordPool.getPtr(secondTablePtr);
- ndbrequire(secondTablePtr.p->tabState == TableRecord::NOT_DEFINED);
-
- initialiseTableRecord(secondTablePtr);
- secondTablePtr.p->tabState = TableRecord::REORG_TABLE_PREPARED;
- secondTablePtr.p->secondTable = tablePtr.i;
- tablePtr.p->secondTable = secondTablePtr.i;
-#endif
-/* ---------------------------------------------------------------- */
-// Set table version
-/* ---------------------------------------------------------------- */
- Uint32 tableVersion = tableDesc.TableVersion;
- tablePtr.p->tableVersion = tableVersion;
-
- break;
- }
- default:
- ndbrequire(false);
- break;
- }//switch
- parseP->tablePtr = tablePtr;
-
- strcpy(tablePtr.p->tableName, keyRecord.tableName);
- if (parseP->requestType != DictTabInfo::AlterTableFromAPI) {
- jam();
-#ifdef VM_TRACE
- ndbout_c("Dbdict: name=%s,id=%u", tablePtr.p->tableName, tablePtr.i);
- TableRecordPtr tmp;
- ndbrequire(!c_tableRecordHash.find(tmp, * tablePtr.p));
-#endif
- c_tableRecordHash.add(tablePtr);
- }
-
- //tablePtr.p->noOfPrimkey = tableDesc.NoOfKeyAttr;
- //tablePtr.p->noOfNullAttr = tableDesc.NoOfNullable;
- //tablePtr.p->tupKeyLength = tableDesc.KeyLength;
- tablePtr.p->noOfAttributes = tableDesc.NoOfAttributes;
- tablePtr.p->storedTable = tableDesc.TableLoggedFlag;
- tablePtr.p->minLoadFactor = tableDesc.MinLoadFactor;
- tablePtr.p->maxLoadFactor = tableDesc.MaxLoadFactor;
- tablePtr.p->fragmentType = (DictTabInfo::FragmentType)tableDesc.FragmentType;
- tablePtr.p->tableType = (DictTabInfo::TableType)tableDesc.TableType;
- tablePtr.p->kValue = tableDesc.TableKValue;
- tablePtr.p->fragmentCount = tableDesc.FragmentCount;
-
- tablePtr.p->frmLen = tableDesc.FrmLen;
- memcpy(tablePtr.p->frmData, tableDesc.FrmData, tableDesc.FrmLen);
-
- if(tableDesc.PrimaryTableId != RNIL) {
-
- tablePtr.p->primaryTableId = tableDesc.PrimaryTableId;
- tablePtr.p->indexState = (TableRecord::IndexState)tableDesc.IndexState;
- tablePtr.p->insertTriggerId = tableDesc.InsertTriggerId;
- tablePtr.p->updateTriggerId = tableDesc.UpdateTriggerId;
- tablePtr.p->deleteTriggerId = tableDesc.DeleteTriggerId;
- tablePtr.p->customTriggerId = tableDesc.CustomTriggerId;
- } else {
- tablePtr.p->primaryTableId = RNIL;
- tablePtr.p->indexState = TableRecord::IS_UNDEFINED;
- tablePtr.p->insertTriggerId = RNIL;
- tablePtr.p->updateTriggerId = RNIL;
- tablePtr.p->deleteTriggerId = RNIL;
- tablePtr.p->customTriggerId = RNIL;
- }
- tablePtr.p->buildTriggerId = RNIL;
- tablePtr.p->indexLocal = 0;
-
- handleTabInfo(it, parseP);
-
- if(parseP->errorCode != 0)
- {
- /**
- * Release table
- */
- releaseTableObject(tablePtr.i, checkExist);
- }
-}//handleTabInfoInit()
-
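-// Unpack the attribute list: validate each attribute (duplicate names,
-// charsets, array sizes, nullable keys) and accumulate the key count, key
-// length, null bits and total record length for the table.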
-void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
- ParseDictTabInfoRecord * parseP)
-{
- TableRecordPtr tablePtr = parseP->tablePtr;
-
- SimpleProperties::UnpackStatus status;
-
- Uint32 keyCount = 0;
- Uint32 keyLength = 0;
- Uint32 attrCount = tablePtr.p->noOfAttributes;
- Uint32 nullCount = 0;
- Uint32 nullBits = 0;
- Uint32 noOfCharsets = 0;
- Uint16 charsets[128];
- Uint32 recordLength = 0;
- AttributeRecordPtr attrPtr;
- c_attributeRecordHash.removeAll();
-
- for(Uint32 i = 0; i<attrCount; i++){
- /**
- * Attribute Name
- */
- DictTabInfo::Attribute attrDesc; attrDesc.init();
- status = SimpleProperties::unpack(it, &attrDesc,
- DictTabInfo::AttributeMapping,
- DictTabInfo::AttributeMappingSize,
- true, true);
- if(status != SimpleProperties::Break){
- parseP->errorCode = CreateTableRef::InvalidFormat;
- parseP->status = status;
- parseP->errorKey = it.getKey();
- parseP->errorLine = __LINE__;
- return;
- }
-
- /**
- * Check that attribute is not defined twice
- */
- AttributeRecord tmpAttr;
- {
- strcpy(tmpAttr.attributeName, attrDesc.AttributeName);
-
- AttributeRecordPtr attrPtr;
- c_attributeRecordHash.find(attrPtr, tmpAttr);
-
- if(attrPtr.i != RNIL){
- parseP->errorCode = CreateTableRef::AttributeNameTwice;
- return;
- }
- }
-
- if(!getNewAttributeRecord(tablePtr, attrPtr)){
- jam();
- parseP->errorCode = CreateTableRef::NoMoreAttributeRecords;
- return;
- }
-
- /**
- * TmpAttrib to Attribute mapping
- */
- strcpy(attrPtr.p->attributeName, attrDesc.AttributeName);
- attrPtr.p->attributeId = attrDesc.AttributeId;
- attrPtr.p->tupleKey = (keyCount + 1) * attrDesc.AttributeKeyFlag;
-
- attrPtr.p->extPrecision = attrDesc.AttributeExtPrecision;
- attrPtr.p->extScale = attrDesc.AttributeExtScale;
- attrPtr.p->extLength = attrDesc.AttributeExtLength;
- // charset in upper half of precision
- unsigned csNumber = (attrPtr.p->extPrecision >> 16);
- if (csNumber != 0) {
- /*
- * A new charset is first accessed here on this node.
- * TODO use separate thread (e.g. via NDBFS) if we need to load from file
- */
- CHARSET_INFO* cs = get_charset(csNumber, MYF(0));
- if (cs == NULL) {
- parseP->errorCode = CreateTableRef::InvalidCharset;
- parseP->errorLine = __LINE__;
- return;
- }
- // XXX should be done somewhere in mysql
- all_charsets[cs->number] = cs;
- unsigned i = 0;
- while (i < noOfCharsets) {
- if (charsets[i] == csNumber)
- break;
- i++;
- }
- if (i == noOfCharsets) {
- noOfCharsets++;
- if (noOfCharsets > sizeof(charsets)/sizeof(charsets[0])) {
- parseP->errorCode = CreateTableRef::InvalidFormat;
- parseP->errorLine = __LINE__;
- return;
- }
- charsets[i] = csNumber;
- }
- }
-
- // compute attribute size and array size
- bool translateOk = attrDesc.translateExtType();
- tabRequire(translateOk, CreateTableRef::Inconsistency);
-
- if(attrDesc.AttributeArraySize > 65535){
- parseP->errorCode = CreateTableRef::ArraySizeTooBig;
- parseP->status = status;
- parseP->errorKey = it.getKey();
- parseP->errorLine = __LINE__;
- return;
- }
-
- Uint32 desc = 0;
- AttributeDescriptor::setType(desc, attrDesc.AttributeExtType);
- AttributeDescriptor::setSize(desc, attrDesc.AttributeSize);
- AttributeDescriptor::setArray(desc, attrDesc.AttributeArraySize);
- AttributeDescriptor::setNullable(desc, attrDesc.AttributeNullableFlag);
- AttributeDescriptor::setDKey(desc, attrDesc.AttributeDKey);
- AttributeDescriptor::setPrimaryKey(desc, attrDesc.AttributeKeyFlag);
- attrPtr.p->attributeDescriptor = desc;
- attrPtr.p->autoIncrement = attrDesc.AttributeAutoIncrement;
- strcpy(attrPtr.p->defaultValue, attrDesc.AttributeDefaultValue);
-
- tabRequire(attrDesc.AttributeId == i, CreateTableRef::InvalidFormat);
-
- attrCount ++;
- keyCount += attrDesc.AttributeKeyFlag;
- nullCount += attrDesc.AttributeNullableFlag;
-
- const Uint32 aSz = (1 << attrDesc.AttributeSize);
- Uint32 sz;
- if(aSz != 1)
- {
- sz = ((aSz * attrDesc.AttributeArraySize) + 31) >> 5;
- }
- else
- {
- sz = 0;
- nullBits += attrDesc.AttributeArraySize;
- }
-
- if(attrDesc.AttributeArraySize == 0)
- {
- parseP->errorCode = CreateTableRef::InvalidArraySize;
- parseP->status = status;
- parseP->errorKey = it.getKey();
- parseP->errorLine = __LINE__;
- return;
- }
-
- recordLength += sz;
- if(attrDesc.AttributeKeyFlag){
- keyLength += sz;
-
- if(attrDesc.AttributeNullableFlag){
- parseP->errorCode = CreateTableRef::NullablePrimaryKey;
- parseP->status = status;
- parseP->errorKey = it.getKey();
- parseP->errorLine = __LINE__;
- return;
- }
- }
-
- if (parseP->requestType != DictTabInfo::AlterTableFromAPI)
- c_attributeRecordHash.add(attrPtr);
-
- if(!it.next())
- break;
-
- if(it.getKey() != DictTabInfo::AttributeName)
- break;
- }//for
-
- tablePtr.p->noOfPrimkey = keyCount;
- tablePtr.p->noOfNullAttr = nullCount;
- tablePtr.p->noOfCharsets = noOfCharsets;
- tablePtr.p->tupKeyLength = keyLength;
- tablePtr.p->noOfNullBits = nullCount + nullBits;
-
- tabRequire(recordLength <= MAX_TUPLE_SIZE_IN_WORDS,
- CreateTableRef::RecordTooBig);
- tabRequire(keyLength <= MAX_KEY_SIZE_IN_WORDS,
- CreateTableRef::InvalidPrimaryKeySize);
- tabRequire(keyLength > 0,
- CreateTableRef::InvalidPrimaryKeySize);
-
-}//handleTabInfo()
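
As a side note on the size arithmetic in the attribute loop above: attribute sizes are stored as log2 of the element width in bits, non-bit attributes are rounded up to whole 32-bit words when accumulating recordLength and keyLength, and bit attributes (element width 1) are folded into the null-bit count instead. A minimal standalone sketch of that calculation, assuming a hypothetical helper name attrWords (not part of DBDICT):

    #include <cstdint>
    #include <cassert>

    // Words occupied by one attribute: element width is 1 << sizeLog2Bits,
    // multiplied by the array size and rounded up to 32-bit words.
    static uint32_t attrWords(uint32_t sizeLog2Bits, uint32_t arraySize)
    {
      const uint32_t bitsPerElement = 1u << sizeLog2Bits;
      if (bitsPerElement == 1)
        return 0;                                     // bit columns are counted as null bits
      return (bitsPerElement * arraySize + 31) >> 5;  // round up to whole words
    }

    int main()
    {
      assert(attrWords(5, 1) == 1);    // one 32-bit element -> 1 word
      assert(attrWords(3, 20) == 5);   // 20 bytes -> 160 bits -> 5 words
      assert(attrWords(0, 7) == 0);    // 7 bit flags -> folded into nullBits
      return 0;
    }
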
-
-
-/* ---------------------------------------------------------------- */
-// DICTTABCONF is sent when participants have received all DICTTABINFO
-// and successfully handled it.
-// Also sent to self (DICT master) when index table creation is ready.
-/* ---------------------------------------------------------------- */
-void Dbdict::execCREATE_TABLE_CONF(Signal* signal)
-{
- jamEntry();
- ndbrequire(signal->getNoOfSections() == 0);
-
- CreateTableConf * const conf = (CreateTableConf *)signal->getDataPtr();
- // assume part of create index operation
- OpCreateIndexPtr opPtr;
- c_opCreateIndex.find(opPtr, conf->senderData);
- ndbrequire(! opPtr.isNull());
- opPtr.p->m_request.setIndexId(conf->tableId);
- opPtr.p->m_request.setIndexVersion(conf->tableVersion);
- createIndex_fromCreateTable(signal, opPtr);
-}//execCREATE_TABLE_CONF()
-
-void Dbdict::execCREATE_TABLE_REF(Signal* signal)
-{
- jamEntry();
-
- CreateTableRef * const ref = (CreateTableRef *)signal->getDataPtr();
- // assume part of create index operation
- OpCreateIndexPtr opPtr;
- c_opCreateIndex.find(opPtr, ref->senderData);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- createIndex_fromCreateTable(signal, opPtr);
-}//execCREATE_TABLE_REF()
-
-/* ---------------------------------------------------------------- */
-// New global checkpoint created.
-/* ---------------------------------------------------------------- */
-void Dbdict::execWAIT_GCP_CONF(Signal* signal)
-{
-#if 0
- TableRecordPtr tablePtr;
- jamEntry();
- WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0];
- c_tableRecordPool.getPtr(tablePtr, c_connRecord.connTableId);
- tablePtr.p->gciTableCreated = conf->gcp;
- sendUpdateSchemaState(signal,
- tablePtr.i,
- SchemaFile::TABLE_ADD_COMMITTED,
- c_connRecord.noOfPagesForTable,
- conf->gcp);
-#endif
-}//execWAIT_GCP_CONF()
-
-/* ---------------------------------------------------------------- */
-// Refused new global checkpoint.
-/* ---------------------------------------------------------------- */
-void Dbdict::execWAIT_GCP_REF(Signal* signal)
-{
- jamEntry();
- WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0];
-/* ---------------------------------------------------------------- */
-// Error Handling code needed
-/* ---------------------------------------------------------------- */
- progError(ref->errorCode, 0);
-}//execWAIT_GCP_REF()
-
-
-/* **************************************************************** */
-/* ---------------------------------------------------------------- */
-/* MODULE: DROP TABLE -------------------- */
-/* ---------------------------------------------------------------- */
-/* */
-/* This module contains the code used to drop a table. */
-/* ---------------------------------------------------------------- */
-/* **************************************************************** */
-void
-Dbdict::execDROP_TABLE_REQ(Signal* signal){
- jamEntry();
- DropTableReq* req = (DropTableReq*)signal->getDataPtr();
-
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, req->tableId, false);
- if(tablePtr.isNull()){
- jam();
- dropTableRef(signal, req, DropTableRef::NoSuchTable);
- return;
- }
-
- if(getOwnNodeId() != c_masterNodeId){
- jam();
- dropTableRef(signal, req, DropTableRef::NotMaster);
- return;
- }
-
- if(c_blockState != BS_IDLE){
- jam();
- dropTableRef(signal, req, DropTableRef::Busy);
- return;
- }
-
- const TableRecord::TabState tabState = tablePtr.p->tabState;
- bool ok = false;
- switch(tabState){
- case TableRecord::NOT_DEFINED:
- case TableRecord::REORG_TABLE_PREPARED:
- case TableRecord::DEFINING:
- case TableRecord::CHECKED:
- jam();
- dropTableRef(signal, req, DropTableRef::NoSuchTable);
- return;
- case TableRecord::DEFINED:
- ok = true;
- jam();
- break;
- case TableRecord::PREPARE_DROPPING:
- case TableRecord::DROPPING:
- jam();
- dropTableRef(signal, req, DropTableRef::DropInProgress);
- return;
- }
- ndbrequire(ok);
-
- if(tablePtr.p->tableVersion != req->tableVersion){
- jam();
- dropTableRef(signal, req, DropTableRef::InvalidTableVersion);
- return;
- }
-
- /**
- * Seems ok
- */
- DropTableRecordPtr dropTabPtr;
- c_opDropTable.seize(dropTabPtr);
-
- if(dropTabPtr.isNull()){
- jam();
- dropTableRef(signal, req, DropTableRef::NoDropTableRecordAvailable);
- return;
- }
-
- c_blockState = BS_BUSY;
-
- dropTabPtr.p->key = ++c_opRecordSequence;
- c_opDropTable.add(dropTabPtr);
-
- tablePtr.p->tabState = TableRecord::PREPARE_DROPPING;
-
- dropTabPtr.p->m_request = * req;
- dropTabPtr.p->m_errorCode = 0;
- dropTabPtr.p->m_requestType = DropTabReq::OnlineDropTab;
- dropTabPtr.p->m_coordinatorRef = reference();
- dropTabPtr.p->m_coordinatorData.m_gsn = GSN_PREP_DROP_TAB_REQ;
- dropTabPtr.p->m_coordinatorData.m_block = 0;
- prepDropTab_nextStep(signal, dropTabPtr);
-}
-
-void
-Dbdict::dropTableRef(Signal * signal,
- DropTableReq * req, DropTableRef::ErrorCode errCode){
-
- Uint32 tableId = req->tableId;
- Uint32 tabVersion = req->tableVersion;
- Uint32 senderData = req->senderData;
- Uint32 senderRef = req->senderRef;
-
- DropTableRef * ref = (DropTableRef*)signal->getDataPtrSend();
- ref->tableId = tableId;
- ref->tableVersion = tabVersion;
- ref->senderData = senderData;
- ref->senderRef = reference();
- ref->errorCode = errCode;
- ref->masterNodeId = c_masterNodeId;
- sendSignal(senderRef, GSN_DROP_TABLE_REF, signal,
- DropTableRef::SignalLength, JBB);
-}
-
-void
-Dbdict::prepDropTab_nextStep(Signal* signal, DropTableRecordPtr dropTabPtr){
-
- /**
- * No errors currently allowed
- */
- ndbrequire(dropTabPtr.p->m_errorCode == 0);
-
- Uint32 block = 0;
- switch(dropTabPtr.p->m_coordinatorData.m_block){
- case 0:
- jam();
- block = dropTabPtr.p->m_coordinatorData.m_block = DBDICT;
- break;
- case DBDICT:
- jam();
- block = dropTabPtr.p->m_coordinatorData.m_block = DBLQH;
- break;
- case DBLQH:
- jam();
- block = dropTabPtr.p->m_coordinatorData.m_block = DBTC;
- break;
- case DBTC:
- jam();
- block = dropTabPtr.p->m_coordinatorData.m_block = DBDIH;
- break;
- case DBDIH:
- jam();
- prepDropTab_complete(signal, dropTabPtr);
- return;
- default:
- ndbrequire(false);
- }
-
- PrepDropTabReq * prep = (PrepDropTabReq*)signal->getDataPtrSend();
- prep->senderRef = reference();
- prep->senderData = dropTabPtr.p->key;
- prep->tableId = dropTabPtr.p->m_request.tableId;
- prep->requestType = dropTabPtr.p->m_requestType;
-
- dropTabPtr.p->m_coordinatorData.m_signalCounter = c_aliveNodes;
- NodeReceiverGroup rg(block, c_aliveNodes);
- sendSignal(rg, GSN_PREP_DROP_TAB_REQ, signal,
- PrepDropTabReq::SignalLength, JBB);
-
-#if 0
- for (Uint32 i = 1; i < MAX_NDB_NODES; i++){
- if(c_aliveNodes.get(i)){
- jam();
- BlockReference ref = numberToRef(block, i);
-
- dropTabPtr.p->m_coordinatorData.m_signalCounter.setWaitingFor(i);
- }
- }
-#endif
-}
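
The coordinator above walks the prepare phase through a fixed block order, broadcasting PREP_DROP_TAB_REQ to one block type at a time across all alive nodes and only advancing once every node has answered. A small sketch of that sequencing, using placeholder enum values rather than the real block numbers from GlobalSignalNumbers:

    #include <cstdio>

    // Illustrative only: the prepare round visits DBDICT, DBLQH, DBTC, DBDIH
    // in that order, then prepDropTab_complete() is invoked.
    enum Block { NONE = 0, DBDICT_B, DBLQH_B, DBTC_B, DBDIH_B, DONE };

    static Block nextPrepDropBlock(Block current)
    {
      switch (current) {
      case NONE:     return DBDICT_B;
      case DBDICT_B: return DBLQH_B;
      case DBLQH_B:  return DBTC_B;
      case DBTC_B:   return DBDIH_B;
      case DBDIH_B:  return DONE;   // prepare phase complete
      default:       return DONE;
      }
    }

    int main()
    {
      for (Block b = nextPrepDropBlock(NONE); b != DONE; b = nextPrepDropBlock(b))
        std::printf("broadcast PREP_DROP_TAB_REQ to block %d\n", (int)b);
      return 0;
    }
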
-
-void
-Dbdict::execPREP_DROP_TAB_CONF(Signal * signal){
- jamEntry();
-
- PrepDropTabConf * prep = (PrepDropTabConf*)signal->getDataPtr();
-
- DropTableRecordPtr dropTabPtr;
- ndbrequire(c_opDropTable.find(dropTabPtr, prep->senderData));
-
- ndbrequire(dropTabPtr.p->m_coordinatorRef == reference());
- ndbrequire(dropTabPtr.p->m_request.tableId == prep->tableId);
- ndbrequire(dropTabPtr.p->m_coordinatorData.m_gsn == GSN_PREP_DROP_TAB_REQ);
-
- Uint32 nodeId = refToNode(prep->senderRef);
- dropTabPtr.p->m_coordinatorData.m_signalCounter.clearWaitingFor(nodeId);
-
- if(!dropTabPtr.p->m_coordinatorData.m_signalCounter.done()){
- jam();
- return;
- }
- prepDropTab_nextStep(signal, dropTabPtr);
-}
-
-void
-Dbdict::execPREP_DROP_TAB_REF(Signal* signal){
- jamEntry();
-
- PrepDropTabRef * prep = (PrepDropTabRef*)signal->getDataPtr();
-
- DropTableRecordPtr dropTabPtr;
- ndbrequire(c_opDropTable.find(dropTabPtr, prep->senderData));
-
- ndbrequire(dropTabPtr.p->m_coordinatorRef == reference());
- ndbrequire(dropTabPtr.p->m_request.tableId == prep->tableId);
- ndbrequire(dropTabPtr.p->m_coordinatorData.m_gsn == GSN_PREP_DROP_TAB_REQ);
-
- Uint32 nodeId = refToNode(prep->senderRef);
- dropTabPtr.p->m_coordinatorData.m_signalCounter.clearWaitingFor(nodeId);
-
- Uint32 block = refToBlock(prep->senderRef);
- if((prep->errorCode == PrepDropTabRef::NoSuchTable && block == DBLQH) ||
- (prep->errorCode == PrepDropTabRef::NF_FakeErrorREF)){
- jam();
- /**
- * Ignore errors:
-     * 1) NoSuchTable from LQH: the table might not exist in every LQH instance
- * 2) node failure...
- */
- } else {
- dropTabPtr.p->setErrorCode((Uint32)prep->errorCode);
- }
-
- if(!dropTabPtr.p->m_coordinatorData.m_signalCounter.done()){
- jam();
- return;
- }
- prepDropTab_nextStep(signal, dropTabPtr);
-}
-
-void
-Dbdict::prepDropTab_complete(Signal* signal, DropTableRecordPtr dropTabPtr){
- jam();
-
- dropTabPtr.p->m_coordinatorData.m_gsn = GSN_DROP_TAB_REQ;
- dropTabPtr.p->m_coordinatorData.m_block = DBDICT;
-
- DropTabReq * req = (DropTabReq*)signal->getDataPtrSend();
- req->senderRef = reference();
- req->senderData = dropTabPtr.p->key;
- req->tableId = dropTabPtr.p->m_request.tableId;
- req->requestType = dropTabPtr.p->m_requestType;
-
- dropTabPtr.p->m_coordinatorData.m_signalCounter = c_aliveNodes;
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- sendSignal(rg, GSN_DROP_TAB_REQ, signal,
- DropTabReq::SignalLength, JBB);
-}
-
-void
-Dbdict::execDROP_TAB_REF(Signal* signal){
- jamEntry();
-
- DropTabRef * const req = (DropTabRef*)signal->getDataPtr();
-
- Uint32 block = refToBlock(req->senderRef);
- ndbrequire(req->errorCode == DropTabRef::NF_FakeErrorREF ||
- (req->errorCode == DropTabRef::NoSuchTable &&
- (block == DBTUP || block == DBACC || block == DBLQH)));
-
- if(block != DBDICT){
- jam();
- ndbrequire(refToNode(req->senderRef) == getOwnNodeId());
- dropTab_localDROP_TAB_CONF(signal);
- return;
- }
- ndbrequire(false);
-}
-
-void
-Dbdict::execDROP_TAB_CONF(Signal* signal){
- jamEntry();
-
- DropTabConf * const req = (DropTabConf*)signal->getDataPtr();
-
- if(refToBlock(req->senderRef) != DBDICT){
- jam();
- ndbrequire(refToNode(req->senderRef) == getOwnNodeId());
- dropTab_localDROP_TAB_CONF(signal);
- return;
- }
-
- DropTableRecordPtr dropTabPtr;
- ndbrequire(c_opDropTable.find(dropTabPtr, req->senderData));
-
- ndbrequire(dropTabPtr.p->m_coordinatorRef == reference());
- ndbrequire(dropTabPtr.p->m_request.tableId == req->tableId);
- ndbrequire(dropTabPtr.p->m_coordinatorData.m_gsn == GSN_DROP_TAB_REQ);
-
- Uint32 nodeId = refToNode(req->senderRef);
- dropTabPtr.p->m_coordinatorData.m_signalCounter.clearWaitingFor(nodeId);
-
- if(!dropTabPtr.p->m_coordinatorData.m_signalCounter.done()){
- jam();
- return;
- }
-
- DropTableConf* conf = (DropTableConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = dropTabPtr.p->m_request.senderData;
- conf->tableId = dropTabPtr.p->m_request.tableId;
- conf->tableVersion = dropTabPtr.p->m_request.tableVersion;
-
- Uint32 ref = dropTabPtr.p->m_request.senderRef;
- sendSignal(ref, GSN_DROP_TABLE_CONF, signal,
- DropTableConf::SignalLength, JBB);
-
- c_opDropTable.release(dropTabPtr);
- c_blockState = BS_IDLE;
-}
-
-/**
- * DROP TABLE PARTICIPANT CODE
- */
-void
-Dbdict::execPREP_DROP_TAB_REQ(Signal* signal){
- jamEntry();
- PrepDropTabReq * prep = (PrepDropTabReq*)signal->getDataPtrSend();
-
- DropTableRecordPtr dropTabPtr;
- if(prep->senderRef == reference()){
- jam();
- ndbrequire(c_opDropTable.find(dropTabPtr, prep->senderData));
- ndbrequire(dropTabPtr.p->m_requestType == prep->requestType);
- } else {
- jam();
- c_opDropTable.seize(dropTabPtr);
- if(!dropTabPtr.isNull()){
- dropTabPtr.p->key = prep->senderData;
- c_opDropTable.add(dropTabPtr);
- }
- }
-
- ndbrequire(!dropTabPtr.isNull());
-
- dropTabPtr.p->m_errorCode = 0;
- dropTabPtr.p->m_request.tableId = prep->tableId;
- dropTabPtr.p->m_requestType = prep->requestType;
- dropTabPtr.p->m_coordinatorRef = prep->senderRef;
- dropTabPtr.p->m_participantData.m_gsn = GSN_PREP_DROP_TAB_REQ;
-
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, prep->tableId);
- tablePtr.p->tabState = TableRecord::PREPARE_DROPPING;
-
- /**
- * Modify schema
- */
- XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
- SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tablePtr.i);
- SchemaFile::TableState tabState =
- (SchemaFile::TableState)tableEntry->m_tableState;
- ndbrequire(tabState == SchemaFile::TABLE_ADD_COMMITTED ||
- tabState == SchemaFile::ALTER_TABLE_COMMITTED);
- tableEntry->m_tableState = SchemaFile::DROP_TABLE_STARTED;
- computeChecksum(xsf, tablePtr.i / NDB_SF_PAGE_ENTRIES);
-
- ndbrequire(c_writeSchemaRecord.inUse == false);
- c_writeSchemaRecord.inUse = true;
-
- c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
- c_writeSchemaRecord.newFile = false;
- c_writeSchemaRecord.firstPage = tablePtr.i / NDB_SF_PAGE_ENTRIES;
- c_writeSchemaRecord.noOfPages = 1;
- c_writeSchemaRecord.m_callback.m_callbackData = dropTabPtr.p->key;
- c_writeSchemaRecord.m_callback.m_callbackFunction =
- safe_cast(&Dbdict::prepDropTab_writeSchemaConf);
- startWriteSchemaFile(signal);
-}
-
-void
-Dbdict::prepDropTab_writeSchemaConf(Signal* signal,
- Uint32 dropTabPtrI,
- Uint32 returnCode){
- jam();
-
- DropTableRecordPtr dropTabPtr;
- ndbrequire(c_opDropTable.find(dropTabPtr, dropTabPtrI));
-
- ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_PREP_DROP_TAB_REQ);
-
- /**
-   * There should probably be node failure handling here
- *
- * To check that coordinator hasn't died
- */
-
- PrepDropTabConf * prep = (PrepDropTabConf*)signal->getDataPtr();
- prep->senderRef = reference();
- prep->senderData = dropTabPtrI;
- prep->tableId = dropTabPtr.p->m_request.tableId;
-
- dropTabPtr.p->m_participantData.m_gsn = GSN_PREP_DROP_TAB_CONF;
- sendSignal(dropTabPtr.p->m_coordinatorRef, GSN_PREP_DROP_TAB_CONF, signal,
- PrepDropTabConf::SignalLength, JBB);
-}
-
-void
-Dbdict::execDROP_TAB_REQ(Signal* signal){
- jamEntry();
- DropTabReq * req = (DropTabReq*)signal->getDataPtrSend();
-
- DropTableRecordPtr dropTabPtr;
- ndbrequire(c_opDropTable.find(dropTabPtr, req->senderData));
-
- ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_PREP_DROP_TAB_CONF);
- dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_REQ;
-
- ndbrequire(dropTabPtr.p->m_requestType == req->requestType);
-
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, dropTabPtr.p->m_request.tableId);
- tablePtr.p->tabState = TableRecord::DROPPING;
-
- dropTabPtr.p->m_participantData.m_block = 0;
- dropTabPtr.p->m_participantData.m_callback.m_callbackData = dropTabPtr.p->key;
- dropTabPtr.p->m_participantData.m_callback.m_callbackFunction =
- safe_cast(&Dbdict::dropTab_complete);
- dropTab_nextStep(signal, dropTabPtr);
-}
-
-#include <DebuggerNames.hpp>
-
-void
-Dbdict::dropTab_nextStep(Signal* signal, DropTableRecordPtr dropTabPtr){
-
- /**
- * No errors currently allowed
- */
- ndbrequire(dropTabPtr.p->m_errorCode == 0);
-
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, dropTabPtr.p->m_request.tableId);
-
- Uint32 block = 0;
- switch(dropTabPtr.p->m_participantData.m_block){
- case 0:
- jam();
- block = DBTC;
- break;
- case DBTC:
- jam();
- if (tablePtr.p->isTable() || tablePtr.p->isHashIndex())
- block = DBACC;
- if (tablePtr.p->isOrderedIndex())
- block = DBTUP;
- break;
- case DBACC:
- jam();
- block = DBTUP;
- break;
- case DBTUP:
- jam();
- if (tablePtr.p->isTable() || tablePtr.p->isHashIndex())
- block = DBLQH;
- if (tablePtr.p->isOrderedIndex())
- block = DBTUX;
- break;
- case DBTUX:
- jam();
- block = DBLQH;
- break;
- case DBLQH:
- jam();
- block = DBDIH;
- break;
- case DBDIH:
- jam();
- execute(signal, dropTabPtr.p->m_participantData.m_callback, 0);
- return;
- }
- ndbrequire(block != 0);
- dropTabPtr.p->m_participantData.m_block = block;
-
- DropTabReq * req = (DropTabReq*)signal->getDataPtrSend();
- req->senderRef = reference();
- req->senderData = dropTabPtr.p->key;
- req->tableId = dropTabPtr.p->m_request.tableId;
- req->requestType = dropTabPtr.p->m_requestType;
-
- const Uint32 nodeId = getOwnNodeId();
- dropTabPtr.p->m_participantData.m_signalCounter.clearWaitingFor();
- dropTabPtr.p->m_participantData.m_signalCounter.setWaitingFor(nodeId);
- BlockReference ref = numberToRef(block, 0);
- sendSignal(ref, GSN_DROP_TAB_REQ, signal, DropTabReq::SignalLength, JBB);
-}
-
-void
-Dbdict::dropTab_localDROP_TAB_CONF(Signal* signal){
- jamEntry();
-
- DropTabConf * conf = (DropTabConf*)signal->getDataPtr();
-
- DropTableRecordPtr dropTabPtr;
- ndbrequire(c_opDropTable.find(dropTabPtr, conf->senderData));
-
- ndbrequire(dropTabPtr.p->m_request.tableId == conf->tableId);
- ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_DROP_TAB_REQ);
-
- Uint32 nodeId = refToNode(conf->senderRef);
- dropTabPtr.p->m_participantData.m_signalCounter.clearWaitingFor(nodeId);
-
- if(!dropTabPtr.p->m_participantData.m_signalCounter.done()){
- jam();
- ndbrequire(false);
- return;
- }
- dropTab_nextStep(signal, dropTabPtr);
-}
-
-void
-Dbdict::dropTab_complete(Signal* signal,
- Uint32 dropTabPtrI,
- Uint32 returnCode){
- jam();
-
- DropTableRecordPtr dropTabPtr;
- ndbrequire(c_opDropTable.find(dropTabPtr, dropTabPtrI));
-
- Uint32 tableId = dropTabPtr.p->m_request.tableId;
-
- /**
- * Write to schema file
- */
- XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
- SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tableId);
- SchemaFile::TableState tabState =
- (SchemaFile::TableState)tableEntry->m_tableState;
- ndbrequire(tabState == SchemaFile::DROP_TABLE_STARTED);
- tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED;
- computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES);
-
- ndbrequire(c_writeSchemaRecord.inUse == false);
- c_writeSchemaRecord.inUse = true;
-
- c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
- c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES;
- c_writeSchemaRecord.noOfPages = 1;
- c_writeSchemaRecord.m_callback.m_callbackData = dropTabPtr.p->key;
- c_writeSchemaRecord.m_callback.m_callbackFunction =
- safe_cast(&Dbdict::dropTab_writeSchemaConf);
- startWriteSchemaFile(signal);
-}
-
-void
-Dbdict::dropTab_writeSchemaConf(Signal* signal,
- Uint32 dropTabPtrI,
- Uint32 returnCode){
- jam();
-
- DropTableRecordPtr dropTabPtr;
- ndbrequire(c_opDropTable.find(dropTabPtr, dropTabPtrI));
-
- ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_DROP_TAB_REQ);
-
- dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_CONF;
-
- releaseTableObject(dropTabPtr.p->m_request.tableId);
-
- DropTabConf * conf = (DropTabConf*)signal->getDataPtr();
- conf->senderRef = reference();
- conf->senderData = dropTabPtrI;
- conf->tableId = dropTabPtr.p->m_request.tableId;
-
- dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_CONF;
- sendSignal(dropTabPtr.p->m_coordinatorRef, GSN_DROP_TAB_CONF, signal,
- DropTabConf::SignalLength, JBB);
-
- if(dropTabPtr.p->m_coordinatorRef != reference()){
- c_opDropTable.release(dropTabPtr);
- }
-}
-
-void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash)
-{
- TableRecordPtr tablePtr;
- AttributeRecordPtr attrPtr;
- c_tableRecordPool.getPtr(tablePtr, tableId);
- if (removeFromHash)
- {
-#ifdef VM_TRACE
- TableRecordPtr tmp;
- ndbrequire(c_tableRecordHash.find(tmp, * tablePtr.p));
-#endif
- c_tableRecordHash.remove(tablePtr);
- }
- tablePtr.p->tabState = TableRecord::NOT_DEFINED;
-
- Uint32 nextAttrRecord = tablePtr.p->firstAttribute;
- while (nextAttrRecord != RNIL) {
- jam();
-/* ---------------------------------------------------------------- */
-// Release all attribute records
-/* ---------------------------------------------------------------- */
- c_attributeRecordPool.getPtr(attrPtr, nextAttrRecord);
- nextAttrRecord = attrPtr.p->nextAttrInTable;
- c_attributeRecordPool.release(attrPtr);
-  }//while
-#ifdef HAVE_TABLE_REORG
- Uint32 secondTableId = tablePtr.p->secondTable;
- initialiseTableRecord(tablePtr);
- c_tableRecordPool.getPtr(tablePtr, secondTableId);
- initialiseTableRecord(tablePtr);
-#endif
- return;
-}//releaseTableObject()
-
-/**
- * DICT receives these on index create and drop.
- */
-void Dbdict::execDROP_TABLE_CONF(Signal* signal)
-{
- jamEntry();
- ndbrequire(signal->getNoOfSections() == 0);
-
- DropTableConf * const conf = (DropTableConf *)signal->getDataPtr();
- // assume part of drop index operation
- OpDropIndexPtr opPtr;
- c_opDropIndex.find(opPtr, conf->senderData);
- ndbrequire(! opPtr.isNull());
- ndbrequire(opPtr.p->m_request.getIndexId() == conf->tableId);
- ndbrequire(opPtr.p->m_request.getIndexVersion() == conf->tableVersion);
- dropIndex_fromDropTable(signal, opPtr);
-}
-
-void Dbdict::execDROP_TABLE_REF(Signal* signal)
-{
- jamEntry();
-
- DropTableRef * const ref = (DropTableRef *)signal->getDataPtr();
- // assume part of drop index operation
- OpDropIndexPtr opPtr;
- c_opDropIndex.find(opPtr, ref->senderData);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- opPtr.p->m_errorLine = __LINE__;
- dropIndex_fromDropTable(signal, opPtr);
-}
-
-/* **************************************************************** */
-/* ---------------------------------------------------------------- */
-/* MODULE: EXTERNAL INTERFACE TO DATA -------------------- */
-/* ---------------------------------------------------------------- */
-/* */
-/* This module contains the code that is used by other modules to    */
-/* access the data within DBDICT.                                     */
-/* ---------------------------------------------------------------- */
-/* **************************************************************** */
-
-void Dbdict::execGET_TABLEDID_REQ(Signal * signal)
-{
- jamEntry();
- ndbrequire(signal->getNoOfSections() == 1);
- GetTableIdReq const * req = (GetTableIdReq *)signal->getDataPtr();
- Uint32 senderData = req->senderData;
- Uint32 senderRef = req->senderRef;
- Uint32 len = req->len;
-
- if(len>MAX_TAB_NAME_SIZE)
- {
- jam();
- sendGET_TABLEID_REF((Signal*)signal,
- (GetTableIdReq *)req,
- GetTableIdRef::TableNameTooLong);
- return;
- }
-
- char tableName[MAX_TAB_NAME_SIZE];
- TableRecord keyRecord;
- SegmentedSectionPtr ssPtr;
- signal->getSection(ssPtr,GetTableIdReq::TABLE_NAME);
- copy((Uint32*)tableName, ssPtr);
- strcpy(keyRecord.tableName, tableName);
- releaseSections(signal);
-
- if(len > sizeof(keyRecord.tableName)){
- jam();
- sendGET_TABLEID_REF((Signal*)signal,
- (GetTableIdReq *)req,
- GetTableIdRef::TableNameTooLong);
- return;
- }
-
- TableRecordPtr tablePtr;
- if(!c_tableRecordHash.find(tablePtr, keyRecord)) {
- jam();
- sendGET_TABLEID_REF((Signal*)signal,
- (GetTableIdReq *)req,
- GetTableIdRef::TableNotDefined);
- return;
- }
- GetTableIdConf * conf = (GetTableIdConf *)req;
- conf->tableId = tablePtr.p->tableId;
- conf->schemaVersion = tablePtr.p->tableVersion;
- conf->senderData = senderData;
- sendSignal(senderRef, GSN_GET_TABLEID_CONF, signal,
- GetTableIdConf::SignalLength, JBB);
-
-}
-
-
-void Dbdict::sendGET_TABLEID_REF(Signal* signal,
- GetTableIdReq * req,
- GetTableIdRef::ErrorCode errorCode)
-{
- GetTableIdRef * const ref = (GetTableIdRef *)req;
- /**
-   * The format of GetTableId Req/Ref is the same
- */
- BlockReference retRef = req->senderRef;
- ref->err = errorCode;
- sendSignal(retRef, GSN_GET_TABLEID_REF, signal,
- GetTableIdRef::SignalLength, JBB);
-}//sendGET_TABLEID_REF()
-
-/* ---------------------------------------------------------------- */
-// Get a full table description.
-/* ---------------------------------------------------------------- */
-void Dbdict::execGET_TABINFOREQ(Signal* signal)
-{
- jamEntry();
- if(!assembleFragments(signal))
- {
- return;
- }
-
- GetTabInfoReq * const req = (GetTabInfoReq *)&signal->theData[0];
-
- /**
- * If I get a GET_TABINFO_REQ from myself
-   * it is one from the time queue
- */
- bool fromTimeQueue = (signal->senderBlockRef() == reference());
-
- if (c_retrieveRecord.busyState && fromTimeQueue == true) {
- jam();
-
- sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30,
- signal->length());
- return;
- }//if
-
- const Uint32 MAX_WAITERS = 5;
-
- if(c_retrieveRecord.busyState && fromTimeQueue == false){
- jam();
- if(c_retrieveRecord.noOfWaiters < MAX_WAITERS){
- jam();
- c_retrieveRecord.noOfWaiters++;
-
- sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30,
- signal->length());
- return;
- }
-
- sendGET_TABINFOREF(signal, req, GetTabInfoRef::Busy);
- return;
- }
-
- if(fromTimeQueue){
- jam();
- c_retrieveRecord.noOfWaiters--;
- }
-
- const bool useLongSig = (req->requestType & GetTabInfoReq::LongSignalConf);
- const Uint32 reqType = req->requestType & (~GetTabInfoReq::LongSignalConf);
-
- TableRecordPtr tablePtr;
- if(reqType == GetTabInfoReq::RequestByName){
- jam();
- ndbrequire(signal->getNoOfSections() == 1);
- const Uint32 len = req->tableNameLen;
-
- TableRecord keyRecord;
- if(len > sizeof(keyRecord.tableName)){
- jam();
- releaseSections(signal);
- sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNameTooLong);
- return;
- }
-
- char tableName[MAX_TAB_NAME_SIZE];
- SegmentedSectionPtr ssPtr;
- signal->getSection(ssPtr,GetTabInfoReq::TABLE_NAME);
- SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
- r0.reset(); // undo implicit first()
- if(r0.getWords((Uint32*)tableName, ((len + 3)/4)))
- memcpy(keyRecord.tableName, tableName, len);
- else {
- jam();
- releaseSections(signal);
- sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNotDefined);
- return;
- }
- releaseSections(signal);
- // memcpy(keyRecord.tableName, req->tableName, len);
- //ntohS(&keyRecord.tableName[0], len);
-
- c_tableRecordHash.find(tablePtr, keyRecord);
- } else {
- jam();
- c_tableRecordPool.getPtr(tablePtr, req->tableId, false);
- }
-
-  // The table searched for was not found
- if(tablePtr.i == RNIL){
- jam();
- sendGET_TABINFOREF(signal, req, GetTabInfoRef::InvalidTableId);
- return;
- }//if
-
- if (tablePtr.p->tabState != TableRecord::DEFINED) {
- jam();
- sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNotDefined);
- return;
- }//if
-
- c_retrieveRecord.busyState = true;
- c_retrieveRecord.blockRef = req->senderRef;
- c_retrieveRecord.m_senderData = req->senderData;
- c_retrieveRecord.tableId = tablePtr.i;
- c_retrieveRecord.currentSent = 0;
- c_retrieveRecord.m_useLongSig = useLongSig;
-
- c_packTable.m_state = PackTable::PTS_GET_TAB;
-
- signal->theData[0] = ZPACK_TABLE_INTO_PAGES;
- signal->theData[1] = tablePtr.i;
- signal->theData[2] = c_retrieveRecord.retrievePage;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
-}//execGET_TABINFOREQ()
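
The retrieve record above acts as a single-slot server: while it is busy, at most MAX_WAITERS requests are parked on the time queue with a 30 ms delay, and any further ones are refused with Busy. A hedged sketch of that admission decision, with made-up names for the helper and enum (the constants come from the code above):

    #include <cstdio>

    enum Action { SERVE, REQUEUE_DELAYED, REFUSE_BUSY };

    static Action admitGetTabInfo(bool busy, bool fromTimeQueue,
                                  unsigned& noOfWaiters, unsigned maxWaiters = 5)
    {
      if (busy && fromTimeQueue)
        return REQUEUE_DELAYED;        // keep it circulating on the time queue
      if (busy) {
        if (noOfWaiters < maxWaiters) {
          noOfWaiters++;               // park the request for ~30 ms
          return REQUEUE_DELAYED;
        }
        return REFUSE_BUSY;            // corresponds to GetTabInfoRef::Busy
      }
      if (fromTimeQueue)
        noOfWaiters--;                 // a parked request is now being served
      return SERVE;
    }

    int main()
    {
      unsigned waiters = 0;
      std::printf("%d\n", admitGetTabInfo(true, false, waiters));  // 1: delayed
      std::printf("%d\n", admitGetTabInfo(false, true, waiters));  // 0: served
      return 0;
    }
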
-
-void Dbdict::sendGetTabResponse(Signal* signal)
-{
- PageRecordPtr pagePtr;
- DictTabInfo * const conf = (DictTabInfo *)&signal->theData[0];
- conf->senderRef = reference();
- conf->senderData = c_retrieveRecord.m_senderData;
- conf->requestType = DictTabInfo::GetTabInfoConf;
- conf->totalLen = c_retrieveRecord.retrievedNoOfWords;
-
- c_pageRecordArray.getPtr(pagePtr, c_retrieveRecord.retrievePage);
- Uint32* pagePointer = (Uint32*)&pagePtr.p->word[0] + ZPAGE_HEADER_SIZE;
-
- if(c_retrieveRecord.m_useLongSig){
- jam();
- GetTabInfoConf* conf = (GetTabInfoConf*)signal->getDataPtr();
- conf->gci = 0;
- conf->tableId = c_retrieveRecord.tableId;
- conf->senderData = c_retrieveRecord.m_senderData;
- conf->totalLen = c_retrieveRecord.retrievedNoOfWords;
-
- Callback c = { safe_cast(&Dbdict::initRetrieveRecord), 0 };
- LinearSectionPtr ptr[3];
- ptr[0].p = pagePointer;
- ptr[0].sz = c_retrieveRecord.retrievedNoOfWords;
- sendFragmentedSignal(c_retrieveRecord.blockRef,
- GSN_GET_TABINFO_CONF,
- signal,
- GetTabInfoConf::SignalLength,
- JBB,
- ptr,
- 1,
- c);
- return;
- }
-
- ndbrequire(false);
-}//sendGetTabResponse()
-
-void Dbdict::sendGET_TABINFOREF(Signal* signal,
- GetTabInfoReq * req,
- GetTabInfoRef::ErrorCode errorCode)
-{
- jamEntry();
- GetTabInfoRef * const ref = (GetTabInfoRef *)&signal->theData[0];
- /**
- * The format of GetTabInfo Req/Ref is the same
- */
- BlockReference retRef = req->senderRef;
- ref->errorCode = errorCode;
-
- sendSignal(retRef, GSN_GET_TABINFOREF, signal, signal->length(), JBB);
-}//sendGET_TABINFOREF()
-
-Uint32 convertEndian(Uint32 in) {
-#ifdef WORDS_BIGENDIAN
- Uint32 ut = 0;
- ut += ((in >> 24) & 255);
- ut += (((in >> 16) & 255) << 8);
- ut += (((in >> 8) & 255) << 16);
- ut += ((in & 255) << 24);
- return ut;
-#else
- return in;
-#endif
-}
-void
-Dbdict::execLIST_TABLES_REQ(Signal* signal)
-{
- jamEntry();
- Uint32 i;
- ListTablesReq * req = (ListTablesReq*)signal->getDataPtr();
- Uint32 senderRef = req->senderRef;
- Uint32 senderData = req->senderData;
- // save req flags
- const Uint32 reqTableId = req->getTableId();
- const Uint32 reqTableType = req->getTableType();
- const bool reqListNames = req->getListNames();
- const bool reqListIndexes = req->getListIndexes();
- // init the confs
- ListTablesConf * conf = (ListTablesConf *)signal->getDataPtrSend();
- conf->senderData = senderData;
- conf->counter = 0;
- Uint32 pos = 0;
- for (i = 0; i < c_tableRecordPool.getSize(); i++) {
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, i);
- // filter
- if (tablePtr.p->tabState == TableRecord::NOT_DEFINED ||
- tablePtr.p->tabState == TableRecord::REORG_TABLE_PREPARED)
- continue;
-
-
- if ((reqTableType != (Uint32)0) && (reqTableType != (unsigned)tablePtr.p->tableType))
- continue;
- if (reqListIndexes && reqTableId != tablePtr.p->primaryTableId)
- continue;
- conf->tableData[pos] = 0;
- // id
- conf->setTableId(pos, tablePtr.i);
- // type
- conf->setTableType(pos, tablePtr.p->tableType);
- // state
- if (tablePtr.p->isTable()) {
- switch (tablePtr.p->tabState) {
- case TableRecord::DEFINING:
- case TableRecord::CHECKED:
- conf->setTableState(pos, DictTabInfo::StateBuilding);
- break;
- case TableRecord::PREPARE_DROPPING:
- case TableRecord::DROPPING:
- conf->setTableState(pos, DictTabInfo::StateDropping);
- break;
- case TableRecord::DEFINED:
- conf->setTableState(pos, DictTabInfo::StateOnline);
- break;
- default:
- conf->setTableState(pos, DictTabInfo::StateBroken);
- break;
- }
- }
- if (tablePtr.p->isIndex()) {
- switch (tablePtr.p->indexState) {
- case TableRecord::IS_OFFLINE:
- conf->setTableState(pos, DictTabInfo::StateOffline);
- break;
- case TableRecord::IS_BUILDING:
- conf->setTableState(pos, DictTabInfo::StateBuilding);
- break;
- case TableRecord::IS_DROPPING:
- conf->setTableState(pos, DictTabInfo::StateDropping);
- break;
- case TableRecord::IS_ONLINE:
- conf->setTableState(pos, DictTabInfo::StateOnline);
- break;
- default:
- conf->setTableState(pos, DictTabInfo::StateBroken);
- break;
- }
- }
- // store
- if (! tablePtr.p->storedTable) {
- conf->setTableStore(pos, DictTabInfo::StoreTemporary);
- } else {
- conf->setTableStore(pos, DictTabInfo::StorePermanent);
- }
- pos++;
- if (pos >= ListTablesConf::DataLength) {
- sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
- ListTablesConf::SignalLength, JBB);
- conf->counter++;
- pos = 0;
- }
- if (! reqListNames)
- continue;
- const Uint32 size = strlen(tablePtr.p->tableName) + 1;
- conf->tableData[pos] = size;
- pos++;
- if (pos >= ListTablesConf::DataLength) {
- sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
- ListTablesConf::SignalLength, JBB);
- conf->counter++;
- pos = 0;
- }
- Uint32 k = 0;
- while (k < size) {
- char* p = (char*)&conf->tableData[pos];
- for (Uint32 j = 0; j < 4; j++) {
- if (k < size)
- *p++ = tablePtr.p->tableName[k++];
- else
- *p++ = 0;
- }
- pos++;
- if (pos >= ListTablesConf::DataLength) {
- sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
- ListTablesConf::SignalLength, JBB);
- conf->counter++;
- pos = 0;
- }
- }
- }
- // XXX merge with above somehow
- for (i = 0; i < c_triggerRecordPool.getSize(); i++) {
- if (reqListIndexes)
- break;
- TriggerRecordPtr triggerPtr;
- c_triggerRecordPool.getPtr(triggerPtr, i);
- if (triggerPtr.p->triggerState == TriggerRecord::TS_NOT_DEFINED)
- continue;
- // constant 10 hardcoded
- Uint32 type = 10 + triggerPtr.p->triggerType;
- if (reqTableType != 0 && reqTableType != type)
- continue;
- conf->tableData[pos] = 0;
- conf->setTableId(pos, triggerPtr.i);
- conf->setTableType(pos, type);
- switch (triggerPtr.p->triggerState) {
- case TriggerRecord::TS_OFFLINE:
- conf->setTableState(pos, DictTabInfo::StateOffline);
- break;
- case TriggerRecord::TS_ONLINE:
- conf->setTableState(pos, DictTabInfo::StateOnline);
- break;
- default:
- conf->setTableState(pos, DictTabInfo::StateBroken);
- break;
- }
- conf->setTableStore(pos, DictTabInfo::StoreTemporary);
- pos++;
- if (pos >= ListTablesConf::DataLength) {
- sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
- ListTablesConf::SignalLength, JBB);
- conf->counter++;
- pos = 0;
- }
- if (! reqListNames)
- continue;
- const Uint32 size = strlen(triggerPtr.p->triggerName) + 1;
- conf->tableData[pos] = size;
- pos++;
- if (pos >= ListTablesConf::DataLength) {
- sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
- ListTablesConf::SignalLength, JBB);
- conf->counter++;
- pos = 0;
- }
- Uint32 k = 0;
- while (k < size) {
- char* p = (char*)&conf->tableData[pos];
- for (Uint32 j = 0; j < 4; j++) {
- if (k < size)
- *p++ = triggerPtr.p->triggerName[k++];
- else
- *p++ = 0;
- }
- pos++;
- if (pos >= ListTablesConf::DataLength) {
- sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
- ListTablesConf::SignalLength, JBB);
- conf->counter++;
- pos = 0;
- }
- }
- }
- // last signal must have less than max length
- sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
- ListTablesConf::HeaderLength + pos, JBB);
-}
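
Table and trigger names are appended to the conf payload as a length word followed by the name packed four characters per 32-bit word, zero-padded in the last word. A small sketch of just the packing step (packName is a hypothetical helper, not a DICT function; the flushing on ListTablesConf::DataLength is omitted):

    #include <cstring>
    #include <cstdint>
    #include <cassert>

    // Pack a NUL-terminated name into 32-bit words, four chars per word,
    // zero-padding the final word. Returns the number of words written.
    static unsigned packName(const char* name, uint32_t* out)
    {
      const unsigned size = (unsigned)std::strlen(name) + 1;   // include NUL
      unsigned pos = 0, k = 0;
      while (k < size) {
        char* p = (char*)&out[pos];
        for (unsigned j = 0; j < 4; j++)
          *p++ = (k < size) ? name[k++] : 0;
        pos++;
      }
      return pos;
    }

    int main()
    {
      uint32_t buf[8];
      assert(packName("t1", buf) == 1);        // "t1\0" fits in one word
      assert(packName("sys/def", buf) == 2);   // 8 bytes incl. NUL -> 2 words
      return 0;
    }
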
-
-/**
- * MODULE: Create index
- *
- * Create index in DICT via create table operation. Then invoke alter
- * index operation to bring the index online.
- *
- * Request type in CREATE_INDX signals:
- *
- * RT_USER - from API to DICT master
- * RT_DICT_PREPARE - prepare participants
- * RT_DICT_COMMIT - commit participants
- * RT_TC - create index in TC (part of alter index operation)
- */
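
Read together with the request types listed above, the master-side flow is roughly: broadcast the prepare, create the index table, optionally online it via an alter index operation, then commit (or abort on any error). A simplified sketch of that phase ordering; the enum below is illustrative and does not mirror CreateIndxReq::RequestType exactly:

    #include <cstdio>

    enum Phase { USER_REQ, DICT_PREPARE, CREATE_TABLE, ALTER_INDEX_ONLINE,
                 DICT_COMMIT, DICT_ABORT, REPLY };

    static Phase nextPhase(Phase p, bool hasError, bool online)
    {
      if (hasError) return DICT_ABORT;                // any failure aborts the slaves
      switch (p) {
      case USER_REQ:           return DICT_PREPARE;   // broadcast to all DICTs
      case DICT_PREPARE:       return CREATE_TABLE;   // build the index table
      case CREATE_TABLE:       return online ? ALTER_INDEX_ONLINE : DICT_COMMIT;
      case ALTER_INDEX_ONLINE: return DICT_COMMIT;
      default:                 return REPLY;          // CONF/REF back to the API
      }
    }

    int main()
    {
      Phase p = USER_REQ;
      while (p != REPLY) {
        std::printf("phase %d\n", (int)p);
        p = nextPhase(p, false, true);
      }
      return 0;
    }
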
-
-void
-Dbdict::execCREATE_INDX_REQ(Signal* signal)
-{
- jamEntry();
- CreateIndxReq* const req = (CreateIndxReq*)signal->getDataPtrSend();
- OpCreateIndexPtr opPtr;
- const Uint32 senderRef = signal->senderBlockRef();
- const CreateIndxReq::RequestType requestType = req->getRequestType();
- if (requestType == CreateIndxReq::RT_USER) {
- jam();
- if (! assembleFragments(signal)) {
- jam();
- return;
- }
- if (signal->getLength() == CreateIndxReq::SignalLength) {
- jam();
- if (getOwnNodeId() != c_masterNodeId) {
- jam();
-
- releaseSections(signal);
- OpCreateIndex opBusy;
- opPtr.p = &opBusy;
- opPtr.p->save(req);
- opPtr.p->m_isMaster = (senderRef == reference());
- opPtr.p->key = 0;
- opPtr.p->m_requestType = CreateIndxReq::RT_DICT_PREPARE;
- opPtr.p->m_errorCode = CreateIndxRef::NotMaster;
- opPtr.p->m_errorLine = __LINE__;
- opPtr.p->m_errorNode = c_masterNodeId;
- createIndex_sendReply(signal, opPtr, true);
- return;
- }
-
- // forward initial request plus operation key to all
- req->setOpKey(++c_opRecordSequence);
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- sendSignal(rg, GSN_CREATE_INDX_REQ,
- signal, CreateIndxReq::SignalLength + 1, JBB);
- return;
- }
- // seize operation record
- ndbrequire(signal->getLength() == CreateIndxReq::SignalLength + 1);
- const Uint32 opKey = req->getOpKey();
- OpCreateIndex opBusy;
- if (! c_opCreateIndex.seize(opPtr))
- opPtr.p = &opBusy;
- opPtr.p->save(req);
- opPtr.p->m_coordinatorRef = senderRef;
- opPtr.p->m_isMaster = (senderRef == reference());
- opPtr.p->key = opKey;
- opPtr.p->m_requestType = CreateIndxReq::RT_DICT_PREPARE;
- if (opPtr.p == &opBusy) {
- jam();
- opPtr.p->m_errorCode = CreateIndxRef::Busy;
- opPtr.p->m_errorLine = __LINE__;
- releaseSections(signal);
- createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
- return;
- }
- c_opCreateIndex.add(opPtr);
- // save attribute list
- SegmentedSectionPtr ssPtr;
- signal->getSection(ssPtr, CreateIndxReq::ATTRIBUTE_LIST_SECTION);
- SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
- r0.reset(); // undo implicit first()
- if (! r0.getWord(&opPtr.p->m_attrList.sz) ||
- ! r0.getWords(opPtr.p->m_attrList.id, opPtr.p->m_attrList.sz)) {
- jam();
- opPtr.p->m_errorCode = CreateIndxRef::InvalidName;
- opPtr.p->m_errorLine = __LINE__;
- releaseSections(signal);
- createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
- return;
- }
- // save name and index table properties
- signal->getSection(ssPtr, CreateIndxReq::INDEX_NAME_SECTION);
- SimplePropertiesSectionReader r1(ssPtr, getSectionSegmentPool());
- DictTabInfo::Table tableDesc;
- tableDesc.init();
- SimpleProperties::UnpackStatus status = SimpleProperties::unpack(
- r1, &tableDesc,
- DictTabInfo::TableMapping, DictTabInfo::TableMappingSize,
- true, true);
- if (status != SimpleProperties::Eof) {
- opPtr.p->m_errorCode = CreateIndxRef::InvalidName;
- opPtr.p->m_errorLine = __LINE__;
- releaseSections(signal);
- createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
- return;
- }
- memcpy(opPtr.p->m_indexName, tableDesc.TableName, MAX_TAB_NAME_SIZE);
- opPtr.p->m_storedIndex = tableDesc.TableLoggedFlag;
- releaseSections(signal);
- // master expects to hear from all
- if (opPtr.p->m_isMaster)
- opPtr.p->m_signalCounter = c_aliveNodes;
- createIndex_slavePrepare(signal, opPtr);
- createIndex_sendReply(signal, opPtr, false);
- return;
- }
- c_opCreateIndex.find(opPtr, req->getConnectionPtr());
- if (! opPtr.isNull()) {
- opPtr.p->m_requestType = requestType;
- if (requestType == CreateIndxReq::RT_DICT_COMMIT ||
- requestType == CreateIndxReq::RT_DICT_ABORT) {
- jam();
- if (requestType == CreateIndxReq::RT_DICT_COMMIT) {
- opPtr.p->m_request.setIndexId(req->getIndexId());
- opPtr.p->m_request.setIndexVersion(req->getIndexVersion());
- createIndex_slaveCommit(signal, opPtr);
- } else {
- createIndex_slaveAbort(signal, opPtr);
- }
- createIndex_sendReply(signal, opPtr, false);
- // done in slave
- if (! opPtr.p->m_isMaster)
- c_opCreateIndex.release(opPtr);
- return;
- }
- }
- jam();
- // return to sender
- releaseSections(signal);
- OpCreateIndex opBad;
- opPtr.p = &opBad;
- opPtr.p->save(req);
- opPtr.p->m_errorCode = CreateIndxRef::BadRequestType;
- opPtr.p->m_errorLine = __LINE__;
- createIndex_sendReply(signal, opPtr, true);
-}
-
-void
-Dbdict::execCREATE_INDX_CONF(Signal* signal)
-{
- jamEntry();
- ndbrequire(signal->getNoOfSections() == 0);
- CreateIndxConf* conf = (CreateIndxConf*)signal->getDataPtrSend();
- createIndex_recvReply(signal, conf, 0);
-}
-
-void
-Dbdict::execCREATE_INDX_REF(Signal* signal)
-{
- jamEntry();
- CreateIndxRef* ref = (CreateIndxRef*)signal->getDataPtrSend();
- createIndex_recvReply(signal, ref->getConf(), ref);
-}
-
-void
-Dbdict::createIndex_recvReply(Signal* signal, const CreateIndxConf* conf,
- const CreateIndxRef* ref)
-{
- jam();
- const Uint32 senderRef = signal->senderBlockRef();
- const CreateIndxReq::RequestType requestType = conf->getRequestType();
- const Uint32 key = conf->getConnectionPtr();
- if (requestType == CreateIndxReq::RT_TC) {
- jam();
- // part of alter index operation
- OpAlterIndexPtr opPtr;
- c_opAlterIndex.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- alterIndex_fromCreateTc(signal, opPtr);
- return;
- }
- OpCreateIndexPtr opPtr;
- c_opCreateIndex.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- ndbrequire(opPtr.p->m_isMaster);
- ndbrequire(opPtr.p->m_requestType == requestType);
- opPtr.p->setError(ref);
- opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
- if (! opPtr.p->m_signalCounter.done()) {
- jam();
- return;
- }
- if (requestType == CreateIndxReq::RT_DICT_COMMIT ||
- requestType == CreateIndxReq::RT_DICT_ABORT) {
- jam();
- // send reply to user
- createIndex_sendReply(signal, opPtr, true);
- c_opCreateIndex.release(opPtr);
- return;
- }
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT;
- createIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- if (requestType == CreateIndxReq::RT_DICT_PREPARE) {
- jam();
- // start index table create
- createIndex_toCreateTable(signal, opPtr);
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT;
- createIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- return;
- }
- ndbrequire(false);
-}
-
-void
-Dbdict::createIndex_slavePrepare(Signal* signal, OpCreateIndexPtr opPtr)
-{
- jam();
-}
-
-void
-Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
-{
- Uint32 attrid_map[MAX_ATTRIBUTES_IN_INDEX];
- Uint32 k;
- jam();
- const CreateIndxReq* const req = &opPtr.p->m_request;
- // signal data writer
- Uint32* wbuffer = &c_indexPage.word[0];
- LinearWriter w(wbuffer, sizeof(c_indexPage) >> 2);
- w.first();
- // get table being indexed
- if (! (req->getTableId() < c_tableRecordPool.getSize())) {
- jam();
- opPtr.p->m_errorCode = CreateIndxRef::InvalidPrimaryTable;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, req->getTableId());
- if (tablePtr.p->tabState != TableRecord::DEFINED) {
- jam();
- opPtr.p->m_errorCode = CreateIndxRef::InvalidPrimaryTable;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- if (! tablePtr.p->isTable()) {
- jam();
- opPtr.p->m_errorCode = CreateIndxRef::InvalidPrimaryTable;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- // compute index table record
- TableRecord indexRec;
- TableRecordPtr indexPtr;
- indexPtr.i = RNIL; // invalid
- indexPtr.p = &indexRec;
- initialiseTableRecord(indexPtr);
- if (req->getIndexType() == DictTabInfo::UniqueHashIndex) {
- indexPtr.p->storedTable = opPtr.p->m_storedIndex;
- indexPtr.p->fragmentType = tablePtr.p->fragmentType;
- } else if (req->getIndexType() == DictTabInfo::OrderedIndex) {
-    // the first version does not support logging
- if (opPtr.p->m_storedIndex) {
- jam();
- opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- indexPtr.p->storedTable = false;
- // follows table fragmentation
- indexPtr.p->fragmentType = tablePtr.p->fragmentType;
- } else {
- jam();
- opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- indexPtr.p->tableType = (DictTabInfo::TableType)req->getIndexType();
- indexPtr.p->primaryTableId = req->getTableId();
- indexPtr.p->noOfAttributes = opPtr.p->m_attrList.sz;
- indexPtr.p->tupKeyLength = 0;
- if (indexPtr.p->noOfAttributes == 0) {
- jam();
- opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- if (indexPtr.p->isOrderedIndex()) {
- // tree node size in words (make configurable later)
- indexPtr.p->tupKeyLength = MAX_TTREE_NODE_SIZE;
- }
-
- AttributeMask mask;
- mask.clear();
- for (k = 0; k < opPtr.p->m_attrList.sz; k++) {
- jam();
- unsigned current_id= opPtr.p->m_attrList.id[k];
- AttributeRecord* aRec= NULL;
- Uint32 tAttr= tablePtr.p->firstAttribute;
- for (; tAttr != RNIL; tAttr= aRec->nextAttrInTable)
- {
- aRec = c_attributeRecordPool.getPtr(tAttr);
- if (aRec->attributeId != current_id)
- continue;
- jam();
- break;
- }
- if (tAttr == RNIL) {
- jam();
- opPtr.p->m_errorCode = CreateIndxRef::BadRequestType;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- if (mask.get(current_id))
- {
- jam();
- opPtr.p->m_errorCode = CreateIndxRef::DuplicateAttributes;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- mask.set(current_id);
-
- const Uint32 a = aRec->attributeDescriptor;
- unsigned kk= k;
- if (indexPtr.p->isHashIndex()) {
- const Uint32 s1 = AttributeDescriptor::getSize(a);
- const Uint32 s2 = AttributeDescriptor::getArraySize(a);
- indexPtr.p->tupKeyLength += ((1 << s1) * s2 + 31) >> 5;
-      // reorder the attributes according to the primary table's attribute id order
-      // for unique indexes
- for (; kk > 0 && current_id < attrid_map[kk-1]>>16; kk--)
- attrid_map[kk]= attrid_map[kk-1];
- }
- attrid_map[kk]= k | (current_id << 16);
- }
- indexPtr.p->noOfPrimkey = indexPtr.p->noOfAttributes;
- // plus concatenated primary table key attribute
- indexPtr.p->noOfAttributes += 1;
- indexPtr.p->noOfNullAttr = 0;
- // write index table
- w.add(DictTabInfo::TableName, opPtr.p->m_indexName);
- w.add(DictTabInfo::TableLoggedFlag, indexPtr.p->storedTable);
- w.add(DictTabInfo::FragmentTypeVal, indexPtr.p->fragmentType);
- w.add(DictTabInfo::TableTypeVal, indexPtr.p->tableType);
- w.add(DictTabInfo::PrimaryTable, tablePtr.p->tableName);
- w.add(DictTabInfo::PrimaryTableId, tablePtr.i);
- w.add(DictTabInfo::NoOfAttributes, indexPtr.p->noOfAttributes);
- w.add(DictTabInfo::NoOfKeyAttr, indexPtr.p->noOfPrimkey);
- w.add(DictTabInfo::NoOfNullable, indexPtr.p->noOfNullAttr);
- w.add(DictTabInfo::KeyLength, indexPtr.p->tupKeyLength);
- // write index key attributes
- AttributeRecordPtr aRecPtr;
- c_attributeRecordPool.getPtr(aRecPtr, tablePtr.p->firstAttribute);
- for (k = 0; k < opPtr.p->m_attrList.sz; k++) {
- // insert the attributes in the order decided above in attrid_map
-    // k is the new position, current_id is the id from the previous (request) order
- // ToDo: make sure "current_id" is stored with the table and
- // passed up to NdbDictionary
- unsigned current_id= opPtr.p->m_attrList.id[attrid_map[k] & 0xffff];
- jam();
- for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
- AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
- tAttr = aRec->nextAttrInTable;
- if (aRec->attributeId != current_id)
- continue;
- jam();
- const Uint32 a = aRec->attributeDescriptor;
- bool isNullable = AttributeDescriptor::getNullable(a);
- Uint32 attrType = AttributeDescriptor::getType(a);
- w.add(DictTabInfo::AttributeName, aRec->attributeName);
- w.add(DictTabInfo::AttributeId, k);
- if (indexPtr.p->isHashIndex()) {
- w.add(DictTabInfo::AttributeKeyFlag, (Uint32)true);
- w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false);
- }
- if (indexPtr.p->isOrderedIndex()) {
- w.add(DictTabInfo::AttributeKeyFlag, (Uint32)false);
- w.add(DictTabInfo::AttributeNullableFlag, (Uint32)isNullable);
- }
- w.add(DictTabInfo::AttributeExtType, attrType);
- w.add(DictTabInfo::AttributeExtPrecision, aRec->extPrecision);
- w.add(DictTabInfo::AttributeExtScale, aRec->extScale);
- w.add(DictTabInfo::AttributeExtLength, aRec->extLength);
- w.add(DictTabInfo::AttributeEnd, (Uint32)true);
- }
- }
- if (indexPtr.p->isHashIndex()) {
- jam();
- // write concatenated primary table key attribute
- w.add(DictTabInfo::AttributeName, "NDB$PK");
- w.add(DictTabInfo::AttributeId, opPtr.p->m_attrList.sz);
- w.add(DictTabInfo::AttributeKeyFlag, (Uint32)false);
- w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false);
- w.add(DictTabInfo::AttributeExtType, (Uint32)DictTabInfo::ExtUnsigned);
- w.add(DictTabInfo::AttributeExtLength, tablePtr.p->tupKeyLength);
- w.add(DictTabInfo::AttributeEnd, (Uint32)true);
- }
- if (indexPtr.p->isOrderedIndex()) {
- jam();
- // write index tree node as Uint32 array attribute
- w.add(DictTabInfo::AttributeName, "NDB$TNODE");
- w.add(DictTabInfo::AttributeId, opPtr.p->m_attrList.sz);
- w.add(DictTabInfo::AttributeKeyFlag, (Uint32)true);
- w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false);
- w.add(DictTabInfo::AttributeExtType, (Uint32)DictTabInfo::ExtUnsigned);
- w.add(DictTabInfo::AttributeExtLength, indexPtr.p->tupKeyLength);
- w.add(DictTabInfo::AttributeEnd, (Uint32)true);
- }
- // finish
- w.add(DictTabInfo::TableEnd, (Uint32)true);
-  // remember to release any signal sections before reusing the signal
- releaseSections(signal);
- // send create index table request
- CreateTableReq * const cre = (CreateTableReq*)signal->getDataPtrSend();
- cre->senderRef = reference();
- cre->senderData = opPtr.p->key;
- LinearSectionPtr lsPtr[3];
- lsPtr[0].p = wbuffer;
- lsPtr[0].sz = w.getWordsUsed();
- sendSignal(DBDICT_REF, GSN_CREATE_TABLE_REQ,
- signal, CreateTableReq::SignalLength, JBB, lsPtr, 1);
-}
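
For unique (hash) indexes the loop above insertion-sorts the requested attributes by their primary-table attribute id, packing each entry as position | (id << 16) so the original request order can still be recovered. A standalone sketch of that reordering (buildAttrIdMap is a made-up name for illustration):

    #include <cstdint>
    #include <cstdio>

    // Insertion-sort the requested attributes by attribute id; each map entry
    // stores the original request position in the low half and the id in the
    // high half, mirroring the attrid_map packing above.
    static void buildAttrIdMap(const uint32_t* ids, unsigned n, uint32_t* map)
    {
      for (unsigned k = 0; k < n; k++) {
        unsigned kk = k;
        while (kk > 0 && ids[k] < (map[kk - 1] >> 16)) {
          map[kk] = map[kk - 1];
          kk--;
        }
        map[kk] = k | (ids[k] << 16);
      }
    }

    int main()
    {
      const uint32_t ids[3] = { 7, 2, 5 };   // attribute ids as requested
      uint32_t map[3];
      buildAttrIdMap(ids, 3, map);
      for (unsigned k = 0; k < 3; k++)
        std::printf("slot %u -> request pos %u (attr id %u)\n",
                    k, (unsigned)(map[k] & 0xffffu), (unsigned)(map[k] >> 16));
      return 0;
    }
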
-
-void
-Dbdict::createIndex_fromCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
-{
- jam();
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT;
- createIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- if (! opPtr.p->m_request.getOnline()) {
- jam();
- opPtr.p->m_requestType = CreateIndxReq::RT_DICT_COMMIT;
- createIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- createIndex_toAlterIndex(signal, opPtr);
-}
-
-void
-Dbdict::createIndex_toAlterIndex(Signal* signal, OpCreateIndexPtr opPtr)
-{
- jam();
- AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(AlterIndxReq::RT_CREATE_INDEX);
- req->addRequestFlag(opPtr.p->m_requestFlag);
- req->setTableId(opPtr.p->m_request.getTableId());
- req->setIndexId(opPtr.p->m_request.getIndexId());
- req->setIndexVersion(opPtr.p->m_request.getIndexVersion());
- req->setOnline(true);
- sendSignal(reference(), GSN_ALTER_INDX_REQ,
- signal, AlterIndxReq::SignalLength, JBB);
-}
-
-void
-Dbdict::createIndex_fromAlterIndex(Signal* signal, OpCreateIndexPtr opPtr)
-{
- jam();
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT;
- createIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- opPtr.p->m_requestType = CreateIndxReq::RT_DICT_COMMIT;
- createIndex_sendSlaveReq(signal, opPtr);
-}
-
-void
-Dbdict::createIndex_slaveCommit(Signal* signal, OpCreateIndexPtr opPtr)
-{
- jam();
- const Uint32 indexId = opPtr.p->m_request.getIndexId();
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, indexId);
- if (! opPtr.p->m_request.getOnline()) {
- ndbrequire(indexPtr.p->indexState == TableRecord::IS_UNDEFINED);
- indexPtr.p->indexState = TableRecord::IS_OFFLINE;
- } else {
- ndbrequire(indexPtr.p->indexState == TableRecord::IS_ONLINE);
- }
-}
-
-void
-Dbdict::createIndex_slaveAbort(Signal* signal, OpCreateIndexPtr opPtr)
-{
- jam();
- CreateIndxReq* const req = &opPtr.p->m_request;
- const Uint32 indexId = req->getIndexId();
- if (indexId >= c_tableRecordPool.getSize()) {
- jam();
- return;
- }
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, indexId);
- if (! indexPtr.p->isIndex()) {
- jam();
- return;
- }
- indexPtr.p->indexState = TableRecord::IS_BROKEN;
-}
-
-void
-Dbdict::createIndex_sendSlaveReq(Signal* signal, OpCreateIndexPtr opPtr)
-{
- jam();
- CreateIndxReq* const req = (CreateIndxReq*)signal->getDataPtrSend();
- *req = opPtr.p->m_request;
- req->setUserRef(opPtr.p->m_coordinatorRef);
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(opPtr.p->m_requestType);
- req->addRequestFlag(opPtr.p->m_requestFlag);
- opPtr.p->m_signalCounter = c_aliveNodes;
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- sendSignal(rg, GSN_CREATE_INDX_REQ,
- signal, CreateIndxReq::SignalLength, JBB);
-}
-
-void
-Dbdict::createIndex_sendReply(Signal* signal, OpCreateIndexPtr opPtr,
- bool toUser)
-{
- CreateIndxRef* rep = (CreateIndxRef*)signal->getDataPtrSend();
- Uint32 gsn = GSN_CREATE_INDX_CONF;
- Uint32 length = CreateIndxConf::InternalLength;
- bool sendRef = opPtr.p->hasError();
- if (! toUser) {
- rep->setUserRef(opPtr.p->m_coordinatorRef);
- rep->setConnectionPtr(opPtr.p->key);
- rep->setRequestType(opPtr.p->m_requestType);
- if (opPtr.p->m_requestType == CreateIndxReq::RT_DICT_ABORT)
- sendRef = false;
- } else {
- rep->setUserRef(opPtr.p->m_request.getUserRef());
- rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
- rep->setRequestType(opPtr.p->m_request.getRequestType());
- length = CreateIndxConf::SignalLength;
- }
- rep->setTableId(opPtr.p->m_request.getTableId());
- rep->setIndexId(opPtr.p->m_request.getIndexId());
- rep->setIndexVersion(opPtr.p->m_request.getIndexVersion());
- if (sendRef) {
- if (opPtr.p->m_errorNode == 0)
- opPtr.p->m_errorNode = getOwnNodeId();
- rep->setErrorCode(opPtr.p->m_errorCode);
- rep->setErrorLine(opPtr.p->m_errorLine);
- rep->setErrorNode(opPtr.p->m_errorNode);
- gsn = GSN_CREATE_INDX_REF;
- length = CreateIndxRef::SignalLength;
- }
- sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
-}
-
-/**
- * MODULE: Drop index.
- *
- * Drop index. First alters the index offline (i.e. drops metadata in
- * other blocks) and then drops the index table.
- */
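
A compact sketch of the sequencing described above: prepare, alter the index offline, drop the backing index table, then commit. The phase names below are illustrative, not the real DropIndxReq request types:

    #include <cstdio>

    enum DropPhase { PREPARE, ALTER_OFFLINE, DROP_TABLE, COMMIT, DONE };

    static DropPhase next(DropPhase p)
    {
      switch (p) {
      case PREPARE:       return ALTER_OFFLINE;  // ALTER_INDX_REQ with online = false
      case ALTER_OFFLINE: return DROP_TABLE;     // DROP_TABLE_REQ for the index table
      case DROP_TABLE:    return COMMIT;         // RT_DICT_COMMIT to all slaves
      default:            return DONE;
      }
    }

    int main()
    {
      for (DropPhase p = PREPARE; p != DONE; p = next(p))
        std::printf("drop-index phase %d\n", (int)p);
      return 0;
    }
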
-
-void
-Dbdict::execDROP_INDX_REQ(Signal* signal)
-{
- jamEntry();
- DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend();
- OpDropIndexPtr opPtr;
-
- int err = DropIndxRef::BadRequestType;
- const Uint32 senderRef = signal->senderBlockRef();
- const DropIndxReq::RequestType requestType = req->getRequestType();
- if (requestType == DropIndxReq::RT_USER) {
- jam();
- if (signal->getLength() == DropIndxReq::SignalLength) {
- jam();
- if (getOwnNodeId() != c_masterNodeId) {
- jam();
-
- err = DropIndxRef::NotMaster;
- goto error;
- }
- // forward initial request plus operation key to all
- Uint32 indexId= req->getIndexId();
- Uint32 indexVersion= req->getIndexVersion();
- TableRecordPtr tmp;
- int res = getMetaTablePtr(tmp, indexId, indexVersion);
- switch(res){
- case MetaData::InvalidArgument:
- err = DropIndxRef::IndexNotFound;
- goto error;
- case MetaData::TableNotFound:
- case MetaData::InvalidTableVersion:
- err = DropIndxRef::InvalidIndexVersion;
- goto error;
- }
-
- if (! tmp.p->isIndex()) {
- jam();
- err = DropIndxRef::NotAnIndex;
- goto error;
- }
-
- if (tmp.p->indexState == TableRecord::IS_DROPPING){
- jam();
- err = DropIndxRef::IndexNotFound;
- goto error;
- }
-
- tmp.p->indexState = TableRecord::IS_DROPPING;
-
- req->setOpKey(++c_opRecordSequence);
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- sendSignal(rg, GSN_DROP_INDX_REQ,
- signal, DropIndxReq::SignalLength + 1, JBB);
- return;
- }
- // seize operation record
- ndbrequire(signal->getLength() == DropIndxReq::SignalLength + 1);
- const Uint32 opKey = req->getOpKey();
- OpDropIndex opBusy;
- if (! c_opDropIndex.seize(opPtr))
- opPtr.p = &opBusy;
- opPtr.p->save(req);
- opPtr.p->m_coordinatorRef = senderRef;
- opPtr.p->m_isMaster = (senderRef == reference());
- opPtr.p->key = opKey;
- opPtr.p->m_requestType = DropIndxReq::RT_DICT_PREPARE;
- if (opPtr.p == &opBusy) {
- jam();
- opPtr.p->m_errorCode = DropIndxRef::Busy;
- opPtr.p->m_errorLine = __LINE__;
- dropIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
- return;
- }
- c_opDropIndex.add(opPtr);
- // master expects to hear from all
- if (opPtr.p->m_isMaster)
- opPtr.p->m_signalCounter = c_aliveNodes;
- dropIndex_slavePrepare(signal, opPtr);
- dropIndex_sendReply(signal, opPtr, false);
- return;
- }
- c_opDropIndex.find(opPtr, req->getConnectionPtr());
- if (! opPtr.isNull()) {
- opPtr.p->m_requestType = requestType;
- if (requestType == DropIndxReq::RT_DICT_COMMIT ||
- requestType == DropIndxReq::RT_DICT_ABORT) {
- jam();
- if (requestType == DropIndxReq::RT_DICT_COMMIT)
- dropIndex_slaveCommit(signal, opPtr);
- else
- dropIndex_slaveAbort(signal, opPtr);
- dropIndex_sendReply(signal, opPtr, false);
- // done in slave
- if (! opPtr.p->m_isMaster)
- c_opDropIndex.release(opPtr);
- return;
- }
- }
-error:
- jam();
- // return to sender
- OpDropIndex opBad;
- opPtr.p = &opBad;
- opPtr.p->save(req);
- opPtr.p->m_errorCode = (DropIndxRef::ErrorCode)err;
- opPtr.p->m_errorLine = __LINE__;
- opPtr.p->m_errorNode = c_masterNodeId;
- dropIndex_sendReply(signal, opPtr, true);
-}
-
-void
-Dbdict::execDROP_INDX_CONF(Signal* signal)
-{
- jamEntry();
- DropIndxConf* conf = (DropIndxConf*)signal->getDataPtrSend();
- dropIndex_recvReply(signal, conf, 0);
-}
-
-void
-Dbdict::execDROP_INDX_REF(Signal* signal)
-{
- jamEntry();
- DropIndxRef* ref = (DropIndxRef*)signal->getDataPtrSend();
- dropIndex_recvReply(signal, ref->getConf(), ref);
-}
-
-void
-Dbdict::dropIndex_recvReply(Signal* signal, const DropIndxConf* conf,
- const DropIndxRef* ref)
-{
- jam();
- const Uint32 senderRef = signal->senderBlockRef();
- const DropIndxReq::RequestType requestType = conf->getRequestType();
- const Uint32 key = conf->getConnectionPtr();
- if (requestType == DropIndxReq::RT_TC) {
- jam();
- // part of alter index operation
- OpAlterIndexPtr opPtr;
- c_opAlterIndex.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- alterIndex_fromDropTc(signal, opPtr);
- return;
- }
- OpDropIndexPtr opPtr;
- c_opDropIndex.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- ndbrequire(opPtr.p->m_isMaster);
- ndbrequire(opPtr.p->m_requestType == requestType);
- opPtr.p->setError(ref);
- opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
- if (! opPtr.p->m_signalCounter.done()) {
- jam();
- return;
- }
- if (requestType == DropIndxReq::RT_DICT_COMMIT ||
- requestType == DropIndxReq::RT_DICT_ABORT) {
- jam();
- // send reply to user
- dropIndex_sendReply(signal, opPtr, true);
- c_opDropIndex.release(opPtr);
- return;
- }
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = DropIndxReq::RT_DICT_ABORT;
- dropIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- if (requestType == DropIndxReq::RT_DICT_PREPARE) {
- jam();
- // start alter offline
- dropIndex_toAlterIndex(signal, opPtr);
- return;
- }
- ndbrequire(false);
-}
-
-void
-Dbdict::dropIndex_slavePrepare(Signal* signal, OpDropIndexPtr opPtr)
-{
- jam();
- DropIndxReq* const req = &opPtr.p->m_request;
- // check index exists
- TableRecordPtr indexPtr;
- if (! (req->getIndexId() < c_tableRecordPool.getSize())) {
- jam();
- opPtr.p->m_errorCode = DropIndxRef::IndexNotFound;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- c_tableRecordPool.getPtr(indexPtr, req->getIndexId());
- if (indexPtr.p->tabState != TableRecord::DEFINED) {
- jam();
- opPtr.p->m_errorCode = DropIndxRef::IndexNotFound;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- if (! indexPtr.p->isIndex()) {
- jam();
- opPtr.p->m_errorCode = DropIndxRef::NotAnIndex;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- // ignore incoming primary table id
- req->setTableId(indexPtr.p->primaryTableId);
-}
-
-void
-Dbdict::dropIndex_toAlterIndex(Signal* signal, OpDropIndexPtr opPtr)
-{
- jam();
- AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(AlterIndxReq::RT_DROP_INDEX);
- req->addRequestFlag(opPtr.p->m_requestFlag);
- req->setTableId(opPtr.p->m_request.getTableId());
- req->setIndexId(opPtr.p->m_request.getIndexId());
- req->setIndexVersion(opPtr.p->m_request.getIndexVersion());
- req->setOnline(false);
- sendSignal(reference(), GSN_ALTER_INDX_REQ,
- signal, AlterIndxReq::SignalLength, JBB);
-}
-
-void
-Dbdict::dropIndex_fromAlterIndex(Signal* signal, OpDropIndexPtr opPtr)
-{
- jam();
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = DropIndxReq::RT_DICT_ABORT;
- dropIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- dropIndex_toDropTable(signal, opPtr);
-}
-
-void
-Dbdict::dropIndex_toDropTable(Signal* signal, OpDropIndexPtr opPtr)
-{
- jam();
- DropTableReq* const req = (DropTableReq*)signal->getDataPtrSend();
- req->senderRef = reference();
- req->senderData = opPtr.p->key;
- req->tableId = opPtr.p->m_request.getIndexId();
- req->tableVersion = opPtr.p->m_request.getIndexVersion();
- sendSignal(reference(), GSN_DROP_TABLE_REQ,
-	     signal, DropTableReq::SignalLength, JBB);
-}
-
-void
-Dbdict::dropIndex_fromDropTable(Signal* signal, OpDropIndexPtr opPtr)
-{
- jam();
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = DropIndxReq::RT_DICT_ABORT;
- dropIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- opPtr.p->m_requestType = DropIndxReq::RT_DICT_COMMIT;
- dropIndex_sendSlaveReq(signal, opPtr);
-}
-
-void
-Dbdict::dropIndex_slaveCommit(Signal* signal, OpDropIndexPtr opPtr)
-{
- jam();
-}
-
-void
-Dbdict::dropIndex_slaveAbort(Signal* signal, OpDropIndexPtr opPtr)
-{
- jam();
- DropIndxReq* const req = &opPtr.p->m_request;
- const Uint32 indexId = req->getIndexId();
- if (indexId >= c_tableRecordPool.getSize()) {
- jam();
- return;
- }
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, indexId);
- indexPtr.p->indexState = TableRecord::IS_BROKEN;
-}
-
-void
-Dbdict::dropIndex_sendSlaveReq(Signal* signal, OpDropIndexPtr opPtr)
-{
- DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend();
- *req = opPtr.p->m_request;
- req->setUserRef(opPtr.p->m_coordinatorRef);
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(opPtr.p->m_requestType);
- req->addRequestFlag(opPtr.p->m_requestFlag);
- opPtr.p->m_signalCounter = c_aliveNodes;
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- sendSignal(rg, GSN_DROP_INDX_REQ,
- signal, DropIndxReq::SignalLength, JBB);
-}
-
-void
-Dbdict::dropIndex_sendReply(Signal* signal, OpDropIndexPtr opPtr,
- bool toUser)
-{
- DropIndxRef* rep = (DropIndxRef*)signal->getDataPtrSend();
- Uint32 gsn = GSN_DROP_INDX_CONF;
- Uint32 length = DropIndxConf::InternalLength;
- bool sendRef = opPtr.p->hasError();
- if (! toUser) {
- rep->setUserRef(opPtr.p->m_coordinatorRef);
- rep->setConnectionPtr(opPtr.p->key);
- rep->setRequestType(opPtr.p->m_requestType);
- if (opPtr.p->m_requestType == DropIndxReq::RT_DICT_ABORT)
- sendRef = false;
- } else {
- rep->setUserRef(opPtr.p->m_request.getUserRef());
- rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
- rep->setRequestType(opPtr.p->m_request.getRequestType());
- length = DropIndxConf::SignalLength;
- }
- rep->setTableId(opPtr.p->m_request.getTableId());
- rep->setIndexId(opPtr.p->m_request.getIndexId());
- rep->setIndexVersion(opPtr.p->m_request.getIndexVersion());
- if (sendRef) {
- if (opPtr.p->m_errorNode == 0)
- opPtr.p->m_errorNode = getOwnNodeId();
- rep->setErrorCode(opPtr.p->m_errorCode);
- rep->setErrorLine(opPtr.p->m_errorLine);
- rep->setErrorNode(opPtr.p->m_errorNode);
- gsn = GSN_DROP_INDX_REF;
- length = DropIndxRef::SignalLength;
- }
- sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
-}
-
-/*****************************************************
- *
- * Util signalling
- *
- *****************************************************/
-
-int
-Dbdict::sendSignalUtilReq(Callback *pcallback,
- BlockReference ref,
- GlobalSignalNumber gsn,
- Signal* signal,
- Uint32 length,
- JobBufferLevel jbuf,
- LinearSectionPtr ptr[3],
- Uint32 noOfSections)
-{
- jam();
- EVENT_TRACE;
- OpSignalUtilPtr utilRecPtr;
-
- // Seize a Util Send record
- if (!c_opSignalUtil.seize(utilRecPtr)) {
- // Failed to allocate util record
- return -1;
- }
- utilRecPtr.p->m_callback = *pcallback;
-
- // should work for all util signal classes
- UtilPrepareReq *req = (UtilPrepareReq*)signal->getDataPtrSend();
- utilRecPtr.p->m_userData = req->getSenderData();
- req->setSenderData(utilRecPtr.i);
-
- if (ptr) {
- jam();
- sendSignal(ref, gsn, signal, length, jbuf, ptr, noOfSections);
- } else {
- jam();
- sendSignal(ref, gsn, signal, length, jbuf);
- }
-
- return 0;
-}
-
-int
-Dbdict::recvSignalUtilReq(Signal* signal, Uint32 returnCode)
-{
- jam();
- EVENT_TRACE;
- UtilPrepareConf * const req = (UtilPrepareConf*)signal->getDataPtr();
- OpSignalUtilPtr utilRecPtr;
- utilRecPtr.i = req->getSenderData();
- if ((utilRecPtr.p = c_opSignalUtil.getPtr(utilRecPtr.i)) == NULL) {
- jam();
- return -1;
- }
-
- req->setSenderData(utilRecPtr.p->m_userData);
- Callback c = utilRecPtr.p->m_callback;
- c_opSignalUtil.release(utilRecPtr);
-
- execute(signal, c, returnCode);
- return 0;
-}
-
-void Dbdict::execUTIL_PREPARE_CONF(Signal *signal)
-{
- jamEntry();
- EVENT_TRACE;
- ndbrequire(recvSignalUtilReq(signal, 0) == 0);
-}
-
-void
-Dbdict::execUTIL_PREPARE_REF(Signal *signal)
-{
- jamEntry();
- EVENT_TRACE;
- ndbrequire(recvSignalUtilReq(signal, 1) == 0);
-}
-
-void Dbdict::execUTIL_EXECUTE_CONF(Signal *signal)
-{
- jamEntry();
- EVENT_TRACE;
- ndbrequire(recvSignalUtilReq(signal, 0) == 0);
-}
-
-void Dbdict::execUTIL_EXECUTE_REF(Signal *signal)
-{
- jamEntry();
- EVENT_TRACE;
-
-#ifdef EVENT_DEBUG
- UtilExecuteRef * ref = (UtilExecuteRef *)signal->getDataPtrSend();
-
- ndbout_c("execUTIL_EXECUTE_REF");
- ndbout_c("senderData %u",ref->getSenderData());
- ndbout_c("errorCode %u",ref->getErrorCode());
- ndbout_c("TCErrorCode %u",ref->getTCErrorCode());
-#endif
-
- ndbrequire(recvSignalUtilReq(signal, 1) == 0);
-}
-void Dbdict::execUTIL_RELEASE_CONF(Signal *signal)
-{
- jamEntry();
- EVENT_TRACE;
- ndbrequire(false);
- ndbrequire(recvSignalUtilReq(signal, 0) == 0);
-}
-void Dbdict::execUTIL_RELEASE_REF(Signal *signal)
-{
- jamEntry();
- EVENT_TRACE;
- ndbrequire(false);
- ndbrequire(recvSignalUtilReq(signal, 1) == 0);
-}
-
-/**
- * MODULE: Create event
- *
- * Create event in DICT.
- *
- *
- * Request type in CREATE_EVNT signals:
- *
- * Signal flow: see Dbdict.txt
- *
- */
-
-/*****************************************************************
- *
- * Systable stuff
- *
- */
-
-const Uint32 Dbdict::sysTab_NDBEVENTS_0_szs[EVENT_SYSTEM_TABLE_LENGTH] = {
- sizeof(((sysTab_NDBEVENTS_0*)0)->NAME),
- sizeof(((sysTab_NDBEVENTS_0*)0)->EVENT_TYPE),
- sizeof(((sysTab_NDBEVENTS_0*)0)->TABLE_NAME),
- sizeof(((sysTab_NDBEVENTS_0*)0)->ATTRIBUTE_MASK),
- sizeof(((sysTab_NDBEVENTS_0*)0)->SUBID),
- sizeof(((sysTab_NDBEVENTS_0*)0)->SUBKEY)
-};
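// A rough sketch of the NDB$EVENTS_0 row layout that the size table above
// implies. This is for orientation only, not the actual sysTab_NDBEVENTS_0
// declaration (which lives elsewhere in Dbdict); the exact types are
// assumptions:
//
//   NAME            char[MAX_TAB_NAME_SIZE]   event name, primary key
//   EVENT_TYPE      Uint32
//   TABLE_NAME      char[MAX_TAB_NAME_SIZE]
//   ATTRIBUTE_MASK  Uint32[]                  copied to/from an AttributeMask
//   SUBID           Uint32                    SUMA subscription id
//   SUBKEY          Uint32                    SUMA subscription key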
-
-void
-Dbdict::prepareTransactionEventSysTable (Callback *pcallback,
- Signal* signal,
- Uint32 senderData,
- UtilPrepareReq::OperationTypeValue prepReq)
-{
- // find table id for event system table
- TableRecord keyRecord;
- strcpy(keyRecord.tableName, EVENT_SYSTEM_TABLE_NAME);
-
- TableRecordPtr tablePtr;
- c_tableRecordHash.find(tablePtr, keyRecord);
-
- ndbrequire(tablePtr.i != RNIL); // system table must exist
-
- Uint32 tableId = tablePtr.p->tableId; /* System table */
- Uint32 noAttr = tablePtr.p->noOfAttributes;
- ndbrequire(noAttr == EVENT_SYSTEM_TABLE_LENGTH);
-
- switch (prepReq) {
- case UtilPrepareReq::Update:
- case UtilPrepareReq::Insert:
- case UtilPrepareReq::Write:
- case UtilPrepareReq::Read:
- jam();
- break;
- case UtilPrepareReq::Delete:
- jam();
- noAttr = 1; // only the primary key is involved; it must be the first attribute
- break;
- }
- prepareUtilTransaction(pcallback, signal, senderData, tableId, NULL,
- prepReq, noAttr, NULL, NULL);
-}
-
-void
-Dbdict::prepareUtilTransaction(Callback *pcallback,
- Signal* signal,
- Uint32 senderData,
- Uint32 tableId,
- const char* tableName,
- UtilPrepareReq::OperationTypeValue prepReq,
- Uint32 noAttr,
- Uint32 attrIds[],
- const char *attrNames[])
-{
- jam();
- EVENT_TRACE;
-
- UtilPrepareReq * utilPrepareReq =
- (UtilPrepareReq *)signal->getDataPtrSend();
-
- utilPrepareReq->setSenderRef(reference());
- utilPrepareReq->setSenderData(senderData);
-
- const Uint32 pageSizeInWords = 128;
- Uint32 propPage[pageSizeInWords];
- LinearWriter w(&propPage[0],128);
- w.first();
- w.add(UtilPrepareReq::NoOfOperations, 1);
- w.add(UtilPrepareReq::OperationType, prepReq);
- if (tableName) {
- jam();
- w.add(UtilPrepareReq::TableName, tableName);
- } else {
- jam();
- w.add(UtilPrepareReq::TableId, tableId);
- }
- for(Uint32 i = 0; i < noAttr; i++)
- if (tableName) {
- jam();
- w.add(UtilPrepareReq::AttributeName, attrNames[i]);
- } else {
- if (attrIds) {
- jam();
- w.add(UtilPrepareReq::AttributeId, attrIds[i]);
- } else {
- jam();
- w.add(UtilPrepareReq::AttributeId, i);
- }
- }
-#ifdef EVENT_DEBUG
- // Debugging
- SimplePropertiesLinearReader reader(propPage, w.getWordsUsed());
- printf("Dict::prepareInsertTransactions: Sent SimpleProperties:\n");
- reader.printAll(ndbout);
-#endif
-
- struct LinearSectionPtr sectionsPtr[UtilPrepareReq::NoOfSections];
- sectionsPtr[UtilPrepareReq::PROPERTIES_SECTION].p = propPage;
- sectionsPtr[UtilPrepareReq::PROPERTIES_SECTION].sz = w.getWordsUsed();
-
- sendSignalUtilReq(pcallback, DBUTIL_REF, GSN_UTIL_PREPARE_REQ, signal,
- UtilPrepareReq::SignalLength, JBB,
- sectionsPtr, UtilPrepareReq::NoOfSections);
-}
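// A hedged example of the SimpleProperties section that prepareUtilTransaction()
// builds for a read of all attributes by table id (values illustrative; the
// layout simply mirrors the w.add() calls above):
//
//   UtilPrepareReq::NoOfOperations = 1
//   UtilPrepareReq::OperationType  = UtilPrepareReq::Read
//   UtilPrepareReq::TableId        = <event system table id>
//   UtilPrepareReq::AttributeId    = 0, 1, ..., noAttr - 1
//
// When a table name is given instead, TableName replaces TableId and
// AttributeName entries replace the AttributeId entries.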
-
-/*****************************************************************
- *
- * CREATE_EVNT_REQ has three request types: RT_USER_CREATE and RT_USER_GET
- * (from the user) and RT_DICT_AFTER_GET, sent from the master DICT to the
- * slaves.
- *
- * This function just dispatches these to
- *
- * createEvent_RT_USER_CREATE
- * createEvent_RT_USER_GET
- * createEvent_RT_DICT_AFTER_GET
- *
- * respectively
- *
- */
-
-void
-Dbdict::execCREATE_EVNT_REQ(Signal* signal)
-{
- jamEntry();
-
-#if 0
- {
- SafeCounterHandle handle;
- {
- SafeCounter tmp(c_counterMgr, handle);
- tmp.init<CreateEvntRef>(CMVMI, GSN_DUMP_STATE_ORD, /* senderData */ 13);
- tmp.clearWaitingFor();
- tmp.setWaitingFor(3);
- ndbrequire(!tmp.done());
- ndbout_c("Allocated");
- }
- ndbrequire(!handle.done());
- {
- SafeCounter tmp(c_counterMgr, handle);
- tmp.clearWaitingFor(3);
- ndbrequire(tmp.done());
- ndbout_c("Deallocated");
- }
- ndbrequire(handle.done());
- }
- {
- NodeBitmask nodes;
- nodes.clear();
-
- nodes.set(2);
- nodes.set(3);
- nodes.set(4);
- nodes.set(5);
-
- {
- Uint32 i = 0;
- while((i = nodes.find(i)) != NodeBitmask::NotFound){
- ndbout_c("1 Node id = %u", i);
- i++;
- }
- }
-
- NodeReceiverGroup rg(DBDICT, nodes);
- RequestTracker rt2;
- ndbrequire(rt2.done());
- ndbrequire(!rt2.hasRef());
- ndbrequire(!rt2.hasConf());
- rt2.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF, 13);
-
- RequestTracker rt3;
- rt3.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF, 13);
-
- ndbrequire(!rt2.done());
- ndbrequire(!rt3.done());
-
- rt2.reportRef(c_counterMgr, 2);
- rt3.reportConf(c_counterMgr, 2);
-
- ndbrequire(!rt2.done());
- ndbrequire(!rt3.done());
-
- rt2.reportConf(c_counterMgr, 3);
- rt3.reportConf(c_counterMgr, 3);
-
- ndbrequire(!rt2.done());
- ndbrequire(!rt3.done());
-
- rt2.reportConf(c_counterMgr, 4);
- rt3.reportConf(c_counterMgr, 4);
-
- ndbrequire(!rt2.done());
- ndbrequire(!rt3.done());
-
- rt2.reportConf(c_counterMgr, 5);
- rt3.reportConf(c_counterMgr, 5);
-
- ndbrequire(rt2.done());
- ndbrequire(rt3.done());
- }
-#endif
-
- if (! assembleFragments(signal)) {
- jam();
- return;
- }
-
- CreateEvntReq *req = (CreateEvntReq*)signal->getDataPtr();
- const CreateEvntReq::RequestType requestType = req->getRequestType();
- const Uint32 requestFlag = req->getRequestFlag();
-
- OpCreateEventPtr evntRecPtr;
- // Seize a Create Event record
- if (!c_opCreateEvent.seize(evntRecPtr)) {
- // Failed to allocate event record
- jam();
- releaseSections(signal);
-
- CreateEvntRef * ret = (CreateEvntRef *)signal->getDataPtrSend();
- ret->senderRef = reference();
- ret->setErrorCode(CreateEvntRef::SeizeError);
- ret->setErrorLine(__LINE__);
- ret->setErrorNode(reference());
- sendSignal(signal->senderBlockRef(), GSN_CREATE_EVNT_REF, signal,
- CreateEvntRef::SignalLength, JBB);
- return;
- }
-
-#ifdef EVENT_DEBUG
- ndbout_c("DBDICT::execCREATE_EVNT_REQ from %u evntRecId = (%d)", refToNode(signal->getSendersBlockRef()), evntRecPtr.i);
-#endif
-
- ndbrequire(req->getUserRef() == signal->getSendersBlockRef());
-
- evntRecPtr.p->init(req,this);
-
- if (requestFlag & (Uint32)CreateEvntReq::RT_DICT_AFTER_GET) {
- jam();
- EVENT_TRACE;
- createEvent_RT_DICT_AFTER_GET(signal, evntRecPtr);
- return;
- }
- if (requestType == CreateEvntReq::RT_USER_GET) {
- jam();
- EVENT_TRACE;
- createEvent_RT_USER_GET(signal, evntRecPtr);
- return;
- }
- if (requestType == CreateEvntReq::RT_USER_CREATE) {
- jam();
- EVENT_TRACE;
- createEvent_RT_USER_CREATE(signal, evntRecPtr);
- return;
- }
-
-#ifdef EVENT_DEBUG
- ndbout << "Dbdict.cpp: Dbdict::execCREATE_EVNT_REQ other" << endl;
-#endif
- jam();
- releaseSections(signal);
-
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
- evntRecPtr.p->m_errorNode = reference();
-
- createEvent_sendReply(signal, evntRecPtr);
-}
-
-/********************************************************************
- *
- * Event creation
- *
- *****************************************************************/
-
-void
-Dbdict::createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr){
- jam();
- evntRecPtr.p->m_request.setUserRef(signal->senderBlockRef());
-
-#ifdef EVENT_DEBUG
- ndbout << "Dbdict.cpp: Dbdict::execCREATE_EVNT_REQ RT_USER" << endl;
- char buf[128] = {0};
- AttributeMask mask = evntRecPtr.p->m_request.getAttrListBitmask();
- mask.getText(buf);
- ndbout_c("mask = %s", buf);
-#endif
-
- // Interpret the long signal
-
- SegmentedSectionPtr ssPtr;
- // save name and event properties
- signal->getSection(ssPtr, CreateEvntReq::EVENT_NAME_SECTION);
-
- SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
-#ifdef EVENT_DEBUG
- r0.printAll(ndbout);
-#endif
- // event name
- if ((!r0.first()) ||
- (r0.getValueType() != SimpleProperties::StringValue) ||
- (r0.getValueLen() <= 0)) {
- jam();
- releaseSections(signal);
-
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
- evntRecPtr.p->m_errorNode = reference();
-
- createEvent_sendReply(signal, evntRecPtr);
- return;
- }
- r0.getString(evntRecPtr.p->m_eventRec.NAME);
- {
- int len = strlen(evntRecPtr.p->m_eventRec.NAME);
- memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
-#ifdef EVENT_DEBUG
- printf("CreateEvntReq::RT_USER_CREATE; EventName %s, len %u\n",
- evntRecPtr.p->m_eventRec.NAME, len);
- for(int i = 0; i < MAX_TAB_NAME_SIZE/4; i++)
- printf("H'%.8x ", ((Uint32*)evntRecPtr.p->m_eventRec.NAME)[i]);
- printf("\n");
-#endif
- }
- // table name
- if ((!r0.next()) ||
- (r0.getValueType() != SimpleProperties::StringValue) ||
- (r0.getValueLen() <= 0)) {
- jam();
- releaseSections(signal);
-
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
- evntRecPtr.p->m_errorNode = reference();
-
- createEvent_sendReply(signal, evntRecPtr);
- return;
- }
- r0.getString(evntRecPtr.p->m_eventRec.TABLE_NAME);
- {
- int len = strlen(evntRecPtr.p->m_eventRec.TABLE_NAME);
- memset(evntRecPtr.p->m_eventRec.TABLE_NAME+len, 0, MAX_TAB_NAME_SIZE-len);
- }
-
-#ifdef EVENT_DEBUG
- ndbout_c("event name: %s",evntRecPtr.p->m_eventRec.NAME);
- ndbout_c("table name: %s",evntRecPtr.p->m_eventRec.TABLE_NAME);
-#endif
-
- releaseSections(signal);
-
- // Send request to SUMA
-
- CreateSubscriptionIdReq * sumaIdReq =
- (CreateSubscriptionIdReq *)signal->getDataPtrSend();
-
- // make sure we save the original sender for later
- sumaIdReq->senderData = evntRecPtr.i;
-#ifdef EVENT_DEBUG
- ndbout << "sumaIdReq->senderData = " << sumaIdReq->senderData << endl;
-#endif
- sendSignal(SUMA_REF, GSN_CREATE_SUBID_REQ, signal,
- CreateSubscriptionIdReq::SignalLength, JBB);
- // we should now return in either execCREATE_SUBID_CONF
- // or execCREATE_SUBID_REF
-}
-
-void Dbdict::execCREATE_SUBID_REF(Signal* signal)
-{
- jamEntry();
- EVENT_TRACE;
- CreateSubscriptionIdRef * const ref =
- (CreateSubscriptionIdRef *)signal->getDataPtr();
- OpCreateEventPtr evntRecPtr;
-
- evntRecPtr.i = ref->senderData;
- ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
-
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
- evntRecPtr.p->m_errorNode = reference();
-
- createEvent_sendReply(signal, evntRecPtr);
-}
-
-void Dbdict::execCREATE_SUBID_CONF(Signal* signal)
-{
- jamEntry();
- EVENT_TRACE;
-
- CreateSubscriptionIdConf const * sumaIdConf =
- (CreateSubscriptionIdConf *)signal->getDataPtr();
-
- Uint32 evntRecId = sumaIdConf->senderData;
- OpCreateEvent *evntRec;
-
- ndbrequire((evntRec = c_opCreateEvent.getPtr(evntRecId)) != NULL);
-
- evntRec->m_request.setEventId(sumaIdConf->subscriptionId);
- evntRec->m_request.setEventKey(sumaIdConf->subscriptionKey);
-
- releaseSections(signal);
-
- Callback c = { safe_cast(&Dbdict::createEventUTIL_PREPARE), 0 };
-
- prepareTransactionEventSysTable(&c, signal, evntRecId,
- UtilPrepareReq::Insert);
-}
-
-void
-Dbdict::createEventComplete_RT_USER_CREATE(Signal* signal,
- OpCreateEventPtr evntRecPtr){
- jam();
- createEvent_sendReply(signal, evntRecPtr);
-}
-
-/*********************************************************************
- *
- * UTIL_PREPARE, UTIL_EXECUTE
- *
- * insert or read systable NDB$EVENTS_0
- */
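// The systable access below is callback-driven. A sketch of the round trip
// for CreateEvntReq::RT_USER_CREATE, using only names from the surrounding
// code (the RT_USER_GET path has the same shape with Read instead of Insert,
// ending in createEventComplete_RT_USER_GET()):
//
//   Callback c = { safe_cast(&Dbdict::createEventUTIL_PREPARE), 0 };
//   prepareTransactionEventSysTable(&c, signal, evntRecPtr.i,
//                                   UtilPrepareReq::Insert);
//     -> GSN_UTIL_PREPARE_REQ to DBUTIL
//     -> execUTIL_PREPARE_CONF/_REF -> createEventUTIL_PREPARE()
//        -> executeTransEventSysTable(..., prepareId, UtilPrepareReq::Insert)
//           -> GSN_UTIL_EXECUTE_REQ to DBUTIL
//           -> execUTIL_EXECUTE_CONF/_REF -> createEventUTIL_EXECUTE()
//              -> createEventComplete_RT_USER_CREATE() -> createEvent_sendReply()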
-
-void interpretUtilPrepareErrorCode(UtilPrepareRef::ErrorCode errorCode,
- bool& temporary, Uint32& line)
-{
- switch (errorCode) {
- case UtilPrepareRef::NO_ERROR:
- jam();
- line = __LINE__;
- EVENT_TRACE;
- break;
- case UtilPrepareRef::PREPARE_SEIZE_ERROR:
- jam();
- temporary = true;
- line = __LINE__;
- EVENT_TRACE;
- break;
- case UtilPrepareRef::PREPARE_PAGES_SEIZE_ERROR:
- jam();
- line = __LINE__;
- EVENT_TRACE;
- break;
- case UtilPrepareRef::PREPARED_OPERATION_SEIZE_ERROR:
- jam();
- line = __LINE__;
- EVENT_TRACE;
- break;
- case UtilPrepareRef::DICT_TAB_INFO_ERROR:
- jam();
- line = __LINE__;
- EVENT_TRACE;
- break;
- case UtilPrepareRef::MISSING_PROPERTIES_SECTION:
- jam();
- line = __LINE__;
- EVENT_TRACE;
- break;
- default:
- jam();
- line = __LINE__;
- EVENT_TRACE;
- break;
- }
-}
-
-void
-Dbdict::createEventUTIL_PREPARE(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode)
-{
- jam();
- EVENT_TRACE;
- if (returnCode == 0) {
- UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr();
- OpCreateEventPtr evntRecPtr;
- jam();
- evntRecPtr.i = req->getSenderData();
- const Uint32 prepareId = req->getPrepareId();
-
- ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
-
- Callback c = { safe_cast(&Dbdict::createEventUTIL_EXECUTE), 0 };
-
- switch (evntRecPtr.p->m_requestType) {
- case CreateEvntReq::RT_USER_GET:
-#ifdef EVENT_DEBUG
- printf("get type = %d\n", CreateEvntReq::RT_USER_GET);
-#endif
- jam();
- executeTransEventSysTable(&c, signal,
- evntRecPtr.i, evntRecPtr.p->m_eventRec,
- prepareId, UtilPrepareReq::Read);
- break;
- case CreateEvntReq::RT_USER_CREATE:
-#ifdef EVENT_DEBUG
- printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE);
-#endif
- {
- evntRecPtr.p->m_eventRec.EVENT_TYPE = evntRecPtr.p->m_request.getEventType();
- AttributeMask m = evntRecPtr.p->m_request.getAttrListBitmask();
- memcpy(evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK, &m,
- sizeof(evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK));
- evntRecPtr.p->m_eventRec.SUBID = evntRecPtr.p->m_request.getEventId();
- evntRecPtr.p->m_eventRec.SUBKEY = evntRecPtr.p->m_request.getEventKey();
- }
- jam();
- executeTransEventSysTable(&c, signal,
- evntRecPtr.i, evntRecPtr.p->m_eventRec,
- prepareId, UtilPrepareReq::Insert);
- break;
- default:
-#ifdef EVENT_DEBUG
- printf("type = %d\n", evntRecPtr.p->m_requestType);
- printf("get type = %d\n", CreateEvntReq::RT_USER_GET);
- printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE);
-#endif
- ndbrequire(false);
- }
- } else { // returnCode != 0
- UtilPrepareRef* const ref = (UtilPrepareRef*)signal->getDataPtr();
-
- const UtilPrepareRef::ErrorCode errorCode =
- (UtilPrepareRef::ErrorCode)ref->getErrorCode();
-
- OpCreateEventPtr evntRecPtr;
- evntRecPtr.i = ref->getSenderData();
- ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
-
- bool temporary = false;
- interpretUtilPrepareErrorCode(errorCode,
- temporary, evntRecPtr.p->m_errorLine);
- if (temporary) {
- evntRecPtr.p->m_errorCode =
- CreateEvntRef::makeTemporary(CreateEvntRef::Undefined);
- }
-
- if (evntRecPtr.p->m_errorCode == 0) {
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- }
- evntRecPtr.p->m_errorNode = reference();
-
- createEvent_sendReply(signal, evntRecPtr);
- }
-}
-
-void Dbdict::executeTransEventSysTable(Callback *pcallback, Signal *signal,
- const Uint32 ptrI,
- sysTab_NDBEVENTS_0& m_eventRec,
- const Uint32 prepareId,
- UtilPrepareReq::OperationTypeValue prepReq)
-{
- jam();
- const Uint32 noAttr = EVENT_SYSTEM_TABLE_LENGTH;
- Uint32 total_len = 0;
-
- Uint32* attrHdr = signal->theData + 25;
- Uint32* attrPtr = attrHdr;
-
- Uint32 id=0;
- // attribute 0 event name: Primary Key
- {
- AttributeHeader::init(attrPtr, id, sysTab_NDBEVENTS_0_szs[id]/4);
- total_len += sysTab_NDBEVENTS_0_szs[id];
- attrPtr++; id++;
- }
-
- switch (prepReq) {
- case UtilPrepareReq::Read:
- jam();
- EVENT_TRACE;
- // no more
- while ( id < noAttr )
- AttributeHeader::init(attrPtr++, id++, 0);
- ndbrequire(id == (Uint32) noAttr);
- break;
- case UtilPrepareReq::Insert:
- jam();
- EVENT_TRACE;
- while ( id < noAttr ) {
- AttributeHeader::init(attrPtr, id, sysTab_NDBEVENTS_0_szs[id]/4);
- total_len += sysTab_NDBEVENTS_0_szs[id];
- attrPtr++; id++;
- }
- ndbrequire(id == (Uint32) noAttr);
- break;
- case UtilPrepareReq::Delete:
- ndbrequire(id == 1);
- break;
- default:
- ndbrequire(false);
- }
-
- LinearSectionPtr headerPtr;
- LinearSectionPtr dataPtr;
-
- headerPtr.p = attrHdr;
- headerPtr.sz = noAttr;
-
- dataPtr.p = (Uint32*)&m_eventRec;
- dataPtr.sz = total_len/4;
-
- ndbrequire((total_len == sysTab_NDBEVENTS_0_szs[0]) ||
- (total_len == sizeof(sysTab_NDBEVENTS_0)));
-
-#if 0
- printf("Header size %u\n", headerPtr.sz);
- for(int i = 0; i < (int)headerPtr.sz; i++)
- printf("H'%.8x ", attrHdr[i]);
- printf("\n");
-
- printf("Data size %u\n", dataPtr.sz);
- for(int i = 0; i < (int)dataPtr.sz; i++)
- printf("H'%.8x ", dataPtr.p[i]);
- printf("\n");
-#endif
-
- executeTransaction(pcallback, signal,
- ptrI,
- prepareId,
- id,
- headerPtr,
- dataPtr);
-}
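// A hedged illustration of the two linear sections assembled above (sizes
// come from sysTab_NDBEVENTS_0_szs; this only restates the code):
//
//   HEADER_SECTION  one AttributeHeader word per attribute,
//                   AttributeHeader(id, size-in-words)
//   DATA_SECTION    the packed sysTab_NDBEVENTS_0 record, total_len/4 words
//                   starting at (Uint32*)&m_eventRec
//
// For a Read only attribute 0 (NAME, the primary key) carries data; the
// remaining headers are written with size 0, presumably so that DBUTIL fills
// in those attribute values on the way back.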
-
-void Dbdict::executeTransaction(Callback *pcallback,
- Signal* signal,
- Uint32 senderData,
- Uint32 prepareId,
- Uint32 noAttr,
- LinearSectionPtr headerPtr,
- LinearSectionPtr dataPtr)
-{
- jam();
- EVENT_TRACE;
-
- UtilExecuteReq * utilExecuteReq =
- (UtilExecuteReq *)signal->getDataPtrSend();
-
- utilExecuteReq->setSenderRef(reference());
- utilExecuteReq->setSenderData(senderData);
- utilExecuteReq->setPrepareId(prepareId);
- utilExecuteReq->setReleaseFlag(); // must be done after setting prepareId
-
-#if 0
- printf("Header size %u\n", headerPtr.sz);
- for(int i = 0; i < (int)headerPtr.sz; i++)
- printf("H'%.8x ", headerPtr.p[i]);
- printf("\n");
-
- printf("Data size %u\n", dataPtr.sz);
- for(int i = 0; i < (int)dataPtr.sz; i++)
- printf("H'%.8x ", dataPtr.p[i]);
- printf("\n");
-#endif
-
- struct LinearSectionPtr sectionsPtr[UtilExecuteReq::NoOfSections];
- sectionsPtr[UtilExecuteReq::HEADER_SECTION].p = headerPtr.p;
- sectionsPtr[UtilExecuteReq::HEADER_SECTION].sz = noAttr;
- sectionsPtr[UtilExecuteReq::DATA_SECTION].p = dataPtr.p;
- sectionsPtr[UtilExecuteReq::DATA_SECTION].sz = dataPtr.sz;
-
- sendSignalUtilReq(pcallback, DBUTIL_REF, GSN_UTIL_EXECUTE_REQ, signal,
- UtilExecuteReq::SignalLength, JBB,
- sectionsPtr, UtilExecuteReq::NoOfSections);
-}
-
-void Dbdict::parseReadEventSys(Signal* signal, sysTab_NDBEVENTS_0& m_eventRec)
-{
- SegmentedSectionPtr headerPtr, dataPtr;
- jam();
- signal->getSection(headerPtr, UtilExecuteReq::HEADER_SECTION);
- SectionReader headerReader(headerPtr, getSectionSegmentPool());
-
- signal->getSection(dataPtr, UtilExecuteReq::DATA_SECTION);
- SectionReader dataReader(dataPtr, getSectionSegmentPool());
-
- AttributeHeader header;
- Uint32 *dst = (Uint32*)&m_eventRec;
-
- for (int i = 0; i < EVENT_SYSTEM_TABLE_LENGTH; i++) {
- headerReader.getWord((Uint32 *)&header);
- int sz = header.getDataSize();
- for (int j = 0; j < sz; j++)
- dataReader.getWord(dst++);
- }
-
- ndbrequire( ((char*)dst-(char*)&m_eventRec) == sizeof(m_eventRec) );
-
- releaseSections(signal);
-}
-
-void Dbdict::createEventUTIL_EXECUTE(Signal *signal,
- Uint32 callbackData,
- Uint32 returnCode)
-{
- jam();
- EVENT_TRACE;
- if (returnCode == 0) {
- // Entry into system table all set
- UtilExecuteConf* const conf = (UtilExecuteConf*)signal->getDataPtr();
- jam();
- OpCreateEventPtr evntRecPtr;
- evntRecPtr.i = conf->getSenderData();
-
- ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
- OpCreateEvent *evntRec = evntRecPtr.p;
-
- switch (evntRec->m_requestType) {
- case CreateEvntReq::RT_USER_GET: {
-#ifdef EVENT_DEBUG
- printf("get type = %d\n", CreateEvntReq::RT_USER_GET);
-#endif
- parseReadEventSys(signal, evntRecPtr.p->m_eventRec);
-
- evntRec->m_request.setEventType(evntRecPtr.p->m_eventRec.EVENT_TYPE);
- evntRec->m_request.setAttrListBitmask(*(AttributeMask*)evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK);
- evntRec->m_request.setEventId(evntRecPtr.p->m_eventRec.SUBID);
- evntRec->m_request.setEventKey(evntRecPtr.p->m_eventRec.SUBKEY);
-
-#ifdef EVENT_DEBUG
- printf("EventName: %s\n", evntRec->m_eventRec.NAME);
- printf("TableName: %s\n", evntRec->m_eventRec.TABLE_NAME);
-#endif
-
- // find table id for event table
- TableRecord keyRecord;
- strcpy(keyRecord.tableName, evntRecPtr.p->m_eventRec.TABLE_NAME);
-
- TableRecordPtr tablePtr;
- c_tableRecordHash.find(tablePtr, keyRecord);
-
- if (tablePtr.i == RNIL) {
- jam();
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
- evntRecPtr.p->m_errorNode = reference();
-
- createEvent_sendReply(signal, evntRecPtr);
- return;
- }
-
- evntRec->m_request.setTableId(tablePtr.p->tableId);
-
- createEventComplete_RT_USER_GET(signal, evntRecPtr);
- return;
- }
- case CreateEvntReq::RT_USER_CREATE: {
-#ifdef EVENT_DEBUG
- printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE);
-#endif
- jam();
- createEventComplete_RT_USER_CREATE(signal, evntRecPtr);
- return;
- }
- break;
- default:
- ndbrequire(false);
- }
- } else { // returnCode != 0
- UtilExecuteRef * const ref = (UtilExecuteRef *)signal->getDataPtr();
- OpCreateEventPtr evntRecPtr;
- evntRecPtr.i = ref->getSenderData();
- ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
- jam();
- evntRecPtr.p->m_errorNode = reference();
- evntRecPtr.p->m_errorLine = __LINE__;
-
- switch (ref->getErrorCode()) {
- case UtilExecuteRef::TCError:
- switch (ref->getTCErrorCode()) {
- case ZNOT_FOUND:
- jam();
- evntRecPtr.p->m_errorCode = CreateEvntRef::EventNotFound;
- break;
- case ZALREADYEXIST:
- jam();
- evntRecPtr.p->m_errorCode = CreateEvntRef::EventNameExists;
- break;
- default:
- jam();
- evntRecPtr.p->m_errorCode = CreateEvntRef::UndefinedTCError;
- break;
- }
- break;
- default:
- jam();
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- break;
- }
-
- createEvent_sendReply(signal, evntRecPtr);
- }
-}
-
-/***********************************************************************
- *
- * NdbEventOperation, reading systable, creating event in suma
- *
- */
-
-void
-Dbdict::createEvent_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr){
- jam();
- EVENT_TRACE;
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_REQ::RT_USER_GET evntRecPtr.i = (%d), ref = %u", evntRecPtr.i, evntRecPtr.p->m_request.getUserRef());
-#endif
-
- SegmentedSectionPtr ssPtr;
-
- signal->getSection(ssPtr, 0);
-
- SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
-#ifdef EVENT_DEBUG
- r0.printAll(ndbout);
-#endif
- if ((!r0.first()) ||
- (r0.getValueType() != SimpleProperties::StringValue) ||
- (r0.getValueLen() <= 0)) {
- jam();
- releaseSections(signal);
-
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
- evntRecPtr.p->m_errorNode = reference();
-
- createEvent_sendReply(signal, evntRecPtr);
- return;
- }
-
- r0.getString(evntRecPtr.p->m_eventRec.NAME);
- int len = strlen(evntRecPtr.p->m_eventRec.NAME);
- memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
-
- releaseSections(signal);
-
- Callback c = { safe_cast(&Dbdict::createEventUTIL_PREPARE), 0 };
-
- prepareTransactionEventSysTable(&c, signal, evntRecPtr.i,
- UtilPrepareReq::Read);
- /*
- * Will read systable and fill an OpCreateEventPtr
- * and return below
- */
-}
-
-void
-Dbdict::createEventComplete_RT_USER_GET(Signal* signal,
- OpCreateEventPtr evntRecPtr){
- jam();
-
- // Send to oneself and the other DICT's
- CreateEvntReq * req = (CreateEvntReq *)signal->getDataPtrSend();
-
- *req = evntRecPtr.p->m_request;
- req->senderRef = reference();
- req->senderData = evntRecPtr.i;
-
- req->addRequestFlag(CreateEvntReq::RT_DICT_AFTER_GET);
-
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("DBDICT(Coordinator) sending GSN_CREATE_EVNT_REQ::RT_DICT_AFTER_GET to DBDICT participants evntRecPtr.i = (%d)", evntRecPtr.i);
-#endif
-
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- RequestTracker & p = evntRecPtr.p->m_reqTracker;
- p.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF, evntRecPtr.i);
-
- sendSignal(rg, GSN_CREATE_EVNT_REQ, signal, CreateEvntReq::SignalLength, JBB);
-}
-
-void
-Dbdict::createEvent_nodeFailCallback(Signal* signal, Uint32 eventRecPtrI,
- Uint32 returnCode){
- OpCreateEventPtr evntRecPtr;
- c_opCreateEvent.getPtr(evntRecPtr, eventRecPtrI);
- createEvent_sendReply(signal, evntRecPtr);
-}
-
-void Dbdict::execCREATE_EVNT_REF(Signal* signal)
-{
- jamEntry();
- EVENT_TRACE;
- CreateEvntRef * const ref = (CreateEvntRef *)signal->getDataPtr();
- OpCreateEventPtr evntRecPtr;
-
- evntRecPtr.i = ref->getUserData();
-
- ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
-
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_REF evntRecPtr.i = (%d)", evntRecPtr.i);
-#endif
-
- if (ref->errorCode == CreateEvntRef::NF_FakeErrorREF){
- jam();
- evntRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(ref->senderRef));
- } else {
- jam();
- evntRecPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(ref->senderRef));
- }
- createEvent_sendReply(signal, evntRecPtr);
-
- return;
-}
-
-void Dbdict::execCREATE_EVNT_CONF(Signal* signal)
-{
- jamEntry();
- EVENT_TRACE;
- CreateEvntConf * const conf = (CreateEvntConf *)signal->getDataPtr();
- OpCreateEventPtr evntRecPtr;
-
- evntRecPtr.i = conf->getUserData();
-
- ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
-
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_CONF evntRecPtr.i = (%d)", evntRecPtr.i);
-#endif
-
- evntRecPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(conf->senderRef));
-
- // we will only have a valid table name if it is the master DICT sending
- // this, but that's ok
- LinearSectionPtr ptr[1];
- ptr[0].p = (Uint32 *)evntRecPtr.p->m_eventRec.TABLE_NAME;
- ptr[0].sz =
- (strlen(evntRecPtr.p->m_eventRec.TABLE_NAME)+4)/4; // to make sure we have a null
-
- createEvent_sendReply(signal, evntRecPtr, ptr, 1);
-
- return;
-}
-
-/************************************************
- *
- * Participant stuff
- *
- */
-
-void
-Dbdict::createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPtr){
- jam();
- evntRecPtr.p->m_request.setUserRef(signal->senderBlockRef());
-
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("DBDICT(Participant) got CREATE_EVNT_REQ::RT_DICT_AFTER_GET evntRecPtr.i = (%d)", evntRecPtr.i);
-#endif
-
- // the signal comes from the DICT block that got the first user request!
- // This code runs on all DICT nodes, including oneself
-
- // Seize a Create Event record, the Coordinator will now have two seized
- // but that's ok, it's like a recursion
-
- SubCreateReq * sumaReq = (SubCreateReq *)signal->getDataPtrSend();
-
- sumaReq->subscriberRef = reference(); // reference to DICT
- sumaReq->subscriberData = evntRecPtr.i;
- sumaReq->subscriptionId = evntRecPtr.p->m_request.getEventId();
- sumaReq->subscriptionKey = evntRecPtr.p->m_request.getEventKey();
- sumaReq->subscriptionType = SubCreateReq::TableEvent;
- sumaReq->tableId = evntRecPtr.p->m_request.getTableId();
-
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("sending GSN_SUB_CREATE_REQ");
-#endif
-
- sendSignal(SUMA_REF, GSN_SUB_CREATE_REQ, signal,
- SubCreateReq::SignalLength+1 /*to get table Id*/, JBB);
-}
-
-void Dbdict::execSUB_CREATE_REF(Signal* signal)
-{
- jamEntry();
- EVENT_TRACE;
- SubCreateRef * const ref = (SubCreateRef *)signal->getDataPtr();
- OpCreateEventPtr evntRecPtr;
-
- evntRecPtr.i = ref->subscriberData;
- ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
-
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("DBDICT(Participant) got SUB_CREATE_REF evntRecPtr.i = (%d)", evntRecPtr.i);
-#endif
-
- if (ref->err == GrepError::SUBSCRIPTION_ID_NOT_UNIQUE) {
- jam();
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("SUBSCRIPTION_ID_NOT_UNIQUE");
-#endif
- createEvent_sendReply(signal, evntRecPtr);
- return;
- }
-
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("Other error");
-#endif
-
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
- evntRecPtr.p->m_errorNode = reference();
-
- createEvent_sendReply(signal, evntRecPtr);
-}
-
-void Dbdict::execSUB_CREATE_CONF(Signal* signal)
-{
- jamEntry();
- EVENT_TRACE;
-
- SubCreateConf * const sumaConf = (SubCreateConf *)signal->getDataPtr();
-
- const Uint32 subscriptionId = sumaConf->subscriptionId;
- const Uint32 subscriptionKey = sumaConf->subscriptionKey;
- const Uint32 evntRecId = sumaConf->subscriberData;
-
- OpCreateEvent *evntRec;
- ndbrequire((evntRec = c_opCreateEvent.getPtr(evntRecId)) != NULL);
-
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("DBDICT(Participant) got SUB_CREATE_CONF evntRecPtr.i = (%d)", evntRecId);
-#endif
-
- SubSyncReq *sumaSync = (SubSyncReq *)signal->getDataPtrSend();
-
- sumaSync->subscriptionId = subscriptionId;
- sumaSync->subscriptionKey = subscriptionKey;
- sumaSync->part = (Uint32) SubscriptionData::MetaData;
- sumaSync->subscriberData = evntRecId;
-
- sendSignal(SUMA_REF, GSN_SUB_SYNC_REQ, signal,
- SubSyncReq::SignalLength, JBB);
-}
-
-void Dbdict::execSUB_SYNC_REF(Signal* signal)
-{
- jamEntry();
- EVENT_TRACE;
- SubSyncRef * const ref = (SubSyncRef *)signal->getDataPtr();
- OpCreateEventPtr evntRecPtr;
-
- evntRecPtr.i = ref->subscriberData;
- ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
-
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
- evntRecPtr.p->m_errorNode = reference();
-
- createEvent_sendReply(signal, evntRecPtr);
-}
-
-void Dbdict::execSUB_SYNC_CONF(Signal* signal)
-{
- jamEntry();
- EVENT_TRACE;
-
- SubSyncConf * const sumaSyncConf = (SubSyncConf *)signal->getDataPtr();
-
- // Uint32 subscriptionId = sumaSyncConf->subscriptionId;
- // Uint32 subscriptionKey = sumaSyncConf->subscriptionKey;
- OpCreateEventPtr evntRecPtr;
-
- evntRecPtr.i = sumaSyncConf->subscriberData;
- ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
-
- ndbrequire(sumaSyncConf->part == (Uint32)SubscriptionData::MetaData);
-
- createEvent_sendReply(signal, evntRecPtr);
-}
-
-/****************************************************
- *
- * common create reply method
- *
- *******************************************************/
-
-void Dbdict::createEvent_sendReply(Signal* signal,
- OpCreateEventPtr evntRecPtr,
- LinearSectionPtr *ptr, int noLSP)
-{
- jam();
- EVENT_TRACE;
-
- // check if we're ready to send the reply;
- // if we are the master DICT we might still be waiting for conf/ref
-
- if (!evntRecPtr.p->m_reqTracker.done()) {
- jam();
- return; // there's more to come
- }
-
- if (evntRecPtr.p->m_reqTracker.hasRef()) {
- ptr = NULL; // we don't want to return anything if there's an error
- if (!evntRecPtr.p->hasError()) {
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
- evntRecPtr.p->m_errorNode = reference();
- jam();
- } else
- jam();
- }
-
- // reference to API if master DICT
- // else reference to master DICT
- Uint32 senderRef = evntRecPtr.p->m_request.getUserRef();
- Uint32 signalLength;
- Uint32 gsn;
-
- if (evntRecPtr.p->hasError()) {
- jam();
- EVENT_TRACE;
- CreateEvntRef * ret = (CreateEvntRef *)signal->getDataPtrSend();
-
- ret->setEventId(evntRecPtr.p->m_request.getEventId());
- ret->setEventKey(evntRecPtr.p->m_request.getEventKey());
- ret->setUserData(evntRecPtr.p->m_request.getUserData());
- ret->senderRef = reference();
- ret->setTableId(evntRecPtr.p->m_request.getTableId());
- ret->setEventType(evntRecPtr.p->m_request.getEventType());
- ret->setRequestType(evntRecPtr.p->m_request.getRequestType());
-
- ret->setErrorCode(evntRecPtr.p->m_errorCode);
- ret->setErrorLine(evntRecPtr.p->m_errorLine);
- ret->setErrorNode(evntRecPtr.p->m_errorNode);
-
- signalLength = CreateEvntRef::SignalLength;
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("DBDICT sending GSN_CREATE_EVNT_REF to evntRecPtr.i = (%d) node = %u ref = %u", evntRecPtr.i, refToNode(senderRef), senderRef);
- ndbout_c("errorCode = %u", evntRecPtr.p->m_errorCode);
- ndbout_c("errorLine = %u", evntRecPtr.p->m_errorLine);
-#endif
- gsn = GSN_CREATE_EVNT_REF;
-
- } else {
- jam();
- EVENT_TRACE;
- CreateEvntConf * evntConf = (CreateEvntConf *)signal->getDataPtrSend();
-
- evntConf->setEventId(evntRecPtr.p->m_request.getEventId());
- evntConf->setEventKey(evntRecPtr.p->m_request.getEventKey());
- evntConf->setUserData(evntRecPtr.p->m_request.getUserData());
- evntConf->senderRef = reference();
- evntConf->setTableId(evntRecPtr.p->m_request.getTableId());
- evntConf->setAttrListBitmask(evntRecPtr.p->m_request.getAttrListBitmask());
- evntConf->setEventType(evntRecPtr.p->m_request.getEventType());
- evntConf->setRequestType(evntRecPtr.p->m_request.getRequestType());
-
- signalLength = CreateEvntConf::SignalLength;
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("DBDICT sending GSN_CREATE_EVNT_CONF to evntRecPtr.i = (%d) node = %u ref = %u", evntRecPtr.i, refToNode(senderRef), senderRef);
-#endif
- gsn = GSN_CREATE_EVNT_CONF;
- }
-
- if (ptr) {
- jam();
- sendSignal(senderRef, gsn, signal, signalLength, JBB, ptr, noLSP);
- } else {
- jam();
- sendSignal(senderRef, gsn, signal, signalLength, JBB);
- }
-
- c_opCreateEvent.release(evntRecPtr);
-}
-
-/*************************************************************/
-
-/********************************************************************
- *
- * Start event
- *
- *******************************************************************/
-
-void Dbdict::execSUB_START_REQ(Signal* signal)
-{
- jamEntry();
-
- Uint32 origSenderRef = signal->senderBlockRef();
-
- OpSubEventPtr subbPtr;
- if (!c_opSubEvent.seize(subbPtr)) {
- SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend();
- { // fix
- Uint32 subscriberRef = ((SubStartReq*)signal->getDataPtr())->subscriberRef;
- ref->subscriberRef = subscriberRef;
- }
- jam();
- // ret->setErrorCode(SubStartRef::SeizeError);
- // ret->setErrorLine(__LINE__);
- // ret->setErrorNode(reference());
- ref->senderRef = reference();
- ref->setTemporary(SubStartRef::Busy);
-
- sendSignal(origSenderRef, GSN_SUB_START_REF, signal,
- SubStartRef::SignalLength2, JBB);
- return;
- }
-
- {
- const SubStartReq* req = (SubStartReq*) signal->getDataPtr();
- subbPtr.p->m_senderRef = req->senderRef;
- subbPtr.p->m_senderData = req->senderData;
- subbPtr.p->m_errorCode = 0;
- }
-
- if (refToBlock(origSenderRef) != DBDICT) {
- /*
- * Coordinator
- */
- jam();
-
- subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- RequestTracker & p = subbPtr.p->m_reqTracker;
- p.init<SubStartRef>(c_counterMgr, rg, GSN_SUB_START_REF, subbPtr.i);
-
- SubStartReq* req = (SubStartReq*) signal->getDataPtrSend();
-
- req->senderRef = reference();
- req->senderData = subbPtr.i;
-
-#ifdef EVENT_PH3_DEBUG
- ndbout_c("DBDICT(Coordinator) sending GSN_SUB_START_REQ to DBDICT participants subbPtr.i = (%d)", subbPtr.i);
-#endif
-
- sendSignal(rg, GSN_SUB_START_REQ, signal, SubStartReq::SignalLength2, JBB);
- return;
- }
- /*
- * Participant
- */
- ndbrequire(refToBlock(origSenderRef) == DBDICT);
-
- {
- SubStartReq* req = (SubStartReq*) signal->getDataPtrSend();
-
- req->senderRef = reference();
- req->senderData = subbPtr.i;
-
-#ifdef EVENT_PH3_DEBUG
- ndbout_c("DBDICT(Participant) sending GSN_SUB_START_REQ to SUMA subbPtr.i = (%d)", subbPtr.i);
-#endif
- sendSignal(SUMA_REF, GSN_SUB_START_REQ, signal, SubStartReq::SignalLength2, JBB);
- }
-}
-
-void Dbdict::execSUB_START_REF(Signal* signal)
-{
- jamEntry();
-
- const SubStartRef* ref = (SubStartRef*) signal->getDataPtr();
- Uint32 senderRef = ref->senderRef;
-
- OpSubEventPtr subbPtr;
- c_opSubEvent.getPtr(subbPtr, ref->senderData);
-
- if (refToBlock(senderRef) == SUMA) {
- /*
- * Participant
- */
- jam();
-
-#ifdef EVENT_PH3_DEBUG
- ndbout_c("DBDICT(Participant) got GSN_SUB_START_REF = (%d)", subbPtr.i);
-#endif
-
- if (ref->isTemporary()){
- jam();
- SubStartReq* req = (SubStartReq*)signal->getDataPtrSend();
- { // fix
- Uint32 subscriberRef = ref->subscriberRef;
- req->subscriberRef = subscriberRef;
- }
- req->senderRef = reference();
- req->senderData = subbPtr.i;
- sendSignal(SUMA_REF, GSN_SUB_START_REQ,
- signal, SubStartReq::SignalLength2, JBB);
- } else {
- jam();
-
- SubStartRef* ref = (SubStartRef*) signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = subbPtr.p->m_senderData;
- sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF,
- signal, SubStartRef::SignalLength2, JBB);
- c_opSubEvent.release(subbPtr);
- }
- return;
- }
- /*
- * Coordinator
- */
- ndbrequire(refToBlock(senderRef) == DBDICT);
-#ifdef EVENT_PH3_DEBUG
- ndbout_c("DBDICT(Coordinator) got GSN_SUB_START_REF = (%d)", subbPtr.i);
-#endif
- if (ref->errorCode == SubStartRef::NF_FakeErrorREF){
- jam();
- subbPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef));
- } else {
- jam();
- subbPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef));
- }
- completeSubStartReq(signal,subbPtr.i,0);
-}
-
-void Dbdict::execSUB_START_CONF(Signal* signal)
-{
- jamEntry();
-
- const SubStartConf* conf = (SubStartConf*) signal->getDataPtr();
- Uint32 senderRef = conf->senderRef;
-
- OpSubEventPtr subbPtr;
- c_opSubEvent.getPtr(subbPtr, conf->senderData);
-
- if (refToBlock(senderRef) == SUMA) {
- /*
- * Participant
- */
- jam();
- SubStartConf* conf = (SubStartConf*) signal->getDataPtrSend();
-
-#ifdef EVENT_PH3_DEBUG
- ndbout_c("DBDICT(Participant) got GSN_SUB_START_CONF = (%d)", subbPtr.i);
-#endif
-
- conf->senderRef = reference();
- conf->senderData = subbPtr.p->m_senderData;
-
- sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_CONF,
- signal, SubStartConf::SignalLength2, JBB);
- c_opSubEvent.release(subbPtr);
- return;
- }
- /*
- * Coordinator
- */
- ndbrequire(refToBlock(senderRef) == DBDICT);
-#ifdef EVENT_PH3_DEBUG
- ndbout_c("DBDICT(Coordinator) got GSN_SUB_START_CONF = (%d)", subbPtr.i);
-#endif
- subbPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef));
- completeSubStartReq(signal,subbPtr.i,0);
-}
-
-/*
- * Coordinator
- */
-void Dbdict::completeSubStartReq(Signal* signal,
- Uint32 ptrI,
- Uint32 returnCode){
- jam();
-
- OpSubEventPtr subbPtr;
- c_opSubEvent.getPtr(subbPtr, ptrI);
-
- if (!subbPtr.p->m_reqTracker.done()){
- jam();
- return;
- }
-
- if (subbPtr.p->m_reqTracker.hasRef()) {
- jam();
-#ifdef EVENT_DEBUG
- ndbout_c("SUB_START_REF");
-#endif
- sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF,
- signal, SubStartRef::SignalLength, JBB);
- if (subbPtr.p->m_reqTracker.hasConf()) {
- // stopStartedNodes(signal);
- }
- c_opSubEvent.release(subbPtr);
- return;
- }
-#ifdef EVENT_DEBUG
- ndbout_c("SUB_START_CONF");
-#endif
- sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_CONF,
- signal, SubStartConf::SignalLength, JBB);
- c_opSubEvent.release(subbPtr);
-}
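// The start/stop/remove handlers all follow the same coordinator/participant
// shape. Roughly, in terms of the RequestTracker calls used above:
//
//   coordinator: reqTracker.init<SomeRef>(c_counterMgr, rg, GSN_..._REF, key);
//                sendSignal(rg, GSN_..._REQ, ...);   // fan out to all DICTs
//   per reply:   reportConf() / reportRef() / ignoreRef() on the tracker,
//                then complete...Req(), which returns until tracker.done()
//                and finally sends CONF, or REF if tracker.hasRef().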
-
-/********************************************************************
- *
- * Stop event
- *
- *******************************************************************/
-
-void Dbdict::execSUB_STOP_REQ(Signal* signal)
-{
- jamEntry();
-
- Uint32 origSenderRef = signal->senderBlockRef();
-
- OpSubEventPtr subbPtr;
- if (!c_opSubEvent.seize(subbPtr)) {
- SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend();
- jam();
- // ret->setErrorCode(SubStartRef::SeizeError);
- // ret->setErrorLine(__LINE__);
- // ret->setErrorNode(reference());
- ref->senderRef = reference();
- ref->setTemporary(SubStopRef::Busy);
-
- sendSignal(origSenderRef, GSN_SUB_STOP_REF, signal,
- SubStopRef::SignalLength, JBB);
- return;
- }
-
- {
- const SubStopReq* req = (SubStopReq*) signal->getDataPtr();
- subbPtr.p->m_senderRef = req->senderRef;
- subbPtr.p->m_senderData = req->senderData;
- subbPtr.p->m_errorCode = 0;
- }
-
- if (refToBlock(origSenderRef) != DBDICT) {
- /*
- * Coordinator
- */
- jam();
-#ifdef EVENT_DEBUG
- ndbout_c("SUB_STOP_REQ 1");
-#endif
- subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- RequestTracker & p = subbPtr.p->m_reqTracker;
- p.init<SubStopRef>(c_counterMgr, rg, GSN_SUB_STOP_REF, subbPtr.i);
-
- SubStopReq* req = (SubStopReq*) signal->getDataPtrSend();
-
- req->senderRef = reference();
- req->senderData = subbPtr.i;
-
- sendSignal(rg, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB);
- return;
- }
- /*
- * Participant
- */
-#ifdef EVENT_DEBUG
- ndbout_c("SUB_STOP_REQ 2");
-#endif
- ndbrequire(refToBlock(origSenderRef) == DBDICT);
- {
- SubStopReq* req = (SubStopReq*) signal->getDataPtrSend();
-
- req->senderRef = reference();
- req->senderData = subbPtr.i;
-
- sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB);
- }
-}
-
-void Dbdict::execSUB_STOP_REF(Signal* signal)
-{
- jamEntry();
- const SubStopRef* ref = (SubStopRef*) signal->getDataPtr();
- Uint32 senderRef = ref->senderRef;
-
- OpSubEventPtr subbPtr;
- c_opSubEvent.getPtr(subbPtr, ref->senderData);
-
- if (refToBlock(senderRef) == SUMA) {
- /*
- * Participant
- */
- jam();
- if (ref->isTemporary()){
- jam();
- SubStopReq* req = (SubStopReq*)signal->getDataPtrSend();
- req->senderRef = reference();
- req->senderData = subbPtr.i;
- sendSignal(SUMA_REF, GSN_SUB_STOP_REQ,
- signal, SubStopReq::SignalLength, JBB);
- } else {
- jam();
- SubStopRef* ref = (SubStopRef*) signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = subbPtr.p->m_senderData;
- sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF,
- signal, SubStopRef::SignalLength, JBB);
- c_opSubEvent.release(subbPtr);
- }
- return;
- }
- /*
- * Coordinator
- */
- ndbrequire(refToBlock(senderRef) == DBDICT);
- if (ref->errorCode == SubStopRef::NF_FakeErrorREF){
- jam();
- subbPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef));
- } else {
- jam();
- subbPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef));
- }
- completeSubStopReq(signal,subbPtr.i,0);
-}
-
-void Dbdict::execSUB_STOP_CONF(Signal* signal)
-{
- jamEntry();
-
- const SubStopConf* conf = (SubStopConf*) signal->getDataPtr();
- Uint32 senderRef = conf->senderRef;
-
- OpSubEventPtr subbPtr;
- c_opSubEvent.getPtr(subbPtr, conf->senderData);
-
- if (refToBlock(senderRef) == SUMA) {
- /*
- * Participant
- */
- jam();
- SubStopConf* conf = (SubStopConf*) signal->getDataPtrSend();
-
- conf->senderRef = reference();
- conf->senderData = subbPtr.p->m_senderData;
-
- sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_CONF,
- signal, SubStopConf::SignalLength, JBB);
- c_opSubEvent.release(subbPtr);
- return;
- }
- /*
- * Coordinator
- */
- ndbrequire(refToBlock(senderRef) == DBDICT);
- subbPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef));
- completeSubStopReq(signal,subbPtr.i,0);
-}
-
-/*
- * Coordinator
- */
-void Dbdict::completeSubStopReq(Signal* signal,
- Uint32 ptrI,
- Uint32 returnCode){
- OpSubEventPtr subbPtr;
- c_opSubEvent.getPtr(subbPtr, ptrI);
-
- if (!subbPtr.p->m_reqTracker.done()){
- jam();
- return;
- }
-
- if (subbPtr.p->m_reqTracker.hasRef()) {
- jam();
-#ifdef EVENT_DEBUG
- ndbout_c("SUB_STOP_REF");
-#endif
- SubStopRef* ref = (SubStopRef*)signal->getDataPtrSend();
-
- ref->senderRef = reference();
- ref->senderData = subbPtr.p->m_senderData;
- /*
- ref->subscriptionId = subbPtr.p->m_senderData;
- ref->subscriptionKey = subbPtr.p->m_senderData;
- ref->part = subbPtr.p->m_part; // SubscriptionData::Part
- ref->subscriberData = subbPtr.p->m_subscriberData;
- ref->subscriberRef = subbPtr.p->m_subscriberRef;
- */
- ref->errorCode = subbPtr.p->m_errorCode;
-
-
- sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF,
- signal, SubStopRef::SignalLength, JBB);
- if (subbPtr.p->m_reqTracker.hasConf()) {
- // stopStartedNodes(signal);
- }
- c_opSubEvent.release(subbPtr);
- return;
- }
-#ifdef EVENT_DEBUG
- ndbout_c("SUB_STOP_CONF");
-#endif
- sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_CONF,
- signal, SubStopConf::SignalLength, JBB);
- c_opSubEvent.release(subbPtr);
-}
-
-/***************************************************************
- * MODULE: Drop event.
- *
- * Drop event.
- *
- * TODO
- */
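// A sketch of the drop-event flow implemented below, using only functions
// referenced in the code that follows:
//
//   execDROP_EVNT_REQ
//     -> prepareTransactionEventSysTable(Read)   -> dropEventUTIL_PREPARE_READ
//     -> executeTransEventSysTable(Read)         -> dropEventUTIL_EXECUTE_READ
//     -> GSN_SUB_REMOVE_REQ to all DICTs (participants forward it to SUMA)
//     -> completeSubRemoveReq once all CONF/REF are in
//     -> prepareTransactionEventSysTable(Delete) -> dropEventUTIL_PREPARE_DELETE
//     -> executeTransEventSysTable(Delete)       -> dropEventUTIL_EXECUTE_DELETE
//     -> dropEvent_sendReply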
-
-void
-Dbdict::execDROP_EVNT_REQ(Signal* signal)
-{
- jamEntry();
- EVENT_TRACE;
-
- DropEvntReq *req = (DropEvntReq*)signal->getDataPtr();
- const Uint32 senderRef = signal->senderBlockRef();
- OpDropEventPtr evntRecPtr;
-
- // Seize a Create Event record
- if (!c_opDropEvent.seize(evntRecPtr)) {
- // Failed to allocate event record
- jam();
- releaseSections(signal);
-
- DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend();
- ret->setErrorCode(DropEvntRef::SeizeError);
- ret->setErrorLine(__LINE__);
- ret->setErrorNode(reference());
- sendSignal(senderRef, GSN_DROP_EVNT_REF, signal,
- DropEvntRef::SignalLength, JBB);
- return;
- }
-
-#ifdef EVENT_DEBUG
- ndbout_c("DBDICT::execDROP_EVNT_REQ evntRecId = (%d)", evntRecPtr.i);
-#endif
-
- OpDropEvent* evntRec = evntRecPtr.p;
- evntRec->init(req);
-
- SegmentedSectionPtr ssPtr;
-
- signal->getSection(ssPtr, 0);
-
- SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
-#ifdef EVENT_DEBUG
- r0.printAll(ndbout);
-#endif
- // event name
- if ((!r0.first()) ||
- (r0.getValueType() != SimpleProperties::StringValue) ||
- (r0.getValueLen() <= 0)) {
- jam();
- releaseSections(signal);
-
- evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
- evntRecPtr.p->m_errorNode = reference();
-
- dropEvent_sendReply(signal, evntRecPtr);
- return;
- }
- r0.getString(evntRecPtr.p->m_eventRec.NAME);
- {
- int len = strlen(evntRecPtr.p->m_eventRec.NAME);
- memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
-#ifdef EVENT_DEBUG
- printf("DropEvntReq; EventName %s, len %u\n",
- evntRecPtr.p->m_eventRec.NAME, len);
- for(int i = 0; i < MAX_TAB_NAME_SIZE/4; i++)
- printf("H'%.8x ", ((Uint32*)evntRecPtr.p->m_eventRec.NAME)[i]);
- printf("\n");
-#endif
- }
-
- releaseSections(signal);
-
- Callback c = { safe_cast(&Dbdict::dropEventUTIL_PREPARE_READ), 0 };
-
- prepareTransactionEventSysTable(&c, signal, evntRecPtr.i,
- UtilPrepareReq::Read);
-}
-
-void
-Dbdict::dropEventUTIL_PREPARE_READ(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode)
-{
- jam();
- EVENT_TRACE;
- if (returnCode != 0) {
- EVENT_TRACE;
- dropEventUtilPrepareRef(signal, callbackData, returnCode);
- return;
- }
-
- UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr();
- OpDropEventPtr evntRecPtr;
- evntRecPtr.i = req->getSenderData();
- const Uint32 prepareId = req->getPrepareId();
-
- ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
-
- Callback c = { safe_cast(&Dbdict::dropEventUTIL_EXECUTE_READ), 0 };
-
- executeTransEventSysTable(&c, signal,
- evntRecPtr.i, evntRecPtr.p->m_eventRec,
- prepareId, UtilPrepareReq::Read);
-}
-
-void
-Dbdict::dropEventUTIL_EXECUTE_READ(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode)
-{
- jam();
- EVENT_TRACE;
- if (returnCode != 0) {
- EVENT_TRACE;
- dropEventUtilExecuteRef(signal, callbackData, returnCode);
- return;
- }
-
- OpDropEventPtr evntRecPtr;
- UtilExecuteConf * const ref = (UtilExecuteConf *)signal->getDataPtr();
- jam();
- evntRecPtr.i = ref->getSenderData();
- ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
-
- parseReadEventSys(signal, evntRecPtr.p->m_eventRec);
-
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- RequestTracker & p = evntRecPtr.p->m_reqTracker;
- p.init<SubRemoveRef>(c_counterMgr, rg, GSN_SUB_REMOVE_REF,
- evntRecPtr.i);
-
- SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend();
-
- req->senderRef = reference();
- req->senderData = evntRecPtr.i;
- req->subscriptionId = evntRecPtr.p->m_eventRec.SUBID;
- req->subscriptionKey = evntRecPtr.p->m_eventRec.SUBKEY;
-
- sendSignal(rg, GSN_SUB_REMOVE_REQ, signal, SubRemoveReq::SignalLength, JBB);
-}
-
-/*
- * Participant
- */
-
-void
-Dbdict::execSUB_REMOVE_REQ(Signal* signal)
-{
- jamEntry();
-
- Uint32 origSenderRef = signal->senderBlockRef();
-
- OpSubEventPtr subbPtr;
- if (!c_opSubEvent.seize(subbPtr)) {
- SubRemoveRef * ref = (SubRemoveRef *)signal->getDataPtrSend();
- jam();
- ref->senderRef = reference();
- ref->setTemporary(SubRemoveRef::Busy);
-
- sendSignal(origSenderRef, GSN_SUB_REMOVE_REF, signal,
- SubRemoveRef::SignalLength, JBB);
- return;
- }
-
- {
- const SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtr();
- subbPtr.p->m_senderRef = req->senderRef;
- subbPtr.p->m_senderData = req->senderData;
- subbPtr.p->m_errorCode = 0;
- }
-
- SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend();
- req->senderRef = reference();
- req->senderData = subbPtr.i;
-
- sendSignal(SUMA_REF, GSN_SUB_REMOVE_REQ, signal, SubRemoveReq::SignalLength, JBB);
-}
-
-/*
- * Coordinator/Participant
- */
-
-void
-Dbdict::execSUB_REMOVE_REF(Signal* signal)
-{
- jamEntry();
- const SubRemoveRef* ref = (SubRemoveRef*) signal->getDataPtr();
- Uint32 senderRef = ref->senderRef;
-
- if (refToBlock(senderRef) == SUMA) {
- /*
- * Participant
- */
- jam();
- OpSubEventPtr subbPtr;
- c_opSubEvent.getPtr(subbPtr, ref->senderData);
- if (ref->errorCode == (Uint32) GrepError::SUBSCRIPTION_ID_NOT_FOUND) {
- // send a conf since this may occur if a node failure has occurred
- // earlier so that the systable was not cleared
- SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = subbPtr.p->m_senderData;
- sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_CONF,
- signal, SubRemoveConf::SignalLength, JBB);
- } else {
- SubRemoveRef* ref = (SubRemoveRef*) signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = subbPtr.p->m_senderData;
- sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_REF,
- signal, SubRemoveRef::SignalLength, JBB);
- }
- c_opSubEvent.release(subbPtr);
- return;
- }
- /*
- * Coordinator
- */
- ndbrequire(refToBlock(senderRef) == DBDICT);
- OpDropEventPtr eventRecPtr;
- c_opDropEvent.getPtr(eventRecPtr, ref->senderData);
- if (ref->errorCode == SubRemoveRef::NF_FakeErrorREF){
- jam();
- eventRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef));
- } else {
- jam();
- eventRecPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef));
- }
- completeSubRemoveReq(signal,eventRecPtr.i,0);
-}
-
-void
-Dbdict::execSUB_REMOVE_CONF(Signal* signal)
-{
- jamEntry();
- const SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtr();
- Uint32 senderRef = conf->senderRef;
-
- if (refToBlock(senderRef) == SUMA) {
- /*
- * Participant
- */
- jam();
- OpSubEventPtr subbPtr;
- c_opSubEvent.getPtr(subbPtr, conf->senderData);
- SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = subbPtr.p->m_senderData;
- sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_CONF,
- signal, SubRemoveConf::SignalLength, JBB);
- c_opSubEvent.release(subbPtr);
- return;
- }
- /*
- * Coordinator
- */
- ndbrequire(refToBlock(senderRef) == DBDICT);
- OpDropEventPtr eventRecPtr;
- c_opDropEvent.getPtr(eventRecPtr, conf->senderData);
- eventRecPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef));
- completeSubRemoveReq(signal,eventRecPtr.i,0);
-}
-
-void
-Dbdict::completeSubRemoveReq(Signal* signal, Uint32 ptrI, Uint32 xxx)
-{
- OpDropEventPtr evntRecPtr;
- c_opDropEvent.getPtr(evntRecPtr, ptrI);
-
- if (!evntRecPtr.p->m_reqTracker.done()){
- jam();
- return;
- }
-
- if (evntRecPtr.p->m_reqTracker.hasRef()) {
- jam();
- evntRecPtr.p->m_errorNode = reference();
- evntRecPtr.p->m_errorLine = __LINE__;
- evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
- dropEvent_sendReply(signal, evntRecPtr);
- return;
- }
-
- Callback c = { safe_cast(&Dbdict::dropEventUTIL_PREPARE_DELETE), 0 };
-
- prepareTransactionEventSysTable(&c, signal, evntRecPtr.i,
- UtilPrepareReq::Delete);
-}
-
-void
-Dbdict::dropEventUTIL_PREPARE_DELETE(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode)
-{
- jam();
- EVENT_TRACE;
- if (returnCode != 0) {
- EVENT_TRACE;
- dropEventUtilPrepareRef(signal, callbackData, returnCode);
- return;
- }
-
- UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr();
- OpDropEventPtr evntRecPtr;
- jam();
- evntRecPtr.i = req->getSenderData();
- const Uint32 prepareId = req->getPrepareId();
-
- ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
-#ifdef EVENT_DEBUG
- printf("DropEvntUTIL_PREPARE; evntRecPtr.i len %u\n",evntRecPtr.i);
-#endif
-
- Callback c = { safe_cast(&Dbdict::dropEventUTIL_EXECUTE_DELETE), 0 };
-
- executeTransEventSysTable(&c, signal,
- evntRecPtr.i, evntRecPtr.p->m_eventRec,
- prepareId, UtilPrepareReq::Delete);
-}
-
-void
-Dbdict::dropEventUTIL_EXECUTE_DELETE(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode)
-{
- jam();
- EVENT_TRACE;
- if (returnCode != 0) {
- EVENT_TRACE;
- dropEventUtilExecuteRef(signal, callbackData, returnCode);
- return;
- }
-
- OpDropEventPtr evntRecPtr;
- UtilExecuteConf * const ref = (UtilExecuteConf *)signal->getDataPtr();
- jam();
- evntRecPtr.i = ref->getSenderData();
- ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
-
- dropEvent_sendReply(signal, evntRecPtr);
-}
-
-void
-Dbdict::dropEventUtilPrepareRef(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode)
-{
- jam();
- EVENT_TRACE;
- UtilPrepareRef * const ref = (UtilPrepareRef *)signal->getDataPtr();
- OpDropEventPtr evntRecPtr;
- evntRecPtr.i = ref->getSenderData();
- ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
-
- bool temporary = false;
- interpretUtilPrepareErrorCode((UtilPrepareRef::ErrorCode)ref->getErrorCode(),
- temporary, evntRecPtr.p->m_errorLine);
- if (temporary) {
- evntRecPtr.p->m_errorCode = (DropEvntRef::ErrorCode)
- ((Uint32) DropEvntRef::Undefined | (Uint32) DropEvntRef::Temporary);
- }
-
- if (evntRecPtr.p->m_errorCode == 0) {
- evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
- }
- evntRecPtr.p->m_errorNode = reference();
-
- dropEvent_sendReply(signal, evntRecPtr);
-}
-
-void
-Dbdict::dropEventUtilExecuteRef(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode)
-{
- jam();
- EVENT_TRACE;
- OpDropEventPtr evntRecPtr;
- UtilExecuteRef * const ref = (UtilExecuteRef *)signal->getDataPtr();
- jam();
- evntRecPtr.i = ref->getSenderData();
- ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
-
- evntRecPtr.p->m_errorNode = reference();
- evntRecPtr.p->m_errorLine = __LINE__;
-
- switch (ref->getErrorCode()) {
- case UtilExecuteRef::TCError:
- switch (ref->getTCErrorCode()) {
- case ZNOT_FOUND:
- jam();
- evntRecPtr.p->m_errorCode = DropEvntRef::EventNotFound;
- break;
- default:
- jam();
- evntRecPtr.p->m_errorCode = DropEvntRef::UndefinedTCError;
- break;
- }
- break;
- default:
- jam();
- evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
- break;
- }
- dropEvent_sendReply(signal, evntRecPtr);
-}
-
-void Dbdict::dropEvent_sendReply(Signal* signal,
- OpDropEventPtr evntRecPtr)
-{
- jam();
- EVENT_TRACE;
- Uint32 senderRef = evntRecPtr.p->m_request.getUserRef();
-
- if (evntRecPtr.p->hasError()) {
- jam();
- DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend();
-
- ret->setUserData(evntRecPtr.p->m_request.getUserData());
- ret->setUserRef(evntRecPtr.p->m_request.getUserRef());
-
- ret->setErrorCode(evntRecPtr.p->m_errorCode);
- ret->setErrorLine(evntRecPtr.p->m_errorLine);
- ret->setErrorNode(evntRecPtr.p->m_errorNode);
-
- sendSignal(senderRef, GSN_DROP_EVNT_REF, signal,
- DropEvntRef::SignalLength, JBB);
- } else {
- jam();
- DropEvntConf * evntConf = (DropEvntConf *)signal->getDataPtrSend();
-
- evntConf->setUserData(evntRecPtr.p->m_request.getUserData());
- evntConf->setUserRef(evntRecPtr.p->m_request.getUserRef());
-
- sendSignal(senderRef, GSN_DROP_EVNT_CONF, signal,
- DropEvntConf::SignalLength, JBB);
- }
-
- c_opDropEvent.release(evntRecPtr);
-}
-
-/**
- * MODULE: Alter index
- *
- * Alter index state. Alter online creates the index in each TC and
- * then invokes create trigger and alter trigger protocols to activate
- * the 3 triggers. Alter offline does the opposite.
- *
- * Request type received in REQ and returned in CONF/REF:
- *
- * RT_USER - from API to DICT master
- * RT_CREATE_INDEX - part of create index operation
- * RT_DROP_INDEX - part of drop index operation
- * RT_NODERESTART - node restart, activate locally only
- * RT_SYSTEMRESTART - system restart, activate and build if not logged
- * RT_DICT_PREPARE - prepare participants
- * RT_DICT_TC - to local TC via each participant
- * RT_DICT_COMMIT - commit in each participant
- */
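
As a rough illustration of the RT_USER case above, a client block would fill in an AlterIndxReq and send it to the DICT master along the following lines. This is a minimal sketch based only on the setters and helpers visible elsewhere in this diff; myOpKey, myTableId and myIndexId are hypothetical placeholders, and the surrounding signal/reference() context is assumed.

    AlterIndxReq* req = (AlterIndxReq*)signal->getDataPtrSend();
    req->setUserRef(reference());                // CONF/REF is returned to this block
    req->setConnectionPtr(myOpKey);              // hypothetical client-side operation key
    req->setRequestType(AlterIndxReq::RT_USER);  // user request, coordinated by the master
    req->setTableId(myTableId);                  // hypothetical primary table id
    req->setIndexId(myIndexId);                  // hypothetical index id
    req->setOnline(true);                        // activate the index (alter online)
    sendSignal(calcDictBlockRef(c_masterNodeId), GSN_ALTER_INDX_REQ,
               signal, AlterIndxReq::SignalLength, JBB);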
-
-void
-Dbdict::execALTER_INDX_REQ(Signal* signal)
-{
- jamEntry();
- AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
- OpAlterIndexPtr opPtr;
- const Uint32 senderRef = signal->senderBlockRef();
- const AlterIndxReq::RequestType requestType = req->getRequestType();
- if (requestType == AlterIndxReq::RT_USER ||
- requestType == AlterIndxReq::RT_CREATE_INDEX ||
- requestType == AlterIndxReq::RT_DROP_INDEX ||
- requestType == AlterIndxReq::RT_NODERESTART ||
- requestType == AlterIndxReq::RT_SYSTEMRESTART) {
- jam();
- const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL;
- NdbNodeBitmask receiverNodes = c_aliveNodes;
- if (isLocal) {
- receiverNodes.clear();
- receiverNodes.set(getOwnNodeId());
- }
- if (signal->getLength() == AlterIndxReq::SignalLength) {
- jam();
- if (! isLocal && getOwnNodeId() != c_masterNodeId) {
- jam();
-
- releaseSections(signal);
- OpAlterIndex opBad;
- opPtr.p = &opBad;
- opPtr.p->save(req);
- opPtr.p->m_errorCode = AlterIndxRef::NotMaster;
- opPtr.p->m_errorLine = __LINE__;
- opPtr.p->m_errorNode = c_masterNodeId;
- alterIndex_sendReply(signal, opPtr, true);
- return;
- }
- // forward initial request plus operation key to all
- req->setOpKey(++c_opRecordSequence);
- NodeReceiverGroup rg(DBDICT, receiverNodes);
- sendSignal(rg, GSN_ALTER_INDX_REQ,
- signal, AlterIndxReq::SignalLength + 1, JBB);
- return;
- }
- // seize operation record
- ndbrequire(signal->getLength() == AlterIndxReq::SignalLength + 1);
- const Uint32 opKey = req->getOpKey();
- OpAlterIndex opBusy;
- if (! c_opAlterIndex.seize(opPtr))
- opPtr.p = &opBusy;
- opPtr.p->save(req);
- opPtr.p->m_coordinatorRef = senderRef;
- opPtr.p->m_isMaster = (senderRef == reference());
- opPtr.p->key = opKey;
- opPtr.p->m_requestType = AlterIndxReq::RT_DICT_PREPARE;
- if (opPtr.p == &opBusy) {
- jam();
- opPtr.p->m_errorCode = AlterIndxRef::Busy;
- opPtr.p->m_errorLine = __LINE__;
- alterIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
- return;
- }
- c_opAlterIndex.add(opPtr);
- // master expects to hear from all
- if (opPtr.p->m_isMaster)
- opPtr.p->m_signalCounter = receiverNodes;
- // check request in all participants
- alterIndex_slavePrepare(signal, opPtr);
- alterIndex_sendReply(signal, opPtr, false);
- return;
- }
- c_opAlterIndex.find(opPtr, req->getConnectionPtr());
- if (! opPtr.isNull()) {
- opPtr.p->m_requestType = requestType;
- if (requestType == AlterIndxReq::RT_DICT_TC) {
- jam();
- if (opPtr.p->m_request.getOnline())
- alterIndex_toCreateTc(signal, opPtr);
- else
- alterIndex_toDropTc(signal, opPtr);
- return;
- }
- if (requestType == AlterIndxReq::RT_DICT_COMMIT ||
- requestType == AlterIndxReq::RT_DICT_ABORT) {
- jam();
- if (requestType == AlterIndxReq::RT_DICT_COMMIT)
- alterIndex_slaveCommit(signal, opPtr);
- else
- alterIndex_slaveAbort(signal, opPtr);
- alterIndex_sendReply(signal, opPtr, false);
- // done in slave
- if (! opPtr.p->m_isMaster)
- c_opAlterIndex.release(opPtr);
- return;
- }
- }
- jam();
- // return to sender
- OpAlterIndex opBad;
- opPtr.p = &opBad;
- opPtr.p->save(req);
- opPtr.p->m_errorCode = AlterIndxRef::BadRequestType;
- opPtr.p->m_errorLine = __LINE__;
- alterIndex_sendReply(signal, opPtr, true);
-}
-
-void
-Dbdict::execALTER_INDX_CONF(Signal* signal)
-{
- jamEntry();
- ndbrequire(signal->getNoOfSections() == 0);
- AlterIndxConf* conf = (AlterIndxConf*)signal->getDataPtrSend();
- alterIndex_recvReply(signal, conf, 0);
-}
-
-void
-Dbdict::execALTER_INDX_REF(Signal* signal)
-{
- jamEntry();
- AlterIndxRef* ref = (AlterIndxRef*)signal->getDataPtrSend();
- alterIndex_recvReply(signal, ref->getConf(), ref);
-}
-
-void
-Dbdict::alterIndex_recvReply(Signal* signal, const AlterIndxConf* conf,
- const AlterIndxRef* ref)
-{
- jam();
- const Uint32 senderRef = signal->senderBlockRef();
- const AlterIndxReq::RequestType requestType = conf->getRequestType();
- const Uint32 key = conf->getConnectionPtr();
- if (requestType == AlterIndxReq::RT_CREATE_INDEX) {
- jam();
- // part of create index operation
- OpCreateIndexPtr opPtr;
- c_opCreateIndex.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- createIndex_fromAlterIndex(signal, opPtr);
- return;
- }
- if (requestType == AlterIndxReq::RT_DROP_INDEX) {
- jam();
- // part of drop index operation
- OpDropIndexPtr opPtr;
- c_opDropIndex.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- dropIndex_fromAlterIndex(signal, opPtr);
- return;
- }
- if (requestType == AlterIndxReq::RT_TC ||
- requestType == AlterIndxReq::RT_TUX) {
- jam();
- // part of build index operation
- OpBuildIndexPtr opPtr;
- c_opBuildIndex.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- buildIndex_fromOnline(signal, opPtr);
- return;
- }
- if (requestType == AlterIndxReq::RT_NODERESTART) {
- jam();
- if (ref == 0) {
- infoEvent("DICT: index %u activated", (unsigned)key);
- } else {
- warningEvent("DICT: index %u activation failed: code=%d line=%d",
- (unsigned)key,
- ref->getErrorCode(), ref->getErrorLine());
- }
- activateIndexes(signal, key + 1);
- return;
- }
- if (requestType == AlterIndxReq::RT_SYSTEMRESTART) {
- jam();
- if (ref == 0) {
- infoEvent("DICT: index %u activated done", (unsigned)key);
- } else {
- warningEvent("DICT: index %u activated failed: code=%d line=%d node=%d",
- (unsigned)key,
- ref->getErrorCode(), ref->getErrorLine(), ref->getErrorNode());
- }
- activateIndexes(signal, key + 1);
- return;
- }
- OpAlterIndexPtr opPtr;
- c_opAlterIndex.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- ndbrequire(opPtr.p->m_isMaster);
- ndbrequire(opPtr.p->m_requestType == requestType);
- opPtr.p->setError(ref);
- opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
- if (! opPtr.p->m_signalCounter.done()) {
- jam();
- return;
- }
- if (requestType == AlterIndxReq::RT_DICT_COMMIT ||
- requestType == AlterIndxReq::RT_DICT_ABORT) {
- jam();
- // send reply to user
- alterIndex_sendReply(signal, opPtr, true);
- c_opAlterIndex.release(opPtr);
- return;
- }
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = AlterIndxReq::RT_DICT_ABORT;
- alterIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- if (indexPtr.p->isHashIndex()) {
- if (requestType == AlterIndxReq::RT_DICT_PREPARE) {
- jam();
- if (opPtr.p->m_request.getOnline()) {
- opPtr.p->m_requestType = AlterIndxReq::RT_DICT_TC;
- alterIndex_sendSlaveReq(signal, opPtr);
- } else {
- // start drop triggers
- alterIndex_toDropTrigger(signal, opPtr);
- }
- return;
- }
- if (requestType == AlterIndxReq::RT_DICT_TC) {
- jam();
- if (opPtr.p->m_request.getOnline()) {
- // start create triggers
- alterIndex_toCreateTrigger(signal, opPtr);
- } else {
- opPtr.p->m_requestType = AlterIndxReq::RT_DICT_COMMIT;
- alterIndex_sendSlaveReq(signal, opPtr);
- }
- return;
- }
- }
- if (indexPtr.p->isOrderedIndex()) {
- if (requestType == AlterIndxReq::RT_DICT_PREPARE) {
- jam();
- if (opPtr.p->m_request.getOnline()) {
- // start create triggers
- alterIndex_toCreateTrigger(signal, opPtr);
- } else {
- // start drop triggers
- alterIndex_toDropTrigger(signal, opPtr);
- }
- return;
- }
- }
- ndbrequire(false);
-}
-
-void
-Dbdict::alterIndex_slavePrepare(Signal* signal, OpAlterIndexPtr opPtr)
-{
- jam();
- const AlterIndxReq* const req = &opPtr.p->m_request;
- if (! (req->getIndexId() < c_tableRecordPool.getSize())) {
- jam();
- opPtr.p->m_errorCode = AlterIndxRef::Inconsistency;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, req->getIndexId());
- if (indexPtr.p->tabState != TableRecord::DEFINED) {
- jam();
- opPtr.p->m_errorCode = AlterIndxRef::IndexNotFound;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- if (! indexPtr.p->isIndex()) {
- jam();
- opPtr.p->m_errorCode = AlterIndxRef::NotAnIndex;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- if (req->getOnline())
- indexPtr.p->indexState = TableRecord::IS_BUILDING;
- else
- indexPtr.p->indexState = TableRecord::IS_DROPPING;
-}
-
-void
-Dbdict::alterIndex_toCreateTc(Signal* signal, OpAlterIndexPtr opPtr)
-{
- jam();
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- // request to create index in local TC
- CreateIndxReq* const req = (CreateIndxReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(CreateIndxReq::RT_TC);
- req->setIndexType(indexPtr.p->tableType);
- req->setTableId(indexPtr.p->primaryTableId);
- req->setIndexId(indexPtr.i);
- req->setOnline(true);
- getIndexAttrList(indexPtr, opPtr.p->m_attrList);
- // send
- LinearSectionPtr lsPtr[3];
- lsPtr[0].p = (Uint32*)&opPtr.p->m_attrList;
- lsPtr[0].sz = 1 + opPtr.p->m_attrList.sz;
- sendSignal(calcTcBlockRef(getOwnNodeId()), GSN_CREATE_INDX_REQ,
- signal, CreateIndxReq::SignalLength, JBB, lsPtr, 1);
-}
-
-void
-Dbdict::alterIndex_fromCreateTc(Signal* signal, OpAlterIndexPtr opPtr)
-{
- jam();
- // mark created in local TC
- if (! opPtr.p->hasError()) {
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- indexPtr.p->indexLocal |= TableRecord::IL_CREATED_TC;
- }
- // forward CONF or REF to master
- ndbrequire(opPtr.p->m_requestType == AlterIndxReq::RT_DICT_TC);
- alterIndex_sendReply(signal, opPtr, false);
-}
-
-void
-Dbdict::alterIndex_toDropTc(Signal* signal, OpAlterIndexPtr opPtr)
-{
- jam();
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- // broken index
- if (! (indexPtr.p->indexLocal & TableRecord::IL_CREATED_TC)) {
- jam();
- alterIndex_sendReply(signal, opPtr, false);
- return;
- }
- // request to drop in local TC
- DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(DropIndxReq::RT_TC);
- req->setTableId(indexPtr.p->primaryTableId);
- req->setIndexId(indexPtr.i);
- req->setIndexVersion(indexPtr.p->tableVersion);
- // send
- sendSignal(calcTcBlockRef(getOwnNodeId()), GSN_DROP_INDX_REQ,
- signal, DropIndxReq::SignalLength, JBB);
-}
-
-void
-Dbdict::alterIndex_fromDropTc(Signal* signal, OpAlterIndexPtr opPtr)
-{
- jam();
- ndbrequire(opPtr.p->m_requestType == AlterIndxReq::RT_DICT_TC);
- if (! opPtr.p->hasError()) {
- // mark dropped in local TC
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- indexPtr.p->indexLocal &= ~TableRecord::IL_CREATED_TC;
- }
- // forward CONF or REF to master
- alterIndex_sendReply(signal, opPtr, false);
-}
-
-void
-Dbdict::alterIndex_toCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr)
-{
- jam();
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- // start creation of index triggers
- CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(CreateTrigReq::RT_ALTER_INDEX);
- req->addRequestFlag(opPtr.p->m_requestFlag);
- req->setTableId(opPtr.p->m_request.getTableId());
- req->setIndexId(opPtr.p->m_request.getIndexId());
- req->setTriggerId(RNIL);
- req->setTriggerActionTime(TriggerActionTime::TA_AFTER);
- req->setMonitorAllAttributes(false);
- req->setOnline(true); // alter online after create
- req->setReceiverRef(0); // implicit for index triggers
- getIndexAttrMask(indexPtr, req->getAttributeMask());
- // name section
- char triggerName[MAX_TAB_NAME_SIZE];
- Uint32 buffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SimpleProperties (SP) string
- LinearWriter w(buffer, sizeof(buffer) >> 2);
- LinearSectionPtr lsPtr[3];
- if (indexPtr.p->isHashIndex()) {
- req->setTriggerType(TriggerType::SECONDARY_INDEX);
- req->setMonitorReplicas(false);
- // insert
- if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
- req->setTriggerId(indexPtr.p->insertTriggerId);
- req->setTriggerEvent(TriggerEvent::TE_INSERT);
- sprintf(triggerName, "NDB$INDEX_%u_INSERT", opPtr.p->m_request.getIndexId());
- w.reset();
- w.add(CreateTrigReq::TriggerNameKey, triggerName);
- lsPtr[0].p = buffer;
- lsPtr[0].sz = w.getWordsUsed();
- sendSignal(reference(), GSN_CREATE_TRIG_REQ,
- signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
- // update
- if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
- req->setTriggerId(indexPtr.p->updateTriggerId);
- req->setTriggerEvent(TriggerEvent::TE_UPDATE);
- sprintf(triggerName, "NDB$INDEX_%u_UPDATE", opPtr.p->m_request.getIndexId());
- w.reset();
- w.add(CreateTrigReq::TriggerNameKey, triggerName);
- lsPtr[0].p = buffer;
- lsPtr[0].sz = w.getWordsUsed();
- sendSignal(reference(), GSN_CREATE_TRIG_REQ,
- signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
- // delete
- if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
- req->setTriggerId(indexPtr.p->deleteTriggerId);
- req->setTriggerEvent(TriggerEvent::TE_DELETE);
- sprintf(triggerName, "NDB$INDEX_%u_DELETE", opPtr.p->m_request.getIndexId());
- w.reset();
- w.add(CreateTrigReq::TriggerNameKey, triggerName);
- lsPtr[0].p = buffer;
- lsPtr[0].sz = w.getWordsUsed();
- sendSignal(reference(), GSN_CREATE_TRIG_REQ,
- signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
- // triggers left to create
- opPtr.p->m_triggerCounter = 3;
- return;
- }
- if (indexPtr.p->isOrderedIndex()) {
- req->addRequestFlag(RequestFlag::RF_NOTCTRIGGER);
- req->setTriggerType(TriggerType::ORDERED_INDEX);
- req->setTriggerActionTime(TriggerActionTime::TA_CUSTOM);
- req->setMonitorReplicas(true);
- // one trigger for 5 events (insert, update, delete, commit, abort)
- if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
- req->setTriggerId(indexPtr.p->customTriggerId);
- req->setTriggerEvent(TriggerEvent::TE_CUSTOM);
- sprintf(triggerName, "NDB$INDEX_%u_CUSTOM", opPtr.p->m_request.getIndexId());
- w.reset();
- w.add(CreateTrigReq::TriggerNameKey, triggerName);
- lsPtr[0].p = buffer;
- lsPtr[0].sz = w.getWordsUsed();
- sendSignal(reference(), GSN_CREATE_TRIG_REQ,
- signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
- // triggers left to create
- opPtr.p->m_triggerCounter = 1;
- return;
- }
- ndbrequire(false);
-}
-
-void
-Dbdict::alterIndex_fromCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr)
-{
- jam();
- ndbrequire(opPtr.p->m_triggerCounter != 0);
- if (--opPtr.p->m_triggerCounter != 0) {
- jam();
- return;
- }
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = AlterIndxReq::RT_DICT_ABORT;
- alterIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- if(opPtr.p->m_requestType != AlterIndxReq::RT_SYSTEMRESTART){
- // send build request
- alterIndex_toBuildIndex(signal, opPtr);
- return;
- }
-
- /**
- * During system restart,
- * leave index in activated but not build state.
- *
- * Build a bit later when REDO has been run
- */
- alterIndex_sendReply(signal, opPtr, true);
-}
-
-void
-Dbdict::alterIndex_toDropTrigger(Signal* signal, OpAlterIndexPtr opPtr)
-{
- jam();
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- // start drop of index triggers
- DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(DropTrigReq::RT_ALTER_INDEX);
- req->setTableId(opPtr.p->m_request.getTableId());
- req->setIndexId(opPtr.p->m_request.getIndexId());
- req->setTriggerInfo(0); // not used
- opPtr.p->m_triggerCounter = 0;
- // insert
- if (indexPtr.p->insertTriggerId != RNIL) {
- req->setTriggerId(indexPtr.p->insertTriggerId);
- sendSignal(reference(), GSN_DROP_TRIG_REQ,
- signal, DropTrigReq::SignalLength, JBB);
- opPtr.p->m_triggerCounter++;
- }
- // update
- if (indexPtr.p->updateTriggerId != RNIL) {
- req->setTriggerId(indexPtr.p->updateTriggerId);
- sendSignal(reference(), GSN_DROP_TRIG_REQ,
- signal, DropTrigReq::SignalLength, JBB);
- opPtr.p->m_triggerCounter++;
- }
- // delete
- if (indexPtr.p->deleteTriggerId != RNIL) {
- req->setTriggerId(indexPtr.p->deleteTriggerId);
- sendSignal(reference(), GSN_DROP_TRIG_REQ,
- signal, DropTrigReq::SignalLength, JBB);
- opPtr.p->m_triggerCounter++;
- }
- // custom
- if (indexPtr.p->customTriggerId != RNIL) {
- req->setTriggerId(indexPtr.p->customTriggerId);
- sendSignal(reference(), GSN_DROP_TRIG_REQ,
- signal, DropTrigReq::SignalLength, JBB);
- opPtr.p->m_triggerCounter++;
- }
- // build
- if (indexPtr.p->buildTriggerId != RNIL) {
- req->setTriggerId(indexPtr.p->buildTriggerId);
- sendSignal(reference(), GSN_DROP_TRIG_REQ,
- signal, DropTrigReq::SignalLength, JBB);
- opPtr.p->m_triggerCounter++;
- }
- if (opPtr.p->m_triggerCounter == 0) {
- // drop in each TC
- jam();
- opPtr.p->m_requestType = AlterIndxReq::RT_DICT_TC;
- alterIndex_sendSlaveReq(signal, opPtr);
- }
-}
-
-void
-Dbdict::alterIndex_fromDropTrigger(Signal* signal, OpAlterIndexPtr opPtr)
-{
- jam();
- ndbrequire(opPtr.p->m_triggerCounter != 0);
- if (--opPtr.p->m_triggerCounter != 0) {
- jam();
- return;
- }
- // finally drop index in each TC
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- const bool isHashIndex = indexPtr.p->isHashIndex();
- const bool isOrderedIndex = indexPtr.p->isOrderedIndex();
- ndbrequire(isHashIndex != isOrderedIndex); // xor
- if (isHashIndex)
- opPtr.p->m_requestType = AlterIndxReq::RT_DICT_TC;
- if (isOrderedIndex)
- opPtr.p->m_requestType = AlterIndxReq::RT_DICT_COMMIT;
- alterIndex_sendSlaveReq(signal, opPtr);
-}
-
-void
-Dbdict::alterIndex_toBuildIndex(Signal* signal, OpAlterIndexPtr opPtr)
-{
- jam();
- // get index and table records
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
- // build request to self (short signal)
- BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(BuildIndxReq::RT_ALTER_INDEX);
- req->addRequestFlag(opPtr.p->m_requestFlag);
- req->setBuildId(0); // not used
- req->setBuildKey(0); // not used
- req->setIndexType(indexPtr.p->tableType);
- req->setIndexId(indexPtr.i);
- req->setTableId(indexPtr.p->primaryTableId);
- req->setParallelism(16);
- // send
- sendSignal(reference(), GSN_BUILDINDXREQ,
- signal, BuildIndxReq::SignalLength, JBB);
-}
-
-void
-Dbdict::alterIndex_fromBuildIndex(Signal* signal, OpAlterIndexPtr opPtr)
-{
- jam();
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = AlterIndxReq::RT_DICT_ABORT;
- alterIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- opPtr.p->m_requestType = AlterIndxReq::RT_DICT_COMMIT;
- alterIndex_sendSlaveReq(signal, opPtr);
-}
-
-void
-Dbdict::alterIndex_slaveCommit(Signal* signal, OpAlterIndexPtr opPtr)
-{
- jam();
- // get index record
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- indexPtr.p->indexState = TableRecord::IS_ONLINE;
-}
-
-void
-Dbdict::alterIndex_slaveAbort(Signal* signal, OpAlterIndexPtr opPtr)
-{
- jam();
- // find index record
- const Uint32 indexId = opPtr.p->m_request.getIndexId();
- if (indexId >= c_tableRecordPool.getSize())
- return;
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, indexId);
- if (! indexPtr.p->isIndex())
- return;
- // mark broken
- indexPtr.p->indexState = TableRecord::IS_BROKEN;
-}
-
-void
-Dbdict::alterIndex_sendSlaveReq(Signal* signal, OpAlterIndexPtr opPtr)
-{
- AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
- *req = opPtr.p->m_request;
- req->setUserRef(opPtr.p->m_coordinatorRef);
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(opPtr.p->m_requestType);
- req->addRequestFlag(opPtr.p->m_requestFlag);
- NdbNodeBitmask receiverNodes = c_aliveNodes;
- if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) {
- receiverNodes.clear();
- receiverNodes.set(getOwnNodeId());
- }
- opPtr.p->m_signalCounter = receiverNodes;
- NodeReceiverGroup rg(DBDICT, receiverNodes);
- sendSignal(rg, GSN_ALTER_INDX_REQ,
- signal, AlterIndxReq::SignalLength, JBB);
-}
-
-void
-Dbdict::alterIndex_sendReply(Signal* signal, OpAlterIndexPtr opPtr,
- bool toUser)
-{
- AlterIndxRef* rep = (AlterIndxRef*)signal->getDataPtrSend();
- Uint32 gsn = GSN_ALTER_INDX_CONF;
- Uint32 length = AlterIndxConf::InternalLength;
- bool sendRef = opPtr.p->hasError();
- if (! toUser) {
- rep->setUserRef(opPtr.p->m_coordinatorRef);
- rep->setConnectionPtr(opPtr.p->key);
- rep->setRequestType(opPtr.p->m_requestType);
- if (opPtr.p->m_requestType == AlterIndxReq::RT_DICT_ABORT)
- sendRef = false;
- } else {
- rep->setUserRef(opPtr.p->m_request.getUserRef());
- rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
- rep->setRequestType(opPtr.p->m_request.getRequestType());
- length = AlterIndxConf::SignalLength;
- }
- rep->setTableId(opPtr.p->m_request.getTableId());
- rep->setIndexId(opPtr.p->m_request.getIndexId());
- if (sendRef) {
- if (opPtr.p->m_errorNode == 0)
- opPtr.p->m_errorNode = getOwnNodeId();
- rep->setErrorCode(opPtr.p->m_errorCode);
- rep->setErrorLine(opPtr.p->m_errorLine);
- rep->setErrorNode(opPtr.p->m_errorNode);
- gsn = GSN_ALTER_INDX_REF;
- length = AlterIndxRef::SignalLength;
- }
- sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
-}
-
-/**
- * MODULE: Build index
- *
- * Build index or all indexes on a table. Request type:
- *
- * RT_USER - normal user request, not yet used
- * RT_ALTER_INDEX - from alter index
- * RT_SYSTEMRESTART - system restart, rebuild indexes (after REDO)
- * RT_DICT_PREPARE - prepare participants
- * RT_DICT_TRIX - to participant on way to local TRIX
- * RT_DICT_COMMIT - commit in each participant
- * RT_DICT_ABORT - abort
- * RT_TRIX - to local TRIX
- */
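
For concreteness, the RT_USER / RT_ALTER_INDEX entry points above expect a BuildIndxReq shaped like the one the block builds for itself in alterIndex_toBuildIndex. A hedged sketch, where myOpKey is a hypothetical placeholder and indexPtr is assumed to point at the index's TableRecord:

    BuildIndxReq* req = (BuildIndxReq*)signal->getDataPtrSend();
    req->setUserRef(reference());                 // reply target
    req->setConnectionPtr(myOpKey);               // hypothetical operation key
    req->setRequestType(BuildIndxReq::RT_USER);   // plain user request
    req->setBuildId(0);                           // not used
    req->setBuildKey(0);                          // not used
    req->setIndexType(indexPtr.p->tableType);     // hash or ordered index
    req->setIndexId(indexPtr.i);
    req->setTableId(indexPtr.p->primaryTableId);
    req->setParallelism(16);                      // same default used internally
    sendSignal(calcDictBlockRef(c_masterNodeId), GSN_BUILDINDXREQ,
               signal, BuildIndxReq::SignalLength, JBB);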
-
-void
-Dbdict::execBUILDINDXREQ(Signal* signal)
-{
- jamEntry();
- BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
- OpBuildIndexPtr opPtr;
- const Uint32 senderRef = signal->senderBlockRef();
- const BuildIndxReq::RequestType requestType = req->getRequestType();
- if (requestType == BuildIndxReq::RT_USER ||
- requestType == BuildIndxReq::RT_ALTER_INDEX ||
- requestType == BuildIndxReq::RT_SYSTEMRESTART) {
- jam();
-
- const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL;
- NdbNodeBitmask receiverNodes = c_aliveNodes;
- if (isLocal) {
- receiverNodes.clear();
- receiverNodes.set(getOwnNodeId());
- }
-
- if (signal->getLength() == BuildIndxReq::SignalLength) {
- jam();
-
- if (!isLocal && getOwnNodeId() != c_masterNodeId) {
- jam();
-
- releaseSections(signal);
- OpBuildIndex opBad;
- opPtr.p = &opBad;
- opPtr.p->save(req);
- opPtr.p->m_errorCode = BuildIndxRef::NotMaster;
- opPtr.p->m_errorLine = __LINE__;
- opPtr.p->m_errorNode = c_masterNodeId;
- buildIndex_sendReply(signal, opPtr, true);
- return;
- }
- // forward initial request plus operation key to all
- req->setOpKey(++c_opRecordSequence);
- NodeReceiverGroup rg(DBDICT, receiverNodes);
- sendSignal(rg, GSN_BUILDINDXREQ,
- signal, BuildIndxReq::SignalLength + 1, JBB);
- return;
- }
- // seize operation record
- ndbrequire(signal->getLength() == BuildIndxReq::SignalLength + 1);
- const Uint32 opKey = req->getOpKey();
- OpBuildIndex opBusy;
- if (! c_opBuildIndex.seize(opPtr))
- opPtr.p = &opBusy;
- opPtr.p->save(req);
- opPtr.p->m_coordinatorRef = senderRef;
- opPtr.p->m_isMaster = (senderRef == reference());
- opPtr.p->key = opKey;
- opPtr.p->m_requestType = BuildIndxReq::RT_DICT_PREPARE;
- if (opPtr.p == &opBusy) {
- jam();
- opPtr.p->m_errorCode = BuildIndxRef::Busy;
- opPtr.p->m_errorLine = __LINE__;
- buildIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
- return;
- }
- c_opBuildIndex.add(opPtr);
- // master expects to hear from all
- opPtr.p->m_signalCounter = receiverNodes;
- buildIndex_sendReply(signal, opPtr, false);
- return;
- }
- c_opBuildIndex.find(opPtr, req->getConnectionPtr());
- if (! opPtr.isNull()) {
- opPtr.p->m_requestType = requestType;
- if (requestType == BuildIndxReq::RT_DICT_TRIX) {
- jam();
- buildIndex_buildTrix(signal, opPtr);
- return;
- }
- if (requestType == BuildIndxReq::RT_DICT_TC ||
- requestType == BuildIndxReq::RT_DICT_TUX) {
- jam();
- buildIndex_toOnline(signal, opPtr);
- return;
- }
- if (requestType == BuildIndxReq::RT_DICT_COMMIT ||
- requestType == BuildIndxReq::RT_DICT_ABORT) {
- jam();
- buildIndex_sendReply(signal, opPtr, false);
- // done in slave
- if (! opPtr.p->m_isMaster)
- c_opBuildIndex.release(opPtr);
- return;
- }
- }
- jam();
- // return to sender
- OpBuildIndex opBad;
- opPtr.p = &opBad;
- opPtr.p->save(req);
- opPtr.p->m_errorCode = BuildIndxRef::BadRequestType;
- opPtr.p->m_errorLine = __LINE__;
- buildIndex_sendReply(signal, opPtr, true);
-}
-
-void
-Dbdict::execBUILDINDXCONF(Signal* signal)
-{
- jamEntry();
- ndbrequire(signal->getNoOfSections() == 0);
- BuildIndxConf* conf = (BuildIndxConf*)signal->getDataPtrSend();
- buildIndex_recvReply(signal, conf, 0);
-}
-
-void
-Dbdict::execBUILDINDXREF(Signal* signal)
-{
- jamEntry();
- BuildIndxRef* ref = (BuildIndxRef*)signal->getDataPtrSend();
- buildIndex_recvReply(signal, ref->getConf(), ref);
-}
-
-void
-Dbdict::buildIndex_recvReply(Signal* signal, const BuildIndxConf* conf,
- const BuildIndxRef* ref)
-{
- jam();
- const Uint32 senderRef = signal->senderBlockRef();
- const BuildIndxReq::RequestType requestType = conf->getRequestType();
- const Uint32 key = conf->getConnectionPtr();
- if (requestType == BuildIndxReq::RT_ALTER_INDEX) {
- jam();
- // part of alter index operation
- OpAlterIndexPtr opPtr;
- c_opAlterIndex.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- alterIndex_fromBuildIndex(signal, opPtr);
- return;
- }
-
- if (requestType == BuildIndxReq::RT_SYSTEMRESTART) {
- jam();
- if (ref == 0) {
- infoEvent("DICT: index %u rebuild done", (unsigned)key);
- } else {
- warningEvent("DICT: index %u rebuild failed: code=%d line=%d node=%d",
- (unsigned)key, ref->getErrorCode());
- }
- rebuildIndexes(signal, key + 1);
- return;
- }
-
- OpBuildIndexPtr opPtr;
- c_opBuildIndex.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- if (requestType == BuildIndxReq::RT_TRIX) {
- jam();
- // forward to master
- opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TRIX;
- buildIndex_sendReply(signal, opPtr, false);
- return;
- }
- ndbrequire(opPtr.p->m_isMaster);
- ndbrequire(opPtr.p->m_requestType == requestType);
- opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
- if (! opPtr.p->m_signalCounter.done()) {
- jam();
- return;
- }
- if (requestType == BuildIndxReq::RT_DICT_COMMIT ||
- requestType == BuildIndxReq::RT_DICT_ABORT) {
- jam();
- // send reply to user
- buildIndex_sendReply(signal, opPtr, true);
- c_opBuildIndex.release(opPtr);
- return;
- }
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = BuildIndxReq::RT_DICT_ABORT;
- buildIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- if (indexPtr.p->isHashIndex()) {
- if (requestType == BuildIndxReq::RT_DICT_PREPARE) {
- jam();
- if (! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD)) {
- buildIndex_toCreateConstr(signal, opPtr);
- } else {
- opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TC;
- buildIndex_sendSlaveReq(signal, opPtr);
- }
- return;
- }
- if (requestType == BuildIndxReq::RT_DICT_TRIX) {
- jam();
- ndbrequire(! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD));
- buildIndex_toDropConstr(signal, opPtr);
- return;
- }
- if (requestType == BuildIndxReq::RT_DICT_TC) {
- jam();
- opPtr.p->m_requestType = BuildIndxReq::RT_DICT_COMMIT;
- buildIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- }
- if (indexPtr.p->isOrderedIndex()) {
- if (requestType == BuildIndxReq::RT_DICT_PREPARE) {
- jam();
- if (! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD)) {
- opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TRIX;
- buildIndex_sendSlaveReq(signal, opPtr);
- } else {
- opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TUX;
- buildIndex_sendSlaveReq(signal, opPtr);
- }
- return;
- }
- if (requestType == BuildIndxReq::RT_DICT_TRIX) {
- jam();
- ndbrequire(! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD));
- opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TUX;
- buildIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- if (requestType == BuildIndxReq::RT_DICT_TUX) {
- jam();
- opPtr.p->m_requestType = BuildIndxReq::RT_DICT_COMMIT;
- buildIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- }
- ndbrequire(false);
-}
-
-void
-Dbdict::buildIndex_toCreateConstr(Signal* signal, OpBuildIndexPtr opPtr)
-{
- jam();
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- // request to create constraint trigger
- CreateTrigReq* req = (CreateTrigReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(CreateTrigReq::RT_BUILD_INDEX);
- req->addRequestFlag(0); // none
- req->setTableId(indexPtr.i);
- req->setIndexId(RNIL);
- req->setTriggerId(RNIL);
- req->setTriggerType(TriggerType::READ_ONLY_CONSTRAINT);
- req->setTriggerActionTime(TriggerActionTime::TA_AFTER);
- req->setTriggerEvent(TriggerEvent::TE_UPDATE);
- req->setMonitorReplicas(false);
- req->setMonitorAllAttributes(false);
- req->setOnline(true); // alter online after create
- req->setReceiverRef(0); // no receiver, REF-ed by TUP
- req->getAttributeMask().clear();
- // NDB$PK is last attribute
- req->getAttributeMask().set(indexPtr.p->noOfAttributes - 1);
- // name section
- char triggerName[MAX_TAB_NAME_SIZE];
- Uint32 buffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SimpleProperties (SP) string
- LinearWriter w(buffer, sizeof(buffer) >> 2);
- LinearSectionPtr lsPtr[3];
- sprintf(triggerName, "NDB$INDEX_%u_BUILD", indexPtr.i);
- w.reset();
- w.add(CreateTrigReq::TriggerNameKey, triggerName);
- lsPtr[0].p = buffer;
- lsPtr[0].sz = w.getWordsUsed();
- sendSignal(reference(), GSN_CREATE_TRIG_REQ,
- signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
-}
-
-void
-Dbdict::buildIndex_fromCreateConstr(Signal* signal, OpBuildIndexPtr opPtr)
-{
- jam();
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = BuildIndxReq::RT_DICT_ABORT;
- buildIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TRIX;
- buildIndex_sendSlaveReq(signal, opPtr);
-}
-
-void
-Dbdict::buildIndex_buildTrix(Signal* signal, OpBuildIndexPtr opPtr)
-{
- jam();
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
- // build request
- BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(BuildIndxReq::RT_TRIX);
- req->setBuildId(0); // not yet..
- req->setBuildKey(0); // ..in use
- req->setIndexType(indexPtr.p->tableType);
- req->setIndexId(indexPtr.i);
- req->setTableId(indexPtr.p->primaryTableId);
- req->setParallelism(16);
- if (indexPtr.p->isHashIndex()) {
- jam();
- getIndexAttrList(indexPtr, opPtr.p->m_attrList);
- getTableKeyList(tablePtr, opPtr.p->m_tableKeyList);
- // send
- LinearSectionPtr lsPtr[3];
- lsPtr[0].sz = opPtr.p->m_attrList.sz;
- lsPtr[0].p = opPtr.p->m_attrList.id;
- lsPtr[1].sz = opPtr.p->m_tableKeyList.sz;
- lsPtr[1].p = opPtr.p->m_tableKeyList.id;
- sendSignal(calcTrixBlockRef(getOwnNodeId()), GSN_BUILDINDXREQ,
- signal, BuildIndxReq::SignalLength, JBB, lsPtr, 2);
- return;
- }
- if (indexPtr.p->isOrderedIndex()) {
- jam();
- sendSignal(calcTupBlockRef(getOwnNodeId()), GSN_BUILDINDXREQ,
- signal, BuildIndxReq::SignalLength, JBB);
- return;
- }
- ndbrequire(false);
-}
-
-void
-Dbdict::buildIndex_toDropConstr(Signal* signal, OpBuildIndexPtr opPtr)
-{
- jam();
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- // request to drop constraint trigger
- DropTrigReq* req = (DropTrigReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(DropTrigReq::RT_BUILD_INDEX);
- req->addRequestFlag(0); // none
- req->setTableId(indexPtr.i);
- req->setIndexId(RNIL);
- req->setTriggerId(opPtr.p->m_constrTriggerId);
- req->setTriggerInfo(0); // not used
- sendSignal(reference(), GSN_DROP_TRIG_REQ,
- signal, DropTrigReq::SignalLength, JBB);
-}
-
-void
-Dbdict::buildIndex_fromDropConstr(Signal* signal, OpBuildIndexPtr opPtr)
-{
- jam();
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = BuildIndxReq::RT_DICT_ABORT;
- buildIndex_sendSlaveReq(signal, opPtr);
- return;
- }
- opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TC;
- buildIndex_sendSlaveReq(signal, opPtr);
-}
-
-void
-Dbdict::buildIndex_toOnline(Signal* signal, OpBuildIndexPtr opPtr)
-{
- jam();
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
- // request to set index online in TC or TUX
- AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TC) {
- req->setRequestType(AlterIndxReq::RT_TC);
- } else if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TUX) {
- req->setRequestType(AlterIndxReq::RT_TUX);
- } else {
- ndbrequire(false);
- }
- req->setTableId(tablePtr.i);
- req->setIndexId(indexPtr.i);
- req->setIndexVersion(indexPtr.p->tableVersion);
- req->setOnline(true);
- BlockReference blockRef = 0;
- if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TC) {
- blockRef = calcTcBlockRef(getOwnNodeId());
- } else if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TUX) {
- blockRef = calcTuxBlockRef(getOwnNodeId());
- } else {
- ndbrequire(false);
- }
- // send
- sendSignal(blockRef, GSN_ALTER_INDX_REQ,
- signal, AlterIndxReq::SignalLength, JBB);
-}
-
-void
-Dbdict::buildIndex_fromOnline(Signal* signal, OpBuildIndexPtr opPtr)
-{
- jam();
- // forward to master
- buildIndex_sendReply(signal, opPtr, false);
-}
-
-void
-Dbdict::buildIndex_sendSlaveReq(Signal* signal, OpBuildIndexPtr opPtr)
-{
- BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
- *req = opPtr.p->m_request;
- req->setUserRef(opPtr.p->m_coordinatorRef);
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(opPtr.p->m_requestType);
- req->addRequestFlag(opPtr.p->m_requestFlag);
- if(opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
- {
- opPtr.p->m_signalCounter.clearWaitingFor();
- opPtr.p->m_signalCounter.setWaitingFor(getOwnNodeId());
- sendSignal(reference(), GSN_BUILDINDXREQ,
- signal, BuildIndxReq::SignalLength, JBB);
- }
- else
- {
- opPtr.p->m_signalCounter = c_aliveNodes;
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- sendSignal(rg, GSN_BUILDINDXREQ,
- signal, BuildIndxReq::SignalLength, JBB);
- }
-}
-
-void
-Dbdict::buildIndex_sendReply(Signal* signal, OpBuildIndexPtr opPtr,
- bool toUser)
-{
- BuildIndxRef* rep = (BuildIndxRef*)signal->getDataPtrSend();
- Uint32 gsn = GSN_BUILDINDXCONF;
- Uint32 length = BuildIndxConf::InternalLength;
- bool sendRef = opPtr.p->hasError();
- if (! toUser) {
- rep->setUserRef(opPtr.p->m_coordinatorRef);
- rep->setConnectionPtr(opPtr.p->key);
- rep->setRequestType(opPtr.p->m_requestType);
- if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_ABORT)
- sendRef = false;
- } else {
- rep->setUserRef(opPtr.p->m_request.getUserRef());
- rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
- rep->setRequestType(opPtr.p->m_request.getRequestType());
- length = BuildIndxConf::SignalLength;
- }
- rep->setIndexType(opPtr.p->m_request.getIndexType());
- rep->setTableId(opPtr.p->m_request.getTableId());
- rep->setIndexId(opPtr.p->m_request.getIndexId());
- if (sendRef) {
- rep->setErrorCode(opPtr.p->m_errorCode);
- rep->masterNodeId = opPtr.p->m_errorNode;
- gsn = GSN_BUILDINDXREF;
- length = BuildIndxRef::SignalLength;
- }
- sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
-}
-
-/**
- * MODULE: Create trigger
- *
- * Create trigger in all DICT blocks. Optionally start alter trigger
- * operation to set the trigger online.
- *
- * Request type received in REQ and returned in CONF/REF:
- *
- * RT_USER - normal user e.g. BACKUP
- * RT_ALTER_INDEX - from alter index online
- * RT_DICT_PREPARE - seize operation in each DICT
- * RT_DICT_COMMIT - commit create in each DICT
- * RT_TC - sending to TC (operation alter trigger)
- * RT_LQH - sending to LQH (operation alter trigger)
- */
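
An RT_USER request (e.g. from BACKUP) follows the same wire format the block uses internally for index triggers: a fixed-size CreateTrigReq plus a trigger-name section. A minimal sketch, reusing only setters and helpers that appear in this diff; myOpKey, myTableId and myReceiverRef are hypothetical placeholders:

    CreateTrigReq* req = (CreateTrigReq*)signal->getDataPtrSend();
    req->setUserRef(reference());
    req->setConnectionPtr(myOpKey);                    // hypothetical operation key
    req->setRequestType(CreateTrigReq::RT_USER);
    req->setTableId(myTableId);                        // hypothetical monitored table
    req->setIndexId(RNIL);                             // not an index trigger
    req->setTriggerId(RNIL);                           // DICT master assigns the id
    req->setTriggerType(TriggerType::SUBSCRIPTION);    // the type accepted for RT_USER
    req->setTriggerActionTime(TriggerActionTime::TA_AFTER);
    req->setTriggerEvent(TriggerEvent::TE_UPDATE);
    req->setMonitorReplicas(false);
    req->setMonitorAllAttributes(false);
    req->setOnline(true);                              // alter online after create
    req->setReceiverRef(myReceiverRef);                // hypothetical receiver block
    req->getAttributeMask().clear();
    // trigger name travels in a SimpleProperties section
    char triggerName[MAX_TAB_NAME_SIZE];
    Uint32 buffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)];
    LinearWriter w(buffer, sizeof(buffer) >> 2);
    LinearSectionPtr lsPtr[3];
    sprintf(triggerName, "MY$EXAMPLE_TRIGGER");        // hypothetical name
    w.reset();
    w.add(CreateTrigReq::TriggerNameKey, triggerName);
    lsPtr[0].p = buffer;
    lsPtr[0].sz = w.getWordsUsed();
    sendSignal(calcDictBlockRef(c_masterNodeId), GSN_CREATE_TRIG_REQ,
               signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);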
-
-void
-Dbdict::execCREATE_TRIG_REQ(Signal* signal)
-{
- jamEntry();
- CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend();
- OpCreateTriggerPtr opPtr;
- const Uint32 senderRef = signal->senderBlockRef();
- const CreateTrigReq::RequestType requestType = req->getRequestType();
- if (requestType == CreateTrigReq::RT_USER ||
- requestType == CreateTrigReq::RT_ALTER_INDEX ||
- requestType == CreateTrigReq::RT_BUILD_INDEX) {
- jam();
- if (! assembleFragments(signal)) {
- jam();
- return;
- }
- const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL;
- NdbNodeBitmask receiverNodes = c_aliveNodes;
- if (isLocal) {
- receiverNodes.clear();
- receiverNodes.set(getOwnNodeId());
- }
- if (signal->getLength() == CreateTrigReq::SignalLength) {
- jam();
- if (! isLocal && getOwnNodeId() != c_masterNodeId) {
- jam();
-
- releaseSections(signal);
- OpCreateTrigger opBad;
- opPtr.p = &opBad;
- opPtr.p->save(req);
- opPtr.p->m_errorCode = CreateTrigRef::NotMaster;
- opPtr.p->m_errorLine = __LINE__;
- opPtr.p->m_errorNode = c_masterNodeId;
- createTrigger_sendReply(signal, opPtr, true);
- return;
- }
- // forward initial request plus operation key to all
- req->setOpKey(++c_opRecordSequence);
- NodeReceiverGroup rg(DBDICT, receiverNodes);
- sendSignal(rg, GSN_CREATE_TRIG_REQ,
- signal, CreateTrigReq::SignalLength + 1, JBB);
- return;
- }
- // seize operation record
- ndbrequire(signal->getLength() == CreateTrigReq::SignalLength + 1);
- const Uint32 opKey = req->getOpKey();
- OpCreateTrigger opBusy;
- if (! c_opCreateTrigger.seize(opPtr))
- opPtr.p = &opBusy;
- opPtr.p->save(req);
- opPtr.p->m_coordinatorRef = senderRef;
- opPtr.p->m_isMaster = (senderRef == reference());
- opPtr.p->key = opKey;
- opPtr.p->m_requestType = CreateTrigReq::RT_DICT_PREPARE;
- if (opPtr.p == &opBusy) {
- jam();
- opPtr.p->m_errorCode = CreateTrigRef::Busy;
- opPtr.p->m_errorLine = __LINE__;
- releaseSections(signal);
- createTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
- return;
- }
- c_opCreateTrigger.add(opPtr);
- {
- // save name
- SegmentedSectionPtr ssPtr;
- signal->getSection(ssPtr, CreateTrigReq::TRIGGER_NAME_SECTION);
- SimplePropertiesSectionReader ssReader(ssPtr, getSectionSegmentPool());
- if (ssReader.getKey() != CreateTrigReq::TriggerNameKey ||
- ! ssReader.getString(opPtr.p->m_triggerName)) {
- jam();
- opPtr.p->m_errorCode = CreateTrigRef::InvalidName;
- opPtr.p->m_errorLine = __LINE__;
- releaseSections(signal);
- createTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
- return;
- }
- }
- releaseSections(signal);
- {
- // check that trigger name is unique
- TriggerRecordPtr triggerPtr;
- TriggerRecord keyRecord;
- strcpy(keyRecord.triggerName, opPtr.p->m_triggerName);
- c_triggerRecordHash.find(triggerPtr, keyRecord);
- if (triggerPtr.i != RNIL) {
- jam();
- opPtr.p->m_errorCode = CreateTrigRef::TriggerExists;
- opPtr.p->m_errorLine = __LINE__;
- createTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
- return;
- }
- }
-
- // master expects to hear from all
- if (opPtr.p->m_isMaster)
- opPtr.p->m_signalCounter = receiverNodes;
- // check request in all participants
- createTrigger_slavePrepare(signal, opPtr);
- createTrigger_sendReply(signal, opPtr, false);
- return;
- }
- c_opCreateTrigger.find(opPtr, req->getConnectionPtr());
- if (! opPtr.isNull()) {
- opPtr.p->m_requestType = requestType;
- if (requestType == CreateTrigReq::RT_DICT_CREATE) {
- jam();
- // master has set trigger id
- opPtr.p->m_request.setTriggerId(req->getTriggerId());
- createTrigger_slaveCreate(signal, opPtr);
- createTrigger_sendReply(signal, opPtr, false);
- return;
- }
- if (requestType == CreateTrigReq::RT_DICT_COMMIT ||
- requestType == CreateTrigReq::RT_DICT_ABORT) {
- jam();
- if (requestType == CreateTrigReq::RT_DICT_COMMIT)
- createTrigger_slaveCommit(signal, opPtr);
- else
- createTrigger_slaveAbort(signal, opPtr);
- createTrigger_sendReply(signal, opPtr, false);
- // done in slave
- if (! opPtr.p->m_isMaster)
- c_opCreateTrigger.release(opPtr);
- return;
- }
- }
- jam();
- // return to sender
- releaseSections(signal);
- OpCreateTrigger opBad;
- opPtr.p = &opBad;
- opPtr.p->save(req);
- opPtr.p->m_errorCode = CreateTrigRef::BadRequestType;
- opPtr.p->m_errorLine = __LINE__;
- createTrigger_sendReply(signal, opPtr, true);
-}
-
-void
-Dbdict::execCREATE_TRIG_CONF(Signal* signal)
-{
- jamEntry();
- ndbrequire(signal->getNoOfSections() == 0);
- CreateTrigConf* conf = (CreateTrigConf*)signal->getDataPtrSend();
- createTrigger_recvReply(signal, conf, 0);
-}
-
-void
-Dbdict::execCREATE_TRIG_REF(Signal* signal)
-{
- jamEntry();
- CreateTrigRef* ref = (CreateTrigRef*)signal->getDataPtrSend();
- createTrigger_recvReply(signal, ref->getConf(), ref);
-}
-
-void
-Dbdict::createTrigger_recvReply(Signal* signal, const CreateTrigConf* conf,
- const CreateTrigRef* ref)
-{
- jam();
- const Uint32 senderRef = signal->senderBlockRef();
- const CreateTrigReq::RequestType requestType = conf->getRequestType();
- const Uint32 key = conf->getConnectionPtr();
- if (requestType == CreateTrigReq::RT_ALTER_INDEX) {
- jam();
- // part of alter index operation
- OpAlterIndexPtr opPtr;
- c_opAlterIndex.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- alterIndex_fromCreateTrigger(signal, opPtr);
- return;
- }
- if (requestType == CreateTrigReq::RT_BUILD_INDEX) {
- jam();
- // part of build index operation
- OpBuildIndexPtr opPtr;
- c_opBuildIndex.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- // fill in trigger id
- opPtr.p->m_constrTriggerId = conf->getTriggerId();
- buildIndex_fromCreateConstr(signal, opPtr);
- return;
- }
- if (requestType == CreateTrigReq::RT_TC ||
- requestType == CreateTrigReq::RT_LQH) {
- jam();
- // part of alter trigger operation
- OpAlterTriggerPtr opPtr;
- c_opAlterTrigger.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- alterTrigger_fromCreateLocal(signal, opPtr);
- return;
- }
- OpCreateTriggerPtr opPtr;
- c_opCreateTrigger.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- ndbrequire(opPtr.p->m_isMaster);
- ndbrequire(opPtr.p->m_requestType == requestType);
- opPtr.p->setError(ref);
- opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
- if (! opPtr.p->m_signalCounter.done()) {
- jam();
- return;
- }
- if (requestType == CreateTrigReq::RT_DICT_COMMIT ||
- requestType == CreateTrigReq::RT_DICT_ABORT) {
- jam();
- // send reply to user
- createTrigger_sendReply(signal, opPtr, true);
- c_opCreateTrigger.release(opPtr);
- return;
- }
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = CreateTrigReq::RT_DICT_ABORT;
- createTrigger_sendSlaveReq(signal, opPtr);
- return;
- }
- if (requestType == CreateTrigReq::RT_DICT_PREPARE) {
- jam();
- // seize trigger id in master
- createTrigger_masterSeize(signal, opPtr);
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = CreateTrigReq::RT_DICT_ABORT;
- createTrigger_sendSlaveReq(signal, opPtr);
- return;
- }
- opPtr.p->m_requestType = CreateTrigReq::RT_DICT_CREATE;
- createTrigger_sendSlaveReq(signal, opPtr);
- return;
- }
- if (requestType == CreateTrigReq::RT_DICT_CREATE) {
- jam();
- if (opPtr.p->m_request.getOnline()) {
- jam();
- // start alter online
- createTrigger_toAlterTrigger(signal, opPtr);
- return;
- }
- opPtr.p->m_requestType = CreateTrigReq::RT_DICT_COMMIT;
- createTrigger_sendSlaveReq(signal, opPtr);
- return;
- }
- ndbrequire(false);
-}
-
-void
-Dbdict::createTrigger_slavePrepare(Signal* signal, OpCreateTriggerPtr opPtr)
-{
- jam();
- const CreateTrigReq* const req = &opPtr.p->m_request;
- // check trigger type
- if ((req->getRequestType() == CreateTrigReq::RT_USER &&
- req->getTriggerType() == TriggerType::SUBSCRIPTION) ||
- (req->getRequestType() == CreateTrigReq::RT_ALTER_INDEX &&
- req->getTriggerType() == TriggerType::SECONDARY_INDEX) ||
- (req->getRequestType() == CreateTrigReq::RT_ALTER_INDEX &&
- req->getTriggerType() == TriggerType::ORDERED_INDEX) ||
- (req->getRequestType() == CreateTrigReq::RT_BUILD_INDEX &&
- req->getTriggerType() == TriggerType::READ_ONLY_CONSTRAINT)) {
- ;
- } else {
- jam();
- opPtr.p->m_errorCode = CreateTrigRef::UnsupportedTriggerType;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- // check the table
- const Uint32 tableId = req->getTableId();
- if (! (tableId < c_tableRecordPool.getSize())) {
- jam();
- opPtr.p->m_errorCode = CreateTrigRef::InvalidTable;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, tableId);
- if (tablePtr.p->tabState != TableRecord::DEFINED) {
- jam();
- opPtr.p->m_errorCode = CreateTrigRef::InvalidTable;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
-}
-
-void
-Dbdict::createTrigger_masterSeize(Signal* signal, OpCreateTriggerPtr opPtr)
-{
- TriggerRecordPtr triggerPtr;
- if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) {
- triggerPtr.i = opPtr.p->m_request.getTriggerId();
- } else {
- triggerPtr.i = getFreeTriggerRecord();
- if (triggerPtr.i == RNIL) {
- jam();
- opPtr.p->m_errorCode = CreateTrigRef::TooManyTriggers;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- }
- c_triggerRecordPool.getPtr(triggerPtr);
- initialiseTriggerRecord(triggerPtr);
- triggerPtr.p->triggerState = TriggerRecord::TS_DEFINING;
- opPtr.p->m_request.setTriggerId(triggerPtr.i);
-}
-
-void
-Dbdict::createTrigger_slaveCreate(Signal* signal, OpCreateTriggerPtr opPtr)
-{
- jam();
- const CreateTrigReq* const req = &opPtr.p->m_request;
- // get the trigger record
- const Uint32 triggerId = req->getTriggerId();
- TriggerRecordPtr triggerPtr;
- c_triggerRecordPool.getPtr(triggerPtr, triggerId);
- initialiseTriggerRecord(triggerPtr);
- // fill in trigger data
- strcpy(triggerPtr.p->triggerName, opPtr.p->m_triggerName);
- triggerPtr.p->triggerId = triggerId;
- triggerPtr.p->tableId = req->getTableId();
- triggerPtr.p->indexId = RNIL;
- triggerPtr.p->triggerType = req->getTriggerType();
- triggerPtr.p->triggerActionTime = req->getTriggerActionTime();
- triggerPtr.p->triggerEvent = req->getTriggerEvent();
- triggerPtr.p->monitorReplicas = req->getMonitorReplicas();
- triggerPtr.p->monitorAllAttributes = req->getMonitorAllAttributes();
- triggerPtr.p->attributeMask = req->getAttributeMask();
- triggerPtr.p->triggerState = TriggerRecord::TS_OFFLINE;
- // add to hash table
- // ndbout_c("++++++++++++ Adding trigger id %u, %s", triggerPtr.p->triggerId, triggerPtr.p->triggerName);
- c_triggerRecordHash.add(triggerPtr);
- if (triggerPtr.p->triggerType == TriggerType::SECONDARY_INDEX ||
- triggerPtr.p->triggerType == TriggerType::ORDERED_INDEX) {
- jam();
- // connect to index record XXX should be done in caller instead
- triggerPtr.p->indexId = req->getIndexId();
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId);
- switch (triggerPtr.p->triggerEvent) {
- case TriggerEvent::TE_INSERT:
- indexPtr.p->insertTriggerId = triggerPtr.p->triggerId;
- break;
- case TriggerEvent::TE_UPDATE:
- indexPtr.p->updateTriggerId = triggerPtr.p->triggerId;
- break;
- case TriggerEvent::TE_DELETE:
- indexPtr.p->deleteTriggerId = triggerPtr.p->triggerId;
- break;
- case TriggerEvent::TE_CUSTOM:
- indexPtr.p->customTriggerId = triggerPtr.p->triggerId;
- break;
- default:
- ndbrequire(false);
- break;
- }
- }
- if (triggerPtr.p->triggerType == TriggerType::READ_ONLY_CONSTRAINT) {
- jam();
- // connect to index record XXX should be done in caller instead
- triggerPtr.p->indexId = req->getTableId();
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId);
- indexPtr.p->buildTriggerId = triggerPtr.p->triggerId;
- }
-}
-
-void
-Dbdict::createTrigger_toAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr)
-{
- jam();
- AlterTrigReq* req = (AlterTrigReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(AlterTrigReq::RT_CREATE_TRIGGER);
- req->addRequestFlag(opPtr.p->m_requestFlag);
- req->setTableId(opPtr.p->m_request.getTableId());
- req->setTriggerId(opPtr.p->m_request.getTriggerId());
- req->setTriggerInfo(0); // not used
- req->setOnline(true);
- req->setReceiverRef(opPtr.p->m_request.getReceiverRef());
- sendSignal(reference(), GSN_ALTER_TRIG_REQ,
- signal, AlterTrigReq::SignalLength, JBB);
-}
-
-void
-Dbdict::createTrigger_fromAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr)
-{
- jam();
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = CreateTrigReq::RT_DICT_ABORT;
- createTrigger_sendSlaveReq(signal, opPtr);
- return;
- }
- opPtr.p->m_requestType = CreateTrigReq::RT_DICT_COMMIT;
- createTrigger_sendSlaveReq(signal, opPtr);
-}
-
-void
-Dbdict::createTrigger_slaveCommit(Signal* signal, OpCreateTriggerPtr opPtr)
-{
- jam();
- const CreateTrigReq* const req = &opPtr.p->m_request;
- // get the trigger record
- const Uint32 triggerId = req->getTriggerId();
- TriggerRecordPtr triggerPtr;
- c_triggerRecordPool.getPtr(triggerPtr, triggerId);
- if (! req->getOnline()) {
- triggerPtr.p->triggerState = TriggerRecord::TS_OFFLINE;
- } else {
- ndbrequire(triggerPtr.p->triggerState == TriggerRecord::TS_ONLINE);
- }
-}
-
-void
-Dbdict::createTrigger_slaveAbort(Signal* signal, OpCreateTriggerPtr opPtr)
-{
- jam();
-}
-
-void
-Dbdict::createTrigger_sendSlaveReq(Signal* signal, OpCreateTriggerPtr opPtr)
-{
- CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend();
- *req = opPtr.p->m_request;
- req->setUserRef(opPtr.p->m_coordinatorRef);
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(opPtr.p->m_requestType);
- req->addRequestFlag(opPtr.p->m_requestFlag);
- NdbNodeBitmask receiverNodes = c_aliveNodes;
- if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) {
- receiverNodes.clear();
- receiverNodes.set(getOwnNodeId());
- }
- opPtr.p->m_signalCounter = receiverNodes;
- NodeReceiverGroup rg(DBDICT, receiverNodes);
- sendSignal(rg, GSN_CREATE_TRIG_REQ,
- signal, CreateTrigReq::SignalLength, JBB);
-}
-
-void
-Dbdict::createTrigger_sendReply(Signal* signal, OpCreateTriggerPtr opPtr,
- bool toUser)
-{
- CreateTrigRef* rep = (CreateTrigRef*)signal->getDataPtrSend();
- Uint32 gsn = GSN_CREATE_TRIG_CONF;
- Uint32 length = CreateTrigConf::InternalLength;
- bool sendRef = opPtr.p->hasError();
- if (! toUser) {
- rep->setUserRef(opPtr.p->m_coordinatorRef);
- rep->setConnectionPtr(opPtr.p->key);
- rep->setRequestType(opPtr.p->m_requestType);
- if (opPtr.p->m_requestType == CreateTrigReq::RT_DICT_ABORT)
- sendRef = false;
- } else {
- rep->setUserRef(opPtr.p->m_request.getUserRef());
- rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
- rep->setRequestType(opPtr.p->m_request.getRequestType());
- length = CreateTrigConf::SignalLength;
- }
- rep->setTableId(opPtr.p->m_request.getTableId());
- rep->setIndexId(opPtr.p->m_request.getIndexId());
- rep->setTriggerId(opPtr.p->m_request.getTriggerId());
- rep->setTriggerInfo(opPtr.p->m_request.getTriggerInfo());
- if (sendRef) {
- if (opPtr.p->m_errorNode == 0)
- opPtr.p->m_errorNode = getOwnNodeId();
- rep->setErrorCode(opPtr.p->m_errorCode);
- rep->setErrorLine(opPtr.p->m_errorLine);
- rep->setErrorNode(opPtr.p->m_errorNode);
- gsn = GSN_CREATE_TRIG_REF;
- length = CreateTrigRef::SignalLength;
- }
- sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
-}
-
-/**
- * MODULE: Drop trigger.
- */
-
-void
-Dbdict::execDROP_TRIG_REQ(Signal* signal)
-{
- jamEntry();
- DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend();
- OpDropTriggerPtr opPtr;
- const Uint32 senderRef = signal->senderBlockRef();
- const DropTrigReq::RequestType requestType = req->getRequestType();
-
- if (signal->getNoOfSections() > 0) {
- ndbrequire(signal->getNoOfSections() == 1);
- jam();
- TriggerRecord keyRecord;
- OpDropTrigger opTmp;
- opPtr.p=&opTmp;
-
- SegmentedSectionPtr ssPtr;
- signal->getSection(ssPtr, DropTrigReq::TRIGGER_NAME_SECTION);
- SimplePropertiesSectionReader ssReader(ssPtr, getSectionSegmentPool());
- if (ssReader.getKey() != DropTrigReq::TriggerNameKey ||
- ! ssReader.getString(keyRecord.triggerName)) {
- jam();
- opPtr.p->m_errorCode = DropTrigRef::InvalidName;
- opPtr.p->m_errorLine = __LINE__;
- releaseSections(signal);
- dropTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
- return;
- }
- releaseSections(signal);
-
- TriggerRecordPtr triggerPtr;
-
- // ndbout_c("++++++++++++++ Looking for trigger %s", keyRecord.triggerName);
- c_triggerRecordHash.find(triggerPtr, keyRecord);
- if (triggerPtr.i == RNIL) {
- jam();
- req->setTriggerId(RNIL);
- } else {
- jam();
- // ndbout_c("++++++++++ Found trigger %s", triggerPtr.p->triggerName);
- req->setTriggerId(triggerPtr.p->triggerId);
- req->setTableId(triggerPtr.p->tableId);
- }
- }
- if (requestType == DropTrigReq::RT_USER ||
- requestType == DropTrigReq::RT_ALTER_INDEX ||
- requestType == DropTrigReq::RT_BUILD_INDEX) {
- jam();
- if (signal->getLength() == DropTrigReq::SignalLength) {
- if (getOwnNodeId() != c_masterNodeId) {
- jam();
- // forward to DICT master
- sendSignal(calcDictBlockRef(c_masterNodeId), GSN_DROP_TRIG_REQ,
- signal, signal->getLength(), JBB);
- return;
- }
- if (!c_triggerRecordPool.findId(req->getTriggerId())) {
- jam();
- // return to sender
- OpDropTrigger opBad;
- opPtr.p = &opBad;
- opPtr.p->save(req);
- opPtr.p->m_errorCode = DropTrigRef::TriggerNotFound;
- opPtr.p->m_errorLine = __LINE__;
- dropTrigger_sendReply(signal, opPtr, true);
- return;
- }
- // forward initial request plus operation key to all
- req->setOpKey(++c_opRecordSequence);
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- sendSignal(rg, GSN_DROP_TRIG_REQ,
- signal, DropTrigReq::SignalLength + 1, JBB);
- return;
- }
- // seize operation record
- ndbrequire(signal->getLength() == DropTrigReq::SignalLength + 1);
- const Uint32 opKey = req->getOpKey();
- OpDropTrigger opBusy;
- if (! c_opDropTrigger.seize(opPtr))
- opPtr.p = &opBusy;
- opPtr.p->save(req);
- opPtr.p->m_coordinatorRef = senderRef;
- opPtr.p->m_isMaster = (senderRef == reference());
- opPtr.p->key = opKey;
- opPtr.p->m_requestType = DropTrigReq::RT_DICT_PREPARE;
- if (opPtr.p == &opBusy) {
- jam();
- opPtr.p->m_errorCode = DropTrigRef::Busy;
- opPtr.p->m_errorLine = __LINE__;
- dropTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
- return;
- }
- c_opDropTrigger.add(opPtr);
- // master expects to hear from all
- if (opPtr.p->m_isMaster)
- opPtr.p->m_signalCounter = c_aliveNodes;
- dropTrigger_slavePrepare(signal, opPtr);
- dropTrigger_sendReply(signal, opPtr, false);
- return;
- }
- c_opDropTrigger.find(opPtr, req->getConnectionPtr());
- if (! opPtr.isNull()) {
- opPtr.p->m_requestType = requestType;
- if (requestType == DropTrigReq::RT_DICT_COMMIT ||
- requestType == DropTrigReq::RT_DICT_ABORT) {
- jam();
- if (requestType == DropTrigReq::RT_DICT_COMMIT)
- dropTrigger_slaveCommit(signal, opPtr);
- else
- dropTrigger_slaveAbort(signal, opPtr);
- dropTrigger_sendReply(signal, opPtr, false);
- // done in slave
- if (! opPtr.p->m_isMaster)
- c_opDropTrigger.release(opPtr);
- return;
- }
- }
- jam();
- // return to sender
- OpDropTrigger opBad;
- opPtr.p = &opBad;
- opPtr.p->save(req);
- opPtr.p->m_errorCode = DropTrigRef::BadRequestType;
- opPtr.p->m_errorLine = __LINE__;
- dropTrigger_sendReply(signal, opPtr, true);
-}
-
-void
-Dbdict::execDROP_TRIG_CONF(Signal* signal)
-{
- jamEntry();
- DropTrigConf* conf = (DropTrigConf*)signal->getDataPtrSend();
- dropTrigger_recvReply(signal, conf, 0);
-}
-
-void
-Dbdict::execDROP_TRIG_REF(Signal* signal)
-{
- jamEntry();
- DropTrigRef* ref = (DropTrigRef*)signal->getDataPtrSend();
- dropTrigger_recvReply(signal, ref->getConf(), ref);
-}
-
-void
-Dbdict::dropTrigger_recvReply(Signal* signal, const DropTrigConf* conf,
- const DropTrigRef* ref)
-{
- jam();
- const Uint32 senderRef = signal->senderBlockRef();
- const DropTrigReq::RequestType requestType = conf->getRequestType();
- const Uint32 key = conf->getConnectionPtr();
- if (requestType == DropTrigReq::RT_ALTER_INDEX) {
- jam();
- // part of alter index operation
- OpAlterIndexPtr opPtr;
- c_opAlterIndex.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- alterIndex_fromDropTrigger(signal, opPtr);
- return;
- }
- if (requestType == DropTrigReq::RT_BUILD_INDEX) {
- jam();
- // part of build index operation
- OpBuildIndexPtr opPtr;
- c_opBuildIndex.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- buildIndex_fromDropConstr(signal, opPtr);
- return;
- }
- if (requestType == DropTrigReq::RT_TC ||
- requestType == DropTrigReq::RT_LQH) {
- jam();
- // part of alter trigger operation
- OpAlterTriggerPtr opPtr;
- c_opAlterTrigger.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- alterTrigger_fromDropLocal(signal, opPtr);
- return;
- }
- OpDropTriggerPtr opPtr;
- c_opDropTrigger.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- ndbrequire(opPtr.p->m_isMaster);
- ndbrequire(opPtr.p->m_requestType == requestType);
- opPtr.p->setError(ref);
- opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
- if (! opPtr.p->m_signalCounter.done()) {
- jam();
- return;
- }
- if (requestType == DropTrigReq::RT_DICT_COMMIT ||
- requestType == DropTrigReq::RT_DICT_ABORT) {
- jam();
- // send reply to user
- dropTrigger_sendReply(signal, opPtr, true);
- c_opDropTrigger.release(opPtr);
- return;
- }
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = DropTrigReq::RT_DICT_ABORT;
- dropTrigger_sendSlaveReq(signal, opPtr);
- return;
- }
- if (requestType == DropTrigReq::RT_DICT_PREPARE) {
- jam();
- // start alter offline
- dropTrigger_toAlterTrigger(signal, opPtr);
- return;
- }
- ndbrequire(false);
-}
-
-void
-Dbdict::dropTrigger_slavePrepare(Signal* signal, OpDropTriggerPtr opPtr)
-{
- jam();
-}
-
-void
-Dbdict::dropTrigger_toAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr)
-{
- jam();
- AlterTrigReq* req = (AlterTrigReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(AlterTrigReq::RT_DROP_TRIGGER);
- req->setTableId(opPtr.p->m_request.getTableId());
- req->setTriggerId(opPtr.p->m_request.getTriggerId());
- req->setTriggerInfo(0); // not used
- req->setOnline(false);
- req->setReceiverRef(0);
- sendSignal(reference(), GSN_ALTER_TRIG_REQ,
- signal, AlterTrigReq::SignalLength, JBB);
-}
-
-void
-Dbdict::dropTrigger_fromAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr)
-{
- jam();
- // remove in all
- opPtr.p->m_requestType = DropTrigReq::RT_DICT_COMMIT;
- dropTrigger_sendSlaveReq(signal, opPtr);
-}
-
-void
-Dbdict::dropTrigger_sendSlaveReq(Signal* signal, OpDropTriggerPtr opPtr)
-{
- DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend();
- *req = opPtr.p->m_request;
- req->setUserRef(opPtr.p->m_coordinatorRef);
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(opPtr.p->m_requestType);
- req->addRequestFlag(opPtr.p->m_requestFlag);
- opPtr.p->m_signalCounter = c_aliveNodes;
- NodeReceiverGroup rg(DBDICT, c_aliveNodes);
- sendSignal(rg, GSN_DROP_TRIG_REQ,
- signal, DropTrigReq::SignalLength, JBB);
-}
-
-void
-Dbdict::dropTrigger_slaveCommit(Signal* signal, OpDropTriggerPtr opPtr)
-{
- jam();
- const DropTrigReq* const req = &opPtr.p->m_request;
- // get trigger record
- const Uint32 triggerId = req->getTriggerId();
- TriggerRecordPtr triggerPtr;
- c_triggerRecordPool.getPtr(triggerPtr, triggerId);
- if (triggerPtr.p->triggerType == TriggerType::SECONDARY_INDEX ||
- triggerPtr.p->triggerType == TriggerType::ORDERED_INDEX) {
- jam();
- // disconnect from index if index trigger XXX move to drop index
- triggerPtr.p->indexId = req->getIndexId();
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId);
- ndbrequire(! indexPtr.isNull());
- switch (triggerPtr.p->triggerEvent) {
- case TriggerEvent::TE_INSERT:
- indexPtr.p->insertTriggerId = RNIL;
- break;
- case TriggerEvent::TE_UPDATE:
- indexPtr.p->updateTriggerId = RNIL;
- break;
- case TriggerEvent::TE_DELETE:
- indexPtr.p->deleteTriggerId = RNIL;
- break;
- case TriggerEvent::TE_CUSTOM:
- indexPtr.p->customTriggerId = RNIL;
- break;
- default:
- ndbrequire(false);
- break;
- }
- }
- if (triggerPtr.p->triggerType == TriggerType::READ_ONLY_CONSTRAINT) {
- jam();
- // disconnect from index record XXX should be done in caller instead
- triggerPtr.p->indexId = req->getTableId();
- TableRecordPtr indexPtr;
- c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId);
- indexPtr.p->buildTriggerId = RNIL;
- }
- // remove trigger
- // ndbout_c("++++++++++++ Removing trigger id %u, %s", triggerPtr.p->triggerId, triggerPtr.p->triggerName);
- c_triggerRecordHash.remove(triggerPtr);
- triggerPtr.p->triggerState = TriggerRecord::TS_NOT_DEFINED;
-}
-
-void
-Dbdict::dropTrigger_slaveAbort(Signal* signal, OpDropTriggerPtr opPtr)
-{
- jam();
-}
-
-void
-Dbdict::dropTrigger_sendReply(Signal* signal, OpDropTriggerPtr opPtr,
- bool toUser)
-{
- DropTrigRef* rep = (DropTrigRef*)signal->getDataPtrSend();
- Uint32 gsn = GSN_DROP_TRIG_CONF;
- Uint32 length = DropTrigConf::InternalLength;
- bool sendRef = opPtr.p->hasError();
- if (! toUser) {
- rep->setUserRef(opPtr.p->m_coordinatorRef);
- rep->setConnectionPtr(opPtr.p->key);
- rep->setRequestType(opPtr.p->m_requestType);
- if (opPtr.p->m_requestType == DropTrigReq::RT_DICT_ABORT)
- sendRef = false;
- } else {
- rep->setUserRef(opPtr.p->m_request.getUserRef());
- rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
- rep->setRequestType(opPtr.p->m_request.getRequestType());
- length = DropTrigConf::SignalLength;
- }
- rep->setTableId(opPtr.p->m_request.getTableId());
- rep->setIndexId(opPtr.p->m_request.getIndexId());
- rep->setTriggerId(opPtr.p->m_request.getTriggerId());
- if (sendRef) {
- if (opPtr.p->m_errorNode == 0)
- opPtr.p->m_errorNode = getOwnNodeId();
- rep->setErrorCode(opPtr.p->m_errorCode);
- rep->setErrorLine(opPtr.p->m_errorLine);
- rep->setErrorNode(opPtr.p->m_errorNode);
- gsn = GSN_DROP_TRIG_REF;
- length = DropTrigRef::SignalLength;
- }
- sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
-}
-
-/**
- * MODULE: Alter trigger.
- *
- * Alter trigger state. Alter online creates the trigger first in all
- * TC (if index trigger) and then in all LQH-TUP.
- *
- * Request type received in REQ and returned in CONF/REF:
- *
- * RT_USER - normal user e.g. BACKUP
- * RT_CREATE_TRIGGER - from create trigger
- * RT_DROP_TRIGGER - from drop trigger
- * RT_DICT_PREPARE - seize operations and check request
- * RT_DICT_TC - master to each DICT on way to TC
- * RT_DICT_LQH - master to each DICT on way to LQH-TUP
- * RT_DICT_COMMIT - commit state change in each DICT (no reply)
- */
-
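/*
 * [Editor's note] Illustrative sketch, not part of the original file: the
 * phase sequencing driven by alterTrigger_recvReply() below, written as a
 * standalone helper so the online/offline ordering is easy to see.  The
 * enum and function names are local to this sketch.
 */
enum AlterTrigPhase { PREPARE, TC, LQH, COMMIT };

// 'online' mirrors AlterTrigReq::getOnline(); 'tcTrigger' is false when the
// RF_NOTCTRIGGER flag is set, i.e. the trigger only exists in LQH-TUP.
static AlterTrigPhase
nextAlterTrigPhase(AlterTrigPhase p, bool online, bool tcTrigger)
{
  if (!tcTrigger)                            // PREPARE -> LQH -> COMMIT
    return p == PREPARE ? LQH : COMMIT;
  if (online)                                // create: TC before LQH-TUP
    return p == PREPARE ? TC : (p == TC ? LQH : COMMIT);
  // drop/offline: LQH-TUP before TC
  return p == PREPARE ? LQH : (p == LQH ? TC : COMMIT);
}
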
-void
-Dbdict::execALTER_TRIG_REQ(Signal* signal)
-{
- jamEntry();
- AlterTrigReq* const req = (AlterTrigReq*)signal->getDataPtrSend();
- OpAlterTriggerPtr opPtr;
- const Uint32 senderRef = signal->senderBlockRef();
- const AlterTrigReq::RequestType requestType = req->getRequestType();
- if (requestType == AlterTrigReq::RT_USER ||
- requestType == AlterTrigReq::RT_CREATE_TRIGGER ||
- requestType == AlterTrigReq::RT_DROP_TRIGGER) {
- jam();
- const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL;
- NdbNodeBitmask receiverNodes = c_aliveNodes;
- if (isLocal) {
- receiverNodes.clear();
- receiverNodes.set(getOwnNodeId());
- }
- if (signal->getLength() == AlterTrigReq::SignalLength) {
- jam();
- if (! isLocal && getOwnNodeId() != c_masterNodeId) {
- jam();
- // forward to DICT master
- sendSignal(calcDictBlockRef(c_masterNodeId), GSN_ALTER_TRIG_REQ,
- signal, AlterTrigReq::SignalLength, JBB);
- return;
- }
- // forward initial request plus operation key to all
- req->setOpKey(++c_opRecordSequence);
- NodeReceiverGroup rg(DBDICT, receiverNodes);
- sendSignal(rg, GSN_ALTER_TRIG_REQ,
- signal, AlterTrigReq::SignalLength + 1, JBB);
- return;
- }
- // seize operation record
- ndbrequire(signal->getLength() == AlterTrigReq::SignalLength + 1);
- const Uint32 opKey = req->getOpKey();
- OpAlterTrigger opBusy;
- if (! c_opAlterTrigger.seize(opPtr))
- opPtr.p = &opBusy;
- opPtr.p->save(req);
- opPtr.p->m_coordinatorRef = senderRef;
- opPtr.p->m_isMaster = (senderRef == reference());
- opPtr.p->key = opKey;
- opPtr.p->m_requestType = AlterTrigReq::RT_DICT_PREPARE;
- if (opPtr.p == &opBusy) {
- jam();
- opPtr.p->m_errorCode = AlterTrigRef::Busy;
- opPtr.p->m_errorLine = __LINE__;
- alterTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
- return;
- }
- c_opAlterTrigger.add(opPtr);
- // master expects to hear from all
- if (opPtr.p->m_isMaster) {
- opPtr.p->m_nodes = receiverNodes;
- opPtr.p->m_signalCounter = receiverNodes;
- }
- alterTrigger_slavePrepare(signal, opPtr);
- alterTrigger_sendReply(signal, opPtr, false);
- return;
- }
- c_opAlterTrigger.find(opPtr, req->getConnectionPtr());
- if (! opPtr.isNull()) {
- opPtr.p->m_requestType = requestType;
- if (requestType == AlterTrigReq::RT_DICT_TC ||
- requestType == AlterTrigReq::RT_DICT_LQH) {
- jam();
- if (req->getOnline())
- alterTrigger_toCreateLocal(signal, opPtr);
- else
- alterTrigger_toDropLocal(signal, opPtr);
- return;
- }
- if (requestType == AlterTrigReq::RT_DICT_COMMIT ||
- requestType == AlterTrigReq::RT_DICT_ABORT) {
- jam();
- if (requestType == AlterTrigReq::RT_DICT_COMMIT)
- alterTrigger_slaveCommit(signal, opPtr);
- else
- alterTrigger_slaveAbort(signal, opPtr);
- alterTrigger_sendReply(signal, opPtr, false);
- // done in slave
- if (! opPtr.p->m_isMaster)
- c_opAlterTrigger.release(opPtr);
- return;
- }
- }
- jam();
- // return to sender
- OpAlterTrigger opBad;
- opPtr.p = &opBad;
- opPtr.p->save(req);
- opPtr.p->m_errorCode = AlterTrigRef::BadRequestType;
- opPtr.p->m_errorLine = __LINE__;
- alterTrigger_sendReply(signal, opPtr, true);
- return;
-}
-
-void
-Dbdict::execALTER_TRIG_CONF(Signal* signal)
-{
- jamEntry();
- AlterTrigConf* conf = (AlterTrigConf*)signal->getDataPtrSend();
- alterTrigger_recvReply(signal, conf, 0);
-}
-
-void
-Dbdict::execALTER_TRIG_REF(Signal* signal)
-{
- jamEntry();
- AlterTrigRef* ref = (AlterTrigRef*)signal->getDataPtrSend();
- alterTrigger_recvReply(signal, ref->getConf(), ref);
-}
-
-void
-Dbdict::alterTrigger_recvReply(Signal* signal, const AlterTrigConf* conf,
- const AlterTrigRef* ref)
-{
- jam();
- const Uint32 senderRef = signal->senderBlockRef();
- const AlterTrigReq::RequestType requestType = conf->getRequestType();
- const Uint32 key = conf->getConnectionPtr();
- if (requestType == AlterTrigReq::RT_CREATE_TRIGGER) {
- jam();
- // part of create trigger operation
- OpCreateTriggerPtr opPtr;
- c_opCreateTrigger.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- createTrigger_fromAlterTrigger(signal, opPtr);
- return;
- }
- if (requestType == AlterTrigReq::RT_DROP_TRIGGER) {
- jam();
- // part of drop trigger operation
- OpDropTriggerPtr opPtr;
- c_opDropTrigger.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- opPtr.p->setError(ref);
- dropTrigger_fromAlterTrigger(signal, opPtr);
- return;
- }
- OpAlterTriggerPtr opPtr;
- c_opAlterTrigger.find(opPtr, key);
- ndbrequire(! opPtr.isNull());
- ndbrequire(opPtr.p->m_isMaster);
- ndbrequire(opPtr.p->m_requestType == requestType);
- /*
-  * If a REF on drop trigger, caused by a non-existent trigger,
-  * comes from any node but the master node - ignore it and
-  * remove the node from further ALTER_TRIG communication.
-  * This will happen if a new node has started since the
-  * trigger was created.
- */
- if (ref &&
- refToNode(senderRef) != refToNode(reference()) &&
- opPtr.p->m_request.getRequestType() == AlterTrigReq::RT_DROP_TRIGGER &&
- ref->getErrorCode() == AlterTrigRef::TriggerNotFound) {
- jam();
- ref = 0; // ignore this error
- opPtr.p->m_nodes.clear(refToNode(senderRef)); // remove this from group
- }
- opPtr.p->setError(ref);
- opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
- if (! opPtr.p->m_signalCounter.done()) {
- jam();
- return;
- }
- if (requestType == AlterTrigReq::RT_DICT_COMMIT ||
- requestType == AlterTrigReq::RT_DICT_ABORT) {
- jam();
- // send reply to user
- alterTrigger_sendReply(signal, opPtr, true);
- c_opAlterTrigger.release(opPtr);
- return;
- }
- if (opPtr.p->hasError()) {
- jam();
- opPtr.p->m_requestType = AlterTrigReq::RT_DICT_ABORT;
- alterTrigger_sendSlaveReq(signal, opPtr);
- return;
- }
- if (! (opPtr.p->m_request.getRequestFlag() & RequestFlag::RF_NOTCTRIGGER)) {
- if (requestType == AlterTrigReq::RT_DICT_PREPARE) {
- jam();
- if (opPtr.p->m_request.getOnline())
- opPtr.p->m_requestType = AlterTrigReq::RT_DICT_TC;
- else
- opPtr.p->m_requestType = AlterTrigReq::RT_DICT_LQH;
- alterTrigger_sendSlaveReq(signal, opPtr);
- return;
- }
- if (requestType == AlterTrigReq::RT_DICT_TC) {
- jam();
- if (opPtr.p->m_request.getOnline())
- opPtr.p->m_requestType = AlterTrigReq::RT_DICT_LQH;
- else
- opPtr.p->m_requestType = AlterTrigReq::RT_DICT_COMMIT;
- alterTrigger_sendSlaveReq(signal, opPtr);
- return;
- }
- if (requestType == AlterTrigReq::RT_DICT_LQH) {
- jam();
- if (opPtr.p->m_request.getOnline())
- opPtr.p->m_requestType = AlterTrigReq::RT_DICT_COMMIT;
- else
- opPtr.p->m_requestType = AlterTrigReq::RT_DICT_TC;
- alterTrigger_sendSlaveReq(signal, opPtr);
- return;
- }
- } else {
- if (requestType == AlterTrigReq::RT_DICT_PREPARE) {
- jam();
- opPtr.p->m_requestType = AlterTrigReq::RT_DICT_LQH;
- alterTrigger_sendSlaveReq(signal, opPtr);
- return;
- }
- if (requestType == AlterTrigReq::RT_DICT_LQH) {
- jam();
- opPtr.p->m_requestType = AlterTrigReq::RT_DICT_COMMIT;
- alterTrigger_sendSlaveReq(signal, opPtr);
- return;
- }
- }
- ndbrequire(false);
-}
-
-void
-Dbdict::alterTrigger_slavePrepare(Signal* signal, OpAlterTriggerPtr opPtr)
-{
- jam();
- const AlterTrigReq* const req = &opPtr.p->m_request;
- const Uint32 triggerId = req->getTriggerId();
- TriggerRecordPtr triggerPtr;
- if (! (triggerId < c_triggerRecordPool.getSize())) {
- jam();
- opPtr.p->m_errorCode = AlterTrigRef::TriggerNotFound;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
- c_triggerRecordPool.getPtr(triggerPtr, triggerId);
- if (triggerPtr.p->triggerState == TriggerRecord::TS_NOT_DEFINED) {
- jam();
- opPtr.p->m_errorCode = AlterTrigRef::TriggerNotFound;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
-}
-
-void
-Dbdict::alterTrigger_toCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr)
-{
- jam();
- // find trigger record
- const Uint32 triggerId = opPtr.p->m_request.getTriggerId();
- TriggerRecordPtr triggerPtr;
- c_triggerRecordPool.getPtr(triggerPtr, triggerId);
- CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
- req->setRequestType(CreateTrigReq::RT_TC);
- } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
- req->setRequestType(CreateTrigReq::RT_LQH);
- } else {
- ndbassert(false);
- }
- req->setTableId(triggerPtr.p->tableId);
- req->setIndexId(triggerPtr.p->indexId);
- req->setTriggerId(triggerPtr.i);
- req->setTriggerType(triggerPtr.p->triggerType);
- req->setTriggerActionTime(triggerPtr.p->triggerActionTime);
- req->setTriggerEvent(triggerPtr.p->triggerEvent);
- req->setMonitorReplicas(triggerPtr.p->monitorReplicas);
- req->setMonitorAllAttributes(triggerPtr.p->monitorAllAttributes);
- req->setOnline(true);
- req->setReceiverRef(opPtr.p->m_request.getReceiverRef());
- BlockReference blockRef = 0;
- if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
- blockRef = calcTcBlockRef(getOwnNodeId());
- } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
- blockRef = calcLqhBlockRef(getOwnNodeId());
- } else {
- ndbassert(false);
- }
- req->setAttributeMask(triggerPtr.p->attributeMask);
- sendSignal(blockRef, GSN_CREATE_TRIG_REQ,
- signal, CreateTrigReq::SignalLength, JBB);
-}
-
-void
-Dbdict::alterTrigger_fromCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr)
-{
- jam();
- if (! opPtr.p->hasError()) {
- // mark created locally
- TriggerRecordPtr triggerPtr;
- c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId());
- if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
- triggerPtr.p->triggerLocal |= TriggerRecord::TL_CREATED_TC;
- } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
- triggerPtr.p->triggerLocal |= TriggerRecord::TL_CREATED_LQH;
- } else {
- ndbrequire(false);
- }
- }
- // forward CONF or REF to master
- alterTrigger_sendReply(signal, opPtr, false);
-}
-
-void
-Dbdict::alterTrigger_toDropLocal(Signal* signal, OpAlterTriggerPtr opPtr)
-{
- jam();
- TriggerRecordPtr triggerPtr;
- c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId());
- DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend();
- req->setUserRef(reference());
- req->setConnectionPtr(opPtr.p->key);
- if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
- // broken trigger
- if (! (triggerPtr.p->triggerLocal & TriggerRecord::TL_CREATED_TC)) {
- jam();
- alterTrigger_sendReply(signal, opPtr, false);
- return;
- }
- req->setRequestType(DropTrigReq::RT_TC);
- } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
- // broken trigger
- if (! (triggerPtr.p->triggerLocal & TriggerRecord::TL_CREATED_LQH)) {
- jam();
- alterTrigger_sendReply(signal, opPtr, false);
- return;
- }
- req->setRequestType(DropTrigReq::RT_LQH);
- } else {
- ndbassert(false);
- }
- req->setTableId(triggerPtr.p->tableId);
- req->setIndexId(triggerPtr.p->indexId);
- req->setTriggerId(triggerPtr.i);
- req->setTriggerType(triggerPtr.p->triggerType);
- req->setTriggerActionTime(triggerPtr.p->triggerActionTime);
- req->setTriggerEvent(triggerPtr.p->triggerEvent);
- req->setMonitorReplicas(triggerPtr.p->monitorReplicas);
- req->setMonitorAllAttributes(triggerPtr.p->monitorAllAttributes);
- BlockReference blockRef = 0;
- if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
- blockRef = calcTcBlockRef(getOwnNodeId());
- } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
- blockRef = calcLqhBlockRef(getOwnNodeId());
- } else {
- ndbassert(false);
- }
- sendSignal(blockRef, GSN_DROP_TRIG_REQ,
- signal, DropTrigReq::SignalLength, JBB);
-}
-
-void
-Dbdict::alterTrigger_fromDropLocal(Signal* signal, OpAlterTriggerPtr opPtr)
-{
- jam();
- if (! opPtr.p->hasError()) {
- // mark dropped locally
- TriggerRecordPtr triggerPtr;
- c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId());
- if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
- triggerPtr.p->triggerLocal &= ~TriggerRecord::TL_CREATED_TC;
- } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
- triggerPtr.p->triggerLocal &= ~TriggerRecord::TL_CREATED_LQH;
- } else {
- ndbrequire(false);
- }
- }
- // forward CONF or REF to master
- alterTrigger_sendReply(signal, opPtr, false);
-}
-
-void
-Dbdict::alterTrigger_slaveCommit(Signal* signal, OpAlterTriggerPtr opPtr)
-{
- jam();
- TriggerRecordPtr triggerPtr;
- c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId());
- // set state
- triggerPtr.p->triggerState = TriggerRecord::TS_ONLINE;
-}
-
-void
-Dbdict::alterTrigger_slaveAbort(Signal* signal, OpAlterTriggerPtr opPtr)
-{
- jam();
-}
-
-void
-Dbdict::alterTrigger_sendSlaveReq(Signal* signal, OpAlterTriggerPtr opPtr)
-{
- AlterTrigReq* const req = (AlterTrigReq*)signal->getDataPtrSend();
- *req = opPtr.p->m_request;
- req->setUserRef(opPtr.p->m_coordinatorRef);
- req->setConnectionPtr(opPtr.p->key);
- req->setRequestType(opPtr.p->m_requestType);
- req->addRequestFlag(opPtr.p->m_requestFlag);
- NdbNodeBitmask receiverNodes = c_aliveNodes;
- if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) {
- receiverNodes.clear();
- receiverNodes.set(getOwnNodeId());
- } else {
- opPtr.p->m_nodes.bitAND(receiverNodes);
- receiverNodes = opPtr.p->m_nodes;
- }
- opPtr.p->m_signalCounter = receiverNodes;
- NodeReceiverGroup rg(DBDICT, receiverNodes);
- sendSignal(rg, GSN_ALTER_TRIG_REQ,
- signal, AlterTrigReq::SignalLength, JBB);
-}
-
-void
-Dbdict::alterTrigger_sendReply(Signal* signal, OpAlterTriggerPtr opPtr,
- bool toUser)
-{
- jam();
- AlterTrigRef* rep = (AlterTrigRef*)signal->getDataPtrSend();
- Uint32 gsn = GSN_ALTER_TRIG_CONF;
- Uint32 length = AlterTrigConf::InternalLength;
- bool sendRef = opPtr.p->hasError();
- if (! toUser) {
- rep->setUserRef(opPtr.p->m_coordinatorRef);
- rep->setConnectionPtr(opPtr.p->key);
- rep->setRequestType(opPtr.p->m_requestType);
- if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_ABORT) {
- jam();
- sendRef = false;
- } else {
- jam();
- }
- } else {
- jam();
- rep->setUserRef(opPtr.p->m_request.getUserRef());
- rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
- rep->setRequestType(opPtr.p->m_request.getRequestType());
- length = AlterTrigConf::SignalLength;
- }
- rep->setTableId(opPtr.p->m_request.getTableId());
- rep->setTriggerId(opPtr.p->m_request.getTriggerId());
- if (sendRef) {
- if (opPtr.p->m_errorNode == 0) {
- jam();
- opPtr.p->m_errorNode = getOwnNodeId();
- } else {
- jam();
- }
- rep->setErrorCode(opPtr.p->m_errorCode);
- rep->setErrorLine(opPtr.p->m_errorLine);
- rep->setErrorNode(opPtr.p->m_errorNode);
- gsn = GSN_ALTER_TRIG_REF;
- length = AlterTrigRef::SignalLength;
- }
- sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
-}
-
-/**
- * MODULE: Support routines for index and trigger.
- */
-
-void
-Dbdict::getTableKeyList(TableRecordPtr tablePtr, AttributeList& list)
-{
- jam();
- list.sz = 0;
- for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
- AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
- if (aRec->tupleKey)
- list.id[list.sz++] = aRec->attributeId;
- tAttr = aRec->nextAttrInTable;
- }
-}
-
-// XXX should store the primary attribute id
-void
-Dbdict::getIndexAttr(TableRecordPtr indexPtr, Uint32 itAttr, Uint32* id)
-{
- jam();
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
- AttributeRecord* iaRec = c_attributeRecordPool.getPtr(itAttr);
- for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
- AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
- if (iaRec->equal(*aRec)) {
- id[0] = aRec->attributeId;
- return;
- }
- tAttr = aRec->nextAttrInTable;
- }
- ndbrequire(false);
-}
-
-void
-Dbdict::getIndexAttrList(TableRecordPtr indexPtr, AttributeList& list)
-{
- jam();
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
- list.sz = 0;
- memset(list.id, 0, sizeof(list.id));
- ndbrequire(indexPtr.p->noOfAttributes >= 2);
- Uint32 itAttr = indexPtr.p->firstAttribute;
- for (Uint32 i = 0; i < (Uint32)indexPtr.p->noOfAttributes - 1; i++) {
- getIndexAttr(indexPtr, itAttr, &list.id[list.sz++]);
- AttributeRecord* iaRec = c_attributeRecordPool.getPtr(itAttr);
- itAttr = iaRec->nextAttrInTable;
- }
-}
-
-void
-Dbdict::getIndexAttrMask(TableRecordPtr indexPtr, AttributeMask& mask)
-{
- jam();
- TableRecordPtr tablePtr;
- c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
- mask.clear();
- ndbrequire(indexPtr.p->noOfAttributes >= 2);
- Uint32 itAttr = indexPtr.p->firstAttribute;
- for (Uint32 i = 0; i < (Uint32)indexPtr.p->noOfAttributes - 1; i++) {
- Uint32 id;
- getIndexAttr(indexPtr, itAttr, &id);
- mask.set(id);
- AttributeRecord* iaRec = c_attributeRecordPool.getPtr(itAttr);
- itAttr = iaRec->nextAttrInTable;
- }
-}
-
-/* **************************************************************** */
-/* ---------------------------------------------------------------- */
-/* MODULE: STORE/RESTORE SCHEMA FILE---------------------- */
-/* ---------------------------------------------------------------- */
-/* */
-/* General module used to store the schema file on disk and */
-/* similar function to restore it from disk. */
-/* ---------------------------------------------------------------- */
-/* **************************************************************** */
-
-void
-Dbdict::initSchemaFile(XSchemaFile * xsf, Uint32 firstPage, Uint32 lastPage,
- bool initEntries)
-{
- ndbrequire(lastPage <= xsf->noOfPages);
- for (Uint32 n = firstPage; n < lastPage; n++) {
- SchemaFile * sf = &xsf->schemaPage[n];
- if (initEntries)
- memset(sf, 0, NDB_SF_PAGE_SIZE);
-
- Uint32 ndb_version = NDB_VERSION;
- if (ndb_version < NDB_SF_VERSION_5_0_6)
- ndb_version = NDB_SF_VERSION_5_0_6;
-
- memcpy(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic));
- sf->ByteOrder = 0x12345678;
- sf->NdbVersion = ndb_version;
- sf->FileSize = xsf->noOfPages * NDB_SF_PAGE_SIZE;
- sf->PageNumber = n;
- sf->CheckSum = 0;
- sf->NoOfTableEntries = NDB_SF_PAGE_ENTRIES;
-
- computeChecksum(xsf, n);
- }
-}
-
-void
-Dbdict::resizeSchemaFile(XSchemaFile * xsf, Uint32 noOfPages)
-{
- ndbrequire(noOfPages <= NDB_SF_MAX_PAGES);
- if (xsf->noOfPages < noOfPages) {
- jam();
- Uint32 firstPage = xsf->noOfPages;
- xsf->noOfPages = noOfPages;
- initSchemaFile(xsf, 0, firstPage, false);
- initSchemaFile(xsf, firstPage, xsf->noOfPages, true);
- }
- if (xsf->noOfPages > noOfPages) {
- jam();
- Uint32 tableId = noOfPages * NDB_SF_PAGE_ENTRIES;
- while (tableId < xsf->noOfPages * NDB_SF_PAGE_ENTRIES) {
- SchemaFile::TableEntry * te = getTableEntry(xsf, tableId);
- if (te->m_tableState != SchemaFile::INIT &&
- te->m_tableState != SchemaFile::DROP_TABLE_COMMITTED) {
- ndbrequire(false);
- }
- tableId++;
- }
- xsf->noOfPages = noOfPages;
- initSchemaFile(xsf, 0, xsf->noOfPages, false);
- }
-}
-
-void
-Dbdict::computeChecksum(XSchemaFile * xsf, Uint32 pageNo){
- SchemaFile * sf = &xsf->schemaPage[pageNo];
- sf->CheckSum = 0;
- sf->CheckSum = computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS);
-}
-
-bool
-Dbdict::validateChecksum(const XSchemaFile * xsf){
-
- for (Uint32 n = 0; n < xsf->noOfPages; n++) {
- SchemaFile * sf = &xsf->schemaPage[n];
- Uint32 c = computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS);
- if ( c != 0)
- return false;
- }
- return true;
-}
-
-Uint32
-Dbdict::computeChecksum(const Uint32 * src, Uint32 len){
- Uint32 ret = 0;
- for(Uint32 i = 0; i<len; i++)
- ret ^= src[i];
- return ret;
-}
-
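/*
 * [Editor's note] Illustrative sketch, not part of the original file: the
 * schema page checksum above is a plain XOR over the page, stored so that
 * the XOR of the complete page (checksum word included) becomes zero.  A
 * minimal standalone version of the same scheme:
 */
#include <cstddef>
#include <cstdint>

static uint32_t xorWords(const uint32_t* p, size_t n)
{
  uint32_t x = 0;
  for (size_t i = 0; i < n; i++)
    x ^= p[i];
  return x;
}

// Fill page[checksumPos] so that xorWords(page, n) == 0 afterwards.
static void sealPage(uint32_t* page, size_t n, size_t checksumPos)
{
  page[checksumPos] = 0;
  page[checksumPos] = xorWords(page, n);
}

static bool pageIsValid(const uint32_t* page, size_t n)
{
  return xorWords(page, n) == 0;
}
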
-SchemaFile::TableEntry *
-Dbdict::getTableEntry(XSchemaFile * xsf, Uint32 tableId)
-{
- Uint32 n = tableId / NDB_SF_PAGE_ENTRIES;
- Uint32 i = tableId % NDB_SF_PAGE_ENTRIES;
- ndbrequire(n < xsf->noOfPages);
-
- SchemaFile * sf = &xsf->schemaPage[n];
- return &sf->TableEntries[i];
-}
-
-// global metadata support
-
-int
-Dbdict::getMetaTablePtr(TableRecordPtr& tablePtr, Uint32 tableId, Uint32 tableVersion)
-{
- if (tableId >= c_tableRecordPool.getSize()) {
- return MetaData::InvalidArgument;
- }
- c_tableRecordPool.getPtr(tablePtr, tableId);
- if (tablePtr.p->tabState == TableRecord::NOT_DEFINED) {
- return MetaData::TableNotFound;
- }
- if (tablePtr.p->tableVersion != tableVersion) {
- return MetaData::InvalidTableVersion;
- }
- // online flag is not maintained by DICT
- tablePtr.p->online =
- (tablePtr.p->isTable() && tablePtr.p->tabState == TableRecord::DEFINED) ||
- (tablePtr.p->isIndex() && tablePtr.p->indexState == TableRecord::IS_ONLINE);
- return 0;
-}
-
-int
-Dbdict::getMetaTable(MetaData::Table& table, Uint32 tableId, Uint32 tableVersion)
-{
- int ret;
- TableRecordPtr tablePtr;
- if ((ret = getMetaTablePtr(tablePtr, tableId, tableVersion)) < 0) {
- return ret;
- }
- new (&table) MetaData::Table(*tablePtr.p);
- return 0;
-}
-
-int
-Dbdict::getMetaTable(MetaData::Table& table, const char* tableName)
-{
- int ret;
- TableRecordPtr tablePtr;
- if (strlen(tableName) + 1 > MAX_TAB_NAME_SIZE) {
- return MetaData::InvalidArgument;
- }
- TableRecord keyRecord;
- strcpy(keyRecord.tableName, tableName);
- c_tableRecordHash.find(tablePtr, keyRecord);
- if (tablePtr.i == RNIL) {
- return MetaData::TableNotFound;
- }
- if ((ret = getMetaTablePtr(tablePtr, tablePtr.i, tablePtr.p->tableVersion)) < 0) {
- return ret;
- }
- new (&table) MetaData::Table(*tablePtr.p);
- return 0;
-}
-
-int
-Dbdict::getMetaAttribute(MetaData::Attribute& attr, const MetaData::Table& table, Uint32 attributeId)
-{
- int ret;
- TableRecordPtr tablePtr;
- if ((ret = getMetaTablePtr(tablePtr, table.tableId, table.tableVersion)) < 0) {
- return ret;
- }
- AttributeRecordPtr attrPtr;
- attrPtr.i = tablePtr.p->firstAttribute;
- while (attrPtr.i != RNIL) {
- c_attributeRecordPool.getPtr(attrPtr);
- if (attrPtr.p->attributeId == attributeId)
- break;
- attrPtr.i = attrPtr.p->nextAttrInTable;
- }
- if (attrPtr.i == RNIL) {
- return MetaData::AttributeNotFound;
- }
- new (&attr) MetaData::Attribute(*attrPtr.p);
- return 0;
-}
-
-int
-Dbdict::getMetaAttribute(MetaData::Attribute& attr, const MetaData::Table& table, const char* attributeName)
-{
- int ret;
- TableRecordPtr tablePtr;
- if ((ret = getMetaTablePtr(tablePtr, table.tableId, table.tableVersion)) < 0) {
- return ret;
- }
- AttributeRecordPtr attrPtr;
- attrPtr.i = tablePtr.p->firstAttribute;
- while (attrPtr.i != RNIL) {
- c_attributeRecordPool.getPtr(attrPtr);
- if (strcmp(attrPtr.p->attributeName, attributeName) == 0)
- break;
- attrPtr.i = attrPtr.p->nextAttrInTable;
- }
- if (attrPtr.i == RNIL) {
- return MetaData::AttributeNotFound;
- }
- new (&attr) MetaData::Attribute(*attrPtr.p);
- return 0;
-}
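/*
 * [Editor's note] Illustrative sketch, not part of the original file: the
 * call pattern for the metadata accessors above, as it could look from
 * inside another Dbdict member function.  The table and attribute names
 * are placeholders, and the sketch assumes MetaData::Table/Attribute can
 * be default-constructed; 0 means success, any other return value is a
 * MetaData error code.
 */
#if 0
  MetaData::Table tab;
  MetaData::Attribute attr;
  if (getMetaTable(tab, "sys/def/SOME_TABLE") == 0 &&
      getMetaAttribute(attr, tab, "SOME_ATTR") == 0) {
    // tab and attr now hold copies of the dictionary records
  }
#endif
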
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
deleted file mode 100644
index 68bb9b628d4..00000000000
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ /dev/null
@@ -1,2021 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef DBDICT_H
-#define DBDICT_H
-
-/**
- * Dict : Dictionary Block
- */
-
-#include <ndb_limits.h>
-#include <trigger_definitions.h>
-#include <pc.hpp>
-#include <ArrayList.hpp>
-#include <DLHashTable.hpp>
-#include <CArray.hpp>
-#include <KeyTable2.hpp>
-#include <SimulatedBlock.hpp>
-#include <SimpleProperties.hpp>
-#include <SignalCounter.hpp>
-#include <Bitmask.hpp>
-#include <AttributeList.hpp>
-#include <signaldata/GetTableId.hpp>
-#include <signaldata/GetTabInfo.hpp>
-#include <signaldata/DictTabInfo.hpp>
-#include <signaldata/CreateTable.hpp>
-#include <signaldata/CreateTab.hpp>
-#include <signaldata/DropTable.hpp>
-#include <signaldata/AlterTable.hpp>
-#include <signaldata/AlterTab.hpp>
-#include <signaldata/CreateIndx.hpp>
-#include <signaldata/DropIndx.hpp>
-#include <signaldata/AlterIndx.hpp>
-#include <signaldata/BuildIndx.hpp>
-#include <signaldata/UtilPrepare.hpp>
-#include <signaldata/CreateEvnt.hpp>
-#include <signaldata/CreateTrig.hpp>
-#include <signaldata/DropTrig.hpp>
-#include <signaldata/AlterTrig.hpp>
-#include "SchemaFile.hpp"
-#include <blocks/mutexes.hpp>
-#include <SafeCounter.hpp>
-#include <RequestTracker.hpp>
-
-#ifdef DBDICT_C
-// Debug Macros
-
-/*--------------------------------------------------------------*/
-// Constants for CONTINUEB
-/*--------------------------------------------------------------*/
-#define ZPACK_TABLE_INTO_PAGES 0
-#define ZSEND_GET_TAB_RESPONSE 3
-
-
-/*--------------------------------------------------------------*/
-// Other constants in alphabetical order
-/*--------------------------------------------------------------*/
-#define ZNOMOREPHASES 255
-
-/*--------------------------------------------------------------*/
-// Schema file defines
-/*--------------------------------------------------------------*/
-#define ZSCHEMA_WORDS 4
-
-/*--------------------------------------------------------------*/
-// Page constants
-/*--------------------------------------------------------------*/
-#define ZBAT_SCHEMA_FILE 0 //Variable number of page for NDBFS
-#define ZBAT_TABLE_FILE 1 //Variable number of page for NDBFS
-#define ZPAGE_HEADER_SIZE 32
-#define ZPOS_PAGE_SIZE 16
-#define ZPOS_CHECKSUM 17
-#define ZPOS_VERSION 18
-#define ZPOS_PAGE_HEADER_SIZE 19
-
-/*--------------------------------------------------------------*/
-// Size constants
-/*--------------------------------------------------------------*/
-#define ZFS_CONNECT_SIZE 4
-#define ZSIZE_OF_PAGES_IN_WORDS 8192
-#define ZLOG_SIZE_OF_PAGES_IN_WORDS 13
-#define ZMAX_PAGES_OF_TABLE_DEFINITION 8
-#define ZNUMBER_OF_PAGES (ZMAX_PAGES_OF_TABLE_DEFINITION + 1)
-#define ZNO_OF_FRAGRECORD 5
-
-/*--------------------------------------------------------------*/
-// Error codes
-/*--------------------------------------------------------------*/
-#define ZNODE_FAILURE_ERROR 704
-#endif
-
-/**
- * Systable NDB$EVENTS_0
- */
-
-#define EVENT_SYSTEM_TABLE_NAME "sys/def/NDB$EVENTS_0"
-#define EVENT_SYSTEM_TABLE_LENGTH 6
-
-struct sysTab_NDBEVENTS_0 {
- char NAME[MAX_TAB_NAME_SIZE];
- Uint32 EVENT_TYPE;
- char TABLE_NAME[MAX_TAB_NAME_SIZE];
- Uint32 ATTRIBUTE_MASK[MAXNROFATTRIBUTESINWORDS];
- Uint32 SUBID;
- Uint32 SUBKEY;
-};
-
-/**
- * DICT - This blocks handles all metadata
- */
-class Dbdict: public SimulatedBlock {
-public:
- /*
- * 2.3 RECORD AND FILESIZES
- */
- /**
- * Shared table / index record. Most of this is permanent data stored
- * on disk. Index trigger ids are volatile.
- */
- struct TableRecord : public MetaData::Table {
- /****************************************************
- * Support variables for table handling
- ****************************************************/
-
- /* Active page which is sent to disk */
- Uint32 activePage;
-
- /** File pointer received from disk */
- Uint32 filePtr[2];
-
- /** Pointer to first attribute in table */
- Uint32 firstAttribute;
-
- /* Pointer to first page of table description */
- Uint32 firstPage;
-
- /** Pointer to last attribute in table */
- Uint32 lastAttribute;
-
- /* Temporary record used during add/drop table */
- Uint32 myConnect;
-#ifdef HAVE_TABLE_REORG
- /* Second table used by this table (for table reorg) */
- Uint32 secondTable;
-#endif
- /* Next record in Pool */
- Uint32 nextPool;
-
- /* Next record in hash table */
- Uint32 nextHash;
-
- /* Previous record in Pool */
- Uint32 prevPool;
-
- /* Previous record in hash table */
- Uint32 prevHash;
-
- enum TabState {
- NOT_DEFINED = 0,
- REORG_TABLE_PREPARED = 1,
- DEFINING = 2,
- CHECKED = 3,
- DEFINED = 4,
- PREPARE_DROPPING = 5,
- DROPPING = 6
- };
- TabState tabState;
-
- /* State when returning from TC_SCHVERREQ */
- enum TabReturnState {
- TRS_IDLE = 0,
- ADD_TABLE = 1,
- SLAVE_SYSTEM_RESTART = 2,
- MASTER_SYSTEM_RESTART = 3
- };
- TabReturnState tabReturnState;
-
- /** Number of words */
- Uint32 packedSize;
-
- /** Index state (volatile data) */
- enum IndexState {
- IS_UNDEFINED = 0, // initial
- IS_OFFLINE = 1, // index table created
- IS_BUILDING = 2, // building (local state)
- IS_DROPPING = 3, // dropping (local state)
- IS_ONLINE = 4, // online
- IS_BROKEN = 9 // build or drop aborted
- };
- IndexState indexState;
-
- /** Trigger ids of index (volatile data) */
- Uint32 insertTriggerId;
- Uint32 updateTriggerId;
- Uint32 deleteTriggerId;
- Uint32 customTriggerId; // ordered index
- Uint32 buildTriggerId; // temp during build
-
- /** Index state in other blocks on this node */
- enum IndexLocal {
- IL_CREATED_TC = 1 << 0 // created in TC
- };
- Uint32 indexLocal;
-
- Uint32 noOfNullBits;
-
- inline bool equal(TableRecord & rec) const {
- return strcmp(tableName, rec.tableName) == 0;
- }
-
- inline Uint32 hashValue() const {
- Uint32 h = 0;
- for (const char* p = tableName; *p != 0; p++)
- h = (h << 5) + h + (*p);
- return h;
- }
-
- /** frm data for this table */
- /** TODO Could preferably be made dynamic size */
- Uint32 frmLen;
- char frmData[MAX_FRM_DATA_SIZE];
-
- Uint32 fragmentCount;
- };
-
- typedef Ptr<TableRecord> TableRecordPtr;
- ArrayPool<TableRecord> c_tableRecordPool;
- DLHashTable<TableRecord> c_tableRecordHash;
-
- /**
- * Table attributes. Permanent data.
- *
- * Indexes have an attribute list which duplicates primary table
- * attributes. This is wrong but convenient.
- */
- struct AttributeRecord : public MetaData::Attribute {
- union {
- /** Pointer to the next attribute used by ArrayPool */
- Uint32 nextPool;
-
- /** Pointer to the next attribute used by DLHash */
- Uint32 nextHash;
- };
-
- /** Pointer to the previous attribute used by DLHash */
- Uint32 prevHash;
-
- /** Pointer to the next attribute in table */
- Uint32 nextAttrInTable;
-
- inline bool equal(AttributeRecord & rec) const {
- return strcmp(attributeName, rec.attributeName) == 0;
- }
-
- inline Uint32 hashValue() const {
- Uint32 h = 0;
- for (const char* p = attributeName; *p != 0; p++)
- h = (h << 5) + h + (*p);
- return h;
- }
- };
-
- typedef Ptr<AttributeRecord> AttributeRecordPtr;
- ArrayPool<AttributeRecord> c_attributeRecordPool;
- DLHashTable<AttributeRecord> c_attributeRecordHash;
-
- /**
- * Triggers. This is volatile data not saved on disk. Setting a
- * trigger online creates the trigger in TC (if index) and LQH-TUP.
- */
- struct TriggerRecord {
-
- /** Trigger state */
- enum TriggerState {
- TS_NOT_DEFINED = 0,
- TS_DEFINING = 1,
- TS_OFFLINE = 2, // created globally in DICT
- TS_BUILDING = 3,
- TS_DROPPING = 4,
- TS_ONLINE = 5 // activated globally
- };
- TriggerState triggerState;
-
- /** Trigger state in other blocks on this node */
- enum IndexLocal {
- TL_CREATED_TC = 1 << 0, // created in TC
- TL_CREATED_LQH = 1 << 1 // created in LQH-TUP
- };
- Uint32 triggerLocal;
-
- /** Trigger name, used by DICT to identify the trigger */
- char triggerName[MAX_TAB_NAME_SIZE];
-
- /** Trigger id, used by TRIX, TC, LQH, and TUP to identify the trigger */
- Uint32 triggerId;
-
- /** Table id, the table the trigger is defined on */
- Uint32 tableId;
-
- /** Trigger type, defines what the trigger is used for */
- TriggerType::Value triggerType;
-
- /** Trigger action time, defines when the trigger should fire */
- TriggerActionTime::Value triggerActionTime;
-
- /** Trigger event, defines what events the trigger should monitor */
- TriggerEvent::Value triggerEvent;
-
- /** Monitor all replicas */
- bool monitorReplicas;
-
- /** Monitor all, the trigger monitors changes of all attributes in table */
- bool monitorAllAttributes;
-
- /**
- * Attribute mask, defines what attributes are to be monitored.
- * Can be seen as a compact representation of SQL column name list.
- */
- AttributeMask attributeMask;
-
- /** Index id, only used by secondary_index triggers */
- Uint32 indexId;
-
- union {
- /** Pointer to the next attribute used by ArrayPool */
- Uint32 nextPool;
-
- /** Next record in hash table */
- Uint32 nextHash;
- };
-
- /** Previous record in hash table */
- Uint32 prevHash;
-
- /** Equal function, used by DLHashTable */
- inline bool equal(TriggerRecord & rec) const {
- return strcmp(triggerName, rec.triggerName) == 0;
- }
-
- /** Hash value function, used by DLHashTable */
- inline Uint32 hashValue() const {
- Uint32 h = 0;
- for (const char* p = triggerName; *p != 0; p++)
- h = (h << 5) + h + (*p);
- return h;
- }
- };
-
- Uint32 c_maxNoOfTriggers;
- typedef Ptr<TriggerRecord> TriggerRecordPtr;
- ArrayPool<TriggerRecord> c_triggerRecordPool;
- DLHashTable<TriggerRecord> c_triggerRecordHash;
-
- /**
- * Information for each FS connection.
- ****************************************************************************/
- struct FsConnectRecord {
- enum FsState {
- IDLE = 0,
- OPEN_WRITE_SCHEMA = 1,
- WRITE_SCHEMA = 2,
- CLOSE_WRITE_SCHEMA = 3,
- OPEN_READ_SCHEMA1 = 4,
- OPEN_READ_SCHEMA2 = 5,
- READ_SCHEMA1 = 6,
- READ_SCHEMA2 = 7,
- CLOSE_READ_SCHEMA = 8,
- OPEN_READ_TAB_FILE1 = 9,
- OPEN_READ_TAB_FILE2 = 10,
- READ_TAB_FILE1 = 11,
- READ_TAB_FILE2 = 12,
- CLOSE_READ_TAB_FILE = 13,
- OPEN_WRITE_TAB_FILE = 14,
- WRITE_TAB_FILE = 15,
- CLOSE_WRITE_TAB_FILE = 16
- };
- /** File Pointer for this file system connection */
- Uint32 filePtr;
-
- /** Reference of owner record */
- Uint32 ownerPtr;
-
- /** State of file system connection */
- FsState fsState;
-
- /** Used by Array Pool for free list handling */
- Uint32 nextPool;
- };
-
- typedef Ptr<FsConnectRecord> FsConnectRecordPtr;
- ArrayPool<FsConnectRecord> c_fsConnectRecordPool;
-
- /**
- * This record stores the state of each node known to this block
- ****************************************************************************/
- struct NodeRecord {
- enum NodeState {
- API_NODE = 0,
- NDB_NODE_ALIVE = 1,
- NDB_NODE_DEAD = 2
- };
- bool hotSpare;
- NodeState nodeState;
- };
-
- typedef Ptr<NodeRecord> NodeRecordPtr;
- CArray<NodeRecord> c_nodes;
- NdbNodeBitmask c_aliveNodes;
-
- /**
- * General page record, used to buffer table definition data
- ****************************************************************************/
- struct PageRecord {
- Uint32 word[8192];
- };
-
- typedef Ptr<PageRecord> PageRecordPtr;
- CArray<PageRecord> c_pageRecordArray;
-
- struct SchemaPageRecord {
- Uint32 word[NDB_SF_PAGE_SIZE_IN_WORDS];
- };
-
- CArray<SchemaPageRecord> c_schemaPageRecordArray;
-
- /**
- * A page for create index table signal.
- */
- PageRecord c_indexPage;
-
-public:
- Dbdict(const class Configuration &);
- virtual ~Dbdict();
-
-private:
- BLOCK_DEFINES(Dbdict);
-
- // Signal receivers
- void execDICTSTARTREQ(Signal* signal);
-
- void execGET_TABINFOREQ(Signal* signal);
- void execGET_TABLEDID_REQ(Signal* signal);
- void execGET_TABINFO_REF(Signal* signal);
- void execGET_TABINFO_CONF(Signal* signal);
- void execCONTINUEB(Signal* signal);
-
- void execDUMP_STATE_ORD(Signal* signal);
- void execHOT_SPAREREP(Signal* signal);
- void execDIADDTABCONF(Signal* signal);
- void execDIADDTABREF(Signal* signal);
- void execTAB_COMMITCONF(Signal* signal);
- void execTAB_COMMITREF(Signal* signal);
- void execGET_SCHEMA_INFOREQ(Signal* signal);
- void execSCHEMA_INFO(Signal* signal);
- void execSCHEMA_INFOCONF(Signal* signal);
- void execREAD_NODESCONF(Signal* signal);
- void execFSCLOSECONF(Signal* signal);
- void execFSCLOSEREF(Signal* signal);
- void execFSOPENCONF(Signal* signal);
- void execFSOPENREF(Signal* signal);
- void execFSREADCONF(Signal* signal);
- void execFSREADREF(Signal* signal);
- void execFSWRITECONF(Signal* signal);
- void execFSWRITEREF(Signal* signal);
- void execNDB_STTOR(Signal* signal);
- void execREAD_CONFIG_REQ(Signal* signal);
- void execSTTOR(Signal* signal);
- void execTC_SCHVERCONF(Signal* signal);
- void execNODE_FAILREP(Signal* signal);
- void execINCL_NODEREQ(Signal* signal);
- void execAPI_FAILREQ(Signal* signal);
-
- void execWAIT_GCP_REF(Signal* signal);
- void execWAIT_GCP_CONF(Signal* signal);
-
- void execLIST_TABLES_REQ(Signal* signal);
-
- // Index signals
- void execCREATE_INDX_REQ(Signal* signal);
- void execCREATE_INDX_CONF(Signal* signal);
- void execCREATE_INDX_REF(Signal* signal);
-
- void execALTER_INDX_REQ(Signal* signal);
- void execALTER_INDX_CONF(Signal* signal);
- void execALTER_INDX_REF(Signal* signal);
-
- void execCREATE_TABLE_CONF(Signal* signal);
- void execCREATE_TABLE_REF(Signal* signal);
-
- void execDROP_INDX_REQ(Signal* signal);
- void execDROP_INDX_CONF(Signal* signal);
- void execDROP_INDX_REF(Signal* signal);
-
- void execDROP_TABLE_CONF(Signal* signal);
- void execDROP_TABLE_REF(Signal* signal);
-
- void execBUILDINDXREQ(Signal* signal);
- void execBUILDINDXCONF(Signal* signal);
- void execBUILDINDXREF(Signal* signal);
-
- // Util signals used by Event code
- void execUTIL_PREPARE_CONF(Signal* signal);
- void execUTIL_PREPARE_REF (Signal* signal);
- void execUTIL_EXECUTE_CONF(Signal* signal);
- void execUTIL_EXECUTE_REF (Signal* signal);
- void execUTIL_RELEASE_CONF(Signal* signal);
- void execUTIL_RELEASE_REF (Signal* signal);
-
-
- // Event signals from API
- void execCREATE_EVNT_REQ (Signal* signal);
- void execCREATE_EVNT_CONF(Signal* signal);
- void execCREATE_EVNT_REF (Signal* signal);
-
- void execDROP_EVNT_REQ (Signal* signal);
-
- void execSUB_START_REQ (Signal* signal);
- void execSUB_START_CONF (Signal* signal);
- void execSUB_START_REF (Signal* signal);
-
- void execSUB_STOP_REQ (Signal* signal);
- void execSUB_STOP_CONF (Signal* signal);
- void execSUB_STOP_REF (Signal* signal);
-
- // Event signals from SUMA
-
- void execCREATE_SUBID_CONF(Signal* signal);
- void execCREATE_SUBID_REF (Signal* signal);
-
- void execSUB_CREATE_CONF(Signal* signal);
- void execSUB_CREATE_REF (Signal* signal);
-
- void execSUB_SYNC_CONF(Signal* signal);
- void execSUB_SYNC_REF (Signal* signal);
-
- void execSUB_REMOVE_REQ(Signal* signal);
- void execSUB_REMOVE_CONF(Signal* signal);
- void execSUB_REMOVE_REF(Signal* signal);
-
- // Trigger signals
- void execCREATE_TRIG_REQ(Signal* signal);
- void execCREATE_TRIG_CONF(Signal* signal);
- void execCREATE_TRIG_REF(Signal* signal);
- void execALTER_TRIG_REQ(Signal* signal);
- void execALTER_TRIG_CONF(Signal* signal);
- void execALTER_TRIG_REF(Signal* signal);
- void execDROP_TRIG_REQ(Signal* signal);
- void execDROP_TRIG_CONF(Signal* signal);
- void execDROP_TRIG_REF(Signal* signal);
-
- void execDROP_TABLE_REQ(Signal* signal);
-
- void execPREP_DROP_TAB_REQ(Signal* signal);
- void execPREP_DROP_TAB_REF(Signal* signal);
- void execPREP_DROP_TAB_CONF(Signal* signal);
-
- void execDROP_TAB_REQ(Signal* signal);
- void execDROP_TAB_REF(Signal* signal);
- void execDROP_TAB_CONF(Signal* signal);
-
- void execCREATE_TABLE_REQ(Signal* signal);
- void execALTER_TABLE_REQ(Signal* signal);
- void execCREATE_FRAGMENTATION_REF(Signal*);
- void execCREATE_FRAGMENTATION_CONF(Signal*);
- void execCREATE_TAB_REQ(Signal* signal);
- void execADD_FRAGREQ(Signal* signal);
- void execLQHFRAGREF(Signal* signal);
- void execLQHFRAGCONF(Signal* signal);
- void execLQHADDATTREF(Signal* signal);
- void execLQHADDATTCONF(Signal* signal);
- void execCREATE_TAB_REF(Signal* signal);
- void execCREATE_TAB_CONF(Signal* signal);
- void execALTER_TAB_REQ(Signal* signal);
- void execALTER_TAB_REF(Signal* signal);
- void execALTER_TAB_CONF(Signal* signal);
-
- /*
- * 2.4 COMMON STORED VARIABLES
- */
-
- /**
- * This record stores all the state needed
- * when the schema page is being sent to other nodes
- ***************************************************************************/
- struct SendSchemaRecord {
- /** Number of words of schema data */
- Uint32 noOfWords;
- /** Page Id of schema data */
- Uint32 pageId;
-
- Uint32 nodeId;
- SignalCounter m_SCHEMAINFO_Counter;
-
- Uint32 noOfWordsCurrentlySent;
- Uint32 noOfSignalsSentSinceDelay;
-
- bool inUse;
- };
- SendSchemaRecord c_sendSchemaRecord;
-
- /**
- * This record stores all the state needed
- * when a table file is being read from disk
- ****************************************************************************/
- struct ReadTableRecord {
- /** Number of Pages */
- Uint32 noOfPages;
- /** Page Id*/
- Uint32 pageId;
- /** Table Id of read table */
- Uint32 tableId;
-
- bool inUse;
- Callback m_callback;
- };
- ReadTableRecord c_readTableRecord;
-
- /**
- * This record stores all the state needed
- * when a table file is being written to disk
- ****************************************************************************/
- struct WriteTableRecord {
- /** Number of Pages */
- Uint32 noOfPages;
- /** Page Id*/
- Uint32 pageId;
- /** Table Files Handled, local state variable */
- Uint32 noOfTableFilesHandled;
- /** Table Id of written table */
- Uint32 tableId;
- /** State, indicates from where it was called */
- enum TableWriteState {
- IDLE = 0,
- WRITE_ADD_TABLE_MASTER = 1,
- WRITE_ADD_TABLE_SLAVE = 2,
- WRITE_RESTART_FROM_MASTER = 3,
- WRITE_RESTART_FROM_OWN = 4,
- TWR_CALLBACK = 5
- };
- TableWriteState tableWriteState;
- Callback m_callback;
- };
- WriteTableRecord c_writeTableRecord;
-
- /**
- * This record stores all the state needed
- * when a schema file is being read from disk
- ****************************************************************************/
- struct ReadSchemaRecord {
- /** Page Id of schema page */
- Uint32 pageId;
- /** First page to read */
- Uint32 firstPage;
- /** Number of pages to read */
- Uint32 noOfPages;
- /** State, indicates from where it was called */
- enum SchemaReadState {
- IDLE = 0,
- INITIAL_READ_HEAD = 1,
- INITIAL_READ = 2
- };
- SchemaReadState schemaReadState;
- };
- ReadSchemaRecord c_readSchemaRecord;
-
- /**
- * This record stores all the state needed
- * when a schema file is being written to disk
- ****************************************************************************/
- struct WriteSchemaRecord {
- /** Page Id of schema page */
- Uint32 pageId;
- /** Rewrite entire file */
- Uint32 newFile;
- /** First page to write */
- Uint32 firstPage;
- /** Number of pages to write */
- Uint32 noOfPages;
- /** Schema Files Handled, local state variable */
- Uint32 noOfSchemaFilesHandled;
-
- bool inUse;
- Callback m_callback;
- };
- WriteSchemaRecord c_writeSchemaRecord;
-
- /**
- * This record stores the state needed while the
- * schema is being restored during a restart
- ****************************************************************************/
- struct RestartRecord {
- /** Global check point identity */
- Uint32 gciToRestart;
-
- /** The active table at restart process */
- Uint32 activeTable;
-
- /** The active table at restart process */
- BlockReference returnBlockRef;
- };
- RestartRecord c_restartRecord;
-
- /**
- * This record stores the state needed while a table
- * definition is being retrieved (GET_TABINFO)
- ****************************************************************************/
- struct RetrieveRecord {
- RetrieveRecord(){ noOfWaiters = 0;}
-
- /** Only one retrieve table definition at a time */
- bool busyState;
-
- /**
- * No of waiting in time queue
- */
- Uint32 noOfWaiters;
-
- /** Block Reference of retriever */
- BlockReference blockRef;
-
- /** Id of retriever */
- Uint32 m_senderData;
-
- /** Table id of retrieved table */
- Uint32 tableId;
-
- /** Starting page to retrieve data from */
- Uint32 retrievePage;
-
- /** Number of pages retrieved */
- Uint32 retrievedNoOfPages;
-
- /** Number of words retrieved */
- Uint32 retrievedNoOfWords;
-
- /** Number of words sent currently */
- Uint32 currentSent;
-
- /**
- * Long signal stuff
- */
- bool m_useLongSig;
- };
- RetrieveRecord c_retrieveRecord;
-
- /**
- * This record keeps track of the schema file pages
- *
- * This is the info stored in one entry of the schema
- * page. Each table has 4 words of info.
- * Word 1: Schema version (upper 16 bits)
- * Table State (lower 16 bits)
- * Word 2: Number of pages of table description
- * Word 3: Global checkpoint id table was created
- * Word 4: Currently zero
- ****************************************************************************/
- struct SchemaRecord {
- /** Schema file first page (0) */
- Uint32 schemaPage;
-
- /** Old Schema file first page (used at node restart) */
- Uint32 oldSchemaPage;
-
- Callback m_callback;
- };
- SchemaRecord c_schemaRecord;
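A minimal sketch of the word 1 packing described in the comment above, shown only as an illustration; the helper names are hypothetical and not part of the actual SchemaFile::TableEntry interface.

  // Hypothetical helpers illustrating the documented layout: schema
  // version in the upper 16 bits, table state in the lower 16 bits.
  inline Uint32 packEntryWord1(Uint32 schemaVersion, Uint32 tableState) {
    return (schemaVersion << 16) | (tableState & 0xFFFF);
  }
  inline Uint32 entrySchemaVersion(Uint32 word1) { return word1 >> 16; }
  inline Uint32 entryTableState(Uint32 word1)    { return word1 & 0xFFFF; }
  // Example: schema version 3, table state 1  ->  word 1 == 0x00030001.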
-
- /*
- * Schema file, list of schema pages. Use an array until a pool
- * exists and NDBFS interface can use it.
- */
- struct XSchemaFile {
- SchemaFile* schemaPage;
- Uint32 noOfPages;
- };
- // 0-normal 1-old
- XSchemaFile c_schemaFile[2];
-
- void initSchemaFile(XSchemaFile *, Uint32 firstPage, Uint32 lastPage,
- bool initEntries);
- void resizeSchemaFile(XSchemaFile * xsf, Uint32 noOfPages);
- void computeChecksum(XSchemaFile *, Uint32 pageNo);
- bool validateChecksum(const XSchemaFile *);
- SchemaFile::TableEntry * getTableEntry(XSchemaFile *, Uint32 tableId);
-
- Uint32 computeChecksum(const Uint32 * src, Uint32 len);
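The checksum helpers above protect the schema file pages. Purely as a hedged sketch (the real computeChecksum and validateChecksum may work differently), a common scheme for this kind of page checksum is a word-wise XOR stored so that XOR-ing the whole page yields zero.

  // Sketch of a word-wise XOR checksum; validation would simply check
  // that XOR-ing every word of the page (checksum word included) is 0.
  static Uint32 xorChecksum(const Uint32* src, Uint32 len) {
    Uint32 chk = 0;
    for (Uint32 i = 0; i < len; i++)
      chk ^= src[i];
    return chk;
  }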
-
-
- /* ----------------------------------------------------------------------- */
- // Node References
- /* ----------------------------------------------------------------------- */
- Uint16 c_masterNodeId;
-
- /* ----------------------------------------------------------------------- */
- // Various current system properties
- /* ----------------------------------------------------------------------- */
- Uint16 c_numberNode;
- Uint16 c_noHotSpareNodes;
- Uint16 c_noNodesFailed;
- Uint32 c_failureNr;
-
- /* ----------------------------------------------------------------------- */
- // State variables
- /* ----------------------------------------------------------------------- */
-
- enum BlockState {
- BS_IDLE = 0,
- BS_CREATE_TAB = 1,
- BS_BUSY = 2,
- BS_NODE_FAILURE = 3
- };
- BlockState c_blockState;
-
- struct PackTable {
-
- enum PackTableState {
- PTS_IDLE = 0,
- PTS_ADD_TABLE_MASTER = 1,
- PTS_ADD_TABLE_SLAVE = 2,
- PTS_GET_TAB = 3,
- PTS_RESTART = 4
- } m_state;
-
- } c_packTable;
-
- Uint32 c_startPhase;
- Uint32 c_restartType;
- bool c_initialStart;
- bool c_systemRestart;
- bool c_nodeRestart;
- bool c_initialNodeRestart;
- Uint32 c_tabinfoReceived;
-
- /**
- * Temporary structure used when parsing table info
- */
- struct ParseDictTabInfoRecord {
- DictTabInfo::RequestType requestType;
- Uint32 errorCode;
- Uint32 errorLine;
-
- SimpleProperties::UnpackStatus status;
- Uint32 errorKey;
- TableRecordPtr tablePtr;
- };
-
- // Operation records
-
- /**
- * Common part of operation records. Uses KeyTable2. Note that each
- * seize/release invokes ctor/dtor automatically.
- */
- struct OpRecordCommon {
- Uint32 key; // key shared between master and slaves
- Uint32 nextHash;
- Uint32 prevHash;
- Uint32 hashValue() const {
- return key;
- }
- bool equal(const OpRecordCommon& rec) const {
- return key == rec.key;
- }
- };
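The shared key is what lets the master and the participants address the same logical schema operation: hashValue() hashes on the key alone and equal() compares only keys, so records seized independently on different nodes under the same key behave as the same entry in the key table. A small illustration using only the members above (the function name is made up):

  // Illustration only: two records tagged with the same operation key
  // hash identically and compare equal in the key table.
  static bool exampleSharedOpKey() {
    OpRecordCommon onMaster, onSlave;
    onMaster.key = onSlave.key = 42;
    // Both hash to 42 and compare equal, so a lookup by key finds the
    // same logical operation on every node.
    return onMaster.hashValue() == onSlave.hashValue() && onMaster.equal(onSlave);
  }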
-
- /**
- * Create table record
- */
- struct CreateTableRecord : OpRecordCommon {
- Uint32 m_senderRef;
- Uint32 m_senderData;
- Uint32 m_coordinatorRef;
-
- Uint32 m_errorCode;
- void setErrorCode(Uint32 c){ if(m_errorCode == 0) m_errorCode = c;}
-
- // For alter table
- Uint32 m_changeMask;
- bool m_alterTableFailed;
- AlterTableRef m_alterTableRef;
- Uint32 m_alterTableId;
-
- /* Previous table name (used for reverting failed table rename) */
- char previousTableName[MAX_TAB_NAME_SIZE];
-
- Uint32 m_tablePtrI;
- Uint32 m_tabInfoPtrI;
- Uint32 m_fragmentsPtrI;
-
- Uint32 m_dihAddFragPtr; // Connect ptr towards DIH
- Uint32 m_lqhFragPtr; // Connect ptr towards LQH
-
- Callback m_callback; // Who's using local create tab
- MutexHandle2<DIH_START_LCP_MUTEX> m_startLcpMutex;
-
- struct CoordinatorData {
- Uint32 m_gsn;
- SafeCounterHandle m_counter;
- CreateTabReq::RequestType m_requestType;
- } m_coordinatorData;
- };
- typedef Ptr<CreateTableRecord> CreateTableRecordPtr;
-
- /**
- * Drop table record
- */
- struct DropTableRecord : OpRecordCommon {
- DropTableReq m_request;
-
- Uint32 m_requestType;
- Uint32 m_coordinatorRef;
-
- Uint32 m_errorCode;
- void setErrorCode(Uint32 c){ if(m_errorCode == 0) m_errorCode = c;}
-
-    /**
-     * State used while the request is distributed to the participants
-     */
- struct CoordinatorData {
- Uint32 m_gsn;
- Uint32 m_block;
- SignalCounter m_signalCounter;
- } m_coordinatorData;
-
- struct ParticipantData {
- Uint32 m_gsn;
- Uint32 m_block;
- SignalCounter m_signalCounter;
-
- Callback m_callback;
- } m_participantData;
- };
- typedef Ptr<DropTableRecord> DropTableRecordPtr;
-
- /**
- * Request flags passed in signals along with request type and
- * propagated across operations.
- */
- struct RequestFlag {
- enum {
- RF_LOCAL = 1 << 0, // create on local node only
- RF_NOBUILD = 1 << 1, // no need to build index
- RF_NOTCTRIGGER = 1 << 2 // alter trigger: no trigger in TC
- };
- };
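Since these are plain bit values, several of them can be OR-ed into the single Uint32 request-flag word carried by the operation records, and tested back with a bitwise AND; for example (the function name is only for illustration):

  static void exampleRequestFlag() {
    // Combine "create on local node only" with "skip the index build".
    const Uint32 flags = RequestFlag::RF_LOCAL | RequestFlag::RF_NOBUILD;
    // A flag is tested back with a bitwise AND.
    const bool localOnly = (flags & RequestFlag::RF_LOCAL) != 0;    // true
    const bool skipBuild = (flags & RequestFlag::RF_NOBUILD) != 0;  // true
    (void)localOnly; (void)skipBuild;
  }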
-
- /**
- * Operation record for create index.
- */
- struct OpCreateIndex : OpRecordCommon {
- // original request (index id will be added)
- CreateIndxReq m_request;
- AttributeList m_attrList;
- char m_indexName[MAX_TAB_NAME_SIZE];
- bool m_storedIndex;
- // coordinator DICT
- Uint32 m_coordinatorRef;
- bool m_isMaster;
- // state info
- CreateIndxReq::RequestType m_requestType;
- Uint32 m_requestFlag;
- // error info
- CreateIndxRef::ErrorCode m_errorCode;
- Uint32 m_errorLine;
- Uint32 m_errorNode;
- // counters
- SignalCounter m_signalCounter;
- // ctor
- OpCreateIndex() {
- memset(&m_request, 0, sizeof(m_request));
- m_coordinatorRef = 0;
- m_requestType = CreateIndxReq::RT_UNDEFINED;
- m_requestFlag = 0;
- m_errorCode = CreateIndxRef::NoError;
- m_errorLine = 0;
- m_errorNode = 0;
- }
- void save(const CreateIndxReq* req) {
- m_request = *req;
- m_requestType = req->getRequestType();
- m_requestFlag = req->getRequestFlag();
- }
- bool hasError() {
- return m_errorCode != CreateIndxRef::NoError;
- }
- void setError(const CreateIndxRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- void setError(const CreateTableRef* ref) {
- if (ref != 0 && ! hasError()) {
- switch (ref->getErrorCode()) {
- case CreateTableRef::TableAlreadyExist:
- m_errorCode = CreateIndxRef::IndexExists;
- break;
- default:
- m_errorCode = (CreateIndxRef::ErrorCode)ref->getErrorCode();
- break;
- }
- m_errorLine = ref->getErrorLine();
- }
- }
- void setError(const AlterIndxRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (CreateIndxRef::ErrorCode)ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- };
- typedef Ptr<OpCreateIndex> OpCreateIndexPtr;
-
- /**
- * Operation record for drop index.
- */
- struct OpDropIndex : OpRecordCommon {
- // original request
- DropIndxReq m_request;
- // coordinator DICT
- Uint32 m_coordinatorRef;
- bool m_isMaster;
- // state info
- DropIndxReq::RequestType m_requestType;
- Uint32 m_requestFlag;
- // error info
- DropIndxRef::ErrorCode m_errorCode;
- Uint32 m_errorLine;
- Uint32 m_errorNode;
- // counters
- SignalCounter m_signalCounter;
- // ctor
- OpDropIndex() {
- memset(&m_request, 0, sizeof(m_request));
- m_coordinatorRef = 0;
- m_requestType = DropIndxReq::RT_UNDEFINED;
- m_requestFlag = 0;
- m_errorCode = DropIndxRef::NoError;
- m_errorLine = 0;
- m_errorNode = 0;
- }
- void save(const DropIndxReq* req) {
- m_request = *req;
- m_requestType = req->getRequestType();
- m_requestFlag = req->getRequestFlag();
- }
- bool hasError() {
- return m_errorCode != DropIndxRef::NoError;
- }
- void setError(const DropIndxRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- void setError(const AlterIndxRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (DropIndxRef::ErrorCode)ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- void setError(const DropTableRef* ref) {
- if (ref != 0 && ! hasError()) {
- switch(ref->errorCode) {
- case(DropTableRef::Busy):
- m_errorCode = DropIndxRef::Busy;
- break;
- case(DropTableRef::NoSuchTable):
- m_errorCode = DropIndxRef::IndexNotFound;
- break;
- case(DropTableRef::DropInProgress):
- m_errorCode = DropIndxRef::Busy;
- break;
- case(DropTableRef::NoDropTableRecordAvailable):
- m_errorCode = DropIndxRef::Busy;
- break;
- default:
- m_errorCode = (DropIndxRef::ErrorCode)ref->errorCode;
- break;
- }
- //m_errorLine = ref->getErrorLine();
- //m_errorNode = ref->getErrorNode();
- }
- }
- };
- typedef Ptr<OpDropIndex> OpDropIndexPtr;
-
- /**
- * Operation record for alter index.
- */
- struct OpAlterIndex : OpRecordCommon {
- // original request plus buffer for attribute lists
- AlterIndxReq m_request;
- AttributeList m_attrList;
- AttributeList m_tableKeyList;
- // coordinator DICT
- Uint32 m_coordinatorRef;
- bool m_isMaster;
- // state info
- AlterIndxReq::RequestType m_requestType;
- Uint32 m_requestFlag;
- // error info
- AlterIndxRef::ErrorCode m_errorCode;
- Uint32 m_errorLine;
- Uint32 m_errorNode;
- // counters
- SignalCounter m_signalCounter;
- Uint32 m_triggerCounter;
- // ctor
- OpAlterIndex() {
- memset(&m_request, 0, sizeof(m_request));
- m_coordinatorRef = 0;
- m_requestType = AlterIndxReq::RT_UNDEFINED;
- m_requestFlag = 0;
- m_errorCode = AlterIndxRef::NoError;
- m_errorLine = 0;
- m_errorNode = 0;
- m_triggerCounter = 0;
- }
- void save(const AlterIndxReq* req) {
- m_request = *req;
- m_requestType = req->getRequestType();
- m_requestFlag = req->getRequestFlag();
- }
- bool hasError() {
- return m_errorCode != AlterIndxRef::NoError;
- }
- void setError(const AlterIndxRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- void setError(const CreateIndxRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- void setError(const DropIndxRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- void setError(const BuildIndxRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
- }
- }
- void setError(const CreateTrigRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- void setError(const DropTrigRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- };
- typedef Ptr<OpAlterIndex> OpAlterIndexPtr;
-
- /**
- * Operation record for build index.
- */
- struct OpBuildIndex : OpRecordCommon {
- // original request plus buffer for attribute lists
- BuildIndxReq m_request;
- AttributeList m_attrList;
- AttributeList m_tableKeyList;
- // coordinator DICT
- Uint32 m_coordinatorRef;
- bool m_isMaster;
- // state info
- BuildIndxReq::RequestType m_requestType;
- Uint32 m_requestFlag;
- Uint32 m_constrTriggerId;
- // error info
- BuildIndxRef::ErrorCode m_errorCode;
- Uint32 m_errorLine;
- Uint32 m_errorNode;
- // counters
- SignalCounter m_signalCounter;
- // ctor
- OpBuildIndex() {
- memset(&m_request, 0, sizeof(m_request));
- m_coordinatorRef = 0;
- m_requestType = BuildIndxReq::RT_UNDEFINED;
- m_requestFlag = 0;
-      m_constrTriggerId = RNIL;
- m_errorCode = BuildIndxRef::NoError;
- m_errorLine = 0;
- m_errorNode = 0;
- }
- void save(const BuildIndxReq* req) {
- m_request = *req;
- m_requestType = req->getRequestType();
- m_requestFlag = req->getRequestFlag();
- }
- bool hasError() {
- return m_errorCode != BuildIndxRef::NoError;
- }
- void setError(const BuildIndxRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = ref->getErrorCode();
- }
- }
- void setError(const AlterIndxRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (BuildIndxRef::ErrorCode)ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- void setError(const CreateTrigRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (BuildIndxRef::ErrorCode)ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- void setError(const DropTrigRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (BuildIndxRef::ErrorCode)ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- };
- typedef Ptr<OpBuildIndex> OpBuildIndexPtr;
-
- /**
- * Operation record for Util Signals.
- */
- struct OpSignalUtil : OpRecordCommon{
- Callback m_callback;
- Uint32 m_userData;
- };
- typedef Ptr<OpSignalUtil> OpSignalUtilPtr;
-
- /**
- * Operation record for subscribe-start-stop
- */
- struct OpSubEvent : OpRecordCommon {
- Uint32 m_senderRef;
- Uint32 m_senderData;
- Uint32 m_errorCode;
- RequestTracker m_reqTracker;
- };
- typedef Ptr<OpSubEvent> OpSubEventPtr;
-
- static const Uint32 sysTab_NDBEVENTS_0_szs[];
-
- /**
- * Operation record for create event.
- */
- struct OpCreateEvent : OpRecordCommon {
- // original request (event id will be added)
- CreateEvntReq m_request;
- //AttributeMask m_attrListBitmask;
- // AttributeList m_attrList;
- sysTab_NDBEVENTS_0 m_eventRec;
- // char m_eventName[MAX_TAB_NAME_SIZE];
- // char m_tableName[MAX_TAB_NAME_SIZE];
-
- // coordinator DICT
- RequestTracker m_reqTracker;
- // state info
- CreateEvntReq::RequestType m_requestType;
- Uint32 m_requestFlag;
- // error info
- CreateEvntRef::ErrorCode m_errorCode;
- Uint32 m_errorLine;
- Uint32 m_errorNode;
- // ctor
- OpCreateEvent() {
- memset(&m_request, 0, sizeof(m_request));
- m_requestType = CreateEvntReq::RT_UNDEFINED;
- m_requestFlag = 0;
- m_errorCode = CreateEvntRef::NoError;
- m_errorLine = 0;
- m_errorNode = 0;
- }
- void init(const CreateEvntReq* req, Dbdict* dp) {
- m_request = *req;
- m_errorCode = CreateEvntRef::NoError;
- m_errorLine = 0;
- m_errorNode = 0;
- m_requestType = req->getRequestType();
- m_requestFlag = req->getRequestFlag();
- }
- bool hasError() {
- return m_errorCode != CreateEvntRef::NoError;
- }
- void setError(const CreateEvntRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
-
- };
- typedef Ptr<OpCreateEvent> OpCreateEventPtr;
-
- /**
- * Operation record for drop event.
- */
- struct OpDropEvent : OpRecordCommon {
- // original request
- DropEvntReq m_request;
- // char m_eventName[MAX_TAB_NAME_SIZE];
- sysTab_NDBEVENTS_0 m_eventRec;
- RequestTracker m_reqTracker;
- // error info
- DropEvntRef::ErrorCode m_errorCode;
- Uint32 m_errorLine;
- Uint32 m_errorNode;
- // ctor
- OpDropEvent() {
- memset(&m_request, 0, sizeof(m_request));
- m_errorCode = DropEvntRef::NoError;
- m_errorLine = 0;
- m_errorNode = 0;
- }
- void init(const DropEvntReq* req) {
- m_request = *req;
- m_errorCode = DropEvntRef::NoError;
- m_errorLine = 0;
- m_errorNode = 0;
- }
- bool hasError() {
- return m_errorCode != DropEvntRef::NoError;
- }
- void setError(const DropEvntRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- };
- typedef Ptr<OpDropEvent> OpDropEventPtr;
-
- /**
- * Operation record for create trigger.
- */
- struct OpCreateTrigger : OpRecordCommon {
- // original request (trigger id will be added)
- CreateTrigReq m_request;
- char m_triggerName[MAX_TAB_NAME_SIZE];
- // coordinator DICT
- Uint32 m_coordinatorRef;
- bool m_isMaster;
- // state info
- CreateTrigReq::RequestType m_requestType;
- Uint32 m_requestFlag;
- // error info
- CreateTrigRef::ErrorCode m_errorCode;
- Uint32 m_errorLine;
- Uint32 m_errorNode;
- // counters
- SignalCounter m_signalCounter;
- // ctor
- OpCreateTrigger() {
- memset(&m_request, 0, sizeof(m_request));
- m_coordinatorRef = 0;
- m_requestType = CreateTrigReq::RT_UNDEFINED;
- m_requestFlag = 0;
- m_errorCode = CreateTrigRef::NoError;
- m_errorLine = 0;
- m_errorNode = 0;
- }
- void save(const CreateTrigReq* req) {
- m_request = *req;
- m_requestType = req->getRequestType();
- m_requestFlag = req->getRequestFlag();
- }
- bool hasError() {
- return m_errorCode != CreateTrigRef::NoError;
- }
- void setError(const CreateTrigRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- void setError(const AlterTrigRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (CreateTrigRef::ErrorCode)ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- };
- typedef Ptr<OpCreateTrigger> OpCreateTriggerPtr;
-
- /**
- * Operation record for drop trigger.
- */
- struct OpDropTrigger : OpRecordCommon {
- // original request
- DropTrigReq m_request;
- // coordinator DICT
- Uint32 m_coordinatorRef;
- bool m_isMaster;
- // state info
- DropTrigReq::RequestType m_requestType;
- Uint32 m_requestFlag;
- // error info
- DropTrigRef::ErrorCode m_errorCode;
- Uint32 m_errorLine;
- Uint32 m_errorNode;
- // counters
- SignalCounter m_signalCounter;
- // ctor
- OpDropTrigger() {
- memset(&m_request, 0, sizeof(m_request));
- m_coordinatorRef = 0;
- m_requestType = DropTrigReq::RT_UNDEFINED;
- m_requestFlag = 0;
- m_errorCode = DropTrigRef::NoError;
- m_errorLine = 0;
- m_errorNode = 0;
- }
- void save(const DropTrigReq* req) {
- m_request = *req;
- m_requestType = req->getRequestType();
- m_requestFlag = req->getRequestFlag();
- }
- bool hasError() {
- return m_errorCode != DropTrigRef::NoError;
- }
- void setError(const DropTrigRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- void setError(const AlterTrigRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (DropTrigRef::ErrorCode)ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- };
- typedef Ptr<OpDropTrigger> OpDropTriggerPtr;
-
- /**
- * Operation record for alter trigger.
- */
- struct OpAlterTrigger : OpRecordCommon {
- // original request
- AlterTrigReq m_request;
- // nodes participating in operation
- NdbNodeBitmask m_nodes;
- // coordinator DICT
- Uint32 m_coordinatorRef;
- bool m_isMaster;
- // state info
- AlterTrigReq::RequestType m_requestType;
- Uint32 m_requestFlag;
- // error info
- AlterTrigRef::ErrorCode m_errorCode;
- Uint32 m_errorLine;
- Uint32 m_errorNode;
- // counters
- SignalCounter m_signalCounter;
- // ctor
- OpAlterTrigger() {
- memset(&m_request, 0, sizeof(m_request));
- m_coordinatorRef = 0;
- m_requestType = AlterTrigReq::RT_UNDEFINED;
- m_requestFlag = 0;
- m_errorCode = AlterTrigRef::NoError;
- m_errorLine = 0;
- m_errorNode = 0;
- }
- void save(const AlterTrigReq* req) {
- m_request = *req;
- m_requestType = req->getRequestType();
- m_requestFlag = req->getRequestFlag();
- }
- bool hasError() {
- return m_errorCode != AlterTrigRef::NoError;
- }
- void setError(const AlterTrigRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (AlterTrigRef::ErrorCode)ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- void setError(const CreateTrigRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (AlterTrigRef::ErrorCode)ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- void setError(const DropTrigRef* ref) {
- if (ref != 0 && ! hasError()) {
- m_errorCode = (AlterTrigRef::ErrorCode)ref->getErrorCode();
- m_errorLine = ref->getErrorLine();
- m_errorNode = ref->getErrorNode();
- }
- }
- };
- typedef Ptr<OpAlterTrigger> OpAlterTriggerPtr;
-
- // Common operation record pool
-public:
- STATIC_CONST( opCreateTableSize = sizeof(CreateTableRecord) );
- STATIC_CONST( opDropTableSize = sizeof(DropTableRecord) );
- STATIC_CONST( opCreateIndexSize = sizeof(OpCreateIndex) );
- STATIC_CONST( opDropIndexSize = sizeof(OpDropIndex) );
- STATIC_CONST( opAlterIndexSize = sizeof(OpAlterIndex) );
- STATIC_CONST( opBuildIndexSize = sizeof(OpBuildIndex) );
- STATIC_CONST( opCreateEventSize = sizeof(OpCreateEvent) );
- STATIC_CONST( opSubEventSize = sizeof(OpSubEvent) );
- STATIC_CONST( opDropEventSize = sizeof(OpDropEvent) );
- STATIC_CONST( opSignalUtilSize = sizeof(OpSignalUtil) );
- STATIC_CONST( opCreateTriggerSize = sizeof(OpCreateTrigger) );
- STATIC_CONST( opDropTriggerSize = sizeof(OpDropTrigger) );
- STATIC_CONST( opAlterTriggerSize = sizeof(OpAlterTrigger) );
-private:
-#define PTR_ALIGN(n) ((((n)+sizeof(void*)-1)>>2)&~((sizeof(void*)-1)>>2))
- union OpRecordUnion {
- Uint32 u_opCreateTable [PTR_ALIGN(opCreateTableSize)];
- Uint32 u_opDropTable [PTR_ALIGN(opDropTableSize)];
- Uint32 u_opCreateIndex [PTR_ALIGN(opCreateIndexSize)];
- Uint32 u_opDropIndex [PTR_ALIGN(opDropIndexSize)];
- Uint32 u_opCreateEvent [PTR_ALIGN(opCreateEventSize)];
- Uint32 u_opSubEvent [PTR_ALIGN(opSubEventSize)];
- Uint32 u_opDropEvent [PTR_ALIGN(opDropEventSize)];
- Uint32 u_opSignalUtil [PTR_ALIGN(opSignalUtilSize)];
- Uint32 u_opAlterIndex [PTR_ALIGN(opAlterIndexSize)];
- Uint32 u_opBuildIndex [PTR_ALIGN(opBuildIndexSize)];
- Uint32 u_opCreateTrigger[PTR_ALIGN(opCreateTriggerSize)];
- Uint32 u_opDropTrigger [PTR_ALIGN(opDropTriggerSize)];
- Uint32 u_opAlterTrigger [PTR_ALIGN(opAlterTriggerSize)];
- Uint32 nextPool;
- };
- ArrayPool<OpRecordUnion> c_opRecordPool;
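PTR_ALIGN turns a byte size into a count of Uint32 words, rounded up so that every member of OpRecordUnion occupies a whole number of pointer-sized units. A worked example, assuming a 64-bit build where sizeof(void*) equals 8:

  // PTR_ALIGN(30) = ((30 + 8 - 1) >> 2) & ~((8 - 1) >> 2)
  //               = (37 >> 2) & ~1
  //               = 9 & ~1
  //               = 8 Uint32 words  (32 bytes, the next multiple of 8 >= 30)
  // On a 32-bit build the mask term is ~0, so the result is simply the
  // byte size rounded up to whole Uint32 words.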
-
- // Operation records
- KeyTable2<CreateTableRecord, OpRecordUnion> c_opCreateTable;
- KeyTable2<DropTableRecord, OpRecordUnion> c_opDropTable;
- KeyTable2<OpCreateIndex, OpRecordUnion> c_opCreateIndex;
- KeyTable2<OpDropIndex, OpRecordUnion> c_opDropIndex;
- KeyTable2<OpAlterIndex, OpRecordUnion> c_opAlterIndex;
- KeyTable2<OpBuildIndex, OpRecordUnion> c_opBuildIndex;
- KeyTable2<OpCreateEvent, OpRecordUnion> c_opCreateEvent;
- KeyTable2<OpSubEvent, OpRecordUnion> c_opSubEvent;
- KeyTable2<OpDropEvent, OpRecordUnion> c_opDropEvent;
- KeyTable2<OpSignalUtil, OpRecordUnion> c_opSignalUtil;
- KeyTable2<OpCreateTrigger, OpRecordUnion> c_opCreateTrigger;
- KeyTable2<OpDropTrigger, OpRecordUnion> c_opDropTrigger;
- KeyTable2<OpAlterTrigger, OpRecordUnion> c_opAlterTrigger;
-
- // Unique key for operation XXX move to some system table
- Uint32 c_opRecordSequence;
-
- // Statement blocks
-
- /* ------------------------------------------------------------ */
- // Start/Restart Handling
- /* ------------------------------------------------------------ */
- void sendSTTORRY(Signal* signal);
- void sendNDB_STTORRY(Signal* signal);
- void initSchemaFile(Signal* signal);
-
- /* ------------------------------------------------------------ */
- // Drop Table Handling
- /* ------------------------------------------------------------ */
- void releaseTableObject(Uint32 tableId, bool removeFromHash = true);
-
- /* ------------------------------------------------------------ */
- // General Stuff
- /* ------------------------------------------------------------ */
- Uint32 getFreeTableRecord(Uint32 primaryTableId);
- Uint32 getFreeTriggerRecord();
- bool getNewAttributeRecord(TableRecordPtr tablePtr,
- AttributeRecordPtr & attrPtr);
- void packTableIntoPages(Signal* signal, Uint32 tableId, Uint32 pageId);
- void packTableIntoPagesImpl(SimpleProperties::Writer &, TableRecordPtr,
- Signal* signal= 0);
-
- void sendGET_TABINFOREQ(Signal* signal,
- Uint32 tableId);
- void sendTC_SCHVERREQ(Signal* signal,
- Uint32 tableId,
- BlockReference tcRef);
-
- /* ------------------------------------------------------------ */
- // System Restart Handling
- /* ------------------------------------------------------------ */
- void initSendSchemaData(Signal* signal);
- void sendSchemaData(Signal* signal);
- Uint32 sendSCHEMA_INFO(Signal* signal, Uint32 nodeId, Uint32* pagePointer);
- void checkSchemaStatus(Signal* signal);
- void sendDIHSTARTTAB_REQ(Signal* signal);
-
- /* ------------------------------------------------------------ */
- // Receive Table Handling
- /* ------------------------------------------------------------ */
- void handleTabInfoInit(SimpleProperties::Reader &,
- ParseDictTabInfoRecord *,
- bool checkExist = true);
- void handleTabInfo(SimpleProperties::Reader & it, ParseDictTabInfoRecord *);
-
- void handleAddTableFailure(Signal* signal,
- Uint32 failureLine,
- Uint32 tableId);
- bool verifyTableCorrect(Signal* signal, Uint32 tableId);
-
- /* ------------------------------------------------------------ */
- // Add Table Handling
- /* ------------------------------------------------------------ */
-
- /* ------------------------------------------------------------ */
- // Add Fragment Handling
- /* ------------------------------------------------------------ */
- void sendLQHADDATTRREQ(Signal*, CreateTableRecordPtr, Uint32 attributePtrI);
-
- /* ------------------------------------------------------------ */
- // Read/Write Schema and Table files
- /* ------------------------------------------------------------ */
- void updateSchemaState(Signal* signal, Uint32 tableId,
- SchemaFile::TableEntry*, Callback*);
- void startWriteSchemaFile(Signal* signal);
- void openSchemaFile(Signal* signal,
- Uint32 fileNo,
- Uint32 fsPtr,
- bool writeFlag,
- bool newFile);
- void writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
- void writeSchemaConf(Signal* signal,
- FsConnectRecordPtr fsPtr);
- void closeFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
- void closeWriteSchemaConf(Signal* signal,
- FsConnectRecordPtr fsPtr);
- void initSchemaFile_conf(Signal* signal, Uint32 i, Uint32 returnCode);
-
- void writeTableFile(Signal* signal, Uint32 tableId,
- SegmentedSectionPtr tabInfo, Callback*);
- void startWriteTableFile(Signal* signal, Uint32 tableId);
- void openTableFile(Signal* signal,
- Uint32 fileNo,
- Uint32 fsPtr,
- Uint32 tableId,
- bool writeFlag);
- void writeTableFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
- void writeTableConf(Signal* signal,
- FsConnectRecordPtr fsPtr);
- void closeWriteTableConf(Signal* signal,
- FsConnectRecordPtr fsPtr);
-
- void startReadTableFile(Signal* signal, Uint32 tableId);
- void openReadTableRef(Signal* signal,
- FsConnectRecordPtr fsPtr);
- void readTableFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
- void readTableConf(Signal* signal,
- FsConnectRecordPtr fsPtr);
- void readTableRef(Signal* signal,
- FsConnectRecordPtr fsPtr);
- void closeReadTableConf(Signal* signal,
- FsConnectRecordPtr fsPtr);
-
- void startReadSchemaFile(Signal* signal);
- void openReadSchemaRef(Signal* signal,
- FsConnectRecordPtr fsPtr);
- void readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
- void readSchemaConf(Signal* signal, FsConnectRecordPtr fsPtr);
- void readSchemaRef(Signal* signal, FsConnectRecordPtr fsPtr);
- void closeReadSchemaConf(Signal* signal,
- FsConnectRecordPtr fsPtr);
- bool convertSchemaFileTo_5_0_6(XSchemaFile*);
-
- /* ------------------------------------------------------------ */
- // Get table definitions
- /* ------------------------------------------------------------ */
- void sendGET_TABINFOREF(Signal* signal,
- GetTabInfoReq*,
- GetTabInfoRef::ErrorCode errorCode);
-
- void sendGET_TABLEID_REF(Signal* signal,
- GetTableIdReq * req,
- GetTableIdRef::ErrorCode errorCode);
-
- void sendGetTabResponse(Signal* signal);
-
- /* ------------------------------------------------------------ */
- // Indexes and triggers
- /* ------------------------------------------------------------ */
-
- // reactivate and rebuild indexes on start up
- void activateIndexes(Signal* signal, Uint32 i);
- void rebuildIndexes(Signal* signal, Uint32 i);
-
- // create index
- void createIndex_recvReply(Signal* signal, const CreateIndxConf* conf,
- const CreateIndxRef* ref);
- void createIndex_slavePrepare(Signal* signal, OpCreateIndexPtr opPtr);
- void createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr);
- void createIndex_fromCreateTable(Signal* signal, OpCreateIndexPtr opPtr);
- void createIndex_toAlterIndex(Signal* signal, OpCreateIndexPtr opPtr);
- void createIndex_fromAlterIndex(Signal* signal, OpCreateIndexPtr opPtr);
- void createIndex_slaveCommit(Signal* signal, OpCreateIndexPtr opPtr);
- void createIndex_slaveAbort(Signal* signal, OpCreateIndexPtr opPtr);
- void createIndex_sendSlaveReq(Signal* signal, OpCreateIndexPtr opPtr);
- void createIndex_sendReply(Signal* signal, OpCreateIndexPtr opPtr, bool);
- // drop index
- void dropIndex_recvReply(Signal* signal, const DropIndxConf* conf,
- const DropIndxRef* ref);
- void dropIndex_slavePrepare(Signal* signal, OpDropIndexPtr opPtr);
- void dropIndex_toAlterIndex(Signal* signal, OpDropIndexPtr opPtr);
- void dropIndex_fromAlterIndex(Signal* signal, OpDropIndexPtr opPtr);
- void dropIndex_toDropTable(Signal* signal, OpDropIndexPtr opPtr);
- void dropIndex_fromDropTable(Signal* signal, OpDropIndexPtr opPtr);
- void dropIndex_slaveCommit(Signal* signal, OpDropIndexPtr opPtr);
- void dropIndex_slaveAbort(Signal* signal, OpDropIndexPtr opPtr);
- void dropIndex_sendSlaveReq(Signal* signal, OpDropIndexPtr opPtr);
- void dropIndex_sendReply(Signal* signal, OpDropIndexPtr opPtr, bool);
- // alter index
- void alterIndex_recvReply(Signal* signal, const AlterIndxConf* conf,
- const AlterIndxRef* ref);
- void alterIndex_slavePrepare(Signal* signal, OpAlterIndexPtr opPtr);
- void alterIndex_toCreateTc(Signal* signal, OpAlterIndexPtr opPtr);
- void alterIndex_fromCreateTc(Signal* signal, OpAlterIndexPtr opPtr);
- void alterIndex_toDropTc(Signal* signal, OpAlterIndexPtr opPtr);
- void alterIndex_fromDropTc(Signal* signal, OpAlterIndexPtr opPtr);
- void alterIndex_toCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr);
- void alterIndex_fromCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr);
- void alterIndex_toDropTrigger(Signal* signal, OpAlterIndexPtr opPtr);
- void alterIndex_fromDropTrigger(Signal* signal, OpAlterIndexPtr opPtr);
- void alterIndex_toBuildIndex(Signal* signal, OpAlterIndexPtr opPtr);
- void alterIndex_fromBuildIndex(Signal* signal, OpAlterIndexPtr opPtr);
- void alterIndex_slaveCommit(Signal* signal, OpAlterIndexPtr opPtr);
- void alterIndex_slaveAbort(Signal* signal, OpAlterIndexPtr opPtr);
- void alterIndex_sendSlaveReq(Signal* signal, OpAlterIndexPtr opPtr);
- void alterIndex_sendReply(Signal* signal, OpAlterIndexPtr opPtr, bool);
- // build index
- void buildIndex_recvReply(Signal* signal, const BuildIndxConf* conf,
- const BuildIndxRef* ref);
- void buildIndex_toCreateConstr(Signal* signal, OpBuildIndexPtr opPtr);
- void buildIndex_fromCreateConstr(Signal* signal, OpBuildIndexPtr opPtr);
- void buildIndex_buildTrix(Signal* signal, OpBuildIndexPtr opPtr);
- void buildIndex_toDropConstr(Signal* signal, OpBuildIndexPtr opPtr);
- void buildIndex_fromDropConstr(Signal* signal, OpBuildIndexPtr opPtr);
- void buildIndex_toOnline(Signal* signal, OpBuildIndexPtr opPtr);
- void buildIndex_fromOnline(Signal* signal, OpBuildIndexPtr opPtr);
- void buildIndex_sendSlaveReq(Signal* signal, OpBuildIndexPtr opPtr);
- void buildIndex_sendReply(Signal* signal, OpBuildIndexPtr opPtr, bool);
-
- // Events
- void
- createEventUTIL_PREPARE(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode);
- void
- createEventUTIL_EXECUTE(Signal *signal,
- Uint32 callbackData,
- Uint32 returnCode);
- void
- dropEventUTIL_PREPARE_READ(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode);
- void
- dropEventUTIL_EXECUTE_READ(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode);
- void
- dropEventUTIL_PREPARE_DELETE(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode);
- void
- dropEventUTIL_EXECUTE_DELETE(Signal *signal,
- Uint32 callbackData,
- Uint32 returnCode);
- void
- dropEventUtilPrepareRef(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode);
- void
- dropEventUtilExecuteRef(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode);
- int
- sendSignalUtilReq(Callback *c,
- BlockReference ref,
- GlobalSignalNumber gsn,
- Signal* signal,
- Uint32 length,
- JobBufferLevel jbuf,
- LinearSectionPtr ptr[3],
- Uint32 noOfSections);
- int
- recvSignalUtilReq(Signal* signal, Uint32 returnCode);
-
- void completeSubStartReq(Signal* signal, Uint32 ptrI, Uint32 returnCode);
- void completeSubStopReq(Signal* signal, Uint32 ptrI, Uint32 returnCode);
- void completeSubRemoveReq(Signal* signal, Uint32 ptrI, Uint32 returnCode);
-
- void dropEvent_sendReply(Signal* signal,
- OpDropEventPtr evntRecPtr);
-
- void createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr);
- void createEventComplete_RT_USER_CREATE(Signal* signal,
- OpCreateEventPtr evntRecPtr);
- void createEvent_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr);
- void createEventComplete_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr);
-
- void createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPtr);
-
- void createEvent_nodeFailCallback(Signal* signal, Uint32 eventRecPtrI,
- Uint32 returnCode);
- void createEvent_sendReply(Signal* signal, OpCreateEventPtr evntRecPtr,
- LinearSectionPtr *ptr = NULL, int noLSP = 0);
-
- void prepareTransactionEventSysTable (Callback *c,
- Signal* signal,
- Uint32 senderData,
- UtilPrepareReq::OperationTypeValue prepReq);
- void prepareUtilTransaction(Callback *c,
- Signal* signal,
- Uint32 senderData,
- Uint32 tableId,
- const char *tableName,
- UtilPrepareReq::OperationTypeValue prepReq,
- Uint32 noAttr,
- Uint32 attrIds[],
- const char *attrNames[]);
-
- void executeTransEventSysTable(Callback *c,
- Signal *signal,
- const Uint32 ptrI,
- sysTab_NDBEVENTS_0& m_eventRec,
- const Uint32 prepareId,
- UtilPrepareReq::OperationTypeValue prepReq);
- void executeTransaction(Callback *c,
- Signal* signal,
- Uint32 senderData,
- Uint32 prepareId,
- Uint32 noAttr,
- LinearSectionPtr headerPtr,
- LinearSectionPtr dataPtr);
-
- void parseReadEventSys(Signal *signal, sysTab_NDBEVENTS_0& m_eventRec);
-
- // create trigger
- void createTrigger_recvReply(Signal* signal, const CreateTrigConf* conf,
- const CreateTrigRef* ref);
- void createTrigger_slavePrepare(Signal* signal, OpCreateTriggerPtr opPtr);
- void createTrigger_masterSeize(Signal* signal, OpCreateTriggerPtr opPtr);
- void createTrigger_slaveCreate(Signal* signal, OpCreateTriggerPtr opPtr);
- void createTrigger_toAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr);
- void createTrigger_fromAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr);
- void createTrigger_slaveCommit(Signal* signal, OpCreateTriggerPtr opPtr);
- void createTrigger_slaveAbort(Signal* signal, OpCreateTriggerPtr opPtr);
- void createTrigger_sendSlaveReq(Signal* signal, OpCreateTriggerPtr opPtr);
- void createTrigger_sendReply(Signal* signal, OpCreateTriggerPtr opPtr, bool);
- // drop trigger
- void dropTrigger_recvReply(Signal* signal, const DropTrigConf* conf,
- const DropTrigRef* ref);
- void dropTrigger_slavePrepare(Signal* signal, OpDropTriggerPtr opPtr);
- void dropTrigger_toAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr);
- void dropTrigger_fromAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr);
- void dropTrigger_slaveCommit(Signal* signal, OpDropTriggerPtr opPtr);
- void dropTrigger_slaveAbort(Signal* signal, OpDropTriggerPtr opPtr);
- void dropTrigger_sendSlaveReq(Signal* signal, OpDropTriggerPtr opPtr);
- void dropTrigger_sendReply(Signal* signal, OpDropTriggerPtr opPtr, bool);
- // alter trigger
- void alterTrigger_recvReply(Signal* signal, const AlterTrigConf* conf,
- const AlterTrigRef* ref);
- void alterTrigger_slavePrepare(Signal* signal, OpAlterTriggerPtr opPtr);
- void alterTrigger_toCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr);
- void alterTrigger_fromCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr);
- void alterTrigger_toDropLocal(Signal* signal, OpAlterTriggerPtr opPtr);
- void alterTrigger_fromDropLocal(Signal* signal, OpAlterTriggerPtr opPtr);
- void alterTrigger_slaveCommit(Signal* signal, OpAlterTriggerPtr opPtr);
- void alterTrigger_slaveAbort(Signal* signal, OpAlterTriggerPtr opPtr);
- void alterTrigger_sendSlaveReq(Signal* signal, OpAlterTriggerPtr opPtr);
- void alterTrigger_sendReply(Signal* signal, OpAlterTriggerPtr opPtr, bool);
- // support
- void getTableKeyList(TableRecordPtr tablePtr, AttributeList& list);
- void getIndexAttr(TableRecordPtr indexPtr, Uint32 itAttr, Uint32* id);
- void getIndexAttrList(TableRecordPtr indexPtr, AttributeList& list);
- void getIndexAttrMask(TableRecordPtr indexPtr, AttributeMask& mask);
-
- /* ------------------------------------------------------------ */
- // Initialisation
- /* ------------------------------------------------------------ */
- void initCommonData();
- void initRecords();
- void initConnectRecord();
- void initRetrieveRecord(Signal*, Uint32, Uint32 returnCode);
- void initSchemaRecord();
- void initRestartRecord();
- void initSendSchemaRecord();
- void initReadTableRecord();
- void initWriteTableRecord();
- void initReadSchemaRecord();
- void initWriteSchemaRecord();
-
- void initNodeRecords();
- void initTableRecords();
- void initialiseTableRecord(TableRecordPtr tablePtr);
- void initTriggerRecords();
- void initialiseTriggerRecord(TriggerRecordPtr triggerPtr);
- void initPageRecords();
-
- Uint32 getFsConnRecord();
-
- bool getIsFailed(Uint32 nodeId) const;
-
- void dropTableRef(Signal * signal, DropTableReq *, DropTableRef::ErrorCode);
- void printTables(); // For debugging only
- int handleAlterTab(AlterTabReq * req,
- CreateTableRecord * regAlterTabPtr,
- TableRecordPtr origTablePtr,
- TableRecordPtr newTablePtr);
- void revertAlterTable(Signal * signal,
- Uint32 changeMask,
- Uint32 tableId,
- CreateTableRecord * regAlterTabPtr);
- void alterTableRef(Signal * signal,
- AlterTableReq *, AlterTableRef::ErrorCode,
- ParseDictTabInfoRecord* parseRecord = NULL);
- void alterTabRef(Signal * signal,
- AlterTabReq *, AlterTableRef::ErrorCode,
- ParseDictTabInfoRecord* parseRecord = NULL);
- void alterTab_writeSchemaConf(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode);
- void alterTab_writeTableConf(Signal* signal,
- Uint32 callbackData,
- Uint32 returnCode);
-
- void prepDropTab_nextStep(Signal* signal, DropTableRecordPtr);
- void prepDropTab_complete(Signal* signal, DropTableRecordPtr);
- void prepDropTab_writeSchemaConf(Signal* signal, Uint32 dropTabPtrI, Uint32);
-
- void dropTab_localDROP_TAB_CONF(Signal* signal);
- void dropTab_nextStep(Signal* signal, DropTableRecordPtr);
- void dropTab_complete(Signal* signal, Uint32 dropTabPtrI, Uint32);
- void dropTab_writeSchemaConf(Signal* signal, Uint32 dropTabPtrI, Uint32);
-
- void createTab_prepare(Signal* signal, CreateTabReq * req);
- void createTab_writeSchemaConf1(Signal* signal, Uint32 callback, Uint32);
- void createTab_writeTableConf(Signal* signal, Uint32 callbackData, Uint32);
- void createTab_dih(Signal*, CreateTableRecordPtr,
- SegmentedSectionPtr, Callback*);
- void createTab_dihComplete(Signal* signal, Uint32 callbackData, Uint32);
-
- void createTab_startLcpMutex_locked(Signal* signal, Uint32, Uint32);
- void createTab_startLcpMutex_unlocked(Signal* signal, Uint32, Uint32);
-
- void createTab_commit(Signal* signal, CreateTabReq * req);
- void createTab_writeSchemaConf2(Signal* signal, Uint32 callbackData, Uint32);
- void createTab_alterComplete(Signal*, Uint32 callbackData, Uint32);
-
- void createTab_drop(Signal* signal, CreateTabReq * req);
- void createTab_dropComplete(Signal* signal, Uint32 callbackData, Uint32);
-
- void createTab_reply(Signal* signal, CreateTableRecordPtr, Uint32 nodeId);
- void alterTab_activate(Signal*, CreateTableRecordPtr, Callback*);
-
- void restartCreateTab(Signal*, Uint32, const SchemaFile::TableEntry *, bool);
- void restartCreateTab_readTableConf(Signal* signal, Uint32 callback, Uint32);
- void restartCreateTab_writeTableConf(Signal* signal, Uint32 callback, Uint32);
- void restartCreateTab_dihComplete(Signal* signal, Uint32 callback, Uint32);
- void restartCreateTab_activateComplete(Signal*, Uint32 callback, Uint32);
-
- void restartDropTab(Signal* signal, Uint32 tableId);
- void restartDropTab_complete(Signal*, Uint32 callback, Uint32);
-
- void restart_checkSchemaStatusComplete(Signal*, Uint32 callback, Uint32);
- void restart_writeSchemaConf(Signal*, Uint32 callbackData, Uint32);
- void masterRestart_checkSchemaStatusComplete(Signal*, Uint32, Uint32);
-
- void sendSchemaComplete(Signal*, Uint32 callbackData, Uint32);
-
- // global metadata support
- friend class MetaData;
- int getMetaTablePtr(TableRecordPtr& tablePtr, Uint32 tableId, Uint32 tableVersion);
- int getMetaTable(MetaData::Table& table, Uint32 tableId, Uint32 tableVersion);
- int getMetaTable(MetaData::Table& table, const char* tableName);
- int getMetaAttribute(MetaData::Attribute& attribute, const MetaData::Table& table, Uint32 attributeId);
- int getMetaAttribute(MetaData::Attribute& attribute, const MetaData::Table& table, const char* attributeName);
-};
-
-#endif
diff --git a/ndb/src/kernel/blocks/dbdict/Makefile.am b/ndb/src/kernel/blocks/dbdict/Makefile.am
deleted file mode 100644
index 9a0d68f8148..00000000000
--- a/ndb/src/kernel/blocks/dbdict/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-#SUBDIRS = printSchemafile
-
-noinst_LIBRARIES = libdbdict.a
-
-libdbdict_a_SOURCES = Dbdict.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libdbdict.dsp
-
-libdbdict.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libdbdict_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
deleted file mode 100644
index ee67bf47d7b..00000000000
--- a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
+++ /dev/null
@@ -1,1603 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef DBDIH_H
-#define DBDIH_H
-
-#include <ndb_limits.h>
-#include <pc.hpp>
-#include <SimulatedBlock.hpp>
-#include "Sysfile.hpp"
-#include <ArrayList.hpp>
-#include <SignalCounter.hpp>
-
-#include <signaldata/MasterLCP.hpp>
-#include <signaldata/CopyGCIReq.hpp>
-#include <blocks/mutexes.hpp>
-
-#ifdef DBDIH_C
-
-/*###################*/
-/* FILE SYSTEM FLAGS */
-/*###################*/
-#define ZLIST_OF_PAIRS 0
-#define ZLIST_OF_PAIRS_SYNCH 16
-#define ZOPEN_READ_WRITE 2
-#define ZCREATE_READ_WRITE 0x302
-#define ZCLOSE_NO_DELETE 0
-#define ZCLOSE_DELETE 1
-
-/*###############*/
-/* NODE STATES */
-/*###############*/
-#define ZIDLE 0
-#define ZACTIVE 1
-
-/*#########*/
-/* GENERAL */
-/*#########*/
-#define ZVAR_NO_WORD 1
-#define ZVAR_NO_CRESTART_INFO 20
-#define ZVAR_NO_CRESTART_INFO_TO_FILE 21
-#define ZVALID 1
-#define ZINVALID 2
-
-/*###############*/
-/* ERROR CODES */
-/*###############*/
-// ------------------------------------------
-// Error Codes for Transactions (None so far)
-// ------------------------------------------
-
-// --------------------------------------
-// Error Codes for Add Table
-// --------------------------------------
-#define ZREPLERROR1 306
-#define ZNOTIMPLEMENTED 307
-#define ZTABLEINSTALLED 310
-// --------------------------------------
-// Error Codes for Scan Table
-// --------------------------------------
-#define ZERRONOUSSTATE 308
-
-// --------------------------------------
-// Crash Codes
-// --------------------------------------
-#define ZCOULD_NOT_OCCUR_ERROR 300
-#define ZNOT_MASTER_ERROR 301
-#define ZWRONG_FAILURE_NUMBER_ERROR 302
-#define ZWRONG_START_NODE_ERROR 303
-#define ZNO_REPLICA_FOUND_ERROR 304
-#define ZNODE_ALREADY_STARTING_ERROR 305
-#define ZNODE_START_DISALLOWED_ERROR 309
-
-// --------------------------------------
-// Codes from LQH
-// --------------------------------------
-#define ZNODE_FAILURE_ERROR 400
-
-
-/*#########*/
-/* PHASES */
-/*#########*/
-#define ZNDB_SPH1 1
-#define ZNDB_SPH2 2
-#define ZNDB_SPH3 3
-#define ZNDB_SPH4 4
-#define ZNDB_SPH5 5
-#define ZNDB_SPH6 6
-#define ZNDB_SPH7 7
-#define ZNDB_SPH8 8
-/*#########*/
-/* SIZES */
-/*#########*/
-#define ZPAGEREC 100
-#define ZCREATE_REPLICA_FILE_SIZE 4
-#define ZPROXY_MASTER_FILE_SIZE 10
-#define ZPROXY_FILE_SIZE 10
-#endif
-
-class Dbdih: public SimulatedBlock {
-public:
-
- // Records
-
- /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤
- * THE API CONNECT RECORD IS THE SAME RECORD POINTER AS USED IN THE TC BLOCK
- *
- * IT KEEPS TRACK OF ALL THE OPERATIONS CONNECTED TO THIS TRANSACTION.
- * IT IS LINKED INTO A QUEUE IN CASE THE GLOBAL CHECKPOINT IS CURRENTLY
- * ONGOING */
- struct ApiConnectRecord {
- Uint32 apiGci;
- Uint32 nextApi;
- };
- typedef Ptr<ApiConnectRecord> ApiConnectRecordPtr;
-
- /*############## CONNECT_RECORD ##############*/
- /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
- /* THE CONNECT RECORD IS CREATED WHEN A TRANSACTION HAS TO START. IT KEEPS
- ALL INTERMEDIATE INFORMATION NECESSARY FOR THE TRANSACTION FROM THE
- DISTRIBUTED MANAGER. THE RECORD KEEPS INFORMATION ABOUT THE
- OPERATIONS THAT HAVE TO BE CARRIED OUT BY THE TRANSACTION AND
-     ALSO THE TRAIL OF NODES FOR EACH OPERATION IN THE
- TRANSACTION.
- */
- struct ConnectRecord {
- enum ConnectState {
- INUSE = 0,
- FREE = 1,
- STARTED = 2
- };
- Uint32 nodes[MAX_REPLICAS];
- ConnectState connectState;
- Uint32 nfConnect;
- Uint32 table;
- Uint32 userpointer;
- BlockReference userblockref;
- };
- typedef Ptr<ConnectRecord> ConnectRecordPtr;
-
- /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
- /* THESE RECORDS ARE USED WHEN CREATING REPLICAS DURING SYSTEM */
- /* RESTART. I NEED A COMPLEX DATA STRUCTURE DESCRIBING THE REPLICAS */
- /* I WILL TRY TO CREATE FOR EACH FRAGMENT. */
- /* */
- /* I STORE A REFERENCE TO THE FOUR POSSIBLE CREATE REPLICA RECORDS */
- /* IN A COMMON STORED VARIABLE. I ALLOW A MAXIMUM OF 4 REPLICAS TO */
- /* BE RESTARTED PER FRAGMENT. */
- /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
- struct CreateReplicaRecord {
- Uint32 logStartGci[MAX_LOG_EXEC];
- Uint32 logStopGci[MAX_LOG_EXEC];
- Uint16 logNodeId[MAX_LOG_EXEC];
- Uint32 createLcpId;
-
- bool hotSpareUse;
- Uint32 replicaRec;
- Uint16 dataNodeId;
- Uint16 lcpNo;
- Uint16 noLogNodes;
- };
- typedef Ptr<CreateReplicaRecord> CreateReplicaRecordPtr;
-
- /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
- /* THIS RECORD CONTAINS A FILE DESCRIPTION. THERE ARE TWO */
- /* FILES PER TABLE TO RAISE SECURITY LEVEL AGAINST DISK CRASHES. */
- /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
- struct FileRecord {
- enum FileStatus {
- CLOSED = 0,
- CRASHED = 1,
- OPEN = 2
- };
- enum FileType {
- TABLE_FILE = 0,
- GCP_FILE = 1
- };
- enum ReqStatus {
- IDLE = 0,
- CREATING_GCP = 1,
- OPENING_GCP = 2,
- OPENING_COPY_GCI = 3,
- WRITING_COPY_GCI = 4,
- CREATING_COPY_GCI = 5,
- OPENING_TABLE = 6,
- READING_GCP = 7,
- READING_TABLE = 8,
- WRITE_INIT_GCP = 9,
- TABLE_CREATE = 10,
- TABLE_WRITE = 11,
- TABLE_CLOSE = 12,
- CLOSING_GCP = 13,
- CLOSING_TABLE_CRASH = 14,
- CLOSING_TABLE_SR = 15,
- CLOSING_GCP_CRASH = 16,
- TABLE_OPEN_FOR_DELETE = 17,
- TABLE_CLOSE_DELETE = 18
- };
- Uint32 fileName[4];
- Uint32 fileRef;
- FileStatus fileStatus;
- FileType fileType;
- Uint32 nextFile;
- ReqStatus reqStatus;
- Uint32 tabRef;
- };
- typedef Ptr<FileRecord> FileRecordPtr;
-
- /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
-  /* THIS RECORD KEEPS THE STORAGE AND DECISION INFORMATION OF A FRAGMENT  */
-  /* AND ITS REPLICAS. IF THE FRAGMENT HAS MORE THAN ONE BACKUP            */
-  /* REPLICA THEN A LIST OF ADDITIONAL NODES IS ATTACHED TO THIS RECORD.   */
-  /* EACH RECORD IN THAT LIST HAS INFORMATION ABOUT ONE BACKUP. THIS RECORD*/
-  /* ALSO HAS THE STATUS OF THE FRAGMENT.                                  */
- /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
- /* */
- /* FRAGMENTSTORE RECORD ALIGNED TO BE 64 BYTES */
- /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
- struct Fragmentstore {
- Uint16 activeNodes[MAX_REPLICAS];
- Uint32 preferredPrimary;
-
- Uint32 oldStoredReplicas; /* "DEAD" STORED REPLICAS */
- Uint32 storedReplicas; /* "ALIVE" STORED REPLICAS */
- Uint32 nextFragmentChunk;
-
- Uint8 distributionKey;
- Uint8 fragReplicas;
- Uint8 noOldStoredReplicas; /* NUMBER OF "DEAD" STORED REPLICAS */
- Uint8 noStoredReplicas; /* NUMBER OF "ALIVE" STORED REPLICAS*/
- Uint8 noLcpReplicas; ///< No of replicas remaining to be LCP:ed
- };
- typedef Ptr<Fragmentstore> FragmentstorePtr;
-
-  /*########### NODE GROUP RECORD ############*/
- /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
- /* THIS RECORD KEEPS INFORMATION ABOUT NODE GROUPS. */
- /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
- struct NodeGroupRecord {
- Uint32 nodesInGroup[MAX_REPLICAS + 1];
- Uint32 nextReplicaNode;
- Uint32 nodeCount;
- bool activeTakeOver;
- };
- typedef Ptr<NodeGroupRecord> NodeGroupRecordPtr;
- /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
- /* THIS RECORD KEEPS INFORMATION ABOUT NODES. */
- /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
- /* RECORD ALIGNED TO BE 64 BYTES. */
- /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
- enum NodefailHandlingStep {
- NF_REMOVE_NODE_FROM_TABLE = 1,
- NF_GCP_TAKE_OVER = 2,
- NF_LCP_TAKE_OVER = 4
- };
-
- struct NodeRecord {
- NodeRecord();
-
- enum NodeStatus {
- NOT_IN_CLUSTER = 0,
- ALIVE = 1,
- STARTING = 2,
- DIED_NOW = 3,
- DYING = 4,
- DEAD = 5
- };
-
- struct FragmentCheckpointInfo {
- Uint32 tableId;
- Uint32 fragId;
- Uint32 replicaPtr;
- };
-
- enum GcpState {
- READY = 0,
- PREPARE_SENT = 1,
- PREPARE_RECEIVED = 2,
- COMMIT_SENT = 3,
- NODE_FINISHED = 4,
- SAVE_REQ_SENT = 5,
- SAVE_RECEIVED = 6,
- COPY_GCI_SENT = 7
- };
-
- GcpState gcpstate;
- Sysfile::ActiveStatus activeStatus;
-
- NodeStatus nodeStatus;
- bool useInTransactions;
- bool allowNodeStart;
- bool copyCompleted;
- bool m_inclDihLcp;
-
- FragmentCheckpointInfo startedChkpt[2];
- FragmentCheckpointInfo queuedChkpt[2];
-
- Bitmask<1> m_nodefailSteps;
- Uint32 activeTabptr;
- Uint32 nextNode;
- Uint32 nodeGroup;
-
- SignalCounter m_NF_COMPLETE_REP;
-
- Uint8 dbtcFailCompleted;
- Uint8 dblqhFailCompleted;
- Uint8 dbdihFailCompleted;
- Uint8 dbdictFailCompleted;
- Uint8 recNODE_FAILREP;
-
- Uint8 noOfQueuedChkpt;
- Uint8 noOfStartedChkpt;
-
- MasterLCPConf::State lcpStateAtTakeOver;
- };
- typedef Ptr<NodeRecord> NodeRecordPtr;
- /**********************************************************************/
- /* THIS RECORD KEEPS THE INFORMATION ABOUT A TABLE AND ITS FRAGMENTS */
- /**********************************************************************/
- struct PageRecord {
- Uint32 word[2048];
- /* 8 KBYTE PAGE*/
- Uint32 nextfreepage;
- };
- typedef Ptr<PageRecord> PageRecordPtr;
-
- /************ REPLICA RECORD *************/
- /**********************************************************************/
- /* THIS RECORD KEEPS THE INFORMATION ABOUT A REPLICA OF A FRAGMENT */
- /**********************************************************************/
- struct ReplicaRecord {
- /* -------------------------------------------------------------------- */
- /* THE GLOBAL CHECKPOINT IDENTITY WHEN THIS REPLICA WAS CREATED. */
-    /* THERE IS ONE INDEX PER REPLICA. A REPLICA INDEX IS CREATED WHEN A NODE */
- /* CRASH OCCURS. */
- /* -------------------------------------------------------------------- */
- Uint32 createGci[8];
- /* -------------------------------------------------------------------- */
- /* THE LAST GLOBAL CHECKPOINT IDENTITY WHICH HAS BEEN SAVED ON DISK. */
- /* THIS VARIABLE IS ONLY VALID FOR REPLICAS WHICH HAVE "DIED". A REPLICA*/
- /* "DIES" EITHER WHEN THE NODE CRASHES THAT KEPT THE REPLICA OR BY BEING*/
- /* STOPPED IN A CONTROLLED MANNER. */
-    /* THERE IS ONE INDEX PER REPLICA. A REPLICA INDEX IS CREATED WHEN A NODE */
- /* CRASH OCCURS. */
- /* -------------------------------------------------------------------- */
- Uint32 replicaLastGci[8];
- /* -------------------------------------------------------------------- */
- /* THE LOCAL CHECKPOINT IDENTITY OF A LOCAL CHECKPOINT. */
- /* -------------------------------------------------------------------- */
- Uint32 lcpId[MAX_LCP_STORED];
- /* -------------------------------------------------------------------- */
- /* THIS VARIABLE KEEPS TRACK OF THE MAXIMUM GLOBAL CHECKPOINT COMPLETED */
- /* FOR EACH OF THE LOCAL CHECKPOINTS IN THIS FRAGMENT REPLICA. */
- /* -------------------------------------------------------------------- */
- Uint32 maxGciCompleted[MAX_LCP_STORED];
- /* -------------------------------------------------------------------- */
-    /* THIS VARIABLE KEEPS TRACK OF THE MINIMUM GLOBAL CHECKPOINT STARTED FOR */
- /* EACH OF THE LOCAL CHECKPOINTS IN THIS FRAGMENT REPLICA. */
- /* -------------------------------------------------------------------- */
- Uint32 maxGciStarted[MAX_LCP_STORED];
- /* -------------------------------------------------------------------- */
- /* THE GLOBAL CHECKPOINT IDENTITY WHEN THE TABLE WAS CREATED. */
- /* -------------------------------------------------------------------- */
- Uint32 initialGci;
-
- /* -------------------------------------------------------------------- */
- /* THE REFERENCE TO THE NEXT REPLICA. EITHER IT REFERS TO THE NEXT IN */
- /* THE FREE LIST OR IT REFERS TO THE NEXT IN A LIST OF REPLICAS ON A */
- /* FRAGMENT. */
- /* -------------------------------------------------------------------- */
- Uint32 nextReplica;
-
- /* -------------------------------------------------------------------- */
- /* THE NODE ID WHERE THIS REPLICA IS STORED. */
- /* -------------------------------------------------------------------- */
- Uint16 procNode;
-
- /* -------------------------------------------------------------------- */
- /* The last local checkpoint id started or queued on this replica. */
- /* -------------------------------------------------------------------- */
- Uint32 lcpIdStarted; // Started or queued
-
- /* -------------------------------------------------------------------- */
-    /* THIS VARIABLE SPECIFIES THE STATUS OF THE LOCAL CHECKPOINT. IT CAN   */
-    /* EITHER BE VALID OR INVALID. AT CREATION OF A FRAGMENT REPLICA ALL    */
-    /* LCP'S ARE INVALID. ALSO, IF INDEX >= NO_LCP THEN THE LOCAL CHECKPOINT*/
-    /* IS ALWAYS INVALID. IF THE LCP BEFORE THE NEXT_LCP HAS AN LCP_ID THAT */
-    /* DIFFERS FROM THE LATEST LCP_ID STARTED THEN THE NEXT_LCP IS ALSO     */
-    /* INVALID.                                                             */
- /* -------------------------------------------------------------------- */
- Uint8 lcpStatus[MAX_LCP_STORED];
-
- /* -------------------------------------------------------------------- */
- /* THE NEXT LOCAL CHECKPOINT TO EXECUTE IN THIS FRAGMENT REPLICA. */
- /* -------------------------------------------------------------------- */
- Uint8 nextLcp;
-
- /* -------------------------------------------------------------------- */
-    /* THE NUMBER OF CRASHED REPLICAS IN THIS REPLICA SO FAR.               */
- /* -------------------------------------------------------------------- */
- Uint8 noCrashedReplicas;
-
- /**
-     * Is an LCP currently ongoing on this fragment
- */
- Uint8 lcpOngoingFlag;
- };
- typedef Ptr<ReplicaRecord> ReplicaRecordPtr;
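A hedged sketch of the lcpStatus rule described in the comment above; the helper name is made up, ZVALID is the constant defined earlier in this header under DBDIH_C, and the extra check against the latest started lcpId mentioned in the comment is omitted here.

  // Sketch only: an LCP slot is usable when it lies inside the stored
  // window and is marked valid for this replica.
  static bool lcpSlotUsable(const ReplicaRecord& replica, Uint32 lcpNo) {
    if (lcpNo >= MAX_LCP_STORED)
      return false;                        // outside the window: always invalid
    return replica.lcpStatus[lcpNo] == ZVALID;
  }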
-
-  /*************************************************************************
-   * TAB_DESCRIPTOR IS A DESCRIPTOR OF THE LOCATION OF THE FRAGMENTS BELONGING
-   * TO THE TABLE. THE INFORMATION ABOUT THE FRAGMENTS OF A TABLE IS STORED IN
-   * CHUNKS OF FRAGMENTSTORE RECORDS.
-   * THIS RECORD ALSO HAS THE NECESSARY INFORMATION TO LOCATE A FRAGMENT AND
-   * TO TRANSLATE A KEY OF A TUPLE TO THE FRAGMENT IT BELONGS TO.
-   */
- struct TabRecord {
- /**
- * State for copying table description into pages
- */
- enum CopyStatus {
- CS_IDLE,
- CS_SR_PHASE1_READ_PAGES,
- CS_SR_PHASE2_READ_TABLE,
- CS_SR_PHASE3_COPY_TABLE,
- CS_REMOVE_NODE,
- CS_LCP_READ_TABLE,
- CS_COPY_TAB_REQ,
- CS_COPY_NODE_STATE,
- CS_ADD_TABLE_MASTER,
- CS_ADD_TABLE_SLAVE,
- CS_INVALIDATE_NODE_LCP
- };
- /**
- * State for copying pages to disk
- */
- enum UpdateState {
- US_IDLE,
- US_LOCAL_CHECKPOINT,
- US_REMOVE_NODE,
- US_COPY_TAB_REQ,
- US_ADD_TABLE_MASTER,
- US_ADD_TABLE_SLAVE,
- US_INVALIDATE_NODE_LCP
- };
- enum TabLcpStatus {
- TLS_ACTIVE = 1,
- TLS_WRITING_TO_FILE = 2,
- TLS_COMPLETED = 3
- };
- enum TabStatus {
- TS_IDLE = 0,
- TS_ACTIVE = 1,
- TS_CREATING = 2,
- TS_DROPPING = 3
- };
- enum Method {
- HASH = 0,
- NOTDEFINED = 1
- };
- CopyStatus tabCopyStatus;
- UpdateState tabUpdateState;
- TabLcpStatus tabLcpStatus;
- TabStatus tabStatus;
- Method method;
-
- Uint32 pageRef[8];
-//-----------------------------------------------------------------------------
-// Each entry in this array contains a reference to 16 fragment records in a
-// row. Thus, given the fragment id, the correct record is found very quickly.
-//-----------------------------------------------------------------------------
- Uint32 startFid[MAX_NDB_NODES];
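
A hedged, standalone illustration of the chunked lookup described in the comment above startFid; the chunk size of 16 follows that comment and the function name is illustrative only:

    // Given a fragment id, the fragment record index is computed directly,
    // with no searching: pick the chunk, then the offset inside it.
    unsigned fragRecordIndex(const unsigned startFid[], unsigned fragId)
    {
      unsigned chunkNo  = fragId / 16;    // which group of 16 fragment records
      unsigned chunkIdx = fragId % 16;    // position inside that group
      return startFid[chunkNo] + chunkIdx;
    }
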
-
- Uint32 tabFile[2];
- Uint32 connectrec;
- Uint32 hashpointer;
- Uint32 mask;
- Uint32 noOfWords;
- Uint32 schemaVersion;
- Uint32 tabRemoveNode;
- Uint32 totalfragments;
- Uint32 noOfFragChunks;
- Uint32 tabErrorCode;
- struct {
- Uint32 tabUserRef;
- Uint32 tabUserPtr;
- } m_dropTab;
-
- struct DropTable {
- Uint32 senderRef;
- Uint32 senderData;
- SignalCounter waitDropTabCount;
- } m_prepDropTab;
-
- Uint8 kvalue;
- Uint8 noOfBackups;
- Uint8 noPages;
- Uint8 storedTable; /* 0 IF THE TABLE IS A TEMPORARY TABLE */
- Uint16 tableType;
- Uint16 primaryTableId;
- };
- typedef Ptr<TabRecord> TabRecordPtr;
-
- /***************************************************************************/
- /* THIS RECORD IS USED TO KEEP TRACK OF TAKE OVER AND STARTING A NODE. */
- /* WE KEEP IT IN A RECORD TO ENABLE IT TO BE PARALLELISED IN THE FUTURE. */
- /**************************************************************************/
- struct TakeOverRecord {
- enum ToMasterStatus {
- IDLE = 0,
- TO_WAIT_START_TAKE_OVER = 1,
- TO_START_COPY = 2,
- TO_START_COPY_ONGOING = 3,
- TO_WAIT_START = 4,
- STARTING = 5,
- SELECTING_NEXT = 6,
- TO_WAIT_PREPARE_CREATE = 9,
- PREPARE_CREATE = 10,
- COPY_FRAG = 11,
- TO_WAIT_UPDATE_TO = 12,
- TO_UPDATE_TO = 13,
- COPY_ACTIVE = 14,
- TO_WAIT_COMMIT_CREATE = 15,
- LOCK_MUTEX = 23,
- COMMIT_CREATE = 16,
- TO_COPY_COMPLETED = 17,
- WAIT_LCP = 18,
- TO_END_COPY = 19,
- TO_END_COPY_ONGOING = 20,
- TO_WAIT_ENDING = 21,
- ENDING = 22
- };
- enum ToSlaveStatus {
- TO_SLAVE_IDLE = 0,
- TO_SLAVE_STARTED = 1,
- TO_SLAVE_CREATE_PREPARE = 2,
- TO_SLAVE_COPY_FRAG_COMPLETED = 3,
- TO_SLAVE_CREATE_COMMIT = 4,
- TO_SLAVE_COPY_COMPLETED = 5
- };
- Uint32 startGci;
- Uint32 toCopyNode;
- Uint32 toCurrentFragid;
- Uint32 toCurrentReplica;
- Uint32 toCurrentTabref;
- Uint32 toFailedNode;
- Uint32 toStartingNode;
- Uint32 nextTakeOver;
- Uint32 prevTakeOver;
- bool toNodeRestart;
- ToMasterStatus toMasterStatus;
- ToSlaveStatus toSlaveStatus;
- MutexHandle2<DIH_SWITCH_PRIMARY_MUTEX> m_switchPrimaryMutexHandle;
- };
- typedef Ptr<TakeOverRecord> TakeOverRecordPtr;
-
-public:
- Dbdih(const class Configuration &);
- virtual ~Dbdih();
-
- struct RWFragment {
- Uint32 pageIndex;
- Uint32 wordIndex;
- Uint32 fragId;
- TabRecordPtr rwfTabPtr;
- PageRecordPtr rwfPageptr;
- };
- struct CopyTableNode {
- Uint32 pageIndex;
- Uint32 wordIndex;
- Uint32 noOfWords;
- TabRecordPtr ctnTabPtr;
- PageRecordPtr ctnPageptr;
- };
-
-private:
- BLOCK_DEFINES(Dbdih);
-
- void execDUMP_STATE_ORD(Signal *);
- void execNDB_TAMPER(Signal *);
- void execDEBUG_SIG(Signal *);
- void execEMPTY_LCP_CONF(Signal *);
- void execMASTER_GCPREF(Signal *);
- void execMASTER_GCPREQ(Signal *);
- void execMASTER_GCPCONF(Signal *);
- void execMASTER_LCPREF(Signal *);
- void execMASTER_LCPREQ(Signal *);
- void execMASTER_LCPCONF(Signal *);
- void execNF_COMPLETEREP(Signal *);
- void execSTART_PERMREQ(Signal *);
- void execSTART_PERMCONF(Signal *);
- void execSTART_PERMREF(Signal *);
- void execINCL_NODEREQ(Signal *);
- void execINCL_NODECONF(Signal *);
- void execEND_TOREQ(Signal *);
- void execEND_TOCONF(Signal *);
- void execSTART_TOREQ(Signal *);
- void execSTART_TOCONF(Signal *);
- void execSTART_MEREQ(Signal *);
- void execSTART_MECONF(Signal *);
- void execSTART_MEREF(Signal *);
- void execSTART_COPYREQ(Signal *);
- void execSTART_COPYCONF(Signal *);
- void execSTART_COPYREF(Signal *);
- void execCREATE_FRAGREQ(Signal *);
- void execCREATE_FRAGCONF(Signal *);
- void execDIVERIFYREQ(Signal *);
- void execGCP_SAVECONF(Signal *);
- void execGCP_PREPARECONF(Signal *);
- void execGCP_PREPARE(Signal *);
- void execGCP_NODEFINISH(Signal *);
- void execGCP_COMMIT(Signal *);
- void execDIHNDBTAMPER(Signal *);
- void execCONTINUEB(Signal *);
- void execCOPY_GCIREQ(Signal *);
- void execCOPY_GCICONF(Signal *);
- void execCOPY_TABREQ(Signal *);
- void execCOPY_TABCONF(Signal *);
- void execTCGETOPSIZECONF(Signal *);
- void execTC_CLOPSIZECONF(Signal *);
-
- void execLCP_FRAG_REP(Signal *);
- void execLCP_COMPLETE_REP(Signal *);
- void execSTART_LCP_REQ(Signal *);
- void execSTART_LCP_CONF(Signal *);
- MutexHandle2<DIH_START_LCP_MUTEX> c_startLcpMutexHandle;
- void startLcpMutex_locked(Signal* signal, Uint32, Uint32);
- void startLcpMutex_unlocked(Signal* signal, Uint32, Uint32);
-
- MutexHandle2<DIH_SWITCH_PRIMARY_MUTEX> c_switchPrimaryMutexHandle;
- void switchPrimaryMutex_locked(Signal* signal, Uint32, Uint32);
- void switchPrimaryMutex_unlocked(Signal* signal, Uint32, Uint32);
- void switch_primary_stop_node(Signal* signal, Uint32, Uint32);
-
- void execBLOCK_COMMIT_ORD(Signal *);
- void execUNBLOCK_COMMIT_ORD(Signal *);
-
- void execDIH_SWITCH_REPLICA_REQ(Signal *);
- void execDIH_SWITCH_REPLICA_REF(Signal *);
- void execDIH_SWITCH_REPLICA_CONF(Signal *);
-
- void execSTOP_PERM_REQ(Signal *);
- void execSTOP_PERM_REF(Signal *);
- void execSTOP_PERM_CONF(Signal *);
-
- void execSTOP_ME_REQ(Signal *);
- void execSTOP_ME_REF(Signal *);
- void execSTOP_ME_CONF(Signal *);
-
- void execREAD_CONFIG_REQ(Signal *);
- void execUNBLO_DICTCONF(Signal *);
- void execCOPY_ACTIVECONF(Signal *);
- void execTAB_COMMITREQ(Signal *);
- void execNODE_FAILREP(Signal *);
- void execCOPY_FRAGCONF(Signal *);
- void execCOPY_FRAGREF(Signal *);
- void execDIADDTABREQ(Signal *);
- void execDIGETNODESREQ(Signal *);
- void execDIRELEASEREQ(Signal *);
- void execDISEIZEREQ(Signal *);
- void execSTTOR(Signal *);
- void execDI_FCOUNTREQ(Signal *);
- void execDIGETPRIMREQ(Signal *);
- void execGCP_SAVEREF(Signal *);
- void execGCP_TCFINISHED(Signal *);
- void execREAD_NODESCONF(Signal *);
- void execNDB_STTOR(Signal *);
- void execDICTSTARTCONF(Signal *);
- void execNDB_STARTREQ(Signal *);
- void execGETGCIREQ(Signal *);
- void execDIH_RESTARTREQ(Signal *);
- void execSTART_RECCONF(Signal *);
- void execSTART_FRAGCONF(Signal *);
- void execADD_FRAGCONF(Signal *);
- void execADD_FRAGREF(Signal *);
- void execFSOPENCONF(Signal *);
- void execFSOPENREF(Signal *);
- void execFSCLOSECONF(Signal *);
- void execFSCLOSEREF(Signal *);
- void execFSREADCONF(Signal *);
- void execFSREADREF(Signal *);
- void execFSWRITECONF(Signal *);
- void execFSWRITEREF(Signal *);
- void execSET_VAR_REQ(Signal *);
- void execCHECKNODEGROUPSREQ(Signal *);
- void execSTART_INFOREQ(Signal*);
- void execSTART_INFOREF(Signal*);
- void execSTART_INFOCONF(Signal*);
- void execWAIT_GCP_REQ(Signal* signal);
- void execWAIT_GCP_REF(Signal* signal);
- void execWAIT_GCP_CONF(Signal* signal);
- void execUPDATE_TOREQ(Signal* signal);
- void execUPDATE_TOCONF(Signal* signal);
-
- void execPREP_DROP_TAB_REQ(Signal* signal);
- void execWAIT_DROP_TAB_REF(Signal* signal);
- void execWAIT_DROP_TAB_CONF(Signal* signal);
- void execDROP_TAB_REQ(Signal* signal);
-
- void execALTER_TAB_REQ(Signal* signal);
-
- void execCREATE_FRAGMENTATION_REQ(Signal*);
-
- void waitDropTabWritingToFile(Signal *, TabRecordPtr tabPtr);
- void checkPrepDropTabComplete(Signal *, TabRecordPtr tabPtr);
- void checkWaitDropTabFailedLqh(Signal *, Uint32 nodeId, Uint32 tableId);
-
- // Statement blocks
-//------------------------------------
-// Methods that send signals
-//------------------------------------
- void nullRoutine(Signal *, Uint32 nodeId);
- void sendCOPY_GCIREQ(Signal *, Uint32 nodeId);
- void sendDIH_SWITCH_REPLICA_REQ(Signal *, Uint32 nodeId);
- void sendEMPTY_LCP_REQ(Signal *, Uint32 nodeId);
- void sendEND_TOREQ(Signal *, Uint32 nodeId);
- void sendGCP_COMMIT(Signal *, Uint32 nodeId);
- void sendGCP_PREPARE(Signal *, Uint32 nodeId);
- void sendGCP_SAVEREQ(Signal *, Uint32 nodeId);
- void sendINCL_NODEREQ(Signal *, Uint32 nodeId);
- void sendMASTER_GCPREQ(Signal *, Uint32 nodeId);
- void sendMASTER_LCPREQ(Signal *, Uint32 nodeId);
- void sendMASTER_LCPCONF(Signal * signal);
- void sendSTART_RECREQ(Signal *, Uint32 nodeId);
- void sendSTART_INFOREQ(Signal *, Uint32 nodeId);
- void sendSTART_TOREQ(Signal *, Uint32 nodeId);
- void sendSTOP_ME_REQ(Signal *, Uint32 nodeId);
- void sendTC_CLOPSIZEREQ(Signal *, Uint32 nodeId);
- void sendTCGETOPSIZEREQ(Signal *, Uint32 nodeId);
- void sendUPDATE_TOREQ(Signal *, Uint32 nodeId);
- void sendSTART_LCP_REQ(Signal *, Uint32 nodeId);
-
- void sendLCP_FRAG_ORD(Signal*, NodeRecord::FragmentCheckpointInfo info);
- void sendLastLCP_FRAG_ORD(Signal *);
-
- void sendCopyTable(Signal *, CopyTableNode* ctn,
- BlockReference ref, Uint32 reqinfo);
- void sendCreateFragReq(Signal *,
- Uint32 startGci,
- Uint32 storedType,
- Uint32 takeOverPtr);
- void sendDihfragreq(Signal *,
- TabRecordPtr regTabPtr,
- Uint32 fragId);
- void sendStartFragreq(Signal *,
- TabRecordPtr regTabPtr,
- Uint32 fragId);
- void sendHOT_SPAREREP(Signal *);
- void sendAddFragreq(Signal *,
- TabRecordPtr regTabPtr,
- Uint32 fragId,
- Uint32 lcpNo,
- Uint32 param);
-
- void sendAddFragreq(Signal*, ConnectRecordPtr, TabRecordPtr, Uint32 fragId);
- void addTable_closeConf(Signal* signal, Uint32 tabPtrI);
- void resetReplicaSr(TabRecordPtr tabPtr);
- void resetReplicaLcp(ReplicaRecord * replicaP, Uint32 stopGci);
-
-//------------------------------------
-// Methods for LCP functionality
-//------------------------------------
- void checkKeepGci(Uint32 replicaStartIndex);
- void checkLcpStart(Signal *, Uint32 lineNo);
- void checkStartMoreLcp(Signal *, Uint32 nodeId);
- bool reportLcpCompletion(const class LcpFragRep *);
- void sendLCP_COMPLETE_REP(Signal *);
-
-//------------------------------------
-// Methods for Delete Table Files
-//------------------------------------
- void startDeleteFile(Signal* signal, TabRecordPtr tabPtr);
- void openTableFileForDelete(Signal* signal, Uint32 fileIndex);
- void tableOpenLab(Signal* signal, FileRecordPtr regFilePtr);
- void tableDeleteLab(Signal* signal, FileRecordPtr regFilePtr);
-
-//------------------------------------
-// File Record specific methods
-//------------------------------------
- void closeFile(Signal *, FileRecordPtr regFilePtr);
- void closeFileDelete(Signal *, FileRecordPtr regFilePtr);
- void createFileRw(Signal *, FileRecordPtr regFilePtr);
- void openFileRw(Signal *, FileRecordPtr regFilePtr);
- void openFileRo(Signal *, FileRecordPtr regFilePtr);
- void seizeFile(FileRecordPtr& regFilePtr);
- void releaseFile(Uint32 fileIndex);
-
-//------------------------------------
-// Methods called when completing file
-// operation.
-//------------------------------------
- void creatingGcpLab(Signal *, FileRecordPtr regFilePtr);
- void openingGcpLab(Signal *, FileRecordPtr regFilePtr);
- void openingTableLab(Signal *, FileRecordPtr regFilePtr);
- void tableCreateLab(Signal *, FileRecordPtr regFilePtr);
- void creatingGcpErrorLab(Signal *, FileRecordPtr regFilePtr);
- void openingCopyGciErrorLab(Signal *, FileRecordPtr regFilePtr);
- void creatingCopyGciErrorLab(Signal *, FileRecordPtr regFilePtr);
- void openingGcpErrorLab(Signal *, FileRecordPtr regFilePtr);
- void openingTableErrorLab(Signal *, FileRecordPtr regFilePtr);
- void tableCreateErrorLab(Signal *, FileRecordPtr regFilePtr);
- void closingGcpLab(Signal *, FileRecordPtr regFilePtr);
- void closingGcpCrashLab(Signal *, FileRecordPtr regFilePtr);
- void closingTableCrashLab(Signal *, FileRecordPtr regFilePtr);
- void closingTableSrLab(Signal *, FileRecordPtr regFilePtr);
- void tableCloseLab(Signal *, FileRecordPtr regFilePtr);
- void tableCloseErrorLab(FileRecordPtr regFilePtr);
- void readingGcpLab(Signal *, FileRecordPtr regFilePtr);
- void readingTableLab(Signal *, FileRecordPtr regFilePtr);
- void readingGcpErrorLab(Signal *, FileRecordPtr regFilePtr);
- void readingTableErrorLab(Signal *, FileRecordPtr regFilePtr);
- void writingCopyGciLab(Signal *, FileRecordPtr regFilePtr);
- void writeInitGcpLab(Signal *, FileRecordPtr regFilePtr);
- void tableWriteLab(Signal *, FileRecordPtr regFilePtr);
- void writeInitGcpErrorLab(Signal *, FileRecordPtr regFilePtr);
-
-
- void calculateHotSpare();
- void checkEscalation();
- void clearRestartInfoBits(Signal *);
- void invalidateLcpInfoAfterSr();
-
- bool isMaster();
- bool isActiveMaster();
-
- void emptyverificbuffer(Signal *, bool aContintueB);
- Uint32 findHotSpare();
- void handleGcpStateInMaster(Signal *, NodeRecordPtr failedNodeptr);
- void initRestartInfo();
- void initRestorableGciFiles();
- void makeNodeGroups(Uint32 nodeArray[]);
- void makePrnList(class ReadNodesConf * readNodes, Uint32 nodeArray[]);
- void nodeResetStart();
- void releaseTabPages(Uint32 tableId);
- void replication(Uint32 noOfReplicas,
- NodeGroupRecordPtr NGPtr,
- FragmentstorePtr regFragptr);
- void selectMasterCandidateAndSend(Signal *);
- void setInitialActiveStatus();
- void setLcpActiveStatusEnd();
- void setLcpActiveStatusStart(Signal *);
- void setNodeActiveStatus();
- void setNodeGroups();
- void setNodeInfo(Signal *);
- void setNodeLcpActiveStatus();
- void setNodeRestartInfoBits();
- void startGcp(Signal *);
-
- void readFragment(RWFragment* rf, FragmentstorePtr regFragptr);
- Uint32 readPageWord(RWFragment* rf);
- void readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr);
- void readReplicas(RWFragment* rf, FragmentstorePtr regFragptr);
- void readRestorableGci(Signal *, FileRecordPtr regFilePtr);
- void readTabfile(Signal *, TabRecord* tab, FileRecordPtr regFilePtr);
- void writeFragment(RWFragment* wf, FragmentstorePtr regFragptr);
- void writePageWord(RWFragment* wf, Uint32 dataWord);
- void writeReplicas(RWFragment* wf, Uint32 replicaStartIndex);
- void writeRestorableGci(Signal *, FileRecordPtr regFilePtr);
- void writeTabfile(Signal *, TabRecord* tab, FileRecordPtr regFilePtr);
- void copyTabReq_complete(Signal* signal, TabRecordPtr tabPtr);
-
- void gcpcommitreqLab(Signal *);
- void gcpsavereqLab(Signal *);
- void copyGciLab(Signal *, CopyGCIReq::CopyReason reason);
- void storeNewLcpIdLab(Signal *);
- void startLcpRoundLoopLab(Signal *, Uint32 startTableId, Uint32 startFragId);
-
- void nodeFailCompletedCheckLab(Signal*, NodeRecordPtr failedNodePtr);
-
- /**
- *
- */
- void setLocalNodefailHandling(Signal*, Uint32 failedNodeId,
- NodefailHandlingStep step);
- void checkLocalNodefailComplete(Signal*, Uint32 failedNodeId,
- NodefailHandlingStep step);
-
- void ndbsttorry10Lab(Signal *, Uint32 _line);
- void createMutexes(Signal* signal, Uint32 no);
- void createMutex_done(Signal* signal, Uint32 no, Uint32 retVal);
- void crashSystemAtGcpStop(Signal *);
- void sendFirstDictfragsreq(Signal *, TabRecordPtr regTabPtr);
- void addtabrefuseLab(Signal *, ConnectRecordPtr regConnectPtr, Uint32 errorCode);
- void GCP_SAVEhandling(Signal *, Uint32 nodeId);
- void packTableIntoPagesLab(Signal *, Uint32 tableId);
- void readPagesIntoTableLab(Signal *, Uint32 tableId);
- void readPagesIntoFragLab(Signal *, RWFragment* rf);
- void readTabDescriptionLab(Signal *, Uint32 tableId);
- void copyTableLab(Signal *, Uint32 tableId);
- void breakCopyTableLab(Signal *,
- TabRecordPtr regTabPtr,
- Uint32 nodeId);
- void checkAddfragCompletedLab(Signal *,
- TabRecordPtr regTabPtr,
- Uint32 fragId);
- void completeRestartLab(Signal *);
- void readTableFromPagesLab(Signal *, TabRecordPtr regTabPtr);
- void srPhase2ReadTableLab(Signal *, TabRecordPtr regTabPtr);
- void checkTcCounterLab(Signal *);
- void calculateKeepGciLab(Signal *, Uint32 tableId, Uint32 fragId);
- void tableUpdateLab(Signal *, TabRecordPtr regTabPtr);
- void checkLcpCompletedLab(Signal *);
- void initLcpLab(Signal *, Uint32 masterRef, Uint32 tableId);
- void startGcpLab(Signal *, Uint32 aWaitTime);
- void checkGcpStopLab(Signal *);
- void MASTER_GCPhandling(Signal *, Uint32 failedNodeId);
- void MASTER_LCPhandling(Signal *, Uint32 failedNodeId);
- void rnfTableNotReadyLab(Signal *, TabRecordPtr regTabPtr, Uint32 removeNodeId);
- void startLcpTakeOverLab(Signal *, Uint32 failedNodeId);
-
- void startLcpMasterTakeOver(Signal *, Uint32 failedNodeId);
- void startGcpMasterTakeOver(Signal *, Uint32 failedNodeId);
- void checkGcpOutstanding(Signal*, Uint32 failedNodeId);
-
- void checkEmptyLcpComplete(Signal *);
- void lcpBlockedLab(Signal *);
- void breakCheckTabCompletedLab(Signal *, TabRecordPtr regTabptr);
- void readGciFileLab(Signal *);
- void openingCopyGciSkipInitLab(Signal *, FileRecordPtr regFilePtr);
- void startLcpRoundLab(Signal *);
- void gcpBlockedLab(Signal *);
- void initialStartCompletedLab(Signal *);
- void allNodesLcpCompletedLab(Signal *);
- void nodeRestartPh2Lab(Signal *);
- void initGciFilesLab(Signal *);
- void dictStartConfLab(Signal *);
- void nodeDictStartConfLab(Signal *);
- void ndbStartReqLab(Signal *, BlockReference ref);
- void nodeRestartStartRecConfLab(Signal *);
- void dihCopyCompletedLab(Signal *);
- void release_connect(ConnectRecordPtr ptr);
- void copyTableNode(Signal *,
- CopyTableNode* ctn,
- NodeRecordPtr regNodePtr);
- void startFragment(Signal *, Uint32 tableId, Uint32 fragId);
- bool checkLcpAllTablesDoneInLqh();
-
- void lcpStateAtNodeFailureLab(Signal *, Uint32 nodeId);
- void copyNodeLab(Signal *, Uint32 tableId);
- void copyGciReqLab(Signal *);
- void allLab(Signal *,
- ConnectRecordPtr regConnectPtr,
- TabRecordPtr regTabPtr);
- void tableCopyNodeLab(Signal *, TabRecordPtr regTabPtr);
-
- void removeNodeFromTables(Signal *, Uint32 tableId, Uint32 nodeId);
- void removeNodeFromTable(Signal *, Uint32 tableId, TabRecordPtr tabPtr);
- void removeNodeFromTablesComplete(Signal* signal, Uint32 nodeId);
-
- void packFragIntoPagesLab(Signal *, RWFragment* wf);
- void startNextChkpt(Signal *);
- void failedNodeLcpHandling(Signal*, NodeRecordPtr failedNodePtr);
- void failedNodeSynchHandling(Signal *, NodeRecordPtr failedNodePtr);
- void checkCopyTab(NodeRecordPtr failedNodePtr);
-
- void initCommonData();
- void initialiseRecordsLab(Signal *, Uint32 stepNo, Uint32, Uint32);
-
- void findReplica(ReplicaRecordPtr& regReplicaPtr,
- Fragmentstore* fragPtrP, Uint32 nodeId);
-//------------------------------------
-// Node failure handling methods
-//------------------------------------
- void startRemoveFailedNode(Signal *, NodeRecordPtr failedNodePtr);
- void handleGcpTakeOver(Signal *, NodeRecordPtr failedNodePtr);
- void handleLcpTakeOver(Signal *, NodeRecordPtr failedNodePtr);
- void handleNewMaster(Signal *, NodeRecordPtr failedNodePtr);
- void checkTakeOverInMasterAllNodeFailure(Signal*, NodeRecordPtr failedNode);
- void checkTakeOverInMasterCopyNodeFailure(Signal*, Uint32 failedNodeId);
- void checkTakeOverInMasterStartNodeFailure(Signal*, Uint32 takeOverPtr);
- void checkTakeOverInNonMasterStartNodeFailure(Signal*, Uint32 takeOverPtr);
- void handleLcpMasterTakeOver(Signal *, Uint32 nodeId);
-
-//------------------------------------
-// Replica record specific methods
-//------------------------------------
- Uint32 findLogInterval(ConstPtr<ReplicaRecord> regReplicaPtr,
- Uint32 startGci);
- void findMinGci(ReplicaRecordPtr fmgReplicaPtr,
- Uint32& keeGci,
- Uint32& oldestRestorableGci);
- bool findStartGci(ConstPtr<ReplicaRecord> fstReplicaPtr,
- Uint32 tfstStopGci,
- Uint32& tfstStartGci,
- Uint32& tfstLcp);
- void newCrashedReplica(Uint32 nodeId, ReplicaRecordPtr ncrReplicaPtr);
- void packCrashedReplicas(ReplicaRecordPtr pcrReplicaPtr);
- void releaseReplicas(Uint32 replicaPtr);
- void removeOldCrashedReplicas(ReplicaRecordPtr rocReplicaPtr);
- void removeTooNewCrashedReplicas(ReplicaRecordPtr rtnReplicaPtr);
- void seizeReplicaRec(ReplicaRecordPtr& replicaPtr);
-
-//------------------------------------
-// Methods operating on a fragment and
-// its connected replicas and nodes.
-//------------------------------------
- void allocStoredReplica(FragmentstorePtr regFragptr,
- ReplicaRecordPtr& newReplicaPtr,
- Uint32 nodeId);
- Uint32 extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[]);
- bool findBestLogNode(CreateReplicaRecord* createReplica,
- FragmentstorePtr regFragptr,
- Uint32 startGci,
- Uint32 stopGci,
- Uint32 logNode,
- Uint32& fblStopGci);
- bool findLogNodes(CreateReplicaRecord* createReplica,
- FragmentstorePtr regFragptr,
- Uint32 startGci,
- Uint32 stopGci);
- void findToReplica(TakeOverRecord* regTakeOver,
- Uint32 replicaType,
- FragmentstorePtr regFragptr,
- ReplicaRecordPtr& ftrReplicaPtr);
- void initFragstore(FragmentstorePtr regFragptr);
- void insertBackup(FragmentstorePtr regFragptr, Uint32 nodeId);
- void insertfraginfo(FragmentstorePtr regFragptr,
- Uint32 noOfBackups,
- Uint32* nodeArray);
- void linkOldStoredReplica(FragmentstorePtr regFragptr,
- ReplicaRecordPtr replicaPtr);
- void linkStoredReplica(FragmentstorePtr regFragptr,
- ReplicaRecordPtr replicaPtr);
- void prepareReplicas(FragmentstorePtr regFragptr);
- void removeNodeFromStored(Uint32 nodeId,
- FragmentstorePtr regFragptr,
- ReplicaRecordPtr replicaPtr);
- void removeOldStoredReplica(FragmentstorePtr regFragptr,
- ReplicaRecordPtr replicaPtr);
- void removeStoredReplica(FragmentstorePtr regFragptr,
- ReplicaRecordPtr replicaPtr);
- void searchStoredReplicas(FragmentstorePtr regFragptr);
- void updateNodeInfo(FragmentstorePtr regFragptr);
-
-//------------------------------------
-// Fragment allocation, deallocation and
-// find methods
-//------------------------------------
- void allocFragments(Uint32 noOfFragments, TabRecordPtr regTabPtr);
- void releaseFragments(TabRecordPtr regTabPtr);
- void getFragstore(TabRecord *, Uint32 fragNo, FragmentstorePtr & ptr);
- void initialiseFragstore();
-
-//------------------------------------
-// Page Record specific methods
-//------------------------------------
- void allocpage(PageRecordPtr& regPagePtr);
- void releasePage(Uint32 pageIndex);
-
-//------------------------------------
-// Table Record specific methods
-//------------------------------------
- void initTable(TabRecordPtr regTabPtr);
- void initTableFile(TabRecordPtr regTabPtr);
- void releaseTable(TabRecordPtr tabPtr);
- Uint32 findTakeOver(Uint32 failedNodeId);
- void handleTakeOverMaster(Signal *, Uint32 takeOverPtr);
- void handleTakeOverNewMaster(Signal *, Uint32 takeOverPtr);
-
-//------------------------------------
-// TakeOver Record specific methods
-//------------------------------------
- void initTakeOver(TakeOverRecordPtr regTakeOverptr);
- void seizeTakeOver(TakeOverRecordPtr& regTakeOverptr);
- void allocateTakeOver(TakeOverRecordPtr& regTakeOverptr);
- void releaseTakeOver(Uint32 takeOverPtr);
- bool anyActiveTakeOver();
- void checkToCopy();
- void checkToCopyCompleted(Signal *);
- bool checkToInterrupted(TakeOverRecordPtr& regTakeOverptr);
- Uint32 getStartNode(Uint32 takeOverPtr);
-
-//------------------------------------
-// Methods for take over functionality
-//------------------------------------
- void changeNodeGroups(Uint32 startNode, Uint32 nodeTakenOver);
- void endTakeOver(Uint32 takeOverPtr);
- void initStartTakeOver(const class StartToReq *,
- TakeOverRecordPtr regTakeOverPtr);
-
- void nodeRestartTakeOver(Signal *, Uint32 startNodeId);
- void systemRestartTakeOverLab(Signal *);
- void startTakeOver(Signal *,
- Uint32 takeOverPtr,
- Uint32 startNode,
- Uint32 toNode);
- void sendStartTo(Signal *, Uint32 takeOverPtr);
- void startNextCopyFragment(Signal *, Uint32 takeOverPtr);
- void toCopyFragLab(Signal *, Uint32 takeOverPtr);
- void startHsAddFragConfLab(Signal *);
- void prepareSendCreateFragReq(Signal *, Uint32 takeOverPtr);
- void sendUpdateTo(Signal *, Uint32 takeOverPtr, Uint32 updateState);
- void toCopyCompletedLab(Signal *, TakeOverRecordPtr regTakeOverptr);
- void takeOverCompleted(Uint32 aNodeId);
- void sendEndTo(Signal *, Uint32 takeOverPtr);
-
-//------------------------------------
-// Node Record specific methods
-//------------------------------------
- void checkStartTakeOver(Signal *);
- void insertAlive(NodeRecordPtr newNodePtr);
- void insertDeadNode(NodeRecordPtr removeNodePtr);
- void removeAlive(NodeRecordPtr removeNodePtr);
- void removeDeadNode(NodeRecordPtr removeNodePtr);
-
- NodeRecord::NodeStatus getNodeStatus(Uint32 nodeId);
- void setNodeStatus(Uint32 nodeId, NodeRecord::NodeStatus);
- Sysfile::ActiveStatus getNodeActiveStatus(Uint32 nodeId);
- void setNodeActiveStatus(Uint32 nodeId, Sysfile::ActiveStatus newStatus);
- void setNodeLcpActiveStatus(Uint32 nodeId, bool newState);
- bool getNodeLcpActiveStatus(Uint32 nodeId);
- bool getAllowNodeStart(Uint32 nodeId);
- void setAllowNodeStart(Uint32 nodeId, bool newState);
- bool getNodeCopyCompleted(Uint32 nodeId);
- void setNodeCopyCompleted(Uint32 nodeId, bool newState);
- bool checkNodeAlive(Uint32 nodeId);
-
- // Initialisation
- void initData();
- void initRecords();
-
- // Variables to support record structures and their free lists
-
- ApiConnectRecord *apiConnectRecord;
- Uint32 capiConnectFileSize;
-
- ConnectRecord *connectRecord;
- Uint32 cfirstconnect;
- Uint32 cconnectFileSize;
-
- CreateReplicaRecord *createReplicaRecord;
- Uint32 cnoOfCreateReplicas;
-
- FileRecord *fileRecord;
- Uint32 cfirstfreeFile;
- Uint32 cfileFileSize;
-
- Fragmentstore *fragmentstore;
- Uint32 cfirstfragstore;
- Uint32 cfragstoreFileSize;
-
- Uint32 c_nextNodeGroup;
- NodeGroupRecord *nodeGroupRecord;
-
- NodeRecord *nodeRecord;
-
- PageRecord *pageRecord;
- Uint32 cfirstfreepage;
- Uint32 cpageFileSize;
-
- ReplicaRecord *replicaRecord;
- Uint32 cfirstfreeReplica;
- Uint32 cnoFreeReplicaRec;
- Uint32 creplicaFileSize;
-
- TabRecord *tabRecord;
- Uint32 ctabFileSize;
-
- TakeOverRecord *takeOverRecord;
- Uint32 cfirstfreeTakeOver;
-
- /*
- 2.4 C O M M O N S T O R E D V A R I A B L E S
- ----------------------------------------------------
- */
- Uint32 cfirstVerifyQueue;
- Uint32 clastVerifyQueue;
- Uint32 cverifyQueueCounter;
-
- /*------------------------------------------------------------------------*/
- /* THIS VARIABLE KEEPS THE REFERENCES TO FILE RECORDS THAT DESCRIBE */
- /* THE TWO FILES THAT ARE USED TO STORE THE VARIABLE CRESTART_INFO */
- /* ON DISK. */
- /*------------------------------------------------------------------------*/
- Uint32 crestartInfoFile[2];
- /*------------------------------------------------------------------------*/
- /* THIS VARIABLE KEEPS TRACK OF THE STATUS OF A GLOBAL CHECKPOINT */
- /* PARTICIPANT. THIS IS NEEDED TO HANDLE A NODE FAILURE. WHEN A NODE */
- /* FAILURE OCCURS THE PROTOCOL CAN EASILY STALL IF NO ACTION IS */
- /* TAKEN TO PREVENT THIS. THIS VARIABLE ENSURES THAT SUCH ACTION CAN */
- /* BE TAKEN. */
- /*------------------------------------------------------------------------*/
- enum GcpParticipantState {
- GCP_PARTICIPANT_READY = 0,
- GCP_PARTICIPANT_PREPARE_RECEIVED = 1,
- GCP_PARTICIPANT_COMMIT_RECEIVED = 2,
- GCP_PARTICIPANT_TC_FINISHED = 3,
- GCP_PARTICIPANT_COPY_GCI_RECEIVED = 4
- };
- GcpParticipantState cgcpParticipantState;
- /*------------------------------------------------------------------------*/
- /* THESE VARIABLES ARE USED TO ENSURE THAT GCP PROCESSING DOES NOT */
- /* STALL FOR SOME REASON. */
- /*------------------------------------------------------------------------*/
- enum GcpStatus {
- GCP_READY = 0,
- GCP_PREPARE_SENT = 1,
- GCP_COMMIT_SENT = 2,
- GCP_NODE_FINISHED = 3,
- GCP_SAVE_LQH_FINISHED = 4
- };
- GcpStatus cgcpStatus;
- Uint32 cgcpStartCounter;
- Uint32 coldGcpStatus;
- Uint32 coldGcpId;
- /*------------------------------------------------------------------------*/
- /* THIS VARIABLE KEEPS TRACK OF THE STATE OF THIS NODE AS MASTER. */
- /*------------------------------------------------------------------------*/
- enum MasterState {
- MASTER_IDLE = 0, /* NODE IS NOT MASTER */
- MASTER_ACTIVE = 1, /* NODE IS ACTIVE AS MASTER */
- MASTER_TAKE_OVER_GCP = 2 /* NODE IS TAKING OVER AS MASTER */
- };
- MasterState cmasterState;
- Uint16 cmasterTakeOverNode;
-
- struct CopyGCIMaster {
- CopyGCIMaster(){ m_copyReason = m_waiting = CopyGCIReq::IDLE;}
- /*------------------------------------------------------------------------*/
- /* THIS STATE VARIABLE IS USED TO INDICATE IF COPYING OF RESTART */
- /* INFO WAS STARTED BY A LOCAL CHECKPOINT OR AS PART OF A SYSTEM */
- /* RESTART. */
- /*------------------------------------------------------------------------*/
- CopyGCIReq::CopyReason m_copyReason;
-
- /*------------------------------------------------------------------------*/
- /* COPYING RESTART INFO CAN BE STARTED BY LOCAL CHECKPOINTS AND BY */
- /* GLOBAL CHECKPOINTS. WE CAN HOWEVER ONLY HANDLE ONE SUCH COPY AT */
- /* THE TIME. THUS WE HAVE TO KEEP WAIT INFORMATION IN THIS VARIABLE.*/
- /*------------------------------------------------------------------------*/
- CopyGCIReq::CopyReason m_waiting;
- } c_copyGCIMaster;
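
The comments above describe a one-at-a-time rule: a COPY_GCI round either starts immediately or is parked in m_waiting until the ongoing round completes. A self-contained sketch of that rule, with illustrative names that are not part of the NDB code:

    enum MiniCopyReason { MINI_IDLE, MINI_LCP, MINI_GCP, MINI_RESTART };

    struct MiniCopyMaster {
      MiniCopyReason reason  = MINI_IDLE;   // the round currently running
      MiniCopyReason waiting = MINI_IDLE;   // at most one parked request
    };

    void requestCopy(MiniCopyMaster& m, MiniCopyReason r)
    {
      if (m.reason == MINI_IDLE)
        m.reason = r;                       // nothing ongoing: start this round
      else
        m.waiting = r;                      // park it; start when the round ends
    }
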
-
- struct CopyGCISlave {
- CopyGCISlave(){ m_copyReason = CopyGCIReq::IDLE; m_expectedNextWord = 0;}
- /*------------------------------------------------------------------------*/
- /* THIS STATE VARIABLE IS USED TO INDICATE IF COPYING OF RESTART */
- /* INFO WAS STARTED BY A LOCAL CHECKPOINT OR AS PART OF A SYSTEM */
- /* RESTART. THIS VARIABLE IS USED BY THE NODE THAT RECEIVES */
- /* COPY_GCI_REQ. */
- /*------------------------------------------------------------------------*/
- Uint32 m_senderData;
- BlockReference m_senderRef;
- CopyGCIReq::CopyReason m_copyReason;
-
- Uint32 m_expectedNextWord;
- } c_copyGCISlave;
-
- /*------------------------------------------------------------------------*/
- /* THIS VARIABLE IS USED TO KEEP TRACK OF THE STATE OF LOCAL */
- /* CHECKPOINTS. */
- /*------------------------------------------------------------------------*/
-public:
- enum LcpStatus {
- LCP_STATUS_IDLE = 0,
- LCP_TCGET = 1, // Only master
- LCP_STATUS_ACTIVE = 2,
- LCP_CALCULATE_KEEP_GCI = 4, // Only master
- LCP_COPY_GCI = 5,
- LCP_INIT_TABLES = 6,
- LCP_TC_CLOPSIZE = 7, // Only master
- LCP_START_LCP_ROUND = 8,
- LCP_TAB_COMPLETED = 9,
- LCP_TAB_SAVED = 10
- };
-private:
-
- struct LcpState {
- LcpStatus lcpStatus;
- Uint32 lcpStatusUpdatedPlace;
-
- void setLcpStatus(LcpStatus status, Uint32 line){
- lcpStatus = status;
- lcpStatusUpdatedPlace = line;
- }
-
- Uint32 lcpStart;
- Uint32 lcpStartGcp;
- Uint32 keepGci; /* USED TO CALCULATE THE GCI TO KEEP AFTER AN LCP */
- Uint32 oldestRestorableGci;
-
- struct CurrentFragment {
- Uint32 tableId;
- Uint32 fragmentId;
- } currentFragment;
-
- Uint32 noOfLcpFragRepOutstanding;
-
- /*------------------------------------------------------------------------*/
- /* USED TO ENSURE THAT LCP'S ARE EXECUTED AT CERTAIN TIME INTERVALS */
- /* EVEN WHEN THE SYSTEM IS NOT DOING ANYTHING. */
- /*------------------------------------------------------------------------*/
- Uint32 ctimer;
- Uint32 ctcCounter;
- Uint32 clcpDelay; /* MAX. 2^(CLCP_DELAY - 2) SEC BETWEEN LCP'S */
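
As a worked example of the bound stated on clcpDelay above: a clcpDelay of 12 would allow at most 2^(12 - 2) = 1024 seconds, roughly 17 minutes, between local checkpoints (the value 12 is only an illustration, not a documented default).
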
-
- /*------------------------------------------------------------------------*/
- /* THIS STATE IS USED TO TELL IF THE FIRST LCP AFTER START/RESTART */
- /* HAS BEEN RUN. AFTER A NODE RESTART THE NODE DOES NOT ENTER */
- /* STARTED STATE BEFORE THIS IS DONE. */
- /*------------------------------------------------------------------------*/
- bool immediateLcpStart;
- bool m_LCP_COMPLETE_REP_From_Master_Received;
- SignalCounter m_LCP_COMPLETE_REP_Counter_DIH;
- SignalCounter m_LCP_COMPLETE_REP_Counter_LQH;
- SignalCounter m_LAST_LCP_FRAG_ORD;
- NdbNodeBitmask m_participatingLQH;
- NdbNodeBitmask m_participatingDIH;
-
- Uint32 m_masterLcpDihRef;
- bool m_MASTER_LCPREQ_Received;
- Uint32 m_MASTER_LCPREQ_FailedNodeId;
- } c_lcpState;
-
- /*------------------------------------------------------------------------*/
- /* THIS VARIABLE KEEPS TRACK OF HOW MANY TABLES ARE ACTIVATED WHEN */
- /* STARTING A LOCAL CHECKPOINT. WE SHOULD AVOID STARTING A CHECKPOINT */
- /* WHEN NO TABLES ARE ACTIVATED. */
- /*------------------------------------------------------------------------*/
- Uint32 cnoOfActiveTables;
- Uint32 cgcpDelay; /* Delay between global checkpoints */
-
- BlockReference cdictblockref; /* DICTIONARY BLOCK REFERENCE */
- Uint32 cfailurenr; /* EVERY TIME A NODE FAILURE IS REPORTED
- THIS NUMBER IS INCREMENTED. AT THE START OF
- THE SYSTEM THIS NUMBER MUST BE INITIALISED TO
- ZERO */
- bool cgckptflag; /* A FLAG WHICH IS SET WHILE A NEW GLOBAL
- CHECKPOINT IS BEING CREATED. NO VERIFICATION IS
- ALLOWED WHILE THE FLAG IS SET */
- Uint32 cgcpOrderBlocked;
- BlockReference clocallqhblockref;
- BlockReference clocaltcblockref;
- BlockReference cmasterdihref;
- Uint16 cownNodeId;
- Uint32 cnewgcp;
- BlockReference cndbStartReqBlockref;
- BlockReference cntrlblockref;
- Uint32 cgcpSameCounter;
- Uint32 coldgcp;
- Uint32 con_lineNodes;
- Uint32 creceivedfrag;
- Uint32 cremainingfrags;
- Uint32 cstarttype;
- Uint32 csystemnodes;
- Uint32 currentgcp;
-
- enum GcpMasterTakeOverState {
- GMTOS_IDLE = 0,
- GMTOS_INITIAL = 1,
- ALL_READY = 2,
- ALL_PREPARED = 3,
- COMMIT_STARTED_NOT_COMPLETED = 4,
- COMMIT_COMPLETED = 5,
- PREPARE_STARTED_NOT_COMMITTED = 6,
- SAVE_STARTED_NOT_COMPLETED = 7
- };
- GcpMasterTakeOverState cgcpMasterTakeOverState;
-
-public:
- enum LcpMasterTakeOverState {
- LMTOS_IDLE = 0,
- LMTOS_WAIT_EMPTY_LCP = 1, // Currently doing empty LCP
- LMTOS_WAIT_LCP_FRAG_REP = 2,// Currently waiting for outst. LCP_FRAG_REP
- LMTOS_INITIAL = 3,
- LMTOS_ALL_IDLE = 4,
- LMTOS_ALL_ACTIVE = 5,
- LMTOS_LCP_CONCLUDING = 6,
- LMTOS_COPY_ONGOING = 7
- };
-private:
- class MasterTakeOverState {
- public:
- void set(LcpMasterTakeOverState s, Uint32 line) {
- state = s; updatePlace = line;
- }
-
- LcpMasterTakeOverState state;
- Uint32 updatePlace;
-
- Uint32 minTableId;
- Uint32 minFragId;
- Uint32 failedNodeId;
- } c_lcpMasterTakeOverState;
-
- Uint16 cmasterNodeId;
- Uint8 cnoHotSpare;
-
- struct NodeStartMasterRecord {
- Uint32 startNode;
- Uint32 wait;
- Uint32 failNr;
- bool activeState;
- bool blockLcp;
- bool blockGcp;
- Uint32 startInfoErrorCode;
- Uint32 m_outstandingGsn;
- };
- NodeStartMasterRecord c_nodeStartMaster;
-
- struct NodeStartSlaveRecord {
- NodeStartSlaveRecord() { nodeId = 0;}
-
- Uint32 nodeId;
- };
- NodeStartSlaveRecord c_nodeStartSlave;
-
- Uint32 cfirstAliveNode;
- Uint32 cfirstDeadNode;
- Uint32 cstartPhase;
- Uint32 cnoReplicas;
-
- Uint32 c_startToLock;
- Uint32 c_endToLock;
- Uint32 c_createFragmentLock;
- Uint32 c_updateToLock;
-
- bool cwaitLcpSr;
- Uint32 cnoOfNodeGroups;
- bool cstartGcpNow;
-
- Uint32 crestartGci; /* VALUE OF GCI WHEN SYSTEM RESTARTED OR STARTED */
- Uint32 cminHotSpareNodes;
-
- /**
- * Counter variables keeping track of the number of outstanding replies
- * expected for particular signals in various protocols.
- */
- SignalCounter c_COPY_GCIREQ_Counter;
- SignalCounter c_COPY_TABREQ_Counter;
- SignalCounter c_CREATE_FRAGREQ_Counter;
- SignalCounter c_DIH_SWITCH_REPLICA_REQ_Counter;
- SignalCounter c_EMPTY_LCP_REQ_Counter;
- SignalCounter c_END_TOREQ_Counter;
- SignalCounter c_GCP_COMMIT_Counter;
- SignalCounter c_GCP_PREPARE_Counter;
- SignalCounter c_GCP_SAVEREQ_Counter;
- SignalCounter c_INCL_NODEREQ_Counter;
- SignalCounter c_MASTER_GCPREQ_Counter;
- SignalCounter c_MASTER_LCPREQ_Counter;
- SignalCounter c_START_INFOREQ_Counter;
- SignalCounter c_START_RECREQ_Counter;
- SignalCounter c_START_TOREQ_Counter;
- SignalCounter c_STOP_ME_REQ_Counter;
- SignalCounter c_TC_CLOPSIZEREQ_Counter;
- SignalCounter c_TCGETOPSIZEREQ_Counter;
- SignalCounter c_UPDATE_TOREQ_Counter;
- SignalCounter c_START_LCP_REQ_Counter;
-
- bool c_blockCommit;
- Uint32 c_blockCommitNo;
-
- bool getBlockCommit() const {
- return c_blockCommit || cgckptflag;
- }
-
- /**
- * SwitchReplicaRecord - Should only be used by master
- */
- struct SwitchReplicaRecord {
- void clear(){}
-
- Uint32 nodeId;
- Uint32 tableId;
- Uint32 fragNo;
- };
- SwitchReplicaRecord c_switchReplicas;
-
- struct StopPermProxyRecord {
- StopPermProxyRecord() { clientRef = 0; }
-
- Uint32 clientData;
- BlockReference clientRef;
- BlockReference masterRef;
- };
-
- struct StopPermMasterRecord {
- StopPermMasterRecord() { clientRef = 0;}
-
- Uint32 returnValue;
-
- Uint32 clientData;
- BlockReference clientRef;
- };
-
- StopPermProxyRecord c_stopPermProxy;
- StopPermMasterRecord c_stopPermMaster;
-
- void checkStopPermProxy(Signal*, NodeId failedNodeId);
- void checkStopPermMaster(Signal*, NodeRecordPtr failedNodePtr);
-
- void switchReplica(Signal*,
- Uint32 nodeId,
- Uint32 tableId,
- Uint32 fragNo);
-
- void switchReplicaReply(Signal*, NodeId nodeId);
-
- /**
- * Wait GCP (proxy)
- */
- struct WaitGCPProxyRecord {
- WaitGCPProxyRecord() { clientRef = 0;}
-
- Uint32 clientData;
- BlockReference clientRef;
- BlockReference masterRef;
-
- union { Uint32 nextPool; Uint32 nextList; };
- Uint32 prevList;
- };
- typedef Ptr<WaitGCPProxyRecord> WaitGCPProxyPtr;
-
- /**
- * Wait GCP (master)
- */
- struct WaitGCPMasterRecord {
- WaitGCPMasterRecord() { clientRef = 0;}
- Uint32 clientData;
- BlockReference clientRef;
-
- union { Uint32 nextPool; Uint32 nextList; };
- Uint32 prevList;
- };
- typedef Ptr<WaitGCPMasterRecord> WaitGCPMasterPtr;
-
- /**
- * Pool/list of WaitGCPProxyRecord records
- */
- ArrayPool<WaitGCPProxyRecord> waitGCPProxyPool;
- ArrayList<WaitGCPProxyRecord> c_waitGCPProxyList;
-
- /**
- * Pool/list of WaitGCPMasterRecord records
- */
- ArrayPool<WaitGCPMasterRecord> waitGCPMasterPool;
- ArrayList<WaitGCPMasterRecord> c_waitGCPMasterList;
-
- void checkWaitGCPProxy(Signal*, NodeId failedNodeId);
- void checkWaitGCPMaster(Signal*, NodeId failedNodeId);
- void emptyWaitGCPMasterQueue(Signal*);
-
- /**
- * Stop me
- */
- struct StopMeRecord {
- StopMeRecord() { clientRef = 0;}
-
- BlockReference clientRef;
- Uint32 clientData;
- };
- StopMeRecord c_stopMe;
-
- void checkStopMe(Signal *, NodeRecordPtr failedNodePtr);
-
-#define DIH_CDATA_SIZE 128
- /**
- * This variable must be at least the size of Sysfile::SYSFILE_SIZE32
- */
- Uint32 cdata[DIH_CDATA_SIZE]; /* TEMPORARY ARRAY VARIABLE */
-
- /**
- * Sys file data
- */
- Uint32 sysfileData[DIH_CDATA_SIZE];
- Uint32 sysfileDataToFile[DIH_CDATA_SIZE];
-
- /**
- * When a node comes up without a filesystem
- * we have to clear all LCP information for that node
- */
- void invalidateNodeLCP(Signal *, Uint32 nodeId, Uint32 tableId);
- void invalidateNodeLCP(Signal *, Uint32 nodeId, TabRecordPtr);
-
- /**
- * Reply from nodeId
- */
- void startInfoReply(Signal *, Uint32 nodeId);
-};
-
-#if (DIH_CDATA_SIZE < _SYSFILE_SIZE32)
-#error "cdata is to small compared to Sysfile size"
-#endif
-
-#endif
-
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
deleted file mode 100644
index 2a661104347..00000000000
--- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ /dev/null
@@ -1,14319 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#define DBDIH_C
-#include <ndb_limits.h>
-#include <ndb_version.h>
-#include <NdbOut.hpp>
-
-#include "Dbdih.hpp"
-#include "Configuration.hpp"
-
-#include <signaldata/BlockCommitOrd.hpp>
-#include <signaldata/CheckNodeGroups.hpp>
-#include <signaldata/CreateFrag.hpp>
-#include <signaldata/CopyActive.hpp>
-#include <signaldata/CopyFrag.hpp>
-#include <signaldata/CopyGCIReq.hpp>
-#include <signaldata/DiAddTab.hpp>
-#include <signaldata/DictStart.hpp>
-#include <signaldata/DiGetNodes.hpp>
-#include <signaldata/DihContinueB.hpp>
-#include <signaldata/DihSwitchReplica.hpp>
-#include <signaldata/DumpStateOrd.hpp>
-#include <signaldata/EmptyLcp.hpp>
-#include <signaldata/EndTo.hpp>
-#include <signaldata/EventReport.hpp>
-#include <signaldata/GCPSave.hpp>
-#include <signaldata/HotSpareRep.hpp>
-#include <signaldata/MasterGCP.hpp>
-#include <signaldata/MasterLCP.hpp>
-#include <signaldata/NFCompleteRep.hpp>
-#include <signaldata/NodeFailRep.hpp>
-#include <signaldata/ReadNodesConf.hpp>
-#include <signaldata/StartFragReq.hpp>
-#include <signaldata/StartInfo.hpp>
-#include <signaldata/StartMe.hpp>
-#include <signaldata/StartPerm.hpp>
-#include <signaldata/StartRec.hpp>
-#include <signaldata/StartTo.hpp>
-#include <signaldata/StopPerm.hpp>
-#include <signaldata/StopMe.hpp>
-#include <signaldata/TestOrd.hpp>
-#include <signaldata/UpdateTo.hpp>
-#include <signaldata/WaitGCP.hpp>
-#include <signaldata/DihStartTab.hpp>
-#include <signaldata/LCP.hpp>
-#include <signaldata/SystemError.hpp>
-
-#include <signaldata/DropTab.hpp>
-#include <signaldata/AlterTab.hpp>
-#include <signaldata/PrepDropTab.hpp>
-#include <signaldata/SumaImpl.hpp>
-#include <signaldata/DictTabInfo.hpp>
-#include <signaldata/CreateFragmentation.hpp>
-#include <signaldata/LqhFrag.hpp>
-#include <signaldata/FsOpenReq.hpp>
-#include <DebuggerNames.hpp>
-
-#include <EventLogger.hpp>
-extern EventLogger g_eventLogger;
-
-#define SYSFILE ((Sysfile *)&sysfileData[0])
-
-#define RETURN_IF_NODE_NOT_ALIVE(node) \
- if (!checkNodeAlive((node))) { \
- jam(); \
- return; \
- } \
-
-#define RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverIndex, regTOPtr) \
- regTOPtr.i = takeOverIndex; \
- ptrCheckGuard(regTOPtr, MAX_NDB_NODES, takeOverRecord); \
- if (checkToInterrupted(regTOPtr)) { \
- jam(); \
- return; \
- } \
-
-#define receiveLoopMacro(sigName, receiveNodeId)\
-{ \
- c_##sigName##_Counter.clearWaitingFor(receiveNodeId); \
- if(c_##sigName##_Counter.done() == false){ \
- jam(); \
- return; \
- } \
-}
-
-#define sendLoopMacro(sigName, signalRoutine) \
-{ \
- c_##sigName##_Counter.clearWaitingFor(); \
- NodeRecordPtr specNodePtr; \
- specNodePtr.i = cfirstAliveNode; \
- do { \
- jam(); \
- ptrCheckGuard(specNodePtr, MAX_NDB_NODES, nodeRecord); \
- c_##sigName##_Counter.setWaitingFor(specNodePtr.i); \
- signalRoutine(signal, specNodePtr.i); \
- specNodePtr.i = specNodePtr.p->nextNode; \
- } while (specNodePtr.i != RNIL); \
-}
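
A hedged, self-contained analogue of the fan-out / fan-in pattern the two macros above implement; std::bitset stands in for the block's SignalCounter and all names below are illustrative:

    #include <bitset>

    struct MiniSignalCounter {
      std::bitset<64> waitingFor;                              // one bit per node
      void setWaitingFor(unsigned node)   { waitingFor.set(node); }
      void clearWaitingFor(unsigned node) { waitingFor.reset(node); }
      bool done() const                   { return waitingFor.none(); }
    };

    // "send loop": mark every alive node as waited-for, then send to each one.
    // "receive loop": on every reply, clear that node's bit and return early
    // until done() reports that all nodes have answered.
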
-
-static
-Uint32
-prevLcpNo(Uint32 lcpNo){
- if(lcpNo == 0)
- return MAX_LCP_STORED - 1;
- return lcpNo - 1;
-}
-
-static
-Uint32
-nextLcpNo(Uint32 lcpNo){
- lcpNo++;
- if(lcpNo == MAX_LCP_STORED)
- return 0;
- return lcpNo;
-}
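
A usage note on the two helpers above: they walk a ring of MAX_LCP_STORED slots, so if MAX_LCP_STORED were 3 the sequence wraps as 0 -> 1 -> 2 -> 0, with nextLcpNo(2) == 0 and prevLcpNo(0) == 2 (the value 3 is only an illustration).
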
-
-#define gth(x, y) ndbrequire(((int)x)>((int)y))
-
-void Dbdih::nullRoutine(Signal* signal, Uint32 nodeId)
-{
-}//Dbdih::nullRoutine()
-
-void Dbdih::sendCOPY_GCIREQ(Signal* signal, Uint32 nodeId)
-{
- ndbrequire(c_copyGCIMaster.m_copyReason != CopyGCIReq::IDLE);
-
- const BlockReference ref = calcDihBlockRef(nodeId);
- const Uint32 wordPerSignal = CopyGCIReq::DATA_SIZE;
- const Uint32 noOfSignals = ((Sysfile::SYSFILE_SIZE32 + (wordPerSignal - 1)) /
- wordPerSignal);
-
- CopyGCIReq * const copyGCI = (CopyGCIReq *)&signal->theData[0];
- copyGCI->anyData = nodeId;
- copyGCI->copyReason = c_copyGCIMaster.m_copyReason;
- copyGCI->startWord = 0;
-
- for(Uint32 i = 0; i < noOfSignals; i++) {
- jam();
- { // Do copy
- const int startWord = copyGCI->startWord;
- for(Uint32 j = 0; j < wordPerSignal; j++) {
- copyGCI->data[j] = sysfileData[j+startWord];
- }//for
- }
- sendSignal(ref, GSN_COPY_GCIREQ, signal, 25, JBB);
- copyGCI->startWord += wordPerSignal;
- }//for
-}//Dbdih::sendCOPY_GCIREQ()
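
A hedged, standalone illustration of the segmentation performed above: the sysfile words are shipped in fixed-size chunks, and the receiver (execCOPY_GCIREQ below) tracks the next expected start word. The helper name and the use of std::vector are illustrative; the original loop always copies a full chunk per signal, whereas this sketch simply truncates the last one:

    #include <cstddef>
    #include <vector>

    // Split a word array into ceil(size / wordsPerSignal) payloads.
    std::vector<std::vector<unsigned> >
    splitIntoSignals(const std::vector<unsigned>& words, std::size_t wordsPerSignal)
    {
      std::vector<std::vector<unsigned> > out;
      for (std::size_t start = 0; start < words.size(); start += wordsPerSignal) {
        std::size_t end = start + wordsPerSignal;
        if (end > words.size()) end = words.size();
        out.push_back(std::vector<unsigned>(words.begin() + start,
                                            words.begin() + end));
      }
      return out;
    }
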
-
-
-void Dbdih::sendDIH_SWITCH_REPLICA_REQ(Signal* signal, Uint32 nodeId)
-{
- const BlockReference ref = calcDihBlockRef(nodeId);
- sendSignal(ref, GSN_DIH_SWITCH_REPLICA_REQ, signal,
- DihSwitchReplicaReq::SignalLength, JBB);
-}//Dbdih::sendDIH_SWITCH_REPLICA_REQ()
-
-void Dbdih::sendEMPTY_LCP_REQ(Signal* signal, Uint32 nodeId)
-{
- BlockReference ref = calcLqhBlockRef(nodeId);
- sendSignal(ref, GSN_EMPTY_LCP_REQ, signal, EmptyLcpReq::SignalLength, JBB);
-}//Dbdih::sendEMPTY_LCP_REQ()
-
-void Dbdih::sendEND_TOREQ(Signal* signal, Uint32 nodeId)
-{
- BlockReference ref = calcDihBlockRef(nodeId);
- sendSignal(ref, GSN_END_TOREQ, signal, EndToReq::SignalLength, JBB);
-}//Dbdih::sendEND_TOREQ()
-
-void Dbdih::sendGCP_COMMIT(Signal* signal, Uint32 nodeId)
-{
- BlockReference ref = calcDihBlockRef(nodeId);
- signal->theData[0] = cownNodeId;
- signal->theData[1] = cnewgcp;
- sendSignal(ref, GSN_GCP_COMMIT, signal, 2, JBA);
-}//Dbdih::sendGCP_COMMIT()
-
-void Dbdih::sendGCP_PREPARE(Signal* signal, Uint32 nodeId)
-{
- BlockReference ref = calcDihBlockRef(nodeId);
- signal->theData[0] = cownNodeId;
- signal->theData[1] = cnewgcp;
- sendSignal(ref, GSN_GCP_PREPARE, signal, 2, JBA);
-}//Dbdih::sendGCP_PREPARE()
-
-void Dbdih::sendGCP_SAVEREQ(Signal* signal, Uint32 nodeId)
-{
- GCPSaveReq * const saveReq = (GCPSaveReq*)&signal->theData[0];
- BlockReference ref = calcLqhBlockRef(nodeId);
- saveReq->dihBlockRef = reference();
- saveReq->dihPtr = nodeId;
- saveReq->gci = coldgcp;
- sendSignal(ref, GSN_GCP_SAVEREQ, signal, GCPSaveReq::SignalLength, JBB);
-}//Dbdih::sendGCP_SAVEREQ()
-
-void Dbdih::sendINCL_NODEREQ(Signal* signal, Uint32 nodeId)
-{
- BlockReference nodeDihRef = calcDihBlockRef(nodeId);
- signal->theData[0] = reference();
- signal->theData[1] = c_nodeStartMaster.startNode;
- signal->theData[2] = c_nodeStartMaster.failNr;
- signal->theData[3] = 0;
- signal->theData[4] = currentgcp;
- sendSignal(nodeDihRef, GSN_INCL_NODEREQ, signal, 5, JBB);
-}//Dbdih::sendINCL_NODEREQ()
-
-void Dbdih::sendMASTER_GCPREQ(Signal* signal, Uint32 nodeId)
-{
- BlockReference ref = calcDihBlockRef(nodeId);
- sendSignal(ref, GSN_MASTER_GCPREQ, signal, MasterGCPReq::SignalLength, JBB);
-}//Dbdih::sendMASTER_GCPREQ()
-
-void Dbdih::sendMASTER_LCPREQ(Signal* signal, Uint32 nodeId)
-{
- BlockReference ref = calcDihBlockRef(nodeId);
- sendSignal(ref, GSN_MASTER_LCPREQ, signal, MasterLCPReq::SignalLength, JBB);
-}//Dbdih::sendMASTER_LCPREQ()
-
-void Dbdih::sendSTART_INFOREQ(Signal* signal, Uint32 nodeId)
-{
- const BlockReference ref = calcDihBlockRef(nodeId);
- sendSignal(ref, GSN_START_INFOREQ, signal, StartInfoReq::SignalLength, JBB);
-}//sendSTART_INFOREQ()
-
-void Dbdih::sendSTART_RECREQ(Signal* signal, Uint32 nodeId)
-{
- StartRecReq * const req = (StartRecReq*)&signal->theData[0];
- BlockReference ref = calcLqhBlockRef(nodeId);
- req->receivingNodeId = nodeId;
- req->senderRef = reference();
- req->keepGci = SYSFILE->keepGCI;
- req->lastCompletedGci = SYSFILE->lastCompletedGCI[nodeId];
- req->newestGci = SYSFILE->newestRestorableGCI;
- sendSignal(ref, GSN_START_RECREQ, signal, StartRecReq::SignalLength, JBB);
-
- signal->theData[0] = NDB_LE_StartREDOLog;
- signal->theData[1] = nodeId;
- signal->theData[2] = SYSFILE->keepGCI;
- signal->theData[3] = SYSFILE->lastCompletedGCI[nodeId];
- signal->theData[4] = SYSFILE->newestRestorableGCI;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 5, JBB);
-}//Dbdih::sendSTART_RECREQ()
-
-void Dbdih::sendSTART_TOREQ(Signal* signal, Uint32 nodeId)
-{
- BlockReference ref = calcDihBlockRef(nodeId);
- sendSignal(ref, GSN_START_TOREQ, signal, StartToReq::SignalLength, JBB);
-}//Dbdih::sendSTART_TOREQ()
-
-void Dbdih::sendSTOP_ME_REQ(Signal* signal, Uint32 nodeId)
-{
- if (nodeId != getOwnNodeId()) {
- jam();
- const BlockReference ref = calcDihBlockRef(nodeId);
- sendSignal(ref, GSN_STOP_ME_REQ, signal, StopMeReq::SignalLength, JBB);
- }//if
-}//Dbdih::sendSTOP_ME_REQ()
-
-void Dbdih::sendTC_CLOPSIZEREQ(Signal* signal, Uint32 nodeId)
-{
- BlockReference ref = calcTcBlockRef(nodeId);
- signal->theData[0] = nodeId;
- signal->theData[1] = reference();
- sendSignal(ref, GSN_TC_CLOPSIZEREQ, signal, 2, JBB);
-}//Dbdih::sendTC_CLOPSIZEREQ()
-
-void Dbdih::sendTCGETOPSIZEREQ(Signal* signal, Uint32 nodeId)
-{
- BlockReference ref = calcTcBlockRef(nodeId);
- signal->theData[0] = nodeId;
- signal->theData[1] = reference();
- sendSignal(ref, GSN_TCGETOPSIZEREQ, signal, 2, JBB);
-}//Dbdih::sendTCGETOPSIZEREQ()
-
-void Dbdih::sendUPDATE_TOREQ(Signal* signal, Uint32 nodeId)
-{
- const BlockReference ref = calcDihBlockRef(nodeId);
- sendSignal(ref, GSN_UPDATE_TOREQ, signal, UpdateToReq::SignalLength, JBB);
-}//sendUPDATE_TOREQ()
-
-void Dbdih::execCONTINUEB(Signal* signal)
-{
- jamEntry();
- switch ((DihContinueB::Type)signal->theData[0]) {
- case DihContinueB::ZPACK_TABLE_INTO_PAGES:
- {
- jam();
- Uint32 tableId = signal->theData[1];
- packTableIntoPagesLab(signal, tableId);
- return;
- break;
- }
- case DihContinueB::ZPACK_FRAG_INTO_PAGES:
- {
- RWFragment wf;
- jam();
- wf.rwfTabPtr.i = signal->theData[1];
- ptrCheckGuard(wf.rwfTabPtr, ctabFileSize, tabRecord);
- wf.fragId = signal->theData[2];
- wf.pageIndex = signal->theData[3];
- wf.wordIndex = signal->theData[4];
- packFragIntoPagesLab(signal, &wf);
- return;
- break;
- }
- case DihContinueB::ZREAD_PAGES_INTO_TABLE:
- {
- jam();
- Uint32 tableId = signal->theData[1];
- readPagesIntoTableLab(signal, tableId);
- return;
- break;
- }
- case DihContinueB::ZREAD_PAGES_INTO_FRAG:
- {
- RWFragment rf;
- jam();
- rf.rwfTabPtr.i = signal->theData[1];
- ptrCheckGuard(rf.rwfTabPtr, ctabFileSize, tabRecord);
- rf.fragId = signal->theData[2];
- rf.pageIndex = signal->theData[3];
- rf.wordIndex = signal->theData[4];
- readPagesIntoFragLab(signal, &rf);
- return;
- break;
- }
- case DihContinueB::ZCOPY_TABLE:
- {
- jam();
- Uint32 tableId = signal->theData[1];
- copyTableLab(signal, tableId);
- return;
- }
- case DihContinueB::ZCOPY_TABLE_NODE:
- {
- NodeRecordPtr nodePtr;
- CopyTableNode ctn;
- jam();
- ctn.ctnTabPtr.i = signal->theData[1];
- ptrCheckGuard(ctn.ctnTabPtr, ctabFileSize, tabRecord);
- nodePtr.i = signal->theData[2];
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- ctn.pageIndex = signal->theData[3];
- ctn.wordIndex = signal->theData[4];
- ctn.noOfWords = signal->theData[5];
- copyTableNode(signal, &ctn, nodePtr);
- return;
- }
- case DihContinueB::ZSTART_FRAGMENT:
- {
- jam();
- Uint32 tableId = signal->theData[1];
- Uint32 fragId = signal->theData[2];
- startFragment(signal, tableId, fragId);
- return;
- }
- case DihContinueB::ZCOMPLETE_RESTART:
- jam();
- completeRestartLab(signal);
- return;
- case DihContinueB::ZREAD_TABLE_FROM_PAGES:
- {
- TabRecordPtr tabPtr;
- jam();
- tabPtr.i = signal->theData[1];
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- readTableFromPagesLab(signal, tabPtr);
- return;
- }
- case DihContinueB::ZSR_PHASE2_READ_TABLE:
- {
- TabRecordPtr tabPtr;
- jam();
- tabPtr.i = signal->theData[1];
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- srPhase2ReadTableLab(signal, tabPtr);
- return;
- }
- case DihContinueB::ZCHECK_TC_COUNTER:
- jam();
-#ifndef NO_LCP
- checkTcCounterLab(signal);
-#endif
- return;
- case DihContinueB::ZCALCULATE_KEEP_GCI:
- {
- jam();
- Uint32 tableId = signal->theData[1];
- Uint32 fragId = signal->theData[2];
- calculateKeepGciLab(signal, tableId, fragId);
- return;
- }
- case DihContinueB::ZSTORE_NEW_LCP_ID:
- jam();
- storeNewLcpIdLab(signal);
- return;
- case DihContinueB::ZTABLE_UPDATE:
- {
- TabRecordPtr tabPtr;
- jam();
- tabPtr.i = signal->theData[1];
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- tableUpdateLab(signal, tabPtr);
- return;
- }
- case DihContinueB::ZCHECK_LCP_COMPLETED:
- {
- jam();
- checkLcpCompletedLab(signal);
- return;
- }
- case DihContinueB::ZINIT_LCP:
- {
- jam();
- Uint32 senderRef = signal->theData[1];
- Uint32 tableId = signal->theData[2];
- initLcpLab(signal, senderRef, tableId);
- return;
- }
- case DihContinueB::ZADD_TABLE_MASTER_PAGES:
- {
- TabRecordPtr tabPtr;
- jam();
- tabPtr.i = signal->theData[1];
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- tabPtr.p->tabUpdateState = TabRecord::US_ADD_TABLE_MASTER;
- tableUpdateLab(signal, tabPtr);
- return;
- break;
- }
- case DihContinueB::ZDIH_ADD_TABLE_MASTER:
- {
- jam();
- addTable_closeConf(signal, signal->theData[1]);
- return;
- }
- case DihContinueB::ZADD_TABLE_SLAVE_PAGES:
- {
- TabRecordPtr tabPtr;
- jam();
- tabPtr.i = signal->theData[1];
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- tabPtr.p->tabUpdateState = TabRecord::US_ADD_TABLE_SLAVE;
- tableUpdateLab(signal, tabPtr);
- return;
- }
- case DihContinueB::ZDIH_ADD_TABLE_SLAVE:
- {
- ndbrequire(false);
- return;
- }
- case DihContinueB::ZSTART_GCP:
- jam();
-#ifndef NO_GCP
- startGcpLab(signal, signal->theData[1]);
-#endif
- return;
- break;
- case DihContinueB::ZCOPY_GCI:{
- jam();
- CopyGCIReq::CopyReason reason = (CopyGCIReq::CopyReason)signal->theData[1];
- ndbrequire(c_copyGCIMaster.m_copyReason == reason);
- sendLoopMacro(COPY_GCIREQ, sendCOPY_GCIREQ);
- return;
- }
- break;
- case DihContinueB::ZEMPTY_VERIFY_QUEUE:
- jam();
- emptyverificbuffer(signal, true);
- return;
- break;
- case DihContinueB::ZCHECK_GCP_STOP:
- jam();
-#ifndef NO_GCP
- checkGcpStopLab(signal);
-#endif
- return;
- break;
- case DihContinueB::ZREMOVE_NODE_FROM_TABLE:
- {
- jam();
- Uint32 nodeId = signal->theData[1];
- Uint32 tableId = signal->theData[2];
- removeNodeFromTables(signal, nodeId, tableId);
- return;
- }
- case DihContinueB::ZCOPY_NODE:
- {
- jam();
- Uint32 tableId = signal->theData[1];
- copyNodeLab(signal, tableId);
- return;
- }
- case DihContinueB::ZSTART_TAKE_OVER:
- {
- jam();
- Uint32 takeOverPtrI = signal->theData[1];
- Uint32 startNode = signal->theData[2];
- Uint32 toNode = signal->theData[3];
- startTakeOver(signal, takeOverPtrI, startNode, toNode);
- return;
- break;
- }
- case DihContinueB::ZCHECK_START_TAKE_OVER:
- jam();
- checkStartTakeOver(signal);
- break;
- case DihContinueB::ZTO_START_COPY_FRAG:
- {
- jam();
- Uint32 takeOverPtrI = signal->theData[1];
- startNextCopyFragment(signal, takeOverPtrI);
- return;
- }
- case DihContinueB::ZINVALIDATE_NODE_LCP:
- {
- jam();
- const Uint32 nodeId = signal->theData[1];
- const Uint32 tableId = signal->theData[2];
- invalidateNodeLCP(signal, nodeId, tableId);
- return;
- }
- case DihContinueB::ZINITIALISE_RECORDS:
- jam();
- initialiseRecordsLab(signal,
- signal->theData[1],
- signal->theData[2],
- signal->theData[3]);
- return;
- break;
- case DihContinueB::ZSTART_PERMREQ_AGAIN:
- jam();
- nodeRestartPh2Lab(signal);
- return;
- break;
- case DihContinueB::SwitchReplica:
- {
- jam();
- const Uint32 nodeId = signal->theData[1];
- const Uint32 tableId = signal->theData[2];
- const Uint32 fragNo = signal->theData[3];
- switchReplica(signal, nodeId, tableId, fragNo);
- return;
- }
- case DihContinueB::ZSEND_START_TO:
- {
- jam();
- Uint32 takeOverPtrI = signal->theData[1];
- sendStartTo(signal, takeOverPtrI);
- return;
- }
- case DihContinueB::ZSEND_ADD_FRAG:
- {
- jam();
- Uint32 takeOverPtrI = signal->theData[1];
- toCopyFragLab(signal, takeOverPtrI);
- return;
- }
- case DihContinueB::ZSEND_UPDATE_TO:
- {
- jam();
- Uint32 takeOverPtrI = signal->theData[1];
- Uint32 updateState = signal->theData[4];
- sendUpdateTo(signal, takeOverPtrI, updateState);
- return;
- }
- case DihContinueB::ZSEND_END_TO:
- {
- jam();
- Uint32 takeOverPtrI = signal->theData[1];
- sendEndTo(signal, takeOverPtrI);
- return;
- }
- case DihContinueB::ZSEND_CREATE_FRAG:
- {
- jam();
- Uint32 takeOverPtrI = signal->theData[1];
- Uint32 storedType = signal->theData[2];
- Uint32 startGci = signal->theData[3];
- sendCreateFragReq(signal, startGci, storedType, takeOverPtrI);
- return;
- }
- case DihContinueB::WAIT_DROP_TAB_WRITING_TO_FILE:{
- jam();
- TabRecordPtr tabPtr;
- tabPtr.i = signal->theData[1];
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- waitDropTabWritingToFile(signal, tabPtr);
- return;
- }
- case DihContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH:{
- jam();
- Uint32 nodeId = signal->theData[1];
- Uint32 tableId = signal->theData[2];
- checkWaitDropTabFailedLqh(signal, nodeId, tableId);
- return;
- }
- }//switch
-
- ndbrequire(false);
- return;
-}//Dbdih::execCONTINUEB()
-
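// execCONTINUEB() above is the dispatcher for work the block schedules for
// itself: a job code is placed in theData[0], the signal is sent back to this
// block (optionally with a delay, as ZSTART_PERMREQ_AGAIN does further down),
// and the switch routes it to the right continuation routine.  Below is a
// minimal standalone sketch of that pattern; the enum values, the queue and
// the handler bodies are simplified stand-ins, not the real NDB kernel API.
#include <cstdio>
#include <cstdint>
#include <deque>

enum ContinueB : uint32_t { ZSTART_GCP = 1, ZCHECK_GCP_STOP = 2 };

struct Block {
  std::deque<uint32_t> selfQueue;                 // stands in for the signal queue

  void sendContinueB(uint32_t jobCode) { selfQueue.push_back(jobCode); }

  void execContinueB(uint32_t jobCode) {          // dispatch on theData[0]
    switch (jobCode) {
    case ZSTART_GCP:      std::printf("start GCP round\n");  return;
    case ZCHECK_GCP_STOP: std::printf("check GCP stop\n");   return;
    default:              std::printf("unknown job %u\n", (unsigned)jobCode); return;
    }
  }

  void run() {                                    // drain the self-sent signals
    while (!selfQueue.empty()) {
      uint32_t code = selfQueue.front();
      selfQueue.pop_front();
      execContinueB(code);
    }
  }
};

int main() {
  Block dih;
  dih.sendContinueB(ZSTART_GCP);
  dih.sendContinueB(ZCHECK_GCP_STOP);
  dih.run();
}
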
-void Dbdih::execCOPY_GCIREQ(Signal* signal)
-{
- CopyGCIReq * const copyGCI = (CopyGCIReq *)&signal->theData[0];
- jamEntry();
- CopyGCIReq::CopyReason reason = (CopyGCIReq::CopyReason)copyGCI->copyReason;
- const Uint32 tstart = copyGCI->startWord;
-
-  ndbrequire(cmasterdihref == signal->senderBlockRef());
- ndbrequire(c_copyGCISlave.m_copyReason == CopyGCIReq::IDLE);
- ndbrequire(c_copyGCISlave.m_expectedNextWord == tstart);
- ndbrequire(reason != CopyGCIReq::IDLE);
-
- arrGuard(tstart + CopyGCIReq::DATA_SIZE, sizeof(sysfileData)/4);
- for(Uint32 i = 0; i<CopyGCIReq::DATA_SIZE; i++)
- cdata[tstart+i] = copyGCI->data[i];
-
- if ((tstart + CopyGCIReq::DATA_SIZE) >= Sysfile::SYSFILE_SIZE32) {
- jam();
- c_copyGCISlave.m_expectedNextWord = 0;
- } else {
- jam();
- c_copyGCISlave.m_expectedNextWord += CopyGCIReq::DATA_SIZE;
- return;
- }//if
-
- memcpy(sysfileData, cdata, sizeof(sysfileData));
-
- c_copyGCISlave.m_copyReason = reason;
- c_copyGCISlave.m_senderRef = signal->senderBlockRef();
- c_copyGCISlave.m_senderData = copyGCI->anyData;
-
- CRASH_INSERTION2(7020, reason==CopyGCIReq::LOCAL_CHECKPOINT);
- CRASH_INSERTION2(7008, reason==CopyGCIReq::GLOBAL_CHECKPOINT);
-
- /* -------------------------------------------------------------------------*/
- /* WE SET THE REQUESTER OF THE COPY GCI TO THE CURRENT MASTER. IF THE */
- /* CURRENT MASTER FAILS WE DO NOT WANT THE NEW MASTER TO RECEIVE CONFIRM OF */
- /* SOMETHING IT HAS NOT SENT. THE TAKE OVER MUST BE CAREFUL. */
- /* -------------------------------------------------------------------------*/
- bool ok = false;
- switch(reason){
- case CopyGCIReq::IDLE:
- ok = true;
- jam();
- ndbrequire(false);
- break;
- case CopyGCIReq::LOCAL_CHECKPOINT: {
- ok = true;
- jam();
- c_lcpState.setLcpStatus(LCP_COPY_GCI, __LINE__);
- c_lcpState.m_masterLcpDihRef = cmasterdihref;
- setNodeInfo(signal);
- break;
- }
- case CopyGCIReq::RESTART: {
- ok = true;
- jam();
- coldgcp = SYSFILE->newestRestorableGCI;
- crestartGci = SYSFILE->newestRestorableGCI;
- Sysfile::setRestartOngoing(SYSFILE->systemRestartBits);
- currentgcp = coldgcp + 1;
- cnewgcp = coldgcp + 1;
- setNodeInfo(signal);
- if ((Sysfile::getLCPOngoing(SYSFILE->systemRestartBits))) {
- jam();
- /* -------------------------------------------------------------------- */
- // IF THERE WAS A LOCAL CHECKPOINT ONGOING AT THE CRASH MOMENT WE WILL
- // INVALIDATE THAT LOCAL CHECKPOINT.
- /* -------------------------------------------------------------------- */
- invalidateLcpInfoAfterSr();
- }//if
- break;
- }
- case CopyGCIReq::GLOBAL_CHECKPOINT: {
- ok = true;
- jam();
- cgcpParticipantState = GCP_PARTICIPANT_COPY_GCI_RECEIVED;
- setNodeInfo(signal);
- break;
-  }
- case CopyGCIReq::INITIAL_START_COMPLETED:
- ok = true;
- jam();
- break;
- }
- ndbrequire(ok);
-
- /* ----------------------------------------------------------------------- */
- /* WE START BY TRYING TO OPEN THE FIRST RESTORABLE GCI FILE. */
- /* ----------------------------------------------------------------------- */
- FileRecordPtr filePtr;
- filePtr.i = crestartInfoFile[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- if (filePtr.p->fileStatus == FileRecord::OPEN) {
- jam();
- openingCopyGciSkipInitLab(signal, filePtr);
- return;
- }//if
- openFileRw(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::OPENING_COPY_GCI;
- return;
-}//Dbdih::execCOPY_GCIREQ()
-
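// execCOPY_GCIREQ() above receives the sysfile as a sequence of fixed-size
// word chunks: each signal carries a start offset, the receiver checks it
// against the next expected offset, copies the words aside, and only acts
// once the final chunk has arrived.  The following is a minimal standalone
// sketch of that bookkeeping; ChunkReceiver, CHUNK_WORDS and TOTAL_WORDS are
// illustrative stand-ins, not the actual NDB types or sizes.
#include <cassert>
#include <cstring>
#include <cstdint>

struct ChunkReceiver {
  enum : uint32_t { CHUNK_WORDS = 16, TOTAL_WORDS = 40 };  // assumed sizes

  uint32_t expectedNextWord = 0;                 // mirrors m_expectedNextWord
  uint32_t buffer[TOTAL_WORDS + CHUNK_WORDS];    // staging area, like cdata[]

  // Returns true when the whole payload has been received.
  bool receive(uint32_t startWord, const uint32_t* data) {
    assert(startWord == expectedNextWord);       // chunks must arrive in order
    std::memcpy(&buffer[startWord], data, CHUNK_WORDS * sizeof(uint32_t));
    if (startWord + CHUNK_WORDS >= TOTAL_WORDS) {
      expectedNextWord = 0;                      // ready for the next transfer
      return true;                               // caller now installs the data
    }
    expectedNextWord += CHUNK_WORDS;
    return false;                                // wait for more chunks
  }
};

int main() {
  ChunkReceiver rx;
  uint32_t chunk[ChunkReceiver::CHUNK_WORDS] = {0};
  bool done = false;
  for (uint32_t off = 0; !done; off += ChunkReceiver::CHUNK_WORDS)
    done = rx.receive(off, chunk);
  return done ? 0 : 1;
}
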
-void Dbdih::execDICTSTARTCONF(Signal* signal)
-{
- jamEntry();
- Uint32 nodeId = refToNode(signal->getSendersBlockRef());
- if (nodeId != getOwnNodeId()) {
- jam();
- nodeDictStartConfLab(signal);
- } else {
- jam();
- dictStartConfLab(signal);
- }//if
-}//Dbdih::execDICTSTARTCONF()
-
-void Dbdih::execFSCLOSECONF(Signal* signal)
-{
- FileRecordPtr filePtr;
- jamEntry();
- filePtr.i = signal->theData[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- filePtr.p->fileStatus = FileRecord::CLOSED;
- FileRecord::ReqStatus status = filePtr.p->reqStatus;
- filePtr.p->reqStatus = FileRecord::IDLE;
- switch (status) {
- case FileRecord::CLOSING_GCP:
- jam();
- closingGcpLab(signal, filePtr);
- break;
- case FileRecord::CLOSING_GCP_CRASH:
- jam();
- closingGcpCrashLab(signal, filePtr);
- break;
- case FileRecord::CLOSING_TABLE_CRASH:
- jam();
- closingTableCrashLab(signal, filePtr);
- break;
- case FileRecord::CLOSING_TABLE_SR:
- jam();
- closingTableSrLab(signal, filePtr);
- break;
- case FileRecord::TABLE_CLOSE:
- jam();
- tableCloseLab(signal, filePtr);
- break;
- case FileRecord::TABLE_CLOSE_DELETE:
- jam();
- tableDeleteLab(signal, filePtr);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dbdih::execFSCLOSECONF()
-
-void Dbdih::execFSCLOSEREF(Signal* signal)
-{
- FileRecordPtr filePtr;
- jamEntry();
- filePtr.i = signal->theData[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- FileRecord::ReqStatus status = filePtr.p->reqStatus;
- filePtr.p->reqStatus = FileRecord::IDLE;
- switch (status) {
- case FileRecord::CLOSING_GCP:
- ndbrequire(false);
- break;
- case FileRecord::CLOSING_GCP_CRASH:
- jam();
- closingGcpCrashLab(signal, filePtr);
- break;
- case FileRecord::CLOSING_TABLE_CRASH:
- jam();
- closingTableCrashLab(signal, filePtr);
- break;
- case FileRecord::CLOSING_TABLE_SR:
- ndbrequire(false);
- break;
- case FileRecord::TABLE_CLOSE:
- ndbrequire(false);
- break;
- case FileRecord::TABLE_CLOSE_DELETE:
- ndbrequire(false);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dbdih::execFSCLOSEREF()
-
-void Dbdih::execFSOPENCONF(Signal* signal)
-{
- FileRecordPtr filePtr;
- jamEntry();
- filePtr.i = signal->theData[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- filePtr.p->fileRef = signal->theData[1];
- filePtr.p->fileStatus = FileRecord::OPEN;
- FileRecord::ReqStatus status = filePtr.p->reqStatus;
- filePtr.p->reqStatus = FileRecord::IDLE;
- switch (status) {
- case FileRecord::CREATING_GCP:
- jam();
- creatingGcpLab(signal, filePtr);
- break;
- case FileRecord::OPENING_COPY_GCI:
- jam();
- openingCopyGciSkipInitLab(signal, filePtr);
- break;
- case FileRecord::CREATING_COPY_GCI:
- jam();
- openingCopyGciSkipInitLab(signal, filePtr);
- break;
- case FileRecord::OPENING_GCP:
- jam();
- openingGcpLab(signal, filePtr);
- break;
- case FileRecord::OPENING_TABLE:
- jam();
- openingTableLab(signal, filePtr);
- break;
- case FileRecord::TABLE_CREATE:
- jam();
- tableCreateLab(signal, filePtr);
- break;
- case FileRecord::TABLE_OPEN_FOR_DELETE:
- jam();
- tableOpenLab(signal, filePtr);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dbdih::execFSOPENCONF()
-
-void Dbdih::execFSOPENREF(Signal* signal)
-{
- FileRecordPtr filePtr;
- jamEntry();
- filePtr.i = signal->theData[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- FileRecord::ReqStatus status = filePtr.p->reqStatus;
- filePtr.p->reqStatus = FileRecord::IDLE;
- switch (status) {
- case FileRecord::CREATING_GCP:
- /* --------------------------------------------------------------------- */
- /* WE DID NOT MANAGE TO CREATE A GLOBAL CHECKPOINT FILE. SERIOUS ERROR */
- /* WHICH CAUSES A SYSTEM RESTART. */
- /* --------------------------------------------------------------------- */
- ndbrequire(false);
- break;
- case FileRecord::OPENING_COPY_GCI:
- jam();
- openingCopyGciErrorLab(signal, filePtr);
- break;
- case FileRecord::CREATING_COPY_GCI:
- ndbrequire(false);
- break;
- case FileRecord::OPENING_GCP:
- jam();
- openingGcpErrorLab(signal, filePtr);
- break;
- case FileRecord::OPENING_TABLE:
- jam();
- openingTableErrorLab(signal, filePtr);
- break;
- case FileRecord::TABLE_CREATE:
- ndbrequire(false);
- break;
- case FileRecord::TABLE_OPEN_FOR_DELETE:
- jam();
- tableDeleteLab(signal, filePtr);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dbdih::execFSOPENREF()
-
-void Dbdih::execFSREADCONF(Signal* signal)
-{
- FileRecordPtr filePtr;
- jamEntry();
- filePtr.i = signal->theData[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- FileRecord::ReqStatus status = filePtr.p->reqStatus;
- filePtr.p->reqStatus = FileRecord::IDLE;
- switch (status) {
- case FileRecord::READING_GCP:
- jam();
- readingGcpLab(signal, filePtr);
- break;
- case FileRecord::READING_TABLE:
- jam();
- readingTableLab(signal, filePtr);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dbdih::execFSREADCONF()
-
-void Dbdih::execFSREADREF(Signal* signal)
-{
- FileRecordPtr filePtr;
- jamEntry();
- filePtr.i = signal->theData[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- FileRecord::ReqStatus status = filePtr.p->reqStatus;
- filePtr.p->reqStatus = FileRecord::IDLE;
- switch (status) {
- case FileRecord::READING_GCP:
- jam();
- readingGcpErrorLab(signal, filePtr);
- break;
- case FileRecord::READING_TABLE:
- jam();
- readingTableErrorLab(signal, filePtr);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dbdih::execFSREADREF()
-
-void Dbdih::execFSWRITECONF(Signal* signal)
-{
- FileRecordPtr filePtr;
- jamEntry();
- filePtr.i = signal->theData[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- FileRecord::ReqStatus status = filePtr.p->reqStatus;
- filePtr.p->reqStatus = FileRecord::IDLE;
- switch (status) {
- case FileRecord::WRITING_COPY_GCI:
- jam();
- writingCopyGciLab(signal, filePtr);
- break;
- case FileRecord::WRITE_INIT_GCP:
- jam();
- writeInitGcpLab(signal, filePtr);
- break;
- case FileRecord::TABLE_WRITE:
- jam();
- tableWriteLab(signal, filePtr);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dbdih::execFSWRITECONF()
-
-void Dbdih::execFSWRITEREF(Signal* signal)
-{
- FileRecordPtr filePtr;
- jamEntry();
- filePtr.i = signal->theData[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- FileRecord::ReqStatus status = filePtr.p->reqStatus;
- filePtr.p->reqStatus = FileRecord::IDLE;
- switch (status) {
- case FileRecord::WRITING_COPY_GCI:
- /* --------------------------------------------------------------------- */
- /* EVEN CREATING THE FILE DID NOT WORK. WE WILL THEN CRASH. */
- /* ERROR IN WRITING FILE. WE WILL NOT CONTINUE FROM HERE. */
- /* --------------------------------------------------------------------- */
- ndbrequire(false);
- break;
- case FileRecord::WRITE_INIT_GCP:
- /* --------------------------------------------------------------------- */
- /* AN ERROR OCCURRED IN WRITING A GCI FILE WHICH IS A SERIOUS ERROR */
- /* THAT CAUSES A SYSTEM RESTART. */
- /* --------------------------------------------------------------------- */
- ndbrequire(false);
- break;
- case FileRecord::TABLE_WRITE:
- ndbrequire(false);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dbdih::execFSWRITEREF()
-
-void Dbdih::execGETGCIREQ(Signal* signal)
-{
-
- jamEntry();
- Uint32 userPtr = signal->theData[0];
- BlockReference userRef = signal->theData[1];
-
- signal->theData[0] = userPtr;
- signal->theData[1] = SYSFILE->newestRestorableGCI;
- sendSignal(userRef, GSN_GETGCICONF, signal, 2, JBB);
-}//Dbdih::execGETGCIREQ()
-
-void Dbdih::execREAD_CONFIG_REQ(Signal* signal)
-{
- const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
- Uint32 ref = req->senderRef;
- Uint32 senderData = req->senderData;
- ndbrequire(req->noOfParameters == 0);
-
- jamEntry();
-
- const ndb_mgm_configuration_iterator * p =
- theConfiguration.getOwnConfigIterator();
- ndbrequire(p != 0);
-
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_API_CONNECT,
- &capiConnectFileSize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_CONNECT,&cconnectFileSize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_FRAG_CONNECT,
- &cfragstoreFileSize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_REPLICAS,
- &creplicaFileSize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_TABLE, &ctabFileSize));
- cfileFileSize = (2 * ctabFileSize) + 2;
- initRecords();
- initialiseRecordsLab(signal, 0, ref, senderData);
- return;
-}//Dbdih::execREAD_CONFIG_REQ()
-
-void Dbdih::execSTART_COPYREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dbdih::execSTART_COPYREF()
-
-void Dbdih::execSTART_FRAGCONF(Signal* signal)
-{
- (void)signal; // Don't want compiler warning
- /* ********************************************************************* */
- /* If anyone wants to add functionality in this method, be aware that */
- /* for temporary tables no START_FRAGREQ is sent and therefore no */
- /* START_FRAGCONF signal will be received for those tables!! */
- /* ********************************************************************* */
- jamEntry();
- return;
-}//Dbdih::execSTART_FRAGCONF()
-
-void Dbdih::execSTART_MEREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dbdih::execSTART_MEREF()
-
-void Dbdih::execTAB_COMMITREQ(Signal* signal)
-{
- TabRecordPtr tabPtr;
- jamEntry();
- Uint32 tdictPtr = signal->theData[0];
- BlockReference tdictBlockref = signal->theData[1];
- tabPtr.i = signal->theData[2];
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_CREATING);
- tabPtr.p->tabStatus = TabRecord::TS_ACTIVE;
- signal->theData[0] = tdictPtr;
- signal->theData[1] = cownNodeId;
- signal->theData[2] = tabPtr.i;
- sendSignal(tdictBlockref, GSN_TAB_COMMITCONF, signal, 3, JBB);
- return;
-}//Dbdih::execTAB_COMMITREQ()
-
-/*
- 3.2 S T A N D A R D S U B P R O G R A M S I N P L E X
- *************************************************************
- */
-/*
- 3.2.1 S T A R T / R E S T A R T
- **********************************
- */
-/*****************************************************************************/
-/* ********** START / RESTART MODULE *************/
-/*****************************************************************************/
-/*
- 3.2.1.1 LOADING O W N B L O C K R E F E R E N C E (ABSOLUTE PHASE 1)
- *****************************************************************************
- */
-void Dbdih::execDIH_RESTARTREQ(Signal* signal)
-{
- jamEntry();
- cntrlblockref = signal->theData[0];
- if(theConfiguration.getInitialStart()){
- sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB);
- } else {
- readGciFileLab(signal);
- }
- return;
-}//Dbdih::execDIH_RESTARTREQ()
-
-void Dbdih::execSTTOR(Signal* signal)
-{
- jamEntry();
-
- signal->theData[0] = 0;
- signal->theData[1] = 0;
- signal->theData[2] = 0;
- signal->theData[3] = 1; // Next start phase
- signal->theData[4] = 255; // End of start phase list
- sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
- return;
-}//Dbdih::execSTTOR()
-
-void Dbdih::initialStartCompletedLab(Signal* signal)
-{
- /*-------------------------------------------------------------------------*/
- /* NOW THAT (RE)START IS COMPLETED WE CAN START THE LCP.*/
- /*-------------------------------------------------------------------------*/
- return;
-}//Dbdih::initialStartCompletedLab()
-
-/*
- * ***************************************************************************
- * S E N D I N G R E P L Y T O S T A R T / R E S T A R T R E Q U E S T S
- * ****************************************************************************
- */
-void Dbdih::ndbsttorry10Lab(Signal* signal, Uint32 _line)
-{
- /*-------------------------------------------------------------------------*/
- // AN NDB START PHASE HAS BEEN COMPLETED. WHEN START PHASE 6 IS COMPLETED WE
- // RECORD THAT THE SYSTEM IS RUNNING.
- /*-------------------------------------------------------------------------*/
- signal->theData[0] = reference();
- sendSignal(cntrlblockref, GSN_NDB_STTORRY, signal, 1, JBB);
- return;
-}//Dbdih::ndbsttorry10Lab()
-
-/*
-****************************************
-I N T E R N A L P H A S E S
-****************************************
-*/
-/*---------------------------------------------------------------------------*/
-/*NDB_STTOR START SIGNAL AT START/RESTART */
-/*---------------------------------------------------------------------------*/
-void Dbdih::execNDB_STTOR(Signal* signal)
-{
- jamEntry();
- BlockReference cntrRef = signal->theData[0]; /* SENDERS BLOCK REFERENCE */
- Uint32 ownNodeId = signal->theData[1]; /* OWN PROCESSOR ID*/
- Uint32 phase = signal->theData[2]; /* INTERNAL START PHASE*/
- Uint32 typestart = signal->theData[3];
-
- cstarttype = typestart;
- cstartPhase = phase;
-
- switch (phase){
- case ZNDB_SPH1:
- jam();
- /*----------------------------------------------------------------------*/
- /* Set the delay between local checkpoints in ndb startphase 1. */
- /*----------------------------------------------------------------------*/
- cownNodeId = ownNodeId;
- /*-----------------------------------------------------------------------*/
- // Compute all static block references in this node as part of
- // ndb start phase 1.
- /*-----------------------------------------------------------------------*/
- cntrlblockref = cntrRef;
- clocaltcblockref = calcTcBlockRef(ownNodeId);
- clocallqhblockref = calcLqhBlockRef(ownNodeId);
- cdictblockref = calcDictBlockRef(ownNodeId);
- ndbsttorry10Lab(signal, __LINE__);
- break;
-
- case ZNDB_SPH2:
- jam();
- /*-----------------------------------------------------------------------*/
- // Set the number of replicas, maximum is 4 replicas.
- // Read the ndb nodes from the configuration.
- /*-----------------------------------------------------------------------*/
-
- /*-----------------------------------------------------------------------*/
- // For node restarts we will also add a request for permission
- // to continue the system restart.
- // The permission is given by the master node in the alive set.
- /*-----------------------------------------------------------------------*/
- createMutexes(signal, 0);
- break;
-
- case ZNDB_SPH3:
- jam();
- /*-----------------------------------------------------------------------*/
- // Non-master nodes performing an initial start will execute
- // the start request here since the
- // initial start do not synchronise so much from the master.
- // In the master nodes the start
- // request will be sent directly to dih (in ndb_startreq) when all
- // nodes have completed phase 3 of the start.
- /*-----------------------------------------------------------------------*/
- cmasterState = MASTER_IDLE;
- if(cstarttype == NodeState::ST_INITIAL_START ||
- cstarttype == NodeState::ST_SYSTEM_RESTART){
- jam();
- cmasterState = isMaster() ? MASTER_ACTIVE : MASTER_IDLE;
- }
- if (!isMaster() && cstarttype == NodeState::ST_INITIAL_START) {
- jam();
- ndbStartReqLab(signal, cntrRef);
- return;
- }//if
- ndbsttorry10Lab(signal, __LINE__);
- break;
-
- case ZNDB_SPH4:
- jam();
- c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
- cmasterTakeOverNode = ZNIL;
- switch(typestart){
- case NodeState::ST_INITIAL_START:
- jam();
- ndbsttorry10Lab(signal, __LINE__);
- return;
- case NodeState::ST_SYSTEM_RESTART:
- jam();
- if (isMaster()) {
- jam();
- systemRestartTakeOverLab(signal);
- if (anyActiveTakeOver() && false) {
- jam();
- ndbout_c("1 - anyActiveTakeOver == true");
- return;
- }
- }
- ndbsttorry10Lab(signal, __LINE__);
- return;
- case NodeState::ST_INITIAL_NODE_RESTART:
- case NodeState::ST_NODE_RESTART:
- jam();
- /***********************************************************************
- * When starting nodes while system is operational we must be controlled
- * by the master since only one node restart is allowed at a time.
- * When this signal is confirmed the master has also copied the
- * dictionary and the distribution information.
- */
- StartMeReq * req = (StartMeReq*)&signal->theData[0];
- req->startingRef = reference();
- req->startingVersion = 0; // Obsolete
- sendSignal(cmasterdihref, GSN_START_MEREQ, signal,
- StartMeReq::SignalLength, JBB);
- return;
- }
- ndbrequire(false);
- break;
- case ZNDB_SPH5:
- jam();
- switch(typestart){
- case NodeState::ST_INITIAL_START:
- case NodeState::ST_SYSTEM_RESTART:
- jam();
- /*---------------------------------------------------------------------*/
- // WE EXECUTE A LOCAL CHECKPOINT AS A PART OF A SYSTEM RESTART.
- // THE IDEA IS THAT WE NEED TO
- // ENSURE THAT WE CAN RECOVER FROM PROBLEMS CAUSED BY MANY NODE
- // CRASHES THAT CAUSES THE LOG
- // TO GROW AND THE NUMBER OF LOG ROUNDS TO EXECUTE TO GROW.
- // THIS CAN OTHERWISE GET US INTO
- // A SITUATION WHICH IS UNREPAIRABLE. THUS WE EXECUTE A CHECKPOINT
- // BEFORE ALLOWING ANY TRANSACTIONS TO START.
- /*---------------------------------------------------------------------*/
- if (!isMaster()) {
- jam();
- ndbsttorry10Lab(signal, __LINE__);
- return;
- }//if
-
- c_lcpState.immediateLcpStart = true;
- cwaitLcpSr = true;
- checkLcpStart(signal, __LINE__);
- return;
- case NodeState::ST_NODE_RESTART:
- case NodeState::ST_INITIAL_NODE_RESTART:
- jam();
- signal->theData[0] = cownNodeId;
- signal->theData[1] = reference();
- sendSignal(cmasterdihref, GSN_START_COPYREQ, signal, 2, JBB);
- return;
- }
- ndbrequire(false);
- case ZNDB_SPH6:
- jam();
- switch(typestart){
- case NodeState::ST_INITIAL_START:
- case NodeState::ST_SYSTEM_RESTART:
- jam();
- if(isMaster()){
- jam();
- startGcp(signal);
- }
- ndbsttorry10Lab(signal, __LINE__);
- return;
- case NodeState::ST_NODE_RESTART:
- case NodeState::ST_INITIAL_NODE_RESTART:
- ndbsttorry10Lab(signal, __LINE__);
- return;
- }
- ndbrequire(false);
- break;
- default:
- jam();
- ndbsttorry10Lab(signal, __LINE__);
- break;
- }//switch
-}//Dbdih::execNDB_STTOR()
-
-void
-Dbdih::createMutexes(Signal * signal, Uint32 count){
- Callback c = { safe_cast(&Dbdih::createMutex_done), count };
-
- switch(count){
- case 0:{
- Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
- mutex.create(c);
- return;
- }
- case 1:{
- Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle);
- mutex.create(c);
- return;
- }
- }
-
- signal->theData[0] = reference();
- sendSignal(cntrlblockref, GSN_READ_NODESREQ, signal, 1, JBB);
-}
-
-void
-Dbdih::createMutex_done(Signal* signal, Uint32 senderData, Uint32 retVal){
- jamEntry();
- ndbrequire(retVal == 0);
-
- switch(senderData){
- case 0:{
- Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
- mutex.release();
-    break;
-  }
- case 1:{
- Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle);
- mutex.release();
-    break;
-  }
- }
-
- createMutexes(signal, senderData + 1);
-}
-
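// createMutexes()/createMutex_done() above form a small callback chain: step N
// creates one mutex, its completion callback releases the just-created handle
// and re-enters the driver with step N + 1, and once every step is done the
// block proceeds to the next start-up action (READ_NODESREQ).  The sketch
// below shows the same chaining shape with hypothetical names and a
// synchronous "callback"; it is not the real Mutex/Callback API.
#include <cstdio>
#include <vector>

struct ChainedInit {
  std::vector<const char*> resources { "startLcpMutex", "switchPrimaryMutex" };

  void createNext(unsigned step) {
    if (step >= resources.size()) {              // all resources created
      std::printf("init done, requesting node list\n");
      return;
    }
    std::printf("creating %s\n", resources[step]);
    // The completion callback simply drives the chain forward; in DIH this
    // happens asynchronously when the mutex-create signal is confirmed.
    auto done = [this, step]() { createNext(step + 1); };
    done();
  }
};

int main() {
  ChainedInit init;
  init.createNext(0);                            // mirrors createMutexes(signal, 0)
}
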
-/*****************************************************************************/
-/* ------------------------------------------------------------------------- */
-/* WE HAVE BEEN REQUESTED BY NDBCNTR TO PERFORM A RESTART OF THE */
-/* DATABASE TABLES. */
-/* THIS SIGNAL IS SENT AFTER COMPLETING PHASE 3 IN ALL BLOCKS IN A */
-/* SYSTEM RESTART. WE WILL ALSO JUMP TO THIS LABEL FROM PHASE 3 IN AN */
-/* INITIAL START. */
-/* ------------------------------------------------------------------------- */
-/*****************************************************************************/
-void Dbdih::execNDB_STARTREQ(Signal* signal)
-{
- jamEntry();
- BlockReference ref = signal->theData[0];
- cstarttype = signal->theData[1];
- ndbStartReqLab(signal, ref);
-}//Dbdih::execNDB_STARTREQ()
-
-void Dbdih::ndbStartReqLab(Signal* signal, BlockReference ref)
-{
- cndbStartReqBlockref = ref;
- if (cstarttype == NodeState::ST_INITIAL_START) {
- jam();
- initRestartInfo();
- initGciFilesLab(signal);
- return;
- }
-
- ndbrequire(isMaster());
- copyGciLab(signal, CopyGCIReq::RESTART); // We have already read the file!
-}//Dbdih::ndbStartReqLab()
-
-void Dbdih::execREAD_NODESCONF(Signal* signal)
-{
- unsigned i;
- ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
- jamEntry();
- Uint32 nodeArray[MAX_NDB_NODES];
-
- csystemnodes = readNodes->noOfNodes;
- cmasterNodeId = readNodes->masterNodeId;
- int index = 0;
- NdbNodeBitmask tmp; tmp.assign(2, readNodes->allNodes);
- for (i = 1; i < MAX_NDB_NODES; i++){
- jam();
- if(tmp.get(i)){
- jam();
- nodeArray[index] = i;
- if(NodeBitmask::get(readNodes->inactiveNodes, i) == false){
- jam();
- con_lineNodes++;
- }//if
- index++;
- }//if
- }//for
-
- if(cstarttype == NodeState::ST_SYSTEM_RESTART ||
- cstarttype == NodeState::ST_NODE_RESTART){
-
- for(i = 1; i<MAX_NDB_NODES; i++){
- const Uint32 stat = Sysfile::getNodeStatus(i, SYSFILE->nodeStatus);
- if(stat == Sysfile::NS_NotDefined && !tmp.get(i)){
- jam();
- continue;
- }
-
- if(tmp.get(i) && stat != Sysfile::NS_NotDefined){
- jam();
- continue;
- }
- char buf[255];
- BaseString::snprintf(buf, sizeof(buf),
- "Illegal configuration change."
- " Initial start needs to be performed"
- " when changing the number of storage nodes (node %d)", i);
- progError(__LINE__,
- ERR_INVALID_CONFIG,
- buf);
- }
- }
-
- ndbrequire(csystemnodes >= 1 && csystemnodes < MAX_NDB_NODES);
- if (cstarttype == NodeState::ST_INITIAL_START) {
- jam();
- ndbrequire(cnoReplicas <= csystemnodes);
- calculateHotSpare();
- ndbrequire(cnoReplicas <= (csystemnodes - cnoHotSpare));
- }//if
-
- cmasterdihref = calcDihBlockRef(cmasterNodeId);
- /*-------------------------------------------------------------------------*/
- /* MAKE THE LIST OF PRN-RECORDS, WHICH IS ONE OF THE NODE LISTS IN THIS BLOCK*/
- /*-------------------------------------------------------------------------*/
- makePrnList(readNodes, nodeArray);
- if (cstarttype == NodeState::ST_INITIAL_START) {
- jam();
- /**----------------------------------------------------------------------
- * WHEN WE INITIALLY START A DATABASE WE WILL CREATE NODE GROUPS.
- * ALL NODES ARE PUT INTO NODE GROUPS ALTHOUGH HOT SPARE NODES ARE PUT
- * INTO A SPECIAL NODE GROUP. IN EACH NODE GROUP WE HAVE THE SAME AMOUNT
- * OF NODES AS THERE ARE NUMBER OF REPLICAS.
- * ONE POSSIBLE USAGE OF NODE GROUPS IS TO MAKE A NODE GROUP A COMPLETE
- * FRAGMENT OF THE DATABASE. THIS MEANS THAT ALL REPLICAS WILL BE STORED
- * IN THE NODE GROUP.
- *-----------------------------------------------------------------------*/
- makeNodeGroups(nodeArray);
- }//if
- ndbrequire(checkNodeAlive(cmasterNodeId));
- if (cstarttype == NodeState::ST_INITIAL_START) {
- jam();
- /**-----------------------------------------------------------------------
- * INITIALISE THE SECOND NODE-LIST AND SET NODE BITS AND SOME NODE STATUS.
- * CLOSELY CONNECTED WITH MAKE_NODE_GROUPS. CHANGING ONE WILL AFFECT THE
- * OTHER AS WELL.
- *-----------------------------------------------------------------------*/
- setInitialActiveStatus();
- } else if (cstarttype == NodeState::ST_SYSTEM_RESTART) {
- jam();
- /*empty*/;
- } else if ((cstarttype == NodeState::ST_NODE_RESTART) ||
- (cstarttype == NodeState::ST_INITIAL_NODE_RESTART)) {
- jam();
- nodeRestartPh2Lab(signal);
- return;
- } else {
- ndbrequire(false);
- }//if
- /**------------------------------------------------------------------------
- * ESTABLISH CONNECTIONS WITH THE OTHER DIH BLOCKS AND INITIALISE THIS
- * NODE-LIST THAT HANDLES CONNECTION WITH OTHER DIH BLOCKS.
- *-------------------------------------------------------------------------*/
- ndbsttorry10Lab(signal, __LINE__);
-}//Dbdih::execREAD_NODESCONF()
-
-/*---------------------------------------------------------------------------*/
-/* START NODE LOGIC FOR NODE RESTART */
-/*---------------------------------------------------------------------------*/
-void Dbdih::nodeRestartPh2Lab(Signal* signal)
-{
- /*------------------------------------------------------------------------*/
- // REQUEST FOR PERMISSION FROM MASTER TO START A NODE IN AN ALREADY
- // RUNNING SYSTEM.
- /*------------------------------------------------------------------------*/
- StartPermReq * const req = (StartPermReq *)&signal->theData[0];
-
- req->blockRef = reference();
- req->nodeId = cownNodeId;
- req->startType = cstarttype;
- sendSignal(cmasterdihref, GSN_START_PERMREQ, signal, 3, JBB);
-}//Dbdih::nodeRestartPh2Lab()
-
-void Dbdih::execSTART_PERMCONF(Signal* signal)
-{
- jamEntry();
- CRASH_INSERTION(7121);
- Uint32 nodeId = signal->theData[0];
- cfailurenr = signal->theData[1];
- ndbrequire(nodeId == cownNodeId);
- ndbsttorry10Lab(signal, __LINE__);
-}//Dbdih::execSTART_PERMCONF()
-
-void Dbdih::execSTART_PERMREF(Signal* signal)
-{
- jamEntry();
- Uint32 errorCode = signal->theData[1];
- if (errorCode == ZNODE_ALREADY_STARTING_ERROR) {
- jam();
- /*-----------------------------------------------------------------------*/
- // The master was busy adding another node. We will wait for a second and
- // try again.
- /*-----------------------------------------------------------------------*/
- signal->theData[0] = DihContinueB::ZSTART_PERMREQ_AGAIN;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 3000, 1);
- return;
- }//if
- /*------------------------------------------------------------------------*/
- // Some process in another node involving our node was still active. We
- // will recover from this by crashing here.
- // This is a controlled restart using the
- // already existing node crash handling. It is not a bug to get here.
- /*-------------------------------------------------------------------------*/
- ndbrequire(false);
- return;
-}//Dbdih::execSTART_PERMREF()
-
-/*---------------------------------------------------------------------------*/
-/* THIS SIGNAL IS RECEIVED IN THE STARTING NODE WHEN THE START_MEREQ */
-/* HAS BEEN EXECUTED IN THE MASTER NODE. */
-/*---------------------------------------------------------------------------*/
-void Dbdih::execSTART_MECONF(Signal* signal)
-{
- jamEntry();
- StartMeConf * const startMe = (StartMeConf *)&signal->theData[0];
- Uint32 nodeId = startMe->startingNodeId;
- const Uint32 startWord = startMe->startWord;
- Uint32 i;
-
- CRASH_INSERTION(7130);
- ndbrequire(nodeId == cownNodeId);
- arrGuard(startWord + StartMeConf::DATA_SIZE, sizeof(cdata)/4);
- for(i = 0; i < StartMeConf::DATA_SIZE; i++)
- cdata[startWord+i] = startMe->data[i];
-
- if(startWord + StartMeConf::DATA_SIZE < Sysfile::SYSFILE_SIZE32){
- jam();
- /**
- * We are still waiting for data
- */
- return;
- }
- jam();
-
- /**
- * Copy into sysfile
- *
- * But dont copy lastCompletedGCI:s
- * But don't copy the lastCompletedGCI values
- Uint32 tempGCP[MAX_NDB_NODES];
- for(i = 0; i < MAX_NDB_NODES; i++)
- tempGCP[i] = SYSFILE->lastCompletedGCI[i];
-
- for(i = 0; i < Sysfile::SYSFILE_SIZE32; i++)
- sysfileData[i] = cdata[i];
- for(i = 0; i < MAX_NDB_NODES; i++)
- SYSFILE->lastCompletedGCI[i] = tempGCP[i];
-
- setNodeActiveStatus();
- setNodeGroups();
- ndbsttorry10Lab(signal, __LINE__);
-}//Dbdih::execSTART_MECONF()
-
-void Dbdih::execSTART_COPYCONF(Signal* signal)
-{
- jamEntry();
- Uint32 nodeId = signal->theData[0];
- ndbrequire(nodeId == cownNodeId);
- CRASH_INSERTION(7132);
- ndbsttorry10Lab(signal, __LINE__);
- return;
-}//Dbdih::execSTART_COPYCONF()
-
-/*---------------------------------------------------------------------------*/
-/* MASTER LOGIC FOR NODE RESTART */
-/*---------------------------------------------------------------------------*/
-/* NODE RESTART PERMISSION REQUEST */
-/*---------------------------------------------------------------------------*/
-// A REQUEST FROM A STARTING NODE TO PERFORM A NODE RESTART. IF NO OTHER NODE
-// IS ACTIVE IN PERFORMING A NODE RESTART AND THERE ARE NO ACTIVE PROCESSES IN
-// THIS NODE INVOLVING THE STARTING NODE THIS REQUEST WILL BE GRANTED.
-/*---------------------------------------------------------------------------*/
-void Dbdih::execSTART_PERMREQ(Signal* signal)
-{
- StartPermReq * const req = (StartPermReq*)&signal->theData[0];
- jamEntry();
- const BlockReference retRef = req->blockRef;
- const Uint32 nodeId = req->nodeId;
- const Uint32 typeStart = req->startType;
-
- CRASH_INSERTION(7122);
- ndbrequire(isMaster());
- ndbrequire(refToNode(retRef) == nodeId);
- if ((c_nodeStartMaster.activeState) ||
- (c_nodeStartMaster.wait != ZFALSE)) {
- jam();
- signal->theData[0] = nodeId;
- signal->theData[1] = ZNODE_ALREADY_STARTING_ERROR;
- sendSignal(retRef, GSN_START_PERMREF, signal, 2, JBB);
- return;
- }//if
- if (getNodeStatus(nodeId) != NodeRecord::DEAD){
- ndbout << "nodeStatus in START_PERMREQ = "
- << (Uint32) getNodeStatus(nodeId) << endl;
- ndbrequire(false);
- }//if
-
- /*----------------------------------------------------------------------
- * WE START THE INCLUSION PROCEDURE
- * ---------------------------------------------------------------------*/
- c_nodeStartMaster.failNr = cfailurenr;
- c_nodeStartMaster.wait = ZFALSE;
- c_nodeStartMaster.startInfoErrorCode = 0;
- c_nodeStartMaster.startNode = nodeId;
- c_nodeStartMaster.activeState = true;
- c_nodeStartMaster.m_outstandingGsn = GSN_START_INFOREQ;
-
- setNodeStatus(nodeId, NodeRecord::STARTING);
- /**
- * But if it's a NodeState::ST_INITIAL_NODE_RESTART
- *
- * We first have to clear LCP's
- * For normal node restart we simply ensure that all nodes
- * are informed of the node restart
- */
- StartInfoReq *const r =(StartInfoReq*)&signal->theData[0];
- r->startingNodeId = nodeId;
- r->typeStart = typeStart;
- r->systemFailureNo = cfailurenr;
- sendLoopMacro(START_INFOREQ, sendSTART_INFOREQ);
-}//Dbdih::execSTART_PERMREQ()
-
-void Dbdih::execSTART_INFOREF(Signal* signal)
-{
- StartInfoRef * ref = (StartInfoRef*)&signal->theData[0];
- if (getNodeStatus(ref->startingNodeId) != NodeRecord::STARTING) {
- jam();
- return;
- }//if
- ndbrequire(c_nodeStartMaster.startNode == ref->startingNodeId);
- c_nodeStartMaster.startInfoErrorCode = ref->errorCode;
- startInfoReply(signal, ref->sendingNodeId);
-}//Dbdih::execSTART_INFOREF()
-
-void Dbdih::execSTART_INFOCONF(Signal* signal)
-{
- jamEntry();
- StartInfoConf * conf = (StartInfoConf*)&signal->theData[0];
- if (getNodeStatus(conf->startingNodeId) != NodeRecord::STARTING) {
- jam();
- return;
- }//if
- ndbrequire(c_nodeStartMaster.startNode == conf->startingNodeId);
- startInfoReply(signal, conf->sendingNodeId);
-}//Dbdih::execSTART_INFOCONF()
-
-void Dbdih::startInfoReply(Signal* signal, Uint32 nodeId)
-{
- receiveLoopMacro(START_INFOREQ, nodeId);
- /**
- * We're finished with the START_INFOREQ's
- */
- if (c_nodeStartMaster.startInfoErrorCode == 0) {
- jam();
- /**
- * Everything has been a success so far
- */
- StartPermConf * conf = (StartPermConf*)&signal->theData[0];
- conf->startingNodeId = c_nodeStartMaster.startNode;
- conf->systemFailureNo = cfailurenr;
- sendSignal(calcDihBlockRef(c_nodeStartMaster.startNode),
- GSN_START_PERMCONF, signal, StartPermConf::SignalLength, JBB);
- c_nodeStartMaster.m_outstandingGsn = GSN_START_PERMCONF;
- } else {
- jam();
- StartPermRef * ref = (StartPermRef*)&signal->theData[0];
- ref->startingNodeId = c_nodeStartMaster.startNode;
- ref->errorCode = c_nodeStartMaster.startInfoErrorCode;
- sendSignal(calcDihBlockRef(c_nodeStartMaster.startNode),
- GSN_START_PERMREF, signal, StartPermRef::SignalLength, JBB);
- nodeResetStart();
- }//if
-}//Dbdih::startInfoReply()
-
-/*---------------------------------------------------------------------------*/
-/* NODE RESTART CONTINUE REQUEST */
-/*---------------------------------------------------------------------------*/
-// THIS SIGNAL AND THE CODE BELOW IS EXECUTED BY THE MASTER WHEN IT HAS BEEN
-// REQUESTED TO START UP A NEW NODE. The master instructs the starting node
-// how to set up its log for continued execution.
-/*---------------------------------------------------------------------------*/
-void Dbdih::execSTART_MEREQ(Signal* signal)
-{
- StartMeReq * req = (StartMeReq*)&signal->theData[0];
- jamEntry();
- const BlockReference Tblockref = req->startingRef;
- const Uint32 Tnodeid = refToNode(Tblockref);
-
- ndbrequire(isMaster());
- ndbrequire(c_nodeStartMaster.startNode == Tnodeid);
- ndbrequire(getNodeStatus(Tnodeid) == NodeRecord::STARTING);
-
- sendSTART_RECREQ(signal, Tnodeid);
-}//Dbdih::execSTART_MEREQ()
-
-void Dbdih::nodeRestartStartRecConfLab(Signal* signal)
-{
- c_nodeStartMaster.blockLcp = true;
- if ((c_lcpState.lcpStatus != LCP_STATUS_IDLE) &&
- (c_lcpState.lcpStatus != LCP_TCGET)) {
- jam();
- /*-----------------------------------------------------------------------*/
- // WE WILL NOT ALLOW A NODE RESTART TO COME IN WHEN A LOCAL CHECKPOINT IS
- // ONGOING. IT WOULD COMPLICATE THE LCP PROTOCOL TOO MUCH. WE WILL ADD THIS
- // LATER.
- /*-----------------------------------------------------------------------*/
- return;
- }//if
- lcpBlockedLab(signal);
-}//Dbdih::nodeRestartStartRecConfLab()
-
-void Dbdih::lcpBlockedLab(Signal* signal)
-{
- ndbrequire(getNodeStatus(c_nodeStartMaster.startNode)==NodeRecord::STARTING);
- /*------------------------------------------------------------------------*/
- // NOW WE HAVE COPIED ALL INFORMATION IN DICT WE ARE NOW READY TO COPY ALL
- // INFORMATION IN DIH TO THE NEW NODE.
- /*------------------------------------------------------------------------*/
- c_nodeStartMaster.wait = 10;
- signal->theData[0] = DihContinueB::ZCOPY_NODE;
- signal->theData[1] = 0;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- c_nodeStartMaster.m_outstandingGsn = GSN_COPY_TABREQ;
-}//Dbdih::lcpBlockedLab()
-
-void Dbdih::nodeDictStartConfLab(Signal* signal)
-{
- /*-------------------------------------------------------------------------*/
- // NOW WE HAVE COPIED BOTH DIH AND DICT INFORMATION. WE ARE NOW READY TO
- // INTEGRATE THE NODE INTO THE LCP AND GCP PROTOCOLS AND TO ALLOW UPDATES OF
- // THE DICTIONARY AGAIN.
- /*-------------------------------------------------------------------------*/
- c_nodeStartMaster.wait = ZFALSE;
- c_nodeStartMaster.blockGcp = true;
- if (cgcpStatus != GCP_READY) {
- /*-----------------------------------------------------------------------*/
- // The global checkpoint is executing. Wait until it is completed before we
- // continue processing the node recovery.
- /*-----------------------------------------------------------------------*/
- jam();
- return;
- }//if
- gcpBlockedLab(signal);
-
- /*-----------------------------------------------------------------*/
- // Report that node restart has completed copy of dictionary.
- /*-----------------------------------------------------------------*/
- signal->theData[0] = NDB_LE_NR_CopyDict;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
-}//Dbdih::nodeDictStartConfLab()
-
-void Dbdih::dihCopyCompletedLab(Signal* signal)
-{
- BlockReference ref = calcDictBlockRef(c_nodeStartMaster.startNode);
- DictStartReq * req = (DictStartReq*)&signal->theData[0];
- req->restartGci = cnewgcp;
- req->senderRef = reference();
- sendSignal(ref, GSN_DICTSTARTREQ,
- signal, DictStartReq::SignalLength, JBB);
- c_nodeStartMaster.m_outstandingGsn = GSN_DICTSTARTREQ;
- c_nodeStartMaster.wait = 0;
-}//Dbdih::dihCopyCompletedLab()
-
-void Dbdih::gcpBlockedLab(Signal* signal)
-{
- /*-----------------------------------------------------------------*/
- // Report that node restart has completed copy of distribution info.
- /*-----------------------------------------------------------------*/
- signal->theData[0] = NDB_LE_NR_CopyDistr;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
-
- /**
- * The node DIH will be part of LCP
- */
- NodeRecordPtr nodePtr;
- nodePtr.i = c_nodeStartMaster.startNode;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- nodePtr.p->m_inclDihLcp = true;
-
- /*-------------------------------------------------------------------------*/
- // NOW IT IS TIME TO INFORM ALL OTHER NODES IN THE CLUSTER OF THE STARTED
- // NODE SUCH THAT THEY ALSO INCLUDE THE NODE IN THE NODE LISTS AND SO FORTH.
- /*------------------------------------------------------------------------*/
- sendLoopMacro(INCL_NODEREQ, sendINCL_NODEREQ);
- /*-------------------------------------------------------------------------*/
- // We also need to send to the starting node to ensure he is aware of the
- // global checkpoint id and the correct state. We do not wait for any reply
- // since the starting node will not send any.
- /*-------------------------------------------------------------------------*/
- sendINCL_NODEREQ(signal, c_nodeStartMaster.startNode);
-}//Dbdih::gcpBlockedLab()
-
-/*---------------------------------------------------------------------------*/
-// THIS SIGNAL IS EXECUTED IN BOTH SLAVES AND IN THE MASTER
-/*---------------------------------------------------------------------------*/
-void Dbdih::execINCL_NODECONF(Signal* signal)
-{
- Uint32 TsendNodeId;
- Uint32 TstartNode_or_blockref;
-
- jamEntry();
- TstartNode_or_blockref = signal->theData[0];
- TsendNodeId = signal->theData[1];
-
- if (TstartNode_or_blockref == clocallqhblockref) {
- jam();
- /*-----------------------------------------------------------------------*/
- // THIS SIGNAL CAME FROM THE LOCAL LQH BLOCK.
- // WE WILL NOW SEND INCLUDE TO THE TC BLOCK.
- /*-----------------------------------------------------------------------*/
- signal->theData[0] = reference();
- signal->theData[1] = c_nodeStartSlave.nodeId;
- sendSignal(clocaltcblockref, GSN_INCL_NODEREQ, signal, 2, JBB);
- return;
- }//if
- if (TstartNode_or_blockref == clocaltcblockref) {
- jam();
- /*----------------------------------------------------------------------*/
- // THIS SIGNAL CAME FROM THE LOCAL TC BLOCK.
- // WE WILL NOW SEND INCLUDE TO THE DICT BLOCK.
- /*----------------------------------------------------------------------*/
- signal->theData[0] = reference();
- signal->theData[1] = c_nodeStartSlave.nodeId;
- sendSignal(cdictblockref, GSN_INCL_NODEREQ, signal, 2, JBB);
- return;
- }//if
- if (TstartNode_or_blockref == cdictblockref) {
- jam();
- /*-----------------------------------------------------------------------*/
- // THIS SIGNAL CAME FROM THE LOCAL DICT BLOCK. WE WILL NOW SEND CONF TO THE
- // BACKUP.
- /*-----------------------------------------------------------------------*/
- signal->theData[0] = reference();
- signal->theData[1] = c_nodeStartSlave.nodeId;
- sendSignal(BACKUP_REF, GSN_INCL_NODEREQ, signal, 2, JBB);
-
- // Suma will not send response to this for now, later...
- sendSignal(SUMA_REF, GSN_INCL_NODEREQ, signal, 2, JBB);
- // Grep will not send response to this for now, later...
- sendSignal(GREP_REF, GSN_INCL_NODEREQ, signal, 2, JBB);
- return;
- }//if
- if (TstartNode_or_blockref == numberToRef(BACKUP, getOwnNodeId())){
- jam();
- signal->theData[0] = c_nodeStartSlave.nodeId;
- signal->theData[1] = cownNodeId;
- sendSignal(cmasterdihref, GSN_INCL_NODECONF, signal, 2, JBB);
- c_nodeStartSlave.nodeId = 0;
- return;
- }
-
- ndbrequire(cmasterdihref == reference());
- receiveLoopMacro(INCL_NODEREQ, TsendNodeId);
-
- CRASH_INSERTION(7128);
- /*-------------------------------------------------------------------------*/
- // Now that we have included the starting node in the node lists in the
- // various blocks we are ready to start the global checkpoint protocol
- /*------------------------------------------------------------------------*/
- c_nodeStartMaster.wait = 11;
- c_nodeStartMaster.blockGcp = false;
-
- signal->theData[0] = reference();
- sendSignal(reference(), GSN_UNBLO_DICTCONF, signal, 1, JBB);
-}//Dbdih::execINCL_NODECONF()
-
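// execINCL_NODECONF() above relays the include request through the local
// blocks in a fixed order (LQH -> TC -> DICT -> BACKUP) and only reports back
// to the master DIH once the last one has answered.  The sketch below shows
// that relay shape in isolation; the block names are kept, but the signal
// plumbing is a simplified stand-in, not the real API.
#include <cstdio>
#include <cstring>

const char* const chain[] = { "LQH", "TC", "DICT", "BACKUP" };
const unsigned chainLen = sizeof(chain) / sizeof(chain[0]);

void sendIncludeTo(const char* block, unsigned nodeId) {
  std::printf("INCL_NODEREQ(node %u) -> %s\n", nodeId, block);
}

// Called when 'confirmedBy' has completed the inclusion of 'nodeId'.
void onIncludeConf(const char* confirmedBy, unsigned nodeId) {
  for (unsigned i = 0; i < chainLen; i++) {
    if (std::strcmp(chain[i], confirmedBy) == 0) {
      if (i + 1 < chainLen)
        sendIncludeTo(chain[i + 1], nodeId);     // forward along the chain
      else
        std::printf("INCL_NODECONF(node %u) -> master DIH\n", nodeId);
      return;
    }
  }
}

int main() {
  unsigned nodeId = 3;                           // hypothetical starting node
  sendIncludeTo(chain[0], nodeId);               // kick off with LQH
  for (unsigned i = 0; i < chainLen; i++)        // simulate each block confirming
    onIncludeConf(chain[i], nodeId);
}
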
-void Dbdih::execUNBLO_DICTCONF(Signal* signal)
-{
- jamEntry();
- c_nodeStartMaster.wait = ZFALSE;
- if (!c_nodeStartMaster.activeState) {
- jam();
- return;
- }//if
-
- CRASH_INSERTION(7129);
- /**-----------------------------------------------------------------------
- * WE HAVE NOW PREPARED IT FOR INCLUSION IN THE LCP PROTOCOL.
- * WE CAN NOW START THE LCP PROTOCOL AGAIN.
- * WE HAVE ALSO MADE THIS FOR THE GCP PROTOCOL.
- * WE ARE READY TO START THE PROTOCOLS AND RESPOND TO THE START REQUEST
- * FROM THE STARTING NODE.
- *------------------------------------------------------------------------*/
-
- StartMeConf * const startMe = (StartMeConf *)&signal->theData[0];
-
- const Uint32 wordPerSignal = StartMeConf::DATA_SIZE;
- const int noOfSignals = ((Sysfile::SYSFILE_SIZE32 + (wordPerSignal - 1)) /
- wordPerSignal);
-
- startMe->startingNodeId = c_nodeStartMaster.startNode;
- startMe->startWord = 0;
-
- const Uint32 ref = calcDihBlockRef(c_nodeStartMaster.startNode);
- for(int i = 0; i < noOfSignals; i++){
- jam();
- { // Do copy
- const int startWord = startMe->startWord;
- for(Uint32 j = 0; j < wordPerSignal; j++){
- startMe->data[j] = sysfileData[j+startWord];
- }
- }
- sendSignal(ref, GSN_START_MECONF, signal, StartMeConf::SignalLength, JBB);
- startMe->startWord += wordPerSignal;
- }//for
- c_nodeStartMaster.m_outstandingGsn = GSN_START_MECONF;
-}//Dbdih::execUNBLO_DICTCONF()
-
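// execUNBLO_DICTCONF() above sends the sysfile to the starting node in
// fixed-size pieces: the number of signals is the ceiling of the payload size
// over the words-per-signal, and each signal carries its start offset.  The
// sketch below isolates that arithmetic; the constants and the send() hook
// are assumptions for illustration, not the real signal sizes.
#include <cstdio>
#include <cstdint>

const uint32_t SYSFILE_WORDS = 40;   // assumed payload size in words
const uint32_t CHUNK_WORDS   = 16;   // assumed words per signal

void sendChunks(const uint32_t* payload,
                void (*send)(uint32_t startWord, const uint32_t* chunk)) {
  // Same ceiling division as noOfSignals in execUNBLO_DICTCONF().
  const uint32_t noOfSignals = (SYSFILE_WORDS + CHUNK_WORDS - 1) / CHUNK_WORDS;
  uint32_t startWord = 0;
  for (uint32_t i = 0; i < noOfSignals; i++) {
    send(startWord, payload + startWord);
    startWord += CHUNK_WORDS;
  }
}

int main() {
  // Padded to a multiple of CHUNK_WORDS so the final full-size chunk stays
  // in bounds of this demo buffer.
  static uint32_t payload[SYSFILE_WORDS + CHUNK_WORDS] = {0};
  sendChunks(payload, [](uint32_t start, const uint32_t*) {
    std::printf("chunk at word %u\n", (unsigned)start);
  });
}
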
-/*---------------------------------------------------------------------------*/
-/* NODE RESTART COPY REQUEST */
-/*---------------------------------------------------------------------------*/
-// A NODE RESTART HAS REACHED ITS FINAL PHASE WHEN THE DATA IS TO BE COPIED
-// TO THE NODE. START_COPYREQ IS EXECUTED BY THE MASTER NODE.
-/*---------------------------------------------------------------------------*/
-void Dbdih::execSTART_COPYREQ(Signal* signal)
-{
- jamEntry();
- Uint32 startNodeId = signal->theData[0];
- //BlockReference startingRef = signal->theData[1];
- ndbrequire(c_nodeStartMaster.startNode == startNodeId);
- /*-------------------------------------------------------------------------*/
- // REPORT Copy process of node restart is now about to start up.
- /*-------------------------------------------------------------------------*/
- signal->theData[0] = NDB_LE_NR_CopyFragsStarted;
- signal->theData[1] = startNodeId;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
-
- CRASH_INSERTION(7131);
- nodeRestartTakeOver(signal, startNodeId);
- // BlockReference ref = calcQmgrBlockRef(startNodeId);
- // signal->theData[0] = cownNodeId;
- // Remove comments as soon as I open up the Qmgr block
- // TODO_RONM
- // sendSignal(ref, GSN_ALLOW_NODE_CRASHORD, signal, 1, JBB);
-}//Dbdih::execSTART_COPYREQ()
-
-/*---------------------------------------------------------------------------*/
-/* SLAVE LOGIC FOR NODE RESTART */
-/*---------------------------------------------------------------------------*/
-void Dbdih::execSTART_INFOREQ(Signal* signal)
-{
- jamEntry();
- StartInfoReq *const req =(StartInfoReq*)&signal->theData[0];
- Uint32 startNode = req->startingNodeId;
- if (cfailurenr != req->systemFailureNo) {
- jam();
- //---------------------------------------------------------------
- // A failure occurred after the master sent this request. We will ignore
- // this request since the starting node is already dead.
- //---------------------------------------------------------------
- return;
- }//if
- CRASH_INSERTION(7123);
- if (isMaster()) {
- jam();
- ndbrequire(getNodeStatus(startNode) == NodeRecord::STARTING);
- } else {
- jam();
- ndbrequire(getNodeStatus(startNode) == NodeRecord::DEAD);
- }//if
- if ((!getAllowNodeStart(startNode)) ||
- (c_nodeStartSlave.nodeId != 0) ||
- (ERROR_INSERTED(7124))) {
- jam();
- StartInfoRef *const ref =(StartInfoRef*)&signal->theData[0];
- ref->startingNodeId = startNode;
- ref->sendingNodeId = cownNodeId;
- ref->errorCode = ZNODE_START_DISALLOWED_ERROR;
- sendSignal(cmasterdihref, GSN_START_INFOREF, signal,
- StartInfoRef::SignalLength, JBB);
- return;
- }//if
- setNodeStatus(startNode, NodeRecord::STARTING);
- if (req->typeStart == NodeState::ST_INITIAL_NODE_RESTART) {
- jam();
- setAllowNodeStart(startNode, false);
- invalidateNodeLCP(signal, startNode, 0);
- } else {
- jam();
- StartInfoConf * c = (StartInfoConf*)&signal->theData[0];
- c->sendingNodeId = cownNodeId;
- c->startingNodeId = startNode;
- sendSignal(cmasterdihref, GSN_START_INFOCONF, signal,
- StartInfoConf::SignalLength, JBB);
- return;
- }//if
-}//Dbdih::execSTART_INFOREQ()
-
-void Dbdih::execINCL_NODEREQ(Signal* signal)
-{
- jamEntry();
- Uint32 retRef = signal->theData[0];
- Uint32 nodeId = signal->theData[1];
- Uint32 tnodeStartFailNr = signal->theData[2];
- currentgcp = signal->theData[4];
- CRASH_INSERTION(7127);
- cnewgcp = currentgcp;
- coldgcp = currentgcp - 1;
- if (!isMaster()) {
- jam();
- /*-----------------------------------------------------------------------*/
- // We don't want to change the state of the master since he can be in the
- // state LCP_TCGET at this time.
- /*-----------------------------------------------------------------------*/
- c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
- }//if
-
- /*-------------------------------------------------------------------------*/
- // When a node is restarted we must ensure that an LCP will be run
- // as soon as possible and then reset the delay according to the original
- // configuration.
- // Without an initial local checkpoint the new node will not be available.
- /*-------------------------------------------------------------------------*/
- if (getOwnNodeId() == nodeId) {
- jam();
- /*-----------------------------------------------------------------------*/
- // We are the starting node. We came here only to set the global checkpoint
- // id's and the lcp status.
- /*-----------------------------------------------------------------------*/
- CRASH_INSERTION(7171);
- return;
- }//if
- if (getNodeStatus(nodeId) != NodeRecord::STARTING) {
- jam();
- return;
- }//if
- ndbrequire(cfailurenr == tnodeStartFailNr);
- ndbrequire (c_nodeStartSlave.nodeId == 0);
- c_nodeStartSlave.nodeId = nodeId;
-
- ndbrequire (retRef == cmasterdihref);
-
- NodeRecordPtr nodePtr;
- nodePtr.i = nodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
-
- Sysfile::ActiveStatus TsaveState = nodePtr.p->activeStatus;
- Uint32 TnodeGroup = nodePtr.p->nodeGroup;
-
- new (nodePtr.p) NodeRecord();
- nodePtr.p->nodeGroup = TnodeGroup;
- nodePtr.p->activeStatus = TsaveState;
- nodePtr.p->nodeStatus = NodeRecord::ALIVE;
- nodePtr.p->useInTransactions = true;
- nodePtr.p->m_inclDihLcp = true;
-
- removeDeadNode(nodePtr);
- insertAlive(nodePtr);
- con_lineNodes++;
-
- /*-------------------------------------------------------------------------*/
- // WE WILL ALSO SEND THE INCLUDE NODE REQUEST TO THE LOCAL LQH BLOCK.
- /*-------------------------------------------------------------------------*/
- signal->theData[0] = reference();
- signal->theData[1] = nodeId;
- signal->theData[2] = currentgcp;
- sendSignal(clocallqhblockref, GSN_INCL_NODEREQ, signal, 3, JBB);
-}//Dbdih::execINCL_NODEREQ()
-
-/* ------------------------------------------------------------------------- */
-// execINCL_NODECONF() is found in the master logic part since it is used by
-// both the master and the slaves.
-/* ------------------------------------------------------------------------- */
-
-/*****************************************************************************/
-/*********** TAKE OVER DECISION MODULE *************/
-/*****************************************************************************/
-// This module contains the subroutines that take the decision whether to take
-// over a node now or not.
-/* ------------------------------------------------------------------------- */
-/* MASTER LOGIC FOR SYSTEM RESTART */
-/* ------------------------------------------------------------------------- */
-// WE ONLY COME HERE IF WE ARE THE MASTER AND WE ARE PERFORMING A SYSTEM
-// RESTART. WE ALSO COME HERE DURING THIS SYSTEM RESTART ONE TIME PER NODE
-// THAT NEEDS TAKE OVER.
-/*---------------------------------------------------------------------------*/
-// WE CHECK IF ANY NODE NEEDS TO BE TAKEN OVER AND THE TAKE OVER HAS NOT YET
-// BEEN STARTED OR COMPLETED.
-/*---------------------------------------------------------------------------*/
-void
-Dbdih::systemRestartTakeOverLab(Signal* signal)
-{
- NodeRecordPtr nodePtr;
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- ptrAss(nodePtr, nodeRecord);
- switch (nodePtr.p->activeStatus) {
- case Sysfile::NS_Active:
- case Sysfile::NS_ActiveMissed_1:
- jam();
- break;
- /*---------------------------------------------------------------------*/
- // WE HAVE NOT REACHED A STATE YET WHERE THIS NODE NEEDS TO BE TAKEN OVER
- /*---------------------------------------------------------------------*/
- case Sysfile::NS_ActiveMissed_2:
- case Sysfile::NS_NotActive_NotTakenOver:
- jam();
- /*---------------------------------------------------------------------*/
- // THIS NODE IS IN TROUBLE.
- // WE MUST SUCCEED WITH A LOCAL CHECKPOINT WITH THIS NODE TO REMOVE THE
- // DANGER. IF THE NODE IS NOT ALIVE THEN THIS WILL NOT BE
- // POSSIBLE AND WE CAN START THE TAKE OVER IMMEDIATELY IF WE HAVE ANY
- // NODES THAT CAN PERFORM A TAKE OVER.
- /*---------------------------------------------------------------------*/
- if (nodePtr.p->nodeStatus != NodeRecord::ALIVE) {
- jam();
- Uint32 ThotSpareNode = findHotSpare();
- if (ThotSpareNode != RNIL) {
- jam();
- startTakeOver(signal, RNIL, ThotSpareNode, nodePtr.i);
- }//if
- } else if(nodePtr.p->activeStatus == Sysfile::NS_NotActive_NotTakenOver){
- jam();
- /*-------------------------------------------------------------------*/
- // NOT ACTIVE NODES THAT HAVE NOT YET BEEN TAKEN OVER NEEDS TAKE OVER
- // IMMEDIATELY. IF WE ARE ALIVE WE TAKE OVER OUR OWN NODE.
- /*-------------------------------------------------------------------*/
- startTakeOver(signal, RNIL, nodePtr.i, nodePtr.i);
- }//if
- break;
- case Sysfile::NS_TakeOver:
- /**-------------------------------------------------------------------
- * WE MUST HAVE FAILED IN THE MIDDLE OF THE TAKE OVER PROCESS.
- * WE WILL CONCLUDE THE TAKE OVER PROCESS NOW.
- *-------------------------------------------------------------------*/
- if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
- jam();
- Uint32 takeOverNode = Sysfile::getTakeOverNode(nodePtr.i,
- SYSFILE->takeOver);
- if(takeOverNode == 0){
- jam();
- warningEvent("Bug in take-over code restarting");
- takeOverNode = nodePtr.i;
- }
- startTakeOver(signal, RNIL, nodePtr.i, takeOverNode);
- } else {
- jam();
- /**-------------------------------------------------------------------
- * We are not currently taking over, change our active status.
- *-------------------------------------------------------------------*/
- nodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
- setNodeRestartInfoBits();
- }//if
- break;
- case Sysfile::NS_HotSpare:
- jam();
- break;
- /*---------------------------------------------------------------------*/
- // WE NEED NOT TAKE OVER NODES THAT ARE HOT SPARE.
- /*---------------------------------------------------------------------*/
- case Sysfile::NS_NotDefined:
- jam();
- break;
- /*---------------------------------------------------------------------*/
- // WE NEED NOT TAKE OVER NODES THAT DO NOT EVEN EXIST IN THE CLUSTER.
- /*---------------------------------------------------------------------*/
- default:
- ndbrequire(false);
- break;
- }//switch
- }//for
- /*-------------------------------------------------------------------------*/
- /* NO TAKE OVER HAS BEEN INITIATED. */
- /*-------------------------------------------------------------------------*/
-}//Dbdih::systemRestartTakeOverLab()
-
-/*---------------------------------------------------------------------------*/
-// This subroutine is called as part of node restart in the master node.
-/*---------------------------------------------------------------------------*/
-void Dbdih::nodeRestartTakeOver(Signal* signal, Uint32 startNodeId)
-{
- switch (getNodeActiveStatus(startNodeId)) {
- case Sysfile::NS_Active:
- case Sysfile::NS_ActiveMissed_1:
- case Sysfile::NS_ActiveMissed_2:
- jam();
- /*-----------------------------------------------------------------------*/
- // AN ACTIVE NODE HAS BEEN STARTED. THE ACTIVE NODE MUST THEN GET ALL DATA
- // IT HAD BEFORE ITS CRASH. WE START THE TAKE OVER IMMEDIATELY.
- // SINCE WE ARE AN ACTIVE NODE WE WILL TAKE OVER OUR OWN NODE THAT
- // PREVIOUSLY CRASHED.
- /*-----------------------------------------------------------------------*/
- startTakeOver(signal, RNIL, startNodeId, startNodeId);
- break;
- case Sysfile::NS_HotSpare:{
- jam();
- /*-----------------------------------------------------------------------*/
- // WHEN STARTING UP A HOT SPARE WE WILL CHECK IF ANY NODE NEEDS TO BE TAKEN
- // OVER. IF SO THEN WE WILL START THE TAKE OVER.
- /*-----------------------------------------------------------------------*/
- bool takeOverStarted = false;
- NodeRecordPtr nodePtr;
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- ptrAss(nodePtr, nodeRecord);
- if (nodePtr.p->activeStatus == Sysfile::NS_NotActive_NotTakenOver) {
- jam();
- takeOverStarted = true;
- startTakeOver(signal, RNIL, startNodeId, nodePtr.i);
- }//if
- }//for
- if (!takeOverStarted) {
- jam();
- /*-------------------------------------------------------------------*/
- // NO TAKE OVER WAS NEEDED AT THE MOMENT, SO WE START UP AND WAIT UNTIL A
- // TAKE OVER IS NEEDED.
- /*-------------------------------------------------------------------*/
- BlockReference ref = calcDihBlockRef(startNodeId);
- signal->theData[0] = startNodeId;
- sendSignal(ref, GSN_START_COPYCONF, signal, 1, JBB);
- }//if
- break;
- }
- case Sysfile::NS_NotActive_NotTakenOver:
- jam();
- /*-----------------------------------------------------------------------*/
- // ALL DATA IN THE NODE IS LOST BUT WE HAVE NOT TAKEN OVER YET. WE WILL
- // TAKE OVER OUR OWN NODE
- /*-----------------------------------------------------------------------*/
- startTakeOver(signal, RNIL, startNodeId, startNodeId);
- break;
- case Sysfile::NS_TakeOver:{
- jam();
- /*--------------------------------------------------------------------
- * We were in the process of taking over but it was not completed.
- * We will complete it now instead.
- *--------------------------------------------------------------------*/
- Uint32 takeOverNode = Sysfile::getTakeOverNode(startNodeId,
- SYSFILE->takeOver);
- startTakeOver(signal, RNIL, startNodeId, takeOverNode);
- break;
- }
- default:
- ndbrequire(false);
- break;
- }//switch
- nodeResetStart();
-}//Dbdih::nodeRestartTakeOver()
-
-/*************************************************************************/
-// This routine is called when starting a local checkpoint.
-/*************************************************************************/
-void Dbdih::checkStartTakeOver(Signal* signal)
-{
- NodeRecordPtr csoNodeptr;
- Uint32 tcsoHotSpareNode;
- Uint32 tcsoTakeOverNode;
- if (isMaster()) {
- /*-----------------------------------------------------------------*/
- /* WE WILL ONLY START TAKE OVER IF WE ARE MASTER. */
- /*-----------------------------------------------------------------*/
- /* WE WILL ONLY START THE TAKE OVER IF THERE IS A NEED FOR */
- /* A TAKE OVER. */
- /*-----------------------------------------------------------------*/
- /* WE CAN ONLY PERFORM THE TAKE OVER IF WE HAVE A HOT SPARE */
- /* AVAILABLE. */
- /*-----------------------------------------------------------------*/
- tcsoTakeOverNode = 0;
- tcsoHotSpareNode = 0;
- for (csoNodeptr.i = 1; csoNodeptr.i < MAX_NDB_NODES; csoNodeptr.i++) {
- ptrAss(csoNodeptr, nodeRecord);
- if (csoNodeptr.p->activeStatus == Sysfile::NS_NotActive_NotTakenOver) {
- jam();
- tcsoTakeOverNode = csoNodeptr.i;
- } else {
- jam();
- if (csoNodeptr.p->activeStatus == Sysfile::NS_HotSpare) {
- jam();
- tcsoHotSpareNode = csoNodeptr.i;
- }//if
- }//if
- }//for
- if ((tcsoTakeOverNode != 0) &&
- (tcsoHotSpareNode != 0)) {
- jam();
- startTakeOver(signal, RNIL, tcsoHotSpareNode, tcsoTakeOverNode);
- }//if
- }//if
-}//Dbdih::checkStartTakeOver()
-
-/*****************************************************************************/
-/*********** NODE ADDING MODULE *************/
-/*********** CODE TO HANDLE TAKE OVER *************/
-/*****************************************************************************/
-// A take over can be initiated by a number of things:
-// 1) A node restart; usually the node takes over itself, but it can also take
-// over somebody else if its own data was already taken over.
-// 2) At system restart it is necessary to use the take over code to recover
-// nodes whose checkpoints were too old to be restorable by the usual
-// restoration from disk.
-// 3) When a node has missed too many local checkpoints, the master decides
-// that it is to be taken over by a hot spare node that sits around waiting
-// for this to happen.
-//
-// To support multiple node failures efficiently the code is written such that
-// only one take over at a time can handle state transitions, but while a
-// fragment copy is in progress other take overs can also perform state
-// transitions.
-/*****************************************************************************/
-void Dbdih::startTakeOver(Signal* signal,
- Uint32 takeOverPtrI,
- Uint32 startNode,
- Uint32 nodeTakenOver)
-{
- NodeRecordPtr toNodePtr;
- NodeGroupRecordPtr NGPtr;
- toNodePtr.i = nodeTakenOver;
- ptrCheckGuard(toNodePtr, MAX_NDB_NODES, nodeRecord);
- NGPtr.i = toNodePtr.p->nodeGroup;
- ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
- TakeOverRecordPtr takeOverPtr;
- if (takeOverPtrI == RNIL) {
- jam();
- setAllowNodeStart(startNode, false);
- seizeTakeOver(takeOverPtr);
- if (startNode == c_nodeStartMaster.startNode) {
- jam();
- takeOverPtr.p->toNodeRestart = true;
- }//if
- takeOverPtr.p->toStartingNode = startNode;
- takeOverPtr.p->toFailedNode = nodeTakenOver;
- } else {
- jam();
- RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
- ndbrequire(takeOverPtr.p->toStartingNode == startNode);
- ndbrequire(takeOverPtr.p->toFailedNode == nodeTakenOver);
- ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_WAIT_START_TAKE_OVER);
- }//if
- if ((NGPtr.p->activeTakeOver) || (ERROR_INSERTED(7157))) {
- jam();
- /**------------------------------------------------------------------------
- * A take over is already active in this node group. We only allow one
- * take over per node group. Otherwise we will overload the node group and
- * also we will require many more checks when starting up copying of
- * fragments. The parallelism for take over is mainly to ensure that we
- * can handle take over efficiently in large systems with 4 nodes and above.
- * A typical case is an 8 node system executing on two 8-cpu boxes.
- * A box crash in one of the boxes will mean that 4 nodes crash.
- * We want to be able to restart those four nodes to some
- * extent in parallel.
- *
- * We will wait for a few seconds and then try again.
- */
- takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_START_TAKE_OVER;
- signal->theData[0] = DihContinueB::ZSTART_TAKE_OVER;
- signal->theData[1] = takeOverPtr.i;
- signal->theData[2] = startNode;
- signal->theData[3] = nodeTakenOver;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 5000, 4);
- return;
- }//if
- NGPtr.p->activeTakeOver = true;
- if (startNode == nodeTakenOver) {
- jam();
- switch (getNodeActiveStatus(nodeTakenOver)) {
- case Sysfile::NS_Active:
- case Sysfile::NS_ActiveMissed_1:
- case Sysfile::NS_ActiveMissed_2:
- jam();
- break;
- case Sysfile::NS_NotActive_NotTakenOver:
- case Sysfile::NS_TakeOver:
- jam();
- setNodeActiveStatus(nodeTakenOver, Sysfile::NS_TakeOver);
- break;
- default:
- ndbrequire(false);
- }//switch
- } else {
- jam();
- setNodeActiveStatus(nodeTakenOver, Sysfile::NS_HotSpare);
- setNodeActiveStatus(startNode, Sysfile::NS_TakeOver);
- changeNodeGroups(startNode, nodeTakenOver);
- }//if
- setNodeRestartInfoBits();
- /* ---------------------------------------------------------------------- */
- /* WE SET THE RESTART INFORMATION TO INDICATE THAT WE ARE ABOUT TO TAKE */
- /* OVER THE FAILED NODE. WE SET THIS INFORMATION AND WAIT UNTIL THE */
- /* GLOBAL CHECKPOINT HAS WRITTEN THE RESTART INFORMATION. */
- /* ---------------------------------------------------------------------- */
- Sysfile::setTakeOverNode(takeOverPtr.p->toFailedNode, SYSFILE->takeOver,
- startNode);
- takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_START_COPY;
-
- cstartGcpNow = true;
-}//Dbdih::startTakeOver()
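Dbdih::startTakeOver() above allows only one active take over per node group; when the group is busy it re-sends itself as a delayed CONTINUEB signal (ZSTART_TAKE_OVER) and retries later. A minimal standalone sketch of that rule follows; the names and the plain queue standing in for the delayed signal are illustrative assumptions, not code from this block.

#include <cstdio>
#include <queue>

// One flag per node group, as NodeGroupRecord::activeTakeOver is used above.
struct NodeGroup { bool activeTakeOver = false; };

struct PendingTakeOver { unsigned startNode; unsigned failedNode; };

// Stands in for the delayed CONTINUEB(ZSTART_TAKE_OVER) retry.
static std::queue<PendingTakeOver> retryQueue;

// Returns true if the take over starts now, false if it is queued for retry.
bool startTakeOver(NodeGroup& ng, unsigned startNode, unsigned failedNode)
{
  if (ng.activeTakeOver) {
    retryQueue.push({startNode, failedNode});   // group busy: try again later
    return false;
  }
  ng.activeTakeOver = true;                     // claim the node group
  std::printf("node %u takes over node %u\n", startNode, failedNode);
  return true;
}

void endTakeOver(NodeGroup& ng)
{
  ng.activeTakeOver = false;                    // release the node group
  if (!retryQueue.empty()) {                    // re-drive a queued request
    PendingTakeOver p = retryQueue.front();
    retryQueue.pop();
    startTakeOver(ng, p.startNode, p.failedNode);
  }
}

int main()
{
  NodeGroup ng;
  startTakeOver(ng, 3, 4);   // starts immediately
  startTakeOver(ng, 5, 6);   // queued: the node group is already busy
  endTakeOver(ng);           // the queued take over starts now
  endTakeOver(ng);
  return 0;
}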
-
-void Dbdih::changeNodeGroups(Uint32 startNode, Uint32 nodeTakenOver)
-{
- NodeRecordPtr startNodePtr;
- NodeRecordPtr toNodePtr;
- startNodePtr.i = startNode;
- ptrCheckGuard(startNodePtr, MAX_NDB_NODES, nodeRecord);
- toNodePtr.i = nodeTakenOver;
- ptrCheckGuard(toNodePtr, MAX_NDB_NODES, nodeRecord);
- ndbrequire(startNodePtr.p->nodeGroup == ZNIL);
- NodeGroupRecordPtr NGPtr;
-
- NGPtr.i = toNodePtr.p->nodeGroup;
- ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
- bool nodeFound = false;
- for (Uint32 i = 0; i < NGPtr.p->nodeCount; i++) {
- jam();
- if (NGPtr.p->nodesInGroup[i] == nodeTakenOver) {
- jam();
- NGPtr.p->nodesInGroup[i] = startNode;
- nodeFound = true;
- }//if
- }//for
- ndbrequire(nodeFound);
- Sysfile::setNodeGroup(startNodePtr.i, SYSFILE->nodeGroups, toNodePtr.p->nodeGroup);
- startNodePtr.p->nodeGroup = toNodePtr.p->nodeGroup;
- Sysfile::setNodeGroup(toNodePtr.i, SYSFILE->nodeGroups, NO_NODE_GROUP_ID);
- toNodePtr.p->nodeGroup = ZNIL;
-}//Dbdih::changeNodeGroups()
-
-void Dbdih::checkToCopy()
-{
- TakeOverRecordPtr takeOverPtr;
- for (takeOverPtr.i = 0;takeOverPtr.i < MAX_NDB_NODES; takeOverPtr.i++) {
- ptrAss(takeOverPtr, takeOverRecord);
- /*----------------------------------------------------------------------*/
- // TAKE OVER HANDLING WRITES RESTART INFORMATION THROUGH THE GLOBAL
- // CHECKPOINT PROTOCOL. WE CHECK HERE BEFORE STARTING A WRITE OF THE
- // RESTART INFORMATION.
- /*-----------------------------------------------------------------------*/
- if (takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_START_COPY) {
- jam();
- takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_START_COPY_ONGOING;
- } else if (takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_END_COPY) {
- jam();
- takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_END_COPY_ONGOING;
- }//if
- }//for
-}//Dbdih::checkToCopy()
-
-void Dbdih::checkToCopyCompleted(Signal* signal)
-{
- /* ------------------------------------------------------------------------*/
- /* WE CHECK HERE IF THE WRITING OF TAKE OVER INFORMATION ALSO HAS BEEN */
- /* COMPLETED. */
- /* ------------------------------------------------------------------------*/
- TakeOverRecordPtr toPtr;
- for (toPtr.i = 0; toPtr.i < MAX_NDB_NODES; toPtr.i++) {
- ptrAss(toPtr, takeOverRecord);
- if (toPtr.p->toMasterStatus == TakeOverRecord::TO_START_COPY_ONGOING){
- jam();
- sendStartTo(signal, toPtr.i);
- } else if (toPtr.p->toMasterStatus == TakeOverRecord::TO_END_COPY_ONGOING){
- jam();
- sendEndTo(signal, toPtr.i);
- } else {
- jam();
- }//if
- }//for
-}//Dbdih::checkToCopyCompleted()
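checkToCopy() and checkToCopyCompleted() above bracket the write of take over restart information through the global checkpoint protocol: pending states are marked as ongoing before the write starts, and the take overs advance only once the write has completed. A simplified self-contained model of that bracketing, assuming illustrative names and leaving out the signal layer:

#include <cstdio>
#include <vector>

enum class ToState { Idle, StartCopy, StartCopyOngoing, EndCopy, EndCopyOngoing };

struct TakeOverRecord { ToState state = ToState::Idle; };

// Called before the global checkpoint writes the restart information.
void checkToCopy(std::vector<TakeOverRecord>& recs)
{
  for (TakeOverRecord& r : recs) {
    if (r.state == ToState::StartCopy) r.state = ToState::StartCopyOngoing;
    if (r.state == ToState::EndCopy)   r.state = ToState::EndCopyOngoing;
  }
}

// Called when the write has completed: the durable state is on disk, so the
// corresponding take overs may proceed to the next protocol step.
void checkToCopyCompleted(std::vector<TakeOverRecord>& recs)
{
  for (TakeOverRecord& r : recs) {
    if (r.state == ToState::StartCopyOngoing) std::printf("send START_TOREQ\n");
    if (r.state == ToState::EndCopyOngoing)   std::printf("send END_TOREQ\n");
  }
}

int main()
{
  std::vector<TakeOverRecord> recs(2);
  recs[0].state = ToState::StartCopy;
  checkToCopy(recs);           // marks the pending write as ongoing
  checkToCopyCompleted(recs);  // GCP done: prints "send START_TOREQ"
  return 0;
}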
-
-bool Dbdih::checkToInterrupted(TakeOverRecordPtr& takeOverPtr)
-{
- if (checkNodeAlive(takeOverPtr.p->toStartingNode)) {
- jam();
- return false;
- } else {
- jam();
- endTakeOver(takeOverPtr.i);
- return true;
- }//if
-}//Dbdih::checkToInterrupted()
-
-void Dbdih::sendStartTo(Signal* signal, Uint32 takeOverPtrI)
-{
- TakeOverRecordPtr takeOverPtr;
- CRASH_INSERTION(7155);
- RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
- if ((c_startToLock != RNIL) || (ERROR_INSERTED(7158))) {
- jam();
- takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_START;
- signal->theData[0] = DihContinueB::ZSEND_START_TO;
- signal->theData[1] = takeOverPtrI;
- signal->theData[2] = takeOverPtr.p->toStartingNode;
- signal->theData[3] = takeOverPtr.p->toFailedNode;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 30, 4);
- return;
- }//if
- c_startToLock = takeOverPtrI;
- StartToReq * const req = (StartToReq *)&signal->theData[0];
- req->userPtr = takeOverPtr.i;
- req->userRef = reference();
- req->startingNodeId = takeOverPtr.p->toStartingNode;
- req->nodeTakenOver = takeOverPtr.p->toFailedNode;
- req->nodeRestart = takeOverPtr.p->toNodeRestart;
- takeOverPtr.p->toMasterStatus = TakeOverRecord::STARTING;
- sendLoopMacro(START_TOREQ, sendSTART_TOREQ);
-}//Dbdih::sendStartTo()
-
-void Dbdih::execSTART_TOREQ(Signal* signal)
-{
- TakeOverRecordPtr takeOverPtr;
- jamEntry();
- const StartToReq * const req = (StartToReq *)&signal->theData[0];
- takeOverPtr.i = req->userPtr;
- BlockReference ref = req->userRef;
- Uint32 startingNode = req->startingNodeId;
-
- CRASH_INSERTION(7133);
- RETURN_IF_NODE_NOT_ALIVE(req->startingNodeId);
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
- allocateTakeOver(takeOverPtr);
- initStartTakeOver(req, takeOverPtr);
-
- StartToConf * const conf = (StartToConf *)&signal->theData[0];
- conf->userPtr = takeOverPtr.i;
- conf->sendingNodeId = cownNodeId;
- conf->startingNodeId = startingNode;
- sendSignal(ref, GSN_START_TOCONF, signal, StartToConf::SignalLength, JBB);
-}//Dbdih::execSTART_TOREQ()
-
-void Dbdih::execSTART_TOCONF(Signal* signal)
-{
- TakeOverRecordPtr takeOverPtr;
- jamEntry();
- const StartToConf * const conf = (StartToConf *)&signal->theData[0];
-
- CRASH_INSERTION(7147);
-
- RETURN_IF_NODE_NOT_ALIVE(conf->startingNodeId);
-
- takeOverPtr.i = conf->userPtr;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
- ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::STARTING);
- ndbrequire(takeOverPtr.p->toStartingNode == conf->startingNodeId);
- receiveLoopMacro(START_TOREQ, conf->sendingNodeId);
- CRASH_INSERTION(7134);
- c_startToLock = RNIL;
-
- startNextCopyFragment(signal, takeOverPtr.i);
-}//Dbdih::execSTART_TOCONF()
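The START_TOREQ / START_TOCONF exchange above uses the block's broadcast-and-collect idiom: sendLoopMacro sends the request to the participating nodes and receiveLoopMacro lets execution continue only when every node has confirmed. A rough standalone model of that idiom, with a plain set replacing the signal counter (names are assumptions):

#include <cassert>
#include <set>

struct SignalCounter {
  std::set<unsigned> waitingFor;

  // sendLoopMacro: one request per participating node.
  void sendToAll(const std::set<unsigned>& nodes) { waitingFor = nodes; }

  // receiveLoopMacro: returns true when the last confirmation has arrived.
  bool confirm(unsigned nodeId)
  {
    assert(waitingFor.count(nodeId));   // a CONF must match an outstanding REQ
    waitingFor.erase(nodeId);
    return waitingFor.empty();
  }
};

int main()
{
  SignalCounter c;
  c.sendToAll({2, 3, 4});
  assert(!c.confirm(2));
  assert(!c.confirm(4));
  assert(c.confirm(3));   // all nodes answered: proceed (startNextCopyFragment)
  return 0;
}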
-
-void Dbdih::initStartTakeOver(const StartToReq * req,
- TakeOverRecordPtr takeOverPtr)
-{
- takeOverPtr.p->toCurrentTabref = 0;
- takeOverPtr.p->toCurrentFragid = 0;
- takeOverPtr.p->toStartingNode = req->startingNodeId;
- takeOverPtr.p->toFailedNode = req->nodeTakenOver;
- takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_STARTED;
- takeOverPtr.p->toCopyNode = RNIL;
- takeOverPtr.p->toCurrentReplica = RNIL;
- takeOverPtr.p->toNodeRestart = req->nodeRestart;
-}//Dbdih::initStartTakeOver()
-
-void Dbdih::startNextCopyFragment(Signal* signal, Uint32 takeOverPtrI)
-{
- TabRecordPtr tabPtr;
- TakeOverRecordPtr takeOverPtr;
- Uint32 loopCount;
- RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
- takeOverPtr.p->toMasterStatus = TakeOverRecord::SELECTING_NEXT;
- loopCount = 0;
- if (ERROR_INSERTED(7159)) {
- loopCount = 100;
- }//if
- while (loopCount++ < 100) {
- tabPtr.i = takeOverPtr.p->toCurrentTabref;
- if (tabPtr.i >= ctabFileSize) {
- jam();
- CRASH_INSERTION(7136);
- sendUpdateTo(signal, takeOverPtr.i, UpdateToReq::TO_COPY_COMPLETED);
- return;
- }//if
- ptrAss(tabPtr, tabRecord);
- if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE){
- jam();
- takeOverPtr.p->toCurrentFragid = 0;
- takeOverPtr.p->toCurrentTabref++;
- continue;
- }//if
- Uint32 fragId = takeOverPtr.p->toCurrentFragid;
- if (fragId >= tabPtr.p->totalfragments) {
- jam();
- takeOverPtr.p->toCurrentFragid = 0;
- takeOverPtr.p->toCurrentTabref++;
- if (ERROR_INSERTED(7135)) {
- if (takeOverPtr.p->toCurrentTabref == 1) {
- ndbrequire(false);
- }//if
- }//if
- continue;
- }//if
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, fragId, fragPtr);
- ReplicaRecordPtr loopReplicaPtr;
- loopReplicaPtr.i = fragPtr.p->oldStoredReplicas;
- while (loopReplicaPtr.i != RNIL) {
- ptrCheckGuard(loopReplicaPtr, creplicaFileSize, replicaRecord);
- if (loopReplicaPtr.p->procNode == takeOverPtr.p->toFailedNode) {
- jam();
- /* ----------------------------------------------------------------- */
- /* WE HAVE FOUND A REPLICA THAT BELONGED TO THE FAILED NODE AND NEEDS */
- /* TAKE OVER. WE TAKE OVER THIS REPLICA TO THE NEW NODE. */
- /* ----------------------------------------------------------------- */
- takeOverPtr.p->toCurrentReplica = loopReplicaPtr.i;
- toCopyFragLab(signal, takeOverPtr.i);
- return;
- } else if (loopReplicaPtr.p->procNode == takeOverPtr.p->toStartingNode) {
- jam();
- /* ----------------------------------------------------------------- */
- /* WE HAVE OBVIOUSLY STARTED TAKING OVER THIS WITHOUT COMPLETING IT. */
- /* WE NEED TO COMPLETE THE TAKE OVER OF THIS REPLICA. */
- /* ----------------------------------------------------------------- */
- takeOverPtr.p->toCurrentReplica = loopReplicaPtr.i;
- toCopyFragLab(signal, takeOverPtr.i);
- return;
- } else {
- jam();
- loopReplicaPtr.i = loopReplicaPtr.p->nextReplica;
- }//if
- }//while
- takeOverPtr.p->toCurrentFragid++;
- }//while
- signal->theData[0] = DihContinueB::ZTO_START_COPY_FRAG;
- signal->theData[1] = takeOverPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
-}//Dbdih::startNextCopyFragment()
-
-void Dbdih::toCopyFragLab(Signal* signal,
- Uint32 takeOverPtrI)
-{
- TakeOverRecordPtr takeOverPtr;
- RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
-
- CreateReplicaRecordPtr createReplicaPtr;
- createReplicaPtr.i = 0;
- ptrAss(createReplicaPtr, createReplicaRecord);
-
- ReplicaRecordPtr replicaPtr;
- replicaPtr.i = takeOverPtr.p->toCurrentReplica;
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
-
- TabRecordPtr tabPtr;
- tabPtr.i = takeOverPtr.p->toCurrentTabref;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- /* ----------------------------------------------------------------------- */
- /* WE HAVE FOUND A REPLICA THAT NEEDS TAKE OVER. WE WILL START THIS TAKE */
- /* OVER BY ADDING THE FRAGMENT WHEREAFTER WE WILL ORDER THE PRIMARY */
- /* REPLICA TO COPY ITS CONTENT TO THE NEW STARTING REPLICA. */
- /* THIS OPERATION IS A SINGLE USER OPERATION UNTIL WE HAVE SENT */
- /* COPY_FRAGREQ. AFTER SENDING COPY_FRAGREQ WE ARE READY TO START A NEW */
- /* FRAGMENT REPLICA. WE WILL NOT IMPLEMENT THIS IN THE FIRST PHASE. */
- /* ----------------------------------------------------------------------- */
- cnoOfCreateReplicas = 1;
- createReplicaPtr.p->hotSpareUse = true;
- createReplicaPtr.p->dataNodeId = takeOverPtr.p->toStartingNode;
-
- prepareSendCreateFragReq(signal, takeOverPtrI);
-}//Dbdih::toCopyFragLab()
-
-void Dbdih::prepareSendCreateFragReq(Signal* signal, Uint32 takeOverPtrI)
-{
- TakeOverRecordPtr takeOverPtr;
- RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
-
- TabRecordPtr tabPtr;
- tabPtr.i = takeOverPtr.p->toCurrentTabref;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- FragmentstorePtr fragPtr;
-
- getFragstore(tabPtr.p, takeOverPtr.p->toCurrentFragid, fragPtr);
- Uint32 nodes[MAX_REPLICAS];
- extractNodeInfo(fragPtr.p, nodes);
- takeOverPtr.p->toCopyNode = nodes[0];
- sendCreateFragReq(signal, 0, CreateFragReq::STORED, takeOverPtr.i);
-}//Dbdih::prepareSendCreateFragReq()
-
-void Dbdih::sendCreateFragReq(Signal* signal,
- Uint32 startGci,
- Uint32 replicaType,
- Uint32 takeOverPtrI)
-{
- TakeOverRecordPtr takeOverPtr;
- RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
- if ((c_createFragmentLock != RNIL) ||
- ((ERROR_INSERTED(7161))&&(replicaType == CreateFragReq::STORED)) ||
- ((ERROR_INSERTED(7162))&&(replicaType == CreateFragReq::COMMIT_STORED))){
- if (replicaType == CreateFragReq::STORED) {
- jam();
- takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_PREPARE_CREATE;
- } else {
- ndbrequire(replicaType == CreateFragReq::COMMIT_STORED);
- jam();
- takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_COMMIT_CREATE;
- }//if
- signal->theData[0] = DihContinueB::ZSEND_CREATE_FRAG;
- signal->theData[1] = takeOverPtr.i;
- signal->theData[2] = replicaType;
- signal->theData[3] = startGci;
- signal->theData[4] = takeOverPtr.p->toStartingNode;
- signal->theData[5] = takeOverPtr.p->toFailedNode;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 50, 6);
- return;
- }//if
- c_createFragmentLock = takeOverPtr.i;
- sendLoopMacro(CREATE_FRAGREQ, nullRoutine);
-
- CreateFragReq * const req = (CreateFragReq *)&signal->theData[0];
- req->userPtr = takeOverPtr.i;
- req->userRef = reference();
- req->tableId = takeOverPtr.p->toCurrentTabref;
- req->fragId = takeOverPtr.p->toCurrentFragid;
- req->startingNodeId = takeOverPtr.p->toStartingNode;
- req->copyNodeId = takeOverPtr.p->toCopyNode;
- req->startGci = startGci;
- req->replicaType = replicaType;
-
- NodeRecordPtr nodePtr;
- nodePtr.i = cfirstAliveNode;
- do {
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- BlockReference ref = calcDihBlockRef(nodePtr.i);
- sendSignal(ref, GSN_CREATE_FRAGREQ, signal,
- CreateFragReq::SignalLength, JBB);
- nodePtr.i = nodePtr.p->nextNode;
- } while (nodePtr.i != RNIL);
-
- if (replicaType == CreateFragReq::STORED) {
- jam();
- takeOverPtr.p->toMasterStatus = TakeOverRecord::PREPARE_CREATE;
- } else {
- ndbrequire(replicaType == CreateFragReq::COMMIT_STORED);
- jam();
- takeOverPtr.p->toMasterStatus = TakeOverRecord::COMMIT_CREATE;
- }
-}//Dbdih::sendCreateFragReq()
-
-/* --------------------------------------------------------------------------*/
-/* AN ORDER TO START OR COMMIT THE REPLICA CREATION ARRIVED FROM THE */
-/* MASTER. */
-/* --------------------------------------------------------------------------*/
-void Dbdih::execCREATE_FRAGREQ(Signal* signal)
-{
- jamEntry();
- CreateFragReq * const req = (CreateFragReq *)&signal->theData[0];
-
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = req->userPtr;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
-
- BlockReference retRef = req->userRef;
-
- TabRecordPtr tabPtr;
- tabPtr.i = req->tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- Uint32 fragId = req->fragId;
- Uint32 tdestNodeid = req->startingNodeId;
- Uint32 tsourceNodeid = req->copyNodeId;
- Uint32 startGci = req->startGci;
- Uint32 replicaType = req->replicaType;
-
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, fragId, fragPtr);
- RETURN_IF_NODE_NOT_ALIVE(tdestNodeid);
- ReplicaRecordPtr frReplicaPtr;
- findToReplica(takeOverPtr.p, replicaType, fragPtr, frReplicaPtr);
- ndbrequire(frReplicaPtr.i != RNIL);
-
- switch (replicaType) {
- case CreateFragReq::STORED:
- jam();
- CRASH_INSERTION(7138);
- /* ----------------------------------------------------------------------*/
- /* HERE WE ARE INSERTING THE NEW BACKUP NODE IN THE EXECUTION OF ALL */
- /* OPERATIONS. FROM HERE ON ALL OPERATIONS ON THIS FRAGMENT WILL INCLUDE*/
- /* USE OF THE NEW REPLICA. */
- /* --------------------------------------------------------------------- */
- insertBackup(fragPtr, tdestNodeid);
- takeOverPtr.p->toCopyNode = tsourceNodeid;
- takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_CREATE_PREPARE;
-
- fragPtr.p->distributionKey++;
- fragPtr.p->distributionKey &= 255;
- break;
- case CreateFragReq::COMMIT_STORED:
- jam();
- CRASH_INSERTION(7139);
- /* ----------------------------------------------------------------------*/
- /* HERE WE ARE MOVING THE REPLICA TO THE STORED SECTION SINCE IT IS NOW */
- /* FULLY LOADED WITH ALL DATA NEEDED. */
- // We also update the order of the replicas here so that if the new
- // replica is the desired primary we insert it as primary.
- /* ----------------------------------------------------------------------*/
- takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_CREATE_COMMIT;
- removeOldStoredReplica(fragPtr, frReplicaPtr);
- linkStoredReplica(fragPtr, frReplicaPtr);
- updateNodeInfo(fragPtr);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
-
- /* ------------------------------------------------------------------------*/
- /* THE NEW NODE OF THIS REPLICA IS THE STARTING NODE. */
- /* ------------------------------------------------------------------------*/
- if (frReplicaPtr.p->procNode != takeOverPtr.p->toStartingNode) {
- jam();
- /* ---------------------------------------------------------------------*/
- /* IF WE ARE STARTING A TAKE OVER NODE WE MUST INVALIDATE ALL LCP'S. */
- /* OTHERWISE WE WILL TRY TO START LCP'S THAT DO NOT EXIST. */
- /* ---------------------------------------------------------------------*/
- frReplicaPtr.p->procNode = takeOverPtr.p->toStartingNode;
- frReplicaPtr.p->noCrashedReplicas = 0;
- frReplicaPtr.p->createGci[0] = startGci;
- ndbrequire(startGci != 0xF1F1F1F1);
- frReplicaPtr.p->replicaLastGci[0] = (Uint32)-1;
- for (Uint32 i = 0; i < MAX_LCP_STORED; i++) {
- frReplicaPtr.p->lcpStatus[i] = ZINVALID;
- }//for
- } else {
- jam();
- const Uint32 noCrashed = frReplicaPtr.p->noCrashedReplicas;
- arrGuard(noCrashed, 8);
- frReplicaPtr.p->createGci[noCrashed] = startGci;
- ndbrequire(startGci != 0xF1F1F1F1);
- frReplicaPtr.p->replicaLastGci[noCrashed] = (Uint32)-1;
- }//if
- takeOverPtr.p->toCurrentTabref = tabPtr.i;
- takeOverPtr.p->toCurrentFragid = fragId;
- CreateFragConf * const conf = (CreateFragConf *)&signal->theData[0];
- conf->userPtr = takeOverPtr.i;
- conf->tableId = tabPtr.i;
- conf->fragId = fragId;
- conf->sendingNodeId = cownNodeId;
- conf->startingNodeId = tdestNodeid;
- sendSignal(retRef, GSN_CREATE_FRAGCONF, signal,
- CreateFragConf::SignalLength, JBB);
-}//Dbdih::execCREATE_FRAGREQ()
-
-void Dbdih::execCREATE_FRAGCONF(Signal* signal)
-{
- jamEntry();
- CRASH_INSERTION(7148);
- const CreateFragConf * const conf = (CreateFragConf *)&signal->theData[0];
- Uint32 fragId = conf->fragId;
-
- RETURN_IF_NODE_NOT_ALIVE(conf->startingNodeId);
-
- TabRecordPtr tabPtr;
- tabPtr.i = conf->tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = conf->userPtr;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
-
- ndbrequire(tabPtr.i == takeOverPtr.p->toCurrentTabref);
- ndbrequire(fragId == takeOverPtr.p->toCurrentFragid);
- receiveLoopMacro(CREATE_FRAGREQ, conf->sendingNodeId);
- c_createFragmentLock = RNIL;
-
- if (takeOverPtr.p->toMasterStatus == TakeOverRecord::PREPARE_CREATE) {
- jam();
- CRASH_INSERTION(7140);
- /* --------------------------------------------------------------------- */
- /* ALL NODES HAVE PREPARED THE INTRODUCTION OF THIS NEW NODE AND IT IS */
- /* ALREADY IN USE. WE CAN NOW START COPYING THE FRAGMENT. */
- /*---------------------------------------------------------------------- */
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, fragId, fragPtr);
- takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_FRAG;
- BlockReference ref = calcLqhBlockRef(takeOverPtr.p->toCopyNode);
- CopyFragReq * const copyFragReq = (CopyFragReq *)&signal->theData[0];
- copyFragReq->userPtr = takeOverPtr.i;
- copyFragReq->userRef = reference();
- copyFragReq->tableId = tabPtr.i;
- copyFragReq->fragId = fragId;
- copyFragReq->nodeId = takeOverPtr.p->toStartingNode;
- copyFragReq->schemaVersion = tabPtr.p->schemaVersion;
- copyFragReq->distributionKey = fragPtr.p->distributionKey;
- sendSignal(ref, GSN_COPY_FRAGREQ, signal, CopyFragReq::SignalLength, JBB);
- } else {
- ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COMMIT_CREATE);
- jam();
- CRASH_INSERTION(7141);
- /* --------------------------------------------------------------------- */
- // REPORT that copy of fragment has been completed.
- /* --------------------------------------------------------------------- */
- signal->theData[0] = NDB_LE_NR_CopyFragDone;
- signal->theData[1] = takeOverPtr.p->toStartingNode;
- signal->theData[2] = tabPtr.i;
- signal->theData[3] = takeOverPtr.p->toCurrentFragid;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
- /* --------------------------------------------------------------------- */
- /* WE HAVE NOW CREATED THIS NEW REPLICA AND WE ARE READY TO TAKE */
- /* THE NEXT REPLICA. */
- /* --------------------------------------------------------------------- */
-
- Mutex mutex(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle);
- mutex.unlock(); // ignore result
-
- takeOverPtr.p->toCurrentFragid++;
- startNextCopyFragment(signal, takeOverPtr.i);
- }//if
-}//Dbdih::execCREATE_FRAGCONF()
-
-void Dbdih::execCOPY_FRAGREF(Signal* signal)
-{
- const CopyFragRef * const ref = (CopyFragRef *)&signal->theData[0];
- jamEntry();
- Uint32 takeOverPtrI = ref->userPtr;
- Uint32 startingNodeId = ref->startingNodeId;
- Uint32 errorCode = ref->errorCode;
-
- TakeOverRecordPtr takeOverPtr;
- RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
- ndbrequire(errorCode != ZNODE_FAILURE_ERROR);
- ndbrequire(ref->tableId == takeOverPtr.p->toCurrentTabref);
- ndbrequire(ref->fragId == takeOverPtr.p->toCurrentFragid);
- ndbrequire(ref->startingNodeId == takeOverPtr.p->toStartingNode);
- ndbrequire(ref->sendingNodeId == takeOverPtr.p->toCopyNode);
- ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG);
- endTakeOver(takeOverPtrI);
- //--------------------------------------------------------------------------
- // For some reason we did not succeed in copying a fragment. We treat this
- // as a serious failure and crash the starting node.
- //--------------------------------------------------------------------------
- BlockReference cntrRef = calcNdbCntrBlockRef(startingNodeId);
- SystemError * const sysErr = (SystemError*)&signal->theData[0];
- sysErr->errorCode = SystemError::CopyFragRefError;
- sysErr->errorRef = reference();
- sysErr->data1 = errorCode;
- sysErr->data2 = 0;
- sendSignal(cntrRef, GSN_SYSTEM_ERROR, signal,
- SystemError::SignalLength, JBB);
- return;
-}//Dbdih::execCOPY_FRAGREF()
-
-void Dbdih::execCOPY_FRAGCONF(Signal* signal)
-{
- const CopyFragConf * const conf = (CopyFragConf *)&signal->theData[0];
- jamEntry();
- CRASH_INSERTION(7142);
-
- TakeOverRecordPtr takeOverPtr;
- Uint32 takeOverPtrI = conf->userPtr;
- RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
-
- ndbrequire(conf->tableId == takeOverPtr.p->toCurrentTabref);
- ndbrequire(conf->fragId == takeOverPtr.p->toCurrentFragid);
- ndbrequire(conf->startingNodeId == takeOverPtr.p->toStartingNode);
- ndbrequire(conf->sendingNodeId == takeOverPtr.p->toCopyNode);
- ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG);
- sendUpdateTo(signal, takeOverPtr.i,
- (Uint32)UpdateToReq::TO_COPY_FRAG_COMPLETED);
-}//Dbdih::execCOPY_FRAGCONF()
-
-void Dbdih::sendUpdateTo(Signal* signal,
- Uint32 takeOverPtrI, Uint32 updateState)
-{
- TakeOverRecordPtr takeOverPtr;
- RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
- if ((c_updateToLock != RNIL) ||
- ((ERROR_INSERTED(7163)) &&
- (updateState == UpdateToReq::TO_COPY_FRAG_COMPLETED)) ||
- ((ERROR_INSERTED(7169)) &&
- (updateState == UpdateToReq::TO_COPY_COMPLETED))) {
- jam();
- takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_UPDATE_TO;
- signal->theData[0] = DihContinueB::ZSEND_UPDATE_TO;
- signal->theData[1] = takeOverPtrI;
- signal->theData[2] = takeOverPtr.p->toStartingNode;
- signal->theData[3] = takeOverPtr.p->toFailedNode;
- signal->theData[4] = updateState;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 30, 5);
- return;
- }//if
- c_updateToLock = takeOverPtrI;
- if (updateState == UpdateToReq::TO_COPY_FRAG_COMPLETED) {
- jam();
- takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_UPDATE_TO;
- } else {
- jam();
- ndbrequire(updateState == UpdateToReq::TO_COPY_COMPLETED);
- takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_COPY_COMPLETED;
- }//if
-
- UpdateToReq * const req = (UpdateToReq *)&signal->theData[0];
- req->userPtr = takeOverPtr.i;
- req->userRef = reference();
- req->updateState = (UpdateToReq::UpdateState)updateState;
- req->startingNodeId = takeOverPtr.p->toStartingNode;
- req->tableId = takeOverPtr.p->toCurrentTabref;
- req->fragmentNo = takeOverPtr.p->toCurrentFragid;
- sendLoopMacro(UPDATE_TOREQ, sendUPDATE_TOREQ);
-}//Dbdih::sendUpdateTo()
-
-void Dbdih::execUPDATE_TOREQ(Signal* signal)
-{
- jamEntry();
- const UpdateToReq * const req = (UpdateToReq *)&signal->theData[0];
- BlockReference ref = req->userRef;
- ndbrequire(cmasterdihref == ref);
-
- CRASH_INSERTION(7154);
- RETURN_IF_NODE_NOT_ALIVE(req->startingNodeId);
-
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = req->userPtr;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
-
- ndbrequire(req->startingNodeId == takeOverPtr.p->toStartingNode);
- if (req->updateState == UpdateToReq::TO_COPY_FRAG_COMPLETED) {
- jam();
- ndbrequire(takeOverPtr.p->toSlaveStatus == TakeOverRecord::TO_SLAVE_CREATE_PREPARE);
- takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_COPY_FRAG_COMPLETED;
- takeOverPtr.p->toCurrentTabref = req->tableId;
- takeOverPtr.p->toCurrentFragid = req->fragmentNo;
- } else {
- jam();
- ndbrequire(req->updateState == UpdateToReq::TO_COPY_COMPLETED);
- takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_COPY_COMPLETED;
- setNodeCopyCompleted(takeOverPtr.p->toStartingNode, true);
- }//if
-
-
- UpdateToConf * const conf = (UpdateToConf *)&signal->theData[0];
- conf->userPtr = takeOverPtr.i;
- conf->sendingNodeId = cownNodeId;
- conf->startingNodeId = takeOverPtr.p->toStartingNode;
- sendSignal(ref, GSN_UPDATE_TOCONF, signal, UpdateToConf::SignalLength, JBB);
-}//Dbdih::execUPDATE_TOREQ()
-
-void Dbdih::execUPDATE_TOCONF(Signal* signal)
-{
- const UpdateToConf * const conf = (UpdateToConf *)&signal->theData[0];
- CRASH_INSERTION(7152);
-
- RETURN_IF_NODE_NOT_ALIVE(conf->startingNodeId);
-
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = conf->userPtr;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
-
- receiveLoopMacro(UPDATE_TOREQ, conf->sendingNodeId);
- CRASH_INSERTION(7153);
- c_updateToLock = RNIL;
-
- if (takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_COPY_COMPLETED) {
- jam();
- toCopyCompletedLab(signal, takeOverPtr);
- return;
- } else {
- ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_UPDATE_TO);
- }//if
- TabRecordPtr tabPtr;
- tabPtr.i = takeOverPtr.p->toCurrentTabref;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, takeOverPtr.p->toCurrentFragid, fragPtr);
- takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_ACTIVE;
- BlockReference lqhRef = calcLqhBlockRef(takeOverPtr.p->toStartingNode);
- CopyActiveReq * const req = (CopyActiveReq *)&signal->theData[0];
- req->userPtr = takeOverPtr.i;
- req->userRef = reference();
- req->tableId = takeOverPtr.p->toCurrentTabref;
- req->fragId = takeOverPtr.p->toCurrentFragid;
- req->distributionKey = fragPtr.p->distributionKey;
-
- sendSignal(lqhRef, GSN_COPY_ACTIVEREQ, signal,
- CopyActiveReq::SignalLength, JBB);
-}//Dbdih::execUPDATE_TOCONF()
-
-void Dbdih::execCOPY_ACTIVECONF(Signal* signal)
-{
- const CopyActiveConf * const conf = (CopyActiveConf *)&signal->theData[0];
- jamEntry();
- CRASH_INSERTION(7143);
-
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = conf->userPtr;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
-
- ndbrequire(conf->tableId == takeOverPtr.p->toCurrentTabref);
- ndbrequire(conf->fragId == takeOverPtr.p->toCurrentFragid);
- ndbrequire(checkNodeAlive(conf->startingNodeId));
- ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_ACTIVE);
-
- takeOverPtr.p->startGci = conf->startGci;
- takeOverPtr.p->toMasterStatus = TakeOverRecord::LOCK_MUTEX;
-
- Mutex mutex(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle);
- Callback c = { safe_cast(&Dbdih::switchPrimaryMutex_locked), takeOverPtr.i };
- ndbrequire(mutex.lock(c));
-}//Dbdih::execCOPY_ACTIVECONF()
-
-void
-Dbdih::switchPrimaryMutex_locked(Signal* signal, Uint32 toPtrI, Uint32 retVal){
- jamEntry();
- ndbrequire(retVal == 0);
-
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = toPtrI;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
-
- ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::LOCK_MUTEX);
-
- if (!checkNodeAlive((takeOverPtr.p->toStartingNode))) {
- // We have mutex
- Mutex mutex(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle);
- mutex.unlock(); // Ignore result
-
- c_createFragmentLock = RNIL;
- c_CREATE_FRAGREQ_Counter.clearWaitingFor();
- endTakeOver(takeOverPtr.i);
- return;
- }
-
- takeOverPtr.p->toMasterStatus = TakeOverRecord::COMMIT_CREATE;
- sendCreateFragReq(signal, takeOverPtr.p->startGci,
- CreateFragReq::COMMIT_STORED, takeOverPtr.i);
-}
-
-void Dbdih::toCopyCompletedLab(Signal * signal, TakeOverRecordPtr takeOverPtr)
-{
- signal->theData[0] = NDB_LE_NR_CopyFragsCompleted;
- signal->theData[1] = takeOverPtr.p->toStartingNode;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
-
- c_lcpState.immediateLcpStart = true;
- takeOverPtr.p->toMasterStatus = TakeOverRecord::WAIT_LCP;
-
- /*-----------------------------------------------------------------------*/
- /* NOW WE CAN ALLOW THE NEW NODE TO PARTICIPATE IN LOCAL CHECKPOINTS. */
- /* WHEN THE FIRST LOCAL CHECKPOINT IS READY WE DECLARE THE TAKE OVER AS */
- /* COMPLETED. SINCE LOCAL CHECKPOINTS HAVE BEEN BLOCKED DURING THE COPY */
- /* PROCESS WE MUST ALSO START A NEW LOCAL CHECKPOINT PROCESS BY ENSURING */
- /* THAT IT LOOKS LIKE IT IS TIME FOR A NEW LOCAL CHECKPOINT AND BY */
- /* UNBLOCKING THE LOCAL CHECKPOINT AGAIN. */
- /* --------------------------------------------------------------------- */
-}//Dbdih::toCopyCompletedLab()
-
-void Dbdih::sendEndTo(Signal* signal, Uint32 takeOverPtrI)
-{
- TakeOverRecordPtr takeOverPtr;
- CRASH_INSERTION(7156);
- RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
- if ((c_endToLock != RNIL) || (ERROR_INSERTED(7164))) {
- jam();
- takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_ENDING;
- signal->theData[0] = DihContinueB::ZSEND_END_TO;
- signal->theData[1] = takeOverPtrI;
- signal->theData[2] = takeOverPtr.p->toStartingNode;
- signal->theData[3] = takeOverPtr.p->toFailedNode;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 30, 4);
- return;
- }//if
- c_endToLock = takeOverPtr.i;
- takeOverPtr.p->toMasterStatus = TakeOverRecord::ENDING;
- EndToReq * const req = (EndToReq *)&signal->theData[0];
- req->userPtr = takeOverPtr.i;
- req->userRef = reference();
- req->startingNodeId = takeOverPtr.p->toStartingNode;
- sendLoopMacro(END_TOREQ, sendEND_TOREQ);
-}//Dbdih::sendEndTo()
-
-void Dbdih::execEND_TOREQ(Signal* signal)
-{
- jamEntry();
- const EndToReq * const req = (EndToReq *)&signal->theData[0];
- BlockReference ref = req->userRef;
- Uint32 startingNodeId = req->startingNodeId;
-
- CRASH_INSERTION(7144);
- RETURN_IF_NODE_NOT_ALIVE(startingNodeId);
-
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = req->userPtr;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
-
- ndbrequire(startingNodeId == takeOverPtr.p->toStartingNode);
- takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_IDLE;
-
- if (!isMaster()) {
- jam();
- endTakeOver(takeOverPtr.i);
- }//if
-
- EndToConf * const conf = (EndToConf *)&signal->theData[0];
- conf->userPtr = takeOverPtr.i;
- conf->sendingNodeId = cownNodeId;
- conf->startingNodeId = startingNodeId;
- sendSignal(ref, GSN_END_TOCONF, signal, EndToConf::SignalLength, JBB);
-}//Dbdih::execEND_TOREQ()
-
-void Dbdih::execEND_TOCONF(Signal* signal)
-{
- const EndToConf * const conf = (EndToConf *)&signal->theData[0];
- jamEntry();
-
- const Uint32 nodeId = conf->startingNodeId;
- CRASH_INSERTION(7145);
-
- RETURN_IF_NODE_NOT_ALIVE(nodeId);
-
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = conf->userPtr;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
-
- ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::ENDING);
- ndbrequire(nodeId == takeOverPtr.p->toStartingNode);
-
- receiveLoopMacro(END_TOREQ, conf->sendingNodeId);
- CRASH_INSERTION(7146);
- c_endToLock = RNIL;
-
- /* -----------------------------------------------------------------------*/
- /* WE HAVE FINALLY COMPLETED THE TAKE OVER. WE RESET THE STATUS AND CHECK*/
- /* IF ANY MORE TAKE OVERS ARE NEEDED AT THE MOMENT. */
- /* FIRST WE CHECK IF A RESTART IS ONGOING. IN THAT CASE WE RESTART PHASE */
- /* 4 AND CHECK IF ANY MORE TAKE OVERS ARE NEEDED BEFORE WE START NDB */
- /* CLUSTER. THIS CAN ONLY HAPPEN IN A SYSTEM RESTART. */
- /* ---------------------------------------------------------------------- */
- if (takeOverPtr.p->toNodeRestart) {
- jam();
- /* ----------------------------------------------------------------------*/
- /* THE TAKE OVER NODE WAS A STARTING NODE. WE WILL SEND START_COPYCONF */
- /* TO THE STARTING NODE SUCH THAT THE NODE CAN COMPLETE THE START-UP. */
- /* --------------------------------------------------------------------- */
- BlockReference ref = calcDihBlockRef(takeOverPtr.p->toStartingNode);
- signal->theData[0] = takeOverPtr.p->toStartingNode;
- sendSignal(ref, GSN_START_COPYCONF, signal, 1,JBB);
- }//if
- endTakeOver(takeOverPtr.i);
-
- ndbout_c("2 - endTakeOver");
- if (cstartPhase == ZNDB_SPH4) {
- jam();
- ndbrequire(false);
- if (anyActiveTakeOver()) {
- jam();
- ndbout_c("4 - anyActiveTakeOver == true");
- return;
- }//if
- ndbout_c("5 - anyActiveTakeOver == false -> ndbsttorry10Lab");
- ndbsttorry10Lab(signal, __LINE__);
- return;
- }//if
- checkStartTakeOver(signal);
-}//Dbdih::execEND_TOCONF()
-
-void Dbdih::allocateTakeOver(TakeOverRecordPtr& takeOverPtr)
-{
- if (isMaster()) {
- jam();
- //--------------------------------------------
- // Master already seized the take over record.
- //--------------------------------------------
- return;
- }//if
- if (takeOverPtr.i == cfirstfreeTakeOver) {
- jam();
- seizeTakeOver(takeOverPtr);
- } else {
- TakeOverRecordPtr nextTakeOverptr;
- TakeOverRecordPtr prevTakeOverptr;
- nextTakeOverptr.i = takeOverPtr.p->nextTakeOver;
- prevTakeOverptr.i = takeOverPtr.p->prevTakeOver;
- if (prevTakeOverptr.i != RNIL) {
- jam();
- ptrCheckGuard(prevTakeOverptr, MAX_NDB_NODES, takeOverRecord);
- prevTakeOverptr.p->nextTakeOver = nextTakeOverptr.i;
- }//if
- if (nextTakeOverptr.i != RNIL) {
- jam();
- ptrCheckGuard(nextTakeOverptr, MAX_NDB_NODES, takeOverRecord);
- nextTakeOverptr.p->prevTakeOver = prevTakeOverptr.i;
- }//if
- }//if
-}//Dbdih::allocateTakeOver()
-
-void Dbdih::seizeTakeOver(TakeOverRecordPtr& takeOverPtr)
-{
- TakeOverRecordPtr nextTakeOverptr;
- ndbrequire(cfirstfreeTakeOver != RNIL);
- takeOverPtr.i = cfirstfreeTakeOver;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
- cfirstfreeTakeOver = takeOverPtr.p->nextTakeOver;
- nextTakeOverptr.i = takeOverPtr.p->nextTakeOver;
- if (nextTakeOverptr.i != RNIL) {
- jam();
- ptrCheckGuard(nextTakeOverptr, MAX_NDB_NODES, takeOverRecord);
- nextTakeOverptr.p->prevTakeOver = RNIL;
- }//if
- takeOverPtr.p->nextTakeOver = RNIL;
- takeOverPtr.p->prevTakeOver = RNIL;
-}//Dbdih::seizeTakeOver()
-
-void Dbdih::endTakeOver(Uint32 takeOverPtrI)
-{
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = takeOverPtrI;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
-
- releaseTakeOver(takeOverPtrI);
- if ((takeOverPtr.p->toMasterStatus != TakeOverRecord::IDLE) &&
- (takeOverPtr.p->toMasterStatus != TakeOverRecord::TO_WAIT_START_TAKE_OVER)) {
- jam();
- NodeGroupRecordPtr NGPtr;
- NodeRecordPtr nodePtr;
- nodePtr.i = takeOverPtr.p->toStartingNode;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- NGPtr.i = nodePtr.p->nodeGroup;
- ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
- NGPtr.p->activeTakeOver = false;
- }//if
- setAllowNodeStart(takeOverPtr.p->toStartingNode, true);
- initTakeOver(takeOverPtr);
-}//Dbdih::endTakeOver()
-
-void Dbdih::releaseTakeOver(Uint32 takeOverPtrI)
-{
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = takeOverPtrI;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
-
- takeOverPtr.p->nextTakeOver = cfirstfreeTakeOver;
- cfirstfreeTakeOver = takeOverPtr.i;
-}//Dbdih::releaseTakeOver()
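seizeTakeOver() and releaseTakeOver() above manage TakeOverRecords as a free list threaded through the records themselves. A compact standalone sketch of that seize/release pattern; the pool size and field names are illustrative, and the real record carries many more fields:

#include <cassert>
#include <cstdint>

const uint32_t RNIL_ = 0xFFFFFFFF;   // "no record", as RNIL is used above

struct Record { uint32_t nextFree = RNIL_; bool inUse = false; };

struct Pool {
  enum { N = 8 };
  Record rec[N];
  uint32_t firstFree = 0;

  Pool() {                             // chain every record onto the free list
    for (uint32_t i = 0; i < N; i++)
      rec[i].nextFree = (i + 1 < N) ? i + 1 : RNIL_;
  }
  uint32_t seize() {                   // like seizeTakeOver()
    assert(firstFree != RNIL_);
    uint32_t i = firstFree;
    firstFree = rec[i].nextFree;
    rec[i].inUse = true;
    return i;
  }
  void release(uint32_t i) {           // like releaseTakeOver()
    rec[i].inUse = false;
    rec[i].nextFree = firstFree;
    firstFree = i;
  }
};

int main()
{
  Pool pool;
  uint32_t a = pool.seize();
  uint32_t b = pool.seize();
  pool.release(a);
  assert(pool.seize() == a);   // a released record is reused first (LIFO)
  pool.release(b);
  return 0;
}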
-
-void Dbdih::initTakeOver(TakeOverRecordPtr takeOverPtr)
-{
- takeOverPtr.p->toCopyNode = RNIL;
- takeOverPtr.p->toCurrentFragid = RNIL;
- takeOverPtr.p->toCurrentReplica = RNIL;
- takeOverPtr.p->toCurrentTabref = RNIL;
- takeOverPtr.p->toFailedNode = RNIL;
- takeOverPtr.p->toStartingNode = RNIL;
- takeOverPtr.p->prevTakeOver = RNIL;
- takeOverPtr.p->nextTakeOver = RNIL;
- takeOverPtr.p->toNodeRestart = false;
- takeOverPtr.p->toMasterStatus = TakeOverRecord::IDLE;
- takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_IDLE;
-}//Dbdih::initTakeOver()
-
-bool Dbdih::anyActiveTakeOver()
-{
- TakeOverRecordPtr takeOverPtr;
- for (takeOverPtr.i = 0; takeOverPtr.i < MAX_NDB_NODES; takeOverPtr.i++) {
- ptrAss(takeOverPtr, takeOverRecord);
- if (takeOverPtr.p->toMasterStatus != TakeOverRecord::IDLE) {
- jam();
- return true;
- }//if
- }//for
- return false;
-}//Dbdih::anyActiveTakeOver()
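As a reading aid, the per-fragment master-side flow that the handlers above implement can be reduced to an ordered walk through the toMasterStatus values used in this file. The small program below only prints that walk; the comment on each state summarizes the code above and adds no behaviour of its own.

#include <cstdio>

int main()
{
  const char* flow[] = {
    "SELECTING_NEXT",  // startNextCopyFragment: find a replica of the failed node
    "PREPARE_CREATE",  // CREATE_FRAGREQ(STORED) broadcast, new replica joins all ops
    "COPY_FRAG",       // COPY_FRAGREQ to the copy node's LQH, data is copied
    "TO_UPDATE_TO",    // UPDATE_TOREQ(TO_COPY_FRAG_COMPLETED) broadcast
    "COPY_ACTIVE",     // COPY_ACTIVEREQ to the starting node's LQH
    "LOCK_MUTEX",      // switch-primary mutex taken before the commit
    "COMMIT_CREATE"    // CREATE_FRAGREQ(COMMIT_STORED) broadcast, replica goes live
  };
  for (const char* s : flow)
    std::printf("%s -> ", s);
  std::printf("next fragment (back to SELECTING_NEXT)\n");
  return 0;
}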
-
-/*****************************************************************************/
-/* ------------------------------------------------------------------------- */
-/* WE HAVE BEEN REQUESTED TO PERFORM A SYSTEM RESTART. WE START BY */
-/* READING THE GCI FILES. THIS REQUEST WILL ONLY BE SENT TO THE MASTER */
-/* DIH. THAT MEANS WE HAVE TO REPLICATE THE INFORMATION WE READ FROM */
-/* OUR FILES TO ENSURE THAT ALL NODES HAVE THE SAME DISTRIBUTION */
-/* INFORMATION. */
-/* ------------------------------------------------------------------------- */
-/*****************************************************************************/
-void Dbdih::readGciFileLab(Signal* signal)
-{
- FileRecordPtr filePtr;
- filePtr.i = crestartInfoFile[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- filePtr.p->reqStatus = FileRecord::OPENING_GCP;
-
- openFileRo(signal, filePtr);
-}//Dbdih::readGciFileLab()
-
-void Dbdih::openingGcpLab(Signal* signal, FileRecordPtr filePtr)
-{
- /* ----------------------------------------------------------------------- */
- /* WE HAVE SUCCESSFULLY OPENED A FILE CONTAINING INFORMATION ABOUT */
- /* THE GLOBAL CHECKPOINTS THAT ARE POSSIBLE TO RESTART. */
- /* ----------------------------------------------------------------------- */
- readRestorableGci(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::READING_GCP;
-}//Dbdih::openingGcpLab()
-
-void Dbdih::readingGcpLab(Signal* signal, FileRecordPtr filePtr)
-{
- /* ----------------------------------------------------------------------- */
- /* WE HAVE NOW SUCCESSFULLY MANAGED TO READ IN THE GLOBAL CHECKPOINT */
- /* INFORMATION FROM FILE. LATER WE WILL ADD SOME FUNCTIONALITY THAT */
- /* CHECKS THE RESTART TIMERS TO DEDUCE FROM WHERE TO RESTART. */
- /* NOW WE WILL SIMPLY RESTART FROM THE NEWEST GLOBAL CHECKPOINT */
- /* POSSIBLE TO RESTORE. */
- /* */
- /* BEFORE WE INVOKE DICT WE NEED TO COPY CRESTART_INFO TO ALL NODES. */
- /* WE ALSO COPY TO OUR OWN NODE. TO ENABLE US TO DO THIS PROPERLY WE */
- /* START BY CLOSING THIS FILE. */
- /* ----------------------------------------------------------------------- */
- closeFile(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::CLOSING_GCP;
-}//Dbdih::readingGcpLab()
-
-void Dbdih::closingGcpLab(Signal* signal, FileRecordPtr filePtr)
-{
- if (Sysfile::getInitialStartOngoing(SYSFILE->systemRestartBits) == false){
- jam();
- selectMasterCandidateAndSend(signal);
- return;
- } else {
- jam();
- sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB);
- return;
- }//if
-}//Dbdih::closingGcpLab()
-
-/* ------------------------------------------------------------------------- */
-/* SELECT THE MASTER CANDIDATE TO BE USED IN SYSTEM RESTARTS. */
-/* ------------------------------------------------------------------------- */
-void Dbdih::selectMasterCandidateAndSend(Signal* signal)
-{
- Uint32 gci = 0;
- Uint32 masterCandidateId = 0;
- NodeRecordPtr nodePtr;
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- ptrAss(nodePtr, nodeRecord);
- if (SYSFILE->lastCompletedGCI[nodePtr.i] > gci) {
- jam();
- masterCandidateId = nodePtr.i;
- gci = SYSFILE->lastCompletedGCI[nodePtr.i];
- }//if
- }//for
- ndbrequire(masterCandidateId != 0);
- setNodeGroups();
- signal->theData[0] = masterCandidateId;
- signal->theData[1] = gci;
- sendSignal(cntrlblockref, GSN_DIH_RESTARTCONF, signal, 2, JBB);
-
- Uint32 node_groups[MAX_NDB_NODES];
- memset(node_groups, 0, sizeof(node_groups));
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- const Uint32 ng = Sysfile::getNodeGroup(nodePtr.i, SYSFILE->nodeGroups);
- if(ng != NO_NODE_GROUP_ID){
- ndbrequire(ng < MAX_NDB_NODES);
- node_groups[ng]++;
- }
- }
-
- for (nodePtr.i = 0; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- Uint32 count = node_groups[nodePtr.i];
- if(count != 0 && count != cnoReplicas){
- char buf[255];
- BaseString::snprintf(buf, sizeof(buf),
- "Illegal configuration change."
- " Initial start needs to be performed "
- " when changing no of replicas (%d != %d)",
- node_groups[nodePtr.i], cnoReplicas);
- progError(__LINE__,
- ERR_INVALID_CONFIG,
- buf);
- }
- }
-}//Dbdih::selectMasterCandidateAndSend()
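selectMasterCandidateAndSend() above picks as master candidate the node with the highest last completed GCI and refuses the restart if any node group does not contain exactly cnoReplicas nodes. A self-contained sketch of those two rules, with hypothetical node data in place of SYSFILE:

#include <cassert>
#include <cstdint>
#include <map>
#include <vector>

struct NodeInfo { uint32_t nodeId; uint32_t lastCompletedGCI; uint32_t nodeGroup; };

// The candidate is the node whose last completed GCI is highest.
uint32_t selectMasterCandidate(const std::vector<NodeInfo>& nodes)
{
  uint32_t bestNode = 0, bestGci = 0;
  for (const NodeInfo& n : nodes)
    if (n.lastCompletedGCI > bestGci) { bestGci = n.lastCompletedGCI; bestNode = n.nodeId; }
  assert(bestNode != 0);   // at least one node must have a restorable GCI
  return bestNode;
}

// Every configured node group must hold exactly noReplicas nodes.
bool checkReplicaCount(const std::vector<NodeInfo>& nodes, uint32_t noReplicas)
{
  std::map<uint32_t, uint32_t> perGroup;
  for (const NodeInfo& n : nodes) perGroup[n.nodeGroup]++;
  for (const auto& g : perGroup)
    if (g.second != noReplicas) return false;   // illegal configuration change
  return true;
}

int main()
{
  std::vector<NodeInfo> nodes = { {3, 120, 0}, {4, 118, 0}, {5, 121, 1}, {6, 121, 1} };
  assert(selectMasterCandidate(nodes) == 5);
  assert(checkReplicaCount(nodes, 2));
  return 0;
}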
-
-/* ------------------------------------------------------------------------- */
-/* ERROR HANDLING DURING READING RESTORABLE GCI FROM FILE. */
-/* ------------------------------------------------------------------------- */
-void Dbdih::openingGcpErrorLab(Signal* signal, FileRecordPtr filePtr)
-{
- filePtr.p->fileStatus = FileRecord::CRASHED;
- filePtr.p->reqStatus = FileRecord::IDLE;
- if (crestartInfoFile[0] == filePtr.i) {
- jam();
- /* --------------------------------------------------------------------- */
- /* THE FIRST FILE COULD NOT BE OPENED. SET STATUS TO CRASHED AND */
- /* TRY TO OPEN THE NEXT FILE. */
- /* --------------------------------------------------------------------- */
- filePtr.i = crestartInfoFile[1];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- openFileRo(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::OPENING_GCP;
- } else {
- jam();
- /* --------------------------------------------------------------------- */
- /* WE FAILED IN OPENING THE SECOND FILE. BOTH FILES WERE CORRUPTED. WE */
- /* CANNOT CONTINUE THE RESTART IN THIS CASE. TELL NDBCNTR OF OUR */
- /* FAILURE. */
- /*---------------------------------------------------------------------- */
- sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB);
- return;
- }//if
-}//Dbdih::openingGcpErrorLab()
-
-void Dbdih::readingGcpErrorLab(Signal* signal, FileRecordPtr filePtr)
-{
- filePtr.p->fileStatus = FileRecord::CRASHED;
- /* ----------------------------------------------------------------------- */
- /* WE FAILED IN READING THE FILE AS WELL. WE WILL CLOSE THIS FILE. */
- /* ----------------------------------------------------------------------- */
- closeFile(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::CLOSING_GCP_CRASH;
-}//Dbdih::readingGcpErrorLab()
-
-void Dbdih::closingGcpCrashLab(Signal* signal, FileRecordPtr filePtr)
-{
- if (crestartInfoFile[0] == filePtr.i) {
- jam();
- /* --------------------------------------------------------------------- */
- /* ERROR IN FIRST FILE, TRY THE SECOND FILE. */
- /* --------------------------------------------------------------------- */
- filePtr.i = crestartInfoFile[1];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- openFileRw(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::OPENING_GCP;
- return;
- }//if
- /* ----------------------------------------------------------------------- */
- /* WE DISCOVERED A FAILURE WITH THE SECOND FILE AS WELL. THIS IS A */
- /* SERIOUS PROBLEM. REPORT FAILURE TO NDBCNTR. */
- /* ----------------------------------------------------------------------- */
- sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB);
-}//Dbdih::closingGcpCrashLab()
-
-/*****************************************************************************/
-/* ------------------------------------------------------------------------- */
-/* THIS IS AN INITIAL RESTART. WE WILL CREATE THE TWO FILES DESCRIBING */
-/* THE GLOBAL CHECKPOINTS THAT ARE RESTORABLE. */
-/* ------------------------------------------------------------------------- */
-/*****************************************************************************/
-void Dbdih::initGciFilesLab(Signal* signal)
-{
- FileRecordPtr filePtr;
- filePtr.i = crestartInfoFile[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- createFileRw(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::CREATING_GCP;
-}//Dbdih::initGciFilesLab()
-
-/* ------------------------------------------------------------------------- */
-/* A GLOBAL CHECKPOINT FILE HAS BEEN SUCCESSFULLY CREATED. */
-/* ------------------------------------------------------------------------- */
-void Dbdih::creatingGcpLab(Signal* signal, FileRecordPtr filePtr)
-{
- if (filePtr.i == crestartInfoFile[0]) {
- jam();
- /* --------------------------------------------------------------------- */
- /* IF CREATED FIRST THEN ALSO CREATE THE SECOND FILE. */
- /* --------------------------------------------------------------------- */
- filePtr.i = crestartInfoFile[1];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- createFileRw(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::CREATING_GCP;
- } else {
- jam();
- /* --------------------------------------------------------------------- */
- /* BOTH FILES HAVE BEEN CREATED. NOW WRITE THE INITIAL DATA TO BOTH */
- /* OF THE FILES. */
- /* --------------------------------------------------------------------- */
- filePtr.i = crestartInfoFile[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- writeRestorableGci(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::WRITE_INIT_GCP;
- }//if
-}//Dbdih::creatingGcpLab()
-
-/* ------------------------------------------------------------------------- */
-/* WE HAVE SUCCESSFULLY WRITTEN A GCI FILE. */
-/* ------------------------------------------------------------------------- */
-void Dbdih::writeInitGcpLab(Signal* signal, FileRecordPtr filePtr)
-{
- filePtr.p->reqStatus = FileRecord::IDLE;
- if (filePtr.i == crestartInfoFile[0]) {
- jam();
- /* --------------------------------------------------------------------- */
- /* WE HAVE WRITTEN THE FIRST FILE NOW ALSO WRITE THE SECOND FILE. */
- /* --------------------------------------------------------------------- */
- filePtr.i = crestartInfoFile[1];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- writeRestorableGci(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::WRITE_INIT_GCP;
- } else {
- /* --------------------------------------------------------------------- */
- /* WE HAVE WRITTEN BOTH FILES. LEAVE BOTH FILES OPEN AND CONFIRM OUR */
- /* PART OF THE INITIAL START. */
- /* --------------------------------------------------------------------- */
- if (isMaster()) {
- jam();
- /*---------------------------------------------------------------------*/
- // IN MASTER NODES THE START REQUEST IS RECEIVED FROM NDBCNTR AND WE MUST
- // RESPOND WHEN COMPLETED.
- /*---------------------------------------------------------------------*/
- signal->theData[0] = reference();
- sendSignal(cndbStartReqBlockref, GSN_NDB_STARTCONF, signal, 1, JBB);
- } else {
- jam();
- ndbsttorry10Lab(signal, __LINE__);
- return;
- }//if
- }//if
-}//Dbdih::writeInitGcpLab()
-
-/*****************************************************************************/
-/* ********** NODES DELETION MODULE *************/
-/*****************************************************************************/
-/*---------------------------------------------------------------------------*/
-/* LOGIC FOR NODE FAILURE */
-/*---------------------------------------------------------------------------*/
-void Dbdih::execNODE_FAILREP(Signal* signal)
-{
- Uint32 i;
- Uint32 failedNodes[MAX_NDB_NODES];
- jamEntry();
- NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
-
- cfailurenr = nodeFail->failNo;
- Uint32 newMasterId = nodeFail->masterNodeId;
- const Uint32 noOfFailedNodes = nodeFail->noOfNodes;
-
- /*-------------------------------------------------------------------------*/
- // The first step is to convert from a bit mask to an array of failed nodes.
- /*-------------------------------------------------------------------------*/
- Uint32 index = 0;
- for (i = 1; i < MAX_NDB_NODES; i++) {
- jam();
- if(NodeBitmask::get(nodeFail->theNodes, i)){
- jam();
- failedNodes[index] = i;
- index++;
- }//if
- }//for
- ndbrequire(noOfFailedNodes == index);
- ndbrequire(noOfFailedNodes - 1 < MAX_NDB_NODES);
-
- /*-------------------------------------------------------------------------*/
- // The second step is to update the node status of the failed nodes, remove
- // them from the alive node list and put them into the dead node list. Also
- // update the number of nodes on-line.
- // We also set certain state variables ensuring that the node no longer is
- // used in transactions and also mark that we received this signal.
- /*-------------------------------------------------------------------------*/
- for (i = 0; i < noOfFailedNodes; i++) {
- jam();
- NodeRecordPtr TNodePtr;
- TNodePtr.i = failedNodes[i];
- ptrCheckGuard(TNodePtr, MAX_NDB_NODES, nodeRecord);
- TNodePtr.p->useInTransactions = false;
- TNodePtr.p->m_inclDihLcp = false;
- TNodePtr.p->recNODE_FAILREP = ZTRUE;
- if (TNodePtr.p->nodeStatus == NodeRecord::ALIVE) {
- jam();
- con_lineNodes--;
- TNodePtr.p->nodeStatus = NodeRecord::DIED_NOW;
- removeAlive(TNodePtr);
- insertDeadNode(TNodePtr);
- }//if
- }//for
-
- /*-------------------------------------------------------------------------*/
- // Verify that we can continue to operate the cluster. If we cannot we will
- // not return from checkEscalation.
- /*-------------------------------------------------------------------------*/
- checkEscalation();
-
- /*------------------------------------------------------------------------*/
- // Verify that a starting node has also crashed. Reset the node start record.
- /*-------------------------------------------------------------------------*/
- if (c_nodeStartMaster.startNode != RNIL) {
- ndbrequire(getNodeStatus(c_nodeStartMaster.startNode)!= NodeRecord::ALIVE);
- }//if
-
- /*--------------------------------------------------*/
- /* */
- /* WE CHANGE THE REFERENCE TO MASTER DIH */
- /* BLOCK AND POINTER AT THIS PLACE IN THE CODE*/
- /*--------------------------------------------------*/
- Uint32 oldMasterId = cmasterNodeId;
- BlockReference oldMasterRef = cmasterdihref;
- cmasterdihref = calcDihBlockRef(newMasterId);
- cmasterNodeId = newMasterId;
-
- const bool masterTakeOver = (oldMasterId != newMasterId);
-
- for(i = 0; i < noOfFailedNodes; i++) {
- NodeRecordPtr failedNodePtr;
- failedNodePtr.i = failedNodes[i];
- ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
- Uint32 activeTakeOverPtr = findTakeOver(failedNodes[i]);
- if (oldMasterRef == reference()) {
- /*-------------------------------------------------------*/
- // Functions that need to be called only for master nodes.
- /*-------------------------------------------------------*/
- checkCopyTab(failedNodePtr);
- checkStopPermMaster(signal, failedNodePtr);
- checkWaitGCPMaster(signal, failedNodes[i]);
- checkTakeOverInMasterAllNodeFailure(signal, failedNodePtr);
- checkTakeOverInMasterCopyNodeFailure(signal, failedNodePtr.i);
- checkTakeOverInMasterStartNodeFailure(signal, activeTakeOverPtr);
- checkGcpOutstanding(signal, failedNodePtr.i);
- } else {
- jam();
- /*-----------------------------------------------------------*/
- // Functions that need to be called only for nodes that were
- // not master before these failures.
- /*-----------------------------------------------------------*/
- checkStopPermProxy(signal, failedNodes[i]);
- checkWaitGCPProxy(signal, failedNodes[i]);
- if (isMaster()) {
- /*-----------------------------------------------------------*/
- // We take over as master since old master has failed
- /*-----------------------------------------------------------*/
- handleTakeOverNewMaster(signal, activeTakeOverPtr);
- } else {
- /*-----------------------------------------------------------*/
- // We are not master and will not become master.
- /*-----------------------------------------------------------*/
- checkTakeOverInNonMasterStartNodeFailure(signal, activeTakeOverPtr);
- }//if
- }//if
- /*--------------------------------------------------*/
- // Functions that need to be called for all nodes.
- /*--------------------------------------------------*/
- checkStopMe(signal, failedNodePtr);
- failedNodeLcpHandling(signal, failedNodePtr);
- checkWaitDropTabFailedLqh(signal, failedNodePtr.i, 0); // 0 = start w/ tab 0
- startRemoveFailedNode(signal, failedNodePtr);
-
- /**
- * This is the last function called
- * It modifies failedNodePtr.p->nodeStatus
- */
- failedNodeSynchHandling(signal, failedNodePtr);
- }//for
-
- if(masterTakeOver){
- jam();
- startLcpMasterTakeOver(signal, oldMasterId);
- startGcpMasterTakeOver(signal, oldMasterId);
-
- if(getNodeState().getNodeRestartInProgress()){
- jam();
- progError(__LINE__,
- ERR_SYSTEM_ERROR,
- "Unhandled master failure during node restart");
- }
- }
-
-
- if (isMaster()) {
- jam();
- setNodeRestartInfoBits();
- }//if
-}//Dbdih::execNODE_FAILREP()
-
-void Dbdih::checkCopyTab(NodeRecordPtr failedNodePtr)
-{
- jam();
-
- if(c_nodeStartMaster.startNode != failedNodePtr.i){
- jam();
- return;
- }
-
- switch(c_nodeStartMaster.m_outstandingGsn){
- case GSN_COPY_TABREQ:
- jam();
- ndbrequire(c_COPY_TABREQ_Counter.isWaitingFor(failedNodePtr.i));
- releaseTabPages(failedNodePtr.p->activeTabptr);
- c_COPY_TABREQ_Counter.clearWaitingFor(failedNodePtr.i);
- c_nodeStartMaster.wait = ZFALSE;
- break;
- case GSN_START_INFOREQ:
- case GSN_START_PERMCONF:
- case GSN_DICTSTARTREQ:
- case GSN_START_MECONF:
- jam();
- break;
- default:
- ndbout_c("outstanding gsn: %s(%d)",
- getSignalName(c_nodeStartMaster.m_outstandingGsn),
- c_nodeStartMaster.m_outstandingGsn);
- ndbrequire(false);
- }
-
- nodeResetStart();
-}//Dbdih::checkCopyTab()
-
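- /**
- * If the failed node was among the nodes we are waiting on for
- * STOP_ME_CONF, fake the confirmation on its behalf so that the
- * stop protocol can complete.
- */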
-void Dbdih::checkStopMe(Signal* signal, NodeRecordPtr failedNodePtr)
-{
- jam();
- if (c_STOP_ME_REQ_Counter.isWaitingFor(failedNodePtr.i)){
- jam();
- ndbrequire(c_stopMe.clientRef != 0);
- StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0];
- stopMeConf->senderRef = calcDihBlockRef(failedNodePtr.i);
- stopMeConf->senderData = c_stopMe.clientData;
- sendSignal(reference(), GSN_STOP_ME_CONF, signal,
- StopMeConf::SignalLength, JBB);
- }//if
-}//Dbdih::checkStopMe()
-
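- /**
- * The failed node was participating in a DIH_SWITCH_REPLICA round of the
- * stop permission procedure; answer with a REF on its behalf so that the
- * procedure is aborted.
- */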
-void Dbdih::checkStopPermMaster(Signal* signal, NodeRecordPtr failedNodePtr)
-{
- DihSwitchReplicaRef* const ref = (DihSwitchReplicaRef*)&signal->theData[0];
- jam();
- if (c_DIH_SWITCH_REPLICA_REQ_Counter.isWaitingFor(failedNodePtr.i)){
- jam();
- ndbrequire(c_stopPermMaster.clientRef != 0);
- ref->senderNode = failedNodePtr.i;
- ref->errorCode = StopPermRef::NF_CausedAbortOfStopProcedure;
- sendSignal(reference(), GSN_DIH_SWITCH_REPLICA_REF, signal,
- DihSwitchReplicaRef::SignalLength, JBB);
- return;
- }//if
-}//Dbdih::checkStopPermMaster()
-
-void Dbdih::checkStopPermProxy(Signal* signal, NodeId failedNodeId)
-{
- jam();
- if(c_stopPermProxy.clientRef != 0 &&
- refToNode(c_stopPermProxy.masterRef) == failedNodeId){
-
- /**
- * The master has failed report to proxy-client
- */
- jam();
- StopPermRef* const ref = (StopPermRef*)&signal->theData[0];
-
- ref->senderData = c_stopPermProxy.clientData;
- ref->errorCode = StopPermRef::NF_CausedAbortOfStopProcedure;
- sendSignal(c_stopPermProxy.clientRef, GSN_STOP_PERM_REF, signal, 2, JBB);
- c_stopPermProxy.clientRef = 0;
- }//if
-}//Dbdih::checkStopPermProxy()
-
-void
-Dbdih::checkTakeOverInMasterAllNodeFailure(Signal* signal,
- NodeRecordPtr failedNodePtr)
-{
- //------------------------------------------------------------------------
- // This code is used to handle the failure of "all" nodes during the
- // take over when "all" nodes are informed about state changes in
- // the take over protocol.
- //--------------------------------------------------------------------------
- if (c_START_TOREQ_Counter.isWaitingFor(failedNodePtr.i)){
- jam();
- StartToConf * const conf = (StartToConf *)&signal->theData[0];
- conf->userPtr = c_startToLock;
- conf->sendingNodeId = failedNodePtr.i;
- conf->startingNodeId = getStartNode(c_startToLock);
- sendSignal(reference(), GSN_START_TOCONF, signal,
- StartToConf::SignalLength, JBB);
- }//if
- if (c_CREATE_FRAGREQ_Counter.isWaitingFor(failedNodePtr.i)){
- jam();
- CreateFragConf * const conf = (CreateFragConf *)&signal->theData[0];
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = c_createFragmentLock;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
- conf->userPtr = takeOverPtr.i;
- conf->tableId = takeOverPtr.p->toCurrentTabref;
- conf->fragId = takeOverPtr.p->toCurrentFragid;
- conf->sendingNodeId = failedNodePtr.i;
- conf->startingNodeId = takeOverPtr.p->toStartingNode;
- sendSignal(reference(), GSN_CREATE_FRAGCONF, signal,
- CreateFragConf::SignalLength, JBB);
- }//if
- if (c_UPDATE_TOREQ_Counter.isWaitingFor(failedNodePtr.i)){
- jam();
- UpdateToConf * const conf = (UpdateToConf *)&signal->theData[0];
- conf->userPtr = c_updateToLock;
- conf->sendingNodeId = failedNodePtr.i;
- conf->startingNodeId = getStartNode(c_updateToLock);
- sendSignal(reference(), GSN_UPDATE_TOCONF, signal,
- UpdateToConf::SignalLength, JBB);
- }//if
-
- if (c_END_TOREQ_Counter.isWaitingFor(failedNodePtr.i)){
- jam();
- EndToConf * const conf = (EndToConf *)&signal->theData[0];
- conf->userPtr = c_endToLock;
- conf->sendingNodeId = failedNodePtr.i;
- conf->startingNodeId = getStartNode(c_endToLock);
- sendSignal(reference(), GSN_END_TOCONF, signal,
- EndToConf::SignalLength, JBB);
- }//if
-}//Dbdih::checkTakeOverInMasterAllNodeFailure()
-
-void Dbdih::checkTakeOverInMasterCopyNodeFailure(Signal* signal,
- Uint32 failedNodeId)
-{
- //---------------------------------------------------------------------------
- // This code is used to handle failure of the copying node during a take over
- //---------------------------------------------------------------------------
- TakeOverRecordPtr takeOverPtr;
- for (Uint32 i = 0; i < MAX_NDB_NODES; i++) {
- jam();
- takeOverPtr.i = i;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
- if ((takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG) &&
- (takeOverPtr.p->toCopyNode == failedNodeId)) {
- jam();
- /**
- * The copying node failed but the system is still operational.
- * We restart the copy process by selecting a new copy node.
- * We do not need to add a fragment however since it is already added.
- * We start again from the prepare create fragment phase.
- */
- prepareSendCreateFragReq(signal, takeOverPtr.i);
- }//if
- }//for
-}//Dbdih::checkTakeOverInMasterCopyNodeFailure()
-
-void Dbdih::checkTakeOverInMasterStartNodeFailure(Signal* signal,
- Uint32 takeOverPtrI)
-{
- jam();
- if (takeOverPtrI == RNIL) {
- jam();
- return;
- }
- //-----------------------------------------------------------------------
- // We are the master and the starting node has failed during a take over.
- // We need to handle this failure in different ways depending on the state.
- //-----------------------------------------------------------------------
-
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = takeOverPtrI;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
-
- bool ok = false;
- switch (takeOverPtr.p->toMasterStatus) {
- case TakeOverRecord::IDLE:
- //-----------------------------------------------------------------------
- // The state cannot be idle when it has a starting node.
- //-----------------------------------------------------------------------
- ndbrequire(false);
- break;
- case TakeOverRecord::TO_WAIT_START_TAKE_OVER:
- jam();
- case TakeOverRecord::TO_START_COPY:
- jam();
- case TakeOverRecord::TO_START_COPY_ONGOING:
- jam();
- case TakeOverRecord::TO_WAIT_START:
- jam();
- case TakeOverRecord::TO_WAIT_PREPARE_CREATE:
- jam();
- case TakeOverRecord::TO_WAIT_UPDATE_TO:
- jam();
- case TakeOverRecord::TO_WAIT_COMMIT_CREATE:
- jam();
- case TakeOverRecord::TO_END_COPY:
- jam();
- case TakeOverRecord::TO_END_COPY_ONGOING:
- jam();
- case TakeOverRecord::TO_WAIT_ENDING:
- jam();
- //-----------------------------------------------------------------------
- // We will not do anything since an internal signal process is outstanding.
- // When the signal arrives the take over will be released.
- //-----------------------------------------------------------------------
- ok = true;
- break;
- case TakeOverRecord::STARTING:
- jam();
- ok = true;
- c_startToLock = RNIL;
- c_START_TOREQ_Counter.clearWaitingFor();
- endTakeOver(takeOverPtr.i);
- break;
- case TakeOverRecord::TO_UPDATE_TO:
- jam();
- ok = true;
- c_updateToLock = RNIL;
- c_UPDATE_TOREQ_Counter.clearWaitingFor();
- endTakeOver(takeOverPtr.i);
- break;
- case TakeOverRecord::ENDING:
- jam();
- ok = true;
- c_endToLock = RNIL;
- c_END_TOREQ_Counter.clearWaitingFor();
- endTakeOver(takeOverPtr.i);
- break;
- case TakeOverRecord::COMMIT_CREATE:
- ok = true;
- jam();
- {// We have mutex
- Mutex m(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle);
- m.unlock(); // Ignore result
- }
- // Fall through
- case TakeOverRecord::PREPARE_CREATE:
- ok = true;
- jam();
- c_createFragmentLock = RNIL;
- c_CREATE_FRAGREQ_Counter.clearWaitingFor();
- endTakeOver(takeOverPtr.i);
- break;
- case TakeOverRecord::LOCK_MUTEX:
- ok = true;
- jam();
- // Lock mutex will return and do endTakeOver
- break;
-
- //-----------------------------------------------------------------------
- // Signals are outstanding to external nodes. These signals carry the node
- // id of the starting node and will not use the take over record if the
- // starting node has failed.
- //-----------------------------------------------------------------------
- case TakeOverRecord::COPY_FRAG:
- ok = true;
- jam();
- //-----------------------------------------------------------------------
- // The starting node will discover the problem. We will receive either
- // COPY_FRAGREQ or COPY_FRAGCONF and then we can release the take over
- // record and end the process. If the copying node should also die then
- // we will try to send prepare create fragment and will then discover
- // that the starting node has failed.
- //-----------------------------------------------------------------------
- break;
- case TakeOverRecord::COPY_ACTIVE:
- ok = true;
- jam();
- //-----------------------------------------------------------------------
- // In this state we are waiting for a signal from the starting node. Thus
- // we can release the take over record and end the process.
- //-----------------------------------------------------------------------
- endTakeOver(takeOverPtr.i);
- break;
- case TakeOverRecord::WAIT_LCP:
- ok = true;
- jam();
- //-----------------------------------------------------------------------
- // The starting node failed while we were waiting for the LCP to complete.
- // We can release the take over record and end the process.
- //-----------------------------------------------------------------------
- endTakeOver(takeOverPtr.i);
- break;
- /**
- * The following are states that it should not be possible to "be" in
- */
- case TakeOverRecord::SELECTING_NEXT:
- jam();
- case TakeOverRecord::TO_COPY_COMPLETED:
- jam();
- ndbrequire(false);
- }
- if(!ok){
- jamLine(takeOverPtr.p->toSlaveStatus);
- ndbrequire(ok);
- }
-}//Dbdih::checkTakeOverInMasterStartNodeFailure()
-
-void Dbdih::checkTakeOverInNonMasterStartNodeFailure(Signal* signal,
- Uint32 takeOverPtrI)
-{
- jam();
- if (takeOverPtrI == RNIL) {
- jam();
- return;
- }
- //-----------------------------------------------------------------------
- // We are not master and not taking over as master. A take over was ongoing
- // but the starting node has now failed. Handle it according to the state
- // of the take over.
- //-----------------------------------------------------------------------
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = takeOverPtrI;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
- bool ok = false;
- switch (takeOverPtr.p->toSlaveStatus) {
- case TakeOverRecord::TO_SLAVE_IDLE:
- ndbrequire(false);
- break;
- case TakeOverRecord::TO_SLAVE_STARTED:
- jam();
- case TakeOverRecord::TO_SLAVE_CREATE_PREPARE:
- jam();
- case TakeOverRecord::TO_SLAVE_COPY_FRAG_COMPLETED:
- jam();
- case TakeOverRecord::TO_SLAVE_CREATE_COMMIT:
- jam();
- case TakeOverRecord::TO_SLAVE_COPY_COMPLETED:
- jam();
- ok = true;
- endTakeOver(takeOverPtr.i);
- break;
- }//switch
- if(!ok){
- jamLine(takeOverPtr.p->toSlaveStatus);
- ndbrequire(ok);
- }
-}//Dbdih::checkTakeOverInNonMasterStartNodeFailure()
-
-void Dbdih::failedNodeSynchHandling(Signal* signal,
- NodeRecordPtr failedNodePtr)
-{
- jam();
- /*----------------------------------------------------*/
- /* INITIALISE THE VARIABLES THAT KEEP TRACK OF */
- /* WHEN A NODE FAILURE IS COMPLETED. */
- /*----------------------------------------------------*/
- failedNodePtr.p->dbdictFailCompleted = ZFALSE;
- failedNodePtr.p->dbtcFailCompleted = ZFALSE;
- failedNodePtr.p->dbdihFailCompleted = ZFALSE;
- failedNodePtr.p->dblqhFailCompleted = ZFALSE;
-
- failedNodePtr.p->m_NF_COMPLETE_REP.clearWaitingFor();
-
- NodeRecordPtr nodePtr;
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- ptrAss(nodePtr, nodeRecord);
- if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
- jam();
- /**
- * We're waiting for nodePtr.i to complete
- * handling of failedNodePtr.i's death
- */
-
- failedNodePtr.p->m_NF_COMPLETE_REP.setWaitingFor(nodePtr.i);
- } else {
- jam();
- if ((nodePtr.p->nodeStatus == NodeRecord::DYING) &&
- (nodePtr.p->m_NF_COMPLETE_REP.isWaitingFor(failedNodePtr.i))){
- jam();
- /*----------------------------------------------------*/
- /* THE NODE FAILED BEFORE REPORTING THE FAILURE */
- /* HANDLING COMPLETED ON THIS FAILED NODE. */
- /* REPORT THAT NODE FAILURE HANDLING WAS */
- /* COMPLETED ON THE NEW FAILED NODE FOR THIS */
- /* PARTICULAR OLD FAILED NODE. */
- /*----------------------------------------------------*/
- NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0];
- nf->blockNo = 0;
- nf->nodeId = failedNodePtr.i;
- nf->failedNodeId = nodePtr.i;
- nf->from = __LINE__;
- sendSignal(reference(), GSN_NF_COMPLETEREP, signal,
- NFCompleteRep::SignalLength, JBB);
- }//if
- }//if
- }//for
- if (failedNodePtr.p->nodeStatus == NodeRecord::DIED_NOW) {
- jam();
- failedNodePtr.p->nodeStatus = NodeRecord::DYING;
- } else {
- jam();
- /*----------------------------------------------------*/
- // No more processing is needed when the node had not even started
- // yet. We set the node status to DEAD since we do not care whether
- // all nodes complete the node failure handling. The node has not
- // been included in the node failure protocols.
- /*----------------------------------------------------*/
- failedNodePtr.p->nodeStatus = NodeRecord::DEAD;
- /**-----------------------------------------------------------------------
- * WE HAVE COMPLETED HANDLING THE NODE FAILURE IN DIH. WE CAN REPORT THIS
- * TO DIH, WHICH WAITS FOR THE OTHER BLOCKS TO CONCLUDE AS WELL.
- *-----------------------------------------------------------------------*/
- NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0];
- nf->blockNo = DBDIH;
- nf->nodeId = cownNodeId;
- nf->failedNodeId = failedNodePtr.i;
- nf->from = __LINE__;
- sendSignal(reference(), GSN_NF_COMPLETEREP, signal,
- NFCompleteRep::SignalLength, JBB);
- }//if
-}//Dbdih::failedNodeSynchHandling()
-
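- /**
- * Find the take over record (if any) where the failed node is the
- * starting node. Returns RNIL when no such take over exists.
- */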
-Uint32 Dbdih::findTakeOver(Uint32 failedNodeId)
-{
- for (Uint32 i = 0; i < MAX_NDB_NODES; i++) {
- jam();
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = i;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
- if (takeOverPtr.p->toStartingNode == failedNodeId) {
- jam();
- return i;
- }//if
- }//for
- return RNIL;
-}//Dbdih::findTakeOver()
-
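- /**
- * Return the starting node of the given take over record.
- */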
-Uint32 Dbdih::getStartNode(Uint32 takeOverPtrI)
-{
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = takeOverPtrI;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
- return takeOverPtr.p->toStartingNode;
-}//Dbdih::getStartNode()
-
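- /**
- * Handle the failed node's part in the ongoing LCP: downgrade its active
- * status, clear it from the LCP participant bitmasks and fake any
- * LCP-related responses it still owed us.
- */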
-void Dbdih::failedNodeLcpHandling(Signal* signal, NodeRecordPtr failedNodePtr)
-{
- jam();
- const Uint32 nodeId = failedNodePtr.i;
-
- if (c_lcpState.m_participatingLQH.get(failedNodePtr.i)){
- /*----------------------------------------------------*/
- /* THE NODE WAS INVOLVED IN A LOCAL CHECKPOINT. WE */
- /* MUST UPDATE THE ACTIVE STATUS TO INDICATE THAT */
- /* THE NODE HAS MISSED A LOCAL CHECKPOINT. */
- /*----------------------------------------------------*/
- switch (failedNodePtr.p->activeStatus) {
- case Sysfile::NS_Active:
- jam();
- failedNodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_1;
- break;
- case Sysfile::NS_ActiveMissed_1:
- jam();
- failedNodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_2;
- break;
- case Sysfile::NS_ActiveMissed_2:
- jam();
- failedNodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
- break;
- case Sysfile::NS_TakeOver:
- jam();
- failedNodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
- break;
- default:
- ndbout << "activeStatus = " << (Uint32) failedNodePtr.p->activeStatus;
- ndbout << " at failure after NODE_FAILREP of node = ";
- ndbout << failedNodePtr.i << endl;
- ndbrequire(false);
- break;
- }//switch
- }//if
-
- c_lcpState.m_participatingDIH.clear(failedNodePtr.i);
- c_lcpState.m_participatingLQH.clear(failedNodePtr.i);
-
- if(c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.isWaitingFor(failedNodePtr.i)){
- jam();
- LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend();
- rep->nodeId = failedNodePtr.i;
- rep->lcpId = SYSFILE->latestLCP_ID;
- rep->blockNo = DBDIH;
- sendSignal(reference(), GSN_LCP_COMPLETE_REP, signal,
- LcpCompleteRep::SignalLength, JBB);
- }
-
- /**
- * Check if we're waiting for the failed node's LQH to complete
- *
- * Note that this is run "before" the LCP master take over
- */
- if(c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.isWaitingFor(nodeId)){
- jam();
-
- LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend();
- rep->nodeId = nodeId;
- rep->lcpId = SYSFILE->latestLCP_ID;
- rep->blockNo = DBLQH;
- sendSignal(reference(), GSN_LCP_COMPLETE_REP, signal,
- LcpCompleteRep::SignalLength, JBB);
-
- if(c_lcpState.m_LAST_LCP_FRAG_ORD.isWaitingFor(nodeId)){
- jam();
- /**
- * Make sure we're ready to accept it
- */
- c_lcpState.m_LAST_LCP_FRAG_ORD.clearWaitingFor(nodeId);
- }
- }
-
- if (c_TCGETOPSIZEREQ_Counter.isWaitingFor(failedNodePtr.i)) {
- jam();
- signal->theData[0] = failedNodePtr.i;
- signal->theData[1] = 0;
- sendSignal(reference(), GSN_TCGETOPSIZECONF, signal, 2, JBB);
- }//if
-
- if (c_TC_CLOPSIZEREQ_Counter.isWaitingFor(failedNodePtr.i)) {
- jam();
- signal->theData[0] = failedNodePtr.i;
- sendSignal(reference(), GSN_TC_CLOPSIZECONF, signal, 1, JBB);
- }//if
-
- if (c_START_LCP_REQ_Counter.isWaitingFor(failedNodePtr.i)) {
- jam();
- StartLcpConf * conf = (StartLcpConf*)signal->getDataPtrSend();
- conf->senderRef = numberToRef(DBLQH, failedNodePtr.i);
- conf->lcpId = SYSFILE->latestLCP_ID;
- sendSignal(reference(), GSN_START_LCP_CONF, signal,
- StartLcpConf::SignalLength, JBB);
- }//if
-
- if (c_EMPTY_LCP_REQ_Counter.isWaitingFor(failedNodePtr.i)) {
- jam();
- EmptyLcpConf * const rep = (EmptyLcpConf *)&signal->theData[0];
- rep->senderNodeId = failedNodePtr.i;
- rep->tableId = ~0;
- rep->fragmentId = ~0;
- rep->lcpNo = 0;
- rep->lcpId = SYSFILE->latestLCP_ID;
- rep->idle = true;
- sendSignal(reference(), GSN_EMPTY_LCP_CONF, signal,
- EmptyLcpConf::SignalLength, JBB);
- }//if
-
- if (c_MASTER_LCPREQ_Counter.isWaitingFor(failedNodePtr.i)) {
- jam();
- MasterLCPRef * const ref = (MasterLCPRef *)&signal->theData[0];
- ref->senderNodeId = failedNodePtr.i;
- ref->failedNodeId = cmasterTakeOverNode;
- sendSignal(reference(), GSN_MASTER_LCPREF, signal,
- MasterLCPRef::SignalLength, JBB);
- }//if
-
-}//Dbdih::failedNodeLcpHandling()
-
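- /**
- * Fake the GCP protocol responses (prepare, commit, save, copy GCI and
- * master take over) that the failed node still owed us so that the
- * protocol can proceed.
- */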
-void Dbdih::checkGcpOutstanding(Signal* signal, Uint32 failedNodeId){
- if (c_GCP_PREPARE_Counter.isWaitingFor(failedNodeId)){
- jam();
- signal->theData[0] = failedNodeId;
- signal->theData[1] = cnewgcp;
- sendSignal(reference(), GSN_GCP_PREPARECONF, signal, 2, JBB);
- }//if
-
- if (c_GCP_COMMIT_Counter.isWaitingFor(failedNodeId)) {
- jam();
- signal->theData[0] = failedNodeId;
- signal->theData[1] = coldgcp;
- signal->theData[2] = cfailurenr;
- sendSignal(reference(), GSN_GCP_NODEFINISH, signal, 3, JBB);
- }//if
-
- if (c_GCP_SAVEREQ_Counter.isWaitingFor(failedNodeId)) {
- jam();
- GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
- saveRef->dihPtr = failedNodeId;
- saveRef->nodeId = failedNodeId;
- saveRef->gci = coldgcp;
- saveRef->errorCode = GCPSaveRef::FakedSignalDueToNodeFailure;
- sendSignal(reference(), GSN_GCP_SAVEREF, signal,
- GCPSaveRef::SignalLength, JBB);
- }//if
-
- if (c_COPY_GCIREQ_Counter.isWaitingFor(failedNodeId)) {
- jam();
- signal->theData[0] = failedNodeId;
- sendSignal(reference(), GSN_COPY_GCICONF, signal, 1, JBB);
- }//if
-
- if (c_MASTER_GCPREQ_Counter.isWaitingFor(failedNodeId)){
- jam();
- MasterGCPRef * const ref = (MasterGCPRef *)&signal->theData[0];
- ref->senderNodeId = failedNodeId;
- ref->failedNodeId = cmasterTakeOverNode;
- sendSignal(reference(), GSN_MASTER_GCPREF, signal,
- MasterGCPRef::SignalLength, JBB);
- }//if
-}//Dbdih::checkGcpOutstanding()
-
-
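- /**
- * Begin the LCP part of the master take over: ask all nodes to empty
- * their LCP queues with EMPTY_LCP_REQ, unless such a round is already
- * outstanding from an earlier node failure.
- */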
-void
-Dbdih::startLcpMasterTakeOver(Signal* signal, Uint32 nodeId){
- jam();
-
- c_lcpMasterTakeOverState.minTableId = ~0;
- c_lcpMasterTakeOverState.minFragId = ~0;
- c_lcpMasterTakeOverState.failedNodeId = nodeId;
-
- c_lcpMasterTakeOverState.set(LMTOS_WAIT_EMPTY_LCP, __LINE__);
-
- if(c_EMPTY_LCP_REQ_Counter.done()){
- jam();
- c_lcpState.m_LAST_LCP_FRAG_ORD.clearWaitingFor();
-
- EmptyLcpReq* req = (EmptyLcpReq*)signal->getDataPtrSend();
- req->senderRef = reference();
- sendLoopMacro(EMPTY_LCP_REQ, sendEMPTY_LCP_REQ);
- ndbrequire(!c_EMPTY_LCP_REQ_Counter.done());
- } else {
- /**
- * Node failure during master take over...
- */
- ndbout_c("Nodefail during master take over");
- }
-
- setLocalNodefailHandling(signal, nodeId, NF_LCP_TAKE_OVER);
-}
-
-void Dbdih::startGcpMasterTakeOver(Signal* signal, Uint32 oldMasterId){
- jam();
- /*--------------------------------------------------*/
- /* */
- /* THE MASTER HAS FAILED AND WE WERE ELECTED */
- /* TO BE THE NEW MASTER NODE. WE NEED TO QUERY*/
- /* ALL THE OTHER NODES ABOUT THEIR STATUS IN */
- /* ORDER TO BE ABLE TO TAKE OVER CONTROL OF */
- /* THE GLOBAL CHECKPOINT PROTOCOL AND THE */
- /* LOCAL CHECKPOINT PROTOCOL. */
- /*--------------------------------------------------*/
- if(!isMaster()){
- jam();
- return;
- }
- cmasterState = MASTER_TAKE_OVER_GCP;
- cmasterTakeOverNode = oldMasterId;
- MasterGCPReq * const req = (MasterGCPReq *)&signal->theData[0];
- req->masterRef = reference();
- req->failedNodeId = oldMasterId;
- sendLoopMacro(MASTER_GCPREQ, sendMASTER_GCPREQ);
- cgcpMasterTakeOverState = GMTOS_INITIAL;
-
- signal->theData[0] = NDB_LE_GCP_TakeoverStarted;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
-
- setLocalNodefailHandling(signal, oldMasterId, NF_GCP_TAKE_OVER);
-}//Dbdih::startGcpMasterTakeOver()
-
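- /**
- * We are becoming master while a take over of the failed starting node
- * was ongoing. Convert the slave take over state into a master state,
- * or stop the starting node if the state cannot be taken over.
- */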
-void Dbdih::handleTakeOverNewMaster(Signal* signal, Uint32 takeOverPtrI)
-{
- jam();
- if (takeOverPtrI != RNIL) {
- jam();
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = takeOverPtrI;
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
- bool ok = false;
- switch (takeOverPtr.p->toSlaveStatus) {
- case TakeOverRecord::TO_SLAVE_IDLE:
- ndbrequire(false);
- break;
- case TakeOverRecord::TO_SLAVE_STARTED:
- jam();
- case TakeOverRecord::TO_SLAVE_CREATE_PREPARE:
- jam();
- case TakeOverRecord::TO_SLAVE_COPY_FRAG_COMPLETED:
- jam();
- case TakeOverRecord::TO_SLAVE_CREATE_COMMIT:
- jam();
- ok = true;
- infoEvent("Unhandled MasterTO of TO slaveStatus=%d killing node %d",
- takeOverPtr.p->toSlaveStatus,
- takeOverPtr.p->toStartingNode);
- takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_ACTIVE;
-
- {
- BlockReference cntrRef = calcNdbCntrBlockRef(takeOverPtr.p->toStartingNode);
- SystemError * const sysErr = (SystemError*)&signal->theData[0];
- sysErr->errorCode = SystemError::CopyFragRefError;
- sysErr->errorRef = reference();
- sysErr->data1= 0;
- sysErr->data2= __LINE__;
- sendSignal(cntrRef, GSN_SYSTEM_ERROR, signal,
- SystemError::SignalLength, JBB);
- }
- break;
- case TakeOverRecord::TO_SLAVE_COPY_COMPLETED:
- ok = true;
- jam();
- takeOverPtr.p->toMasterStatus = TakeOverRecord::WAIT_LCP;
- break;
- }
- ndbrequire(ok);
- }//if
-}//Dbdih::handleTakeOverNewMaster()
-
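- /**
- * Start removing the failed node's replicas from all table records, one
- * table at a time via CONTINUEB, but only if the node was alive when it
- * failed.
- */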
-void Dbdih::startRemoveFailedNode(Signal* signal, NodeRecordPtr failedNodePtr)
-{
- Uint32 nodeId = failedNodePtr.i;
- if(failedNodePtr.p->nodeStatus != NodeRecord::DIED_NOW){
- jam();
- /**
- * If the node isn't alive, it can't be part of the LCP
- */
- ndbrequire(!c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.isWaitingFor(nodeId));
-
- /**
- * And there is no point in removing any replicas;
- * the node is dead...
- */
- return;
- }
-
- jam();
- signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
- signal->theData[1] = failedNodePtr.i;
- signal->theData[2] = 0; // Tab id
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
-
- setLocalNodefailHandling(signal, failedNodePtr.i, NF_REMOVE_NODE_FROM_TABLE);
-}//Dbdih::startRemoveFailedNode()
-
-/*--------------------------------------------------*/
-/* THE MASTER HAS FAILED AND THE NEW MASTER IS*/
-/* QUERYING THIS NODE ABOUT THE STATE OF THE */
-/* GLOBAL CHECKPOINT PROTOCOL */
-/*--------------------------------------------------*/
-void Dbdih::execMASTER_GCPREQ(Signal* signal)
-{
- NodeRecordPtr failedNodePtr;
- MasterGCPReq * const masterGCPReq = (MasterGCPReq *)&signal->theData[0];
- jamEntry();
- const BlockReference newMasterBlockref = masterGCPReq->masterRef;
- const Uint32 failedNodeId = masterGCPReq->failedNodeId;
- if (c_copyGCISlave.m_copyReason != CopyGCIReq::IDLE) {
- jam();
- /*--------------------------------------------------*/
- /* WE ARE CURRENTLY WRITING THE RESTART INFO */
- /* IN THIS NODE. SINCE ONLY ONE PROCESS IS */
- /* ALLOWED TO DO THIS AT A TIME WE MUST ENSURE*/
- /* THAT THIS IS NOT ONGOING WHEN THE NEW */
- /* MASTER TAKES OVER CONTROL. IF NOT ALL NODES*/
- /* RECEIVE THE SAME RESTART INFO DUE TO THE */
- /* FAILURE OF THE MASTER IT IS TAKEN CARE OF */
- /* BY THE NEW MASTER. */
- /*--------------------------------------------------*/
- sendSignalWithDelay(reference(), GSN_MASTER_GCPREQ,
- signal, 10, MasterGCPReq::SignalLength);
- return;
- }//if
- failedNodePtr.i = failedNodeId;
- ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
- if (failedNodePtr.p->nodeStatus == NodeRecord::ALIVE) {
- jam();
- /*--------------------------------------------------*/
- /* ENSURE THAT WE HAVE PROCESSED THE SIGNAL */
- /* NODE_FAILURE BEFORE WE PROCESS THIS REQUEST*/
- /* FROM THE NEW MASTER. THIS ENSURES THAT WE */
- /* HAVE REMOVED THE FAILED NODE FROM THE LIST */
- /* OF ACTIVE NODES AND SO FORTH. */
- /*--------------------------------------------------*/
- sendSignalWithDelay(reference(), GSN_MASTER_GCPREQ,
- signal, 10, MasterGCPReq::SignalLength);
- return;
- } else {
- ndbrequire(failedNodePtr.p->nodeStatus == NodeRecord::DYING);
- }//if
- MasterGCPConf::State gcpState;
- switch (cgcpParticipantState) {
- case GCP_PARTICIPANT_READY:
- jam();
- /*--------------------------------------------------*/
- /* THE GLOBAL CHECKPOINT IS NOT ACTIVE SINCE */
- /* THE PREVIOUS GLOBAL CHECKPOINT IS COMPLETED*/
- /* AND THE NEW ONE HAS NOT STARTED YET. */
- /*--------------------------------------------------*/
- gcpState = MasterGCPConf::GCP_READY;
- break;
- case GCP_PARTICIPANT_PREPARE_RECEIVED:
- jam();
- /*--------------------------------------------------*/
- /* GCP_PREPARE HAS BEEN RECEIVED AND A */
- /* RESPONSE HAS BEEN SENT. */
- /*--------------------------------------------------*/
- gcpState = MasterGCPConf::GCP_PREPARE_RECEIVED;
- break;
- case GCP_PARTICIPANT_COMMIT_RECEIVED:
- jam();
- /*------------------------------------------------*/
- /* GCP_COMMIT HAS BEEN RECEIVED BUT NOT YET*/
- /* GCP_TCFINISHED FROM LOCAL TC. */
- /*------------------------------------------------*/
- gcpState = MasterGCPConf::GCP_COMMIT_RECEIVED;
- break;
- case GCP_PARTICIPANT_TC_FINISHED:
- jam();
- /*------------------------------------------------*/
- /* GCP_COMMIT HAS BEEN RECEIVED AND ALSO */
- /* GCP_TCFINISHED HAS BEEN RECEIVED. */
- /*------------------------------------------------*/
- gcpState = MasterGCPConf::GCP_TC_FINISHED;
- break;
- case GCP_PARTICIPANT_COPY_GCI_RECEIVED:
- /*--------------------------------------------------*/
- /* COPY RESTART INFORMATION HAS BEEN RECEIVED */
- /* BUT NOT YET COMPLETED. */
- /*--------------------------------------------------*/
- ndbrequire(false);
- gcpState= MasterGCPConf::GCP_READY; // remove warning
- break;
- default:
- /*------------------------------------------------*/
- /* */
- /* THIS SHOULD NOT OCCUR SINCE THE ABOVE */
- /* STATES ARE THE ONLY POSSIBLE STATES AT A */
- /* NODE WHICH WAS NOT A MASTER NODE. */
- /*------------------------------------------------*/
- ndbrequire(false);
- gcpState= MasterGCPConf::GCP_READY; // remove warning
- break;
- }//switch
- MasterGCPConf * const masterGCPConf = (MasterGCPConf *)&signal->theData[0];
- masterGCPConf->gcpState = gcpState;
- masterGCPConf->senderNodeId = cownNodeId;
- masterGCPConf->failedNodeId = failedNodeId;
- masterGCPConf->newGCP = cnewgcp;
- masterGCPConf->latestLCP = SYSFILE->latestLCP_ID;
- masterGCPConf->oldestRestorableGCI = SYSFILE->oldestRestorableGCI;
- masterGCPConf->keepGCI = SYSFILE->keepGCI;
- for(Uint32 i = 0; i < NdbNodeBitmask::Size; i++)
- masterGCPConf->lcpActive[i] = SYSFILE->lcpActive[i];
- sendSignal(newMasterBlockref, GSN_MASTER_GCPCONF, signal,
- MasterGCPConf::SignalLength, JBB);
-}//Dbdih::execMASTER_GCPREQ()
-
-void Dbdih::execMASTER_GCPCONF(Signal* signal)
-{
- NodeRecordPtr senderNodePtr;
- MasterGCPConf * const masterGCPConf = (MasterGCPConf *)&signal->theData[0];
- jamEntry();
- senderNodePtr.i = masterGCPConf->senderNodeId;
- ptrCheckGuard(senderNodePtr, MAX_NDB_NODES, nodeRecord);
-
- MasterGCPConf::State gcpState = (MasterGCPConf::State)masterGCPConf->gcpState;
- const Uint32 failedNodeId = masterGCPConf->failedNodeId;
- const Uint32 newGcp = masterGCPConf->newGCP;
- const Uint32 latestLcpId = masterGCPConf->latestLCP;
- const Uint32 oldestRestorableGci = masterGCPConf->oldestRestorableGCI;
- const Uint32 oldestKeepGci = masterGCPConf->keepGCI;
- if (latestLcpId > SYSFILE->latestLCP_ID) {
- jam();
-#if 0
- ndbout_c("Dbdih: Setting SYSFILE->latestLCP_ID to %d", latestLcpId);
- SYSFILE->latestLCP_ID = latestLcpId;
-#endif
- SYSFILE->keepGCI = oldestKeepGci;
- SYSFILE->oldestRestorableGCI = oldestRestorableGci;
- for(Uint32 i = 0; i < NdbNodeBitmask::Size; i++)
- SYSFILE->lcpActive[i] = masterGCPConf->lcpActive[i];
- }//if
- switch (gcpState) {
- case MasterGCPConf::GCP_READY:
- jam();
- senderNodePtr.p->gcpstate = NodeRecord::READY;
- break;
- case MasterGCPConf::GCP_PREPARE_RECEIVED:
- jam();
- senderNodePtr.p->gcpstate = NodeRecord::PREPARE_RECEIVED;
- cnewgcp = newGcp;
- break;
- case MasterGCPConf::GCP_COMMIT_RECEIVED:
- jam();
- senderNodePtr.p->gcpstate = NodeRecord::COMMIT_SENT;
- break;
- case MasterGCPConf::GCP_TC_FINISHED:
- jam();
- senderNodePtr.p->gcpstate = NodeRecord::NODE_FINISHED;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- switch (cgcpMasterTakeOverState) {
- case GMTOS_INITIAL:
- switch (gcpState) {
- case MasterGCPConf::GCP_READY:
- jam();
- cgcpMasterTakeOverState = ALL_READY;
- break;
- case MasterGCPConf::GCP_PREPARE_RECEIVED:
- jam();
- cgcpMasterTakeOverState = ALL_PREPARED;
- break;
- case MasterGCPConf::GCP_COMMIT_RECEIVED:
- jam();
- cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
- break;
- case MasterGCPConf::GCP_TC_FINISHED:
- jam();
- cgcpMasterTakeOverState = COMMIT_COMPLETED;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- break;
- case ALL_READY:
- switch (gcpState) {
- case MasterGCPConf::GCP_READY:
- jam();
- /*empty*/;
- break;
- case MasterGCPConf::GCP_PREPARE_RECEIVED:
- jam();
- cgcpMasterTakeOverState = PREPARE_STARTED_NOT_COMMITTED;
- break;
- case MasterGCPConf::GCP_COMMIT_RECEIVED:
- ndbrequire(false);
- break;
- case MasterGCPConf::GCP_TC_FINISHED:
- jam();
- cgcpMasterTakeOverState = SAVE_STARTED_NOT_COMPLETED;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- break;
- case PREPARE_STARTED_NOT_COMMITTED:
- switch (gcpState) {
- case MasterGCPConf::GCP_READY:
- jam();
- break;
- case MasterGCPConf::GCP_PREPARE_RECEIVED:
- jam();
- break;
- case MasterGCPConf::GCP_COMMIT_RECEIVED:
- ndbrequire(false);
- break;
- case MasterGCPConf::GCP_TC_FINISHED:
- ndbrequire(false);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- break;
- case ALL_PREPARED:
- switch (gcpState) {
- case MasterGCPConf::GCP_READY:
- jam();
- cgcpMasterTakeOverState = PREPARE_STARTED_NOT_COMMITTED;
- break;
- case MasterGCPConf::GCP_PREPARE_RECEIVED:
- jam();
- break;
- case MasterGCPConf::GCP_COMMIT_RECEIVED:
- jam();
- cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
- break;
- case MasterGCPConf::GCP_TC_FINISHED:
- jam();
- cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- break;
- case COMMIT_STARTED_NOT_COMPLETED:
- switch (gcpState) {
- case MasterGCPConf::GCP_READY:
- ndbrequire(false);
- break;
- case MasterGCPConf::GCP_PREPARE_RECEIVED:
- jam();
- break;
- case MasterGCPConf::GCP_COMMIT_RECEIVED:
- jam();
- break;
- case MasterGCPConf::GCP_TC_FINISHED:
- jam();
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- break;
- case COMMIT_COMPLETED:
- switch (gcpState) {
- case MasterGCPConf::GCP_READY:
- cgcpMasterTakeOverState = SAVE_STARTED_NOT_COMPLETED;
- break;
- case MasterGCPConf::GCP_PREPARE_RECEIVED:
- jam();
- cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
- break;
- case MasterGCPConf::GCP_COMMIT_RECEIVED:
- jam();
- cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
- break;
- case MasterGCPConf::GCP_TC_FINISHED:
- jam();
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- break;
- case SAVE_STARTED_NOT_COMPLETED:
- switch (gcpState) {
- case MasterGCPConf::GCP_READY:
- jam();
- break;
- case MasterGCPConf::GCP_PREPARE_RECEIVED:
- ndbrequire(false);
- break;
- case MasterGCPConf::GCP_COMMIT_RECEIVED:
- ndbrequire(false);
- break;
- case MasterGCPConf::GCP_TC_FINISHED:
- jam();
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- receiveLoopMacro(MASTER_GCPREQ, senderNodePtr.i);
- /*-------------------------------------------------------------------------*/
- // We have now received all responses and are ready to take over the GCP
- // protocol as master.
- /*-------------------------------------------------------------------------*/
- MASTER_GCPhandling(signal, failedNodeId);
- return;
-}//Dbdih::execMASTER_GCPCONF()
-
-void Dbdih::execMASTER_GCPREF(Signal* signal)
-{
- const MasterGCPRef * const ref = (MasterGCPRef *)&signal->theData[0];
- jamEntry();
- receiveLoopMacro(MASTER_GCPREQ, ref->senderNodeId);
- /*-------------------------------------------------------------------------*/
- // We have now received all responses and are ready to take over the GCP
- // protocol as master.
- /*-------------------------------------------------------------------------*/
- MASTER_GCPhandling(signal, ref->failedNodeId);
-}//Dbdih::execMASTER_GCPREF()
-
-void Dbdih::MASTER_GCPhandling(Signal* signal, Uint32 failedNodeId)
-{
- NodeRecordPtr failedNodePtr;
- cmasterState = MASTER_ACTIVE;
- /*----------------------------------------------------------*/
- /* REMOVE ALL ACTIVE STATUS ON ALREADY FAILED NODES */
- /* THIS IS PERFORMED HERE SINCE WE GET THE LCP ACTIVE */
- /* STATUS AS PART OF THE COPY RESTART INFO AND THIS IS*/
- /* HANDLED BY THE MASTER GCP TAKE OVER PROTOCOL. */
- /*----------------------------------------------------------*/
-
- failedNodePtr.i = failedNodeId;
- ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
- switch (cgcpMasterTakeOverState) {
- case ALL_READY:
- jam();
- startGcp(signal);
- break;
- case PREPARE_STARTED_NOT_COMMITTED:
- {
- NodeRecordPtr nodePtr;
- jam();
- c_GCP_PREPARE_Counter.clearWaitingFor();
- nodePtr.i = cfirstAliveNode;
- do {
- jam();
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- if (nodePtr.p->gcpstate == NodeRecord::READY) {
- jam();
- c_GCP_PREPARE_Counter.setWaitingFor(nodePtr.i);
- sendGCP_PREPARE(signal, nodePtr.i);
- }//if
- nodePtr.i = nodePtr.p->nextNode;
- } while(nodePtr.i != RNIL);
- if (c_GCP_PREPARE_Counter.done()) {
- jam();
- gcpcommitreqLab(signal);
- }//if
- break;
- }
- case ALL_PREPARED:
- jam();
- gcpcommitreqLab(signal);
- break;
- case COMMIT_STARTED_NOT_COMPLETED:
- {
- NodeRecordPtr nodePtr;
- jam();
- c_GCP_COMMIT_Counter.clearWaitingFor();
- nodePtr.i = cfirstAliveNode;
- do {
- jam();
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- if (nodePtr.p->gcpstate == NodeRecord::PREPARE_RECEIVED) {
- jam();
- sendGCP_COMMIT(signal, nodePtr.i);
- c_GCP_COMMIT_Counter.setWaitingFor(nodePtr.i);
- } else {
- ndbrequire((nodePtr.p->gcpstate == NodeRecord::NODE_FINISHED) ||
- (nodePtr.p->gcpstate == NodeRecord::COMMIT_SENT));
- }//if
- nodePtr.i = nodePtr.p->nextNode;
- } while(nodePtr.i != RNIL);
- if (c_GCP_COMMIT_Counter.done()){
- jam();
- gcpsavereqLab(signal);
- }//if
- break;
- }
- case COMMIT_COMPLETED:
- jam();
- gcpsavereqLab(signal);
- break;
- case SAVE_STARTED_NOT_COMPLETED:
- {
- NodeRecordPtr nodePtr;
- jam();
- SYSFILE->newestRestorableGCI = coldgcp;
- nodePtr.i = cfirstAliveNode;
- do {
- jam();
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- SYSFILE->lastCompletedGCI[nodePtr.i] = coldgcp;
- nodePtr.i = nodePtr.p->nextNode;
- } while (nodePtr.i != RNIL);
- /**-------------------------------------------------------------------
- * THE FAILED NODE ALSO PARTICIPATED IN THIS GLOBAL CHECKPOINT,
- * WHICH IS RECORDED HERE.
- *-------------------------------------------------------------------*/
- SYSFILE->lastCompletedGCI[failedNodeId] = coldgcp;
- copyGciLab(signal, CopyGCIReq::GLOBAL_CHECKPOINT);
- break;
- }
- default:
- ndbrequire(false);
- break;
- }//switch
-
- signal->theData[0] = NDB_LE_GCP_TakeoverCompleted;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
-
- /*--------------------------------------------------*/
- /* WE SEPARATE HANDLING OF GLOBAL CHECKPOINTS */
- /* AND LOCAL CHECKPOINTS HERE. LCP'S HAVE TO */
- /* REMOVE ALL FAILED FRAGMENTS BEFORE WE CAN */
- /* HANDLE THE LCP PROTOCOL. */
- /*--------------------------------------------------*/
- checkLocalNodefailComplete(signal, failedNodeId, NF_GCP_TAKE_OVER);
-
- return;
-}//Dbdih::MASTER_GCPhandling()
-
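- /**
- * Invalidate all stored LCP information for the failed node's replicas.
- * Tables are scanned in chunks of RT_BREAK via CONTINUEB; when the scan
- * is complete the node is again allowed to start.
- */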
-void
-Dbdih::invalidateNodeLCP(Signal* signal, Uint32 nodeId, Uint32 tableId)
-{
- jamEntry();
- TabRecordPtr tabPtr;
- tabPtr.i = tableId;
- const Uint32 RT_BREAK = 64;
- if (ERROR_INSERTED(7125)) {
- return;
- }//if
- for (Uint32 i = 0; i<RT_BREAK; i++) {
- jam();
- if (tabPtr.i >= ctabFileSize){
- jam();
- /**
- * Done with the entire table loop.
- * Report back to the master.
- */
- setAllowNodeStart(nodeId, true);
- if (getNodeStatus(nodeId) == NodeRecord::STARTING) {
- jam();
- StartInfoConf * conf = (StartInfoConf*)&signal->theData[0];
- conf->sendingNodeId = cownNodeId;
- conf->startingNodeId = nodeId;
- sendSignal(cmasterdihref, GSN_START_INFOCONF, signal,
- StartInfoConf::SignalLength, JBB);
- }//if
- return;
- }//if
- ptrAss(tabPtr, tabRecord);
- if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) {
- jam();
- invalidateNodeLCP(signal, nodeId, tabPtr);
- return;
- }//if
- tabPtr.i++;
- }//for
- signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP;
- signal->theData[1] = nodeId;
- signal->theData[2] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
-}//Dbdih::invalidateNodeLCP()
-
-void
-Dbdih::invalidateNodeLCP(Signal* signal, Uint32 nodeId, TabRecordPtr tabPtr)
-{
- /**
- * Check that no one else is using the table descriptor
- */
- if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
- jam();
- signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP;
- signal->theData[1] = nodeId;
- signal->theData[2] = tabPtr.i;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 20, 3);
- return;
- }//if
-
- /**
- * For each fragment
- */
- bool modified = false;
- FragmentstorePtr fragPtr;
- for(Uint32 fragNo = 0; fragNo < tabPtr.p->totalfragments; fragNo++){
- jam();
- getFragstore(tabPtr.p, fragNo, fragPtr);
- /**
- * For each replica record
- */
- ReplicaRecordPtr replicaPtr;
- for(replicaPtr.i = fragPtr.p->oldStoredReplicas; replicaPtr.i != RNIL;
- replicaPtr.i = replicaPtr.p->nextReplica) {
- jam();
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- if(replicaPtr.p->procNode == nodeId){
- jam();
- /**
- * Found one with correct node id
- */
- /**
- * Invalidate all LCP's
- */
- modified = true;
- for(int i = 0; i < MAX_LCP_STORED; i++) {
- replicaPtr.p->lcpStatus[i] = ZINVALID;
- }//for
- /**
- * And reset nextLcp
- */
- replicaPtr.p->nextLcp = 0;
- replicaPtr.p->noCrashedReplicas = 0;
- }//if
- }//for
- }//for
-
- if (modified) {
- jam();
- /**
- * Save table description to disk
- */
- tabPtr.p->tabCopyStatus = TabRecord::CS_INVALIDATE_NODE_LCP;
- tabPtr.p->tabUpdateState = TabRecord::US_INVALIDATE_NODE_LCP;
- tabPtr.p->tabRemoveNode = nodeId;
- signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
- signal->theData[1] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- }
-
- jam();
- /**
- * Move to next table
- */
- tabPtr.i++;
- signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP;
- signal->theData[1] = nodeId;
- signal->theData[2] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
- return;
-}//Dbdih::invalidateNodeLCP()
-
-/*------------------------------------------------*/
-/* INPUT: TABPTR */
-/* TNODEID */
-/*------------------------------------------------*/
-void Dbdih::removeNodeFromTables(Signal* signal,
- Uint32 nodeId, Uint32 tableId)
-{
- jamEntry();
- TabRecordPtr tabPtr;
- tabPtr.i = tableId;
- const Uint32 RT_BREAK = 64;
- for (Uint32 i = 0; i<RT_BREAK; i++) {
- jam();
- if (tabPtr.i >= ctabFileSize){
- jam();
- removeNodeFromTablesComplete(signal, nodeId);
- return;
- }//if
-
- ptrAss(tabPtr, tabRecord);
- if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) {
- jam();
- removeNodeFromTable(signal, nodeId, tabPtr);
- return;
- }//if
- tabPtr.i++;
- }//for
- signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
- signal->theData[1] = nodeId;
- signal->theData[2] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
-}
-
-void Dbdih::removeNodeFromTable(Signal* signal,
- Uint32 nodeId, TabRecordPtr tabPtr){
-
- /**
- * Check that no one else is using the table descriptor
- */
- if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
- jam();
- signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
- signal->theData[1] = nodeId;
- signal->theData[2] = tabPtr.i;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 20, 3);
- return;
- }//if
-
- /**
- * For each fragment
- */
- Uint32 noOfRemovedReplicas = 0; // No of replicas removed
- Uint32 noOfRemovedLcpReplicas = 0; // No of replicas in LCP removed
- Uint32 noOfRemainingLcpReplicas = 0;// No of replicas in LCP remaining
-
- //const Uint32 lcpId = SYSFILE->latestLCP_ID;
- const bool lcpOngoingFlag = (tabPtr.p->tabLcpStatus== TabRecord::TLS_ACTIVE);
-
- FragmentstorePtr fragPtr;
- for(Uint32 fragNo = 0; fragNo < tabPtr.p->totalfragments; fragNo++){
- jam();
- getFragstore(tabPtr.p, fragNo, fragPtr);
-
- /**
- * For each replica record
- */
- Uint32 replicaNo = 0;
- ReplicaRecordPtr replicaPtr;
- for(replicaPtr.i = fragPtr.p->storedReplicas; replicaPtr.i != RNIL;
- replicaPtr.i = replicaPtr.p->nextReplica, replicaNo++) {
- jam();
-
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- if(replicaPtr.p->procNode == nodeId){
- jam();
- noOfRemovedReplicas++;
- removeNodeFromStored(nodeId, fragPtr, replicaPtr);
- if(replicaPtr.p->lcpOngoingFlag){
- jam();
- /**
- * This replica is currently LCP:ed
- */
- ndbrequire(fragPtr.p->noLcpReplicas > 0);
- fragPtr.p->noLcpReplicas --;
-
- noOfRemovedLcpReplicas ++;
- replicaPtr.p->lcpOngoingFlag = false;
- }
- }
- }
- noOfRemainingLcpReplicas += fragPtr.p->noLcpReplicas;
- }
-
- if(noOfRemovedReplicas == 0){
- jam();
- /**
- * The table had no replica on the failed node;
- * continue with the next table
- */
- tabPtr.i++;
- signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
- signal->theData[1] = nodeId;
- signal->theData[2] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
- return;
- }
-
- /**
- * We did remove at least one replica
- */
- bool ok = false;
- switch(tabPtr.p->tabLcpStatus){
- case TabRecord::TLS_COMPLETED:
- ok = true;
- jam();
- /**
- * WE WILL WRITE THE TABLE DESCRIPTION TO DISK AT THIS TIME
- * INDEPENDENT OF WHAT THE LOCAL CHECKPOINT NEEDED.
- * THIS IS TO ENSURE THAT THE FAILED NODES ARE ALSO UPDATED ON DISK
- * IN THE DIH DATA STRUCTURES BEFORE WE COMPLETE HANDLING OF THE
- * NODE FAILURE.
- */
- ndbrequire(noOfRemovedLcpReplicas == 0);
-
- tabPtr.p->tabCopyStatus = TabRecord::CS_REMOVE_NODE;
- tabPtr.p->tabUpdateState = TabRecord::US_REMOVE_NODE;
- tabPtr.p->tabRemoveNode = nodeId;
- signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
- signal->theData[1] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- case TabRecord::TLS_ACTIVE:
- ok = true;
- jam();
- /**
- * The table is participating in an LCP currently
- */
- // Fall through
- break;
- case TabRecord::TLS_WRITING_TO_FILE:
- ok = true;
- jam();
- /**
- * This should never happen since we check the tabCopyStatus
- * at the beginning of this function
- */
- ndbrequire(lcpOngoingFlag);
- ndbrequire(false);
- break;
- }
- ndbrequire(ok);
-
- /**
- * The table is participating in an LCP currently
- * and we removed some replicas that should have been checkpointed
- */
- ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE);
- ndbrequire(tabPtr.p->tabLcpStatus == TabRecord::TLS_ACTIVE);
-
- /**
- * Save the table
- */
- tabPtr.p->tabCopyStatus = TabRecord::CS_REMOVE_NODE;
- tabPtr.p->tabUpdateState = TabRecord::US_REMOVE_NODE;
- tabPtr.p->tabRemoveNode = nodeId;
- signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
- signal->theData[1] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
-
- if(noOfRemainingLcpReplicas == 0){
- jam();
- /**
- * Removing the failed node's replicas made the LCP complete
- */
- tabPtr.p->tabLcpStatus = TabRecord::TLS_WRITING_TO_FILE;
- checkLcpAllTablesDoneInLqh();
- }
-}
-
-void
-Dbdih::removeNodeFromTablesComplete(Signal* signal, Uint32 nodeId){
- jam();
-
- /**
- * Check if we "accidentally" completed an LCP
- */
- checkLcpCompletedLab(signal);
-
- /**
- * Check if we (DIH) are finished with node fail handling
- */
- checkLocalNodefailComplete(signal, nodeId, NF_REMOVE_NODE_FROM_TABLE);
-}
-
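- /**
- * Clear the given node failure handling step. When no steps remain
- * outstanding, report DIH's node failure handling as complete with
- * NF_COMPLETEREP.
- */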
-void
-Dbdih::checkLocalNodefailComplete(Signal* signal, Uint32 failedNodeId,
- NodefailHandlingStep step){
- jam();
-
- NodeRecordPtr nodePtr;
- nodePtr.i = failedNodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
-
- ndbrequire(nodePtr.p->m_nodefailSteps.get(step));
- nodePtr.p->m_nodefailSteps.clear(step);
-
- if(nodePtr.p->m_nodefailSteps.count() > 0){
- jam();
- return;
- }
-
- NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0];
- nf->blockNo = DBDIH;
- nf->nodeId = cownNodeId;
- nf->failedNodeId = failedNodeId;
- nf->from = __LINE__;
- sendSignal(reference(), GSN_NF_COMPLETEREP, signal,
- NFCompleteRep::SignalLength, JBB);
-}
-
-
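- /**
- * Mark a node failure handling step as outstanding for the failed node.
- * checkLocalNodefailComplete() clears it when the step finishes.
- */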
-void
-Dbdih::setLocalNodefailHandling(Signal* signal, Uint32 failedNodeId,
- NodefailHandlingStep step){
- jam();
-
- NodeRecordPtr nodePtr;
- nodePtr.i = failedNodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
-
- ndbrequire(!nodePtr.p->m_nodefailSteps.get(step));
- nodePtr.p->m_nodefailSteps.set(step);
-}
-
-void Dbdih::startLcpTakeOverLab(Signal* signal, Uint32 failedNodeId)
-{
- /*--------------------------------------------------------------------*/
- // Start LCP master take over process. Consists of the following steps.
- // 1) Ensure that all LQH's have reported all fragments they have been
- // told to checkpoint. Can be a fairly long step time-wise.
- // 2) Query all nodes about their LCP status.
- // During the query process we do not want our own state to change.
- // This can change due to delayed reception of LCP_REPORT, completed
- // save of table on disk or reception of DIH_LCPCOMPLETE from other
- // node.
- /*--------------------------------------------------------------------*/
-}//Dbdih::startLcpTakeOverLab()
-
-void Dbdih::execEMPTY_LCP_CONF(Signal* signal)
-{
- jamEntry();
-
- ndbrequire(c_lcpMasterTakeOverState.state == LMTOS_WAIT_EMPTY_LCP);
-
- const EmptyLcpConf * const conf = (EmptyLcpConf *)&signal->theData[0];
- Uint32 nodeId = conf->senderNodeId;
-
- if(!conf->idle){
- jam();
- if (conf->tableId < c_lcpMasterTakeOverState.minTableId) {
- jam();
- c_lcpMasterTakeOverState.minTableId = conf->tableId;
- c_lcpMasterTakeOverState.minFragId = conf->fragmentId;
- } else if (conf->tableId == c_lcpMasterTakeOverState.minTableId &&
- conf->fragmentId < c_lcpMasterTakeOverState.minFragId) {
- jam();
- c_lcpMasterTakeOverState.minFragId = conf->fragmentId;
- }//if
- if(isMaster()){
- jam();
- c_lcpState.m_LAST_LCP_FRAG_ORD.setWaitingFor(nodeId);
- }
- }
-
- receiveLoopMacro(EMPTY_LCP_REQ, nodeId);
- /*--------------------------------------------------------------------*/
- // Received all EMPTY_LCPCONF. We can continue with next phase of the
- // take over LCP master process.
- /*--------------------------------------------------------------------*/
- c_lcpMasterTakeOverState.set(LMTOS_WAIT_LCP_FRAG_REP, __LINE__);
- checkEmptyLcpComplete(signal);
- return;
-}//Dbdih::execEMPTY_LCP_CONF()
-
-void
-Dbdih::checkEmptyLcpComplete(Signal *signal){
-
- ndbrequire(c_lcpMasterTakeOverState.state == LMTOS_WAIT_LCP_FRAG_REP);
-
- if(c_lcpState.noOfLcpFragRepOutstanding > 0){
- jam();
- return;
- }
-
- if(isMaster()){
- jam();
-
- signal->theData[0] = NDB_LE_LCP_TakeoverStarted;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
-
- signal->theData[0] = 7012;
- execDUMP_STATE_ORD(signal);
-
- c_lcpMasterTakeOverState.set(LMTOS_INITIAL, __LINE__);
- MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0];
- req->masterRef = reference();
- req->failedNodeId = c_lcpMasterTakeOverState.failedNodeId;
- sendLoopMacro(MASTER_LCPREQ, sendMASTER_LCPREQ);
- } else {
- sendMASTER_LCPCONF(signal);
- }
-}
-
-/*--------------------------------------------------*/
-/* THE MASTER HAS FAILED AND THE NEW MASTER IS*/
-/* QUERYING THIS NODE ABOUT THE STATE OF THE */
-/* LOCAL CHECKPOINT PROTOCOL. */
-/*--------------------------------------------------*/
-void Dbdih::execMASTER_LCPREQ(Signal* signal)
-{
- const MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0];
- jamEntry();
- const BlockReference newMasterBlockref = req->masterRef;
-
- Uint32 failedNodeId = req->failedNodeId;
-
- /**
- * There can be no take over with the same master
- */
- ndbrequire(c_lcpState.m_masterLcpDihRef != newMasterBlockref);
- c_lcpState.m_masterLcpDihRef = newMasterBlockref;
- c_lcpState.m_MASTER_LCPREQ_Received = true;
- c_lcpState.m_MASTER_LCPREQ_FailedNodeId = failedNodeId;
-
- if(newMasterBlockref != cmasterdihref){
- jam();
- ndbrequire(0);
- }
-
- sendMASTER_LCPCONF(signal);
-}//Dbdih::execMASTER_LCPREQ()
-
-void
-Dbdih::sendMASTER_LCPCONF(Signal * signal){
-
- if(!c_EMPTY_LCP_REQ_Counter.done()){
- /**
- * Have not received all EMPTY_LCP_CONF yet;
- * dare not answer with MASTER_LCPCONF yet
- */
- jam();
- return;
- }
-
- if(!c_lcpState.m_MASTER_LCPREQ_Received){
- jam();
- /**
- * Has not received MASTER_LCPREQ yet
- */
- return;
- }
-
- if(c_lcpState.lcpStatus == LCP_INIT_TABLES){
- jam();
- /**
- * Still aborting old initLcpLab
- */
- return;
- }
-
- if(c_lcpState.lcpStatus == LCP_COPY_GCI){
- jam();
- /**
- * Restart it
- */
- //Uint32 lcpId = SYSFILE->latestLCP_ID;
- SYSFILE->latestLCP_ID--;
- c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
-#if 0
- if(c_copyGCISlave.m_copyReason == CopyGCIReq::LOCAL_CHECKPOINT){
- ndbout_c("Dbdih: Also resetting c_copyGCISlave");
- c_copyGCISlave.m_copyReason = CopyGCIReq::IDLE;
- c_copyGCISlave.m_expectedNextWord = 0;
- }
-#endif
- }
-
- bool ok = false;
- MasterLCPConf::State lcpState;
- switch (c_lcpState.lcpStatus) {
- case LCP_STATUS_IDLE:
- ok = true;
- jam();
- /*------------------------------------------------*/
- /* LOCAL CHECKPOINT IS CURRENTLY NOT ACTIVE */
- /* SINCE NO COPY OF RESTART INFORMATION HAS */
- /* BEEN RECEIVED YET. ALSO THE PREVIOUS */
- /* CHECKPOINT HAS BEEN FULLY COMPLETED. */
- /*------------------------------------------------*/
- lcpState = MasterLCPConf::LCP_STATUS_IDLE;
- break;
- case LCP_STATUS_ACTIVE:
- ok = true;
- jam();
- /*--------------------------------------------------*/
- /* COPY OF RESTART INFORMATION HAS BEEN */
- /* PERFORMED AND A RESPONSE HAS BEEN SENT. */
- /*--------------------------------------------------*/
- lcpState = MasterLCPConf::LCP_STATUS_ACTIVE;
- break;
- case LCP_TAB_COMPLETED:
- ok = true;
- jam();
- /*--------------------------------------------------------*/
- /* ALL LCP_REPORT'S HAVE BEEN COMPLETED FOR */
- /* ALL TABLES. THE SAVE OF AT LEAST ONE */
- /* TABLE IS STILL ONGOING. */
- /*--------------------------------------------------------*/
- lcpState = MasterLCPConf::LCP_TAB_COMPLETED;
- break;
- case LCP_TAB_SAVED:
- ok = true;
- jam();
- /*--------------------------------------------------------*/
- /* ALL LCP_REPORT'S HAVE BEEN COMPLETED FOR */
- /* ALL TABLES. ALL TABLES HAVE ALSO BEEN SAVED. */
- /* ALL OTHER NODES ARE NOT YET FINISHED WITH */
- /* THE LOCAL CHECKPOINT. */
- /*--------------------------------------------------------*/
- lcpState = MasterLCPConf::LCP_TAB_SAVED;
- break;
- case LCP_TCGET:
- case LCP_CALCULATE_KEEP_GCI:
- case LCP_TC_CLOPSIZE:
- case LCP_START_LCP_ROUND:
- /**
- * These states should only exist on the master,
- * but since this is a master take over
- * they are not allowed here
- */
- ndbrequire(false);
- lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning
- break;
- case LCP_COPY_GCI:
- case LCP_INIT_TABLES:
- ok = true;
- /**
- * These two states are handled by if statements above
- */
- ndbrequire(false);
- lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning
- break;
- }//switch
- ndbrequire(ok);
-
- Uint32 failedNodeId = c_lcpState.m_MASTER_LCPREQ_FailedNodeId;
- MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0];
- conf->senderNodeId = cownNodeId;
- conf->lcpState = lcpState;
- conf->failedNodeId = failedNodeId;
- sendSignal(c_lcpState.m_masterLcpDihRef, GSN_MASTER_LCPCONF,
- signal, MasterLCPConf::SignalLength, JBB);
-
- // Answer to MASTER_LCPREQ sent, reset flag so
- // that it's not sent again before another request comes in
- c_lcpState.m_MASTER_LCPREQ_Received = false;
-
- if(c_lcpState.lcpStatus == LCP_TAB_SAVED){
-#ifdef VM_TRACE
- ndbout_c("Sending extra GSN_LCP_COMPLETE_REP to new master");
-#endif
- sendLCP_COMPLETE_REP(signal);
- }
-
- if(!isMaster()){
- c_lcpMasterTakeOverState.set(LMTOS_IDLE, __LINE__);
- checkLocalNodefailComplete(signal, failedNodeId, NF_LCP_TAKE_OVER);
- }
-
- return;
-}
-
-NdbOut&
-operator<<(NdbOut& out, const Dbdih::LcpMasterTakeOverState state){
- switch(state){
- case Dbdih::LMTOS_IDLE:
- out << "LMTOS_IDLE";
- break;
- case Dbdih::LMTOS_WAIT_EMPTY_LCP:
- out << "LMTOS_WAIT_EMPTY_LCP";
- break;
- case Dbdih::LMTOS_WAIT_LCP_FRAG_REP:
- out << "LMTOS_WAIT_LCP_FRAG_REP";
- break;
- case Dbdih::LMTOS_INITIAL:
- out << "LMTOS_INITIAL";
- break;
- case Dbdih::LMTOS_ALL_IDLE:
- out << "LMTOS_ALL_IDLE";
- break;
- case Dbdih::LMTOS_ALL_ACTIVE:
- out << "LMTOS_ALL_ACTIVE";
- break;
- case Dbdih::LMTOS_LCP_CONCLUDING:
- out << "LMTOS_LCP_CONCLUDING";
- break;
- case Dbdih::LMTOS_COPY_ONGOING:
- out << "LMTOS_COPY_ONGOING";
- break;
- }
- return out;
-}
-
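-/**
- * Table-driven handling of LCP master take-over: each row maps the
- * pair (current take-over state, LCP state reported by a participant
- * in MASTER_LCPCONF) to the new take-over state. execMASTER_LCPCONF
- * below scans this table linearly and requires that a matching row
- * exists; an unlisted combination is treated as an internal error.
- */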
-struct MASTERLCP_StateTransitions {
- Dbdih::LcpMasterTakeOverState CurrentState;
- MasterLCPConf::State ParticipantState;
- Dbdih::LcpMasterTakeOverState NewState;
-};
-
-static const
-MASTERLCP_StateTransitions g_masterLCPTakeoverStateTransitions[] = {
- /**
- * Current = LMTOS_INITIAL
- */
- { Dbdih::LMTOS_INITIAL,
- MasterLCPConf::LCP_STATUS_IDLE,
- Dbdih::LMTOS_ALL_IDLE },
-
- { Dbdih::LMTOS_INITIAL,
- MasterLCPConf::LCP_STATUS_ACTIVE,
- Dbdih::LMTOS_ALL_ACTIVE },
-
- { Dbdih::LMTOS_INITIAL,
- MasterLCPConf::LCP_TAB_COMPLETED,
- Dbdih::LMTOS_LCP_CONCLUDING },
-
- { Dbdih::LMTOS_INITIAL,
- MasterLCPConf::LCP_TAB_SAVED,
- Dbdih::LMTOS_LCP_CONCLUDING },
-
- /**
- * Current = LMTOS_ALL_IDLE
- */
- { Dbdih::LMTOS_ALL_IDLE,
- MasterLCPConf::LCP_STATUS_IDLE,
- Dbdih::LMTOS_ALL_IDLE },
-
- { Dbdih::LMTOS_ALL_IDLE,
- MasterLCPConf::LCP_STATUS_ACTIVE,
- Dbdih::LMTOS_COPY_ONGOING },
-
- { Dbdih::LMTOS_ALL_IDLE,
- MasterLCPConf::LCP_TAB_COMPLETED,
- Dbdih::LMTOS_LCP_CONCLUDING },
-
- { Dbdih::LMTOS_ALL_IDLE,
- MasterLCPConf::LCP_TAB_SAVED,
- Dbdih::LMTOS_LCP_CONCLUDING },
-
- /**
- * Current = LMTOS_COPY_ONGOING
- */
- { Dbdih::LMTOS_COPY_ONGOING,
- MasterLCPConf::LCP_STATUS_IDLE,
- Dbdih::LMTOS_COPY_ONGOING },
-
- { Dbdih::LMTOS_COPY_ONGOING,
- MasterLCPConf::LCP_STATUS_ACTIVE,
- Dbdih::LMTOS_COPY_ONGOING },
-
- /**
- * Current = LMTOS_ALL_ACTIVE
- */
- { Dbdih::LMTOS_ALL_ACTIVE,
- MasterLCPConf::LCP_STATUS_IDLE,
- Dbdih::LMTOS_COPY_ONGOING },
-
- { Dbdih::LMTOS_ALL_ACTIVE,
- MasterLCPConf::LCP_STATUS_ACTIVE,
- Dbdih::LMTOS_ALL_ACTIVE },
-
- { Dbdih::LMTOS_ALL_ACTIVE,
- MasterLCPConf::LCP_TAB_COMPLETED,
- Dbdih::LMTOS_LCP_CONCLUDING },
-
- { Dbdih::LMTOS_ALL_ACTIVE,
- MasterLCPConf::LCP_TAB_SAVED,
- Dbdih::LMTOS_LCP_CONCLUDING },
-
- /**
- * Current = LMTOS_LCP_CONCLUDING
- */
- { Dbdih::LMTOS_LCP_CONCLUDING,
- MasterLCPConf::LCP_STATUS_IDLE,
- Dbdih::LMTOS_LCP_CONCLUDING },
-
- { Dbdih::LMTOS_LCP_CONCLUDING,
- MasterLCPConf::LCP_STATUS_ACTIVE,
- Dbdih::LMTOS_LCP_CONCLUDING },
-
- { Dbdih::LMTOS_LCP_CONCLUDING,
- MasterLCPConf::LCP_TAB_COMPLETED,
- Dbdih::LMTOS_LCP_CONCLUDING },
-
- { Dbdih::LMTOS_LCP_CONCLUDING,
- MasterLCPConf::LCP_TAB_SAVED,
- Dbdih::LMTOS_LCP_CONCLUDING }
-};
-
-const Uint32 g_masterLCPTakeoverStateTransitionsRows =
-sizeof(g_masterLCPTakeoverStateTransitions) / sizeof(struct MASTERLCP_StateTransitions);
-
-void Dbdih::execMASTER_LCPCONF(Signal* signal)
-{
- const MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0];
- jamEntry();
- Uint32 senderNodeId = conf->senderNodeId;
- MasterLCPConf::State lcpState = (MasterLCPConf::State)conf->lcpState;
- const Uint32 failedNodeId = conf->failedNodeId;
- NodeRecordPtr nodePtr;
- nodePtr.i = senderNodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- nodePtr.p->lcpStateAtTakeOver = lcpState;
-
-#ifdef VM_TRACE
- ndbout_c("MASTER_LCPCONF");
- printMASTER_LCP_CONF(stdout, &signal->theData[0], 0, 0);
-#endif
-
- bool found = false;
- for(Uint32 i = 0; i<g_masterLCPTakeoverStateTransitionsRows; i++){
- const struct MASTERLCP_StateTransitions * valid =
- &g_masterLCPTakeoverStateTransitions[i];
-
- if(valid->CurrentState == c_lcpMasterTakeOverState.state &&
- valid->ParticipantState == lcpState){
- jam();
- found = true;
- c_lcpMasterTakeOverState.set(valid->NewState, __LINE__);
- break;
- }
- }
- ndbrequire(found);
-
- bool ok = false;
- switch(lcpState){
- case MasterLCPConf::LCP_STATUS_IDLE:
- ok = true;
- break;
- case MasterLCPConf::LCP_STATUS_ACTIVE:
- case MasterLCPConf::LCP_TAB_COMPLETED:
- case MasterLCPConf::LCP_TAB_SAVED:
- ok = true;
- c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.setWaitingFor(nodePtr.i);
- break;
- }
- ndbrequire(ok);
-
- receiveLoopMacro(MASTER_LCPREQ, senderNodeId);
- /*-------------------------------------------------------------------------*/
- // We have now received all responses and are ready to take over the LCP
- // protocol as master.
- /*-------------------------------------------------------------------------*/
- MASTER_LCPhandling(signal, failedNodeId);
-}//Dbdih::execMASTER_LCPCONF()
-
-void Dbdih::execMASTER_LCPREF(Signal* signal)
-{
- const MasterLCPRef * const ref = (MasterLCPRef *)&signal->theData[0];
- jamEntry();
- receiveLoopMacro(MASTER_LCPREQ, ref->senderNodeId);
- /*-------------------------------------------------------------------------*/
- // We have now received all responses and are ready to take over the LCP
- // protocol as master.
- /*-------------------------------------------------------------------------*/
- MASTER_LCPhandling(signal, ref->failedNodeId);
-}//Dbdih::execMASTER_LCPREF()
-
-void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
-{
- /*-------------------------------------------------------------------------
- *
- * WE ARE NOW READY TO CONCLUDE THE TAKE OVER AS MASTER.
- * WE HAVE ENOUGH INFO TO START UP ACTIVITIES IN THE PROPER PLACE.
- * ALSO SET THE PROPER STATE VARIABLES.
- *------------------------------------------------------------------------*/
- c_lcpState.currentFragment.tableId = c_lcpMasterTakeOverState.minTableId;
- c_lcpState.currentFragment.fragmentId = c_lcpMasterTakeOverState.minFragId;
- c_lcpState.m_LAST_LCP_FRAG_ORD = c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH;
-
- NodeRecordPtr failedNodePtr;
- failedNodePtr.i = failedNodeId;
- ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
-
- switch (c_lcpMasterTakeOverState.state) {
- case LMTOS_ALL_IDLE:
- jam();
- /* --------------------------------------------------------------------- */
- // All nodes were idle in the LCP protocol. Start checking for start of LCP
- // protocol.
- /* --------------------------------------------------------------------- */
-#ifdef VM_TRACE
- ndbout_c("MASTER_LCPhandling:: LMTOS_ALL_IDLE -> checkLcpStart");
-#endif
- checkLcpStart(signal, __LINE__);
- break;
- case LMTOS_COPY_ONGOING:
- jam();
- /* --------------------------------------------------------------------- */
- // We were in the starting process of the LCP protocol. We will restart the
- // protocol by calculating the keep gci and storing the new lcp id.
- /* --------------------------------------------------------------------- */
-#ifdef VM_TRACE
- ndbout_c("MASTER_LCPhandling:: LMTOS_COPY_ONGOING -> storeNewLcpId");
-#endif
- if (c_lcpState.lcpStatus == LCP_STATUS_ACTIVE) {
- jam();
- /*---------------------------------------------------------------------*/
- /* WE NEED TO DECREASE THE LATEST LCP ID SINCE WE HAVE ALREADY */
- /* STARTED THIS */
- /* LOCAL CHECKPOINT. */
- /*---------------------------------------------------------------------*/
- Uint32 lcpId = SYSFILE->latestLCP_ID;
-#ifdef VM_TRACE
- ndbout_c("Decreasing latestLCP_ID from %d to %d", lcpId, lcpId - 1);
-#endif
- SYSFILE->latestLCP_ID--;
- }//if
- storeNewLcpIdLab(signal);
- break;
- case LMTOS_ALL_ACTIVE:
- {
- jam();
- /* -------------------------------------------------------------------
- * Everybody was in the active phase. We will restart sending
- * LCP_FRAGORD to the nodes from the new master.
- * We also need to set dihLcpStatus to ZACTIVE
- * in the master node since the master will wait for all nodes to
- * complete before finalising the LCP process.
- * ------------------------------------------------------------------ */
-#ifdef VM_TRACE
- ndbout_c("MASTER_LCPhandling:: LMTOS_ALL_ACTIVE -> "
- "startLcpRoundLoopLab(table=%u, fragment=%u)",
- c_lcpMasterTakeOverState.minTableId,
- c_lcpMasterTakeOverState.minFragId);
-#endif
-
- c_lcpState.keepGci = SYSFILE->keepGCI;
- c_lcpState.setLcpStatus(LCP_START_LCP_ROUND, __LINE__);
- startLcpRoundLoopLab(signal, 0, 0);
- break;
- }
- case LMTOS_LCP_CONCLUDING:
- {
- jam();
- /* ------------------------------------------------------------------- */
- // The LCP process is in the finalisation phase. We simply wait for the
- // remaining signals to arrive. We also need to check whether we should
- // change state due to table writes completing during the state
- // collection phase.
- /* ------------------------------------------------------------------- */
- ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE);
- startLcpRoundLoopLab(signal, 0, 0);
- break;
- }
- default:
- ndbrequire(false);
- break;
- }//switch
- signal->theData[0] = NDB_LE_LCP_TakeoverCompleted;
- signal->theData[1] = c_lcpMasterTakeOverState.state;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
-
- signal->theData[0] = 7012;
- execDUMP_STATE_ORD(signal);
-
- signal->theData[0] = 7015;
- execDUMP_STATE_ORD(signal);
-
- c_lcpMasterTakeOverState.set(LMTOS_IDLE, __LINE__);
-
- checkLocalNodefailComplete(signal, failedNodePtr.i, NF_LCP_TAKE_OVER);
-}
-
-/* ------------------------------------------------------------------------- */
-/* A BLOCK OR A NODE HAS COMPLETED THE HANDLING OF THE NODE FAILURE. */
-/* ------------------------------------------------------------------------- */
-void Dbdih::execNF_COMPLETEREP(Signal* signal)
-{
- NodeRecordPtr failedNodePtr;
- NFCompleteRep * const nfCompleteRep = (NFCompleteRep *)&signal->theData[0];
- jamEntry();
- const Uint32 blockNo = nfCompleteRep->blockNo;
- Uint32 nodeId = nfCompleteRep->nodeId;
- failedNodePtr.i = nfCompleteRep->failedNodeId;
-
- ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
- switch (blockNo) {
- case DBTC:
- jam();
- ndbrequire(failedNodePtr.p->dbtcFailCompleted == ZFALSE);
- /* -------------------------------------------------------------------- */
- // Report the event that DBTC completed node failure handling.
- /* -------------------------------------------------------------------- */
- signal->theData[0] = NDB_LE_NodeFailCompleted;
- signal->theData[1] = DBTC;
- signal->theData[2] = failedNodePtr.i;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
-
- failedNodePtr.p->dbtcFailCompleted = ZTRUE;
- break;
- case DBDICT:
- jam();
- ndbrequire(failedNodePtr.p->dbdictFailCompleted == ZFALSE);
- /* --------------------------------------------------------------------- */
- // Report the event that DBDICT completed node failure handling.
- /* --------------------------------------------------------------------- */
- signal->theData[0] = NDB_LE_NodeFailCompleted;
- signal->theData[1] = DBDICT;
- signal->theData[2] = failedNodePtr.i;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
-
- failedNodePtr.p->dbdictFailCompleted = ZTRUE;
- break;
- case DBDIH:
- jam();
- ndbrequire(failedNodePtr.p->dbdihFailCompleted == ZFALSE);
- /* --------------------------------------------------------------------- */
- // Report the event that DBDIH completed node failure handling.
- /* --------------------------------------------------------------------- */
- signal->theData[0] = NDB_LE_NodeFailCompleted;
- signal->theData[1] = DBDIH;
- signal->theData[2] = failedNodePtr.i;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
-
- failedNodePtr.p->dbdihFailCompleted = ZTRUE;
- break;
- case DBLQH:
- jam();
- ndbrequire(failedNodePtr.p->dblqhFailCompleted == ZFALSE);
- /* --------------------------------------------------------------------- */
- // Report the event that DBLQH completed node failure handling.
- /* --------------------------------------------------------------------- */
- signal->theData[0] = NDB_LE_NodeFailCompleted;
- signal->theData[1] = DBLQH;
- signal->theData[2] = failedNodePtr.i;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
-
- failedNodePtr.p->dblqhFailCompleted = ZTRUE;
- break;
- case 0: /* Node has finished */
- jam();
- ndbrequire(nodeId < MAX_NDB_NODES);
-
- if (failedNodePtr.p->recNODE_FAILREP == ZFALSE) {
- jam();
- /* ------------------------------------------------------------------- */
- // We received a report about completion of node failure before we
- // received the message about the NODE failure ourselves.
- // We will send the signal to ourselves with a small delay
- // (10 milliseconds).
- /* ------------------------------------------------------------------- */
- //nf->from = __LINE__;
- sendSignalWithDelay(reference(), GSN_NF_COMPLETEREP, signal, 10,
- signal->length());
- return;
- }//if
-
- if (!failedNodePtr.p->m_NF_COMPLETE_REP.isWaitingFor(nodeId)){
- jam();
- return;
- }
-
- failedNodePtr.p->m_NF_COMPLETE_REP.clearWaitingFor(nodeId);
-
- /* -------------------------------------------------------------------- */
- // Report the event that nodeId has completed node failure handling.
- /* -------------------------------------------------------------------- */
- signal->theData[0] = NDB_LE_NodeFailCompleted;
- signal->theData[1] = 0;
- signal->theData[2] = failedNodePtr.i;
- signal->theData[3] = nodeId;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
-
- nodeFailCompletedCheckLab(signal, failedNodePtr);
- return;
- break;
- default:
- ndbrequire(false);
- return;
- break;
- }//switch
- if (failedNodePtr.p->dbtcFailCompleted == ZFALSE) {
- jam();
- return;
- }//if
- if (failedNodePtr.p->dbdictFailCompleted == ZFALSE) {
- jam();
- return;
- }//if
- if (failedNodePtr.p->dbdihFailCompleted == ZFALSE) {
- jam();
- return;
- }//if
- if (failedNodePtr.p->dblqhFailCompleted == ZFALSE) {
- jam();
- return;
- }//if
- /* ----------------------------------------------------------------------- */
- /* ALL BLOCKS IN THIS NODE HAVE COMPLETED THEIR PART OF HANDLING THE */
- /* NODE FAILURE. WE CAN NOW REPORT THIS COMPLETION TO ALL OTHER NODES. */
- /* ----------------------------------------------------------------------- */
- NodeRecordPtr nodePtr;
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- ptrAss(nodePtr, nodeRecord);
- if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
- jam();
- BlockReference ref = calcDihBlockRef(nodePtr.i);
- NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0];
- nf->blockNo = 0;
- nf->nodeId = cownNodeId;
- nf->failedNodeId = failedNodePtr.i;
- nf->from = __LINE__;
- sendSignal(ref, GSN_NF_COMPLETEREP, signal,
- NFCompleteRep::SignalLength, JBB);
- }//if
- }//for
- return;
-}//Dbdih::execNF_COMPLETEREP()
-
-void Dbdih::nodeFailCompletedCheckLab(Signal* signal,
- NodeRecordPtr failedNodePtr)
-{
- jam();
- if (!failedNodePtr.p->m_NF_COMPLETE_REP.done()){
- jam();
- return;
- }//if
- /* ---------------------------------------------------------------------- */
- /* ALL BLOCKS IN ALL NODES HAVE NOW REPORTED COMPLETION OF THE NODE */
- /* FAILURE HANDLING. WE ARE NOW READY TO ACCEPT THAT THIS NODE STARTS */
- /* AGAIN. */
- /* ---------------------------------------------------------------------- */
- jam();
- failedNodePtr.p->nodeStatus = NodeRecord::DEAD;
- failedNodePtr.p->recNODE_FAILREP = ZFALSE;
-
- /* ---------------------------------------------------------------------- */
- // Report the event that all nodes completed node failure handling.
- /* ---------------------------------------------------------------------- */
- signal->theData[0] = NDB_LE_NodeFailCompleted;
- signal->theData[1] = 0;
- signal->theData[2] = failedNodePtr.i;
- signal->theData[3] = 0;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
-
- /* ---------------------------------------------------------------------- */
- // Report to QMGR that we have concluded recovery handling of this node.
- /* ---------------------------------------------------------------------- */
- signal->theData[0] = failedNodePtr.i;
- sendSignal(QMGR_REF, GSN_NDB_FAILCONF, signal, 1, JBB);
-
- if (isMaster()) {
- jam();
- /* --------------------------------------------------------------------- */
- /* IF WE ARE MASTER WE MUST CHECK IF COPY FRAGMENT WAS INTERRUPTED */
- /* BY THE FAILED NODES. */
- /* --------------------------------------------------------------------- */
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = 0;
- ptrAss(takeOverPtr, takeOverRecord);
- if ((takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG) &&
- (failedNodePtr.i == takeOverPtr.p->toCopyNode)) {
- jam();
-#ifdef VM_TRACE
- ndbrequire("Tell jonas" == 0);
-#endif
- /*------------------------------------------------------------------*/
- /* WE ARE CURRENTLY IN THE PROCESS OF COPYING A FRAGMENT. WE */
- /* WILL CHECK IF THE COPY NODE HAS FAILED. */
- /*------------------------------------------------------------------*/
- takeOverPtr.p->toMasterStatus = TakeOverRecord::SELECTING_NEXT;
- startNextCopyFragment(signal, takeOverPtr.i);
- return;
- }//if
- checkStartTakeOver(signal);
- }//if
- return;
-}//Dbdih::nodeFailCompletedCheckLab()
-
-/*****************************************************************************/
-/* ********** SEIZING / RELEASING MODULE *************/
-/*****************************************************************************/
-/*
- 3.4 L O C A L N O D E S E I Z E
- ************************************
- */
-/*
- 3.4.1 L O C A L N O D E S E I Z E R E Q U E S T
- ******************************************************
- */
-void Dbdih::execDISEIZEREQ(Signal* signal)
-{
- ConnectRecordPtr connectPtr;
- jamEntry();
- Uint32 userPtr = signal->theData[0];
- BlockReference userRef = signal->theData[1];
- ndbrequire(cfirstconnect != RNIL);
- connectPtr.i = cfirstconnect;
- ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
- cfirstconnect = connectPtr.p->nfConnect;
- connectPtr.p->nfConnect = RNIL;
- connectPtr.p->userpointer = userPtr;
- connectPtr.p->userblockref = userRef;
- connectPtr.p->connectState = ConnectRecord::INUSE;
- signal->theData[0] = connectPtr.p->userpointer;
- signal->theData[1] = connectPtr.i;
- sendSignal(userRef, GSN_DISEIZECONF, signal, 2, JBB);
-}//Dbdih::execDISEIZEREQ()
-
-/*
- 3.5 L O C A L N O D E R E L E A S E
- ****************************************
- */
-/*
- 3.5.1 L O C A L N O D E R E L E A S E R E Q U E S T
- *******************************************************=
- */
-void Dbdih::execDIRELEASEREQ(Signal* signal)
-{
- ConnectRecordPtr connectPtr;
- jamEntry();
- connectPtr.i = signal->theData[0];
- Uint32 userRef = signal->theData[2];
- ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
- ndbrequire(connectPtr.p->connectState != ConnectRecord::FREE);
- ndbrequire(connectPtr.p->userblockref == userRef);
- signal->theData[0] = connectPtr.p->userpointer;
- sendSignal(connectPtr.p->userblockref, GSN_DIRELEASECONF, signal, 1, JBB);
- release_connect(connectPtr);
-}//Dbdih::execDIRELEASEREQ()
-
-/*
- 3.7 A D D T A B L E
- **********************=
- */
-/*****************************************************************************/
-/* ********** TABLE ADDING MODULE *************/
-/*****************************************************************************/
-/*
- 3.7.1 A D D T A B L E M A I N L Y
- ***************************************
- */
-void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
- jamEntry();
- CreateFragmentationReq * const req =
- (CreateFragmentationReq*)signal->getDataPtr();
-
- const Uint32 senderRef = req->senderRef;
- const Uint32 senderData = req->senderData;
- const Uint32 fragmentNode = req->fragmentNode;
- const Uint32 fragmentType = req->fragmentationType;
- //const Uint32 fragmentCount = req->noOfFragments;
- const Uint32 primaryTableId = req->primaryTableId;
-
- Uint32 err = 0;
-
- do {
- Uint32 noOfFragments = 0;
- Uint32 noOfReplicas = cnoReplicas;
- switch(fragmentType){
- case DictTabInfo::AllNodesSmallTable:
- jam();
- noOfFragments = csystemnodes;
- break;
- case DictTabInfo::AllNodesMediumTable:
- jam();
- noOfFragments = 2 * csystemnodes;
- break;
- case DictTabInfo::AllNodesLargeTable:
- jam();
- noOfFragments = 4 * csystemnodes;
- break;
- case DictTabInfo::SingleFragment:
- jam();
- noOfFragments = 1;
- break;
-#if 0
- case DictTabInfo::SpecifiedFragmentCount:
- noOfFragments = (fragmentCount == 0 ? 1 : (fragmentCount + 1)/ 2);
- break;
-#endif
- default:
- jam();
- err = CreateFragmentationRef::InvalidFragmentationType;
- break;
- }
- if(err)
- break;
-
- NodeGroupRecordPtr NGPtr;
- TabRecordPtr primTabPtr;
- if (primaryTableId == RNIL) {
- if(fragmentNode == 0){
- jam();
- NGPtr.i = 0;
- if(noOfFragments < csystemnodes)
- {
- NGPtr.i = c_nextNodeGroup;
- c_nextNodeGroup = (NGPtr.i + 1 == cnoOfNodeGroups ? 0 : NGPtr.i + 1);
- }
- } else if(! (fragmentNode < MAX_NDB_NODES)) {
- jam();
- err = CreateFragmentationRef::InvalidNodeId;
- } else {
- jam();
- const Uint32 stat = Sysfile::getNodeStatus(fragmentNode,
- SYSFILE->nodeStatus);
- switch (stat) {
- case Sysfile::NS_Active:
- case Sysfile::NS_ActiveMissed_1:
- case Sysfile::NS_ActiveMissed_2:
- case Sysfile::NS_TakeOver:
- jam();
- break;
- case Sysfile::NS_NotActive_NotTakenOver:
- jam();
- break;
- case Sysfile::NS_HotSpare:
- jam();
- case Sysfile::NS_NotDefined:
- jam();
- default:
- jam();
- err = CreateFragmentationRef::InvalidNodeType;
- break;
- }
- if(err)
- break;
- NGPtr.i = Sysfile::getNodeGroup(fragmentNode,
- SYSFILE->nodeGroups);
- break;
- }
- } else {
- if (primaryTableId >= ctabFileSize) {
- jam();
- err = CreateFragmentationRef::InvalidPrimaryTable;
- break;
- }
- primTabPtr.i = primaryTableId;
- ptrAss(primTabPtr, tabRecord);
- if (primTabPtr.p->tabStatus != TabRecord::TS_ACTIVE) {
- jam();
- err = CreateFragmentationRef::InvalidPrimaryTable;
- break;
- }
- if (noOfFragments != primTabPtr.p->totalfragments) {
- jam();
- err = CreateFragmentationRef::InvalidFragmentationType;
- break;
- }
- }
-
- Uint32 count = 2;
- Uint16 *fragments = (Uint16*)(signal->theData+25);
- if (primaryTableId == RNIL) {
- jam();
- Uint8 next_replica_node[MAX_NDB_NODES];
- memset(next_replica_node,0,sizeof(next_replica_node));
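- /*
- * Assign nodes fragment by fragment: node groups are used in
- * round-robin order, and within each group the starting index
- * (next_replica_node) is advanced between fragments so that the
- * preferred primaries are spread over the nodes of the group.
- */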
- for(Uint32 fragNo = 0; fragNo<noOfFragments; fragNo++){
- jam();
- ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
- const Uint32 max = NGPtr.p->nodeCount;
-
- Uint32 tmp= next_replica_node[NGPtr.i];
- for(Uint32 replicaNo = 0; replicaNo<noOfReplicas; replicaNo++)
- {
- jam();
- const Uint32 nodeId = NGPtr.p->nodesInGroup[tmp++];
- fragments[count++] = nodeId;
- tmp = (tmp >= max ? 0 : tmp);
- }
- tmp++;
- next_replica_node[NGPtr.i]= (tmp >= max ? 0 : tmp);
-
- /**
- * Next node group for next fragment
- */
- NGPtr.i++;
- NGPtr.i = (NGPtr.i == cnoOfNodeGroups ? 0 : NGPtr.i);
- }
- } else {
- for (Uint32 fragNo = 0;
- fragNo < primTabPtr.p->totalfragments; fragNo++) {
- jam();
- FragmentstorePtr fragPtr;
- ReplicaRecordPtr replicaPtr;
- getFragstore(primTabPtr.p, fragNo, fragPtr);
- fragments[count++] = fragPtr.p->preferredPrimary;
- for (replicaPtr.i = fragPtr.p->storedReplicas;
- replicaPtr.i != RNIL;
- replicaPtr.i = replicaPtr.p->nextReplica) {
- jam();
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- if (replicaPtr.p->procNode != fragPtr.p->preferredPrimary) {
- jam();
- fragments[count++] = replicaPtr.p->procNode;
- }//if
- }//for
- for (replicaPtr.i = fragPtr.p->oldStoredReplicas;
- replicaPtr.i != RNIL;
- replicaPtr.i = replicaPtr.p->nextReplica) {
- jam();
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- if (replicaPtr.p->procNode != fragPtr.p->preferredPrimary) {
- jam();
- fragments[count++] = replicaPtr.p->procNode;
- }//if
- }//for
- }
- }
- ndbrequire(count == (2 + noOfReplicas * noOfFragments));
-
- CreateFragmentationConf * const conf =
- (CreateFragmentationConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = senderData;
- conf->noOfReplicas = noOfReplicas;
- conf->noOfFragments = noOfFragments;
-
- fragments[0] = noOfReplicas;
- fragments[1] = noOfFragments;
-
- if(senderRef != 0)
- {
- LinearSectionPtr ptr[3];
- ptr[0].p = (Uint32*)&fragments[0];
- ptr[0].sz = (count + 1) / 2;
- sendSignal(senderRef,
- GSN_CREATE_FRAGMENTATION_CONF,
- signal,
- CreateFragmentationConf::SignalLength,
- JBB,
- ptr,
- 1);
- }
- else
- {
- // Execute direct
- signal->theData[0] = 0;
- }
- return;
- } while(false);
-
- if(senderRef != 0)
- {
- CreateFragmentationRef * const ref =
- (CreateFragmentationRef*)signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = senderData;
- ref->errorCode = err;
- sendSignal(senderRef, GSN_CREATE_FRAGMENTATION_REF, signal,
- CreateFragmentationRef::SignalLength, JBB);
- }
- else
- {
- // Execute direct
- signal->theData[0] = err;
- }
-}
-
-void Dbdih::execDIADDTABREQ(Signal* signal)
-{
- jamEntry();
-
- DiAddTabReq * const req = (DiAddTabReq*)signal->getDataPtr();
-
- // Seize connect record
- ndbrequire(cfirstconnect != RNIL);
- ConnectRecordPtr connectPtr;
- connectPtr.i = cfirstconnect;
- ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
- cfirstconnect = connectPtr.p->nfConnect;
-
- const Uint32 userPtr = req->connectPtr;
- const BlockReference userRef = signal->getSendersBlockRef();
- connectPtr.p->nfConnect = RNIL;
- connectPtr.p->userpointer = userPtr;
- connectPtr.p->userblockref = userRef;
- connectPtr.p->connectState = ConnectRecord::INUSE;
- connectPtr.p->table = req->tableId;
-
- TabRecordPtr tabPtr;
- tabPtr.i = req->tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- tabPtr.p->connectrec = connectPtr.i;
- tabPtr.p->tableType = req->tableType;
- tabPtr.p->schemaVersion = req->schemaVersion;
- tabPtr.p->primaryTableId = req->primaryTableId;
-
- if(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE){
- jam();
- tabPtr.p->tabStatus = TabRecord::TS_CREATING;
- sendAddFragreq(signal, connectPtr, tabPtr, 0);
- return;
- }
-
- if(getNodeState().getSystemRestartInProgress() &&
- tabPtr.p->tabStatus == TabRecord::TS_IDLE){
- jam();
-
- ndbrequire(cmasterNodeId == getOwnNodeId());
- tabPtr.p->tabStatus = TabRecord::TS_CREATING;
-
- initTableFile(tabPtr);
- FileRecordPtr filePtr;
- filePtr.i = tabPtr.p->tabFile[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- openFileRw(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::OPENING_TABLE;
- return;
- }
-
- /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
- /* AT INITIALISATION THE FILE OF TABLE DESCRIPTIONS */
- /* IS CREATED WITH AN APPROPRIATE SIZE. EACH RECORD */
- /* IN THIS FILE HAS THE INFORMATION ABOUT ONE TABLE. */
- /* THE POINTER TO THIS RECORD IS THE TABLE REFERENCE. */
- /* IN THE BEGINNING ALL RECORDS ARE CREATED BUT THEY */
- /* DO NOT HAVE ANY INFORMATION ABOUT ANY TABLE. */
- /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
- tabPtr.p->tabStatus = TabRecord::TS_CREATING;
- tabPtr.p->storedTable = req->storedTable;
- tabPtr.p->method = TabRecord::HASH;
- tabPtr.p->kvalue = req->kValue;
-
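- /*
- * The fragmentation data arrives in the FRAGMENTATION section built
- * by execCREATE_FRAGMENTATION_REQ: fragments[0] holds the number of
- * replicas, fragments[1] the number of fragments, followed by the
- * node id of every replica of every fragment, preferred primary
- * first.
- */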
- union {
- Uint16 fragments[2 + MAX_FRAG_PER_NODE*MAX_REPLICAS*MAX_NDB_NODES];
- Uint32 align;
- };
- SegmentedSectionPtr fragDataPtr;
- signal->getSection(fragDataPtr, DiAddTabReq::FRAGMENTATION);
- copy((Uint32*)fragments, fragDataPtr);
- releaseSections(signal);
-
- const Uint32 noReplicas = fragments[0];
- const Uint32 noFragments = fragments[1];
-
- tabPtr.p->noOfBackups = noReplicas - 1;
- tabPtr.p->totalfragments = noFragments;
- ndbrequire(noReplicas == cnoReplicas); // only the configured replica count is allowed
-
- if (ERROR_INSERTED(7173)) {
- CLEAR_ERROR_INSERT_VALUE;
- addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
- return;
- }
- if ((noReplicas * noFragments) > cnoFreeReplicaRec) {
- jam();
- addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
- return;
- }//if
- if (noFragments > cremainingfrags) {
- jam();
- addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
- return;
- }//if
-
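- /*
- * Set up the linear-hash parameters used by execDIGETNODESREQ:
- * logTotalFragments ends up as the largest power of two that does
- * not exceed totalfragments, mask is that power minus one, and
- * hashpointer is the number of already-split buckets that need one
- * extra hash bit.
- */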
- Uint32 logTotalFragments = 1;
- while (logTotalFragments <= tabPtr.p->totalfragments) {
- jam();
- logTotalFragments <<= 1;
- }
- logTotalFragments >>= 1;
- tabPtr.p->mask = logTotalFragments - 1;
- tabPtr.p->hashpointer = tabPtr.p->totalfragments - logTotalFragments;
- allocFragments(tabPtr.p->totalfragments, tabPtr);
-
- Uint32 index = 2;
- for (Uint32 fragId = 0; fragId < noFragments; fragId++) {
- jam();
- FragmentstorePtr fragPtr;
- Uint32 activeIndex = 0;
- getFragstore(tabPtr.p, fragId, fragPtr);
- fragPtr.p->preferredPrimary = fragments[index];
- for (Uint32 i = 0; i<noReplicas; i++) {
- const Uint32 nodeId = fragments[index++];
- ReplicaRecordPtr replicaPtr;
- allocStoredReplica(fragPtr, replicaPtr, nodeId);
- if (getNodeStatus(nodeId) == NodeRecord::ALIVE) {
- jam();
- ndbrequire(activeIndex < MAX_REPLICAS);
- fragPtr.p->activeNodes[activeIndex] = nodeId;
- activeIndex++;
- } else {
- jam();
- removeStoredReplica(fragPtr, replicaPtr);
- linkOldStoredReplica(fragPtr, replicaPtr);
- }//if
- }//for
- fragPtr.p->fragReplicas = activeIndex;
- ndbrequire(activeIndex > 0 && fragPtr.p->storedReplicas != RNIL);
- }
- initTableFile(tabPtr);
- tabPtr.p->tabCopyStatus = TabRecord::CS_ADD_TABLE_MASTER;
- signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
- signal->theData[1] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
-}
-
-void
-Dbdih::addTable_closeConf(Signal * signal, Uint32 tabPtrI){
- TabRecordPtr tabPtr;
- tabPtr.i = tabPtrI;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- ConnectRecordPtr connectPtr;
- connectPtr.i = tabPtr.p->connectrec;
- ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
-
- sendAddFragreq(signal, connectPtr, tabPtr, 0);
-}
-
-void
-Dbdih::sendAddFragreq(Signal* signal, ConnectRecordPtr connectPtr,
- TabRecordPtr tabPtr, Uint32 fragId){
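- /*
- * Starting at fragId, find the next fragment that has a replica
- * (stored or old stored) on this node and ask DBDICT to add it via
- * ADD_FRAGREQ. When no such fragment remains the table is complete
- * on this node, so reply with DIADDTABCONF and release the connect
- * record.
- */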
- jam();
- const Uint32 fragCount = tabPtr.p->totalfragments;
- ReplicaRecordPtr replicaPtr; replicaPtr.i = RNIL;
- for(; fragId<fragCount; fragId++){
- jam();
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, fragId, fragPtr);
-
- replicaPtr.i = fragPtr.p->storedReplicas;
- while(replicaPtr.i != RNIL){
- jam();
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- if(replicaPtr.p->procNode == getOwnNodeId()){
- break;
- }
- replicaPtr.i = replicaPtr.p->nextReplica;
- }
-
- if(replicaPtr.i != RNIL){
- jam();
- break;
- }
-
- replicaPtr.i = fragPtr.p->oldStoredReplicas;
- while(replicaPtr.i != RNIL){
- jam();
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- if(replicaPtr.p->procNode == getOwnNodeId()){
- break;
- }
- replicaPtr.i = replicaPtr.p->nextReplica;
- }
-
- if(replicaPtr.i != RNIL){
- jam();
- break;
- }
- }
-
- if(replicaPtr.i != RNIL){
- jam();
- ndbrequire(fragId < fragCount);
- ndbrequire(replicaPtr.p->procNode == getOwnNodeId());
-
- Uint32 requestInfo = 0;
- if(!tabPtr.p->storedTable){
- requestInfo |= LqhFragReq::TemporaryTable;
- }
-
- if(getNodeState().getNodeRestartInProgress()){
- requestInfo |= LqhFragReq::CreateInRunning;
- }
-
- AddFragReq* const req = (AddFragReq*)signal->getDataPtr();
- req->dihPtr = connectPtr.i;
- req->senderData = connectPtr.p->userpointer;
- req->fragmentId = fragId;
- req->requestInfo = requestInfo;
- req->tableId = tabPtr.i;
- req->nextLCP = 0;
- req->nodeId = getOwnNodeId();
- req->totalFragments = fragCount;
- req->startGci = SYSFILE->newestRestorableGCI;
- sendSignal(DBDICT_REF, GSN_ADD_FRAGREQ, signal,
- AddFragReq::SignalLength, JBB);
- return;
- }
-
- // Done
- DiAddTabConf * const conf = (DiAddTabConf*)signal->getDataPtr();
- conf->senderData = connectPtr.p->userpointer;
- sendSignal(connectPtr.p->userblockref, GSN_DIADDTABCONF, signal,
- DiAddTabConf::SignalLength, JBB);
-
- // Release
- release_connect(connectPtr);
-}
-void
-Dbdih::release_connect(ConnectRecordPtr ptr)
-{
- ptr.p->userblockref = ZNIL;
- ptr.p->userpointer = RNIL;
- ptr.p->connectState = ConnectRecord::FREE;
- ptr.p->nfConnect = cfirstconnect;
- cfirstconnect = ptr.i;
-}
-
-void
-Dbdih::execADD_FRAGCONF(Signal* signal){
- jamEntry();
- AddFragConf * const conf = (AddFragConf*)signal->getDataPtr();
-
- ConnectRecordPtr connectPtr;
- connectPtr.i = conf->dihPtr;
- ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
-
- TabRecordPtr tabPtr;
- tabPtr.i = connectPtr.p->table;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- sendAddFragreq(signal, connectPtr, tabPtr, conf->fragId + 1);
-}
-
-void
-Dbdih::execADD_FRAGREF(Signal* signal){
- jamEntry();
- AddFragRef * const ref = (AddFragRef*)signal->getDataPtr();
-
- ConnectRecordPtr connectPtr;
- connectPtr.i = ref->dihPtr;
- ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
-
- {
- DiAddTabRef * const ref = (DiAddTabRef*)signal->getDataPtr();
- ref->senderData = connectPtr.p->userpointer;
- ref->errorCode = ~0;
- sendSignal(connectPtr.p->userblockref, GSN_DIADDTABREF, signal,
- DiAddTabRef::SignalLength, JBB);
- }
-
- // Release
- release_connect(connectPtr);
-}
-
-/*
- 3.7.1.3 R E F U S E
- *********************
- */
-void Dbdih::addtabrefuseLab(Signal* signal, ConnectRecordPtr connectPtr, Uint32 errorCode)
-{
- signal->theData[0] = connectPtr.p->userpointer;
- signal->theData[1] = errorCode;
- sendSignal(connectPtr.p->userblockref, GSN_DIADDTABREF, signal, 2, JBB);
- release_connect(connectPtr);
- return;
-}//Dbdih::addtabrefuseLab()
-
-/*
- 3.7.2 A D D T A B L E D U P L I C A T I O N
- *************************************************
- */
-/*
- 3.7.2.1 A D D T A B L E D U P L I C A T I O N R E Q U E S T
- *******************************************************************=
- */
-
-/*
- D E L E T E T A B L E
- **********************=
- */
-/*****************************************************************************/
-/*********** DELETE TABLE MODULE *************/
-/*****************************************************************************/
-void
-Dbdih::execDROP_TAB_REQ(Signal* signal){
- jamEntry();
- DropTabReq* req = (DropTabReq*)signal->getDataPtr();
-
- TabRecordPtr tabPtr;
- tabPtr.i = req->tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- tabPtr.p->m_dropTab.tabUserRef = req->senderRef;
- tabPtr.p->m_dropTab.tabUserPtr = req->senderData;
-
- DropTabReq::RequestType rt = (DropTabReq::RequestType)req->requestType;
-
- switch(rt){
- case DropTabReq::OnlineDropTab:
- jam();
- ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_DROPPING);
- releaseTable(tabPtr);
- break;
- case DropTabReq::CreateTabDrop:
- jam();
- releaseTable(tabPtr);
- break;
- case DropTabReq::RestartDropTab:
- break;
- }
-
- startDeleteFile(signal, tabPtr);
-}
-
-void Dbdih::startDeleteFile(Signal* signal, TabRecordPtr tabPtr)
-{
- if (tabPtr.p->tabFile[0] == RNIL) {
- jam();
- initTableFile(tabPtr);
- }//if
- openTableFileForDelete(signal, tabPtr.p->tabFile[0]);
-}//Dbdih::startDeleteFile()
-
-void Dbdih::openTableFileForDelete(Signal* signal, Uint32 fileIndex)
-{
- FileRecordPtr filePtr;
- filePtr.i = fileIndex;
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- openFileRw(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::TABLE_OPEN_FOR_DELETE;
-}//Dbdih::openTableFileForDelete()
-
-void Dbdih::tableOpenLab(Signal* signal, FileRecordPtr filePtr)
-{
- closeFileDelete(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::TABLE_CLOSE_DELETE;
- return;
-}//Dbdih::tableOpenLab()
-
-void Dbdih::tableDeleteLab(Signal* signal, FileRecordPtr filePtr)
-{
- TabRecordPtr tabPtr;
- tabPtr.i = filePtr.p->tabRef;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- if (filePtr.i == tabPtr.p->tabFile[0]) {
- jam();
- openTableFileForDelete(signal, tabPtr.p->tabFile[1]);
- return;
- }//if
- ndbrequire(filePtr.i == tabPtr.p->tabFile[1]);
-
- releaseFile(tabPtr.p->tabFile[0]);
- releaseFile(tabPtr.p->tabFile[1]);
- tabPtr.p->tabFile[0] = tabPtr.p->tabFile[1] = RNIL;
-
- tabPtr.p->tabStatus = TabRecord::TS_IDLE;
-
- DropTabConf * const dropConf = (DropTabConf *)signal->getDataPtrSend();
- dropConf->senderRef = reference();
- dropConf->senderData = tabPtr.p->m_dropTab.tabUserPtr;
- dropConf->tableId = tabPtr.i;
- sendSignal(tabPtr.p->m_dropTab.tabUserRef, GSN_DROP_TAB_CONF,
- signal, DropTabConf::SignalLength, JBB);
-
- tabPtr.p->m_dropTab.tabUserPtr = RNIL;
- tabPtr.p->m_dropTab.tabUserRef = 0;
-}//Dbdih::tableDeleteLab()
-
-
-void Dbdih::releaseTable(TabRecordPtr tabPtr)
-{
- FragmentstorePtr fragPtr;
- if (tabPtr.p->noOfFragChunks > 0) {
- for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
- jam();
- getFragstore(tabPtr.p, fragId, fragPtr);
- releaseReplicas(fragPtr.p->storedReplicas);
- releaseReplicas(fragPtr.p->oldStoredReplicas);
- }//for
- releaseFragments(tabPtr);
- }
- if (tabPtr.p->tabFile[0] != RNIL) {
- jam();
- releaseFile(tabPtr.p->tabFile[0]);
- releaseFile(tabPtr.p->tabFile[1]);
- tabPtr.p->tabFile[0] = tabPtr.p->tabFile[1] = RNIL;
- }//if
-}//Dbdih::releaseTable()
-
-void Dbdih::releaseReplicas(Uint32 replicaPtrI)
-{
- ReplicaRecordPtr replicaPtr;
- replicaPtr.i = replicaPtrI;
- jam();
- while (replicaPtr.i != RNIL) {
- jam();
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- Uint32 tmp = replicaPtr.p->nextReplica;
- replicaPtr.p->nextReplica = cfirstfreeReplica;
- cfirstfreeReplica = replicaPtr.i;
- replicaPtr.i = tmp;
- cnoFreeReplicaRec++;
- }//while
-}//Dbdih::releaseReplicas()
-
-void Dbdih::seizeReplicaRec(ReplicaRecordPtr& replicaPtr)
-{
- replicaPtr.i = cfirstfreeReplica;
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- cfirstfreeReplica = replicaPtr.p->nextReplica;
- cnoFreeReplicaRec--;
- replicaPtr.p->nextReplica = RNIL;
-}//Dbdih::seizeReplicaRec()
-
-void Dbdih::releaseFile(Uint32 fileIndex)
-{
- FileRecordPtr filePtr;
- filePtr.i = fileIndex;
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- filePtr.p->nextFile = cfirstfreeFile;
- cfirstfreeFile = filePtr.i;
-}//Dbdih::releaseFile()
-
-
-void Dbdih::execALTER_TAB_REQ(Signal * signal)
-{
- AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr();
- const Uint32 senderRef = req->senderRef;
- const Uint32 senderData = req->senderData;
- const Uint32 changeMask = req->changeMask;
- const Uint32 tableId = req->tableId;
- const Uint32 tableVersion = req->tableVersion;
- const Uint32 gci = req->gci;
- AlterTabReq::RequestType requestType =
- (AlterTabReq::RequestType) req->requestType;
-
- TabRecordPtr tabPtr;
- tabPtr.i = tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- tabPtr.p->schemaVersion = tableVersion;
-
- // Request handled successfully
- AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = senderData;
- conf->changeMask = changeMask;
- conf->tableId = tableId;
- conf->tableVersion = tableVersion;
- conf->gci = gci;
- conf->requestType = requestType;
- sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal,
- AlterTabConf::SignalLength, JBB);
-}
-
-/*
- G E T N O D E S
- **********************=
- */
-/*****************************************************************************/
-/* ********** TRANSACTION HANDLING MODULE *************/
-/*****************************************************************************/
-/*
- 3.8.1 G E T N O D E S R E Q U E S T
- ******************************************
- Asks what nodes should be part of a transaction.
-*/
-void Dbdih::execDIGETNODESREQ(Signal* signal)
-{
- const DiGetNodesReq * const req = (DiGetNodesReq *)&signal->theData[0];
- FragmentstorePtr fragPtr;
- TabRecordPtr tabPtr;
- tabPtr.i = req->tableId;
- Uint32 hashValue = req->hashValue;
- Uint32 ttabFileSize = ctabFileSize;
- TabRecord* regTabDesc = tabRecord;
- jamEntry();
- ptrCheckGuard(tabPtr, ttabFileSize, regTabDesc);
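- /*
- * Linear hashing: mask is one less than the largest power of two
- * not exceeding the number of fragments, and buckets below
- * hashpointer have been split, so for those one extra hash bit is
- * used. For example, with 6 fragments mask = 3 and hashpointer = 2:
- * hashValue & 3 gives 0..3, and the values 0 and 1 are remapped via
- * hashValue & 7 to 0, 1, 4 or 5, covering all six fragments.
- */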
- Uint32 fragId = hashValue & tabPtr.p->mask;
- ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
- if (fragId < tabPtr.p->hashpointer) {
- jam();
- fragId = hashValue & ((tabPtr.p->mask << 1) + 1);
- }//if
- getFragstore(tabPtr.p, fragId, fragPtr);
- DiGetNodesConf * const conf = (DiGetNodesConf *)&signal->theData[0];
- Uint32 nodeCount = extractNodeInfo(fragPtr.p, conf->nodes);
- Uint32 sig2 = (nodeCount - 1) +
- (fragPtr.p->distributionKey << 16);
- conf->zero = 0;
- conf->reqinfo = sig2;
- conf->fragId = fragId;
-}//Dbdih::execDIGETNODESREQ()
-
-Uint32 Dbdih::extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[])
-{
- Uint32 nodeCount = 0;
- for (Uint32 i = 0; i < fragPtr->fragReplicas; i++) {
- jam();
- NodeRecordPtr nodePtr;
- ndbrequire(i < MAX_REPLICAS);
- nodePtr.i = fragPtr->activeNodes[i];
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- if (nodePtr.p->useInTransactions) {
- jam();
- nodes[nodeCount] = nodePtr.i;
- nodeCount++;
- }//if
- }//for
- ndbrequire(nodeCount > 0);
- return nodeCount;
-}//Dbdih::extractNodeInfo()
-
-void
-Dbdih::getFragstore(TabRecord * tab, //In parameter
- Uint32 fragNo, //In parameter
- FragmentstorePtr & fragptr) //Out parameter
-{
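- /*
- * Fragment records are allocated in chunks of NO_OF_FRAGS_PER_CHUNK:
- * the chunk number indexes the table's startFid[] array and the low
- * bits give the offset within that chunk.
- */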
- FragmentstorePtr fragPtr;
- Uint32 chunkNo = fragNo >> LOG_NO_OF_FRAGS_PER_CHUNK;
- Uint32 chunkIndex = fragNo & (NO_OF_FRAGS_PER_CHUNK - 1);
- Uint32 TfragstoreFileSize = cfragstoreFileSize;
- Fragmentstore* TfragStore = fragmentstore;
- if (chunkNo < MAX_NDB_NODES) {
- fragPtr.i = tab->startFid[chunkNo] + chunkIndex;
- ptrCheckGuard(fragPtr, TfragstoreFileSize, TfragStore);
- fragptr = fragPtr;
- return;
- }//if
- ndbrequire(false);
-}//Dbdih::getFragstore()
-
-void Dbdih::allocFragments(Uint32 noOfFragments, TabRecordPtr tabPtr)
-{
- FragmentstorePtr fragPtr;
- Uint32 noOfChunks = (noOfFragments + (NO_OF_FRAGS_PER_CHUNK - 1)) >> LOG_NO_OF_FRAGS_PER_CHUNK;
- ndbrequire(cremainingfrags >= noOfFragments);
- for (Uint32 i = 0; i < noOfChunks; i++) {
- jam();
- Uint32 baseFrag = cfirstfragstore;
- tabPtr.p->startFid[i] = baseFrag;
- fragPtr.i = baseFrag;
- ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
- cfirstfragstore = fragPtr.p->nextFragmentChunk;
- cremainingfrags -= NO_OF_FRAGS_PER_CHUNK;
- for (Uint32 j = 0; j < NO_OF_FRAGS_PER_CHUNK; j++) {
- jam();
- fragPtr.i = baseFrag + j;
- ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
- initFragstore(fragPtr);
- }//for
- }//for
- tabPtr.p->noOfFragChunks = noOfChunks;
-}//Dbdih::allocFragments()
-
-void Dbdih::releaseFragments(TabRecordPtr tabPtr)
-{
- FragmentstorePtr fragPtr;
- for (Uint32 i = 0; i < tabPtr.p->noOfFragChunks; i++) {
- jam();
- Uint32 baseFrag = tabPtr.p->startFid[i];
- fragPtr.i = baseFrag;
- ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
- fragPtr.p->nextFragmentChunk = cfirstfragstore;
- cfirstfragstore = baseFrag;
- tabPtr.p->startFid[i] = RNIL;
- cremainingfrags += NO_OF_FRAGS_PER_CHUNK;
- }//for
- tabPtr.p->noOfFragChunks = 0;
-}//Dbdih::releaseFragments()
-
-void Dbdih::initialiseFragstore()
-{
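- /*
- * Initialise every fragment record, then link the records in chunks
- * of NO_OF_FRAGS_PER_CHUNK into the free chunk list headed by
- * cfirstfragstore. cremainingfrags counts the fragment records
- * available to allocFragments().
- */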
- Uint32 i;
- FragmentstorePtr fragPtr;
- for (i = 0; i < cfragstoreFileSize; i++) {
- fragPtr.i = i;
- ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
- initFragstore(fragPtr);
- }//for
- Uint32 noOfChunks = cfragstoreFileSize >> LOG_NO_OF_FRAGS_PER_CHUNK;
- fragPtr.i = 0;
- cfirstfragstore = RNIL;
- cremainingfrags = 0;
- for (i = 0; i < noOfChunks; i++) {
- refresh_watch_dog();
- ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
- fragPtr.p->nextFragmentChunk = cfirstfragstore;
- cfirstfragstore = fragPtr.i;
- fragPtr.i += NO_OF_FRAGS_PER_CHUNK;
- cremainingfrags += NO_OF_FRAGS_PER_CHUNK;
- }//for
-}//Dbdih::initialiseFragstore()
-
-/*
- 3.9 V E R I F I C A T I O N
- ****************************=
- */
-/****************************************************************************/
-/* ********** VERIFICATION SUB-MODULE *************/
-/****************************************************************************/
-/*
- 3.9.1 R E C E I V I N G O F V E R I F I C A T I O N R E Q U E S T
- *************************************************************************
- */
-void Dbdih::execDIVERIFYREQ(Signal* signal)
-{
-
- jamEntry();
- if ((getBlockCommit() == false) &&
- (cfirstVerifyQueue == RNIL)) {
- jam();
- /*-----------------------------------------------------------------------*/
- // We are not blocked and the verify queue is currently empty, so we can
- // simply reply back to TC immediately. The method was called with
- // EXECUTE_DIRECT so we reply back by setting signal data and returning.
- // theData[0] already contains the correct information so
- // we need not touch it.
- /*-----------------------------------------------------------------------*/
- signal->theData[1] = currentgcp;
- signal->theData[2] = 0;
- return;
- }//if
- /*-------------------------------------------------------------------------*/
- // Since we are blocked we need to put this operation last in the verify
- // queue to ensure that operation starts up in the correct order.
- /*-------------------------------------------------------------------------*/
- ApiConnectRecordPtr tmpApiConnectptr;
- ApiConnectRecordPtr localApiConnectptr;
-
- cverifyQueueCounter++;
- localApiConnectptr.i = signal->theData[0];
- tmpApiConnectptr.i = clastVerifyQueue;
- ptrCheckGuard(localApiConnectptr, capiConnectFileSize, apiConnectRecord);
- localApiConnectptr.p->apiGci = cnewgcp;
- localApiConnectptr.p->nextApi = RNIL;
- clastVerifyQueue = localApiConnectptr.i;
- if (tmpApiConnectptr.i == RNIL) {
- jam();
- cfirstVerifyQueue = localApiConnectptr.i;
- } else {
- jam();
- ptrCheckGuard(tmpApiConnectptr, capiConnectFileSize, apiConnectRecord);
- tmpApiConnectptr.p->nextApi = localApiConnectptr.i;
- }//if
- emptyverificbuffer(signal, false);
- signal->theData[2] = 1; // Indicate no immediate return
- return;
-}//Dbdih::execDIVERIFYREQ()
-
-void Dbdih::execDI_FCOUNTREQ(Signal* signal)
-{
- ConnectRecordPtr connectPtr;
- TabRecordPtr tabPtr;
- jamEntry();
- connectPtr.i = signal->theData[0];
- tabPtr.i = signal->theData[1];
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
-
- if(connectPtr.i != RNIL){
- ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
- if (connectPtr.p->connectState == ConnectRecord::INUSE) {
- jam();
- signal->theData[0] = connectPtr.p->userpointer;
- signal->theData[1] = tabPtr.p->totalfragments;
- sendSignal(connectPtr.p->userblockref, GSN_DI_FCOUNTCONF, signal, 2, JBB);
- return;
- }//if
- signal->theData[0] = connectPtr.p->userpointer;
- signal->theData[1] = ZERRONOUSSTATE;
- sendSignal(connectPtr.p->userblockref, GSN_DI_FCOUNTREF, signal, 2, JBB);
- return;
- }//if
-
- //connectPtr.i == RNIL -> question without connect record
- const Uint32 senderData = signal->theData[2];
- const BlockReference senderRef = signal->senderBlockRef();
- signal->theData[0] = RNIL;
- signal->theData[1] = tabPtr.p->totalfragments;
- signal->theData[2] = tabPtr.i;
- signal->theData[3] = senderData;
- signal->theData[4] = tabPtr.p->noOfBackups;
- sendSignal(senderRef, GSN_DI_FCOUNTCONF, signal, 5, JBB);
-}//Dbdih::execDI_FCOUNTREQ()
-
-void Dbdih::execDIGETPRIMREQ(Signal* signal)
-{
- FragmentstorePtr fragPtr;
- ConnectRecordPtr connectPtr;
- TabRecordPtr tabPtr;
- jamEntry();
- Uint32 passThrough = signal->theData[1];
- tabPtr.i = signal->theData[2];
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- if (DictTabInfo::isOrderedIndex(tabPtr.p->tableType)) {
- jam();
- tabPtr.i = tabPtr.p->primaryTableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- }
- Uint32 fragId = signal->theData[3];
-
- ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
- connectPtr.i = signal->theData[0];
- if(connectPtr.i != RNIL)
- {
- jam();
- ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
- signal->theData[0] = connectPtr.p->userpointer;
- }
- else
- {
- jam();
- signal->theData[0] = RNIL;
- }
-
- Uint32 nodes[MAX_REPLICAS];
- getFragstore(tabPtr.p, fragId, fragPtr);
- Uint32 count = extractNodeInfo(fragPtr.p, nodes);
-
- signal->theData[1] = passThrough;
- signal->theData[2] = nodes[0];
- signal->theData[3] = nodes[1];
- signal->theData[4] = nodes[2];
- signal->theData[5] = nodes[3];
- signal->theData[6] = count;
- signal->theData[7] = tabPtr.i;
- signal->theData[8] = fragId;
-
- const BlockReference senderRef = signal->senderBlockRef();
- sendSignal(senderRef, GSN_DIGETPRIMCONF, signal, 9, JBB);
-}//Dbdih::execDIGETPRIMREQ()
-
-/****************************************************************************/
-/* ********** GLOBAL-CHECK-POINT HANDLING MODULE *************/
-/****************************************************************************/
-/*
- 3.10 G L O B A L C H E C K P O I N T ( IN M A S T E R R O L E)
- *******************************************************************
- */
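-/*
- As can be seen below, the master drives a global checkpoint in four
- phases: GCP_PREPARE to all nodes (GCP_PREPARE_SENT), GCP_COMMIT once
- all GCP_PREPARECONF have arrived (GCP_COMMIT_SENT), GCP_SAVEREQ once
- all GCP_NODEFINISH have arrived (GCP_NODE_FINISHED), and finally the
- distribution of the new restorable GCI via copyGciLab once every node
- has answered GCP_SAVEREQ (GCP_SAVE_LQH_FINISHED). checkGcpStopLab
- supervises that this state machine keeps making progress.
- */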
-void Dbdih::checkGcpStopLab(Signal* signal)
-{
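- /*
- * cgcpSameCounter counts consecutive 100 millisecond samples (see
- * the delayed CONTINUEB at the end of this routine) during which
- * neither the GCP state nor the GCP id has changed. 1200 unchanged
- * samples thus correspond to roughly two minutes without progress,
- * at which point crashSystemAtGcpStop is called deliberately.
- */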
- Uint32 tgcpStatus;
-
- tgcpStatus = cgcpStatus;
- if (tgcpStatus == coldGcpStatus) {
- jam();
- if (coldGcpId == cnewgcp) {
- jam();
- if (cgcpStatus != GCP_READY) {
- jam();
- cgcpSameCounter++;
- if (cgcpSameCounter == 1200) {
- jam();
-#ifdef VM_TRACE
- ndbout << "System crash due to GCP Stop in state = ";
- ndbout << (Uint32) cgcpStatus << endl;
-#endif
- crashSystemAtGcpStop(signal);
- return;
- }//if
- } else {
- jam();
- if (cgcpOrderBlocked == 0) {
- jam();
- cgcpSameCounter++;
- if (cgcpSameCounter == 1200) {
- jam();
-#ifdef VM_TRACE
- ndbout << "System crash due to GCP Stop in state = ";
- ndbout << (Uint32) cgcpStatus << endl;
-#endif
- crashSystemAtGcpStop(signal);
- return;
- }//if
- } else {
- jam();
- cgcpSameCounter = 0;
- }//if
- }//if
- } else {
- jam();
- cgcpSameCounter = 0;
- }//if
- } else {
- jam();
- cgcpSameCounter = 0;
- }//if
- signal->theData[0] = DihContinueB::ZCHECK_GCP_STOP;
- signal->theData[1] = coldGcpStatus;
- signal->theData[2] = cgcpStatus;
- signal->theData[3] = coldGcpId;
- signal->theData[4] = cnewgcp;
- signal->theData[5] = cgcpSameCounter;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 6);
- coldGcpStatus = cgcpStatus;
- coldGcpId = cnewgcp;
- return;
-}//Dbdih::checkGcpStopLab()
-
-void Dbdih::startGcpLab(Signal* signal, Uint32 aWaitTime)
-{
- if ((cgcpOrderBlocked == 1) ||
- (c_nodeStartMaster.blockGcp == true) ||
- (cfirstVerifyQueue != RNIL)) {
- /*************************************************************************/
- // 1: Global Checkpoint has been stopped by management command
- // 2: Global Checkpoint is blocked by node recovery activity
- // 3: Previous global checkpoint is not yet completed.
- // All this means that global checkpoint cannot start now.
- /*************************************************************************/
- jam();
- cgcpStartCounter++;
- signal->theData[0] = DihContinueB::ZSTART_GCP;
- signal->theData[1] = aWaitTime > 100 ? (aWaitTime - 100) : 0;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2);
- return;
- }//if
- if (cstartGcpNow == false && aWaitTime > 100){
- /*************************************************************************/
- // We still have more than 100 milliseconds before we start the next
- // global checkpoint and nobody has ordered an immediate start.
- // During initial start we will use continuous global checkpoints to
- // speed it up since we need to complete a global checkpoint after
- // inserting a lot of records.
- /*************************************************************************/
- jam();
- cgcpStartCounter++;
- signal->theData[0] = DihContinueB::ZSTART_GCP;
- signal->theData[1] = (aWaitTime - 100);
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2);
- return;
- }//if
- cgcpStartCounter = 0;
- cstartGcpNow = false;
- /***************************************************************************/
- // Report the event that a global checkpoint has started.
- /***************************************************************************/
- signal->theData[0] = NDB_LE_GlobalCheckpointStarted; //Event type
- signal->theData[1] = cnewgcp;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
-
- CRASH_INSERTION(7000);
- cnewgcp++;
- signal->setTrace(TestOrd::TraceGlobalCheckpoint);
- sendLoopMacro(GCP_PREPARE, sendGCP_PREPARE);
- cgcpStatus = GCP_PREPARE_SENT;
-}//Dbdih::startGcpLab()
-
-void Dbdih::execGCP_PREPARECONF(Signal* signal)
-{
- jamEntry();
- Uint32 senderNodeId = signal->theData[0];
- Uint32 gci = signal->theData[1];
- ndbrequire(gci == cnewgcp);
- receiveLoopMacro(GCP_PREPARE, senderNodeId);
- //-------------------------------------------------------------
- // We have now received all replies. We are ready to continue
- // with committing the global checkpoint.
- //-------------------------------------------------------------
- gcpcommitreqLab(signal);
-}//Dbdih::execGCP_PREPARECONF()
-
-void Dbdih::gcpcommitreqLab(Signal* signal)
-{
- CRASH_INSERTION(7001);
- sendLoopMacro(GCP_COMMIT, sendGCP_COMMIT);
- cgcpStatus = GCP_COMMIT_SENT;
- return;
-}//Dbdih::gcpcommitreqLab()
-
-void Dbdih::execGCP_NODEFINISH(Signal* signal)
-{
- jamEntry();
- const Uint32 senderNodeId = signal->theData[0];
- const Uint32 gci = signal->theData[1];
- const Uint32 failureNr = signal->theData[2];
- if (!isMaster()) {
- jam();
- ndbrequire(failureNr > cfailurenr);
- //-------------------------------------------------------------
- // Another node thinks we are master. This could happen when he
- // has heard of a node failure which I have not heard of. Ignore
- // signal in this case since we will discover it by sending
- // MASTER_GCPREQ to the node.
- //-------------------------------------------------------------
- return;
- } else if (cmasterState == MASTER_TAKE_OVER_GCP) {
- jam();
- //-------------------------------------------------------------
- // We are currently taking over as master. We will delay the
- // signal until we have completed the take over gcp handling.
- //-------------------------------------------------------------
- sendSignalWithDelay(reference(), GSN_GCP_NODEFINISH, signal, 20, 3);
- return;
- } else {
- ndbrequire(cmasterState == MASTER_ACTIVE);
- }//if
- ndbrequire(gci == coldgcp);
- receiveLoopMacro(GCP_COMMIT, senderNodeId);
- //-------------------------------------------------------------
- // We have now received all replies. We are ready to continue
- // with saving the global checkpoint to disk.
- //-------------------------------------------------------------
- CRASH_INSERTION(7002);
- gcpsavereqLab(signal);
- return;
-}//Dbdih::execGCP_NODEFINISH()
-
-void Dbdih::gcpsavereqLab(Signal* signal)
-{
- sendLoopMacro(GCP_SAVEREQ, sendGCP_SAVEREQ);
- cgcpStatus = GCP_NODE_FINISHED;
-}//Dbdih::gcpsavereqLab()
-
-void Dbdih::execGCP_SAVECONF(Signal* signal)
-{
- jamEntry();
- const GCPSaveConf * const saveConf = (GCPSaveConf*)&signal->theData[0];
- ndbrequire(saveConf->gci == coldgcp);
- ndbrequire(saveConf->nodeId == saveConf->dihPtr);
- SYSFILE->lastCompletedGCI[saveConf->nodeId] = saveConf->gci;
- GCP_SAVEhandling(signal, saveConf->nodeId);
-}//Dbdih::execGCP_SAVECONF()
-
-void Dbdih::execGCP_SAVEREF(Signal* signal)
-{
- jamEntry();
- const GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
- ndbrequire(saveRef->gci == coldgcp);
- ndbrequire(saveRef->nodeId == saveRef->dihPtr);
- /**
- * Only allow reason not to save
- */
- ndbrequire(saveRef->errorCode == GCPSaveRef::NodeShutdownInProgress ||
- saveRef->errorCode == GCPSaveRef::FakedSignalDueToNodeFailure ||
- saveRef->errorCode == GCPSaveRef::NodeRestartInProgress);
- GCP_SAVEhandling(signal, saveRef->nodeId);
-}//Dbdih::execGCP_SAVEREF()
-
-void Dbdih::GCP_SAVEhandling(Signal* signal, Uint32 nodeId)
-{
- receiveLoopMacro(GCP_SAVEREQ, nodeId);
- /*-------------------------------------------------------------------------*/
- // All nodes have replied. We are ready to update the system file.
- /*-------------------------------------------------------------------------*/
- cgcpStatus = GCP_SAVE_LQH_FINISHED;
- CRASH_INSERTION(7003);
- checkToCopy();
- /**------------------------------------------------------------------------
- * SET NEW RECOVERABLE GCI. ALSO RESET RESTART COUNTER TO ZERO.
- * THIS INDICATES THAT THE SYSTEM HAS BEEN RECOVERED AND SURVIVED AT
- * LEAST ONE GLOBAL CHECKPOINT PERIOD. WE WILL USE THIS PARAMETER TO
- * SET BACK THE RESTART GCI IF WE ENCOUNTER MORE THAN ONE UNSUCCESSFUL
- * RESTART.
- *------------------------------------------------------------------------*/
- SYSFILE->newestRestorableGCI = coldgcp;
- if(Sysfile::getInitialStartOngoing(SYSFILE->systemRestartBits) &&
- getNodeState().startLevel == NodeState::SL_STARTED){
- jam();
-#if 0
- ndbout_c("Dbdih: Clearing initial start ongoing");
-#endif
- Sysfile::clearInitialStartOngoing(SYSFILE->systemRestartBits);
- }
- copyGciLab(signal, CopyGCIReq::GLOBAL_CHECKPOINT);
-}//Dbdih::GCP_SAVEhandling()
-
-/*
- 3.11 G L O B A L C H E C K P O I N T (N O T - M A S T E R)
- *************************************************************
- */
-void Dbdih::execGCP_PREPARE(Signal* signal)
-{
- jamEntry();
- CRASH_INSERTION(7005);
- Uint32 masterNodeId = signal->theData[0];
- Uint32 gci = signal->theData[1];
- BlockReference retRef = calcDihBlockRef(masterNodeId);
-
- ndbrequire (cmasterdihref == retRef);
- ndbrequire (cgcpParticipantState == GCP_PARTICIPANT_READY);
- ndbrequire (gci == (currentgcp + 1));
-
- cgckptflag = true;
- cgcpParticipantState = GCP_PARTICIPANT_PREPARE_RECEIVED;
- cnewgcp = gci;
-
- signal->theData[0] = cownNodeId;
- signal->theData[1] = gci;
- sendSignal(retRef, GSN_GCP_PREPARECONF, signal, 2, JBA);
- return;
-}//Dbdih::execGCP_PREPARE()
-
-void Dbdih::execGCP_COMMIT(Signal* signal)
-{
- jamEntry();
- CRASH_INSERTION(7006);
- Uint32 masterNodeId = signal->theData[0];
- Uint32 gci = signal->theData[1];
-
- ndbrequire(gci == (currentgcp + 1));
- ndbrequire(masterNodeId == cmasterNodeId);
- ndbrequire(cgcpParticipantState == GCP_PARTICIPANT_PREPARE_RECEIVED);
-
- coldgcp = currentgcp;
- currentgcp = cnewgcp;
- cgckptflag = false;
- emptyverificbuffer(signal, true);
- cgcpParticipantState = GCP_PARTICIPANT_COMMIT_RECEIVED;
- signal->theData[1] = coldgcp;
- sendSignal(clocaltcblockref, GSN_GCP_NOMORETRANS, signal, 2, JBB);
- return;
-}//Dbdih::execGCP_COMMIT()
-
-void Dbdih::execGCP_TCFINISHED(Signal* signal)
-{
- jamEntry();
- CRASH_INSERTION(7007);
- Uint32 gci = signal->theData[1];
- ndbrequire(gci == coldgcp);
-
- cgcpParticipantState = GCP_PARTICIPANT_TC_FINISHED;
- signal->theData[0] = cownNodeId;
- signal->theData[1] = coldgcp;
- signal->theData[2] = cfailurenr;
- sendSignal(cmasterdihref, GSN_GCP_NODEFINISH, signal, 3, JBB);
-}//Dbdih::execGCP_TCFINISHED()
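-
-/* ------------------------------------------------------------------------- */
-/* A minimal standalone sketch of the participant-side GCP state walk         */
-/* implemented by the three handlers above. Types and names are simplified    */
-/* stand-ins; only the ordering of the transitions is taken from the code.    */
-/* ------------------------------------------------------------------------- */
-#if 0
-enum ParticipantState { READY, PREPARE_RECEIVED, COMMIT_RECEIVED, TC_FINISHED };
-enum GcpSignal        { GCP_PREPARE, GCP_COMMIT, GCP_TCFINISHED };
-
-/* Returns true if the signal is legal in the current state and advances it,  */
-/* mirroring the ndbrequire() checks in the handlers above.                   */
-inline bool advance(ParticipantState& state, GcpSignal sig)
-{
-  switch (sig) {
-  case GCP_PREPARE:
-    if (state != READY) return false;
-    state = PREPARE_RECEIVED;          /* reply with GCP_PREPARECONF          */
-    return true;
-  case GCP_COMMIT:
-    if (state != PREPARE_RECEIVED) return false;
-    state = COMMIT_RECEIVED;           /* ask TC to finish: GCP_NOMORETRANS   */
-    return true;
-  case GCP_TCFINISHED:
-    if (state != COMMIT_RECEIVED) return false;
-    state = TC_FINISHED;               /* reply with GCP_NODEFINISH           */
-    return true;
-  }
-  return false;
-}
-/* The participant goes back to READY when the COPY_GCIREQ round for the      */
-/* global checkpoint completes (see writingCopyGciLab() further down).        */
-#endif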
-
-/*****************************************************************************/
-//****** RECEIVING TAMPER REQUEST FROM NDBAPI ******
-/*****************************************************************************/
-void Dbdih::execDIHNDBTAMPER(Signal* signal)
-{
- jamEntry();
- Uint32 tcgcpblocked = signal->theData[0];
- /* ACTION TO BE TAKEN BY DIH */
- Uint32 tuserpointer = signal->theData[1];
- BlockReference tuserblockref = signal->theData[2];
- switch (tcgcpblocked) {
- case 1:
- jam();
- if (isMaster()) {
- jam();
- cgcpOrderBlocked = 1;
- } else {
- jam();
- /* TRANSFER THE REQUEST */
- /* TO MASTER */
- signal->theData[0] = tcgcpblocked;
- signal->theData[1] = tuserpointer;
- signal->theData[2] = tuserblockref;
- sendSignal(cmasterdihref, GSN_DIHNDBTAMPER, signal, 3, JBB);
- }//if
- break;
- case 2:
- jam();
- if (isMaster()) {
- jam();
- cgcpOrderBlocked = 0;
- } else {
- jam();
- /* TRANSFER THE REQUEST */
- /* TO MASTER */
- signal->theData[0] = tcgcpblocked;
- signal->theData[1] = tuserpointer;
- signal->theData[2] = tuserblockref;
- sendSignal(cmasterdihref, GSN_DIHNDBTAMPER, signal, 3, JBB);
- }//if
- break;
- case 3:
- ndbrequire(false);
- return;
- break;
- case 4:
- jam();
- signal->theData[0] = tuserpointer;
- signal->theData[1] = crestartGci;
- sendSignal(tuserblockref, GSN_DIHNDBTAMPER, signal, 2, JBB);
- break;
-#ifdef ERROR_INSERT
- case 5:
- jam();
- if(tuserpointer == 0)
- {
- jam();
- signal->theData[0] = 0;
- sendSignal(QMGR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(NDBCNTR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(NDBFS_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(DBACC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(DBTUP_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(DBLQH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(DBDICT_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(DBDIH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(DBTC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(CMVMI_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- return;
- }
- /*----------------------------------------------------------------------*/
- // Insert errors.
- /*----------------------------------------------------------------------*/
- if (tuserpointer < 1000) {
- /*--------------------------------------------------------------------*/
- // Insert errors into QMGR.
- /*--------------------------------------------------------------------*/
- jam();
- tuserblockref = QMGR_REF;
- } else if (tuserpointer < 2000) {
- /*--------------------------------------------------------------------*/
- // Insert errors into NDBCNTR.
- /*--------------------------------------------------------------------*/
- jam();
- tuserblockref = NDBCNTR_REF;
- } else if (tuserpointer < 3000) {
- /*--------------------------------------------------------------------*/
- // Insert errors into NDBFS.
- /*--------------------------------------------------------------------*/
- jam();
- tuserblockref = NDBFS_REF;
- } else if (tuserpointer < 4000) {
- /*--------------------------------------------------------------------*/
- // Insert errors into DBACC.
- /*--------------------------------------------------------------------*/
- jam();
- tuserblockref = DBACC_REF;
- } else if (tuserpointer < 5000) {
- /*--------------------------------------------------------------------*/
- // Insert errors into DBTUP.
- /*--------------------------------------------------------------------*/
- jam();
- tuserblockref = DBTUP_REF;
- } else if (tuserpointer < 6000) {
- /*---------------------------------------------------------------------*/
- // Insert errors into DBLQH.
- /*---------------------------------------------------------------------*/
- jam();
- tuserblockref = DBLQH_REF;
- } else if (tuserpointer < 7000) {
- /*---------------------------------------------------------------------*/
- // Insert errors into DBDICT.
- /*---------------------------------------------------------------------*/
- jam();
- tuserblockref = DBDICT_REF;
- } else if (tuserpointer < 8000) {
- /*---------------------------------------------------------------------*/
- // Insert errors into DBDIH.
- /*--------------------------------------------------------------------*/
- jam();
- tuserblockref = DBDIH_REF;
- } else if (tuserpointer < 9000) {
- /*--------------------------------------------------------------------*/
- // Insert errors into DBTC.
- /*--------------------------------------------------------------------*/
- jam();
- tuserblockref = DBTC_REF;
- } else if (tuserpointer < 10000) {
- /*--------------------------------------------------------------------*/
- // Insert errors into CMVMI.
- /*--------------------------------------------------------------------*/
- jam();
- tuserblockref = CMVMI_REF;
- } else if (tuserpointer < 11000) {
- jam();
- tuserblockref = BACKUP_REF;
- } else if (tuserpointer < 12000) {
- // DBUTIL_REF ?
- jam();
- } else if (tuserpointer < 13000) {
- jam();
- tuserblockref = DBTUX_REF;
- } else if (tuserpointer < 14000) {
- jam();
- tuserblockref = SUMA_REF;
- } else if (tuserpointer < 15000) {
- jam();
- tuserblockref = DBDICT_REF;
- } else if (tuserpointer < 30000) {
- /*--------------------------------------------------------------------*/
- // Ignore errors in the 15000 to 29999 range.
- /*--------------------------------------------------------------------*/
- jam();
- return;
- } else if (tuserpointer < 40000) {
- jam();
- /*--------------------------------------------------------------------*/
- // Redirect errors to master DIH in the 30000-range.
- /*--------------------------------------------------------------------*/
- tuserblockref = cmasterdihref;
- tuserpointer -= 30000;
- signal->theData[0] = 5;
- signal->theData[1] = tuserpointer;
- signal->theData[2] = tuserblockref;
- sendSignal(tuserblockref, GSN_DIHNDBTAMPER, signal, 3, JBB);
- return;
- } else if (tuserpointer < 50000) {
- NodeRecordPtr localNodeptr;
- Uint32 Tfound = 0;
- jam();
- /*--------------------------------------------------------------------*/
- // Redirect errors to non-master DIH in the 40000-range.
- /*--------------------------------------------------------------------*/
- tuserpointer -= 40000;
- for (localNodeptr.i = 1;
- localNodeptr.i < MAX_NDB_NODES;
- localNodeptr.i++) {
- jam();
- ptrAss(localNodeptr, nodeRecord);
- if ((localNodeptr.p->nodeStatus == NodeRecord::ALIVE) &&
- (localNodeptr.i != cmasterNodeId)) {
- jam();
- tuserblockref = calcDihBlockRef(localNodeptr.i);
- Tfound = 1;
- break;
- }//if
- }//for
- if (Tfound == 0) {
- jam();
- /*-------------------------------------------------------------------*/
- // Ignore since no non-master node existed.
- /*-------------------------------------------------------------------*/
- return;
- }//if
- signal->theData[0] = 5;
- signal->theData[1] = tuserpointer;
- signal->theData[2] = tuserblockref;
- sendSignal(tuserblockref, GSN_DIHNDBTAMPER, signal, 3, JBB);
- return;
- } else {
- jam();
- return;
- }//if
- signal->theData[0] = tuserpointer;
- if (tuserpointer != 0) {
- sendSignal(tuserblockref, GSN_NDB_TAMPER, signal, 1, JBB);
- } else {
- sendSignal(QMGR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(NDBCNTR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(NDBFS_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(DBACC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(DBTUP_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(DBLQH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(DBDICT_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(DBDIH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(DBTC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- sendSignal(CMVMI_REF, GSN_NDB_TAMPER, signal, 1, JBB);
- }//if
- break;
-#endif
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dbdih::execDIHNDBTAMPER()
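-
-/* ------------------------------------------------------------------------- */
-/* A rough summary, as a sketch only, of the error-insert routing in case 5   */
-/* above: the inserted error number selects the receiving block by its        */
-/* 1000-range. The helper below uses a stand-in Uint32 typedef and returns    */
-/* block names as strings purely for illustration.                            */
-/* ------------------------------------------------------------------------- */
-#if 0
-typedef unsigned int Uint32;
-
-inline const char* tamperTargetBlock(Uint32 errorNo)
-{
-  if (errorNo <  1000) return "QMGR";
-  if (errorNo <  2000) return "NDBCNTR";
-  if (errorNo <  3000) return "NDBFS";
-  if (errorNo <  4000) return "DBACC";
-  if (errorNo <  5000) return "DBTUP";
-  if (errorNo <  6000) return "DBLQH";
-  if (errorNo <  7000) return "DBDICT";
-  if (errorNo <  8000) return "DBDIH";
-  if (errorNo <  9000) return "DBTC";
-  if (errorNo < 10000) return "CMVMI";
-  if (errorNo < 11000) return "BACKUP";
-  if (errorNo < 12000) return "(request sender; DBUTIL case not wired up)";
-  if (errorNo < 13000) return "DBTUX";
-  if (errorNo < 14000) return "SUMA";
-  if (errorNo < 15000) return "DBDICT";
-  if (errorNo < 30000) return "(ignored)";
-  if (errorNo < 40000) return "master DIH, with number - 30000";
-  if (errorNo < 50000) return "some non-master DIH, with number - 40000";
-  return "(ignored)";
-}
-#endif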
-
-/*****************************************************************************/
-/* ********** FILE HANDLING MODULE *************/
-/*****************************************************************************/
-void Dbdih::copyGciLab(Signal* signal, CopyGCIReq::CopyReason reason)
-{
- if(c_copyGCIMaster.m_copyReason != CopyGCIReq::IDLE){
- /**
- * There can currently only be one waiting
- */
- ndbrequire(c_copyGCIMaster.m_waiting == CopyGCIReq::IDLE);
- c_copyGCIMaster.m_waiting = reason;
- return;
- }
- c_copyGCIMaster.m_copyReason = reason;
- sendLoopMacro(COPY_GCIREQ, sendCOPY_GCIREQ);
-
-}//Dbdih::copyGciLab()
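-
-/* ------------------------------------------------------------------------- */
-/* A minimal sketch of the one-deep pending queue that copyGciLab() above and */
-/* execCOPY_GCICONF() below maintain in c_copyGCIMaster. Types are simplified */
-/* stand-ins; the real code crashes (ndbrequire) where this sketch merely     */
-/* returns false.                                                             */
-/* ------------------------------------------------------------------------- */
-#if 0
-enum CopyReason { IDLE, RESTART, LOCAL_CHECKPOINT, GLOBAL_CHECKPOINT,
-                  INITIAL_START_COMPLETED };
-
-struct CopyGciMaster {
-  CopyReason current;  /* reason of the COPY_GCIREQ round in progress         */
-  CopyReason waiting;  /* at most one queued reason, IDLE when none           */
-};
-
-/* Start a COPY_GCI round or queue it; true means it started immediately.     */
-inline bool requestCopyGci(CopyGciMaster& m, CopyReason reason)
-{
-  if (m.current != IDLE) {
-    if (m.waiting != IDLE) return false;  /* only one request may wait        */
-    m.waiting = reason;
-    return false;
-  }
-  m.current = reason;  /* sendLoopMacro(COPY_GCIREQ, ...) would follow        */
-  return true;
-}
-
-/* Called when all COPY_GCICONF replies have arrived; pops the queue.         */
-inline bool completeCopyGci(CopyGciMaster& m, CopyReason& next)
-{
-  next      = m.waiting;
-  m.current = IDLE;
-  m.waiting = IDLE;
-  return next != IDLE;  /* caller re-triggers the round via CONTINUEB         */
-}
-#endif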
-
-/* ------------------------------------------------------------------------- */
-/* COPY_GCICONF RESPONSE TO COPY_GCIREQ */
-/* ------------------------------------------------------------------------- */
-void Dbdih::execCOPY_GCICONF(Signal* signal)
-{
- jamEntry();
- NodeRecordPtr senderNodePtr;
- senderNodePtr.i = signal->theData[0];
- receiveLoopMacro(COPY_GCIREQ, senderNodePtr.i);
-
- CopyGCIReq::CopyReason waiting = c_copyGCIMaster.m_waiting;
- CopyGCIReq::CopyReason current = c_copyGCIMaster.m_copyReason;
-
- c_copyGCIMaster.m_copyReason = CopyGCIReq::IDLE;
- c_copyGCIMaster.m_waiting = CopyGCIReq::IDLE;
-
- bool ok = false;
- switch(current){
- case CopyGCIReq::RESTART:{
- ok = true;
- jam();
- DictStartReq * req = (DictStartReq*)&signal->theData[0];
- req->restartGci = SYSFILE->newestRestorableGCI;
- req->senderRef = reference();
- sendSignal(cdictblockref, GSN_DICTSTARTREQ,
- signal, DictStartReq::SignalLength, JBB);
- break;
- }
- case CopyGCIReq::LOCAL_CHECKPOINT:{
- ok = true;
- jam();
- startLcpRoundLab(signal);
- break;
- }
- case CopyGCIReq::GLOBAL_CHECKPOINT:
- ok = true;
- jam();
- checkToCopyCompleted(signal);
-
- /************************************************************************/
- // Report the event that a global checkpoint has completed.
- /************************************************************************/
- signal->setTrace(0);
- signal->theData[0] = NDB_LE_GlobalCheckpointCompleted; //Event type
- signal->theData[1] = coldgcp;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
-
- CRASH_INSERTION(7004);
- emptyWaitGCPMasterQueue(signal);
- cgcpStatus = GCP_READY;
- signal->theData[0] = DihContinueB::ZSTART_GCP;
- signal->theData[1] = cgcpDelay;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2);
- if (c_nodeStartMaster.blockGcp == true) {
- jam();
- /* ------------------------------------------------------------------ */
- /* A NEW NODE WANTS IN AND WE MUST ALLOW IT TO COME IN NOW SINCE THE */
- /* GCP IS COMPLETED. */
- /* ------------------------------------------------------------------ */
- gcpBlockedLab(signal);
- }//if
- break;
- case CopyGCIReq::INITIAL_START_COMPLETED:
- ok = true;
- jam();
- initialStartCompletedLab(signal);
- break;
- case CopyGCIReq::IDLE:
- ok = false;
- jam();
- }
- ndbrequire(ok);
-
- /**
- * Pop queue
- */
- if(waiting != CopyGCIReq::IDLE){
- c_copyGCIMaster.m_copyReason = waiting;
- signal->theData[0] = DihContinueB::ZCOPY_GCI;
- signal->theData[1] = waiting;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- }
-}//Dbdih::execCOPY_GCICONF()
-
-void Dbdih::invalidateLcpInfoAfterSr()
-{
- NodeRecordPtr nodePtr;
- SYSFILE->latestLCP_ID--;
- Sysfile::clearLCPOngoing(SYSFILE->systemRestartBits);
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- ptrAss(nodePtr, nodeRecord);
- if (!NdbNodeBitmask::get(SYSFILE->lcpActive, nodePtr.i)){
- jam();
- /* ------------------------------------------------------------------- */
- // The node was not active in the local checkpoint.
- // To avoid stepping the active status too quickly towards not active,
- // we step it back one level from Sysfile::NS_ActiveMissed_x.
- /* ------------------------------------------------------------------- */
- switch (nodePtr.p->activeStatus) {
- case Sysfile::NS_Active:
- /* ----------------------------------------------------------------- */
- // Being marked active while not taking part in the ongoing LCP is a contradiction.
- /* ----------------------------------------------------------------- */
- ndbrequire(false);
- case Sysfile::NS_ActiveMissed_1:
- jam();
- nodePtr.p->activeStatus = Sysfile::NS_Active;
- break;
- case Sysfile::NS_ActiveMissed_2:
- jam();
- nodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_1;
- break;
- default:
- jam();
- break;
- }//switch
- }//if
- }//for
- setNodeRestartInfoBits();
-}//Dbdih::invalidateLcpInfoAfterSr()
-
-/* ------------------------------------------------------------------------- */
-/* THE NEXT STEP IS TO WRITE THE FILE. */
-/* ------------------------------------------------------------------------- */
-void Dbdih::openingCopyGciSkipInitLab(Signal* signal, FileRecordPtr filePtr)
-{
- writeRestorableGci(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::WRITING_COPY_GCI;
- return;
-}//Dbdih::openingCopyGciSkipInitLab()
-
-void Dbdih::writingCopyGciLab(Signal* signal, FileRecordPtr filePtr)
-{
- /* ----------------------------------------------------------------------- */
- /* WE HAVE NOW WRITTEN THIS FILE. WRITE ALSO NEXT FILE IF THIS IS NOT */
- /* ALREADY THE LAST. */
- /* ----------------------------------------------------------------------- */
- filePtr.p->reqStatus = FileRecord::IDLE;
- if (filePtr.i == crestartInfoFile[0]) {
- jam();
- filePtr.i = crestartInfoFile[1];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- if (filePtr.p->fileStatus == FileRecord::OPEN) {
- jam();
- openingCopyGciSkipInitLab(signal, filePtr);
- return;
- }//if
- openFileRw(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::OPENING_COPY_GCI;
- return;
- }//if
- /* ----------------------------------------------------------------------- */
- /* WE HAVE COMPLETED WRITING BOTH FILES SUCCESSFULLY. NOW REPORT OUR */
- /* SUCCESS TO THE MASTER DIH. BUT FIRST WE NEED TO RESET A NUMBER OF */
- /* VARIABLES USED BY THE LOCAL CHECKPOINT PROCESS (ONLY IF TRIGGERED */
- /* BY THE LOCAL CHECKPOINT PROCESS). */
- /* ----------------------------------------------------------------------- */
- CopyGCIReq::CopyReason reason = c_copyGCISlave.m_copyReason;
-
- if (reason == CopyGCIReq::GLOBAL_CHECKPOINT) {
- jam();
- cgcpParticipantState = GCP_PARTICIPANT_READY;
-
- SubGcpCompleteRep * const rep = (SubGcpCompleteRep*)signal->getDataPtr();
- rep->gci = coldgcp;
- rep->senderData = 0;
- sendSignal(SUMA_REF, GSN_SUB_GCP_COMPLETE_REP, signal,
- SubGcpCompleteRep::SignalLength, JBB);
- }
-
- jam();
- c_copyGCISlave.m_copyReason = CopyGCIReq::IDLE;
-
- if(c_copyGCISlave.m_senderRef == cmasterdihref){
- jam();
- /**
- * Only if same master
- */
- signal->theData[0] = c_copyGCISlave.m_senderData;
- sendSignal(c_copyGCISlave.m_senderRef, GSN_COPY_GCICONF, signal, 1, JBB);
-
- }
- return;
-}//Dbdih::writingCopyGciLab()
-
-void Dbdih::execSTART_LCP_REQ(Signal* signal){
- StartLcpReq * req = (StartLcpReq*)signal->getDataPtr();
-
- CRASH_INSERTION2(7021, isMaster());
- CRASH_INSERTION2(7022, !isMaster());
-
- ndbrequire(c_lcpState.m_masterLcpDihRef == req->senderRef);
- c_lcpState.m_participatingDIH = req->participatingDIH;
- c_lcpState.m_participatingLQH = req->participatingLQH;
-
- c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH = req->participatingLQH;
- if(isMaster()){
- jam();
- ndbrequire(isActiveMaster());
- c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH = req->participatingDIH;
-
- } else {
- c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.clearWaitingFor();
- }
-
- c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received = false;
-
- c_lcpState.setLcpStatus(LCP_INIT_TABLES, __LINE__);
-
- signal->theData[0] = DihContinueB::ZINIT_LCP;
- signal->theData[1] = c_lcpState.m_masterLcpDihRef;
- signal->theData[2] = 0;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
-}
-
-void Dbdih::initLcpLab(Signal* signal, Uint32 senderRef, Uint32 tableId)
-{
- TabRecordPtr tabPtr;
- tabPtr.i = tableId;
-
- if(c_lcpState.m_masterLcpDihRef != senderRef){
- jam();
- /**
- * This is LCP master takeover
- */
-#ifdef VM_TRACE
- ndbout_c("initLcpLab aborted due to LCP master takeover - 1");
-#endif
- c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
- sendMASTER_LCPCONF(signal);
- return;
- }
-
- if(c_lcpState.m_masterLcpDihRef != cmasterdihref){
- jam();
- /**
- * Master takeover has happened, but we have not yet received MASTER_LCPREQ
- */
-#ifdef VM_TRACE
- ndbout_c("initLcpLab aborted due to LCP master takeover - 2");
-#endif
- return;
- }
-
- //const Uint32 lcpId = SYSFILE->latestLCP_ID;
-
- for(; tabPtr.i < ctabFileSize; tabPtr.i++){
-
- ptrAss(tabPtr, tabRecord);
-
- if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) {
- jam();
- tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
- continue;
- }
-
- if (tabPtr.p->storedTable == 0) {
- /**
- * Temporary table
- */
- jam();
- tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
- continue;
- }
-
- if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
- /* ----------------------------------------------------------------- */
- // Updates of the table data structures are protected by this variable.
- /* ----------------------------------------------------------------- */
- jam();
- signal->theData[0] = DihContinueB::ZINIT_LCP;
- signal->theData[1] = senderRef;
- signal->theData[2] = tabPtr.i;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 20, 3);
- return;
- }//if
-
- /**
- * Found a table
- */
- tabPtr.p->tabLcpStatus = TabRecord::TLS_ACTIVE;
-
- /**
- * For each fragment
- */
- for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
- jam();
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, fragId, fragPtr);
-
- /**
- * For each replica record
- */
- Uint32 replicaCount = 0;
- ReplicaRecordPtr replicaPtr;
- for(replicaPtr.i = fragPtr.p->storedReplicas; replicaPtr.i != RNIL;
- replicaPtr.i = replicaPtr.p->nextReplica) {
- jam();
-
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- Uint32 nodeId = replicaPtr.p->procNode;
- if(c_lcpState.m_participatingLQH.get(nodeId)){
- jam();
- replicaCount++;
- replicaPtr.p->lcpOngoingFlag = true;
- }
- }
-
- fragPtr.p->noLcpReplicas = replicaCount;
- }//for
-
- signal->theData[0] = DihContinueB::ZINIT_LCP;
- signal->theData[1] = senderRef;
- signal->theData[2] = tabPtr.i + 1;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
- return;
- }
-
- /**
- * No more tables
- */
- jam();
-
- if (c_lcpState.m_masterLcpDihRef != reference()){
- jam();
- ndbrequire(!isMaster());
- c_lcpState.setLcpStatus(LCP_STATUS_ACTIVE, __LINE__);
- } else {
- jam();
- ndbrequire(isMaster());
- }
-
- CRASH_INSERTION2(7023, isMaster());
- CRASH_INSERTION2(7024, !isMaster());
-
- jam();
- StartLcpConf * conf = (StartLcpConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- sendSignal(c_lcpState.m_masterLcpDihRef, GSN_START_LCP_CONF, signal,
- StartLcpConf::SignalLength, JBB);
- return;
-}//Dbdih::initLcpLab()
-
-/* ------------------------------------------------------------------------- */
-/* ERROR HANDLING FOR COPY RESTORABLE GCI FILE. */
-/* ------------------------------------------------------------------------- */
-void Dbdih::openingCopyGciErrorLab(Signal* signal, FileRecordPtr filePtr)
-{
- /* ------------------------------------------------------------------------- */
- /* ERROR IN OPENING THE FILE. WE WILL TRY CREATING THE FILE INSTEAD. */
- /* ------------------------------------------------------------------------- */
- createFileRw(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::CREATING_COPY_GCI;
- return;
-}//Dbdih::openingCopyGciErrorLab()
-
-/* ------------------------------------------------------------------------- */
-/* ENTER DICTSTARTCONF WITH */
-/* TBLOCKREF */
-/* ------------------------------------------------------------------------- */
-void Dbdih::dictStartConfLab(Signal* signal)
-{
- /* ----------------------------------------------------------------------- */
- /* WE HAVE NOW RECEIVED ALL THE TABLES TO RESTART. */
- /* ----------------------------------------------------------------------- */
- signal->theData[0] = DihContinueB::ZSTART_FRAGMENT;
- signal->theData[1] = 0; /* START WITH TABLE 0 */
- signal->theData[2] = 0; /* AND FRAGMENT 0 */
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
- return;
-}//Dbdih::dictStartConfLab()
-
-
-void Dbdih::openingTableLab(Signal* signal, FileRecordPtr filePtr)
-{
- /* ---------------------------------------------------------------------- */
- /* SUCCESSFULLY OPENED A FILE. READ THE FIRST PAGE OF THIS FILE. */
- /* ---------------------------------------------------------------------- */
- TabRecordPtr tabPtr;
- PageRecordPtr pagePtr;
-
- tabPtr.i = filePtr.p->tabRef;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- tabPtr.p->noPages = 1;
- allocpage(pagePtr);
- tabPtr.p->pageRef[0] = pagePtr.i;
- readTabfile(signal, tabPtr.p, filePtr);
- filePtr.p->reqStatus = FileRecord::READING_TABLE;
- return;
-}//Dbdih::openingTableLab()
-
-void Dbdih::openingTableErrorLab(Signal* signal, FileRecordPtr filePtr)
-{
- TabRecordPtr tabPtr;
- tabPtr.i = filePtr.p->tabRef;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- /* ---------------------------------------------------------------------- */
- /* WE FAILED IN OPENING A FILE. IF THE FIRST FILE THEN TRY WITH THE */
- /* DUPLICATE FILE, OTHERWISE WE REPORT AN ERROR IN THE SYSTEM RESTART. */
- /* ---------------------------------------------------------------------- */
- ndbrequire(filePtr.i == tabPtr.p->tabFile[0]);
- filePtr.i = tabPtr.p->tabFile[1];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- openFileRw(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::OPENING_TABLE;
-}//Dbdih::openingTableErrorLab()
-
-void Dbdih::readingTableLab(Signal* signal, FileRecordPtr filePtr)
-{
- TabRecordPtr tabPtr;
- PageRecordPtr pagePtr;
- /* ---------------------------------------------------------------------- */
- /* WE HAVE SUCCESSFULLY READ A NUMBER OF PAGES IN THE TABLE FILE. IF */
- /* MORE PAGES EXIST IN THE FILE THEN READ ALL PAGES IN THE FILE. */
- /* ---------------------------------------------------------------------- */
- filePtr.p->reqStatus = FileRecord::IDLE;
- tabPtr.i = filePtr.p->tabRef;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- pagePtr.i = tabPtr.p->pageRef[0];
- ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
- Uint32 noOfStoredPages = pagePtr.p->word[33];
- if (tabPtr.p->noPages < noOfStoredPages) {
- jam();
- ndbrequire(noOfStoredPages <= 8);
- for (Uint32 i = tabPtr.p->noPages; i < noOfStoredPages; i++) {
- jam();
- allocpage(pagePtr);
- tabPtr.p->pageRef[i] = pagePtr.i;
- }//for
- tabPtr.p->noPages = noOfStoredPages;
- readTabfile(signal, tabPtr.p, filePtr);
- filePtr.p->reqStatus = FileRecord::READING_TABLE;
- } else {
- ndbrequire(tabPtr.p->noPages == pagePtr.p->word[33]);
- ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE);
- jam();
- /* --------------------------------------------------------------------- */
- /* WE HAVE READ ALL PAGES. NOW READ FROM PAGES INTO TABLE AND FRAGMENT */
- /* DATA STRUCTURES. */
- /* --------------------------------------------------------------------- */
- tabPtr.p->tabCopyStatus = TabRecord::CS_SR_PHASE1_READ_PAGES;
- signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_TABLE;
- signal->theData[1] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- }//if
- return;
-}//Dbdih::readingTableLab()
-
-void Dbdih::readTableFromPagesLab(Signal* signal, TabRecordPtr tabPtr)
-{
- FileRecordPtr filePtr;
- filePtr.i = tabPtr.p->tabFile[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- /* ---------------------------------------------------------------------- */
- /* WE HAVE NOW COPIED TO OUR NODE. WE HAVE NOW COMPLETED RESTORING */
- /* THIS TABLE. CONTINUE WITH THE NEXT TABLE. */
- /* WE ALSO NEED TO CLOSE THE TABLE FILE. */
- /* ---------------------------------------------------------------------- */
- if (filePtr.p->fileStatus != FileRecord::OPEN) {
- jam();
- filePtr.i = tabPtr.p->tabFile[1];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- }//if
- closeFile(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::CLOSING_TABLE_SR;
- return;
-}//Dbdih::readTableFromPagesLab()
-
-void Dbdih::closingTableSrLab(Signal* signal, FileRecordPtr filePtr)
-{
- /**
- * Update table/fragment info
- */
- TabRecordPtr tabPtr;
- tabPtr.i = filePtr.p->tabRef;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- resetReplicaSr(tabPtr);
-
- signal->theData[0] = DihContinueB::ZCOPY_TABLE;
- signal->theData[1] = filePtr.p->tabRef;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
-
- return;
-}//Dbdih::closingTableSrLab()
-
-void
-Dbdih::resetReplicaSr(TabRecordPtr tabPtr){
-
- const Uint32 newestRestorableGCI = SYSFILE->newestRestorableGCI;
-
- for(Uint32 i = 0; i<tabPtr.p->totalfragments; i++){
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, i, fragPtr);
-
- /**
- * 1) Start by moving all replicas into oldStoredReplicas
- */
- prepareReplicas(fragPtr);
-
- /**
- * 2) Move all "alive" replicas into storedReplicas
- * + update noCrashedReplicas...
- */
- ReplicaRecordPtr replicaPtr;
- replicaPtr.i = fragPtr.p->oldStoredReplicas;
- while (replicaPtr.i != RNIL) {
- jam();
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- const Uint32 nextReplicaPtrI = replicaPtr.p->nextReplica;
-
- NodeRecordPtr nodePtr;
- nodePtr.i = replicaPtr.p->procNode;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
-
- const Uint32 noCrashedReplicas = replicaPtr.p->noCrashedReplicas;
- if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
- jam();
- switch (nodePtr.p->activeStatus) {
- case Sysfile::NS_Active:
- case Sysfile::NS_ActiveMissed_1:
- case Sysfile::NS_ActiveMissed_2:{
- jam();
- /* --------------------------------------------------------------- */
- /* THE NODE IS ALIVE AND KICKING AND ACTIVE, LET'S USE IT. */
- /* --------------------------------------------------------------- */
- arrGuard(noCrashedReplicas, 8);
- Uint32 lastGci = replicaPtr.p->replicaLastGci[noCrashedReplicas];
- if(lastGci >= newestRestorableGCI){
- jam();
- /** -------------------------------------------------------------
- * THE REPLICA WAS ALIVE AT THE SYSTEM FAILURE. WE WILL SET THE
- * LAST REPLICA GCI TO MINUS ONE SINCE IT HASN'T FAILED YET IN THE
- * NEW SYSTEM.
- *-------------------------------------------------------------- */
- replicaPtr.p->replicaLastGci[noCrashedReplicas] = (Uint32)-1;
- } else {
- jam();
- /*--------------------------------------------------------------
- * SINCE IT WAS NOT ALIVE AT THE TIME OF THE SYSTEM CRASH THIS IS
- * A COMPLETELY NEW REPLICA. WE WILL SET THE CREATE GCI TO BE THE
- * NEXT GCI TO BE EXECUTED.
- *-------------------------------------------------------------- */
- const Uint32 nextCrashed = noCrashedReplicas + 1;
- replicaPtr.p->noCrashedReplicas = nextCrashed;
- arrGuard(nextCrashed, 8);
- replicaPtr.p->createGci[nextCrashed] = newestRestorableGCI + 1;
- ndbrequire(newestRestorableGCI + 1 != 0xF1F1F1F1);
- replicaPtr.p->replicaLastGci[nextCrashed] = (Uint32)-1;
- }//if
-
- resetReplicaLcp(replicaPtr.p, newestRestorableGCI);
-
- /* -----------------------------------------------------------------
- * LINK THE REPLICA INTO THE STORED REPLICA LIST. WE WILL USE THIS
- * NODE AS A STORED REPLICA.
- * WE MUST FIRST LINK IT OUT OF THE LIST OF OLD STORED REPLICAS.
- * --------------------------------------------------------------- */
- removeOldStoredReplica(fragPtr, replicaPtr);
- linkStoredReplica(fragPtr, replicaPtr);
-
- }
- default:
- jam();
- /*empty*/;
- break;
- }
- }
- replicaPtr.i = nextReplicaPtrI;
- }//while
- }
-}
-
-void
-Dbdih::resetReplicaLcp(ReplicaRecord * replicaP, Uint32 stopGci){
-
- Uint32 lcpNo = replicaP->nextLcp;
- const Uint32 startLcpNo = lcpNo;
- do {
- lcpNo = prevLcpNo(lcpNo);
- ndbrequire(lcpNo < MAX_LCP_STORED);
- if (replicaP->lcpStatus[lcpNo] == ZVALID) {
- if (replicaP->maxGciStarted[lcpNo] < stopGci) {
- jam();
- /* ----------------------------------------------------------------- */
- /* WE HAVE FOUND A USEFUL LOCAL CHECKPOINT THAT CAN BE USED FOR */
- /* RESTARTING THIS FRAGMENT REPLICA. */
- /* ----------------------------------------------------------------- */
- return;
- }//if
- }//if
-
- /**
- * WE COULD NOT USE THIS LOCAL CHECKPOINT. IT WAS TOO
- * RECENT OR SIMPLY NOT A VALID CHECKPOINT.
- * WE SHOULD THUS REMOVE THIS LOCAL CHECKPOINT SINCE IT WILL NEVER
- * AGAIN BE USED. SET LCP_STATUS TO INVALID.
- */
- replicaP->nextLcp = lcpNo;
- replicaP->lcpId[lcpNo] = 0;
- replicaP->lcpStatus[lcpNo] = ZINVALID;
- } while (lcpNo != startLcpNo);
-
- replicaP->nextLcp = 0;
-}
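-
-/* ------------------------------------------------------------------------- */
-/* A standalone sketch of the backwards LCP scan in resetReplicaLcp() above,  */
-/* with simplified stand-in types and constants (MAX_STORED, VALID, INVALID   */
-/* mimic MAX_LCP_STORED, ZVALID and ZINVALID). Illustration only.             */
-/* ------------------------------------------------------------------------- */
-#if 0
-typedef unsigned int Uint32;
-const Uint32 MAX_STORED = 3;
-const Uint32 VALID = 1, INVALID = 2;
-
-struct MiniReplica {
-  Uint32 nextLcp;                      /* slot where the next LCP would go    */
-  Uint32 lcpStatus[MAX_STORED];
-  Uint32 maxGciStarted[MAX_STORED];
-};
-
-inline Uint32 prevSlot(Uint32 n) { return (n == 0) ? MAX_STORED - 1 : n - 1; }
-
-/* Walk backwards from the newest LCP and keep the first one that started     */
-/* strictly before stopGci; every newer or invalid slot is discarded.         */
-inline void resetLcp(MiniReplica& r, Uint32 stopGci)
-{
-  Uint32 lcpNo = r.nextLcp;
-  const Uint32 start = lcpNo;
-  do {
-    lcpNo = prevSlot(lcpNo);
-    if (r.lcpStatus[lcpNo] == VALID && r.maxGciStarted[lcpNo] < stopGci)
-      return;                          /* restorable checkpoint found         */
-    r.nextLcp = lcpNo;                 /* too recent or invalid: drop it      */
-    r.lcpStatus[lcpNo] = INVALID;
-  } while (lcpNo != start);
-  r.nextLcp = 0;                       /* no usable checkpoint remained       */
-}
-#endif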
-
-void Dbdih::readingTableErrorLab(Signal* signal, FileRecordPtr filePtr)
-{
- TabRecordPtr tabPtr;
- tabPtr.i = filePtr.p->tabRef;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- /* ---------------------------------------------------------------------- */
- /* READING THIS FILE FAILED. CLOSE IT AFTER RELEASING ALL PAGES. */
- /* ---------------------------------------------------------------------- */
- ndbrequire(tabPtr.p->noPages <= 8);
- for (Uint32 i = 0; i < tabPtr.p->noPages; i++) {
- jam();
- releasePage(tabPtr.p->pageRef[i]);
- }//for
- closeFile(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::CLOSING_TABLE_CRASH;
- return;
-}//Dbdih::readingTableErrorLab()
-
-void Dbdih::closingTableCrashLab(Signal* signal, FileRecordPtr filePtr)
-{
- TabRecordPtr tabPtr;
- /* ---------------------------------------------------------------------- */
- /* WE HAVE NOW CLOSED A FILE WHICH WE HAD A READ ERROR WITH. PROCEED */
- /* WITH NEXT FILE IF NOT THE LAST OTHERWISE REPORT ERROR. */
- /* ---------------------------------------------------------------------- */
- tabPtr.i = filePtr.p->tabRef;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- ndbrequire(filePtr.i == tabPtr.p->tabFile[0]);
- filePtr.i = tabPtr.p->tabFile[1];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- openFileRw(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::OPENING_TABLE;
-}//Dbdih::closingTableCrashLab()
-
-/*****************************************************************************/
-/* ********** COPY TABLE MODULE *************/
-/*****************************************************************************/
-void Dbdih::execCOPY_TABREQ(Signal* signal)
-{
- CRASH_INSERTION(7172);
-
- TabRecordPtr tabPtr;
- PageRecordPtr pagePtr;
- jamEntry();
- BlockReference ref = signal->theData[0];
- Uint32 reqinfo = signal->theData[1];
- tabPtr.i = signal->theData[2];
- Uint32 schemaVersion = signal->theData[3];
- Uint32 noOfWords = signal->theData[4];
- ndbrequire(ref == cmasterdihref);
- ndbrequire(!isMaster());
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- if (reqinfo == 1) {
- jam();
- tabPtr.p->schemaVersion = schemaVersion;
- initTableFile(tabPtr);
- }//if
- ndbrequire(tabPtr.p->noPages < 8);
- if (tabPtr.p->noOfWords == 0) {
- jam();
- allocpage(pagePtr);
- tabPtr.p->pageRef[tabPtr.p->noPages] = pagePtr.i;
- tabPtr.p->noPages++;
- } else {
- jam();
- pagePtr.i = tabPtr.p->pageRef[tabPtr.p->noPages - 1];
- ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
- }//if
- ndbrequire(tabPtr.p->noOfWords + 15 < 2048);
- ndbrequire(tabPtr.p->noOfWords < 2048);
- MEMCOPY_NO_WORDS(&pagePtr.p->word[tabPtr.p->noOfWords], &signal->theData[5], 16);
- tabPtr.p->noOfWords += 16;
- if (tabPtr.p->noOfWords == 2048) {
- jam();
- tabPtr.p->noOfWords = 0;
- }//if
- if (noOfWords > 16) {
- jam();
- return;
- }//if
- tabPtr.p->noOfWords = 0;
- ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE);
- tabPtr.p->tabCopyStatus = TabRecord::CS_COPY_TAB_REQ;
- signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_TABLE;
- signal->theData[1] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
-}//Dbdih::execCOPY_TABREQ()
-
-void
-Dbdih::copyTabReq_complete(Signal* signal, TabRecordPtr tabPtr){
- if (!isMaster()) {
- jam();
- //----------------------------------------------------------------------------
- // In this particular case we do not release table pages if we are master. The
- // reason is that the master could still be sending the table info to another
- // node.
- //----------------------------------------------------------------------------
- releaseTabPages(tabPtr.i);
- tabPtr.p->tabStatus = TabRecord::TS_ACTIVE;
- for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
- jam();
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, fragId, fragPtr);
- updateNodeInfo(fragPtr);
- }//for
- }//if
- signal->theData[0] = cownNodeId;
- signal->theData[1] = tabPtr.i;
- sendSignal(cmasterdihref, GSN_COPY_TABCONF, signal, 2, JBB);
-}
-
-/*****************************************************************************/
-/* ****** READ FROM A NUMBER OF PAGES INTO THE TABLE DATA STRUCTURES ********/
-/*****************************************************************************/
-void Dbdih::readPagesIntoTableLab(Signal* signal, Uint32 tableId)
-{
- RWFragment rf;
- rf.wordIndex = 35;
- rf.pageIndex = 0;
- rf.rwfTabPtr.i = tableId;
- ptrCheckGuard(rf.rwfTabPtr, ctabFileSize, tabRecord);
- rf.rwfPageptr.i = rf.rwfTabPtr.p->pageRef[0];
- ptrCheckGuard(rf.rwfPageptr, cpageFileSize, pageRecord);
- rf.rwfTabPtr.p->totalfragments = readPageWord(&rf);
- rf.rwfTabPtr.p->noOfBackups = readPageWord(&rf);
- rf.rwfTabPtr.p->hashpointer = readPageWord(&rf);
- rf.rwfTabPtr.p->kvalue = readPageWord(&rf);
- rf.rwfTabPtr.p->mask = readPageWord(&rf);
- ndbrequire(readPageWord(&rf) == TabRecord::HASH);
- rf.rwfTabPtr.p->method = TabRecord::HASH;
- /* ---------------------------------- */
- /* Type of table, 2 = temporary table */
- /* ---------------------------------- */
- rf.rwfTabPtr.p->storedTable = readPageWord(&rf);
-
- Uint32 noOfFrags = rf.rwfTabPtr.p->totalfragments;
- ndbrequire(noOfFrags > 0);
- ndbrequire((noOfFrags * (rf.rwfTabPtr.p->noOfBackups + 1)) <= cnoFreeReplicaRec);
- allocFragments(noOfFrags, rf.rwfTabPtr);
-
- signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_FRAG;
- signal->theData[1] = rf.rwfTabPtr.i;
- signal->theData[2] = 0;
- signal->theData[3] = rf.pageIndex;
- signal->theData[4] = rf.wordIndex;
- sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
- return;
-}//Dbdih::readPagesIntoTableLab()
-
-void Dbdih::readPagesIntoFragLab(Signal* signal, RWFragment* rf)
-{
- ndbrequire(rf->pageIndex < 8);
- rf->rwfPageptr.i = rf->rwfTabPtr.p->pageRef[rf->pageIndex];
- ptrCheckGuard(rf->rwfPageptr, cpageFileSize, pageRecord);
- FragmentstorePtr fragPtr;
- getFragstore(rf->rwfTabPtr.p, rf->fragId, fragPtr);
- readFragment(rf, fragPtr);
- readReplicas(rf, fragPtr);
- rf->fragId++;
- if (rf->fragId == rf->rwfTabPtr.p->totalfragments) {
- jam();
- switch (rf->rwfTabPtr.p->tabCopyStatus) {
- case TabRecord::CS_SR_PHASE1_READ_PAGES:
- jam();
- releaseTabPages(rf->rwfTabPtr.i);
- rf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
- signal->theData[0] = DihContinueB::ZREAD_TABLE_FROM_PAGES;
- signal->theData[1] = rf->rwfTabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- case TabRecord::CS_COPY_TAB_REQ:
- jam();
- rf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
- if(getNodeState().getSystemRestartInProgress()){
- jam();
- copyTabReq_complete(signal, rf->rwfTabPtr);
- return;
- }
- rf->rwfTabPtr.p->tabUpdateState = TabRecord::US_COPY_TAB_REQ;
- signal->theData[0] = DihContinueB::ZTABLE_UPDATE;
- signal->theData[1] = rf->rwfTabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- default:
- ndbrequire(false);
- return;
- break;
- }//switch
- } else {
- jam();
- signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_FRAG;
- signal->theData[1] = rf->rwfTabPtr.i;
- signal->theData[2] = rf->fragId;
- signal->theData[3] = rf->pageIndex;
- signal->theData[4] = rf->wordIndex;
- sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
- }//if
- return;
-}//Dbdih::readPagesIntoFragLab()
-
-/*****************************************************************************/
-/***** WRITING FROM TABLE DATA STRUCTURES INTO A SET OF PAGES ******/
-// execCONTINUEB(ZPACK_TABLE_INTO_PAGES)
-/*****************************************************************************/
-void Dbdih::packTableIntoPagesLab(Signal* signal, Uint32 tableId)
-{
- RWFragment wf;
- TabRecordPtr tabPtr;
- allocpage(wf.rwfPageptr);
- tabPtr.i = tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- tabPtr.p->pageRef[0] = wf.rwfPageptr.i;
- tabPtr.p->noPages = 1;
- wf.wordIndex = 35;
- wf.pageIndex = 0;
- writePageWord(&wf, tabPtr.p->totalfragments);
- writePageWord(&wf, tabPtr.p->noOfBackups);
- writePageWord(&wf, tabPtr.p->hashpointer);
- writePageWord(&wf, tabPtr.p->kvalue);
- writePageWord(&wf, tabPtr.p->mask);
- writePageWord(&wf, TabRecord::HASH);
- writePageWord(&wf, tabPtr.p->storedTable);
-
- signal->theData[0] = DihContinueB::ZPACK_FRAG_INTO_PAGES;
- signal->theData[1] = tabPtr.i;
- signal->theData[2] = 0;
- signal->theData[3] = wf.pageIndex;
- signal->theData[4] = wf.wordIndex;
- sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
-}//Dbdih::packTableIntoPagesLab()
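-
-/* ------------------------------------------------------------------------- */
-/* A rough sketch of the first-page layout that readPagesIntoTableLab() and   */
-/* packTableIntoPagesLab() above and packFragIntoPagesLab() below agree on.   */
-/* The word offsets are taken from the code; the helper struct and the two    */
-/* functions are simplified stand-ins for illustration only.                  */
-/* ------------------------------------------------------------------------- */
-#if 0
-typedef unsigned int Uint32;
-
-struct MiniPage  { Uint32 word[2048]; };
-struct MiniTable {
-  Uint32 totalfragments, noOfBackups, hashpointer,
-         kvalue, mask, method, storedTable;
-};
-
-/* word[33] = number of pages used for the table description,                 */
-/* word[34] = total words written = (noPages - 1) * 2048 + wordIndex,         */
-/* word[35] onwards = table header, then one fragment record after another.   */
-inline void packHeader(MiniPage& p, const MiniTable& t, Uint32& wordIndex)
-{
-  wordIndex = 35;
-  p.word[wordIndex++] = t.totalfragments;
-  p.word[wordIndex++] = t.noOfBackups;
-  p.word[wordIndex++] = t.hashpointer;
-  p.word[wordIndex++] = t.kvalue;
-  p.word[wordIndex++] = t.mask;
-  p.word[wordIndex++] = t.method;      /* always TabRecord::HASH here         */
-  p.word[wordIndex++] = t.storedTable; /* 0 is treated as temporary above     */
-}
-
-inline void unpackHeader(const MiniPage& p, MiniTable& t, Uint32& wordIndex)
-{
-  wordIndex = 35;
-  t.totalfragments = p.word[wordIndex++];
-  t.noOfBackups    = p.word[wordIndex++];
-  t.hashpointer    = p.word[wordIndex++];
-  t.kvalue         = p.word[wordIndex++];
-  t.mask           = p.word[wordIndex++];
-  t.method         = p.word[wordIndex++];
-  t.storedTable    = p.word[wordIndex++];
-}
-#endif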
-
-/*****************************************************************************/
-// execCONTINUEB(ZPACK_FRAG_INTO_PAGES)
-/*****************************************************************************/
-void Dbdih::packFragIntoPagesLab(Signal* signal, RWFragment* wf)
-{
- ndbrequire(wf->pageIndex < 8);
- wf->rwfPageptr.i = wf->rwfTabPtr.p->pageRef[wf->pageIndex];
- ptrCheckGuard(wf->rwfPageptr, cpageFileSize, pageRecord);
- FragmentstorePtr fragPtr;
- getFragstore(wf->rwfTabPtr.p, wf->fragId, fragPtr);
- writeFragment(wf, fragPtr);
- writeReplicas(wf, fragPtr.p->storedReplicas);
- writeReplicas(wf, fragPtr.p->oldStoredReplicas);
- wf->fragId++;
- if (wf->fragId == wf->rwfTabPtr.p->totalfragments) {
- jam();
- PageRecordPtr pagePtr;
- pagePtr.i = wf->rwfTabPtr.p->pageRef[0];
- ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
- pagePtr.p->word[33] = wf->rwfTabPtr.p->noPages;
- pagePtr.p->word[34] = ((wf->rwfTabPtr.p->noPages - 1) * 2048) + wf->wordIndex;
- switch (wf->rwfTabPtr.p->tabCopyStatus) {
- case TabRecord::CS_SR_PHASE2_READ_TABLE:
- /* -------------------------------------------------------------------*/
- // We are performing a system restart and we are now ready to copy the
- // table from this node (the master) to all other nodes.
- /* -------------------------------------------------------------------*/
- jam();
- wf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
- signal->theData[0] = DihContinueB::ZSR_PHASE2_READ_TABLE;
- signal->theData[1] = wf->rwfTabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- case TabRecord::CS_COPY_NODE_STATE:
- jam();
- tableCopyNodeLab(signal, wf->rwfTabPtr);
- return;
- break;
- case TabRecord::CS_LCP_READ_TABLE:
- jam();
- signal->theData[0] = DihContinueB::ZTABLE_UPDATE;
- signal->theData[1] = wf->rwfTabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- case TabRecord::CS_REMOVE_NODE:
- case TabRecord::CS_INVALIDATE_NODE_LCP:
- jam();
- signal->theData[0] = DihContinueB::ZTABLE_UPDATE;
- signal->theData[1] = wf->rwfTabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- case TabRecord::CS_ADD_TABLE_MASTER:
- jam();
- wf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
- signal->theData[0] = DihContinueB::ZADD_TABLE_MASTER_PAGES;
- signal->theData[1] = wf->rwfTabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- case TabRecord::CS_ADD_TABLE_SLAVE:
- jam();
- wf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
- signal->theData[0] = DihContinueB::ZADD_TABLE_SLAVE_PAGES;
- signal->theData[1] = wf->rwfTabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- default:
- ndbrequire(false);
- return;
- break;
- }//switch
- } else {
- jam();
- signal->theData[0] = DihContinueB::ZPACK_FRAG_INTO_PAGES;
- signal->theData[1] = wf->rwfTabPtr.i;
- signal->theData[2] = wf->fragId;
- signal->theData[3] = wf->pageIndex;
- signal->theData[4] = wf->wordIndex;
- sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
- }//if
- return;
-}//Dbdih::packFragIntoPagesLab()
-
-/*****************************************************************************/
-/* ********** START FRAGMENT MODULE *************/
-/*****************************************************************************/
-void Dbdih::startFragment(Signal* signal, Uint32 tableId, Uint32 fragId)
-{
- Uint32 TloopCount = 0;
- TabRecordPtr tabPtr;
- while (true) {
- if (TloopCount > 100) {
- jam();
- signal->theData[0] = DihContinueB::ZSTART_FRAGMENT;
- signal->theData[1] = tableId;
- signal->theData[2] = 0;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
- return;
- }
-
- if (tableId >= ctabFileSize) {
- jam();
- signal->theData[0] = DihContinueB::ZCOMPLETE_RESTART;
- sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
- return;
- }//if
-
- tabPtr.i = tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE){
- jam();
- TloopCount++;
- tableId++;
- fragId = 0;
- continue;
- }
-
- if(tabPtr.p->storedTable == 0){
- jam();
- TloopCount++;
- tableId++;
- fragId = 0;
- continue;
- }
-
- jam();
- break;
- }//while
-
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, fragId, fragPtr);
- /* ----------------------------------------------------------------------- */
- /* WE NEED TO RESET THE REPLICA DATA STRUCTURES. THIS MEANS THAT WE */
- /* MUST REMOVE REPLICAS THAT WERE NOT STARTED AT THE GCI TO RESTORE. WE */
- /* NEED TO PUT ALL STORED REPLICAS ON THE LIST OF OLD STORED REPLICAS */
- /* RESET THE NUMBER OF REPLICAS TO CREATE. */
- /* ----------------------------------------------------------------------- */
- cnoOfCreateReplicas = 0;
- /* ----------------------------------------------------------------------- */
- /* WE WILL NEVER START MORE THAN FOUR FRAGMENT REPLICAS WHATEVER THE */
- /* DESIRED REPLICATION IS. */
- /* ----------------------------------------------------------------------- */
- ndbrequire(tabPtr.p->noOfBackups < 4);
- /* ----------------------------------------------------------------------- */
- /* SEARCH FOR STORED REPLICAS THAT CAN BE USED TO RESTART THE SYSTEM. */
- /* ----------------------------------------------------------------------- */
- searchStoredReplicas(fragPtr);
- if (cnoOfCreateReplicas == 0) {
- /* --------------------------------------------------------------------- */
- /* THERE WERE NO STORED REPLICAS AVAILABLE THAT COULD SERVE AS A REPLICA */
- /* TO RESTART THE SYSTEM FROM. IN A LATER RELEASE WE WILL ADD */
- /* FUNCTIONALITY TO CHECK WHETHER ANY STANDBY NODES COULD DO THIS TASK */
- /* INSTEAD. IN THIS IMPLEMENTATION WE SIMPLY CRASH THE SYSTEM, WHICH */
- /* WILL DECREASE THE GCI TO RESTORE AND HOPEFULLY MAKE IT POSSIBLE TO */
- /* RESTORE THE SYSTEM. */
- /* --------------------------------------------------------------------- */
- char buf[100];
- BaseString::snprintf(buf, sizeof(buf),
- "Unable to find restorable replica for "
- "table: %d fragment: %d gci: %d",
- tableId, fragId, SYSFILE->newestRestorableGCI);
- progError(__LINE__,
- ERR_SYSTEM_ERROR,
- buf);
- ndbrequire(false);
- return;
- }//if
-
- /* ----------------------------------------------------------------------- */
- /* WE HAVE CHANGED WHICH NODE IS THE PRIMARY REPLICA AND WHICH NODES ARE */
- /* BACKUP NODES. WE MUST UPDATE THIS NODE'S DATA STRUCTURES SINCE WE */
- /* WILL NOT COPY THE TABLE DATA TO OURSELVES. */
- /* ----------------------------------------------------------------------- */
- updateNodeInfo(fragPtr);
- /* ----------------------------------------------------------------------- */
- /* NOW WE HAVE COLLECTED ALL THE REPLICAS WE COULD GET. WE WILL NOW */
- /* RESTART THE FRAGMENT REPLICAS WE HAVE FOUND, IRRESPECTIVE OF WHETHER THERE */
- /* ARE ENOUGH ACCORDING TO THE DESIRED REPLICATION. */
- /* ----------------------------------------------------------------------- */
- /* WE START BY SENDING ADD_FRAGREQ FOR THOSE REPLICAS THAT NEED IT. */
- /* ----------------------------------------------------------------------- */
- CreateReplicaRecordPtr createReplicaPtr;
- for (createReplicaPtr.i = 0;
- createReplicaPtr.i < cnoOfCreateReplicas;
- createReplicaPtr.i++) {
- jam();
- ptrCheckGuard(createReplicaPtr, 4, createReplicaRecord);
- createReplicaPtr.p->hotSpareUse = false;
- }//for
-
- sendStartFragreq(signal, tabPtr, fragId);
-
- /**
- * Don't wait for START_FRAGCONF
- */
- fragId++;
- if (fragId >= tabPtr.p->totalfragments) {
- jam();
- tabPtr.i++;
- fragId = 0;
- }//if
- signal->theData[0] = DihContinueB::ZSTART_FRAGMENT;
- signal->theData[1] = tabPtr.i;
- signal->theData[2] = fragId;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
-
- return;
-}//Dbdih::startFragment()
-
-
-/*****************************************************************************/
-/* ********** COMPLETE RESTART MODULE *************/
-/*****************************************************************************/
-void Dbdih::completeRestartLab(Signal* signal)
-{
- sendLoopMacro(START_RECREQ, sendSTART_RECREQ);
-}//Dbdih::completeRestartLab()
-
-/* ------------------------------------------------------------------------- */
-// SYSTEM RESTART:
-/* A NODE HAS COMPLETED RESTORING ALL DATABASE FRAGMENTS. */
-// NODE RESTART:
-// THE STARTING NODE HAS PREPARED ITS LOG FILES TO ENABLE EXECUTION
-// OF TRANSACTIONS.
-// Precondition:
-// This signal must be received by the master node.
-/* ------------------------------------------------------------------------- */
-void Dbdih::execSTART_RECCONF(Signal* signal)
-{
- jamEntry();
- Uint32 senderNodeId = signal->theData[0];
- ndbrequire(isMaster());
- if (getNodeState().startLevel >= NodeState::SL_STARTED){
- /* --------------------------------------------------------------------- */
- // Since our node is already up and running this must be a node restart.
- // This means that we should be the master node,
- // otherwise we have a problem.
- /* --------------------------------------------------------------------- */
- jam();
- ndbrequire(senderNodeId == c_nodeStartMaster.startNode);
- nodeRestartStartRecConfLab(signal);
- return;
- } else {
- /* --------------------------------------------------------------------- */
- // This was the system restart case. We set the state indicating that the
- // node has completed restoration of all fragments.
- /* --------------------------------------------------------------------- */
- receiveLoopMacro(START_RECREQ, senderNodeId);
-
- signal->theData[0] = reference();
- sendSignal(cntrlblockref, GSN_NDB_STARTCONF, signal, 1, JBB);
- return;
- }//if
-}//Dbdih::execSTART_RECCONF()
-
-void Dbdih::copyNodeLab(Signal* signal, Uint32 tableId)
-{
- /* ----------------------------------------------------------------------- */
- // This code is executed by the master to assist a node restart in receiving
- // the data in the master.
- /* ----------------------------------------------------------------------- */
- Uint32 TloopCount = 0;
-
- if (!c_nodeStartMaster.activeState) {
- jam();
- /* --------------------------------------------------------------------- */
- // Obviously the node crashed in the middle of its node restart. We will
- // stop this process simply by returning after resetting the wait indicator.
- /* ---------------------------------------------------------------------- */
- c_nodeStartMaster.wait = ZFALSE;
- return;
- }//if
- TabRecordPtr tabPtr;
- tabPtr.i = tableId;
- while (tabPtr.i < ctabFileSize) {
- ptrAss(tabPtr, tabRecord);
- if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) {
- /* -------------------------------------------------------------------- */
- // The table is defined. We will start by packing the table into pages.
- // The tabCopyStatus indicates to the CONTINUEB(ZPACK_TABLE_INTO_PAGES)
- // who called it. After packing the table into page(s) it will be sent to
- // the starting node by COPY_TABREQ signals. After returning from the
- // starting node we will return to this subroutine and continue
- // with the next table.
- /* -------------------------------------------------------------------- */
- ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE);
- tabPtr.p->tabCopyStatus = TabRecord::CS_COPY_NODE_STATE;
- signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
- signal->theData[1] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- } else {
- jam();
- if (TloopCount > 100) {
- /* ------------------------------------------------------------------ */
- // Introduce a real-time break after looping through 100 tables that were not copied.
- /* ----------------------------------------------------------------- */
- jam();
- signal->theData[0] = DihContinueB::ZCOPY_NODE;
- signal->theData[1] = tabPtr.i + 1;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- } else {
- jam();
- TloopCount++;
- tabPtr.i++;
- }//if
- }//if
- }//while
- dihCopyCompletedLab(signal);
- return;
-}//Dbdih::copyNodeLab()
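-
-/* ------------------------------------------------------------------------- */
-/* A minimal sketch of the real-time break pattern used by copyNodeLab()      */
-/* above and startFragment() earlier: a long table scan is cut into slices    */
-/* of at most 100 uninteresting entries and resumed via CONTINUEB so that     */
-/* the job buffer is never blocked for long. All names are stand-ins.         */
-/* ------------------------------------------------------------------------- */
-#if 0
-typedef unsigned int Uint32;
-
-enum SliceResult { FOUND_ITEM, TIME_BREAK, SCAN_DONE };
-
-struct Scan { Uint32 next; };          /* first entry not yet examined        */
-
-inline SliceResult scanSlice(Scan& s, Uint32 size, bool (*wanted)(Uint32))
-{
-  Uint32 looped = 0;
-  while (s.next < size) {
-    if (wanted(s.next))
-      return FOUND_ITEM;               /* handle s.next, then resume later    */
-    s.next++;
-    if (++looped > 100)
-      return TIME_BREAK;               /* send CONTINUEB(s.next) and return   */
-  }
-  return SCAN_DONE;                    /* whole range examined                */
-}
-#endif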
-
-void Dbdih::tableCopyNodeLab(Signal* signal, TabRecordPtr tabPtr)
-{
- /* ----------------------------------------------------------------------- */
- /* COPY PAGES READ TO STARTING NODE. */
- /* ----------------------------------------------------------------------- */
- if (!c_nodeStartMaster.activeState) {
- jam();
- releaseTabPages(tabPtr.i);
- c_nodeStartMaster.wait = ZFALSE;
- return;
- }//if
- NodeRecordPtr copyNodePtr;
- PageRecordPtr pagePtr;
- copyNodePtr.i = c_nodeStartMaster.startNode;
- ptrCheckGuard(copyNodePtr, MAX_NDB_NODES, nodeRecord);
-
- copyNodePtr.p->activeTabptr = tabPtr.i;
- pagePtr.i = tabPtr.p->pageRef[0];
- ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
-
- signal->theData[0] = DihContinueB::ZCOPY_TABLE_NODE;
- signal->theData[1] = tabPtr.i;
- signal->theData[2] = copyNodePtr.i;
- signal->theData[3] = 0;
- signal->theData[4] = 0;
- signal->theData[5] = pagePtr.p->word[34];
- sendSignal(reference(), GSN_CONTINUEB, signal, 6, JBB);
-}//Dbdih::tableCopyNodeLab()
-
-/* ------------------------------------------------------------------------- */
-// execCONTINUEB(ZCOPY_TABLE)
-// This routine is used to copy the table descriptions from the master to
-// other nodes. It is used in the system restart to copy from master to all
-// starting nodes.
-/* ------------------------------------------------------------------------- */
-void Dbdih::copyTableLab(Signal* signal, Uint32 tableId)
-{
- TabRecordPtr tabPtr;
- tabPtr.i = tableId;
- ptrAss(tabPtr, tabRecord);
-
- ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE);
- tabPtr.p->tabCopyStatus = TabRecord::CS_SR_PHASE2_READ_TABLE;
- signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
- signal->theData[1] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
-}//Dbdih::copyTableLab()
-
-/* ------------------------------------------------------------------------- */
-// execCONTINUEB(ZSR_PHASE2_READ_TABLE)
-/* ------------------------------------------------------------------------- */
-void Dbdih::srPhase2ReadTableLab(Signal* signal, TabRecordPtr tabPtr)
-{
- /* ----------------------------------------------------------------------- */
- // We set the sendCOPY_TABREQState to ZACTIVE for all nodes since it is a long
- // process to send off all table descriptions. Thus we ensure that we do
- // not encounter race conditions where one node completes before the
- // sending process is finished. Otherwise the system could be started
- // before all table descriptions have actually been copied, which could
- // lead to strange errors.
- /* ----------------------------------------------------------------------- */
-
- //sendLoopMacro(COPY_TABREQ, nullRoutine);
-
- breakCopyTableLab(signal, tabPtr, cfirstAliveNode);
- return;
-}//Dbdih::srPhase2ReadTableLab()
-
-/* ------------------------------------------------------------------------- */
-/* COPY PAGES READ TO ALL NODES. */
-/* ------------------------------------------------------------------------- */
-void Dbdih::breakCopyTableLab(Signal* signal, TabRecordPtr tabPtr, Uint32 nodeId)
-{
- NodeRecordPtr nodePtr;
- nodePtr.i = nodeId;
- while (nodePtr.i != RNIL) {
- jam();
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- if (nodePtr.i == getOwnNodeId()){
- jam();
- /* ------------------------------------------------------------------- */
- /* NOT NECESSARY TO COPY TO MY OWN NODE. I ALREADY HAVE THE PAGES. */
- /* I DO HOWEVER NEED TO STORE THE TABLE DESCRIPTION ONTO DISK. */
- /* ------------------------------------------------------------------- */
- /* IF WE ARE MASTER WE ONLY NEED TO SAVE THE TABLE ON DISK. WE ALREADY */
- /* HAVE THE TABLE DESCRIPTION IN THE DATA STRUCTURES. */
- // AFTER COMPLETING THE WRITE TO DISK THE MASTER WILL ALSO SEND
- // COPY_TABCONF AS ALL THE OTHER NODES.
- /* ------------------------------------------------------------------- */
- c_COPY_TABREQ_Counter.setWaitingFor(nodePtr.i);
- tabPtr.p->tabUpdateState = TabRecord::US_COPY_TAB_REQ;
- signal->theData[0] = DihContinueB::ZTABLE_UPDATE;
- signal->theData[1] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- nodePtr.i = nodePtr.p->nextNode;
- } else {
- PageRecordPtr pagePtr;
- /* -------------------------------------------------------------------- */
- // RATHER THAN SENDING ALL COPY_TABREQ IN PARALLEL WE WILL SERIALISE THIS
- // ACTIVITY AND WILL THUS CALL breakCopyTableLab AGAIN WHEN WE HAVE
- // COMPLETED SENDING THE COPY_TABREQ SIGNALS.
- /* -------------------------------------------------------------------- */
- jam();
- tabPtr.p->tabCopyStatus = TabRecord::CS_SR_PHASE3_COPY_TABLE;
- pagePtr.i = tabPtr.p->pageRef[0];
- ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
- signal->theData[0] = DihContinueB::ZCOPY_TABLE_NODE;
- signal->theData[1] = tabPtr.i;
- signal->theData[2] = nodePtr.i;
- signal->theData[3] = 0;
- signal->theData[4] = 0;
- signal->theData[5] = pagePtr.p->word[34];
- sendSignal(reference(), GSN_CONTINUEB, signal, 6, JBB);
- return;
- }//if
- }//while
- /* ----------------------------------------------------------------------- */
- /* WE HAVE NOW SENT THE TABLE PAGES TO ALL NODES. EXIT AND WAIT FOR ALL */
- /* REPLIES. */
- /* ----------------------------------------------------------------------- */
- return;
-}//Dbdih::breakCopyTableLab()
-
-/* ------------------------------------------------------------------------- */
-// execCONTINUEB(ZCOPY_TABLE_NODE)
-/* ------------------------------------------------------------------------- */
-void Dbdih::copyTableNode(Signal* signal,
- CopyTableNode* ctn, NodeRecordPtr nodePtr)
-{
- if (getNodeState().startLevel >= NodeState::SL_STARTED){
- /* --------------------------------------------------------------------- */
- // We are in the process of performing a node restart and are copying a
- // table description to a starting node. We will check that no nodes have
- // crashed in this process.
- /* --------------------------------------------------------------------- */
- if (!c_nodeStartMaster.activeState) {
- jam();
- /** ------------------------------------------------------------------
- * The starting node crashed. We will release table pages and stop this
- * copy process and allow new node restarts to start.
- * ------------------------------------------------------------------ */
- releaseTabPages(ctn->ctnTabPtr.i);
- c_nodeStartMaster.wait = ZFALSE;
- return;
- }//if
- }//if
- ndbrequire(ctn->pageIndex < 8);
- ctn->ctnPageptr.i = ctn->ctnTabPtr.p->pageRef[ctn->pageIndex];
- ptrCheckGuard(ctn->ctnPageptr, cpageFileSize, pageRecord);
- /**
- * If first page & firstWord reqinfo = 1 (first signal)
- */
- Uint32 reqinfo = (ctn->pageIndex == 0) && (ctn->wordIndex == 0);
- if(reqinfo == 1){
- c_COPY_TABREQ_Counter.setWaitingFor(nodePtr.i);
- }
-
- for (Uint32 i = 0; i < 16; i++) {
- jam();
- sendCopyTable(signal, ctn, calcDihBlockRef(nodePtr.i), reqinfo);
- reqinfo = 0;
- if (ctn->noOfWords <= 16) {
- jam();
- switch (ctn->ctnTabPtr.p->tabCopyStatus) {
- case TabRecord::CS_SR_PHASE3_COPY_TABLE:
- /* ------------------------------------------------------------------ */
- // We have copied the table description to this node.
- // We will now proceed
- // with sending the table description to the next node in the node list.
- /* ------------------------------------------------------------------ */
- jam();
- ctn->ctnTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
- breakCopyTableLab(signal, ctn->ctnTabPtr, nodePtr.p->nextNode);
- return;
- break;
- case TabRecord::CS_COPY_NODE_STATE:
- jam();
- ctn->ctnTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
- return;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- } else {
- jam();
- ctn->wordIndex += 16;
- if (ctn->wordIndex == 2048) {
- jam();
- ctn->wordIndex = 0;
- ctn->pageIndex++;
- ndbrequire(ctn->pageIndex < 8);
- ctn->ctnPageptr.i = ctn->ctnTabPtr.p->pageRef[ctn->pageIndex];
- ptrCheckGuard(ctn->ctnPageptr, cpageFileSize, pageRecord);
- }//if
- ctn->noOfWords -= 16;
- }//if
- }//for
- signal->theData[0] = DihContinueB::ZCOPY_TABLE_NODE;
- signal->theData[1] = ctn->ctnTabPtr.i;
- signal->theData[2] = nodePtr.i;
- signal->theData[3] = ctn->pageIndex;
- signal->theData[4] = ctn->wordIndex;
- signal->theData[5] = ctn->noOfWords;
- sendSignal(reference(), GSN_CONTINUEB, signal, 6, JBB);
-}//Dbdih::copyTableNode()
-
-void Dbdih::sendCopyTable(Signal* signal, CopyTableNode* ctn,
- BlockReference ref, Uint32 reqinfo)
-{
- signal->theData[0] = reference();
- signal->theData[1] = reqinfo;
- signal->theData[2] = ctn->ctnTabPtr.i;
- signal->theData[3] = ctn->ctnTabPtr.p->schemaVersion;
- signal->theData[4] = ctn->noOfWords;
- ndbrequire(ctn->wordIndex + 15 < 2048);
- MEMCOPY_NO_WORDS(&signal->theData[5], &ctn->ctnPageptr.p->word[ctn->wordIndex], 16);
- sendSignal(ref, GSN_COPY_TABREQ, signal, 21, JBB);
-}//Dbdih::sendCopyTable()
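The COPY_TABREQ signal assembled in sendCopyTable() carries a 5-word header followed by 16 words of packed table description, which is why the signal length is 21. The layout below simply restates the assignments above; it is not an existing signal-data header:

  // theData[0]      senderRef      - reference() of the sending DIH
  // theData[1]      reqinfo        - 1 on the first signal for a table, 0 afterwards
  // theData[2]      tableId        - ctn->ctnTabPtr.i
  // theData[3]      schemaVersion  - ctn->ctnTabPtr.p->schemaVersion
  // theData[4]      noOfWords      - words still to send, reduced by 16 per signal
  // theData[5..20]  16 words of packed table description copied from the page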
-
-void Dbdih::execCOPY_TABCONF(Signal* signal)
-{
- NodeRecordPtr nodePtr;
- jamEntry();
- nodePtr.i = signal->theData[0];
- Uint32 tableId = signal->theData[1];
- if (getNodeState().startLevel >= NodeState::SL_STARTED){
- /* --------------------------------------------------------------------- */
- // We are in the process of performing a node restart. Continue by copying
- // the next table to the starting node.
- /* --------------------------------------------------------------------- */
- jam();
- NodeRecordPtr nodePtr;
- nodePtr.i = signal->theData[0];
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- c_COPY_TABREQ_Counter.clearWaitingFor(nodePtr.i);
-
- releaseTabPages(tableId);
- signal->theData[0] = DihContinueB::ZCOPY_NODE;
- signal->theData[1] = tableId + 1;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- } else {
- /* --------------------------------------------------------------------- */
- // We are in the process of performing a system restart. Check if all nodes
- // have saved the new table description to file and then continue with the
- // next table.
- /* --------------------------------------------------------------------- */
- receiveLoopMacro(COPY_TABREQ, nodePtr.i);
- /* --------------------------------------------------------------------- */
- /* WE HAVE NOW COPIED TO ALL NODES. WE HAVE NOW COMPLETED RESTORING */
- /* THIS TABLE. CONTINUE WITH THE NEXT TABLE. */
- /* WE NEED TO RELEASE THE PAGES IN THE TABLE IN THIS NODE HERE. */
- /* WE ALSO NEED TO CLOSE THE TABLE FILE. */
- /* --------------------------------------------------------------------- */
- releaseTabPages(tableId);
-
- TabRecordPtr tabPtr;
- tabPtr.i = tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- ConnectRecordPtr connectPtr;
- connectPtr.i = tabPtr.p->connectrec;
- ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
-
- sendAddFragreq(signal, connectPtr, tabPtr, 0);
- return;
- }//if
-}//Dbdih::execCOPY_TABCONF()
-
-/*
- 3.13 L O C A L C H E C K P O I N T (M A S T E R)
- ****************************************************
- */
-/*****************************************************************************/
-/* ********** LOCAL-CHECK-POINT-HANDLING MODULE *************/
-/*****************************************************************************/
-/* ------------------------------------------------------------------------- */
-/* IT IS TIME TO CHECK IF IT IS TIME TO START A LOCAL CHECKPOINT. */
-/* WE WILL EITHER START AFTER 1 MILLION WORDS HAVE ARRIVED OR WE WILL */
-/* EXECUTE AFTER ABOUT 16 MINUTES HAVE PASSED BY. */
-/* ------------------------------------------------------------------------- */
-void Dbdih::checkTcCounterLab(Signal* signal)
-{
- CRASH_INSERTION(7009);
- if (c_lcpState.lcpStatus != LCP_STATUS_IDLE) {
- ndbout << "lcpStatus = " << (Uint32) c_lcpState.lcpStatus << endl;
- ndbout << "lcpStatusUpdatedPlace = " <<
- c_lcpState.lcpStatusUpdatedPlace << endl;
- ndbrequire(false);
- return;
- }//if
- c_lcpState.ctimer += 32;
- if ((c_nodeStartMaster.blockLcp == true) ||
- ((c_lcpState.lcpStartGcp + 1) > currentgcp)) {
- jam();
- /* --------------------------------------------------------------------- */
- // No reason to start juggling the states and checking for start of LCP if
- // we are blocked to start an LCP anyway.
- // We also block LCP start if we have not completed one global checkpoint
- // before starting another local checkpoint.
- /* --------------------------------------------------------------------- */
- signal->theData[0] = DihContinueB::ZCHECK_TC_COUNTER;
- signal->theData[1] = __LINE__;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1 * 100, 2);
- return;
- }//if
- c_lcpState.setLcpStatus(LCP_TCGET, __LINE__);
-
- c_lcpState.ctcCounter = c_lcpState.ctimer;
- sendLoopMacro(TCGETOPSIZEREQ, sendTCGETOPSIZEREQ);
-}//Dbdih::checkTcCounterLab()
-
-void Dbdih::checkLcpStart(Signal* signal, Uint32 lineNo)
-{
- /* ----------------------------------------------------------------------- */
- // Verify that we are not attempting to start another instance of the LCP
- // when it is not alright to do so.
- /* ----------------------------------------------------------------------- */
- ndbrequire(c_lcpState.lcpStart == ZIDLE);
- c_lcpState.lcpStart = ZACTIVE;
- signal->theData[0] = DihContinueB::ZCHECK_TC_COUNTER;
- signal->theData[1] = lineNo;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 2);
-}//Dbdih::checkLcpStart()
-
-/* ------------------------------------------------------------------------- */
-/*TCGETOPSIZECONF HOW MUCH OPERATION SIZE HAVE BEEN EXECUTED BY TC */
-/* ------------------------------------------------------------------------- */
-void Dbdih::execTCGETOPSIZECONF(Signal* signal)
-{
- jamEntry();
- Uint32 senderNodeId = signal->theData[0];
- c_lcpState.ctcCounter += signal->theData[1];
-
- receiveLoopMacro(TCGETOPSIZEREQ, senderNodeId);
-
- ndbrequire(c_lcpState.lcpStatus == LCP_TCGET);
- ndbrequire(c_lcpState.lcpStart == ZACTIVE);
- /* ----------------------------------------------------------------------- */
- // If we were not actively starting another LCP and still received this
- // signal, something would be wrong; the requires above guard against that.
- /* ----------------------------------------------------------------------- */
- /* ALL TC'S HAVE RESPONDED NOW. NOW WE WILL CHECK IF ENOUGH OPERATIONS */
- /* HAVE EXECUTED TO ENABLE US TO START A NEW LOCAL CHECKPOINT. */
- /* WHILE COPYING DICTIONARY AND DISTRIBUTION INFO TO A STARTING NODE */
- /* WE WILL ALSO NOT ALLOW THE LOCAL CHECKPOINT TO PROCEED. */
- /*----------------------------------------------------------------------- */
- if (c_lcpState.immediateLcpStart == false) {
- if ((c_lcpState.ctcCounter <
- ((Uint32)1 << c_lcpState.clcpDelay)) ||
- (c_nodeStartMaster.blockLcp == true)) {
- jam();
- c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
-
- signal->theData[0] = DihContinueB::ZCHECK_TC_COUNTER;
- signal->theData[1] = __LINE__;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1 * 100, 2);
- return;
- }//if
- }//if
- c_lcpState.lcpStart = ZIDLE;
- c_lcpState.immediateLcpStart = false;
- /* -----------------------------------------------------------------------
- * Now that the initial LCP is started,
- * we can reset the delay to its original value
- * --------------------------------------------------------------------- */
- CRASH_INSERTION(7010);
- /* ----------------------------------------------------------------------- */
- /* IF MORE THAN 1 MILLION WORDS PASSED THROUGH THE TC'S THEN WE WILL */
- /* START A NEW LOCAL CHECKPOINT. CLEAR CTIMER. START CHECKPOINT */
- /* ACTIVITY BY CALCULATING THE KEEP GLOBAL CHECKPOINT. */
- // Also remember the current global checkpoint to ensure that we run at least
- // one global checkpoint between each local checkpoint that we start up.
- /* ----------------------------------------------------------------------- */
- c_lcpState.ctimer = 0;
- c_lcpState.keepGci = coldgcp;
- c_lcpState.lcpStartGcp = currentgcp;
- /* ----------------------------------------------------------------------- */
- /* UPDATE THE NEW LATEST LOCAL CHECKPOINT ID. */
- /* ----------------------------------------------------------------------- */
- cnoOfActiveTables = 0;
- c_lcpState.setLcpStatus(LCP_CALCULATE_KEEP_GCI, __LINE__);
- c_lcpState.oldestRestorableGci = SYSFILE->oldestRestorableGCI;
- ndbrequire(((int)c_lcpState.oldestRestorableGci) > 0);
-
- if (ERROR_INSERTED(7011)) {
- signal->theData[0] = NDB_LE_LCPStoppedInCalcKeepGci;
- signal->theData[1] = 0;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
- return;
- }//if
- signal->theData[0] = DihContinueB::ZCALCULATE_KEEP_GCI;
- signal->theData[1] = 0; /* TABLE ID = 0 */
- signal->theData[2] = 0; /* FRAGMENT ID = 0 */
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
- return;
-}//Dbdih::execTCGETOPSIZECONF()
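The decision taken in execTCGETOPSIZECONF() reduces to: start at once if an immediate LCP was requested, otherwise wait until the summed TC operation size reaches 1 << clcpDelay words and no node restart is blocking LCPs; with clcpDelay configured around 20 that threshold is roughly the one million words mentioned in the comment above. A minimal C++ sketch of that test, using a hypothetical helper (not a Dbdih member):

  static bool shouldStartLcp(Uint32 ctcCounter, Uint32 clcpDelay,
                             bool blockLcp, bool immediateLcpStart)
  {
    if (immediateLcpStart)
      return true;                                 // forced start, skip the checks
    if (blockLcp)
      return false;                                // a node restart blocks LCP start
    return ctcCounter >= ((Uint32)1 << clcpDelay); // enough TC traffic has passed
  }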
-
-/* ------------------------------------------------------------------------- */
-/* WE NEED TO CALCULATE THE OLDEST GLOBAL CHECKPOINT THAT WILL BE */
-/* COMPLETELY RESTORABLE AFTER EXECUTING THIS LOCAL CHECKPOINT. */
-/* ------------------------------------------------------------------------- */
-void Dbdih::calculateKeepGciLab(Signal* signal, Uint32 tableId, Uint32 fragId)
-{
- TabRecordPtr tabPtr;
- Uint32 TloopCount = 1;
- tabPtr.i = tableId;
- do {
- if (tabPtr.i >= ctabFileSize) {
- if (cnoOfActiveTables > 0) {
- jam();
- signal->theData[0] = DihContinueB::ZSTORE_NEW_LCP_ID;
- sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
- return;
- } else {
- jam();
- /* ------------------------------------------------------------------ */
- /* THERE ARE NO TABLES TO CHECKPOINT. WE STOP THE CHECKPOINT ALREADY */
- /* HERE TO AVOID STRANGE PROBLEMS LATER. */
- /* ------------------------------------------------------------------ */
- c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
- checkLcpStart(signal, __LINE__);
- return;
- }//if
- }//if
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE ||
- tabPtr.p->storedTable == 0) {
- if (TloopCount > 100) {
- jam();
- signal->theData[0] = DihContinueB::ZCALCULATE_KEEP_GCI;
- signal->theData[1] = tabPtr.i + 1;
- signal->theData[2] = 0;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
- return;
- } else {
- jam();
- TloopCount++;
- tabPtr.i++;
- }//if
- } else {
- jam();
- TloopCount = 0;
- }//if
- } while (TloopCount != 0);
- cnoOfActiveTables++;
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, fragId, fragPtr);
- checkKeepGci(fragPtr.p->storedReplicas);
- fragId++;
- if (fragId >= tabPtr.p->totalfragments) {
- jam();
- tabPtr.i++;
- fragId = 0;
- }//if
- signal->theData[0] = DihContinueB::ZCALCULATE_KEEP_GCI;
- signal->theData[1] = tabPtr.i;
- signal->theData[2] = fragId;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
- return;
-}//Dbdih::calculateKeepGciLab()
-
-/* ------------------------------------------------------------------------- */
-/* WE NEED TO STORE ON DISK THE FACT THAT WE ARE STARTING THIS LOCAL */
-/* CHECKPOINT ROUND. THIS WILL INVALIDATE ALL THE LOCAL CHECKPOINTS */
-/* THAT WILL EVENTUALLY BE OVERWRITTEN AS PART OF THIS LOCAL CHECKPOINT*/
-/* ------------------------------------------------------------------------- */
-void Dbdih::storeNewLcpIdLab(Signal* signal)
-{
- /***************************************************************************/
- // Report the event that a local checkpoint has started.
- /***************************************************************************/
- signal->theData[0] = NDB_LE_LocalCheckpointStarted; //Event type
- signal->theData[1] = SYSFILE->latestLCP_ID + 1;
- signal->theData[2] = c_lcpState.keepGci;
- signal->theData[3] = c_lcpState.oldestRestorableGci;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
-
- signal->setTrace(TestOrd::TraceLocalCheckpoint);
-
- CRASH_INSERTION(7013);
- SYSFILE->keepGCI = c_lcpState.keepGci;
- //Uint32 lcpId = SYSFILE->latestLCP_ID;
- SYSFILE->latestLCP_ID++;
- SYSFILE->oldestRestorableGCI = c_lcpState.oldestRestorableGci;
-
- const Uint32 oldestRestorableGCI = SYSFILE->oldestRestorableGCI;
- //const Uint32 newestRestorableGCI = SYSFILE->newestRestorableGCI;
- //ndbrequire(newestRestorableGCI >= oldestRestorableGCI);
-
- Int32 val = oldestRestorableGCI;
- ndbrequire(val > 0);
-
- /* ----------------------------------------------------------------------- */
- /* SET BIT INDICATING THAT LOCAL CHECKPOINT IS ONGOING. THIS IS CLEARED */
- /* AT THE END OF A LOCAL CHECKPOINT. */
- /* ----------------------------------------------------------------------- */
- SYSFILE->setLCPOngoing(SYSFILE->systemRestartBits);
- /* ---------------------------------------------------------------------- */
- /* CHECK IF ANY NODE MUST BE TAKEN OUT OF SERVICE AND REFILLED WITH */
- /* NEW FRESH DATA FROM AN ACTIVE NODE. */
- /* ---------------------------------------------------------------------- */
- setLcpActiveStatusStart(signal);
- c_lcpState.setLcpStatus(LCP_COPY_GCI, __LINE__);
- //#ifdef VM_TRACE
- // infoEvent("LocalCheckpoint %d started", SYSFILE->latestLCP_ID);
- // signal->theData[0] = 7012;
- // execDUMP_STATE_ORD(signal);
- //#endif
-
- copyGciLab(signal, CopyGCIReq::LOCAL_CHECKPOINT);
-}//Dbdih::storeNewLcpIdLab()
-
-void Dbdih::startLcpRoundLab(Signal* signal) {
- jam();
-
- Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
- Callback c = { safe_cast(&Dbdih::startLcpMutex_locked), 0 };
- ndbrequire(mutex.lock(c));
-}
-
-void
-Dbdih::startLcpMutex_locked(Signal* signal, Uint32 senderData, Uint32 retVal){
- jamEntry();
- ndbrequire(retVal == 0);
-
- StartLcpReq* req = (StartLcpReq*)signal->getDataPtrSend();
- req->senderRef = reference();
- req->lcpId = SYSFILE->latestLCP_ID;
- req->participatingLQH = c_lcpState.m_participatingLQH;
- req->participatingDIH = c_lcpState.m_participatingDIH;
- sendLoopMacro(START_LCP_REQ, sendSTART_LCP_REQ);
-}
-void
-Dbdih::sendSTART_LCP_REQ(Signal* signal, Uint32 nodeId){
- BlockReference ref = calcDihBlockRef(nodeId);
- sendSignal(ref, GSN_START_LCP_REQ, signal, StartLcpReq::SignalLength, JBB);
-}
-
-void
-Dbdih::execSTART_LCP_CONF(Signal* signal){
- StartLcpConf * conf = (StartLcpConf*)signal->getDataPtr();
-
- Uint32 nodeId = refToNode(conf->senderRef);
- receiveLoopMacro(START_LCP_REQ, nodeId);
-
- Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
- Callback c = { safe_cast(&Dbdih::startLcpMutex_unlocked), 0 };
- mutex.unlock(c);
-}
-
-void
-Dbdih::startLcpMutex_unlocked(Signal* signal, Uint32 data, Uint32 retVal){
- jamEntry();
- ndbrequire(retVal == 0);
-
- Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
- mutex.release();
-
- CRASH_INSERTION(7014);
- c_lcpState.setLcpStatus(LCP_TC_CLOPSIZE, __LINE__);
- sendLoopMacro(TC_CLOPSIZEREQ, sendTC_CLOPSIZEREQ);
-}
-
-void Dbdih::execTC_CLOPSIZECONF(Signal* signal) {
- jamEntry();
- Uint32 senderNodeId = signal->theData[0];
- receiveLoopMacro(TC_CLOPSIZEREQ, senderNodeId);
-
- ndbrequire(c_lcpState.lcpStatus == LCP_TC_CLOPSIZE);
- /* ----------------------------------------------------------------------- */
- /* ALL TC'S HAVE CLEARED THEIR OPERATION SIZE COUNTERS. NOW PROCEED BY */
- /* STARTING THE LOCAL CHECKPOINT IN EACH LQH. */
- /* ----------------------------------------------------------------------- */
- c_lcpState.m_LAST_LCP_FRAG_ORD = c_lcpState.m_participatingLQH;
-
- CRASH_INSERTION(7015);
- c_lcpState.setLcpStatus(LCP_START_LCP_ROUND, __LINE__);
- startLcpRoundLoopLab(signal, 0, 0);
-}//Dbdih::execTC_CLOPSIZECONF()
-
-void Dbdih::startLcpRoundLoopLab(Signal* signal,
- Uint32 startTableId, Uint32 startFragId)
-{
- NodeRecordPtr nodePtr;
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- ptrAss(nodePtr, nodeRecord);
- if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
- ndbrequire(nodePtr.p->noOfStartedChkpt == 0);
- ndbrequire(nodePtr.p->noOfQueuedChkpt == 0);
- }//if
- }//for
- c_lcpState.currentFragment.tableId = startTableId;
- c_lcpState.currentFragment.fragmentId = startFragId;
- startNextChkpt(signal);
-}//Dbdih::startLcpRoundLoopLab()
-
-void Dbdih::startNextChkpt(Signal* signal)
-{
- Uint32 lcpId = SYSFILE->latestLCP_ID;
-
- NdbNodeBitmask busyNodes;
- busyNodes.clear();
- const Uint32 lcpNodes = c_lcpState.m_participatingLQH.count();
-
- bool save = true;
- LcpState::CurrentFragment curr = c_lcpState.currentFragment;
-
- while (curr.tableId < ctabFileSize) {
- TabRecordPtr tabPtr;
- tabPtr.i = curr.tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- if ((tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) ||
- (tabPtr.p->tabLcpStatus != TabRecord::TLS_ACTIVE)) {
- curr.tableId++;
- curr.fragmentId = 0;
- continue;
- }//if
-
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, curr.fragmentId, fragPtr);
-
- ReplicaRecordPtr replicaPtr;
- for(replicaPtr.i = fragPtr.p->storedReplicas;
- replicaPtr.i != RNIL ;
- replicaPtr.i = replicaPtr.p->nextReplica){
-
- jam();
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
-
- NodeRecordPtr nodePtr;
- nodePtr.i = replicaPtr.p->procNode;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
-
- if (replicaPtr.p->lcpOngoingFlag &&
- replicaPtr.p->lcpIdStarted < lcpId) {
- jam();
- //-------------------------------------------------------------------
- // We have found a replica, on a node that is alive and participating in
- // the local checkpoint, whose checkpoint has not yet been started.
- //-------------------------------------------------------------------
-
- if (nodePtr.p->noOfStartedChkpt < 2) {
- jam();
- /**
- * Send LCP_FRAG_ORD to LQH
- */
-
- /**
- * Mark the replica as started by setting lcpIdStarted to the current lcpId
- */
- replicaPtr.p->lcpIdStarted = lcpId;
-
- Uint32 i = nodePtr.p->noOfStartedChkpt;
- nodePtr.p->startedChkpt[i].tableId = tabPtr.i;
- nodePtr.p->startedChkpt[i].fragId = curr.fragmentId;
- nodePtr.p->startedChkpt[i].replicaPtr = replicaPtr.i;
- nodePtr.p->noOfStartedChkpt = i + 1;
-
- sendLCP_FRAG_ORD(signal, nodePtr.p->startedChkpt[i]);
- } else if (nodePtr.p->noOfQueuedChkpt < 2) {
- jam();
- /**
- * Put LCP_FRAG_ORD "in queue"
- */
-
- /**
- * Mark the replica as started by setting lcpIdStarted to the current lcpId
- */
- replicaPtr.p->lcpIdStarted = lcpId;
-
- Uint32 i = nodePtr.p->noOfQueuedChkpt;
- nodePtr.p->queuedChkpt[i].tableId = tabPtr.i;
- nodePtr.p->queuedChkpt[i].fragId = curr.fragmentId;
- nodePtr.p->queuedChkpt[i].replicaPtr = replicaPtr.i;
- nodePtr.p->noOfQueuedChkpt = i + 1;
- } else {
- jam();
-
- if(save){
- /**
- * Stop advancing currentFragment at the first node that was "full"
- */
- c_lcpState.currentFragment = curr;
- save = false;
- }
-
- busyNodes.set(nodePtr.i);
- if(busyNodes.count() == lcpNodes){
- /**
- * There was no possibility to start the local checkpoint and it was
- * not possible to queue it up either. In this case we stop starting
- * local checkpoints until the nodes with a backlog have completed
- * more checkpoints. We return and will not continue the process of
- * starting any more checkpoints.
- */
- return;
- }//if
- }//if
- }
- }//for
- curr.fragmentId++;
- if (curr.fragmentId >= tabPtr.p->totalfragments) {
- jam();
- curr.fragmentId = 0;
- curr.tableId++;
- }//if
- }//while
-
- sendLastLCP_FRAG_ORD(signal);
-}//Dbdih::startNextChkpt()
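startNextChkpt() walks fragments in (tableId, fragmentId) order and, per data node, keeps at most two checkpoints started and two queued; only when every participating LQH node is full does it save the current position and stop. A sketch of that per-node admission rule (the enum and helper are illustrative, not part of Dbdih):

  enum ChkptAction { START_NOW, QUEUE_IT, NODE_FULL };

  static ChkptAction admitCheckpoint(Uint32 noOfStartedChkpt,
                                     Uint32 noOfQueuedChkpt)
  {
    if (noOfStartedChkpt < 2)
      return START_NOW;   // send LCP_FRAG_ORD to the LQH right away
    if (noOfQueuedChkpt < 2)
      return QUEUE_IT;    // park it until a started checkpoint completes
    return NODE_FULL;     // remember the resume position, mark the node busy
  }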
-
-void Dbdih::sendLastLCP_FRAG_ORD(Signal* signal)
-{
- LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
- lcpFragOrd->tableId = RNIL;
- lcpFragOrd->fragmentId = 0;
- lcpFragOrd->lcpId = SYSFILE->latestLCP_ID;
- lcpFragOrd->lcpNo = 0;
- lcpFragOrd->keepGci = c_lcpState.keepGci;
- lcpFragOrd->lastFragmentFlag = true;
-
- NodeRecordPtr nodePtr;
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- ptrAss(nodePtr, nodeRecord);
-
- if(nodePtr.p->noOfQueuedChkpt == 0 &&
- nodePtr.p->noOfStartedChkpt == 0 &&
- c_lcpState.m_LAST_LCP_FRAG_ORD.isWaitingFor(nodePtr.i)){
- jam();
-
- CRASH_INSERTION(7028);
-
- /**
- * Nothing queued or started <=> Complete on that node
- *
- */
- c_lcpState.m_LAST_LCP_FRAG_ORD.clearWaitingFor(nodePtr.i);
- if(ERROR_INSERTED(7075)){
- continue;
- }
- BlockReference ref = calcLqhBlockRef(nodePtr.i);
- sendSignal(ref, GSN_LCP_FRAG_ORD, signal,LcpFragOrd::SignalLength, JBB);
- }
- }
- if(ERROR_INSERTED(7075)){
- if(c_lcpState.m_LAST_LCP_FRAG_ORD.done())
- CRASH_INSERTION(7075);
- }
-}//Dbdih::sendLastLCP_FRAG_ORD()
-
-/* ------------------------------------------------------------------------- */
-/* A FRAGMENT REPLICA HAS COMPLETED EXECUTING ITS LOCAL CHECKPOINT. */
-/* CHECK IF ALL REPLICAS IN THE TABLE HAVE COMPLETED. IF SO STORE THE */
-/* THE TABLE DISTRIBUTION ON DISK. ALSO SEND LCP_REPORT TO ALL OTHER */
-/* NODES SO THAT THEY CAN STORE THE TABLE ONTO DISK AS WELL. */
-/* ------------------------------------------------------------------------- */
-void Dbdih::execLCP_FRAG_REP(Signal* signal)
-{
- jamEntry();
- ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE);
-
-#if 0
- printLCP_FRAG_REP(stdout,
- signal->getDataPtr(),
- signal->length(), number());
-#endif
-
- LcpFragRep * const lcpReport = (LcpFragRep *)&signal->theData[0];
- Uint32 nodeId = lcpReport->nodeId;
- Uint32 tableId = lcpReport->tableId;
- Uint32 fragId = lcpReport->fragId;
-
- jamEntry();
-
- CRASH_INSERTION2(7025, isMaster());
- CRASH_INSERTION2(7016, !isMaster());
-
- bool fromTimeQueue = (signal->senderBlockRef() == reference());
-
- TabRecordPtr tabPtr;
- tabPtr.i = tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- if(tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
- jam();
- /*-----------------------------------------------------------------------*/
- // If the table is currently being copied to disk we also
- // stop already here to avoid strange half-way updates
- // of the table data structures.
- /*-----------------------------------------------------------------------*/
- /*
- We need to send this signal without a delay since we have discovered
- that we have run out of space in the short time queue. This problem
- is very unlikely to happen, but it has happened and it results in a node crash.
- This should be considered a "quick fix" and not a permanent solution.
- A cleaner/better way would be to check the time queue if it is full or
- not before sending this signal.
- */
- sendSignal(reference(), GSN_LCP_FRAG_REP, signal, signal->length(), JBB);
- /* Kept here for reference
- sendSignalWithDelay(reference(), GSN_LCP_FRAG_REP,
- signal, 20, signal->length());
- */
-
- if(!fromTimeQueue){
- c_lcpState.noOfLcpFragRepOutstanding++;
- }
-
- return;
- }//if
-
- if(fromTimeQueue){
- jam();
-
- ndbrequire(c_lcpState.noOfLcpFragRepOutstanding > 0);
- c_lcpState.noOfLcpFragRepOutstanding--;
- }
-
- bool tableDone = reportLcpCompletion(lcpReport);
-
- if(tableDone){
- jam();
-
- if(tabPtr.p->tabStatus == TabRecord::TS_DROPPING){
- jam();
- ndbout_c("TS_DROPPING - Neglecting to save Table: %d Frag: %d - ",
- tableId,
- fragId);
- } else {
- jam();
- /**
- * Write table description to file
- */
- tabPtr.p->tabLcpStatus = TabRecord::TLS_WRITING_TO_FILE;
- tabPtr.p->tabCopyStatus = TabRecord::CS_LCP_READ_TABLE;
- tabPtr.p->tabUpdateState = TabRecord::US_LOCAL_CHECKPOINT;
- signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
- signal->theData[1] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
-
- checkLcpAllTablesDoneInLqh();
- }
- }
-
-#ifdef VM_TRACE
- /* --------------------------------------------------------------------- */
- // REPORT that local checkpoint have completed this fragment.
- /* --------------------------------------------------------------------- */
- signal->theData[0] = NDB_LE_LCPFragmentCompleted;
- signal->theData[1] = nodeId;
- signal->theData[2] = tableId;
- signal->theData[3] = fragId;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
-#endif
-
- bool ok = false;
- switch(c_lcpMasterTakeOverState.state){
- case LMTOS_IDLE:
- ok = true;
- jam();
- /**
- * Fall through
- */
- break;
- case LMTOS_WAIT_EMPTY_LCP: // LCP Take over waiting for EMPTY_LCPCONF
- jam();
- return;
- case LMTOS_WAIT_LCP_FRAG_REP:
- jam();
- checkEmptyLcpComplete(signal);
- return;
- case LMTOS_INITIAL:
- case LMTOS_ALL_IDLE:
- case LMTOS_ALL_ACTIVE:
- case LMTOS_LCP_CONCLUDING:
- case LMTOS_COPY_ONGOING:
- ndbrequire(false);
- }
- ndbrequire(ok);
-
- /* ----------------------------------------------------------------------- */
- // Check if there are more LCP's to start up.
- /* ----------------------------------------------------------------------- */
- if(isMaster()){
- jam();
-
- /**
- * Remove from "running" array
- */
- NodeRecordPtr nodePtr;
- nodePtr.i = nodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
-
- const Uint32 outstanding = nodePtr.p->noOfStartedChkpt;
- ndbrequire(outstanding > 0);
- if(nodePtr.p->startedChkpt[0].tableId != tableId ||
- nodePtr.p->startedChkpt[0].fragId != fragId){
- jam();
- ndbrequire(outstanding > 1);
- ndbrequire(nodePtr.p->startedChkpt[1].tableId == tableId);
- ndbrequire(nodePtr.p->startedChkpt[1].fragId == fragId);
- } else {
- jam();
- nodePtr.p->startedChkpt[0] = nodePtr.p->startedChkpt[1];
- }
- nodePtr.p->noOfStartedChkpt--;
- checkStartMoreLcp(signal, nodeId);
- }
-}
-
-bool
-Dbdih::checkLcpAllTablesDoneInLqh(){
- TabRecordPtr tabPtr;
-
- /**
- * Check if finished with all tables
- */
- for (tabPtr.i = 0; tabPtr.i < ctabFileSize; tabPtr.i++) {
- jam();
- ptrAss(tabPtr, tabRecord);
- if ((tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) &&
- (tabPtr.p->tabLcpStatus == TabRecord::TLS_ACTIVE)) {
- jam();
- /**
- * Nope, not finished with all tables
- */
- return false;
- }//if
- }//for
-
- CRASH_INSERTION2(7026, isMaster());
- CRASH_INSERTION2(7017, !isMaster());
-
- c_lcpState.setLcpStatus(LCP_TAB_COMPLETED, __LINE__);
- return true;
-}
-
-void Dbdih::findReplica(ReplicaRecordPtr& replicaPtr,
- Fragmentstore* fragPtrP, Uint32 nodeId)
-{
- replicaPtr.i = fragPtrP->storedReplicas;
- while(replicaPtr.i != RNIL){
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- if (replicaPtr.p->procNode == nodeId) {
- jam();
- return;
- } else {
- jam();
- replicaPtr.i = replicaPtr.p->nextReplica;
- }//if
- };
-
-#ifdef VM_TRACE
- ndbout_c("Fragment Replica(node=%d) not found", nodeId);
- replicaPtr.i = fragPtrP->oldStoredReplicas;
- while(replicaPtr.i != RNIL){
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- if (replicaPtr.p->procNode == nodeId) {
- jam();
- break;
- } else {
- jam();
- replicaPtr.i = replicaPtr.p->nextReplica;
- }//if
- };
- if(replicaPtr.i != RNIL){
- ndbout_c("...But was found in oldStoredReplicas");
- } else {
- ndbout_c("...And wasn't found in oldStoredReplicas");
- }
-#endif
- ndbrequire(false);
-}//Dbdih::findReplica()
-
-/**
- * Return true if all fragment replicas of the table have been checkpointed
- * to disk (in all LQHs),
- * false otherwise
- */
-bool
-Dbdih::reportLcpCompletion(const LcpFragRep* lcpReport)
-{
- Uint32 lcpNo = lcpReport->lcpNo;
- Uint32 lcpId = lcpReport->lcpId;
- Uint32 maxGciStarted = lcpReport->maxGciStarted;
- Uint32 maxGciCompleted = lcpReport->maxGciCompleted;
- Uint32 tableId = lcpReport->tableId;
- Uint32 fragId = lcpReport->fragId;
- Uint32 nodeId = lcpReport->nodeId;
-
- TabRecordPtr tabPtr;
- tabPtr.i = tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, fragId, fragPtr);
-
- ReplicaRecordPtr replicaPtr;
- findReplica(replicaPtr, fragPtr.p, nodeId);
-
- ndbrequire(replicaPtr.p->lcpOngoingFlag == true);
- if(lcpNo != replicaPtr.p->nextLcp){
- ndbout_c("lcpNo = %d replicaPtr.p->nextLcp = %d",
- lcpNo, replicaPtr.p->nextLcp);
- ndbrequire(false);
- }
- ndbrequire(lcpNo == replicaPtr.p->nextLcp);
- ndbrequire(lcpNo < MAX_LCP_STORED);
- ndbrequire(replicaPtr.p->lcpId[lcpNo] != lcpId);
-
- replicaPtr.p->lcpIdStarted = lcpId;
- replicaPtr.p->lcpOngoingFlag = false;
-
- removeOldCrashedReplicas(replicaPtr);
- replicaPtr.p->lcpId[lcpNo] = lcpId;
- replicaPtr.p->lcpStatus[lcpNo] = ZVALID;
- replicaPtr.p->maxGciStarted[lcpNo] = maxGciStarted;
- gth(maxGciStarted + 1, 0);
- replicaPtr.p->maxGciCompleted[lcpNo] = maxGciCompleted;
- replicaPtr.p->nextLcp = nextLcpNo(replicaPtr.p->nextLcp);
-
- ndbrequire(fragPtr.p->noLcpReplicas > 0);
- fragPtr.p->noLcpReplicas --;
-
- if(fragPtr.p->noLcpReplicas > 0){
- jam();
- return false;
- }
-
- for (Uint32 fid = 0; fid < tabPtr.p->totalfragments; fid++) {
- jam();
- getFragstore(tabPtr.p, fid, fragPtr);
- if (fragPtr.p->noLcpReplicas > 0){
- jam();
- /* ----------------------------------------------------------------- */
- // Not all fragments in table have been checkpointed.
- /* ----------------------------------------------------------------- */
- if(0)
- ndbout_c("reportLcpCompletion: fragment %d not ready", fid);
- return false;
- }//if
- }//for
- return true;
-}//Dbdih::reportLcpCompletion()
-
-void Dbdih::checkStartMoreLcp(Signal* signal, Uint32 nodeId)
-{
- ndbrequire(isMaster());
-
- NodeRecordPtr nodePtr;
- nodePtr.i = nodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
-
- ndbrequire(nodePtr.p->noOfStartedChkpt < 2);
-
- if (nodePtr.p->noOfQueuedChkpt > 0) {
- jam();
- nodePtr.p->noOfQueuedChkpt--;
- Uint32 i = nodePtr.p->noOfStartedChkpt;
- nodePtr.p->startedChkpt[i] = nodePtr.p->queuedChkpt[0];
- nodePtr.p->queuedChkpt[0] = nodePtr.p->queuedChkpt[1];
- //-------------------------------------------------------------------
- // We can send a LCP_FRAGORD to the node ordering it to perform a
- // local checkpoint on this fragment replica.
- //-------------------------------------------------------------------
- nodePtr.p->noOfStartedChkpt = i + 1;
-
- sendLCP_FRAG_ORD(signal, nodePtr.p->startedChkpt[i]);
- }
-
- /* ----------------------------------------------------------------------- */
- // When a node has no more outstanding LCP reports and none queued,
- // we are ready to make sure all nodes again have up to two
- // outstanding LCP requests per node and up to two queued for
- // sending.
- /* ----------------------------------------------------------------------- */
- startNextChkpt(signal);
-}//Dbdih::checkStartMoreLcp()
-
-void
-Dbdih::sendLCP_FRAG_ORD(Signal* signal,
- NodeRecord::FragmentCheckpointInfo info){
-
- ReplicaRecordPtr replicaPtr;
- replicaPtr.i = info.replicaPtr;
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
-
- BlockReference ref = calcLqhBlockRef(replicaPtr.p->procNode);
-
- LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
- lcpFragOrd->tableId = info.tableId;
- lcpFragOrd->fragmentId = info.fragId;
- lcpFragOrd->lcpId = SYSFILE->latestLCP_ID;
- lcpFragOrd->lcpNo = replicaPtr.p->nextLcp;
- lcpFragOrd->keepGci = c_lcpState.keepGci;
- lcpFragOrd->lastFragmentFlag = false;
- sendSignal(ref, GSN_LCP_FRAG_ORD, signal, LcpFragOrd::SignalLength, JBB);
-}
-
-void Dbdih::checkLcpCompletedLab(Signal* signal)
-{
- if(c_lcpState.lcpStatus < LCP_TAB_COMPLETED){
- jam();
- return;
- }
-
- TabRecordPtr tabPtr;
- for (tabPtr.i = 0; tabPtr.i < ctabFileSize; tabPtr.i++) {
- jam();
- ptrAss(tabPtr, tabRecord);
- if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) {
- if (tabPtr.p->tabLcpStatus != TabRecord::TLS_COMPLETED) {
- jam();
- return;
- }//if
- }//if
- }//for
-
- CRASH_INSERTION2(7027, isMaster());
- CRASH_INSERTION2(7018, !isMaster());
-
- if(c_lcpState.lcpStatus == LCP_TAB_COMPLETED){
- /**
- * We're done
- */
- c_lcpState.setLcpStatus(LCP_TAB_SAVED, __LINE__);
- sendLCP_COMPLETE_REP(signal);
- return;
- }
-
- ndbrequire(c_lcpState.lcpStatus == LCP_TAB_SAVED);
- allNodesLcpCompletedLab(signal);
- return;
-}//Dbdih::checkLcpCompletedLab()
-
-void
-Dbdih::sendLCP_COMPLETE_REP(Signal* signal){
- jam();
- LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend();
- rep->nodeId = getOwnNodeId();
- rep->lcpId = SYSFILE->latestLCP_ID;
- rep->blockNo = DBDIH;
-
- sendSignal(c_lcpState.m_masterLcpDihRef, GSN_LCP_COMPLETE_REP, signal,
- LcpCompleteRep::SignalLength, JBB);
-}
-
-/*-------------------------------------------------------------------------- */
-/* COMP_LCP_ROUND A LQH HAS COMPLETED A LOCAL CHECKPOINT */
-/*------------------------------------------------------------------------- */
-void Dbdih::execLCP_COMPLETE_REP(Signal* signal)
-{
- jamEntry();
-
-#if 0
- ndbout_c("LCP_COMPLETE_REP");
- printLCP_COMPLETE_REP(stdout,
- signal->getDataPtr(),
- signal->length(), number());
-#endif
-
- LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtr();
- Uint32 lcpId = rep->lcpId;
- Uint32 nodeId = rep->nodeId;
- Uint32 blockNo = rep->blockNo;
-
- if(c_lcpMasterTakeOverState.state > LMTOS_WAIT_LCP_FRAG_REP){
- jam();
- /**
- * Don't allow LCP_COMPLETE_REP to arrive during
- * LCP master take over
- */
- ndbrequire(isMaster());
- ndbrequire(blockNo == DBDIH);
- sendSignalWithDelay(reference(), GSN_LCP_COMPLETE_REP, signal, 100,
- signal->length());
- return;
- }
-
- ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE);
-
- switch(blockNo){
- case DBLQH:
- jam();
- c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.clearWaitingFor(nodeId);
- ndbrequire(!c_lcpState.m_LAST_LCP_FRAG_ORD.isWaitingFor(nodeId));
- break;
- case DBDIH:
- jam();
- ndbrequire(isMaster());
- c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.clearWaitingFor(nodeId);
- break;
- case 0:
- jam();
- ndbrequire(!isMaster());
- ndbrequire(c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received == false);
- c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received = true;
- break;
- default:
- ndbrequire(false);
- }
- ndbrequire(lcpId == SYSFILE->latestLCP_ID);
-
- allNodesLcpCompletedLab(signal);
- return;
-}
-
-void Dbdih::allNodesLcpCompletedLab(Signal* signal)
-{
- jam();
-
- if (c_lcpState.lcpStatus != LCP_TAB_SAVED) {
- jam();
- /**
- * We have not sent LCP_COMPLETE_REP to master DIH yet
- */
- return;
- }//if
-
- if (!c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.done()){
- jam();
- return;
- }
-
- if (!c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.done()){
- jam();
- return;
- }
-
- if (!isMaster() &&
- c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received == false){
- jam();
- /**
- * Wait until master DIH has signaled lcp is complete
- */
- return;
- }
-
- if(c_lcpMasterTakeOverState.state != LMTOS_IDLE){
- jam();
-#ifdef VM_TRACE
- ndbout_c("Exiting from allNodesLcpCompletedLab");
-#endif
- return;
- }
-
-
- /*------------------------------------------------------------------------ */
- /* WE HAVE NOW COMPLETED A LOCAL CHECKPOINT. WE ARE NOW READY TO WAIT */
- /* FOR THE NEXT LOCAL CHECKPOINT. SEND WITHOUT TIME-OUT SINCE IT MIGHT */
- /* BE TIME TO START THE NEXT LOCAL CHECKPOINT IMMEDIATELY. */
- /* CLEAR BIT 3 OF SYSTEM RESTART BITS TO INDICATE THAT THERE IS NO */
- /* LOCAL CHECKPOINT ONGOING. THIS WILL BE WRITTEN AT SOME LATER TIME */
- /* DURING A GLOBAL CHECKPOINT. IT IS NOT NECESSARY TO WRITE IT */
- /* IMMEDIATELY. WE WILL ALSO CLEAR BIT 2 OF SYSTEM RESTART BITS IF ALL */
- /* CURRENTLY ACTIVE NODES COMPLETED THE LOCAL CHECKPOINT. */
- /*------------------------------------------------------------------------ */
- CRASH_INSERTION(7019);
- signal->setTrace(0);
-
- c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
- setLcpActiveStatusEnd();
- Sysfile::clearLCPOngoing(SYSFILE->systemRestartBits);
-
- if(!isMaster()){
- jam();
- /**
- * We're not master, be content
- */
- return;
- }
-
- // Send LCP_COMPLETE_REP to all other nodes
- // allowing them to set their lcpStatus to LCP_STATUS_IDLE
- LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend();
- rep->nodeId = getOwnNodeId();
- rep->lcpId = SYSFILE->latestLCP_ID;
- rep->blockNo = 0; // 0 = Sent from master
-
- NodeRecordPtr nodePtr;
- nodePtr.i = cfirstAliveNode;
- do {
- jam();
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- if (nodePtr.i != cownNodeId){
- BlockReference ref = calcDihBlockRef(nodePtr.i);
- sendSignal(ref, GSN_LCP_COMPLETE_REP, signal,
- LcpCompleteRep::SignalLength, JBB);
- }
- nodePtr.i = nodePtr.p->nextNode;
- } while (nodePtr.i != RNIL);
-
-
- jam();
- /***************************************************************************/
- // Report the event that a local checkpoint has completed.
- /***************************************************************************/
- signal->theData[0] = NDB_LE_LocalCheckpointCompleted; //Event type
- signal->theData[1] = SYSFILE->latestLCP_ID;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
-
- /**
- * Start checking for next LCP
- */
- checkLcpStart(signal, __LINE__);
-
- if (cwaitLcpSr == true) {
- jam();
- cwaitLcpSr = false;
- ndbsttorry10Lab(signal, __LINE__);
- return;
- }//if
-
- if (c_nodeStartMaster.blockLcp == true) {
- jam();
- lcpBlockedLab(signal);
- return;
- }//if
- return;
-}//Dbdih::allNodesLcpCompletedLab()
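allNodesLcpCompletedLab() only declares the local checkpoint finished when all of the guards above hold; the checklist below is just a summary of those guards, not new logic:

  // 1. lcpStatus == LCP_TAB_SAVED            (our own tables are written to file)
  // 2. m_LCP_COMPLETE_REP_Counter_LQH.done() (all LQHs have reported completion)
  // 3. m_LCP_COMPLETE_REP_Counter_DIH.done() (all DIHs have reported completion)
  // 4. non-master only: LCP_COMPLETE_REP with blockNo 0 received from the master
  // 5. c_lcpMasterTakeOverState.state == LMTOS_IDLE (no LCP master take-over)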
-
-/******************************************************************************/
-/* ********** TABLE UPDATE MODULE *************/
-/* ****************************************************************************/
-/* ------------------------------------------------------------------------- */
-/* THIS MODULE IS USED TO UPDATE THE TABLE DESCRIPTION. IT STARTS BY */
-/* CREATING THE FIRST TABLE FILE, THEN UPDATES THIS FILE AND CLOSES IT.*/
-/* AFTER THAT THE SAME HAPPENS WITH THE SECOND FILE. AFTER THAT THE */
-/* TABLE DISTRIBUTION HAS BEEN UPDATED. */
-/* */
-/* THE REASON FOR CREATING THE FILE AND NOT OPENING IT IS TO ENSURE */
-/* THAT WE DO NOT GET A MIX OF OLD AND NEW INFORMATION IN THE FILE IN */
-/* ERROR SITUATIONS. */
-/* ------------------------------------------------------------------------- */
-void Dbdih::tableUpdateLab(Signal* signal, TabRecordPtr tabPtr) {
- FileRecordPtr filePtr;
- filePtr.i = tabPtr.p->tabFile[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- createFileRw(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::TABLE_CREATE;
- return;
-}//Dbdih::tableUpdateLab()
-
-void Dbdih::tableCreateLab(Signal* signal, FileRecordPtr filePtr)
-{
- TabRecordPtr tabPtr;
- tabPtr.i = filePtr.p->tabRef;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- writeTabfile(signal, tabPtr.p, filePtr);
- filePtr.p->reqStatus = FileRecord::TABLE_WRITE;
- return;
-}//Dbdih::tableCreateLab()
-
-void Dbdih::tableWriteLab(Signal* signal, FileRecordPtr filePtr)
-{
- closeFile(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::TABLE_CLOSE;
- return;
-}//Dbdih::tableWriteLab()
-
-void Dbdih::tableCloseLab(Signal* signal, FileRecordPtr filePtr)
-{
- TabRecordPtr tabPtr;
- tabPtr.i = filePtr.p->tabRef;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- if (filePtr.i == tabPtr.p->tabFile[0]) {
- jam();
- filePtr.i = tabPtr.p->tabFile[1];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- createFileRw(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::TABLE_CREATE;
- return;
- }//if
- switch (tabPtr.p->tabUpdateState) {
- case TabRecord::US_LOCAL_CHECKPOINT:
- jam();
- releaseTabPages(tabPtr.i);
- signal->theData[0] = DihContinueB::ZCHECK_LCP_COMPLETED;
- sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
-
- tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
- tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
- tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
- return;
- break;
- case TabRecord::US_REMOVE_NODE:
- jam();
- releaseTabPages(tabPtr.i);
- for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
- jam();
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, fragId, fragPtr);
- updateNodeInfo(fragPtr);
- }//for
- tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
- tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
- if (tabPtr.p->tabLcpStatus == TabRecord::TLS_WRITING_TO_FILE) {
- jam();
- tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
- signal->theData[0] = DihContinueB::ZCHECK_LCP_COMPLETED;
- sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
- }//if
- signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
- signal->theData[1] = tabPtr.p->tabRemoveNode;
- signal->theData[2] = tabPtr.i + 1;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
- return;
- break;
- case TabRecord::US_INVALIDATE_NODE_LCP:
- jam();
- releaseTabPages(tabPtr.i);
- tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
- tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
-
- signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP;
- signal->theData[1] = tabPtr.p->tabRemoveNode;
- signal->theData[2] = tabPtr.i + 1;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
- return;
- case TabRecord::US_COPY_TAB_REQ:
- jam();
- tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
- copyTabReq_complete(signal, tabPtr);
- return;
- break;
- case TabRecord::US_ADD_TABLE_MASTER:
- jam();
- releaseTabPages(tabPtr.i);
- tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
- signal->theData[0] = DihContinueB::ZDIH_ADD_TABLE_MASTER;
- signal->theData[1] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- case TabRecord::US_ADD_TABLE_SLAVE:
- jam();
- releaseTabPages(tabPtr.i);
- tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
- signal->theData[0] = DihContinueB::ZDIH_ADD_TABLE_SLAVE;
- signal->theData[1] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- default:
- ndbrequire(false);
- return;
- break;
- }//switch
-}//Dbdih::tableCloseLab()
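As the module comment above explains, the table description is rewritten by creating, writing and closing a fresh file rather than updating in place, and the cycle is run once per table file so a crash can never leave both copies half written. The sequence, restated from the labs above:

  // tableUpdateLab : createFileRw(tabFile[0])   -> reqStatus = TABLE_CREATE
  // tableCreateLab : writeTabfile(...)          -> reqStatus = TABLE_WRITE
  // tableWriteLab  : closeFile(...)             -> reqStatus = TABLE_CLOSE
  // tableCloseLab  : if this was tabFile[0], repeat the cycle for tabFile[1];
  //                  otherwise dispatch on tabUpdateState (LCP, remove node, ...)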
-
-/**
- * GCP stop detected,
- * send SYSTEM_ERROR to all other alive nodes
- */
-void Dbdih::crashSystemAtGcpStop(Signal* signal){
- NodeRecordPtr nodePtr;
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- ptrAss(nodePtr, nodeRecord);
- if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
- jam();
- const BlockReference ref =
- numberToRef(refToBlock(cntrlblockref), nodePtr.i);
- SystemError * const sysErr = (SystemError*)&signal->theData[0];
- sysErr->errorCode = SystemError::GCPStopDetected;
- sysErr->errorRef = reference();
- sysErr->data1 = cgcpStatus;
- sysErr->data2 = cgcpOrderBlocked;
- sendSignal(ref, GSN_SYSTEM_ERROR, signal,
- SystemError::SignalLength, JBA);
- }//if
- }//for
- return;
-}//Dbdih::crashSystemAtGcpStop()
-
-/*************************************************************************/
-/* */
-/* MODULE: ALLOCPAGE */
-/* DESCRIPTION: THE SUBROUTINE IS CALLED WITH POINTER TO PAGE */
-/* RECORD. A PAGE RECORD IS TAKEN FROM */
-/* THE FREE PAGE LIST */
-/*************************************************************************/
-void Dbdih::allocpage(PageRecordPtr& pagePtr)
-{
- ndbrequire(cfirstfreepage != RNIL);
- pagePtr.i = cfirstfreepage;
- ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
- cfirstfreepage = pagePtr.p->nextfreepage;
- pagePtr.p->nextfreepage = RNIL;
-}//Dbdih::allocpage()
-
-/*************************************************************************/
-/* */
-/* MODULE: ALLOC_STORED_REPLICA */
-/* DESCRIPTION: THE SUBROUTINE IS CALLED TO GET A REPLICA RECORD, */
-/* TO INITIALISE IT AND TO LINK IT INTO THE FRAGMENT */
-/* STORE RECORD. USED FOR STORED REPLICAS. */
-/*************************************************************************/
-void Dbdih::allocStoredReplica(FragmentstorePtr fragPtr,
- ReplicaRecordPtr& newReplicaPtr,
- Uint32 nodeId)
-{
- Uint32 i;
- ReplicaRecordPtr arrReplicaPtr;
- ReplicaRecordPtr arrPrevReplicaPtr;
-
- seizeReplicaRec(newReplicaPtr);
- for (i = 0; i < MAX_LCP_STORED; i++) {
- newReplicaPtr.p->maxGciCompleted[i] = 0;
- newReplicaPtr.p->maxGciStarted[i] = 0;
- newReplicaPtr.p->lcpId[i] = 0;
- newReplicaPtr.p->lcpStatus[i] = ZINVALID;
- }//for
- newReplicaPtr.p->noCrashedReplicas = 0;
- newReplicaPtr.p->initialGci = currentgcp;
- for (i = 0; i < 8; i++) {
- newReplicaPtr.p->replicaLastGci[i] = (Uint32)-1;
- newReplicaPtr.p->createGci[i] = 0;
- }//for
- newReplicaPtr.p->createGci[0] = currentgcp;
- ndbrequire(currentgcp != 0xF1F1F1F1);
- newReplicaPtr.p->nextLcp = 0;
- newReplicaPtr.p->procNode = nodeId;
- newReplicaPtr.p->lcpOngoingFlag = false;
- newReplicaPtr.p->lcpIdStarted = 0;
-
- arrPrevReplicaPtr.i = RNIL;
- arrReplicaPtr.i = fragPtr.p->storedReplicas;
- while (arrReplicaPtr.i != RNIL) {
- jam();
- ptrCheckGuard(arrReplicaPtr, creplicaFileSize, replicaRecord);
- arrPrevReplicaPtr = arrReplicaPtr;
- arrReplicaPtr.i = arrReplicaPtr.p->nextReplica;
- }//while
- if (arrPrevReplicaPtr.i == RNIL) {
- jam();
- fragPtr.p->storedReplicas = newReplicaPtr.i;
- } else {
- jam();
- arrPrevReplicaPtr.p->nextReplica = newReplicaPtr.i;
- }//if
- fragPtr.p->noStoredReplicas++;
-}//Dbdih::allocStoredReplica()
-
-/*************************************************************************/
-/* CALCULATE HOW MANY HOT SPARES ARE TO BE ASSIGNED IN THIS SYSTEM */
-/*************************************************************************/
-void Dbdih::calculateHotSpare()
-{
- Uint32 tchsTmp;
- Uint32 tchsNoNodes;
-
- switch (cnoReplicas) {
- case 1:
- jam();
- cnoHotSpare = 0;
- break;
- case 2:
- case 3:
- case 4:
- jam();
- if (csystemnodes > cnoReplicas) {
- jam();
- /* --------------------------------------------------------------------- */
- /* WITH MORE NODES THAN REPLICAS WE WILL ALWAYS USE AT LEAST ONE HOT */
- /* SPARE IF THAT HAS BEEN REQUESTED BY THE CONFIGURATION FILE. THE */
- /* NUMBER OF NODES TO BE USED FOR NORMAL OPERATION IS ALWAYS */
- /* A MULTIPLE OF THE NUMBER OF REPLICAS SINCE WE WILL ORGANISE NODES */
- /* INTO NODE GROUPS. THE REMAINING NODES WILL BE HOT SPARE NODES. */
- /* --------------------------------------------------------------------- */
- if ((csystemnodes - cnoReplicas) >= cminHotSpareNodes) {
- jam();
- /* --------------------------------------------------------------------- */
- // We set the minimum number of hot spares according to the user's request
- // through the configuration file.
- /* --------------------------------------------------------------------- */
- tchsNoNodes = csystemnodes - cminHotSpareNodes;
- cnoHotSpare = cminHotSpareNodes;
- } else if (cminHotSpareNodes > 0) {
- jam();
- /* --------------------------------------------------------------------- */
- // The user requested at least one hot spare node and we will support him
- // in that.
- /* --------------------------------------------------------------------- */
- tchsNoNodes = csystemnodes - 1;
- cnoHotSpare = 1;
- } else {
- jam();
- /* --------------------------------------------------------------------- */
- // The user did not request any hot spare nodes so in this case we will
- // only use hot spare nodes if the number of nodes is such that we cannot
- // use all nodes as normal nodes.
- /* --------------------------------------------------------------------- */
- tchsNoNodes = csystemnodes;
- cnoHotSpare = 0;
- }//if
- } else {
- jam();
- /* --------------------------------------------------------------------- */
- // We only have enough to support the replicas. We will not have any hot
- // spares.
- /* --------------------------------------------------------------------- */
- tchsNoNodes = csystemnodes;
- cnoHotSpare = 0;
- }//if
- tchsTmp = tchsNoNodes - (cnoReplicas * (tchsNoNodes / cnoReplicas));
- cnoHotSpare = cnoHotSpare + tchsTmp;
- break;
- default:
- jam();
- progError(0, 0);
- break;
- }//switch
-}//Dbdih::calculateHotSpare()
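As a worked example of the arithmetic above (configuration values chosen only for illustration): with csystemnodes = 5, cnoReplicas = 2 and cminHotSpareNodes = 0, the first branch leaves tchsNoNodes = 5 and cnoHotSpare = 0, and the final step computes tchsTmp = 5 - 2 * (5 / 2) = 1 with integer division, so the one node that cannot complete a node group ends up counted as a hot spare.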
-
-/*************************************************************************/
-/* CHECK IF THE NODE CRASH IS TO ESCALATE INTO A SYSTEM CRASH. WE COULD */
-/* DO THIS BECAUSE ALL REPLICAS OF SOME FRAGMENT ARE LOST. WE COULD ALSO */
-/* DO IT AFTER MANY NODE FAILURES THAT MAKE IT VERY DIFFICULT TO RESTORE */
-/* THE DATABASE AFTER A SYSTEM CRASH. IT MIGHT EVEN BE IMPOSSIBLE AND THIS */
-/* MUST BE AVOIDED EVEN MORE THAN AVOIDING SYSTEM CRASHES. */
-/*************************************************************************/
-void Dbdih::checkEscalation()
-{
- Uint32 TnodeGroup[MAX_NDB_NODES];
- NodeRecordPtr nodePtr;
- Uint32 i;
- for (i = 0; i < MAX_NDB_NODES; i++) {
- TnodeGroup[i] = ZFALSE;
- }//for
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- ptrAss(nodePtr, nodeRecord);
- if (nodePtr.p->nodeStatus == NodeRecord::ALIVE &&
- nodePtr.p->activeStatus == Sysfile::NS_Active){
- ndbrequire(nodePtr.p->nodeGroup < MAX_NDB_NODES);
- TnodeGroup[nodePtr.p->nodeGroup] = ZTRUE;
- }
- }
- for (i = 0; i < cnoOfNodeGroups; i++) {
- jam();
- if (TnodeGroup[i] == ZFALSE) {
- jam();
- progError(__LINE__, ERR_SYSTEM_ERROR, "Lost node group");
- }//if
- }//for
-}//Dbdih::checkEscalation()
-
-/*************************************************************************/
-/* */
-/* MODULE: CHECK_KEEP_GCI */
-/* DESCRIPTION: CHECK FOR MINIMUM GCI RESTORABLE WITH NEW LOCAL */
-/* CHECKPOINT. */
-/*************************************************************************/
-void Dbdih::checkKeepGci(Uint32 replicaStartIndex)
-{
- ReplicaRecordPtr ckgReplicaPtr;
- ckgReplicaPtr.i = replicaStartIndex;
- while (ckgReplicaPtr.i != RNIL) {
- jam();
- ptrCheckGuard(ckgReplicaPtr, creplicaFileSize, replicaRecord);
- Uint32 keepGci;
- Uint32 oldestRestorableGci;
- findMinGci(ckgReplicaPtr, keepGci, oldestRestorableGci);
- if (keepGci < c_lcpState.keepGci) {
- jam();
- /* ------------------------------------------------------------------- */
- /* WE MUST KEEP LOG RECORDS SO THAT WE CAN USE ALL LOCAL CHECKPOINTS */
- /* THAT ARE AVAILABLE. THUS WE NEED TO CALCULATE THE MINIMUM OVER ALL */
- /* FRAGMENTS. */
- /* ------------------------------------------------------------------- */
- c_lcpState.keepGci = keepGci;
- }//if
- if (oldestRestorableGci > c_lcpState.oldestRestorableGci) {
- jam();
- c_lcpState.oldestRestorableGci = oldestRestorableGci;
- ndbrequire(((int)c_lcpState.oldestRestorableGci) >= 0);
- }//if
- ckgReplicaPtr.i = ckgReplicaPtr.p->nextReplica;
- }//while
-}//Dbdih::checkKeepGci()
-
-void Dbdih::closeFile(Signal* signal, FileRecordPtr filePtr)
-{
- signal->theData[0] = filePtr.p->fileRef;
- signal->theData[1] = reference();
- signal->theData[2] = filePtr.i;
- signal->theData[3] = ZCLOSE_NO_DELETE;
- sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
-}//Dbdih::closeFile()
-
-void Dbdih::closeFileDelete(Signal* signal, FileRecordPtr filePtr)
-{
- signal->theData[0] = filePtr.p->fileRef;
- signal->theData[1] = reference();
- signal->theData[2] = filePtr.i;
- signal->theData[3] = ZCLOSE_DELETE;
- sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
-}//Dbdih::closeFileDelete()
-
-void Dbdih::createFileRw(Signal* signal, FileRecordPtr filePtr)
-{
- signal->theData[0] = reference();
- signal->theData[1] = filePtr.i;
- signal->theData[2] = filePtr.p->fileName[0];
- signal->theData[3] = filePtr.p->fileName[1];
- signal->theData[4] = filePtr.p->fileName[2];
- signal->theData[5] = filePtr.p->fileName[3];
- signal->theData[6] = ZCREATE_READ_WRITE;
- sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
-}//Dbdih::createFileRw()
-
-void Dbdih::emptyverificbuffer(Signal* signal, bool aContinueB)
-{
- if(cfirstVerifyQueue == RNIL){
- jam();
- return;
- }//if
- ApiConnectRecordPtr localApiConnectptr;
- if(getBlockCommit() == false){
- jam();
- ndbrequire(cverifyQueueCounter > 0);
- cverifyQueueCounter--;
- localApiConnectptr.i = cfirstVerifyQueue;
- ptrCheckGuard(localApiConnectptr, capiConnectFileSize, apiConnectRecord);
- ndbrequire(localApiConnectptr.p->apiGci <= currentgcp);
- cfirstVerifyQueue = localApiConnectptr.p->nextApi;
- if (cfirstVerifyQueue == RNIL) {
- jam();
- ndbrequire(cverifyQueueCounter == 0);
- clastVerifyQueue = RNIL;
- }//if
- signal->theData[0] = localApiConnectptr.i;
- signal->theData[1] = currentgcp;
- sendSignal(clocaltcblockref, GSN_DIVERIFYCONF, signal, 2, JBB);
- if (aContinueB == true) {
- jam();
- //-----------------------------------------------------------------------
- // This emptying happens as part of a drain process driven by CONTINUEB signals.
- // This ensures that we will empty the queue eventually. We will also empty
- // one item every time we insert one item to ensure that the list doesn't
- // grow when it is not blocked.
- //-----------------------------------------------------------------------
- signal->theData[0] = DihContinueB::ZEMPTY_VERIFY_QUEUE;
- sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
- }//if
- } else {
- jam();
- //-----------------------------------------------------------------------
- // We are blocked so it is no use in continuing the emptying of the
- // verify buffer. Whenever the block is removed the emptying will
- // restart.
- //-----------------------------------------------------------------------
- }
- return;
-}//Dbdih::emptyverificbuffer()
-
-/*----------------------------------------------------------------*/
-/* FIND A FREE HOT SPARE IF AVAILABLE AND ALIVE. */
-/*----------------------------------------------------------------*/
-Uint32 Dbdih::findHotSpare()
-{
- NodeRecordPtr nodePtr;
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- ptrAss(nodePtr, nodeRecord);
- if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
- if (nodePtr.p->activeStatus == Sysfile::NS_HotSpare) {
- jam();
- return nodePtr.i;
- }//if
- }//if
- }//for
- return RNIL;
-}//Dbdih::findHotSpare()
-
-/*************************************************************************/
-/* FIND THE NODES FROM WHICH WE CAN EXECUTE THE LOG TO RESTORE THE */
-/* DATA NODE IN A SYSTEM RESTART. */
-/*************************************************************************/
-bool Dbdih::findLogNodes(CreateReplicaRecord* createReplica,
- FragmentstorePtr fragPtr,
- Uint32 startGci,
- Uint32 stopGci)
-{
- ConstPtr<ReplicaRecord> flnReplicaPtr;
- flnReplicaPtr.i = createReplica->replicaRec;
- ptrCheckGuard(flnReplicaPtr, creplicaFileSize, replicaRecord);
- /* --------------------------------------------------------------------- */
- /* WE START BY CHECKING IF THE DATA NODE CAN HANDLE THE LOG ALL BY */
- /* ITSELF. THIS IS THE DESIRED BEHAVIOUR. IF THIS IS NOT POSSIBLE */
- /* THEN WE SEARCH FOR THE BEST POSSIBLE NODES AMONG THE NODES THAT */
- /* ARE PART OF THIS SYSTEM RESTART. */
- /* THIS CAN ONLY BE HANDLED BY THE LAST CRASHED REPLICA. */
- /* The condition is that the replica was created before or at the */
- /* time of the starting gci, in addition it must have been alive */
- /* at the time of the stopping gci. This is checked by two */
- /* conditions, the first checks replicaLastGci and the second */
- /* checks that it is also smaller than the last gci the node was */
- /* involved in. This is necessary to check since createGci is set */
- /* to LastGci + 1 and sometimes startGci = stopGci + 1 and in that case */
- /* it could happen that replicaLastGci is set to -1 with CreateGci */
- /* set to LastGci + 1. */
- /* --------------------------------------------------------------------- */
- arrGuard(flnReplicaPtr.p->noCrashedReplicas, 8);
- const Uint32 noCrashed = flnReplicaPtr.p->noCrashedReplicas;
-
- if (!(ERROR_INSERTED(7073) || ERROR_INSERTED(7074))&&
- (startGci >= flnReplicaPtr.p->createGci[noCrashed]) &&
- (stopGci <= flnReplicaPtr.p->replicaLastGci[noCrashed]) &&
- (stopGci <= SYSFILE->lastCompletedGCI[flnReplicaPtr.p->procNode])) {
- jam();
- /* --------------------------------------------------------------------- */
- /* WE FOUND ALL THE LOG RECORDS NEEDED IN THE DATA NODE. WE WILL */
- /* USE THOSE. */
- /* --------------------------------------------------------------------- */
- createReplica->noLogNodes = 1;
- createReplica->logStartGci[0] = startGci;
- createReplica->logStopGci[0] = stopGci;
- createReplica->logNodeId[0] = flnReplicaPtr.p->procNode;
- return true;
- }//if
- Uint32 logNode = 0;
- do {
- Uint32 fblStopGci;
- jam();
- if(!findBestLogNode(createReplica,
- fragPtr,
- startGci,
- stopGci,
- logNode,
- fblStopGci)){
- jam();
- return false;
- }
-
- logNode++;
- if (fblStopGci >= stopGci) {
- jam();
- createReplica->noLogNodes = logNode;
- return true;
- }//if
- startGci = fblStopGci + 1;
- if (logNode >= 4) { // Why??
- jam();
- break;
- }//if
- } while (1);
- /* --------------------------------------------------------------------- */
- /* IT WAS NOT POSSIBLE TO RESTORE THE REPLICA. THIS CAN EITHER BE */
- /* BECAUSE OF LACKING NODES OR BECAUSE OF A REALLY SERIOUS PROBLEM.*/
- /* --------------------------------------------------------------------- */
- return false;
-}//Dbdih::findLogNodes()
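The long comment at the top of findLogNodes() boils down to a single test on the last crashed-replica interval of the data node itself: the replica must have been created no later than startGci, must have survived past stopGci, and the node's last completed GCI must cover stopGci; only if that fails does the code stitch the interval together from up to four log nodes. A hypothetical helper restating that first test over the same fields:

  static bool ownNodeCoversInterval(const ReplicaRecord* r, Uint32 noCrashed,
                                    Uint32 startGci, Uint32 stopGci,
                                    Uint32 lastCompletedGciOfNode)
  {
    return startGci >= r->createGci[noCrashed] &&
           stopGci  <= r->replicaLastGci[noCrashed] &&
           stopGci  <= lastCompletedGciOfNode;
  }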
-
-/*************************************************************************/
-/* FIND THE BEST POSSIBLE LOG NODE TO EXECUTE THE LOG AS SPECIFIED */
-/* BY THE INPUT PARAMETERS. WE SCAN THROUGH ALL ALIVE REPLICAS. */
-/* THIS MEANS STORED, OLD_STORED */
-/*************************************************************************/
-bool
-Dbdih::findBestLogNode(CreateReplicaRecord* createReplica,
- FragmentstorePtr fragPtr,
- Uint32 startGci,
- Uint32 stopGci,
- Uint32 logNode,
- Uint32& fblStopGci)
-{
- ConstPtr<ReplicaRecord> fblFoundReplicaPtr;
- ConstPtr<ReplicaRecord> fblReplicaPtr;
-
- /* --------------------------------------------------------------------- */
- /* WE START WITH ZERO AS FOUND TO ENSURE THAT FIRST HIT WILL BE */
- /* BETTER. */
- /* --------------------------------------------------------------------- */
- fblStopGci = 0;
- fblReplicaPtr.i = fragPtr.p->storedReplicas;
- while (fblReplicaPtr.i != RNIL) {
- jam();
- ptrCheckGuard(fblReplicaPtr, creplicaFileSize, replicaRecord);
- if (checkNodeAlive(fblReplicaPtr.p->procNode)) {
- jam();
- Uint32 fliStopGci = findLogInterval(fblReplicaPtr, startGci);
- if (fliStopGci > fblStopGci) {
- jam();
- fblStopGci = fliStopGci;
- fblFoundReplicaPtr = fblReplicaPtr;
- }//if
- }//if
- fblReplicaPtr.i = fblReplicaPtr.p->nextReplica;
- }//while
- fblReplicaPtr.i = fragPtr.p->oldStoredReplicas;
- while (fblReplicaPtr.i != RNIL) {
- jam();
- ptrCheckGuard(fblReplicaPtr, creplicaFileSize, replicaRecord);
- if (checkNodeAlive(fblReplicaPtr.p->procNode)) {
- jam();
- Uint32 fliStopGci = findLogInterval(fblReplicaPtr, startGci);
- if (fliStopGci > fblStopGci) {
- jam();
- fblStopGci = fliStopGci;
- fblFoundReplicaPtr = fblReplicaPtr;
- }//if
- }//if
- fblReplicaPtr.i = fblReplicaPtr.p->nextReplica;
- }//while
- if (fblStopGci != 0) {
- jam();
- ndbrequire(logNode < MAX_LOG_EXEC);
- createReplica->logNodeId[logNode] = fblFoundReplicaPtr.p->procNode;
- createReplica->logStartGci[logNode] = startGci;
- if (fblStopGci >= stopGci) {
- jam();
- createReplica->logStopGci[logNode] = stopGci;
- } else {
- jam();
- createReplica->logStopGci[logNode] = fblStopGci;
- }//if
- }//if
-
- return fblStopGci != 0;
-}//Dbdih::findBestLogNode()
-
-Uint32 Dbdih::findLogInterval(ConstPtr<ReplicaRecord> replicaPtr,
- Uint32 startGci)
-{
- ndbrequire(replicaPtr.p->noCrashedReplicas <= 8);
- Uint32 loopLimit = replicaPtr.p->noCrashedReplicas + 1;
- for (Uint32 i = 0; i < loopLimit; i++) {
- jam();
- if (replicaPtr.p->createGci[i] <= startGci) {
- if (replicaPtr.p->replicaLastGci[i] >= startGci) {
- jam();
- return replicaPtr.p->replicaLastGci[i];
- }//if
- }//if
- }//for
- return 0;
-}//Dbdih::findLogInterval()
-
-/*************************************************************************/
-/* */
-/* MODULE: FIND THE MINIMUM GCI THAT THIS NODE HAS LOG RECORDS FOR.*/
-/*************************************************************************/
-void Dbdih::findMinGci(ReplicaRecordPtr fmgReplicaPtr,
- Uint32& keepGci,
- Uint32& oldestRestorableGci)
-{
- Uint32 nextLcpNo;
- Uint32 lcpNo;
- for (Uint32 i = 0; i < MAX_LCP_STORED; i++) {
- jam();
- if ((fmgReplicaPtr.p->lcpStatus[i] == ZVALID) &&
- ((fmgReplicaPtr.p->lcpId[i] + MAX_LCP_STORED) <= (SYSFILE->latestLCP_ID + 1))) {
- jam();
- /*--------------------------------------------------------------------*/
- // We invalidate the checkpoint we are preparing to overwrite.
-      // The LCP id is still the old lcp id;
-      // this is the reason for comparing with latestLCP_ID + 1.
- /*---------------------------------------------------------------------*/
- fmgReplicaPtr.p->lcpStatus[i] = ZINVALID;
- }//if
- }//for
- keepGci = (Uint32)-1;
- oldestRestorableGci = 0;
- nextLcpNo = fmgReplicaPtr.p->nextLcp;
- lcpNo = fmgReplicaPtr.p->nextLcp;
- do {
- ndbrequire(lcpNo < MAX_LCP_STORED);
- if (fmgReplicaPtr.p->lcpStatus[lcpNo] == ZVALID) {
- jam();
- keepGci = fmgReplicaPtr.p->maxGciCompleted[lcpNo];
- oldestRestorableGci = fmgReplicaPtr.p->maxGciStarted[lcpNo];
- ndbrequire(((int)oldestRestorableGci) >= 0);
- return;
- } else {
- jam();
- ndbrequire(fmgReplicaPtr.p->lcpStatus[lcpNo] == ZINVALID);
- if (fmgReplicaPtr.p->createGci[0] == fmgReplicaPtr.p->initialGci) {
- jam();
- /*-------------------------------------------------------------------
- * WE CAN STILL RESTORE THIS REPLICA WITHOUT ANY LOCAL CHECKPOINTS BY
- * ONLY USING THE LOG. IF THIS IS NOT POSSIBLE THEN WE REPORT THE LAST
- * VALID LOCAL CHECKPOINT AS THE MINIMUM GCI RECOVERABLE.
- *-----------------------------------------------------------------*/
- keepGci = fmgReplicaPtr.p->createGci[0];
- }//if
- }//if
- lcpNo = prevLcpNo(lcpNo);
- } while (lcpNo != nextLcpNo);
- return;
-}//Dbdih::findMinGci()
-
-bool Dbdih::findStartGci(ConstPtr<ReplicaRecord> replicaPtr,
- Uint32 stopGci,
- Uint32& startGci,
- Uint32& lcpNo)
-{
- lcpNo = replicaPtr.p->nextLcp;
- const Uint32 startLcpNo = lcpNo;
- do {
- lcpNo = prevLcpNo(lcpNo);
- ndbrequire(lcpNo < MAX_LCP_STORED);
- if (replicaPtr.p->lcpStatus[lcpNo] == ZVALID) {
- if (replicaPtr.p->maxGciStarted[lcpNo] < stopGci) {
- jam();
- /* ----------------------------------------------------------------- */
- /* WE HAVE FOUND A USEFUL LOCAL CHECKPOINT THAT CAN BE USED FOR */
- /* RESTARTING THIS FRAGMENT REPLICA. */
- /* ----------------------------------------------------------------- */
- startGci = replicaPtr.p->maxGciCompleted[lcpNo] + 1;
- return true;
- }
- }
- } while (lcpNo != startLcpNo);
- /* --------------------------------------------------------------------- */
- /* NO VALID LOCAL CHECKPOINT WAS AVAILABLE. WE WILL ADD THE */
- /* FRAGMENT. THUS THE NEXT LCP MUST BE SET TO ZERO. */
- /* WE MUST EXECUTE THE LOG FROM THE INITIAL GLOBAL CHECKPOINT WHEN */
- /* THE TABLE WAS CREATED. */
- /* --------------------------------------------------------------------- */
- startGci = replicaPtr.p->initialGci;
- ndbrequire(replicaPtr.p->nextLcp == 0);
- return false;
-}//Dbdih::findStartGci()
-
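-/* An isolated sketch of the scan in findStartGci() above, with plain       */
-/* arrays standing in for the ReplicaRecord ring buffer: walk backwards     */
-/* from nextLcp and take the newest valid checkpoint whose maxGciStarted    */
-/* lies below stopGci; the redo log is then needed from maxGciCompleted+1.  */
-#if 0
-static bool pickStartLcp(const bool valid[], const unsigned maxGciStarted[],
-                         const unsigned maxGciCompleted[],
-                         unsigned maxLcpStored, unsigned nextLcp,
-                         unsigned stopGci,
-                         unsigned& startGci, unsigned& lcpNo)
-{
-  unsigned i = nextLcp;
-  do {
-    i = (i == 0 ? maxLcpStored : i) - 1;     /* same step as prevLcpNo()    */
-    if (valid[i] && maxGciStarted[i] < stopGci) {
-      lcpNo = i;
-      startGci = maxGciCompleted[i] + 1;
-      return true;
-    }
-  } while (i != nextLcp);
-  return false;                              /* no usable local checkpoint  */
-}
-#endif
-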
-/**************************************************************************/
-/* ---------------------------------------------------------------------- */
-/* FIND A TAKE OVER REPLICA WHICH IS TO BE STARTED OR COMMITTED WHEN*/
-/* TAKING OVER A FAILED NODE. */
-/* ---------------------------------------------------------------------- */
-/*************************************************************************/
-void Dbdih::findToReplica(TakeOverRecord* regTakeOver,
- Uint32 replicaType,
- FragmentstorePtr fragPtr,
- ReplicaRecordPtr& ftrReplicaPtr)
-{
- switch (replicaType) {
- case CreateFragReq::STORED:
- case CreateFragReq::COMMIT_STORED:
- /* ----------------------------------------------------------------------*/
- /* HERE WE SEARCH FOR STORED REPLICAS. THE REPLICA MUST BE STORED IN THE */
- /* SECTION FOR OLD STORED REPLICAS SINCE WE HAVE NOT TAKEN OVER YET. */
- /* ----------------------------------------------------------------------*/
- ftrReplicaPtr.i = fragPtr.p->oldStoredReplicas;
- while (ftrReplicaPtr.i != RNIL) {
- ptrCheckGuard(ftrReplicaPtr, creplicaFileSize, replicaRecord);
- if (ftrReplicaPtr.p->procNode == regTakeOver->toStartingNode) {
- jam();
- return;
- } else {
- if (ftrReplicaPtr.p->procNode == regTakeOver->toFailedNode) {
- jam();
- return;
- } else {
- jam();
- ftrReplicaPtr.i = ftrReplicaPtr.p->nextReplica;
- }//if
- }//if
- }//while
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
-}//Dbdih::findToReplica()
-
-void Dbdih::initCommonData()
-{
- c_blockCommit = false;
- c_blockCommitNo = 0;
- c_createFragmentLock = RNIL;
- c_endToLock = RNIL;
- cfailurenr = 1;
- cfirstAliveNode = RNIL;
- cfirstDeadNode = RNIL;
- cfirstVerifyQueue = RNIL;
- cgckptflag = false;
- cgcpDelay = 0;
- cgcpMasterTakeOverState = GMTOS_IDLE;
- cgcpOrderBlocked = 0;
- cgcpParticipantState = GCP_PARTICIPANT_READY;
- cgcpSameCounter = 0;
- cgcpStartCounter = 0;
- cgcpStatus = GCP_READY;
-
- clastVerifyQueue = RNIL;
- c_lcpMasterTakeOverState.set(LMTOS_IDLE, __LINE__);
-
- c_lcpState.clcpDelay = 0;
- c_lcpState.lcpStart = ZIDLE;
- c_lcpState.lcpStartGcp = 0;
- c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
- c_lcpState.currentFragment.tableId = 0;
- c_lcpState.currentFragment.fragmentId = 0;
- c_lcpState.noOfLcpFragRepOutstanding = 0;
- c_lcpState.keepGci = 0;
- c_lcpState.oldestRestorableGci = 0;
- c_lcpState.ctcCounter = 0;
- c_lcpState.ctimer = 0;
- c_lcpState.immediateLcpStart = false;
- c_lcpState.m_MASTER_LCPREQ_Received = false;
-
- cmasterdihref = 0;
- cmasterNodeId = 0;
- cmasterState = MASTER_IDLE;
- cmasterTakeOverNode = 0;
- cnewgcp = 0;
- cnoHotSpare = 0;
- cnoOfActiveTables = 0;
- cnoOfNodeGroups = 0;
- cnoReplicas = 0;
- coldgcp = 0;
- coldGcpId = 0;
- coldGcpStatus = cgcpStatus;
- con_lineNodes = 0;
- creceivedfrag = 0;
- crestartGci = 0;
- crestartInfoFile[0] = RNIL;
- crestartInfoFile[1] = RNIL;
- cstartGcpNow = false;
- cstartPhase = 0;
- c_startToLock = RNIL;
- cstarttype = (Uint32)-1;
- csystemnodes = 0;
- c_updateToLock = RNIL;
- currentgcp = 0;
- cverifyQueueCounter = 0;
- cwaitLcpSr = false;
-
- nodeResetStart();
- c_nodeStartMaster.wait = ZFALSE;
-
- memset(&sysfileData[0], 0, sizeof(sysfileData));
-
- const ndb_mgm_configuration_iterator * p =
- theConfiguration.getOwnConfigIterator();
- ndbrequire(p != 0);
-
- c_lcpState.clcpDelay = 20;
- ndb_mgm_get_int_parameter(p, CFG_DB_LCP_INTERVAL, &c_lcpState.clcpDelay);
- c_lcpState.clcpDelay = c_lcpState.clcpDelay > 31 ? 31 : c_lcpState.clcpDelay;
-
- cminHotSpareNodes = 0;
- //ndb_mgm_get_int_parameter(p, CFG_DB_MIN_HOT_SPARES, &cminHotSpareNodes);
- cminHotSpareNodes = cminHotSpareNodes > 2 ? 2 : cminHotSpareNodes;
-
- cnoReplicas = 1;
- ndb_mgm_get_int_parameter(p, CFG_DB_NO_REPLICAS, &cnoReplicas);
- cnoReplicas = cnoReplicas > 4 ? 4 : cnoReplicas;
-
- cgcpDelay = 2000;
- ndb_mgm_get_int_parameter(p, CFG_DB_GCP_INTERVAL, &cgcpDelay);
- cgcpDelay = cgcpDelay > 60000 ? 60000 : (cgcpDelay < 10 ? 10 : cgcpDelay);
-}//Dbdih::initCommonData()
-
-void Dbdih::initFragstore(FragmentstorePtr fragPtr)
-{
- fragPtr.p->storedReplicas = RNIL;
- fragPtr.p->oldStoredReplicas = RNIL;
-
- fragPtr.p->noStoredReplicas = 0;
- fragPtr.p->noOldStoredReplicas = 0;
- fragPtr.p->fragReplicas = 0;
- fragPtr.p->preferredPrimary = 0;
-
- for (Uint32 i = 0; i < MAX_REPLICAS; i++)
- fragPtr.p->activeNodes[i] = 0;
-
- fragPtr.p->noLcpReplicas = 0;
- fragPtr.p->distributionKey = 0;
-}//Dbdih::initFragstore()
-
-/*************************************************************************/
-/* */
-/* MODULE: INIT_RESTART_INFO */
-/* DESCRIPTION: INITIATE RESTART INFO VARIABLE AND VARIABLES FOR */
-/* GLOBAL CHECKPOINTS. */
-/*************************************************************************/
-void Dbdih::initRestartInfo()
-{
- Uint32 i;
- for (i = 0; i < MAX_NDB_NODES; i++) {
- SYSFILE->lastCompletedGCI[i] = 0;
- }//for
- NodeRecordPtr nodePtr;
- nodePtr.i = cfirstAliveNode;
- do {
- jam();
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- SYSFILE->lastCompletedGCI[nodePtr.i] = 1;
- /* FIRST GCP = 1 ALREADY SET BY LQH */
- nodePtr.i = nodePtr.p->nextNode;
- } while (nodePtr.i != RNIL);
- coldgcp = 1;
- currentgcp = 2;
- cnewgcp = 2;
- crestartGci = 1;
-
- SYSFILE->keepGCI = 1;
- SYSFILE->oldestRestorableGCI = 1;
- SYSFILE->newestRestorableGCI = 1;
- SYSFILE->systemRestartBits = 0;
- for (i = 0; i < NodeBitmask::Size; i++) {
-    SYSFILE->lcpActive[i] = 0;
- }//for
- for (i = 0; i < Sysfile::TAKE_OVER_SIZE; i++) {
- SYSFILE->takeOver[i] = 0;
- }//for
- Sysfile::setInitialStartOngoing(SYSFILE->systemRestartBits);
-}//Dbdih::initRestartInfo()
-
-/*--------------------------------------------------------------------*/
-/* NODE GROUP BITS ARE INITIALISED BEFORE THIS. */
-/* NODE ACTIVE BITS ARE INITIALISED BEFORE THIS. */
-/*--------------------------------------------------------------------*/
-/*************************************************************************/
-/* */
-/* MODULE: INIT_RESTORABLE_GCI_FILES */
-/*       DESCRIPTION: THE SUBROUTINE SETS UP THE FILES THAT REFER TO THE */
-/* FILES THAT KEEP THE VARIABLE CRESTART_INFO */
-/*************************************************************************/
-void Dbdih::initRestorableGciFiles()
-{
- Uint32 tirgTmp;
- FileRecordPtr filePtr;
- seizeFile(filePtr);
- filePtr.p->tabRef = RNIL;
- filePtr.p->fileType = FileRecord::GCP_FILE;
- filePtr.p->reqStatus = FileRecord::IDLE;
- filePtr.p->fileStatus = FileRecord::CLOSED;
- crestartInfoFile[0] = filePtr.i;
- filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */
- filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */
- filePtr.p->fileName[2] = (Uint32)-1; /* S PART IGNORED */
- tirgTmp = 1; /* FILE NAME VERSION 1 */
- tirgTmp = (tirgTmp << 8) + 6; /* .SYSFILE */
- tirgTmp = (tirgTmp << 8) + 1; /* D1 DIRECTORY */
- tirgTmp = (tirgTmp << 8) + 0; /* P0 FILE NAME */
- filePtr.p->fileName[3] = tirgTmp;
- /* --------------------------------------------------------------------- */
-  /*       THE NAME BECOMES /D1/DBDICT/P0.SYSFILE                          */
- /* --------------------------------------------------------------------- */
- seizeFile(filePtr);
- filePtr.p->tabRef = RNIL;
- filePtr.p->fileType = FileRecord::GCP_FILE;
- filePtr.p->reqStatus = FileRecord::IDLE;
- filePtr.p->fileStatus = FileRecord::CLOSED;
- crestartInfoFile[1] = filePtr.i;
- filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */
- filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */
- filePtr.p->fileName[2] = (Uint32)-1; /* S PART IGNORED */
- tirgTmp = 1; /* FILE NAME VERSION 1 */
- tirgTmp = (tirgTmp << 8) + 6; /* .SYSFILE */
-  tirgTmp = (tirgTmp << 8) + 2;       /* D2 DIRECTORY                      */
- tirgTmp = (tirgTmp << 8) + 0; /* P0 FILE NAME */
- filePtr.p->fileName[3] = tirgTmp;
- /* --------------------------------------------------------------------- */
- /* THE NAME BECOMES /D2/DBDICT/P0.SYSFILE */
- /* --------------------------------------------------------------------- */
-}//Dbdih::initRestorableGciFiles()
-
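-/* The fourth file-name word built above packs four 8-bit fields into one   */
-/* Uint32; a sketch of that encoding, with field names taken from the       */
-/* comments (the helper itself is illustrative, not part of the block):     */
-#if 0
-static Uint32 packFileNameWord(Uint32 version,   /* file name version, 1    */
-                               Uint32 suffix,    /* 6 = .SYSFILE, 3 = .FRAGLIST */
-                               Uint32 directory, /* 1 = D1, 2 = D2          */
-                               Uint32 fileNo)    /* 0 = P0, 255 = ignored   */
-{
-  return (((((version << 8) | suffix) << 8) | directory) << 8) | fileNo;
-}
-/* packFileNameWord(1, 6, 1, 0) reproduces the first word assembled above.  */
-#endif
-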
-void Dbdih::initTable(TabRecordPtr tabPtr)
-{
- tabPtr.p->noOfFragChunks = 0;
- tabPtr.p->method = TabRecord::NOTDEFINED;
- tabPtr.p->tabStatus = TabRecord::TS_IDLE;
- tabPtr.p->noOfWords = 0;
- tabPtr.p->noPages = 0;
- tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
- tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
- tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
- tabPtr.p->noOfBackups = 0;
- tabPtr.p->kvalue = 0;
- tabPtr.p->hashpointer = (Uint32)-1;
- tabPtr.p->mask = 0;
- tabPtr.p->storedTable = 1;
- tabPtr.p->tabErrorCode = 0;
- tabPtr.p->schemaVersion = (Uint32)-1;
- tabPtr.p->tabRemoveNode = RNIL;
- tabPtr.p->totalfragments = (Uint32)-1;
- tabPtr.p->connectrec = RNIL;
- tabPtr.p->tabFile[0] = RNIL;
- tabPtr.p->tabFile[1] = RNIL;
- tabPtr.p->m_dropTab.tabUserRef = 0;
- tabPtr.p->m_dropTab.tabUserPtr = RNIL;
- Uint32 i;
- for (i = 0; i < MAX_NDB_NODES; i++) {
- tabPtr.p->startFid[i] = RNIL;
- }//for
- for (i = 0; i < 8; i++) {
- tabPtr.p->pageRef[i] = RNIL;
- }//for
- tabPtr.p->tableType = DictTabInfo::UndefTableType;
-}//Dbdih::initTable()
-
-/*************************************************************************/
-/* */
-/* MODULE: INIT_TABLE_FILES */
-/*       DESCRIPTION: THE SUBROUTINE SETS UP THE FILES THAT REFER TO THE */
-/* FILES THAT KEEP THE TABLE FRAGMENTATION DESCRIPTION. */
-/*************************************************************************/
-void Dbdih::initTableFile(TabRecordPtr tabPtr)
-{
- Uint32 titfTmp;
- FileRecordPtr filePtr;
- seizeFile(filePtr);
- filePtr.p->tabRef = tabPtr.i;
- filePtr.p->fileType = FileRecord::TABLE_FILE;
- filePtr.p->reqStatus = FileRecord::IDLE;
- filePtr.p->fileStatus = FileRecord::CLOSED;
- tabPtr.p->tabFile[0] = filePtr.i;
- filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */
- filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */
- filePtr.p->fileName[2] = tabPtr.i; /* Stid FILE NAME */
- titfTmp = 1; /* FILE NAME VERSION 1 */
- titfTmp = (titfTmp << 8) + 3; /* .FRAGLIST */
- titfTmp = (titfTmp << 8) + 1; /* D1 DIRECTORY */
- titfTmp = (titfTmp << 8) + 255; /* P PART IGNORED */
- filePtr.p->fileName[3] = titfTmp;
- /* --------------------------------------------------------------------- */
- /* THE NAME BECOMES /D1/DBDICT/Stid.FRAGLIST */
- /* --------------------------------------------------------------------- */
- seizeFile(filePtr);
- filePtr.p->tabRef = tabPtr.i;
- filePtr.p->fileType = FileRecord::TABLE_FILE;
- filePtr.p->reqStatus = FileRecord::IDLE;
- filePtr.p->fileStatus = FileRecord::CLOSED;
- tabPtr.p->tabFile[1] = filePtr.i;
- filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */
- filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */
- filePtr.p->fileName[2] = tabPtr.i; /* Stid FILE NAME */
- titfTmp = 1; /* FILE NAME VERSION 1 */
- titfTmp = (titfTmp << 8) + 3; /* .FRAGLIST */
- titfTmp = (titfTmp << 8) + 2; /* D2 DIRECTORY */
- titfTmp = (titfTmp << 8) + 255; /* P PART IGNORED */
- filePtr.p->fileName[3] = titfTmp;
- /* --------------------------------------------------------------------- */
- /* THE NAME BECOMES /D2/DBDICT/Stid.FRAGLIST */
- /* --------------------------------------------------------------------- */
-}//Dbdih::initTableFile()
-
-void Dbdih::initialiseRecordsLab(Signal* signal,
- Uint32 stepNo, Uint32 retRef, Uint32 retData)
-{
- switch (stepNo) {
- case 0:
- jam();
- initCommonData();
- break;
- case 1:{
- ApiConnectRecordPtr apiConnectptr;
- jam();
-    /******** INITIALIZING API CONNECT RECORDS ********/
- for (apiConnectptr.i = 0; apiConnectptr.i < capiConnectFileSize; apiConnectptr.i++) {
- refresh_watch_dog();
- ptrAss(apiConnectptr, apiConnectRecord);
- apiConnectptr.p->nextApi = RNIL;
- }//for
- jam();
- break;
- }
- case 2:{
- ConnectRecordPtr connectPtr;
- jam();
- /****** CONNECT ******/
- for (connectPtr.i = 0; connectPtr.i < cconnectFileSize; connectPtr.i++) {
- refresh_watch_dog();
- ptrAss(connectPtr, connectRecord);
- connectPtr.p->userpointer = RNIL;
- connectPtr.p->userblockref = ZNIL;
- connectPtr.p->connectState = ConnectRecord::FREE;
- connectPtr.p->table = RNIL;
- connectPtr.p->nfConnect = connectPtr.i + 1;
- }//for
- connectPtr.i = cconnectFileSize - 1;
- ptrAss(connectPtr, connectRecord);
- connectPtr.p->nfConnect = RNIL;
- cfirstconnect = 0;
- break;
- }
- case 3:
- {
- FileRecordPtr filePtr;
- jam();
-      /******** INITIALIZING FILE RECORDS ********/
- for (filePtr.i = 0; filePtr.i < cfileFileSize; filePtr.i++) {
- ptrAss(filePtr, fileRecord);
- filePtr.p->nextFile = filePtr.i + 1;
- filePtr.p->fileStatus = FileRecord::CLOSED;
- filePtr.p->reqStatus = FileRecord::IDLE;
- }//for
- filePtr.i = cfileFileSize - 1;
- ptrAss(filePtr, fileRecord);
- filePtr.p->nextFile = RNIL;
- cfirstfreeFile = 0;
- initRestorableGciFiles();
- break;
- }
- case 4:
- jam();
- initialiseFragstore();
- break;
- case 5:
- {
- jam();
- /******* NODE GROUP RECORD ******/
- /******* NODE RECORD ******/
- NodeGroupRecordPtr loopNGPtr;
- for (loopNGPtr.i = 0; loopNGPtr.i < MAX_NDB_NODES; loopNGPtr.i++) {
- ptrAss(loopNGPtr, nodeGroupRecord);
- loopNGPtr.p->nodesInGroup[0] = RNIL;
- loopNGPtr.p->nodesInGroup[1] = RNIL;
- loopNGPtr.p->nodesInGroup[2] = RNIL;
- loopNGPtr.p->nodesInGroup[3] = RNIL;
- loopNGPtr.p->nextReplicaNode = 0;
- loopNGPtr.p->nodeCount = 0;
- loopNGPtr.p->activeTakeOver = false;
- }//for
- NodeRecordPtr nodePtr;
- for (nodePtr.i = 0; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- ptrAss(nodePtr, nodeRecord);
- new (nodePtr.p) NodeRecord();
- }//for
- break;
- }
- case 6:
- {
- PageRecordPtr pagePtr;
- jam();
- /******* PAGE RECORD ******/
- for (pagePtr.i = 0; pagePtr.i < cpageFileSize; pagePtr.i++) {
- refresh_watch_dog();
- ptrAss(pagePtr, pageRecord);
- pagePtr.p->nextfreepage = pagePtr.i + 1;
- }//for
- pagePtr.i = cpageFileSize - 1;
- ptrAss(pagePtr, pageRecord);
- pagePtr.p->nextfreepage = RNIL;
- cfirstfreepage = 0;
- break;
- }
- case 7:
- {
- ReplicaRecordPtr initReplicaPtr;
- jam();
- /******* REPLICA RECORD ******/
- for (initReplicaPtr.i = 0; initReplicaPtr.i < creplicaFileSize;
- initReplicaPtr.i++) {
- refresh_watch_dog();
- ptrAss(initReplicaPtr, replicaRecord);
- initReplicaPtr.p->lcpIdStarted = 0;
- initReplicaPtr.p->lcpOngoingFlag = false;
- initReplicaPtr.p->nextReplica = initReplicaPtr.i + 1;
- }//for
- initReplicaPtr.i = creplicaFileSize - 1;
- ptrAss(initReplicaPtr, replicaRecord);
- initReplicaPtr.p->nextReplica = RNIL;
- cnoFreeReplicaRec = creplicaFileSize;
- cfirstfreeReplica = 0;
- break;
- }
- case 8:
- {
- TabRecordPtr loopTabptr;
- jam();
- /********* TAB-DESCRIPTOR ********/
- for (loopTabptr.i = 0; loopTabptr.i < ctabFileSize; loopTabptr.i++) {
- ptrAss(loopTabptr, tabRecord);
- refresh_watch_dog();
- initTable(loopTabptr);
- }//for
- break;
- }
- case 9:
- {
- TakeOverRecordPtr takeOverPtr;
- jam();
- cfirstfreeTakeOver = RNIL;
- for (takeOverPtr.i = 0; takeOverPtr.i < MAX_NDB_NODES; takeOverPtr.i++) {
- ptrAss(takeOverPtr, takeOverRecord);
- initTakeOver(takeOverPtr);
- releaseTakeOver(takeOverPtr.i);
- }//for
-
- ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = retData;
- sendSignal(retRef, GSN_READ_CONFIG_CONF, signal,
- ReadConfigConf::SignalLength, JBB);
- return;
- break;
- }
- default:
- ndbrequire(false);
- break;
- }//switch
- jam();
- /* ---------------------------------------------------------------------- */
- /* SEND REAL-TIME BREAK DURING INIT OF VARIABLES DURING SYSTEM RESTART. */
- /* ---------------------------------------------------------------------- */
- signal->theData[0] = DihContinueB::ZINITIALISE_RECORDS;
- signal->theData[1] = stepNo + 1;
- signal->theData[2] = retRef;
- signal->theData[3] = retData;
- sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
-}//Dbdih::initialiseRecordsLab()
-
-/*************************************************************************/
-/*       INSERT THE NODE INTO THE LINKED LIST OF NODES INVOLVED IN ALL  */
-/* DISTRIBUTED PROTOCOLS (EXCEPT GCP PROTOCOL THAT USES THE DIH */
-/* LINKED LIST INSTEAD). */
-/*************************************************************************/
-void Dbdih::insertAlive(NodeRecordPtr newNodePtr)
-{
- NodeRecordPtr nodePtr;
-
- nodePtr.i = cfirstAliveNode;
- if (nodePtr.i == RNIL) {
- jam();
- cfirstAliveNode = newNodePtr.i;
- } else {
- do {
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- if (nodePtr.p->nextNode == RNIL) {
- jam();
- nodePtr.p->nextNode = newNodePtr.i;
- break;
- } else {
- jam();
- nodePtr.i = nodePtr.p->nextNode;
- }//if
- } while (1);
- }//if
- newNodePtr.p->nextNode = RNIL;
-}//Dbdih::insertAlive()
-
-void Dbdih::insertBackup(FragmentstorePtr fragPtr, Uint32 nodeId)
-{
- for (Uint32 i = fragPtr.p->fragReplicas; i > 1; i--) {
- jam();
- ndbrequire(i < MAX_REPLICAS && i > 0);
- fragPtr.p->activeNodes[i] = fragPtr.p->activeNodes[i - 1];
- }//for
- fragPtr.p->activeNodes[1] = nodeId;
- fragPtr.p->fragReplicas++;
-}//Dbdih::insertBackup()
-
-void Dbdih::insertDeadNode(NodeRecordPtr newNodePtr)
-{
- NodeRecordPtr nodePtr;
-
- nodePtr.i = cfirstDeadNode;
- if (nodePtr.i == RNIL) {
- jam();
- cfirstDeadNode = newNodePtr.i;
- } else {
- do {
- jam();
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- if (nodePtr.p->nextNode == RNIL) {
- jam();
- nodePtr.p->nextNode = newNodePtr.i;
- break;
- } else {
- jam();
- nodePtr.i = nodePtr.p->nextNode;
- }//if
- } while (1);
- }//if
- newNodePtr.p->nextNode = RNIL;
-}//Dbdih::insertDeadNode()
-
-void Dbdih::linkOldStoredReplica(FragmentstorePtr fragPtr,
- ReplicaRecordPtr replicatePtr)
-{
- ReplicaRecordPtr losReplicaPtr;
-
- replicatePtr.p->nextReplica = RNIL;
- fragPtr.p->noOldStoredReplicas++;
- losReplicaPtr.i = fragPtr.p->oldStoredReplicas;
- if (losReplicaPtr.i == RNIL) {
- jam();
- fragPtr.p->oldStoredReplicas = replicatePtr.i;
- return;
- }//if
- ptrCheckGuard(losReplicaPtr, creplicaFileSize, replicaRecord);
- while (losReplicaPtr.p->nextReplica != RNIL) {
- jam();
- losReplicaPtr.i = losReplicaPtr.p->nextReplica;
- ptrCheckGuard(losReplicaPtr, creplicaFileSize, replicaRecord);
-  }//while
- losReplicaPtr.p->nextReplica = replicatePtr.i;
-}//Dbdih::linkOldStoredReplica()
-
-void Dbdih::linkStoredReplica(FragmentstorePtr fragPtr,
- ReplicaRecordPtr replicatePtr)
-{
- ReplicaRecordPtr lsrReplicaPtr;
-
- fragPtr.p->noStoredReplicas++;
- replicatePtr.p->nextReplica = RNIL;
- lsrReplicaPtr.i = fragPtr.p->storedReplicas;
- if (fragPtr.p->storedReplicas == RNIL) {
- jam();
- fragPtr.p->storedReplicas = replicatePtr.i;
- return;
- }//if
- ptrCheckGuard(lsrReplicaPtr, creplicaFileSize, replicaRecord);
- while (lsrReplicaPtr.p->nextReplica != RNIL) {
- jam();
- lsrReplicaPtr.i = lsrReplicaPtr.p->nextReplica;
- ptrCheckGuard(lsrReplicaPtr, creplicaFileSize, replicaRecord);
- }//if
- lsrReplicaPtr.p->nextReplica = replicatePtr.i;
-}//Dbdih::linkStoredReplica()
-
-/*************************************************************************/
-/* MAKE NODE GROUPS BASED ON THE LIST OF NODES RECEIVED FROM CNTR */
-/*************************************************************************/
-void Dbdih::makeNodeGroups(Uint32 nodeArray[])
-{
- NodeRecordPtr mngNodeptr;
- Uint32 tmngNode;
- Uint32 tmngNodeGroup;
- Uint32 tmngLimit;
- Uint32 i;
-
- /**-----------------------------------------------------------------------
- * ASSIGN ALL ACTIVE NODES INTO NODE GROUPS. HOT SPARE NODES ARE ASSIGNED
- * TO NODE GROUP ZNIL
- *-----------------------------------------------------------------------*/
- tmngNodeGroup = 0;
- tmngLimit = csystemnodes - cnoHotSpare;
- ndbrequire(tmngLimit < MAX_NDB_NODES);
- for (i = 0; i < tmngLimit; i++) {
- NodeGroupRecordPtr NGPtr;
- jam();
- tmngNode = nodeArray[i];
- mngNodeptr.i = tmngNode;
- ptrCheckGuard(mngNodeptr, MAX_NDB_NODES, nodeRecord);
- mngNodeptr.p->nodeGroup = tmngNodeGroup;
- NGPtr.i = tmngNodeGroup;
- ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
- arrGuard(NGPtr.p->nodeCount, MAX_REPLICAS);
- NGPtr.p->nodesInGroup[NGPtr.p->nodeCount++] = mngNodeptr.i;
- if (NGPtr.p->nodeCount == cnoReplicas) {
- jam();
- tmngNodeGroup++;
- }//if
- }//for
- cnoOfNodeGroups = tmngNodeGroup;
- ndbrequire(csystemnodes < MAX_NDB_NODES);
- for (i = tmngLimit + 1; i < csystemnodes; i++) {
- jam();
- tmngNode = nodeArray[i];
- mngNodeptr.i = tmngNode;
- ptrCheckGuard(mngNodeptr, MAX_NDB_NODES, nodeRecord);
- mngNodeptr.p->nodeGroup = ZNIL;
- }//for
- for(i = 0; i < MAX_NDB_NODES; i++){
- jam();
- Sysfile::setNodeGroup(i, SYSFILE->nodeGroups, NO_NODE_GROUP_ID);
- }//for
- for (mngNodeptr.i = 1; mngNodeptr.i < MAX_NDB_NODES; mngNodeptr.i++) {
- jam();
- ptrAss(mngNodeptr, nodeRecord);
- if (mngNodeptr.p->nodeGroup != ZNIL) {
- jam();
- Sysfile::setNodeGroup(mngNodeptr.i, SYSFILE->nodeGroups, mngNodeptr.p->nodeGroup);
- }//if
- }//for
-}//Dbdih::makeNodeGroups()
-
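-/* The grouping rule above, condensed into a standalone sketch (plain       */
-/* arrays, hypothetical helper): active nodes are taken in nodeArray[]      */
-/* order and packed into groups of cnoReplicas members; a group is only     */
-/* counted once it is full, and the remaining nodes get no group at all.    */
-#if 0
-static unsigned assignNodeGroups(const unsigned nodes[], unsigned activeCount,
-                                 unsigned replicas, unsigned groupOf[])
-{
-  unsigned group = 0, members = 0;
-  for (unsigned i = 0; i < activeCount; i++) {
-    groupOf[nodes[i]] = group;
-    if (++members == replicas) {
-      group++;                               /* this group is now complete  */
-      members = 0;
-    }
-  }
-  return group;                              /* number of complete groups   */
-}
-#endif
-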
-/**
- * On node failure QMGR asks DIH about node groups. This is
- * a direct signal (function call in same process). Input is
- * bitmask of surviving nodes. The routine is not concerned
- * about node count. Reply is one of:
- * 1) win - we can survive, and nobody else can
- * 2) lose - we cannot survive
- * 3) partition - we can survive but there could be others
- */
-void Dbdih::execCHECKNODEGROUPSREQ(Signal* signal)
-{
- jamEntry();
- CheckNodeGroups* sd = (CheckNodeGroups*)&signal->theData[0];
-
- bool direct = (sd->requestType & CheckNodeGroups::Direct);
- bool ok = false;
- switch(sd->requestType & ~CheckNodeGroups::Direct){
- case CheckNodeGroups::ArbitCheck:{
- ok = true;
- jam();
- unsigned missall = 0;
- unsigned haveall = 0;
- for (Uint32 i = 0; i < cnoOfNodeGroups; i++) {
- jam();
- NodeGroupRecordPtr ngPtr;
- ngPtr.i = i;
- ptrAss(ngPtr, nodeGroupRecord);
- Uint32 count = 0;
- for (Uint32 j = 0; j < ngPtr.p->nodeCount; j++) {
- jam();
- Uint32 nodeId = ngPtr.p->nodesInGroup[j];
- if (sd->mask.get(nodeId)) {
- jam();
- count++;
- }//if
- }//for
- if (count == 0) {
- jam();
- missall++;
- }//if
- if (count == ngPtr.p->nodeCount) {
- haveall++;
- }//if
- }//for
-
- if (missall) {
- jam();
- sd->output = CheckNodeGroups::Lose;
- } else if (haveall) {
- jam();
- sd->output = CheckNodeGroups::Win;
- } else {
- jam();
- sd->output = CheckNodeGroups::Partitioning;
- }//if
- }
- break;
- case CheckNodeGroups::GetNodeGroup:
- ok = true;
- sd->output = Sysfile::getNodeGroup(getOwnNodeId(), SYSFILE->nodeGroups);
- break;
- case CheckNodeGroups::GetNodeGroupMembers: {
- ok = true;
- Uint32 ownNodeGoup =
- Sysfile::getNodeGroup(sd->nodeId, SYSFILE->nodeGroups);
-
- sd->output = ownNodeGoup;
- sd->mask.clear();
-
- NodeGroupRecordPtr ngPtr;
- ngPtr.i = ownNodeGoup;
- ptrAss(ngPtr, nodeGroupRecord);
- for (Uint32 j = 0; j < ngPtr.p->nodeCount; j++) {
- jam();
- sd->mask.set(ngPtr.p->nodesInGroup[j]);
- }
-#if 0
- for (int i = 0; i < MAX_NDB_NODES; i++) {
- if (ownNodeGoup ==
- Sysfile::getNodeGroup(i, SYSFILE->nodeGroups)) {
- sd->mask.set(i);
- }
- }
-#endif
- }
- break;
- }
- ndbrequire(ok);
-
- if (!direct)
- sendSignal(sd->blockRef, GSN_CHECKNODEGROUPSCONF, signal,
- CheckNodeGroups::SignalLength, JBB);
-}//Dbdih::execCHECKNODEGROUPSREQ()
-
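-/* The ArbitCheck decision above in isolation (hypothetical plain arrays    */
-/* instead of NodeGroupRecord and the signal bitmask): count surviving      */
-/* members per node group; a group with no survivor means we cannot form a  */
-/* complete cluster (lose), a fully surviving group means the other side    */
-/* cannot (win), anything in between may be a partition.                    */
-#if 0
-enum ArbitOutcome { ArbitWin, ArbitLose, ArbitPartitioning };
-
-static ArbitOutcome arbitCheck(const unsigned groupSize[],
-                               const unsigned survivors[],
-                               unsigned noOfGroups)
-{
-  unsigned missall = 0, haveall = 0;
-  for (unsigned g = 0; g < noOfGroups; g++) {
-    if (survivors[g] == 0)            missall++;
-    if (survivors[g] == groupSize[g]) haveall++;
-  }
-  if (missall) return ArbitLose;
-  if (haveall) return ArbitWin;
-  return ArbitPartitioning;
-}
-#endif
-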
-void Dbdih::makePrnList(ReadNodesConf * readNodes, Uint32 nodeArray[])
-{
- cfirstAliveNode = RNIL;
- ndbrequire(con_lineNodes > 0);
- ndbrequire(csystemnodes < MAX_NDB_NODES);
- for (Uint32 i = 0; i < csystemnodes; i++) {
- NodeRecordPtr nodePtr;
- jam();
- nodePtr.i = nodeArray[i];
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- new (nodePtr.p) NodeRecord();
- if (NodeBitmask::get(readNodes->inactiveNodes, nodePtr.i) == false){
- jam();
- nodePtr.p->nodeStatus = NodeRecord::ALIVE;
- nodePtr.p->useInTransactions = true;
- nodePtr.p->copyCompleted = true;
- nodePtr.p->m_inclDihLcp = true;
- insertAlive(nodePtr);
- } else {
- jam();
- nodePtr.p->nodeStatus = NodeRecord::DEAD;
- insertDeadNode(nodePtr);
- }//if
- }//for
-}//Dbdih::makePrnList()
-
-/*************************************************************************/
-/* A NEW CRASHED REPLICA IS ADDED BY A NODE FAILURE. */
-/*************************************************************************/
-void Dbdih::newCrashedReplica(Uint32 nodeId, ReplicaRecordPtr ncrReplicaPtr)
-{
- /*----------------------------------------------------------------------*/
- /* SET THE REPLICA_LAST_GCI OF THE CRASHED REPLICA TO LAST GCI */
- /* EXECUTED BY THE FAILED NODE. */
- /*----------------------------------------------------------------------*/
- /* WE HAVE A NEW CRASHED REPLICA. INITIATE CREATE GCI TO INDICATE */
- /* THAT THE NEW REPLICA IS NOT STARTED YET AND REPLICA_LAST_GCI IS*/
- /* SET TO -1 TO INDICATE THAT IT IS NOT DEAD YET. */
- /*----------------------------------------------------------------------*/
- arrGuard(ncrReplicaPtr.p->noCrashedReplicas + 1, 8);
- ncrReplicaPtr.p->replicaLastGci[ncrReplicaPtr.p->noCrashedReplicas] =
- SYSFILE->lastCompletedGCI[nodeId];
- ncrReplicaPtr.p->noCrashedReplicas = ncrReplicaPtr.p->noCrashedReplicas + 1;
- ncrReplicaPtr.p->createGci[ncrReplicaPtr.p->noCrashedReplicas] = 0;
- ncrReplicaPtr.p->replicaLastGci[ncrReplicaPtr.p->noCrashedReplicas] =
- (Uint32)-1;
-}//Dbdih::newCrashedReplica()
-
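-/* The bookkeeping in newCrashedReplica() above, shown on bare arrays       */
-/* (hypothetical helper): a node failure closes the current life span of    */
-/* the replica and opens a new one that is neither started (createGci = 0)  */
-/* nor dead (replicaLastGci = ~0) yet. The caller guards noCrashed + 1      */
-/* against the array size, exactly as the arrGuard above does.              */
-#if 0
-static void closeCrashedSpan(Uint32 createGci[], Uint32 replicaLastGci[],
-                             Uint32& noCrashed, Uint32 lastCompletedGci)
-{
-  replicaLastGci[noCrashed] = lastCompletedGci; /* close the dying span     */
-  noCrashed++;
-  createGci[noCrashed] = 0;                     /* next span not started... */
-  replicaLastGci[noCrashed] = (Uint32)-1;       /* ...and not yet dead      */
-}
-#endif
-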
-/*************************************************************************/
-/* AT NODE FAILURE DURING START OF A NEW NODE WE NEED TO RESET A */
-/* SET OF VARIABLES CONTROLLING THE START AND INDICATING ONGOING */
-/* START OF A NEW NODE. */
-/*************************************************************************/
-void Dbdih::nodeResetStart()
-{
- jam();
- c_nodeStartMaster.startNode = RNIL;
- c_nodeStartMaster.failNr = cfailurenr;
- c_nodeStartMaster.activeState = false;
- c_nodeStartMaster.blockGcp = false;
- c_nodeStartMaster.blockLcp = false;
- c_nodeStartMaster.m_outstandingGsn = 0;
-}//Dbdih::nodeResetStart()
-
-void Dbdih::openFileRw(Signal* signal, FileRecordPtr filePtr)
-{
- signal->theData[0] = reference();
- signal->theData[1] = filePtr.i;
- signal->theData[2] = filePtr.p->fileName[0];
- signal->theData[3] = filePtr.p->fileName[1];
- signal->theData[4] = filePtr.p->fileName[2];
- signal->theData[5] = filePtr.p->fileName[3];
- signal->theData[6] = FsOpenReq::OM_READWRITE;
- sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
-}//Dbdih::openFileRw()
-
-void Dbdih::openFileRo(Signal* signal, FileRecordPtr filePtr)
-{
- signal->theData[0] = reference();
- signal->theData[1] = filePtr.i;
- signal->theData[2] = filePtr.p->fileName[0];
- signal->theData[3] = filePtr.p->fileName[1];
- signal->theData[4] = filePtr.p->fileName[2];
- signal->theData[5] = filePtr.p->fileName[3];
- signal->theData[6] = FsOpenReq::OM_READONLY;
- sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
-}//Dbdih::openFileRo()
-
-/*************************************************************************/
-/* REMOVE A CRASHED REPLICA BY PACKING THE ARRAY OF CREATED GCI AND*/
-/* THE LAST GCI OF THE CRASHED REPLICA. */
-/*************************************************************************/
-void Dbdih::packCrashedReplicas(ReplicaRecordPtr replicaPtr)
-{
- ndbrequire(replicaPtr.p->noCrashedReplicas > 0);
- ndbrequire(replicaPtr.p->noCrashedReplicas <= 8);
- for (Uint32 i = 0; i < replicaPtr.p->noCrashedReplicas; i++) {
- jam();
- replicaPtr.p->createGci[i] = replicaPtr.p->createGci[i + 1];
- replicaPtr.p->replicaLastGci[i] = replicaPtr.p->replicaLastGci[i + 1];
- }//for
- replicaPtr.p->noCrashedReplicas--;
-
-#ifdef VM_TRACE
- for (Uint32 i = 0; i < replicaPtr.p->noCrashedReplicas; i++) {
- jam();
- ndbrequire(replicaPtr.p->createGci[i] != 0xF1F1F1F1);
- ndbrequire(replicaPtr.p->replicaLastGci[i] != 0xF1F1F1F1);
- }//for
-#endif
-}//Dbdih::packCrashedReplicas()
-
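-/* packCrashedReplicas() above is a plain left shift of the two parallel    */
-/* GCI arrays; the same operation on bare arrays (illustrative only, the    */
-/* caller guards the array bounds exactly as the ndbrequires above do):     */
-#if 0
-static void dropOldestCrashedSpan(Uint32 createGci[], Uint32 replicaLastGci[],
-                                  Uint32& noCrashed)
-{
-  for (Uint32 i = 0; i < noCrashed; i++) {   /* shift span i+1 down to i    */
-    createGci[i] = createGci[i + 1];
-    replicaLastGci[i] = replicaLastGci[i + 1];
-  }
-  noCrashed--;
-}
-#endif
-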
-void Dbdih::prepareReplicas(FragmentstorePtr fragPtr)
-{
- ReplicaRecordPtr prReplicaPtr;
- Uint32 prevReplica = RNIL;
-
- /* --------------------------------------------------------------------- */
- /* BEGIN BY LINKING ALL REPLICA RECORDS ONTO THE OLD STORED REPLICA*/
- /* LIST. */
- /* AT A SYSTEM RESTART OBVIOUSLY ALL NODES ARE OLD. */
- /* --------------------------------------------------------------------- */
- prReplicaPtr.i = fragPtr.p->storedReplicas;
- while (prReplicaPtr.i != RNIL) {
- jam();
- prevReplica = prReplicaPtr.i;
- ptrCheckGuard(prReplicaPtr, creplicaFileSize, replicaRecord);
- prReplicaPtr.i = prReplicaPtr.p->nextReplica;
- }//while
- /* --------------------------------------------------------------------- */
- /* LIST OF STORED REPLICAS WILL BE EMPTY NOW. */
- /* --------------------------------------------------------------------- */
- if (prevReplica != RNIL) {
- prReplicaPtr.i = prevReplica;
- ptrCheckGuard(prReplicaPtr, creplicaFileSize, replicaRecord);
- prReplicaPtr.p->nextReplica = fragPtr.p->oldStoredReplicas;
- fragPtr.p->oldStoredReplicas = fragPtr.p->storedReplicas;
- fragPtr.p->storedReplicas = RNIL;
- fragPtr.p->noOldStoredReplicas += fragPtr.p->noStoredReplicas;
- fragPtr.p->noStoredReplicas = 0;
- }//if
-}//Dbdih::prepareReplicas()
-
-void Dbdih::readFragment(RWFragment* rf, FragmentstorePtr fragPtr)
-{
- Uint32 TreadFid = readPageWord(rf);
- fragPtr.p->preferredPrimary = readPageWord(rf);
- fragPtr.p->noStoredReplicas = readPageWord(rf);
- fragPtr.p->noOldStoredReplicas = readPageWord(rf);
- Uint32 TdistKey = readPageWord(rf);
-
- ndbrequire(fragPtr.p->noStoredReplicas > 0);
- ndbrequire(TreadFid == rf->fragId);
- ndbrequire(TdistKey < 256);
- if ((cstarttype == NodeState::ST_NODE_RESTART) ||
- (cstarttype == NodeState::ST_INITIAL_NODE_RESTART)) {
- jam();
- fragPtr.p->distributionKey = TdistKey;
- }//if
-}//Dbdih::readFragment()
-
-Uint32 Dbdih::readPageWord(RWFragment* rf)
-{
- if (rf->wordIndex >= 2048) {
- jam();
- ndbrequire(rf->wordIndex == 2048);
- rf->pageIndex++;
- ndbrequire(rf->pageIndex < 8);
- rf->rwfPageptr.i = rf->rwfTabPtr.p->pageRef[rf->pageIndex];
- ptrCheckGuard(rf->rwfPageptr, cpageFileSize, pageRecord);
- rf->wordIndex = 32;
- }//if
- Uint32 dataWord = rf->rwfPageptr.p->word[rf->wordIndex];
- rf->wordIndex++;
- return dataWord;
-}//Dbdih::readPageWord()
-
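-/* readPageWord() above streams words out of a chain of 2048-word pages,    */
-/* skipping a 32-word header when it crosses onto the next page; the same   */
-/* cursor on a flat page array (hypothetical PageCursor type):              */
-#if 0
-struct PageCursor {
-  const Uint32 (*pages)[2048];               /* up to 8 pages per table     */
-  Uint32 pageIndex;
-  Uint32 wordIndex;
-};
-
-static Uint32 nextPageWord(PageCursor& c)
-{
-  if (c.wordIndex >= 2048) {                 /* page exhausted              */
-    c.pageIndex++;
-    c.wordIndex = 32;                        /* skip the page header        */
-  }
-  return c.pages[c.pageIndex][c.wordIndex++];
-}
-#endif
-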
-void Dbdih::readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr)
-{
- Uint32 i;
- readReplicaPtr.p->procNode = readPageWord(rf);
- readReplicaPtr.p->initialGci = readPageWord(rf);
- readReplicaPtr.p->noCrashedReplicas = readPageWord(rf);
- readReplicaPtr.p->nextLcp = readPageWord(rf);
-
- for (i = 0; i < MAX_LCP_STORED; i++) {
- readReplicaPtr.p->maxGciCompleted[i] = readPageWord(rf);
- readReplicaPtr.p->maxGciStarted[i] = readPageWord(rf);
- readReplicaPtr.p->lcpId[i] = readPageWord(rf);
- readReplicaPtr.p->lcpStatus[i] = readPageWord(rf);
- }//for
- const Uint32 noCrashedReplicas = readReplicaPtr.p->noCrashedReplicas;
- ndbrequire(noCrashedReplicas < 8);
- for (i = 0; i < noCrashedReplicas; i++) {
- readReplicaPtr.p->createGci[i] = readPageWord(rf);
- readReplicaPtr.p->replicaLastGci[i] = readPageWord(rf);
- ndbrequire(readReplicaPtr.p->createGci[i] != 0xF1F1F1F1);
- ndbrequire(readReplicaPtr.p->replicaLastGci[i] != 0xF1F1F1F1);
- }//for
- for(i = noCrashedReplicas; i<8; i++){
- readReplicaPtr.p->createGci[i] = readPageWord(rf);
- readReplicaPtr.p->replicaLastGci[i] = readPageWord(rf);
- // They are not initialized...
- readReplicaPtr.p->createGci[i] = 0;
- readReplicaPtr.p->replicaLastGci[i] = ~0;
- }
- /* ---------------------------------------------------------------------- */
- /* IF THE LAST COMPLETED LOCAL CHECKPOINT IS VALID AND LARGER THAN */
- /* THE LAST COMPLETED CHECKPOINT THEN WE WILL INVALIDATE THIS LOCAL */
- /* CHECKPOINT FOR THIS REPLICA. */
- /* ---------------------------------------------------------------------- */
- Uint32 trraLcp = prevLcpNo(readReplicaPtr.p->nextLcp);
- ndbrequire(trraLcp < MAX_LCP_STORED);
- if ((readReplicaPtr.p->lcpStatus[trraLcp] == ZVALID) &&
- (readReplicaPtr.p->lcpId[trraLcp] > SYSFILE->latestLCP_ID)) {
- jam();
- readReplicaPtr.p->lcpStatus[trraLcp] = ZINVALID;
- }//if
- /* ---------------------------------------------------------------------- */
- /* WE ALSO HAVE TO INVALIDATE ANY LOCAL CHECKPOINTS THAT HAVE BEEN */
- /* INVALIDATED BY MOVING BACK THE RESTART GCI. */
- /* ---------------------------------------------------------------------- */
- for (i = 0; i < MAX_LCP_STORED; i++) {
- jam();
- if ((readReplicaPtr.p->lcpStatus[i] == ZVALID) &&
- (readReplicaPtr.p->maxGciStarted[i] > SYSFILE->newestRestorableGCI)) {
- jam();
- readReplicaPtr.p->lcpStatus[i] = ZINVALID;
- }//if
- }//for
- /* ---------------------------------------------------------------------- */
- /* WE WILL REMOVE ANY OCCURRENCES OF REPLICAS THAT HAVE CRASHED */
- /* THAT ARE NO LONGER VALID DUE TO MOVING RESTART GCI BACKWARDS. */
- /* ---------------------------------------------------------------------- */
- removeTooNewCrashedReplicas(readReplicaPtr);
- /* ---------------------------------------------------------------------- */
- /* WE WILL REMOVE ANY OCCURRENCES OF REPLICAS THAT HAVE CRASHED */
- /* THAT ARE NO LONGER VALID SINCE THEY ARE NO LONGER RESTORABLE. */
- /* ---------------------------------------------------------------------- */
- removeOldCrashedReplicas(readReplicaPtr);
- /* --------------------------------------------------------------------- */
- // We set the last GCI of the replica that was alive before the node
- // crashed last time. We set it to the last GCI which the node participated in.
- /* --------------------------------------------------------------------- */
- ndbrequire(readReplicaPtr.p->noCrashedReplicas < 8);
- readReplicaPtr.p->replicaLastGci[readReplicaPtr.p->noCrashedReplicas] =
- SYSFILE->lastCompletedGCI[readReplicaPtr.p->procNode];
- /* ---------------------------------------------------------------------- */
- /* FIND PROCESSOR RECORD */
- /* ---------------------------------------------------------------------- */
-}//Dbdih::readReplica()
-
-void Dbdih::readReplicas(RWFragment* rf, FragmentstorePtr fragPtr)
-{
- Uint32 i;
- ReplicaRecordPtr newReplicaPtr;
- Uint32 noStoredReplicas = fragPtr.p->noStoredReplicas;
- Uint32 noOldStoredReplicas = fragPtr.p->noOldStoredReplicas;
- /* ----------------------------------------------------------------------- */
- /* WE CLEAR THE NUMBER OF STORED REPLICAS SINCE IT WILL BE CALCULATED */
- /* BY THE LINKING SUBROUTINES. */
- /* ----------------------------------------------------------------------- */
- fragPtr.p->noStoredReplicas = 0;
- fragPtr.p->noOldStoredReplicas = 0;
- Uint32 replicaIndex = 0;
- ndbrequire(noStoredReplicas + noOldStoredReplicas <= MAX_REPLICAS);
- for (i = 0; i < noStoredReplicas; i++) {
- seizeReplicaRec(newReplicaPtr);
- readReplica(rf, newReplicaPtr);
- if (checkNodeAlive(newReplicaPtr.p->procNode)) {
- jam();
- ndbrequire(replicaIndex < MAX_REPLICAS);
- fragPtr.p->activeNodes[replicaIndex] = newReplicaPtr.p->procNode;
- replicaIndex++;
- linkStoredReplica(fragPtr, newReplicaPtr);
- } else {
- jam();
- linkOldStoredReplica(fragPtr, newReplicaPtr);
- }//if
- }//for
- fragPtr.p->fragReplicas = noStoredReplicas;
- for (i = 0; i < noOldStoredReplicas; i++) {
- jam();
- seizeReplicaRec(newReplicaPtr);
- readReplica(rf, newReplicaPtr);
- linkOldStoredReplica(fragPtr, newReplicaPtr);
- }//for
-}//Dbdih::readReplicas()
-
-void Dbdih::readRestorableGci(Signal* signal, FileRecordPtr filePtr)
-{
- signal->theData[0] = filePtr.p->fileRef;
- signal->theData[1] = reference();
- signal->theData[2] = filePtr.i;
- signal->theData[3] = ZLIST_OF_PAIRS;
- signal->theData[4] = ZVAR_NO_CRESTART_INFO;
- signal->theData[5] = 1;
- signal->theData[6] = 0;
- signal->theData[7] = 0;
- sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
-}//Dbdih::readRestorableGci()
-
-void Dbdih::readTabfile(Signal* signal, TabRecord* tab, FileRecordPtr filePtr)
-{
- signal->theData[0] = filePtr.p->fileRef;
- signal->theData[1] = reference();
- signal->theData[2] = filePtr.i;
- signal->theData[3] = ZLIST_OF_PAIRS;
- signal->theData[4] = ZVAR_NO_WORD;
- signal->theData[5] = tab->noPages;
- for (Uint32 i = 0; i < tab->noPages; i++) {
- signal->theData[6 + (2 * i)] = tab->pageRef[i];
- signal->theData[7 + (2 * i)] = i;
- }//for
- sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 22, JBA);
-}//Dbdih::readTabfile()
-
-void Dbdih::releasePage(Uint32 pageIndex)
-{
- PageRecordPtr pagePtr;
- pagePtr.i = pageIndex;
- ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
- pagePtr.p->nextfreepage = cfirstfreepage;
- cfirstfreepage = pagePtr.i;
-}//Dbdih::releasePage()
-
-void Dbdih::releaseTabPages(Uint32 tableId)
-{
- TabRecordPtr tabPtr;
- tabPtr.i = tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- ndbrequire(tabPtr.p->noPages <= 8);
- for (Uint32 i = 0; i < tabPtr.p->noPages; i++) {
- jam();
- releasePage(tabPtr.p->pageRef[i]);
- }//for
- tabPtr.p->noPages = 0;
-}//Dbdih::releaseTabPages()
-
-/*************************************************************************/
-/* REMOVE NODE FROM SET OF ALIVE NODES. */
-/*************************************************************************/
-void Dbdih::removeAlive(NodeRecordPtr removeNodePtr)
-{
- NodeRecordPtr nodePtr;
-
- nodePtr.i = cfirstAliveNode;
- if (nodePtr.i == removeNodePtr.i) {
- jam();
- cfirstAliveNode = removeNodePtr.p->nextNode;
- return;
- }//if
- do {
- jam();
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- if (nodePtr.p->nextNode == removeNodePtr.i) {
- jam();
- nodePtr.p->nextNode = removeNodePtr.p->nextNode;
- break;
- } else {
- jam();
- nodePtr.i = nodePtr.p->nextNode;
- }//if
- } while (1);
-}//Dbdih::removeAlive()
-
-/*************************************************************************/
-/* REMOVE NODE FROM SET OF DEAD NODES. */
-/*************************************************************************/
-void Dbdih::removeDeadNode(NodeRecordPtr removeNodePtr)
-{
- NodeRecordPtr nodePtr;
-
- nodePtr.i = cfirstDeadNode;
- if (nodePtr.i == removeNodePtr.i) {
- jam();
- cfirstDeadNode = removeNodePtr.p->nextNode;
- return;
- }//if
- do {
- jam();
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- if (nodePtr.p->nextNode == removeNodePtr.i) {
- jam();
- nodePtr.p->nextNode = removeNodePtr.p->nextNode;
- break;
- } else {
- jam();
- nodePtr.i = nodePtr.p->nextNode;
- }//if
- } while (1);
-}//Dbdih::removeDeadNode()
-
-/*---------------------------------------------------------------*/
-/* REMOVE REPLICAS OF A FAILED NODE FROM LIST OF STORED */
-/* REPLICAS AND MOVE IT TO THE LIST OF OLD STORED REPLICAS.*/
-/* ALSO UPDATE THE CRASHED REPLICA INFORMATION. */
-/*---------------------------------------------------------------*/
-void Dbdih::removeNodeFromStored(Uint32 nodeId,
- FragmentstorePtr fragPtr,
- ReplicaRecordPtr replicatePtr)
-{
- newCrashedReplica(nodeId, replicatePtr);
- removeStoredReplica(fragPtr, replicatePtr);
- linkOldStoredReplica(fragPtr, replicatePtr);
- ndbrequire(fragPtr.p->storedReplicas != RNIL);
-}//Dbdih::removeNodeFromStored()
-
-/*************************************************************************/
-/* REMOVE ANY OLD CRASHED REPLICAS THAT ARE NOT RESTORABLE ANY MORE*/
-/*************************************************************************/
-void Dbdih::removeOldCrashedReplicas(ReplicaRecordPtr rocReplicaPtr)
-{
- while (rocReplicaPtr.p->noCrashedReplicas > 0) {
- jam();
- /* --------------------------------------------------------------------- */
- /* ONLY IF THERE IS AT LEAST ONE REPLICA THEN CAN WE REMOVE ANY. */
- /* --------------------------------------------------------------------- */
- if (rocReplicaPtr.p->replicaLastGci[0] < SYSFILE->oldestRestorableGCI){
- jam();
- /* ------------------------------------------------------------------- */
- /* THIS CRASHED REPLICA HAS BECOME EXTINCT AND MUST BE REMOVED TO */
- /* GIVE SPACE FOR NEW CRASHED REPLICAS. */
- /* ------------------------------------------------------------------- */
- packCrashedReplicas(rocReplicaPtr);
- } else {
- break;
- }//if
- }//while
- if (rocReplicaPtr.p->createGci[0] < SYSFILE->keepGCI){
- jam();
- /* --------------------------------------------------------------------- */
- /* MOVE FORWARD THE CREATE GCI TO A GCI THAT CAN BE USED. WE HAVE */
- /* NO CERTAINTY IN FINDING ANY LOG RECORDS FROM OLDER GCI'S. */
- /* --------------------------------------------------------------------- */
- rocReplicaPtr.p->createGci[0] = SYSFILE->keepGCI;
- ndbrequire(SYSFILE->keepGCI != 0xF1F1F1F1);
- }//if
-}//Dbdih::removeOldCrashedReplicas()
-
-void Dbdih::removeOldStoredReplica(FragmentstorePtr fragPtr,
- ReplicaRecordPtr replicatePtr)
-{
- ReplicaRecordPtr rosTmpReplicaPtr;
- ReplicaRecordPtr rosPrevReplicaPtr;
-
- fragPtr.p->noOldStoredReplicas--;
- if (fragPtr.p->oldStoredReplicas == replicatePtr.i) {
- jam();
- fragPtr.p->oldStoredReplicas = replicatePtr.p->nextReplica;
- } else {
- rosPrevReplicaPtr.i = fragPtr.p->oldStoredReplicas;
- ptrCheckGuard(rosPrevReplicaPtr, creplicaFileSize, replicaRecord);
- rosTmpReplicaPtr.i = rosPrevReplicaPtr.p->nextReplica;
- while (rosTmpReplicaPtr.i != replicatePtr.i) {
- jam();
- rosPrevReplicaPtr.i = rosTmpReplicaPtr.i;
- ptrCheckGuard(rosPrevReplicaPtr, creplicaFileSize, replicaRecord);
- ptrCheckGuard(rosTmpReplicaPtr, creplicaFileSize, replicaRecord);
- rosTmpReplicaPtr.i = rosTmpReplicaPtr.p->nextReplica;
-    }//while
- rosPrevReplicaPtr.p->nextReplica = replicatePtr.p->nextReplica;
- }//if
-}//Dbdih::removeOldStoredReplica()
-
-void Dbdih::removeStoredReplica(FragmentstorePtr fragPtr,
- ReplicaRecordPtr replicatePtr)
-{
- ReplicaRecordPtr rsrTmpReplicaPtr;
- ReplicaRecordPtr rsrPrevReplicaPtr;
-
- fragPtr.p->noStoredReplicas--;
- if (fragPtr.p->storedReplicas == replicatePtr.i) {
- jam();
- fragPtr.p->storedReplicas = replicatePtr.p->nextReplica;
- } else {
- jam();
- rsrPrevReplicaPtr.i = fragPtr.p->storedReplicas;
- rsrTmpReplicaPtr.i = fragPtr.p->storedReplicas;
- ptrCheckGuard(rsrTmpReplicaPtr, creplicaFileSize, replicaRecord);
- rsrTmpReplicaPtr.i = rsrTmpReplicaPtr.p->nextReplica;
- while (rsrTmpReplicaPtr.i != replicatePtr.i) {
- jam();
- rsrPrevReplicaPtr.i = rsrTmpReplicaPtr.i;
- ptrCheckGuard(rsrTmpReplicaPtr, creplicaFileSize, replicaRecord);
- rsrTmpReplicaPtr.i = rsrTmpReplicaPtr.p->nextReplica;
- }//while
- ptrCheckGuard(rsrPrevReplicaPtr, creplicaFileSize, replicaRecord);
- rsrPrevReplicaPtr.p->nextReplica = replicatePtr.p->nextReplica;
- }//if
-}//Dbdih::removeStoredReplica()
-
-/*************************************************************************/
-/*       REMOVE ALL TOO NEW CRASHED REPLICAS THAT ARE IN THIS REPLICA.  */
-/*************************************************************************/
-void Dbdih::removeTooNewCrashedReplicas(ReplicaRecordPtr rtnReplicaPtr)
-{
- while (rtnReplicaPtr.p->noCrashedReplicas > 0) {
- jam();
- /* --------------------------------------------------------------------- */
- /* REMOVE ALL REPLICAS THAT ONLY LIVED IN A PERIOD THAT HAVE BEEN */
- /* REMOVED FROM THE RESTART INFORMATION SINCE THE RESTART FAILED */
- /* TOO MANY TIMES. */
- /* --------------------------------------------------------------------- */
- arrGuard(rtnReplicaPtr.p->noCrashedReplicas - 1, 8);
- if (rtnReplicaPtr.p->createGci[rtnReplicaPtr.p->noCrashedReplicas - 1] >
- SYSFILE->newestRestorableGCI){
- jam();
- rtnReplicaPtr.p->createGci[rtnReplicaPtr.p->noCrashedReplicas - 1] =
- (Uint32)-1;
- rtnReplicaPtr.p->replicaLastGci[rtnReplicaPtr.p->noCrashedReplicas - 1] =
- (Uint32)-1;
- rtnReplicaPtr.p->noCrashedReplicas--;
- } else {
- break;
- }//if
- }//while
-}//Dbdih::removeTooNewCrashedReplicas()
-
-/*************************************************************************/
-/* */
-/* MODULE: SEARCH FOR POSSIBLE REPLICAS THAT CAN HANDLE THE GLOBAL */
-/* CHECKPOINT WITHOUT NEEDING ANY EXTRA LOGGING FACILITIES.*/
-/* A MAXIMUM OF FOUR NODES IS RETRIEVED. */
-/*************************************************************************/
-void Dbdih::searchStoredReplicas(FragmentstorePtr fragPtr)
-{
- Uint32 nextReplicaPtrI;
- ConstPtr<ReplicaRecord> replicaPtr;
-
- replicaPtr.i = fragPtr.p->storedReplicas;
- while (replicaPtr.i != RNIL) {
- jam();
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- nextReplicaPtrI = replicaPtr.p->nextReplica;
- NodeRecordPtr nodePtr;
- nodePtr.i = replicaPtr.p->procNode;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
- jam();
- switch (nodePtr.p->activeStatus) {
- case Sysfile::NS_Active:
- case Sysfile::NS_ActiveMissed_1:
- case Sysfile::NS_ActiveMissed_2:{
- /* ----------------------------------------------------------------- */
- /* INITIALISE THE CREATE REPLICA STRUCTURE THAT IS USED FOR SENDING*/
- /* TO LQH START_FRAGREQ. */
- /* SET THE DATA NODE WHERE THE LOCAL CHECKPOINT IS FOUND. ALSO */
- /* SET A REFERENCE TO THE REPLICA POINTER OF THAT. */
- /* ----------------------------------------------------------------- */
- CreateReplicaRecordPtr createReplicaPtr;
- createReplicaPtr.i = cnoOfCreateReplicas;
- ptrCheckGuard(createReplicaPtr, 4, createReplicaRecord);
- cnoOfCreateReplicas++;
- createReplicaPtr.p->dataNodeId = replicaPtr.p->procNode;
- createReplicaPtr.p->replicaRec = replicaPtr.i;
- /* ----------------------------------------------------------------- */
- /* WE NEED TO SEARCH FOR A PROPER LOCAL CHECKPOINT TO USE FOR THE */
- /* SYSTEM RESTART. */
- /* ----------------------------------------------------------------- */
- Uint32 startGci;
- Uint32 startLcpNo;
- Uint32 stopGci = SYSFILE->newestRestorableGCI;
- bool result = findStartGci(replicaPtr,
- stopGci,
- startGci,
- startLcpNo);
- if (!result) {
- jam();
- /* --------------------------------------------------------------- */
-          /* WE COULD NOT FIND ANY LOCAL CHECKPOINT. THE FRAGMENT THUS DOES  */
-          /* NOT CONTAIN ANY VALID LOCAL CHECKPOINT. IT DOES HOWEVER CONTAIN A*/
- /* VALID FRAGMENT LOG. THUS BY FIRST CREATING THE FRAGMENT AND THEN*/
- /* EXECUTING THE FRAGMENT LOG WE CAN CREATE THE FRAGMENT AS */
- /* DESIRED. THIS SHOULD ONLY OCCUR AFTER CREATING A FRAGMENT. */
- /* */
- /* TO INDICATE THAT NO LOCAL CHECKPOINT IS TO BE USED WE SET THE */
- /* LOCAL CHECKPOINT TO ZNIL. */
- /* --------------------------------------------------------------- */
- createReplicaPtr.p->lcpNo = ZNIL;
- } else {
- jam();
- /* --------------------------------------------------------------- */
- /* WE FOUND A PROPER LOCAL CHECKPOINT TO RESTART FROM. */
- /* SET LOCAL CHECKPOINT ID AND LOCAL CHECKPOINT NUMBER. */
- /* --------------------------------------------------------------- */
- createReplicaPtr.p->lcpNo = startLcpNo;
- arrGuard(startLcpNo, MAX_LCP_STORED);
- createReplicaPtr.p->createLcpId = replicaPtr.p->lcpId[startLcpNo];
- }//if
-
- if(ERROR_INSERTED(7073) || ERROR_INSERTED(7074)){
- jam();
- nodePtr.p->nodeStatus = NodeRecord::DEAD;
- }
-
- /* ----------------------------------------------------------------- */
- /* WE HAVE EITHER FOUND A LOCAL CHECKPOINT OR WE ARE PLANNING TO */
- /* EXECUTE THE LOG FROM THE INITIAL CREATION OF THE TABLE. IN BOTH */
- /* CASES WE NEED TO FIND A SET OF LOGS THAT CAN EXECUTE SUCH THAT */
- /* WE RECOVER TO THE SYSTEM RESTART GLOBAL CHECKPOINT. */
-        /* ------------------------------------------------------------------ */
- if (!findLogNodes(createReplicaPtr.p, fragPtr, startGci, stopGci)) {
- jam();
- /* --------------------------------------------------------------- */
- /* WE WERE NOT ABLE TO FIND ANY WAY OF RESTORING THIS REPLICA. */
- /* THIS IS A POTENTIAL SYSTEM ERROR. */
- /* --------------------------------------------------------------- */
- cnoOfCreateReplicas--;
- return;
- }//if
-
- if(ERROR_INSERTED(7073) || ERROR_INSERTED(7074)){
- jam();
- nodePtr.p->nodeStatus = NodeRecord::ALIVE;
- }
-
- break;
- }
- default:
- jam();
- /*empty*/;
- break;
- }//switch
- }
- replicaPtr.i = nextReplicaPtrI;
- }//while
-}//Dbdih::searchStoredReplicas()
-
-/*************************************************************************/
-/* */
-/* MODULE: SEIZE_FILE */
-/* DESCRIPTION: THE SUBROUTINE SEIZES A FILE RECORD FROM THE */
-/* FREE LIST. */
-/*************************************************************************/
-void Dbdih::seizeFile(FileRecordPtr& filePtr)
-{
- filePtr.i = cfirstfreeFile;
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- cfirstfreeFile = filePtr.p->nextFile;
- filePtr.p->nextFile = RNIL;
-}//Dbdih::seizeFile()
-
-/*************************************************************************/
-/* SEND CREATE_FRAGREQ TO ALL NODES IN THE NDB CLUSTER. */
-/*************************************************************************/
-/*************************************************************************/
-/* */
-/* MODULE: FIND THE START GCI AND LOCAL CHECKPOINT TO USE. */
-/*************************************************************************/
-void Dbdih::sendStartFragreq(Signal* signal,
- TabRecordPtr tabPtr, Uint32 fragId)
-{
- CreateReplicaRecordPtr replicaPtr;
- for (replicaPtr.i = 0; replicaPtr.i < cnoOfCreateReplicas; replicaPtr.i++) {
- jam();
- ptrAss(replicaPtr, createReplicaRecord);
- BlockReference ref = calcLqhBlockRef(replicaPtr.p->dataNodeId);
- StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0];
- startFragReq->userPtr = replicaPtr.p->replicaRec;
- startFragReq->userRef = reference();
- startFragReq->lcpNo = replicaPtr.p->lcpNo;
- startFragReq->lcpId = replicaPtr.p->createLcpId;
- startFragReq->tableId = tabPtr.i;
- startFragReq->fragId = fragId;
-
- if(ERROR_INSERTED(7072) || ERROR_INSERTED(7074)){
- jam();
- const Uint32 noNodes = replicaPtr.p->noLogNodes;
- Uint32 start = replicaPtr.p->logStartGci[noNodes - 1];
- const Uint32 stop = replicaPtr.p->logStopGci[noNodes - 1];
-
- for(Uint32 i = noNodes; i < 4 && (stop - start) > 0; i++){
- replicaPtr.p->noLogNodes++;
- replicaPtr.p->logStopGci[i - 1] = start;
-
- replicaPtr.p->logNodeId[i] = replicaPtr.p->logNodeId[i-1];
- replicaPtr.p->logStartGci[i] = start + 1;
- replicaPtr.p->logStopGci[i] = stop;
- start += 1;
- }
- }
-
- startFragReq->noOfLogNodes = replicaPtr.p->noLogNodes;
-
- for (Uint32 i = 0; i < 4 ; i++) {
- startFragReq->lqhLogNode[i] = replicaPtr.p->logNodeId[i];
- startFragReq->startGci[i] = replicaPtr.p->logStartGci[i];
- startFragReq->lastGci[i] = replicaPtr.p->logStopGci[i];
- }//for
-
- sendSignal(ref, GSN_START_FRAGREQ, signal,
- StartFragReq::SignalLength, JBB);
- }//for
-}//Dbdih::sendStartFragreq()
-
-/*************************************************************************/
-/* SET THE INITIAL ACTIVE STATUS ON ALL NODES AND PUT INTO LISTS. */
-/*************************************************************************/
-void Dbdih::setInitialActiveStatus()
-{
- NodeRecordPtr siaNodeptr;
- Uint32 tsiaNodeActiveStatus;
- Uint32 tsiaNoActiveNodes;
-
- tsiaNoActiveNodes = csystemnodes - cnoHotSpare;
- for(Uint32 i = 0; i<Sysfile::NODE_STATUS_SIZE; i++)
- SYSFILE->nodeStatus[i] = 0;
- for (siaNodeptr.i = 1; siaNodeptr.i < MAX_NDB_NODES; siaNodeptr.i++) {
- ptrAss(siaNodeptr, nodeRecord);
- if (siaNodeptr.p->nodeStatus == NodeRecord::ALIVE) {
- if (tsiaNoActiveNodes == 0) {
- jam();
- siaNodeptr.p->activeStatus = Sysfile::NS_HotSpare;
- } else {
- jam();
- tsiaNoActiveNodes = tsiaNoActiveNodes - 1;
- siaNodeptr.p->activeStatus = Sysfile::NS_Active;
- }//if
- } else {
- jam();
- siaNodeptr.p->activeStatus = Sysfile::NS_NotDefined;
- }//if
- switch (siaNodeptr.p->activeStatus) {
- case Sysfile::NS_Active:
- jam();
- tsiaNodeActiveStatus = Sysfile::NS_Active;
- break;
- case Sysfile::NS_HotSpare:
- jam();
- tsiaNodeActiveStatus = Sysfile::NS_HotSpare;
- break;
- case Sysfile::NS_NotDefined:
- jam();
- tsiaNodeActiveStatus = Sysfile::NS_NotDefined;
- break;
- default:
- ndbrequire(false);
- return;
- break;
- }//switch
- Sysfile::setNodeStatus(siaNodeptr.i, SYSFILE->nodeStatus,
- tsiaNodeActiveStatus);
- }//for
-}//Dbdih::setInitialActiveStatus()
-
-/*************************************************************************/
-/* SET LCP ACTIVE STATUS AT THE END OF A LOCAL CHECKPOINT. */
-/*************************************************************************/
-void Dbdih::setLcpActiveStatusEnd()
-{
- NodeRecordPtr nodePtr;
-
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- ptrAss(nodePtr, nodeRecord);
- if (c_lcpState.m_participatingLQH.get(nodePtr.i)){
- switch (nodePtr.p->activeStatus) {
- case Sysfile::NS_Active:
- case Sysfile::NS_ActiveMissed_1:
- case Sysfile::NS_ActiveMissed_2:
- jam();
- /*-------------------------------------------------------------------*/
- /* THE NODE PARTICIPATED IN THIS CHECKPOINT.
- * WE CAN SET ITS STATUS TO ACTIVE */
- /*-------------------------------------------------------------------*/
- nodePtr.p->activeStatus = Sysfile::NS_Active;
- takeOverCompleted(nodePtr.i);
- break;
- case Sysfile::NS_TakeOver:
- jam();
- /*-------------------------------------------------------------------*/
- /* THE NODE HAS COMPLETED A CHECKPOINT AFTER TAKE OVER. WE CAN NOW */
- /* SET ITS STATUS TO ACTIVE. WE CAN ALSO COMPLETE THE TAKE OVER */
- /* AND ALSO WE CLEAR THE TAKE OVER NODE IN THE RESTART INFO. */
- /*-------------------------------------------------------------------*/
- nodePtr.p->activeStatus = Sysfile::NS_Active;
- takeOverCompleted(nodePtr.i);
- break;
- default:
- ndbrequire(false);
- return;
- break;
- }//switch
- }//if
- }//for
-
- if(getNodeState().getNodeRestartInProgress()){
- jam();
- if(c_lcpState.m_participatingLQH.get(getOwnNodeId())){
- nodePtr.i = getOwnNodeId();
- ptrAss(nodePtr, nodeRecord);
- ndbrequire(nodePtr.p->activeStatus == Sysfile::NS_Active);
- ndbout_c("NR: setLcpActiveStatusEnd - m_participatingLQH");
- } else {
- ndbout_c("NR: setLcpActiveStatusEnd - !m_participatingLQH");
- }
- }
-
- c_lcpState.m_participatingDIH.clear();
- c_lcpState.m_participatingLQH.clear();
- if (isMaster()) {
- jam();
- setNodeRestartInfoBits();
- }//if
-}//Dbdih::setLcpActiveStatusEnd()
-
-void Dbdih::takeOverCompleted(Uint32 aNodeId)
-{
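-  /* Called when a node being taken over has completed an LCP. On the
-   * master this moves the take-over on to TO_END_COPY, clears the
-   * take-over node in the restart info and asks for a new GCP
-   * (cstartGcpNow). */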
- TakeOverRecordPtr takeOverPtr;
- takeOverPtr.i = findTakeOver(aNodeId);
- if (takeOverPtr.i != RNIL) {
- jam();
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
- if (takeOverPtr.p->toMasterStatus != TakeOverRecord::WAIT_LCP) {
- jam();
- ndbrequire(!isMaster());
- return;
- }//if
- ndbrequire(isMaster());
- Sysfile::setTakeOverNode(aNodeId, SYSFILE->takeOver, 0);
- takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_END_COPY;
- cstartGcpNow = true;
- }//if
-}//Dbdih::takeOverCompleted()
-
-/*************************************************************************/
-/* SET LCP ACTIVE STATUS BEFORE STARTING A LOCAL CHECKPOINT. */
-/*************************************************************************/
-void Dbdih::setLcpActiveStatusStart(Signal* signal)
-{
- NodeRecordPtr nodePtr;
-
- c_lcpState.m_participatingLQH.clear();
- c_lcpState.m_participatingDIH.clear();
-
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- ptrAss(nodePtr, nodeRecord);
-#if 0
- if(nodePtr.p->nodeStatus != NodeRecord::NOT_IN_CLUSTER){
- infoEvent("Node %d nodeStatus=%d activeStatus=%d copyCompleted=%d lcp=%d",
- nodePtr.i,
- nodePtr.p->nodeStatus,
- nodePtr.p->activeStatus,
- nodePtr.p->copyCompleted,
- nodePtr.p->m_inclDihLcp);
- }
-#endif
- if(nodePtr.p->nodeStatus == NodeRecord::ALIVE && nodePtr.p->m_inclDihLcp){
- jam();
- c_lcpState.m_participatingDIH.set(nodePtr.i);
- }
-
- if ((nodePtr.p->nodeStatus == NodeRecord::ALIVE) &&
- (nodePtr.p->copyCompleted)) {
- switch (nodePtr.p->activeStatus) {
- case Sysfile::NS_Active:
- jam();
- /*-------------------------------------------------------------------*/
- // The normal case. Starting a LCP for a started node which hasn't
- // missed the previous LCP.
- /*-------------------------------------------------------------------*/
- c_lcpState.m_participatingLQH.set(nodePtr.i);
- break;
- case Sysfile::NS_ActiveMissed_1:
- jam();
- /*-------------------------------------------------------------------*/
- // The node is starting up and is participating in a local checkpoint
- // as the final phase of the start-up. We can still use the checkpoints
- // on the node after a system restart.
- /*-------------------------------------------------------------------*/
- c_lcpState.m_participatingLQH.set(nodePtr.i);
- break;
- case Sysfile::NS_ActiveMissed_2:
- jam();
- /*-------------------------------------------------------------------*/
- // The node is starting up and is participating in a local checkpoint
- // as the final phase of the start-up. We have missed so
-      // many checkpoints that we can no longer use this node to
- // recreate fragments from disk.
- // It must be taken over with the copy fragment process after a system
- // crash. We indicate this by setting the active status to TAKE_OVER.
- /*-------------------------------------------------------------------*/
- nodePtr.p->activeStatus = Sysfile::NS_TakeOver;
- //break; // Fall through
- case Sysfile::NS_TakeOver:{
- TakeOverRecordPtr takeOverPtr;
- jam();
- /*-------------------------------------------------------------------*/
- /* THIS NODE IS CURRENTLY TAKING OVER A FAILED NODE. */
- /*-------------------------------------------------------------------*/
- takeOverPtr.i = findTakeOver(nodePtr.i);
- if (takeOverPtr.i != RNIL) {
- jam();
- ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
- if (takeOverPtr.p->toMasterStatus == TakeOverRecord::WAIT_LCP) {
- jam();
- /*---------------------------------------------------------------
-           * ALL THE INFORMATION HAS BEEN REPLICATED TO THE NEW
- * NODE AND WE ARE ONLY WAITING FOR A LOCAL CHECKPOINT TO BE
- * PERFORMED ON THE NODE TO SET ITS STATUS TO ACTIVE.
- */
-          infoEvent("Node %d is WAIT_LCP, including it in LCP", nodePtr.i);
- c_lcpState.m_participatingLQH.set(nodePtr.i);
- }//if
- }//if
- break;
- }
- default:
- jam();
- /*empty*/;
- break;
- }//switch
- } else {
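-      /* The node is not participating in this LCP: step its active status
-       * down one notch (Active -> ActiveMissed_1 -> ActiveMissed_2), and
-       * from ActiveMissed_2 on to TakeOver or NotActive_NotTakenOver. */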
- switch (nodePtr.p->activeStatus) {
- case Sysfile::NS_Active:
- jam();
- nodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_1;
- break;
- case Sysfile::NS_ActiveMissed_1:
- jam();
- nodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_2;
- break;
- case Sysfile::NS_ActiveMissed_2:
- jam();
- if ((nodePtr.p->nodeStatus == NodeRecord::ALIVE) &&
- (!nodePtr.p->copyCompleted)) {
- jam();
- /*-----------------------------------------------------------------*/
- // The node is currently starting up and has not completed the
- // copy phase.
- // It will thus be in the TAKE_OVER state.
- /*-----------------------------------------------------------------*/
- ndbrequire(findTakeOver(nodePtr.i) != RNIL);
- nodePtr.p->activeStatus = Sysfile::NS_TakeOver;
- } else {
- jam();
- /*-----------------------------------------------------------------*/
- /* THE NODE IS ACTIVE AND HAS NOT COMPLETED ANY OF THE LAST 3
- * CHECKPOINTS */
- /* WE MUST TAKE IT OUT OF ACTION AND START A NEW NODE TO TAKE OVER.*/
- /*-----------------------------------------------------------------*/
- nodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
- }//if
- break;
- case Sysfile::NS_TakeOver:
- jam();
- break;
- default:
- jam();
- /*empty*/;
- break;
- }//switch
- }//if
- }//for
- if (isMaster()) {
- jam();
- checkStartTakeOver(signal);
- setNodeRestartInfoBits();
- }//if
-}//Dbdih::setLcpActiveStatusStart()
-
-/*************************************************************************/
-/* SET NODE ACTIVE STATUS AT SYSTEM RESTART AND WHEN UPDATED BY MASTER */
-/*************************************************************************/
-void Dbdih::setNodeActiveStatus()
-{
- NodeRecordPtr snaNodeptr;
-
- for (snaNodeptr.i = 1; snaNodeptr.i < MAX_NDB_NODES; snaNodeptr.i++) {
- ptrAss(snaNodeptr, nodeRecord);
- const Uint32 tsnaNodeBits = Sysfile::getNodeStatus(snaNodeptr.i,
- SYSFILE->nodeStatus);
- switch (tsnaNodeBits) {
- case Sysfile::NS_Active:
- jam();
- snaNodeptr.p->activeStatus = Sysfile::NS_Active;
- break;
- case Sysfile::NS_ActiveMissed_1:
- jam();
- snaNodeptr.p->activeStatus = Sysfile::NS_ActiveMissed_1;
- break;
- case Sysfile::NS_ActiveMissed_2:
- jam();
- snaNodeptr.p->activeStatus = Sysfile::NS_ActiveMissed_2;
- break;
- case Sysfile::NS_TakeOver:
- jam();
- snaNodeptr.p->activeStatus = Sysfile::NS_TakeOver;
- break;
- case Sysfile::NS_HotSpare:
- jam();
- snaNodeptr.p->activeStatus = Sysfile::NS_HotSpare;
- break;
- case Sysfile::NS_NotActive_NotTakenOver:
- jam();
- snaNodeptr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
- break;
- case Sysfile::NS_NotDefined:
- jam();
- snaNodeptr.p->activeStatus = Sysfile::NS_NotDefined;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- }//for
-}//Dbdih::setNodeActiveStatus()
-
-/***************************************************************************/
-/* SET THE NODE GROUP BASED ON THE RESTART INFORMATION OR AS SET BY MASTER */
-/***************************************************************************/
-void Dbdih::setNodeGroups()
-{
- NodeGroupRecordPtr NGPtr;
- NodeRecordPtr sngNodeptr;
- Uint32 Ti;
-
- for (Ti = 0; Ti < MAX_NDB_NODES; Ti++) {
- NGPtr.i = Ti;
- ptrAss(NGPtr, nodeGroupRecord);
- NGPtr.p->nodeCount = 0;
- }//for
- for (sngNodeptr.i = 1; sngNodeptr.i < MAX_NDB_NODES; sngNodeptr.i++) {
- ptrAss(sngNodeptr, nodeRecord);
- Sysfile::ActiveStatus s =
- (Sysfile::ActiveStatus)Sysfile::getNodeStatus(sngNodeptr.i,
- SYSFILE->nodeStatus);
- switch (s){
- case Sysfile::NS_Active:
- case Sysfile::NS_ActiveMissed_1:
- case Sysfile::NS_ActiveMissed_2:
- case Sysfile::NS_NotActive_NotTakenOver:
- case Sysfile::NS_TakeOver:
- jam();
- sngNodeptr.p->nodeGroup = Sysfile::getNodeGroup(sngNodeptr.i,
- SYSFILE->nodeGroups);
- NGPtr.i = sngNodeptr.p->nodeGroup;
- ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
- NGPtr.p->nodesInGroup[NGPtr.p->nodeCount] = sngNodeptr.i;
- NGPtr.p->nodeCount++;
- break;
- case Sysfile::NS_HotSpare:
- case Sysfile::NS_NotDefined:
- jam();
- sngNodeptr.p->nodeGroup = ZNIL;
- break;
- default:
- ndbrequire(false);
- return;
- break;
- }//switch
- }//for
- cnoOfNodeGroups = 0;
- for (Ti = 0; Ti < MAX_NDB_NODES; Ti++) {
- jam();
- NGPtr.i = Ti;
- ptrAss(NGPtr, nodeGroupRecord);
- if (NGPtr.p->nodeCount != 0) {
- jam();
- cnoOfNodeGroups++;
- }//if
- }//for
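-  /* Any nodes beyond what the node groups can hold (cnoReplicas nodes
-   * per group) are counted as hot spares. */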
- cnoHotSpare = csystemnodes - (cnoOfNodeGroups * cnoReplicas);
-}//Dbdih::setNodeGroups()
-
-/*************************************************************************/
-/* SET NODE INFORMATION AFTER RECEIVING RESTART INFORMATION FROM MASTER. */
-/* WE TAKE THE OPPORTUNITY TO SYNCHRONISE OUR DATA WITH THE MASTER. IT */
-/* IS ONLY THE MASTER THAT WILL ACT ON THIS DATA. WE WILL KEEP THEM */
-/* UPDATED FOR THE CASE WHEN WE HAVE TO BECOME MASTER. */
-/*************************************************************************/
-void Dbdih::setNodeInfo(Signal* signal)
-{
- setNodeActiveStatus();
- setNodeGroups();
- sendHOT_SPAREREP(signal);
-}//Dbdih::setNodeInfo()
-
-/*************************************************************************/
-// Keep also DBDICT informed about the Hot Spare situation in the cluster.
-/*************************************************************************/
-void Dbdih::sendHOT_SPAREREP(Signal* signal)
-{
- NodeRecordPtr locNodeptr;
- Uint32 Ti = 0;
- HotSpareRep * const hotSpare = (HotSpareRep*)&signal->theData[0];
- NodeBitmask::clear(hotSpare->theHotSpareNodes);
- for (locNodeptr.i = 1; locNodeptr.i < MAX_NDB_NODES; locNodeptr.i++) {
- ptrAss(locNodeptr, nodeRecord);
- switch (locNodeptr.p->activeStatus) {
- case Sysfile::NS_HotSpare:
- jam();
- NodeBitmask::set(hotSpare->theHotSpareNodes, locNodeptr.i);
- Ti++;
- break;
- default:
- jam();
- break;
- }//switch
- }//for
- hotSpare->noHotSpareNodes = Ti;
- sendSignal(DBDICT_REF, GSN_HOT_SPAREREP,
- signal, HotSpareRep::SignalLength, JBB);
-}//Dbdih::sendHOT_SPAREREP()
-
-/*************************************************************************/
-/* SET LCP ACTIVE STATUS FOR ALL NODES BASED ON THE INFORMATION IN */
-/* THE RESTART INFORMATION. */
-/*************************************************************************/
-#if 0
-void Dbdih::setNodeLcpActiveStatus()
-{
- c_lcpState.m_lcpActiveStatus.clear();
- for (Uint32 i = 1; i < MAX_NDB_NODES; i++) {
- if (NodeBitmask::get(SYSFILE->lcpActive, i)) {
- jam();
- c_lcpState.m_lcpActiveStatus.set(i);
- }//if
- }//for
-}//Dbdih::setNodeLcpActiveStatus()
-#endif
-
-/*************************************************************************/
-/* SET THE RESTART INFO BITS BASED ON THE NODES ACTIVE STATUS. */
-/*************************************************************************/
-void Dbdih::setNodeRestartInfoBits()
-{
- NodeRecordPtr nodePtr;
- Uint32 tsnrNodeGroup;
- Uint32 tsnrNodeActiveStatus;
- Uint32 i;
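-  /* Start from defaults (every node NS_Active, node groups zeroed, LCP
-   * bitmask cleared); the loop below then fills in the real per-node
-   * status, node group and LCP participation. */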
- for(i = 1; i < MAX_NDB_NODES; i++){
- Sysfile::setNodeStatus(i, SYSFILE->nodeStatus, Sysfile::NS_Active);
- }//for
- for(i = 1; i < Sysfile::NODE_GROUPS_SIZE; i++){
- SYSFILE->nodeGroups[i] = 0;
- }//for
- NdbNodeBitmask::clear(SYSFILE->lcpActive);
-
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- ptrAss(nodePtr, nodeRecord);
- switch (nodePtr.p->activeStatus) {
- case Sysfile::NS_Active:
- jam();
- tsnrNodeActiveStatus = Sysfile::NS_Active;
- break;
- case Sysfile::NS_ActiveMissed_1:
- jam();
- tsnrNodeActiveStatus = Sysfile::NS_ActiveMissed_1;
- break;
- case Sysfile::NS_ActiveMissed_2:
- jam();
- tsnrNodeActiveStatus = Sysfile::NS_ActiveMissed_2;
- break;
- case Sysfile::NS_HotSpare:
- jam();
- tsnrNodeActiveStatus = Sysfile::NS_HotSpare;
- break;
- case Sysfile::NS_TakeOver:
- jam();
- tsnrNodeActiveStatus = Sysfile::NS_TakeOver;
- break;
- case Sysfile::NS_NotActive_NotTakenOver:
- jam();
- tsnrNodeActiveStatus = Sysfile::NS_NotActive_NotTakenOver;
- break;
- case Sysfile::NS_NotDefined:
- jam();
- tsnrNodeActiveStatus = Sysfile::NS_NotDefined;
- break;
- default:
- ndbrequire(false);
- tsnrNodeActiveStatus = Sysfile::NS_NotDefined; // remove warning
- break;
- }//switch
- Sysfile::setNodeStatus(nodePtr.i, SYSFILE->nodeStatus,
- tsnrNodeActiveStatus);
- if (nodePtr.p->nodeGroup == ZNIL) {
- jam();
- tsnrNodeGroup = NO_NODE_GROUP_ID;
- } else {
- jam();
- tsnrNodeGroup = nodePtr.p->nodeGroup;
- }//if
- Sysfile::setNodeGroup(nodePtr.i, SYSFILE->nodeGroups, tsnrNodeGroup);
- if (c_lcpState.m_participatingLQH.get(nodePtr.i)){
- jam();
- NodeBitmask::set(SYSFILE->lcpActive, nodePtr.i);
- }//if
- }//for
-}//Dbdih::setNodeRestartInfoBits()
-
-/*************************************************************************/
-/* START THE GLOBAL CHECKPOINT PROTOCOL IN MASTER AT START-UP */
-/*************************************************************************/
-void Dbdih::startGcp(Signal* signal)
-{
- cgcpStatus = GCP_READY;
- coldGcpStatus = cgcpStatus;
- coldGcpId = cnewgcp;
- cgcpSameCounter = 0;
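-  /* Kick the GCP driver loop (ZSTART_GCP) immediately and schedule the
-   * ZCHECK_GCP_STOP check with a 100 ms delay. */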
- signal->theData[0] = DihContinueB::ZSTART_GCP;
- signal->theData[1] = 0;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- signal->theData[0] = DihContinueB::ZCHECK_GCP_STOP;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1);
-}//Dbdih::startGcp()
-
-void Dbdih::updateNodeInfo(FragmentstorePtr fragPtr)
-{
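-  /* Rebuild the fragment's activeNodes[] list from its stored replicas
-   * and refresh fragReplicas with the number of replicas found. */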
- ReplicaRecordPtr replicatePtr;
- Uint32 index = 0;
- replicatePtr.i = fragPtr.p->storedReplicas;
- do {
- jam();
- ptrCheckGuard(replicatePtr, creplicaFileSize, replicaRecord);
- ndbrequire(index < MAX_REPLICAS);
- fragPtr.p->activeNodes[index] = replicatePtr.p->procNode;
- index++;
- replicatePtr.i = replicatePtr.p->nextReplica;
- } while (replicatePtr.i != RNIL);
- fragPtr.p->fragReplicas = index;
-
- /* ----------------------------------------------------------------------- */
- // We switch primary to the preferred primary if the preferred primary is
- // in the list.
- /* ----------------------------------------------------------------------- */
- const Uint32 prefPrim = fragPtr.p->preferredPrimary;
- for (Uint32 i = 1; i < index; i++) {
- jam();
- ndbrequire(i < MAX_REPLICAS);
- if (fragPtr.p->activeNodes[i] == prefPrim){
- jam();
- Uint32 switchNode = fragPtr.p->activeNodes[0];
- fragPtr.p->activeNodes[0] = prefPrim;
- fragPtr.p->activeNodes[i] = switchNode;
- break;
- }//if
- }//for
-}//Dbdih::updateNodeInfo()
-
-void Dbdih::writeFragment(RWFragment* wf, FragmentstorePtr fragPtr)
-{
- writePageWord(wf, wf->fragId);
- writePageWord(wf, fragPtr.p->preferredPrimary);
- writePageWord(wf, fragPtr.p->noStoredReplicas);
- writePageWord(wf, fragPtr.p->noOldStoredReplicas);
- writePageWord(wf, fragPtr.p->distributionKey);
-}//Dbdih::writeFragment()
-
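-/* Append one 32-bit word to the current table-description page. When the
- * 2048-word page is full a new page is allocated and writing resumes at
- * word 32, the first words presumably being reserved as a page header. */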
-void Dbdih::writePageWord(RWFragment* wf, Uint32 dataWord)
-{
- if (wf->wordIndex >= 2048) {
- jam();
- ndbrequire(wf->wordIndex == 2048);
- allocpage(wf->rwfPageptr);
- wf->wordIndex = 32;
- wf->pageIndex++;
- ndbrequire(wf->pageIndex < 8);
- wf->rwfTabPtr.p->pageRef[wf->pageIndex] = wf->rwfPageptr.i;
- wf->rwfTabPtr.p->noPages++;
- }//if
- wf->rwfPageptr.p->word[wf->wordIndex] = dataWord;
- wf->wordIndex++;
-}//Dbdih::writePageWord()
-
-void Dbdih::writeReplicas(RWFragment* wf, Uint32 replicaStartIndex)
-{
- ReplicaRecordPtr wfReplicaPtr;
- wfReplicaPtr.i = replicaStartIndex;
- while (wfReplicaPtr.i != RNIL) {
- jam();
- ptrCheckGuard(wfReplicaPtr, creplicaFileSize, replicaRecord);
- writePageWord(wf, wfReplicaPtr.p->procNode);
- writePageWord(wf, wfReplicaPtr.p->initialGci);
- writePageWord(wf, wfReplicaPtr.p->noCrashedReplicas);
- writePageWord(wf, wfReplicaPtr.p->nextLcp);
- Uint32 i;
- for (i = 0; i < MAX_LCP_STORED; i++) {
- writePageWord(wf, wfReplicaPtr.p->maxGciCompleted[i]);
- writePageWord(wf, wfReplicaPtr.p->maxGciStarted[i]);
- writePageWord(wf, wfReplicaPtr.p->lcpId[i]);
- writePageWord(wf, wfReplicaPtr.p->lcpStatus[i]);
-    }//for
- for (i = 0; i < 8; i++) {
- writePageWord(wf, wfReplicaPtr.p->createGci[i]);
- writePageWord(wf, wfReplicaPtr.p->replicaLastGci[i]);
-    }//for
-
- wfReplicaPtr.i = wfReplicaPtr.p->nextReplica;
- }//while
-}//Dbdih::writeReplicas()
-
-void Dbdih::writeRestorableGci(Signal* signal, FileRecordPtr filePtr)
-{
- for (Uint32 i = 0; i < Sysfile::SYSFILE_SIZE32; i++) {
- sysfileDataToFile[i] = sysfileData[i];
- }//for
- signal->theData[0] = filePtr.p->fileRef;
- signal->theData[1] = reference();
- signal->theData[2] = filePtr.i;
- signal->theData[3] = ZLIST_OF_PAIRS_SYNCH;
- signal->theData[4] = ZVAR_NO_CRESTART_INFO_TO_FILE;
- signal->theData[5] = 1; /* AMOUNT OF PAGES */
- signal->theData[6] = 0; /* MEMORY PAGE = 0 SINCE COMMON STORED VARIABLE */
- signal->theData[7] = 0;
- sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
-}//Dbdih::writeRestorableGci()
-
-void Dbdih::writeTabfile(Signal* signal, TabRecord* tab, FileRecordPtr filePtr)
-{
- signal->theData[0] = filePtr.p->fileRef;
- signal->theData[1] = reference();
- signal->theData[2] = filePtr.i;
- signal->theData[3] = ZLIST_OF_PAIRS;
- signal->theData[4] = ZVAR_NO_WORD;
- signal->theData[5] = tab->noPages;
- for (Uint32 i = 0; i < tab->noPages; i++) {
- jam();
- signal->theData[6 + (2 * i)] = tab->pageRef[i];
- signal->theData[7 + (2 * i)] = i;
- }//for
- Uint32 length = 6 + (2 * tab->noPages);
- sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, length, JBA);
-}//Dbdih::writeTabfile()
-
-void Dbdih::execDEBUG_SIG(Signal* signal)
-{
-  (void)signal; //Avoid compiler warnings
-}//Dbdih::execDEBUG_SIG()
-
-void
-Dbdih::execDUMP_STATE_ORD(Signal* signal)
-{
- DumpStateOrd * const & dumpState = (DumpStateOrd *)&signal->theData[0];
- if (dumpState->args[0] == DumpStateOrd::DihDumpNodeRestartInfo) {
- infoEvent("c_nodeStartMaster.blockLcp = %d, c_nodeStartMaster.blockGcp = %d, c_nodeStartMaster.wait = %d",
- c_nodeStartMaster.blockLcp, c_nodeStartMaster.blockGcp, c_nodeStartMaster.wait);
- infoEvent("cstartGcpNow = %d, cgcpStatus = %d",
- cstartGcpNow, cgcpStatus);
- infoEvent("cfirstVerifyQueue = %d, cverifyQueueCounter = %d",
- cfirstVerifyQueue, cverifyQueueCounter);
- infoEvent("cgcpOrderBlocked = %d, cgcpStartCounter = %d",
- cgcpOrderBlocked, cgcpStartCounter);
- }//if
- if (dumpState->args[0] == DumpStateOrd::DihDumpNodeStatusInfo) {
- NodeRecordPtr localNodePtr;
- infoEvent("Printing nodeStatus of all nodes");
- for (localNodePtr.i = 1; localNodePtr.i < MAX_NDB_NODES; localNodePtr.i++) {
- ptrAss(localNodePtr, nodeRecord);
- if (localNodePtr.p->nodeStatus != NodeRecord::NOT_IN_CLUSTER) {
- infoEvent("Node = %d has status = %d",
- localNodePtr.i, localNodePtr.p->nodeStatus);
- }//if
- }//for
- }//if
-
- if (dumpState->args[0] == DumpStateOrd::DihPrintFragmentation){
- infoEvent("Printing fragmentation of all tables --");
- for(Uint32 i = 0; i<ctabFileSize; i++){
- TabRecordPtr tabPtr;
- tabPtr.i = i;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- if(tabPtr.p->tabStatus != TabRecord::TS_ACTIVE)
- continue;
-
- for(Uint32 j = 0; j < tabPtr.p->totalfragments; j++){
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, j, fragPtr);
-
- Uint32 nodeOrder[MAX_REPLICAS];
- const Uint32 noOfReplicas = extractNodeInfo(fragPtr.p, nodeOrder);
- char buf[100];
- BaseString::snprintf(buf, sizeof(buf), " Table %d Fragment %d - ", tabPtr.i, j);
- for(Uint32 k = 0; k < noOfReplicas; k++){
- char tmp[100];
- BaseString::snprintf(tmp, sizeof(tmp), "%d ", nodeOrder[k]);
- strcat(buf, tmp);
- }
- infoEvent(buf);
- }
- }
- }
-
- if (signal->theData[0] == 7000) {
- infoEvent("ctimer = %d, cgcpParticipantState = %d, cgcpStatus = %d",
- c_lcpState.ctimer, cgcpParticipantState, cgcpStatus);
- infoEvent("coldGcpStatus = %d, coldGcpId = %d, cmasterState = %d",
- coldGcpStatus, coldGcpId, cmasterState);
- infoEvent("cmasterTakeOverNode = %d, ctcCounter = %d",
- cmasterTakeOverNode, c_lcpState.ctcCounter);
- }//if
- if (signal->theData[0] == 7001) {
- infoEvent("c_lcpState.keepGci = %d",
- c_lcpState.keepGci);
- infoEvent("c_lcpState.lcpStatus = %d, clcpStartGcp = %d",
- c_lcpState.lcpStatus,
- c_lcpState.lcpStartGcp);
- infoEvent("cgcpStartCounter = %d, cimmediateLcpStart = %d",
- cgcpStartCounter, c_lcpState.immediateLcpStart);
- }//if
- if (signal->theData[0] == 7002) {
- infoEvent("cnoOfActiveTables = %d, cgcpDelay = %d",
- cnoOfActiveTables, cgcpDelay);
- infoEvent("cdictblockref = %d, cfailurenr = %d",
- cdictblockref, cfailurenr);
- infoEvent("con_lineNodes = %d, reference() = %d, creceivedfrag = %d",
- con_lineNodes, reference(), creceivedfrag);
- }//if
- if (signal->theData[0] == 7003) {
- infoEvent("cfirstAliveNode = %d, cgckptflag = %d",
- cfirstAliveNode, cgckptflag);
- infoEvent("clocallqhblockref = %d, clocaltcblockref = %d, cgcpOrderBlocked = %d",
- clocallqhblockref, clocaltcblockref, cgcpOrderBlocked);
- infoEvent("cstarttype = %d, csystemnodes = %d, currentgcp = %d",
- cstarttype, csystemnodes, currentgcp);
- }//if
- if (signal->theData[0] == 7004) {
- infoEvent("cmasterdihref = %d, cownNodeId = %d, cnewgcp = %d",
- cmasterdihref, cownNodeId, cnewgcp);
- infoEvent("cndbStartReqBlockref = %d, cremainingfrags = %d",
- cndbStartReqBlockref, cremainingfrags);
- infoEvent("cntrlblockref = %d, cgcpSameCounter = %d, coldgcp = %d",
- cntrlblockref, cgcpSameCounter, coldgcp);
- }//if
- if (signal->theData[0] == 7005) {
- infoEvent("crestartGci = %d",
- crestartGci);
- }//if
- if (signal->theData[0] == 7006) {
- infoEvent("clcpDelay = %d, cgcpMasterTakeOverState = %d",
- c_lcpState.clcpDelay, cgcpMasterTakeOverState);
- infoEvent("cmasterNodeId = %d", cmasterNodeId);
- infoEvent("cnoHotSpare = %d, c_nodeStartMaster.startNode = %d, c_nodeStartMaster.wait = %d",
- cnoHotSpare, c_nodeStartMaster.startNode, c_nodeStartMaster.wait);
- }//if
- if (signal->theData[0] == 7007) {
- infoEvent("c_nodeStartMaster.failNr = %d", c_nodeStartMaster.failNr);
- infoEvent("c_nodeStartMaster.startInfoErrorCode = %d",
- c_nodeStartMaster.startInfoErrorCode);
- infoEvent("c_nodeStartMaster.blockLcp = %d, c_nodeStartMaster.blockGcp = %d",
- c_nodeStartMaster.blockLcp, c_nodeStartMaster.blockGcp);
- }//if
- if (signal->theData[0] == 7008) {
- infoEvent("cfirstDeadNode = %d, cstartPhase = %d, cnoReplicas = %d",
- cfirstDeadNode, cstartPhase, cnoReplicas);
- infoEvent("cwaitLcpSr = %d",cwaitLcpSr);
- }//if
- if (signal->theData[0] == 7009) {
- infoEvent("ccalcOldestRestorableGci = %d, cnoOfNodeGroups = %d",
- c_lcpState.oldestRestorableGci, cnoOfNodeGroups);
- infoEvent("cstartGcpNow = %d",
- cstartGcpNow);
- infoEvent("crestartGci = %d",
- crestartGci);
- }//if
- if (signal->theData[0] == 7010) {
- infoEvent("cminHotSpareNodes = %d, c_lcpState.lcpStatusUpdatedPlace = %d, cLcpStart = %d",
- cminHotSpareNodes, c_lcpState.lcpStatusUpdatedPlace, c_lcpState.lcpStart);
- infoEvent("c_blockCommit = %d, c_blockCommitNo = %d",
- c_blockCommit, c_blockCommitNo);
- }//if
- if (signal->theData[0] == 7011){
- infoEvent("c_COPY_GCIREQ_Counter = %s",
- c_COPY_GCIREQ_Counter.getText());
- infoEvent("c_COPY_TABREQ_Counter = %s",
- c_COPY_TABREQ_Counter.getText());
- infoEvent("c_CREATE_FRAGREQ_Counter = %s",
- c_CREATE_FRAGREQ_Counter.getText());
- infoEvent("c_DIH_SWITCH_REPLICA_REQ_Counter = %s",
- c_DIH_SWITCH_REPLICA_REQ_Counter.getText());
- infoEvent("c_EMPTY_LCP_REQ_Counter = %s",c_EMPTY_LCP_REQ_Counter.getText());
- infoEvent("c_END_TOREQ_Counter = %s", c_END_TOREQ_Counter.getText());
- infoEvent("c_GCP_COMMIT_Counter = %s", c_GCP_COMMIT_Counter.getText());
- infoEvent("c_GCP_PREPARE_Counter = %s", c_GCP_PREPARE_Counter.getText());
- infoEvent("c_GCP_SAVEREQ_Counter = %s", c_GCP_SAVEREQ_Counter.getText());
- infoEvent("c_INCL_NODEREQ_Counter = %s", c_INCL_NODEREQ_Counter.getText());
- infoEvent("c_MASTER_GCPREQ_Counter = %s",
- c_MASTER_GCPREQ_Counter.getText());
- infoEvent("c_MASTER_LCPREQ_Counter = %s",
- c_MASTER_LCPREQ_Counter.getText());
- infoEvent("c_START_INFOREQ_Counter = %s",
- c_START_INFOREQ_Counter.getText());
- infoEvent("c_START_RECREQ_Counter = %s", c_START_RECREQ_Counter.getText());
- infoEvent("c_START_TOREQ_Counter = %s", c_START_TOREQ_Counter.getText());
- infoEvent("c_STOP_ME_REQ_Counter = %s", c_STOP_ME_REQ_Counter.getText());
- infoEvent("c_TC_CLOPSIZEREQ_Counter = %s",
- c_TC_CLOPSIZEREQ_Counter.getText());
- infoEvent("c_TCGETOPSIZEREQ_Counter = %s",
- c_TCGETOPSIZEREQ_Counter.getText());
- infoEvent("c_UPDATE_TOREQ_Counter = %s", c_UPDATE_TOREQ_Counter.getText());
- }
-
- if(signal->theData[0] == 7012){
- char buf[8*_NDB_NODE_BITMASK_SIZE+1];
- infoEvent("ParticipatingDIH = %s", c_lcpState.m_participatingDIH.getText(buf));
- infoEvent("ParticipatingLQH = %s", c_lcpState.m_participatingLQH.getText(buf));
- infoEvent("m_LCP_COMPLETE_REP_Counter_DIH = %s",
- c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.getText());
- infoEvent("m_LCP_COMPLETE_REP_Counter_LQH = %s",
- c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.getText());
- infoEvent("m_LAST_LCP_FRAG_ORD = %s",
- c_lcpState.m_LAST_LCP_FRAG_ORD.getText());
- infoEvent("m_LCP_COMPLETE_REP_From_Master_Received = %d",
- c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received);
-
- NodeRecordPtr nodePtr;
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- ptrAss(nodePtr, nodeRecord);
- if(nodePtr.p->nodeStatus == NodeRecord::ALIVE){
- Uint32 i;
- for(i = 0; i<nodePtr.p->noOfStartedChkpt; i++){
- infoEvent("Node %d: started: table=%d fragment=%d replica=%d",
- nodePtr.i,
- nodePtr.p->startedChkpt[i].tableId,
- nodePtr.p->startedChkpt[i].fragId,
- nodePtr.p->startedChkpt[i].replicaPtr);
- }
-
- for(i = 0; i<nodePtr.p->noOfQueuedChkpt; i++){
- infoEvent("Node %d: queued: table=%d fragment=%d replica=%d",
- nodePtr.i,
- nodePtr.p->queuedChkpt[i].tableId,
- nodePtr.p->queuedChkpt[i].fragId,
- nodePtr.p->queuedChkpt[i].replicaPtr);
- }
- }
- }
- }
-
- if(dumpState->args[0] == 7019 && signal->getLength() == 2)
- {
- char buf2[8+1];
- NodeRecordPtr nodePtr;
- nodePtr.i = signal->theData[1];
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- infoEvent("NF Node %d tc: %d lqh: %d dih: %d dict: %d recNODE_FAILREP: %d",
- nodePtr.i,
- nodePtr.p->dbtcFailCompleted,
- nodePtr.p->dblqhFailCompleted,
- nodePtr.p->dbdihFailCompleted,
- nodePtr.p->dbdictFailCompleted,
- nodePtr.p->recNODE_FAILREP);
- infoEvent(" m_NF_COMPLETE_REP: %s m_nodefailSteps: %s",
- nodePtr.p->m_NF_COMPLETE_REP.getText(),
- nodePtr.p->m_nodefailSteps.getText(buf2));
- }
-
- if(dumpState->args[0] == 7020 && signal->getLength() > 3)
- {
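-    /* DUMP 7020 <gsn> <block> <data...>: forward the remaining words as a
-     * custom signal to the given block on this node and log what was sent. */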
- Uint32 gsn= signal->theData[1];
- Uint32 block= signal->theData[2];
- Uint32 length= signal->length() - 3;
- memmove(signal->theData, signal->theData+3, 4*length);
- sendSignal(numberToRef(block, getOwnNodeId()), gsn, signal, length, JBB);
-
- warningEvent("-- SENDING CUSTOM SIGNAL --");
- char buf[100], buf2[100];
- buf2[0]= 0;
- for(Uint32 i = 0; i<length; i++)
- {
- snprintf(buf, 100, "%s %.8x", buf2, signal->theData[i]);
- snprintf(buf2, 100, "%s", buf);
- }
- warningEvent("gsn: %d block: %s, length: %d theData: %s",
- gsn, getBlockName(block, "UNKNOWN"), length, buf);
-
- g_eventLogger.warning("-- SENDING CUSTOM SIGNAL --");
- g_eventLogger.warning("gsn: %d block: %s, length: %d theData: %s",
- gsn, getBlockName(block, "UNKNOWN"), length, buf);
- }
-
- if(dumpState->args[0] == DumpStateOrd::DihDumpLCPState){
- infoEvent("-- Node %d LCP STATE --", getOwnNodeId());
- infoEvent("lcpStatus = %d (update place = %d) ",
- c_lcpState.lcpStatus, c_lcpState.lcpStatusUpdatedPlace);
- infoEvent
- ("lcpStart = %d lcpStartGcp = %d keepGci = %d oldestRestorable = %d",
- c_lcpState.lcpStart, c_lcpState.lcpStartGcp,
- c_lcpState.keepGci, c_lcpState.oldestRestorableGci);
-
- infoEvent
- ("immediateLcpStart = %d masterLcpNodeId = %d",
- c_lcpState.immediateLcpStart,
- refToNode(c_lcpState.m_masterLcpDihRef));
- infoEvent("-- Node %d LCP STATE --", getOwnNodeId());
- }
-
- if(dumpState->args[0] == DumpStateOrd::DihDumpLCPMasterTakeOver){
- infoEvent("-- Node %d LCP MASTER TAKE OVER STATE --", getOwnNodeId());
- infoEvent
- ("c_lcpMasterTakeOverState.state = %d updatePlace = %d failedNodeId = %d",
- c_lcpMasterTakeOverState.state,
- c_lcpMasterTakeOverState.updatePlace,
- c_lcpMasterTakeOverState.failedNodeId);
-
- infoEvent("c_lcpMasterTakeOverState.minTableId = %u minFragId = %u",
- c_lcpMasterTakeOverState.minTableId,
- c_lcpMasterTakeOverState.minFragId);
-
- infoEvent("-- Node %d LCP MASTER TAKE OVER STATE --", getOwnNodeId());
- }
-
- if (signal->theData[0] == 7015){
- for(Uint32 i = 0; i<ctabFileSize; i++){
- TabRecordPtr tabPtr;
- tabPtr.i = i;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- if(tabPtr.p->tabStatus != TabRecord::TS_ACTIVE)
- continue;
-
- infoEvent
- ("Table %d: TabCopyStatus: %d TabUpdateStatus: %d TabLcpStatus: %d",
- tabPtr.i,
- tabPtr.p->tabCopyStatus,
- tabPtr.p->tabUpdateState,
- tabPtr.p->tabLcpStatus);
-
- FragmentstorePtr fragPtr;
- for (Uint32 fid = 0; fid < tabPtr.p->totalfragments; fid++) {
- jam();
- getFragstore(tabPtr.p, fid, fragPtr);
-
- char buf[100], buf2[100];
- BaseString::snprintf(buf, sizeof(buf), " Fragment %d: noLcpReplicas==%d ",
- fid, fragPtr.p->noLcpReplicas);
-
- Uint32 num=0;
- ReplicaRecordPtr replicaPtr;
- replicaPtr.i = fragPtr.p->storedReplicas;
- do {
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- BaseString::snprintf(buf2, sizeof(buf2), "%s %d(on %d)=%d(%s)",
- buf, num,
- replicaPtr.p->procNode,
- replicaPtr.p->lcpIdStarted,
- replicaPtr.p->lcpOngoingFlag ? "Ongoing" : "Idle");
- BaseString::snprintf(buf, sizeof(buf), "%s", buf2);
-
- num++;
- replicaPtr.i = replicaPtr.p->nextReplica;
- } while (replicaPtr.i != RNIL);
- infoEvent(buf);
- }
- }
- }
-
- if(dumpState->args[0] == DumpStateOrd::EnableUndoDelayDataWrite){
- ndbout << "Dbdih:: delay write of datapages for table = "
- << dumpState->args[1]<< endl;
- // Send this dump to ACC and TUP
- EXECUTE_DIRECT(DBACC, GSN_DUMP_STATE_ORD, signal, 2);
- EXECUTE_DIRECT(DBTUP, GSN_DUMP_STATE_ORD, signal, 2);
-
- // Start immediate LCP
- c_lcpState.ctimer += (1 << c_lcpState.clcpDelay);
- return;
- }
-
- if (signal->theData[0] == DumpStateOrd::DihAllAllowNodeStart) {
- for (Uint32 i = 1; i < MAX_NDB_NODES; i++)
- setAllowNodeStart(i, true);
- return;
- }//if
- if (signal->theData[0] == DumpStateOrd::DihMinTimeBetweenLCP) {
- // Set time between LCP to min value
- ndbout << "Set time between LCP to min value" << endl;
- c_lcpState.clcpDelay = 0; // TimeBetweenLocalCheckpoints.min
- return;
- }
- if (signal->theData[0] == DumpStateOrd::DihMaxTimeBetweenLCP) {
- // Set time between LCP to max value
- ndbout << "Set time between LCP to max value" << endl;
- c_lcpState.clcpDelay = 31; // TimeBetweenLocalCheckpoints.max
- return;
- }
-
- if(dumpState->args[0] == 7098){
- if(signal->length() == 3){
- jam();
- infoEvent("startLcpRoundLoopLab(tabel=%d, fragment=%d)",
- signal->theData[1], signal->theData[2]);
- startLcpRoundLoopLab(signal, signal->theData[1], signal->theData[2]);
- return;
- } else {
- infoEvent("Invalid no of arguments to 7098 - startLcpRoundLoopLab -"
- " expected 2 (tableId, fragmentId)");
- }
- }
-
- if(dumpState->args[0] == DumpStateOrd::DihStartLcpImmediately){
- c_lcpState.ctimer += (1 << c_lcpState.clcpDelay);
- return;
- }
-}//Dbdih::execDUMP_STATE_ORD()
-
-void
-Dbdih::execPREP_DROP_TAB_REQ(Signal* signal){
- jamEntry();
-
- PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
-
- TabRecordPtr tabPtr;
- tabPtr.i = req->tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- Uint32 senderRef = req->senderRef;
- Uint32 senderData = req->senderData;
-
- PrepDropTabRef::ErrorCode err = PrepDropTabRef::OK;
- { /**
- * Check table state
- */
- bool ok = false;
- switch(tabPtr.p->tabStatus){
- case TabRecord::TS_IDLE:
- ok = true;
- jam();
- err = PrepDropTabRef::NoSuchTable;
- break;
- case TabRecord::TS_DROPPING:
- ok = true;
- jam();
- err = PrepDropTabRef::PrepDropInProgress;
- break;
- case TabRecord::TS_CREATING:
- jam();
- ok = true;
- break;
- case TabRecord::TS_ACTIVE:
- ok = true;
- jam();
- break;
- }
- ndbrequire(ok);
- }
-
- if(err != PrepDropTabRef::OK){
- jam();
- PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = senderData;
- ref->tableId = tabPtr.i;
- ref->errorCode = err;
- sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal,
- PrepDropTabRef::SignalLength, JBB);
- return;
- }
-
- tabPtr.p->tabStatus = TabRecord::TS_DROPPING;
- tabPtr.p->m_prepDropTab.senderRef = senderRef;
- tabPtr.p->m_prepDropTab.senderData = senderData;
-
- if(isMaster()){
- /**
- * Remove from queue
- */
- NodeRecordPtr nodePtr;
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- ptrAss(nodePtr, nodeRecord);
- if (c_lcpState.m_participatingLQH.get(nodePtr.i)){
-
- Uint32 index = 0;
- Uint32 count = nodePtr.p->noOfQueuedChkpt;
- while(index < count){
- if(nodePtr.p->queuedChkpt[index].tableId == tabPtr.i){
- jam();
- // ndbout_c("Unqueuing %d", index);
-
- count--;
- for(Uint32 i = index; i<count; i++){
- jam();
- nodePtr.p->queuedChkpt[i] = nodePtr.p->queuedChkpt[i + 1];
- }
- } else {
- index++;
- }
- }
- nodePtr.p->noOfQueuedChkpt = count;
- }
- }
- }
-
- { /**
- * Check table lcp state
- */
-
- bool ok = false;
- switch(tabPtr.p->tabLcpStatus){
- case TabRecord::TLS_COMPLETED:
- case TabRecord::TLS_WRITING_TO_FILE:
- ok = true;
- jam();
-      break;
- case TabRecord::TLS_ACTIVE:
- ok = true;
- jam();
-
- tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
-
- /**
- * First check if all fragments are done
- */
- if(checkLcpAllTablesDoneInLqh()){
- jam();
-
- ndbout_c("This is the last table");
-
- /**
- * Then check if saving of tab info is done for all tables
- */
- LcpStatus a = c_lcpState.lcpStatus;
- checkLcpCompletedLab(signal);
-
- if(a != c_lcpState.lcpStatus){
-        ndbout_c("And all tables have already been written to disk");
- }
- }
- break;
- }
- ndbrequire(ok);
- }
-
- { /**
- * Send WaitDropTabReq to all LQH
- */
- WaitDropTabReq * req = (WaitDropTabReq*)signal->getDataPtrSend();
- req->tableId = tabPtr.i;
- req->senderRef = reference();
-
- NodeRecordPtr nodePtr;
- nodePtr.i = cfirstAliveNode;
- tabPtr.p->m_prepDropTab.waitDropTabCount.clearWaitingFor();
- while(nodePtr.i != RNIL){
- jam();
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
-
- tabPtr.p->m_prepDropTab.waitDropTabCount.setWaitingFor(nodePtr.i);
- sendSignal(calcLqhBlockRef(nodePtr.i), GSN_WAIT_DROP_TAB_REQ,
- signal, WaitDropTabReq::SignalLength, JBB);
-
- nodePtr.i = nodePtr.p->nextNode;
- }
- }
-
- waitDropTabWritingToFile(signal, tabPtr);
-}
-
-void
-Dbdih::waitDropTabWritingToFile(Signal* signal, TabRecordPtr tabPtr){
-
- if(tabPtr.p->tabLcpStatus == TabRecord::TLS_WRITING_TO_FILE){
- jam();
- signal->theData[0] = DihContinueB::WAIT_DROP_TAB_WRITING_TO_FILE;
- signal->theData[1] = tabPtr.i;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2);
- return;
- }
-
- ndbrequire(tabPtr.p->tabLcpStatus == TabRecord::TLS_COMPLETED);
- checkPrepDropTabComplete(signal, tabPtr);
-}
-
-void
-Dbdih::checkPrepDropTabComplete(Signal* signal, TabRecordPtr tabPtr){
-
- if(tabPtr.p->tabLcpStatus != TabRecord::TLS_COMPLETED){
- jam();
- return;
- }
-
- if(!tabPtr.p->m_prepDropTab.waitDropTabCount.done()){
- jam();
- return;
- }
-
- const Uint32 ref = tabPtr.p->m_prepDropTab.senderRef;
- if(ref != 0){
- PrepDropTabConf* conf = (PrepDropTabConf*)signal->getDataPtrSend();
- conf->tableId = tabPtr.i;
- conf->senderRef = reference();
- conf->senderData = tabPtr.p->m_prepDropTab.senderData;
- sendSignal(tabPtr.p->m_prepDropTab.senderRef, GSN_PREP_DROP_TAB_CONF,
- signal, PrepDropTabConf::SignalLength, JBB);
- tabPtr.p->m_prepDropTab.senderRef = 0;
- }
-}
-
-void
-Dbdih::execWAIT_DROP_TAB_REF(Signal* signal){
- jamEntry();
- WaitDropTabRef * ref = (WaitDropTabRef*)signal->getDataPtr();
-
- TabRecordPtr tabPtr;
- tabPtr.i = ref->tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_DROPPING);
- Uint32 nodeId = refToNode(ref->senderRef);
-
- ndbrequire(ref->errorCode == WaitDropTabRef::NoSuchTable ||
- ref->errorCode == WaitDropTabRef::NF_FakeErrorREF);
-
- tabPtr.p->m_prepDropTab.waitDropTabCount.clearWaitingFor(nodeId);
- checkPrepDropTabComplete(signal, tabPtr);
-}
-
-void
-Dbdih::execWAIT_DROP_TAB_CONF(Signal* signal){
- jamEntry();
- WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr();
-
- TabRecordPtr tabPtr;
- tabPtr.i = conf->tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_DROPPING);
- Uint32 nodeId = refToNode(conf->senderRef);
- tabPtr.p->m_prepDropTab.waitDropTabCount.clearWaitingFor(nodeId);
- checkPrepDropTabComplete(signal, tabPtr);
-}
-
-void
-Dbdih::checkWaitDropTabFailedLqh(Signal* signal, Uint32 nodeId, Uint32 tableId){
-
- TabRecordPtr tabPtr;
- tabPtr.i = tableId;
-
- WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr();
- conf->tableId = tableId;
-
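-  /* Scan up to RT_BREAK tables per round looking for dropping tables that
-   * still wait for the failed LQH node; fake the WAIT_DROP_TAB_CONF on
-   * that node's behalf and continue the scan via CONTINUEB. */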
- const Uint32 RT_BREAK = 16;
- for(Uint32 i = 0; i<RT_BREAK && tabPtr.i < ctabFileSize; i++, tabPtr.i++){
- ptrAss(tabPtr, tabRecord);
- if(tabPtr.p->tabStatus == TabRecord::TS_DROPPING){
- if(tabPtr.p->m_prepDropTab.waitDropTabCount.isWaitingFor(nodeId)){
- conf->senderRef = calcLqhBlockRef(nodeId);
- execWAIT_DROP_TAB_CONF(signal);
- tabPtr.i++;
- break;
- }
- }
- }
-
- if(tabPtr.i == ctabFileSize){
- /**
- * Finished
- */
- jam();
- return;
- }
-
- signal->theData[0] = DihContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH;
- signal->theData[1] = nodeId;
- signal->theData[2] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
-}
-
-
-void
-Dbdih::execNDB_TAMPER(Signal* signal)
-{
- if ((ERROR_INSERTED(7011)) &&
- (signal->theData[0] == 7012)) {
- CLEAR_ERROR_INSERT_VALUE;
- calculateKeepGciLab(signal, 0, 0);
- return;
- }//if
- SET_ERROR_INSERT_VALUE(signal->theData[0]);
- return;
-}//Dbdih::execNDB_TAMPER()
-
-void Dbdih::execSET_VAR_REQ(Signal* signal) {
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
-
- switch (var) {
- case TimeBetweenLocalCheckpoints:
- c_lcpState.clcpDelay = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case TimeBetweenGlobalCheckpoints:
- cgcpDelay = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-}
-
-void Dbdih::execBLOCK_COMMIT_ORD(Signal* signal){
- BlockCommitOrd* const block = (BlockCommitOrd *)&signal->theData[0];
-
- jamEntry();
-#if 0
- ndbrequire(c_blockCommit == false ||
- c_blockCommitNo == block->failNo);
-#else
- if(!(c_blockCommit == false || c_blockCommitNo == block->failNo)){
- infoEvent("Possible bug in Dbdih::execBLOCK_COMMIT_ORD c_blockCommit = %d c_blockCommitNo = %d"
- " sig->failNo = %d", c_blockCommit, c_blockCommitNo, block->failNo);
- }
-#endif
- c_blockCommit = true;
- c_blockCommitNo = block->failNo;
-}
-
-void Dbdih::execUNBLOCK_COMMIT_ORD(Signal* signal){
- UnblockCommitOrd* const unblock = (UnblockCommitOrd *)&signal->theData[0];
- (void)unblock;
-
- jamEntry();
-
- if(c_blockCommit == true){
- jam();
- // ndbrequire(c_blockCommitNo == unblock->failNo);
-
- c_blockCommit = false;
- emptyverificbuffer(signal, true);
- }
-}
-
-void Dbdih::execSTOP_PERM_REQ(Signal* signal){
-
- jamEntry();
-
- StopPermReq* const req = (StopPermReq*)&signal->theData[0];
- StopPermRef* const ref = (StopPermRef*)&signal->theData[0];
-
- const Uint32 senderData = req->senderData;
- const BlockReference senderRef = req->senderRef;
- const NodeId nodeId = refToNode(senderRef);
-
- if (isMaster()) {
- /**
- * Master
- */
- jam();
- CRASH_INSERTION(7065);
- if (c_stopPermMaster.clientRef != 0) {
- jam();
-
- ref->senderData = senderData;
- ref->errorCode = StopPermRef::NodeShutdownInProgress;
- sendSignal(senderRef, GSN_STOP_PERM_REF, signal,
- StopPermRef::SignalLength, JBB);
- return;
- }//if
-
- if (c_nodeStartMaster.activeState) {
- jam();
- ref->senderData = senderData;
- ref->errorCode = StopPermRef::NodeStartInProgress;
- sendSignal(senderRef, GSN_STOP_PERM_REF, signal,
- StopPermRef::SignalLength, JBB);
- return;
- }//if
-
- /**
- * Lock
- */
- c_nodeStartMaster.activeState = true;
- c_stopPermMaster.clientRef = senderRef;
-
- c_stopPermMaster.clientData = senderData;
- c_stopPermMaster.returnValue = 0;
- c_switchReplicas.clear();
-
- Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle);
- Callback c = { safe_cast(&Dbdih::switch_primary_stop_node), nodeId };
- ndbrequire(mutex.lock(c));
- } else {
- /**
- * Proxy part
- */
- jam();
- CRASH_INSERTION(7066);
- if(c_stopPermProxy.clientRef != 0){
- jam();
- ref->senderData = senderData;
- ref->errorCode = StopPermRef::NodeShutdownInProgress;
- sendSignal(senderRef, GSN_STOP_PERM_REF, signal, 2, JBB);
- return;
- }//if
-
- c_stopPermProxy.clientRef = senderRef;
- c_stopPermProxy.masterRef = cmasterdihref;
- c_stopPermProxy.clientData = senderData;
-
- req->senderRef = reference();
- req->senderData = senderData;
- sendSignal(cmasterdihref, GSN_STOP_PERM_REQ, signal,
- StopPermReq::SignalLength, JBB);
- }//if
-}//Dbdih::execSTOP_PERM_REQ()
-
-void
-Dbdih::switch_primary_stop_node(Signal* signal, Uint32 node_id, Uint32 ret_val)
-{
- ndbrequire(ret_val == 0);
- signal->theData[0] = DihContinueB::SwitchReplica;
- signal->theData[1] = node_id;
- signal->theData[2] = 0; // table id
- signal->theData[3] = 0; // fragment id
- sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
-}
-
-void Dbdih::execSTOP_PERM_REF(Signal* signal)
-{
- jamEntry();
- ndbrequire(c_stopPermProxy.clientRef != 0);
- ndbrequire(c_stopPermProxy.masterRef == signal->senderBlockRef());
- sendSignal(c_stopPermProxy.clientRef, GSN_STOP_PERM_REF, signal, 2, JBB);
- c_stopPermProxy.clientRef = 0;
-}//Dbdih::execSTOP_PERM_REF()
-
-void Dbdih::execSTOP_PERM_CONF(Signal* signal)
-{
- jamEntry();
- ndbrequire(c_stopPermProxy.clientRef != 0);
- ndbrequire(c_stopPermProxy.masterRef == signal->senderBlockRef());
- sendSignal(c_stopPermProxy.clientRef, GSN_STOP_PERM_CONF, signal, 1, JBB);
- c_stopPermProxy.clientRef = 0;
-}//Dbdih::execSTOP_PERM_CONF()
-
-void Dbdih::execDIH_SWITCH_REPLICA_REQ(Signal* signal)
-{
- jamEntry();
- DihSwitchReplicaReq* const req = (DihSwitchReplicaReq*)&signal->theData[0];
- const Uint32 tableId = req->tableId;
- const Uint32 fragNo = req->fragNo;
- const BlockReference senderRef = req->senderRef;
-
- CRASH_INSERTION(7067);
- TabRecordPtr tabPtr;
- tabPtr.i = tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
- if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
- jam();
- sendSignal(reference(), GSN_DIH_SWITCH_REPLICA_REQ, signal,
- DihSwitchReplicaReq::SignalLength, JBB);
- return;
- }//if
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, fragNo, fragPtr);
-
- /**
- * Do funky stuff
- */
- Uint32 oldOrder[MAX_REPLICAS];
- const Uint32 noOfReplicas = extractNodeInfo(fragPtr.p, oldOrder);
-
- if (noOfReplicas < req->noOfReplicas) {
- jam();
- //---------------------------------------------------------------------
- // A crash occurred in the middle of our switch handling.
- //---------------------------------------------------------------------
- DihSwitchReplicaRef* const ref = (DihSwitchReplicaRef*)&signal->theData[0];
- ref->senderNode = cownNodeId;
- ref->errorCode = StopPermRef::NF_CausedAbortOfStopProcedure;
-    sendSignal(senderRef, GSN_DIH_SWITCH_REPLICA_REF, signal,
-               DihSwitchReplicaRef::SignalLength, JBB);
-    return;
-  }//if
- for (Uint32 i = 0; i < noOfReplicas; i++) {
- jam();
- ndbrequire(i < MAX_REPLICAS);
- fragPtr.p->activeNodes[i] = req->newNodeOrder[i];
- }//for
- /**
- * Reply
- */
- DihSwitchReplicaConf* const conf = (DihSwitchReplicaConf*)&signal->theData[0];
- conf->senderNode = cownNodeId;
- sendSignal(senderRef, GSN_DIH_SWITCH_REPLICA_CONF, signal,
- DihSwitchReplicaConf::SignalLength, JBB);
-}//Dbdih::execDIH_SWITCH_REPLICA_REQ()
-
-void Dbdih::execDIH_SWITCH_REPLICA_CONF(Signal* signal)
-{
- jamEntry();
- /**
- * Response to master
- */
- CRASH_INSERTION(7068);
- DihSwitchReplicaConf* const conf = (DihSwitchReplicaConf*)&signal->theData[0];
- switchReplicaReply(signal, conf->senderNode);
-}//Dbdih::execDIH_SWITCH_REPLICA_CONF()
-
-void Dbdih::execDIH_SWITCH_REPLICA_REF(Signal* signal)
-{
- jamEntry();
- DihSwitchReplicaRef* const ref = (DihSwitchReplicaRef*)&signal->theData[0];
- if(c_stopPermMaster.returnValue == 0){
- jam();
- c_stopPermMaster.returnValue = ref->errorCode;
- }//if
- switchReplicaReply(signal, ref->senderNode);
-}//Dbdih::execDIH_SWITCH_REPLICA_REF()
-
-void Dbdih::switchReplicaReply(Signal* signal,
- NodeId nodeId){
- jam();
- receiveLoopMacro(DIH_SWITCH_REPLICA_REQ, nodeId);
- //------------------------------------------------------
- // We have received all responses from the nodes. Thus
- // we have completed switching replica roles. Continue
- // with the next fragment.
- //------------------------------------------------------
- if(c_stopPermMaster.returnValue != 0){
- jam();
- c_switchReplicas.tableId = ctabFileSize + 1;
- }//if
- c_switchReplicas.fragNo++;
-
- signal->theData[0] = DihContinueB::SwitchReplica;
- signal->theData[1] = c_switchReplicas.nodeId;
- signal->theData[2] = c_switchReplicas.tableId;
- signal->theData[3] = c_switchReplicas.fragNo;
- sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
-}//Dbdih::switchReplicaReply()
-
-void
-Dbdih::switchReplica(Signal* signal,
- Uint32 nodeId,
- Uint32 tableId,
- Uint32 fragNo){
- jam();
- DihSwitchReplicaReq* const req = (DihSwitchReplicaReq*)&signal->theData[0];
-
- const Uint32 RT_BREAK = 64;
-
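-  /* Real-time break: handle at most RT_BREAK table/fragment steps per
-   * invocation, then reschedule via CONTINUEB (SwitchReplica) so the
-   * signal scheduler is not blocked for too long. */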
- for (Uint32 i = 0; i < RT_BREAK; i++) {
- jam();
- if (tableId >= ctabFileSize) {
- jam();
- StopPermConf* const conf = (StopPermConf*)&signal->theData[0];
- StopPermRef* const ref = (StopPermRef*)&signal->theData[0];
- /**
- * Finished with all tables
- */
- if(c_stopPermMaster.returnValue == 0) {
- jam();
- conf->senderData = c_stopPermMaster.clientData;
- sendSignal(c_stopPermMaster.clientRef, GSN_STOP_PERM_CONF,
- signal, 1, JBB);
- } else {
- jam();
- ref->senderData = c_stopPermMaster.clientData;
- ref->errorCode = c_stopPermMaster.returnValue;
- sendSignal(c_stopPermMaster.clientRef, GSN_STOP_PERM_REF, signal, 2,JBB);
- }//if
-
- /**
- * UnLock
- */
- c_nodeStartMaster.activeState = false;
- c_stopPermMaster.clientRef = 0;
- c_stopPermMaster.clientData = 0;
- c_stopPermMaster.returnValue = 0;
- Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle);
- mutex.unlock(); // ignore result
- return;
- }//if
-
- TabRecordPtr tabPtr;
- tabPtr.i = tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
- if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) {
- jam();
- tableId++;
- fragNo = 0;
- continue;
- }//if
- if (fragNo >= tabPtr.p->totalfragments) {
- jam();
- tableId++;
- fragNo = 0;
- continue;
- }//if
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, fragNo, fragPtr);
-
- Uint32 oldOrder[MAX_REPLICAS];
- const Uint32 noOfReplicas = extractNodeInfo(fragPtr.p, oldOrder);
-
- if(oldOrder[0] != nodeId) {
- jam();
- fragNo++;
- continue;
- }//if
- req->tableId = tableId;
- req->fragNo = fragNo;
- req->noOfReplicas = noOfReplicas;
- for (Uint32 i = 0; i < (noOfReplicas - 1); i++) {
- req->newNodeOrder[i] = oldOrder[i+1];
- }//for
- req->newNodeOrder[noOfReplicas-1] = nodeId;
- req->senderRef = reference();
-
- /**
- * Initialize struct
- */
- c_switchReplicas.tableId = tableId;
- c_switchReplicas.fragNo = fragNo;
- c_switchReplicas.nodeId = nodeId;
-
- sendLoopMacro(DIH_SWITCH_REPLICA_REQ, sendDIH_SWITCH_REPLICA_REQ);
- return;
- }//for
-
- signal->theData[0] = DihContinueB::SwitchReplica;
- signal->theData[1] = nodeId;
- signal->theData[2] = tableId;
- signal->theData[3] = fragNo;
- sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
-}//Dbdih::switchReplica()
-
-void Dbdih::execSTOP_ME_REQ(Signal* signal)
-{
- jamEntry();
- StopMeReq* const req = (StopMeReq*)&signal->theData[0];
- const BlockReference senderRef = req->senderRef;
- const Uint32 senderData = req->senderData;
- const Uint32 nodeId = refToNode(senderRef);
- {
- /**
- * Set node dead (remove from operations)
- */
- NodeRecordPtr nodePtr;
- nodePtr.i = nodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- nodePtr.p->useInTransactions = false;
- }
- if (nodeId != getOwnNodeId()) {
- jam();
- StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0];
- stopMeConf->senderData = senderData;
- stopMeConf->senderRef = reference();
- sendSignal(senderRef, GSN_STOP_ME_CONF, signal,
- StopMeConf::SignalLength, JBB);
- return;
- }//if
-
- /**
- * Local signal
- */
- jam();
- ndbrequire(c_stopMe.clientRef == 0);
-
- c_stopMe.clientData = senderData;
- c_stopMe.clientRef = senderRef;
-
- req->senderData = senderData;
- req->senderRef = reference();
-
- sendLoopMacro(STOP_ME_REQ, sendSTOP_ME_REQ);
-
- /**
- * Send conf to self
- */
- StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0];
- stopMeConf->senderData = senderData;
- stopMeConf->senderRef = reference();
- sendSignal(reference(), GSN_STOP_ME_CONF, signal,
- StopMeConf::SignalLength, JBB);
-}//Dbdih::execSTOP_ME_REQ()
-
-void Dbdih::execSTOP_ME_REF(Signal* signal)
-{
- ndbrequire(false);
-}
-
-void Dbdih::execSTOP_ME_CONF(Signal* signal)
-{
- jamEntry();
- StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0];
-
- const Uint32 senderRef = stopMeConf->senderRef;
- const Uint32 senderData = stopMeConf->senderData;
- const Uint32 nodeId = refToNode(senderRef);
-
- ndbrequire(c_stopMe.clientRef != 0);
- ndbrequire(c_stopMe.clientData == senderData);
-
- receiveLoopMacro(STOP_ME_REQ, nodeId);
- //---------------------------------------------------------
- // All STOP_ME_REQ have been received. We will send the
- // confirmation back to the requesting block.
- //---------------------------------------------------------
-
- stopMeConf->senderRef = reference();
- stopMeConf->senderData = c_stopMe.clientData;
- sendSignal(c_stopMe.clientRef, GSN_STOP_ME_CONF, signal,
- StopMeConf::SignalLength, JBB);
- c_stopMe.clientRef = 0;
-}//Dbdih::execSTOP_ME_CONF()
-
-void Dbdih::execWAIT_GCP_REQ(Signal* signal)
-{
- jamEntry();
- WaitGCPReq* const req = (WaitGCPReq*)&signal->theData[0];
- WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0];
- WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0];
- const Uint32 senderData = req->senderData;
- const BlockReference senderRef = req->senderRef;
- const Uint32 requestType = req->requestType;
-
- if(requestType == WaitGCPReq::CurrentGCI) {
- jam();
- conf->senderData = senderData;
- conf->gcp = cnewgcp;
- sendSignal(senderRef, GSN_WAIT_GCP_CONF, signal,
- WaitGCPConf::SignalLength, JBB);
- return;
- }//if
-
- if(isMaster()) {
- /**
- * Master
- */
- jam();
-
- if((requestType == WaitGCPReq::CompleteIfRunning) &&
- (cgcpStatus == GCP_READY)) {
- jam();
- conf->senderData = senderData;
- conf->gcp = coldgcp;
- sendSignal(senderRef, GSN_WAIT_GCP_CONF, signal,
- WaitGCPConf::SignalLength, JBB);
- return;
- }//if
-
- WaitGCPMasterPtr ptr;
- if(c_waitGCPMasterList.seize(ptr) == false){
- jam();
- ref->senderData = senderData;
- ref->errorCode = WaitGCPRef::NoWaitGCPRecords;
- sendSignal(senderRef, GSN_WAIT_GCP_REF, signal,
- WaitGCPRef::SignalLength, JBB);
- return;
- }//if
- ptr.p->clientRef = senderRef;
- ptr.p->clientData = senderData;
-
- if((requestType == WaitGCPReq::CompleteForceStart) &&
- (cgcpStatus == GCP_READY)) {
- jam();
- cstartGcpNow = true;
- }//if
- return;
- } else {
- /**
- * Proxy part
- */
- jam();
- WaitGCPProxyPtr ptr;
- if (c_waitGCPProxyList.seize(ptr) == false) {
- jam();
- ref->senderData = senderData;
- ref->errorCode = WaitGCPRef::NoWaitGCPRecords;
- sendSignal(senderRef, GSN_WAIT_GCP_REF, signal,
- WaitGCPRef::SignalLength, JBB);
- return;
- }//if
- ptr.p->clientRef = senderRef;
- ptr.p->clientData = senderData;
- ptr.p->masterRef = cmasterdihref;
-
- req->senderData = ptr.i;
- req->senderRef = reference();
- req->requestType = requestType;
-
- sendSignal(cmasterdihref, GSN_WAIT_GCP_REQ, signal,
- WaitGCPReq::SignalLength, JBB);
- return;
- }//if
-}//Dbdih::execWAIT_GCP_REQ()
-
-void Dbdih::execWAIT_GCP_REF(Signal* signal)
-{
- jamEntry();
- ndbrequire(!isMaster());
- WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0];
-
- const Uint32 proxyPtr = ref->senderData;
- const Uint32 errorCode = ref->errorCode;
-
- WaitGCPProxyPtr ptr;
- ptr.i = proxyPtr;
- c_waitGCPProxyList.getPtr(ptr);
-
- ref->senderData = ptr.p->clientData;
- ref->errorCode = errorCode;
- sendSignal(ptr.p->clientRef, GSN_WAIT_GCP_REF, signal,
- WaitGCPRef::SignalLength, JBB);
-
- c_waitGCPProxyList.release(ptr);
-}//Dbdih::execWAIT_GCP_REF()
-
-void Dbdih::execWAIT_GCP_CONF(Signal* signal)
-{
- jamEntry();
- ndbrequire(!isMaster());
- WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0];
- const Uint32 proxyPtr = conf->senderData;
- const Uint32 gcp = conf->gcp;
- WaitGCPProxyPtr ptr;
-
- ptr.i = proxyPtr;
- c_waitGCPProxyList.getPtr(ptr);
-
- conf->senderData = ptr.p->clientData;
- conf->gcp = gcp;
- sendSignal(ptr.p->clientRef, GSN_WAIT_GCP_CONF, signal,
- WaitGCPConf::SignalLength, JBB);
-
- c_waitGCPProxyList.release(ptr);
-}//Dbdih::execWAIT_GCP_CONF()
-
-void Dbdih::checkWaitGCPProxy(Signal* signal, NodeId failedNodeId)
-{
- jam();
- WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0];
- ref->errorCode = WaitGCPRef::NF_CausedAbortOfProcedure;
-
- WaitGCPProxyPtr ptr;
- c_waitGCPProxyList.first(ptr);
- while(ptr.i != RNIL) {
- jam();
- const Uint32 i = ptr.i;
- const Uint32 clientData = ptr.p->clientData;
- const BlockReference clientRef = ptr.p->clientRef;
- const BlockReference masterRef = ptr.p->masterRef;
-
- c_waitGCPProxyList.next(ptr);
- if(refToNode(masterRef) == failedNodeId) {
- jam();
- c_waitGCPProxyList.release(i);
- ref->senderData = clientData;
- sendSignal(clientRef, GSN_WAIT_GCP_REF, signal,
- WaitGCPRef::SignalLength, JBB);
- }//if
- }//while
-}//Dbdih::checkWaitGCPProxy()
-
-void Dbdih::checkWaitGCPMaster(Signal* signal, NodeId failedNodeId)
-{
- jam();
- WaitGCPMasterPtr ptr;
- c_waitGCPMasterList.first(ptr);
-
- while (ptr.i != RNIL) {
- jam();
- const Uint32 i = ptr.i;
- const NodeId nodeId = refToNode(ptr.p->clientRef);
-
- c_waitGCPMasterList.next(ptr);
- if (nodeId == failedNodeId) {
-      jam();
- c_waitGCPMasterList.release(i);
- }//if
- }//while
-}//Dbdih::checkWaitGCPMaster()
-
-void Dbdih::emptyWaitGCPMasterQueue(Signal* signal)
-{
- jam();
- WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0];
- conf->gcp = coldgcp;
-
- WaitGCPMasterPtr ptr;
- c_waitGCPMasterList.first(ptr);
- while(ptr.i != RNIL) {
- jam();
- const Uint32 i = ptr.i;
- const Uint32 clientData = ptr.p->clientData;
- const BlockReference clientRef = ptr.p->clientRef;
-
- c_waitGCPMasterList.next(ptr);
- conf->senderData = clientData;
- sendSignal(clientRef, GSN_WAIT_GCP_CONF, signal,
- WaitGCPConf::SignalLength, JBB);
-
- c_waitGCPMasterList.release(i);
- }//while
-}//Dbdih::emptyWaitGCPMasterQueue()
-
-void Dbdih::setNodeStatus(Uint32 nodeId, NodeRecord::NodeStatus newStatus)
-{
- NodeRecordPtr nodePtr;
- nodePtr.i = nodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- nodePtr.p->nodeStatus = newStatus;
-}//Dbdih::setNodeStatus()
-
-Dbdih::NodeRecord::NodeStatus Dbdih::getNodeStatus(Uint32 nodeId)
-{
- NodeRecordPtr nodePtr;
- nodePtr.i = nodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- return nodePtr.p->nodeStatus;
-}//Dbdih::getNodeStatus()
-
-Sysfile::ActiveStatus
-Dbdih::getNodeActiveStatus(Uint32 nodeId)
-{
- NodeRecordPtr nodePtr;
- nodePtr.i = nodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- return nodePtr.p->activeStatus;
-}//Dbdih::getNodeActiveStatus()
-
-
-void
-Dbdih::setNodeActiveStatus(Uint32 nodeId, Sysfile::ActiveStatus newStatus)
-{
- NodeRecordPtr nodePtr;
- nodePtr.i = nodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- nodePtr.p->activeStatus = newStatus;
-}//Dbdih::setNodeActiveStatus()
-
-void Dbdih::setAllowNodeStart(Uint32 nodeId, bool newState)
-{
- NodeRecordPtr nodePtr;
- nodePtr.i = nodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- nodePtr.p->allowNodeStart = newState;
-}//Dbdih::setAllowNodeStart()
-
-void Dbdih::setNodeCopyCompleted(Uint32 nodeId, bool newState)
-{
- NodeRecordPtr nodePtr;
- nodePtr.i = nodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- nodePtr.p->copyCompleted = newState;
-}//Dbdih::setNodeCopyCompleted()
-
-bool Dbdih::getAllowNodeStart(Uint32 nodeId)
-{
- NodeRecordPtr nodePtr;
- nodePtr.i = nodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- return nodePtr.p->allowNodeStart;
-}//Dbdih::getAllowNodeStart()
-
-bool Dbdih::getNodeCopyCompleted(Uint32 nodeId)
-{
- NodeRecordPtr nodePtr;
- nodePtr.i = nodeId;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- return nodePtr.p->copyCompleted;
-}//Dbdih::getNodeCopyCompleted()
-
-bool Dbdih::checkNodeAlive(Uint32 nodeId)
-{
- NodeRecordPtr nodePtr;
- nodePtr.i = nodeId;
- ndbrequire(nodeId > 0);
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- if (nodePtr.p->nodeStatus != NodeRecord::ALIVE) {
- return false;
- } else {
- return true;
- }//if
-}//Dbdih::checkNodeAlive()
-
-bool Dbdih::isMaster()
-{
- return (reference() == cmasterdihref);
-}//Dbdih::isMaster()
-
-bool Dbdih::isActiveMaster()
-{
- return ((reference() == cmasterdihref) && (cmasterState == MASTER_ACTIVE));
-}//Dbdih::isActiveMaster()
-
-Dbdih::NodeRecord::NodeRecord(){
- m_nodefailSteps.clear();
- gcpstate = NodeRecord::READY;
-
- activeStatus = Sysfile::NS_NotDefined;
- recNODE_FAILREP = ZFALSE;
- nodeGroup = ZNIL;
- dbtcFailCompleted = ZTRUE;
- dbdictFailCompleted = ZTRUE;
- dbdihFailCompleted = ZTRUE;
- dblqhFailCompleted = ZTRUE;
- noOfStartedChkpt = 0;
- noOfQueuedChkpt = 0;
- lcpStateAtTakeOver = (MasterLCPConf::State)255;
-
- activeTabptr = RNIL;
- nodeStatus = NodeRecord::NOT_IN_CLUSTER;
- useInTransactions = false;
- copyCompleted = false;
- allowNodeStart = true;
-}
diff --git a/ndb/src/kernel/blocks/dbdih/Makefile.am b/ndb/src/kernel/blocks/dbdih/Makefile.am
deleted file mode 100644
index d6ad380b806..00000000000
--- a/ndb/src/kernel/blocks/dbdih/Makefile.am
+++ /dev/null
@@ -1,23 +0,0 @@
-noinst_LIBRARIES = libdbdih.a
-
-libdbdih_a_SOURCES = DbdihInit.cpp DbdihMain.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libdbdih.dsp
-
-libdbdih.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libdbdih_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
deleted file mode 100644
index fa7e8667e27..00000000000
--- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ /dev/null
@@ -1,2956 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef DBLQH_H
-#define DBLQH_H
-
-#include <pc.hpp>
-#include <ndb_limits.h>
-#include <SimulatedBlock.hpp>
-#include <DLList.hpp>
-#include <DLFifoList.hpp>
-#include <DLHashTable.hpp>
-
-#include <NodeBitmask.hpp>
-#include <signaldata/LCP.hpp>
-#include <signaldata/LqhTransConf.hpp>
-#include <signaldata/LqhFrag.hpp>
-
-// primary key is stored in TUP
-#include <../dbtup/Dbtup.hpp>
-
-#ifdef DBLQH_C
-// Constants
-/* ------------------------------------------------------------------------- */
-/* CONSTANTS USED WHEN MASTER REQUESTS STATE OF COPY FRAGMENTS. */
-/* ------------------------------------------------------------------------- */
-#define ZCOPY_CLOSING 0
-#define ZCOPY_ONGOING 1
-#define ZCOPY_ACTIVATION 2
-/* ------------------------------------------------------------------------- */
-/* STATES FOR THE VARIABLE GCP_LOG_PART_STATE */
-/* ------------------------------------------------------------------------- */
-#define ZIDLE 0
-#define ZWAIT_DISK 1
-#define ZON_DISK 2
-#define ZACTIVE 1
-/* ------------------------------------------------------------------------- */
-/* STATES FOR THE VARIABLE CSR_PHASES_STARTED */
-/* ------------------------------------------------------------------------- */
-#define ZSR_NO_PHASE_STARTED 0
-#define ZSR_PHASE1_COMPLETED 1
-#define ZSR_PHASE2_COMPLETED 2
-#define ZSR_BOTH_PHASES_STARTED 3
-/* ------------------------------------------------------------------------- */
-/* THE NUMBER OF PAGES IN A MBYTE, THE TWO LOGARITHM OF THIS. */
-/* THE NUMBER OF MBYTES IN A LOG FILE. */
-/* THE MAX NUMBER OF PAGES READ/WRITTEN FROM/TO DISK DURING */
-/* A WRITE OR READ. */
-/* ------------------------------------------------------------------------- */
-#define ZNOT_DIRTY 0
-#define ZDIRTY 1
-#define ZREAD_AHEAD_SIZE 8
-/* ------------------------------------------------------------------------- */
-/* CONSTANTS OF THE LOG PAGES */
-/* ------------------------------------------------------------------------- */
-#define ZPAGE_HEADER_SIZE 32
-#define ZNO_MBYTES_IN_FILE 16
-#define ZPAGE_SIZE 8192
-#define ZPAGES_IN_MBYTE 32
-#define ZTWOLOG_NO_PAGES_IN_MBYTE 5
-#define ZTWOLOG_PAGE_SIZE 13
-#define ZMAX_MM_BUFFER_SIZE 32 // Main memory window during log execution
-
-#define ZMAX_PAGES_WRITTEN 8 // Max pages before writing to disk (=> config)
-#define ZMIN_READ_BUFFER_SIZE 2 // Minimum number of pages to execute log
-#define ZMIN_LOG_PAGES_OPERATION 10 // Minimum no of pages before stopping
-
-#define ZPOS_CHECKSUM 0
-#define ZPOS_LOG_LAP 1
-#define ZPOS_MAX_GCI_COMPLETED 2
-#define ZPOS_MAX_GCI_STARTED 3
-#define ZNEXT_PAGE 4
-#define ZPREV_PAGE 5
-#define ZPOS_VERSION 6
-#define ZPOS_NO_LOG_FILES 7
-#define ZCURR_PAGE_INDEX 8
-#define ZLAST_LOG_PREP_REF 10
-#define ZPOS_DIRTY 11
-/* ------------------------------------------------------------------------- */
-/* CONSTANTS FOR THE VARIOUS REPLICA AND NODE TYPES. */
-/* ------------------------------------------------------------------------- */
-#define ZPRIMARY_NODE 0
-#define ZBACKUP_NODE 1
-#define ZSTANDBY_NODE 2
-#define ZTC_NODE 3
-#define ZLOG_NODE 3
-/* ------------------------------------------------------------------------- */
-/* VARIOUS CONSTANTS USED AS FLAGS TO THE FILE MANAGER. */
-/* ------------------------------------------------------------------------- */
-#define ZOPEN_READ 0
-#define ZOPEN_WRITE 1
-#define ZOPEN_READ_WRITE 2
-#define ZVAR_NO_LOG_PAGE_WORD 1
-#define ZLIST_OF_PAIRS 0
-#define ZLIST_OF_PAIRS_SYNCH 16
-#define ZARRAY_OF_PAGES 1
-#define ZLIST_OF_MEM_PAGES 2
-#define ZLIST_OF_MEM_PAGES_SYNCH 18
-#define ZCLOSE_NO_DELETE 0
-#define ZCLOSE_DELETE 1
-#define ZPAGE_ZERO 0
-/* ------------------------------------------------------------------------- */
-/* THE FOLLOWING CONSTANTS ARE USED TO DESCRIBE THE TYPES OF */
-/* LOG RECORDS, THE SIZE OF THE VARIOUS LOG RECORD TYPES AND */
-/* THE POSITIONS WITHIN THOSE LOG RECORDS. */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/*       THESE CONSTANTS DESCRIBE THE SIZES OF VARIOUS TYPES OF LOG RECORDS. */
-/*       NEXT_LOG_SIZE IS ACTUALLY ONE. THE REASON WE SET IT TO 2 IS TO      */
-/*       SIMPLIFY THE CODE, SINCE OTHERWISE WE WOULD HAVE TO USE A SPECIAL   */
-/*       VERSION OF READ_LOGWORD WHEN READING THE LOG RECORD TYPE, SINCE     */
-/*       THE NEXT MBYTE TYPE COULD BE THE VERY LAST WORD IN THE MBYTE.       */
-/* BY SETTING IT TO 2 WE ENSURE IT IS NEVER THE VERY LAST WORD */
-/* IN THE MBYTE. */
-/* ------------------------------------------------------------------------- */
-#define ZFD_HEADER_SIZE 3
-#define ZFD_PART_SIZE 48
-#define ZLOG_HEAD_SIZE 6
-#define ZNEXT_LOG_SIZE 2
-#define ZABORT_LOG_SIZE 3
-#define ZCOMMIT_LOG_SIZE 9
-#define ZCOMPLETED_GCI_LOG_SIZE 2
-/* ------------------------------------------------------------------------- */
-/* THESE CONSTANTS DESCRIBE THE TYPE OF A LOG RECORD. */
-/* THIS IS THE FIRST WORD OF A LOG RECORD. */
-/* ------------------------------------------------------------------------- */
-#define ZNEW_PREP_OP_TYPE 0
-#define ZPREP_OP_TYPE 1
-#define ZCOMMIT_TYPE 2
-#define ZABORT_TYPE 3
-#define ZFD_TYPE 4
-#define ZFRAG_SPLIT_TYPE 5
-#define ZNEXT_LOG_RECORD_TYPE 6
-#define ZNEXT_MBYTE_TYPE 7
-#define ZCOMPLETED_GCI_TYPE 8
-#define ZINVALID_COMMIT_TYPE 9
-/* ------------------------------------------------------------------------- */
-/* THE POSITIONS OF LOGGED DATA IN A FILE DESCRIPTOR LOG RECORD HEADER.*/
-/* ALSO THE MAXIMUM NUMBER OF FILE DESCRIPTORS IN A LOG RECORD. */
-/* ------------------------------------------------------------------------- */
-#define ZPOS_LOG_TYPE 0
-#define ZPOS_NO_FD 1
-#define ZPOS_FILE_NO 2
-#define ZMAX_LOG_FILES_IN_PAGE_ZERO 40
-/* ------------------------------------------------------------------------- */
-/* THE POSITIONS WITHIN A PREPARE LOG RECORD AND A NEW PREPARE */
-/* LOG RECORD. */
-/* ------------------------------------------------------------------------- */
-#define ZPOS_HASH_VALUE 2
-#define ZPOS_SCHEMA_VERSION 3
-#define ZPOS_TRANS_TICKET 4
-#define ZPOS_OP_TYPE 5
-#define ZPOS_NO_ATTRINFO 6
-#define ZPOS_NO_KEYINFO 7
-/* ------------------------------------------------------------------------- */
-/* THE POSITIONS WITHIN A COMMIT LOG RECORD. */
-/* ------------------------------------------------------------------------- */
-#define ZPOS_COMMIT_TRANSID1 1
-#define ZPOS_COMMIT_TRANSID2 2
-#define ZPOS_COMMIT_GCI 3
-#define ZPOS_COMMIT_TABLE_REF 4
-#define ZPOS_COMMIT_FRAGID 5
-#define ZPOS_COMMIT_FILE_NO 6
-#define ZPOS_COMMIT_START_PAGE_NO 7
-#define ZPOS_COMMIT_START_PAGE_INDEX 8
-#define ZPOS_COMMIT_STOP_PAGE_NO 9
-/* ------------------------------------------------------------------------- */
-/* THE POSITIONS WITHIN A ABORT LOG RECORD. */
-/* ------------------------------------------------------------------------- */
-#define ZPOS_ABORT_TRANSID1 1
-#define ZPOS_ABORT_TRANSID2 2
-/* ------------------------------------------------------------------------- */
-/* THE POSITION WITHIN A COMPLETED GCI LOG RECORD. */
-/* ------------------------------------------------------------------------- */
-#define ZPOS_COMPLETED_GCI 1
-/* ------------------------------------------------------------------------- */
-/* THE POSITIONS WITHIN A NEW PREPARE LOG RECORD. */
-/* ------------------------------------------------------------------------- */
-#define ZPOS_NEW_PREP_FILE_NO 8
-#define ZPOS_NEW_PREP_PAGE_REF 9
-
-#define ZLAST_WRITE_IN_FILE 1
-#define ZENFORCE_WRITE 2
-/* ------------------------------------------------------------------------- */
-/* CONSTANTS USED AS INPUT TO SUBROUTINE WRITE_LOG_PAGES AMONG OTHERS. */
-/* ------------------------------------------------------------------------- */
-#define ZNORMAL 0
-#define ZINIT 1
-/* ------------------------------------------------------------------------- */
-/* CONSTANTS USED BY CONTINUEB TO DEDUCE WHICH CONTINUE SIGNAL IS TO */
-/* BE EXECUTED AS A RESULT OF THIS CONTINUEB SIGNAL. */
-/* ------------------------------------------------------------------------- */
-#define ZLOG_LQHKEYREQ 0
-#define ZPACK_LQHKEYREQ 1
-#define ZSEND_ATTRINFO 2
-#define ZSR_GCI_LIMITS 3
-#define ZSR_LOG_LIMITS 4
-#define ZSEND_EXEC_CONF 5
-#define ZEXEC_SR 6
-#define ZSR_FOURTH_COMP 7
-#define ZINIT_FOURTH 8
-#define ZTIME_SUPERVISION 9
-#define ZSR_PHASE3_START 10
-#define ZLQH_TRANS_NEXT 11
-#define ZLQH_RELEASE_AT_NODE_FAILURE 12
-#define ZSCAN_TC_CONNECT 13
-#define ZINITIALISE_RECORDS 14
-#define ZINIT_GCP_REC 15
-#define ZRESTART_OPERATIONS_AFTER_STOP 16
-#define ZCHECK_LCP_STOP_BLOCKED 17
-#define ZSCAN_MARKERS 18
-#define ZOPERATION_EVENT_REP 19
-#define ZPREP_DROP_TABLE 20
-
-/* ------------------------------------------------------------------------- */
-/* NODE STATE DURING SYSTEM RESTART, VARIABLES CNODES_SR_STATE */
-/* AND CNODES_EXEC_SR_STATE. */
-/* ------------------------------------------------------------------------- */
-#define ZSTART_SR 1
-#define ZEXEC_SR_COMPLETED 2
-/* ------------------------------------------------------------------------- */
-/* CONSTANTS USED BY NODE STATUS TO DEDUCE THE STATUS OF A NODE. */
-/* ------------------------------------------------------------------------- */
-#define ZNODE_UP 0
-#define ZNODE_DOWN 1
-/* ------------------------------------------------------------------------- */
-/* START PHASES */
-/* ------------------------------------------------------------------------- */
-#define ZLAST_START_PHASE 255
-#define ZSTART_PHASE1 1
-#define ZSTART_PHASE2 2
-#define ZSTART_PHASE3 3
-#define ZSTART_PHASE4 4
-#define ZSTART_PHASE6 6
-/* ------------------------------------------------------------------------- */
-/* CONSTANTS USED BY SCAN AND COPY FRAGMENT PROCEDURES */
-/* ------------------------------------------------------------------------- */
-#define ZSTORED_PROC_SCAN 0
-#define ZSTORED_PROC_COPY 2
-#define ZDELETE_STORED_PROC_ID 3
-//#define ZSCAN_NEXT 1
-//#define ZSCAN_NEXT_COMMIT 2
-//#define ZSCAN_NEXT_ABORT 12
-#define ZCOPY_COMMIT 3
-#define ZCOPY_REPEAT 4
-#define ZCOPY_ABORT 5
-#define ZCOPY_CLOSE 6
-//#define ZSCAN_CLOSE 6
-//#define ZEMPTY_FRAGMENT 0
-#define ZWRITE_LOCK 1
-#define ZSCAN_FRAG_CLOSED 2
-/* ------------------------------------------------------------------------- */
-/* ERROR CODES ADDED IN VERSION 0.1 AND 0.2 */
-/* ------------------------------------------------------------------------- */
-#define ZNOT_FOUND 1 // Not an error code, a return value
-#define ZNO_FREE_LQH_CONNECTION 414
-#define ZGET_DATAREC_ERROR 418
-#define ZGET_ATTRINBUF_ERROR 419
-#define ZNO_FREE_FRAGMENTREC 460 // Insert new fragment error code
-#define ZTAB_FILE_SIZE 464 // Insert new fragment error code + Start kernel
-#define ZNO_ADD_FRAGREC 465 // Insert new fragment error code
-/* ------------------------------------------------------------------------- */
-/* ERROR CODES ADDED IN VERSION 0.3 */
-/* ------------------------------------------------------------------------- */
-#define ZTAIL_PROBLEM_IN_LOG_ERROR 410
-#define ZGCI_TOO_LOW_ERROR 429 // GCP_SAVEREF error code
-#define ZTAB_STATE_ERROR 474 // Insert new fragment error code
-#define ZTOO_NEW_GCI_ERROR 479 // LCP Start error
-/* ------------------------------------------------------------------------- */
-/* ERROR CODES ADDED IN VERSION 0.4 */
-/* ------------------------------------------------------------------------- */
-
-#define ZNO_FREE_FRAG_SCAN_REC_ERROR 490 // SCAN_FRAGREF error code
-#define ZCOPY_NO_FRAGMENT_ERROR 491 // COPY_FRAGREF error code
-#define ZTAKE_OVER_ERROR 499
-#define ZCOPY_NODE_ERROR 1204
-#define ZTOO_MANY_COPY_ACTIVE_ERROR 1208 // COPY_FRAG and COPY_ACTIVEREF code
-#define ZCOPY_ACTIVE_ERROR 1210 // COPY_ACTIVEREF error code
-#define ZNO_TC_CONNECT_ERROR 1217 // Simple Read + SCAN
-/* ------------------------------------------------------------------------- */
-/* ERROR CODES ADDED IN VERSION 1.X */
-/* ------------------------------------------------------------------------- */
-//#define ZSCAN_BOOK_ACC_OP_ERROR 1219 // SCAN_FRAGREF error code
-#define ZFILE_CHANGE_PROBLEM_IN_LOG_ERROR 1220
-#define ZTEMPORARY_REDO_LOG_FAILURE 1221
-#define ZNO_FREE_MARKER_RECORDS_ERROR 1222
-#define ZNODE_SHUTDOWN_IN_PROGESS 1223
-#define ZTOO_MANY_FRAGMENTS 1224
-#define ZTABLE_NOT_DEFINED 1225
-#define ZDROP_TABLE_IN_PROGRESS 1226
-#define ZINVALID_SCHEMA_VERSION 1227
-
-/* ------------------------------------------------------------------------- */
-/* ERROR CODES ADDED IN VERSION 2.X */
-/* ------------------------------------------------------------------------- */
-#define ZNODE_FAILURE_ERROR 400
-/* ------------------------------------------------------------------------- */
-/* ERROR CODES FROM ACC */
-/* ------------------------------------------------------------------------- */
-#define ZNO_TUPLE_FOUND 626
-#define ZTUPLE_ALREADY_EXIST 630
-/* ------------------------------------------------------------------------- */
-/* ERROR CODES FROM TUP */
-/* ------------------------------------------------------------------------- */
-#define ZSEARCH_CONDITION_FALSE 899
-#define ZUSER_ERROR_CODE_LIMIT 6000
-#endif
-
-/**
- * @class dblqh
- *
- * @section secIntro Introduction
- *
- * Dblqh is the coordinator of the LDM. Dblqh is responsible for
- * performing operations on tuples. It does this job with help of
- * Dbacc block (that manages the index structures) and Dbtup
- * (that manages the tuples).
- *
- * Dblqh also keeps track of the participants and acts as a coordinator of
- * 2-phase commits. Logical redo logging is also handled by the Dblqh
- * block.
- *
- * @section secModules Modules
- *
- * The code is partitioned into the following modules:
- * - START / RESTART
- * - Start phase 1: Load our block reference and our processor id
- * - Start phase 2: Initiate all records within the block
- * Connect LQH with ACC and TUP.
- * - Start phase 4: Connect LQH with LQH. Connect every LQH with
- * every LQH in the database system.
- * If initial start, then create the fragment log files.
- * If system restart or node restart,
- * then open the fragment log files and
- * find the end of the log files.
- * - ADD / DELETE FRAGMENT<br>
- * Used by dictionary to create new fragments and delete old fragments.
- * - EXECUTION<br>
- * handles the reception of lqhkeyreq and all processing
- * of operations on behalf of this request.
-   *    This also involves reception of various types of attrinfo
- * and keyinfo.
- * It also involves communication with ACC and TUP.
- * - LOG<br>
- * The log module handles the reading and writing of the log.
- * It is also responsible for handling system restart.
- * It controls the system restart in TUP and ACC as well.
- * - TRANSACTION<br>
- * This module handles the commit and the complete phases.
- * - MODULE TO HANDLE TC FAILURE<br>
- * - SCAN<br>
- * This module contains the code that handles a scan of a particular
- * fragment.
- * It operates under the control of TC and orders ACC to
- * perform a scan of all tuples in the fragment.
- * TUP performs the necessary search conditions
- * to ensure that only valid tuples are returned to the application.
- * - NODE RECOVERY<br>
- * Used when a node has failed.
- * It performs a copy of a fragment to a new replica of the fragment.
-   *    It also shuts down all connections to the failed node.
- * - LOCAL CHECKPOINT<br>
- * Handles execution and control of LCPs
- * It controls the LCPs in TUP and ACC.
- * It also interacts with DIH to control which GCPs are recoverable.
- * - GLOBAL CHECKPOINT<br>
- * Helps DIH in discovering when GCPs are recoverable.
- * It handles the request gcp_savereq that requests LQH to
- * save a particular GCP to disk and respond when completed.
- * - FILE HANDLING<br>
- * With submodules:
- * - SIGNAL RECEPTION
- * - NORMAL OPERATION
- * - FILE CHANGE
- * - INITIAL START
- * - SYSTEM RESTART PHASE ONE
-   *      - SYSTEM RESTART PHASE TWO
- * - SYSTEM RESTART PHASE THREE
- * - SYSTEM RESTART PHASE FOUR
- * - ERROR
- * - TEST
- * - LOG
- */
-class Dblqh: public SimulatedBlock {
-public:
- enum LcpCloseState {
- LCP_IDLE = 0,
- LCP_RUNNING = 1, // LCP is running
- LCP_CLOSE_STARTED = 2, // Completion(closing of files) has started
- ACC_LCP_CLOSE_COMPLETED = 3,
- TUP_LCP_CLOSE_COMPLETED = 4
- };
-
- enum ExecUndoLogState {
- EULS_IDLE = 0,
- EULS_STARTED = 1,
- EULS_COMPLETED = 2,
- EULS_ACC_COMPLETED = 3,
- EULS_TUP_COMPLETED = 4
- };
-
- struct AddFragRecord {
- enum AddFragStatus {
- FREE = 0,
- ACC_ADDFRAG = 1,
- WAIT_TWO_TUP = 2,
- WAIT_ONE_TUP = 3,
- WAIT_TWO_TUX = 4,
- WAIT_ONE_TUX = 5,
- WAIT_ADD_ATTR = 6,
- TUP_ATTR_WAIT1 = 7,
- TUP_ATTR_WAIT2 = 8,
- TUX_ATTR_WAIT1 = 9,
- TUX_ATTR_WAIT2 = 10
- };
- LqhAddAttrReq::Entry attributes[LqhAddAttrReq::MAX_ATTRIBUTES];
- UintR accConnectptr;
- AddFragStatus addfragStatus;
- UintR dictConnectptr;
- UintR fragmentPtr;
- UintR nextAddfragrec;
- UintR noOfAllocPages;
- UintR schemaVer;
- UintR tup1Connectptr;
- UintR tup2Connectptr;
- UintR tux1Connectptr;
- UintR tux2Connectptr;
- UintR checksumIndicator;
- UintR GCPIndicator;
- BlockReference dictBlockref;
- Uint32 m_senderAttrPtr;
- Uint16 addfragErrorCode;
- Uint16 attrSentToTup;
- Uint16 attrReceived;
- Uint16 addFragid;
- Uint16 fragid1;
- Uint16 fragid2;
- Uint16 noOfAttr;
- Uint16 noOfNull;
- Uint16 tabId;
- Uint16 totalAttrReceived;
- Uint16 fragCopyCreation;
- Uint16 noOfKeyAttr;
- Uint32 noOfNewAttr; // noOfCharsets in upper half
- Uint16 noOfAttributeGroups;
- Uint16 lh3DistrBits;
- Uint16 tableType;
- Uint16 primaryTableId;
- };// Size 108 bytes
- typedef Ptr<AddFragRecord> AddFragRecordPtr;
-
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* $$$$$$$ ATTRIBUTE INFORMATION RECORD $$$$$$$ */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /**
- * Can contain one (1) attrinfo signal.
- * One signal contains 24 attr. info words.
- * But 32 elements are used to make plex happy.
-   *       Some of the elements are used for the following things:
- * - Data length in this record is stored in the
- * element indexed by ZINBUF_DATA_LEN.
- * - Next attrinbuf is pointed out by the element
- * indexed by ZINBUF_NEXT.
- */
- struct Attrbuf {
- UintR attrbuf[32];
- }; // Size 128 bytes
- typedef Ptr<Attrbuf> AttrbufPtr;
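-
-  /* Editor's illustrative sketch, not part of the original header: how the
-   * bookkeeping elements described above could be consumed. The helper name,
-   * the pool array argument and the index arguments are hypothetical; the
-   * real block addresses Attrbuf records through its own pool and uses the
-   * ZINBUF_DATA_LEN / ZINBUF_NEXT indices directly. RNIL is assumed to
-   * terminate the chain.
-   */
-  static Uint32 sumAttrinfoWords(const Attrbuf* pool, Uint32 first,
-                                 Uint32 lenIndex, Uint32 nextIndex) {
-    Uint32 total = 0;
-    for (Uint32 i = first; i != RNIL; i = pool[i].attrbuf[nextIndex])
-      total += pool[i].attrbuf[lenIndex];   // words used in this Attrbuf
-    return total;
-  }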
-
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* $$$$$$$ DATA BUFFER $$$$$$$ */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /**
- * This buffer is used as a general data storage.
- */
- struct Databuf {
- UintR data[4];
- UintR nextDatabuf;
- }; // size 20 bytes
- typedef Ptr<Databuf> DatabufPtr;
-
- struct ScanRecord {
- enum ScanState {
- SCAN_FREE = 0,
- WAIT_STORED_PROC_COPY = 1,
- WAIT_STORED_PROC_SCAN = 2,
- WAIT_NEXT_SCAN_COPY = 3,
- WAIT_NEXT_SCAN = 4,
- WAIT_DELETE_STORED_PROC_ID_SCAN = 5,
- WAIT_DELETE_STORED_PROC_ID_COPY = 6,
- WAIT_ACC_COPY = 7,
- WAIT_ACC_SCAN = 8,
- WAIT_SCAN_NEXTREQ = 10,
- WAIT_CLOSE_SCAN = 12,
- WAIT_CLOSE_COPY = 13,
- WAIT_RELEASE_LOCK = 14,
- WAIT_TUPKEY_COPY = 15,
- WAIT_LQHKEY_COPY = 16,
- IN_QUEUE = 17
- };
- enum ScanType {
- ST_IDLE = 0,
- SCAN = 1,
- COPY = 2
- };
-
- UintR scan_acc_op_ptr[32];
- Uint32 scan_acc_index;
- Uint32 scan_acc_attr_recs;
- UintR scanApiOpPtr;
- UintR scanLocalref[2];
-
- Uint32 m_max_batch_size_rows;
- Uint32 m_max_batch_size_bytes;
-
- Uint32 m_curr_batch_size_rows;
- Uint32 m_curr_batch_size_bytes;
-
- bool check_scan_batch_completed() const;
-
- UintR copyPtr;
- union {
- Uint32 nextPool;
- Uint32 nextList;
- };
- Uint32 prevList;
- Uint32 nextHash;
- Uint32 prevHash;
- bool equal(const ScanRecord & key) const {
- return scanNumber == key.scanNumber && fragPtrI == key.fragPtrI;
- }
- Uint32 hashValue() const {
- return fragPtrI ^ scanNumber;
- }
-
- UintR scanAccPtr;
- UintR scanAiLength;
- UintR scanErrorCounter;
- UintR scanLocalFragid;
- UintR scanSchemaVersion;
-
- /**
-     * This is _always_ the main table, even in a range scan,
-     * in which case scanTcrec->fragmentptr is different
- */
- Uint32 fragPtrI;
- UintR scanStoredProcId;
- ScanState scanState;
- UintR scanTcrec;
- ScanType scanType;
- BlockReference scanApiBlockref;
- NodeId scanNodeId;
- Uint16 scanReleaseCounter;
- Uint16 scanNumber;
-
- // scan source block ACC TUX TUP
- BlockReference scanBlockref;
-
- Uint8 scanCompletedStatus;
- Uint8 scanFlag;
- Uint8 scanLockHold;
- Uint8 scanLockMode;
- Uint8 readCommitted;
- Uint8 rangeScan;
- Uint8 descending;
- Uint8 tupScan;
- Uint8 scanTcWaiting;
- Uint8 scanKeyinfoFlag;
- Uint8 m_last_row;
- }; // Size 272 bytes
- typedef Ptr<ScanRecord> ScanRecordPtr;
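-
-  /* Editor's illustrative sketch, not part of the original header: the
-   * intent behind the m_max/m_curr batch counters above and the declared
-   * check_scan_batch_completed() helper. The function name and the
-   * "0 means unlimited" convention are assumptions for illustration only.
-   */
-  static bool scanBatchLimitReached(Uint32 maxRows, Uint32 curRows,
-                                    Uint32 maxBytes, Uint32 curBytes) {
-    // A batch is treated as done once either configured limit is reached.
-    return (maxRows  > 0 && curRows  >= maxRows) ||
-           (maxBytes > 0 && curBytes >= maxBytes);
-  }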
-
- struct Fragrecord {
- enum ExecSrStatus {
- IDLE = 0,
- ACTIVE_REMOVE_AFTER = 1,
- ACTIVE = 2
- };
- /**
- * Possible state transitions are:
- * - FREE -> DEFINED Fragment record is allocated
- * - DEFINED -> ACTIVE Add fragment is completed and
- * fragment is ready to
- * receive operations.
- * - DEFINED -> ACTIVE_CREATION Add fragment is completed and
- * fragment is ready to
- * receive operations in parallel
- * with a copy fragment
- * which is performed from the
- * primary replica
- * - DEFINED -> CRASH_RECOVERING A fragment is ready to be
- * recovered from a local
- * checkpoint on disk
- * - ACTIVE -> BLOCKED A local checkpoint is to be
- * started. No more operations
- * are allowed to be started until
- * the local checkpoint
- * has been started.
- * - ACTIVE -> REMOVING A fragment is removed from the node
- * - BLOCKED -> ACTIVE Operations are allowed again in
- * the fragment.
- * - CRASH_RECOVERING -> ACTIVE A fragment has been recovered and
-   *                                       is now ready for
- * operations again.
- * - CRASH_RECOVERING -> REMOVING Fragment recovery failed or
- * was cancelled.
-   * -  ACTIVE_CREATION -> ACTIVE        A fragment has now been copied and
- * is a normal fragment
- * - ACTIVE_CREATION -> REMOVING Copying of the fragment failed
- * - REMOVING -> FREE Removing of the fragment is
- * completed and the fragment
- * is now free again.
- */
- enum FragStatus {
- FREE = 0, ///< Fragment record is currently not in use
- FSACTIVE = 1, ///< Fragment is defined and usable for operations
- DEFINED = 2, ///< Fragment is defined but not yet usable by
- ///< operations
- BLOCKED = 3, ///< LQH is waiting for all active operations to
- ///< complete the current phase so that the
- ///< local checkpoint can be started.
- ACTIVE_CREATION = 4, ///< Fragment is defined and active but is under
- ///< creation by the primary LQH.
- CRASH_RECOVERING = 5, ///< Fragment is recovering after a crash by
- ///< executing the fragment log and so forth.
- ///< Will need further breakdown.
-    REMOVING = 6          ///< The fragment is currently being removed.
- ///< Operations are not allowed.
- };
- enum LogFlag {
- STATE_TRUE = 0,
- STATE_FALSE = 1
- };
- enum SrStatus {
- SS_IDLE = 0,
- SS_STARTED = 1,
- SS_COMPLETED = 2
- };
- enum LcpFlag {
- LCP_STATE_TRUE = 0,
- LCP_STATE_FALSE = 1
- };
- /**
- * Last GCI for executing the fragment log in this phase.
- */
- UintR execSrLastGci[4];
- /**
- * Start GCI for executing the fragment log in this phase.
- */
- UintR execSrStartGci[4];
- /**
- * Requesting user pointer for executing the fragment log in
- * this phase
- */
- UintR execSrUserptr[4];
- /**
-   *       The LCP identifiers of the LCPs.
- * =0 means that the LCP number has not been stored.
- * The LCP identifier is supplied by DIH when starting the LCP.
- */
- UintR lcpId[MAX_LCP_STORED];
- UintR maxGciInLcp;
- /**
- * This variable contains the maximum global checkpoint
- * identifier that exists in a certain local checkpoint.
- * Maximum 4 local checkpoints is possible in this release.
-   *       A maximum of 4 local checkpoints is possible in this release.
- UintR maxGciCompletedInLcp;
- UintR srLastGci[4];
- UintR srStartGci[4];
- /**
- * The fragment pointers in ACC
- */
- UintR accFragptr[2];
- /**
-   *       The EXEC_SR variables are used to keep track of which fragments
-   *       are interested in being executed as part of executing the
-   *       fragment log.
-   *       It is initialised for every phase of executing the
-   *       fragment log (the fragment log can be executed up to four times).
- *
- * Each execution is capable of executing the log records on four
- * fragment replicas.
- */
- /**
- * Requesting block reference for executing the fragment log
- * in this phase.
- */
- BlockReference execSrBlockref[4];
- /**
- * This variable contains references to active scan and copy
- * fragment operations on the fragment.
- * A maximum of four concurrently active is allowed.
-   *       A maximum of four concurrently active operations is allowed.
- typedef Bitmask<4> ScanNumberMask;
- ScanNumberMask m_scanNumberMask;
- DLList<ScanRecord>::Head m_activeScans;
- DLFifoList<ScanRecord>::Head m_queuedScans;
-
- Uint16 srLqhLognode[4];
- /**
- * The fragment pointers in TUP and TUX
- */
- UintR tupFragptr[2];
- UintR tuxFragptr[2];
- /**
- * This queue is where operations are put when blocked in ACC
- * during start of a local chkp.
- */
- UintR accBlockedList;
- /**
- * This is the queue where all operations that are active on the
- * fragment is put.
-   *       fragment are put.
-   *       This is used to deduce when the fragment no
-   *       longer contains any active operations.
- */
- UintR activeList;
- /**
-   *       This variable keeps track of how many active operations have
-   *       skipped writing the log but have not yet committed
-   *       or aborted.  This is used during start of fragment.
- */
- UintR activeTcCounter;
- /**
- * This status specifies whether this fragment is actively
- * engaged in executing the fragment log.
- */
- ExecSrStatus execSrStatus;
- /**
- * The fragment id of this fragment.
- */
- UintR fragId;
- /**
- * Status of fragment
- */
- FragStatus fragStatus;
- /**
- * Indicates a local checkpoint is active and thus can generate
- * UNDO log records.
- */
- UintR fragActiveStatus;
- /**
- * Reference to current LCP record.
- * If no LCP is ongoing on the fragment then the value is RNIL.
- * If LCP_REF /= RNIL then a local checkpoint is ongoing in the
- * fragment.
- * LCP_STATE in LCP_RECORD specifies the state of the
- * local checkpoint.
- */
- UintR lcpRef;
- /**
- * This flag indicates whether logging is currently activated at
- * the fragment.
- * During a system restart it is temporarily shut off.
- * Some fragments have it permanently shut off.
- */
- LogFlag logFlag;
- UintR masterPtr;
- /**
- * This variable contains the maximum global checkpoint identifier
- * which was completed when the local checkpoint was started.
- */
- /**
- * Reference to the next fragment record in a free list of fragment
- * records.
- */
- UintR nextFrag;
- /**
- * The newest GCI that has been committed on fragment
- */
- UintR newestGci;
- SrStatus srStatus;
- UintR srUserptr;
- /**
- * The starting global checkpoint of this fragment.
- */
- UintR startGci;
- /**
- * A reference to the table owning this fragment.
- */
- UintR tabRef;
- /**
- * This is the queue to put operations that have been blocked
- * during start of a local chkp.
- */
- UintR firstWaitQueue;
- UintR lastWaitQueue;
- /**
- * The block reference to ACC on the fragment makes it
- * possible to have different ACC blocks for different
- * fragments in the future.
- */
- BlockReference accBlockref;
- /**
- * Ordered index block.
- */
- BlockReference tuxBlockref;
- /**
- * The master block reference as sent in COPY_ACTIVEREQ.
- */
- BlockReference masterBlockref;
- /**
- * These variables are used during system restart to recall
- * from which node to execute the fragment log and which GCI's
- * this node should start and stop from. Also to remember who
- * to send the response to when system restart is completed.
- */
- BlockReference srBlockref;
- /**
- * The block reference to TUP on the fragment makes it
- * possible to have different TUP blocks for different
- * fragments in the future.
- */
- BlockReference tupBlockref;
- /**
- * This state indicates if the fragment will participate in a
- * checkpoint.
- * Temporary tables with Fragrecord::logFlag permanently off
- * will also have Fragrecord::lcpFlag off.
- */
- LcpFlag lcpFlag;
- /**
- * Used to ensure that updates started with old
- * configuration do not arrive here after the copy fragment
- * has started.
-   *       If they are allowed to arrive after that, they
-   *       could update a record that has already been replicated to
-   *       the new node. This type of arrival should be extremely
-   *       rare, but we must nevertheless ensure that no harm is done.
- */
- Uint16 copyNode;
- /**
- * This variable ensures that only one copy fragment is
- * active at a time on the fragment.
- */
- Uint8 copyFragState;
- /**
- * The number of fragment replicas that will execute the log
- * records in this round of executing the fragment
-   *       log. A maximum of four is possible.
- */
- Uint8 execSrNoReplicas;
- /**
- * This variable contains what type of replica this fragment
- * is. Two types are possible:
- * - Primary/Backup replica = 0
- * - Stand-by replica = 1
- *
- * It is not possible to distinguish between primary and
- * backup on a fragment.
- * This can only be done per transaction.
- * DIH can change from primary to backup without informing
- * the various replicas about this change.
- */
- Uint8 fragCopy;
- /**
- * This is the last fragment distribution key that we have
- * heard of.
- */
- Uint8 fragDistributionKey;
- /**
- * The identity of the next local checkpoint this fragment
- * should perform.
- */
- Uint8 nextLcp;
- /**
-   *       How many local checkpoints the fragment contains
- */
- Uint8 srChkpnr;
- Uint8 srNoLognodes;
- /**
- * Table type.
- */
- Uint8 tableType;
- /**
- * For ordered index fragment, i-value of corresponding
- * fragment in primary table.
- */
- UintR tableFragptr;
- };
- typedef Ptr<Fragrecord> FragrecordPtr;
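-
-  /* Editor's illustrative sketch, not part of the original header: per the
-   * FragStatus descriptions above, user operations are admitted while a
-   * fragment is FSACTIVE, and also during ACTIVE_CREATION (in parallel with
-   * the copy from the primary replica). The helper name is hypothetical.
-   */
-  static bool fragAcceptsOperations(Fragrecord::FragStatus status) {
-    return status == Fragrecord::FSACTIVE ||
-           status == Fragrecord::ACTIVE_CREATION;
-  }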
-
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* $$$$$$$ GLOBAL CHECKPOINT RECORD $$$$$$ */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /**
- * This record describes a global checkpoint that is
- * completed. It waits for all log records belonging to this
- * global checkpoint to be saved on disk.
- */
- struct GcpRecord {
- /**
- * The file number within each log part where the log was
- * located when gcp_savereq was received. The last record
- * belonging to this global checkpoint is certainly before
- * this place in the log. We could come even closer but it
- * would cost performance and doesn't seem like a good
- * idea. This is simple and it works.
- */
- Uint16 gcpFilePtr[4];
- /**
- * The page number within the file for each log part.
- */
- Uint16 gcpPageNo[4];
- /**
- * The word number within the last page that was written for
- * each log part.
- */
- Uint16 gcpWordNo[4];
- /**
- * The identity of this global checkpoint.
- */
- UintR gcpId;
- /**
- * The state of this global checkpoint, one for each log part.
- */
- Uint8 gcpLogPartState[4];
- /**
- * The sync state of this global checkpoint, one for each
- * log part.
- */
- Uint8 gcpSyncReady[4];
- /**
- * User pointer of the sender of gcp_savereq (= master DIH).
- */
- UintR gcpUserptr;
- /**
- * Block reference of the sender of gcp_savereq
- * (= master DIH).
- */
- BlockReference gcpBlockref;
- }; // Size 44 bytes
- typedef Ptr<GcpRecord> GcpRecordPtr;
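-
-  /* Editor's illustrative sketch, not part of the original header: a GCP
-   * record is assumed to be done once every log part has synched its log
-   * to disk, i.e. all four gcpSyncReady entries are set. The helper name
-   * is hypothetical.
-   */
-  static bool gcpAllPartsSynched(const GcpRecord& gcp) {
-    for (Uint32 part = 0; part < 4; part++) {
-      if (gcp.gcpSyncReady[part] == 0)
-        return false;                 // this log part is not yet synched
-    }
-    return true;
-  }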
-
- struct HostRecord {
- bool inPackedList;
- UintR noOfPackedWordsLqh;
- UintR packedWordsLqh[30];
- UintR noOfPackedWordsTc;
- UintR packedWordsTc[29];
- BlockReference hostLqhBlockRef;
- BlockReference hostTcBlockRef;
- };// Size 128 bytes
- typedef Ptr<HostRecord> HostRecordPtr;
-
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* $$$$$$$ LOCAL CHECKPOINT RECORD $$$$$$$ */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /**
- * This record contains the information about a local
- * checkpoint that is ongoing. This record is also used as a
- * system restart record.
- */
- struct LcpRecord {
- LcpRecord() { m_EMPTY_LCP_REQ.clear(); }
-
- enum LcpState {
- LCP_IDLE = 0,
- LCP_COMPLETED = 2,
- LCP_WAIT_FRAGID = 3,
- LCP_WAIT_TUP_PREPLCP = 4,
- LCP_WAIT_HOLDOPS = 5,
- LCP_WAIT_ACTIVE_FINISH = 6,
- LCP_START_CHKP = 7,
- LCP_BLOCKED_COMP = 8,
- LCP_SR_WAIT_FRAGID = 9,
- LCP_SR_STARTED = 10,
- LCP_SR_COMPLETED = 11
- };
- Uint32 firstLcpLocAcc;
- Uint32 firstLcpLocTup;
- Uint32 lcpAccptr;
-
- LcpState lcpState;
- bool lastFragmentFlag;
-
- struct FragOrd {
- Uint32 fragPtrI;
- LcpFragOrd lcpFragOrd;
- };
- FragOrd currentFragment;
-
- bool lcpQueued;
- FragOrd queuedFragment;
-
- bool reportEmpty;
- NdbNodeBitmask m_EMPTY_LCP_REQ;
- }; // Size 76 bytes
- typedef Ptr<LcpRecord> LcpRecordPtr;
-
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* $$$$$$ LOCAL CHECKPOINT SUPPORT RECORD $$$$$$$ */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /**
- * This record contains the information about an outstanding
- * request to TUP or ACC. Used for both local checkpoints and
- * system restart.
- */
- struct LcpLocRecord {
- enum LcpLocstate {
- IDLE = 0,
- WAIT_TUP_PREPLCP = 1,
- WAIT_LCPHOLDOP = 2,
- HOLDOP_READY = 3,
- ACC_WAIT_STARTED = 4,
- ACC_STARTED = 5,
- ACC_COMPLETED = 6,
- TUP_WAIT_STARTED = 7,
- TUP_STARTED = 8,
- TUP_COMPLETED = 9,
- SR_ACC_STARTED = 10,
- SR_TUP_STARTED = 11,
- SR_ACC_COMPLETED = 12,
- SR_TUP_COMPLETED = 13
- };
- enum WaitingBlock {
- ACC = 0,
- TUP = 1,
- NONE = 2
- };
-
- LcpLocstate lcpLocstate;
- UintR locFragid;
- UintR masterLcpRec;
- UintR nextLcpLoc;
- UintR tupRef;
- WaitingBlock waitingBlock;
- Uint32 accContCounter;
- }; // 28 bytes
- typedef Ptr<LcpLocRecord> LcpLocRecordPtr;
-
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* */
- /* THE RECORDS THAT START BY LOG_ ARE A PART OF THE LOG MANAGER. */
- /* THESE RECORDS ARE USED TO HANDLE THE FRAGMENT LOG. */
- /* */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* $$$$$$$ LOG RECORD $$$$$$$ */
- /* */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* THIS RECORD IS ALIGNED TO BE 256 BYTES. */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /**
- * This record describes the current state of a log.
- * A log consists of a number of log files.
- * These log files are described by the log file record.
- *
- * There will be 4 sets of log files.
-   *       Different tables will use different log files depending
- * on the table id.
- * This ensures that more than one outstanding request can
- * be sent to the file system.
- * The log file to use is found by performing a very simple hash
- * function.
- */
- struct LogPartRecord {
- enum LogPartState {
- IDLE = 0, ///< Nothing happens at the moment
- ACTIVE = 1, ///< An operation is active logging
- SR_FIRST_PHASE = 2, ///< Finding the end of the log and
- ///< the information about global
- ///< checkpoints in the log is ongoing.
- SR_FIRST_PHASE_COMPLETED = 3, ///< First phase completed
- SR_THIRD_PHASE_STARTED = 4, ///< Executing fragment log is in 3rd ph
- SR_THIRD_PHASE_COMPLETED = 5,
- SR_FOURTH_PHASE_STARTED = 6, ///< Finding the log tail and head
- ///< is the fourth phase.
- SR_FOURTH_PHASE_COMPLETED = 7,
- FILE_CHANGE_PROBLEM = 8, ///< For some reason the write to
-                                   ///< page zero in file zero has not
-                                   ///< finished after 15 mbyte of
-                                   ///< log data has been written
- TAIL_PROBLEM = 9 ///< Only 1 mbyte of log left.
- ///< No operations allowed to enter the
- ///< log. Only special log records
- ///< are allowed
- };
- enum WaitWriteGciLog {
- WWGL_TRUE = 0,
- WWGL_FALSE = 1
- };
- enum LogExecState {
- LES_IDLE = 0,
- LES_SEARCH_STOP = 1,
- LES_SEARCH_START = 2,
- LES_EXEC_LOG = 3,
- LES_EXEC_LOG_NEW_MBYTE = 4,
- LES_EXEC_LOG_NEW_FILE = 5,
- LES_EXEC_LOGREC_FROM_FILE = 6,
- LES_EXEC_LOG_COMPLETED = 7,
- LES_WAIT_READ_EXEC_SR_NEW_MBYTE = 8,
- LES_WAIT_READ_EXEC_SR = 9,
- LES_EXEC_LOG_INVALIDATE = 10
- };
-
- /**
- * Is a CONTINUEB(ZLOG_LQHKEYREQ) signal sent and
- * outstanding. We do not want several instances of this
- * signal out in the air since that would create multiple
- * writers of the list.
- */
- UintR LogLqhKeyReqSent;
- /**
- * Contains the current log file where log records are
- * written. During system restart it is used to indicate the
- * last log file.
- */
- UintR currentLogfile;
- /**
- * The log file used to execute log records from far behind.
- */
- UintR execSrExecLogFile;
- /**
- * The currently executing prepare record starts in this log
-   *       page. This variable is used to allow a log record to be
-   *       executed multiple times during execution of the log.
- */
- UintR execSrLogPage;
- /**
-   *       This variable keeps track of the lfo record for the
-   *       pages that were read from disk when an operation's log
-   *       record was not found in the main memory buffer for log
-   *       pages.
- */
- UintR execSrLfoRec;
- /**
- * The starting page number when reading log from far behind.
- */
- UintR execSrStartPageNo;
- /**
- * The last page number when reading log from far behind.
- */
- UintR execSrStopPageNo;
- /**
- * Contains a reference to the first log file, file number 0.
- */
- UintR firstLogfile;
- /**
- * The head of the operations queued for logging.
- */
- UintR firstLogQueue;
- /**
- * This variable contains the oldest operation in this log
-   *       part which has not been committed yet.
- */
- UintR firstLogTcrec;
- /**
- * The first reference to a set of 8 pages. These are used
- * during execution of the log to keep track of which pages
- * are in memory and which are not.
- */
- UintR firstPageRef;
- /**
- * This variable contains the global checkpoint record
- * waiting for disk writes to complete.
- */
- UintR gcprec;
- /**
- * The last reference to a set of 8 pages. These are used
- * during execution of the log to keep track of which pages
- * are in memory and which are not.
- */
- UintR lastPageRef;
- /**
- * The tail of the operations queued for logging.
- */
- UintR lastLogQueue;
- /**
- * This variable contains the newest operation in this log
-   *       part which has not been committed yet.
- */
- UintR lastLogTcrec;
- /**
-   *       This variable indicates which was the last log file that was
- * written before the system crashed. Discovered during
- * system restart.
- */
- UintR lastLogfile;
- /**
- * This variable is used to keep track of the state during
- * the third phase of the system restart, i.e. when
- * LogPartRecord::logPartState ==
- * LogPartRecord::SR_THIRD_PHASE_STARTED.
- */
- LogExecState logExecState;
- /**
- * This variable contains the lap number of this log part.
- */
- UintR logLap;
- /**
- * This variable contains the place to stop executing the log
- * in this phase.
- */
- UintR logLastGci;
- /**
- * This variable contains the place to start executing the
- * log in this phase.
- */
- UintR logStartGci;
- /**
- * The latest GCI completed in this log part.
- */
- UintR logPartNewestCompletedGCI;
- /**
- * The current state of this log part.
- */
- LogPartState logPartState;
- /**
- * A timer that is set every time a log page is sent to disk.
- * Ensures that log pages are not kept in main memory for
- * more than a certain time.
- */
- UintR logPartTimer;
- /**
- * The current timer which is set by the periodic signal
- * received by LQH
- */
- UintR logTimer;
- /**
- * Contains the number of the log tail file and the mbyte
- * reference within that file. This information ensures that
- * the tail is not overwritten when writing new log records.
- */
- UintR logTailFileNo;
- /**
- * The TcConnectionrec used during execution of this log part.
- */
- UintR logTcConrec;
- /**
-   *       The number of pages that currently reside in the main
-   *       memory buffer. It does not refer to pages that are currently
-   *       being read from the log files, only to pages already read
-   *       from the log file.
- */
- UintR mmBufferSize;
- /**
- * Contains the current number of log files in this log part.
- */
- UintR noLogFiles;
- /**
- * This variable is used only during execution of a log
- * record. It keeps track of in which page record a log
-   *       record was started. It is then used to deduce which
-   *       pages are dirty after the log records on the
-   *       page have been executed.
- *
- * It is also used to find out where to write the invalidate
- * command when that is needed.
- */
- UintR prevLogpage;
- /**
- * The number of files remaining to gather GCI information
- * for during system restart. Only used if number of files
-   *       for during system restart. Only used if the number of files
- */
- UintR srRemainingFiles;
- /**
- * The log file where to start executing the log during
- * system restart.
- */
- UintR startLogfile;
- /**
- * The last log file in which to execute the log during system
- * restart.
- */
- UintR stopLogfile;
- /**
- * This variable keeps track of when we want to write a complete
- * gci log record but have been blocked by an ongoing log operation.
- */
- WaitWriteGciLog waitWriteGciLog;
- /**
- * The currently executing prepare record starts in this index
- * in the log page.
- */
- Uint16 execSrLogPageIndex;
- /**
- * Which of the four exec_sr's in the fragment is currently executing
- */
- Uint16 execSrExecuteIndex;
- /**
- * The number of pages executed in the current mbyte.
- */
- Uint16 execSrPagesExecuted;
- /**
- * The number of pages read from disk that have arrived and are
- * currently awaiting execution of the log.
- */
- Uint16 execSrPagesRead;
- /**
-   *       The number of pages read from disk that have not yet arrived
-   *       at the block.
- */
- Uint16 execSrPagesReading;
- /**
- * This variable refers to the new header file where we will
-   *       start writing the log after a system restart has been completed.
- */
- Uint16 headFileNo;
- /**
- * This variable refers to the page number within the header file.
- */
- Uint16 headPageNo;
- /**
- * This variable refers to the index within the new header
- * page.
- */
- Uint16 headPageIndex;
- /**
-   *       This variable indicates which was the last mbyte in the last
- * logfile before a system crash. Discovered during system restart.
- */
- Uint16 lastMbyte;
- /**
- * This variable is used only during execution of a log
- * record. It keeps track of in which file page a log
- * record was started. It is used if it is needed to write a
- * dirty page to disk during log execution (this happens when
- * commit records are invalidated).
- */
- Uint16 prevFilepage;
- /**
- * This is used to save where we were in the execution of log
- * records when we find a commit record that needs to be
- * executed.
- *
- * This variable is also used to remember the index where the
- * log type was in the log record. It is only used in this
- * role when finding a commit record that needs to be
- * invalidated.
- */
- Uint16 savePageIndex;
- Uint8 logTailMbyte;
- /**
- * The mbyte within the starting log file where to start
- * executing the log.
- */
- Uint8 startMbyte;
- /**
- * The last mbyte in which to execute the log during system
- * restart.
- */
- Uint8 stopMbyte;
- /**
- * This variable refers to the file where invalidation is
-   *       occurring during system/node restart.
- */
- Uint16 invalidateFileNo;
- /**
- * This variable refers to the page where invalidation is
-   *       occurring during system/node restart.
- */
- Uint16 invalidatePageNo;
- }; // Size 164 Bytes
- typedef Ptr<LogPartRecord> LogPartRecordPtr;
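-
-  /* Editor's illustrative sketch, not part of the original header: the
-   * "very simple hash function" mentioned in the LogPartRecord description
-   * above is assumed here to be a mask over the four log parts; the real
-   * mapping from table id to log part may differ.
-   */
-  static Uint32 chooseLogPart(Uint32 tableId) {
-    return tableId & 3;   // four log parts, selected by table id
-  }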
-
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* $$$$$$$ LOG FILE RECORD $$$$$$$ */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* THIS RECORD IS ALIGNED TO BE 288 (256 + 32) BYTES. */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /**
- * This record contains information about a log file.
- * A log file contains log records from several tables and
- * fragments of a table. LQH can contain more than
- * one log file to ensure faster log processing.
- *
- * The number of pages to write to disk at a time is
- * configurable.
- */
- struct LogFileRecord {
- enum FileChangeState {
- NOT_ONGOING = 0,
- BOTH_WRITES_ONGOING = 1,
- LAST_WRITE_ONGOING = 2,
- FIRST_WRITE_ONGOING = 3,
- WRITE_PAGE_ZERO_ONGOING = 4
- };
- enum LogFileStatus {
- LFS_IDLE = 0, ///< Log file record not in use
- CLOSED = 1, ///< Log file closed
- OPENING_INIT = 2,
- OPEN_SR_FRONTPAGE = 3, ///< Log file opened as part of system
- ///< restart. Open file 0 to find
- ///< the front page of the log part.
- OPEN_SR_LAST_FILE = 4, ///< Open last log file that was written
- ///< before the system restart.
- OPEN_SR_NEXT_FILE = 5, ///< Open a log file which is 16 files
- ///< backwards to find the next
- ///< information about GCPs.
- OPEN_EXEC_SR_START = 6, ///< Log file opened as part of
- ///< executing
- ///< log during system restart.
- OPEN_EXEC_SR_NEW_MBYTE = 7,
- OPEN_SR_FOURTH_PHASE = 8,
- OPEN_SR_FOURTH_NEXT = 9,
- OPEN_SR_FOURTH_ZERO = 10,
- OPENING_WRITE_LOG = 11, ///< Log file opened as part of writing
- ///< log during normal operation.
- OPEN_EXEC_LOG = 12,
- CLOSING_INIT = 13,
- CLOSING_SR = 14, ///< Log file closed as part of system
- ///< restart. Currently trying to
- ///< find where to start executing the
- ///< log
- CLOSING_EXEC_SR = 15, ///< Log file closed as part of
- ///< executing log during system restart
- CLOSING_EXEC_SR_COMPLETED = 16,
- CLOSING_WRITE_LOG = 17, ///< Log file closed as part of writing
- ///< log during normal operation.
- CLOSING_EXEC_LOG = 18,
- OPEN_INIT = 19,
- OPEN = 20, ///< Log file open
- OPEN_SR_INVALIDATE_PAGES = 21,
- CLOSE_SR_INVALIDATE_PAGES = 22
- };
-
- /**
- * When a new mbyte is started in the log we have to find out
- * how far back in the log we still have prepared operations
-   *       which have been neither committed nor aborted. This variable
- * keeps track of this value for each of the mbytes in this
- * log file. This is used in writing down these values in the
- * header of each log file. That information is used during
- * system restart to find the tail of the log.
- */
- UintR logLastPrepRef[16];
- /**
- * The max global checkpoint completed before the mbyte in the
- * log file was started. One variable per mbyte.
- */
- UintR logMaxGciCompleted[16];
- /**
- * The max global checkpoint started before the mbyte in the log
- * file was started. One variable per mbyte.
- */
- UintR logMaxGciStarted[16];
- /**
- * This variable contains the file name as needed by the file
- * system when opening the file.
- */
- UintR fileName[4];
- /**
- * This variable has a reference to the log page which is
- * currently in use by the log.
- */
- UintR currentLogpage;
- /**
- * The number of the current mbyte in the log file.
- */
- UintR currentMbyte;
- /**
- * This variable is used when changing files. It is to find
- * out when both the last write in the previous file and the
-   *       first write in this file have been completed. After these
- * writes have completed the variable keeps track of when the
- * write to page zero in file zero is completed.
- */
- FileChangeState fileChangeState;
- /**
- * The number of the file within this log part.
- */
- UintR fileNo;
- /**
- * This variable shows where to read/write the next pages into
- * the log. Used when writing the log during normal operation
- * and when reading the log during system restart. It
- * specifies the page position where each page is 8 kbyte.
- */
- UintR filePosition;
- /**
- * This contains the file pointer needed by the file system
- * when reading/writing/closing and synching.
- */
- UintR fileRef;
- /**
- * The head of the pages waiting for shipment to disk.
- * They are filled with log info.
- */
- UintR firstFilledPage;
- /**
- * A list of active read/write operations on the log file.
-   *       Operations are always added at the end, and the first should
- * always complete first.
- */
- UintR firstLfo;
- UintR lastLfo;
- /**
- * The tail of the pages waiting for shipment to disk.
- * They are filled with log info.
- */
- UintR lastFilledPage;
- /**
- * This variable keeps track of the last written page in the
- * file while writing page zero in file zero when changing log
- * file.
- */
- UintR lastPageWritten;
- /**
- * This variable keeps track of the last written word in the
- * last page written in the file while writing page zero in
- * file zero when changing log file.
- */
- UintR lastWordWritten;
- /**
-   *       This variable keeps track of the number of pages written since
-   *       the last synch on this log file.
-   */
-  UintR logFilePagesToDiskWithoutSynch;
-  /**
-   *       The current status of this log file.
- */
- LogFileStatus logFileStatus;
- /**
- * A reference to page zero in this file.
- * This page is written before the file is closed.
- */
- UintR logPageZero;
- /**
- * This variable contains a reference to the record describing
- * this log part. One of four records (0,1,2 or 3).
- */
- UintR logPartRec;
- /**
- * Next free log file record or next log file in this log.
- */
- UintR nextLogFile;
- /**
- * The previous log file.
- */
- UintR prevLogFile;
- /**
- * The number of remaining words in this mbyte of the log file.
- */
- UintR remainingWordsInMbyte;
- /**
- * The current file page within the current log file. This is
- * a reference within the file and not a reference to a log
- * page record. It is used to deduce where log records are
- * written. Particularly completed gcp records and prepare log
- * records.
- */
- Uint16 currentFilepage;
- /**
- * The number of pages in the list referenced by
- * LOG_PAGE_BUFFER.
- */
- Uint16 noLogpagesInBuffer;
- }; // Size 288 bytes
- typedef Ptr<LogFileRecord> LogFileRecordPtr;
-
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* $$$$$$$ LOG OPERATION RECORD $$$$$$$ */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /**
- * This record contains a currently active file operation
- * that has started by the log module.
- */
- struct LogFileOperationRecord {
- enum LfoState {
- IDLE = 0, ///< Operation is not used at the moment
- INIT_WRITE_AT_END = 1, ///< Write in file so that it grows to
- ///< 16 Mbyte
- INIT_FIRST_PAGE = 2, ///< Initialise the first page in a file
- WRITE_GCI_ZERO = 3,
- WRITE_INIT_MBYTE = 4,
- WRITE_DIRTY = 5,
- READ_SR_FRONTPAGE = 6, ///< Read page zero in file zero during
- ///< system restart
- READ_SR_LAST_FILE = 7, ///< Read page zero in last file open
- ///< before system crash
- READ_SR_NEXT_FILE = 8, ///< Read 60 files backwards to find
-                                  ///< further information about GCPs in page
- ///< zero
- READ_SR_LAST_MBYTE = 9,
- READ_EXEC_SR = 10,
- READ_EXEC_LOG = 11,
- READ_SR_FOURTH_PHASE = 12,
- READ_SR_FOURTH_ZERO = 13,
- FIRST_PAGE_WRITE_IN_LOGFILE = 14,
- LAST_WRITE_IN_FILE = 15,
- WRITE_PAGE_ZERO = 16,
- ACTIVE_WRITE_LOG = 17, ///< A write operation during
- ///< writing of log
- READ_SR_INVALIDATE_PAGES = 18,
- WRITE_SR_INVALIDATE_PAGES = 19
- };
- /**
- * We have to remember the log pages read.
- * Otherwise we cannot build the linked list after the pages have
-   *       arrived in main memory.
- */
- UintR logPageArray[16];
- /**
- * A list of the pages that are part of this active operation.
- */
- UintR firstLfoPage;
- /**
- * A timer to ensure that records are not lost.
- */
- UintR lfoTimer;
- /**
- * The word number of the last written word in the last page during
- * a file write.
- */
- UintR lfoWordWritten;
- /**
- * This variable contains the state of the log file operation.
- */
- LfoState lfoState;
- /**
- * The log file that the file operation affects.
- */
- UintR logFileRec;
- /**
- * The log file operations on a file are kept in a linked list.
- */
- UintR nextLfo;
- /**
- * The page number of the first read/written page during a file
- * read/write.
- */
- Uint16 lfoPageNo;
- /**
- * The number of pages written or read during an operation to
- * the log file.
- */
- Uint16 noPagesRw;
- }; // 92 bytes
- typedef Ptr<LogFileOperationRecord> LogFileOperationRecordPtr;
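- /**
- * Added note (an assumption, not the block's own documentation):
- * logPageArray above remembers the page indexes of a multi-page read
- * so that the in-memory page list can be rebuilt once the pages have
- * arrived, e.g. by chaining each page to its successor through the
- * NEXT_PAGE header word (word 4, see LogPageRecord below), roughly:
- *
- * for (Uint32 i = 0; i + 1 < lfo.noPagesRw; i++)
- * logPageRecord[lfo.logPageArray[i]].logPageWord[4] =
- * lfo.logPageArray[i + 1];
- *
- * The real work is done by buildLinkedLogPageList().
- */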
-
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /* $$$$$$$ LOG PAGE RECORD $$$$$$$ */
- /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
- /**
- * These are the 8 k pages used to store log records before storing
- * them in the file system.
- * Since 64 kbyte is sent to disk at a time it is necessary to have
- * at least 4*64 kbytes of log pages.
- * To handle multiple outstanding requests we need some additional pages.
- * Thus we allocate 1 mbyte to ensure that we do not run into problems
- * with an insufficient number of pages.
- */
- struct LogPageRecord {
- /**
- * This variable contains the pages that are sent to disk.
- *
- * All pages contain a header of 12 words:
- * - WORD 0: CHECKSUM Calculated before storing on disk and
- * checked when read from disk.
- * - WORD 1: LAP How many wraparounds the log has
- * experienced since the initial start
- * of the system.
- * - WORD 2: MAX_GCI_COMPLETED The maximum gci that has completed
- * before this page. This gci will not
- * be found in this page, nor later in
- * the log.
- * - WORD 3: MAX_GCI_STARTED The maximum gci that has started
- * before this page.
- * - WORD 4: NEXT_PAGE Pointer to the next page.
- * Only used in main memory
- * - WORD 5: PREVIOUS_PAGE Pointer to the previous page.
- * Currently not used.
- * - WORD 6: VERSION NDB version that wrote the page.
- * - WORD 7: NO_LOG_FILES Number of log files in this log part.
- * - WORD 8: CURRENT PAGE INDEX This keeps track of where we are in the
- * page.
- * This is only used when the page is in
- * memory.
- * - WORD 9: OLD PREPARE FILE NO This keeps track of the oldest prepare
- * operation still alive (not committed
- * or aborted) when this mbyte started.
- * - WORD 10: OLD PREPARE PAGE REF File page reference within this file
- * number.
- * Page no + Page index.
- * If no prepare was alive then these
- * values point to this mbyte.
- * - WORD 11: DIRTY FLAG = 0 means not dirty and
- * = 1 means the page is dirty.
- * Used when executing the log and a
- * need to write invalid commit
- * records arises.
- *
- * The remaining 2036 words are used for log information, i.e.
- * log records.
- *
- * A log record on this page has the following layout:
- * - WORD 0: LOG RECORD TYPE
- * The following types are supported:
- * - PREPARE OPERATION An operation not yet committed.
- * - NEW PREPARE OPERATION A prepared operation already
- * logged is inserted
- * into the log again so that the
- * log tail can be advanced.
- * This can happen when a transaction
- * stays uncommitted for a long time.
- * - ABORT TRANSACTION A previously prepared transaction
- * was aborted.
- * - COMMIT TRANSACTION A previously prepared transaction
- * was committed.
- * - INVALID COMMIT A previous commit record was
- * invalidated by a
- * subsequent system restart.
- * A log record must be invalidated
- * in a system restart if it belongs
- * to a global checkpoint id which
- * is not included in the system
- * restart.
- * Otherwise it will be included in
- * a subsequent system restart since
- * it will then most likely belong
- * to a global checkpoint id which
- * is part of that system
- * restart.
- * This is not a correct behaviour
- * since this operation is lost in a
- * system restart and should not
- * reappear at a later system
- * restart.
- * - COMPLETED GCI A GCI has now been completed.
- * - FRAGMENT SPLIT A fragment has been split
- * (not implemented yet)
- * - FILE DESCRIPTOR This is always the first log record
- * in a file.
- * It is always placed on page 0 after
- * the header.
- * It is written when the file is
- * opened and when the file is closed.
- * - NEXT LOG RECORD This log record only records where
- * the next log record starts.
- * - NEXT MBYTE RECORD This log record specifies that there
- * are no more log records in this mbyte.
- *
- *
- * A FILE DESCRIPTOR log record continues as follows:
- * - WORD 1: NO_LOG_DESCRIPTORS This defines the number of
- * descriptors of log files that
- * will follow hereafter (max 32).
- * Each log file descriptor describes
- * max_gci_completed,
- * max_gci_started and log_lap at
- * every 1 mbyte of the log file.
- * Since a log file is always 16
- * mbyte, 16 entries are needed for
- * each of max_gci_completed,
- * max_gci_started and log_lap, i.e.
- * 48 words per log file descriptor
- * (max 32*48 = 1536 words, which
- * always fits in page 0).
- * - WORD 2: LAST LOG FILE The number of the log file currently
- * open. This is only valid in file 0.
- * - WORD 3 - WORD 18: MAX_GCI_COMPLETED for every 1 mbyte
- * in this log file.
- * - WORD 19 - WORD 34: MAX_GCI_STARTED for every 1 mbyte
- * in this log file.
- *
- * Then it continues for NO_LOG_DESCRIPTORS until all subsequent
- * log files (max 32) have been properly described.
- *
- *
- * A PREPARE OPERATION log record continues as follows:
- * - WORD 1: LOG RECORD SIZE
- * - WORD 2: HASH VALUE
- * - WORD 3: SCHEMA VERSION
- * - WORD 4: OPERATION TYPE
- * = 0 READ,
- * = 1 UPDATE,
- * = 2 INSERT,
- * = 3 DELETE
- * - WORD 5: NUMBER OF WORDS IN ATTRINFO PART
- * - WORD 6: KEY LENGTH IN WORDS
- * - WORD 7 - (WORD 7 + KEY_LENGTH - 1) The tuple key
- * - (WORD 7 + KEY_LENGTH) -
- * (WORD 7 + KEY_LENGTH + ATTRINFO_LENGTH - 1) The attrinfo
- *
- * A log record can in some cases be spread across several pages.
- * The next log record always starts immediately after this log record.
- * A log record does, however, never cross a 1 mbyte boundary.
- * This is used to ensure that we can always come back if something
- * strange occurs in the log file.
- * To ensure this we also have log records which only record where
- * the next log record starts.
- *
- *
- * A COMMIT TRANSACTION log record continues as follows:
- * - WORD 1: TRANSACTION ID PART 1
- * - WORD 2: TRANSACTION ID PART 2
- * - WORD 3: FRAGMENT ID OF THE OPERATION
- * - WORD 4: TABLE ID OF THE OPERATION
- * - WORD 5: THE FILE NUMBER OF THE PREPARE RECORD
- * - WORD 6: THE STARTING PAGE NUMBER OF THE PREPARE RECORD
- * - WORD 7: THE STARTING PAGE INDEX OF THE PREPARE RECORD
- * - WORD 8: THE STOP PAGE NUMBER OF THE PREPARE RECORD
- * - WORD 9: GLOBAL CHECKPOINT OF THE TRANSACTION
- *
- *
- * An ABORT TRANSACTION log record continues as follows:
- * - WORD 1: TRANSACTION ID PART 1
- * - WORD 2: TRANSACTION ID PART 2
- *
- *
- * A COMPLETED GCI log record continues as follows:
- * - WORD 1: THE COMPLETED GCI
- *
- *
- * A NEXT LOG RECORD log record continues as follows:
- * - There is no more information needed.
- * The next log record will always refer to the start of the next page.
- *
- * A NEXT MBYTE RECORD log record continues as follows:
- * - There is no more information needed.
- * The next mbyte will always refer to the start of the next mbyte.
- */
- UintR logPageWord[8192]; // Size 32 kbytes
- };
- typedef Ptr<LogPageRecord> LogPageRecordPtr;
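- /**
- * Illustrative sketch (not part of the original header, names are
- * assumptions): the 12-word page header documented above maps to the
- * following word offsets within logPageWord[]. The real code indexes
- * the header with its own constants.
- */
- enum SketchPageHeaderWord {
- HDR_CHECKSUM = 0, // verified when the page is read back
- HDR_LAP = 1, // number of log wraparounds
- HDR_MAX_GCI_COMPLETED = 2,
- HDR_MAX_GCI_STARTED = 3,
- HDR_NEXT_PAGE = 4, // in-memory list link
- HDR_PREVIOUS_PAGE = 5, // currently unused
- HDR_VERSION = 6,
- HDR_NO_LOG_FILES = 7,
- HDR_CURRENT_PAGE_INDEX = 8,
- HDR_OLD_PREPARE_FILE_NO = 9,
- HDR_OLD_PREPARE_PAGE_REF = 10,
- HDR_DIRTY_FLAG = 11
- };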
-
- struct PageRefRecord {
- UintR pageRef[8];
- UintR prNext;
- UintR prPrev;
- Uint16 prFileNo;
- Uint16 prPageNo;
- }; // size 44 bytes
- typedef Ptr<PageRefRecord> PageRefRecordPtr;
-
- struct Tablerec {
- enum TableStatus {
- TABLE_DEFINED = 0,
- NOT_DEFINED = 1,
- ADD_TABLE_ONGOING = 2,
- PREP_DROP_TABLE_ONGOING = 3,
- PREP_DROP_TABLE_DONE = 4
- };
-
- UintR fragrec[MAX_FRAG_PER_NODE];
- Uint16 fragid[MAX_FRAG_PER_NODE];
- /**
- * Status of the table
- */
- TableStatus tableStatus;
- /**
- * Table type and target table of index.
- */
- Uint16 tableType;
- Uint16 primaryTableId;
- Uint32 schemaVersion;
-
- Uint32 usageCount;
- NdbNodeBitmask waitingTC;
- NdbNodeBitmask waitingDIH;
- }; // Size 100 bytes
- typedef Ptr<Tablerec> TablerecPtr;
-
- struct TcConnectionrec {
- enum ListState {
- NOT_IN_LIST = 0,
- IN_ACTIVE_LIST = 1,
- ACC_BLOCK_LIST = 2,
- WAIT_QUEUE_LIST = 3
- };
- enum LogWriteState {
- NOT_STARTED = 0,
- NOT_WRITTEN = 1,
- NOT_WRITTEN_WAIT = 2,
- WRITTEN = 3
- };
- enum AbortState {
- ABORT_IDLE = 0,
- ABORT_ACTIVE = 1,
- NEW_FROM_TC = 2,
- REQ_FROM_TC = 3,
- ABORT_FROM_TC = 4,
- ABORT_FROM_LQH = 5
- };
- enum TransactionState {
- IDLE = 0,
-
- /* -------------------------------------------------------------------- */
- // Transaction in progress states
- /* -------------------------------------------------------------------- */
- WAIT_ACC = 1,
- WAIT_TUPKEYINFO = 2,
- WAIT_ATTR = 3,
- WAIT_TUP = 4,
- STOPPED = 5,
- LOG_QUEUED = 6,
- PREPARED = 7,
- LOG_COMMIT_WRITTEN_WAIT_SIGNAL = 8,
- LOG_COMMIT_QUEUED_WAIT_SIGNAL = 9,
-
- /* -------------------------------------------------------------------- */
- // Commit in progress states
- /* -------------------------------------------------------------------- */
- COMMIT_STOPPED = 10,
- LOG_COMMIT_QUEUED = 11,
- COMMIT_QUEUED = 12,
- COMMITTED = 13,
-
- /* -------------------------------------------------------------------- */
- // Abort in progress states
- /* -------------------------------------------------------------------- */
- WAIT_ACC_ABORT = 14,
- ABORT_QUEUED = 15,
- ABORT_STOPPED = 16,
- WAIT_AI_AFTER_ABORT = 17,
- LOG_ABORT_QUEUED = 18,
- WAIT_TUP_TO_ABORT = 19,
-
- /* -------------------------------------------------------------------- */
- // Scan in progress states
- /* -------------------------------------------------------------------- */
- WAIT_SCAN_AI = 20,
- SCAN_STATE_USED = 21,
- SCAN_FIRST_STOPPED = 22,
- SCAN_CHECK_STOPPED = 23,
- SCAN_STOPPED = 24,
- SCAN_RELEASE_STOPPED = 25,
- SCAN_CLOSE_STOPPED = 26,
- COPY_CLOSE_STOPPED = 27,
- COPY_FIRST_STOPPED = 28,
- COPY_STOPPED = 29,
- SCAN_TUPKEY = 30,
- COPY_TUPKEY = 31,
-
- TC_NOT_CONNECTED = 32,
- PREPARED_RECEIVED_COMMIT = 33, // Temporary state in write commit log
- LOG_COMMIT_WRITTEN = 34 // Temporary state in write commit log
- };
- enum ConnectState {
- DISCONNECTED = 0,
- CONNECTED = 1,
- COPY_CONNECTED = 2,
- LOG_CONNECTED = 3
- };
- ConnectState connectState;
- UintR copyCountWords;
- UintR firstAttrinfo[5];
- UintR tupkeyData[4];
- UintR transid[2];
- AbortState abortState;
- UintR accConnectrec;
- UintR applOprec;
- UintR clientConnectrec;
- UintR tcTimer;
- UintR currReclenAi;
- UintR currTupAiLen;
- UintR firstAttrinbuf;
- UintR firstTupkeybuf;
- UintR fragmentid;
- UintR fragmentptr;
- UintR gci;
- UintR hashValue;
- UintR lastTupkeybuf;
- UintR lastAttrinbuf;
- /**
- * Each operation (TcConnectrec) can be stored in at most one of
- * several lists.
- * This variable keeps track of which list it is in.
- */
- ListState listState;
-
- UintR logStartFileNo;
- LogWriteState logWriteState;
- UintR nextHashRec;
- UintR nextLogTcrec;
- UintR nextTcLogQueue;
- UintR nextTc;
- UintR nextTcConnectrec;
- UintR prevHashRec;
- UintR prevLogTcrec;
- UintR prevTc;
- UintR readlenAi;
- UintR reqRef;
- UintR reqinfo;
- UintR schemaVersion;
- UintR storedProcId;
- UintR simpleTcConnect;
- UintR tableref;
- UintR tcOprec;
- UintR tcScanInfo;
- UintR tcScanRec;
- UintR totReclenAi;
- UintR totSendlenAi;
- UintR tupConnectrec;
- UintR savePointId;
- TransactionState transactionState;
- BlockReference applRef;
- BlockReference clientBlockref;
-
- BlockReference reqBlockref;
- BlockReference tcBlockref;
- BlockReference tcAccBlockref;
- BlockReference tcTuxBlockref;
- BlockReference tcTupBlockref;
- Uint32 commitAckMarker;
- union {
- Uint32 m_scan_curr_range_no;
- UintR noFiredTriggers;
- };
- Uint16 errorCode;
- Uint16 logStartPageIndex;
- Uint16 logStartPageNo;
- Uint16 logStopPageNo;
- Uint16 nextReplica;
- Uint16 primKeyLen;
- Uint16 save1;
- Uint16 nodeAfterNext[3];
-
- Uint8 activeCreat;
- Uint8 apiVersionNo;
- Uint8 dirtyOp;
- Uint8 indTakeOver;
- Uint8 lastReplicaNo;
- Uint8 localFragptr;
- Uint8 lockType;
- Uint8 nextSeqNoReplica;
- Uint8 opSimple;
- Uint8 opExec;
- Uint8 operation;
- Uint8 reclenAiLqhkey;
- Uint8 m_offset_current_keybuf;
- Uint8 replicaType;
- Uint8 simpleRead;
- Uint8 seqNoReplica;
- Uint8 tcNodeFailrec;
- }; /* p2c: size = 280 bytes */
-
- typedef Ptr<TcConnectionrec> TcConnectionrecPtr;
-
- struct TcNodeFailRecord {
- enum TcFailStatus {
- TC_STATE_TRUE = 0,
- TC_STATE_FALSE = 1,
- TC_STATE_BREAK = 2
- };
- UintR lastNewTcRef;
- UintR newTcRef;
- TcFailStatus tcFailStatus;
- UintR tcRecNow;
- BlockReference lastNewTcBlockref;
- BlockReference newTcBlockref;
- Uint16 oldNodeId;
- }; // Size 28 bytes
- typedef Ptr<TcNodeFailRecord> TcNodeFailRecordPtr;
-
- struct CommitLogRecord {
- Uint32 startPageNo;
- Uint32 startPageIndex;
- Uint32 stopPageNo;
- Uint32 fileNo;
- };
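- /**
- * Illustrative sketch (an assumption, not the block's own code): per
- * the COMMIT TRANSACTION layout documented in LogPageRecord, words 5-8
- * of a commit record locate the prepare record and would fill a
- * CommitLogRecord roughly as below. The real parsing is done by
- * readCommitLog() declared further down.
- */
- static void sketchFillCommitLogRecord(const Uint32* rec,
- CommitLogRecord& out) {
- out.fileNo = rec[5]; // WORD 5: file number of the prepare record
- out.startPageNo = rec[6]; // WORD 6: starting page number
- out.startPageIndex = rec[7]; // WORD 7: starting page index
- out.stopPageNo = rec[8]; // WORD 8: stop page number
- }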
-
-public:
- Dblqh(const class Configuration &);
- virtual ~Dblqh();
-
-private:
- BLOCK_DEFINES(Dblqh);
-
- void execPACKED_SIGNAL(Signal* signal);
- void execDEBUG_SIG(Signal* signal);
- void execATTRINFO(Signal* signal);
- void execKEYINFO(Signal* signal);
- void execLQHKEYREQ(Signal* signal);
- void execLQHKEYREF(Signal* signal);
- void execCOMMIT(Signal* signal);
- void execCOMPLETE(Signal* signal);
- void execLQHKEYCONF(Signal* signal);
- void execTESTSIG(Signal* signal);
- void execLQH_RESTART_OP(Signal* signal);
- void execCONTINUEB(Signal* signal);
- void execSTART_RECREQ(Signal* signal);
- void execSTART_RECCONF(Signal* signal);
- void execEXEC_FRAGREQ(Signal* signal);
- void execEXEC_FRAGCONF(Signal* signal);
- void execEXEC_FRAGREF(Signal* signal);
- void execSTART_EXEC_SR(Signal* signal);
- void execEXEC_SRREQ(Signal* signal);
- void execEXEC_SRCONF(Signal* signal);
- void execREAD_PSUEDO_REQ(Signal* signal);
-
- void execDUMP_STATE_ORD(Signal* signal);
- void execACC_COM_BLOCK(Signal* signal);
- void execACC_COM_UNBLOCK(Signal* signal);
- void execTUP_COM_BLOCK(Signal* signal);
- void execTUP_COM_UNBLOCK(Signal* signal);
- void execACC_ABORTCONF(Signal* signal);
- void execNODE_FAILREP(Signal* signal);
- void execCHECK_LCP_STOP(Signal* signal);
- void execSEND_PACKED(Signal* signal);
- void execTUP_ATTRINFO(Signal* signal);
- void execREAD_CONFIG_REQ(Signal* signal);
- void execLQHFRAGREQ(Signal* signal);
- void execLQHADDATTREQ(Signal* signal);
- void execTUP_ADD_ATTCONF(Signal* signal);
- void execTUP_ADD_ATTRREF(Signal* signal);
- void execACCFRAGCONF(Signal* signal);
- void execACCFRAGREF(Signal* signal);
- void execTUPFRAGCONF(Signal* signal);
- void execTUPFRAGREF(Signal* signal);
- void execTAB_COMMITREQ(Signal* signal);
- void execACCSEIZECONF(Signal* signal);
- void execACCSEIZEREF(Signal* signal);
- void execREAD_NODESCONF(Signal* signal);
- void execREAD_NODESREF(Signal* signal);
- void execSTTOR(Signal* signal);
- void execNDB_STTOR(Signal* signal);
- void execTUPSEIZECONF(Signal* signal);
- void execTUPSEIZEREF(Signal* signal);
- void execACCKEYCONF(Signal* signal);
- void execACCKEYREF(Signal* signal);
- void execTUPKEYCONF(Signal* signal);
- void execTUPKEYREF(Signal* signal);
- void execABORT(Signal* signal);
- void execABORTREQ(Signal* signal);
- void execCOMMITREQ(Signal* signal);
- void execCOMPLETEREQ(Signal* signal);
- void execMEMCHECKREQ(Signal* signal);
- void execSCAN_FRAGREQ(Signal* signal);
- void execSCAN_NEXTREQ(Signal* signal);
- void execACC_SCANCONF(Signal* signal);
- void execACC_SCANREF(Signal* signal);
- void execNEXT_SCANCONF(Signal* signal);
- void execNEXT_SCANREF(Signal* signal);
- void execACC_TO_REF(Signal* signal);
- void execSTORED_PROCCONF(Signal* signal);
- void execSTORED_PROCREF(Signal* signal);
- void execCOPY_FRAGREQ(Signal* signal);
- void execCOPY_ACTIVEREQ(Signal* signal);
- void execCOPY_STATEREQ(Signal* signal);
- void execLQH_TRANSREQ(Signal* signal);
- void execTRANSID_AI(Signal* signal);
- void execINCL_NODEREQ(Signal* signal);
- void execACC_LCPCONF(Signal* signal);
- void execACC_LCPREF(Signal* signal);
- void execACC_LCPSTARTED(Signal* signal);
- void execACC_CONTOPCONF(Signal* signal);
- void execLCP_FRAGIDCONF(Signal* signal);
- void execLCP_FRAGIDREF(Signal* signal);
- void execLCP_HOLDOPCONF(Signal* signal);
- void execLCP_HOLDOPREF(Signal* signal);
- void execTUP_PREPLCPCONF(Signal* signal);
- void execTUP_PREPLCPREF(Signal* signal);
- void execTUP_LCPCONF(Signal* signal);
- void execTUP_LCPREF(Signal* signal);
- void execTUP_LCPSTARTED(Signal* signal);
- void execEND_LCPCONF(Signal* signal);
-
- void execLCP_FRAG_ORD(Signal* signal);
- void execEMPTY_LCP_REQ(Signal* signal);
-
- void execSTART_FRAGREQ(Signal* signal);
- void execSTART_RECREF(Signal* signal);
- void execSR_FRAGIDCONF(Signal* signal);
- void execSR_FRAGIDREF(Signal* signal);
- void execACC_SRCONF(Signal* signal);
- void execACC_SRREF(Signal* signal);
- void execTUP_SRCONF(Signal* signal);
- void execTUP_SRREF(Signal* signal);
- void execGCP_SAVEREQ(Signal* signal);
- void execFSOPENCONF(Signal* signal);
- void execFSOPENREF(Signal* signal);
- void execFSCLOSECONF(Signal* signal);
- void execFSCLOSEREF(Signal* signal);
- void execFSWRITECONF(Signal* signal);
- void execFSWRITEREF(Signal* signal);
- void execFSREADCONF(Signal* signal);
- void execFSREADREF(Signal* signal);
- void execSCAN_HBREP(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
- void execTIME_SIGNAL(Signal* signal);
- void execFSSYNCCONF(Signal* signal);
- void execFSSYNCREF(Signal* signal);
-
- void execALTER_TAB_REQ(Signal* signal);
- void execALTER_TAB_CONF(Signal* signal);
-
- void execCREATE_TRIG_CONF(Signal* signal);
- void execCREATE_TRIG_REF(Signal* signal);
- void execCREATE_TRIG_REQ(Signal* signal);
-
- void execDROP_TRIG_CONF(Signal* signal);
- void execDROP_TRIG_REF(Signal* signal);
- void execDROP_TRIG_REQ(Signal* signal);
-
- void execPREP_DROP_TAB_REQ(Signal* signal);
- void execWAIT_DROP_TAB_REQ(Signal* signal);
- void execDROP_TAB_REQ(Signal* signal);
-
- void execLQH_ALLOCREQ(Signal* signal);
- void execLQH_WRITELOG_REQ(Signal* signal);
-
- void execTUXFRAGCONF(Signal* signal);
- void execTUXFRAGREF(Signal* signal);
- void execTUX_ADD_ATTRCONF(Signal* signal);
- void execTUX_ADD_ATTRREF(Signal* signal);
-
- // Statement blocks
-
- void init_acc_ptr_list(ScanRecord*);
- bool seize_acc_ptr_list(ScanRecord*, Uint32);
- void release_acc_ptr_list(ScanRecord*);
- Uint32 get_acc_ptr_from_scan_record(ScanRecord*, Uint32, bool);
- void set_acc_ptr_in_scan_record(ScanRecord*, Uint32, Uint32);
- void i_get_acc_ptr(ScanRecord*, Uint32*&, Uint32);
-
- void removeTable(Uint32 tableId);
- void sendLCP_COMPLETE_REP(Signal* signal, Uint32 lcpId);
- void sendEMPTY_LCP_CONF(Signal* signal, bool idle);
- void sendLCP_FRAGIDREQ(Signal* signal);
- void sendLCP_FRAG_REP(Signal * signal, const LcpRecord::FragOrd &) const;
-
- void updatePackedList(Signal* signal, HostRecord * ahostptr, Uint16 hostId);
- void LQHKEY_abort(Signal* signal, int errortype);
- void LQHKEY_error(Signal* signal, int errortype);
- void nextRecordCopy(Signal* signal);
- void calculateHash(Signal* signal);
- void continueAfterCheckLcpStopBlocked(Signal* signal);
- void checkLcpStopBlockedLab(Signal* signal);
- void sendCommittedTc(Signal* signal, BlockReference atcBlockref);
- void sendCompletedTc(Signal* signal, BlockReference atcBlockref);
- void sendLqhkeyconfTc(Signal* signal, BlockReference atcBlockref);
- void sendCommitLqh(Signal* signal, BlockReference alqhBlockref);
- void sendCompleteLqh(Signal* signal, BlockReference alqhBlockref);
- void sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr);
- void sendPackedSignalTc(Signal* signal, HostRecord * ahostptr);
- Uint32 handleLongTupKey(Signal* signal,
- Uint32 lenSofar,
- Uint32 primKeyLen,
- Uint32* dataPtr);
- void cleanUp(Signal* signal);
- void sendAttrinfoLoop(Signal* signal);
- void sendAttrinfoSignal(Signal* signal);
- void sendLqhAttrinfoSignal(Signal* signal);
- void sendKeyinfoAcc(Signal* signal, Uint32 pos);
- Uint32 initScanrec(const class ScanFragReq *);
- void initScanTc(Signal* signal,
- Uint32 transid1,
- Uint32 transid2,
- Uint32 fragId,
- Uint32 nodeId);
- void finishScanrec(Signal* signal);
- void releaseScanrec(Signal* signal);
- void seizeScanrec(Signal* signal);
- Uint32 sendKeyinfo20(Signal* signal, ScanRecord *, TcConnectionrec *);
- void sendScanFragConf(Signal* signal, Uint32 scanCompleted);
- void initCopyrec(Signal* signal);
- void initCopyTc(Signal* signal);
- void sendCopyActiveConf(Signal* signal,Uint32 tableId);
- void checkLcpCompleted(Signal* signal);
- void checkLcpHoldop(Signal* signal);
- bool checkLcpStarted(Signal* signal);
- void checkLcpTupprep(Signal* signal);
- void getNextFragForLcp(Signal* signal);
- void initLcpLocAcc(Signal* signal, Uint32 fragId);
- void initLcpLocTup(Signal* signal, Uint32 fragId);
- void moveAccActiveFrag(Signal* signal);
- void moveActiveToAcc(Signal* signal);
- void releaseLocalLcps(Signal* signal);
- void seizeLcpLoc(Signal* signal);
- void sendAccContOp(Signal* signal);
- void sendStartLcp(Signal* signal);
- void setLogTail(Signal* signal, Uint32 keepGci);
- Uint32 remainingLogSize(const LogFileRecordPtr &sltCurrLogFilePtr,
- const LogPartRecordPtr &sltLogPartPtr);
- void checkGcpCompleted(Signal* signal, Uint32 pageWritten, Uint32 wordWritten);
- void initFsopenconf(Signal* signal);
- void initFsrwconf(Signal* signal);
- void initLfo(Signal* signal);
- void initLogfile(Signal* signal, Uint32 fileNo);
- void initLogpage(Signal* signal);
- void openFileRw(Signal* signal, LogFileRecordPtr olfLogFilePtr);
- void openLogfileInit(Signal* signal);
- void openNextLogfile(Signal* signal);
- void releaseLfo(Signal* signal);
- void releaseLfoPages(Signal* signal);
- void releaseLogpage(Signal* signal);
- void seizeLfo(Signal* signal);
- void seizeLogfile(Signal* signal);
- void seizeLogpage(Signal* signal);
- void writeFileDescriptor(Signal* signal);
- void writeFileHeaderOpen(Signal* signal, Uint32 type);
- void writeInitMbyte(Signal* signal);
- void writeSinglePage(Signal* signal, Uint32 pageNo, Uint32 wordWritten);
- void buildLinkedLogPageList(Signal* signal);
- void changeMbyte(Signal* signal);
- Uint32 checkIfExecLog(Signal* signal);
- void checkNewMbyte(Signal* signal);
- void checkReadExecSr(Signal* signal);
- void checkScanTcCompleted(Signal* signal);
- void checkSrCompleted(Signal* signal);
- void closeFile(Signal* signal, LogFileRecordPtr logFilePtr);
- void completedLogPage(Signal* signal, Uint32 clpType);
- void deleteFragrec(Uint32 fragId);
- void deleteTransidHash(Signal* signal);
- void findLogfile(Signal* signal,
- Uint32 fileNo,
- LogPartRecordPtr flfLogPartPtr,
- LogFileRecordPtr* parLogFilePtr);
- void findPageRef(Signal* signal, CommitLogRecord* commitLogRecord);
- int findTransaction(UintR Transid1, UintR Transid2, UintR TcOprec);
- void getFirstInLogQueue(Signal* signal);
- bool getFragmentrec(Signal* signal, Uint32 fragId);
- void initialiseAddfragrec(Signal* signal);
- void initialiseAttrbuf(Signal* signal);
- void initialiseDatabuf(Signal* signal);
- void initialiseFragrec(Signal* signal);
- void initialiseGcprec(Signal* signal);
- void initialiseLcpRec(Signal* signal);
- void initialiseLcpLocrec(Signal* signal);
- void initialiseLfo(Signal* signal);
- void initialiseLogFile(Signal* signal);
- void initialiseLogPage(Signal* signal);
- void initialiseLogPart(Signal* signal);
- void initialisePageRef(Signal* signal);
- void initialiseScanrec(Signal* signal);
- void initialiseTabrec(Signal* signal);
- void initialiseTcrec(Signal* signal);
- void initialiseTcNodeFailRec(Signal* signal);
- void initFragrec(Signal* signal,
- Uint32 tableId,
- Uint32 fragId,
- Uint32 copyType);
- void initFragrecSr(Signal* signal);
- void initGciInLogFileRec(Signal* signal, Uint32 noFdDesc);
- void initLcpSr(Signal* signal,
- Uint32 lcpNo,
- Uint32 lcpId,
- Uint32 tableId,
- Uint32 fragId,
- Uint32 fragPtr);
- void initLogpart(Signal* signal);
- void initLogPointers(Signal* signal);
- void initReqinfoExecSr(Signal* signal);
- bool insertFragrec(Signal* signal, Uint32 fragId);
- void linkActiveFrag(Signal* signal);
- void linkFragQueue(Signal* signal);
- void linkWaitLog(Signal* signal, LogPartRecordPtr regLogPartPtr);
- void logNextStart(Signal* signal);
- void moveToPageRef(Signal* signal);
- void readAttrinfo(Signal* signal);
- void readCommitLog(Signal* signal, CommitLogRecord* commitLogRecord);
- void readExecLog(Signal* signal);
- void readExecSrNewMbyte(Signal* signal);
- void readExecSr(Signal* signal);
- void readKey(Signal* signal);
- void readLogData(Signal* signal, Uint32 noOfWords, Uint32* dataPtr);
- void readLogHeader(Signal* signal);
- Uint32 readLogword(Signal* signal);
- Uint32 readLogwordExec(Signal* signal);
- void readSinglePage(Signal* signal, Uint32 pageNo);
- void releaseAccList(Signal* signal);
- void releaseActiveCopy(Signal* signal);
- void releaseActiveFrag(Signal* signal);
- void releaseActiveList(Signal* signal);
- void releaseAddfragrec(Signal* signal);
- void releaseFragrec();
- void releaseLcpLoc(Signal* signal);
- void releaseOprec(Signal* signal);
- void releasePageRef(Signal* signal);
- void releaseMmPages(Signal* signal);
- void releasePrPages(Signal* signal);
- void releaseTcrec(Signal* signal, TcConnectionrecPtr tcConnectptr);
- void releaseTcrecLog(Signal* signal, TcConnectionrecPtr tcConnectptr);
- void releaseWaitQueue(Signal* signal);
- void removeLogTcrec(Signal* signal);
- void removePageRef(Signal* signal);
- Uint32 returnExecLog(Signal* signal);
- int saveTupattrbuf(Signal* signal, Uint32* dataPtr, Uint32 length);
- void seizeAddfragrec(Signal* signal);
- void seizeAttrinbuf(Signal* signal);
- Uint32 seize_attrinbuf();
- Uint32 release_attrinbuf(Uint32);
- Uint32 copy_bounds(Uint32 * dst, TcConnectionrec*);
-
- void seizeFragmentrec(Signal* signal);
- void seizePageRef(Signal* signal);
- void seizeTcrec();
- void seizeTupkeybuf(Signal* signal);
- void sendAborted(Signal* signal);
- void sendLqhTransconf(Signal* signal, LqhTransConf::OperationStatus);
- void sendTupkey(Signal* signal);
- void startExecSr(Signal* signal);
- void startNextExecSr(Signal* signal);
- void startTimeSupervision(Signal* signal);
- void stepAhead(Signal* signal, Uint32 stepAheadWords);
- void systemError(Signal* signal);
- void writeAbortLog(Signal* signal);
- void writeCommitLog(Signal* signal, LogPartRecordPtr regLogPartPtr);
- void writeCompletedGciLog(Signal* signal);
- void writeDirty(Signal* signal);
- void writeKey(Signal* signal);
- void writeLogHeader(Signal* signal);
- void writeLogWord(Signal* signal, Uint32 data);
- void writeNextLog(Signal* signal);
- void errorReport(Signal* signal, int place);
- void warningReport(Signal* signal, int place);
- void invalidateLogAfterLastGCI(Signal *signal);
- void readFileInInvalidate(Signal *signal);
- void exitFromInvalidate(Signal* signal);
- Uint32 calcPageCheckSum(LogPageRecordPtr logP);
-
- // Generated statement blocks
- void systemErrorLab(Signal* signal);
- void initFourth(Signal* signal);
- void packLqhkeyreqLab(Signal* signal);
- void sendNdbSttorryLab(Signal* signal);
- void execSrCompletedLab(Signal* signal);
- void execLogRecord(Signal* signal);
- void srPhase3Comp(Signal* signal);
- void srLogLimits(Signal* signal);
- void srGciLimits(Signal* signal);
- void srPhase3Start(Signal* signal);
- void warningHandlerLab(Signal* signal);
- void checkStartCompletedLab(Signal* signal);
- void continueAbortLab(Signal* signal);
- void abortContinueAfterBlockedLab(Signal* signal, bool canBlock);
- void abortCommonLab(Signal* signal);
- void localCommitLab(Signal* signal);
- void abortErrorLab(Signal* signal);
- void continueAfterReceivingAllAiLab(Signal* signal);
- void abortStateHandlerLab(Signal* signal);
- void writeAttrinfoLab(Signal* signal);
- void scanAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length);
- void abort_scan(Signal* signal, Uint32 scan_ptr_i, Uint32 errcode);
- void localAbortStateHandlerLab(Signal* signal);
- void logLqhkeyreqLab(Signal* signal);
- void lqhAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length);
- void rwConcludedAiLab(Signal* signal);
- void aiStateErrorCheckLab(Signal* signal, Uint32* dataPtr, Uint32 length);
- void takeOverErrorLab(Signal* signal);
- void endgettupkeyLab(Signal* signal);
- void noFreeRecordLab(Signal* signal,
- const class LqhKeyReq * lqhKeyReq,
- Uint32 errorCode);
- void logLqhkeyrefLab(Signal* signal);
- void closeCopyLab(Signal* signal);
- void commitReplyLab(Signal* signal);
- void completeUnusualLab(Signal* signal);
- void completeTransNotLastLab(Signal* signal);
- void completedLab(Signal* signal);
- void copyCompletedLab(Signal* signal);
- void completeLcpRoundLab(Signal* signal);
- void continueAfterLogAbortWriteLab(Signal* signal);
- void sendAttrinfoLab(Signal* signal);
- void sendExecConf(Signal* signal);
- void execSr(Signal* signal);
- void srFourthComp(Signal* signal);
- void timeSup(Signal* signal);
- void closeCopyRequestLab(Signal* signal);
- void closeScanRequestLab(Signal* signal);
- void scanTcConnectLab(Signal* signal, Uint32 startTcCon, Uint32 fragId);
- void initGcpRecLab(Signal* signal);
- void prepareContinueAfterBlockedLab(Signal* signal);
- void commitContinueAfterBlockedLab(Signal* signal);
- void continueCopyAfterBlockedLab(Signal* signal);
- void continueFirstCopyAfterBlockedLab(Signal* signal);
- void continueFirstScanAfterBlockedLab(Signal* signal);
- void continueScanAfterBlockedLab(Signal* signal);
- void continueScanReleaseAfterBlockedLab(Signal* signal);
- void continueCloseScanAfterBlockedLab(Signal* signal);
- void continueCloseCopyAfterBlockedLab(Signal* signal);
- void sendExecFragRefLab(Signal* signal);
- void fragrefLab(Signal* signal, BlockReference retRef,
- Uint32 retPtr, Uint32 errorCode);
- void abortAddFragOps(Signal* signal);
- void rwConcludedLab(Signal* signal);
- void sendsttorryLab(Signal* signal);
- void initialiseRecordsLab(Signal* signal, Uint32 data, Uint32, Uint32);
- void startphase2Lab(Signal* signal, Uint32 config);
- void startphase3Lab(Signal* signal);
- void startphase4Lab(Signal* signal);
- void startphase6Lab(Signal* signal);
- void moreconnectionsLab(Signal* signal);
- void scanReleaseLocksLab(Signal* signal);
- void closeScanLab(Signal* signal);
- void nextScanConfLoopLab(Signal* signal);
- void scanNextLoopLab(Signal* signal);
- void commitReqLab(Signal* signal, Uint32 gci);
- void completeTransLastLab(Signal* signal);
- void tupScanCloseConfLab(Signal* signal);
- void tupCopyCloseConfLab(Signal* signal);
- void accScanCloseConfLab(Signal* signal);
- void accCopyCloseConfLab(Signal* signal);
- void nextScanConfScanLab(Signal* signal);
- void nextScanConfCopyLab(Signal* signal);
- void continueScanNextReqLab(Signal* signal);
- void keyinfoLab(const Uint32 * src, const Uint32 * end);
- void copySendTupkeyReqLab(Signal* signal);
- void storedProcConfScanLab(Signal* signal);
- void storedProcConfCopyLab(Signal* signal);
- void copyStateFinishedLab(Signal* signal);
- void lcpCompletedLab(Signal* signal);
- void lcpStartedLab(Signal* signal);
- void contChkpNextFragLab(Signal* signal);
- void startLcpRoundLab(Signal* signal);
- void startFragRefLab(Signal* signal);
- void srCompletedLab(Signal* signal);
- void openFileInitLab(Signal* signal);
- void openSrFrontpageLab(Signal* signal);
- void openSrLastFileLab(Signal* signal);
- void openSrNextFileLab(Signal* signal);
- void openExecSrStartLab(Signal* signal);
- void openExecSrNewMbyteLab(Signal* signal);
- void openSrFourthPhaseLab(Signal* signal);
- void openSrFourthZeroSkipInitLab(Signal* signal);
- void openSrFourthZeroLab(Signal* signal);
- void openExecLogLab(Signal* signal);
- void checkInitCompletedLab(Signal* signal);
- void closingSrLab(Signal* signal);
- void closeExecSrLab(Signal* signal);
- void execLogComp(Signal* signal);
- void closeWriteLogLab(Signal* signal);
- void closeExecLogLab(Signal* signal);
- void writePageZeroLab(Signal* signal);
- void lastWriteInFileLab(Signal* signal);
- void initWriteEndLab(Signal* signal);
- void initFirstPageLab(Signal* signal);
- void writeGciZeroLab(Signal* signal);
- void writeDirtyLab(Signal* signal);
- void writeInitMbyteLab(Signal* signal);
- void writeLogfileLab(Signal* signal);
- void firstPageWriteLab(Signal* signal);
- void readSrLastMbyteLab(Signal* signal);
- void readSrLastFileLab(Signal* signal);
- void readSrNextFileLab(Signal* signal);
- void readExecSrLab(Signal* signal);
- void readExecLogLab(Signal* signal);
- void readSrFourthPhaseLab(Signal* signal);
- void readSrFourthZeroLab(Signal* signal);
- void copyLqhKeyRefLab(Signal* signal);
- void restartOperationsLab(Signal* signal);
- void lqhTransNextLab(Signal* signal);
- void restartOperationsAfterStopLab(Signal* signal);
- void sttorStartphase1Lab(Signal* signal);
- void startphase1Lab(Signal* signal, Uint32 config, Uint32 nodeId);
- void tupkeyConfLab(Signal* signal);
- void copyTupkeyConfLab(Signal* signal);
- void scanTupkeyConfLab(Signal* signal);
- void scanTupkeyRefLab(Signal* signal);
- void accScanConfScanLab(Signal* signal);
- void accScanConfCopyLab(Signal* signal);
- void scanLockReleasedLab(Signal* signal);
- void openSrFourthNextLab(Signal* signal);
- void closingInitLab(Signal* signal);
- void closeExecSrCompletedLab(Signal* signal);
- void readSrFrontpageLab(Signal* signal);
-
- void sendAddFragReq(Signal* signal);
- void sendAddAttrReq(Signal* signal);
- void checkDropTab(Signal*);
- Uint32 checkDropTabState(Tablerec::TableStatus, Uint32) const;
-
- // Initialisation
- void initData();
- void initRecords();
-
- Dbtup* c_tup;
- Uint32 readPrimaryKeys(ScanRecord*, TcConnectionrec*, Uint32 * dst);
-// ----------------------------------------------------------------
-// These are variables handling the records. For most record types there
-// is one pointer to the array of structs, one pointer struct, a file
-// size and a first-free-record variable. The pointer structs are
-// temporary variables kept on the class object, since a great many of
-// them often exist simultaneously and no perfect way of handling them
-// is currently available.
-// ----------------------------------------------------------------
-/* ------------------------------------------------------------------------- */
-/* POSITIONS WITHIN THE ATTRINBUF AND THE MAX SIZE OF DATA WITHIN AN */
-/* ATTRINBUF. */
-/* ------------------------------------------------------------------------- */
-
-
-#define ZADDFRAGREC_FILE_SIZE 1
- AddFragRecord *addFragRecord;
- AddFragRecordPtr addfragptr;
- UintR cfirstfreeAddfragrec;
- UintR caddfragrecFileSize;
-
-#define ZATTRINBUF_FILE_SIZE 12288 // 1.5 MByte
-#define ZINBUF_DATA_LEN 24 /* POSITION OF 'DATA LENGTH'-VARIABLE. */
-#define ZINBUF_NEXT 25 /* POSITION OF 'NEXT'-VARIABLE. */
- Attrbuf *attrbuf;
- AttrbufPtr attrinbufptr;
- UintR cfirstfreeAttrinbuf;
- UintR cattrinbufFileSize;
- Uint32 c_no_attrinbuf_recs;
-
-#define ZDATABUF_FILE_SIZE 10000 // 200 kByte
- Databuf *databuf;
- DatabufPtr databufptr;
- UintR cfirstfreeDatabuf;
- UintR cdatabufFileSize;
-
-// Configurable
- Fragrecord *fragrecord;
- FragrecordPtr fragptr;
- UintR cfirstfreeFragrec;
- UintR cfragrecFileSize;
-
-#define ZGCPREC_FILE_SIZE 1
- GcpRecord *gcpRecord;
- GcpRecordPtr gcpPtr;
- UintR cgcprecFileSize;
-
-// MAX_NDB_NODES is the size of this array
- HostRecord *hostRecord;
- UintR chostFileSize;
-
-#define ZNO_CONCURRENT_LCP 1
- LcpRecord *lcpRecord;
- LcpRecordPtr lcpPtr;
- UintR cfirstfreeLcpLoc;
- UintR clcpFileSize;
-
-#define ZLCP_LOCREC_FILE_SIZE 4
- LcpLocRecord *lcpLocRecord;
- LcpLocRecordPtr lcpLocptr;
- UintR clcpLocrecFileSize;
-
-#define ZLOG_PART_FILE_SIZE 4
- LogPartRecord *logPartRecord;
- LogPartRecordPtr logPartPtr;
- UintR clogPartFileSize;
-
-// Configurable
- LogFileRecord *logFileRecord;
- LogFileRecordPtr logFilePtr;
- UintR cfirstfreeLogFile;
- UintR clogFileFileSize;
-
-#define ZLFO_FILE_SIZE 256 /* MAX 256 OUTSTANDING FILE OPERATIONS */
- LogFileOperationRecord *logFileOperationRecord;
- LogFileOperationRecordPtr lfoPtr;
- UintR cfirstfreeLfo;
- UintR clfoFileSize;
-
- LogPageRecord *logPageRecord;
- LogPageRecordPtr logPagePtr;
- UintR cfirstfreeLogPage;
- UintR clogPageFileSize;
-
-#define ZPAGE_REF_FILE_SIZE 20
- PageRefRecord *pageRefRecord;
- PageRefRecordPtr pageRefPtr;
- UintR cfirstfreePageRef;
- UintR cpageRefFileSize;
-
-#define ZSCANREC_FILE_SIZE 100
- ArrayPool<ScanRecord> c_scanRecordPool;
- ScanRecordPtr scanptr;
- UintR cscanNoFreeRec;
- Uint32 cscanrecFileSize;
-
-// Configurable
- Tablerec *tablerec;
- TablerecPtr tabptr;
- UintR ctabrecFileSize;
-
-// Configurable
- TcConnectionrec *tcConnectionrec;
- TcConnectionrecPtr tcConnectptr;
- UintR cfirstfreeTcConrec;
- UintR ctcConnectrecFileSize;
-
-// MAX_NDB_NODES is the size of this array
- TcNodeFailRecord *tcNodeFailRecord;
- TcNodeFailRecordPtr tcNodeFailptr;
- UintR ctcNodeFailrecFileSize;
-
- Uint16 terrorCode;
-
- Uint32 c_firstInNodeGroup;
-
-// ------------------------------------------------------------------------
-// These variables are used to store block state that does not need
-// arrays of structs.
-// ------------------------------------------------------------------------
- Uint32 c_lcpId;
- Uint32 cnoOfFragsCheckpointed;
-
-/* ------------------------------------------------------------------------- */
-// cmaxWordsAtNodeRec keeps track of how many words are currently
-// outstanding in a node recovery situation.
-// cbookedAccOps keeps track of how many operation records have been
-// booked in ACC for the scan processes.
-// cmaxAccOps contains the maximum number of operation records which can be
-// allocated for scan purposes in ACC.
-/* ------------------------------------------------------------------------- */
- UintR cmaxWordsAtNodeRec;
- UintR cbookedAccOps;
- UintR cmaxAccOps;
-/* ------------------------------------------------------------------------- */
-/*THIS STATE VARIABLE IS ZTRUE IF AN ADD NODE IS ONGOING. ADD NODE MEANS */
-/*THAT CONNECTIONS ARE SET-UP TO THE NEW NODE. */
-/* ------------------------------------------------------------------------- */
- Uint8 caddNodeState;
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE SPECIFIES WHICH TYPE OF RESTART THAT IS ONGOING */
-/* ------------------------------------------------------------------------- */
- Uint16 cstartType;
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE INDICATES WHETHER AN INITIAL RESTART IS ONGOING OR NOT. */
-/* ------------------------------------------------------------------------- */
- Uint8 cinitialStartOngoing;
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE KEEPS TRACK OF WHEN TUP AND ACC HAVE COMPLETED EXECUTING */
-/*THEIR UNDO LOG. */
-/* ------------------------------------------------------------------------- */
- ExecUndoLogState csrExecUndoLogState;
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE KEEPS TRACK OF WHEN TUP AND ACC HAVE CONFIRMED COMPLETION */
-/*OF A LOCAL CHECKPOINT ROUND. */
-/* ------------------------------------------------------------------------- */
- LcpCloseState clcpCompletedState;
-/* ------------------------------------------------------------------------- */
-/*DURING CONNECTION PROCESSES IN SYSTEM RESTART THESE VARIABLES KEEP TRACK */
-/*OF HOW MANY CONNECTIONS AND RELEASES ARE TO BE PERFORMED. */
-/* ------------------------------------------------------------------------- */
-/***************************************************************************>*/
-/*THESE VARIABLES CONTAIN INFORMATION USED DURING SYSTEM RESTART. */
-/***************************************************************************>*/
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE IS ZTRUE IF THE SIGNAL START_REC_REQ HAS BEEN RECEIVED. */
-/*RECEPTION OF THIS SIGNAL INDICATES THAT ALL FRAGMENTS THAT THIS NODE */
-/*SHOULD START HAVE BEEN RECEIVED. */
-/* ------------------------------------------------------------------------- */
- Uint8 cstartRecReq;
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE KEEPS TRACK OF HOW MANY FRAGMENTS PARTICIPATE IN */
-/*EXECUTING THE LOG. IF ZERO WE DON'T NEED TO EXECUTE THE LOG AT ALL. */
-/* ------------------------------------------------------------------------- */
- UintR cnoFragmentsExecSr;
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE KEEPS TRACK OF WHICH OF THE FIRST TWO RESTART PHASES */
-/*HAVE COMPLETED. */
-/* ------------------------------------------------------------------------- */
- Uint8 csrPhaseStarted;
-/* ------------------------------------------------------------------------- */
-/*NUMBER OF PHASES COMPLETED OF EXECUTING THE FRAGMENT LOG. */
-/* ------------------------------------------------------------------------- */
- Uint8 csrPhasesCompleted;
-/* ------------------------------------------------------------------------- */
-/*THE BLOCK REFERENCE OF THE MASTER DIH DURING SYSTEM RESTART. */
-/* ------------------------------------------------------------------------- */
- BlockReference cmasterDihBlockref;
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE IS THE HEAD OF A LINKED LIST OF FRAGMENTS WAITING TO BE */
-/*RESTORED FROM DISK. */
-/* ------------------------------------------------------------------------- */
- UintR cfirstWaitFragSr;
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE IS THE HEAD OF A LINKED LIST OF FRAGMENTS THAT HAVE BEEN */
-/*RESTORED FROM DISK AND AWAIT EXECUTION OF THE FRAGMENT LOG. */
-/* ------------------------------------------------------------------------- */
- UintR cfirstCompletedFragSr;
-
- /**
- * List of fragments for which log execution has completed.
- */
- Uint32 c_redo_log_complete_frags;
-
-/* ------------------------------------------------------------------------- */
-/*USED DURING SYSTEM RESTART, INDICATES THE OLDEST GCI THAT CAN BE RESTARTED */
-/*FROM AFTER THIS SYSTEM RESTART. USED TO FIND THE LOG TAIL. */
-/* ------------------------------------------------------------------------- */
- UintR crestartOldestGci;
-/* ------------------------------------------------------------------------- */
-/*USED DURING SYSTEM RESTART, INDICATES THE NEWEST GCI THAT CAN BE RESTARTED */
-/*AFTER THIS SYSTEM RESTART. USED TO FIND THE LOG HEAD. */
-/* ------------------------------------------------------------------------- */
- UintR crestartNewestGci;
-/* ------------------------------------------------------------------------- */
-/*THE NUMBER OF LOG FILES. SET AS A PARAMETER WHEN NDB IS STARTED. */
-/* ------------------------------------------------------------------------- */
- UintR cnoLogFiles;
-/* ------------------------------------------------------------------------- */
-/*THESE TWO VARIABLES CONTAIN THE NEWEST GCI RECEIVED IN THE BLOCK AND THE */
-/*NEWEST COMPLETED GCI IN THE BLOCK. */
-/* ------------------------------------------------------------------------- */
- UintR cnewestGci;
- UintR cnewestCompletedGci;
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE ONLY PASSES INFORMATION FROM STTOR TO STTORRY = TEMPORARY */
-/* ------------------------------------------------------------------------- */
- Uint16 csignalKey;
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE CONTAINS THE CURRENT START PHASE IN THE BLOCK. IS ZNIL IF */
-/*NO SYSTEM RESTART IS ONGOING. */
-/* ------------------------------------------------------------------------- */
- Uint16 cstartPhase;
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE CONTAINS THE CURRENT GLOBAL CHECKPOINT RECORD. IT IS RNIL IF */
-/*NO GCP SAVE IS ONGOING. */
-/* ------------------------------------------------------------------------- */
- UintR ccurrentGcprec;
-/* ------------------------------------------------------------------------- */
-/*THESE VARIABLES ARE USED TO KEEP TRACK OF ALL ACTIVE COPY FRAGMENTS IN LQH.*/
-/* ------------------------------------------------------------------------- */
- Uint8 cnoActiveCopy;
- UintR cactiveCopy[4];
-
-/* ------------------------------------------------------------------------- */
-/*THESE VARIABLES CONTAIN THE BLOCK REFERENCES OF THE OTHER NDB BLOCKS. */
-/*ALSO THE BLOCK REFERENCE OF MY OWN BLOCK = LQH */
-/* ------------------------------------------------------------------------- */
- BlockReference caccBlockref;
- BlockReference ctupBlockref;
- BlockReference ctuxBlockref;
- BlockReference cownref;
- UintR cLqhTimeOutCount;
- UintR cLqhTimeOutCheckCount;
- UintR cnoOfLogPages;
- bool caccCommitBlocked;
- bool ctupCommitBlocked;
- bool cCommitBlocked;
- UintR cCounterAccCommitBlocked;
- UintR cCounterTupCommitBlocked;
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE CONTAINS MY OWN PROCESSOR ID. */
-/* ------------------------------------------------------------------------- */
- NodeId cownNodeid;
-
-/* ------------------------------------------------------------------------- */
-/*THESE VARIABLES CONTAIN INFORMATION ABOUT THE OTHER NODES IN THE SYSTEM */
-/*THESE VARIABLES ARE MOSTLY USED AT SYSTEM RESTART AND ADD NODE TO SET-UP */
-/*AND RELEASE CONNECTIONS TO OTHER NODES IN THE CLUSTER. */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/*THIS ARRAY CONTAINS THE PROCESSOR ID'S OF THE NODES THAT ARE ALIVE. */
-/*CNO_OF_NODES SPECIFIES HOW MANY NODES THAT ARE CURRENTLY ALIVE. */
-/*CNODE_VERSION SPECIFIES THE NDB VERSION EXECUTING ON THE NODE. */
-/* ------------------------------------------------------------------------- */
- UintR cpackedListIndex;
- Uint16 cpackedList[MAX_NDB_NODES];
- UintR cnodeData[MAX_NDB_NODES];
- UintR cnodeStatus[MAX_NDB_NODES];
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE INDICATES WHETHER A CERTAIN NODE HAS SENT ALL FRAGMENTS THAT */
-/*NEED TO HAVE THE LOG EXECUTED. */
-/* ------------------------------------------------------------------------- */
- Uint8 cnodeSrState[MAX_NDB_NODES];
-/* ------------------------------------------------------------------------- */
-/*THIS VARIABLE INDICATES WHETHER A CERTAIN NODE HAS EXECUTED THE LOG */
-/* ------------------------------------------------------------------------- */
- Uint8 cnodeExecSrState[MAX_NDB_NODES];
- UintR cnoOfNodes;
-
-/* ------------------------------------------------------------------------- */
-/* THIS VARIABLE CONTAINS THE DIRECTORY OF A HASH TABLE OF ALL ACTIVE */
-/* OPERATIONS IN THE BLOCK. IT IS USED TO BE ABLE TO QUICKLY ABORT AN */
-/* OPERATION WHERE THE CONNECTION WAS LOST DUE TO NODE FAILURES. IT IS */
-/* ACTUALLY USED FOR ALL ABORTS COMMANDED BY TC. */
-/* ------------------------------------------------------------------------- */
- UintR preComputedRequestInfoMask;
- UintR ctransidHash[1024];
-
- Uint32 c_diskless;
-
-public:
- /**
- * Commit ack marker, kept per transaction until the commit has been
- * acknowledged and the marker is removed (see REMOVE_MARKER_ORD).
- */
- struct CommitAckMarker {
- Uint32 transid1;
- Uint32 transid2;
-
- Uint32 apiRef; // Api block ref
- Uint32 apiOprec; // Connection Object in NDB API
- Uint32 tcNodeId;
- union { Uint32 nextPool; Uint32 nextHash; };
- Uint32 prevHash;
-
- inline bool equal(const CommitAckMarker & p) const {
- return ((p.transid1 == transid1) && (p.transid2 == transid2));
- }
-
- inline Uint32 hashValue() const {
- return transid1;
- }
- };
-
- typedef Ptr<CommitAckMarker> CommitAckMarkerPtr;
- ArrayPool<CommitAckMarker> m_commitAckMarkerPool;
- DLHashTable<CommitAckMarker> m_commitAckMarkerHash;
- typedef DLHashTable<CommitAckMarker>::Iterator CommitAckMarkerIterator;
- void execREMOVE_MARKER_ORD(Signal* signal);
- void scanMarkers(Signal* signal, Uint32 tcNodeFail, Uint32 bucket, Uint32 i);
-
- struct Counters {
- Uint32 operations;
-
- inline void clear(){
- operations = 0;
- }
- };
-
- Counters c_Counters;
-
- inline bool getAllowRead() const {
- return getNodeState().startLevel < NodeState::SL_STOPPING_3;
- }
-
- DLHashTable<ScanRecord> c_scanTakeOverHash;
-};
-
-inline
-bool
-Dblqh::ScanRecord::check_scan_batch_completed() const
-{
- Uint32 max_rows = m_max_batch_size_rows;
- Uint32 max_bytes = m_max_batch_size_bytes;
-
- return (max_rows > 0 && (m_curr_batch_size_rows >= max_rows)) ||
- (max_bytes > 0 && (m_curr_batch_size_bytes >= max_bytes));
-}
-
-inline
-void
-Dblqh::i_get_acc_ptr(ScanRecord* scanP, Uint32* &acc_ptr, Uint32 index)
-{
- if (index == 0) {
- acc_ptr= (Uint32*)&scanP->scan_acc_op_ptr[0];
- } else {
- Uint32 attr_buf_index, attr_buf_rec;
-
- AttrbufPtr regAttrPtr;
- jam();
- attr_buf_rec= (index + 31) / 32;
- attr_buf_index= (index - 1) & 31;
- regAttrPtr.i= scanP->scan_acc_op_ptr[attr_buf_rec];
- ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
- acc_ptr= (Uint32*)&regAttrPtr.p->attrbuf[attr_buf_index];
- }
-}
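-/**
- * Added note (not in the original source): i_get_acc_ptr above keeps the
- * first ACC operation pointer inline in scan_acc_op_ptr[0]; every later
- * index is redirected into Attrbuf records whose record numbers are held
- * in scan_acc_op_ptr[1..]. With the arithmetic used above, indexes 1..32
- * map to slots 0..31 of the attrbuf referenced by scan_acc_op_ptr[1],
- * indexes 33..64 to scan_acc_op_ptr[2], and so on (32 pointers per
- * attrbuf record).
- */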
-
-#endif
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
deleted file mode 100644
index e39d0ca68a6..00000000000
--- a/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
+++ /dev/null
@@ -1,455 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-#include <pc.hpp>
-#define DBLQH_C
-#include "Dblqh.hpp"
-#include <ndb_limits.h>
-
-#define DEBUG(x) { ndbout << "LQH::" << x << endl; }
-
-void Dblqh::initData()
-{
- caddfragrecFileSize = ZADDFRAGREC_FILE_SIZE;
- cattrinbufFileSize = ZATTRINBUF_FILE_SIZE;
- c_no_attrinbuf_recs= ZATTRINBUF_FILE_SIZE;
- cdatabufFileSize = ZDATABUF_FILE_SIZE;
- cfragrecFileSize = 0;
- cgcprecFileSize = ZGCPREC_FILE_SIZE;
- chostFileSize = MAX_NDB_NODES;
- clcpFileSize = ZNO_CONCURRENT_LCP;
- clcpLocrecFileSize = ZLCP_LOCREC_FILE_SIZE;
- clfoFileSize = ZLFO_FILE_SIZE;
- clogFileFileSize = 0;
- clogPartFileSize = ZLOG_PART_FILE_SIZE;
- cpageRefFileSize = ZPAGE_REF_FILE_SIZE;
- cscanrecFileSize = ZSCANREC_FILE_SIZE;
- ctabrecFileSize = 0;
- ctcConnectrecFileSize = 0;
- ctcNodeFailrecFileSize = MAX_NDB_NODES;
-
- addFragRecord = 0;
- attrbuf = 0;
- databuf = 0;
- fragrecord = 0;
- gcpRecord = 0;
- hostRecord = 0;
- lcpRecord = 0;
- lcpLocRecord = 0;
- logPartRecord = 0;
- logFileRecord = 0;
- logFileOperationRecord = 0;
- logPageRecord = 0;
- pageRefRecord = 0;
- tablerec = 0;
- tcConnectionrec = 0;
- tcNodeFailRecord = 0;
-
- // Records with constant sizes
-
- cLqhTimeOutCount = 0;
- cLqhTimeOutCheckCount = 0;
- cbookedAccOps = 0;
- c_redo_log_complete_frags = RNIL;
-}//Dblqh::initData()
-
-void Dblqh::initRecords()
-{
- // Records with dynamic sizes
- addFragRecord = (AddFragRecord*)allocRecord("AddFragRecord",
- sizeof(AddFragRecord),
- caddfragrecFileSize);
- attrbuf = (Attrbuf*)allocRecord("Attrbuf",
- sizeof(Attrbuf),
- cattrinbufFileSize);
-
- databuf = (Databuf*)allocRecord("Databuf",
- sizeof(Databuf),
- cdatabufFileSize);
-
- fragrecord = (Fragrecord*)allocRecord("Fragrecord",
- sizeof(Fragrecord),
- cfragrecFileSize);
-
- gcpRecord = (GcpRecord*)allocRecord("GcpRecord",
- sizeof(GcpRecord),
- cgcprecFileSize);
-
- hostRecord = (HostRecord*)allocRecord("HostRecord",
- sizeof(HostRecord),
- chostFileSize);
-
- lcpRecord = (LcpRecord*)allocRecord("LcpRecord",
- sizeof(LcpRecord),
- clcpFileSize);
-
- for(Uint32 i = 0; i<clcpFileSize; i++){
- new (&lcpRecord[i])LcpRecord();
- }
-
- lcpLocRecord = (LcpLocRecord*)allocRecord("LcpLocRecord",
- sizeof(LcpLocRecord),
- clcpLocrecFileSize);
-
- logPartRecord = (LogPartRecord*)allocRecord("LogPartRecord",
- sizeof(LogPartRecord),
- clogPartFileSize);
-
- logFileRecord = (LogFileRecord*)allocRecord("LogFileRecord",
- sizeof(LogFileRecord),
- clogFileFileSize);
-
- logFileOperationRecord = (LogFileOperationRecord*)
- allocRecord("LogFileOperationRecord",
- sizeof(LogFileOperationRecord),
- clfoFileSize);
-
- logPageRecord = (LogPageRecord*)allocRecord("LogPageRecord",
- sizeof(LogPageRecord),
- clogPageFileSize,
- false);
-
- pageRefRecord = (PageRefRecord*)allocRecord("PageRefRecord",
- sizeof(PageRefRecord),
- cpageRefFileSize);
-
- cscanNoFreeRec = cscanrecFileSize;
- c_scanRecordPool.setSize(cscanrecFileSize);
- c_scanTakeOverHash.setSize(64);
-
- tablerec = (Tablerec*)allocRecord("Tablerec",
- sizeof(Tablerec),
- ctabrecFileSize);
-
- tcConnectionrec = (TcConnectionrec*)allocRecord("TcConnectionrec",
- sizeof(TcConnectionrec),
- ctcConnectrecFileSize);
-
- m_commitAckMarkerPool.setSize(ctcConnectrecFileSize);
- m_commitAckMarkerHash.setSize(1024);
-
- tcNodeFailRecord = (TcNodeFailRecord*)allocRecord("TcNodeFailRecord",
- sizeof(TcNodeFailRecord),
- ctcNodeFailrecFileSize);
-
- /*
- ndbout << "FRAGREC SIZE = " << sizeof(Fragrecord) << endl;
- ndbout << "TAB SIZE = " << sizeof(Tablerec) << endl;
- ndbout << "GCP SIZE = " << sizeof(GcpRecord) << endl;
- ndbout << "LCP SIZE = " << sizeof(LcpRecord) << endl;
- ndbout << "LCPLOC SIZE = " << sizeof(LcpLocRecord) << endl;
- ndbout << "LOGPART SIZE = " << sizeof(LogPartRecord) << endl;
- ndbout << "LOGFILE SIZE = " << sizeof(LogFileRecord) << endl;
- ndbout << "TC SIZE = " << sizeof(TcConnectionrec) << endl;
- ndbout << "HOST SIZE = " << sizeof(HostRecord) << endl;
- ndbout << "LFO SIZE = " << sizeof(LogFileOperationRecord) << endl;
- ndbout << "PR SIZE = " << sizeof(PageRefRecord) << endl;
- ndbout << "SCAN SIZE = " << sizeof(ScanRecord) << endl;
-*/
-
- // Initialize BAT for interface to file system
- NewVARIABLE* bat = allocateBat(2);
- bat[1].WA = &logPageRecord->logPageWord[0];
- bat[1].nrr = clogPageFileSize;
- bat[1].ClusterSize = sizeof(LogPageRecord);
- bat[1].bits.q = ZTWOLOG_PAGE_SIZE;
- bat[1].bits.v = 5;
-}//Dblqh::initRecords()
-
-Dblqh::Dblqh(const class Configuration & conf):
- SimulatedBlock(DBLQH, conf),
- m_commitAckMarkerHash(m_commitAckMarkerPool),
- c_scanTakeOverHash(c_scanRecordPool)
-{
- Uint32 log_page_size= 0;
- BLOCK_CONSTRUCTOR(Dblqh);
-
- const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
- ndbrequire(p != 0);
-
- ndb_mgm_get_int_parameter(p, CFG_DB_REDO_BUFFER,
- &log_page_size);
-
- /**
- * Always round the number of REDO log pages up to a multiple of
- * half a MByte
- */
- clogPageFileSize= (log_page_size / sizeof(LogPageRecord));
- Uint32 mega_byte_part= clogPageFileSize & 15;
- if (mega_byte_part != 0) {
- jam();
- clogPageFileSize+= (16 - mega_byte_part);
- }
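- // Added note (not in the original source): sizeof(LogPageRecord) is
- // 32 kbytes, so 16 pages equal half a MByte. For example, an assumed
- // 2 MB REDO buffer gives 2*1024*1024 / 32768 = 64 pages (already a
- // multiple of 16, no padding), while a 1 000 000 byte buffer gives
- // 30 pages and is padded up to 32 pages, i.e. one MByte.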
-
- addRecSignal(GSN_PACKED_SIGNAL, &Dblqh::execPACKED_SIGNAL);
- addRecSignal(GSN_DEBUG_SIG, &Dblqh::execDEBUG_SIG);
- addRecSignal(GSN_ATTRINFO, &Dblqh::execATTRINFO);
- addRecSignal(GSN_KEYINFO, &Dblqh::execKEYINFO);
- addRecSignal(GSN_LQHKEYREQ, &Dblqh::execLQHKEYREQ);
- addRecSignal(GSN_LQHKEYREF, &Dblqh::execLQHKEYREF);
- addRecSignal(GSN_COMMIT, &Dblqh::execCOMMIT);
- addRecSignal(GSN_COMPLETE, &Dblqh::execCOMPLETE);
- addRecSignal(GSN_LQHKEYCONF, &Dblqh::execLQHKEYCONF);
-#ifdef VM_TRACE
- addRecSignal(GSN_TESTSIG, &Dblqh::execTESTSIG);
-#endif
- addRecSignal(GSN_LQH_RESTART_OP, &Dblqh::execLQH_RESTART_OP);
- addRecSignal(GSN_CONTINUEB, &Dblqh::execCONTINUEB);
- addRecSignal(GSN_START_RECREQ, &Dblqh::execSTART_RECREQ);
- addRecSignal(GSN_START_RECCONF, &Dblqh::execSTART_RECCONF);
- addRecSignal(GSN_EXEC_FRAGREQ, &Dblqh::execEXEC_FRAGREQ);
- addRecSignal(GSN_EXEC_FRAGCONF, &Dblqh::execEXEC_FRAGCONF);
- addRecSignal(GSN_EXEC_FRAGREF, &Dblqh::execEXEC_FRAGREF);
- addRecSignal(GSN_START_EXEC_SR, &Dblqh::execSTART_EXEC_SR);
- addRecSignal(GSN_EXEC_SRREQ, &Dblqh::execEXEC_SRREQ);
- addRecSignal(GSN_EXEC_SRCONF, &Dblqh::execEXEC_SRCONF);
- addRecSignal(GSN_SCAN_HBREP, &Dblqh::execSCAN_HBREP);
-
- addRecSignal(GSN_ALTER_TAB_REQ, &Dblqh::execALTER_TAB_REQ);
-
- // Trigger signals, transit to from TUP
- addRecSignal(GSN_CREATE_TRIG_REQ, &Dblqh::execCREATE_TRIG_REQ);
- addRecSignal(GSN_CREATE_TRIG_CONF, &Dblqh::execCREATE_TRIG_CONF);
- addRecSignal(GSN_CREATE_TRIG_REF, &Dblqh::execCREATE_TRIG_REF);
-
- addRecSignal(GSN_DROP_TRIG_REQ, &Dblqh::execDROP_TRIG_REQ);
- addRecSignal(GSN_DROP_TRIG_CONF, &Dblqh::execDROP_TRIG_CONF);
- addRecSignal(GSN_DROP_TRIG_REF, &Dblqh::execDROP_TRIG_REF);
-
- addRecSignal(GSN_DUMP_STATE_ORD, &Dblqh::execDUMP_STATE_ORD);
- addRecSignal(GSN_ACC_COM_BLOCK, &Dblqh::execACC_COM_BLOCK);
- addRecSignal(GSN_ACC_COM_UNBLOCK, &Dblqh::execACC_COM_UNBLOCK);
- addRecSignal(GSN_TUP_COM_BLOCK, &Dblqh::execTUP_COM_BLOCK);
- addRecSignal(GSN_TUP_COM_UNBLOCK, &Dblqh::execTUP_COM_UNBLOCK);
- addRecSignal(GSN_NODE_FAILREP, &Dblqh::execNODE_FAILREP);
- addRecSignal(GSN_CHECK_LCP_STOP, &Dblqh::execCHECK_LCP_STOP);
- addRecSignal(GSN_SEND_PACKED, &Dblqh::execSEND_PACKED);
- addRecSignal(GSN_TUP_ATTRINFO, &Dblqh::execTUP_ATTRINFO);
- addRecSignal(GSN_READ_CONFIG_REQ, &Dblqh::execREAD_CONFIG_REQ, true);
- addRecSignal(GSN_LQHFRAGREQ, &Dblqh::execLQHFRAGREQ);
- addRecSignal(GSN_LQHADDATTREQ, &Dblqh::execLQHADDATTREQ);
- addRecSignal(GSN_TUP_ADD_ATTCONF, &Dblqh::execTUP_ADD_ATTCONF);
- addRecSignal(GSN_TUP_ADD_ATTRREF, &Dblqh::execTUP_ADD_ATTRREF);
- addRecSignal(GSN_ACCFRAGCONF, &Dblqh::execACCFRAGCONF);
- addRecSignal(GSN_ACCFRAGREF, &Dblqh::execACCFRAGREF);
- addRecSignal(GSN_TUPFRAGCONF, &Dblqh::execTUPFRAGCONF);
- addRecSignal(GSN_TUPFRAGREF, &Dblqh::execTUPFRAGREF);
- addRecSignal(GSN_TAB_COMMITREQ, &Dblqh::execTAB_COMMITREQ);
- addRecSignal(GSN_ACCSEIZECONF, &Dblqh::execACCSEIZECONF);
- addRecSignal(GSN_ACCSEIZEREF, &Dblqh::execACCSEIZEREF);
- addRecSignal(GSN_READ_NODESCONF, &Dblqh::execREAD_NODESCONF);
- addRecSignal(GSN_READ_NODESREF, &Dblqh::execREAD_NODESREF);
- addRecSignal(GSN_STTOR, &Dblqh::execSTTOR);
- addRecSignal(GSN_NDB_STTOR, &Dblqh::execNDB_STTOR);
- addRecSignal(GSN_TUPSEIZECONF, &Dblqh::execTUPSEIZECONF);
- addRecSignal(GSN_TUPSEIZEREF, &Dblqh::execTUPSEIZEREF);
- addRecSignal(GSN_ACCKEYCONF, &Dblqh::execACCKEYCONF);
- addRecSignal(GSN_ACCKEYREF, &Dblqh::execACCKEYREF);
- addRecSignal(GSN_TUPKEYCONF, &Dblqh::execTUPKEYCONF);
- addRecSignal(GSN_TUPKEYREF, &Dblqh::execTUPKEYREF);
- addRecSignal(GSN_ABORT, &Dblqh::execABORT);
- addRecSignal(GSN_ABORTREQ, &Dblqh::execABORTREQ);
- addRecSignal(GSN_COMMITREQ, &Dblqh::execCOMMITREQ);
- addRecSignal(GSN_COMPLETEREQ, &Dblqh::execCOMPLETEREQ);
-#ifdef VM_TRACE
- addRecSignal(GSN_MEMCHECKREQ, &Dblqh::execMEMCHECKREQ);
-#endif
- addRecSignal(GSN_SCAN_FRAGREQ, &Dblqh::execSCAN_FRAGREQ);
- addRecSignal(GSN_SCAN_NEXTREQ, &Dblqh::execSCAN_NEXTREQ);
- addRecSignal(GSN_ACC_SCANCONF, &Dblqh::execACC_SCANCONF);
- addRecSignal(GSN_ACC_SCANREF, &Dblqh::execACC_SCANREF);
- addRecSignal(GSN_NEXT_SCANCONF, &Dblqh::execNEXT_SCANCONF);
- addRecSignal(GSN_NEXT_SCANREF, &Dblqh::execNEXT_SCANREF);
- addRecSignal(GSN_STORED_PROCCONF, &Dblqh::execSTORED_PROCCONF);
- addRecSignal(GSN_STORED_PROCREF, &Dblqh::execSTORED_PROCREF);
- addRecSignal(GSN_COPY_FRAGREQ, &Dblqh::execCOPY_FRAGREQ);
- addRecSignal(GSN_COPY_ACTIVEREQ, &Dblqh::execCOPY_ACTIVEREQ);
- addRecSignal(GSN_COPY_STATEREQ, &Dblqh::execCOPY_STATEREQ);
- addRecSignal(GSN_LQH_TRANSREQ, &Dblqh::execLQH_TRANSREQ);
- addRecSignal(GSN_TRANSID_AI, &Dblqh::execTRANSID_AI);
- addRecSignal(GSN_INCL_NODEREQ, &Dblqh::execINCL_NODEREQ);
- addRecSignal(GSN_ACC_LCPCONF, &Dblqh::execACC_LCPCONF);
- addRecSignal(GSN_ACC_LCPREF, &Dblqh::execACC_LCPREF);
- addRecSignal(GSN_ACC_LCPSTARTED, &Dblqh::execACC_LCPSTARTED);
- addRecSignal(GSN_ACC_CONTOPCONF, &Dblqh::execACC_CONTOPCONF);
- addRecSignal(GSN_LCP_FRAGIDCONF, &Dblqh::execLCP_FRAGIDCONF);
- addRecSignal(GSN_LCP_FRAGIDREF, &Dblqh::execLCP_FRAGIDREF);
- addRecSignal(GSN_LCP_HOLDOPCONF, &Dblqh::execLCP_HOLDOPCONF);
- addRecSignal(GSN_LCP_HOLDOPREF, &Dblqh::execLCP_HOLDOPREF);
- addRecSignal(GSN_TUP_PREPLCPCONF, &Dblqh::execTUP_PREPLCPCONF);
- addRecSignal(GSN_TUP_PREPLCPREF, &Dblqh::execTUP_PREPLCPREF);
- addRecSignal(GSN_TUP_LCPCONF, &Dblqh::execTUP_LCPCONF);
- addRecSignal(GSN_TUP_LCPREF, &Dblqh::execTUP_LCPREF);
- addRecSignal(GSN_TUP_LCPSTARTED, &Dblqh::execTUP_LCPSTARTED);
- addRecSignal(GSN_END_LCPCONF, &Dblqh::execEND_LCPCONF);
-
- addRecSignal(GSN_EMPTY_LCP_REQ, &Dblqh::execEMPTY_LCP_REQ);
- addRecSignal(GSN_LCP_FRAG_ORD, &Dblqh::execLCP_FRAG_ORD);
-
- addRecSignal(GSN_START_FRAGREQ, &Dblqh::execSTART_FRAGREQ);
- addRecSignal(GSN_START_RECREF, &Dblqh::execSTART_RECREF);
- addRecSignal(GSN_SR_FRAGIDCONF, &Dblqh::execSR_FRAGIDCONF);
- addRecSignal(GSN_SR_FRAGIDREF, &Dblqh::execSR_FRAGIDREF);
- addRecSignal(GSN_ACC_SRCONF, &Dblqh::execACC_SRCONF);
- addRecSignal(GSN_ACC_SRREF, &Dblqh::execACC_SRREF);
- addRecSignal(GSN_TUP_SRCONF, &Dblqh::execTUP_SRCONF);
- addRecSignal(GSN_TUP_SRREF, &Dblqh::execTUP_SRREF);
- addRecSignal(GSN_GCP_SAVEREQ, &Dblqh::execGCP_SAVEREQ);
- addRecSignal(GSN_FSOPENCONF, &Dblqh::execFSOPENCONF);
- addRecSignal(GSN_FSOPENREF, &Dblqh::execFSOPENREF);
- addRecSignal(GSN_FSCLOSECONF, &Dblqh::execFSCLOSECONF);
- addRecSignal(GSN_FSCLOSEREF, &Dblqh::execFSCLOSEREF);
- addRecSignal(GSN_FSWRITECONF, &Dblqh::execFSWRITECONF);
- addRecSignal(GSN_FSWRITEREF, &Dblqh::execFSWRITEREF);
- addRecSignal(GSN_FSREADCONF, &Dblqh::execFSREADCONF);
- addRecSignal(GSN_FSREADREF, &Dblqh::execFSREADREF);
- addRecSignal(GSN_ACC_ABORTCONF, &Dblqh::execACC_ABORTCONF);
- addRecSignal(GSN_SET_VAR_REQ, &Dblqh::execSET_VAR_REQ);
- addRecSignal(GSN_TIME_SIGNAL, &Dblqh::execTIME_SIGNAL);
- addRecSignal(GSN_FSSYNCCONF, &Dblqh::execFSSYNCCONF);
- addRecSignal(GSN_FSSYNCREF, &Dblqh::execFSSYNCREF);
- addRecSignal(GSN_REMOVE_MARKER_ORD, &Dblqh::execREMOVE_MARKER_ORD);
-
- //addRecSignal(GSN_DROP_TAB_REQ, &Dblqh::execDROP_TAB_REQ);
- addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dblqh::execPREP_DROP_TAB_REQ);
- addRecSignal(GSN_WAIT_DROP_TAB_REQ, &Dblqh::execWAIT_DROP_TAB_REQ);
- addRecSignal(GSN_DROP_TAB_REQ, &Dblqh::execDROP_TAB_REQ);
-
- addRecSignal(GSN_LQH_ALLOCREQ, &Dblqh::execLQH_ALLOCREQ);
- addRecSignal(GSN_LQH_WRITELOG_REQ, &Dblqh::execLQH_WRITELOG_REQ);
-
- // TUX
- addRecSignal(GSN_TUXFRAGCONF, &Dblqh::execTUXFRAGCONF);
- addRecSignal(GSN_TUXFRAGREF, &Dblqh::execTUXFRAGREF);
- addRecSignal(GSN_TUX_ADD_ATTRCONF, &Dblqh::execTUX_ADD_ATTRCONF);
- addRecSignal(GSN_TUX_ADD_ATTRREF, &Dblqh::execTUX_ADD_ATTRREF);
-
- addRecSignal(GSN_READ_PSUEDO_REQ, &Dblqh::execREAD_PSUEDO_REQ);
-
- initData();
-
-#ifdef VM_TRACE
- {
- void* tmp[] = {
- &addfragptr,
- &attrinbufptr,
- &databufptr,
- &fragptr,
- &gcpPtr,
- &lcpPtr,
- &lcpLocptr,
- &logPartPtr,
- &logFilePtr,
- &lfoPtr,
- &logPagePtr,
- &pageRefPtr,
- &scanptr,
- &tabptr,
- &tcConnectptr,
- &tcNodeFailptr,
- };
- init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0]));
- }
-#endif
-
-}//Dblqh::Dblqh()
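
The constructor above sizes clogPageFileSize from the configured REDO buffer and rounds it up to the next multiple of 16 log pages, so the buffer always covers whole half-megabytes. A minimal standalone sketch of that rounding rule, using a hypothetical helper name and no NDB types:

#include <cassert>

// Hypothetical helper (not NDB code): round a REDO log page count up to a
// multiple of 16 pages, so the buffer always covers whole half-megabytes.
static unsigned roundPagesToHalfMb(unsigned pages)
{
  const unsigned rest = pages & 15;            // pages modulo 16
  return rest == 0 ? pages : pages + (16 - rest);
}

int main()
{
  assert(roundPagesToHalfMb(32) == 32);        // already aligned
  assert(roundPagesToHalfMb(33) == 48);        // rounded up to the next half MB
  return 0;
}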
-
-Dblqh::~Dblqh()
-{
- // Records with dynamic sizes
- deallocRecord((void **)&addFragRecord, "AddFragRecord",
- sizeof(AddFragRecord),
- caddfragrecFileSize);
-
- deallocRecord((void**)&attrbuf,
- "Attrbuf",
- sizeof(Attrbuf),
- cattrinbufFileSize);
-
- deallocRecord((void**)&databuf,
- "Databuf",
- sizeof(Databuf),
- cdatabufFileSize);
-
- deallocRecord((void**)&fragrecord,
- "Fragrecord",
- sizeof(Fragrecord),
- cfragrecFileSize);
-
- deallocRecord((void**)&gcpRecord,
- "GcpRecord",
- sizeof(GcpRecord),
- cgcprecFileSize);
-
- deallocRecord((void**)&hostRecord,
- "HostRecord",
- sizeof(HostRecord),
- chostFileSize);
-
- deallocRecord((void**)&lcpRecord,
- "LcpRecord",
- sizeof(LcpRecord),
- clcpFileSize);
-
- deallocRecord((void**)&lcpLocRecord,
- "LcpLocRecord",
- sizeof(LcpLocRecord),
- clcpLocrecFileSize);
-
- deallocRecord((void**)&logPartRecord,
- "LogPartRecord",
- sizeof(LogPartRecord),
- clogPartFileSize);
-
- deallocRecord((void**)&logFileRecord,
- "LogFileRecord",
- sizeof(LogFileRecord),
- clogFileFileSize);
-
- deallocRecord((void**)&logFileOperationRecord,
- "LogFileOperationRecord",
- sizeof(LogFileOperationRecord),
- clfoFileSize);
-
- deallocRecord((void**)&logPageRecord,
- "LogPageRecord",
- sizeof(LogPageRecord),
- clogPageFileSize);
-
- deallocRecord((void**)&pageRefRecord,
- "PageRefRecord",
- sizeof(PageRefRecord),
- cpageRefFileSize);
-
-
- deallocRecord((void**)&tablerec,
- "Tablerec",
- sizeof(Tablerec),
- ctabrecFileSize);
-
- deallocRecord((void**)&tcConnectionrec,
- "TcConnectionrec",
- sizeof(TcConnectionrec),
- ctcConnectrecFileSize);
-
- deallocRecord((void**)&tcNodeFailRecord,
- "TcNodeFailRecord",
- sizeof(TcNodeFailRecord),
- ctcNodeFailrecFileSize);
-}//Dblqh::~Dblqh()
-
-BLOCK_FUNCTIONS(Dblqh)
-
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
deleted file mode 100644
index 725ea04c148..00000000000
--- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ /dev/null
@@ -1,18635 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#define DBLQH_C
-#include "Dblqh.hpp"
-#include <ndb_limits.h>
-#include <md5_hash.hpp>
-
-#include <ndb_version.h>
-#include <signaldata/TuxBound.hpp>
-#include <signaldata/AccScan.hpp>
-#include <signaldata/CopyActive.hpp>
-#include <signaldata/CopyFrag.hpp>
-#include <signaldata/CreateTrig.hpp>
-#include <signaldata/DropTrig.hpp>
-#include <signaldata/EmptyLcp.hpp>
-#include <signaldata/EventReport.hpp>
-#include <signaldata/ExecFragReq.hpp>
-#include <signaldata/GCPSave.hpp>
-#include <signaldata/TcKeyRef.hpp>
-#include <signaldata/LqhKey.hpp>
-#include <signaldata/NextScan.hpp>
-#include <signaldata/NFCompleteRep.hpp>
-#include <signaldata/NodeFailRep.hpp>
-#include <signaldata/ReadNodesConf.hpp>
-#include <signaldata/RelTabMem.hpp>
-#include <signaldata/ScanFrag.hpp>
-#include <signaldata/SrFragidConf.hpp>
-#include <signaldata/StartFragReq.hpp>
-#include <signaldata/StartRec.hpp>
-#include <signaldata/TupKey.hpp>
-#include <signaldata/TupCommit.hpp>
-#include <signaldata/LqhFrag.hpp>
-#include <signaldata/AccFrag.hpp>
-#include <signaldata/TupFrag.hpp>
-#include <signaldata/DumpStateOrd.hpp>
-#include <signaldata/PackedSignal.hpp>
-
-#include <signaldata/PrepDropTab.hpp>
-#include <signaldata/DropTab.hpp>
-
-#include <signaldata/AlterTab.hpp>
-
-#include <signaldata/LCP.hpp>
-
-// Use DEBUG to print messages that should be
-// seen only when we debug the product
-#ifdef VM_TRACE
-#define DEBUG(x) ndbout << "DBLQH: "<< x << endl;
-NdbOut &
-operator<<(NdbOut& out, Dblqh::TcConnectionrec::TransactionState state){
- out << (int)state;
- return out;
-}
-
-NdbOut &
-operator<<(NdbOut& out, Dblqh::TcConnectionrec::LogWriteState state){
- out << (int)state;
- return out;
-}
-
-NdbOut &
-operator<<(NdbOut& out, Dblqh::TcConnectionrec::ListState state){
- out << (int)state;
- return out;
-}
-
-NdbOut &
-operator<<(NdbOut& out, Dblqh::TcConnectionrec::AbortState state){
- out << (int)state;
- return out;
-}
-
-NdbOut &
-operator<<(NdbOut& out, Dblqh::ScanRecord::ScanState state){
- out << (int)state;
- return out;
-}
-
-NdbOut &
-operator<<(NdbOut& out, Dblqh::LogFileOperationRecord::LfoState state){
- out << (int)state;
- return out;
-}
-
-NdbOut &
-operator<<(NdbOut& out, Dblqh::ScanRecord::ScanType state){
- out << (int)state;
- return out;
-}
-
-#else
-#define DEBUG(x)
-#endif
-
-//#define MARKER_TRACE 1
-//#define TRACE_SCAN_TAKEOVER 1
-
-const Uint32 NR_ScanNo = 0;
-
-void Dblqh::execACC_COM_BLOCK(Signal* signal)
-{
- jamEntry();
-/* ------------------------------------------------------------------------- */
-// Undo log buffer in ACC is critically close to being full.
-/* ------------------------------------------------------------------------- */
- cCounterAccCommitBlocked++;
- caccCommitBlocked = true;
- cCommitBlocked = true;
- return;
-}//Dblqh::execACC_COM_BLOCK()
-
-void Dblqh::execACC_COM_UNBLOCK(Signal* signal)
-{
- jamEntry();
-/* ------------------------------------------------------------------------- */
-// Undo log buffer in ACC ok again.
-/* ------------------------------------------------------------------------- */
- caccCommitBlocked = false;
- if (ctupCommitBlocked == false) {
- jam();
- cCommitBlocked = false;
- }//if
- return;
-}//Dblqh::execACC_COM_UNBLOCK()
-
-void Dblqh::execTUP_COM_BLOCK(Signal* signal)
-{
- jamEntry();
-/* ------------------------------------------------------------------------- */
-// Undo log buffer in TUP is critically close to being full.
-/* ------------------------------------------------------------------------- */
- cCounterTupCommitBlocked++;
- ctupCommitBlocked = true;
- cCommitBlocked = true;
- return;
-}//Dblqh::execTUP_COM_BLOCK()
-
-void Dblqh::execTUP_COM_UNBLOCK(Signal* signal)
-{
- jamEntry();
-/* ------------------------------------------------------------------------- */
-// Undo log buffer in TUP ok again.
-/* ------------------------------------------------------------------------- */
- ctupCommitBlocked = false;
- if (caccCommitBlocked == false) {
- jam();
- cCommitBlocked = false;
- }//if
- return;
-}//Dblqh::execTUP_COM_UNBLOCK()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SEND SYSTEM ERROR ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::systemError(Signal* signal)
-{
- progError(0, 0);
-}//Dblqh::systemError()
-
-/* *************** */
-/* ACCSEIZEREF > */
-/* *************** */
-void Dblqh::execACCSEIZEREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dblqh::execACCSEIZEREF()
-
-/* ******************************************************>> */
-/* THIS SIGNAL IS USED TO HANDLE REAL-TIME */
-/* BREAKS THAT ARE NECESSARY TO ENSURE REAL-TIME */
-/* OPERATION OF LQH. */
-/* This signal is also used for signal loops, for example */
-/* the timeout handling for writing logs every second. */
-/* ******************************************************>> */
-void Dblqh::execCONTINUEB(Signal* signal)
-{
- jamEntry();
- Uint32 tcase = signal->theData[0];
- Uint32 data0 = signal->theData[1];
- Uint32 data1 = signal->theData[2];
- Uint32 data2 = signal->theData[3];
-#if 0
- if (tcase == RNIL) {
- tcConnectptr.i = data0;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- ndbout << "State = " << tcConnectptr.p->transactionState;
- ndbout << " seqNoReplica = " << tcConnectptr.p->seqNoReplica;
- ndbout << " tcNodeFailrec = " << tcConnectptr.p->tcNodeFailrec;
- ndbout << " activeCreat = " << tcConnectptr.p->activeCreat;
- ndbout << endl;
- ndbout << "tupkeyData0 = " << tcConnectptr.p->tupkeyData[0];
- ndbout << "tupkeyData1 = " << tcConnectptr.p->tupkeyData[1];
- ndbout << "tupkeyData2 = " << tcConnectptr.p->tupkeyData[2];
- ndbout << "tupkeyData3 = " << tcConnectptr.p->tupkeyData[3];
- ndbout << endl;
- ndbout << "abortState = " << tcConnectptr.p->abortState;
- ndbout << "listState = " << tcConnectptr.p->listState;
- ndbout << endl;
- return;
- }//if
-#endif
- switch (tcase) {
- case ZLOG_LQHKEYREQ:
- if (cnoOfLogPages == 0) {
- jam();
- sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
- return;
- }//if
- logPartPtr.i = data0;
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
- logFilePtr.i = logPartPtr.p->currentLogfile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logPagePtr.i = logFilePtr.p->currentLogpage;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
-
- tcConnectptr.i = logPartPtr.p->firstLogQueue;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if ((cCommitBlocked == true) &&
- (fragptr.p->fragActiveStatus == ZTRUE)) {
- jam();
- sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
- return;
- }//if
- logPartPtr.p->LogLqhKeyReqSent = ZFALSE;
- getFirstInLogQueue(signal);
-
- switch (tcConnectptr.p->transactionState) {
- case TcConnectionrec::LOG_QUEUED:
- if (tcConnectptr.p->abortState != TcConnectionrec::ABORT_IDLE) {
- jam();
- logNextStart(signal);
- abortCommonLab(signal);
- return;
- } else {
- jam();
-/*------------------------------------------------------------*/
-/* WE MUST SET THE STATE OF THE LOG PART TO IDLE TO */
-/* ENSURE THAT WE ARE NOT QUEUED AGAIN ON THE LOG PART */
-/* WE WILL SET THE LOG PART STATE TO ACTIVE IMMEDIATELY */
-/* SO NO OTHER PROCESS WILL SEE THIS STATE. IT IS MERELY*/
-/* USED TO ENABLE REUSE OF CODE. */
-/*------------------------------------------------------------*/
- if (logPartPtr.p->logPartState == LogPartRecord::ACTIVE) {
- jam();
- logPartPtr.p->logPartState = LogPartRecord::IDLE;
- }//if
- logLqhkeyreqLab(signal);
- return;
- }//if
- break;
- case TcConnectionrec::LOG_ABORT_QUEUED:
- jam();
- writeAbortLog(signal);
- removeLogTcrec(signal);
- logNextStart(signal);
- continueAfterLogAbortWriteLab(signal);
- return;
- break;
- case TcConnectionrec::LOG_COMMIT_QUEUED:
- case TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL:
- jam();
- writeCommitLog(signal, logPartPtr);
- logNextStart(signal);
- if (tcConnectptr.p->transactionState == TcConnectionrec::LOG_COMMIT_QUEUED) {
- if (tcConnectptr.p->seqNoReplica != 0) {
- jam();
- commitReplyLab(signal);
- } else {
- jam();
- localCommitLab(signal);
- }//if
- return;
- } else {
- jam();
- tcConnectptr.p->transactionState = TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL;
- return;
- }//if
- break;
- case TcConnectionrec::COMMIT_QUEUED:
- jam();
- logNextStart(signal);
- localCommitLab(signal);
- break;
- case TcConnectionrec::ABORT_QUEUED:
- jam();
- logNextStart(signal);
- abortCommonLab(signal);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
- break;
- case ZSR_GCI_LIMITS:
- jam();
- signal->theData[0] = data0;
- srGciLimits(signal);
- return;
- break;
- case ZSR_LOG_LIMITS:
- jam();
- signal->theData[0] = data0;
- signal->theData[1] = data1;
- signal->theData[2] = data2;
- srLogLimits(signal);
- return;
- break;
- case ZSEND_EXEC_CONF:
- jam();
- signal->theData[0] = data0;
- sendExecConf(signal);
- return;
- break;
- case ZEXEC_SR:
- jam();
- signal->theData[0] = data0;
- execSr(signal);
- return;
- break;
- case ZSR_FOURTH_COMP:
- jam();
- signal->theData[0] = data0;
- srFourthComp(signal);
- return;
- break;
- case ZINIT_FOURTH:
- jam();
- signal->theData[0] = data0;
- initFourth(signal);
- return;
- break;
- case ZTIME_SUPERVISION:
- jam();
- signal->theData[0] = data0;
- timeSup(signal);
- return;
- break;
- case ZSR_PHASE3_START:
- jam();
- signal->theData[0] = data0;
- srPhase3Start(signal);
- return;
- break;
- case ZLQH_TRANS_NEXT:
- jam();
- tcNodeFailptr.i = data0;
- ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
- lqhTransNextLab(signal);
- return;
- break;
- case ZSCAN_TC_CONNECT:
- jam();
- tabptr.i = data1;
- ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
- scanTcConnectLab(signal, data0, data2);
- return;
- break;
- case ZINITIALISE_RECORDS:
- jam();
- initialiseRecordsLab(signal, data0, data2, signal->theData[4]);
- return;
- break;
- case ZINIT_GCP_REC:
- jam();
- gcpPtr.i = 0;
- ptrAss(gcpPtr, gcpRecord);
- initGcpRecLab(signal);
- return;
- break;
- case ZRESTART_OPERATIONS_AFTER_STOP:
- jam();
- tcConnectptr.i = data0;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- if (tcConnectptr.p->listState != TcConnectionrec::WAIT_QUEUE_LIST) {
- jam();
- return;
- }//if
- releaseWaitQueue(signal);
- linkActiveFrag(signal);
- restartOperationsAfterStopLab(signal);
- return;
- break;
- case ZCHECK_LCP_STOP_BLOCKED:
- jam();
- c_scanRecordPool.getPtr(scanptr, data0);
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- checkLcpStopBlockedLab(signal);
- return;
- case ZSCAN_MARKERS:
- jam();
- scanMarkers(signal, data0, data1, data2);
- return;
- break;
-
- case ZOPERATION_EVENT_REP:
- jam();
- /* --------------------------------------------------------------------- */
- // Report information about transaction activity at regular intervals (see the delayed CONTINUEB below).
- /* --------------------------------------------------------------------- */
- if (signal->theData[1] == 0) {
- signal->theData[0] = NDB_LE_OperationReportCounters;
- signal->theData[1] = c_Counters.operations;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
- }//if
- c_Counters.clear();
- signal->theData[0] = ZOPERATION_EVENT_REP;
- signal->theData[1] = 0;
- sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 2);
- break;
- case ZPREP_DROP_TABLE:
- jam();
- checkDropTab(signal);
- return;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
-}//Dblqh::execCONTINUEB()
-
-/* *********************************************************> */
-/* Request from DBDIH to include a new node in the node list */
-/* and so forth. */
-/* *********************************************************> */
-void Dblqh::execINCL_NODEREQ(Signal* signal)
-{
- jamEntry();
- BlockReference retRef = signal->theData[0];
- Uint32 nodeId = signal->theData[1];
- cnewestGci = signal->theData[2];
- cnewestCompletedGci = signal->theData[2] - 1;
- ndbrequire(cnoOfNodes < MAX_NDB_NODES);
- for (Uint32 i = 0; i < cnoOfNodes; i++) {
- jam();
- if (cnodeData[i] == nodeId) {
- jam();
- cnodeStatus[i] = ZNODE_UP;
- }//if
- }//for
- signal->theData[0] = cownref;
- sendSignal(retRef, GSN_INCL_NODECONF, signal, 1, JBB);
- return;
-}//Dblqh::execINCL_NODEREQ()
-
-void Dblqh::execTUPSEIZEREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dblqh::execTUPSEIZEREF()
-
-/* ########################################################################## */
-/* ####### START / RESTART MODULE ####### */
-/* ########################################################################## */
-/* ************************************************************************>> */
-/* This is the first signal that arrives in a start / restart. Sender is NDBCNTR_REF. */
-/* ************************************************************************>> */
-void Dblqh::execSTTOR(Signal* signal)
-{
- UintR tstartPhase;
-
- jamEntry();
- /* START CASE */
- tstartPhase = signal->theData[1];
- /* SYSTEM RESTART RANK */
- csignalKey = signal->theData[6];
- switch (tstartPhase) {
- case ZSTART_PHASE1:
- jam();
- cstartPhase = tstartPhase;
- sttorStartphase1Lab(signal);
- c_tup = (Dbtup*)globalData.getBlock(DBTUP);
- ndbrequire(c_tup != 0);
- return;
- break;
- default:
- jam();
- /*empty*/;
- sendsttorryLab(signal);
- return;
- break;
- }//switch
-}//Dblqh::execSTTOR()
-
-/* ***************************************> */
-/* Restart phases 1 - 6, sender is Ndbcntr */
-/* ***************************************> */
-void Dblqh::execNDB_STTOR(Signal* signal)
-{
- jamEntry();
- Uint32 ownNodeId = signal->theData[1]; /* MY NODE ID */
- cstartPhase = signal->theData[2]; /* START PHASE */
- cstartType = signal->theData[3]; /* START TYPE */
-
- switch (cstartPhase) {
- case ZSTART_PHASE1:
- jam();
- preComputedRequestInfoMask = 0;
- LqhKeyReq::setKeyLen(preComputedRequestInfoMask, RI_KEYLEN_MASK);
- LqhKeyReq::setLastReplicaNo(preComputedRequestInfoMask, RI_LAST_REPL_MASK);
- LqhKeyReq::setLockType(preComputedRequestInfoMask, RI_LOCK_TYPE_MASK);
- // Dont LqhKeyReq::setApplicationAddressFlag
- LqhKeyReq::setDirtyFlag(preComputedRequestInfoMask, 1);
- // Dont LqhKeyReq::setInterpretedFlag
- LqhKeyReq::setSimpleFlag(preComputedRequestInfoMask, 1);
- LqhKeyReq::setOperation(preComputedRequestInfoMask, RI_OPERATION_MASK);
- // Dont setAIInLqhKeyReq
- // Dont setSeqNoReplica
- // Dont setSameClientAndTcFlag
- // Dont setReturnedReadLenAIFlag
- // Dont setAPIVersion
- LqhKeyReq::setMarkerFlag(preComputedRequestInfoMask, 1);
- //preComputedRequestInfoMask = 0x003d7fff;
- startphase1Lab(signal, /* dummy */ ~0, ownNodeId);
-
- signal->theData[0] = ZOPERATION_EVENT_REP;
- signal->theData[1] = 1;
- sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
- return;
- break;
- case ZSTART_PHASE2:
- jam();
- startphase2Lab(signal, /* dummy */ ~0);
- return;
- break;
- case ZSTART_PHASE3:
- jam();
- startphase3Lab(signal);
- return;
- break;
- case ZSTART_PHASE4:
- jam();
- startphase4Lab(signal);
- return;
- break;
- case ZSTART_PHASE6:
- jam();
- startphase6Lab(signal);
- return;
- break;
- default:
- jam();
- /*empty*/;
- sendNdbSttorryLab(signal);
- return;
- break;
- }//switch
-}//Dblqh::execNDB_STTOR()
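
execNDB_STTOR above builds preComputedRequestInfoMask by writing all-ones values into the LqhKeyReq fields it cares about (key length, last replica number, lock type, operation) plus the dirty, simple and marker flags; presumably the mask is later ANDed against incoming request-info words to keep just those fields. A sketch of that precomputed-mask pattern with a made-up field layout (the real bit positions are defined by LqhKeyReq in signaldata/LqhKey.hpp and are not reproduced here):

#include <cassert>

// Made-up field layout, for illustration only.
static const unsigned KEYLEN_SHIFT = 0,  KEYLEN_MASK = 0x3ff;  // bits 0-9
static const unsigned OP_SHIFT     = 10, OP_MASK     = 0x7;    // bits 10-12
static const unsigned DIRTY_SHIFT  = 13;                       // one flag bit

static void setKeyLen(unsigned& ri, unsigned v)    { ri |= (v & KEYLEN_MASK) << KEYLEN_SHIFT; }
static void setOperation(unsigned& ri, unsigned v) { ri |= (v & OP_MASK) << OP_SHIFT; }
static void setDirtyFlag(unsigned& ri, unsigned v) { ri |= (v & 1) << DIRTY_SHIFT; }

int main()
{
  // Precompute the mask once by writing all-ones into every field of interest.
  unsigned mask = 0;
  setKeyLen(mask, KEYLEN_MASK);
  setOperation(mask, OP_MASK);
  setDirtyFlag(mask, 1);

  // Later, a single AND keeps only those fields of an incoming request info.
  const unsigned incoming = 0xffffffffu;
  const unsigned filtered = incoming & mask;
  assert((filtered & ~mask) == 0);
  return 0;
}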
-
-/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-/* +++++++ START PHASE 1 +++++++ */
-/* LOAD OUR BLOCK REFERENCE AND OUR PROCESSOR ID */
-/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-void Dblqh::sttorStartphase1Lab(Signal* signal)
-{
- sendsttorryLab(signal);
- return;
-}//Dblqh::sttorStartphase1Lab()
-
-/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-/* +++++++ START PHASE 2 +++++++ */
-/* */
-/* INITIATE ALL RECORDS WITHIN THE BLOCK */
-/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-void Dblqh::startphase1Lab(Signal* signal, Uint32 _dummy, Uint32 ownNodeId)
-{
- UintR Ti;
- HostRecordPtr ThostPtr;
-
-/* ------- INITIATE ALL RECORDS ------- */
- cownNodeid = ownNodeId;
- caccBlockref = calcAccBlockRef (cownNodeid);
- ctupBlockref = calcTupBlockRef (cownNodeid);
- ctuxBlockref = calcTuxBlockRef (cownNodeid);
- cownref = calcLqhBlockRef (cownNodeid);
- for (Ti = 0; Ti < chostFileSize; Ti++) {
- ThostPtr.i = Ti;
- ptrCheckGuard(ThostPtr, chostFileSize, hostRecord);
- ThostPtr.p->hostLqhBlockRef = calcLqhBlockRef(ThostPtr.i);
- ThostPtr.p->hostTcBlockRef = calcTcBlockRef(ThostPtr.i);
- ThostPtr.p->inPackedList = false;
- ThostPtr.p->noOfPackedWordsLqh = 0;
- ThostPtr.p->noOfPackedWordsTc = 0;
- }//for
- cpackedListIndex = 0;
- sendNdbSttorryLab(signal);
- return;
-}//Dblqh::startphase1Lab()
-
-/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-/* +++++++ START PHASE 2 +++++++ */
-/* */
-/* CONNECT LQH WITH ACC AND TUP. */
-/* EVERY CONNECTION RECORD IN LQH IS ASSIGNED TO ONE ACC CONNECTION RECORD */
-/* AND ONE TUP CONNECTION RECORD. */
-/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-void Dblqh::startphase2Lab(Signal* signal, Uint32 _dummy)
-{
- cmaxWordsAtNodeRec = MAX_NO_WORDS_OUTSTANDING_COPY_FRAGMENT;
-/* -- ACC AND TUP CONNECTION PROCESS -- */
- tcConnectptr.i = 0;
- ptrAss(tcConnectptr, tcConnectionrec);
- moreconnectionsLab(signal);
- return;
-}//Dblqh::startphase2Lab()
-
-void Dblqh::moreconnectionsLab(Signal* signal)
-{
- tcConnectptr.p->tcAccBlockref = caccBlockref;
- // set TUX block here (no operation is seized in TUX)
- tcConnectptr.p->tcTuxBlockref = ctuxBlockref;
-/* NO STATE CHECKING IS PERFORMED, ASSUMED TO WORK */
-/* *************** */
-/* ACCSEIZEREQ < */
-/* *************** */
- signal->theData[0] = tcConnectptr.i;
- signal->theData[1] = cownref;
- sendSignal(caccBlockref, GSN_ACCSEIZEREQ, signal, 2, JBB);
- return;
-}//Dblqh::moreconnectionsLab()
-
-/* ***************> */
-/* ACCSEIZECONF > */
-/* ***************> */
-void Dblqh::execACCSEIZECONF(Signal* signal)
-{
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- tcConnectptr.p->accConnectrec = signal->theData[1];
-/* *************** */
-/* TUPSEIZEREQ < */
-/* *************** */
- tcConnectptr.p->tcTupBlockref = ctupBlockref;
- signal->theData[0] = tcConnectptr.i;
- signal->theData[1] = cownref;
- sendSignal(ctupBlockref, GSN_TUPSEIZEREQ, signal, 2, JBB);
- return;
-}//Dblqh::execACCSEIZECONF()
-
-/* ***************> */
-/* TUPSEIZECONF > */
-/* ***************> */
-void Dblqh::execTUPSEIZECONF(Signal* signal)
-{
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- tcConnectptr.p->tupConnectrec = signal->theData[1];
-/* ------- CHECK IF THERE ARE MORE CONNECTIONS TO BE CONNECTED ------- */
- tcConnectptr.i = tcConnectptr.p->nextTcConnectrec;
- if (tcConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- moreconnectionsLab(signal);
- return;
- }//if
-/* ALL LQH_CONNECT RECORDS ARE CONNECTED TO ACC AND TUP ---- */
- sendNdbSttorryLab(signal);
- return;
-}//Dblqh::execTUPSEIZECONF()
-
-/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-/* +++++++ START PHASE 4 +++++++ */
-/* */
-/* CONNECT LQH WITH LQH. */
-/* CONNECT EACH LQH WITH EVERY LQH IN THE DATABASE SYSTEM. */
-/* IF INITIAL START THEN CREATE THE FRAGMENT LOG FILES */
-/*IF SYSTEM RESTART OR NODE RESTART THEN OPEN THE FRAGMENT LOG FILES AND */
-/*FIND THE END OF THE LOG FILES. */
-/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-/* WAIT UNTIL ADD NODE PROCESSES ARE COMPLETED */
-/* IF INITIAL START, ALSO WAIT FOR LOG FILES TO BE INITIALISED */
-/*START TIME SUPERVISION OF LOG FILES. WE HAVE TO WRITE LOG PAGES TO DISK */
-/*EVEN IF THE PAGES ARE NOT FULL TO ENSURE THAT THEY COME TO DISK ASAP. */
-/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-void Dblqh::startphase3Lab(Signal* signal)
-{
- LogFileRecordPtr prevLogFilePtr;
- LogFileRecordPtr zeroLogFilePtr;
-
- caddNodeState = ZTRUE;
-/* ***************<< */
-/* READ_NODESREQ < */
-/* ***************<< */
- cinitialStartOngoing = ZTRUE;
- ndbrequire(cnoLogFiles != 0);
-
- for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
- jam();
- ptrAss(logPartPtr, logPartRecord);
- initLogpart(signal);
- for (Uint32 fileNo = 0; fileNo < cnoLogFiles; fileNo++) {
- seizeLogfile(signal);
- if (fileNo != 0) {
- jam();
- prevLogFilePtr.p->nextLogFile = logFilePtr.i;
- logFilePtr.p->prevLogFile = prevLogFilePtr.i;
- } else {
- jam();
- logPartPtr.p->firstLogfile = logFilePtr.i;
- logPartPtr.p->currentLogfile = logFilePtr.i;
- zeroLogFilePtr.i = logFilePtr.i;
- zeroLogFilePtr.p = logFilePtr.p;
- }//if
- prevLogFilePtr.i = logFilePtr.i;
- prevLogFilePtr.p = logFilePtr.p;
- initLogfile(signal, fileNo);
- if ((cstartType == NodeState::ST_INITIAL_START) ||
- (cstartType == NodeState::ST_INITIAL_NODE_RESTART)) {
- if (logFilePtr.i == zeroLogFilePtr.i) {
- jam();
-/* ------------------------------------------------------------------------- */
-/*IN AN INITIAL START WE START BY CREATING ALL LOG FILES AND SETTING THEIR */
-/*PROPER SIZE AND INITIALISING PAGE ZERO IN ALL FILES. */
-/*WE START BY CREATING FILE ZERO IN EACH LOG PART AND THEN PROCEED */
-/*SEQUENTIALLY THROUGH ALL LOG FILES IN THE LOG PART. */
-/* ------------------------------------------------------------------------- */
- openLogfileInit(signal);
- }//if
- }//if
- }//for
- zeroLogFilePtr.p->prevLogFile = logFilePtr.i;
- logFilePtr.p->nextLogFile = zeroLogFilePtr.i;
- }//for
- if (cstartType != NodeState::ST_INITIAL_START &&
- cstartType != NodeState::ST_INITIAL_NODE_RESTART) {
- jam();
- ndbrequire(cstartType == NodeState::ST_NODE_RESTART ||
- cstartType == NodeState::ST_SYSTEM_RESTART);
- /** --------------------------------------------------------------------
- * THIS CODE KICKS OFF THE SYSTEM RESTART AND NODE RESTART. IT STARTS UP
- * THE RESTART BY FINDING THE END OF THE LOG AND FROM THERE FINDING THE
- * INFO ABOUT THE GLOBAL CHECKPOINTS IN THE FRAGMENT LOG.
- --------------------------------------------------------------------- */
- for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
- jam();
- LogFileRecordPtr locLogFilePtr;
- ptrAss(logPartPtr, logPartRecord);
- locLogFilePtr.i = logPartPtr.p->firstLogfile;
- ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
- locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FRONTPAGE;
- openFileRw(signal, locLogFilePtr);
- }//for
- }//if
-
- signal->theData[0] = cownref;
- sendSignal(NDBCNTR_REF, GSN_READ_NODESREQ, signal, 1, JBB);
- return;
-}//Dblqh::startphase3Lab()
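
startphase3Lab above links the cnoLogFiles file records of every log part into a circular doubly linked list: each newly seized file points back at the previous one, and after the inner loop the first and last files are stitched together. A compact sketch of the same ring-building idea over a plain array, using a hypothetical File struct instead of the record pools:

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-in for a log file record; only the ring links matter here.
struct File { std::size_t next; std::size_t prev; };

// Link files[0..n-1] into a circular doubly linked list, as the start phase
// does per log part (file 0 doubles as the "first" and "current" file).
static void linkRing(std::vector<File>& files)
{
  const std::size_t n = files.size();
  for (std::size_t i = 0; i < n; i++) {
    files[i].next = (i + 1) % n;       // the last file wraps back to file 0
    files[i].prev = (i + n - 1) % n;   // file 0 wraps back to the last file
  }
}

int main()
{
  std::vector<File> files(4);          // e.g. four redo log files in one part
  linkRing(files);
  assert(files[3].next == 0 && files[0].prev == 3);
  return 0;
}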
-
-/* ****************** */
-/* READ_NODESCONF > */
-/* ****************** */
-void Dblqh::execREAD_NODESCONF(Signal* signal)
-{
- jamEntry();
-
- ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
- cnoOfNodes = readNodes->noOfNodes;
-
- unsigned ind = 0;
- unsigned i = 0;
- for (i = 1; i < MAX_NDB_NODES; i++) {
- jam();
- if (NodeBitmask::get(readNodes->allNodes, i)) {
- jam();
- cnodeData[ind] = i;
- cnodeStatus[ind] = NodeBitmask::get(readNodes->inactiveNodes, i);
- //readNodes->getVersionId(i, readNodes->theVersionIds) not used
- ind++;
- }//if
- }//for
- ndbrequire(ind == cnoOfNodes);
- ndbrequire(cnoOfNodes >= 1 && cnoOfNodes < MAX_NDB_NODES);
- ndbrequire(!(cnoOfNodes == 1 && cstartType == NodeState::ST_NODE_RESTART));
-
- caddNodeState = ZFALSE;
- if (cstartType == NodeState::ST_SYSTEM_RESTART) {
- jam();
- sendNdbSttorryLab(signal);
- return;
- }//if
- checkStartCompletedLab(signal);
- return;
-}//Dblqh::execREAD_NODESCONF()
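
execREAD_NODESCONF above walks the node bitmask delivered in READ_NODESCONF and packs the id of every present node into cnodeData[], together with its active/inactive status. A reduced sketch of that bitmask walk, assuming a plain 64-bit mask and a limit of 64 node ids instead of NodeBitmask and MAX_NDB_NODES:

#include <cassert>
#include <vector>

// Illustrative only: collect the ids of the nodes whose bit is set in a mask.
static std::vector<unsigned> nodesFromBitmask(unsigned long long mask,
                                              unsigned maxNodes)
{
  std::vector<unsigned> nodes;
  for (unsigned id = 1; id < maxNodes; id++) {   // node ids start at 1
    if (mask & (1ULL << id))
      nodes.push_back(id);
  }
  return nodes;
}

int main()
{
  // Bits 2, 3 and 5 set: three nodes with those ids.
  const std::vector<unsigned> nodes =
      nodesFromBitmask((1ULL << 2) | (1ULL << 3) | (1ULL << 5), 64);
  assert(nodes.size() == 3 && nodes[0] == 2 && nodes[2] == 5);
  return 0;
}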
-
-void Dblqh::checkStartCompletedLab(Signal* signal)
-{
- if (caddNodeState == ZFALSE) {
- if (cinitialStartOngoing == ZFALSE) {
- jam();
- sendNdbSttorryLab(signal);
- return;
- }//if
- }//if
- return;
-}//Dblqh::checkStartCompletedLab()
-
-void Dblqh::startphase4Lab(Signal* signal)
-{
- sendNdbSttorryLab(signal);
- return;
-}//Dblqh::startphase4Lab()
-
-/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-/* SET CONCURRENCY OF LOCAL CHECKPOINTS TO BE USED AFTER SYSTEM RESTART. */
-/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-void Dblqh::startphase6Lab(Signal* signal)
-{
- cstartPhase = ZNIL;
- cstartType = ZNIL;
- sendNdbSttorryLab(signal);
- return;
-}//Dblqh::startphase6Lab()
-
-void Dblqh::sendNdbSttorryLab(Signal* signal)
-{
- signal->theData[0] = cownref;
- sendSignal(NDBCNTR_REF, GSN_NDB_STTORRY, signal, 1, JBB);
- return;
-}//Dblqh::sendNdbSttorryLab()
-
-void Dblqh::sendsttorryLab(Signal* signal)
-{
-/* *********<< */
-/* STTORRY < */
-/* *********<< */
- signal->theData[0] = csignalKey; /* SIGNAL KEY */
- signal->theData[1] = 3; /* BLOCK CATEGORY */
- signal->theData[2] = 2; /* SIGNAL VERSION NUMBER */
- signal->theData[3] = ZSTART_PHASE1;
- signal->theData[4] = 255;
- sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
- return;
-}//Dblqh::sendsttorryLab()
-
-/* ***************>> */
-/* READ_NODESREF > */
-/* ***************>> */
-void Dblqh::execREAD_NODESREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dblqh::execREAD_NODESREF()
-
-/* *************** */
-/* SIZEALT_REP > */
-/* *************** */
-void Dblqh::execREAD_CONFIG_REQ(Signal* signal)
-{
- const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
- Uint32 ref = req->senderRef;
- Uint32 senderData = req->senderData;
- ndbrequire(req->noOfParameters == 0);
-
- jamEntry();
-
- const ndb_mgm_configuration_iterator * p =
- theConfiguration.getOwnConfigIterator();
- ndbrequire(p != 0);
-
- cnoLogFiles = 8;
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_REDOLOG_FILES,
- &cnoLogFiles));
- ndbrequire(cnoLogFiles > 0);
-
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_FRAG, &cfragrecFileSize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TABLE, &ctabrecFileSize));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TC_CONNECT,
- &ctcConnectrecFileSize));
- clogFileFileSize = 4 * cnoLogFiles;
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_SCAN, &cscanrecFileSize));
- cmaxAccOps = cscanrecFileSize * MAX_PARALLEL_OP_PER_SCAN;
-
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &c_diskless));
-
- initRecords();
- initialiseRecordsLab(signal, 0, ref, senderData);
-
- return;
-}//Dblqh::execREAD_CONFIG_REQ()
-
-/* ########################################################################## */
-/* ####### ADD/DELETE FRAGMENT MODULE ####### */
-/* THIS MODULE IS USED BY DICTIONARY TO CREATE NEW FRAGMENTS AND DELETE */
-/* OLD FRAGMENTS. */
-/* */
-/* ########################################################################## */
-/* -------------------------------------------------------------- */
-/* FRAG REQ */
-/* -------------------------------------------------------------- */
-/* *********************************************************> */
-/* LQHFRAGREQ: Create new fragments for a table. Sender DICT */
-/* *********************************************************> */
-
-// this unbelievable mess could be replaced by one signal to LQH
-// and execute direct to local DICT to get everything at once
-
-void Dblqh::execLQHFRAGREQ(Signal* signal)
-{
- jamEntry();
- LqhFragReq * req = (LqhFragReq*)signal->getDataPtr();
-
- Uint32 retPtr = req->senderData;
- BlockReference retRef = req->senderRef;
- Uint32 fragId = req->fragmentId;
- Uint32 reqinfo = req->requestInfo;
- tabptr.i = req->tableId;
- Uint16 tlocalKeylen = req->localKeyLength;
- Uint32 tmaxLoadFactor = req->maxLoadFactor;
- Uint32 tminLoadFactor = req->minLoadFactor;
- Uint8 tk = req->kValue;
- Uint8 tlhstar = req->lh3DistrBits;
- Uint8 tlh = req->lh3PageBits;
- Uint32 tnoOfAttr = req->noOfAttributes;
- Uint32 tnoOfNull = req->noOfNullAttributes;
- Uint32 noOfAlloc = req->noOfPagesToPreAllocate;
- Uint32 tschemaVersion = req->schemaVersion;
- Uint32 ttupKeyLength = req->keyLength;
- Uint32 nextLcp = req->nextLCP;
- Uint32 noOfKeyAttr = req->noOfKeyAttr;
- Uint32 noOfNewAttr = req->noOfNewAttr;
- Uint32 checksumIndicator = req->checksumIndicator;
- Uint32 noOfAttributeGroups = req->noOfAttributeGroups;
- Uint32 gcpIndicator = req->GCPIndicator;
- Uint32 startGci = req->startGci;
- Uint32 tableType = req->tableType;
- Uint32 primaryTableId = req->primaryTableId;
-
- ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
- bool tempTable = ((reqinfo & LqhFragReq::TemporaryTable) != 0);
-
- /* Temporary tables set to defined in system restart */
- if (tabptr.p->tableStatus == Tablerec::NOT_DEFINED){
- tabptr.p->tableStatus = Tablerec::ADD_TABLE_ONGOING;
- tabptr.p->tableType = tableType;
- tabptr.p->primaryTableId = primaryTableId;
- tabptr.p->schemaVersion = tschemaVersion;
- }//if
-
- if (tabptr.p->tableStatus != Tablerec::ADD_TABLE_ONGOING){
- jam();
- fragrefLab(signal, retRef, retPtr, ZTAB_STATE_ERROR);
- return;
- }//if
- //--------------------------------------------------------------------
- // We could arrive here if we create the fragment as part of a take-over
- // by a hot spare node. The table is then already created and bit 31 is
- // set, thus indicating that we are creating a fragment by copy creation.
- // Also, since the node has already been started, we know that no node
- // restart is ongoing.
- //--------------------------------------------------------------------
-
- if (getFragmentrec(signal, fragId)) {
- jam();
- fragrefLab(signal, retRef, retPtr, terrorCode);
- return;
- }//if
- if (!insertFragrec(signal, fragId)) {
- jam();
- fragrefLab(signal, retRef, retPtr, terrorCode);
- return;
- }//if
- Uint32 copyType = reqinfo & 3;
- initFragrec(signal, tabptr.i, fragId, copyType);
- fragptr.p->startGci = startGci;
- fragptr.p->newestGci = startGci;
- fragptr.p->tableType = tableType;
-
- if (DictTabInfo::isOrderedIndex(tableType)) {
- jam();
- // find corresponding primary table fragment
- TablerecPtr tTablePtr;
- tTablePtr.i = primaryTableId;
- ptrCheckGuard(tTablePtr, ctabrecFileSize, tablerec);
- FragrecordPtr tFragPtr;
- tFragPtr.i = RNIL;
- for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
- if (tTablePtr.p->fragid[i] == fragptr.p->fragId) {
- jam();
- tFragPtr.i = tTablePtr.p->fragrec[i];
- break;
- }
- }
- ndbrequire(tFragPtr.i != RNIL);
- // store it
- fragptr.p->tableFragptr = tFragPtr.i;
- } else {
- fragptr.p->tableFragptr = fragptr.i;
- }
-
- if (tempTable) {
-//--------------------------------------------
-// reqinfo bit 3-4 = 2 means temporary table
-// without logging or checkpointing.
-//--------------------------------------------
- jam();
- fragptr.p->logFlag = Fragrecord::STATE_FALSE;
- fragptr.p->lcpFlag = Fragrecord::LCP_STATE_FALSE;
- }//if
-
- fragptr.p->nextLcp = nextLcp;
-//----------------------------------------------
-// For node restarts it is not necessarily zero
-//----------------------------------------------
- if (cfirstfreeAddfragrec == RNIL) {
- jam();
- deleteFragrec(fragId);
- fragrefLab(signal, retRef, retPtr, ZNO_ADD_FRAGREC);
- return;
- }//if
- seizeAddfragrec(signal);
- addfragptr.p->addFragid = fragId;
- addfragptr.p->fragmentPtr = fragptr.i;
- addfragptr.p->dictBlockref = retRef;
- addfragptr.p->dictConnectptr = retPtr;
- addfragptr.p->m_senderAttrPtr = RNIL;
- addfragptr.p->noOfAttr = tnoOfAttr;
- addfragptr.p->noOfNull = tnoOfNull;
- addfragptr.p->noOfAllocPages = noOfAlloc;
- addfragptr.p->tabId = tabptr.i;
- addfragptr.p->totalAttrReceived = 0;
- addfragptr.p->attrSentToTup = ZNIL;/* TO FIND PROGRAMMING ERRORS QUICKLY */
- addfragptr.p->schemaVer = tschemaVersion;
- Uint32 tmp = (reqinfo & LqhFragReq::CreateInRunning);
- addfragptr.p->fragCopyCreation = (tmp == 0 ? 0 : 1);
- addfragptr.p->addfragErrorCode = 0;
- addfragptr.p->noOfKeyAttr = noOfKeyAttr;
- addfragptr.p->noOfNewAttr = noOfNewAttr;
- addfragptr.p->checksumIndicator = checksumIndicator;
- addfragptr.p->noOfAttributeGroups = noOfAttributeGroups;
- addfragptr.p->GCPIndicator = gcpIndicator;
- addfragptr.p->lh3DistrBits = tlhstar;
- addfragptr.p->tableType = tableType;
- addfragptr.p->primaryTableId = primaryTableId;
- //
- addfragptr.p->tup1Connectptr = RNIL;
- addfragptr.p->tup2Connectptr = RNIL;
- addfragptr.p->tux1Connectptr = RNIL;
- addfragptr.p->tux2Connectptr = RNIL;
-
- if (DictTabInfo::isTable(tableType) ||
- DictTabInfo::isHashIndex(tableType)) {
- jam();
- AccFragReq* const accreq = (AccFragReq*)signal->getDataPtrSend();
- accreq->userPtr = addfragptr.i;
- accreq->userRef = cownref;
- accreq->tableId = tabptr.i;
- accreq->reqInfo = copyType << 4;
- accreq->fragId = fragId;
- accreq->localKeyLen = tlocalKeylen;
- accreq->maxLoadFactor = tmaxLoadFactor;
- accreq->minLoadFactor = tminLoadFactor;
- accreq->kValue = tk;
- accreq->lhFragBits = tlhstar;
- accreq->lhDirBits = tlh;
- accreq->keyLength = ttupKeyLength;
- /* ----------------------------------------------------------------------- */
- /* Send ACCFRAGREQ, when confirmation is received send 2 * TUPFRAGREQ to */
- /* create 2 tuple fragments on this node. */
- /* ----------------------------------------------------------------------- */
- addfragptr.p->addfragStatus = AddFragRecord::ACC_ADDFRAG;
- sendSignal(fragptr.p->accBlockref, GSN_ACCFRAGREQ,
- signal, AccFragReq::SignalLength, JBB);
- return;
- }
- if (DictTabInfo::isOrderedIndex(tableType)) {
- jam();
- // NOTE: next 2 lines stolen from ACC
- addfragptr.p->fragid1 = (fragId << 1) | 0;
- addfragptr.p->fragid2 = (fragId << 1) | 1;
- addfragptr.p->addfragStatus = AddFragRecord::WAIT_TWO_TUP;
- sendAddFragReq(signal);
- return;
- }
- ndbrequire(false);
-}//Dblqh::execLQHFRAGREQ()
-
-/* *************** */
-/* ACCFRAGCONF > */
-/* *************** */
-void Dblqh::execACCFRAGCONF(Signal* signal)
-{
- jamEntry();
- addfragptr.i = signal->theData[0];
- Uint32 taccConnectptr = signal->theData[1];
- Uint32 fragId1 = signal->theData[2];
- Uint32 fragId2 = signal->theData[3];
- Uint32 accFragPtr1 = signal->theData[4];
- Uint32 accFragPtr2 = signal->theData[5];
- ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
- ndbrequire(addfragptr.p->addfragStatus == AddFragRecord::ACC_ADDFRAG);
-
- addfragptr.p->accConnectptr = taccConnectptr;
- addfragptr.p->fragid1 = fragId1;
- addfragptr.p->fragid2 = fragId2;
- fragptr.i = addfragptr.p->fragmentPtr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- fragptr.p->accFragptr[0] = accFragPtr1;
- fragptr.p->accFragptr[1] = accFragPtr2;
-
- addfragptr.p->addfragStatus = AddFragRecord::WAIT_TWO_TUP;
- sendAddFragReq(signal);
-}//Dblqh::execACCFRAGCONF()
-
-/* *************** */
-/* TUPFRAGCONF > */
-/* *************** */
-void Dblqh::execTUPFRAGCONF(Signal* signal)
-{
- jamEntry();
- addfragptr.i = signal->theData[0];
- Uint32 tupConnectptr = signal->theData[1];
- Uint32 tupFragPtr = signal->theData[2]; /* TUP FRAGMENT POINTER */
- Uint32 localFragId = signal->theData[3]; /* LOCAL FRAGMENT ID */
- ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
- fragptr.i = addfragptr.p->fragmentPtr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if (localFragId == addfragptr.p->fragid1) {
- jam();
- fragptr.p->tupFragptr[0] = tupFragPtr;
- } else if (localFragId == addfragptr.p->fragid2) {
- jam();
- fragptr.p->tupFragptr[1] = tupFragPtr;
- } else {
- ndbrequire(false);
- return;
- }//if
- switch (addfragptr.p->addfragStatus) {
- case AddFragRecord::WAIT_TWO_TUP:
- jam();
- fragptr.p->tupFragptr[0] = tupFragPtr;
- addfragptr.p->tup1Connectptr = tupConnectptr;
- addfragptr.p->addfragStatus = AddFragRecord::WAIT_ONE_TUP;
- sendAddFragReq(signal);
- break;
- case AddFragRecord::WAIT_ONE_TUP:
- jam();
- fragptr.p->tupFragptr[1] = tupFragPtr;
- addfragptr.p->tup2Connectptr = tupConnectptr;
- if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
- addfragptr.p->addfragStatus = AddFragRecord::WAIT_TWO_TUX;
- sendAddFragReq(signal);
- break;
- }
- goto done_with_frag;
- break;
- case AddFragRecord::WAIT_TWO_TUX:
- jam();
- fragptr.p->tuxFragptr[0] = tupFragPtr;
- addfragptr.p->tux1Connectptr = tupConnectptr;
- addfragptr.p->addfragStatus = AddFragRecord::WAIT_ONE_TUX;
- sendAddFragReq(signal);
- break;
- case AddFragRecord::WAIT_ONE_TUX:
- jam();
- fragptr.p->tuxFragptr[1] = tupFragPtr;
- addfragptr.p->tux2Connectptr = tupConnectptr;
- goto done_with_frag;
- break;
- done_with_frag:
- /* ---------------------------------------------------------------- */
- /* Finished create of fragments. Now ready for creating attributes. */
- /* ---------------------------------------------------------------- */
- addfragptr.p->addfragStatus = AddFragRecord::WAIT_ADD_ATTR;
- {
- LqhFragConf* conf = (LqhFragConf*)signal->getDataPtrSend();
- conf->senderData = addfragptr.p->dictConnectptr;
- conf->lqhFragPtr = addfragptr.i;
- sendSignal(addfragptr.p->dictBlockref, GSN_LQHFRAGCONF,
- signal, LqhFragConf::SignalLength, JBB);
- }
- break;
- default:
- ndbrequire(false);
- break;
- }
-}//Dblqh::execTUPFRAGCONF()
-
-/* *************** */
-/* TUXFRAGCONF > */
-/* *************** */
-void Dblqh::execTUXFRAGCONF(Signal* signal)
-{
- jamEntry();
- execTUPFRAGCONF(signal);
-}//Dblqh::execTUXFRAGCONF
-
-/*
- * Add fragment in TUP or TUX. Called up to 4 times.
- */
-void
-Dblqh::sendAddFragReq(Signal* signal)
-{
- fragptr.i = addfragptr.p->fragmentPtr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP ||
- addfragptr.p->addfragStatus == AddFragRecord::WAIT_ONE_TUP) {
- if (DictTabInfo::isTable(addfragptr.p->tableType) ||
- DictTabInfo::isHashIndex(addfragptr.p->tableType)) {
- jam();
- signal->theData[0] = addfragptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = 0; /* ADD TABLE */
- signal->theData[3] = addfragptr.p->tabId;
- signal->theData[4] = addfragptr.p->noOfAttr;
- signal->theData[5] =
- addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP
- ? addfragptr.p->fragid1 : addfragptr.p->fragid2;
- signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1;
- signal->theData[7] = addfragptr.p->noOfNull;
- signal->theData[8] = addfragptr.p->schemaVer;
- signal->theData[9] = addfragptr.p->noOfKeyAttr;
- signal->theData[10] = addfragptr.p->noOfNewAttr;
- signal->theData[11] = addfragptr.p->checksumIndicator;
- signal->theData[12] = addfragptr.p->noOfAttributeGroups;
- signal->theData[13] = addfragptr.p->GCPIndicator;
- sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ,
- signal, TupFragReq::SignalLength, JBB);
- return;
- }
- if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
- jam();
- signal->theData[0] = addfragptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = 0; /* ADD TABLE */
- signal->theData[3] = addfragptr.p->tabId;
- signal->theData[4] = 1; /* ordered index: one array attr */
- signal->theData[5] =
- addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP
- ? addfragptr.p->fragid1 : addfragptr.p->fragid2;
- signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1;
- signal->theData[7] = 0; /* ordered index: no nullable */
- signal->theData[8] = addfragptr.p->schemaVer;
- signal->theData[9] = 1; /* ordered index: one key */
- signal->theData[10] = addfragptr.p->noOfNewAttr;
- signal->theData[11] = addfragptr.p->checksumIndicator;
- signal->theData[12] = addfragptr.p->noOfAttributeGroups;
- signal->theData[13] = addfragptr.p->GCPIndicator;
- sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ,
- signal, TupFragReq::SignalLength, JBB);
- return;
- }
- }
- if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUX ||
- addfragptr.p->addfragStatus == AddFragRecord::WAIT_ONE_TUX) {
- if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
- jam();
- TuxFragReq* const tuxreq = (TuxFragReq*)signal->getDataPtrSend();
- tuxreq->userPtr = addfragptr.i;
- tuxreq->userRef = cownref;
- tuxreq->reqInfo = 0; /* ADD TABLE */
- tuxreq->tableId = addfragptr.p->tabId;
- ndbrequire(addfragptr.p->noOfAttr >= 2);
- tuxreq->noOfAttr = addfragptr.p->noOfAttr - 1; /* skip NDB$TNODE */
- tuxreq->fragId =
- addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUX
- ? addfragptr.p->fragid1: addfragptr.p->fragid2;
- tuxreq->fragOff = addfragptr.p->lh3DistrBits;
- tuxreq->tableType = addfragptr.p->tableType;
- tuxreq->primaryTableId = addfragptr.p->primaryTableId;
- // pointer to index fragment in TUP
- tuxreq->tupIndexFragPtrI =
- addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUX ?
- fragptr.p->tupFragptr[0] : fragptr.p->tupFragptr[1];
- // pointers to table fragments in TUP and ACC
- FragrecordPtr tFragPtr;
- tFragPtr.i = fragptr.p->tableFragptr;
- ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
- tuxreq->tupTableFragPtrI[0] = tFragPtr.p->tupFragptr[0];
- tuxreq->tupTableFragPtrI[1] = tFragPtr.p->tupFragptr[1];
- tuxreq->accTableFragPtrI[0] = tFragPtr.p->accFragptr[0];
- tuxreq->accTableFragPtrI[1] = tFragPtr.p->accFragptr[1];
- sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ,
- signal, TuxFragReq::SignalLength, JBB);
- return;
- }
- }
- ndbrequire(false);
-}//Dblqh::sendAddFragReq
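
execTUPFRAGCONF and sendAddFragReq above drive a small sub-state machine: two TUPFRAGREQ round trips for the fragment pair, two further TUXFRAGREQ round trips only when the fragment belongs to an ordered index, and then the add-fragment record waits for attributes. A reduced sketch of that progression (the state names mirror the AddFragRecord states; the actual signalling is left out):

#include <cassert>

// Sub-states used while creating one fragment pair, as in AddFragRecord.
enum AddFragState {
  WAIT_TWO_TUP, WAIT_ONE_TUP, WAIT_TWO_TUX, WAIT_ONE_TUX, WAIT_ADD_ATTR
};

// Advance the state on each *FRAGCONF; ordered indexes take the extra TUX leg.
static AddFragState next(AddFragState s, bool orderedIndex)
{
  switch (s) {
  case WAIT_TWO_TUP: return WAIT_ONE_TUP;
  case WAIT_ONE_TUP: return orderedIndex ? WAIT_TWO_TUX : WAIT_ADD_ATTR;
  case WAIT_TWO_TUX: return WAIT_ONE_TUX;
  case WAIT_ONE_TUX: return WAIT_ADD_ATTR;
  default:           return WAIT_ADD_ATTR;
  }
}

int main()
{
  // Plain table or hash index: two TUP confirmations, then wait for attributes.
  assert(next(next(WAIT_TWO_TUP, false), false) == WAIT_ADD_ATTR);

  // Ordered index: two TUP plus two TUX confirmations.
  AddFragState s = WAIT_TWO_TUP;
  for (int i = 0; i < 4; i++) s = next(s, true);
  assert(s == WAIT_ADD_ATTR);
  return 0;
}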
-
-/* ************************************************************************> */
-/* LQHADDATTRREQ: Request from DICT to create attributes for the new table. */
-/* ************************************************************************> */
-void Dblqh::execLQHADDATTREQ(Signal* signal)
-{
- jamEntry();
- LqhAddAttrReq * const req = (LqhAddAttrReq*)signal->getDataPtr();
-
- addfragptr.i = req->lqhFragPtr;
- const Uint32 tnoOfAttr = req->noOfAttributes;
- const Uint32 senderData = req->senderData;
- const Uint32 senderAttrPtr = req->senderAttrPtr;
-
- ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
- ndbrequire(addfragptr.p->addfragStatus == AddFragRecord::WAIT_ADD_ATTR);
- ndbrequire((tnoOfAttr != 0) && (tnoOfAttr <= LqhAddAttrReq::MAX_ATTRIBUTES));
- addfragptr.p->totalAttrReceived += tnoOfAttr;
- ndbrequire(addfragptr.p->totalAttrReceived <= addfragptr.p->noOfAttr);
-
- addfragptr.p->attrReceived = tnoOfAttr;
- for (Uint32 i = 0; i < tnoOfAttr; i++) {
- addfragptr.p->attributes[i] = req->attributes[i];
- }//for
- addfragptr.p->attrSentToTup = 0;
- ndbrequire(addfragptr.p->dictConnectptr == senderData);
- addfragptr.p->m_senderAttrPtr = senderAttrPtr;
- addfragptr.p->addfragStatus = AddFragRecord::TUP_ATTR_WAIT1;
- sendAddAttrReq(signal);
-}//Dblqh::execLQHADDATTREQ()
-
-/* *********************>> */
-/* TUP_ADD_ATTCONF > */
-/* *********************>> */
-void Dblqh::execTUP_ADD_ATTCONF(Signal* signal)
-{
- jamEntry();
- addfragptr.i = signal->theData[0];
- // implies that operation was released on the other side
- const bool lastAttr = signal->theData[1];
- ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
- switch (addfragptr.p->addfragStatus) {
- case AddFragRecord::TUP_ATTR_WAIT1:
- jam();
- if (lastAttr)
- addfragptr.p->tup1Connectptr = RNIL;
- addfragptr.p->addfragStatus = AddFragRecord::TUP_ATTR_WAIT2;
- sendAddAttrReq(signal);
- break;
- case AddFragRecord::TUP_ATTR_WAIT2:
- jam();
- if (lastAttr)
- addfragptr.p->tup2Connectptr = RNIL;
- if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
- addfragptr.p->addfragStatus = AddFragRecord::TUX_ATTR_WAIT1;
- sendAddAttrReq(signal);
- break;
- }
- goto done_with_attr;
- break;
- case AddFragRecord::TUX_ATTR_WAIT1:
- jam();
- if (lastAttr)
- addfragptr.p->tux1Connectptr = RNIL;
- addfragptr.p->addfragStatus = AddFragRecord::TUX_ATTR_WAIT2;
- sendAddAttrReq(signal);
- break;
- case AddFragRecord::TUX_ATTR_WAIT2:
- jam();
- if (lastAttr)
- addfragptr.p->tux2Connectptr = RNIL;
- goto done_with_attr;
- break;
- done_with_attr:
- addfragptr.p->attrSentToTup = addfragptr.p->attrSentToTup + 1;
- ndbrequire(addfragptr.p->attrSentToTup <= addfragptr.p->attrReceived);
- ndbrequire(addfragptr.p->totalAttrReceived <= addfragptr.p->noOfAttr);
- if (addfragptr.p->attrSentToTup < addfragptr.p->attrReceived) {
- // more in this batch
- jam();
- addfragptr.p->addfragStatus = AddFragRecord::TUP_ATTR_WAIT1;
- sendAddAttrReq(signal);
- } else if (addfragptr.p->totalAttrReceived < addfragptr.p->noOfAttr) {
- // more batches to receive
- jam();
- addfragptr.p->addfragStatus = AddFragRecord::WAIT_ADD_ATTR;
- LqhAddAttrConf *const conf = (LqhAddAttrConf*)signal->getDataPtrSend();
- conf->senderData = addfragptr.p->dictConnectptr;
- conf->senderAttrPtr = addfragptr.p->m_senderAttrPtr;
- conf->fragId = addfragptr.p->addFragid;
- sendSignal(addfragptr.p->dictBlockref, GSN_LQHADDATTCONF,
- signal, LqhAddAttrConf::SignalLength, JBB);
- } else {
- fragptr.i = addfragptr.p->fragmentPtr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- /* ------------------------------------------------------------------
- * WE HAVE NOW COMPLETED ADDING THIS FRAGMENT. WE NOW NEED TO SET THE
- * PROPER STATE IN FRAG_STATUS DEPENDENT ON IF WE ARE CREATING A NEW
- * REPLICA OR IF WE ARE CREATING A TABLE. FOR FRAGMENTS IN COPY
- * PROCESS WE DO NOT WANT LOGGING ACTIVATED.
- * ----------------------------------------------------------------- */
- if (addfragptr.p->fragCopyCreation == 1) {
- jam();
- if (! DictTabInfo::isOrderedIndex(addfragptr.p->tableType))
- fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION;
- else
- fragptr.p->fragStatus = Fragrecord::FSACTIVE;
- fragptr.p->logFlag = Fragrecord::STATE_FALSE;
- } else {
- jam();
- fragptr.p->fragStatus = Fragrecord::FSACTIVE;
- }//if
- LqhAddAttrConf *const conf = (LqhAddAttrConf*)signal->getDataPtrSend();
- conf->senderData = addfragptr.p->dictConnectptr;
- conf->senderAttrPtr = addfragptr.p->m_senderAttrPtr;
- conf->fragId = addfragptr.p->addFragid;
- sendSignal(addfragptr.p->dictBlockref, GSN_LQHADDATTCONF, signal,
- LqhAddAttrConf::SignalLength, JBB);
- releaseAddfragrec(signal);
- }//if
- break;
- default:
- ndbrequire(false);
- break;
- }
-}
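
execLQHADDATTREQ and execTUP_ADD_ATTCONF above process attributes in batches: DICT announces noOfAttr attributes up front, each LQHADDATTREQ carries at most MAX_ATTRIBUTES of them, attrSentToTup counts within the current batch, totalAttrReceived counts across batches, and an LQHADDATTCONF goes back after every completed batch until everything has been received. A toy sketch of that two-level bookkeeping, with illustrative names only:

#include <cassert>

// Illustrative counters only; the real bookkeeping lives in AddFragRecord.
struct AttrProgress {
  unsigned noOfAttr;        // total attributes announced up front
  unsigned totalReceived;   // received so far, across all batches
  unsigned batchReceived;   // size of the current batch
  unsigned sentDown;        // forwarded to TUP/TUX from the current batch
};

// Count one forwarded attribute; returns true when the current batch is done
// and an acknowledgement (next batch, or completion) would be due.
static bool attrForwarded(AttrProgress& p)
{
  p.sentDown++;
  assert(p.sentDown <= p.batchReceived);
  return p.sentDown == p.batchReceived;
}

int main()
{
  AttrProgress p = {5, 0, 0, 0};            // 5 attributes, delivered as 3 + 2
  const unsigned batches[] = {3, 2};
  for (unsigned b = 0; b < 2; b++) {
    p.batchReceived = batches[b];
    p.sentDown = 0;
    p.totalReceived += batches[b];
    for (unsigned i = 0; i < batches[b]; i++)
      assert(attrForwarded(p) == (i + 1 == batches[b]));
  }
  assert(p.totalReceived == p.noOfAttr);    // the final conf also frees the record
  return 0;
}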
-
-/* **********************>> */
-/* TUX_ADD_ATTRCONF > */
-/* **********************>> */
-void Dblqh::execTUX_ADD_ATTRCONF(Signal* signal)
-{
- jamEntry();
- execTUP_ADD_ATTCONF(signal);
-}//Dblqh::execTUX_ADD_ATTRCONF
-
-/*
- * Add attribute in TUP or TUX. Called up to 4 times.
- */
-void
-Dblqh::sendAddAttrReq(Signal* signal)
-{
- arrGuard(addfragptr.p->attrSentToTup, LqhAddAttrReq::MAX_ATTRIBUTES);
- LqhAddAttrReq::Entry& entry =
- addfragptr.p->attributes[addfragptr.p->attrSentToTup];
- const Uint32 attrId = entry.attrId & 0xffff;
- const Uint32 primaryAttrId = entry.attrId >> 16;
- fragptr.i = addfragptr.p->fragmentPtr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if (addfragptr.p->addfragStatus == AddFragRecord::TUP_ATTR_WAIT1 ||
- addfragptr.p->addfragStatus == AddFragRecord::TUP_ATTR_WAIT2) {
- if (DictTabInfo::isTable(addfragptr.p->tableType) ||
- DictTabInfo::isHashIndex(addfragptr.p->tableType) ||
- (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) &&
- primaryAttrId == ZNIL)) {
- jam();
- TupAddAttrReq* const tupreq = (TupAddAttrReq*)signal->getDataPtrSend();
- tupreq->tupConnectPtr =
- addfragptr.p->addfragStatus == AddFragRecord::TUP_ATTR_WAIT1
- ? addfragptr.p->tup1Connectptr : addfragptr.p->tup2Connectptr;
- tupreq->notused1 = 0;
- tupreq->attrId = attrId;
- tupreq->attrDescriptor = entry.attrDescriptor;
- tupreq->extTypeInfo = entry.extTypeInfo;
- sendSignal(fragptr.p->tupBlockref, GSN_TUP_ADD_ATTRREQ,
- signal, TupAddAttrReq::SignalLength, JBB);
- return;
- }
- if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) &&
- primaryAttrId != ZNIL) {
- // this attribute is not for TUP
- jam();
- TupAddAttrConf* tupconf = (TupAddAttrConf*)signal->getDataPtrSend();
- tupconf->userPtr = addfragptr.i;
- tupconf->lastAttr = false;
- sendSignal(reference(), GSN_TUP_ADD_ATTCONF,
- signal, TupAddAttrConf::SignalLength, JBB);
- return;
- }
- }
- if (addfragptr.p->addfragStatus == AddFragRecord::TUX_ATTR_WAIT1 ||
- addfragptr.p->addfragStatus == AddFragRecord::TUX_ATTR_WAIT2) {
- jam();
- if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) &&
- primaryAttrId != ZNIL) {
- jam();
- TuxAddAttrReq* const tuxreq = (TuxAddAttrReq*)signal->getDataPtrSend();
- tuxreq->tuxConnectPtr =
- addfragptr.p->addfragStatus == AddFragRecord::TUX_ATTR_WAIT1
- ? addfragptr.p->tux1Connectptr : addfragptr.p->tux2Connectptr;
- tuxreq->notused1 = 0;
- tuxreq->attrId = attrId;
- tuxreq->attrDescriptor = entry.attrDescriptor;
- tuxreq->extTypeInfo = entry.extTypeInfo;
- tuxreq->primaryAttrId = primaryAttrId;
- sendSignal(fragptr.p->tuxBlockref, GSN_TUX_ADD_ATTRREQ,
- signal, TuxAddAttrReq::SignalLength, JBB);
- return;
- }
- if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) &&
- primaryAttrId == ZNIL) {
- // this attribute is not for TUX
- jam();
- TuxAddAttrConf* tuxconf = (TuxAddAttrConf*)signal->getDataPtrSend();
- tuxconf->userPtr = addfragptr.i;
- tuxconf->lastAttr = false;
- sendSignal(reference(), GSN_TUX_ADD_ATTRCONF,
- signal, TuxAddAttrConf::SignalLength, JBB);
- return;
- }
- }
- ndbrequire(false);
-}//Dblqh::sendAddAttrReq
-
-/* ************************************************************************>> */
-/* TAB_COMMITREQ: Commit the new table for use in transactions. Sender DICT. */
-/* ************************************************************************>> */
-void Dblqh::execTAB_COMMITREQ(Signal* signal)
-{
- jamEntry();
- Uint32 dihPtr = signal->theData[0];
- BlockReference dihBlockref = signal->theData[1];
- tabptr.i = signal->theData[2];
-
- if (tabptr.i >= ctabrecFileSize) {
- jam();
- terrorCode = ZTAB_FILE_SIZE;
- signal->theData[0] = dihPtr;
- signal->theData[1] = cownNodeid;
- signal->theData[2] = tabptr.i;
- signal->theData[3] = terrorCode;
- sendSignal(dihBlockref, GSN_TAB_COMMITREF, signal, 4, JBB);
- return;
- }//if
- ptrAss(tabptr, tablerec);
- if (tabptr.p->tableStatus != Tablerec::ADD_TABLE_ONGOING) {
- jam();
- terrorCode = ZTAB_STATE_ERROR;
- signal->theData[0] = dihPtr;
- signal->theData[1] = cownNodeid;
- signal->theData[2] = tabptr.i;
- signal->theData[3] = terrorCode;
- signal->theData[4] = tabptr.p->tableStatus;
- sendSignal(dihBlockref, GSN_TAB_COMMITREF, signal, 5, JBB);
- ndbrequire(false);
- return;
- }//if
- tabptr.p->usageCount = 0;
- tabptr.p->tableStatus = Tablerec::TABLE_DEFINED;
- signal->theData[0] = dihPtr;
- signal->theData[1] = cownNodeid;
- signal->theData[2] = tabptr.i;
- sendSignal(dihBlockref, GSN_TAB_COMMITCONF, signal, 3, JBB);
- return;
-}//Dblqh::execTAB_COMMITREQ()
-
-
-void Dblqh::fragrefLab(Signal* signal,
- BlockReference fragBlockRef,
- Uint32 fragConPtr,
- Uint32 errorCode)
-{
- LqhFragRef * ref = (LqhFragRef*)signal->getDataPtrSend();
- ref->senderData = fragConPtr;
- ref->errorCode = errorCode;
- sendSignal(fragBlockRef, GSN_LQHFRAGREF, signal,
- LqhFragRef::SignalLength, JBB);
- return;
-}//Dblqh::fragrefLab()
-
-/*
- * Abort ongoing add-fragment operations in TUP and TUX.
- */
-void Dblqh::abortAddFragOps(Signal* signal)
-{
- fragptr.i = addfragptr.p->fragmentPtr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
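- // theData[0] = -1 marks the fragment request as an abort; theData[1] holds
- // the TUP/TUX connect pointer of the half-finished operation to release.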
- signal->theData[0] = (Uint32)-1;
- if (addfragptr.p->tup1Connectptr != RNIL) {
- jam();
- signal->theData[1] = addfragptr.p->tup1Connectptr;
- sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
- addfragptr.p->tup1Connectptr = RNIL;
- }
- if (addfragptr.p->tup2Connectptr != RNIL) {
- jam();
- signal->theData[1] = addfragptr.p->tup2Connectptr;
- sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
- addfragptr.p->tup2Connectptr = RNIL;
- }
- if (addfragptr.p->tux1Connectptr != RNIL) {
- jam();
- signal->theData[1] = addfragptr.p->tux1Connectptr;
- sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
- addfragptr.p->tux1Connectptr = RNIL;
- }
- if (addfragptr.p->tux2Connectptr != RNIL) {
- jam();
- signal->theData[1] = addfragptr.p->tux2Connectptr;
- sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
- addfragptr.p->tux2Connectptr = RNIL;
- }
-}
-
-/* ************>> */
-/* ACCFRAGREF > */
-/* ************>> */
-void Dblqh::execACCFRAGREF(Signal* signal)
-{
- jamEntry();
- addfragptr.i = signal->theData[0];
- ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
- terrorCode = signal->theData[1];
- ndbrequire(addfragptr.p->addfragStatus == AddFragRecord::ACC_ADDFRAG);
- addfragptr.p->addfragErrorCode = terrorCode;
-
- const Uint32 ref = addfragptr.p->dictBlockref;
- const Uint32 senderData = addfragptr.p->dictConnectptr;
- const Uint32 errorCode = addfragptr.p->addfragErrorCode;
- releaseAddfragrec(signal);
- fragrefLab(signal, ref, senderData, errorCode);
-
- return;
-}//Dblqh::execACCFRAGREF()
-
-/* ************>> */
-/* TUPFRAGREF > */
-/* ************>> */
-void Dblqh::execTUPFRAGREF(Signal* signal)
-{
- jamEntry();
- addfragptr.i = signal->theData[0];
- ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
- terrorCode = signal->theData[1];
- fragptr.i = addfragptr.p->fragmentPtr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- addfragptr.p->addfragErrorCode = terrorCode;
-
- // no operation to release, just add some jams
- switch (addfragptr.p->addfragStatus) {
- case AddFragRecord::WAIT_TWO_TUP:
- jam();
- break;
- case AddFragRecord::WAIT_ONE_TUP:
- jam();
- break;
- case AddFragRecord::WAIT_TWO_TUX:
- jam();
- break;
- case AddFragRecord::WAIT_ONE_TUX:
- jam();
- break;
- default:
- ndbrequire(false);
- break;
- }
- abortAddFragOps(signal);
-
- const Uint32 ref = addfragptr.p->dictBlockref;
- const Uint32 senderData = addfragptr.p->dictConnectptr;
- const Uint32 errorCode = addfragptr.p->addfragErrorCode;
- releaseAddfragrec(signal);
- fragrefLab(signal, ref, senderData, errorCode);
-
-}//Dblqh::execTUPFRAGREF()
-
-/* ************>> */
-/* TUXFRAGREF > */
-/* ************>> */
-void Dblqh::execTUXFRAGREF(Signal* signal)
-{
- jamEntry();
- execTUPFRAGREF(signal);
-}//Dblqh::execTUXFRAGREF
-
-/* *********************> */
-/* TUP_ADD_ATTRREF      > */
-/* *********************> */
-void Dblqh::execTUP_ADD_ATTRREF(Signal* signal)
-{
- jamEntry();
- addfragptr.i = signal->theData[0];
- ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
- terrorCode = signal->theData[1];
- addfragptr.p->addfragErrorCode = terrorCode;
-
- // operation was released on the other side
- switch (addfragptr.p->addfragStatus) {
- case AddFragRecord::TUP_ATTR_WAIT1:
- jam();
- ndbrequire(addfragptr.p->tup1Connectptr != RNIL);
- addfragptr.p->tup1Connectptr = RNIL;
- break;
- case AddFragRecord::TUP_ATTR_WAIT2:
- jam();
- ndbrequire(addfragptr.p->tup2Connectptr != RNIL);
- addfragptr.p->tup2Connectptr = RNIL;
- break;
- case AddFragRecord::TUX_ATTR_WAIT1:
- jam();
- ndbrequire(addfragptr.p->tux1Connectptr != RNIL);
- addfragptr.p->tux1Connectptr = RNIL;
- break;
- case AddFragRecord::TUX_ATTR_WAIT2:
- jam();
- ndbrequire(addfragptr.p->tux2Connectptr != RNIL);
- addfragptr.p->tux2Connectptr = RNIL;
- break;
- default:
- ndbrequire(false);
- break;
- }
- abortAddFragOps(signal);
-
- const Uint32 Ref = addfragptr.p->dictBlockref;
- const Uint32 senderData = addfragptr.p->dictConnectptr;
- const Uint32 errorCode = addfragptr.p->addfragErrorCode;
- releaseAddfragrec(signal);
-
- LqhAddAttrRef *const ref = (LqhAddAttrRef*)signal->getDataPtrSend();
- ref->senderData = senderData;
- ref->errorCode = errorCode;
- sendSignal(Ref, GSN_LQHADDATTREF, signal,
- LqhAddAttrRef::SignalLength, JBB);
-
-}//Dblqh::execTUP_ADD_ATTRREF()
-
-/* **********************> */
-/* TUX_ADD_ATTRREF > */
-/* **********************> */
-void Dblqh::execTUX_ADD_ATTRREF(Signal* signal)
-{
- jamEntry();
- execTUP_ADD_ATTRREF(signal);
-}//Dblqh::execTUX_ADD_ATTRREF
-
-void
-Dblqh::execPREP_DROP_TAB_REQ(Signal* signal){
- jamEntry();
-
- PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
-
- Uint32 senderRef = req->senderRef;
- Uint32 senderData = req->senderData;
-
- TablerecPtr tabPtr;
- tabPtr.i = req->tableId;
- ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
-
- Uint32 errCode = 0;
- errCode = checkDropTabState(tabPtr.p->tableStatus, GSN_PREP_DROP_TAB_REQ);
- if(errCode != 0){
- jam();
-
- PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = senderData;
- ref->tableId = tabPtr.i;
- ref->errorCode = errCode;
- sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal,
- PrepDropTabRef::SignalLength, JBB);
- return;
- }
-
- tabPtr.p->tableStatus = Tablerec::PREP_DROP_TABLE_ONGOING;
- tabPtr.p->waitingTC.clear();
- tabPtr.p->waitingDIH.clear();
-
- PrepDropTabConf * conf = (PrepDropTabConf*)signal->getDataPtrSend();
- conf->tableId = tabPtr.i;
- conf->senderRef = reference();
- conf->senderData = senderData;
- sendSignal(senderRef, GSN_PREP_DROP_TAB_CONF, signal,
- PrepDropTabConf::SignalLength, JBB);
-
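- // Kick off the background drop check: checkDropTab() re-schedules itself
- // via CONTINUEB(ZPREP_DROP_TABLE) every 100 ms until the table is unused
- // and no longer part of the running local checkpoint.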
- signal->theData[0] = ZPREP_DROP_TABLE;
- signal->theData[1] = tabPtr.i;
- signal->theData[2] = senderRef;
- signal->theData[3] = senderData;
- checkDropTab(signal);
-}
-
-void
-Dblqh::checkDropTab(Signal* signal){
-
- TablerecPtr tabPtr;
- tabPtr.i = signal->theData[1];
- ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
-
- ndbrequire(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING);
-
- if(tabPtr.p->usageCount > 0){
- jam();
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 4);
- return;
- }
-
- bool lcpDone = true;
- lcpPtr.i = 0;
- ptrAss(lcpPtr, lcpRecord);
- if(lcpPtr.p->lcpState != LcpRecord::LCP_IDLE){
- jam();
-
- if(lcpPtr.p->currentFragment.lcpFragOrd.tableId == tabPtr.i){
- jam();
- lcpDone = false;
- }
-
- if(lcpPtr.p->lcpQueued &&
- lcpPtr.p->queuedFragment.lcpFragOrd.tableId == tabPtr.i){
- jam();
- lcpDone = false;
- }
- }
-
- if(!lcpDone){
- jam();
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 4);
- return;
- }
-
- tabPtr.p->tableStatus = Tablerec::PREP_DROP_TABLE_DONE;
-
- WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtrSend();
- conf->tableId = tabPtr.i;
- conf->senderRef = reference();
- for(Uint32 i = 1; i<MAX_NDB_NODES; i++){
- if(tabPtr.p->waitingTC.get(i)){
- tabPtr.p->waitingTC.clear(i);
- sendSignal(calcTcBlockRef(i), GSN_WAIT_DROP_TAB_CONF, signal,
- WaitDropTabConf::SignalLength, JBB);
- }
- if(tabPtr.p->waitingDIH.get(i)){
- tabPtr.p->waitingDIH.clear(i);
- sendSignal(calcDihBlockRef(i), GSN_WAIT_DROP_TAB_CONF, signal,
- WaitDropTabConf::SignalLength, JBB);
- }
- }
-}
-
-void
-Dblqh::execWAIT_DROP_TAB_REQ(Signal* signal){
- jamEntry();
- WaitDropTabReq * req = (WaitDropTabReq*)signal->getDataPtr();
-
- TablerecPtr tabPtr;
- tabPtr.i = req->tableId;
- ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
-
- Uint32 senderRef = req->senderRef;
- Uint32 nodeId = refToNode(senderRef);
- Uint32 blockNo = refToBlock(senderRef);
-
- if(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING){
- jam();
- switch(blockNo){
- case DBTC:
- tabPtr.p->waitingTC.set(nodeId);
- break;
- case DBDIH:
- tabPtr.p->waitingDIH.set(nodeId);
- break;
- default:
- ndbrequire(false);
- }
- return;
- }
-
- if(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
- jam();
- WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtrSend();
- conf->tableId = tabPtr.i;
- conf->senderRef = reference();
- sendSignal(senderRef, GSN_WAIT_DROP_TAB_CONF, signal,
- WaitDropTabConf::SignalLength, JBB);
- return;
- }
-
- WaitDropTabRef * ref = (WaitDropTabRef*)signal->getDataPtrSend();
- ref->tableId = tabPtr.i;
- ref->senderRef = reference();
-
- bool ok = false;
- switch(tabPtr.p->tableStatus){
- case Tablerec::TABLE_DEFINED:
- ok = true;
- ref->errorCode = WaitDropTabRef::IllegalTableState;
- break;
- case Tablerec::NOT_DEFINED:
- ok = true;
- ref->errorCode = WaitDropTabRef::NoSuchTable;
- break;
- case Tablerec::ADD_TABLE_ONGOING:
- ok = true;
- ref->errorCode = WaitDropTabRef::IllegalTableState;
- break;
- case Tablerec::PREP_DROP_TABLE_ONGOING:
- case Tablerec::PREP_DROP_TABLE_DONE:
- // Should have been taken care of above
- ndbrequire(false);
- }
- ndbrequire(ok);
- ref->tableStatus = tabPtr.p->tableStatus;
- sendSignal(senderRef, GSN_WAIT_DROP_TAB_REF, signal,
- WaitDropTabRef::SignalLength, JBB);
- return;
-}
-
-void
-Dblqh::execDROP_TAB_REQ(Signal* signal){
- jamEntry();
-
- DropTabReq* req = (DropTabReq*)signal->getDataPtr();
-
- Uint32 senderRef = req->senderRef;
- Uint32 senderData = req->senderData;
-
- TablerecPtr tabPtr;
- tabPtr.i = req->tableId;
- ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
-
- do {
- if(req->requestType == DropTabReq::RestartDropTab){
- jam();
- break;
- }
-
- if(req->requestType == DropTabReq::OnlineDropTab){
- jam();
- Uint32 errCode = 0;
- errCode = checkDropTabState(tabPtr.p->tableStatus, GSN_DROP_TAB_REQ);
- if(errCode != 0){
- jam();
-
- DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = senderData;
- ref->tableId = tabPtr.i;
- ref->errorCode = errCode;
- sendSignal(senderRef, GSN_DROP_TAB_REF, signal,
- DropTabRef::SignalLength, JBB);
- return;
- }
- }
-
- removeTable(tabPtr.i);
-
- } while(false);
-
- ndbrequire(tabPtr.p->usageCount == 0);
- tabPtr.p->tableStatus = Tablerec::NOT_DEFINED;
-
- DropTabConf * const dropConf = (DropTabConf *)signal->getDataPtrSend();
- dropConf->senderRef = reference();
- dropConf->senderData = senderData;
- dropConf->tableId = tabPtr.i;
- sendSignal(senderRef, GSN_DROP_TAB_CONF,
- signal, DropTabConf::SignalLength, JBB);
-}
-
-Uint32
-Dblqh::checkDropTabState(Tablerec::TableStatus status, Uint32 gsn) const{
-
- if(gsn == GSN_PREP_DROP_TAB_REQ){
- switch(status){
- case Tablerec::NOT_DEFINED:
- jam();
- // Fall through
- case Tablerec::ADD_TABLE_ONGOING:
- jam();
- return PrepDropTabRef::NoSuchTable;
- break;
- case Tablerec::PREP_DROP_TABLE_ONGOING:
- jam();
- return PrepDropTabRef::PrepDropInProgress;
- break;
- case Tablerec::PREP_DROP_TABLE_DONE:
- jam();
- return PrepDropTabRef::DropInProgress;
- break;
- case Tablerec::TABLE_DEFINED:
- jam();
- return 0;
- break;
- }
- ndbrequire(0);
- }
-
- if(gsn == GSN_DROP_TAB_REQ){
- switch(status){
- case Tablerec::NOT_DEFINED:
- jam();
- // Fall through
- case Tablerec::ADD_TABLE_ONGOING:
- jam();
- return DropTabRef::NoSuchTable;
- break;
- case Tablerec::PREP_DROP_TABLE_ONGOING:
- jam();
- return DropTabRef::PrepDropInProgress;
- break;
- case Tablerec::PREP_DROP_TABLE_DONE:
- jam();
- return 0;
- break;
- case Tablerec::TABLE_DEFINED:
- jam();
- return DropTabRef::DropWoPrep;
- }
- ndbrequire(0);
- }
- ndbrequire(0);
- return RNIL;
-}
-
-void Dblqh::removeTable(Uint32 tableId)
-{
- tabptr.i = tableId;
- ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
-
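- // Walk the fragment id array from the top down; (Uint32)~i becomes zero
- // only after i wraps past zero, so indices MAX_FRAG_PER_NODE - 1 .. 0 are
- // all visited.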
- for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
- jam();
- if (tabptr.p->fragid[i] != ZNIL) {
- jam();
- deleteFragrec(tabptr.p->fragid[i]);
- }//if
- }//for
-}//Dblqh::removeTable()
-
-void
-Dblqh::execALTER_TAB_REQ(Signal* signal)
-{
- jamEntry();
- AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr();
- const Uint32 senderRef = req->senderRef;
- const Uint32 senderData = req->senderData;
- const Uint32 changeMask = req->changeMask;
- const Uint32 tableId = req->tableId;
- const Uint32 tableVersion = req->tableVersion;
- const Uint32 gci = req->gci;
- AlterTabReq::RequestType requestType =
- (AlterTabReq::RequestType) req->requestType;
-
- TablerecPtr tablePtr;
- tablePtr.i = tableId;
- ptrCheckGuard(tablePtr, ctabrecFileSize, tablerec);
- tablePtr.p->schemaVersion = tableVersion;
-
- // Request handled successfully
- AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = senderData;
- conf->changeMask = changeMask;
- conf->tableId = tableId;
- conf->tableVersion = tableVersion;
- conf->gci = gci;
- conf->requestType = requestType;
- sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal,
- AlterTabConf::SignalLength, JBB);
-}
-
-/* ************************************************************************>>
- * TIME_SIGNAL: Handles time-outs of local operations. This is a clean-up
- * handler of last resort: if no other measure has succeeded in cleaning up
- * after a time-out, this routine will remove the transaction after 120 seconds
- * of inactivity. The check is performed once every 10 seconds. Sender is QMGR.
- * ************************************************************************>> */
-void Dblqh::execTIME_SIGNAL(Signal* signal)
-{
- jamEntry();
- cLqhTimeOutCount++;
- cLqhTimeOutCheckCount++;
- if ((cCounterAccCommitBlocked > 0) ||
- (cCounterTupCommitBlocked > 0)) {
- jam();
- signal->theData[0] = NDB_LE_UndoLogBlocked;
- signal->theData[1] = cCounterTupCommitBlocked;
- signal->theData[2] = cCounterAccCommitBlocked;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
-
- cCounterTupCommitBlocked = 0;
- cCounterAccCommitBlocked = 0;
- }//if
- if (cLqhTimeOutCheckCount < 10) {
- jam();
- return;
- }//if
- cLqhTimeOutCheckCount = 0;
-#ifdef VM_TRACE
- TcConnectionrecPtr tTcConptr;
-
- for (tTcConptr.i = 0; tTcConptr.i < ctcConnectrecFileSize;
- tTcConptr.i++) {
- jam();
- ptrAss(tTcConptr, tcConnectionrec);
- if ((tTcConptr.p->tcTimer != 0) &&
- ((tTcConptr.p->tcTimer + 120) < cLqhTimeOutCount)) {
- ndbout << "Dblqh::execTIME_SIGNAL"<<endl
- << "Timeout found in tcConnectRecord " <<tTcConptr.i<<endl
- << " cLqhTimeOutCount = " << cLqhTimeOutCount << endl
- << " tcTimer="<<tTcConptr.p->tcTimer<<endl
- << " tcTimer+120="<<tTcConptr.p->tcTimer + 120<<endl;
-
- ndbout << " transactionState = " << tTcConptr.p->transactionState<<endl;
- ndbout << " operation = " << tTcConptr.p->operation<<endl;
- ndbout << " tcNodeFailrec = " << tTcConptr.p->tcNodeFailrec
- << " seqNoReplica = " << tTcConptr.p->seqNoReplica
- << " simpleRead = " << tTcConptr.p->simpleRead
- << endl;
- ndbout << " replicaType = " << tTcConptr.p->replicaType
- << " reclenAiLqhkey = " << tTcConptr.p->reclenAiLqhkey
- << " opExec = " << tTcConptr.p->opExec
- << endl;
- ndbout << " opSimple = " << tTcConptr.p->opSimple
- << " nextSeqNoReplica = " << tTcConptr.p->nextSeqNoReplica
- << " lockType = " << tTcConptr.p->lockType
- << " localFragptr = " << tTcConptr.p->localFragptr
- << endl;
- ndbout << " lastReplicaNo = " << tTcConptr.p->lastReplicaNo
- << " indTakeOver = " << tTcConptr.p->indTakeOver
- << " dirtyOp = " << tTcConptr.p->dirtyOp
- << endl;
- ndbout << " activeCreat = " << tTcConptr.p->activeCreat
- << " tcBlockref = " << hex << tTcConptr.p->tcBlockref
- << " reqBlockref = " << hex << tTcConptr.p->reqBlockref
- << " primKeyLen = " << tTcConptr.p->primKeyLen
- << endl;
- ndbout << " nextReplica = " << tTcConptr.p->nextReplica
- << " tcBlockref = " << hex << tTcConptr.p->tcBlockref
- << " reqBlockref = " << hex << tTcConptr.p->reqBlockref
- << " primKeyLen = " << tTcConptr.p->primKeyLen
- << endl;
- ndbout << " logStopPageNo = " << tTcConptr.p->logStopPageNo
- << " logStartPageNo = " << tTcConptr.p->logStartPageNo
- << " logStartPageIndex = " << tTcConptr.p->logStartPageIndex
- << endl;
- ndbout << " errorCode = " << tTcConptr.p->errorCode
- << " clientBlockref = " << hex << tTcConptr.p->clientBlockref
- << " applRef = " << hex << tTcConptr.p->applRef
- << " totSendlenAi = " << tTcConptr.p->totSendlenAi
- << endl;
- ndbout << " totReclenAi = " << tTcConptr.p->totReclenAi
- << " tcScanRec = " << tTcConptr.p->tcScanRec
- << " tcScanInfo = " << tTcConptr.p->tcScanInfo
- << " tcOprec = " << hex << tTcConptr.p->tcOprec
- << endl;
- ndbout << " tableref = " << tTcConptr.p->tableref
- << " simpleTcConnect = " << tTcConptr.p->simpleTcConnect
- << " storedProcId = " << tTcConptr.p->storedProcId
- << " schemaVersion = " << tTcConptr.p->schemaVersion
- << endl;
- ndbout << " reqinfo = " << tTcConptr.p->reqinfo
- << " reqRef = " << tTcConptr.p->reqRef
- << " readlenAi = " << tTcConptr.p->readlenAi
- << " prevTc = " << tTcConptr.p->prevTc
- << endl;
- ndbout << " prevLogTcrec = " << tTcConptr.p->prevLogTcrec
- << " prevHashRec = " << tTcConptr.p->prevHashRec
- << " nodeAfterNext0 = " << tTcConptr.p->nodeAfterNext[0]
- << " nodeAfterNext1 = " << tTcConptr.p->nodeAfterNext[1]
- << endl;
- ndbout << " nextTcConnectrec = " << tTcConptr.p->nextTcConnectrec
- << " nextTc = " << tTcConptr.p->nextTc
- << " nextTcLogQueue = " << tTcConptr.p->nextTcLogQueue
- << " nextLogTcrec = " << tTcConptr.p->nextLogTcrec
- << endl;
- ndbout << " nextHashRec = " << tTcConptr.p->nextHashRec
- << " logWriteState = " << tTcConptr.p->logWriteState
- << " logStartFileNo = " << tTcConptr.p->logStartFileNo
- << " listState = " << tTcConptr.p->listState
- << endl;
- ndbout << " lastAttrinbuf = " << tTcConptr.p->lastAttrinbuf
- << " lastTupkeybuf = " << tTcConptr.p->lastTupkeybuf
- << " hashValue = " << tTcConptr.p->hashValue
- << endl;
- ndbout << " gci = " << tTcConptr.p->gci
- << " fragmentptr = " << tTcConptr.p->fragmentptr
- << " fragmentid = " << tTcConptr.p->fragmentid
- << " firstTupkeybuf = " << tTcConptr.p->firstTupkeybuf
- << endl;
- ndbout << " firstAttrinbuf = " << tTcConptr.p->firstAttrinbuf
- << " currTupAiLen = " << tTcConptr.p->currTupAiLen
- << " currReclenAi = " << tTcConptr.p->currReclenAi
- << endl;
- ndbout << " tcTimer = " << tTcConptr.p->tcTimer
- << " clientConnectrec = " << tTcConptr.p->clientConnectrec
- << " applOprec = " << hex << tTcConptr.p->applOprec
- << " abortState = " << tTcConptr.p->abortState
- << endl;
- ndbout << " transid0 = " << hex << tTcConptr.p->transid[0]
- << " transid1 = " << hex << tTcConptr.p->transid[1]
- << " tupkeyData0 = " << tTcConptr.p->tupkeyData[0]
- << " tupkeyData1 = " << tTcConptr.p->tupkeyData[1]
- << endl;
- ndbout << " tupkeyData2 = " << tTcConptr.p->tupkeyData[2]
- << " tupkeyData3 = " << tTcConptr.p->tupkeyData[3]
- << endl;
- switch (tTcConptr.p->transactionState) {
-
- case TcConnectionrec::SCAN_STATE_USED:
- if (tTcConptr.p->tcScanRec < cscanrecFileSize){
- ScanRecordPtr TscanPtr;
- c_scanRecordPool.getPtr(TscanPtr, tTcConptr.p->tcScanRec);
- ndbout << " scanState = " << TscanPtr.p->scanState << endl;
- //TscanPtr.p->scanLocalref[2];
- ndbout << " copyPtr="<<TscanPtr.p->copyPtr
- << " scanAccPtr="<<TscanPtr.p->scanAccPtr
- << " scanAiLength="<<TscanPtr.p->scanAiLength
- << endl;
- ndbout << " m_curr_batch_size_rows="<<
- TscanPtr.p->m_curr_batch_size_rows
- << " m_max_batch_size_rows="<<
- TscanPtr.p->m_max_batch_size_rows
- << " scanErrorCounter="<<TscanPtr.p->scanErrorCounter
- << " scanLocalFragid="<<TscanPtr.p->scanLocalFragid
- << endl;
- ndbout << " scanSchemaVersion="<<TscanPtr.p->scanSchemaVersion
- << " scanStoredProcId="<<TscanPtr.p->scanStoredProcId
- << " scanTcrec="<<TscanPtr.p->scanTcrec
- << endl;
- ndbout << " scanType="<<TscanPtr.p->scanType
- << " scanApiBlockref="<<TscanPtr.p->scanApiBlockref
- << " scanNodeId="<<TscanPtr.p->scanNodeId
- << " scanCompletedStatus="<<TscanPtr.p->scanCompletedStatus
- << endl;
- ndbout << " scanFlag="<<TscanPtr.p->scanFlag
- << " scanLockHold="<<TscanPtr.p->scanLockHold
- << " scanLockMode="<<TscanPtr.p->scanLockMode
- << " scanNumber="<<TscanPtr.p->scanNumber
- << endl;
- ndbout << " scanReleaseCounter="<<TscanPtr.p->scanReleaseCounter
- << " scanTcWaiting="<<TscanPtr.p->scanTcWaiting
- << " scanKeyinfoFlag="<<TscanPtr.p->scanKeyinfoFlag
- << endl;
- }else{
- ndbout << "No connected scan record found" << endl;
- }
- break;
- default:
- break;
- }//switch
-
- // Reset the timer
- tTcConptr.p->tcTimer = 0;
- }//if
- }//for
-#endif
-#ifdef VM_TRACE
- for (lfoPtr.i = 0; lfoPtr.i < clfoFileSize; lfoPtr.i++) {
- ptrAss(lfoPtr, logFileOperationRecord);
- if ((lfoPtr.p->lfoTimer != 0) &&
- ((lfoPtr.p->lfoTimer + 120) < cLqhTimeOutCount)) {
- ndbout << "We have lost LFO record" << endl;
- ndbout << "index = " << lfoPtr.i;
- ndbout << "State = " << lfoPtr.p->lfoState;
- ndbout << " Page No = " << lfoPtr.p->lfoPageNo;
- ndbout << " noPagesRw = " << lfoPtr.p->noPagesRw;
- ndbout << "lfoWordWritten = " << lfoPtr.p->lfoWordWritten << endl;
- lfoPtr.p->lfoTimer = cLqhTimeOutCount;
- }//if
- }//for
-
-#endif
-
-#if 0
- LcpRecordPtr TlcpPtr;
- // Print information about the current local checkpoint
- TlcpPtr.i = 0;
- ptrAss(TlcpPtr, lcpRecord);
- ndbout << "Information about LCP in this LQH" << endl
- << " lcpState="<<TlcpPtr.p->lcpState<<endl
- << " firstLcpLocAcc="<<TlcpPtr.p->firstLcpLocAcc<<endl
- << " firstLcpLocTup="<<TlcpPtr.p->firstLcpLocTup<<endl
- << " lcpAccptr="<<TlcpPtr.p->lcpAccptr<<endl
- << " lastFragmentFlag="<<TlcpPtr.p->lastFragmentFlag<<endl
- << " lcpQueued="<<TlcpPtr.p->lcpQueued<<endl
- << " reportEmptyref="<< TlcpPtr.p->reportEmptyRef<<endl
- << " reportEmpty="<<TlcpPtr.p->reportEmpty<<endl;
-#endif
-}//Dblqh::execTIME_SIGNAL()
-
-/* ######################################################################### */
-/* ####### EXECUTION MODULE ####### */
-/* THIS MODULE HANDLES THE RECEPTION OF LQHKEYREQ AND ALL PROCESSING */
-/* OF OPERATIONS ON BEHALF OF THIS REQUEST. THIS ALSO INVOLVES THE       */
-/* RECEPTION OF VARIOUS TYPES OF ATTRINFO AND KEYINFO, AS WELL AS        */
-/* COMMUNICATION WITH ACC AND TUP.                                       */
-/* ######################################################################### */
-
-void Dblqh::noFreeRecordLab(Signal* signal,
- const LqhKeyReq * lqhKeyReq,
- Uint32 errCode)
-{
- jamEntry();
- const Uint32 transid1 = lqhKeyReq->transId1;
- const Uint32 transid2 = lqhKeyReq->transId2;
- const Uint32 reqInfo = lqhKeyReq->requestInfo;
-
- if(errCode == ZNO_FREE_MARKER_RECORDS_ERROR ||
- errCode == ZNODE_SHUTDOWN_IN_PROGESS){
- releaseTcrec(signal, tcConnectptr);
- }
-
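- // Simple reads are refused directly to the API with TCKEYREF; all other
- // operations are refused back to the requesting block with LQHKEYREF.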
- if (LqhKeyReq::getSimpleFlag(reqInfo) &&
- LqhKeyReq::getOperation(reqInfo) == ZREAD){
- jam();
- ndbrequire(LqhKeyReq::getApplicationAddressFlag(reqInfo));
- const Uint32 apiRef = lqhKeyReq->variableData[0];
- const Uint32 apiOpRec = lqhKeyReq->variableData[1];
-
- TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend();
-
- tcKeyRef->connectPtr = apiOpRec;
- tcKeyRef->transId[0] = transid1;
- tcKeyRef->transId[1] = transid2;
- tcKeyRef->errorCode = errCode;
- sendSignal(apiRef, GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB);
- } else {
- jam();
-
- const Uint32 clientPtr = lqhKeyReq->clientConnectPtr;
- Uint32 TcOprec = clientPtr;
- if(LqhKeyReq::getSameClientAndTcFlag(reqInfo) == 1){
- if(LqhKeyReq::getApplicationAddressFlag(reqInfo))
- TcOprec = lqhKeyReq->variableData[2];
- else
- TcOprec = lqhKeyReq->variableData[0];
- }
-
- LqhKeyRef * const ref = (LqhKeyRef*)signal->getDataPtrSend();
- ref->userRef = clientPtr;
- ref->connectPtr = TcOprec;
- ref->errorCode = errCode;
- ref->transId1 = transid1;
- ref->transId2 = transid2;
- sendSignal(signal->senderBlockRef(), GSN_LQHKEYREF, signal,
- LqhKeyRef::SignalLength, JBB);
- }//if
- return;
-}//Dblqh::noFreeRecordLab()
-
-void Dblqh::LQHKEY_abort(Signal* signal, int errortype)
-{
- switch (errortype) {
- case 0:
- jam();
- terrorCode = ZCOPY_NODE_ERROR;
- break;
- case 1:
- jam();
- terrorCode = ZNO_FREE_LQH_CONNECTION;
- break;
- case 2:
- jam();
- terrorCode = signal->theData[1];
- break;
- case 3:
- jam();
- ndbrequire((tcConnectptr.p->transactionState == TcConnectionrec::WAIT_ACC_ABORT) ||
- (tcConnectptr.p->transactionState == TcConnectionrec::ABORT_STOPPED) ||
- (tcConnectptr.p->transactionState == TcConnectionrec::ABORT_QUEUED));
- return;
- break;
- case 4:
- jam();
- if(tabptr.p->tableStatus == Tablerec::NOT_DEFINED){
- jam();
- terrorCode = ZTABLE_NOT_DEFINED;
- } else if (tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING ||
- tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
- jam();
- terrorCode = ZDROP_TABLE_IN_PROGRESS;
- } else {
- ndbrequire(0);
- }
- break;
- case 5:
- jam();
- terrorCode = ZINVALID_SCHEMA_VERSION;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- abortErrorLab(signal);
-}//Dblqh::LQHKEY_abort()
-
-void Dblqh::LQHKEY_error(Signal* signal, int errortype)
-{
- switch (errortype) {
- case 0:
- jam();
- break;
- case 1:
- jam();
- break;
- case 2:
- jam();
- break;
- case 3:
- jam();
- break;
- case 4:
- jam();
- break;
- case 5:
- jam();
- break;
- case 6:
- jam();
- break;
- default:
- jam();
- break;
- }//switch
- ndbrequire(false);
-}//Dblqh::LQHKEY_error()
-
-void Dblqh::execLQHKEYREF(Signal* signal)
-{
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- terrorCode = signal->theData[2];
- Uint32 transid1 = signal->theData[3];
- Uint32 transid2 = signal->theData[4];
- if (tcConnectptr.i >= ctcConnectrecFileSize) {
- errorReport(signal, 3);
- return;
- }//if
-/*------------------------------------------------------------------*/
-/* WE HAVE TO CHECK THAT THE SIGNAL DOES NOT BELONG TO SOMETHING    */
-/* REMOVED DUE TO A TIME-OUT.                                       */
-/*------------------------------------------------------------------*/
- ptrAss(tcConnectptr, tcConnectionrec);
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- switch (regTcPtr->connectState) {
- case TcConnectionrec::CONNECTED:
- jam();
- if ((regTcPtr->transid[0] != transid1) ||
- (regTcPtr->transid[1] != transid2)) {
- warningReport(signal, 14);
- return;
- }//if
- if (regTcPtr->abortState != TcConnectionrec::ABORT_IDLE) {
- warningReport(signal, 15);
- return;
- }//if
- abortErrorLab(signal);
- return;
- break;
- case TcConnectionrec::LOG_CONNECTED:
- jam();
- logLqhkeyrefLab(signal);
- return;
- break;
- case TcConnectionrec::COPY_CONNECTED:
- jam();
- copyLqhKeyRefLab(signal);
- return;
- break;
- default:
- warningReport(signal, 16);
- return;
- break;
- }//switch
-}//Dblqh::execLQHKEYREF()
-
-/* -------------------------------------------------------------------------- */
-/* ------- ENTER PACKED_SIGNAL ------- */
-/* Execution of a packed signal. A packed signal can contain COMMIT, COMPLETE, */
-/* LQHKEYCONF or REMOVE_MARKER entries. Each entry is executed by its          */
-/* respective exec function.                                                   */
-/* -------------------------------------------------------------------------- */
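-/* Each entry starts with a word whose top 4 bits give the entry type and    */
-/* whose low 28 bits give the receiver's connect pointer. Entry sizes:       */
-/* ZCOMMIT 4 words, ZCOMPLETE 3 words, ZLQHKEYCONF LqhKeyConf::SignalLength  */
-/* words and ZREMOVE_MARKER 3 words (the first word carries only the type).  */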
-void Dblqh::execPACKED_SIGNAL(Signal* signal)
-{
- Uint32 Tstep = 0;
- Uint32 Tlength;
- Uint32 TpackedData[28];
- Uint32 sig0, sig1, sig2, sig3 ,sig4, sig5, sig6;
-
- jamEntry();
- Tlength = signal->length();
- ndbrequire(Tlength <= 25);
- MEMCOPY_NO_WORDS(&TpackedData[0], &signal->theData[0], Tlength);
- while (Tlength > Tstep) {
- switch (TpackedData[Tstep] >> 28) {
- case ZCOMMIT:
- jam();
- sig0 = TpackedData[Tstep + 0] & 0x0FFFFFFF;
- sig1 = TpackedData[Tstep + 1];
- sig2 = TpackedData[Tstep + 2];
- sig3 = TpackedData[Tstep + 3];
- signal->theData[0] = sig0;
- signal->theData[1] = sig1;
- signal->theData[2] = sig2;
- signal->theData[3] = sig3;
- signal->header.theLength = 4;
- execCOMMIT(signal);
- Tstep += 4;
- break;
- case ZCOMPLETE:
- jam();
- sig0 = TpackedData[Tstep + 0] & 0x0FFFFFFF;
- sig1 = TpackedData[Tstep + 1];
- sig2 = TpackedData[Tstep + 2];
- signal->theData[0] = sig0;
- signal->theData[1] = sig1;
- signal->theData[2] = sig2;
- signal->header.theLength = 3;
- execCOMPLETE(signal);
- Tstep += 3;
- break;
- case ZLQHKEYCONF: {
- jam();
- LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
-
- sig0 = TpackedData[Tstep + 0] & 0x0FFFFFFF;
- sig1 = TpackedData[Tstep + 1];
- sig2 = TpackedData[Tstep + 2];
- sig3 = TpackedData[Tstep + 3];
- sig4 = TpackedData[Tstep + 4];
- sig5 = TpackedData[Tstep + 5];
- sig6 = TpackedData[Tstep + 6];
- lqhKeyConf->connectPtr = sig0;
- lqhKeyConf->opPtr = sig1;
- lqhKeyConf->userRef = sig2;
- lqhKeyConf->readLen = sig3;
- lqhKeyConf->transId1 = sig4;
- lqhKeyConf->transId2 = sig5;
- lqhKeyConf->noFiredTriggers = sig6;
- execLQHKEYCONF(signal);
- Tstep += LqhKeyConf::SignalLength;
- break;
- }
- case ZREMOVE_MARKER:
- jam();
- sig0 = TpackedData[Tstep + 1];
- sig1 = TpackedData[Tstep + 2];
- signal->theData[0] = sig0;
- signal->theData[1] = sig1;
- signal->header.theLength = 2;
- execREMOVE_MARKER_ORD(signal);
- Tstep += 3;
- break;
- default:
- ndbrequire(false);
- return;
- }//switch
- }//while
- ndbrequire(Tlength == Tstep);
- return;
-}//Dblqh::execPACKED_SIGNAL()
-
-void
-Dblqh::execREMOVE_MARKER_ORD(Signal* signal)
-{
- CommitAckMarker key;
- key.transid1 = signal->theData[0];
- key.transid2 = signal->theData[1];
- jamEntry();
-
- CommitAckMarkerPtr removedPtr;
- m_commitAckMarkerHash.release(removedPtr, key);
- ndbrequire(removedPtr.i != RNIL);
-#ifdef MARKER_TRACE
- ndbout_c("Rem marker[%.8x %.8x]", key.transid1, key.transid2);
-#endif
-}
-
-
-/* -------------------------------------------------------------------------- */
-/* ------- ENTER SEND_PACKED ------- */
-/* Used to force a packed signal to be sent if the local signal buffer is    */
-/* not empty.                                                                */
-/* -------------------------------------------------------------------------- */
-void Dblqh::execSEND_PACKED(Signal* signal)
-{
- HostRecordPtr Thostptr;
- UintR i;
- UintR TpackedListIndex = cpackedListIndex;
- jamEntry();
- for (i = 0; i < TpackedListIndex; i++) {
- Thostptr.i = cpackedList[i];
- ptrAss(Thostptr, hostRecord);
- jam();
- ndbrequire(Thostptr.i - 1 < MAX_NDB_NODES - 1);
- if (Thostptr.p->noOfPackedWordsLqh > 0) {
- jam();
- sendPackedSignalLqh(signal, Thostptr.p);
- }//if
- if (Thostptr.p->noOfPackedWordsTc > 0) {
- jam();
- sendPackedSignalTc(signal, Thostptr.p);
- }//if
- Thostptr.p->inPackedList = false;
- }//for
- cpackedListIndex = 0;
- return;
-}//Dblqh::execSEND_PACKED()
-
-void
-Dblqh::updatePackedList(Signal* signal, HostRecord * ahostptr, Uint16 hostId)
-{
- Uint32 TpackedListIndex = cpackedListIndex;
- if (ahostptr->inPackedList == false) {
- jam();
- ahostptr->inPackedList = true;
- cpackedList[TpackedListIndex] = hostId;
- cpackedListIndex = TpackedListIndex + 1;
- }//if
-}//Dblqh::updatePackedList()
-
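-/* READ_PSUEDO_REQ: read a pseudo column value for an operation. RANGE_NO    */
-/* is answered locally from the scan state; all other pseudo columns are     */
-/* forwarded to DBACC with a direct signal.                                  */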
-void
-Dblqh::execREAD_PSUEDO_REQ(Signal* signal){
- jamEntry();
- TcConnectionrecPtr regTcPtr;
- regTcPtr.i = signal->theData[0];
- ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec);
-
- if(signal->theData[1] != AttributeHeader::RANGE_NO)
- {
- jam();
- FragrecordPtr regFragptr;
- regFragptr.i = regTcPtr.p->fragmentptr;
- ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
-
- signal->theData[0] = regFragptr.p->accFragptr[regTcPtr.p->localFragptr];
- EXECUTE_DIRECT(DBACC, GSN_READ_PSUEDO_REQ, signal, 2);
- }
- else
- {
- signal->theData[0] = regTcPtr.p->m_scan_curr_range_no;
- }
-}
-
-/* ************>> */
-/* TUPKEYCONF > */
-/* ************>> */
-void Dblqh::execTUPKEYCONF(Signal* signal)
-{
- TcConnectionrec *regTcConnectionrec = tcConnectionrec;
- Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
- const TupKeyConf * const tupKeyConf = (TupKeyConf *)signal->getDataPtr();
- Uint32 tcIndex = tupKeyConf->userPtr;
- jamEntry();
- tcConnectptr.i = tcIndex;
- ptrCheckGuard(tcConnectptr, ttcConnectrecFileSize, regTcConnectionrec);
- switch (tcConnectptr.p->transactionState) {
- case TcConnectionrec::WAIT_TUP:
- jam();
- if (tcConnectptr.p->seqNoReplica == 0) // Primary replica
- tcConnectptr.p->noFiredTriggers = tupKeyConf->noFiredTriggers;
- tupkeyConfLab(signal);
- break;
- case TcConnectionrec::COPY_TUPKEY:
- jam();
- copyTupkeyConfLab(signal);
- break;
- case TcConnectionrec::SCAN_TUPKEY:
- jam();
- scanTupkeyConfLab(signal);
- break;
- case TcConnectionrec::WAIT_TUP_TO_ABORT:
- jam();
-/* ------------------------------------------------------------------------- */
-// Abort was not ready to start until this signal came back. Now we are ready
-// to start the abort.
-/* ------------------------------------------------------------------------- */
- releaseActiveFrag(signal);
- abortCommonLab(signal);
- break;
- case TcConnectionrec::WAIT_ACC_ABORT:
- case TcConnectionrec::ABORT_QUEUED:
- jam();
-/* -------------------------------------------------------------------------- */
-/* IGNORE SINCE ABORT OF THIS OPERATION IS ONGOING ALREADY. */
-/* -------------------------------------------------------------------------- */
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
-}//Dblqh::execTUPKEYCONF()
-
-/* ************> */
-/* TUPKEYREF > */
-/* ************> */
-void Dblqh::execTUPKEYREF(Signal* signal)
-{
- const TupKeyRef * const tupKeyRef = (TupKeyRef *)signal->getDataPtr();
-
- jamEntry();
- tcConnectptr.i = tupKeyRef->userRef;
- terrorCode = tupKeyRef->errorCode;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- switch (tcConnectptr.p->transactionState) {
- case TcConnectionrec::WAIT_TUP:
- jam();
- releaseActiveFrag(signal);
- abortErrorLab(signal);
- break;
- case TcConnectionrec::COPY_TUPKEY:
- ndbrequire(false);
- break;
- case TcConnectionrec::SCAN_TUPKEY:
- jam();
- scanTupkeyRefLab(signal);
- break;
- case TcConnectionrec::WAIT_TUP_TO_ABORT:
- jam();
-/* ------------------------------------------------------------------------- */
-// Abort was not ready to start until this signal came back. Now we are ready
-// to start the abort.
-/* ------------------------------------------------------------------------- */
- releaseActiveFrag(signal);
- abortCommonLab(signal);
- break;
- case TcConnectionrec::WAIT_ACC_ABORT:
- case TcConnectionrec::ABORT_QUEUED:
- jam();
-/* ------------------------------------------------------------------------- */
-/* IGNORE SINCE ABORT OF THIS OPERATION IS ONGOING ALREADY. */
-/* ------------------------------------------------------------------------- */
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
-}//Dblqh::execTUPKEYREF()
-
-void Dblqh::sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr)
-{
- Uint32 noOfWords = ahostptr->noOfPackedWordsLqh;
- BlockReference hostRef = ahostptr->hostLqhBlockRef;
- MEMCOPY_NO_WORDS(&signal->theData[0],
- &ahostptr->packedWordsLqh[0],
- noOfWords);
- sendSignal(hostRef, GSN_PACKED_SIGNAL, signal, noOfWords, JBB);
- ahostptr->noOfPackedWordsLqh = 0;
-}//Dblqh::sendPackedSignalLqh()
-
-void Dblqh::sendPackedSignalTc(Signal* signal, HostRecord * ahostptr)
-{
- Uint32 noOfWords = ahostptr->noOfPackedWordsTc;
- BlockReference hostRef = ahostptr->hostTcBlockRef;
- MEMCOPY_NO_WORDS(&signal->theData[0],
- &ahostptr->packedWordsTc[0],
- noOfWords);
- sendSignal(hostRef, GSN_PACKED_SIGNAL, signal, noOfWords, JBB);
- ahostptr->noOfPackedWordsTc = 0;
-}//Dblqh::sendPackedSignalTc()
-
-void Dblqh::sendCommitLqh(Signal* signal, BlockReference alqhBlockref)
-{
- HostRecordPtr Thostptr;
- Thostptr.i = refToNode(alqhBlockref);
- ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
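- // A packed signal holds at most 25 words; flush the buffer first if a new
- // 4-word COMMIT entry would not fit, otherwise make sure this host is on
- // the packed list so execSEND_PACKED will flush it later.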
- if (Thostptr.p->noOfPackedWordsLqh > 21) {
- jam();
- sendPackedSignalLqh(signal, Thostptr.p);
- } else {
- jam();
- updatePackedList(signal, Thostptr.p, Thostptr.i);
- }//if
- Uint32 pos = Thostptr.p->noOfPackedWordsLqh;
- Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMMIT << 28);
- Uint32 gci = tcConnectptr.p->gci;
- Uint32 transid1 = tcConnectptr.p->transid[0];
- Uint32 transid2 = tcConnectptr.p->transid[1];
- Thostptr.p->packedWordsLqh[pos] = ptrAndType;
- Thostptr.p->packedWordsLqh[pos + 1] = gci;
- Thostptr.p->packedWordsLqh[pos + 2] = transid1;
- Thostptr.p->packedWordsLqh[pos + 3] = transid2;
- Thostptr.p->noOfPackedWordsLqh = pos + 4;
-}//Dblqh::sendCommitLqh()
-
-void Dblqh::sendCompleteLqh(Signal* signal, BlockReference alqhBlockref)
-{
- HostRecordPtr Thostptr;
- Thostptr.i = refToNode(alqhBlockref);
- ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
- if (Thostptr.p->noOfPackedWordsLqh > 22) {
- jam();
- sendPackedSignalLqh(signal, Thostptr.p);
- } else {
- jam();
- updatePackedList(signal, Thostptr.p, Thostptr.i);
- }//if
- Uint32 pos = Thostptr.p->noOfPackedWordsLqh;
- Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMPLETE << 28);
- Uint32 transid1 = tcConnectptr.p->transid[0];
- Uint32 transid2 = tcConnectptr.p->transid[1];
- Thostptr.p->packedWordsLqh[pos] = ptrAndType;
- Thostptr.p->packedWordsLqh[pos + 1] = transid1;
- Thostptr.p->packedWordsLqh[pos + 2] = transid2;
- Thostptr.p->noOfPackedWordsLqh = pos + 3;
-}//Dblqh::sendCompleteLqh()
-
-void Dblqh::sendCommittedTc(Signal* signal, BlockReference atcBlockref)
-{
- HostRecordPtr Thostptr;
- Thostptr.i = refToNode(atcBlockref);
- ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
- if (Thostptr.p->noOfPackedWordsTc > 22) {
- jam();
- sendPackedSignalTc(signal, Thostptr.p);
- } else {
- jam();
- updatePackedList(signal, Thostptr.p, Thostptr.i);
- }//if
- Uint32 pos = Thostptr.p->noOfPackedWordsTc;
- Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMMITTED << 28);
- Uint32 transid1 = tcConnectptr.p->transid[0];
- Uint32 transid2 = tcConnectptr.p->transid[1];
- Thostptr.p->packedWordsTc[pos] = ptrAndType;
- Thostptr.p->packedWordsTc[pos + 1] = transid1;
- Thostptr.p->packedWordsTc[pos + 2] = transid2;
- Thostptr.p->noOfPackedWordsTc = pos + 3;
-}//Dblqh::sendCommittedTc()
-
-void Dblqh::sendCompletedTc(Signal* signal, BlockReference atcBlockref)
-{
- HostRecordPtr Thostptr;
- Thostptr.i = refToNode(atcBlockref);
- ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
- if (Thostptr.p->noOfPackedWordsTc > 22) {
- jam();
- sendPackedSignalTc(signal, Thostptr.p);
- } else {
- jam();
- updatePackedList(signal, Thostptr.p, Thostptr.i);
- }//if
- Uint32 pos = Thostptr.p->noOfPackedWordsTc;
- Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMPLETED << 28);
- Uint32 transid1 = tcConnectptr.p->transid[0];
- Uint32 transid2 = tcConnectptr.p->transid[1];
- Thostptr.p->packedWordsTc[pos] = ptrAndType;
- Thostptr.p->packedWordsTc[pos + 1] = transid1;
- Thostptr.p->packedWordsTc[pos + 2] = transid2;
- Thostptr.p->noOfPackedWordsTc = pos + 3;
-}//Dblqh::sendCompletedTc()
-
-void Dblqh::sendLqhkeyconfTc(Signal* signal, BlockReference atcBlockref)
-{
- LqhKeyConf* lqhKeyConf;
- HostRecordPtr Thostptr;
-
- Thostptr.i = refToNode(atcBlockref);
- ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
- if (refToBlock(atcBlockref) == DBTC) {
- jam();
-/*******************************************************************
-// This signal was intended for DBTC as part of the normal transaction
-// execution.
-********************************************************************/
- if (Thostptr.p->noOfPackedWordsTc > (25 - LqhKeyConf::SignalLength)) {
- jam();
- sendPackedSignalTc(signal, Thostptr.p);
- } else {
- jam();
- updatePackedList(signal, Thostptr.p, Thostptr.i);
- }//if
- lqhKeyConf = (LqhKeyConf *)
- &Thostptr.p->packedWordsTc[Thostptr.p->noOfPackedWordsTc];
- Thostptr.p->noOfPackedWordsTc += LqhKeyConf::SignalLength;
- } else {
- jam();
-/*******************************************************************
-// This signal was intended for DBLQH as part of log execution or
-// node recovery.
-********************************************************************/
- if (Thostptr.p->noOfPackedWordsLqh > (25 - LqhKeyConf::SignalLength)) {
- jam();
- sendPackedSignalLqh(signal, Thostptr.p);
- } else {
- jam();
- updatePackedList(signal, Thostptr.p, Thostptr.i);
- }//if
- lqhKeyConf = (LqhKeyConf *)
- &Thostptr.p->packedWordsLqh[Thostptr.p->noOfPackedWordsLqh];
- Thostptr.p->noOfPackedWordsLqh += LqhKeyConf::SignalLength;
- }//if
- Uint32 ptrAndType = tcConnectptr.i | (ZLQHKEYCONF << 28);
- Uint32 tcOprec = tcConnectptr.p->tcOprec;
- Uint32 ownRef = cownref;
- Uint32 readlenAi = tcConnectptr.p->readlenAi;
- Uint32 transid1 = tcConnectptr.p->transid[0];
- Uint32 transid2 = tcConnectptr.p->transid[1];
- Uint32 noFiredTriggers = tcConnectptr.p->noFiredTriggers;
- lqhKeyConf->connectPtr = ptrAndType;
- lqhKeyConf->opPtr = tcOprec;
- lqhKeyConf->userRef = ownRef;
- lqhKeyConf->readLen = readlenAi;
- lqhKeyConf->transId1 = transid1;
- lqhKeyConf->transId2 = transid2;
- lqhKeyConf->noFiredTriggers = noFiredTriggers;
-}//Dblqh::sendLqhkeyconfTc()
-
-/* ************************************************************************>>
- * KEYINFO: Key data for a tuple request from DBTC. If all key and attribute
- * information has been received, the next step is to contact DBACC to locate
- * the tuple; otherwise we wait for further KEYINFO/ATTRINFO signals.
- * ************************************************************************>> */
-void Dblqh::execKEYINFO(Signal* signal)
-{
- Uint32 tcOprec = signal->theData[0];
- Uint32 transid1 = signal->theData[1];
- Uint32 transid2 = signal->theData[2];
- jamEntry();
- if (findTransaction(transid1, transid2, tcOprec) != ZOK) {
- jam();
- return;
- }//if
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- TcConnectionrec::TransactionState state = regTcPtr->transactionState;
- if (state != TcConnectionrec::WAIT_TUPKEYINFO &&
- state != TcConnectionrec::WAIT_SCAN_AI)
- {
- jam();
-/*****************************************************************************/
-/* TRANSACTION WAS ABORTED, THIS IS MOST LIKELY A SIGNAL BELONGING TO THE */
-/* ABORTED TRANSACTION. THUS IGNORE THE SIGNAL. */
-/*****************************************************************************/
- return;
- }//if
- Uint32 errorCode = handleLongTupKey(signal,
- (Uint32)regTcPtr->save1,
- (Uint32)regTcPtr->primKeyLen,
- &signal->theData[3]);
- if (errorCode != 0) {
- if (errorCode == 1) {
- jam();
- return;
- }//if
- jam();
- terrorCode = errorCode;
- if(state == TcConnectionrec::WAIT_TUPKEYINFO)
- abortErrorLab(signal);
- else
- abort_scan(signal, regTcPtr->tcScanRec, errorCode);
- return;
- }//if
- if(state == TcConnectionrec::WAIT_TUPKEYINFO)
- {
- FragrecordPtr regFragptr;
- regFragptr.i = regTcPtr->fragmentptr;
- ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
- fragptr = regFragptr;
- endgettupkeyLab(signal);
- }
- return;
-}//Dblqh::execKEYINFO()
-
-/* ------------------------------------------------------------------------- */
-/* FILL IN KEY DATA INTO DATA BUFFERS. */
-/* ------------------------------------------------------------------------- */
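-/* Returns 0 when the complete key has been buffered, 1 when more KEYINFO    */
-/* signals are expected (the position is saved in save1), and               */
-/* ZGET_DATAREC_ERROR when no free data buffer is available.                */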
-Uint32 Dblqh::handleLongTupKey(Signal* signal,
- Uint32 keyLength,
- Uint32 primKeyLength,
- Uint32* dataPtr)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- Uint32 dataPos = 0;
- while (true) {
- keyLength += 4;
- if (cfirstfreeDatabuf == RNIL) {
- jam();
- return ZGET_DATAREC_ERROR;
- }//if
- seizeTupkeybuf(signal);
- Databuf * const regDataPtr = databufptr.p;
- Uint32 data0 = dataPtr[dataPos];
- Uint32 data1 = dataPtr[dataPos + 1];
- Uint32 data2 = dataPtr[dataPos + 2];
- Uint32 data3 = dataPtr[dataPos + 3];
- regDataPtr->data[0] = data0;
- regDataPtr->data[1] = data1;
- regDataPtr->data[2] = data2;
- regDataPtr->data[3] = data3;
- dataPos += 4;
- if (keyLength < primKeyLength) {
- if (dataPos > 16) {
- jam();
-/* SAVE STATE AND WAIT FOR KEYINFO */
- regTcPtr->save1 = keyLength;
- return 1;
- }//if
- } else {
- jam();
- return 0;
- }//if
- }//while
-}//Dblqh::handleLongTupKey()
-
-/* ------------------------------------------------------------------------- */
-/* ------- HANDLE ATTRINFO SIGNALS ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-/* ************************************************************************>> */
-/* ATTRINFO: Continuation of KEYINFO signal (except for scans that do not use*/
-/* any KEYINFO). When all key and attribute info is received we contact DBACC*/
-/* for index handling. */
-/* ************************************************************************>> */
-void Dblqh::execATTRINFO(Signal* signal)
-{
- Uint32 tcOprec = signal->theData[0];
- Uint32 transid1 = signal->theData[1];
- Uint32 transid2 = signal->theData[2];
- jamEntry();
- if (findTransaction(transid1,
- transid2,
- tcOprec) != ZOK) {
- jam();
- return;
- }//if
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- Uint32 length = signal->length() - 3;
- Uint32 totReclenAi = regTcPtr->totReclenAi;
- Uint32 currReclenAi = regTcPtr->currReclenAi + length;
- Uint32* dataPtr = &signal->theData[3];
- regTcPtr->currReclenAi = currReclenAi;
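- // Three cases below: all announced attribute info received, more still
- // expected, or (allowed only for scans) more received than announced.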
- if (totReclenAi == currReclenAi) {
- switch (regTcPtr->transactionState) {
- case TcConnectionrec::WAIT_ATTR:
- {
- Fragrecord *regFragrecord = fragrecord;
- Uint32 fragIndex = regTcPtr->fragmentptr;
- Uint32 tfragrecFileSize = cfragrecFileSize;
- jam();
- fragptr.i = fragIndex;
- ptrCheckGuard(fragptr, tfragrecFileSize, regFragrecord);
- lqhAttrinfoLab(signal, dataPtr, length);
- endgettupkeyLab(signal);
- return;
- break;
- }
- case TcConnectionrec::WAIT_SCAN_AI:
- jam();
- scanAttrinfoLab(signal, dataPtr, length);
- return;
- break;
- case TcConnectionrec::WAIT_TUP_TO_ABORT:
- case TcConnectionrec::LOG_ABORT_QUEUED:
- case TcConnectionrec::ABORT_QUEUED:
- case TcConnectionrec::ABORT_STOPPED:
- case TcConnectionrec::WAIT_ACC_ABORT:
- case TcConnectionrec::WAIT_AI_AFTER_ABORT:
- jam();
- aiStateErrorCheckLab(signal, dataPtr,length);
- return;
- break;
- default:
- jam();
- ndbrequire(regTcPtr->abortState != TcConnectionrec::ABORT_IDLE);
- break;
- }//switch
- } else if (currReclenAi < totReclenAi) {
- jam();
- switch (regTcPtr->transactionState) {
- case TcConnectionrec::WAIT_ATTR:
- jam();
- lqhAttrinfoLab(signal, dataPtr, length);
- return;
- break;
- case TcConnectionrec::WAIT_SCAN_AI:
- jam();
- scanAttrinfoLab(signal, dataPtr, length);
- return;
- break;
- case TcConnectionrec::WAIT_TUP_TO_ABORT:
- case TcConnectionrec::LOG_ABORT_QUEUED:
- case TcConnectionrec::ABORT_QUEUED:
- case TcConnectionrec::ABORT_STOPPED:
- case TcConnectionrec::WAIT_ACC_ABORT:
- case TcConnectionrec::WAIT_AI_AFTER_ABORT:
- jam();
- aiStateErrorCheckLab(signal, dataPtr, length);
- return;
- break;
- default:
- jam();
- ndbrequire(regTcPtr->abortState != TcConnectionrec::ABORT_IDLE);
- break;
- }//switch
- } else {
- switch (regTcPtr->transactionState) {
- case TcConnectionrec::WAIT_SCAN_AI:
- jam();
- scanAttrinfoLab(signal, dataPtr, length);
- return;
- break;
- default:
- ndbout_c("%d", regTcPtr->transactionState);
- ndbrequire(false);
- break;
- }//switch
- }//if
- return;
-}//Dblqh::execATTRINFO()
-
-/* ************************************************************************>> */
-/* TUP_ATTRINFO: Interpreted execution in DBTUP generates redo-log info */
-/* which is sent back to DBLQH for logging. This is because the decision */
-/* to execute or not is made in DBTUP, and thus we cannot start logging      */
-/* until the DBTUP part has been run.                                        */
-/* ************************************************************************>> */
-void Dblqh::execTUP_ATTRINFO(Signal* signal)
-{
- TcConnectionrec *regTcConnectionrec = tcConnectionrec;
- Uint32 length = signal->length() - 3;
- Uint32 tcIndex = signal->theData[0];
- Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
- jamEntry();
- tcConnectptr.i = tcIndex;
- ptrCheckGuard(tcConnectptr, ttcConnectrecFileSize, regTcConnectionrec);
- ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::WAIT_TUP);
- if (saveTupattrbuf(signal, &signal->theData[3], length) == ZOK) {
- return;
- } else {
- jam();
-/* ------------------------------------------------------------------------- */
-/* WE ARE WAITING FOR RESPONSE FROM TUP HERE. THUS WE NEED TO */
-/* GO THROUGH THE STATE MACHINE FOR THE OPERATION. */
-/* ------------------------------------------------------------------------- */
- localAbortStateHandlerLab(signal);
- }//if
-}//Dblqh::execTUP_ATTRINFO()
-
-/* ------------------------------------------------------------------------- */
-/* ------- HANDLE ATTRINFO FROM LQH ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::lqhAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->operation != ZREAD) {
- if (regTcPtr->opExec != 1) {
- if (saveTupattrbuf(signal, dataPtr, length) == ZOK) {
- ;
- } else {
- jam();
-/* ------------------------------------------------------------------------- */
-/* WE MIGHT BE WAITING FOR RESPONSE FROM SOME BLOCK HERE. THUS WE NEED TO */
-/* GO THROUGH THE STATE MACHINE FOR THE OPERATION. */
-/* ------------------------------------------------------------------------- */
- localAbortStateHandlerLab(signal);
- return;
- }//if
- }//if
- }//if
- Uint32 sig0 = regTcPtr->tupConnectrec;
- Uint32 blockNo = refToBlock(regTcPtr->tcTupBlockref);
- signal->theData[0] = sig0;
- EXECUTE_DIRECT(blockNo, GSN_ATTRINFO, signal, length + 3);
- jamEntry();
-}//Dblqh::lqhAttrinfoLab()
-
-/* ------------------------------------------------------------------------- */
-/* ------ FIND TRANSACTION BY USING HASH TABLE ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-int Dblqh::findTransaction(UintR Transid1, UintR Transid2, UintR TcOprec)
-{
- TcConnectionrec *regTcConnectionrec = tcConnectionrec;
- Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
- TcConnectionrecPtr locTcConnectptr;
-
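- // Hash on (transid1 XOR tcOprec) into 1024 buckets; colliding operations
- // are chained through nextHashRec.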
- Uint32 ThashIndex = (Transid1 ^ TcOprec) & 1023;
- locTcConnectptr.i = ctransidHash[ThashIndex];
- while (locTcConnectptr.i != RNIL) {
- ptrCheckGuard(locTcConnectptr, ttcConnectrecFileSize, regTcConnectionrec);
- if ((locTcConnectptr.p->transid[0] == Transid1) &&
- (locTcConnectptr.p->transid[1] == Transid2) &&
- (locTcConnectptr.p->tcOprec == TcOprec)) {
-/* FIRST PART OF TRANSACTION CORRECT */
-/* SECOND PART ALSO CORRECT */
-/* THE OPERATION RECORD POINTER IN TC WAS ALSO CORRECT */
- jam();
- tcConnectptr.i = locTcConnectptr.i;
- tcConnectptr.p = locTcConnectptr.p;
- return (int)ZOK;
- }//if
- jam();
-/* THIS WAS NOT THE TRANSACTION WHICH WAS SOUGHT */
- locTcConnectptr.i = locTcConnectptr.p->nextHashRec;
- }//while
-/* WE DID NOT FIND THE TRANSACTION, REPORT NOT FOUND */
- return (int)ZNOT_FOUND;
-}//Dblqh::findTransaction()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SAVE ATTRINFO FROM TUP IN ATTRINBUF ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-int Dblqh::saveTupattrbuf(Signal* signal, Uint32* dataPtr, Uint32 length)
-{
- Uint32 tfirstfreeAttrinbuf = cfirstfreeAttrinbuf;
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- Uint32 currTupAiLen = regTcPtr->currTupAiLen;
- if (tfirstfreeAttrinbuf == RNIL) {
- jam();
- terrorCode = ZGET_ATTRINBUF_ERROR;
- return ZGET_ATTRINBUF_ERROR;
- }//if
- seizeAttrinbuf(signal);
- Attrbuf * const regAttrPtr = attrinbufptr.p;
- MEMCOPY_NO_WORDS(&regAttrPtr->attrbuf[0], dataPtr, length);
- regTcPtr->currTupAiLen = currTupAiLen + length;
- regAttrPtr->attrbuf[ZINBUF_DATA_LEN] = length;
- return ZOK;
-}//Dblqh::saveTupattrbuf()
-
-/* ==========================================================================
- * ======= SEIZE ATTRIBUTE IN BUFFER =======
- *
- * GET A NEW ATTRINBUF AND SETS ATTRINBUFPTR.
- * ========================================================================= */
-void Dblqh::seizeAttrinbuf(Signal* signal)
-{
- AttrbufPtr tmpAttrinbufptr;
- AttrbufPtr regAttrinbufptr;
- Attrbuf *regAttrbuf = attrbuf;
- Uint32 tattrinbufFileSize = cattrinbufFileSize;
-
- regAttrinbufptr.i = seize_attrinbuf();
- tmpAttrinbufptr.i = tcConnectptr.p->lastAttrinbuf;
- ptrCheckGuard(regAttrinbufptr, tattrinbufFileSize, regAttrbuf);
- tcConnectptr.p->lastAttrinbuf = regAttrinbufptr.i;
- regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN] = 0;
- if (tmpAttrinbufptr.i == RNIL) {
- jam();
- tcConnectptr.p->firstAttrinbuf = regAttrinbufptr.i;
- } else {
- jam();
- ptrCheckGuard(tmpAttrinbufptr, tattrinbufFileSize, regAttrbuf);
- tmpAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = regAttrinbufptr.i;
- }//if
- regAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = RNIL;
- attrinbufptr = regAttrinbufptr;
-}//Dblqh::seizeAttrinbuf()
-
-/* ==========================================================================
- * ======= SEIZE TC CONNECT RECORD =======
- *
- * GETS A NEW TC CONNECT RECORD FROM FREELIST.
- * ========================================================================= */
-void Dblqh::seizeTcrec()
-{
- TcConnectionrecPtr locTcConnectptr;
-
- locTcConnectptr.i = cfirstfreeTcConrec;
- ptrCheckGuard(locTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- Uint32 nextTc = locTcConnectptr.p->nextTcConnectrec;
- locTcConnectptr.p->nextTcConnectrec = RNIL;
- locTcConnectptr.p->clientConnectrec = RNIL;
- locTcConnectptr.p->clientBlockref = RNIL;
- locTcConnectptr.p->abortState = TcConnectionrec::ABORT_IDLE;
- locTcConnectptr.p->tcTimer = cLqhTimeOutCount;
- locTcConnectptr.p->tableref = RNIL;
- locTcConnectptr.p->savePointId = 0;
- cfirstfreeTcConrec = nextTc;
- tcConnectptr = locTcConnectptr;
- locTcConnectptr.p->connectState = TcConnectionrec::CONNECTED;
-}//Dblqh::seizeTcrec()
-
-/* ==========================================================================
- * ======= SEIZE DATA BUFFER =======
- * ========================================================================= */
-void Dblqh::seizeTupkeybuf(Signal* signal)
-{
- Databuf *regDatabuf = databuf;
- DatabufPtr tmpDatabufptr;
- DatabufPtr regDatabufptr;
- Uint32 tdatabufFileSize = cdatabufFileSize;
-
-/* ------- GET A DATABUF. ------- */
- regDatabufptr.i = cfirstfreeDatabuf;
- tmpDatabufptr.i = tcConnectptr.p->lastTupkeybuf;
- ptrCheckGuard(regDatabufptr, tdatabufFileSize, regDatabuf);
- Uint32 nextFirst = regDatabufptr.p->nextDatabuf;
- tcConnectptr.p->lastTupkeybuf = regDatabufptr.i;
- if (tmpDatabufptr.i == RNIL) {
- jam();
- tcConnectptr.p->firstTupkeybuf = regDatabufptr.i;
- } else {
- jam();
- ptrCheckGuard(tmpDatabufptr, tdatabufFileSize, regDatabuf);
- tmpDatabufptr.p->nextDatabuf = regDatabufptr.i;
- }//if
- cfirstfreeDatabuf = nextFirst;
- regDatabufptr.p->nextDatabuf = RNIL;
- databufptr = regDatabufptr;
-}//Dblqh::seizeTupkeybuf()
-
-/* ------------------------------------------------------------------------- */
-/* ------- TAKE CARE OF LQHKEYREQ ------- */
-/* LQHKEYREQ IS THE SIGNAL THAT STARTS ALL OPERATIONS IN THE LQH BLOCK */
-/* THIS SIGNAL CONTAINS A LOT OF INFORMATION ABOUT WHAT TYPE OF OPERATION, */
-/* KEY INFORMATION, ATTRIBUTE INFORMATION, NODE INFORMATION AND A LOT MORE */
-/* ------------------------------------------------------------------------- */
-void Dblqh::execLQHKEYREQ(Signal* signal)
-{
- UintR sig0, sig1, sig2, sig3, sig4, sig5;
- Uint8 tfragDistKey;
-
- const LqhKeyReq * const lqhKeyReq = (LqhKeyReq *)signal->getDataPtr();
-
- sig0 = lqhKeyReq->clientConnectPtr;
- if (cfirstfreeTcConrec != RNIL && !ERROR_INSERTED(5031)) {
- jamEntry();
- seizeTcrec();
- } else {
-/* ------------------------------------------------------------------------- */
-/* NO FREE TC RECORD AVAILABLE, THUS WE CANNOT HANDLE THE REQUEST. */
-/* ------------------------------------------------------------------------- */
- if (ERROR_INSERTED(5031)) {
- CLEAR_ERROR_INSERT_VALUE;
- }
- noFreeRecordLab(signal, lqhKeyReq, ZNO_TC_CONNECT_ERROR);
- return;
- }//if
-
- if(ERROR_INSERTED(5038) &&
- refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
- jam();
- SET_ERROR_INSERT_VALUE(5039);
- return;
- }
-
- c_Counters.operations++;
-
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- regTcPtr->clientBlockref = signal->senderBlockRef();
- regTcPtr->clientConnectrec = sig0;
- regTcPtr->tcOprec = sig0;
- regTcPtr->storedProcId = ZNIL;
-
- UintR TtotReclenAi = lqhKeyReq->attrLen;
- sig1 = lqhKeyReq->savePointId;
- sig2 = lqhKeyReq->hashValue;
- UintR Treqinfo = lqhKeyReq->requestInfo;
- sig4 = lqhKeyReq->tableSchemaVersion;
- sig5 = lqhKeyReq->tcBlockref;
-
- regTcPtr->savePointId = sig1;
- regTcPtr->hashValue = sig2;
- const Uint32 schemaVersion = regTcPtr->schemaVersion = LqhKeyReq::getSchemaVersion(sig4);
- tabptr.i = LqhKeyReq::getTableId(sig4);
- regTcPtr->tcBlockref = sig5;
-
- const Uint8 op = LqhKeyReq::getOperation(Treqinfo);
- if (op == ZREAD && !getAllowRead()){
- noFreeRecordLab(signal, lqhKeyReq, ZNODE_SHUTDOWN_IN_PROGESS);
- return;
- }
-
- regTcPtr->totReclenAi = LqhKeyReq::getAttrLen(TtotReclenAi);
- regTcPtr->tcScanInfo = lqhKeyReq->scanInfo;
- regTcPtr->indTakeOver = LqhKeyReq::getScanTakeOverFlag(TtotReclenAi);
-
- regTcPtr->readlenAi = 0;
- regTcPtr->currTupAiLen = 0;
- regTcPtr->listState = TcConnectionrec::NOT_IN_LIST;
- regTcPtr->logWriteState = TcConnectionrec::NOT_STARTED;
- regTcPtr->fragmentptr = RNIL;
-
- sig0 = lqhKeyReq->fragmentData;
- sig1 = lqhKeyReq->transId1;
- sig2 = lqhKeyReq->transId2;
- sig3 = lqhKeyReq->variableData[0];
- sig4 = lqhKeyReq->variableData[1];
-
- regTcPtr->fragmentid = LqhKeyReq::getFragmentId(sig0);
- regTcPtr->nextReplica = LqhKeyReq::getNextReplicaNodeId(sig0);
- regTcPtr->transid[0] = sig1;
- regTcPtr->transid[1] = sig2;
- regTcPtr->applRef = sig3;
- regTcPtr->applOprec = sig4;
-
- regTcPtr->commitAckMarker = RNIL;
- if(LqhKeyReq::getMarkerFlag(Treqinfo)){
- jam();
-
- CommitAckMarkerPtr markerPtr;
- m_commitAckMarkerHash.seize(markerPtr);
- if(markerPtr.i == RNIL){
- noFreeRecordLab(signal, lqhKeyReq, ZNO_FREE_MARKER_RECORDS_ERROR);
- return;
- }
- markerPtr.p->transid1 = sig1;
- markerPtr.p->transid2 = sig2;
- markerPtr.p->apiRef = sig3;
- markerPtr.p->apiOprec = sig4;
- const NodeId tcNodeId = refToNode(sig5);
- markerPtr.p->tcNodeId = tcNodeId;
-
- CommitAckMarkerPtr tmp;
-#ifdef VM_TRACE
-#ifdef MARKER_TRACE
- ndbout_c("Add marker[%.8x %.8x]", markerPtr.p->transid1, markerPtr.p->transid2);
-#endif
- ndbrequire(!m_commitAckMarkerHash.find(tmp, * markerPtr.p));
-#endif
- m_commitAckMarkerHash.add(markerPtr);
- regTcPtr->commitAckMarker = markerPtr.i;
- }
-
- regTcPtr->reqinfo = Treqinfo;
- regTcPtr->lastReplicaNo = LqhKeyReq::getLastReplicaNo(Treqinfo);
- regTcPtr->lockType = LqhKeyReq::getLockType(Treqinfo);
- regTcPtr->dirtyOp = LqhKeyReq::getDirtyFlag(Treqinfo);
- regTcPtr->opExec = LqhKeyReq::getInterpretedFlag(Treqinfo);
- regTcPtr->opSimple = LqhKeyReq::getSimpleFlag(Treqinfo);
- regTcPtr->operation = LqhKeyReq::getOperation(Treqinfo);
- regTcPtr->simpleRead = regTcPtr->operation == ZREAD && regTcPtr->opSimple;
- regTcPtr->seqNoReplica = LqhKeyReq::getSeqNoReplica(Treqinfo);
- UintR TreclenAiLqhkey = LqhKeyReq::getAIInLqhKeyReq(Treqinfo);
- regTcPtr->apiVersionNo = 0;
-
- CRASH_INSERTION2(5041, regTcPtr->simpleRead &&
- refToNode(signal->senderBlockRef()) != cownNodeid);
-
- regTcPtr->reclenAiLqhkey = TreclenAiLqhkey;
- regTcPtr->currReclenAi = TreclenAiLqhkey;
- UintR TitcKeyLen = LqhKeyReq::getKeyLen(Treqinfo);
- regTcPtr->primKeyLen = TitcKeyLen;
- regTcPtr->noFiredTriggers = lqhKeyReq->noFiredTriggers;
-
- UintR TapplAddressInd = LqhKeyReq::getApplicationAddressFlag(Treqinfo);
- UintR nextPos = (TapplAddressInd << 1);
- UintR TsameClientAndTcOprec = LqhKeyReq::getSameClientAndTcFlag(Treqinfo);
- if (TsameClientAndTcOprec == 1) {
- regTcPtr->tcOprec = lqhKeyReq->variableData[nextPos];
- nextPos++;
- }//if
- UintR TnextReplicasIndicator = regTcPtr->lastReplicaNo -
- regTcPtr->seqNoReplica;
- if (TnextReplicasIndicator > 1) {
- regTcPtr->nodeAfterNext[0] = lqhKeyReq->variableData[nextPos] & 0xFFFF;
- regTcPtr->nodeAfterNext[1] = lqhKeyReq->variableData[nextPos] >> 16;
- nextPos++;
- }//if
- UintR TstoredProcIndicator = LqhKeyReq::getStoredProcFlag(TtotReclenAi);
- if (TstoredProcIndicator == 1) {
- regTcPtr->storedProcId = lqhKeyReq->variableData[nextPos] & ZNIL;
- nextPos++;
- }//if
- UintR TreadLenAiIndicator = LqhKeyReq::getReturnedReadLenAIFlag(Treqinfo);
- if (TreadLenAiIndicator == 1) {
- regTcPtr->readlenAi = lqhKeyReq->variableData[nextPos] & ZNIL;
- nextPos++;
- }//if
- sig0 = lqhKeyReq->variableData[nextPos + 0];
- sig1 = lqhKeyReq->variableData[nextPos + 1];
- sig2 = lqhKeyReq->variableData[nextPos + 2];
- sig3 = lqhKeyReq->variableData[nextPos + 3];
-
- regTcPtr->tupkeyData[0] = sig0;
- regTcPtr->tupkeyData[1] = sig1;
- regTcPtr->tupkeyData[2] = sig2;
- regTcPtr->tupkeyData[3] = sig3;
-
- if (TitcKeyLen > 0) {
- if (TitcKeyLen < 4) {
- nextPos += TitcKeyLen;
- } else {
- nextPos += 4;
- }//if
- } else {
- LQHKEY_error(signal, 3);
- return;
- }//if
-
- if ((LqhKeyReq::FixedSignalLength + nextPos + TreclenAiLqhkey) !=
- signal->length()) {
- LQHKEY_error(signal, 2);
- return;
- }//if
- UintR TseqNoReplica = regTcPtr->seqNoReplica;
- UintR TlastReplicaNo = regTcPtr->lastReplicaNo;
- if (TseqNoReplica == TlastReplicaNo) {
- jam();
- regTcPtr->nextReplica = ZNIL;
- } else {
- if (TseqNoReplica < TlastReplicaNo) {
- jam();
- regTcPtr->nextSeqNoReplica = TseqNoReplica + 1;
- if ((regTcPtr->nextReplica == 0) ||
- (regTcPtr->nextReplica == cownNodeid)) {
- LQHKEY_error(signal, 0);
- }//if
- } else {
- LQHKEY_error(signal, 4);
- return;
- }//if
- }//if
- TcConnectionrecPtr localNextTcConnectptr;
- Uint32 hashIndex = (regTcPtr->transid[0] ^ regTcPtr->tcOprec) & 1023;
- localNextTcConnectptr.i = ctransidHash[hashIndex];
- ctransidHash[hashIndex] = tcConnectptr.i;
- regTcPtr->prevHashRec = RNIL;
- regTcPtr->nextHashRec = localNextTcConnectptr.i;
- if (localNextTcConnectptr.i != RNIL) {
-/* -------------------------------------------------------------------------- */
-/* ENSURE THAT THE NEXT RECORD HAS SET PREVIOUS TO OUR RECORD IF IT EXISTS */
-/* -------------------------------------------------------------------------- */
- ptrCheckGuard(localNextTcConnectptr,
- ctcConnectrecFileSize, tcConnectionrec);
- jam();
- localNextTcConnectptr.p->prevHashRec = tcConnectptr.i;
- }//if
- if (tabptr.i >= ctabrecFileSize) {
- LQHKEY_error(signal, 5);
- return;
- }//if
- ptrAss(tabptr, tablerec);
- if(tabptr.p->tableStatus != Tablerec::TABLE_DEFINED){
- LQHKEY_abort(signal, 4);
- return;
- }
- if(tabptr.p->schemaVersion != schemaVersion){
- LQHKEY_abort(signal, 5);
- return;
- }
-
- regTcPtr->tableref = tabptr.i;
- tabptr.p->usageCount++;
-
- if (!getFragmentrec(signal, regTcPtr->fragmentid)) {
- LQHKEY_error(signal, 6);
- return;
- }//if
- regTcPtr->localFragptr = regTcPtr->hashValue & 1;
- Uint8 TcopyType = fragptr.p->fragCopy;
- tfragDistKey = fragptr.p->fragDistributionKey;
- if (fragptr.p->fragStatus == Fragrecord::ACTIVE_CREATION) {
- jam();
- regTcPtr->activeCreat = ZTRUE;
- CRASH_INSERTION(5002);
- } else {
- regTcPtr->activeCreat = ZFALSE;
- }//if
- regTcPtr->replicaType = TcopyType;
- regTcPtr->fragmentptr = fragptr.i;
- Uint8 TdistKey = LqhKeyReq::getDistributionKey(TtotReclenAi);
- if ((tfragDistKey != TdistKey) &&
- (regTcPtr->seqNoReplica == 0) &&
- (regTcPtr->dirtyOp == ZFALSE) &&
- (regTcPtr->simpleRead == ZFALSE)) {
- /* ----------------------------------------------------------------------
-   * WE HAVE A DIFFERENT OPINION THAN THE DIH THAT STARTED THE TRANSACTION.
- * THE REASON COULD BE THAT THIS IS AN OLD DISTRIBUTION WHICH IS NO LONGER
- * VALID TO USE. THIS MUST BE CHECKED.
- * ONE IS ADDED TO THE DISTRIBUTION KEY EVERY TIME WE ADD A NEW REPLICA.
- * FAILED REPLICAS DO NOT AFFECT THE DISTRIBUTION KEY. THIS MEANS THAT THE
- * MAXIMUM DEVIATION CAN BE ONE BETWEEN THOSE TWO VALUES.
- * --------------------------------------------------------------------- */
- Int32 tmp = TdistKey - tfragDistKey;
- tmp = (tmp < 0 ? - tmp : tmp);
- if ((tmp <= 1) || (tfragDistKey == 0)) {
- LQHKEY_abort(signal, 0);
- return;
- }//if
- LQHKEY_error(signal, 1);
- }//if
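-  /* Illustrative example of the rule above (hypothetical values): a DIH
-   * distribution key of 4 against a stored fragment key of 3 gives a
-   * deviation of 1, so the operation is aborted and can be retried with
-   * fresh distribution information, while a deviation of two or more is
-   * treated as a real error. */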
- if (TreclenAiLqhkey != 0) {
- if (regTcPtr->operation != ZREAD) {
- if (regTcPtr->operation != ZDELETE) {
- if (regTcPtr->opExec != 1) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* */
-/* UPDATES, WRITES AND INSERTS THAT ARE NOT INTERPRETED WILL USE THE */
-/* SAME ATTRINFO IN ALL REPLICAS. THUS WE SAVE THE ATTRINFO ALREADY */
-/* TO SAVE A SIGNAL FROM TUP TO LQH. INTERPRETED EXECUTION IN TUP */
-/* WILL CREATE NEW ATTRINFO FOR THE OTHER REPLICAS AND IT IS THUS NOT */
-/*       A GOOD IDEA TO SAVE THE INFORMATION HERE. IT IS ALSO UNNECESSARY    */
-/*       TO SAVE READS SINCE THAT ATTRINFO WILL NEVER BE SENT TO ANY MORE    */
-/*       REPLICAS.                                                           */
-/*---------------------------------------------------------------------------*/
-/* READS AND DELETES CAN ONLY HAVE INFORMATION ABOUT WHAT IS TO BE READ. */
-/* NO INFORMATION THAT NEEDS LOGGING. */
-/*---------------------------------------------------------------------------*/
- sig0 = lqhKeyReq->variableData[nextPos + 0];
- sig1 = lqhKeyReq->variableData[nextPos + 1];
- sig2 = lqhKeyReq->variableData[nextPos + 2];
- sig3 = lqhKeyReq->variableData[nextPos + 3];
- sig4 = lqhKeyReq->variableData[nextPos + 4];
-
- regTcPtr->firstAttrinfo[0] = sig0;
- regTcPtr->firstAttrinfo[1] = sig1;
- regTcPtr->firstAttrinfo[2] = sig2;
- regTcPtr->firstAttrinfo[3] = sig3;
- regTcPtr->firstAttrinfo[4] = sig4;
- regTcPtr->currTupAiLen = TreclenAiLqhkey;
- } else {
- jam();
- regTcPtr->reclenAiLqhkey = 0;
- }//if
- } else {
- jam();
- regTcPtr->reclenAiLqhkey = 0;
- }//if
- }//if
- sig0 = lqhKeyReq->variableData[nextPos + 0];
- sig1 = lqhKeyReq->variableData[nextPos + 1];
- sig2 = lqhKeyReq->variableData[nextPos + 2];
- sig3 = lqhKeyReq->variableData[nextPos + 3];
- sig4 = lqhKeyReq->variableData[nextPos + 4];
-
- signal->theData[0] = regTcPtr->tupConnectrec;
- signal->theData[3] = sig0;
- signal->theData[4] = sig1;
- signal->theData[5] = sig2;
- signal->theData[6] = sig3;
- signal->theData[7] = sig4;
- EXECUTE_DIRECT(refToBlock(regTcPtr->tcTupBlockref), GSN_ATTRINFO,
- signal, TreclenAiLqhkey + 3);
- jamEntry();
- if (signal->theData[0] == (UintR)-1) {
- LQHKEY_abort(signal, 2);
- return;
- }//if
- }//if
-/* ------- TAKE CARE OF PRIM KEY DATA ------- */
- if (regTcPtr->primKeyLen <= 4) {
- endgettupkeyLab(signal);
- return;
- } else {
- jam();
-/*--------------------------------------------------------------------*/
-/* KEY LENGTH WAS MORE THAN 4 WORDS (WORD = 4 BYTE). THUS WE */
-/* HAVE TO ALLOCATE A DATA BUFFER TO STORE THE KEY DATA AND */
-/* WAIT FOR THE KEYINFO SIGNAL. */
-/*--------------------------------------------------------------------*/
- regTcPtr->save1 = 4;
- regTcPtr->transactionState = TcConnectionrec::WAIT_TUPKEYINFO;
- return;
- }//if
- return;
-}//Dblqh::execLQHKEYREQ()
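-
-/* A standalone sketch of how execLQHKEYREQ() above walks the variable part of
- * the signal: each optional field is present only when the corresponding
- * indicator is set in the request info, so the read position advances
- * conditionally before the (up to four) inlined key words and any inlined
- * attribute info. The struct and field names here are illustrative stand-ins. */
-struct LqhKeyVarFlagsSketch {
-  bool applAddress;                   /* two application address words          */
-  bool sameClientAndTc;               /* one word: TC operation record          */
-  bool moreReplicas;                  /* one word: two further replica node ids */
-  bool storedProc;                    /* one word: stored procedure id          */
-  bool returnedReadLen;               /* one word: returned read length         */
-};
-
-static unsigned keyDataOffsetSketch(const LqhKeyVarFlagsSketch& f)
-{
-  unsigned pos = 0;
-  if (f.applAddress)     pos += 2;
-  if (f.sameClientAndTc) pos += 1;
-  if (f.moreReplicas)    pos += 1;
-  if (f.storedProc)      pos += 1;
-  if (f.returnedReadLen) pos += 1;
-  return pos;                         /* index of the first inlined key word */
-}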
-
-void Dblqh::endgettupkeyLab(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->totReclenAi == regTcPtr->currReclenAi) {
- ;
- } else {
- jam();
- ndbrequire(regTcPtr->currReclenAi < regTcPtr->totReclenAi);
- regTcPtr->transactionState = TcConnectionrec::WAIT_ATTR;
- return;
- }//if
-/* ---------------------------------------------------------------------- */
-/* NOW RECEPTION OF LQHKEYREQ IS COMPLETED THE NEXT STEP IS TO START*/
-/* PROCESSING THE MESSAGE. IF THE MESSAGE IS TO A STAND-BY NODE */
-/* WITHOUT NETWORK REDUNDANCY OR PREPARE-TO-COMMIT ACTIVATED THE */
-/* PREPARATION TO SEND TO THE NEXT NODE WILL START IMMEDIATELY. */
-/* */
-/* OTHERWISE THE PROCESSING WILL START AFTER SETTING THE PROPER */
-/* STATE. HOWEVER BEFORE PROCESSING THE MESSAGE */
-/* IT IS NECESSARY TO CHECK THAT THE FRAGMENT IS NOT PERFORMING */
-/* A CHECKPOINT. THE OPERATION SHALL ALSO BE LINKED INTO THE */
-/* FRAGMENT QUEUE OR LIST OF ACTIVE OPERATIONS. */
-/* */
-/* THE FIRST STEP IN PROCESSING THE MESSAGE IS TO CONTACT DBACC. */
-/*------------------------------------------------------------------------*/
- switch (fragptr.p->fragStatus) {
- case Fragrecord::FSACTIVE:
- case Fragrecord::CRASH_RECOVERING:
- case Fragrecord::ACTIVE_CREATION:
- linkActiveFrag(signal);
- prepareContinueAfterBlockedLab(signal);
- return;
- break;
- case Fragrecord::BLOCKED:
- jam();
- linkFragQueue(signal);
- regTcPtr->transactionState = TcConnectionrec::STOPPED;
- return;
- break;
- case Fragrecord::FREE:
- jam();
- case Fragrecord::DEFINED:
- jam();
- case Fragrecord::REMOVING:
- jam();
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dblqh::endgettupkeyLab()
-
-void Dblqh::prepareContinueAfterBlockedLab(Signal* signal)
-{
- UintR ttcScanOp;
- UintR taccreq;
-
-/* -------------------------------------------------------------------------- */
-/* INPUT: TC_CONNECTPTR ACTIVE CONNECTION RECORD */
-/* FRAGPTR FRAGMENT RECORD */
-/* -------------------------------------------------------------------------- */
-/* -------------------------------------------------------------------------- */
-/* CONTINUE HERE AFTER BEING BLOCKED FOR A WHILE DURING LOCAL CHECKPOINT. */
-/* -------------------------------------------------------------------------- */
-/* ALSO AFTER NORMAL PROCEDURE WE CONTINUE HERE */
-/* -------------------------------------------------------------------------- */
- Uint32 tc_ptr_i = tcConnectptr.i;
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->indTakeOver == ZTRUE) {
- jam();
- ttcScanOp = KeyInfo20::getScanOp(regTcPtr->tcScanInfo);
- scanptr.i = RNIL;
- {
- ScanRecord key;
- key.scanNumber = KeyInfo20::getScanNo(regTcPtr->tcScanInfo);
- key.fragPtrI = fragptr.i;
- c_scanTakeOverHash.find(scanptr, key);
-#ifdef TRACE_SCAN_TAKEOVER
- if(scanptr.i == RNIL)
- ndbout_c("not finding (%d %d)", key.scanNumber, key.fragPtrI);
-#endif
- }
- if (scanptr.i == RNIL) {
- jam();
- releaseActiveFrag(signal);
- takeOverErrorLab(signal);
- return;
- }//if
- Uint32 accOpPtr= get_acc_ptr_from_scan_record(scanptr.p,
- ttcScanOp,
- true);
- if (accOpPtr == RNIL) {
- jam();
- releaseActiveFrag(signal);
- takeOverErrorLab(signal);
- return;
- }//if
- signal->theData[1] = accOpPtr;
- signal->theData[2] = regTcPtr->transid[0];
- signal->theData[3] = regTcPtr->transid[1];
- EXECUTE_DIRECT(refToBlock(regTcPtr->tcAccBlockref), GSN_ACC_TO_REQ,
- signal, 4);
- if (signal->theData[0] == (UintR)-1) {
- execACC_TO_REF(signal);
- return;
- }//if
- jamEntry();
- }//if
-/*-------------------------------------------------------------------*/
-/* IT IS NOW TIME TO CONTACT ACC. THE TUPLE KEY WILL BE SENT */
-/* AND THIS WILL BE TRANSLATED INTO A LOCAL KEY BY USING THE */
-/* LOCAL PART OF THE LH3-ALGORITHM. ALSO PROPER LOCKS ON THE */
-/* TUPLE WILL BE SET. FOR INSERTS AND DELETES THE MESSAGE WILL */
-/* START AN INSERT/DELETE INTO THE HASH TABLE. */
-/* */
-/* BEFORE SENDING THE MESSAGE THE REQUEST INFORMATION IS SET */
-/* PROPERLY. */
-/* ----------------------------------------------------------------- */
-#if 0
- if (regTcPtr->tableref != 0) {
- switch (regTcPtr->operation) {
-    case ZREAD: ndbout << "Read "; break;
-    case ZUPDATE: ndbout << " Update "; break;
-    case ZWRITE: ndbout << "Write "; break;
-    case ZINSERT: ndbout << "Insert "; break;
-    case ZDELETE: ndbout << "Delete "; break;
- default: ndbout << "????"; break;
- }
- ndbout << "med nyckel = " << regTcPtr->tupkeyData[0] << endl;
- }
-#endif
-
- regTcPtr->transactionState = TcConnectionrec::WAIT_ACC;
- taccreq = regTcPtr->operation;
- taccreq = taccreq + (regTcPtr->opSimple << 3);
- taccreq = taccreq + (regTcPtr->lockType << 4);
- taccreq = taccreq + (regTcPtr->dirtyOp << 6);
- taccreq = taccreq + (regTcPtr->replicaType << 7);
- taccreq = taccreq + (regTcPtr->apiVersionNo << 9);
-/* ************ */
-/* ACCKEYREQ < */
-/* ************ */
- ndbrequire(regTcPtr->localFragptr < 2);
- Uint32 sig0, sig1, sig2, sig3, sig4;
- sig0 = regTcPtr->accConnectrec;
- sig1 = fragptr.p->accFragptr[regTcPtr->localFragptr];
- sig2 = regTcPtr->hashValue;
- sig3 = regTcPtr->primKeyLen;
- sig4 = regTcPtr->transid[0];
- signal->theData[0] = sig0;
- signal->theData[1] = sig1;
- signal->theData[2] = taccreq;
- signal->theData[3] = sig2;
- signal->theData[4] = sig3;
- signal->theData[5] = sig4;
-
- sig0 = regTcPtr->transid[1];
- sig1 = regTcPtr->tupkeyData[0];
- sig2 = regTcPtr->tupkeyData[1];
- sig3 = regTcPtr->tupkeyData[2];
- sig4 = regTcPtr->tupkeyData[3];
- signal->theData[6] = sig0;
- signal->theData[7] = sig1;
- signal->theData[8] = sig2;
- signal->theData[9] = sig3;
- signal->theData[10] = sig4;
- if (regTcPtr->primKeyLen > 4) {
- sendKeyinfoAcc(signal, 11);
- }//if
- EXECUTE_DIRECT(refToBlock(regTcPtr->tcAccBlockref), GSN_ACCKEYREQ,
- signal, 7 + regTcPtr->primKeyLen);
- if (signal->theData[0] < RNIL) {
- signal->theData[0] = tc_ptr_i;
- execACCKEYCONF(signal);
- return;
- } else if (signal->theData[0] == RNIL) {
- ;
- } else {
- ndbrequire(signal->theData[0] == (UintR)-1);
- signal->theData[0] = tc_ptr_i;
- execACCKEYREF(signal);
- }//if
- return;
-}//Dblqh::prepareContinueAfterBlockedLab()
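-
-/* A standalone sketch of the ACCKEYREQ request word assembled in
- * prepareContinueAfterBlockedLab() above; the bit positions follow the shifts
- * used in the code, while the exact field widths noted in the comments are
- * assumptions made for illustration. */
-static unsigned packAccReqSketch(unsigned operation, unsigned opSimple,
-                                 unsigned lockType, unsigned dirtyOp,
-                                 unsigned replicaType, unsigned apiVersionNo)
-{
-  unsigned req = operation;           /* low bits : operation type */
-  req += (opSimple     << 3);         /* bit 3    : simple flag    */
-  req += (lockType     << 4);         /* bits 4.. : lock type      */
-  req += (dirtyOp      << 6);         /* bit 6    : dirty flag     */
-  req += (replicaType  << 7);         /* bits 7.. : replica type   */
-  req += (apiVersionNo << 9);         /* bits 9.. : api version    */
-  return req;
-}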
-
-/* ========================================================================== */
-/* ======= SEND KEYINFO TO ACC ======= */
-/* */
-/* ========================================================================== */
-void Dblqh::sendKeyinfoAcc(Signal* signal, Uint32 Ti)
-{
- DatabufPtr regDatabufptr;
- regDatabufptr.i = tcConnectptr.p->firstTupkeybuf;
-
- do {
- jam();
- ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
- Uint32 sig0 = regDatabufptr.p->data[0];
- Uint32 sig1 = regDatabufptr.p->data[1];
- Uint32 sig2 = regDatabufptr.p->data[2];
- Uint32 sig3 = regDatabufptr.p->data[3];
- signal->theData[Ti] = sig0;
- signal->theData[Ti + 1] = sig1;
- signal->theData[Ti + 2] = sig2;
- signal->theData[Ti + 3] = sig3;
- regDatabufptr.i = regDatabufptr.p->nextDatabuf;
- Ti += 4;
- } while (regDatabufptr.i != RNIL);
-}//Dblqh::sendKeyinfoAcc()
-
-void Dblqh::execLQH_ALLOCREQ(Signal* signal)
-{
- TcConnectionrecPtr regTcPtr;
- FragrecordPtr regFragptr;
-
- jamEntry();
- regTcPtr.i = signal->theData[0];
- ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec);
-
- regFragptr.i = regTcPtr.p->fragmentptr;
- ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
-
- ndbrequire(regTcPtr.p->localFragptr < 2);
- signal->theData[0] = regTcPtr.p->tupConnectrec;
- signal->theData[1] = regFragptr.p->tupFragptr[regTcPtr.p->localFragptr];
- signal->theData[2] = regTcPtr.p->tableref;
- Uint32 tup = refToBlock(regTcPtr.p->tcTupBlockref);
- EXECUTE_DIRECT(tup, GSN_TUP_ALLOCREQ, signal, 3);
-}//Dblqh::execLQH_ALLOCREQ()
-
-/* ************>> */
-/* ACCKEYCONF > */
-/* ************>> */
-void Dblqh::execACCKEYCONF(Signal* signal)
-{
- TcConnectionrec *regTcConnectionrec = tcConnectionrec;
- Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
- Uint32 tcIndex = signal->theData[0];
- Uint32 Tfragid = signal->theData[2];
- Uint32 localKey1 = signal->theData[3];
- Uint32 localKey2 = signal->theData[4];
- Uint32 localKeyFlag = signal->theData[5];
- jamEntry();
- tcConnectptr.i = tcIndex;
- ptrCheckGuard(tcConnectptr, ttcConnectrecFileSize, regTcConnectionrec);
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->transactionState != TcConnectionrec::WAIT_ACC) {
- LQHKEY_abort(signal, 3);
- return;
- }//if
- /* ------------------------------------------------------------------------
- * Set transaction state and also reset the activeCreat since that is only
- * valid in cases where the record was not present.
- * ------------------------------------------------------------------------ */
- regTcPtr->transactionState = TcConnectionrec::WAIT_TUP;
- regTcPtr->activeCreat = ZFALSE;
- /* ------------------------------------------------------------------------
- * IT IS NOW TIME TO CONTACT THE TUPLE MANAGER. THE TUPLE MANAGER NEEDS THE
- * INFORMATION ON WHICH TABLE AND FRAGMENT, THE LOCAL KEY AND IT NEEDS TO
- * KNOW THE TYPE OF OPERATION TO PERFORM. TUP CAN SEND THE ATTRINFO DATA
- * EITHER TO THE TC BLOCK OR DIRECTLY TO THE APPLICATION. THE SCHEMA VERSION
- * IS NEEDED SINCE TWO SCHEMA VERSIONS CAN BE ACTIVE SIMULTANEOUSLY ON A
- * TABLE.
- * ----------------------------------------------------------------------- */
- if (regTcPtr->operation == ZWRITE)
- {
- Uint32 op= signal->theData[1];
- if(likely(op == ZINSERT || op == ZUPDATE))
- {
- regTcPtr->operation = op;
- }
- else
- {
- warningEvent("Convering %d to ZUPDATE", op);
- regTcPtr->operation = ZUPDATE;
- }
- }//if
-
- ndbrequire(localKeyFlag == 1);
- localKey2 = localKey1 & MAX_TUPLES_PER_PAGE;
- localKey1 = localKey1 >> MAX_TUPLES_BITS;
- Uint32 Ttupreq = regTcPtr->dirtyOp;
- Ttupreq = Ttupreq + (regTcPtr->opSimple << 1);
- Ttupreq = Ttupreq + (regTcPtr->operation << 6);
- Ttupreq = Ttupreq + (regTcPtr->opExec << 10);
- Ttupreq = Ttupreq + (regTcPtr->apiVersionNo << 11);
-
- /* ---------------------------------------------------------------------
- * Clear interpreted mode bit since we do not want the next replica to
- * use interpreted mode. The next replica will receive a normal write.
- * --------------------------------------------------------------------- */
- regTcPtr->opExec = 0;
- /* ************< */
- /* TUPKEYREQ < */
- /* ************< */
- TupKeyReq * const tupKeyReq = (TupKeyReq *)signal->getDataPtrSend();
- Uint32 sig0, sig1, sig2, sig3;
-
- sig0 = regTcPtr->tupConnectrec;
- sig1 = regTcPtr->tableref;
- tupKeyReq->connectPtr = sig0;
- tupKeyReq->request = Ttupreq;
- tupKeyReq->tableRef = sig1;
- tupKeyReq->fragId = Tfragid;
- tupKeyReq->keyRef1 = localKey1;
- tupKeyReq->keyRef2 = localKey2;
-
- sig0 = regTcPtr->totReclenAi;
- sig1 = regTcPtr->applOprec;
- sig2 = regTcPtr->applRef;
- sig3 = regTcPtr->schemaVersion;
- FragrecordPtr regFragptr;
- regFragptr.i = regTcPtr->fragmentptr;
- ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
- tupKeyReq->attrBufLen = sig0;
- tupKeyReq->opRef = sig1;
- tupKeyReq->applRef = sig2;
- tupKeyReq->schemaVersion = sig3;
-
- ndbrequire(regTcPtr->localFragptr < 2);
- sig0 = regTcPtr->storedProcId;
- sig1 = regTcPtr->transid[0];
- sig2 = regTcPtr->transid[1];
- sig3 = regFragptr.p->tupFragptr[regTcPtr->localFragptr];
- Uint32 tup = refToBlock(regTcPtr->tcTupBlockref);
-
- tupKeyReq->storedProcedure = sig0;
- tupKeyReq->transId1 = sig1;
- tupKeyReq->transId2 = sig2;
- tupKeyReq->fragPtr = sig3;
- tupKeyReq->primaryReplica = (tcConnectptr.p->seqNoReplica == 0)?true:false;
- tupKeyReq->coordinatorTC = tcConnectptr.p->tcBlockref;
- tupKeyReq->tcOpIndex = tcConnectptr.p->tcOprec;
- tupKeyReq->savePointId = tcConnectptr.p->savePointId;
-
- EXECUTE_DIRECT(tup, GSN_TUPKEYREQ, signal, TupKeyReq::SignalLength);
-}//Dblqh::execACCKEYCONF()
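-
-/* A standalone sketch of the local key split in execACCKEYCONF() above: ACC
- * returns one combined word whose low bits give the tuple index within a page
- * and whose high bits give the page reference. The bit width below is an
- * assumed value; the real one is MAX_TUPLES_BITS (with MAX_TUPLES_PER_PAGE as
- * the mask) from the kernel headers. */
-static void splitLocalKeySketch(unsigned combined,
-                                unsigned& pageId, unsigned& pageIndex)
-{
-  const unsigned tupleBits = 13;                 /* assumed width           */
-  const unsigned tupleMask = (1u << tupleBits) - 1;
-  pageIndex = combined & tupleMask;              /* tuple index within page */
-  pageId    = combined >> tupleBits;             /* page reference          */
-}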
-
-/* --------------------------------------------------------------------------
- * ------- ENTER TUP... -------
- * ENTER TUPKEYCONF WITH
- * TC_CONNECTPTR,
- * TDATA2, LOCAL KEY REFERENCE 1, ONLY INTERESTING AFTER INSERT
- * TDATA3, LOCAL KEY REFERENCE 2, ONLY INTERESTING AFTER INSERT
- * TDATA4, TOTAL LENGTH OF READ DATA SENT TO TC/APPLICATION
- * TDATA5 TOTAL LENGTH OF UPDATE DATA SENT TO/FROM TUP
- * GOTO TUPKEY_CONF
- *
- * TAKE CARE OF RESPONSES FROM TUPLE MANAGER.
- * -------------------------------------------------------------------------- */
-void Dblqh::tupkeyConfLab(Signal* signal)
-{
-/* ---- GET OPERATION TYPE AND CHECK WHAT KIND OF OPERATION IS REQUESTED ---- */
- const TupKeyConf * const tupKeyConf = (TupKeyConf *)&signal->theData[0];
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->simpleRead) {
- jam();
- /* ----------------------------------------------------------------------
- * THE OPERATION IS A SIMPLE READ. WE WILL IMMEDIATELY COMMIT THE OPERATION.
- * SINCE WE HAVE NOT RELEASED THE FRAGMENT LOCK (FOR LOCAL CHECKPOINTS) YET
- * WE CAN GO IMMEDIATELY TO COMMIT_CONTINUE_AFTER_BLOCKED.
- * WE HAVE ALREADY SENT THE RESPONSE SO WE ARE NOT INTERESTED IN READ LENGTH
- * ---------------------------------------------------------------------- */
- regTcPtr->gci = cnewestGci;
- releaseActiveFrag(signal);
- commitContinueAfterBlockedLab(signal);
- return;
- }//if
- if (tupKeyConf->readLength != 0) {
- jam();
-
- /* SET BIT 15 IN REQINFO */
- LqhKeyReq::setApplicationAddressFlag(regTcPtr->reqinfo, 1);
-
- regTcPtr->readlenAi = tupKeyConf->readLength;
- }//if
- regTcPtr->totSendlenAi = tupKeyConf->writeLength;
- ndbrequire(regTcPtr->totSendlenAi == regTcPtr->currTupAiLen);
- rwConcludedLab(signal);
- return;
-}//Dblqh::tupkeyConfLab()
-
-/* --------------------------------------------------------------------------
- * THE CODE IS FOUND IN THE SIGNAL RECEPTION PART OF LQH
- * -------------------------------------------------------------------------- */
-void Dblqh::rwConcludedLab(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- /* ------------------------------------------------------------------------
- * WE HAVE NOW CONCLUDED READING/WRITING IN ACC AND TUP FOR THIS OPERATION.
- * IT IS NOW TIME TO LOG THE OPERATION, SEND REQUEST TO NEXT NODE OR TC AND
- * FOR SOME TYPES OF OPERATIONS IT IS EVEN TIME TO COMMIT THE OPERATION.
- * ------------------------------------------------------------------------ */
- if (regTcPtr->operation == ZREAD) {
- jam();
- /* ----------------------------------------------------------------------
- * A NORMAL READ OPERATION IS NOT LOGGED BUT IS NOT COMMITTED UNTIL THE
- * COMMIT SIGNAL ARRIVES. THUS WE CONTINUE PACKING THE RESPONSE.
- * ---------------------------------------------------------------------- */
- releaseActiveFrag(signal);
- packLqhkeyreqLab(signal);
- return;
- } else {
- FragrecordPtr regFragptr;
- regFragptr.i = regTcPtr->fragmentptr;
- ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
- if (regFragptr.p->logFlag == Fragrecord::STATE_FALSE){
- if (regTcPtr->dirtyOp == ZTRUE) {
- jam();
- /* ------------------------------------------------------------------
-	   * THIS OPERATION WAS A WRITE OPERATION THAT DOES NOT NEED LOGGING AND
-	   * THAT CAN BE COMMITTED IMMEDIATELY.
- * ------------------------------------------------------------------ */
- regTcPtr->gci = cnewestGci;
- releaseActiveFrag(signal);
- commitContinueAfterBlockedLab(signal);
- return;
- } else {
- jam();
- /* ------------------------------------------------------------------
-	   * A NORMAL WRITE OPERATION ON A FRAGMENT WHICH DOES NOT NEED LOGGING.
- * WE WILL PACK THE REQUEST/RESPONSE TO THE NEXT NODE/TO TC.
- * ------------------------------------------------------------------ */
- regTcPtr->logWriteState = TcConnectionrec::NOT_WRITTEN;
- releaseActiveFrag(signal);
- packLqhkeyreqLab(signal);
- return;
- }//if
- } else {
- jam();
- /* --------------------------------------------------------------------
- * A DIRTY OPERATION WHICH NEEDS LOGGING. WE START BY LOGGING THE
- * REQUEST. IN THIS CASE WE WILL RELEASE THE FRAGMENT LOCK FIRST.
- * --------------------------------------------------------------------
- * A NORMAL WRITE OPERATION THAT NEEDS LOGGING AND WILL NOT BE
- * PREMATURELY COMMITTED.
- * -------------------------------------------------------------------- */
- releaseActiveFrag(signal);
- logLqhkeyreqLab(signal);
- return;
- }//if
- }//if
-}//Dblqh::rwConcludedLab()
-
-void Dblqh::rwConcludedAiLab(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- fragptr.i = regTcPtr->fragmentptr;
- /* ------------------------------------------------------------------------
- * WE HAVE NOW CONCLUDED READING/WRITING IN ACC AND TUP FOR THIS OPERATION.
- * IT IS NOW TIME TO LOG THE OPERATION, SEND REQUEST TO NEXT NODE OR TC AND
- * FOR SOME TYPES OF OPERATIONS IT IS EVEN TIME TO COMMIT THE OPERATION.
- * IN THIS CASE WE HAVE ALREADY RELEASED THE FRAGMENT LOCK.
- * ERROR CASES AT FRAGMENT CREATION AND STAND-BY NODES ARE THE REASONS FOR
- * COMING HERE.
- * ------------------------------------------------------------------------ */
- if (regTcPtr->operation == ZREAD) {
- if (regTcPtr->opSimple == 1) {
- jam();
- /* --------------------------------------------------------------------
- * THE OPERATION IS A SIMPLE READ. WE WILL IMMEDIATELY COMMIT THE
- * OPERATION.
- * -------------------------------------------------------------------- */
- regTcPtr->gci = cnewestGci;
- localCommitLab(signal);
- return;
- } else {
- jam();
- /* --------------------------------------------------------------------
- * A NORMAL READ OPERATION IS NOT LOGGED BUT IS NOT COMMITTED UNTIL
- * THE COMMIT SIGNAL ARRIVES. THUS WE CONTINUE PACKING THE RESPONSE.
- * -------------------------------------------------------------------- */
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- packLqhkeyreqLab(signal);
- return;
- }//if
- } else {
- jam();
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if (fragptr.p->logFlag == Fragrecord::STATE_FALSE) {
- if (regTcPtr->dirtyOp == ZTRUE) {
- /* ------------------------------------------------------------------
-	 * THIS OPERATION WAS A WRITE OPERATION THAT DOES NOT NEED LOGGING AND
-	 * THAT CAN BE COMMITTED IMMEDIATELY.
- * ------------------------------------------------------------------ */
- jam();
- /* ----------------------------------------------------------------
- * IT MUST BE ACTIVE CREATION OF A FRAGMENT.
- * ---------------------------------------------------------------- */
- regTcPtr->gci = cnewestGci;
- localCommitLab(signal);
- return;
- } else {
- /* ------------------------------------------------------------------
-	 * A NORMAL WRITE OPERATION ON A FRAGMENT WHICH DOES NOT NEED LOGGING.
- * WE WILL PACK THE REQUEST/RESPONSE TO THE NEXT NODE/TO TC.
- * ------------------------------------------------------------------ */
- jam();
- /* ---------------------------------------------------------------
- * IT MUST BE ACTIVE CREATION OF A FRAGMENT.
- * NOT A DIRTY OPERATION THUS PACK REQUEST/RESPONSE.
- * ---------------------------------------------------------------- */
- regTcPtr->logWriteState = TcConnectionrec::NOT_WRITTEN;
- packLqhkeyreqLab(signal);
- return;
- }//if
- } else {
- jam();
- /* --------------------------------------------------------------------
- * A DIRTY OPERATION WHICH NEEDS LOGGING. WE START BY LOGGING THE
- * REQUEST. IN THIS CASE WE WILL RELEASE THE FRAGMENT LOCK FIRST.
- * -------------------------------------------------------------------- */
- /* A NORMAL WRITE OPERATION THAT NEEDS LOGGING AND WILL NOT BE
- * PREMATURELY COMMITTED.
- * -------------------------------------------------------------------- */
- logLqhkeyreqLab(signal);
- return;
- }//if
- }//if
-}//Dblqh::rwConcludedAiLab()
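-
-/* A standalone sketch of the decision made in rwConcludedLab() and
- * rwConcludedAiLab() above once ACC and TUP are done: reads go straight to
- * packing the next LQHKEYREQ, non-logged writes either commit immediately
- * (dirty) or are packed for the next node/TC, and logged writes go to the
- * REDO log first. The enum below is an illustrative stand-in for the actual
- * control flow, which also handles simple reads and fragment-copy cases. */
-enum NextStepSketch { PACK_REQUEST, COMMIT_NOW, WRITE_REDO_LOG };
-
-static NextStepSketch afterAccAndTupSketch(bool isRead, bool fragmentLogged,
-                                           bool dirtyOp)
-{
-  if (isRead)
-    return PACK_REQUEST;              /* reads are never logged                  */
-  if (!fragmentLogged)
-    return dirtyOp ? COMMIT_NOW       /* dirty write, no logging: commit at once */
-                   : PACK_REQUEST;    /* pack request/response for next node/TC  */
-  return WRITE_REDO_LOG;              /* normal write: log it before continuing  */
-}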
-
-/* ##########################################################################
- * ####### LOG MODULE #######
- *
- * ##########################################################################
- * --------------------------------------------------------------------------
- * THE LOG MODULE HANDLES THE READING AND WRITING OF THE LOG
- * IT IS ALSO RESPONSIBLE FOR HANDLING THE SYSTEM RESTART.
- * IT CONTROLS THE SYSTEM RESTART IN TUP AND ACC AS WELL.
- * -------------------------------------------------------------------------- */
-void Dblqh::logLqhkeyreqLab(Signal* signal)
-{
- UintR tcurrentFilepage;
- TcConnectionrecPtr tmpTcConnectptr;
-
- if (cnoOfLogPages < ZMIN_LOG_PAGES_OPERATION || ERROR_INSERTED(5032)) {
- jam();
- if(ERROR_INSERTED(5032)){
- CLEAR_ERROR_INSERT_VALUE;
- }
-/*---------------------------------------------------------------------------*/
-// The log disk is having trouble keeping up with the speed of execution.
-// We must delay writing the log of this operation to ensure we do not
-// overload the log.
-/*---------------------------------------------------------------------------*/
- terrorCode = ZTEMPORARY_REDO_LOG_FAILURE;
- abortErrorLab(signal);
- return;
- }//if
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- logPartPtr.i = regTcPtr->hashValue & 3;
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
-/* -------------------------------------------------- */
-/* THIS PART IS USED TO WRITE THE LOG */
-/* -------------------------------------------------- */
-/* -------------------------------------------------- */
-/* CHECK IF A LOG OPERATION IS ONGOING ALREADY. */
-/* IF SO THEN QUEUE THE OPERATION FOR LATER */
-/* RESTART WHEN THE LOG PART IS FREE AGAIN. */
-/* -------------------------------------------------- */
- LogPartRecord * const regLogPartPtr = logPartPtr.p;
-
- if(ERROR_INSERTED(5033)){
- jam();
- CLEAR_ERROR_INSERT_VALUE;
-
- if ((regLogPartPtr->firstLogQueue != RNIL) &&
- (regLogPartPtr->LogLqhKeyReqSent == ZFALSE)) {
- /* -------------------------------------------------- */
- /* WE HAVE A PROBLEM IN THAT THE LOG HAS NO */
- /* ROOM FOR ADDITIONAL OPERATIONS AT THE MOMENT.*/
- /* -------------------------------------------------- */
- /* -------------------------------------------------- */
- /* WE MUST STILL RESTART QUEUED OPERATIONS SO */
- /* THEY ALSO CAN BE ABORTED. */
- /* -------------------------------------------------- */
- regLogPartPtr->LogLqhKeyReqSent = ZTRUE;
- signal->theData[0] = ZLOG_LQHKEYREQ;
- signal->theData[1] = logPartPtr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- }//if
-
- terrorCode = ZTAIL_PROBLEM_IN_LOG_ERROR;
- abortErrorLab(signal);
- return;
- }
-
- if (regLogPartPtr->logPartState == LogPartRecord::IDLE) {
- ;
- } else if (regLogPartPtr->logPartState == LogPartRecord::ACTIVE) {
- jam();
- linkWaitLog(signal, logPartPtr);
- regTcPtr->transactionState = TcConnectionrec::LOG_QUEUED;
- return;
- } else {
- if ((regLogPartPtr->firstLogQueue != RNIL) &&
- (regLogPartPtr->LogLqhKeyReqSent == ZFALSE)) {
-/* -------------------------------------------------- */
-/* WE HAVE A PROBLEM IN THAT THE LOG HAS NO */
-/* ROOM FOR ADDITIONAL OPERATIONS AT THE MOMENT.*/
-/* -------------------------------------------------- */
-/* -------------------------------------------------- */
-/* WE MUST STILL RESTART QUEUED OPERATIONS SO */
-/* THEY ALSO CAN BE ABORTED. */
-/* -------------------------------------------------- */
- regLogPartPtr->LogLqhKeyReqSent = ZTRUE;
- signal->theData[0] = ZLOG_LQHKEYREQ;
- signal->theData[1] = logPartPtr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- }//if
- if (regLogPartPtr->logPartState == LogPartRecord::TAIL_PROBLEM) {
- jam();
- terrorCode = ZTAIL_PROBLEM_IN_LOG_ERROR;
- } else {
- ndbrequire(regLogPartPtr->logPartState == LogPartRecord::FILE_CHANGE_PROBLEM);
- jam();
- terrorCode = ZFILE_CHANGE_PROBLEM_IN_LOG_ERROR;
- }//if
- abortErrorLab(signal);
- return;
- }//if
- regLogPartPtr->logPartState = LogPartRecord::ACTIVE;
- logFilePtr.i = regLogPartPtr->currentLogfile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
-/* -------------------------------------------------- */
-/* CHECK IF A NEW MBYTE IS TO BE STARTED. IF */
-/* SO INSERT A NEXT LOG RECORD, WRITE THE LOG */
-/* AND PLACE THE LOG POINTER ON THE NEW POSITION*/
-/* IF A NEW FILE IS TO BE USED, CHANGE FILE AND */
-/* ALSO START OPENING THE NEXT LOG FILE. IF A */
-/* LAP HAS BEEN COMPLETED THEN ADD ONE TO LAP */
-/* COUNTER. */
-/* -------------------------------------------------- */
- checkNewMbyte(signal);
-/* -------------------------------------------------- */
-/* INSERT THE OPERATION RECORD LAST IN THE LIST */
-/* OF NOT COMPLETED OPERATIONS. ALSO RECORD THE */
-/* FILE NO, PAGE NO AND PAGE INDEX OF THE START */
-/* OF THIS LOG RECORD. */
-/* IT IS NOT ALLOWED TO INSERT IT INTO THE LIST */
-/* BEFORE CHECKING THE NEW MBYTE SINCE THAT WILL*/
-/* CAUSE THE OLD VALUES OF TC_CONNECTPTR TO BE */
-/* USED IN WRITE_FILE_DESCRIPTOR. */
-/* -------------------------------------------------- */
- Uint32 tcIndex = tcConnectptr.i;
- tmpTcConnectptr.i = regLogPartPtr->lastLogTcrec;
- regLogPartPtr->lastLogTcrec = tcIndex;
- if (tmpTcConnectptr.i == RNIL) {
- jam();
- regLogPartPtr->firstLogTcrec = tcIndex;
- } else {
- ptrCheckGuard(tmpTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- tmpTcConnectptr.p->nextLogTcrec = tcIndex;
- }//if
- Uint32 fileNo = logFilePtr.p->fileNo;
- tcurrentFilepage = logFilePtr.p->currentFilepage;
- logPagePtr.i = logFilePtr.p->currentLogpage;
- regTcPtr->nextLogTcrec = RNIL;
- regTcPtr->prevLogTcrec = tmpTcConnectptr.i;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- Uint32 pageIndex = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- regTcPtr->logStartFileNo = fileNo;
- regTcPtr->logStartPageNo = tcurrentFilepage;
- regTcPtr->logStartPageIndex = pageIndex;
-/* -------------------------------------------------- */
-/* WRITE THE LOG HEADER OF THIS OPERATION. */
-/* -------------------------------------------------- */
- writeLogHeader(signal);
-/* -------------------------------------------------- */
-/* WRITE THE TUPLE KEY OF THIS OPERATION. */
-/* -------------------------------------------------- */
- writeKey(signal);
-/* -------------------------------------------------- */
-/* WRITE THE ATTRIBUTE INFO OF THIS OPERATION. */
-/* -------------------------------------------------- */
- writeAttrinfoLab(signal);
-
- logNextStart(signal);
-/* -------------------------------------------------- */
-/* RESET THE STATE OF THE LOG PART. IF ANY */
-/* OPERATIONS HAVE QUEUED THEN START THE FIRST */
-/* OF THESE. */
-/* -------------------------------------------------- */
-/* -------------------------------------------------- */
-/* CONTINUE WITH PACKING OF LQHKEYREQ */
-/* -------------------------------------------------- */
- tcurrentFilepage = logFilePtr.p->currentFilepage;
- if (logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] == ZPAGE_HEADER_SIZE) {
- jam();
- tcurrentFilepage--;
- }//if
- regTcPtr->logStopPageNo = tcurrentFilepage;
- regTcPtr->logWriteState = TcConnectionrec::WRITTEN;
- if (regTcPtr->abortState != TcConnectionrec::ABORT_IDLE) {
-/* -------------------------------------------------- */
-/*       AN ABORT HAS BEEN ORDERED. THE ABORT WAITED  */
-/* FOR THE LOG WRITE TO BE COMPLETED. NOW WE */
-/* CAN PROCEED WITH THE NORMAL ABORT HANDLING. */
-/* -------------------------------------------------- */
- abortCommonLab(signal);
- return;
- }//if
- if (regTcPtr->dirtyOp != ZTRUE) {
- packLqhkeyreqLab(signal);
- } else {
- /* ----------------------------------------------------------------------
- * I NEED TO INSERT A COMMIT LOG RECORD SINCE WE ARE WRITING LOG IN THIS
- * TRANSACTION. SINCE WE RELEASED THE LOG LOCK JUST NOW NO ONE ELSE CAN BE
- * ACTIVE IN WRITING THE LOG. WE THUS WRITE THE LOG WITHOUT GETTING A LOCK
- * SINCE WE ARE ONLY WRITING A COMMIT LOG RECORD.
- * ---------------------------------------------------------------------- */
- writeCommitLog(signal, logPartPtr);
- /* ----------------------------------------------------------------------
- * DIRTY OPERATIONS SHOULD COMMIT BEFORE THEY PACK THE REQUEST/RESPONSE.
- * ---------------------------------------------------------------------- */
- regTcPtr->gci = cnewestGci;
- localCommitLab(signal);
- }//if
-}//Dblqh::logLqhkeyreqLab()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SEND LQHKEYREQ */
-/* */
-/* NO STATE CHECKING SINCE THE SIGNAL IS A LOCAL SIGNAL. THE EXECUTION OF */
-/* THE OPERATION IS COMPLETED. IT IS NOW TIME TO SEND THE OPERATION TO THE */
-/* NEXT REPLICA OR TO TC. */
-/* ------------------------------------------------------------------------- */
-void Dblqh::packLqhkeyreqLab(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->nextReplica == ZNIL) {
-/* ------------------------------------------------------------------------- */
-/* ------- SEND LQHKEYCONF ------- */
-/* */
-/* ------------------------------------------------------------------------- */
- sendLqhkeyconfTc(signal, regTcPtr->tcBlockref);
- if (regTcPtr->dirtyOp != ZTRUE) {
- jam();
- regTcPtr->transactionState = TcConnectionrec::PREPARED;
- releaseOprec(signal);
- } else {
- jam();
-/*************************************************************>*/
-/* DIRTY WRITES ARE USED IN TWO SITUATIONS. THE FIRST */
-/* SITUATION IS WHEN THEY ARE USED TO UPDATE COUNTERS AND*/
-/* OTHER ATTRIBUTES WHICH ARE NOT SENSITIVE TO CONSISTE- */
-/* NCY. THE SECOND SITUATION IS BY OPERATIONS THAT ARE */
-/* SENT AS PART OF A COPY FRAGMENT PROCESS. */
-/* */
-/* DURING A COPY FRAGMENT PROCESS THERE IS NO LOGGING */
-/* ONGOING SINCE THE FRAGMENT IS NOT COMPLETE YET. THE */
-/* LOGGING STARTS AFTER COMPLETING THE LAST COPY TUPLE */
-/* OPERATION. THE EXECUTION OF THE LAST COPY TUPLE DOES */
-/* ALSO START A LOCAL CHECKPOINT SO THAT THE FRAGMENT */
-/* REPLICA IS RECOVERABLE. THUS GLOBAL CHECKPOINT ID FOR */
-/* THOSE OPERATIONS ARE NOT INTERESTING. */
-/* */
-/* A DIRTY WRITE IS BY DEFINITION NOT CONSISTENT. THUS */
-/* IT CAN USE ANY GLOBAL CHECKPOINT. THE IDEA HERE IS TO */
-/* ALWAYS USE THE LATEST DEFINED GLOBAL CHECKPOINT ID IN */
-/* THIS NODE. */
-/*************************************************************>*/
- cleanUp(signal);
- }//if
- return;
- }//if
-/* ------------------------------------------------------------------------- */
-/* ------- SEND LQHKEYREQ ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* THERE ARE MORE REPLICAS TO SEND THE OPERATION TO. A NEW LQHKEYREQ WILL BE */
-/* PREPARED FOR THE NEXT REPLICA. */
-/* ------------------------------------------------------------------------- */
-/* CLEAR REPLICA TYPE, ATTRINFO INDICATOR (IN LQHKEYREQ), */
-/* INTERPRETED EXECUTION, SEQUENTIAL NUMBER OF REPLICA. */
-// Set bit indicating Client and TC record not the same.
-// Set readlenAi indicator if readlenAi != 0
-// Stored Procedure Indicator not set.
-/* ------------------------------------------------------------------------- */
- LqhKeyReq * const lqhKeyReq = (LqhKeyReq *)&signal->theData[0];
-
- UintR Treqinfo;
- UintR sig0, sig1, sig2, sig3, sig4, sig5, sig6;
- Treqinfo = preComputedRequestInfoMask & regTcPtr->reqinfo;
-
- UintR TapplAddressIndicator = (regTcPtr->nextSeqNoReplica == 0 ? 0 : 1);
- LqhKeyReq::setApplicationAddressFlag(Treqinfo, TapplAddressIndicator);
- LqhKeyReq::setInterpretedFlag(Treqinfo, regTcPtr->opExec);
- LqhKeyReq::setSeqNoReplica(Treqinfo, regTcPtr->nextSeqNoReplica);
- LqhKeyReq::setAIInLqhKeyReq(Treqinfo, regTcPtr->reclenAiLqhkey);
- UintR TreadLenAiInd = (regTcPtr->readlenAi == 0 ? 0 : 1);
- UintR TsameLqhAndClient = (tcConnectptr.i ==
- regTcPtr->tcOprec ? 0 : 1);
- LqhKeyReq::setSameClientAndTcFlag(Treqinfo, TsameLqhAndClient);
- LqhKeyReq::setReturnedReadLenAIFlag(Treqinfo, TreadLenAiInd);
-
- UintR TotReclenAi = regTcPtr->totSendlenAi;
-/* ------------------------------------------------------------------------- */
-/* WE ARE NOW PREPARED TO SEND THE LQHKEYREQ. WE HAVE TO DECIDE IF ATTRINFO */
-/* IS INCLUDED IN THE LQHKEYREQ SIGNAL AND THEN SEND IT. */
-/* TAKE OVER SCAN OPERATION IS NEVER USED ON BACKUPS, LOG RECORDS AND START-UP*/
-/* OF NEW REPLICA AND THUS ONLY TOT_SENDLEN_AI IS USED; THE UPPER 16 BITS ARE*/
-/* ZERO. */
-/* ------------------------------------------------------------------------- */
- sig0 = tcConnectptr.i;
- sig1 = regTcPtr->savePointId;
- sig2 = regTcPtr->hashValue;
- sig4 = regTcPtr->tcBlockref;
-
- lqhKeyReq->clientConnectPtr = sig0;
- lqhKeyReq->attrLen = TotReclenAi;
- lqhKeyReq->savePointId = sig1;
- lqhKeyReq->hashValue = sig2;
- lqhKeyReq->requestInfo = Treqinfo;
- lqhKeyReq->tcBlockref = sig4;
-
- sig0 = regTcPtr->tableref + (regTcPtr->schemaVersion << 16);
- sig1 = regTcPtr->fragmentid + (regTcPtr->nodeAfterNext[0] << 16);
- sig2 = regTcPtr->transid[0];
- sig3 = regTcPtr->transid[1];
- sig4 = regTcPtr->applRef;
- sig5 = regTcPtr->applOprec;
- sig6 = regTcPtr->tcOprec;
- UintR nextPos = (TapplAddressIndicator << 1);
-
- lqhKeyReq->tableSchemaVersion = sig0;
- lqhKeyReq->fragmentData = sig1;
- lqhKeyReq->transId1 = sig2;
- lqhKeyReq->transId2 = sig3;
- lqhKeyReq->noFiredTriggers = regTcPtr->noFiredTriggers;
- lqhKeyReq->variableData[0] = sig4;
- lqhKeyReq->variableData[1] = sig5;
- lqhKeyReq->variableData[2] = sig6;
-
- nextPos += TsameLqhAndClient;
-
- if ((regTcPtr->lastReplicaNo - regTcPtr->nextSeqNoReplica) > 1) {
- sig0 = (UintR)regTcPtr->nodeAfterNext[1] +
- (UintR)(regTcPtr->nodeAfterNext[2] << 16);
- lqhKeyReq->variableData[nextPos] = sig0;
- nextPos++;
- }//if
- sig0 = regTcPtr->readlenAi;
- sig1 = regTcPtr->tupkeyData[0];
- sig2 = regTcPtr->tupkeyData[1];
- sig3 = regTcPtr->tupkeyData[2];
- sig4 = regTcPtr->tupkeyData[3];
-
- lqhKeyReq->variableData[nextPos] = sig0;
- nextPos += TreadLenAiInd;
- lqhKeyReq->variableData[nextPos] = sig1;
- lqhKeyReq->variableData[nextPos + 1] = sig2;
- lqhKeyReq->variableData[nextPos + 2] = sig3;
- lqhKeyReq->variableData[nextPos + 3] = sig4;
- UintR TkeyLen = LqhKeyReq::getKeyLen(Treqinfo);
- if (TkeyLen < 4) {
- nextPos += TkeyLen;
- } else {
- nextPos += 4;
- }//if
-
- sig0 = regTcPtr->firstAttrinfo[0];
- sig1 = regTcPtr->firstAttrinfo[1];
- sig2 = regTcPtr->firstAttrinfo[2];
- sig3 = regTcPtr->firstAttrinfo[3];
- sig4 = regTcPtr->firstAttrinfo[4];
- UintR TAiLen = regTcPtr->reclenAiLqhkey;
- BlockReference lqhRef = calcLqhBlockRef(regTcPtr->nextReplica);
-
- lqhKeyReq->variableData[nextPos] = sig0;
- lqhKeyReq->variableData[nextPos + 1] = sig1;
- lqhKeyReq->variableData[nextPos + 2] = sig2;
- lqhKeyReq->variableData[nextPos + 3] = sig3;
- lqhKeyReq->variableData[nextPos + 4] = sig4;
-
- nextPos += TAiLen;
-
- sendSignal(lqhRef, GSN_LQHKEYREQ, signal,
- nextPos + LqhKeyReq::FixedSignalLength, JBB);
- if (regTcPtr->primKeyLen > 4) {
- jam();
-/* ------------------------------------------------------------------------- */
-/* MORE THAN 4 WORDS OF KEY DATA IS IN THE OPERATION. THEREFORE WE NEED TO */
-/* PREPARE A KEYINFO SIGNAL. MORE THAN ONE KEYINFO SIGNAL CAN BE SENT. */
-/* ------------------------------------------------------------------------- */
- sendTupkey(signal);
- }//if
-/* ------------------------------------------------------------------------- */
-/* NOW I AM PREPARED TO SEND ALL THE ATTRINFO SIGNALS. AT THE MOMENT A LOOP */
-/* SENDS ALL AT ONCE. LATER WE HAVE TO ADDRESS THE PROBLEM THAT THESE COULD */
-/* LEAD TO BUFFER EXPLOSION => NODE CRASH. */
-/* ------------------------------------------------------------------------- */
-/* NEW CODE TO SEND ATTRINFO IN PACK_LQHKEYREQ */
-/* THIS CODE USES A REAL-TIME BREAK AFTER */
-/* SENDING 16 SIGNALS. */
-/* -------------------------------------------------- */
- sig0 = regTcPtr->tcOprec;
- sig1 = regTcPtr->transid[0];
- sig2 = regTcPtr->transid[1];
- signal->theData[0] = sig0;
- signal->theData[1] = sig1;
- signal->theData[2] = sig2;
- AttrbufPtr regAttrinbufptr;
- regAttrinbufptr.i = regTcPtr->firstAttrinbuf;
- while (regAttrinbufptr.i != RNIL) {
- ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
- jam();
- Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN];
- ndbrequire(dataLen != 0);
- MEMCOPY_NO_WORDS(&signal->theData[3], &regAttrinbufptr.p->attrbuf[0], dataLen);
- regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT];
- sendSignal(lqhRef, GSN_ATTRINFO, signal, dataLen + 3, JBB);
- }//while
- regTcPtr->transactionState = TcConnectionrec::PREPARED;
- if (regTcPtr->dirtyOp == ZTRUE) {
- jam();
-/*************************************************************>*/
-/* DIRTY WRITES ARE USED IN TWO SITUATIONS. THE FIRST */
-/* SITUATION IS WHEN THEY ARE USED TO UPDATE COUNTERS AND*/
-/* OTHER ATTRIBUTES WHICH ARE NOT SENSITIVE TO CONSISTE- */
-/* NCY. THE SECOND SITUATION IS BY OPERATIONS THAT ARE */
-/* SENT AS PART OF A COPY FRAGMENT PROCESS. */
-/* */
-/* DURING A COPY FRAGMENT PROCESS THERE IS NO LOGGING */
-/* ONGOING SINCE THE FRAGMENT IS NOT COMPLETE YET. THE */
-/* LOGGING STARTS AFTER COMPLETING THE LAST COPY TUPLE */
-/* OPERATION. THE EXECUTION OF THE LAST COPY TUPLE DOES */
-/* ALSO START A LOCAL CHECKPOINT SO THAT THE FRAGMENT */
-/* REPLICA IS RECOVERABLE. THUS GLOBAL CHECKPOINT ID FOR */
-/* THOSE OPERATIONS ARE NOT INTERESTING. */
-/* */
-/* A DIRTY WRITE IS BY DEFINITION NOT CONSISTENT. THUS */
-/* IT CAN USE ANY GLOBAL CHECKPOINT. THE IDEA HERE IS TO */
-/* ALWAYS USE THE LATEST DEFINED GLOBAL CHECKPOINT ID IN */
-/* THIS NODE. */
-/*************************************************************>*/
- cleanUp(signal);
- return;
- }//if
- /* ------------------------------------------------------------------------
- * ALL INFORMATION NEEDED BY THE COMMIT PHASE AND COMPLETE PHASE IS
- * KEPT IN THE TC_CONNECT RECORD. TO ENSURE PROPER USE OF MEMORY
- * RESOURCES WE DEALLOCATE THE ATTRINFO RECORD AND KEY RECORDS
- * AS SOON AS POSSIBLE.
- * ------------------------------------------------------------------------ */
- releaseOprec(signal);
-}//Dblqh::packLqhkeyreqLab()
-
-/* ========================================================================= */
-/* ==== CHECK IF THE LOG RECORD FITS INTO THE CURRENT MBYTE, ======= */
-/* OTHERWISE SWITCH TO NEXT MBYTE. */
-/* */
-/* ========================================================================= */
-void Dblqh::checkNewMbyte(Signal* signal)
-{
- UintR tcnmTmp;
- UintR ttotalLogSize;
-
-/* -------------------------------------------------- */
-/* CHECK IF A NEW MBYTE OF LOG RECORD IS TO BE */
-/* OPENED BEFORE WRITING THE LOG RECORD. NO LOG */
-/* RECORDS ARE ALLOWED TO SPAN A MBYTE BOUNDARY */
-/* */
-/* INPUT: TC_CONNECTPTR THE OPERATION */
-/* LOG_FILE_PTR THE LOG FILE */
-/* OUTPUT: LOG_FILE_PTR THE NEW LOG FILE */
-/* -------------------------------------------------- */
- ttotalLogSize = ZLOG_HEAD_SIZE + tcConnectptr.p->currTupAiLen;
- ttotalLogSize = ttotalLogSize + tcConnectptr.p->primKeyLen;
- tcnmTmp = logFilePtr.p->remainingWordsInMbyte;
- if ((ttotalLogSize + ZNEXT_LOG_SIZE) <= tcnmTmp) {
- ndbrequire(tcnmTmp >= ttotalLogSize);
- logFilePtr.p->remainingWordsInMbyte = tcnmTmp - ttotalLogSize;
- return;
- } else {
- jam();
-/* -------------------------------------------------- */
-/*       THERE WAS NOT ENOUGH SPACE IN THIS MBYTE FOR */
-/* THIS LOG RECORD. MOVE TO NEXT MBYTE */
-/* THIS MIGHT INCLUDE CHANGING LOG FILE */
-/* -------------------------------------------------- */
-/* WE HAVE TO INSERT A NEXT LOG RECORD FIRST */
-/* -------------------------------------------------- */
-/* THEN CONTINUE BY WRITING THE FILE DESCRIPTORS*/
-/* -------------------------------------------------- */
- logPagePtr.i = logFilePtr.p->currentLogpage;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- changeMbyte(signal);
- tcnmTmp = logFilePtr.p->remainingWordsInMbyte;
- }//if
- ndbrequire(tcnmTmp >= ttotalLogSize);
- logFilePtr.p->remainingWordsInMbyte = tcnmTmp - ttotalLogSize;
-}//Dblqh::checkNewMbyte()
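-
-/* A one-line restatement of the rule enforced by checkNewMbyte() above: the
- * log record (header + key + attrinfo) plus room for a NEXT record
- * (ZNEXT_LOG_SIZE in the code) must fit in the words remaining in the current
- * MByte, otherwise the writer first switches to the next MByte, possibly
- * changing log file. The parameters stand in for the kernel constants. */
-static bool fitsInCurrentMbyteSketch(unsigned remainingWords, unsigned headWords,
-                                     unsigned keyWords, unsigned attrWords,
-                                     unsigned nextRecordWords)
-{
-  return (headWords + keyWords + attrWords + nextRecordWords) <= remainingWords;
-}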
-
-/* --------------------------------------------------------------------------
- * ------- WRITE OPERATION HEADER TO LOG -------
- *
- * SUBROUTINE SHORT NAME: WLH
- * ------------------------------------------------------------------------- */
-void Dblqh::writeLogHeader(Signal* signal)
-{
- Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- Uint32 hashValue = tcConnectptr.p->hashValue;
- Uint32 operation = tcConnectptr.p->operation;
- Uint32 keyLen = tcConnectptr.p->primKeyLen;
- Uint32 aiLen = tcConnectptr.p->currTupAiLen;
- Uint32 totLogLen = aiLen + keyLen + ZLOG_HEAD_SIZE;
- if ((logPos + ZLOG_HEAD_SIZE) < ZPAGE_SIZE) {
- Uint32* dataPtr = &logPagePtr.p->logPageWord[logPos];
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + ZLOG_HEAD_SIZE;
- dataPtr[0] = ZPREP_OP_TYPE;
- dataPtr[1] = totLogLen;
- dataPtr[2] = hashValue;
- dataPtr[3] = operation;
- dataPtr[4] = aiLen;
- dataPtr[5] = keyLen;
- } else {
- writeLogWord(signal, ZPREP_OP_TYPE);
- writeLogWord(signal, totLogLen);
- writeLogWord(signal, hashValue);
- writeLogWord(signal, operation);
- writeLogWord(signal, aiLen);
- writeLogWord(signal, keyLen);
- }//if
-}//Dblqh::writeLogHeader()
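-
-/* A standalone sketch of the PREPARE log record header written above; the key
- * words (writeKey) and attribute info words (writeAttrinfoLab) follow
- * directly after these six words (ZLOG_HEAD_SIZE in the code). The plain
- * array used here is only for illustration. */
-static void buildPrepareHeaderSketch(unsigned header[6], unsigned prepOpType,
-                                     unsigned hashValue, unsigned operation,
-                                     unsigned keyLen, unsigned aiLen)
-{
-  header[0] = prepOpType;             /* record type (ZPREP_OP_TYPE)        */
-  header[1] = 6 + keyLen + aiLen;     /* total record length in words       */
-  header[2] = hashValue;
-  header[3] = operation;
-  header[4] = aiLen;                  /* attribute info length              */
-  header[5] = keyLen;                 /* tuple key length                   */
-}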
-
-/* --------------------------------------------------------------------------
- * ------- WRITE TUPLE KEY TO LOG -------
- *
- * SUBROUTINE SHORT NAME: WK
- * ------------------------------------------------------------------------- */
-void Dblqh::writeKey(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- Uint32 logPos, endPos, dataLen;
- Int32 remainingLen;
- logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- remainingLen = regTcPtr->primKeyLen;
- dataLen = remainingLen;
- if (remainingLen > 4)
- dataLen = 4;
- remainingLen -= dataLen;
- endPos = logPos + dataLen;
- if (endPos < ZPAGE_SIZE) {
- MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos],
- &regTcPtr->tupkeyData[0],
- dataLen);
- } else {
- jam();
- for (Uint32 i = 0; i < dataLen; i++)
- writeLogWord(signal, regTcPtr->tupkeyData[i]);
- endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- }//if
- DatabufPtr regDatabufptr;
- regDatabufptr.i = regTcPtr->firstTupkeybuf;
- while (remainingLen > 0) {
- logPos = endPos;
- ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
- dataLen = remainingLen;
- if (remainingLen > 4)
- dataLen = 4;
- remainingLen -= dataLen;
- endPos += dataLen;
- if (endPos < ZPAGE_SIZE) {
- MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos],
- &regDatabufptr.p->data[0],
- dataLen);
- } else {
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos;
- for (Uint32 i = 0; i < dataLen; i++)
- writeLogWord(signal, regDatabufptr.p->data[i]);
- endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- }//if
- regDatabufptr.i = regDatabufptr.p->nextDatabuf;
- }//while
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = endPos;
- ndbrequire(regDatabufptr.i == RNIL);
-}//Dblqh::writeKey()
-
-/* --------------------------------------------------------------------------
- * ------- WRITE ATTRINFO TO LOG -------
- *
- * SUBROUTINE SHORT NAME: WA
- * ------------------------------------------------------------------------- */
-void Dblqh::writeAttrinfoLab(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- Uint32 totLen = regTcPtr->currTupAiLen;
- if (totLen == 0)
- return;
- Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- Uint32 lqhLen = regTcPtr->reclenAiLqhkey;
- ndbrequire(totLen >= lqhLen);
- Uint32 endPos = logPos + lqhLen;
- totLen -= lqhLen;
- if (endPos < ZPAGE_SIZE) {
- MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos],
- &regTcPtr->firstAttrinfo[0],
- lqhLen);
- } else {
- for (Uint32 i = 0; i < lqhLen; i++)
- writeLogWord(signal, regTcPtr->firstAttrinfo[i]);
- endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- }//if
- AttrbufPtr regAttrinbufptr;
- regAttrinbufptr.i = regTcPtr->firstAttrinbuf;
- while (totLen > 0) {
- logPos = endPos;
- ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
- Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN];
- ndbrequire(totLen >= dataLen);
- ndbrequire(dataLen > 0);
- totLen -= dataLen;
- endPos += dataLen;
- if (endPos < ZPAGE_SIZE) {
- MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos],
- &regAttrinbufptr.p->attrbuf[0],
- dataLen);
- } else {
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos;
- for (Uint32 i = 0; i < dataLen; i++)
- writeLogWord(signal, regAttrinbufptr.p->attrbuf[i]);
- endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- }//if
- regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT];
- }//while
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = endPos;
- ndbrequire(regAttrinbufptr.i == RNIL);
-}//Dblqh::writeAttrinfoLab()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SEND TUPLE KEY IN KEYINFO SIGNAL(S) ------- */
-/* */
-/* SUBROUTINE SHORT NAME: STU */
-/* ------------------------------------------------------------------------- */
-void Dblqh::sendTupkey(Signal* signal)
-{
- UintR TdataPos = 3;
- BlockReference lqhRef = calcLqhBlockRef(tcConnectptr.p->nextReplica);
- signal->theData[0] = tcConnectptr.p->tcOprec;
- signal->theData[1] = tcConnectptr.p->transid[0];
- signal->theData[2] = tcConnectptr.p->transid[1];
- databufptr.i = tcConnectptr.p->firstTupkeybuf;
- do {
- ptrCheckGuard(databufptr, cdatabufFileSize, databuf);
- signal->theData[TdataPos] = databufptr.p->data[0];
- signal->theData[TdataPos + 1] = databufptr.p->data[1];
- signal->theData[TdataPos + 2] = databufptr.p->data[2];
- signal->theData[TdataPos + 3] = databufptr.p->data[3];
-
- databufptr.i = databufptr.p->nextDatabuf;
- TdataPos += 4;
- if (databufptr.i == RNIL) {
- jam();
- sendSignal(lqhRef, GSN_KEYINFO, signal, TdataPos, JBB);
- return;
- } else if (TdataPos == 23) {
- jam();
- sendSignal(lqhRef, GSN_KEYINFO, signal, 23, JBB);
- TdataPos = 3;
- }
- } while (1);
-}//Dblqh::sendTupkey()
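A minimal, self-contained sketch of the chunking rule used by sendTupkey() above. The 3-word header (operation record and two transid words), the 4-word key chunks and the 23-word KEYINFO limit are taken from the code; every other name is hypothetical.

#include <array>
#include <cstdint>
#include <vector>

// Pack 4-word key chunks behind a 3-word header, closing a signal when it
// reaches 23 words or the key is exhausted, as sendTupkey() does for
// GSN_KEYINFO.
std::vector<std::vector<uint32_t> >
packKeyInfo(const std::vector<std::array<uint32_t, 4> >& chunks,
            uint32_t tcOprec, uint32_t transid0, uint32_t transid1)
{
  std::vector<std::vector<uint32_t> > signals;
  std::vector<uint32_t> cur;
  cur.push_back(tcOprec);
  cur.push_back(transid0);
  cur.push_back(transid1);
  for (size_t i = 0; i < chunks.size(); i++) {
    cur.insert(cur.end(), chunks[i].begin(), chunks[i].end());
    if (i + 1 == chunks.size()) {       // end of key: send whatever we have
      signals.push_back(cur);
      break;
    }
    if (cur.size() == 23) {             // signal full: send and restart
      signals.push_back(cur);
      cur.resize(3);                    // keep only the 3-word header
    }
  }
  return signals;
}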
-
-void Dblqh::cleanUp(Signal* signal)
-{
- releaseOprec(signal);
- deleteTransidHash(signal);
- releaseTcrec(signal, tcConnectptr);
-}//Dblqh::cleanUp()
-
-/* --------------------------------------------------------------------------
- * ---- RELEASE ALL RECORDS CONNECTED TO THE OPERATION RECORD AND THE ----
- * OPERATION RECORD ITSELF
- * ------------------------------------------------------------------------- */
-void Dblqh::releaseOprec(Signal* signal)
-{
- UintR Tmpbuf;
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
-/* ---- RELEASE DATA BUFFERS ------------------- */
- DatabufPtr regDatabufptr;
- regDatabufptr.i = regTcPtr->firstTupkeybuf;
-/* --------------------------------------------------------------------------
- * ------- RELEASE DATA BUFFERS -------
- *
- * ------------------------------------------------------------------------- */
-
- while (regDatabufptr.i != RNIL) {
- jam();
- ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
- Tmpbuf = regDatabufptr.p->nextDatabuf;
- regDatabufptr.p->nextDatabuf = cfirstfreeDatabuf;
- cfirstfreeDatabuf = regDatabufptr.i;
- regDatabufptr.i = Tmpbuf;
- }//while
-/* ---- RELEASE ATTRINFO BUFFERS ------------------- */
- AttrbufPtr regAttrinbufptr;
- regAttrinbufptr.i = regTcPtr->firstAttrinbuf;
- /* ########################################################################
- * ####### RELEASE_ATTRINBUF #######
- *
- * ####################################################################### */
- while (regAttrinbufptr.i != RNIL) {
- jam();
- regAttrinbufptr.i= release_attrinbuf(regAttrinbufptr.i);
- }//while
- regTcPtr->firstAttrinbuf = RNIL;
- regTcPtr->lastAttrinbuf = RNIL;
- regTcPtr->firstTupkeybuf = RNIL;
- regTcPtr->lastTupkeybuf = RNIL;
-}//Dblqh::releaseOprec()
-
-/* ------------------------------------------------------------------------- */
-/* ------ DELETE TRANSACTION ID FROM HASH TABLE ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::deleteTransidHash(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- TcConnectionrecPtr prevHashptr;
- TcConnectionrecPtr nextHashptr;
-
- prevHashptr.i = regTcPtr->prevHashRec;
- nextHashptr.i = regTcPtr->nextHashRec;
- if (prevHashptr.i != RNIL) {
- jam();
- ptrCheckGuard(prevHashptr, ctcConnectrecFileSize, tcConnectionrec);
- prevHashptr.p->nextHashRec = nextHashptr.i;
- } else {
- jam();
-/* ------------------------------------------------------------------------- */
-/* THE OPERATION WAS PLACED FIRST IN ITS HASH TABLE BUCKET. WE NEED TO SET A  */
-/* NEW HEAD OF THE LIST.                                                      */
-/* ------------------------------------------------------------------------- */
- Uint32 hashIndex = (regTcPtr->transid[0] ^ regTcPtr->tcOprec) & 1023;
- ctransidHash[hashIndex] = nextHashptr.i;
- }//if
- if (nextHashptr.i != RNIL) {
- jam();
- ptrCheckGuard(nextHashptr, ctcConnectrecFileSize, tcConnectionrec);
- nextHashptr.p->prevHashRec = prevHashptr.i;
- }//if
-}//Dblqh::deleteTransidHash()
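A simplified, hypothetical sketch of the unlink performed by deleteTransidHash() above: each record sits in one of 1024 buckets chosen by (transid[0] ^ tcOprec) & 1023, and removal re-links its prev/next neighbours, with RNIL marking the absence of a neighbour. The struct, pool layout and sentinel value below are assumptions for illustration only.

#include <cstdint>

static const uint32_t RNIL_ILLUSTRATION = 0xffffffff;  // assumed "no record" sentinel

struct HashedRec {              // hypothetical, trimmed-down record
  uint32_t prevHashRec;
  uint32_t nextHashRec;
  uint32_t transid0;
  uint32_t tcOprec;
};

// Unlink pool[recI] from its bucket chain, mirroring the logic above.
void unlinkFromTransidHash(HashedRec* pool, uint32_t* buckets, uint32_t recI)
{
  HashedRec& rec = pool[recI];
  if (rec.prevHashRec != RNIL_ILLUSTRATION) {
    pool[rec.prevHashRec].nextHashRec = rec.nextHashRec;  // bypass in the chain
  } else {
    // Record was first in its bucket: the bucket gets a new head.
    const uint32_t hashIndex = (rec.transid0 ^ rec.tcOprec) & 1023;
    buckets[hashIndex] = rec.nextHashRec;
  }
  if (rec.nextHashRec != RNIL_ILLUSTRATION) {
    pool[rec.nextHashRec].prevHashRec = rec.prevHashRec;
  }
}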
-
-/* --------------------------------------------------------------------------
- * ------- LINK OPERATION IN ACTIVE LIST ON FRAGMENT -------
- *
- * SUBROUTINE SHORT NAME: LAF
-// Input Pointers:
-// tcConnectptr
-// fragptr
- * ------------------------------------------------------------------------- */
-void Dblqh::linkActiveFrag(Signal* signal)
-{
- TcConnectionrecPtr lafTcConnectptr;
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- Fragrecord * const regFragPtr = fragptr.p;
- Uint32 tcIndex = tcConnectptr.i;
- lafTcConnectptr.i = regFragPtr->activeList;
- regTcPtr->prevTc = RNIL;
- regFragPtr->activeList = tcIndex;
- ndbrequire(regTcPtr->listState == TcConnectionrec::NOT_IN_LIST);
- regTcPtr->nextTc = lafTcConnectptr.i;
- regTcPtr->listState = TcConnectionrec::IN_ACTIVE_LIST;
- if (lafTcConnectptr.i == RNIL) {
- return;
- } else {
- jam();
- ptrCheckGuard(lafTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- lafTcConnectptr.p->prevTc = tcIndex;
- }//if
- return;
-}//Dblqh::linkActiveFrag()
-
-/* -------------------------------------------------------------------------
- * ------- RELEASE OPERATION FROM ACTIVE LIST ON FRAGMENT -------
- *
- * SUBROUTINE SHORT NAME = RAF
- * ------------------------------------------------------------------------- */
-void Dblqh::releaseActiveFrag(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- TcConnectionrecPtr ralTcNextConnectptr;
- TcConnectionrecPtr ralTcPrevConnectptr;
- fragptr.i = regTcPtr->fragmentptr;
- ralTcPrevConnectptr.i = regTcPtr->prevTc;
- ralTcNextConnectptr.i = regTcPtr->nextTc;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- Fragrecord * const regFragPtr = fragptr.p;
- ndbrequire(regTcPtr->listState == TcConnectionrec::IN_ACTIVE_LIST);
- regTcPtr->listState = TcConnectionrec::NOT_IN_LIST;
-
- if (ralTcNextConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(ralTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- ralTcNextConnectptr.p->prevTc = ralTcPrevConnectptr.i;
- }//if
- if (ralTcPrevConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(ralTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- ralTcPrevConnectptr.p->nextTc = regTcPtr->nextTc;
- } else {
- jam();
- /* ----------------------------------------------------------------------
- * OPERATION RECORD IS FIRST IN ACTIVE LIST
- * THIS MEANS THAT THERE EXISTS NO PREVIOUS TC THAT NEEDS TO BE UPDATED.
- * --------------------------------------------------------------------- */
- regFragPtr->activeList = ralTcNextConnectptr.i;
- }//if
- if (regFragPtr->lcpRef != RNIL) {
- jam();
- lcpPtr.i = regFragPtr->lcpRef;
- ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
- ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_WAIT_ACTIVE_FINISH);
-
- /* --------------------------------------------------------------------
- * IF A FRAGMENT IS CURRENTLY STARTING A LOCAL CHECKPOINT AND IT
-     * IS WAITING FOR ACTIVE OPERATIONS TO COMPLETE IN THE
-     * CURRENT PHASE, THEN WE CHECK WHETHER THE
-     * LAST ACTIVE OPERATION HAS NOW COMPLETED.
- * ------------------------------------------------------------------- */
- if (regFragPtr->activeList == RNIL) {
- jam();
- /* ------------------------------------------------------------------
- * ACTIVE LIST ON FRAGMENT IS EMPTY AND WE ARE WAITING FOR
- * THIS TO HAPPEN.
- * WE WILL NOW START THE CHECKPOINT IN TUP AND ACC.
- * ----------------------------------------------------------------- */
- /* SEND START LOCAL CHECKPOINT TO ACC AND TUP */
- /* ----------------------------------------------------------------- */
- fragptr.p->lcpRef = RNIL;
- lcpPtr.p->lcpState = LcpRecord::LCP_START_CHKP;
- sendStartLcp(signal);
- }//if
- }//if
-}//Dblqh::releaseActiveFrag()
-
-/* ######################################################################### */
-/* ####### TRANSACTION MODULE ####### */
-/* THIS MODULE HANDLES THE COMMIT AND THE COMPLETE PHASE. */
-/* ######################################################################### */
-void Dblqh::warningReport(Signal* signal, int place)
-{
- switch (place) {
- case 0:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received COMMIT in wrong state in Dblqh" << endl;
-#endif
- break;
- case 1:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received COMMIT with wrong transid in Dblqh" << endl;
-#endif
- break;
- case 2:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received COMPLETE in wrong state in Dblqh" << endl;
-#endif
- break;
- case 3:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received COMPLETE with wrong transid in Dblqh" << endl;
-#endif
- break;
- case 4:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received COMMITREQ in wrong state in Dblqh" << endl;
-#endif
- break;
- case 5:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received COMMITREQ with wrong transid in Dblqh" << endl;
-#endif
- break;
- case 6:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received COMPLETEREQ in wrong state in Dblqh" << endl;
-#endif
- break;
- case 7:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received COMPLETEREQ with wrong transid in Dblqh" << endl;
-#endif
- break;
- case 8:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received ABORT with non-existing transid in Dblqh" << endl;
-#endif
- break;
- case 9:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received ABORTREQ with non-existing transid in Dblqh" << endl;
-#endif
- break;
- case 10:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received ABORTREQ in wrong state in Dblqh" << endl;
-#endif
- break;
- case 11:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received COMMIT when tc-rec released in Dblqh" << endl;
-#endif
- break;
- case 12:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received COMPLETE when tc-rec released in Dblqh" << endl;
-#endif
- break;
- case 13:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received LQHKEYREF when tc-rec released in Dblqh" << endl;
-#endif
- break;
- case 14:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received LQHKEYREF with wrong transid in Dblqh" << endl;
-#endif
- break;
- case 15:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "W: Received LQHKEYREF when already aborting in Dblqh" << endl;
-#endif
- break;
- case 16:
- jam();
- ndbrequire(cstartPhase == ZNIL);
-#ifdef ABORT_TRACE
- ndbout << "W: Received LQHKEYREF in wrong state in Dblqh" << endl;
-#endif
- break;
- default:
- jam();
- break;
- }//switch
- return;
-}//Dblqh::warningReport()
-
-void Dblqh::errorReport(Signal* signal, int place)
-{
- switch (place) {
- case 0:
- jam();
- break;
- case 1:
- jam();
- break;
- case 2:
- jam();
- break;
- case 3:
- jam();
- break;
- default:
- jam();
- break;
- }//switch
- systemErrorLab(signal);
- return;
-}//Dblqh::errorReport()
-
-/* ************************************************************************>>
- * COMMIT: Start commit request from TC. This signal is originally sent as a
- * packed signal and this function is called from execPACKED_SIGNAL.
- * This is the normal commit protocol where TC first sends this signal to the
- * backup node, which then sends COMMIT to the primary node. If
- * everything is ok the primary node sends COMMITTED back to TC.
- * ************************************************************************>> */
-void Dblqh::execCOMMIT(Signal* signal)
-{
- TcConnectionrec *regTcConnectionrec = tcConnectionrec;
- Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
- Uint32 tcIndex = signal->theData[0];
- Uint32 gci = signal->theData[1];
- Uint32 transid1 = signal->theData[2];
- Uint32 transid2 = signal->theData[3];
- jamEntry();
- if (tcIndex >= ttcConnectrecFileSize) {
- errorReport(signal, 0);
- return;
- }//if
- if (ERROR_INSERTED(5011)) {
- CLEAR_ERROR_INSERT_VALUE;
- sendSignalWithDelay(cownref, GSN_COMMIT, signal, 2000, 4);
- return;
- }//if
- if (ERROR_INSERTED(5012)) {
- SET_ERROR_INSERT_VALUE(5017);
- sendSignalWithDelay(cownref, GSN_COMMIT, signal, 2000, 4);
- return;
- }//if
- tcConnectptr.i = tcIndex;
- ptrAss(tcConnectptr, regTcConnectionrec);
- if ((tcConnectptr.p->transid[0] == transid1) &&
- (tcConnectptr.p->transid[1] == transid2)) {
- commitReqLab(signal, gci);
- return;
- }//if
- warningReport(signal, 1);
- return;
-}//Dblqh::execCOMMIT()
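For reference, a hypothetical sketch of the checks execCOMMIT() applies to the four packed signal words before committing. The word order and the two failure paths match the code above; the struct and function names are invented.

#include <cstdint>

struct CommitWords {       // theData[0..3] as read by execCOMMIT()
  uint32_t tcIndex;        // index of the TcConnectionrec
  uint32_t gci;            // global checkpoint id to commit under
  uint32_t transid1;       // transaction id, word 1
  uint32_t transid2;       // transaction id, word 2
};

// True only when the record index is in range and both transaction id words
// match the stored operation; otherwise the request is rejected
// (errorReport(0) for a bad index, warningReport(1) for a transid mismatch).
bool commitRequestAccepted(const CommitWords& w,
                           uint32_t recordPoolSize,
                           const uint32_t storedTransid[2])
{
  if (w.tcIndex >= recordPoolSize)
    return false;
  return storedTransid[0] == w.transid1 && storedTransid[1] == w.transid2;
}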
-
-/* ************************************************************************>>
- * COMMITREQ: Commit request from TC. This is the commit protocol used if
- * one of the nodes is not behaving correctly. TC explicitly sends COMMITREQ
- * to both the backup and primary node and gets a COMMITCONF back if the
- * COMMIT was ok.
- * ************************************************************************>> */
-void Dblqh::execCOMMITREQ(Signal* signal)
-{
- jamEntry();
- Uint32 reqPtr = signal->theData[0];
- BlockReference reqBlockref = signal->theData[1];
- Uint32 gci = signal->theData[2];
- Uint32 transid1 = signal->theData[3];
- Uint32 transid2 = signal->theData[4];
- Uint32 tcOprec = signal->theData[6];
- if (ERROR_INSERTED(5004)) {
- systemErrorLab(signal);
- }
- if (ERROR_INSERTED(5017)) {
- CLEAR_ERROR_INSERT_VALUE;
- sendSignalWithDelay(cownref, GSN_COMMITREQ, signal, 2000, 7);
- return;
- }//if
- if (findTransaction(transid1,
- transid2,
- tcOprec) != ZOK) {
- warningReport(signal, 5);
- return;
- }//if
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- switch (regTcPtr->transactionState) {
- case TcConnectionrec::PREPARED:
- case TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL:
- case TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL:
- jam();
-/*-------------------------------------------------------*/
-/* THE NORMAL CASE. */
-/*-------------------------------------------------------*/
- regTcPtr->reqBlockref = reqBlockref;
- regTcPtr->reqRef = reqPtr;
- regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
- commitReqLab(signal, gci);
- return;
- break;
- case TcConnectionrec::COMMITTED:
- jam();
-/*---------------------------------------------------------*/
-/* FOR SOME REASON THE COMMIT PHASE HAS ALREADY           */
-/* FINISHED AFTER A TIME OUT. WE NEED ONLY SEND A         */
-/* COMMITCONF SIGNAL. */
-/*---------------------------------------------------------*/
- regTcPtr->reqBlockref = reqBlockref;
- regTcPtr->reqRef = reqPtr;
- regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
- signal->theData[0] = regTcPtr->reqRef;
- signal->theData[1] = cownNodeid;
- signal->theData[2] = regTcPtr->transid[0];
- signal->theData[3] = regTcPtr->transid[1];
- sendSignal(regTcPtr->reqBlockref, GSN_COMMITCONF, signal, 4, JBB);
- break;
- case TcConnectionrec::COMMIT_STOPPED:
- jam();
- regTcPtr->reqBlockref = reqBlockref;
- regTcPtr->reqRef = reqPtr;
- regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
- /*empty*/;
- break;
- default:
- jam();
- warningReport(signal, 4);
- return;
- break;
- }//switch
- return;
-}//Dblqh::execCOMMITREQ()
-
-/* ************************************************************************>>
- * COMPLETE : Complete the transaction. Sent as a packed signal from TC.
- * Works the same way as the COMMIT protocol. This is the normal case with both
- * primary and backup working (See COMMIT).
- * ************************************************************************>> */
-void Dblqh::execCOMPLETE(Signal* signal)
-{
- TcConnectionrec *regTcConnectionrec = tcConnectionrec;
- Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
- Uint32 tcIndex = signal->theData[0];
- Uint32 transid1 = signal->theData[1];
- Uint32 transid2 = signal->theData[2];
- jamEntry();
- if (tcIndex >= ttcConnectrecFileSize) {
- errorReport(signal, 1);
- return;
- }//if
- if (ERROR_INSERTED(5013)) {
- CLEAR_ERROR_INSERT_VALUE;
- sendSignalWithDelay(cownref, GSN_COMPLETE, signal, 2000, 3);
- return;
- }//if
- if (ERROR_INSERTED(5014)) {
- SET_ERROR_INSERT_VALUE(5018);
- sendSignalWithDelay(cownref, GSN_COMPLETE, signal, 2000, 3);
- return;
- }//if
- tcConnectptr.i = tcIndex;
- ptrAss(tcConnectptr, regTcConnectionrec);
- if ((tcConnectptr.p->transactionState == TcConnectionrec::COMMITTED) &&
- (tcConnectptr.p->transid[0] == transid1) &&
- (tcConnectptr.p->transid[1] == transid2)) {
- if (tcConnectptr.p->seqNoReplica != 0) {
- jam();
- localCommitLab(signal);
- return;
- } else {
- jam();
- completeTransLastLab(signal);
- return;
- }//if
- }//if
- if (tcConnectptr.p->transactionState != TcConnectionrec::COMMITTED) {
- warningReport(signal, 2);
- } else {
- warningReport(signal, 3);
- }//if
-}//Dblqh::execCOMPLETE()
-
-/* ************************************************************************>>
- * COMPLETEREQ: Complete request from TC. Same as COMPLETE but used if one
- * node is not working ok (See COMMIT).
- * ************************************************************************>> */
-void Dblqh::execCOMPLETEREQ(Signal* signal)
-{
- jamEntry();
- Uint32 reqPtr = signal->theData[0];
- BlockReference reqBlockref = signal->theData[1];
- Uint32 transid1 = signal->theData[2];
- Uint32 transid2 = signal->theData[3];
- Uint32 tcOprec = signal->theData[5];
- if (ERROR_INSERTED(5005)) {
- systemErrorLab(signal);
- }
- if (ERROR_INSERTED(5018)) {
- CLEAR_ERROR_INSERT_VALUE;
- sendSignalWithDelay(cownref, GSN_COMPLETEREQ, signal, 2000, 6);
- return;
- }//if
- if (findTransaction(transid1,
- transid2,
- tcOprec) != ZOK) {
- jam();
-/*---------------------------------------------------------*/
-/* FOR SOME REASON THE COMPLETE PHASE STARTED AFTER */
-/* A TIME OUT. THE TRANSACTION IS GONE. WE NEED TO */
-/* REPORT COMPLETION ANYWAY. */
-/*---------------------------------------------------------*/
- signal->theData[0] = reqPtr;
- signal->theData[1] = cownNodeid;
- signal->theData[2] = transid1;
- signal->theData[3] = transid2;
- sendSignal(reqBlockref, GSN_COMPLETECONF, signal, 4, JBB);
- warningReport(signal, 7);
- return;
- }//if
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- switch (regTcPtr->transactionState) {
- case TcConnectionrec::COMMITTED:
- jam();
- regTcPtr->reqBlockref = reqBlockref;
- regTcPtr->reqRef = reqPtr;
- regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
- /*empty*/;
- break;
-/*---------------------------------------------------------*/
-/* THE NORMAL CASE. */
-/*---------------------------------------------------------*/
- case TcConnectionrec::COMMIT_STOPPED:
- jam();
-/*---------------------------------------------------------*/
-/* FOR SOME REASON THE COMPLETE PHASE STARTED AFTER */
-/* A TIME OUT. WE HAVE SET THE PROPER VARIABLES SUCH */
-/* THAT A COMPLETECONF WILL BE SENT WHEN COMPLETE IS */
-/* FINISHED. */
-/*---------------------------------------------------------*/
- regTcPtr->reqBlockref = reqBlockref;
- regTcPtr->reqRef = reqPtr;
- regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
- return;
- break;
- default:
- jam();
- warningReport(signal, 6);
- return;
- break;
- }//switch
- if (regTcPtr->seqNoReplica != 0) {
- jam();
- localCommitLab(signal);
- return;
- } else {
- jam();
- completeTransLastLab(signal);
- return;
- }//if
-}//Dblqh::execCOMPLETEREQ()
-
-/* ************> */
-/* COMPLETED > */
-/* ************> */
-void Dblqh::execLQHKEYCONF(Signal* signal)
-{
- LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
- Uint32 tcIndex = lqhKeyConf->opPtr;
- Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
- TcConnectionrec *regTcConnectionrec = tcConnectionrec;
- jamEntry();
- if (tcIndex >= ttcConnectrecFileSize) {
- errorReport(signal, 2);
- return;
- }//if
- tcConnectptr.i = tcIndex;
- ptrAss(tcConnectptr, regTcConnectionrec);
- switch (tcConnectptr.p->connectState) {
- case TcConnectionrec::LOG_CONNECTED:
- jam();
- completedLab(signal);
- return;
- break;
- case TcConnectionrec::COPY_CONNECTED:
- jam();
- copyCompletedLab(signal);
- return;
- break;
- default:
- jam();
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dblqh::execLQHKEYCONF()
-
-/* ------------------------------------------------------------------------- */
-/* ------- COMMIT PHASE ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::commitReqLab(Signal* signal, Uint32 gci)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- TcConnectionrec::LogWriteState logWriteState = regTcPtr->logWriteState;
- TcConnectionrec::TransactionState transState = regTcPtr->transactionState;
- regTcPtr->gci = gci;
- if (transState == TcConnectionrec::PREPARED) {
- if (logWriteState == TcConnectionrec::WRITTEN) {
- jam();
- regTcPtr->transactionState = TcConnectionrec::PREPARED_RECEIVED_COMMIT;
- TcConnectionrecPtr saveTcPtr = tcConnectptr;
- Uint32 blockNo = refToBlock(regTcPtr->tcTupBlockref);
- signal->theData[0] = regTcPtr->tupConnectrec;
- signal->theData[1] = gci;
- EXECUTE_DIRECT(blockNo, GSN_TUP_WRITELOG_REQ, signal, 2);
- jamEntry();
- if (regTcPtr->transactionState == TcConnectionrec::LOG_COMMIT_QUEUED) {
- jam();
- return;
- }//if
- ndbrequire(regTcPtr->transactionState == TcConnectionrec::LOG_COMMIT_WRITTEN);
- tcConnectptr = saveTcPtr;
- } else if (logWriteState == TcConnectionrec::NOT_STARTED) {
- jam();
- } else if (logWriteState == TcConnectionrec::NOT_WRITTEN) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* IT IS A READ OPERATION OR ANOTHER OPERATION THAT DOES NOT USE THE LOG.     */
-/*---------------------------------------------------------------------------*/
-/*---------------------------------------------------------------------------*/
-/* THE LOG HAS NOT BEEN WRITTEN SINCE THE LOG FLAG WAS FALSE. THIS CAN OCCUR */
-/* WHEN WE ARE STARTING A NEW FRAGMENT. */
-/*---------------------------------------------------------------------------*/
- regTcPtr->logWriteState = TcConnectionrec::NOT_STARTED;
- } else {
- ndbrequire(logWriteState == TcConnectionrec::NOT_WRITTEN_WAIT);
- jam();
-/*---------------------------------------------------------------------------*/
-/* THE STATE WAS SET TO NOT_WRITTEN BY THE OPERATION BUT LATER A SCAN OF ALL  */
-/* OPERATION RECORDS CHANGED IT INTO NOT_WRITTEN_WAIT. THIS INDICATES THAT WE */
-/* ARE WAITING FOR THIS OPERATION TO COMMIT OR ABORT SO THAT WE CAN FIND THE */
-/* STARTING GLOBAL CHECKPOINT OF THIS NEW FRAGMENT. */
-/*---------------------------------------------------------------------------*/
- checkScanTcCompleted(signal);
- }//if
- } else if (transState == TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL) {
- jam();
- regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_QUEUED;
- return;
- } else if (transState == TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL) {
- jam();
- } else {
- warningReport(signal, 0);
- return;
- }//if
- if (regTcPtr->seqNoReplica != 0) {
- jam();
- commitReplyLab(signal);
- return;
- }//if
- localCommitLab(signal);
- return;
-}//Dblqh::commitReqLab()
-
-void Dblqh::execLQH_WRITELOG_REQ(Signal* signal)
-{
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- Uint32 gci = signal->theData[1];
- Uint32 newestGci = cnewestGci;
- TcConnectionrec::LogWriteState logWriteState = regTcPtr->logWriteState;
- TcConnectionrec::TransactionState transState = regTcPtr->transactionState;
- regTcPtr->gci = gci;
- if (gci > newestGci) {
- jam();
-/* ------------------------------------------------------------------------- */
-/* KEEP TRACK OF NEWEST GLOBAL CHECKPOINT THAT LQH HAS HEARD OF. */
-/* ------------------------------------------------------------------------- */
- cnewestGci = gci;
- }//if
- if (logWriteState == TcConnectionrec::WRITTEN) {
-/*---------------------------------------------------------------------------*/
-/* I NEED TO INSERT A COMMIT LOG RECORD SINCE WE ARE WRITING LOG IN THIS */
-/* TRANSACTION. */
-/*---------------------------------------------------------------------------*/
- jam();
- LogPartRecordPtr regLogPartPtr;
- Uint32 noOfLogPages = cnoOfLogPages;
- jam();
- regLogPartPtr.i = regTcPtr->hashValue & 3;
- ptrCheckGuard(regLogPartPtr, clogPartFileSize, logPartRecord);
- if ((regLogPartPtr.p->logPartState == LogPartRecord::ACTIVE) ||
- (noOfLogPages == 0)) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THIS LOG PART IS CURRENTLY ACTIVE WRITING ANOTHER LOG RECORD. WE MUST      */
-/* WAIT UNTIL THIS PART HAS COMPLETED ITS OPERATION. */
-/*---------------------------------------------------------------------------*/
-// We must delay the write of commit info to the log to safeguard against
-// a crash due to lack of log pages. We temporarily stop all log writes to this
-// log part to ensure that we don't get a buffer explosion in the delayed
-// signal buffer instead.
-/*---------------------------------------------------------------------------*/
- linkWaitLog(signal, regLogPartPtr);
- if (transState == TcConnectionrec::PREPARED) {
- jam();
- regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL;
- } else {
- jam();
- ndbrequire(transState == TcConnectionrec::PREPARED_RECEIVED_COMMIT);
- regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_QUEUED;
- }//if
- if (regLogPartPtr.p->logPartState == LogPartRecord::IDLE) {
- jam();
- regLogPartPtr.p->logPartState = LogPartRecord::ACTIVE;
- }//if
- return;
- }//if
- writeCommitLog(signal, regLogPartPtr);
- if (transState == TcConnectionrec::PREPARED) {
- jam();
- regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL;
- } else {
- jam();
- ndbrequire(transState == TcConnectionrec::PREPARED_RECEIVED_COMMIT);
- regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_WRITTEN;
- }//if
- }//if
-}//Dblqh::execLQH_WRITELOG_REQ()
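A small, hypothetical sketch of the two decisions taken above when a commit log record is needed: the log part is selected with hashValue & 3, and the write is queued via linkWaitLog rather than performed when that part is already ACTIVE or no free log pages remain. Names below are illustrative.

#include <cstdint>

enum class LogPartState { IDLE, ACTIVE };   // illustrative subset of the states

// Pick one of the four log parts, as execLQH_WRITELOG_REQ() does.
inline uint32_t pickLogPart(uint32_t hashValue)
{
  return hashValue & 3;
}

// True when the commit record must be queued on the log part instead of
// being written immediately: either the part is busy with another record,
// or there are no free log pages left to write into.
inline bool mustQueueCommitRecord(LogPartState partState, uint32_t freeLogPages)
{
  return partState == LogPartState::ACTIVE || freeLogPages == 0;
}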
-
-void Dblqh::localCommitLab(Signal* signal)
-{
- FragrecordPtr regFragptr;
- regFragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
- Fragrecord::FragStatus status = regFragptr.p->fragStatus;
- fragptr = regFragptr;
- switch (status) {
- case Fragrecord::FSACTIVE:
- case Fragrecord::CRASH_RECOVERING:
- case Fragrecord::ACTIVE_CREATION:
- jam();
- commitContinueAfterBlockedLab(signal);
- return;
- break;
- case Fragrecord::BLOCKED:
- jam();
- linkFragQueue(signal);
- tcConnectptr.p->transactionState = TcConnectionrec::COMMIT_STOPPED;
- break;
- case Fragrecord::FREE:
- jam();
- case Fragrecord::DEFINED:
- jam();
- case Fragrecord::REMOVING:
- jam();
- default:
- ndbrequire(false);
- break;
- }//switch
-}//Dblqh::localCommitLab()
-
-void Dblqh::commitContinueAfterBlockedLab(Signal* signal)
-{
-/* ------------------------------------------------------------------------- */
-/*INPUT: TC_CONNECTPTR ACTIVE OPERATION RECORD */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/*CONTINUE HERE AFTER BEING BLOCKED FOR A WHILE DURING LOCAL CHECKPOINT. */
-/*The operation is already removed from the active list since there is no */
-/*chance for any real-time breaks before we need to release it. */
-/* ------------------------------------------------------------------------- */
-/*ALSO AFTER NORMAL PROCEDURE WE CONTINUE */
-/*WE MUST COMMIT TUP BEFORE ACC TO ENSURE THAT NO ONE RACES IN AND SEES A */
-/*DIRTY STATE IN TUP. */
-/* ------------------------------------------------------------------------- */
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- Fragrecord * const regFragptr = fragptr.p;
- Uint32 operation = regTcPtr->operation;
- Uint32 simpleRead = regTcPtr->simpleRead;
- Uint32 dirtyOp = regTcPtr->dirtyOp;
- if (regTcPtr->activeCreat == ZFALSE) {
- if ((cCommitBlocked == true) &&
- (regFragptr->fragActiveStatus == ZTRUE)) {
- jam();
-/* ------------------------------------------------------------------------- */
-// TUP and/or ACC have problems in writing the undo log to disk fast enough.
-// We must avoid the commit at this time and try later instead. The fragment
-// is also active with a local checkpoint and this commit can generate UNDO
-// log records that overflow the UNDO log buffer.
-/* ------------------------------------------------------------------------- */
-/*---------------------------------------------------------------------------*/
-// We must delay the write of commit info to the log to safeguard against
-// a crash due to lack of log pages. We temporarily stop all log writes to this
-// log part to ensure that we don't get a buffer explosion in the delayed
-// signal buffer instead.
-/*---------------------------------------------------------------------------*/
- logPartPtr.i = regTcPtr->hashValue & 3;
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
- linkWaitLog(signal, logPartPtr);
- regTcPtr->transactionState = TcConnectionrec::COMMIT_QUEUED;
- if (logPartPtr.p->logPartState == LogPartRecord::IDLE) {
- jam();
- logPartPtr.p->logPartState = LogPartRecord::ACTIVE;
- }//if
- return;
- }//if
- if (operation != ZREAD) {
- TupCommitReq * const tupCommitReq =
- (TupCommitReq *)signal->getDataPtrSend();
- Uint32 sig0 = regTcPtr->tupConnectrec;
- Uint32 tup = refToBlock(regTcPtr->tcTupBlockref);
- jam();
- tupCommitReq->opPtr = sig0;
- tupCommitReq->gci = regTcPtr->gci;
- tupCommitReq->hashValue = regTcPtr->hashValue;
- EXECUTE_DIRECT(tup, GSN_TUP_COMMITREQ, signal,
- TupCommitReq::SignalLength);
- Uint32 acc = refToBlock(regTcPtr->tcAccBlockref);
- signal->theData[0] = regTcPtr->accConnectrec;
- EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1);
- } else {
- if(!dirtyOp){
- Uint32 acc = refToBlock(regTcPtr->tcAccBlockref);
- signal->theData[0] = regTcPtr->accConnectrec;
- EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1);
- }
- }
- jamEntry();
- if (simpleRead) {
- jam();
-/* ------------------------------------------------------------------------- */
-/*THE OPERATION WAS A SIMPLE READ THUS THE COMMIT PHASE IS ONLY NEEDED TO */
-/*RELEASE THE LOCKS. AT THIS POINT IN THE CODE THE LOCKS ARE RELEASED AND WE */
-/*ARE IN A POSITION TO SEND LQHKEYCONF TO TC. WE WILL ALSO RELEASE ALL */
-/*RESOURCES BELONGING TO THIS OPERATION SINCE NO MORE WORK WILL BE */
-/*PERFORMED. */
-/* ------------------------------------------------------------------------- */
- cleanUp(signal);
- return;
- }//if
- }//if
- Uint32 seqNoReplica = regTcPtr->seqNoReplica;
- if (regTcPtr->gci > regFragptr->newestGci) {
- jam();
-/* ------------------------------------------------------------------------- */
-/*IT IS THE FIRST TIME THIS GLOBAL CHECKPOINT IS INVOLVED IN UPDATING THIS */
-/*FRAGMENT. UPDATE THE VARIABLE THAT KEEPS TRACK OF NEWEST GCI IN FRAGMENT */
-/* ------------------------------------------------------------------------- */
- regFragptr->newestGci = regTcPtr->gci;
- }//if
- if (dirtyOp != ZTRUE) {
- if (seqNoReplica != 0) {
- jam();
- completeTransNotLastLab(signal);
- return;
- }//if
- commitReplyLab(signal);
- return;
- } else {
-/* ------------------------------------------------------------------------- */
-/*WE MUST HANDLE DIRTY WRITES IN A SPECIAL WAY. THESE OPERATIONS WILL NOT */
-/*SEND ANY COMMIT OR COMPLETE MESSAGES TO OTHER NODES. THEY WILL MERELY SEND */
-/*THOSE SIGNALS INTERNALLY. */
-/* ------------------------------------------------------------------------- */
- if (regTcPtr->abortState == TcConnectionrec::ABORT_IDLE) {
- jam();
- packLqhkeyreqLab(signal);
- } else {
- ndbrequire(regTcPtr->abortState != TcConnectionrec::NEW_FROM_TC);
- jam();
- sendLqhTransconf(signal, LqhTransConf::Committed);
- cleanUp(signal);
- }//if
- }//if
-}//Dblqh::commitContinueAfterBlockedLab()
-
-void Dblqh::commitReplyLab(Signal* signal)
-{
-/* -------------------------------------------------------------- */
-/* BACKUP AND STAND-BY REPLICAS ONLY UPDATE THE TRANSACTION STATE */
-/* -------------------------------------------------------------- */
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- TcConnectionrec::AbortState abortState = regTcPtr->abortState;
- regTcPtr->transactionState = TcConnectionrec::COMMITTED;
- if (abortState == TcConnectionrec::ABORT_IDLE) {
- Uint32 clientBlockref = regTcPtr->clientBlockref;
- if (regTcPtr->seqNoReplica == 0) {
- jam();
- sendCommittedTc(signal, clientBlockref);
- return;
- } else {
- jam();
- sendCommitLqh(signal, clientBlockref);
- return;
- }//if
- } else if (regTcPtr->abortState == TcConnectionrec::REQ_FROM_TC) {
- jam();
- signal->theData[0] = regTcPtr->reqRef;
- signal->theData[1] = cownNodeid;
- signal->theData[2] = regTcPtr->transid[0];
- signal->theData[3] = regTcPtr->transid[1];
- sendSignal(tcConnectptr.p->reqBlockref, GSN_COMMITCONF, signal, 4, JBB);
- } else {
- ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC);
- jam();
- sendLqhTransconf(signal, LqhTransConf::Committed);
- }//if
- return;
-}//Dblqh::commitReplyLab()
-
-/* ------------------------------------------------------------------------- */
-/* ------- COMPLETE PHASE ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::completeTransNotLastLab(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->abortState == TcConnectionrec::ABORT_IDLE) {
- Uint32 clientBlockref = regTcPtr->clientBlockref;
- jam();
- sendCompleteLqh(signal, clientBlockref);
- cleanUp(signal);
- return;
- } else {
- jam();
- completeUnusualLab(signal);
- return;
- }//if
-}//Dblqh::completeTransNotLastLab()
-
-void Dblqh::completeTransLastLab(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->abortState == TcConnectionrec::ABORT_IDLE) {
- Uint32 clientBlockref = regTcPtr->clientBlockref;
- jam();
-/* ------------------------------------------------------------------------- */
-/*DIRTY WRITES WHICH ARE LAST IN THE CHAIN OF REPLICAS WILL SEND COMPLETED */
-/*INSTEAD OF SENDING PREPARED TO THE TC (OR OTHER INITIATOR OF OPERATION). */
-/* ------------------------------------------------------------------------- */
- sendCompletedTc(signal, clientBlockref);
- cleanUp(signal);
- return;
- } else {
- jam();
- completeUnusualLab(signal);
- return;
- }//if
-}//Dblqh::completeTransLastLab()
-
-void Dblqh::completeUnusualLab(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->abortState == TcConnectionrec::ABORT_FROM_TC) {
- jam();
- sendAborted(signal);
- } else if (regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC) {
- jam();
- sendLqhTransconf(signal, LqhTransConf::Committed);
- } else {
- ndbrequire(regTcPtr->abortState == TcConnectionrec::REQ_FROM_TC);
- jam();
- signal->theData[0] = regTcPtr->reqRef;
- signal->theData[1] = cownNodeid;
- signal->theData[2] = regTcPtr->transid[0];
- signal->theData[3] = regTcPtr->transid[1];
- sendSignal(regTcPtr->reqBlockref,
- GSN_COMPLETECONF, signal, 4, JBB);
- }//if
- cleanUp(signal);
- return;
-}//Dblqh::completeUnusualLab()
-
-/* ========================================================================= */
-/* ======= RELEASE TC CONNECT RECORD ======= */
-/* */
-/* RELEASE A TC CONNECT RECORD TO THE FREELIST. */
-/* ========================================================================= */
-void Dblqh::releaseTcrec(Signal* signal, TcConnectionrecPtr locTcConnectptr)
-{
- jam();
- locTcConnectptr.p->tcTimer = 0;
- locTcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED;
- locTcConnectptr.p->nextTcConnectrec = cfirstfreeTcConrec;
- cfirstfreeTcConrec = locTcConnectptr.i;
-
- TablerecPtr tabPtr;
- tabPtr.i = locTcConnectptr.p->tableref;
- if(tabPtr.i == RNIL)
- return;
-
- ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
-
- /**
- * Normal case
- */
- ndbrequire(tabPtr.p->usageCount > 0);
- tabPtr.p->usageCount--;
-}//Dblqh::releaseTcrec()
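A hypothetical, minimal sketch of what releaseTcrec() above does with the record: push it back onto the singly-linked free list and drop the owning table's usage count. Names and types are invented for illustration.

#include <cassert>
#include <cstdint>

struct TcRec  { uint32_t nextTcConnectrec; uint32_t tableref; };  // trimmed down
struct Table  { uint32_t usageCount; };

// Return recI to the free list headed by firstFreeTcConrec and release the
// reference it held on its table (rnil meaning "no table attached").
void releaseTcRecord(TcRec* tcPool, Table* tabPool,
                     uint32_t& firstFreeTcConrec, uint32_t recI,
                     uint32_t rnil)
{
  tcPool[recI].nextTcConnectrec = firstFreeTcConrec;
  firstFreeTcConrec = recI;

  const uint32_t tab = tcPool[recI].tableref;
  if (tab == rnil)
    return;
  assert(tabPool[tab].usageCount > 0);       // mirrors the ndbrequire above
  tabPool[tab].usageCount--;
}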
-
-void Dblqh::releaseTcrecLog(Signal* signal, TcConnectionrecPtr locTcConnectptr)
-{
- jam();
- locTcConnectptr.p->tcTimer = 0;
- locTcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED;
- locTcConnectptr.p->nextTcConnectrec = cfirstfreeTcConrec;
- cfirstfreeTcConrec = locTcConnectptr.i;
-
- TablerecPtr tabPtr;
- tabPtr.i = locTcConnectptr.p->tableref;
- if(tabPtr.i == RNIL)
- return;
-
-}//Dblqh::releaseTcrecLog()
-
-/* ------------------------------------------------------------------------- */
-/* ------- ABORT PHASE ------- */
-/* */
-/*THIS PART IS USED FOR ERRORS THAT CAUSE AN ABORT OF THE TRANSACTION.        */
-/* ------------------------------------------------------------------------- */
-/* ***************************************************>> */
-/* ABORT: Abort transaction in connection. Sender TC. */
-/* This is the normal protocol (See COMMIT) */
-/* ***************************************************>> */
-void Dblqh::execABORT(Signal* signal)
-{
- jamEntry();
- Uint32 tcOprec = signal->theData[0];
- BlockReference tcBlockref = signal->theData[1];
- Uint32 transid1 = signal->theData[2];
- Uint32 transid2 = signal->theData[3];
- CRASH_INSERTION(5003);
- if (ERROR_INSERTED(5015)) {
- CLEAR_ERROR_INSERT_VALUE;
- sendSignalWithDelay(cownref, GSN_ABORT, signal, 2000, 4);
- return;
- }//if
- if (findTransaction(transid1,
- transid2,
- tcOprec) != ZOK) {
- jam();
-
- if(ERROR_INSERTED(5039) &&
- refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
- jam();
- SET_ERROR_INSERT_VALUE(5040);
- return;
- }
-
- if(ERROR_INSERTED(5040) &&
- refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
- jam();
- SET_ERROR_INSERT_VALUE(5003);
- return;
- }
-
-/* ------------------------------------------------------------------------- */
-// SEND ABORTED EVEN IF NOT FOUND.
-//THE TRANSACTION MIGHT NEVER HAVE ARRIVED HERE.
-/* ------------------------------------------------------------------------- */
- signal->theData[0] = tcOprec;
- signal->theData[1] = transid1;
- signal->theData[2] = transid2;
- signal->theData[3] = cownNodeid;
- signal->theData[4] = ZTRUE;
- sendSignal(tcBlockref, GSN_ABORTED, signal, 5, JBB);
- warningReport(signal, 8);
- return;
- }//if
-/* ------------------------------------------------------------------------- */
-/*A GUIDING DESIGN PRINCIPLE IN HANDLING THESE ERROR SITUATIONS HAS BEEN TO   */
-/*KEEP IT SIMPLE. THUS WE WOULD RATHER INSERT A WAIT AND SET THE ABORT_STATE  */
-/*TO ACTIVE THAN WRITE NEW CODE TO HANDLE EVERY SPECIAL SITUATION.            */
-/* ------------------------------------------------------------------------- */
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->nextReplica != ZNIL) {
-/* ------------------------------------------------------------------------- */
-// We will immediately send the ABORT message also to the next LQH node in line.
-/* ------------------------------------------------------------------------- */
- BlockReference TLqhRef = calcLqhBlockRef(regTcPtr->nextReplica);
- signal->theData[0] = regTcPtr->tcOprec;
- signal->theData[1] = regTcPtr->tcBlockref;
- signal->theData[2] = regTcPtr->transid[0];
- signal->theData[3] = regTcPtr->transid[1];
- sendSignal(TLqhRef, GSN_ABORT, signal, 4, JBB);
- }//if
- regTcPtr->abortState = TcConnectionrec::ABORT_FROM_TC;
- regTcPtr->activeCreat = ZFALSE;
-
- const Uint32 commitAckMarker = regTcPtr->commitAckMarker;
- if(commitAckMarker != RNIL){
- jam();
-#ifdef MARKER_TRACE
- {
- CommitAckMarkerPtr tmp;
- m_commitAckMarkerHash.getPtr(tmp, commitAckMarker);
- ndbout_c("Ab2 marker[%.8x %.8x]", tmp.p->transid1, tmp.p->transid2);
- }
-#endif
- m_commitAckMarkerHash.release(commitAckMarker);
- regTcPtr->commitAckMarker = RNIL;
- }
-
- abortStateHandlerLab(signal);
-
- return;
-}//Dblqh::execABORT()
-
-/* ************************************************************************>>
- * ABORTREQ: Same as ABORT but used in case one node isn't working ok.
- * (See COMMITREQ)
- * ************************************************************************>> */
-void Dblqh::execABORTREQ(Signal* signal)
-{
- jamEntry();
- Uint32 reqPtr = signal->theData[0];
- BlockReference reqBlockref = signal->theData[1];
- Uint32 transid1 = signal->theData[2];
- Uint32 transid2 = signal->theData[3];
- Uint32 tcOprec = signal->theData[5];
- if (ERROR_INSERTED(5006)) {
- systemErrorLab(signal);
- }
- if (ERROR_INSERTED(5016)) {
- CLEAR_ERROR_INSERT_VALUE;
- sendSignalWithDelay(cownref, GSN_ABORTREQ, signal, 2000, 6);
- return;
- }//if
- if (findTransaction(transid1,
- transid2,
- tcOprec) != ZOK) {
- signal->theData[0] = reqPtr;
- signal->theData[2] = cownNodeid;
- signal->theData[3] = transid1;
- signal->theData[4] = transid2;
- sendSignal(reqBlockref, GSN_ABORTCONF, signal, 5, JBB);
- warningReport(signal, 9);
- return;
- }//if
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->transactionState != TcConnectionrec::PREPARED) {
- warningReport(signal, 10);
- return;
- }//if
- regTcPtr->reqBlockref = reqBlockref;
- regTcPtr->reqRef = reqPtr;
- regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
- regTcPtr->activeCreat = ZFALSE;
- abortCommonLab(signal);
- return;
-}//Dblqh::execABORTREQ()
-
-/* ************>> */
-/* ACC_TO_REF > */
-/* ************>> */
-void Dblqh::execACC_TO_REF(Signal* signal)
-{
- jamEntry();
- terrorCode = signal->theData[1];
- releaseActiveFrag(signal);
- abortErrorLab(signal);
- return;
-}//Dblqh::execACC_TO_REF()
-
-/* ************> */
-/* ACCKEYREF > */
-/* ************> */
-void Dblqh::execACCKEYREF(Signal* signal)
-{
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- terrorCode = signal->theData[1];
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- TcConnectionrec * const tcPtr = tcConnectptr.p;
- switch (tcPtr->transactionState) {
- case TcConnectionrec::WAIT_ACC:
- jam();
- releaseActiveFrag(signal);
- break;
- case TcConnectionrec::WAIT_ACC_ABORT:
- case TcConnectionrec::ABORT_STOPPED:
- case TcConnectionrec::ABORT_QUEUED:
- jam();
-/* ------------------------------------------------------------------------- */
-/*IGNORE SINCE ABORT OF THIS OPERATION IS ONGOING ALREADY. */
-/* ------------------------------------------------------------------------- */
- return;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- const Uint32 errCode = terrorCode;
- tcPtr->errorCode = errCode;
-/* ------------------------------------------------------------------------- */
-/*WHEN AN ABORT FROM TC ARRIVES IT COULD ACTUALLY BE CORRECT BEHAVIOUR SINCE  */
-/*THE TUPLE MIGHT NOT HAVE ARRIVED YET OR MIGHT ALREADY HAVE BEEN INSERTED.   */
-/* ------------------------------------------------------------------------- */
- if (tcPtr->activeCreat == ZTRUE) {
- jam();
-/* ------------------------------------------------------------------------- */
-/*THIS IS A NORMAL EVENT DURING CREATION OF A FRAGMENT. PERFORM ABORT IN */
-/*TUP AND ACC AND THEN CONTINUE WITH NORMAL COMMIT PROCESSING. IF THE ERROR */
-/*HAPPENS TO BE A SERIOUS ERROR THEN PERFORM ABORT PROCESSING AS NORMAL. */
-/* ------------------------------------------------------------------------- */
- switch (tcPtr->operation) {
- case ZUPDATE:
- case ZDELETE:
- jam();
- if (errCode != ZNO_TUPLE_FOUND) {
- jam();
-/* ------------------------------------------------------------------------- */
-/*A NORMAL ERROR WILL BE TREATED AS A NORMAL ABORT AND WILL ABORT THE */
-/*TRANSACTION. NO SPECIAL HANDLING IS NEEDED. */
-/* ------------------------------------------------------------------------- */
- tcPtr->activeCreat = ZFALSE;
- }//if
- break;
- case ZINSERT:
- jam();
- if (errCode != ZTUPLE_ALREADY_EXIST) {
- jam();
-/* ------------------------------------------------------------------------- */
-/*A NORMAL ERROR WILL BE TREATED AS A NORMAL ABORT AND WILL ABORT THE */
-/*TRANSACTION. NO SPECIAL HANDLING IS NEEDED. */
-/* ------------------------------------------------------------------------- */
- tcPtr->activeCreat = ZFALSE;
- }//if
- break;
- default:
- jam();
-/* ------------------------------------------------------------------------- */
-/*A NORMAL ERROR WILL BE TREATED AS A NORMAL ABORT AND WILL ABORT THE */
-/*TRANSACTION. NO SPECIAL HANDLING IS NEEDED. */
-/* ------------------------------------------------------------------------- */
- tcPtr->activeCreat = ZFALSE;
- break;
- }//switch
- } else {
- /**
- * Only primary replica can get ZTUPLE_ALREADY_EXIST || ZNO_TUPLE_FOUND
- *
- * Unless it's a simple or dirty read
- *
- * NOT TRUE!
- * 1) op1 - primary insert ok
- * 2) op1 - backup insert fail (log full or what ever)
- * 3) op1 - delete ok @ primary
- * 4) op1 - delete fail @ backup
- *
- * -> ZNO_TUPLE_FOUND is possible
- */
- ndbrequire
- (tcPtr->seqNoReplica == 0 ||
- errCode != ZTUPLE_ALREADY_EXIST ||
- (tcPtr->operation == ZREAD && (tcPtr->dirtyOp || tcPtr->opSimple)));
- }
- tcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH;
- abortCommonLab(signal);
- return;
-}//Dblqh::execACCKEYREF()
-
-void Dblqh::localAbortStateHandlerLab(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->abortState != TcConnectionrec::ABORT_IDLE) {
- jam();
- return;
- }//if
- regTcPtr->activeCreat = ZFALSE;
- regTcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH;
- regTcPtr->errorCode = terrorCode;
- abortStateHandlerLab(signal);
- return;
-}//Dblqh::localAbortStateHandlerLab()
-
-void Dblqh::abortStateHandlerLab(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- switch (regTcPtr->transactionState) {
- case TcConnectionrec::PREPARED:
- jam();
-/* ------------------------------------------------------------------------- */
-/*THE OPERATION IS ALREADY PREPARED AND SENT TO THE NEXT LQH OR BACK TO TC. */
-/*WE CAN SIMPLY CONTINUE WITH THE ABORT PROCESS. */
-/*IF IT WAS A CHECK FOR TRANSACTION STATUS THEN WE REPORT THE STATUS TO THE */
-/*NEW TC AND CONTINUE WITH THE NEXT OPERATION IN LQH. */
-/* ------------------------------------------------------------------------- */
- if (regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC) {
- jam();
- sendLqhTransconf(signal, LqhTransConf::Prepared);
- return;
- }//if
- break;
- case TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL:
- case TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL:
- jam();
-/* ------------------------------------------------------------------------- */
-// We can only reach these states for multi-updates on a record in a transaction.
-// We know that at least one of those has received the COMMIT signal, thus we
-// declare ourselves only prepared since we will then receive the expected
-// COMMIT signal.
-/* ------------------------------------------------------------------------- */
- ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC);
- sendLqhTransconf(signal, LqhTransConf::Prepared);
- break;
- case TcConnectionrec::WAIT_TUPKEYINFO:
- case TcConnectionrec::WAIT_ATTR:
- jam();
-/* ------------------------------------------------------------------------- */
-/* WE ARE CURRENTLY WAITING FOR MORE INFORMATION. WE CAN START THE ABORT */
-/* PROCESS IMMEDIATELY. THE KEYINFO AND ATTRINFO SIGNALS WILL BE DROPPED */
-/* SINCE THE ABORT STATE WILL BE SET. */
-/* ------------------------------------------------------------------------- */
- break;
- case TcConnectionrec::WAIT_TUP:
- jam();
-/* ------------------------------------------------------------------------- */
-// TUP is currently active. We have to wait for the TUPKEYREF or TUPKEYCONF
-// to arrive since we might otherwise jeopardise the local checkpoint
-// consistency in overload situations.
-/* ------------------------------------------------------------------------- */
- regTcPtr->transactionState = TcConnectionrec::WAIT_TUP_TO_ABORT;
- return;
- case TcConnectionrec::WAIT_ACC:
- jam();
- if (regTcPtr->listState == TcConnectionrec::ACC_BLOCK_LIST) {
- jam();
-/* ------------------------------------------------------------------------- */
-// If the operation is in the ACC Blocked list the operation is not allowed
-// to start yet. We release it from the ACC Blocked list and will go through
-// the gate in abortCommonLab(..) where it will be blocked.
-/* ------------------------------------------------------------------------- */
- fragptr.i = regTcPtr->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- releaseAccList(signal);
- } else {
- jam();
-/* ------------------------------------------------------------------------- */
-// We start the abort immediately since the operation is still in the active
-// list and the fragment cannot have been frozen yet. By sending LCP_HOLDOPCONF
-// as direct signals we avoid the problem that we might find the operation
-// in an unexpected list in ACC.
-// We cannot accept being blocked before aborting ACC here since that would
-// lead to seriously complex issues.
-/* ------------------------------------------------------------------------- */
- abortContinueAfterBlockedLab(signal, false);
- return;
- }//if
- break;
- case TcConnectionrec::LOG_QUEUED:
- jam();
-/* ------------------------------------------------------------------------- */
-/*CURRENTLY QUEUED FOR LOGGING. WAIT UNTIL THE LOG RECORD HAS BEEN INSERTED  */
-/*AND THEN CONTINUE THE ABORT PROCESS. */
-//Could also be waiting for an overloaded log disk. In this case it is easy
-//to abort when CONTINUEB arrives.
-/* ------------------------------------------------------------------------- */
- return;
- break;
- case TcConnectionrec::STOPPED:
- jam();
- /* ---------------------------------------------------------------------
-     * WE ARE CURRENTLY QUEUED FOR ACCESS TO THE FRAGMENT BY AN LCP.
-     * Since nothing has been done, just release the operation:
-     * no prepare log record has been written,
-     * so no abort log record needs to be written.
- */
- releaseWaitQueue(signal);
- continueAfterLogAbortWriteLab(signal);
- return;
- break;
- case TcConnectionrec::WAIT_AI_AFTER_ABORT:
- jam();
-/* ------------------------------------------------------------------------- */
-/* ABORT OF ACC AND TUP ALREADY COMPLETED. THIS STATE IS ONLY USED WHEN */
-/* CREATING A NEW FRAGMENT. */
-/* ------------------------------------------------------------------------- */
- continueAbortLab(signal);
- return;
- break;
- case TcConnectionrec::WAIT_TUP_TO_ABORT:
- case TcConnectionrec::ABORT_STOPPED:
- case TcConnectionrec::LOG_ABORT_QUEUED:
- case TcConnectionrec::WAIT_ACC_ABORT:
- case TcConnectionrec::ABORT_QUEUED:
- jam();
-/* ------------------------------------------------------------------------- */
-/*ABORT IS ALREADY ONGOING DUE TO SOME ERROR. WE HAVE ALREADY SET THE STATE */
-/*OF THE ABORT SO THAT WE KNOW THAT TC EXPECTS A REPORT. WE CAN THUS SIMPLY */
-/*EXIT. */
-/* ------------------------------------------------------------------------- */
- return;
- break;
- case TcConnectionrec::COMMIT_STOPPED:
- case TcConnectionrec::LOG_COMMIT_QUEUED:
- case TcConnectionrec::COMMIT_QUEUED:
- jam();
-/* ------------------------------------------------------------------------- */
-/*THIS IS ONLY AN ALLOWED STATE IF A DIRTY WRITE OR SIMPLE READ IS PERFORMED.*/
-/*IF WE ARE MERELY CHECKING THE TRANSACTION STATE IT IS ALSO AN ALLOWED STATE*/
-/* ------------------------------------------------------------------------- */
- if (regTcPtr->dirtyOp == ZTRUE) {
- jam();
-/* ------------------------------------------------------------------------- */
-/*COMPLETE THE DIRTY WRITE AND THEN REPORT COMPLETED BACK TO TC. SINCE IT IS */
-/*A DIRTY WRITE IT IS ALLOWED TO COMMIT EVEN IF THE TRANSACTION ABORTS. */
-/* ------------------------------------------------------------------------- */
- return;
- }//if
- if (regTcPtr->simpleRead) {
- jam();
-/* ------------------------------------------------------------------------- */
-/*A SIMPLE READ IS CURRENTLY RELEASING THE LOCKS OR WAITING FOR ACCESS TO */
-/*ACC TO CLEAR THE LOCKS. COMPLETE THIS PROCESS AND THEN RETURN AS NORMAL. */
-/*NO DATA HAS CHANGED DUE TO THIS SIMPLE READ ANYWAY. */
-/* ------------------------------------------------------------------------- */
- return;
- }//if
- ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC);
- jam();
-/* ------------------------------------------------------------------------- */
-/*WE ARE ONLY CHECKING THE STATUS OF THE TRANSACTION. IT IS COMMITTING. */
-/*COMPLETE THE COMMIT LOCALLY AND THEN SEND REPORT OF COMMITTED TO THE NEW TC*/
-/* ------------------------------------------------------------------------- */
- return;
- break;
- case TcConnectionrec::COMMITTED:
- jam();
- ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC);
-/* ------------------------------------------------------------------------- */
-/*WE ARE CHECKING TRANSACTION STATUS. REPORT COMMITTED AND CONTINUE WITH THE */
-/*NEXT OPERATION. */
-/* ------------------------------------------------------------------------- */
- sendLqhTransconf(signal, LqhTransConf::Committed);
- return;
- break;
- default:
- ndbrequire(false);
-/* ------------------------------------------------------------------------- */
-/*THE STATE WAS NOT AN ALLOWED STATE ON A NORMAL OPERATION. SCANS AND COPY */
-/*FRAGMENT OPERATIONS SHOULD HAVE EXECUTED IN ANOTHER PATH. */
-/* ------------------------------------------------------------------------- */
- break;
- }//switch
- abortCommonLab(signal);
- return;
-}//Dblqh::abortStateHandlerLab()
-
-void Dblqh::abortErrorLab(Signal* signal)
-{
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->abortState == TcConnectionrec::ABORT_IDLE) {
- jam();
- regTcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH;
- regTcPtr->errorCode = terrorCode;
- }//if
- /* -----------------------------------------------------------------------
- * ACTIVE CREATION IS RESET FOR ALL ERRORS WHICH SHOULD BE HANDLED
- * WITH NORMAL ABORT HANDLING.
- * ----------------------------------------------------------------------- */
- regTcPtr->activeCreat = ZFALSE;
- abortCommonLab(signal);
- return;
-}//Dblqh::abortErrorLab()
-
-void Dblqh::abortCommonLab(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- const Uint32 commitAckMarker = regTcPtr->commitAckMarker;
- if(regTcPtr->activeCreat != ZTRUE && commitAckMarker != RNIL){
- /**
- * There is no NR ongoing and we have a marker
- */
- jam();
-#ifdef MARKER_TRACE
- {
- CommitAckMarkerPtr tmp;
- m_commitAckMarkerHash.getPtr(tmp, commitAckMarker);
- ndbout_c("Abo marker[%.8x %.8x]", tmp.p->transid1, tmp.p->transid2);
- }
-#endif
- m_commitAckMarkerHash.release(commitAckMarker);
- regTcPtr->commitAckMarker = RNIL;
- }
-
- fragptr.i = regTcPtr->fragmentptr;
- if (fragptr.i != RNIL) {
- jam();
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- switch (fragptr.p->fragStatus) {
- case Fragrecord::FSACTIVE:
- case Fragrecord::CRASH_RECOVERING:
- case Fragrecord::ACTIVE_CREATION:
- jam();
- linkActiveFrag(signal);
- abortContinueAfterBlockedLab(signal, true);
- return;
- break;
- case Fragrecord::BLOCKED:
- jam();
- linkFragQueue(signal);
- regTcPtr->transactionState = TcConnectionrec::ABORT_STOPPED;
- return;
- break;
- case Fragrecord::FREE:
- jam();
- case Fragrecord::DEFINED:
- jam();
- case Fragrecord::REMOVING:
- jam();
- default:
- ndbrequire(false);
- break;
- }//switch
- } else {
- jam();
- continueAbortLab(signal);
- }//if
-}//Dblqh::abortCommonLab()
-
-void Dblqh::abortContinueAfterBlockedLab(Signal* signal, bool canBlock)
-{
- /* ------------------------------------------------------------------------
- * INPUT: TC_CONNECTPTR ACTIVE OPERATION RECORD
- * ------------------------------------------------------------------------
- * ------------------------------------------------------------------------
- * CAN COME HERE AS RESTART AFTER BEING BLOCKED BY A LOCAL CHECKPOINT.
- * ------------------------------------------------------------------------
- * ALSO AS PART OF A NORMAL ABORT WITHOUT BLOCKING.
- * WE MUST ABORT TUP BEFORE ACC TO ENSURE THAT NO ONE RACES IN
-   * AND SEES A DIRTY STATE IN TUP.
- * ------------------------------------------------------------------------ */
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- fragptr.i = regTcPtr->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if ((cCommitBlocked == true) &&
- (fragptr.p->fragActiveStatus == ZTRUE) &&
- (canBlock == true) &&
- (regTcPtr->operation != ZREAD)) {
- jam();
-/* ------------------------------------------------------------------------- */
-// TUP and/or ACC have problems in writing the undo log to disk fast enough.
-// We must avoid the abort at this time and try later instead. The fragment
-// is also active with a local checkpoint and this commit can generate UNDO
-// log records that overflow the UNDO log buffer.
-//
-// In certain situations it is simply too complex to insert a wait state here
-// since ACC is active and we cannot release the operation from the active
-// list without causing great complexity.
-/* ------------------------------------------------------------------------- */
-/*---------------------------------------------------------------------------*/
-// We must delay the write of abort info to the log to safeguard against
-// a crash due to lack of log pages. We temporarily stop all log writes to this
-// log part to ensure that we don't get a buffer explosion in the delayed
-// signal buffer instead.
-/*---------------------------------------------------------------------------*/
- releaseActiveFrag(signal);
- logPartPtr.i = regTcPtr->hashValue & 3;
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
- linkWaitLog(signal, logPartPtr);
- regTcPtr->transactionState = TcConnectionrec::ABORT_QUEUED;
- if (logPartPtr.p->logPartState == LogPartRecord::IDLE) {
- jam();
- logPartPtr.p->logPartState = LogPartRecord::ACTIVE;
- }//if
- return;
- }//if
- signal->theData[0] = regTcPtr->tupConnectrec;
- EXECUTE_DIRECT(DBTUP, GSN_TUP_ABORTREQ, signal, 1);
- regTcPtr->transactionState = TcConnectionrec::WAIT_ACC_ABORT;
- signal->theData[0] = regTcPtr->accConnectrec;
- EXECUTE_DIRECT(DBACC, GSN_ACC_ABORTREQ, signal, 1);
- /* ------------------------------------------------------------------------
- * We need to insert a real-time break by sending ACC_ABORTCONF through the
- * job buffer to ensure that we catch any ACCKEYCONF or TUPKEYCONF or
- * TUPKEYREF that are in the job buffer but not yet processed. Doing
- * everything without that would race and create a state error when they
- * are executed.
- * ----------------------------------------------------------------------- */
- return;
-}//Dblqh::abortContinueAfterBlockedLab()
-
-/* ******************>> */
-/* ACC_ABORTCONF > */
-/* ******************>> */
-void Dblqh::execACC_ABORTCONF(Signal* signal)
-{
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- ndbrequire(regTcPtr->transactionState == TcConnectionrec::WAIT_ACC_ABORT);
- if (regTcPtr->activeCreat == ZTRUE) {
- /* ----------------------------------------------------------------------
- * A NORMAL EVENT DURING CREATION OF A FRAGMENT. WE NOW NEED TO CONTINUE
- * WITH NORMAL COMMIT PROCESSING.
- * ---------------------------------------------------------------------- */
- if (regTcPtr->currTupAiLen == regTcPtr->totReclenAi) {
- jam();
- regTcPtr->abortState = TcConnectionrec::ABORT_IDLE;
- rwConcludedLab(signal);
- return;
- } else {
- ndbrequire(regTcPtr->currTupAiLen < regTcPtr->totReclenAi);
- jam();
- releaseActiveFrag(signal);
- regTcPtr->transactionState = TcConnectionrec::WAIT_AI_AFTER_ABORT;
- return;
- }//if
- }//if
- releaseActiveFrag(signal);
- continueAbortLab(signal);
- return;
-}//Dblqh::execACC_ABORTCONF()
-
-void Dblqh::continueAbortLab(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- /* ------------------------------------------------------------------------
- * AN ERROR OCCURRED IN THE ACTIVE CREATION AFTER THE ABORT PHASE.
- * WE NEED TO CONTINUE WITH A NORMAL ABORT.
- * ------------------------------------------------------------------------
- * ALSO USED FOR NORMAL CLEAN UP AFTER A NORMAL ABORT.
- * ------------------------------------------------------------------------
- * ALSO USED WHEN NO FRAGMENT WAS SET UP ON OPERATION.
- * ------------------------------------------------------------------------ */
- if (regTcPtr->logWriteState == TcConnectionrec::WRITTEN) {
- jam();
- /* ----------------------------------------------------------------------
- * I NEED TO INSERT AN ABORT LOG RECORD SINCE WE ARE WRITING THE LOG IN THIS
- * TRANSACTION.
- * ---------------------------------------------------------------------- */
- initLogPointers(signal);
- if (logPartPtr.p->logPartState == LogPartRecord::ACTIVE) {
- jam();
- /* --------------------------------------------------------------------
- * A PREPARE OPERATION IS CURRENTLY WRITING IN THE LOG.
- * WE MUST WAIT FOR OUR TURN TO WRITE THE LOG.
- * IT IS NECESSARY TO WRITE ONE LOG RECORD COMPLETELY
- * AT A TIME, OTHERWISE WE WILL SCRAMBLE THE LOG.
- * -------------------------------------------------------------------- */
- linkWaitLog(signal, logPartPtr);
- regTcPtr->transactionState = TcConnectionrec::LOG_ABORT_QUEUED;
- return;
- }//if
- if (cnoOfLogPages == 0) {
- jam();
-/*---------------------------------------------------------------------------*/
-// We must delay the write of commit info to the log to safeguard against
-// a crash due to lack of log pages. We temporarily stop all log writes to this
-// log part to ensure that we don't get a buffer explosion in the delayed
-// signal buffer instead.
-/*---------------------------------------------------------------------------*/
- linkWaitLog(signal, logPartPtr);
- regTcPtr->transactionState = TcConnectionrec::LOG_ABORT_QUEUED;
- if (logPartPtr.p->logPartState == LogPartRecord::IDLE) {
- jam();
- logPartPtr.p->logPartState = LogPartRecord::ACTIVE;
- }//if
- return;
- }//if
- writeAbortLog(signal);
- removeLogTcrec(signal);
- } else if (regTcPtr->logWriteState == TcConnectionrec::NOT_STARTED) {
- jam();
- } else if (regTcPtr->logWriteState == TcConnectionrec::NOT_WRITTEN) {
- jam();
- /* ------------------------------------------------------------------
- * IT IS A READ OPERATION OR OTHER OPERATION THAT DOES NOT USE THE LOG.
- * ------------------------------------------------------------------ */
- /* ------------------------------------------------------------------
- * THE LOG HAS NOT BEEN WRITTEN SINCE THE LOG FLAG WAS FALSE.
- * THIS CAN OCCUR WHEN WE ARE STARTING A NEW FRAGMENT.
- * ------------------------------------------------------------------ */
- regTcPtr->logWriteState = TcConnectionrec::NOT_STARTED;
- } else {
- ndbrequire(regTcPtr->logWriteState == TcConnectionrec::NOT_WRITTEN_WAIT);
- jam();
- /* ----------------------------------------------------------------
- * THE STATE WAS SET TO NOT_WRITTEN BY THE OPERATION BUT LATER
- * A SCAN OF ALL OPERATION RECORDS CHANGED IT TO NOT_WRITTEN_WAIT.
- * THIS INDICATES THAT WE ARE WAITING FOR THIS OPERATION TO COMMIT
- * OR ABORT SO THAT WE CAN FIND THE
- * STARTING GLOBAL CHECKPOINT OF THIS NEW FRAGMENT.
- * ---------------------------------------------------------------- */
- checkScanTcCompleted(signal);
- }//if
- continueAfterLogAbortWriteLab(signal);
- return;
-}//Dblqh::continueAbortLab()
-
-void Dblqh::continueAfterLogAbortWriteLab(Signal* signal)
-{
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->simpleRead) {
- jam();
- TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend();
-
- tcKeyRef->connectPtr = regTcPtr->applOprec;
- tcKeyRef->transId[0] = regTcPtr->transid[0];
- tcKeyRef->transId[1] = regTcPtr->transid[1];
- tcKeyRef->errorCode = regTcPtr->errorCode;
- sendSignal(regTcPtr->applRef,
- GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB);
- cleanUp(signal);
- return;
- }//if
- if (regTcPtr->abortState == TcConnectionrec::ABORT_FROM_LQH) {
- LqhKeyRef * const lqhKeyRef = (LqhKeyRef *)signal->getDataPtrSend();
-
- jam();
- lqhKeyRef->userRef = regTcPtr->clientConnectrec;
- lqhKeyRef->connectPtr = regTcPtr->tcOprec;
- lqhKeyRef->errorCode = regTcPtr->errorCode;
- lqhKeyRef->transId1 = regTcPtr->transid[0];
- lqhKeyRef->transId2 = regTcPtr->transid[1];
- sendSignal(regTcPtr->clientBlockref, GSN_LQHKEYREF, signal,
- LqhKeyRef::SignalLength, JBB);
- } else if (regTcPtr->abortState == TcConnectionrec::ABORT_FROM_TC) {
- jam();
- sendAborted(signal);
- } else if (regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC) {
- jam();
- sendLqhTransconf(signal, LqhTransConf::Aborted);
- } else {
- ndbrequire(regTcPtr->abortState == TcConnectionrec::REQ_FROM_TC);
- jam();
- signal->theData[0] = regTcPtr->reqRef;
- signal->theData[1] = tcConnectptr.i;
- signal->theData[2] = cownNodeid;
- signal->theData[3] = regTcPtr->transid[0];
- signal->theData[4] = regTcPtr->transid[1];
- sendSignal(regTcPtr->reqBlockref, GSN_ABORTCONF,
- signal, 5, JBB);
- }//if
- cleanUp(signal);
-}//Dblqh::continueAfterLogAbortWriteLab()
-
-/* ##########################################################################
- * ####### MODULE TO HANDLE TC FAILURE #######
- *
- * ########################################################################## */
-
-/* ************************************************************************>>
- * NODE_FAILREP: Node failure report. Sender Ndbcntr. Set status of failed
- * node to down and reply with NF_COMPLETEREP to DIH which will report that
- * LQH has completed failure handling.
- * ************************************************************************>> */
-void Dblqh::execNODE_FAILREP(Signal* signal)
-{
- UintR TfoundNodes = 0;
- UintR TnoOfNodes;
- UintR Tdata[MAX_NDB_NODES];
- Uint32 i;
-
- NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
-
- TnoOfNodes = nodeFail->noOfNodes;
- UintR index = 0;
- for (i = 1; i < MAX_NDB_NODES; i++) {
- jam();
- if(NodeBitmask::get(nodeFail->theNodes, i)){
- jam();
- Tdata[index] = i;
- index++;
- }//if
- }//for
-
- lcpPtr.i = 0;
- ptrAss(lcpPtr, lcpRecord);
-
- ndbrequire(index == TnoOfNodes);
- ndbrequire(cnoOfNodes - 1 < MAX_NDB_NODES);
- for (i = 0; i < TnoOfNodes; i++) {
- const Uint32 nodeId = Tdata[i];
- lcpPtr.p->m_EMPTY_LCP_REQ.clear(nodeId);
-
- for (Uint32 j = 0; j < cnoOfNodes; j++) {
- jam();
- if (cnodeData[j] == nodeId){
- jam();
- cnodeStatus[j] = ZNODE_DOWN;
-
- TfoundNodes++;
- }//if
- }//for
- NFCompleteRep * const nfCompRep = (NFCompleteRep *)&signal->theData[0];
- nfCompRep->blockNo = DBLQH;
- nfCompRep->nodeId = cownNodeid;
- nfCompRep->failedNodeId = Tdata[i];
- sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP, signal,
- NFCompleteRep::SignalLength, JBB);
- }//for
- ndbrequire(TnoOfNodes == TfoundNodes);
-}//Dblqh::execNODE_FAILREP()
-
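The loop at the top of execNODE_FAILREP() above expands the node bitmask carried in the signal into an explicit list of failed node ids before handling each one. The following is a minimal standalone sketch of that expansion, not NDB code: MAX_NODES, the word layout and the helper name are assumptions for illustration only.

#include <cstdint>
#include <vector>

static const uint32_t MAX_NODES = 64;   // assumption for the example only

// Collect the ids of all nodes whose bit is set in the mask, mirroring the
// NodeBitmask::get() loop in execNODE_FAILREP(); node ids start at 1.
// The caller must supply at least MAX_NODES / 32 mask words.
static std::vector<uint32_t> failedNodes(const uint32_t* maskWords)
{
  std::vector<uint32_t> out;
  for (uint32_t i = 1; i < MAX_NODES; i++) {
    if (maskWords[i >> 5] & (1u << (i & 31)))
      out.push_back(i);
  }
  return out;
}

Each id collected this way then gets its own NF_COMPLETEREP back to DIH, as in the second loop of the handler above.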
-/* ************************************************************************>>
- * LQH_TRANSREQ: Report status of all transactions where TC was coordinated
- * by a crashed TC
- * ************************************************************************>> */
-/* ************************************************************************>>
- * THIS SIGNAL IS RECEIVED AFTER A NODE CRASH.
- * THE NODE HAD A TC AND COORDINATED A NUMBER OF TRANSACTIONS.
- * NOW THE MASTER NODE IS PICKING UP THOSE TRANSACTIONS
- * TO COMPLETE THEM. EITHER ABORT THEM OR COMMIT THEM.
- * ************************************************************************>> */
-void Dblqh::execLQH_TRANSREQ(Signal* signal)
-{
- jamEntry();
- Uint32 newTcPtr = signal->theData[0];
- BlockReference newTcBlockref = signal->theData[1];
- Uint32 oldNodeId = signal->theData[2];
- tcNodeFailptr.i = oldNodeId;
- ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
- if ((tcNodeFailptr.p->tcFailStatus == TcNodeFailRecord::TC_STATE_TRUE) ||
- (tcNodeFailptr.p->tcFailStatus == TcNodeFailRecord::TC_STATE_BREAK)) {
- jam();
- tcNodeFailptr.p->lastNewTcBlockref = newTcBlockref;
- /* ------------------------------------------------------------------------
- * WE HAVE RECEIVED A SIGNAL SPECIFYING THAT WE NEED TO HANDLE THE FAILURE
- * OF A TC. NOW WE RECEIVE ANOTHER SIGNAL WITH THE SAME ORDER. THIS CAN
- * OCCUR IF THE NEW TC FAILS. WE MUST BE CAREFUL IN THIS CASE SO THAT WE DO
- * NOT START PARALLEL ACTIVITIES TRYING TO DO THE SAME THING. WE SAVE THE
- * NEW BLOCK REFERENCE TO THE LAST NEW TC IN A VARIABLE AND ASSIGN IT TO
- * NEW_TC_BLOCKREF WHEN THE OLD PROCESS RETURNS TO LQH_TRANS_NEXT. IT IS
- * CERTAIN TO COME THERE SINCE THIS IS THE ONLY PATH TO TAKE CARE OF THE
- * NEXT TC CONNECT RECORD. WE SET THE STATUS TO BREAK TO INDICATE TO THE OLD
- * PROCESS WHAT IS HAPPENING.
- * ------------------------------------------------------------------------ */
- tcNodeFailptr.p->lastNewTcRef = newTcPtr;
- tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_BREAK;
- return;
- }//if
- tcNodeFailptr.p->oldNodeId = oldNodeId;
- tcNodeFailptr.p->newTcBlockref = newTcBlockref;
- tcNodeFailptr.p->newTcRef = newTcPtr;
- tcNodeFailptr.p->tcRecNow = 0;
- tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_TRUE;
- signal->theData[0] = ZLQH_TRANS_NEXT;
- signal->theData[1] = tcNodeFailptr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- return;
-}//Dblqh::execLQH_TRANSREQ()
-
-void Dblqh::lqhTransNextLab(Signal* signal)
-{
- UintR tend;
- UintR tstart;
- UintR guard0;
-
- if (tcNodeFailptr.p->tcFailStatus == TcNodeFailRecord::TC_STATE_BREAK) {
- jam();
- /* ----------------------------------------------------------------------
- * AN INTERRUPTION TO THIS NODE FAIL HANDLING WAS RECEIVED AND A NEW
- * TC HAS BEEN ASSIGNED TO TAKE OVER THE FAILED TC. PROBABLY THE OLD
- * NEW TC HAS FAILED.
- * ---------------------------------------------------------------------- */
- tcNodeFailptr.p->newTcBlockref = tcNodeFailptr.p->lastNewTcBlockref;
- tcNodeFailptr.p->newTcRef = tcNodeFailptr.p->lastNewTcRef;
- tcNodeFailptr.p->tcRecNow = 0;
- tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_TRUE;
- }//if
- tstart = tcNodeFailptr.p->tcRecNow;
- tend = tstart + 200;
- guard0 = tend;
- for (tcConnectptr.i = tstart; tcConnectptr.i <= guard0; tcConnectptr.i++) {
- jam();
- if (tcConnectptr.i >= ctcConnectrecFileSize) {
- jam();
- /**
- * Finished with scanning operation record
- *
- * now scan markers
- */
- scanMarkers(signal, tcNodeFailptr.i, 0, RNIL);
- return;
- }//if
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- if (tcConnectptr.p->transactionState != TcConnectionrec::IDLE) {
- if (tcConnectptr.p->transactionState != TcConnectionrec::TC_NOT_CONNECTED) {
- if (tcConnectptr.p->tcScanRec == RNIL) {
- if (refToNode(tcConnectptr.p->tcBlockref) == tcNodeFailptr.p->oldNodeId) {
- if (tcConnectptr.p->operation != ZREAD) {
- jam();
- tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i;
- tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC;
- abortStateHandlerLab(signal);
- return;
- } else {
- jam();
- if (tcConnectptr.p->opSimple != ZTRUE) {
- jam();
- tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i;
- tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC;
- abortStateHandlerLab(signal);
- return;
- }//if
- }//if
- }//if
- } else {
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- if (scanptr.p->scanType == ScanRecord::COPY) {
- jam();
- if (scanptr.p->scanNodeId == tcNodeFailptr.p->oldNodeId) {
- jam();
- /* ------------------------------------------------------------
- * THE RECEIVER OF THE COPY HAS FAILED.
- * WE HAVE TO CLOSE THE COPY PROCESS.
- * ------------------------------------------------------------ */
- tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i;
- tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC;
- closeCopyRequestLab(signal);
- return;
- }//if
- } else {
- if (scanptr.p->scanType == ScanRecord::SCAN) {
- jam();
- if (refToNode(tcConnectptr.p->tcBlockref) ==
- tcNodeFailptr.p->oldNodeId) {
- jam();
- tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i;
- tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC;
- closeScanRequestLab(signal);
- return;
- }//if
- } else {
- jam();
- /* ------------------------------------------------------------
- * THIS IS AN ERROR THAT SHOULD NOT OCCUR. WE CRASH THE SYSTEM.
- * ------------------------------------------------------------ */
- systemErrorLab(signal);
- return;
- }//if
- }//if
- }//if
- }//if
- }//if
- }//for
- tcNodeFailptr.p->tcRecNow = tend + 1;
- signal->theData[0] = ZLQH_TRANS_NEXT;
- signal->theData[1] = tcNodeFailptr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- return;
-}//Dblqh::lqhTransNextLab()
-
-void
-Dblqh::scanMarkers(Signal* signal,
- Uint32 tcNodeFail,
- Uint32 startBucket,
- Uint32 i){
-
- jam();
-
- TcNodeFailRecordPtr tcNodeFailPtr;
- tcNodeFailPtr.i = tcNodeFail;
- ptrCheckGuard(tcNodeFailPtr, ctcNodeFailrecFileSize, tcNodeFailRecord);
- const Uint32 crashedTcNodeId = tcNodeFailPtr.p->oldNodeId;
-
- CommitAckMarkerIterator iter;
- if(i == RNIL){
- m_commitAckMarkerHash.next(startBucket, iter);
- } else {
- jam();
- iter.curr.i = i;
- iter.bucket = startBucket;
- m_commitAckMarkerHash.getPtr(iter.curr);
- m_commitAckMarkerHash.next(iter);
- }
-
- const Uint32 RT_BREAK = 256;
- for(i = 0; i<RT_BREAK || iter.bucket == startBucket; i++){
- jam();
-
- if(iter.curr.i == RNIL){
- /**
- * Done with iteration
- */
- jam();
-
- tcNodeFailPtr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_FALSE;
- signal->theData[0] = tcNodeFailPtr.p->newTcRef;
- signal->theData[1] = cownNodeid;
- signal->theData[2] = LqhTransConf::LastTransConf;
- sendSignal(tcNodeFailPtr.p->newTcBlockref, GSN_LQH_TRANSCONF,
- signal, 3, JBB);
- return;
- }
-
- if(iter.curr.p->tcNodeId == crashedTcNodeId){
- jam();
-
- /**
- * Found marker belonging to crashed node
- */
- LqhTransConf * const lqhTransConf = (LqhTransConf *)&signal->theData[0];
- lqhTransConf->tcRef = tcNodeFailPtr.p->newTcRef;
- lqhTransConf->lqhNodeId = cownNodeid;
- lqhTransConf->operationStatus = LqhTransConf::Marker;
- lqhTransConf->transId1 = iter.curr.p->transid1;
- lqhTransConf->transId2 = iter.curr.p->transid2;
- lqhTransConf->apiRef = iter.curr.p->apiRef;
- lqhTransConf->apiOpRec = iter.curr.p->apiOprec;
- sendSignal(tcNodeFailPtr.p->newTcBlockref, GSN_LQH_TRANSCONF,
- signal, 7, JBB);
-
- signal->theData[0] = ZSCAN_MARKERS;
- signal->theData[1] = tcNodeFailPtr.i;
- signal->theData[2] = iter.bucket;
- signal->theData[3] = iter.curr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
- return;
- }
-
- m_commitAckMarkerHash.next(iter);
- }
-
- signal->theData[0] = ZSCAN_MARKERS;
- signal->theData[1] = tcNodeFailPtr.i;
- signal->theData[2] = iter.bucket;
- signal->theData[3] = RNIL;
- sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
-}
-
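scanMarkers() above walks the commit-ack-marker hash in slices of RT_BREAK entries and then re-sends itself a CONTINUEB (ZSCAN_MARKERS) signal carrying the bucket and current record index, so a long walk never monopolises the job buffer. Below is a simplified, standalone sketch of that real-time-break pattern; the names and the function-pointer callback are hypothetical stand-ins, not the NDB API.

#include <cstddef>
#include <vector>

static const std::size_t RT_BREAK = 256;

// Handle up to RT_BREAK markers starting at 'pos'; return the position to
// resume from, or markers.size() when the walk is complete. The caller plays
// the role of the CONTINUEB self-signal by calling again with the returned
// position until everything has been processed.
static std::size_t scanSlice(const std::vector<int>& markers,
                             std::size_t pos,
                             void (*handleMarker)(int))
{
  std::size_t end = pos + RT_BREAK;
  for (; pos < markers.size() && pos < end; pos++)
    handleMarker(markers[pos]);
  return pos;
}

A driver loop such as "for (std::size_t p = 0; p < v.size(); ) p = scanSlice(v, p, h);" stands in for the signal round-trips; in the block itself the resume position travels in theData of the CONTINUEB signal, as shown at the end of scanMarkers() above.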
-/* #########################################################################
- * ####### SCAN MODULE #######
- *
- * #########################################################################
- * -------------------------------------------------------------------------
- * THIS MODULE CONTAINS THE CODE THAT HANDLES A SCAN OF A PARTICULAR FRAGMENT
- * IT OPERATES UNDER THE CONTROL OF TC AND ORDERS ACC TO PERFORM A SCAN OF
- * ALL TUPLES IN THE FRAGMENT. TUP PERFORMS THE NECESSARY SEARCH CONDITIONS
- * TO ENSURE THAT ONLY VALID TUPLES ARE RETURNED TO THE APPLICATION.
- * ------------------------------------------------------------------------- */
-/* *************** */
-/* ACC_SCANCONF > */
-/* *************** */
-void Dblqh::execACC_SCANCONF(Signal* signal)
-{
- AccScanConf * const accScanConf = (AccScanConf *)&signal->theData[0];
- jamEntry();
- scanptr.i = accScanConf->scanPtr;
- c_scanRecordPool.getPtr(scanptr);
- if (scanptr.p->scanState == ScanRecord::WAIT_ACC_SCAN) {
- accScanConfScanLab(signal);
- } else {
- ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_ACC_COPY);
- accScanConfCopyLab(signal);
- }//if
-}//Dblqh::execACC_SCANCONF()
-
-/* ************>> */
-/* ACC_SCANREF > */
-/* ************>> */
-void Dblqh::execACC_SCANREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dblqh::execACC_SCANREF()
-
-/* ***************>> */
-/* NEXT_SCANCONF > */
-/* ***************>> */
-void Dblqh::execNEXT_SCANCONF(Signal* signal)
-{
- NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0];
- jamEntry();
- scanptr.i = nextScanConf->scanPtr;
- c_scanRecordPool.getPtr(scanptr);
- if (nextScanConf->localKeyLength == 1) {
- jam();
- nextScanConf->localKey[1] =
- nextScanConf->localKey[0] & MAX_TUPLES_PER_PAGE;
- nextScanConf->localKey[0] = nextScanConf->localKey[0] >> MAX_TUPLES_BITS;
- }//if
- switch (scanptr.p->scanState) {
- case ScanRecord::WAIT_CLOSE_SCAN:
- jam();
- accScanCloseConfLab(signal);
- break;
- case ScanRecord::WAIT_CLOSE_COPY:
- jam();
- accCopyCloseConfLab(signal);
- break;
- case ScanRecord::WAIT_NEXT_SCAN:
- jam();
- nextScanConfScanLab(signal);
- break;
- case ScanRecord::WAIT_NEXT_SCAN_COPY:
- jam();
- nextScanConfCopyLab(signal);
- break;
- case ScanRecord::WAIT_RELEASE_LOCK:
- jam();
- ndbrequire(signal->length() == 1);
- scanLockReleasedLab(signal);
- break;
- default:
- ndbrequire(false);
- }//switch
-}//Dblqh::execNEXT_SCANCONF()
-
-/* ***************> */
-/* NEXT_SCANREF > */
-/* ***************> */
-void Dblqh::execNEXT_SCANREF(Signal* signal)
-{
- jamEntry();
- systemErrorLab(signal);
- return;
-}//Dblqh::execNEXT_SCANREF()
-
-/* ******************> */
-/* STORED_PROCCONF > */
-/* ******************> */
-void Dblqh::execSTORED_PROCCONF(Signal* signal)
-{
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- Uint32 storedProcId = signal->theData[1];
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- switch (scanptr.p->scanState) {
- case ScanRecord::WAIT_STORED_PROC_SCAN:
- jam();
- scanptr.p->scanStoredProcId = storedProcId;
- storedProcConfScanLab(signal);
- break;
- case ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN:
- jam();
- releaseActiveFrag(signal);
- tupScanCloseConfLab(signal);
- break;
- case ScanRecord::WAIT_STORED_PROC_COPY:
- jam();
- scanptr.p->scanStoredProcId = storedProcId;
- storedProcConfCopyLab(signal);
- break;
- case ScanRecord::WAIT_DELETE_STORED_PROC_ID_COPY:
- jam();
- releaseActiveFrag(signal);
- tupCopyCloseConfLab(signal);
- break;
- default:
- ndbrequire(false);
- }//switch
-}//Dblqh::execSTORED_PROCCONF()
-
-/* ****************** */
-/* STORED_PROCREF > */
-/* ****************** */
-void Dblqh::execSTORED_PROCREF(Signal* signal)
-{
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- Uint32 errorCode = signal->theData[1];
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- switch (scanptr.p->scanState) {
- case ScanRecord::WAIT_STORED_PROC_SCAN:
- jam();
- scanptr.p->scanCompletedStatus = ZTRUE;
- scanptr.p->scanStoredProcId = signal->theData[2];
- tcConnectptr.p->errorCode = errorCode;
- closeScanLab(signal);
- break;
- default:
- ndbrequire(false);
- }//switch
-}//Dblqh::execSTORED_PROCREF()
-
-/* --------------------------------------------------------------------------
- * ENTER SCAN_NEXTREQ
- * --------------------------------------------------------------------------
- * PRECONDITION:
- * TRANSACTION_STATE = SCAN_STATE
- * SCAN_STATE = WAIT_SCAN_NEXTREQ
- *
- * Case scanLockHold: ZTRUE = Unlock previous round of
- * scanned row(s) and fetch next set of rows.
- * ZFALSE = Fetch new set of rows.
- * Number of rows to read depends on parallelism and how many rows are
- * left to scan in the fragment. SCAN_NEXTREQ can also be sent with
- * closeFlag == ZTRUE to close the scan.
- * ------------------------------------------------------------------------- */
-void Dblqh::execSCAN_NEXTREQ(Signal* signal)
-{
- jamEntry();
- const ScanFragNextReq * const nextReq =
- (ScanFragNextReq*)&signal->theData[0];
- const Uint32 transid1 = nextReq->transId1;
- const Uint32 transid2 = nextReq->transId2;
- const Uint32 senderData = nextReq->senderData;
-
- if (findTransaction(transid1, transid2, senderData) != ZOK){
- jam();
- DEBUG(senderData <<
- " Received SCAN_NEXTREQ in LQH with close flag when closed");
- ndbrequire(nextReq->closeFlag == ZTRUE);
- return;
- }
-
- // Crash node if signal sender is same node
- CRASH_INSERTION2(5021, refToNode(signal->senderBlockRef()) == cownNodeid);
- // Crash node if signal sender is NOT same node
- CRASH_INSERTION2(5022, refToNode(signal->senderBlockRef()) != cownNodeid);
-
- if (ERROR_INSERTED(5023)){
- // Drop signal if sender is same node
- if (refToNode(signal->senderBlockRef()) == cownNodeid) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }
- }//if
- if (ERROR_INSERTED(5024)){
- // Drop signal if sender is NOT same node
- if (refToNode(signal->senderBlockRef()) != cownNodeid) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }
- }//if
- if (ERROR_INSERTED(5025)){
- // Delay signal if sender is NOT same node
- if (refToNode(signal->senderBlockRef()) != cownNodeid) {
- CLEAR_ERROR_INSERT_VALUE;
- sendSignalWithDelay(cownref, GSN_SCAN_NEXTREQ, signal, 1000,
- signal->length());
- return;
- }
- }//if
- if (ERROR_INSERTED(5030)){
- ndbout << "ERROR 5030" << endl;
- CLEAR_ERROR_INSERT_VALUE;
- // Drop signal
- return;
- }//if
-
- if(ERROR_INSERTED(5036)){
- return;
- }
-
- scanptr.i = tcConnectptr.p->tcScanRec;
- ndbrequire(scanptr.i != RNIL);
- c_scanRecordPool.getPtr(scanptr);
- scanptr.p->scanTcWaiting = ZTRUE;
-
- /* ------------------------------------------------------------------
- * If the close flag is set this scan should be closed.
- * If we are waiting for SCAN_NEXTREQ, set the flag to stop scanning and
- * continue execution; otherwise set the flags and wait until the scan
- * completes by itself.
- * ------------------------------------------------------------------ */
- if (nextReq->closeFlag == ZTRUE){
- jam();
- if(ERROR_INSERTED(5034)){
- CLEAR_ERROR_INSERT_VALUE;
- }
- if(ERROR_INSERTED(5036)){
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }
- closeScanRequestLab(signal);
- return;
- }//if
-
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
-
- /**
- * Change parameters while running
- * (is currently not supported)
- */
- const Uint32 max_rows = nextReq->batch_size_rows;
- const Uint32 max_bytes = nextReq->batch_size_bytes;
- ndbrequire(scanptr.p->m_max_batch_size_rows == max_rows);
- ndbrequire(scanptr.p->m_max_batch_size_bytes == max_bytes);
-
- /* --------------------------------------------------------------------
- * If scanLockHold = TRUE we need to unlock previous round of
- * scanned records.
- * scanReleaseLocks will set states for this and send a NEXT_SCANREQ.
- * When confirm signal NEXT_SCANCONF arrives we call
- * continueScanNextReqLab to continue scanning new rows and
- * acquiring new locks.
- * -------------------------------------------------------------------- */
- if ((scanptr.p->scanLockHold == ZTRUE) &&
- (scanptr.p->m_curr_batch_size_rows > 0)) {
- jam();
- scanptr.p->scanReleaseCounter = 1;
- scanReleaseLocksLab(signal);
- return;
- }//if
-
- /* -----------------------------------------------------------------------
- * We end up here when scanLockHold = FALSE or no rows were locked in the
- * previous round.
- * Simply continue scanning.
- * ----------------------------------------------------------------------- */
- continueScanNextReqLab(signal);
-}//Dblqh::execSCAN_NEXTREQ()
-
-void Dblqh::continueScanNextReqLab(Signal* signal)
-{
- if (scanptr.p->scanCompletedStatus == ZTRUE) {
- jam();
- closeScanLab(signal);
- return;
- }//if
-
- if(scanptr.p->m_last_row){
- jam();
- scanptr.p->scanCompletedStatus = ZTRUE;
- scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
- sendScanFragConf(signal, ZFALSE);
- return;
- }
-
- // Update timer on tcConnectRecord
- tcConnectptr.p->tcTimer = cLqhTimeOutCount;
- init_acc_ptr_list(scanptr.p);
- scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT;
- scanNextLoopLab(signal);
-}//Dblqh::continueScanNextReqLab()
-
-/* -------------------------------------------------------------------------
- * WE NEED TO RELEASE LOCKS BEFORE CONTINUING
- * ------------------------------------------------------------------------- */
-void Dblqh::scanReleaseLocksLab(Signal* signal)
-{
- switch (fragptr.p->fragStatus) {
- case Fragrecord::FSACTIVE:
- jam();
- linkActiveFrag(signal);
- break;
- case Fragrecord::BLOCKED:
- jam();
- linkFragQueue(signal);
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_RELEASE_STOPPED;
- return;
- break;
- case Fragrecord::FREE:
- jam();
- case Fragrecord::ACTIVE_CREATION:
- jam();
- case Fragrecord::CRASH_RECOVERING:
- jam();
- case Fragrecord::DEFINED:
- jam();
- case Fragrecord::REMOVING:
- jam();
- default:
- ndbrequire(false);
- }//switch
- continueScanReleaseAfterBlockedLab(signal);
-}//Dblqh::scanReleaseLocksLab()
-
-void Dblqh::continueScanReleaseAfterBlockedLab(Signal* signal)
-{
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- scanptr.p->scanState = ScanRecord::WAIT_RELEASE_LOCK;
- signal->theData[0] = scanptr.p->scanAccPtr;
- signal->theData[1]=
- get_acc_ptr_from_scan_record(scanptr.p,
- scanptr.p->scanReleaseCounter -1,
- false);
- signal->theData[2] = NextScanReq::ZSCAN_COMMIT;
- sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
-}//Dblqh::continueScanReleaseAfterBlockedLab()
-
-/* -------------------------------------------------------------------------
- * ENTER SCAN_NEXTREQ
- * -------------------------------------------------------------------------
- * SCAN_NEXT_REQ SIGNAL ARRIVED IN THE MIDDLE OF EXECUTION OF THE SCAN.
- * IT WAS A REQUEST TO CLOSE THE SCAN. WE WILL CLOSE THE SCAN IN A
- * CAREFUL MANNER TO ENSURE THAT NO ERROR OCCURS.
- * -------------------------------------------------------------------------
- * PRECONDITION:
- * TRANSACTION_STATE = SCAN_STATE_USED
- * TSCAN_COMPLETED = ZTRUE
- * -------------------------------------------------------------------------
- * WE CAN ALSO ARRIVE AT THIS LABEL AFTER A NODE CRASH OF THE SCAN
- * COORDINATOR.
- * ------------------------------------------------------------------------- */
-void Dblqh::closeScanRequestLab(Signal* signal)
-{
- DEBUG("transactionState = " << tcConnectptr.p->transactionState);
- switch (tcConnectptr.p->transactionState) {
- case TcConnectionrec::SCAN_STATE_USED:
- DEBUG("scanState = " << scanptr.p->scanState);
- switch (scanptr.p->scanState) {
- case ScanRecord::IN_QUEUE:
- jam();
- tupScanCloseConfLab(signal);
- break;
- case ScanRecord::WAIT_NEXT_SCAN:
- jam();
- /* -------------------------------------------------------------------
- * SET COMPLETION STATUS AND WAIT FOR OPPORTUNITY TO STOP THE SCAN.
- * ------------------------------------------------------------------- */
- scanptr.p->scanCompletedStatus = ZTRUE;
- break;
- case ScanRecord::WAIT_ACC_SCAN:
- case ScanRecord::WAIT_STORED_PROC_SCAN:
- jam();
- /* -------------------------------------------------------------------
- * WE ARE CURRENTLY STARTING UP THE SCAN. SET COMPLETED STATUS
- * AND WAIT FOR COMPLETION OF STARTUP.
- * ------------------------------------------------------------------- */
- scanptr.p->scanCompletedStatus = ZTRUE;
- break;
- case ScanRecord::WAIT_CLOSE_SCAN:
- case ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN:
- jam();
- /*empty*/;
- break;
- /* -------------------------------------------------------------------
- * CLOSE IS ALREADY ONGOING. WE NEED NOT DO ANYTHING.
- * ------------------------------------------------------------------- */
- case ScanRecord::WAIT_RELEASE_LOCK:
- jam();
- /* -------------------------------------------------------------------
- * WE ARE CURRENTLY RELEASING RECORD LOCKS. AFTER COMPLETING THIS
- * WE WILL START TO CLOSE THE SCAN.
- * ------------------------------------------------------------------- */
- scanptr.p->scanCompletedStatus = ZTRUE;
- break;
- case ScanRecord::WAIT_SCAN_NEXTREQ:
- jam();
- /* -------------------------------------------------------------------
- * WE ARE WAITING FOR A SCAN_NEXTREQ FROM SCAN COORDINATOR(TC)
- * WHICH HAS CRASHED. CLOSE THE SCAN.
- * ------------------------------------------------------------------- */
- scanptr.p->scanCompletedStatus = ZTRUE;
-
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
-
- if (scanptr.p->scanLockHold == ZTRUE) {
- if (scanptr.p->m_curr_batch_size_rows > 0) {
- jam();
- scanptr.p->scanReleaseCounter = 1;
- scanReleaseLocksLab(signal);
- return;
- }//if
- }//if
- closeScanLab(signal);
- break;
- default:
- ndbrequire(false);
- }//switch
- break;
- case TcConnectionrec::WAIT_SCAN_AI:
- jam();
- /* ---------------------------------------------------------------------
- * WE ARE STILL WAITING FOR THE ATTRIBUTE INFORMATION THAT
- * OBVIOUSLY WILL NOT ARRIVE. WE CAN QUIT IMMEDIATELY HERE.
- * --------------------------------------------------------------------- */
- //XXX jonas this has to be wrong...
- releaseOprec(signal);
- if (tcConnectptr.p->abortState == TcConnectionrec::NEW_FROM_TC) {
- jam();
- tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec;
- ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
- tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1;
- signal->theData[0] = ZLQH_TRANS_NEXT;
- signal->theData[1] = tcNodeFailptr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- return;
- }//if
- tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE;
- scanptr.p->m_curr_batch_size_rows = 0;
- scanptr.p->m_curr_batch_size_bytes= 0;
- sendScanFragConf(signal, ZTRUE);
- abort_scan(signal, scanptr.i, 0);
- return;
- break;
- case TcConnectionrec::SCAN_TUPKEY:
- case TcConnectionrec::SCAN_FIRST_STOPPED:
- case TcConnectionrec::SCAN_CHECK_STOPPED:
- case TcConnectionrec::SCAN_STOPPED:
- jam();
- /* ---------------------------------------------------------------------
- * SET COMPLETION STATUS AND WAIT FOR OPPORTUNITY TO STOP THE SCAN.
- * --------------------------------------------------------------------- */
- scanptr.p->scanCompletedStatus = ZTRUE;
- break;
- case TcConnectionrec::SCAN_RELEASE_STOPPED:
- jam();
- /* ---------------------------------------------------------------------
- * WE ARE CURRENTLY RELEASING RECORD LOCKS. AFTER COMPLETING
- * THIS WE WILL START TO CLOSE THE SCAN.
- * --------------------------------------------------------------------- */
- scanptr.p->scanCompletedStatus = ZTRUE;
- break;
- case TcConnectionrec::SCAN_CLOSE_STOPPED:
- jam();
- /* ---------------------------------------------------------------------
- * CLOSE IS ALREADY ONGOING. WE NEED NOT DO ANYTHING.
- * --------------------------------------------------------------------- */
- /*empty*/;
- break;
- default:
- ndbrequire(false);
- }//switch
-}//Dblqh::closeScanRequestLab()
-
-/* -------------------------------------------------------------------------
- * ENTER NEXT_SCANCONF
- * -------------------------------------------------------------------------
- * PRECONDITION: SCAN_STATE = WAIT_RELEASE_LOCK
- * ------------------------------------------------------------------------- */
-void Dblqh::scanLockReleasedLab(Signal* signal)
-{
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- releaseActiveFrag(signal);
-
- if (scanptr.p->scanReleaseCounter == scanptr.p->m_curr_batch_size_rows) {
- if ((scanptr.p->scanErrorCounter > 0) ||
- (scanptr.p->scanCompletedStatus == ZTRUE)) {
- jam();
- scanptr.p->m_curr_batch_size_rows = 0;
- scanptr.p->m_curr_batch_size_bytes = 0;
- closeScanLab(signal);
- } else if (scanptr.p->check_scan_batch_completed() &&
- scanptr.p->scanLockHold != ZTRUE) {
- jam();
- scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
- sendScanFragConf(signal, ZFALSE);
- } else if (scanptr.p->m_last_row && !scanptr.p->scanLockHold) {
- jam();
- closeScanLab(signal);
- return;
- } else {
- jam();
- /*
- * We came here after releasing locks after
- * receiving SCAN_NEXTREQ from TC. We only come here
- * when scanLockHold == ZTRUE
- */
- scanptr.p->m_curr_batch_size_rows = 0;
- scanptr.p->m_curr_batch_size_bytes = 0;
- continueScanNextReqLab(signal);
- }//if
- } else if (scanptr.p->scanReleaseCounter < scanptr.p->m_curr_batch_size_rows) {
- jam();
- scanptr.p->scanReleaseCounter++;
- scanReleaseLocksLab(signal);
- } else {
- jam();
- /*
- We come here when we have been scanning for a long time and not been able
- to find m_max_batch_size_rows records to return. We needed to release
- the record we didn't want, but now we are returning all found records to
- the API.
- */
- scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
- sendScanFragConf(signal, ZFALSE);
- }//if
-}//Dblqh::scanLockReleasedLab()
-
-bool
-Dblqh::seize_acc_ptr_list(ScanRecord* scanP, Uint32 batch_size)
-{
- Uint32 i;
- Uint32 attr_buf_recs= (batch_size + 30) / 32;
-
- if (batch_size > 1) {
- if (c_no_attrinbuf_recs < attr_buf_recs) {
- jam();
- return false;
- }
- for (i= 1; i <= attr_buf_recs; i++) {
- scanP->scan_acc_op_ptr[i]= seize_attrinbuf();
- }
- }
- scanP->scan_acc_attr_recs= attr_buf_recs;
- scanP->scan_acc_index = 0;
- return true;
-}
-
-void
-Dblqh::release_acc_ptr_list(ScanRecord* scanP)
-{
- Uint32 i, attr_buf_recs;
- attr_buf_recs= scanP->scan_acc_attr_recs;
-
- for (i= 1; i <= attr_buf_recs; i++) {
- release_attrinbuf(scanP->scan_acc_op_ptr[i]);
- }
- scanP->scan_acc_attr_recs= 0;
- scanP->scan_acc_index = 0;
-}
-
-Uint32
-Dblqh::seize_attrinbuf()
-{
- AttrbufPtr regAttrPtr;
- Uint32 ret_attr_buf;
- ndbrequire(c_no_attrinbuf_recs > 0);
- c_no_attrinbuf_recs--;
- ret_attr_buf= cfirstfreeAttrinbuf;
- regAttrPtr.i= ret_attr_buf;
- ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
- cfirstfreeAttrinbuf= regAttrPtr.p->attrbuf[ZINBUF_NEXT];
- return ret_attr_buf;
-}
-
-Uint32
-Dblqh::release_attrinbuf(Uint32 attr_buf_i)
-{
- Uint32 next_buf;
- AttrbufPtr regAttrPtr;
- c_no_attrinbuf_recs++;
- regAttrPtr.i= attr_buf_i;
- ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
- next_buf= regAttrPtr.p->attrbuf[ZINBUF_NEXT];
- regAttrPtr.p->attrbuf[ZINBUF_NEXT]= cfirstfreeAttrinbuf;
- cfirstfreeAttrinbuf= regAttrPtr.i;
- return next_buf;
-}
-
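seize_attrinbuf() and release_attrinbuf() above manage the attribute-info buffers as an intrusive free list: each record stores the index of the next free record in its ZINBUF_NEXT word and cfirstfreeAttrinbuf points at the head. The sketch below shows the same pattern in a self-contained form; the names, the NIL sentinel and the array-backed pool are assumptions for illustration, not the real Attrbuf layout.

#include <cassert>
#include <cstdint>
#include <vector>

static const uint32_t NIL = 0xFFFFFFFF;            // stand-in for RNIL

struct BufPool {
  struct Buf { uint32_t next; };                   // only the "ZINBUF_NEXT" word
  std::vector<Buf> bufs;
  uint32_t firstFree;
  uint32_t freeCount;

  explicit BufPool(uint32_t n) : bufs(n), firstFree(n ? 0 : NIL), freeCount(n) {
    for (uint32_t i = 0; i < n; i++)               // chain every record at start-up
      bufs[i].next = (i + 1 < n) ? i + 1 : NIL;
  }

  uint32_t seize() {                               // cf. seize_attrinbuf()
    assert(freeCount > 0);
    uint32_t i = firstFree;
    firstFree = bufs[i].next;
    freeCount--;
    return i;
  }

  uint32_t release(uint32_t i) {                   // cf. release_attrinbuf():
    uint32_t next = bufs[i].next;                  // return the old next so the
    bufs[i].next = firstFree;                      // caller can keep walking a chain
    firstFree = i;
    freeCount++;
    return next;
  }
};

seize_acc_ptr_list() above draws (batch_size + 30) / 32 of these records per scan batch whenever the batch holds more than one row.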
-void
-Dblqh::init_acc_ptr_list(ScanRecord* scanP)
-{
- scanP->scan_acc_index = 0;
-}
-
-Uint32
-Dblqh::get_acc_ptr_from_scan_record(ScanRecord* scanP,
- Uint32 index,
- bool crash_flag)
-{
- Uint32* acc_ptr;
- Uint32 attr_buf_rec, attr_buf_index;
- if (!((index < MAX_PARALLEL_OP_PER_SCAN) &&
- index < scanP->scan_acc_index)) {
- ndbrequire(crash_flag);
- return RNIL;
- }
- i_get_acc_ptr(scanP, acc_ptr, index);
- return *acc_ptr;
-}
-
-void
-Dblqh::set_acc_ptr_in_scan_record(ScanRecord* scanP,
- Uint32 index, Uint32 acc)
-{
- Uint32 *acc_ptr;
- ndbrequire((index == 0 || scanP->scan_acc_index == index) &&
- (index < MAX_PARALLEL_OP_PER_SCAN));
- scanP->scan_acc_index= index + 1;
- i_get_acc_ptr(scanP, acc_ptr, index);
- *acc_ptr= acc;
-}
-
-/* -------------------------------------------------------------------------
- * SCAN_FRAGREQ: Request to start scanning the specified fragment of a table.
- * ------------------------------------------------------------------------- */
-void Dblqh::execSCAN_FRAGREQ(Signal* signal)
-{
- ScanFragReq * const scanFragReq = (ScanFragReq *)&signal->theData[0];
- ScanFragRef * ref;
- const Uint32 transid1 = scanFragReq->transId1;
- const Uint32 transid2 = scanFragReq->transId2;
- Uint32 errorCode= 0;
- Uint32 senderData;
- Uint32 hashIndex;
- TcConnectionrecPtr nextHashptr;
-
- jamEntry();
- const Uint32 reqinfo = scanFragReq->requestInfo;
- const Uint32 fragId = (scanFragReq->fragmentNoKeyLen & 0xFFFF);
- const Uint32 keyLen = (scanFragReq->fragmentNoKeyLen >> 16);
- tabptr.i = scanFragReq->tableId;
- const Uint32 max_rows = scanFragReq->batch_size_rows;
- const Uint32 scanLockMode = ScanFragReq::getLockMode(reqinfo);
- const Uint8 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo);
- const Uint8 rangeScan = ScanFragReq::getRangeScanFlag(reqinfo);
- const Uint8 tupScan = ScanFragReq::getTupScanFlag(reqinfo);
-
- ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
- if(tabptr.p->tableStatus != Tablerec::TABLE_DEFINED){
- senderData = scanFragReq->senderData;
- goto error_handler_early_1;
- }
-
- if (cfirstfreeTcConrec != RNIL) {
- seizeTcrec();
- tcConnectptr.p->clientConnectrec = scanFragReq->senderData;
- tcConnectptr.p->clientBlockref = signal->senderBlockRef();
- tcConnectptr.p->savePointId = scanFragReq->savePointId;
- } else {
- jam();
- /* --------------------------------------------------------------------
- * NO FREE TC RECORD AVAILABLE, THUS WE CANNOT HANDLE THE REQUEST.
- * -------------------------------------------------------------------- */
- errorCode = ZNO_TC_CONNECT_ERROR;
- senderData = scanFragReq->senderData;
- goto error_handler_early;
- }//if
- /**
- * A write always has to get keyinfo
- */
- ndbrequire(scanLockMode == 0 || keyinfo);
-
- ndbrequire(max_rows > 0 && max_rows <= MAX_PARALLEL_OP_PER_SCAN);
- if (!getFragmentrec(signal, fragId)) {
- errorCode = 1231;
- goto error_handler;
- }//if
-
- // Verify scan type vs table type (both sides are boolean)
- if (rangeScan != DictTabInfo::isOrderedIndex(fragptr.p->tableType)) {
- errorCode = 1232;
- goto error_handler;
- }//if
-
- // 1 scan record is reserved for node recovery
- if (cscanNoFreeRec < 2) {
- jam();
- errorCode = ScanFragRef::ZNO_FREE_SCANREC_ERROR;
- goto error_handler;
- }
-
- // XXX adjust cmaxAccOps for range scans and remove this comment
- if ((cbookedAccOps + max_rows) > cmaxAccOps) {
- jam();
- errorCode = ScanFragRef::ZSCAN_BOOK_ACC_OP_ERROR;
- goto error_handler;
- }//if
-
- ndbrequire(c_scanRecordPool.seize(scanptr));
- initScanTc(signal,
- transid1,
- transid2,
- fragId,
- ZNIL);
- tcConnectptr.p->save1 = 4;
- tcConnectptr.p->primKeyLen = keyLen + 4; // hard coded in execKEYINFO
- errorCode = initScanrec(scanFragReq);
- if (errorCode != ZOK) {
- jam();
- goto error_handler2;
- }//if
- cscanNoFreeRec--;
- cbookedAccOps += max_rows;
-
- hashIndex = (tcConnectptr.p->transid[0] ^ tcConnectptr.p->tcOprec) & 1023;
- nextHashptr.i = ctransidHash[hashIndex];
- ctransidHash[hashIndex] = tcConnectptr.i;
- tcConnectptr.p->prevHashRec = RNIL;
- tcConnectptr.p->nextHashRec = nextHashptr.i;
- if (nextHashptr.i != RNIL) {
- jam();
- /* ---------------------------------------------------------------------
- * ENSURE THAT THE NEXT RECORD HAS SET PREVIOUS TO OUR RECORD
- * IF IT EXISTS
- * --------------------------------------------------------------------- */
- ptrCheckGuard(nextHashptr, ctcConnectrecFileSize, tcConnectionrec);
- nextHashptr.p->prevHashRec = tcConnectptr.i;
- }//if
- if (scanptr.p->scanAiLength > 0) {
- jam();
- tcConnectptr.p->transactionState = TcConnectionrec::WAIT_SCAN_AI;
- return;
- }//if
- continueAfterReceivingAllAiLab(signal);
- return;
-
-error_handler2:
- // no scan number allocated
- c_scanRecordPool.release(scanptr);
-error_handler:
- ref = (ScanFragRef*)&signal->theData[0];
- tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE;
- ref->senderData = tcConnectptr.p->clientConnectrec;
- ref->transId1 = transid1;
- ref->transId2 = transid2;
- ref->errorCode = errorCode;
- sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGREF, signal,
- ScanFragRef::SignalLength, JBB);
- releaseOprec(signal);
- releaseTcrec(signal, tcConnectptr);
- return;
-
- error_handler_early_1:
- if(tabptr.p->tableStatus == Tablerec::NOT_DEFINED){
- jam();
- errorCode = ZTABLE_NOT_DEFINED;
- } else if (tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING ||
- tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
- jam();
- errorCode = ZDROP_TABLE_IN_PROGRESS;
- } else {
- ndbrequire(0);
- }
- error_handler_early:
- ref = (ScanFragRef*)&signal->theData[0];
- ref->senderData = senderData;
- ref->transId1 = transid1;
- ref->transId2 = transid2;
- ref->errorCode = errorCode;
- sendSignal(signal->senderBlockRef(), GSN_SCAN_FRAGREF, signal,
- ScanFragRef::SignalLength, JBB);
-}//Dblqh::execSCAN_FRAGREQ()
-
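execSCAN_FRAGREQ() above inserts the seized TC connect record at the head of a transaction-id hash chain, computing hashIndex = (transid[0] ^ tcOprec) & 1023 and back-linking the previous head. A standalone sketch of that head insertion into an intrusive doubly-linked chain follows; the record and table names are hypothetical, and the pool is a plain vector rather than the block's record file.

#include <cstdint>
#include <vector>

static const uint32_t NIL = 0xFFFFFFFF;            // stand-in for RNIL

struct Rec {
  uint32_t transid0, tcOprec;
  uint32_t prevHash, nextHash;
};

struct TransidHash {
  std::vector<uint32_t> bucket;                    // head record index per bucket
  std::vector<Rec>& pool;                          // record pool indexed by i

  explicit TransidHash(std::vector<Rec>& p) : bucket(1024, NIL), pool(p) {}

  void insert(uint32_t i) {                        // cf. the ctransidHash code above
    Rec& r = pool[i];
    uint32_t h = (r.transid0 ^ r.tcOprec) & 1023;
    uint32_t oldHead = bucket[h];
    bucket[h] = i;
    r.prevHash = NIL;
    r.nextHash = oldHead;
    if (oldHead != NIL)                            // back-link the previous head
      pool[oldHead].prevHash = i;
  }
};

The back link is presumably what lets deleteTransidHash() unlink a record without walking the whole bucket when the scan is aborted or finished.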
-void Dblqh::continueAfterReceivingAllAiLab(Signal* signal)
-{
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
-
- if(scanptr.p->scanState == ScanRecord::IN_QUEUE){
- jam();
- return;
- }
-
- scanptr.p->scanState = ScanRecord::WAIT_ACC_SCAN;
- AccScanReq * req = (AccScanReq*)&signal->theData[0];
- req->senderData = scanptr.i;
- req->senderRef = cownref;
- req->tableId = tcConnectptr.p->tableref;
- req->fragmentNo = tcConnectptr.p->fragmentid;
- req->requestInfo = 0;
- AccScanReq::setLockMode(req->requestInfo, scanptr.p->scanLockMode);
- AccScanReq::setReadCommittedFlag(req->requestInfo, scanptr.p->readCommitted);
- AccScanReq::setDescendingFlag(req->requestInfo, scanptr.p->descending);
- req->transId1 = tcConnectptr.p->transid[0];
- req->transId2 = tcConnectptr.p->transid[1];
- req->savePointId = tcConnectptr.p->savePointId;
- sendSignal(scanptr.p->scanBlockref, GSN_ACC_SCANREQ, signal,
- AccScanReq::SignalLength, JBB);
-}//Dblqh::continueAfterReceivingAllAiLab()
-
-void Dblqh::scanAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length)
-{
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- if (saveTupattrbuf(signal, dataPtr, length) == ZOK) {
- if (tcConnectptr.p->currTupAiLen < scanptr.p->scanAiLength) {
- jam();
- } else {
- jam();
- ndbrequire(tcConnectptr.p->currTupAiLen == scanptr.p->scanAiLength);
- continueAfterReceivingAllAiLab(signal);
- }//if
- return;
- }//if
- abort_scan(signal, scanptr.i, ZGET_ATTRINBUF_ERROR);
-}
-
-void Dblqh::abort_scan(Signal* signal, Uint32 scan_ptr_i, Uint32 errcode){
- jam();
- scanptr.i = scan_ptr_i;
- c_scanRecordPool.getPtr(scanptr);
-
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- finishScanrec(signal);
- releaseScanrec(signal);
- tcConnectptr.p->transactionState = TcConnectionrec::IDLE;
- tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE;
-
- if(errcode)
- {
- jam();
- ScanFragRef * ref = (ScanFragRef*)&signal->theData[0];
- ref->senderData = tcConnectptr.p->clientConnectrec;
- ref->transId1 = tcConnectptr.p->transid[0];
- ref->transId2 = tcConnectptr.p->transid[1];
- ref->errorCode = errcode;
- sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGREF, signal,
- ScanFragRef::SignalLength, JBB);
- }
- deleteTransidHash(signal);
- releaseOprec(signal);
- releaseTcrec(signal, tcConnectptr);
-}
-
-/*---------------------------------------------------------------------*/
-/* Send this 'I am alive' signal to TC when it is received from ACC */
-/* We include the scanPtr.i that comes from ACC in signalData[1], this */
-/* tells TC which fragment record to check for a timeout. */
-/*---------------------------------------------------------------------*/
-void Dblqh::execSCAN_HBREP(Signal* signal)
-{
- jamEntry();
- scanptr.i = signal->theData[0];
- c_scanRecordPool.getPtr(scanptr);
- switch(scanptr.p->scanType){
- case ScanRecord::SCAN:
- if (scanptr.p->scanTcWaiting == ZTRUE) {
- jam();
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
-
- const Uint32 transid1 = signal->theData[1];
- const Uint32 transid2 = signal->theData[2];
- ndbrequire(transid1 == tcConnectptr.p->transid[0] &&
- transid2 == tcConnectptr.p->transid[1]);
-
- // Update counter on tcConnectPtr
- if (tcConnectptr.p->tcTimer != 0){
- tcConnectptr.p->tcTimer = cLqhTimeOutCount;
- } else {
- jam();
- //ndbout << "SCAN_HBREP when tcTimer was off" << endl;
- }
-
- signal->theData[0] = tcConnectptr.p->clientConnectrec;
- signal->theData[1] = tcConnectptr.p->transid[0];
- signal->theData[2] = tcConnectptr.p->transid[1];
- sendSignal(tcConnectptr.p->clientBlockref,
- GSN_SCAN_HBREP, signal, 3, JBB);
- }//if
- break;
- case ScanRecord::COPY:
- // ndbout << "Dblqh::execSCAN_HBREP Dropping SCAN_HBREP" << endl;
- break;
- default:
- ndbrequire(false);
- }
-}
-
-void Dblqh::accScanConfScanLab(Signal* signal)
-{
- AccScanConf * const accScanConf = (AccScanConf *)&signal->theData[0];
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- /* -----------------------------------------------------------------------
- * PRECONDITION: SCAN_STATE = WAIT_ACC_SCAN
- * ----------------------------------------------------------------------- */
- if (accScanConf->flag == AccScanConf::ZEMPTY_FRAGMENT) {
- jam();
- /* ---------------------------------------------------------------------
- * THE FRAGMENT WAS EMPTY.
- * REPORT SUCCESSFUL COPYING.
- * --------------------------------------------------------------------- */
- tupScanCloseConfLab(signal);
- return;
- }//if
- scanptr.p->scanAccPtr = accScanConf->accPtr;
- if (scanptr.p->rangeScan) {
- jam();
- TuxBoundInfo* req = (TuxBoundInfo*)signal->getDataPtrSend();
- req->errorCode = RNIL;
- req->tuxScanPtrI = scanptr.p->scanAccPtr;
- Uint32 len = req->boundAiLength = copy_bounds(req->data, tcConnectptr.p);
- EXECUTE_DIRECT(DBTUX, GSN_TUX_BOUND_INFO, signal,
- TuxBoundInfo::SignalLength + len);
-
- jamEntry();
- if (req->errorCode != 0) {
- jam();
- /*
- * Cannot use STORED_PROCREF to abort since even the REF
- * returns a stored proc id. So record error and continue.
- * The scan is already Invalid in TUX and returns empty set.
- */
- tcConnectptr.p->errorCode = req->errorCode;
- }
- }
-
- scanptr.p->scanState = ScanRecord::WAIT_STORED_PROC_SCAN;
- if(scanptr.p->scanStoredProcId == RNIL)
- {
- jam();
- signal->theData[0] = tcConnectptr.p->tupConnectrec;
- signal->theData[1] = tcConnectptr.p->tableref;
- signal->theData[2] = scanptr.p->scanSchemaVersion;
- signal->theData[3] = ZSTORED_PROC_SCAN;
-
- signal->theData[4] = scanptr.p->scanAiLength;
- sendSignal(tcConnectptr.p->tcTupBlockref,
- GSN_STORED_PROCREQ, signal, 5, JBB);
-
- signal->theData[0] = tcConnectptr.p->tupConnectrec;
- AttrbufPtr regAttrinbufptr;
- Uint32 firstAttr = regAttrinbufptr.i = tcConnectptr.p->firstAttrinbuf;
- while (regAttrinbufptr.i != RNIL) {
- ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
- jam();
- Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN];
- ndbrequire(dataLen != 0);
- // first 3 words already set in STORED_PROCREQ
- MEMCOPY_NO_WORDS(&signal->theData[3],
- &regAttrinbufptr.p->attrbuf[0],
- dataLen);
- sendSignal(tcConnectptr.p->tcTupBlockref,
- GSN_ATTRINFO, signal, dataLen + 3, JBB);
- regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT];
- c_no_attrinbuf_recs++;
- }//while
-
- /**
- * Release attr info
- */
- if(firstAttr != RNIL)
- {
- regAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = cfirstfreeAttrinbuf;
- cfirstfreeAttrinbuf = firstAttr;
- tcConnectptr.p->firstAttrinbuf = tcConnectptr.p->lastAttrinbuf = RNIL;
- }
- }
- else
- {
- jam();
- storedProcConfScanLab(signal);
- }
-}//Dblqh::accScanConfScanLab()
-
-#define print_buf(s,idx,len) {\
- printf(s); Uint32 t2=len; DatabufPtr t3; t3.i = idx; \
- while(t3.i != RNIL && t2-- > 0){\
- ptrCheckGuard(t3, cdatabufFileSize, databuf);\
- printf("%d ", t3.i); t3.i= t3.p->nextDatabuf;\
- } printf("\n"); }
-
-Uint32
-Dblqh::copy_bounds(Uint32 * dst, TcConnectionrec* tcPtrP)
-{
- /**
- * copy_bounds handles multiple bounds by storing the length of each bound
- * in the upper 16 bits of that bound's first word (the word that also
- * specifies the bound type).
- *
- */
-
- DatabufPtr regDatabufptr;
- Uint32 left = 4 - tcPtrP->m_offset_current_keybuf; // left in buf
- Uint32 totalLen = tcPtrP->primKeyLen - 4;
- regDatabufptr.i = tcPtrP->firstTupkeybuf;
-
- ndbassert(tcPtrP->primKeyLen >= 4);
- ndbassert(tcPtrP->m_offset_current_keybuf < 4);
- ndbassert(!(totalLen == 0 && regDatabufptr.i != RNIL));
- ndbassert(!(totalLen != 0 && regDatabufptr.i == RNIL));
-
- if(totalLen)
- {
- ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
- Uint32 sig0 = regDatabufptr.p->data[0];
- Uint32 sig1 = regDatabufptr.p->data[1];
- Uint32 sig2 = regDatabufptr.p->data[2];
- Uint32 sig3 = regDatabufptr.p->data[3];
-
- switch(left){
- case 4:
- * dst++ = sig0;
- case 3:
- * dst++ = sig1;
- case 2:
- * dst++ = sig2;
- case 1:
- * dst++ = sig3;
- }
-
- Uint32 first = (* (dst - left)); // First word in range
-
- // Length of this range
- Uint8 offset;
- const Uint32 len = (first >> 16) ? (first >> 16) : totalLen;
- tcPtrP->m_scan_curr_range_no = (first & 0xFFF0) >> 4;
- (* (dst - left)) = (first & 0xF); // Remove length & range no
-
- if(len < left)
- {
- offset = len;
- }
- else
- {
- Databuf * lastP;
- left = (len - left);
- regDatabufptr.i = regDatabufptr.p->nextDatabuf;
-
- while(left >= 4)
- {
- left -= 4;
- lastP = regDatabufptr.p;
- ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
- sig0 = regDatabufptr.p->data[0];
- sig1 = regDatabufptr.p->data[1];
- sig2 = regDatabufptr.p->data[2];
- sig3 = regDatabufptr.p->data[3];
- regDatabufptr.i = regDatabufptr.p->nextDatabuf;
-
- * dst++ = sig0;
- * dst++ = sig1;
- * dst++ = sig2;
- * dst++ = sig3;
- }
-
- if(left > 0)
- {
- lastP = regDatabufptr.p;
- ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
- sig0 = regDatabufptr.p->data[0];
- sig1 = regDatabufptr.p->data[1];
- sig2 = regDatabufptr.p->data[2];
- sig3 = regDatabufptr.p->data[3];
- * dst++ = sig0;
- * dst++ = sig1;
- * dst++ = sig2;
- * dst++ = sig3;
- }
- else
- {
- lastP = regDatabufptr.p;
- }
- offset = left & 3;
- lastP->nextDatabuf = cfirstfreeDatabuf;
- cfirstfreeDatabuf = tcPtrP->firstTupkeybuf;
- ndbassert(cfirstfreeDatabuf != RNIL);
- }
-
- if(len == totalLen && regDatabufptr.i != RNIL)
- {
- regDatabufptr.p->nextDatabuf = cfirstfreeDatabuf;
- cfirstfreeDatabuf = regDatabufptr.i;
- tcPtrP->lastTupkeybuf = regDatabufptr.i = RNIL;
- ndbassert(cfirstfreeDatabuf != RNIL);
- }
-
- tcPtrP->m_offset_current_keybuf = offset;
- tcPtrP->firstTupkeybuf = regDatabufptr.i;
- tcPtrP->primKeyLen = 4 + totalLen - len;
-
- return len;
- }
- return totalLen;
-}
-
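Based on the masks used in copy_bounds() above, the first word of each range appears to pack the bound type in bits 0-3, the range number in bits 4-15 and the per-range length in words in bits 16-31, with a length of 0 meaning a single range that uses the remaining key length. The helpers below are a sketch under those assumptions, purely for illustration; they are not part of the block.

#include <cstdint>
#include <cstdio>

static uint32_t packFirstWord(uint32_t boundType, uint32_t rangeNo, uint32_t lenWords)
{
  return (boundType & 0xF) | ((rangeNo & 0xFFF) << 4) | (lenWords << 16);
}

static void unpackFirstWord(uint32_t w,
                            uint32_t& boundType, uint32_t& rangeNo, uint32_t& lenWords)
{
  boundType = w & 0xF;                 // what copy_bounds() leaves behind in *dst
  rangeNo   = (w & 0xFFF0) >> 4;       // stored into m_scan_curr_range_no
  lenWords  = w >> 16;                 // 0 => only one range in the key buffer
}

int main()
{
  uint32_t t, r, l;
  unpackFirstWord(packFirstWord(2, 7, 5), t, r, l);
  std::printf("type=%u range=%u len=%u\n", t, r, l);
  return 0;
}

With this layout a single key buffer can carry several ordered-index bounds back to back, which is what the loop over totalLen in copy_bounds() consumes one range at a time.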
-/* -------------------------------------------------------------------------
- * ENTER STORED_PROCCONF WITH
- * TC_CONNECTPTR,
- * TSTORED_PROC_ID
- * -------------------------------------------------------------------------
- * PRECONDITION: SCAN_STATE = WAIT_STORED_PROC_SCAN
- * ------------------------------------------------------------------------- */
-void Dblqh::storedProcConfScanLab(Signal* signal)
-{
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if (scanptr.p->scanCompletedStatus == ZTRUE) {
- jam();
- // STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
- closeScanLab(signal);
- return;
- }//if
- switch (fragptr.p->fragStatus) {
- case Fragrecord::FSACTIVE:
- jam();
- linkActiveFrag(signal);
- break;
- case Fragrecord::BLOCKED:
- jam();
- linkFragQueue(signal);
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_FIRST_STOPPED;
- return;
- break;
- case Fragrecord::FREE:
- jam();
- case Fragrecord::ACTIVE_CREATION:
- jam();
- case Fragrecord::CRASH_RECOVERING:
- jam();
- case Fragrecord::DEFINED:
- jam();
- case Fragrecord::REMOVING:
- jam();
- default:
- ndbrequire(false);
- break;
- }//switch
- continueFirstScanAfterBlockedLab(signal);
-}//Dblqh::storedProcConfScanLab()
-
-void Dblqh::continueFirstScanAfterBlockedLab(Signal* signal)
-{
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN;
- signal->theData[0] = scanptr.p->scanAccPtr;
- signal->theData[1] = RNIL;
- signal->theData[2] = NextScanReq::ZSCAN_NEXT;
- sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
- return;
-}//Dblqh::continueFirstScanAfterBlockedLab()
-
-/* -------------------------------------------------------------------------
- * When executing a scan we must come up to the surface at times to make
- * sure we can quickly start local checkpoints.
- * ------------------------------------------------------------------------- */
-void Dblqh::execCHECK_LCP_STOP(Signal* signal)
-{
- jamEntry();
- scanptr.i = signal->theData[0];
- c_scanRecordPool.getPtr(scanptr);
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if (signal->theData[1] == ZTRUE) {
- jam();
- releaseActiveFrag(signal);
- signal->theData[0] = ZCHECK_LCP_STOP_BLOCKED;
- signal->theData[1] = scanptr.i;
- sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
- signal->theData[0] = RNIL;
- return;
- }//if
- if (fragptr.p->fragStatus != Fragrecord::FSACTIVE) {
- ndbrequire(fragptr.p->fragStatus == Fragrecord::BLOCKED);
- releaseActiveFrag(signal);
- linkFragQueue(signal);
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_CHECK_STOPPED;
- signal->theData[0] = RNIL;
- }//if
-}//Dblqh::execCHECK_LCP_STOP()
-
-void Dblqh::checkLcpStopBlockedLab(Signal* signal)
-{
- switch (fragptr.p->fragStatus) {
- case Fragrecord::FSACTIVE:
- jam();
- linkActiveFrag(signal);
- continueAfterCheckLcpStopBlocked(signal);
- break;
- case Fragrecord::BLOCKED:
- jam();
- linkFragQueue(signal);
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_CHECK_STOPPED;
- return;
- break;
- case Fragrecord::FREE:
- jam();
- case Fragrecord::ACTIVE_CREATION:
- jam();
- case Fragrecord::CRASH_RECOVERING:
- jam();
- case Fragrecord::DEFINED:
- jam();
- case Fragrecord::REMOVING:
- jam();
- default:
- ndbrequire(false);
- }//switch
-}//Dblqh::checkLcpStopBlockedLab()
-
-void Dblqh::continueAfterCheckLcpStopBlocked(Signal* signal)
-{
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- signal->theData[0] = scanptr.p->scanAccPtr;
- signal->theData[1] = AccCheckScan::ZNOT_CHECK_LCP_STOP;
- EXECUTE_DIRECT(refToBlock(scanptr.p->scanBlockref), GSN_ACC_CHECK_SCAN,
- signal, 2);
-}//Dblqh::continueAfterCheckLcpStopBlocked()
-
-/* -------------------------------------------------------------------------
- * ENTER NEXT_SCANCONF
- * -------------------------------------------------------------------------
- * PRECONDITION: SCAN_STATE = WAIT_NEXT_SCAN
- * ------------------------------------------------------------------------- */
-void Dblqh::nextScanConfScanLab(Signal* signal)
-{
- NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0];
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- if (nextScanConf->fragId == RNIL) {
- jam();
- /* ---------------------------------------------------------------------
- * THERE ARE NO MORE TUPLES TO FETCH. IF WE HAVE ANY
- * OPERATIONS STILL NEEDING A LOCK WE REPORT TO THE
- * APPLICATION AND CLOSE THE SCAN WHEN THE NEXT SCAN
- * REQUEST IS RECEIVED. IF WE DO NOT HAVE ANY NEED FOR
- * LOCKS WE CAN CLOSE THE SCAN IMMEDIATELY.
- * --------------------------------------------------------------------- */
- releaseActiveFrag(signal);
- /*************************************************************
- * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
- ************************************************************ */
- if (!scanptr.p->scanLockHold)
- {
- jam();
- closeScanLab(signal);
- return;
- }
-
- if (scanptr.p->scanCompletedStatus == ZTRUE) {
- if ((scanptr.p->scanLockHold == ZTRUE) &&
- (scanptr.p->m_curr_batch_size_rows > 0)) {
- jam();
- scanptr.p->scanReleaseCounter = 1;
- scanReleaseLocksLab(signal);
- return;
- }//if
- jam();
- closeScanLab(signal);
- return;
- }//if
-
- if (scanptr.p->m_curr_batch_size_rows > 0) {
- jam();
-
- if((tcConnectptr.p->primKeyLen - 4) == 0)
- scanptr.p->scanCompletedStatus = ZTRUE;
-
- scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
- sendScanFragConf(signal, ZFALSE);
- return;
- }//if
- closeScanLab(signal);
- return;
- }//if
-
- // If accOperationPtr == RNIL no record was returned by ACC
- if (nextScanConf->accOperationPtr == RNIL) {
- jam();
- /*************************************************************
- * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
- ************************************************************ */
- if (scanptr.p->scanCompletedStatus == ZTRUE) {
- releaseActiveFrag(signal);
- if ((scanptr.p->scanLockHold == ZTRUE) &&
- (scanptr.p->m_curr_batch_size_rows > 0)) {
- jam();
- scanptr.p->scanReleaseCounter = 1;
- scanReleaseLocksLab(signal);
- return;
- }//if
- jam();
- closeScanLab(signal);
- return;
- }//if
-
- if (scanptr.p->m_curr_batch_size_rows > 0) {
- jam();
- releaseActiveFrag(signal);
- scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
- sendScanFragConf(signal, ZFALSE);
- return;
- }//if
-
- signal->theData[0] = scanptr.p->scanAccPtr;
- signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
- sendSignal(scanptr.p->scanBlockref,
- GSN_ACC_CHECK_SCAN, signal, 2, JBB);
- return;
- }//if
- jam();
- set_acc_ptr_in_scan_record(scanptr.p,
- scanptr.p->m_curr_batch_size_rows,
- nextScanConf->accOperationPtr);
- jam();
- scanptr.p->scanLocalref[0] = nextScanConf->localKey[0];
- scanptr.p->scanLocalref[1] = nextScanConf->localKey[1];
- scanptr.p->scanLocalFragid = nextScanConf->fragId;
- nextScanConfLoopLab(signal);
-}//Dblqh::nextScanConfScanLab()
-
-void Dblqh::nextScanConfLoopLab(Signal* signal)
-{
- /* ----------------------------------------------------------------------
- * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
- * ---------------------------------------------------------------------- */
- if (scanptr.p->scanCompletedStatus == ZTRUE) {
- jam();
- releaseActiveFrag(signal);
- if ((scanptr.p->scanLockHold == ZTRUE) &&
- (scanptr.p->m_curr_batch_size_rows > 0)) {
- jam();
- scanptr.p->scanReleaseCounter = 1;
- scanReleaseLocksLab(signal);
- return;
- }//if
- closeScanLab(signal);
- return;
- }//if
- jam();
- Uint32 tableRef;
- Uint32 tupFragPtr;
- Uint32 reqinfo = (scanptr.p->scanLockHold == ZFALSE);
- reqinfo = reqinfo + (tcConnectptr.p->operation << 6);
- reqinfo = reqinfo + (tcConnectptr.p->opExec << 10);
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_TUPKEY;
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if (! scanptr.p->rangeScan) {
- tableRef = tcConnectptr.p->tableref;
- tupFragPtr = fragptr.p->tupFragptr[scanptr.p->scanLocalFragid & 1];
- } else {
- jam();
- // for ordered index use primary table
- FragrecordPtr tFragPtr;
- tFragPtr.i = fragptr.p->tableFragptr;
- ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
- tableRef = tFragPtr.p->tabRef;
- tupFragPtr = tFragPtr.p->tupFragptr[scanptr.p->scanLocalFragid & 1];
- }
- {
- jam();
- TupKeyReq * const tupKeyReq = (TupKeyReq *)signal->getDataPtrSend();
-
- tupKeyReq->connectPtr = tcConnectptr.p->tupConnectrec;
- tupKeyReq->request = reqinfo;
- tupKeyReq->tableRef = tableRef;
- tupKeyReq->fragId = scanptr.p->scanLocalFragid;
- tupKeyReq->keyRef1 = scanptr.p->scanLocalref[0];
- tupKeyReq->keyRef2 = scanptr.p->scanLocalref[1];
- tupKeyReq->attrBufLen = 0;
- tupKeyReq->opRef = scanptr.p->scanApiOpPtr;
- tupKeyReq->applRef = scanptr.p->scanApiBlockref;
- tupKeyReq->schemaVersion = scanptr.p->scanSchemaVersion;
- tupKeyReq->storedProcedure = scanptr.p->scanStoredProcId;
- tupKeyReq->transId1 = tcConnectptr.p->transid[0];
- tupKeyReq->transId2 = tcConnectptr.p->transid[1];
- tupKeyReq->fragPtr = tupFragPtr;
- tupKeyReq->primaryReplica = (tcConnectptr.p->seqNoReplica == 0)?true:false;
- tupKeyReq->coordinatorTC = tcConnectptr.p->tcBlockref;
- tupKeyReq->tcOpIndex = tcConnectptr.p->tcOprec;
- tupKeyReq->savePointId = tcConnectptr.p->savePointId;
- Uint32 blockNo = refToBlock(tcConnectptr.p->tcTupBlockref);
- EXECUTE_DIRECT(blockNo, GSN_TUPKEYREQ, signal,
- TupKeyReq::SignalLength);
- }
-}
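-
-/* Illustrative sketch of the request-word packing used just above: bit 0 is
- * set when the scan does not hold locks, the operation code is shifted in
- * from bit 6 and the opExec flag from bit 10. A minimal standalone helper,
- * assuming these parameter names; it only mirrors the arithmetic in
- * nextScanConfLoopLab / copySendTupkeyReqLab. */
-#include <cstdint>
-static inline uint32_t packTupKeyRequest(bool lockHoldOff,   // true when scanLockHold == ZFALSE
-                                         uint32_t operation, // e.g. ZREAD
-                                         uint32_t opExec)    // interpreted-program flag
-{
-  uint32_t reqinfo = lockHoldOff ? 1 : 0;
-  reqinfo += operation << 6;
-  reqinfo += opExec << 10;
-  return reqinfo;
-}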
-
-/* -------------------------------------------------------------------------
- * RECEPTION OF FURTHER KEY INFORMATION WHEN KEY SIZE > 16 BYTES.
- * -------------------------------------------------------------------------
- * PRECONDITION: SCAN_STATE = WAIT_SCAN_KEYINFO
- * ------------------------------------------------------------------------- */
-void
-Dblqh::keyinfoLab(const Uint32 * src, const Uint32 * end)
-{
- do {
- jam();
- seizeTupkeybuf(0);
- databufptr.p->data[0] = * src ++;
- databufptr.p->data[1] = * src ++;
- databufptr.p->data[2] = * src ++;
- databufptr.p->data[3] = * src ++;
- } while (src < end);
-}//Dblqh::keyinfoLab()
-
-Uint32
-Dblqh::readPrimaryKeys(ScanRecord *scanP, TcConnectionrec *tcConP, Uint32 *dst)
-{
- Uint32 tableId = tcConP->tableref;
- Uint32 fragId = scanP->scanLocalFragid;
- Uint32 fragPageId = scanP->scanLocalref[0];
- Uint32 pageIndex = scanP->scanLocalref[1];
-
- if(scanP->rangeScan)
- {
- jam();
- // for ordered index use primary table
- FragrecordPtr tFragPtr;
- tFragPtr.i = fragptr.p->tableFragptr;
- ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
- tableId = tFragPtr.p->tabRef;
- }
-
- int ret = c_tup->accReadPk(tableId, fragId, fragPageId, pageIndex, dst, false);
- if(0)
- ndbout_c("readPrimaryKeys(table: %d fragment: %d [ %d %d ] -> %d",
- tableId, fragId, fragPageId, pageIndex, ret);
- ndbassert(ret > 0);
-
- return ret;
-}
-
-/* -------------------------------------------------------------------------
- * ENTER TUPKEYCONF
- * -------------------------------------------------------------------------
- * PRECONDITION: TRANSACTION_STATE = SCAN_TUPKEY
- * ------------------------------------------------------------------------- */
-void Dblqh::scanTupkeyConfLab(Signal* signal)
-{
- const TupKeyConf * conf = (TupKeyConf *)signal->getDataPtr();
- UintR tdata4 = conf->readLength;
- UintR tdata5 = conf->lastRow;
-
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
- scanptr.i = tcConnectptr.p->tcScanRec;
- releaseActiveFrag(signal);
- c_scanRecordPool.getPtr(scanptr);
- if (scanptr.p->scanCompletedStatus == ZTRUE) {
- /* ---------------------------------------------------------------------
- * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
- * --------------------------------------------------------------------- */
- if ((scanptr.p->scanLockHold == ZTRUE) &&
- (scanptr.p->m_curr_batch_size_rows > 0)) {
- jam();
- scanptr.p->scanReleaseCounter = 1;
- scanReleaseLocksLab(signal);
- return;
- }//if
- jam();
- closeScanLab(signal);
- return;
- }//if
- if (scanptr.p->scanKeyinfoFlag) {
- jam();
-    // Inform API about keyinfo len as well
- tdata4 += sendKeyinfo20(signal, scanptr.p, tcConnectptr.p);
- }//if
- ndbrequire(scanptr.p->m_curr_batch_size_rows < MAX_PARALLEL_OP_PER_SCAN);
- scanptr.p->m_curr_batch_size_bytes+= tdata4;
- scanptr.p->m_curr_batch_size_rows++;
- scanptr.p->m_last_row = tdata5;
- if (scanptr.p->check_scan_batch_completed() | tdata5){
- if (scanptr.p->scanLockHold == ZTRUE) {
- jam();
- scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
- sendScanFragConf(signal, ZFALSE);
- return;
- } else {
- jam();
- scanptr.p->scanReleaseCounter = scanptr.p->m_curr_batch_size_rows;
- scanReleaseLocksLab(signal);
- return;
- }
- } else {
- if (scanptr.p->scanLockHold == ZTRUE) {
- jam();
- scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT;
- } else {
- jam();
- scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_COMMIT;
- }
- }
- scanNextLoopLab(signal);
-}//Dblqh::scanTupkeyConfLab()
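-
-/* Illustrative sketch of the batch bookkeeping above: rows and bytes are
- * accumulated and the batch ends when a limit is hit or TUP reported the
- * last row. This is only the assumed shape of the combined condition around
- * check_scan_batch_completed(); the real member function may differ. */
-#include <cstdint>
-struct BatchState {
-  uint32_t curr_rows;   // m_curr_batch_size_rows
-  uint32_t curr_bytes;  // m_curr_batch_size_bytes
-  uint32_t max_rows;    // m_max_batch_size_rows
-  uint32_t max_bytes;   // m_max_batch_size_bytes
-};
-static inline bool scanBatchCompleted(const BatchState& b, bool lastRow)
-{
-  return lastRow
-      || b.curr_rows >= b.max_rows
-      || (b.max_bytes != 0 && b.curr_bytes >= b.max_bytes);
-}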
-
-void Dblqh::scanNextLoopLab(Signal* signal)
-{
- switch (fragptr.p->fragStatus) {
- case Fragrecord::FSACTIVE:
- jam();
- linkActiveFrag(signal);
- break;
- case Fragrecord::BLOCKED:
- jam();
- linkFragQueue(signal);
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STOPPED;
- return;
- break;
- case Fragrecord::FREE:
- jam();
- case Fragrecord::ACTIVE_CREATION:
- jam();
- case Fragrecord::CRASH_RECOVERING:
- jam();
- case Fragrecord::DEFINED:
- jam();
- case Fragrecord::REMOVING:
- jam();
- default:
- ndbrequire(false);
- }//switch
- continueScanAfterBlockedLab(signal);
-}//Dblqh::scanNextLoopLab()
-
-void Dblqh::continueScanAfterBlockedLab(Signal* signal)
-{
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- Uint32 accOpPtr;
- if (scanptr.p->scanFlag == NextScanReq::ZSCAN_NEXT_ABORT) {
- jam();
- scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_COMMIT;
- accOpPtr= get_acc_ptr_from_scan_record(scanptr.p,
- scanptr.p->m_curr_batch_size_rows,
- false);
- scanptr.p->scan_acc_index--;
- } else if (scanptr.p->scanFlag == NextScanReq::ZSCAN_NEXT_COMMIT) {
- jam();
- accOpPtr= get_acc_ptr_from_scan_record(scanptr.p,
- scanptr.p->m_curr_batch_size_rows-1,
- false);
- } else {
- jam();
- accOpPtr = RNIL; // The value is not used in ACC
- }//if
- scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN;
- signal->theData[0] = scanptr.p->scanAccPtr;
- signal->theData[1] = accOpPtr;
- signal->theData[2] = scanptr.p->scanFlag;
- sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
-}//Dblqh::continueScanAfterBlockedLab()
-
-/* -------------------------------------------------------------------------
- * ENTER TUPKEYREF WITH
- * TC_CONNECTPTR,
- * TERROR_CODE
- * -------------------------------------------------------------------------
- * PRECONDITION: TRANSACTION_STATE = SCAN_TUPKEY
- * ------------------------------------------------------------------------- */
-void Dblqh::scanTupkeyRefLab(Signal* signal)
-{
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
- scanptr.i = tcConnectptr.p->tcScanRec;
- releaseActiveFrag(signal);
- c_scanRecordPool.getPtr(scanptr);
- if (scanptr.p->scanCompletedStatus == ZTRUE) {
- /* ---------------------------------------------------------------------
- * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
- * --------------------------------------------------------------------- */
- if ((scanptr.p->scanLockHold == ZTRUE) &&
- (scanptr.p->m_curr_batch_size_rows > 0)) {
- jam();
- scanptr.p->scanReleaseCounter = 1;
- scanReleaseLocksLab(signal);
- return;
- }//if
- jam();
- closeScanLab(signal);
- return;
- }//if
- if ((terrorCode != ZSEARCH_CONDITION_FALSE) &&
- (terrorCode != ZNO_TUPLE_FOUND) &&
- (terrorCode >= ZUSER_ERROR_CODE_LIMIT)) {
- scanptr.p->scanErrorCounter++;
- tcConnectptr.p->errorCode = terrorCode;
-
- if (scanptr.p->scanLockHold == ZTRUE) {
- jam();
- scanptr.p->scanReleaseCounter = 1;
- } else {
- jam();
- scanptr.p->m_curr_batch_size_rows++;
- scanptr.p->scanReleaseCounter = scanptr.p->m_curr_batch_size_rows;
- }//if
- /* --------------------------------------------------------------------
- * WE NEED TO RELEASE ALL LOCKS CURRENTLY
- * HELD BY THIS SCAN.
- * -------------------------------------------------------------------- */
- scanReleaseLocksLab(signal);
- return;
- }//if
- Uint32 time_passed= tcConnectptr.p->tcTimer - cLqhTimeOutCount;
- if (scanptr.p->m_curr_batch_size_rows > 0) {
- if (time_passed > 1) {
-      /* -----------------------------------------------------------------------
-       * WE NEED TO ENSURE THAT WE DO NOT SEARCH FOR THE NEXT TUPLE FOR A
-       * LONG TIME WHILE WE KEEP A LOCK ON A FOUND TUPLE. WE WOULD RATHER REPORT
-       * THE FOUND TUPLE IF FOUND TUPLES ARE RARE. If more than 10 ms have passed
-       * we send the found tuples to the API.
-       * ----------------------------------------------------------------------- */
- scanptr.p->scanReleaseCounter = scanptr.p->m_curr_batch_size_rows + 1;
- scanReleaseLocksLab(signal);
- return;
- }
- } else {
- if (time_passed > 10) {
- jam();
- signal->theData[0]= scanptr.i;
- signal->theData[1]= tcConnectptr.p->transid[0];
- signal->theData[2]= tcConnectptr.p->transid[1];
- execSCAN_HBREP(signal);
- }
- }
- scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_ABORT;
- scanNextLoopLab(signal);
-}//Dblqh::scanTupkeyRefLab()
-
-/* -------------------------------------------------------------------------
- * THE SCAN HAS BEEN COMPLETED. EITHER BY REACHING THE END OR BY COMMAND
- * FROM THE APPLICATION OR BY SOME SORT OF ERROR CONDITION.
- * ------------------------------------------------------------------------- */
-void Dblqh::closeScanLab(Signal* signal)
-{
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- switch (fragptr.p->fragStatus) {
- case Fragrecord::FSACTIVE:
- jam();
- linkActiveFrag(signal);
- break;
- case Fragrecord::BLOCKED:
- jam();
- linkFragQueue(signal);
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_CLOSE_STOPPED;
- return;
- break;
- case Fragrecord::FREE:
- jam();
- case Fragrecord::ACTIVE_CREATION:
- jam();
- case Fragrecord::CRASH_RECOVERING:
- jam();
- case Fragrecord::DEFINED:
- jam();
- case Fragrecord::REMOVING:
- jam();
- default:
- ndbrequire(false);
- }//switch
- continueCloseScanAfterBlockedLab(signal);
-}//Dblqh::closeScanLab()
-
-void Dblqh::continueCloseScanAfterBlockedLab(Signal* signal)
-{
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- scanptr.p->scanState = ScanRecord::WAIT_CLOSE_SCAN;
- signal->theData[0] = scanptr.p->scanAccPtr;
- signal->theData[1] = RNIL;
- signal->theData[2] = NextScanReq::ZSCAN_CLOSE;
- sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
-}//Dblqh::continueCloseScanAfterBlockedLab()
-
-/* -------------------------------------------------------------------------
- * ENTER NEXT_SCANCONF
- * -------------------------------------------------------------------------
- * PRECONDITION: SCAN_STATE = WAIT_CLOSE_SCAN
- * ------------------------------------------------------------------------- */
-void Dblqh::accScanCloseConfLab(Signal* signal)
-{
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
-
- if((tcConnectptr.p->primKeyLen - 4) > 0 &&
- scanptr.p->scanCompletedStatus != ZTRUE)
- {
- jam();
- releaseActiveFrag(signal);
- continueAfterReceivingAllAiLab(signal);
- return;
- }
-
- scanptr.p->scanState = ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN;
- signal->theData[0] = tcConnectptr.p->tupConnectrec;
- signal->theData[1] = tcConnectptr.p->tableref;
- signal->theData[2] = scanptr.p->scanSchemaVersion;
- signal->theData[3] = ZDELETE_STORED_PROC_ID;
- signal->theData[4] = scanptr.p->scanStoredProcId;
- sendSignal(tcConnectptr.p->tcTupBlockref,
- GSN_STORED_PROCREQ, signal, 5, JBB);
-}//Dblqh::accScanCloseConfLab()
-
-/* -------------------------------------------------------------------------
- * ENTER STORED_PROCCONF WITH
- * -------------------------------------------------------------------------
- * PRECONDITION: SCAN_STATE = WAIT_DELETE_STORED_PROC_ID_SCAN
- * ------------------------------------------------------------------------- */
-void Dblqh::tupScanCloseConfLab(Signal* signal)
-{
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if (tcConnectptr.p->abortState == TcConnectionrec::NEW_FROM_TC) {
- jam();
- tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec;
- ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
- tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1;
- signal->theData[0] = ZLQH_TRANS_NEXT;
- signal->theData[1] = tcNodeFailptr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- } else if (tcConnectptr.p->errorCode != 0) {
- jam();
- ScanFragRef * ref = (ScanFragRef*)&signal->theData[0];
- ref->senderData = tcConnectptr.p->clientConnectrec;
- ref->transId1 = tcConnectptr.p->transid[0];
- ref->transId2 = tcConnectptr.p->transid[1];
- ref->errorCode = tcConnectptr.p->errorCode;
- sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGREF, signal,
- ScanFragRef::SignalLength, JBB);
- } else {
- jam();
- sendScanFragConf(signal, ZSCAN_FRAG_CLOSED);
- }//if
- finishScanrec(signal);
- releaseScanrec(signal);
- tcConnectptr.p->tcScanRec = RNIL;
- deleteTransidHash(signal);
- releaseOprec(signal);
- releaseTcrec(signal, tcConnectptr);
-}//Dblqh::tupScanCloseConfLab()
-
-/* =========================================================================
- * ======= INITIATE SCAN RECORD =======
- *
- * SUBROUTINE SHORT NAME = ISC
- * ========================================================================= */
-Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
-{
- const Uint32 reqinfo = scanFragReq->requestInfo;
- const Uint32 max_rows = scanFragReq->batch_size_rows;
- const Uint32 max_bytes = scanFragReq->batch_size_bytes;
- const Uint32 scanLockMode = ScanFragReq::getLockMode(reqinfo);
- const Uint32 scanLockHold = ScanFragReq::getHoldLockFlag(reqinfo);
- const Uint32 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo);
- const Uint32 readCommitted = ScanFragReq::getReadCommittedFlag(reqinfo);
- const Uint32 rangeScan = ScanFragReq::getRangeScanFlag(reqinfo);
- const Uint32 descending = ScanFragReq::getDescendingFlag(reqinfo);
- const Uint32 tupScan = ScanFragReq::getTupScanFlag(reqinfo);
- const Uint32 attrLen = ScanFragReq::getAttrLen(reqinfo);
- const Uint32 scanPrio = ScanFragReq::getScanPrio(reqinfo);
-
- scanptr.p->scanKeyinfoFlag = keyinfo;
- scanptr.p->scanLockHold = scanLockHold;
- scanptr.p->scanCompletedStatus = ZFALSE;
- scanptr.p->scanType = ScanRecord::SCAN;
- scanptr.p->scanApiBlockref = scanFragReq->resultRef;
- scanptr.p->scanAiLength = attrLen;
- scanptr.p->scanTcrec = tcConnectptr.i;
- scanptr.p->scanSchemaVersion = scanFragReq->schemaVersion;
-
- scanptr.p->m_curr_batch_size_rows = 0;
- scanptr.p->m_curr_batch_size_bytes= 0;
- scanptr.p->m_max_batch_size_rows = max_rows;
- scanptr.p->m_max_batch_size_bytes = max_bytes;
-
- if (! rangeScan && ! tupScan)
- scanptr.p->scanBlockref = tcConnectptr.p->tcAccBlockref;
- else if (! tupScan)
- scanptr.p->scanBlockref = tcConnectptr.p->tcTuxBlockref;
- else
- scanptr.p->scanBlockref = tcConnectptr.p->tcTupBlockref;
-
- scanptr.p->scanErrorCounter = 0;
- scanptr.p->scanLockMode = scanLockMode;
- scanptr.p->readCommitted = readCommitted;
- scanptr.p->rangeScan = rangeScan;
- scanptr.p->descending = descending;
- scanptr.p->tupScan = tupScan;
- scanptr.p->scanState = ScanRecord::SCAN_FREE;
- scanptr.p->scanFlag = ZFALSE;
- scanptr.p->scanLocalref[0] = 0;
- scanptr.p->scanLocalref[1] = 0;
- scanptr.p->scanLocalFragid = 0;
- scanptr.p->scanTcWaiting = ZTRUE;
- scanptr.p->scanNumber = ~0;
- scanptr.p->scanApiOpPtr = scanFragReq->clientOpPtr;
- scanptr.p->m_last_row = 0;
- scanptr.p->scanStoredProcId = RNIL;
-
- if (max_rows == 0 || (max_bytes > 0 && max_rows > max_bytes)){
- jam();
- return ScanFragRef::ZWRONG_BATCH_SIZE;
- }
- if (!seize_acc_ptr_list(scanptr.p, max_rows)){
- jam();
- return ScanFragRef::ZTOO_MANY_ACTIVE_SCAN_ERROR;
- }
- /**
- * Used for scan take over
- */
- FragrecordPtr tFragPtr;
- tFragPtr.i = fragptr.p->tableFragptr;
- ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
- scanptr.p->fragPtrI = fragptr.p->tableFragptr;
-
- /**
-   * !idx scans use 1 - (MAX_PARALLEL_SCANS_PER_FRAG - 1) = 1-11
-   * idx scans use MAX_PARALLEL_SCANS_PER_FRAG - MAX = 12-42
- */
- Uint32 start = (rangeScan || tupScan ? MAX_PARALLEL_SCANS_PER_FRAG : 1 );
- Uint32 stop = (rangeScan || tupScan ? MAX_PARALLEL_INDEX_SCANS_PER_FRAG : MAX_PARALLEL_SCANS_PER_FRAG - 1);
- stop += start;
- Uint32 free = tFragPtr.p->m_scanNumberMask.find(start);
-
- if(free == Fragrecord::ScanNumberMask::NotFound || free >= stop){
- jam();
-
- if(scanPrio == 0){
- jam();
- return ScanFragRef::ZTOO_MANY_ACTIVE_SCAN_ERROR;
- }
-
- /**
- * Put on queue
- */
- scanptr.p->scanState = ScanRecord::IN_QUEUE;
- LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
- fragptr.p->m_queuedScans);
- queue.add(scanptr);
- return ZOK;
- }
-
- scanptr.p->scanNumber = free;
- tFragPtr.p->m_scanNumberMask.clear(free);// Update mask
-
- LocalDLList<ScanRecord> active(c_scanRecordPool, fragptr.p->m_activeScans);
- active.add(scanptr);
- if(scanptr.p->scanKeyinfoFlag){
- jam();
-#ifdef VM_TRACE
- ScanRecordPtr tmp;
- ndbrequire(!c_scanTakeOverHash.find(tmp, * scanptr.p));
-#endif
-#ifdef TRACE_SCAN_TAKEOVER
- ndbout_c("adding (%d %d) table: %d fragId: %d frag.i: %d tableFragptr: %d",
- scanptr.p->scanNumber, scanptr.p->fragPtrI,
- tabptr.i, scanFragReq->fragmentNoKeyLen & 0xFFFF,
- fragptr.i, fragptr.p->tableFragptr);
-#endif
- c_scanTakeOverHash.add(scanptr);
- }
- init_acc_ptr_list(scanptr.p);
- return ZOK;
-}
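-
-/* Illustrative sketch of the scan-number allocation in initScanrec above:
- * search the per-fragment bitmask for the first free number inside the
- * window reserved for this scan type, and queue the scan if the window is
- * full. The constants are assumed values chosen only so the example
- * compiles on its own. */
-#include <bitset>
-#include <cstdint>
-static const uint32_t kAccScans   = 12;   // stands in for MAX_PARALLEL_SCANS_PER_FRAG
-static const uint32_t kIndexScans = 31;   // stands in for MAX_PARALLEL_INDEX_SCANS_PER_FRAG
-
-// Returns the allocated scan number, or -1 when the window is exhausted
-// (the real code then queues the scan or rejects it depending on priority).
-static int allocScanNumber(std::bitset<64>& freeMask, bool indexScan)
-{
-  const uint32_t start = indexScan ? kAccScans : 1;
-  const uint32_t stop  = start + (indexScan ? kIndexScans : kAccScans - 1);
-  for (uint32_t i = start; i < stop; i++) {
-    if (freeMask.test(i)) {   // bit set means the scan number is free
-      freeMask.reset(i);      // claim it, like m_scanNumberMask.clear(free)
-      return (int)i;
-    }
-  }
-  return -1;
-}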
-
-/* =========================================================================
- * ======= INITIATE TC RECORD AT SCAN =======
- *
- * SUBROUTINE SHORT NAME = IST
- * ========================================================================= */
-void Dblqh::initScanTc(Signal* signal,
- Uint32 transid1,
- Uint32 transid2,
- Uint32 fragId,
- Uint32 nodeId)
-{
- tcConnectptr.p->transid[0] = transid1;
- tcConnectptr.p->transid[1] = transid2;
- tcConnectptr.p->tcScanRec = scanptr.i;
- tcConnectptr.p->tableref = tabptr.i;
- tcConnectptr.p->fragmentid = fragId;
- tcConnectptr.p->fragmentptr = fragptr.i;
- tcConnectptr.p->tcOprec = tcConnectptr.p->clientConnectrec;
- tcConnectptr.p->tcBlockref = tcConnectptr.p->clientBlockref;
- tcConnectptr.p->errorCode = 0;
- tcConnectptr.p->reclenAiLqhkey = 0;
- tcConnectptr.p->abortState = TcConnectionrec::ABORT_IDLE;
- tcConnectptr.p->nextReplica = nodeId;
- tcConnectptr.p->currTupAiLen = 0;
- tcConnectptr.p->opExec = 1;
- tcConnectptr.p->operation = ZREAD;
- tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
- tcConnectptr.p->commitAckMarker = RNIL;
- tcConnectptr.p->m_offset_current_keybuf = 0;
- tcConnectptr.p->m_scan_curr_range_no = 0;
-
- tabptr.p->usageCount++;
-}//Dblqh::initScanTc()
-
-/* =========================================================================
- * ======= FINISH SCAN RECORD =======
- *
- * REMOVE SCAN RECORD FROM PER FRAGMENT LIST.
- * ========================================================================= */
-void Dblqh::finishScanrec(Signal* signal)
-{
- release_acc_ptr_list(scanptr.p);
-
- LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
- fragptr.p->m_queuedScans);
-
- if(scanptr.p->scanState == ScanRecord::IN_QUEUE){
- jam();
- queue.release(scanptr);
- return;
- }
-
- if(scanptr.p->scanKeyinfoFlag){
- jam();
- ScanRecordPtr tmp;
-#ifdef TRACE_SCAN_TAKEOVER
- ndbout_c("removing (%d %d)", scanptr.p->scanNumber, scanptr.p->fragPtrI);
-#endif
- c_scanTakeOverHash.remove(tmp, * scanptr.p);
- ndbrequire(tmp.p == scanptr.p);
- }
-
- LocalDLList<ScanRecord> scans(c_scanRecordPool, fragptr.p->m_activeScans);
- scans.release(scanptr);
-
- FragrecordPtr tFragPtr;
- tFragPtr.i = scanptr.p->fragPtrI;
- ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
-
- const Uint32 scanNumber = scanptr.p->scanNumber;
- ndbrequire(!tFragPtr.p->m_scanNumberMask.get(scanNumber));
- ScanRecordPtr restart;
-
- /**
-   * Start one of the queued scans
- */
- if(scanNumber == NR_ScanNo || !queue.first(restart)){
- jam();
- tFragPtr.p->m_scanNumberMask.set(scanNumber);
- return;
- }
-
- if(ERROR_INSERTED(5034)){
- jam();
- tFragPtr.p->m_scanNumberMask.set(scanNumber);
- return;
- }
-
- ndbrequire(restart.p->scanState == ScanRecord::IN_QUEUE);
-
- ScanRecordPtr tmpScan = scanptr;
- TcConnectionrecPtr tmpTc = tcConnectptr;
-
- tcConnectptr.i = restart.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- restart.p->scanNumber = scanNumber;
-
- queue.remove(restart);
- scans.add(restart);
- if(restart.p->scanKeyinfoFlag){
- jam();
-#ifdef VM_TRACE
- ScanRecordPtr tmp;
- ndbrequire(!c_scanTakeOverHash.find(tmp, * restart.p));
-#endif
- c_scanTakeOverHash.add(restart);
-#ifdef TRACE_SCAN_TAKEOVER
- ndbout_c("adding-r (%d %d)", restart.p->scanNumber, restart.p->fragPtrI);
-#endif
- }
-
- restart.p->scanState = ScanRecord::SCAN_FREE; // set in initScanRec
- if(tcConnectptr.p->transactionState == TcConnectionrec::SCAN_STATE_USED)
- {
- jam();
- scanptr = restart;
- continueAfterReceivingAllAiLab(signal);
- }
- else
- {
- ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::WAIT_SCAN_AI);
- }
- scanptr = tmpScan;
- tcConnectptr = tmpTc;
-}//Dblqh::finishScanrec()
-
-/* =========================================================================
- * ======= RELEASE SCAN RECORD =======
- *
- * RELEASE A SCAN RECORD TO THE FREELIST.
- * ========================================================================= */
-void Dblqh::releaseScanrec(Signal* signal)
-{
- scanptr.p->scanState = ScanRecord::SCAN_FREE;
- scanptr.p->scanType = ScanRecord::ST_IDLE;
- scanptr.p->scanTcWaiting = ZFALSE;
- cbookedAccOps -= scanptr.p->m_max_batch_size_rows;
- cscanNoFreeRec++;
-}//Dblqh::releaseScanrec()
-
-/* ------------------------------------------------------------------------
- * ------- SEND KEYINFO20 TO API -------
- *
- * ------------------------------------------------------------------------ */
-Uint32 Dblqh::sendKeyinfo20(Signal* signal,
- ScanRecord * scanP,
- TcConnectionrec * tcConP)
-{
- ndbrequire(scanP->m_curr_batch_size_rows < MAX_PARALLEL_OP_PER_SCAN);
- KeyInfo20 * keyInfo = (KeyInfo20 *)&signal->theData[0];
-
- /**
- * Note that this code requires signal->theData to be big enough for
-   * an entire key
- */
- const BlockReference ref = scanP->scanApiBlockref;
- const Uint32 scanOp = scanP->m_curr_batch_size_rows;
- const Uint32 nodeId = refToNode(ref);
- const bool connectedToNode = getNodeInfo(nodeId).m_connected;
- const Uint32 type = getNodeInfo(nodeId).m_type;
- const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
- const bool old_dest = (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0));
- const bool longable = true; // TODO is_api && !old_dest;
-
- Uint32 * dst = keyInfo->keyData;
- dst += nodeId == getOwnNodeId() ? 0 : KeyInfo20::DataLength;
-
- Uint32 keyLen = readPrimaryKeys(scanP, tcConP, dst);
- Uint32 fragId = tcConP->fragmentid;
- keyInfo->clientOpPtr = scanP->scanApiOpPtr;
- keyInfo->keyLen = keyLen;
- keyInfo->scanInfo_Node =
- KeyInfo20::setScanInfo(scanOp, scanP->scanNumber) + (fragId << 20);
- keyInfo->transId1 = tcConP->transid[0];
- keyInfo->transId2 = tcConP->transid[1];
-
- Uint32 * src = signal->theData+25;
- if(connectedToNode){
- jam();
-
- if(nodeId != getOwnNodeId()){
- jam();
-
- if(keyLen <= KeyInfo20::DataLength || !longable) {
- while(keyLen > KeyInfo20::DataLength){
- jam();
- MEMCOPY_NO_WORDS(keyInfo->keyData, src, KeyInfo20::DataLength);
- sendSignal(ref, GSN_KEYINFO20, signal, 25, JBB);
-	  src += KeyInfo20::DataLength;
- keyLen -= KeyInfo20::DataLength;
- }
-
- MEMCOPY_NO_WORDS(keyInfo->keyData, src, keyLen);
- sendSignal(ref, GSN_KEYINFO20, signal,
- KeyInfo20::HeaderLength+keyLen, JBB);
- return keyLen;
- }
-
- LinearSectionPtr ptr[3];
- ptr[0].p = src;
- ptr[0].sz = keyLen;
- sendSignal(ref, GSN_KEYINFO20, signal, KeyInfo20::HeaderLength,
- JBB, ptr, 1);
- return keyLen;
- }
-
- EXECUTE_DIRECT(refToBlock(ref), GSN_KEYINFO20, signal,
- KeyInfo20::HeaderLength + keyLen);
- jamEntry();
- return keyLen;
- }
-
- /**
- * If this node does not have a direct connection
- * to the receiving node we want to send the signals
- * routed via the node that controls this read
- */
- Uint32 routeBlockref = tcConP->clientBlockref;
-
- if(keyLen < KeyInfo20::DataLength || !longable){
- jam();
-
- while (keyLen > (KeyInfo20::DataLength - 1)) {
- jam();
- MEMCOPY_NO_WORDS(keyInfo->keyData, src, KeyInfo20::DataLength - 1);
- keyInfo->keyData[KeyInfo20::DataLength-1] = ref;
- sendSignal(routeBlockref, GSN_KEYINFO20_R, signal, 25, JBB);
- src += KeyInfo20::DataLength - 1;
- keyLen -= KeyInfo20::DataLength - 1;
- }
-
- MEMCOPY_NO_WORDS(keyInfo->keyData, src, keyLen);
- keyInfo->keyData[keyLen] = ref;
- sendSignal(routeBlockref, GSN_KEYINFO20_R, signal,
- KeyInfo20::HeaderLength+keyLen+1, JBB);
- return keyLen;
- }
-
- keyInfo->keyData[0] = ref;
- LinearSectionPtr ptr[3];
- ptr[0].p = src;
- ptr[0].sz = keyLen;
- sendSignal(routeBlockref, GSN_KEYINFO20_R, signal,
- KeyInfo20::HeaderLength+1, JBB, ptr, 1);
- return keyLen;
-}
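-
-/* Illustrative sketch of the chunking in sendKeyinfo20 above: when the key
- * cannot go out as a single long-signal section, it is split into KEYINFO20
- * chunks of at most DataLength words, and routed chunks (KEYINFO20_R via the
- * controlling TC) reserve one word for the real destination reference. Only
- * the chunk-count arithmetic for keyLen > 0 is reproduced; the value 20 for
- * KeyInfo20::DataLength is an assumption of this example. */
-#include <cstdint>
-static inline uint32_t keyinfo20Chunks(uint32_t keyLen, bool routed)
-{
-  const uint32_t dataLen  = 20;                             // assumed KeyInfo20::DataLength
-  const uint32_t capacity = routed ? dataLen - 1 : dataLen; // routed chunks also carry the ref
-  return (keyLen + capacity - 1) / capacity;                // ceiling division
-}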
-
-/* ------------------------------------------------------------------------
- * ------- SEND SCAN_FRAGCONF TO TC THAT CONTROLS THE SCAN -------
- *
- * ------------------------------------------------------------------------ */
-void Dblqh::sendScanFragConf(Signal* signal, Uint32 scanCompleted)
-{
- Uint32 completed_ops= scanptr.p->m_curr_batch_size_rows;
- Uint32 total_len= scanptr.p->m_curr_batch_size_bytes;
- scanptr.p->scanTcWaiting = ZFALSE;
-
- if(ERROR_INSERTED(5037)){
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }
- ScanFragConf * conf = (ScanFragConf*)&signal->theData[0];
- NodeId tc_node_id= refToNode(tcConnectptr.p->clientBlockref);
- Uint32 trans_id1= tcConnectptr.p->transid[0];
- Uint32 trans_id2= tcConnectptr.p->transid[1];
-
- conf->senderData = tcConnectptr.p->clientConnectrec;
- conf->completedOps = completed_ops;
- conf->fragmentCompleted = scanCompleted;
- conf->transId1 = trans_id1;
- conf->transId2 = trans_id2;
- conf->total_len= total_len;
- sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGCONF,
- signal, ScanFragConf::SignalLength, JBB);
-
- if(!scanptr.p->scanLockHold)
- {
- jam();
- scanptr.p->m_curr_batch_size_rows = 0;
- scanptr.p->m_curr_batch_size_bytes= 0;
- }
-}//Dblqh::sendScanFragConf()
-
-/* ######################################################################### */
-/* ####### NODE RECOVERY MODULE ####### */
-/* */
-/* ######################################################################### */
-/*---------------------------------------------------------------------------*/
-/* */
-/* THIS MODULE IS USED WHEN A NODE HAS FAILED. IT PERFORMS A COPY OF A */
-/* FRAGMENT TO A NEW REPLICA OF THE FRAGMENT. IT ALSO SHUTS DOWN ALL         */
-/* CONNECTIONS TO THE FAILED NODE.                                           */
-/*---------------------------------------------------------------------------*/
-void Dblqh::calculateHash(Signal* signal)
-{
- DatabufPtr locDatabufptr;
- UintR Ti;
- UintR Tdata0;
- UintR Tdata1;
- UintR Tdata2;
- UintR Tdata3;
- UintR* Tdata32;
- Uint64 Tdata[512];
-
- Tdata32 = (UintR*)&Tdata[0];
-
- Tdata0 = tcConnectptr.p->tupkeyData[0];
- Tdata1 = tcConnectptr.p->tupkeyData[1];
- Tdata2 = tcConnectptr.p->tupkeyData[2];
- Tdata3 = tcConnectptr.p->tupkeyData[3];
- Tdata32[0] = Tdata0;
- Tdata32[1] = Tdata1;
- Tdata32[2] = Tdata2;
- Tdata32[3] = Tdata3;
- locDatabufptr.i = tcConnectptr.p->firstTupkeybuf;
- Ti = 4;
- while (locDatabufptr.i != RNIL) {
- ptrCheckGuard(locDatabufptr, cdatabufFileSize, databuf);
- Tdata0 = locDatabufptr.p->data[0];
- Tdata1 = locDatabufptr.p->data[1];
- Tdata2 = locDatabufptr.p->data[2];
- Tdata3 = locDatabufptr.p->data[3];
- Tdata32[Ti ] = Tdata0;
- Tdata32[Ti + 1] = Tdata1;
- Tdata32[Ti + 2] = Tdata2;
- Tdata32[Ti + 3] = Tdata3;
- locDatabufptr.i = locDatabufptr.p->nextDatabuf;
- Ti += 4;
- }//while
- tcConnectptr.p->hashValue =
- md5_hash((Uint64*)&Tdata32[0], (UintR)tcConnectptr.p->primKeyLen);
-}//Dblqh::calculateHash()
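-
-/* Illustrative sketch of the gather-then-hash shape of calculateHash above:
- * the first four key words plus the chained 4-word data buffers are
- * flattened into one array and primKeyLen words of it are hashed. The
- * placeholder below uses FNV-1a instead of md5_hash purely for the example;
- * it is not the hash NDB uses. */
-#include <array>
-#include <cstdint>
-#include <vector>
-static uint32_t gatherAndHash(const uint32_t firstFour[4],
-                              const std::vector<std::array<uint32_t, 4> >& chainedBufs,
-                              uint32_t keyLenWords)
-{
-  std::vector<uint32_t> flat(firstFour, firstFour + 4);
-  for (size_t i = 0; i < chainedBufs.size(); i++)
-    flat.insert(flat.end(), chainedBufs[i].begin(), chainedBufs[i].end());
-
-  uint32_t h = 2166136261u;                 // FNV-1a offset basis (placeholder hash)
-  for (uint32_t i = 0; i < keyLenWords && i < flat.size(); i++) {
-    h ^= flat[i];
-    h *= 16777619u;
-  }
-  return h;
-}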
-
-/* *************************************** */
-/* COPY_FRAGREQ: Start copying a fragment */
-/* *************************************** */
-void Dblqh::execCOPY_FRAGREQ(Signal* signal)
-{
- jamEntry();
- const CopyFragReq * const copyFragReq = (CopyFragReq *)&signal->theData[0];
- tabptr.i = copyFragReq->tableId;
- ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
- const Uint32 fragId = copyFragReq->fragId;
- const Uint32 copyPtr = copyFragReq->userPtr;
- const Uint32 userRef = copyFragReq->userRef;
- const Uint32 nodeId = copyFragReq->nodeId;
-
- ndbrequire(cnoActiveCopy < 3);
- ndbrequire(getFragmentrec(signal, fragId));
- ndbrequire(fragptr.p->copyFragState == ZIDLE);
- ndbrequire(cfirstfreeTcConrec != RNIL);
- ndbrequire(fragptr.p->m_scanNumberMask.get(NR_ScanNo));
-
- fragptr.p->fragDistributionKey = copyFragReq->distributionKey;
-
- if (DictTabInfo::isOrderedIndex(tabptr.p->tableType)) {
- jam();
- /**
- * Ordered index doesn't need to be copied
- */
- CopyFragConf * const conf = (CopyFragConf *)&signal->theData[0];
- conf->userPtr = copyPtr;
- conf->sendingNodeId = cownNodeid;
- conf->startingNodeId = nodeId;
- conf->tableId = tabptr.i;
- conf->fragId = fragId;
- sendSignal(userRef, GSN_COPY_FRAGCONF, signal,
- CopyFragConf::SignalLength, JBB);
- return;
- }//if
-
- LocalDLList<ScanRecord> scans(c_scanRecordPool, fragptr.p->m_activeScans);
- ndbrequire(scans.seize(scanptr));
-/* ------------------------------------------------------------------------- */
-// We keep track of how many operation records in ACC have been booked.
-// Copy fragment always has its records booked and thus need not book any.
-// The maximum number of operations used in parallel is m_max_batch_size_rows.
-// This variable has to be set up here since it is used by releaseScanrec
-// to unbook operation records in ACC.
-/* ------------------------------------------------------------------------- */
- scanptr.p->m_max_batch_size_rows = 0;
- scanptr.p->rangeScan = 0;
- scanptr.p->tupScan = 0;
- seizeTcrec();
-
- /**
- * Remove implicit cast/usage of CopyFragReq
- */
- //initCopyrec(signal);
- scanptr.p->copyPtr = copyPtr;
- scanptr.p->scanType = ScanRecord::COPY;
- scanptr.p->scanApiBlockref = userRef;
- scanptr.p->scanNodeId = nodeId;
- scanptr.p->scanTcrec = tcConnectptr.i;
- scanptr.p->scanSchemaVersion = copyFragReq->schemaVersion;
- scanptr.p->scanCompletedStatus = ZFALSE;
- scanptr.p->scanErrorCounter = 0;
- scanptr.p->scanNumber = NR_ScanNo;
- scanptr.p->scanKeyinfoFlag = 0; // Don't put into hash
- scanptr.p->fragPtrI = fragptr.i;
- fragptr.p->m_scanNumberMask.clear(NR_ScanNo);
- scanptr.p->scanBlockref = DBACC_REF;
-
- initScanTc(signal,
- 0,
- (DBLQH << 20) + (cownNodeid << 8),
- fragId,
- copyFragReq->nodeId);
- cactiveCopy[cnoActiveCopy] = fragptr.i;
- cnoActiveCopy++;
-
- tcConnectptr.p->copyCountWords = 0;
- tcConnectptr.p->tcOprec = tcConnectptr.i;
- tcConnectptr.p->schemaVersion = scanptr.p->scanSchemaVersion;
- scanptr.p->scanState = ScanRecord::WAIT_ACC_COPY;
- AccScanReq * req = (AccScanReq*)&signal->theData[0];
- req->senderData = scanptr.i;
- req->senderRef = cownref;
- req->tableId = tabptr.i;
- req->fragmentNo = fragId;
- req->requestInfo = 0;
- AccScanReq::setLockMode(req->requestInfo, 0);
- AccScanReq::setReadCommittedFlag(req->requestInfo, 0);
- req->transId1 = tcConnectptr.p->transid[0];
- req->transId2 = tcConnectptr.p->transid[1];
- req->savePointId = tcConnectptr.p->savePointId;
- sendSignal(tcConnectptr.p->tcAccBlockref, GSN_ACC_SCANREQ, signal,
- AccScanReq::SignalLength, JBB);
- return;
-}//Dblqh::execCOPY_FRAGREQ()
-
-void Dblqh::accScanConfCopyLab(Signal* signal)
-{
- AccScanConf * const accScanConf = (AccScanConf *)&signal->theData[0];
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
-/*--------------------------------------------------------------------------*/
-/* PRECONDITION: SCAN_STATE = WAIT_ACC_COPY */
-/*--------------------------------------------------------------------------*/
- if (accScanConf->flag == AccScanConf::ZEMPTY_FRAGMENT) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THE FRAGMENT WAS EMPTY. */
-/* REPORT SUCCESSFUL COPYING. */
-/*---------------------------------------------------------------------------*/
- tupCopyCloseConfLab(signal);
- return;
- }//if
- scanptr.p->scanAccPtr = accScanConf->accPtr;
- scanptr.p->scanState = ScanRecord::WAIT_STORED_PROC_COPY;
- signal->theData[0] = tcConnectptr.p->tupConnectrec;
- signal->theData[1] = tcConnectptr.p->tableref;
- signal->theData[2] = scanptr.p->scanSchemaVersion;
- signal->theData[3] = ZSTORED_PROC_COPY;
-// theData[4] is not used in TUP with ZSTORED_PROC_COPY
- sendSignal(tcConnectptr.p->tcTupBlockref, GSN_STORED_PROCREQ, signal, 5, JBB);
- return;
-}//Dblqh::accScanConfCopyLab()
-
-/*---------------------------------------------------------------------------*/
-/* ENTER STORED_PROCCONF WITH */
-/* TC_CONNECTPTR, */
-/* TSTORED_PROC_ID */
-/*---------------------------------------------------------------------------*/
-void Dblqh::storedProcConfCopyLab(Signal* signal)
-{
-/*---------------------------------------------------------------------------*/
-/* PRECONDITION: SCAN_STATE = WAIT_STORED_PROC_COPY */
-/*---------------------------------------------------------------------------*/
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if (scanptr.p->scanCompletedStatus == ZTRUE) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THE COPY PROCESS HAS BEEN COMPLETED, MOST LIKELY DUE TO A NODE FAILURE.  */
-/*---------------------------------------------------------------------------*/
- closeCopyLab(signal);
- return;
- }//if
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN_COPY;
- switch (fragptr.p->fragStatus) {
- case Fragrecord::FSACTIVE:
- jam();
- linkActiveFrag(signal);
- break;
- case Fragrecord::BLOCKED:
- jam();
- linkFragQueue(signal);
- tcConnectptr.p->transactionState = TcConnectionrec::COPY_FIRST_STOPPED;
- return;
- break;
- case Fragrecord::FREE:
- jam();
- case Fragrecord::ACTIVE_CREATION:
- jam();
- case Fragrecord::CRASH_RECOVERING:
- jam();
- case Fragrecord::DEFINED:
- jam();
- case Fragrecord::REMOVING:
- jam();
- default:
- jam();
- systemErrorLab(signal);
- return;
- break;
- }//switch
- continueFirstCopyAfterBlockedLab(signal);
- return;
-}//Dblqh::storedProcConfCopyLab()
-
-void Dblqh::continueFirstCopyAfterBlockedLab(Signal* signal)
-{
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- signal->theData[0] = scanptr.p->scanAccPtr;
- signal->theData[1] = RNIL;
- signal->theData[2] = NextScanReq::ZSCAN_NEXT;
- sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
- return;
-}//Dblqh::continueFirstCopyAfterBlockedLab()
-
-/*---------------------------------------------------------------------------*/
-/* ENTER NEXT_SCANCONF WITH */
-/* SCANPTR, */
-/* TFRAGID, */
-/* TACC_OPPTR, */
-/* TLOCAL_KEY1, */
-/* TLOCAL_KEY2, */
-/* TKEY_LENGTH, */
-/* TKEY1, */
-/* TKEY2, */
-/* TKEY3, */
-/* TKEY4 */
-/*---------------------------------------------------------------------------*/
-/* PRECONDITION: SCAN_STATE = WAIT_NEXT_SCAN_COPY */
-/*---------------------------------------------------------------------------*/
-void Dblqh::nextScanConfCopyLab(Signal* signal)
-{
- NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0];
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- if (nextScanConf->fragId == RNIL) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THERE ARE NO MORE TUPLES TO FETCH. WE NEED TO CLOSE */
-/* THE COPY IN ACC AND DELETE THE STORED PROCEDURE IN TUP */
-/*---------------------------------------------------------------------------*/
- releaseActiveFrag(signal);
- if (tcConnectptr.p->copyCountWords == 0) {
- closeCopyLab(signal);
- return;
- }//if
-/*---------------------------------------------------------------------------*/
-// Wait until copying is completed also at the starting node before reporting
-// completion. Signal completion through the scanCompletedStatus flag.
-/*---------------------------------------------------------------------------*/
- scanptr.p->scanCompletedStatus = ZTRUE;
- return;
- }//if
-
- // If accOperationPtr == RNIL no record was returned by ACC
- if (nextScanConf->accOperationPtr == RNIL) {
- jam();
- signal->theData[0] = scanptr.p->scanAccPtr;
- signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
- sendSignal(tcConnectptr.p->tcAccBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
- return;
- }
-
- set_acc_ptr_in_scan_record(scanptr.p, 0, nextScanConf->accOperationPtr);
- initCopyTc(signal);
- copySendTupkeyReqLab(signal);
- return;
-}//Dblqh::nextScanConfCopyLab()
-
-void Dblqh::copySendTupkeyReqLab(Signal* signal)
-{
- Uint32 reqinfo = 0;
- Uint32 tupFragPtr;
-
- reqinfo = reqinfo + (tcConnectptr.p->operation << 6);
- reqinfo = reqinfo + (tcConnectptr.p->opExec << 10);
- tcConnectptr.p->transactionState = TcConnectionrec::COPY_TUPKEY;
- scanptr.p->scanState = ScanRecord::WAIT_TUPKEY_COPY;
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- tupFragPtr = fragptr.p->tupFragptr[scanptr.p->scanLocalFragid & 1];
- {
- TupKeyReq * const tupKeyReq = (TupKeyReq *)signal->getDataPtrSend();
-
- tupKeyReq->connectPtr = tcConnectptr.p->tupConnectrec;
- tupKeyReq->request = reqinfo;
- tupKeyReq->tableRef = tcConnectptr.p->tableref;
- tupKeyReq->fragId = scanptr.p->scanLocalFragid;
- tupKeyReq->keyRef1 = scanptr.p->scanLocalref[0];
- tupKeyReq->keyRef2 = scanptr.p->scanLocalref[1];
- tupKeyReq->attrBufLen = 0;
- tupKeyReq->opRef = tcConnectptr.i;
- tupKeyReq->applRef = cownref;
- tupKeyReq->schemaVersion = scanptr.p->scanSchemaVersion;
- tupKeyReq->storedProcedure = scanptr.p->scanStoredProcId;
- tupKeyReq->transId1 = tcConnectptr.p->transid[0];
- tupKeyReq->transId2 = tcConnectptr.p->transid[1];
- tupKeyReq->fragPtr = tupFragPtr;
- tupKeyReq->primaryReplica = (tcConnectptr.p->seqNoReplica == 0)?true:false;
- tupKeyReq->coordinatorTC = tcConnectptr.p->tcBlockref;
- tupKeyReq->tcOpIndex = tcConnectptr.p->tcOprec;
- tupKeyReq->savePointId = tcConnectptr.p->savePointId;
- Uint32 blockNo = refToBlock(tcConnectptr.p->tcTupBlockref);
- EXECUTE_DIRECT(blockNo, GSN_TUPKEYREQ, signal,
- TupKeyReq::SignalLength);
- }
-}//Dblqh::copySendTupkeyReqLab()
-
-/*---------------------------------------------------------------------------*/
-/* USED IN COPYING OPERATION TO RECEIVE ATTRINFO FROM TUP. */
-/*---------------------------------------------------------------------------*/
-/* ************>> */
-/* TRANSID_AI > */
-/* ************>> */
-void Dblqh::execTRANSID_AI(Signal* signal)
-{
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- Uint32 length = signal->length() - 3;
- ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::COPY_TUPKEY);
- Uint32 * src = &signal->theData[3];
- while(length > 22){
- if (saveTupattrbuf(signal, src, 22) == ZOK) {
- ;
- } else {
- jam();
- tcConnectptr.p->errorCode = ZGET_ATTRINBUF_ERROR;
- return;
- }//if
- src += 22;
- length -= 22;
- }
- if (saveTupattrbuf(signal, src, length) == ZOK) {
- return;
- }
- jam();
- tcConnectptr.p->errorCode = ZGET_ATTRINBUF_ERROR;
-}//Dblqh::execTRANSID_AI()
-
-/*--------------------------------------------------------------------------*/
-/* ENTER TUPKEYCONF WITH */
-/* TC_CONNECTPTR, */
-/* TDATA2, */
-/* TDATA3, */
-/* TDATA4, */
-/* TDATA5 */
-/*--------------------------------------------------------------------------*/
-/* PRECONDITION: TRANSACTION_STATE = COPY_TUPKEY */
-/*--------------------------------------------------------------------------*/
-void Dblqh::copyTupkeyConfLab(Signal* signal)
-{
- const TupKeyConf * const tupKeyConf = (TupKeyConf *)signal->getDataPtr();
-
- UintR readLength = tupKeyConf->readLength;
-
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- ScanRecord* scanP = scanptr.p;
- releaseActiveFrag(signal);
- if (tcConnectptr.p->errorCode != 0) {
- jam();
- closeCopyLab(signal);
- return;
- }//if
- if (scanptr.p->scanCompletedStatus == ZTRUE) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THE COPY PROCESS HAS BEEN CLOSED, MOST LIKELY DUE TO A NODE FAILURE.     */
-/*---------------------------------------------------------------------------*/
- closeCopyLab(signal);
- return;
- }//if
- TcConnectionrec * tcConP = tcConnectptr.p;
- tcConnectptr.p->totSendlenAi = readLength;
- tcConnectptr.p->connectState = TcConnectionrec::COPY_CONNECTED;
-
- // Read primary keys (used to get here via scan keyinfo)
- Uint32* tmp = signal->getDataPtrSend()+24;
- Uint32 len= tcConnectptr.p->primKeyLen = readPrimaryKeys(scanP, tcConP, tmp);
-
-  // Calculate hash (no need to linearize the key)
- tcConnectptr.p->hashValue = md5_hash((Uint64*)tmp, len);
-
- // Move into databuffer to make packLqhkeyreqLab happy
- memcpy(tcConP->tupkeyData, tmp, 4*4);
- if(len > 4)
- keyinfoLab(tmp+4, tmp + len);
- LqhKeyReq::setKeyLen(tcConP->reqinfo, len);
-
-/*---------------------------------------------------------------------------*/
-// To avoid using too many operation records in ACC we increase the
-// constant to ensure that we never send more than 40 records at a time.
-// This is where the constant 56 comes from. For long records this constant
-// will not matter that much. The current maximum is 6000 words outstanding
-// (including a number of those 56 words not really sent). We also have to
-// ensure that these operation records are never used simultaneously, so that
-// node recovery does not fail because of simultaneous scanning.
-/*---------------------------------------------------------------------------*/
- UintR TnoOfWords = readLength + len;
- TnoOfWords = TnoOfWords + MAGIC_CONSTANT;
- TnoOfWords = TnoOfWords + (TnoOfWords >> 2);
-
- /*-----------------------------------------------------------------
- * NOTE for transid1!
-   * Transid1 in the tcConnection record is used to load regulate the
-   * copy (node recovery) process.
-   * The number of outstanding words is written into the transid1
-   * variable. This will be sent to the starting node in the
-   * LQHKEYREQ signal and when the answer is returned in the LQHKEYCONF
-   * we can reduce the number of outstanding words and check to see
-   * if more LQHKEYREQ signals should be sent.
-   *
-   * However efficient, this method is rather unsafe in that
-   * it overwrites the original transid1 data.
- *
- * Also see TR 587.
- *----------------------------------------------------------------*/
- tcConnectptr.p->transid[0] = TnoOfWords; // Data overload, see note!
- packLqhkeyreqLab(signal);
- tcConnectptr.p->copyCountWords += TnoOfWords;
- scanptr.p->scanState = ScanRecord::WAIT_LQHKEY_COPY;
- if (tcConnectptr.p->copyCountWords < cmaxWordsAtNodeRec) {
- nextRecordCopy(signal);
- return;
- }//if
- return;
-}//Dblqh::copyTupkeyConfLab()
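-
-/* Illustrative sketch of the load regulation above: every outstanding
- * LQHKEYREQ is charged with an inflated word count (readLength + keyLen +
- * a constant, plus 25%), the charge is remembered in transid[0], and no new
- * copy rows are issued once the total reaches cmaxWordsAtNodeRec. The
- * concrete values 56 and 6000 are taken from the comments above and are
- * assumptions of this example. */
-#include <cstdint>
-struct CopyThrottle {
-  uint32_t outstandingWords;                // mirrors copyCountWords
-  static const uint32_t kMagic = 56;        // assumed value of MAGIC_CONSTANT
-  static const uint32_t kLimit = 6000;      // assumed value of cmaxWordsAtNodeRec
-
-  CopyThrottle() : outstandingWords(0) {}
-
-  // Charge one outstanding request; the return value is what the real code
-  // stores in transid[0] and gets back in LQHKEYCONF::transId1.
-  uint32_t charge(uint32_t readLength, uint32_t keyLen)
-  {
-    uint32_t words = readLength + keyLen + kMagic;
-    words += words >> 2;                    // add 25% overhead, as in copyTupkeyConfLab
-    outstandingWords += words;
-    return words;
-  }
-  void confirm(uint32_t words) { outstandingWords -= words; }   // on LQHKEYCONF
-  bool canSendMore() const { return outstandingWords < kLimit; }
-};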
-
-/*---------------------------------------------------------------------------*/
-/* ENTER LQHKEYCONF */
-/*---------------------------------------------------------------------------*/
-/* PRECONDITION: CONNECT_STATE = COPY_CONNECTED */
-/*---------------------------------------------------------------------------*/
-void Dblqh::copyCompletedLab(Signal* signal)
-{
- const LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
-
- ndbrequire(tcConnectptr.p->transid[1] == lqhKeyConf->transId2);
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- if (tcConnectptr.p->copyCountWords >= cmaxWordsAtNodeRec) {
- tcConnectptr.p->copyCountWords -= lqhKeyConf->transId1; // Data overload, see note!
- if (scanptr.p->scanCompletedStatus == ZTRUE) {
- jam();
-/*---------------------------------------------------------------------------*/
-// Copy to complete, we will not start any new copying.
-/*---------------------------------------------------------------------------*/
- closeCopyLab(signal);
- return;
- }//if
- if (tcConnectptr.p->copyCountWords < cmaxWordsAtNodeRec) {
- jam();
- nextRecordCopy(signal);
- }//if
- return;
- }//if
- tcConnectptr.p->copyCountWords -= lqhKeyConf->transId1; // Data overload, see note!
- ndbrequire(tcConnectptr.p->copyCountWords <= cmaxWordsAtNodeRec);
- if (tcConnectptr.p->copyCountWords > 0) {
- jam();
- return;
- }//if
-/*---------------------------------------------------------------------------*/
-// No more outstanding copies. We will only start new ones from here if copying
-// was stopped before, and that only happens when copyCountWords exceeds the
-// threshold value. Since this did not occur we must be waiting for completion.
-// Check that this is so. If not we crash to find out what is going on.
-/*---------------------------------------------------------------------------*/
- if (scanptr.p->scanCompletedStatus == ZTRUE) {
- jam();
- closeCopyLab(signal);
- return;
- }//if
- if (scanptr.p->scanState == ScanRecord::WAIT_LQHKEY_COPY) {
- jam();
-/*---------------------------------------------------------------------------*/
-// Make sure that something is in progress. Otherwise we will simply stop
-// and nothing more will happen.
-/*---------------------------------------------------------------------------*/
- systemErrorLab(signal);
- return;
- }//if
- return;
-}//Dblqh::copyCompletedLab()
-
-void Dblqh::nextRecordCopy(Signal* signal)
-{
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- if (scanptr.p->scanState != ScanRecord::WAIT_LQHKEY_COPY) {
- jam();
-/*---------------------------------------------------------------------------*/
-// Make sure that nothing is in progress. Otherwise we will have two simultaneous
-// scans on the same record and this will certainly lead to unexpected
-// behaviour.
-/*---------------------------------------------------------------------------*/
- systemErrorLab(signal);
- return;
- }//if
- scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN_COPY;
- switch (fragptr.p->fragStatus) {
- case Fragrecord::FSACTIVE:
- jam();
- linkActiveFrag(signal);
- break;
- case Fragrecord::BLOCKED:
- jam();
- linkFragQueue(signal);
- tcConnectptr.p->transactionState = TcConnectionrec::COPY_STOPPED;
- return;
- break;
- case Fragrecord::FREE:
- jam();
- case Fragrecord::ACTIVE_CREATION:
- jam();
- case Fragrecord::CRASH_RECOVERING:
- jam();
- case Fragrecord::DEFINED:
- jam();
- case Fragrecord::REMOVING:
- jam();
- default:
- jam();
- systemErrorLab(signal);
- return;
- break;
- }//switch
- continueCopyAfterBlockedLab(signal);
- return;
-}//Dblqh::nextRecordCopy()
-
-void Dblqh::continueCopyAfterBlockedLab(Signal* signal)
-{
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- tcConnectptr.p->errorCode = 0;
- Uint32 acc_op_ptr= get_acc_ptr_from_scan_record(scanptr.p, 0, false);
- signal->theData[0] = scanptr.p->scanAccPtr;
- signal->theData[1] = acc_op_ptr;
- signal->theData[2] = NextScanReq::ZSCAN_NEXT_COMMIT;
- sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
- return;
-}//Dblqh::continueCopyAfterBlockedLab()
-
-void Dblqh::copyLqhKeyRefLab(Signal* signal)
-{
- ndbrequire(tcConnectptr.p->transid[1] == signal->theData[4]);
- tcConnectptr.p->copyCountWords -= signal->theData[3];
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- scanptr.p->scanErrorCounter++;
- tcConnectptr.p->errorCode = terrorCode;
- closeCopyLab(signal);
- return;
-}//Dblqh::copyLqhKeyRefLab()
-
-void Dblqh::closeCopyLab(Signal* signal)
-{
- if (tcConnectptr.p->copyCountWords > 0) {
-/*---------------------------------------------------------------------------*/
-// We are still waiting for responses from the starting node.
-// Wait until all of those have arrived until we start the
-// close process.
-/*---------------------------------------------------------------------------*/
- jam();
- return;
- }//if
- tcConnectptr.p->transid[0] = 0;
- tcConnectptr.p->transid[1] = 0;
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- scanptr.p->scanState = ScanRecord::WAIT_CLOSE_COPY;
- switch (fragptr.p->fragStatus) {
- case Fragrecord::FSACTIVE:
- jam();
- linkActiveFrag(signal);
- break;
- case Fragrecord::BLOCKED:
- jam();
- linkFragQueue(signal);
- tcConnectptr.p->transactionState = TcConnectionrec::COPY_CLOSE_STOPPED;
- return;
- break;
- case Fragrecord::FREE:
- jam();
- case Fragrecord::ACTIVE_CREATION:
- jam();
- case Fragrecord::CRASH_RECOVERING:
- jam();
- case Fragrecord::DEFINED:
- jam();
- case Fragrecord::REMOVING:
- jam();
- default:
- jam();
- systemErrorLab(signal);
- return;
- break;
- }//switch
- continueCloseCopyAfterBlockedLab(signal);
- return;
-}//Dblqh::closeCopyLab()
-
-void Dblqh::continueCloseCopyAfterBlockedLab(Signal* signal)
-{
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
- signal->theData[0] = scanptr.p->scanAccPtr;
- signal->theData[1] = RNIL;
- signal->theData[2] = ZCOPY_CLOSE;
- sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
- return;
-}//Dblqh::continueCloseCopyAfterBlockedLab()
-
-/*---------------------------------------------------------------------------*/
-/* ENTER NEXT_SCANCONF WITH */
-/* SCANPTR, */
-/* TFRAGID, */
-/* TACC_OPPTR, */
-/* TLOCAL_KEY1, */
-/* TLOCAL_KEY2, */
-/* TKEY_LENGTH, */
-/* TKEY1, */
-/* TKEY2, */
-/* TKEY3, */
-/* TKEY4 */
-/*---------------------------------------------------------------------------*/
-/* PRECONDITION: SCAN_STATE = WAIT_CLOSE_COPY */
-/*---------------------------------------------------------------------------*/
-void Dblqh::accCopyCloseConfLab(Signal* signal)
-{
- tcConnectptr.i = scanptr.p->scanTcrec;
- scanptr.p->scanState = ScanRecord::WAIT_DELETE_STORED_PROC_ID_COPY;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- signal->theData[0] = tcConnectptr.p->tupConnectrec;
- signal->theData[1] = tcConnectptr.p->tableref;
- signal->theData[2] = scanptr.p->scanSchemaVersion;
- signal->theData[3] = ZDELETE_STORED_PROC_ID;
- signal->theData[4] = scanptr.p->scanStoredProcId;
- sendSignal(tcConnectptr.p->tcTupBlockref, GSN_STORED_PROCREQ, signal, 5, JBB);
- return;
-}//Dblqh::accCopyCloseConfLab()
-
-/*---------------------------------------------------------------------------*/
-/* ENTER STORED_PROCCONF WITH */
-/* TC_CONNECTPTR, */
-/* TSTORED_PROC_ID */
-/*---------------------------------------------------------------------------*/
-/* PRECONDITION: SCAN_STATE = WAIT_DELETE_STORED_PROC_ID_COPY */
-/*---------------------------------------------------------------------------*/
-void Dblqh::tupCopyCloseConfLab(Signal* signal)
-{
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- fragptr.p->copyFragState = ZIDLE;
-
- if (tcConnectptr.p->abortState == TcConnectionrec::NEW_FROM_TC) {
- jam();
- tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec;
- ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
- tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1;
- signal->theData[0] = ZLQH_TRANS_NEXT;
- signal->theData[1] = tcNodeFailptr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
-
- CopyFragRef * const ref = (CopyFragRef *)&signal->theData[0];
- ref->userPtr = scanptr.p->copyPtr;
- ref->sendingNodeId = cownNodeid;
- ref->startingNodeId = scanptr.p->scanNodeId;
- ref->tableId = fragptr.p->tabRef;
- ref->fragId = fragptr.p->fragId;
- ref->errorCode = ZNODE_FAILURE_ERROR;
- sendSignal(scanptr.p->scanApiBlockref, GSN_COPY_FRAGREF, signal,
- CopyFragRef::SignalLength, JBB);
- } else {
- if (scanptr.p->scanErrorCounter > 0) {
- jam();
- CopyFragRef * const ref = (CopyFragRef *)&signal->theData[0];
- ref->userPtr = scanptr.p->copyPtr;
- ref->sendingNodeId = cownNodeid;
- ref->startingNodeId = scanptr.p->scanNodeId;
- ref->tableId = fragptr.p->tabRef;
- ref->fragId = fragptr.p->fragId;
- ref->errorCode = tcConnectptr.p->errorCode;
- sendSignal(scanptr.p->scanApiBlockref, GSN_COPY_FRAGREF, signal,
- CopyFragRef::SignalLength, JBB);
- } else {
- jam();
- CopyFragConf * const conf = (CopyFragConf *)&signal->theData[0];
- conf->userPtr = scanptr.p->copyPtr;
- conf->sendingNodeId = cownNodeid;
- conf->startingNodeId = scanptr.p->scanNodeId;
- conf->tableId = tcConnectptr.p->tableref;
- conf->fragId = tcConnectptr.p->fragmentid;
- sendSignal(scanptr.p->scanApiBlockref, GSN_COPY_FRAGCONF, signal,
- CopyFragConf::SignalLength, JBB);
- }//if
- }//if
- releaseActiveCopy(signal);
- tcConnectptr.p->tcScanRec = RNIL;
- finishScanrec(signal);
- releaseOprec(signal);
- releaseTcrec(signal, tcConnectptr);
- releaseScanrec(signal);
-}//Dblqh::tupCopyCloseConfLab()
-
-/*---------------------------------------------------------------------------*/
-/* A NODE FAILURE OCCURRED DURING THE COPY PROCESS. WE NEED TO CLOSE THE */
-/* COPY PROCESS SINCE A NODE FAILURE DURING THE COPY PROCESS WILL ALSO */
-/* FAIL THE NODE THAT IS TRYING TO START-UP. */
-/*---------------------------------------------------------------------------*/
-void Dblqh::closeCopyRequestLab(Signal* signal)
-{
- scanptr.p->scanErrorCounter++;
- switch (scanptr.p->scanState) {
- case ScanRecord::WAIT_TUPKEY_COPY:
- case ScanRecord::WAIT_NEXT_SCAN_COPY:
- jam();
-/*---------------------------------------------------------------------------*/
-/* SET COMPLETION STATUS AND WAIT FOR OPPORTUNITY TO STOP THE SCAN. */
-// ALSO SET NO OF WORDS OUTSTANDING TO ZERO TO AVOID ETERNAL WAIT.
-/*---------------------------------------------------------------------------*/
- scanptr.p->scanCompletedStatus = ZTRUE;
- tcConnectptr.p->copyCountWords = 0;
- break;
- case ScanRecord::WAIT_ACC_COPY:
- case ScanRecord::WAIT_STORED_PROC_COPY:
- jam();
-/*---------------------------------------------------------------------------*/
-/* WE ARE CURRENTLY STARTING UP THE SCAN. SET COMPLETED STATUS AND WAIT FOR*/
-/* COMPLETION OF STARTUP. */
-/*---------------------------------------------------------------------------*/
- scanptr.p->scanCompletedStatus = ZTRUE;
- break;
- case ScanRecord::WAIT_CLOSE_COPY:
- case ScanRecord::WAIT_DELETE_STORED_PROC_ID_COPY:
- jam();
-/*---------------------------------------------------------------------------*/
-/* CLOSE IS ALREADY ONGOING. WE NEED NOT DO ANYTHING. */
-/*---------------------------------------------------------------------------*/
- break;
- case ScanRecord::WAIT_LQHKEY_COPY:
- jam();
-/*---------------------------------------------------------------------------*/
-/* WE ARE WAITING FOR THE FAILED NODE. THE NODE WILL NEVER COME BACK. */
-// WE NEED TO START THE FAILURE HANDLING IMMEDIATELY.
-// ALSO SET NO OF WORDS OUTSTANDING TO ZERO TO AVOID ETERNAL WAIT.
-/*---------------------------------------------------------------------------*/
- tcConnectptr.p->copyCountWords = 0;
- closeCopyLab(signal);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- return;
-}//Dblqh::closeCopyRequestLab()
-
-/* ****************************************************** */
-/* COPY_ACTIVEREQ: Change state of a fragment to ACTIVE. */
-/* ****************************************************** */
-void Dblqh::execCOPY_ACTIVEREQ(Signal* signal)
-{
- CRASH_INSERTION(5026);
-
- const CopyActiveReq * const req = (CopyActiveReq *)&signal->theData[0];
- jamEntry();
- Uint32 masterPtr = req->userPtr;
- BlockReference masterRef = req->userRef;
- tabptr.i = req->tableId;
- ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
- Uint32 fragId = req->fragId;
- ndbrequire(getFragmentrec(signal, fragId));
-
- fragptr.p->fragDistributionKey = req->distributionKey;
-
- ndbrequire(cnoActiveCopy < 3);
- cactiveCopy[cnoActiveCopy] = fragptr.i;
- cnoActiveCopy++;
- fragptr.p->masterBlockref = masterRef;
- fragptr.p->masterPtr = masterPtr;
- if (fragptr.p->fragStatus == Fragrecord::FSACTIVE) {
- jam();
-/*------------------------------------------------------*/
-/*       PROCESS HAS ALREADY BEEN STARTED BY PREVIOUS  */
-/* MASTER. WE HAVE ALREADY SET THE PROPER MASTER */
-/* BLOCK REFERENCE. */
-/*------------------------------------------------------*/
- if (fragptr.p->activeTcCounter == 0) {
- jam();
-/*------------------------------------------------------*/
-/* PROCESS WAS EVEN COMPLETED. */
-/*------------------------------------------------------*/
- sendCopyActiveConf(signal, tabptr.i);
- }//if
- return;
- }//if
- fragptr.p->fragStatus = Fragrecord::FSACTIVE;
- if (fragptr.p->lcpFlag == Fragrecord::LCP_STATE_TRUE) {
- jam();
- fragptr.p->logFlag = Fragrecord::STATE_TRUE;
- }//if
- fragptr.p->activeTcCounter = 1;
-/*------------------------------------------------------*/
-/* SET IT TO ONE TO ENSURE THAT IT IS NOT POSSIBLE*/
-/* TO DECREASE IT TO ZERO UNTIL WE HAVE COMPLETED */
-/* THE SCAN. */
-/*------------------------------------------------------*/
- signal->theData[0] = ZSCAN_TC_CONNECT;
- signal->theData[1] = 0;
- signal->theData[2] = tabptr.i;
- signal->theData[3] = fragId;
- sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
- return;
-}//Dblqh::execCOPY_ACTIVEREQ()
-
-void Dblqh::scanTcConnectLab(Signal* signal, Uint32 tstartTcConnect, Uint32 fragId)
-{
- Uint32 tendTcConnect;
-
- ndbrequire(getFragmentrec(signal, fragId));
- if ((tstartTcConnect + 200) >= ctcConnectrecFileSize) {
- jam();
- tendTcConnect = ctcConnectrecFileSize - 1;
- } else {
- jam();
- tendTcConnect = tstartTcConnect + 200;
- }//if
- for (tcConnectptr.i = tstartTcConnect;
- tcConnectptr.i <= tendTcConnect;
- tcConnectptr.i++) {
- jam();
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- if (tcConnectptr.p->transactionState != TcConnectionrec::IDLE) {
- switch (tcConnectptr.p->logWriteState) {
- case TcConnectionrec::NOT_WRITTEN:
- jam();
- if (fragptr.i == tcConnectptr.p->fragmentptr) {
- jam();
- fragptr.p->activeTcCounter = fragptr.p->activeTcCounter + 1;
- tcConnectptr.p->logWriteState = TcConnectionrec::NOT_WRITTEN_WAIT;
- }//if
- break;
- default:
- jam();
- /*empty*/;
- break;
- }//switch
- }//if
- }//for
- if (tendTcConnect < (ctcConnectrecFileSize - 1)) {
- jam();
- signal->theData[0] = ZSCAN_TC_CONNECT;
- signal->theData[1] = tendTcConnect + 1;
- signal->theData[2] = tabptr.i;
- signal->theData[3] = fragId;
- sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
- } else {
- jam();
-/*------------------------------------------------------*/
-/*       THE SCAN HAS BEEN COMPLETED. WE CHECK IF ALL   */
-/* OPERATIONS HAVE ALREADY BEEN COMPLETED. */
-/*------------------------------------------------------*/
- ndbrequire(fragptr.p->activeTcCounter > 0);
- fragptr.p->activeTcCounter--;
- if (fragptr.p->activeTcCounter == 0) {
- jam();
-/*------------------------------------------------------*/
-/* SET START GLOBAL CHECKPOINT TO THE NEXT */
-/* CHECKPOINT WE HAVE NOT YET HEARD ANYTHING ABOUT*/
-/* THIS GCP WILL BE COMPLETELY COVERED BY THE LOG.*/
-/*------------------------------------------------------*/
- fragptr.p->startGci = cnewestGci + 1;
- sendCopyActiveConf(signal, tabptr.i);
- }//if
- }//if
- return;
-}//Dblqh::scanTcConnectLab()
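scanTcConnectLab() above never walks the whole TC connect array in one pass: it scans at most 200 records per invocation and re-posts itself with a ZSCAN_TC_CONNECT CONTINUEB signal until the end of the array is reached. A minimal stand-alone sketch of that batching pattern, assuming nothing beyond what the code shows — plain C++ with hypothetical names, and an ordinary loop standing in for the CONTINUEB self-signal:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    static const std::size_t BATCH = 200;   // same slice size as scanTcConnectLab()

    // Scan records [start, start+BATCH) and return the index the next
    // "continuation" should resume from (records.size() when done).
    static std::size_t scanSlice(const std::vector<int>& records, std::size_t start)
    {
      const std::size_t end = std::min(start + BATCH, records.size());
      for (std::size_t i = start; i < end; i++) {
        // ... inspect records[i] here, e.g. count the ones that are still active ...
      }
      return end;
    }

    int main()
    {
      std::vector<int> records(1000);
      std::size_t next = 0;
      while (next < records.size())          // the real block sends CONTINUEB instead
        next = scanSlice(records, next);
      std::printf("scanned %zu records in slices of up to %zu\n",
                  records.size(), BATCH);
      return 0;
    }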
-
-/*---------------------------------------------------------------------------*/
-/* A NEW MASTER IS REQUESTING THE STATE IN LQH OF THE COPY FRAGMENT PARTS. */
-/*---------------------------------------------------------------------------*/
-/* ***************>> */
-/* COPY_STATEREQ > */
-/* ***************>> */
-void Dblqh::execCOPY_STATEREQ(Signal* signal)
-{
- jamEntry();
-  ndbrequire(0);
-#if 0
- Uint32* dataPtr = &signal->theData[2];
- BlockReference tmasterBlockref = signal->theData[0];
- Uint32 tnoCopy = 0;
- do {
- jam();
- arrGuard(tnoCopy, 4);
- fragptr.i = cactiveCopy[tnoCopy];
- if (fragptr.i == RNIL) {
- jam();
- break;
- }//if
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if (fragptr.p->copyFragState != ZIDLE) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THIS FRAGMENT IS CURRENTLY ACTIVE IN COPYING THE FRAGMENT. */
-/*---------------------------------------------------------------------------*/
- scanptr.i = fragptr.p->fragScanRec[NR_ScanNo];
- c_scanRecordPool.getPtr(scanptr);
- if (scanptr.p->scanCompletedStatus == ZTRUE) {
- jam();
- dataPtr[3 + (tnoCopy << 2)] = ZCOPY_CLOSING;
- } else {
- jam();
- dataPtr[3 + (tnoCopy << 2)] = ZCOPY_ONGOING;
- }//if
- dataPtr[2 + (tnoCopy << 2)] = scanptr.p->scanSchemaVersion;
- scanptr.p->scanApiBlockref = tmasterBlockref;
- } else {
- ndbrequire(fragptr.p->activeTcCounter != 0);
-/*---------------------------------------------------------------------------*/
-/* COPY FRAGMENT IS COMPLETED AND WE ARE CURRENTLY GETTING THE STARTING */
-/* GCI OF THE NEW REPLICA OF THIS FRAGMENT. */
-/*---------------------------------------------------------------------------*/
- fragptr.p->masterBlockref = tmasterBlockref;
- dataPtr[3 + (tnoCopy << 2)] = ZCOPY_ACTIVATION;
- }//if
- dataPtr[tnoCopy << 2] = fragptr.p->tabRef;
- dataPtr[1 + (tnoCopy << 2)] = fragptr.p->fragId;
- tnoCopy++;
- } while (tnoCopy < cnoActiveCopy);
- signal->theData[0] = cownNodeid;
- signal->theData[1] = tnoCopy;
- sendSignal(tmasterBlockref, GSN_COPY_STATECONF, signal, 18, JBB);
-#endif
- return;
-}//Dblqh::execCOPY_STATEREQ()
-
-/* ========================================================================= */
-/* ======= INITIATE TC RECORD AT COPY FRAGMENT ======= */
-/* */
-/* SUBROUTINE SHORT NAME = ICT */
-/* ========================================================================= */
-void Dblqh::initCopyTc(Signal* signal)
-{
- const NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0];
- scanptr.p->scanLocalref[0] = nextScanConf->localKey[0];
- scanptr.p->scanLocalref[1] = nextScanConf->localKey[1];
- scanptr.p->scanLocalFragid = nextScanConf->fragId;
- tcConnectptr.p->operation = ZREAD;
- tcConnectptr.p->apiVersionNo = 0;
- tcConnectptr.p->opExec = 0; /* NOT INTERPRETED MODE */
- tcConnectptr.p->schemaVersion = scanptr.p->scanSchemaVersion;
- Uint32 reqinfo = 0;
- LqhKeyReq::setLockType(reqinfo, ZINSERT);
- LqhKeyReq::setDirtyFlag(reqinfo, 1);
- LqhKeyReq::setSimpleFlag(reqinfo, 1);
- LqhKeyReq::setOperation(reqinfo, ZWRITE);
- /* AILen in LQHKEYREQ IS ZERO */
- tcConnectptr.p->reqinfo = reqinfo;
-/* ------------------------------------------------------------------------ */
-/* THE RECEIVING NODE WILL EXPECT THAT IT IS THE LAST NODE AND WILL */
-/* SEND COMPLETED AS THE RESPONSE SIGNAL SINCE DIRTY_OP BIT IS SET. */
-/* ------------------------------------------------------------------------ */
- tcConnectptr.p->nodeAfterNext[0] = ZNIL;
- tcConnectptr.p->nodeAfterNext[1] = ZNIL;
- tcConnectptr.p->tcBlockref = cownref;
- tcConnectptr.p->readlenAi = 0;
- tcConnectptr.p->storedProcId = ZNIL;
- tcConnectptr.p->opExec = 0;
- tcConnectptr.p->nextSeqNoReplica = 0;
- tcConnectptr.p->dirtyOp = ZFALSE;
- tcConnectptr.p->lastReplicaNo = 0;
- tcConnectptr.p->currTupAiLen = 0;
- tcConnectptr.p->tcTimer = cLqhTimeOutCount;
-}//Dblqh::initCopyTc()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SEND COPY_ACTIVECONF TO MASTER DIH ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::sendCopyActiveConf(Signal* signal, Uint32 tableId)
-{
- releaseActiveCopy(signal);
- CopyActiveConf * const conf = (CopyActiveConf *)&signal->theData[0];
- conf->userPtr = fragptr.p->masterPtr;
- conf->tableId = tableId;
- conf->fragId = fragptr.p->fragId;
- conf->startingNodeId = cownNodeid;
- conf->startGci = fragptr.p->startGci;
- sendSignal(fragptr.p->masterBlockref, GSN_COPY_ACTIVECONF, signal,
- CopyActiveConf::SignalLength, JBB);
-}//Dblqh::sendCopyActiveConf()
-
-/* ##########################################################################
- * ####### LOCAL CHECKPOINT MODULE #######
- *
- * ##########################################################################
- * --------------------------------------------------------------------------
- *  THIS MODULE HANDLES THE EXECUTION AND CONTROL OF LOCAL CHECKPOINTS.
- *  IT CONTROLS THE LOCAL CHECKPOINTS IN TUP AND ACC AND ALSO INTERACTS
- *  WITH DIH TO DETERMINE WHICH GLOBAL CHECKPOINTS ARE RECOVERABLE.
- * ------------------------------------------------------------------------- */
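For orientation, the per-fragment signal flow implemented by the functions in this module (all of them appear further down in this hunk) can be summarised roughly as follows. This is a sketch derived from the code, not a definitive protocol description; the snippet simply prints the steps in order:

    #include <cstddef>
    #include <cstdio>

    // Typical per-fragment LCP flow as driven by this Dblqh code.
    static const char* const kLcpFragmentFlow[] = {
      "LCP_FRAG_ORD             DIH -> LQH, one order per fragment",
      "LCP_FRAGIDREQ/CONF       LQH <-> ACC, pick up the local fragment ids",
      "TUP_PREPLCPREQ/CONF      LQH <-> TUP, prepare the TUP side",
      "LCP_HOLDOPREQ/CONF       LQH <-> ACC, fragment set to BLOCKED",
      "ACC_LCPREQ + TUP_LCPREQ  answered by ACC_LCPSTARTED / TUP_LCPSTARTED",
      "ACC_CONTOPREQ/CONF       blocked operations restarted, fragment FSACTIVE",
      "ACC_LCPCONF + TUP_LCPCONF  then LCP_FRAG_REP is broadcast to DIH",
      "last fragment only       END_LCPREQ/CONF to ACC and TUP, then LCP_COMPLETE_REP"
    };

    int main()
    {
      const std::size_t n = sizeof(kLcpFragmentFlow) / sizeof(kLcpFragmentFlow[0]);
      for (std::size_t i = 0; i < n; i++)
        std::printf("%u. %s\n", (unsigned)(i + 1), kLcpFragmentFlow[i]);
      return 0;
    }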
-void Dblqh::execEMPTY_LCP_REQ(Signal* signal)
-{
- jamEntry();
- CRASH_INSERTION(5008);
- EmptyLcpReq * const emptyLcpOrd = (EmptyLcpReq*)&signal->theData[0];
-
- lcpPtr.i = 0;
- ptrAss(lcpPtr, lcpRecord);
-
- Uint32 nodeId = refToNode(emptyLcpOrd->senderRef);
-
- lcpPtr.p->m_EMPTY_LCP_REQ.set(nodeId);
- lcpPtr.p->reportEmpty = true;
-
- if (lcpPtr.p->lcpState == LcpRecord::LCP_IDLE){
- jam();
- bool ok = false;
- switch(clcpCompletedState){
- case LCP_IDLE:
- ok = true;
- sendEMPTY_LCP_CONF(signal, true);
- break;
- case LCP_RUNNING:
- ok = true;
- sendEMPTY_LCP_CONF(signal, false);
- break;
- case LCP_CLOSE_STARTED:
- jam();
- case ACC_LCP_CLOSE_COMPLETED:
- jam();
- case TUP_LCP_CLOSE_COMPLETED:
- jam();
- ok = true;
- break;
- }
- ndbrequire(ok);
-
- }//if
-
- return;
-}//Dblqh::execEMPTY_LCP_REQ()
-
-void Dblqh::execLCP_FRAG_ORD(Signal* signal)
-{
- jamEntry();
- CRASH_INSERTION(5010);
- LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
- Uint32 lcpId = lcpFragOrd->lcpId;
-
- lcpPtr.i = 0;
- ptrAss(lcpPtr, lcpRecord);
-
- lcpPtr.p->lastFragmentFlag = lcpFragOrd->lastFragmentFlag;
- if (lcpFragOrd->lastFragmentFlag) {
- jam();
- if (lcpPtr.p->lcpState == LcpRecord::LCP_IDLE) {
- jam();
- /* ----------------------------------------------------------
- * NOW THE COMPLETE LOCAL CHECKPOINT ROUND IS COMPLETED.
- * -------------------------------------------------------- */
- if (cnoOfFragsCheckpointed > 0) {
- jam();
- completeLcpRoundLab(signal);
- } else {
- jam();
- sendLCP_COMPLETE_REP(signal, lcpId);
- }//if
- }
- return;
- }//if
- tabptr.i = lcpFragOrd->tableId;
- ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
-
- ndbrequire(tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING ||
- tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE ||
- tabptr.p->tableStatus == Tablerec::TABLE_DEFINED);
-
- ndbrequire(getFragmentrec(signal, lcpFragOrd->fragmentId));
-
- lcpPtr.i = 0;
- ptrAss(lcpPtr, lcpRecord);
- ndbrequire(!lcpPtr.p->lcpQueued);
- if (c_lcpId < lcpFragOrd->lcpId) {
- jam();
- /**
- * A new LCP
- */
- c_lcpId = lcpFragOrd->lcpId;
- ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_IDLE);
- setLogTail(signal, lcpFragOrd->keepGci);
- ndbrequire(clcpCompletedState == LCP_IDLE);
- clcpCompletedState = LCP_RUNNING;
- }//if
- cnoOfFragsCheckpointed++;
-
- if(tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
- jam();
- LcpRecord::FragOrd fragOrd;
- fragOrd.fragPtrI = fragptr.i;
- fragOrd.lcpFragOrd = * lcpFragOrd;
- sendLCP_FRAG_REP(signal, fragOrd);
- return;
- }
-
- if (lcpPtr.p->lcpState != LcpRecord::LCP_IDLE) {
- ndbrequire(lcpPtr.p->lcpQueued == false);
- lcpPtr.p->lcpQueued = true;
- lcpPtr.p->queuedFragment.fragPtrI = fragptr.i;
- lcpPtr.p->queuedFragment.lcpFragOrd = * lcpFragOrd;
- return;
- }//if
-
- lcpPtr.p->currentFragment.fragPtrI = fragptr.i;
- lcpPtr.p->currentFragment.lcpFragOrd = * lcpFragOrd;
-
- sendLCP_FRAGIDREQ(signal);
-}//Dblqh::execLCP_FRAG_ORD()
-
-/* --------------------------------------------------------------------------
- * PRECONDITION: LCP_PTR:LCP_STATE = WAIT_FRAGID
- * --------------------------------------------------------------------------
- * WE NOW HAVE THE LOCAL FRAGMENTS THAT THE LOCAL CHECKPOINT WILL USE.
- * -------------------------------------------------------------------------- */
-void Dblqh::execLCP_FRAGIDCONF(Signal* signal)
-{
- UintR Tfragid[4];
-
- jamEntry();
-
- lcpPtr.i = signal->theData[0];
-
- Uint32 TaccPtr = signal->theData[1];
- Uint32 noLocfrag = signal->theData[2];
- Tfragid[0] = signal->theData[3];
- Tfragid[1] = signal->theData[4];
-
- ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
- ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_WAIT_FRAGID);
- /* ------------------------------------------------------------------------
-   * NO ERROR CHECKING OF THE TNO_LOCFRAG VALUE. AN OUT-OF-BOUNDS VALUE GIVES
-   * AN INDEX OUT OF RANGE, WHICH CAUSES A SYSTEM RESTART. THIS IS DESIRED.
- * ------------------------------------------------------------------------ */
- lcpPtr.p->lcpAccptr = TaccPtr;
- fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- ndbrequire(noLocfrag - 1 < 2);
- for (Uint32 Tindex = 0; Tindex < noLocfrag; Tindex++) {
- jam();
- Uint32 fragId = Tfragid[Tindex];
- /* ----------------------------------------------------------------------
-     * THERE IS NO ERROR CHECKING ON PURPOSE. IT IS POSSIBLE TO CALCULATE HOW
-     * MANY LOCAL LCP RECORDS THERE SHOULD BE, SO THERE SHOULD NEVER BE NONE
-     * FREE. IF NONE IS FREE WE GET A POINTER OUT OF RANGE, WHICH IS AN ERROR
-     * IN ITSELF. THIS REUSES THE ERROR HANDLING IN THE AXE VM.
- * ---------------------------------------------------------------------- */
- seizeLcpLoc(signal);
- initLcpLocAcc(signal, fragId);
- seizeLcpLoc(signal);
- initLcpLocTup(signal, fragId);
- signal->theData[0] = lcpLocptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
- signal->theData[3] = lcpLocptr.p->locFragid;
- signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
- signal->theData[5] = lcpPtr.p->currentFragment.lcpFragOrd.lcpId % MAX_LCP_STORED;
- sendSignal(fragptr.p->tupBlockref, GSN_TUP_PREPLCPREQ, signal, 6, JBB);
- }//for
- lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_TUP_PREPLCP;
- return;
-}//Dblqh::execLCP_FRAGIDCONF()
-
-/* --------------------------------------------------------------------------
- * PRECONDITION: LCP_LOCPTR:LCP_STATE = WAIT_TUPPREPLCP
- * --------------------------------------------------------------------------
- * WE HAVE NOW PREPARED A LOCAL FRAGMENT IN TUP FOR LCP EXECUTION.
- * -------------------------------------------------------------------------- */
-void Dblqh::execTUP_PREPLCPCONF(Signal* signal)
-{
- UintR ttupPtr;
-
- jamEntry();
- lcpLocptr.i = signal->theData[0];
- ttupPtr = signal->theData[1];
- ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
-
- lcpPtr.i = lcpLocptr.p->masterLcpRec;
- ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
- ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::WAIT_TUP_PREPLCP);
-
- lcpLocptr.p->tupRef = ttupPtr;
- lcpLocptr.p->lcpLocstate = LcpLocRecord::IDLE;
- checkLcpTupprep(signal);
- if (lcpPtr.p->lcpState != LcpRecord::LCP_WAIT_HOLDOPS) {
- jam();
- return;
- }//if
- fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- lcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
- do {
- jam();
- ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- lcpLocptr.p->lcpLocstate = LcpLocRecord::WAIT_LCPHOLDOP;
- signal->theData[0] = lcpPtr.p->lcpAccptr;
- signal->theData[1] = lcpLocptr.p->locFragid;
- signal->theData[2] = 0;
- signal->theData[3] = lcpLocptr.i;
- sendSignal(fragptr.p->accBlockref, GSN_LCP_HOLDOPREQ, signal, 4, JBA);
- lcpLocptr.i = lcpLocptr.p->nextLcpLoc;
- } while (lcpLocptr.i != RNIL);
- /* ------------------------------------------------------------------------
- * SET STATE ON FRAGMENT TO BLOCKED TO ENSURE THAT NO MORE OPERATIONS ARE
- * STARTED FROM LQH IN TUP AND ACC UNTIL THE START CHECKPOINT HAS BEEN
- * COMPLETED. ALSO SET THE LOCAL CHECKPOINT STATE TO WAIT FOR
- * LCP_HOLDOPCONF
- * ----------------------------------------------------------------------- */
- fragptr.p->fragStatus = Fragrecord::BLOCKED;
- fragptr.p->fragActiveStatus = ZTRUE;
- lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_HOLDOPS;
- return;
-}//Dblqh::execTUP_PREPLCPCONF()
-
-void Dblqh::execTUP_PREPLCPREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dblqh::execTUP_PREPLCPREF()
-
-void Dblqh::execLCP_FRAGIDREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dblqh::execLCP_FRAGIDREF()
-
-/* --------------------------------------------------------------------------
- * A NUMBER OF OPERATIONS THAT HAVE BEEN SET ON HOLD IN ACC. MOVE THOSE TO
- * LIST OF BLOCKED ACC OPERATIONS. IF MORE OPERATIONS ARE BLOCKED GET THOSE
- * OTHERWISE CONTINUE THE LOCAL CHECKPOINT BY REQUESTING TUP AND ACC TO
- * WRITE THEIR START CHECKPOINT.
- * --------------------------------------------------------------------------
- * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = WAIT_LCPHOLDOP
- * ------------------------------------------------------------------------- */
-/* ***************>> */
-/* LCP_HOLDOPCONF > */
-/* ***************>> */
-void Dblqh::execLCP_HOLDOPCONF(Signal* signal)
-{
- UintR tnoHoldops;
- Uint32 Tdata[23];
- Uint32 Tlength;
-
- jamEntry();
- lcpLocptr.i = signal->theData[0];
- Tlength = signal->theData[1];
- for (Uint32 i = 0; i < 23; i++)
- Tdata[i] = signal->theData[i + 2];
- ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::WAIT_LCPHOLDOP);
-
- lcpPtr.i = lcpLocptr.p->masterLcpRec;
- /* ------------------------------------------------------------------------
- * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS
- * REFERENCE WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
- * ----------------------------------------------------------------------- */
- tnoHoldops = Tlength & 65535;
- ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
- fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- ndbrequire(tnoHoldops <= 23);
- for (Uint32 Tindex = 0; Tindex < tnoHoldops; Tindex++) {
- jam();
- tcConnectptr.i = Tdata[Tindex];
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- moveActiveToAcc(signal);
- }//for
- if ((Tlength >> 16) == 1) {
- jam();
- /* MORE HOLDOPS NEEDED */
- signal->theData[0] = lcpPtr.p->lcpAccptr;
- signal->theData[1] = lcpLocptr.p->locFragid;
- signal->theData[2] = 1;
- signal->theData[3] = lcpLocptr.i;
- sendSignal(fragptr.p->accBlockref, GSN_LCP_HOLDOPREQ, signal, 4, JBA);
- return;
- } else {
- jam();
-
- /* NO MORE HOLDOPS NEEDED */
- lcpLocptr.p->lcpLocstate = LcpLocRecord::HOLDOP_READY;
- checkLcpHoldop(signal);
-
- if (lcpPtr.p->lcpState == LcpRecord::LCP_WAIT_ACTIVE_FINISH) {
- if (fragptr.p->activeList == RNIL) {
- jam();
- /* ------------------------------------------------------------------
- * THERE ARE NO MORE ACTIVE OPERATIONS. IT IS NOW OK TO START THE
- * LOCAL CHECKPOINT IN BOTH TUP AND ACC.
- * ----------------------------------------------------------------- */
- sendStartLcp(signal);
- lcpPtr.p->lcpState = LcpRecord::LCP_START_CHKP;
- } else {
- jam();
-	  // Set this to signal releaseActiveFrag
-	  // that it should check to see if it's time to call sendStartLcp
- fragptr.p->lcpRef = lcpPtr.i;
- }//if
- }//if
- }//if
-
- /* ----------------------- */
- /* ELSE */
- /* ------------------------------------------------------------------------
-   * THERE ARE STILL MORE ACTIVE OPERATIONS. WAIT UNTIL THEY ARE FINISHED.
- * THIS IS DISCOVERED WHEN RELEASE_ACTIVE_FRAG IS EXECUTED.
- * ------------------------------------------------------------------------
- * DO NOTHING, EXIT IS EXECUTED BELOW
- * ----------------------------------------------------------------------- */
- return;
-}//Dblqh::execLCP_HOLDOPCONF()
-
-/* ***************> */
-/* LCP_HOLDOPREF > */
-/* ***************> */
-void Dblqh::execLCP_HOLDOPREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dblqh::execLCP_HOLDOPREF()
-
-/* ************************************************************************>>
- * ACC_LCPSTARTED: Confirm that ACC started local checkpoint and undo
- * logging is on.
- * ************************************************************************>>
- * --------------------------------------------------------------------------
- * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = ACC_WAIT_STARTED
- * ------------------------------------------------------------------------- */
-void Dblqh::execACC_LCPSTARTED(Signal* signal)
-{
- jamEntry();
- lcpLocptr.i = signal->theData[0];
- ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED);
-
- lcpPtr.i = lcpLocptr.p->masterLcpRec;
- ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
- /* ------------------------------------------------------------------------
- * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS
- * REFERENCE WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
- * ----------------------------------------------------------------------- */
- lcpLocptr.p->lcpLocstate = LcpLocRecord::ACC_STARTED;
- lcpStartedLab(signal);
- return;
-}//Dblqh::execACC_LCPSTARTED()
-
-/* ******************************************> */
-/* TUP_LCPSTARTED: Same as above but for TUP. */
-/* ******************************************> */
-/* --------------------------------------------------------------------------
- * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = TUP_WAIT_STARTED
- * ------------------------------------------------------------------------- */
-void Dblqh::execTUP_LCPSTARTED(Signal* signal)
-{
- jamEntry();
- lcpLocptr.i = signal->theData[0];
- ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_WAIT_STARTED);
-
- lcpPtr.i = lcpLocptr.p->masterLcpRec;
- ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
- /* ------------------------------------------------------------------------
- * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS REFERENCE
- * WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
- * ----------------------------------------------------------------------- */
- lcpLocptr.p->lcpLocstate = LcpLocRecord::TUP_STARTED;
- lcpStartedLab(signal);
- return;
-}//Dblqh::execTUP_LCPSTARTED()
-
-void Dblqh::lcpStartedLab(Signal* signal)
-{
- if (checkLcpStarted(signal))
- {
- jam();
- /* ----------------------------------------------------------------------
- * THE LOCAL CHECKPOINT HAS BEEN STARTED. IT IS NOW TIME TO
- * RESTART THE TRANSACTIONS WHICH HAVE BEEN BLOCKED.
- * --------------------------------------------------------------------- */
- fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- /* ----------------------------------------------------------------------
- * UPDATE THE MAX_GCI_IN_LCP AND MAX_GCI_COMPLETED_IN_LCP NOW BEFORE
- * ACTIVATING THE FRAGMENT AGAIN.
- * --------------------------------------------------------------------- */
- ndbrequire(lcpPtr.p->currentFragment.lcpFragOrd.lcpNo < MAX_LCP_STORED);
- fragptr.p->maxGciInLcp = fragptr.p->newestGci;
- fragptr.p->maxGciCompletedInLcp = cnewestCompletedGci;
- sendAccContOp(signal); /* START OPERATIONS IN ACC */
- moveAccActiveFrag(signal); /* MOVE FROM ACC BLOCKED LIST TO ACTIVE LIST
- ON FRAGMENT */
- }
- /*---------------*/
- /* ELSE */
- /*-------------------------------------------------------------------------*/
- /* THE LOCAL CHECKPOINT HAS NOT BEEN STARTED. EXIT AND WAIT FOR
- * MORE SIGNALS */
- /*-------------------------------------------------------------------------*/
- /* DO NOTHING, EXIT IS EXECUTED BELOW */
- /*-------------------------------------------------------------------------*/
- return;
-}//Dblqh::lcpStartedLab()
-
-/*---------------------------------------------------------------------------
- * ACC HAVE RESTARTED THE BLOCKED OPERATIONS AGAIN IN ONE FRAGMENT PART.
- * IT IS NOW OUR TURN TO RESTART ALL OPERATIONS QUEUED IN LQH IF ALL
- * FRAGMENT PARTS ARE COMPLETED.
- *-------------------------------------------------------------------------- */
-void Dblqh::execACC_CONTOPCONF(Signal* signal)
-{
- if(ERROR_INSERTED(5035) && signal->getSendersBlockRef() != reference()){
- sendSignalWithDelay(reference(), GSN_ACC_CONTOPCONF, signal, 1000,
- signal->length());
- return;
- }
-
- jamEntry();
- lcpLocptr.i = signal->theData[0];
- ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- lcpLocptr.p->accContCounter = 1;
-
- lcpPtr.i = lcpLocptr.p->masterLcpRec;
- ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
- lcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
- do {
- ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- if (lcpLocptr.p->accContCounter == 0) {
- jam();
- return;
- }//if
- lcpLocptr.i = lcpLocptr.p->nextLcpLoc;
- } while (lcpLocptr.i != RNIL);
- fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- restartOperationsLab(signal);
- return;
-}//Dblqh::execACC_CONTOPCONF()
-
-/* ********************************************************* */
-/* LQH_RESTART_OP: Restart operations after being blocked.   */
-/* ********************************************************* */
-/*---------------------------------------------------------------------------*/
-/* PRECONDITION: FRAG_STATUS = BLOCKED AND LCP_STATE = STARTED */
-/*---------------------------------------------------------------------------*/
-void Dblqh::execLQH_RESTART_OP(Signal* signal)
-{
- jamEntry();
- fragptr.i = signal->theData[0];
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
-
- lcpPtr.i = signal->theData[1];
- ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
- ndbrequire(fragptr.p->fragStatus == Fragrecord::BLOCKED);
- restartOperationsLab(signal);
-}//Dblqh::execLQH_RESTART_OP()
-
-void Dblqh::restartOperationsLab(Signal* signal)
-{
- Uint32 loopCount = 0;
- tcConnectptr.i = fragptr.p->firstWaitQueue;
- do {
- if (tcConnectptr.i != RNIL) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* START UP THE TRANSACTION AGAIN. WE START IT AS A SEPARATE SIGNAL. */
-/*---------------------------------------------------------------------------*/
- signal->theData[0] = ZRESTART_OPERATIONS_AFTER_STOP;
- signal->theData[1] = tcConnectptr.i;
- signal->theData[2] = fragptr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- tcConnectptr.i = tcConnectptr.p->nextTc;
- } else {
- jam();
-/*--------------------------------------------------------------------------*/
-/* NO MORE OPERATIONS TO RESTART. WE CAN NOW RESET THE STATE TO ACTIVE AND */
-/* RESTART NORMAL ACTIVITIES ON THE FRAGMENT WHILE THE FUZZY PART OF THE */
-/* LOCAL CHECKPOINT IS COMPLETING. */
-/* IF THE CHECKPOINT WAS COMPLETED ALREADY ON THIS FRAGMENT WE PROCEED WITH */
-/* THE NEXT FRAGMENT NOW THAT WE HAVE COMPLETED THIS CHECKPOINT. */
-/*--------------------------------------------------------------------------*/
- fragptr.p->fragStatus = Fragrecord::FSACTIVE;
- if (lcpPtr.p->lcpState == LcpRecord::LCP_BLOCKED_COMP) {
- jam();
- contChkpNextFragLab(signal);
- return;
- }//if
- return;
- }//if
- loopCount++;
- if (loopCount > 16) {
- jam();
- signal->theData[0] = fragptr.i;
- signal->theData[1] = lcpPtr.i;
- sendSignal(cownref, GSN_LQH_RESTART_OP, signal, 2, JBB);
- return;
- }//if
- } while (1);
-}//Dblqh::restartOperationsLab()
-
-void Dblqh::restartOperationsAfterStopLab(Signal* signal)
-{
- /*-------------------------------------------------------------------------
- * WHEN ARRIVING HERE THE OPERATION IS ALREADY SET IN THE ACTIVE LIST.
- * THUS WE CAN IMMEDIATELY CALL THE METHODS THAT EXECUTE FROM WHERE
- * THE OPERATION WAS STOPPED.
- *------------------------------------------------------------------------ */
- switch (tcConnectptr.p->transactionState) {
- case TcConnectionrec::STOPPED:
- jam();
- /*-----------------------------------------------------------------------
- * STOPPED BEFORE TRYING TO SEND ACCKEYREQ
- *---------------------------------------------------------------------- */
- prepareContinueAfterBlockedLab(signal);
- return;
- break;
- case TcConnectionrec::COMMIT_STOPPED:
- jam();
- /* ----------------------------------------------------------------------
- * STOPPED BEFORE TRYING TO SEND ACC_COMMITREQ
- * --------------------------------------------------------------------- */
- releaseActiveFrag(signal);
- commitContinueAfterBlockedLab(signal);
- return;
- break;
- case TcConnectionrec::ABORT_STOPPED:
- jam();
- /* ----------------------------------------------------------------------
- * STOPPED BEFORE TRYING TO SEND ACC_ABORTREQ
- * --------------------------------------------------------------------- */
- abortContinueAfterBlockedLab(signal, true);
- return;
- break;
- case TcConnectionrec::COPY_STOPPED:
- jam();
- /* ----------------------------------------------------------------------
- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING COPY FRAGMENT
- * --------------------------------------------------------------------- */
- continueCopyAfterBlockedLab(signal);
- return;
- break;
- case TcConnectionrec::COPY_FIRST_STOPPED:
- jam();
- /* ----------------------------------------------------------------------
- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING COPY FRAGMENT
- * --------------------------------------------------------------------- */
- continueFirstCopyAfterBlockedLab(signal);
- return;
- break;
- case TcConnectionrec::SCAN_FIRST_STOPPED:
- jam();
- /* ----------------------------------------------------------------------
- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN
- * --------------------------------------------------------------------- */
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
- continueFirstScanAfterBlockedLab(signal);
- return;
- break;
- case TcConnectionrec::SCAN_CHECK_STOPPED:
- jam();
- /* ----------------------------------------------------------------------
- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN
- * --------------------------------------------------------------------- */
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
- continueAfterCheckLcpStopBlocked(signal);
- return;
- break;
- case TcConnectionrec::SCAN_STOPPED:
- jam();
- /* ----------------------------------------------------------------------
- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN
- * --------------------------------------------------------------------- */
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
- continueScanAfterBlockedLab(signal);
- return;
- break;
- case TcConnectionrec::SCAN_RELEASE_STOPPED:
- jam();
- /* ----------------------------------------------------------------------
- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING RELEASE
- * LOCKS IN SCAN
- * --------------------------------------------------------------------- */
- tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
- continueScanReleaseAfterBlockedLab(signal);
- return;
- break;
- case TcConnectionrec::SCAN_CLOSE_STOPPED:
- jam();
- /* ----------------------------------------------------------------------
- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING CLOSE OF SCAN
- * --------------------------------------------------------------------- */
- continueCloseScanAfterBlockedLab(signal);
- return;
- break;
- case TcConnectionrec::COPY_CLOSE_STOPPED:
- jam();
- /* ----------------------------------------------------------------------
- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING CLOSE OF COPY
- * --------------------------------------------------------------------- */
- continueCloseCopyAfterBlockedLab(signal);
- return;
- break;
- default:
- jam();
- systemErrorLab(signal);
- return;
- break;
- }//switch
-}//Dblqh::restartOperationsAfterStopLab()
-
-/* *************** */
-/* ACC_LCPCONF > */
-/* *************** */
-/*---------------------------------------------------------------------------
- * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = ACC_STARTED
- *-------------------------------------------------------------------------- */
-void Dblqh::execACC_LCPCONF(Signal* signal)
-{
- jamEntry();
- lcpLocptr.i = signal->theData[0];
- ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_STARTED);
-
- lcpPtr.i = lcpLocptr.p->masterLcpRec;
- ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
- /* ------------------------------------------------------------------------
- * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN
- * THIS REFERENCE WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A
- * SYSTEM RESTART.
- * ----------------------------------------------------------------------- */
- lcpLocptr.p->lcpLocstate = LcpLocRecord::ACC_COMPLETED;
- lcpCompletedLab(signal);
- return;
-}//Dblqh::execACC_LCPCONF()
-
-/* *************** */
-/* TUP_LCPCONF > */
-/* *************** */
-/* --------------------------------------------------------------------------
- * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = TUP_STARTED
- * ------------------------------------------------------------------------- */
-void Dblqh::execTUP_LCPCONF(Signal* signal)
-{
- jamEntry();
- lcpLocptr.i = signal->theData[0];
- ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_STARTED);
-
- lcpPtr.i = lcpLocptr.p->masterLcpRec;
- ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
- /* ------------------------------------------------------------------------
- * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS
- * REFERENCE WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
- * ----------------------------------------------------------------------- */
- lcpLocptr.p->lcpLocstate = LcpLocRecord::TUP_COMPLETED;
- lcpCompletedLab(signal);
- return;
-}//Dblqh::execTUP_LCPCONF()
-
-void Dblqh::lcpCompletedLab(Signal* signal)
-{
- checkLcpCompleted(signal);
- if (lcpPtr.p->lcpState != LcpRecord::LCP_COMPLETED) {
- jam();
- /* ----------------------------------------------------------------------
- * THE LOCAL CHECKPOINT HAS NOT BEEN COMPLETED, EXIT & WAIT
- * FOR MORE SIGNALS
- * --------------------------------------------------------------------- */
- return;
- }//if
- /* ------------------------------------------------------------------------
- * THE LOCAL CHECKPOINT HAS BEEN COMPLETED. IT IS NOW TIME TO START
- * A LOCAL CHECKPOINT ON THE NEXT FRAGMENT OR COMPLETE THIS LCP ROUND.
- * ------------------------------------------------------------------------
- * WE START BY SENDING LCP_REPORT TO DIH TO REPORT THE COMPLETED LCP.
- * TO CATER FOR NODE CRASHES WE SEND IT IN PARALLEL TO ALL NODES.
- * ----------------------------------------------------------------------- */
- fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- fragptr.p->fragActiveStatus = ZFALSE;
-
- contChkpNextFragLab(signal);
- return;
-}//Dblqh::lcpCompletedLab()
-
-void
-Dblqh::sendLCP_FRAG_REP(Signal * signal,
- const LcpRecord::FragOrd & fragOrd) const {
-
- FragrecordPtr fragPtr;
- fragPtr.i = fragOrd.fragPtrI;
- ptrCheckGuard(fragPtr, cfragrecFileSize, fragrecord);
-
- ndbrequire(fragOrd.lcpFragOrd.lcpNo < MAX_LCP_STORED);
- LcpFragRep * const lcpReport = (LcpFragRep *)&signal->theData[0];
- lcpReport->nodeId = cownNodeid;
- lcpReport->lcpId = fragOrd.lcpFragOrd.lcpId;
- lcpReport->lcpNo = fragOrd.lcpFragOrd.lcpNo;
- lcpReport->tableId = fragOrd.lcpFragOrd.tableId;
- lcpReport->fragId = fragOrd.lcpFragOrd.fragmentId;
- lcpReport->maxGciCompleted = fragPtr.p->maxGciCompletedInLcp;
- lcpReport->maxGciStarted = fragPtr.p->maxGciInLcp;
-
- for (Uint32 i = 0; i < cnoOfNodes; i++) {
- jam();
- Uint32 nodeId = cnodeData[i];
- if(cnodeStatus[i] == ZNODE_UP){
- jam();
- BlockReference Tblockref = calcDihBlockRef(nodeId);
- sendSignal(Tblockref, GSN_LCP_FRAG_REP, signal,
- LcpFragRep::SignalLength, JBB);
- }//if
- }//for
-}
-
-void Dblqh::contChkpNextFragLab(Signal* signal)
-{
- /* ------------------------------------------------------------------------
- * UPDATE THE LATEST LOCAL CHECKPOINT COMPLETED ON FRAGMENT.
- * UPDATE THE LCP_ID OF THIS CHECKPOINT.
- * REMOVE THE LINK BETWEEN THE FRAGMENT RECORD AND THE LCP RECORD.
- * ----------------------------------------------------------------------- */
- if (fragptr.p->fragStatus == Fragrecord::BLOCKED) {
- jam();
- /**
- * LCP of fragment complete
- * but restarting of operations isn't
- */
- lcpPtr.p->lcpState = LcpRecord::LCP_BLOCKED_COMP;
- //restartOperationsLab(signal);
- return;
- }//if
-
- /**
- * Send rep when fragment is done + unblocked
- */
- sendLCP_FRAG_REP(signal, lcpPtr.p->currentFragment);
-
- /* ------------------------------------------------------------------------
- * WE ALSO RELEASE THE LOCAL LCP RECORDS.
- * ----------------------------------------------------------------------- */
- releaseLocalLcps(signal);
- if (lcpPtr.p->lcpQueued) {
- jam();
- /* ----------------------------------------------------------------------
- * Transfer the state from the queued to the active LCP.
- * --------------------------------------------------------------------- */
- lcpPtr.p->lcpQueued = false;
- lcpPtr.p->currentFragment = lcpPtr.p->queuedFragment;
-
- /* ----------------------------------------------------------------------
- * START THE QUEUED LOCAL CHECKPOINT.
- * --------------------------------------------------------------------- */
- sendLCP_FRAGIDREQ(signal);
- return;
- }//if
-
- lcpPtr.p->lcpState = LcpRecord::LCP_IDLE;
- if (lcpPtr.p->lastFragmentFlag){
- jam();
- /* ----------------------------------------------------------------------
- * NOW THE COMPLETE LOCAL CHECKPOINT ROUND IS COMPLETED.
- * --------------------------------------------------------------------- */
- completeLcpRoundLab(signal);
- return;
- }//if
-
- if (lcpPtr.p->reportEmpty) {
- jam();
- sendEMPTY_LCP_CONF(signal, false);
- }//if
- return;
-}//Dblqh::contChkpNextFragLab()
-
-void Dblqh::sendLCP_FRAGIDREQ(Signal* signal)
-{
- ndbrequire(lcpPtr.p->firstLcpLocTup == RNIL);
- ndbrequire(lcpPtr.p->firstLcpLocAcc == RNIL);
-
- TablerecPtr tabPtr;
- tabPtr.i = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
- ptrAss(tabPtr, tablerec);
- if(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING ||
- tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
- jam();
- /**
- * Fake that the fragment is done
- */
- lcpCompletedLab(signal);
- return;
- }
-
- ndbrequire(tabPtr.p->tableStatus == Tablerec::TABLE_DEFINED);
-
- lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_FRAGID;
- signal->theData[0] = lcpPtr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
- signal->theData[3] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
- signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.fragmentId;
- signal->theData[5] = lcpPtr.p->currentFragment.lcpFragOrd.lcpId % MAX_LCP_STORED;
- sendSignal(fragptr.p->accBlockref, GSN_LCP_FRAGIDREQ, signal, 6, JBB);
-}//Dblqh::sendLCP_FRAGIDREQ()
-
-void Dblqh::sendEMPTY_LCP_CONF(Signal* signal, bool idle)
-{
-
- EmptyLcpConf * const rep = (EmptyLcpConf*)&signal->theData[0];
- /* ----------------------------------------------------------------------
-   * We have been requested to report when there are no more local checkpoint
-   * fragments waiting to be started or ongoing. In this signal we also report
-   * the state of the last completed fragment.
- * ---------------------------------------------------------------------- */
- rep->senderNodeId = getOwnNodeId();
- if(!idle){
- jam();
- rep->idle = 0 ;
- rep->tableId = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
- rep->fragmentId = lcpPtr.p->currentFragment.lcpFragOrd.fragmentId;
- rep->lcpNo = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
- rep->lcpId = lcpPtr.p->currentFragment.lcpFragOrd.lcpId;
- } else {
- jam();
- rep->idle = 1;
- rep->tableId = ~0;
- rep->fragmentId = ~0;
- rep->lcpNo = ~0;
- rep->lcpId = c_lcpId;
- }
-
- for (Uint32 i = 0; i < cnoOfNodes; i++) {
- jam();
- Uint32 nodeId = cnodeData[i];
- if (lcpPtr.p->m_EMPTY_LCP_REQ.get(nodeId)) {
- jam();
-
- BlockReference blockref = calcDihBlockRef(nodeId);
- sendSignal(blockref, GSN_EMPTY_LCP_CONF, signal,
- EmptyLcpConf::SignalLength, JBB);
- }//if
- }//for
-
- lcpPtr.p->reportEmpty = false;
- lcpPtr.p->m_EMPTY_LCP_REQ.clear();
-}//Dblqh::sendEMPTY_LCP_CONF()
-
-void Dblqh::execACC_LCPREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dblqh::execACC_LCPREF()
-
-void Dblqh::execTUP_LCPREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dblqh::execTUP_LCPREF()
-
-/* --------------------------------------------------------------------------
- * THE LOCAL CHECKPOINT ROUND IS NOW COMPLETED. SEND COMPLETED MESSAGE
- * TO THE MASTER DIH.
- * ------------------------------------------------------------------------- */
-void Dblqh::completeLcpRoundLab(Signal* signal)
-{
- clcpCompletedState = LCP_CLOSE_STARTED;
- signal->theData[0] = caccBlockref;
- signal->theData[1] = cownref;
- sendSignal(caccBlockref, GSN_END_LCPREQ, signal, 2, JBB);
- signal->theData[0] = ctupBlockref;
- signal->theData[1] = cownref;
- sendSignal(ctupBlockref, GSN_END_LCPREQ, signal, 2, JBB);
- return;
-}//Dblqh::completeLcpRoundLab()
-
-void Dblqh::execEND_LCPCONF(Signal* signal)
-{
- jamEntry();
- BlockReference userpointer = signal->theData[0];
- if (userpointer == caccBlockref) {
- if (clcpCompletedState == LCP_CLOSE_STARTED) {
- jam();
- clcpCompletedState = ACC_LCP_CLOSE_COMPLETED;
- return;
- } else {
- jam();
- ndbrequire(clcpCompletedState == TUP_LCP_CLOSE_COMPLETED);
- clcpCompletedState = LCP_IDLE;
- }//if
- } else {
- ndbrequire(userpointer == ctupBlockref);
- if (clcpCompletedState == LCP_CLOSE_STARTED) {
- jam();
- clcpCompletedState = TUP_LCP_CLOSE_COMPLETED;
- return;
- } else {
- jam();
- ndbrequire(clcpCompletedState == ACC_LCP_CLOSE_COMPLETED);
- clcpCompletedState = LCP_IDLE;
- }//if
- }//if
- lcpPtr.i = 0;
- ptrAss(lcpPtr, lcpRecord);
- sendLCP_COMPLETE_REP(signal, lcpPtr.p->currentFragment.lcpFragOrd.lcpId);
-}//Dblqh::execEND_LCPCONF()
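execEND_LCPCONF() above accepts END_LCPCONF from ACC and TUP in either order, stepping clcpCompletedState through LCP_CLOSE_STARTED, then ACC_LCP_CLOSE_COMPLETED or TUP_LCP_CLOSE_COMPLETED, and finally LCP_IDLE, at which point LCP_COMPLETE_REP is sent. The same two-confirmation state machine in isolation, as a small sketch with simplified stand-in names:

    #include <cassert>
    #include <cstdio>

    // Simplified stand-ins for LCP_CLOSE_STARTED, ACC_LCP_CLOSE_COMPLETED,
    // TUP_LCP_CLOSE_COMPLETED and LCP_IDLE.
    enum CloseState { CLOSE_STARTED, ACC_DONE, TUP_DONE, CLOSE_IDLE };

    static CloseState onEndLcpConf(CloseState s, bool fromAcc)
    {
      if (s == CLOSE_STARTED)                        // first of the two confirmations
        return fromAcc ? ACC_DONE : TUP_DONE;
      assert(s == (fromAcc ? TUP_DONE : ACC_DONE));  // second one must be the other block
      return CLOSE_IDLE;                             // both done: send LCP_COMPLETE_REP
    }

    int main()
    {
      CloseState s = CLOSE_STARTED;
      s = onEndLcpConf(s, /*fromAcc=*/false);        // TUP answers first this time
      s = onEndLcpConf(s, /*fromAcc=*/true);         // then ACC
      std::printf("final state = %d (3 == CLOSE_IDLE)\n", (int)s);
      return 0;
    }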
-
-void Dblqh::sendLCP_COMPLETE_REP(Signal* signal, Uint32 lcpId)
-{
- cnoOfFragsCheckpointed = 0;
- ndbrequire((cnoOfNodes - 1) < (MAX_NDB_NODES - 1));
- /* ------------------------------------------------------------------------
- * WE SEND COMP_LCP_ROUND TO ALL NODES TO PREPARE FOR NODE CRASHES.
- * ----------------------------------------------------------------------- */
- lcpPtr.i = 0;
- ptrAss(lcpPtr, lcpRecord);
- lcpPtr.p->lastFragmentFlag = false;
-
- LcpCompleteRep* rep = (LcpCompleteRep*)signal->getDataPtrSend();
- rep->nodeId = getOwnNodeId();
- rep->lcpId = lcpId;
- rep->blockNo = DBLQH;
-
- for (Uint32 i = 0; i < cnoOfNodes; i++) {
- jam();
- Uint32 nodeId = cnodeData[i];
- if(cnodeStatus[i] == ZNODE_UP){
- jam();
-
- BlockReference blockref = calcDihBlockRef(nodeId);
- sendSignal(blockref, GSN_LCP_COMPLETE_REP, signal,
- LcpCompleteRep::SignalLength, JBB);
- }//if
- }//for
-
- if(lcpPtr.p->reportEmpty){
- jam();
- sendEMPTY_LCP_CONF(signal, true);
- }
- return;
-}//Dblqh::sendLCP_COMPLETE_REP()
-
-/* ==========================================================================
- * ======= CHECK IF ALL PARTS OF A LOCAL CHECKPOINT ARE COMPLETED =======
- *
- * SUBROUTINE SHORT NAME = CLC
- * ========================================================================= */
-void Dblqh::checkLcpCompleted(Signal* signal)
-{
- LcpLocRecordPtr clcLcpLocptr;
-
- clcLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
- while (clcLcpLocptr.i != RNIL) {
- ptrCheckGuard(clcLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- if (clcLcpLocptr.p->lcpLocstate != LcpLocRecord::ACC_COMPLETED) {
- jam();
- ndbrequire((clcLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED) ||
- (clcLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_STARTED));
- return;
- }//if
- clcLcpLocptr.i = clcLcpLocptr.p->nextLcpLoc;
- }
-
- clcLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
- while (clcLcpLocptr.i != RNIL){
- ptrCheckGuard(clcLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- if (clcLcpLocptr.p->lcpLocstate != LcpLocRecord::TUP_COMPLETED) {
- jam();
- ndbrequire((clcLcpLocptr.p->lcpLocstate==LcpLocRecord::TUP_WAIT_STARTED)
- ||(clcLcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_STARTED));
- return;
- }//if
- clcLcpLocptr.i = clcLcpLocptr.p->nextLcpLoc;
- }
-
- lcpPtr.p->lcpState = LcpRecord::LCP_COMPLETED;
-}//Dblqh::checkLcpCompleted()
-
-/* ==========================================================================
- * ======= CHECK IF ALL HOLD OPERATIONS ARE COMPLETED =======
- *
- * SUBROUTINE SHORT NAME = CHO
- * ========================================================================= */
-void Dblqh::checkLcpHoldop(Signal* signal)
-{
- LcpLocRecordPtr choLcpLocptr;
-
- choLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
- do {
- ptrCheckGuard(choLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- if (choLcpLocptr.p->lcpLocstate != LcpLocRecord::HOLDOP_READY) {
- ndbrequire(choLcpLocptr.p->lcpLocstate == LcpLocRecord::WAIT_LCPHOLDOP);
- return;
- }//if
- choLcpLocptr.i = choLcpLocptr.p->nextLcpLoc;
- } while (choLcpLocptr.i != RNIL);
- lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_ACTIVE_FINISH;
-}//Dblqh::checkLcpHoldop()
-
-/* ==========================================================================
- * ======= CHECK IF ALL PARTS OF A LOCAL CHECKPOINT ARE STARTED =======
- *
- * SUBROUTINE SHORT NAME = CLS
- * ========================================================================== */
-bool
-Dblqh::checkLcpStarted(Signal* signal)
-{
- LcpLocRecordPtr clsLcpLocptr;
-
- terrorCode = ZOK;
- clsLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
- int i = 0;
- do {
- ptrCheckGuard(clsLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- if (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED){
- return false;
- }//if
- clsLcpLocptr.i = clsLcpLocptr.p->nextLcpLoc;
- i++;
- } while (clsLcpLocptr.i != RNIL);
-
- i = 0;
- clsLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
- do {
- ptrCheckGuard(clsLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- if (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_WAIT_STARTED){
- return false;
- }//if
- clsLcpLocptr.i = clsLcpLocptr.p->nextLcpLoc;
- i++;
- } while (clsLcpLocptr.i != RNIL);
-
- return true;
-}//Dblqh::checkLcpStarted()
-
-/* ==========================================================================
- * ======= CHECK IF ALL PREPARE TUP OPERATIONS ARE COMPLETED =======
- *
- * SUBROUTINE SHORT NAME = CLT
- * ========================================================================== */
-void Dblqh::checkLcpTupprep(Signal* signal)
-{
- LcpLocRecordPtr cltLcpLocptr;
- cltLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
- do {
- ptrCheckGuard(cltLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- if (cltLcpLocptr.p->lcpLocstate != LcpLocRecord::IDLE) {
- ndbrequire(cltLcpLocptr.p->lcpLocstate == LcpLocRecord::WAIT_TUP_PREPLCP);
- return;
- }//if
- cltLcpLocptr.i = cltLcpLocptr.p->nextLcpLoc;
- } while (cltLcpLocptr.i != RNIL);
- lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_HOLDOPS;
-}//Dblqh::checkLcpTupprep()
-
-/* ==========================================================================
- * ======= INITIATE LCP LOCAL RECORD USED TOWARDS ACC =======
- *
- * ========================================================================== */
-void Dblqh::initLcpLocAcc(Signal* signal, Uint32 fragId)
-{
- lcpLocptr.p->nextLcpLoc = lcpPtr.p->firstLcpLocAcc;
- lcpPtr.p->firstLcpLocAcc = lcpLocptr.i;
- lcpLocptr.p->locFragid = fragId;
- lcpLocptr.p->waitingBlock = LcpLocRecord::ACC;
- lcpLocptr.p->lcpLocstate = LcpLocRecord::IDLE;
- lcpLocptr.p->masterLcpRec = lcpPtr.i;
- lcpLocptr.p->tupRef = RNIL;
-}//Dblqh::initLcpLocAcc()
-
-/* ==========================================================================
- * ======= INITIATE LCP LOCAL RECORD USED TOWARDS TUP =======
- *
- * ========================================================================== */
-void Dblqh::initLcpLocTup(Signal* signal, Uint32 fragId)
-{
- lcpLocptr.p->nextLcpLoc = lcpPtr.p->firstLcpLocTup;
- lcpPtr.p->firstLcpLocTup = lcpLocptr.i;
- lcpLocptr.p->locFragid = fragId;
- lcpLocptr.p->waitingBlock = LcpLocRecord::TUP;
- lcpLocptr.p->lcpLocstate = LcpLocRecord::WAIT_TUP_PREPLCP;
- lcpLocptr.p->masterLcpRec = lcpPtr.i;
- lcpLocptr.p->tupRef = RNIL;
-}//Dblqh::initLcpLocTup()
-
-/* --------------------------------------------------------------------------
- * ------- MOVE OPERATION FROM ACC WAITING LIST ON FRAGMENT -------
- * ------- TO ACTIVE LIST ON FRAGMENT -------
- *
- * SUBROUTINE SHORT NAME = MAA
- * -------------------------------------------------------------------------- */
-void Dblqh::moveAccActiveFrag(Signal* signal)
-{
- UintR maaTcNextConnectptr;
-
- tcConnectptr.i = fragptr.p->accBlockedList;
- fragptr.p->accBlockedList = RNIL;
- /* ------------------------------------------------------------------------
- * WE WILL MOVE ALL RECORDS FROM THE ACC BLOCKED LIST AT ONCE.
- * ------------------------------------------------------------------------ */
- while (tcConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- maaTcNextConnectptr = tcConnectptr.p->nextTc;
- ndbrequire(tcConnectptr.p->listState == TcConnectionrec::ACC_BLOCK_LIST);
- tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
- linkActiveFrag(signal);
- tcConnectptr.i = maaTcNextConnectptr;
- }//while
-}//Dblqh::moveAccActiveFrag()
-
-/* --------------------------------------------------------------------------
- * ------- MOVE OPERATION FROM ACTIVE LIST ON FRAGMENT -------
- * ------- TO ACC BLOCKED LIST ON FRAGMENT -------
- *
- * SUBROUTINE SHORT NAME = MAT
- * -------------------------------------------------------------------------- */
-void Dblqh::moveActiveToAcc(Signal* signal)
-{
- TcConnectionrecPtr matTcNextConnectptr;
-
- releaseActiveList(signal);
- /* ------------------------------------------------------------------------
- * PUT OPERATION RECORD FIRST IN ACC BLOCKED LIST.
- * ------------------------------------------------------------------------ */
- matTcNextConnectptr.i = fragptr.p->accBlockedList;
- tcConnectptr.p->nextTc = matTcNextConnectptr.i;
- tcConnectptr.p->prevTc = RNIL;
- tcConnectptr.p->listState = TcConnectionrec::ACC_BLOCK_LIST;
- fragptr.p->accBlockedList = tcConnectptr.i;
- if (matTcNextConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(matTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- matTcNextConnectptr.p->prevTc = tcConnectptr.i;
- }//if
-}//Dblqh::moveActiveToAcc()
-
-/* ------------------------------------------------------------------------- */
-/* ---- RELEASE LOCAL LCP RECORDS AFTER COMPLETION OF A LOCAL CHECKPOINT---- */
-/* */
-/* SUBROUTINE SHORT NAME = RLL */
-/* ------------------------------------------------------------------------- */
-void Dblqh::releaseLocalLcps(Signal* signal)
-{
- lcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
- while (lcpLocptr.i != RNIL){
- ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- Uint32 tmp = lcpLocptr.p->nextLcpLoc;
- releaseLcpLoc(signal);
- lcpLocptr.i = tmp;
- }
- lcpPtr.p->firstLcpLocAcc = RNIL;
-
- lcpLocptr.i = lcpPtr.p->firstLcpLocTup;
- while (lcpLocptr.i != RNIL){
- ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- Uint32 tmp = lcpLocptr.p->nextLcpLoc;
- releaseLcpLoc(signal);
- lcpLocptr.i = tmp;
- }
- lcpPtr.p->firstLcpLocTup = RNIL;
-
-}//Dblqh::releaseLocalLcps()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SEIZE LCP LOCAL RECORD ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::seizeLcpLoc(Signal* signal)
-{
- lcpLocptr.i = cfirstfreeLcpLoc;
- ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- cfirstfreeLcpLoc = lcpLocptr.p->nextLcpLoc;
- lcpLocptr.p->nextLcpLoc = RNIL;
-}//Dblqh::seizeLcpLoc()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SEND ACC_CONT_OP ------- */
-/* */
-/* INPUT: LCP_PTR LOCAL CHECKPOINT RECORD */
-/* FRAGPTR FRAGMENT RECORD */
-/* */
-/* SUBROUTINE SHORT NAME = SAC */
-/* ------------------------------------------------------------------------- */
-void Dblqh::sendAccContOp(Signal* signal)
-{
- LcpLocRecordPtr sacLcpLocptr;
-
- int count = 0;
- sacLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
- do {
- ptrCheckGuard(sacLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- sacLcpLocptr.p->accContCounter = 0;
- /* ------------------------------------------------------------------- */
- /*SEND START OPERATIONS TO ACC AGAIN */
- /* ------------------------------------------------------------------- */
- signal->theData[0] = lcpPtr.p->lcpAccptr;
- signal->theData[1] = sacLcpLocptr.p->locFragid;
- sendSignal(fragptr.p->accBlockref, GSN_ACC_CONTOPREQ, signal, 2, JBA);
- sacLcpLocptr.i = sacLcpLocptr.p->nextLcpLoc;
- } while (sacLcpLocptr.i != RNIL);
-
-}//Dblqh::sendAccContOp()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SEND ACC_LCPREQ AND TUP_LCPREQ ------- */
-/* */
-/* INPUT: LCP_PTR LOCAL CHECKPOINT RECORD */
-/* FRAGPTR FRAGMENT RECORD */
-/* SUBROUTINE SHORT NAME = STL */
-/* ------------------------------------------------------------------------- */
-void Dblqh::sendStartLcp(Signal* signal)
-{
- LcpLocRecordPtr stlLcpLocptr;
- stlLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
- do {
- jam();
- ptrCheckGuard(stlLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- stlLcpLocptr.p->lcpLocstate = LcpLocRecord::ACC_WAIT_STARTED;
- signal->theData[0] = lcpPtr.p->lcpAccptr;
- signal->theData[1] = stlLcpLocptr.i;
- signal->theData[2] = stlLcpLocptr.p->locFragid;
- sendSignal(fragptr.p->accBlockref, GSN_ACC_LCPREQ, signal, 3, JBA);
- stlLcpLocptr.i = stlLcpLocptr.p->nextLcpLoc;
- } while (stlLcpLocptr.i != RNIL);
-
- stlLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
- do {
- jam();
- ptrCheckGuard(stlLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- stlLcpLocptr.p->lcpLocstate = LcpLocRecord::TUP_WAIT_STARTED;
- signal->theData[0] = stlLcpLocptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = stlLcpLocptr.p->tupRef;
- if(ERROR_INSERTED(5077))
- sendSignalWithDelay(fragptr.p->tupBlockref, GSN_TUP_LCPREQ,
- signal, 5000, 3);
- else
- sendSignal(fragptr.p->tupBlockref, GSN_TUP_LCPREQ, signal, 3, JBA);
- stlLcpLocptr.i = stlLcpLocptr.p->nextLcpLoc;
- } while (stlLcpLocptr.i != RNIL);
-
- if(ERROR_INSERTED(5077))
- {
- ndbout_c("Delayed TUP_LCPREQ with 5 sec");
- }
-}//Dblqh::sendStartLcp()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SET THE LOG TAIL IN THE LOG FILES ------- */
-/* */
-/*THIS SUBROUTINE HAS BEEN BUGGY AND IS RATHER COMPLEX. IT IS IMPORTANT TO  */
-/*REMEMBER THAT WE SEARCH FROM THE TAIL UNTIL WE REACH THE HEAD (CURRENT). */
-/*THE TAIL AND HEAD CAN BE ON THE SAME MBYTE. WE SEARCH UNTIL WE FIND A MBYTE*/
-/*THAT WE NEED TO KEEP. WE THEN SET THE TAIL TO BE THE PREVIOUS. IF WE DO */
-/*NOT FIND A MBYTE THAT WE NEED TO KEEP UNTIL WE REACH THE HEAD THEN WE USE */
-/*THE HEAD AS TAIL. FINALLY WE HAVE TO MOVE BACK THE TAIL TO ALSO INCLUDE */
-/*ALL PREPARE RECORDS. THIS MEANS THAT LONG-LIVED TRANSACTIONS ARE DANGEROUS */
-/*FOR SHORT LOGS. */
-/* ------------------------------------------------------------------------- */
-
-// this function has not been verified yet
-Uint32 Dblqh::remainingLogSize(const LogFileRecordPtr &sltCurrLogFilePtr,
- const LogPartRecordPtr &sltLogPartPtr)
-{
- Uint32 hf = sltCurrLogFilePtr.p->fileNo*ZNO_MBYTES_IN_FILE+sltCurrLogFilePtr.p->currentMbyte;
- Uint32 tf = sltLogPartPtr.p->logTailFileNo*ZNO_MBYTES_IN_FILE+sltLogPartPtr.p->logTailMbyte;
- Uint32 sz = sltLogPartPtr.p->noLogFiles*ZNO_MBYTES_IN_FILE;
- if (tf > hf) hf += sz;
- return sz-(hf-tf);
-}
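remainingLogSize() above measures the unused part of a log part by treating all of its files as one circular sequence of megabytes. As a minimal standalone sketch of the same wrap-around arithmetic, with hypothetical names (headMb, tailMb, totalMb) in place of the block's record pointers:

#include <cstdint>

// Sketch only: free megabytes in a circular log. headMb and tailMb are
// absolute positions (fileNo * mbytesPerFile + mbyteInFile) and totalMb is
// noLogFiles * mbytesPerFile, mirroring the expression in remainingLogSize().
static uint32_t remainingMbytes(uint32_t headMb, uint32_t tailMb,
                                uint32_t totalMb)
{
  if (tailMb > headMb)                  // the head has wrapped past file 0
    headMb += totalMb;                  // unwrap so that headMb >= tailMb
  return totalMb - (headMb - tailMb);   // total size minus megabytes in use
}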
-
-void Dblqh::setLogTail(Signal* signal, Uint32 keepGci)
-{
- LogPartRecordPtr sltLogPartPtr;
- LogFileRecordPtr sltLogFilePtr;
-#if 0
- LogFileRecordPtr sltCurrLogFilePtr;
-#endif
- UintR tsltMbyte;
- UintR tsltStartMbyte;
- UintR tsltIndex;
- UintR tsltFlag;
-
- for (sltLogPartPtr.i = 0; sltLogPartPtr.i < 4; sltLogPartPtr.i++) {
- jam();
- ptrAss(sltLogPartPtr, logPartRecord);
- findLogfile(signal, sltLogPartPtr.p->logTailFileNo,
- sltLogPartPtr, &sltLogFilePtr);
-
-#if 0
- sltCurrLogFilePtr.i = sltLogPartPtr.p->currentLogfile;
- ptrCheckGuard(sltCurrLogFilePtr, clogFileFileSize, logFileRecord);
- infoEvent("setLogTail: Available log file %d size = %d[mbytes]+%d[words]", sltLogPartPtr.i,
- remainingLogSize(sltCurrLogFilePtr, sltLogPartPtr), sltCurrLogFilePtr.p->remainingWordsInMbyte);
-#endif
-
- tsltMbyte = sltLogPartPtr.p->logTailMbyte;
- tsltStartMbyte = tsltMbyte;
- tsltFlag = ZFALSE;
- if (sltLogFilePtr.i == sltLogPartPtr.p->currentLogfile) {
-/* ------------------------------------------------------------------------- */
-/*THE LOG HEAD AND THE TAIL ARE ALREADY IN THE SAME FILE.                   */
-/* ------------------------------------------------------------------------- */
- if (sltLogFilePtr.p->currentMbyte >= sltLogPartPtr.p->logTailMbyte) {
- jam();
-/* ------------------------------------------------------------------------- */
-/*THE CURRENT MBYTE IS AHEAD OF OR AT THE TAIL. THUS WE WILL ONLY LOOK FOR */
-/*THE TAIL UNTIL WE REACH THE CURRENT MBYTE WHICH IS IN THIS LOG FILE. */
-/*IF THE LOG TAIL IS AHEAD OF THE CURRENT MBYTE BUT IN THE SAME LOG FILE */
-/*THEN WE HAVE TO SEARCH THROUGH ALL FILES BEFORE WE COME TO THE CURRENT */
-/*MBYTE. WE ALWAYS STOP WHEN WE COME TO THE CURRENT MBYTE SINCE THE TAIL */
-/*CAN NEVER BE BEFORE THE HEAD. */
-/* ------------------------------------------------------------------------- */
- tsltFlag = ZTRUE;
- }//if
- }//if
-
-/* ------------------------------------------------------------------------- */
-/*NOW START SEARCHING FOR THE NEW TAIL, STARTING AT THE CURRENT TAIL AND */
-/*PROCEEDING UNTIL WE FIND A MBYTE WHICH IS NEEDED TO KEEP OR UNTIL WE REACH */
-/*CURRENT MBYTE (THE HEAD). */
-/* ------------------------------------------------------------------------- */
- SLT_LOOP:
- for (tsltIndex = tsltStartMbyte;
- tsltIndex <= ZNO_MBYTES_IN_FILE - 1;
- tsltIndex++) {
- if (sltLogFilePtr.p->logMaxGciStarted[tsltIndex] >= keepGci) {
-/* ------------------------------------------------------------------------- */
-/*WE ARE NOT ALLOWED TO STEP THE LOG ANY FURTHER AHEAD */
-/*SET THE NEW LOG TAIL AND CONTINUE WITH NEXT LOG PART. */
-/*THIS MBYTE IS NOT TO BE INCLUDED SO WE NEED TO STEP BACK ONE MBYTE. */
-/* ------------------------------------------------------------------------- */
- if (tsltIndex != 0) {
- jam();
- tsltMbyte = tsltIndex - 1;
- } else {
- jam();
-/* ------------------------------------------------------------------------- */
-/*STEPPING BACK INCLUDES ALSO STEPPING BACK TO THE PREVIOUS LOG FILE. */
-/* ------------------------------------------------------------------------- */
- tsltMbyte = ZNO_MBYTES_IN_FILE - 1;
- sltLogFilePtr.i = sltLogFilePtr.p->prevLogFile;
- ptrCheckGuard(sltLogFilePtr, clogFileFileSize, logFileRecord);
- }//if
- goto SLT_BREAK;
- } else {
- jam();
- if (tsltFlag == ZTRUE) {
-/* ------------------------------------------------------------------------- */
-/*WE ARE IN THE SAME FILE AS THE CURRENT MBYTE AND WE CAN REACH THE CURRENT */
-/*MBYTE BEFORE WE REACH A NEW TAIL. */
-/* ------------------------------------------------------------------------- */
- if (tsltIndex == sltLogFilePtr.p->currentMbyte) {
- jam();
-/* ------------------------------------------------------------------------- */
-/*THE TAIL OF THE LOG IS ACTUALLY WITHIN THE CURRENT MBYTE. THUS WE SET THE */
-/*LOG TAIL TO BE THE CURRENT MBYTE. */
-/* ------------------------------------------------------------------------- */
- tsltMbyte = sltLogFilePtr.p->currentMbyte;
- goto SLT_BREAK;
- }//if
- }//if
- }//if
- }//for
- sltLogFilePtr.i = sltLogFilePtr.p->nextLogFile;
- ptrCheckGuard(sltLogFilePtr, clogFileFileSize, logFileRecord);
- if (sltLogFilePtr.i == sltLogPartPtr.p->currentLogfile) {
- jam();
- tsltFlag = ZTRUE;
- }//if
- tsltStartMbyte = 0;
- goto SLT_LOOP;
- SLT_BREAK:
- jam();
- {
- UintR ToldTailFileNo = sltLogPartPtr.p->logTailFileNo;
- UintR ToldTailMByte = sltLogPartPtr.p->logTailMbyte;
-
- arrGuard(tsltMbyte, 16);
- sltLogPartPtr.p->logTailFileNo =
- sltLogFilePtr.p->logLastPrepRef[tsltMbyte] >> 16;
-/* ------------------------------------------------------------------------- */
-/*SINCE LOG_MAX_GCI_STARTED ONLY KEEP TRACK OF COMMIT LOG RECORDS WE ALSO */
-/*HAVE TO STEP BACK THE TAIL SO THAT WE INCLUDE ALL PREPARE RECORDS */
-/*NEEDED FOR THOSE COMMIT RECORDS IN THIS MBYTE. THIS IS A RATHER */
-/*CONSERVATIVE APPROACH BUT IT WORKS. */
-/* ------------------------------------------------------------------------- */
- sltLogPartPtr.p->logTailMbyte =
- sltLogFilePtr.p->logLastPrepRef[tsltMbyte] & 65535;
- if ((ToldTailFileNo != sltLogPartPtr.p->logTailFileNo) ||
- (ToldTailMByte != sltLogPartPtr.p->logTailMbyte)) {
- jam();
- if (sltLogPartPtr.p->logPartState == LogPartRecord::TAIL_PROBLEM) {
- if (sltLogPartPtr.p->firstLogQueue == RNIL) {
- jam();
- sltLogPartPtr.p->logPartState = LogPartRecord::IDLE;
- } else {
- jam();
- sltLogPartPtr.p->logPartState = LogPartRecord::ACTIVE;
- }//if
- }//if
- }//if
- }
-#if 0
- infoEvent("setLogTail: Available log file %d size = %d[mbytes]+%d[words]", sltLogPartPtr.i,
- remainingLogSize(sltCurrLogFilePtr, sltLogPartPtr), sltCurrLogFilePtr.p->remainingWordsInMbyte);
-#endif
- }//for
-
-}//Dblqh::setLogTail()
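setLogTail() reads logLastPrepRef[] entries in which the file number sits in the high 16 bits and the megabyte number in the low 16 bits. A small sketch of that packing, with hypothetical helper names and no dependence on the Dblqh record layout:

#include <cstdint>

// Sketch only: pack and unpack a (fileNo, mbyte) prepare-record reference
// into one 32-bit word, as the tail computation above assumes.
static uint32_t packPrepRef(uint32_t fileNo, uint32_t mbyte)
{
  return (fileNo << 16) | (mbyte & 0xFFFF);
}

static void unpackPrepRef(uint32_t ref, uint32_t& fileNo, uint32_t& mbyte)
{
  fileNo = ref >> 16;       // matches "logLastPrepRef[tsltMbyte] >> 16"
  mbyte  = ref & 0xFFFF;    // matches "logLastPrepRef[tsltMbyte] & 65535"
}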
-
-/* ######################################################################### */
-/* ####### GLOBAL CHECKPOINT MODULE ####### */
-/* */
-/* ######################################################################### */
-/*---------------------------------------------------------------------------*/
-/* THIS MODULE HELPS DIH IN DISCOVERING WHEN GLOBAL CHECKPOINTS ARE */
-/* RECOVERABLE. IT HANDLES THE REQUEST GCP_SAVEREQ THAT REQUESTS LQH TO */
-/* SAVE A PARTICULAR GLOBAL CHECKPOINT TO DISK AND RESPOND WHEN COMPLETED. */
-/*---------------------------------------------------------------------------*/
-/* *************** */
-/* GCP_SAVEREQ > */
-/* *************** */
-void Dblqh::execGCP_SAVEREQ(Signal* signal)
-{
- jamEntry();
- const GCPSaveReq * const saveReq = (GCPSaveReq *)&signal->theData[0];
-
- if (ERROR_INSERTED(5000)) {
- systemErrorLab(signal);
- }
-
- if (ERROR_INSERTED(5007)){
- CLEAR_ERROR_INSERT_VALUE;
- sendSignalWithDelay(cownref, GSN_GCP_SAVEREQ, signal, 10000,
- signal->length());
- return;
- }
-
- const Uint32 dihBlockRef = saveReq->dihBlockRef;
- const Uint32 dihPtr = saveReq->dihPtr;
- const Uint32 gci = saveReq->gci;
-
- ndbrequire(gci >= cnewestCompletedGci);
-
- if (gci == cnewestCompletedGci) {
-/*---------------------------------------------------------------------------*/
-/* GLOBAL CHECKPOINT HAS ALREADY BEEN HANDLED. REQUEST MUST HAVE BEEN SENT  */
-/* FROM NEW MASTER DIH. */
-/*---------------------------------------------------------------------------*/
- if (ccurrentGcprec == RNIL) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THIS INDICATES THAT WE HAVE ALREADY SENT GCP_SAVECONF TO PREVIOUS MASTER. */
-/* WE SIMPLY SEND IT ALSO TO THE NEW MASTER. */
-/*---------------------------------------------------------------------------*/
- GCPSaveConf * const saveConf = (GCPSaveConf*)&signal->theData[0];
- saveConf->dihPtr = dihPtr;
- saveConf->nodeId = getOwnNodeId();
- saveConf->gci = cnewestCompletedGci;
- sendSignal(dihBlockRef, GSN_GCP_SAVECONF, signal,
- GCPSaveConf::SignalLength, JBA);
- return;
- }
- jam();
-/*---------------------------------------------------------------------------*/
-/* WE HAVE NOT YET SENT THE RESPONSE TO THE OLD MASTER. WE WILL SET THE NEW */
-/* RECEIVER OF THE RESPONSE AND THEN EXIT SINCE THE PROCESS IS ALREADY */
-/* STARTED. */
-/*---------------------------------------------------------------------------*/
- gcpPtr.i = ccurrentGcprec;
- ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
- gcpPtr.p->gcpUserptr = dihPtr;
- gcpPtr.p->gcpBlockref = dihBlockRef;
- return;
- }//if
-
- ndbrequire(ccurrentGcprec == RNIL);
-
-
- if(getNodeState().startLevel >= NodeState::SL_STOPPING_4){
- GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
- saveRef->dihPtr = dihPtr;
- saveRef->nodeId = getOwnNodeId();
- saveRef->gci = gci;
- saveRef->errorCode = GCPSaveRef::NodeShutdownInProgress;
- sendSignal(dihBlockRef, GSN_GCP_SAVEREF, signal,
- GCPSaveRef::SignalLength, JBB);
- return;
- }
-
- if(getNodeState().getNodeRestartInProgress()){
- GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
- saveRef->dihPtr = dihPtr;
- saveRef->nodeId = getOwnNodeId();
- saveRef->gci = gci;
- saveRef->errorCode = GCPSaveRef::NodeRestartInProgress;
- sendSignal(dihBlockRef, GSN_GCP_SAVEREF, signal,
- GCPSaveRef::SignalLength, JBB);
- return;
- }
-
- ccurrentGcprec = 0;
- gcpPtr.i = ccurrentGcprec;
- ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
-
- cnewestCompletedGci = gci;
- if (gci > cnewestGci) {
- jam();
- cnewestGci = gci;
- }//if
-
- gcpPtr.p->gcpBlockref = dihBlockRef;
- gcpPtr.p->gcpUserptr = dihPtr;
- gcpPtr.p->gcpId = gci;
- bool tlogActive = false;
- for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
- ptrAss(logPartPtr, logPartRecord);
- if (logPartPtr.p->logPartState == LogPartRecord::ACTIVE) {
- jam();
- logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_TRUE;
- tlogActive = true;
- } else {
- jam();
- logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_FALSE;
- logFilePtr.i = logPartPtr.p->currentLogfile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logPagePtr.i = logFilePtr.p->currentLogpage;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- writeCompletedGciLog(signal);
- }//if
- }//for
- if (tlogActive == true) {
- jam();
- return;
- }//if
- initGcpRecLab(signal);
- startTimeSupervision(signal);
- return;
-}//Dblqh::execGCP_SAVEREQ()
-
-/* ------------------------------------------------------------------------- */
-/* START TIME SUPERVISION OF THE LOG PARTS. */
-/* ------------------------------------------------------------------------- */
-void Dblqh::startTimeSupervision(Signal* signal)
-{
- for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
- jam();
- ptrAss(logPartPtr, logPartRecord);
-/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-/* WE HAVE TO START CHECKING IF THE LOG IS TO BE WRITTEN EVEN IF PAGES ARE */
-/* FULL. INITIALISE THE VALUES OF WHERE WE ARE IN THE LOG CURRENTLY. */
-/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
- logPartPtr.p->logPartTimer = 0;
- logPartPtr.p->logTimer = 1;
- signal->theData[0] = ZTIME_SUPERVISION;
- signal->theData[1] = logPartPtr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- }//for
-}//Dblqh::startTimeSupervision()
-
-/*---------------------------------------------------------------------------*/
-/* WE SET THE GLOBAL CHECKPOINT VARIABLES AFTER WRITING THE COMPLETED GCI LOG*/
-/* RECORD. THIS ENSURES THAT WE WILL ENCOUNTER THE COMPLETED GCI RECORD WHEN */
-/* WE EXECUTE THE FRAGMENT LOG. */
-/*---------------------------------------------------------------------------*/
-void Dblqh::initGcpRecLab(Signal* signal)
-{
-/* ======================================================================== */
-/* ======= INITIATE GCP RECORD ======= */
-/* */
-/* SUBROUTINE SHORT NAME = IGR */
-/* ======================================================================== */
- for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
- jam();
- ptrAss(logPartPtr, logPartRecord);
-/*--------------------------------------------------*/
-/* BY SETTING THE GCPREC = 0 WE START THE */
-/* CHECKING BY CHECK_GCP_COMPLETED. THIS */
-/* CHECKING MUST NOT BE STARTED UNTIL WE HAVE */
-/* INSERTED ALL COMPLETE GCI LOG RECORDS IN */
-/* ALL LOG PARTS. */
-/*--------------------------------------------------*/
- logPartPtr.p->gcprec = 0;
- gcpPtr.p->gcpLogPartState[logPartPtr.i] = ZWAIT_DISK;
- gcpPtr.p->gcpSyncReady[logPartPtr.i] = ZFALSE;
- logFilePtr.i = logPartPtr.p->currentLogfile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- gcpPtr.p->gcpFilePtr[logPartPtr.i] = logFilePtr.i;
- logPagePtr.i = logFilePtr.p->currentLogpage;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- if (logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] == ZPAGE_HEADER_SIZE) {
- jam();
-/*--------------------------------------------------*/
-/* SINCE THE CURRENT FILEPAGE POINTS AT THE */
-/* NEXT WORD TO BE WRITTEN WE HAVE TO ADJUST */
-/* FOR THIS BY DECREASING THE FILE PAGE BY ONE*/
-/* IF NO WORD HAS BEEN WRITTEN ON THE CURRENT */
-/* FILEPAGE. */
-/*--------------------------------------------------*/
- gcpPtr.p->gcpPageNo[logPartPtr.i] = logFilePtr.p->currentFilepage - 1;
- gcpPtr.p->gcpWordNo[logPartPtr.i] = ZPAGE_SIZE - 1;
- } else {
- jam();
- gcpPtr.p->gcpPageNo[logPartPtr.i] = logFilePtr.p->currentFilepage;
- gcpPtr.p->gcpWordNo[logPartPtr.i] =
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] - 1;
- }//if
- }//for
- return;
-}//Dblqh::initGcpRecLab()
-
-/* ========================================================================= */
-/* ==== CHECK IF ANY GLOBAL CHECKPOINTS ARE COMPLETED AFTER A COMPLETED===== */
-/* DISK WRITE. */
-/* */
-/* SUBROUTINE SHORT NAME = CGC */
-/* ========================================================================= */
-void Dblqh::checkGcpCompleted(Signal* signal,
- Uint32 tcgcPageWritten,
- Uint32 tcgcWordWritten)
-{
- UintR tcgcFlag;
- UintR tcgcJ;
-
- gcpPtr.i = logPartPtr.p->gcprec;
- if (gcpPtr.i != RNIL) {
- jam();
-/* ------------------------------------------------------------------------- */
-/* IF THE GLOBAL CHECKPOINT IS NOT WAITING FOR COMPLETION THEN WE CAN QUIT */
-/* THE SEARCH IMMEDIATELY. */
-/* ------------------------------------------------------------------------- */
- ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
- if (gcpPtr.p->gcpFilePtr[logPartPtr.i] == logFilePtr.i) {
-/* ------------------------------------------------------------------------- */
-/* IF THE COMPLETED DISK OPERATION WAS ON ANOTHER FILE THAN THE ONE WE ARE */
-/* WAITING FOR, THEN WE CAN ALSO QUIT THE SEARCH IMMEDIATELY. */
-/* ------------------------------------------------------------------------- */
- if (tcgcPageWritten < gcpPtr.p->gcpPageNo[logPartPtr.i]) {
- jam();
-/* ------------------------------------------------------------------------- */
-/* THIS LOG PART HAS NOT YET WRITTEN THE GLOBAL CHECKPOINT TO DISK.         */
-/* ------------------------------------------------------------------------- */
- return;
- } else {
- if (tcgcPageWritten == gcpPtr.p->gcpPageNo[logPartPtr.i]) {
- if (tcgcWordWritten < gcpPtr.p->gcpWordNo[logPartPtr.i]) {
- jam();
-/* ------------------------------------------------------------------------- */
-/* THIS LOG PART HAS NOT YET WRITTEN THE GLOBAL CHECKPOINT TO DISK.         */
-/* ------------------------------------------------------------------------- */
- return;
- }//if
- }//if
- }//if
-/* ------------------------------------------------------------------------- */
-/* THIS LOG PART HAS WRITTEN THE GLOBAL CHECKPOINT TO DISK.                 */
-/* ------------------------------------------------------------------------- */
- logPartPtr.p->gcprec = RNIL;
- gcpPtr.p->gcpLogPartState[logPartPtr.i] = ZON_DISK;
- tcgcFlag = ZTRUE;
- for (tcgcJ = 0; tcgcJ <= 3; tcgcJ++) {
- jam();
- if (gcpPtr.p->gcpLogPartState[tcgcJ] != ZON_DISK) {
- jam();
-/* ------------------------------------------------------------------------- */
-/*NOT ALL LOG PARTS HAVE SAVED THIS GLOBAL CHECKPOINT TO DISK YET. WAIT FOR */
-/*THEM TO COMPLETE. */
-/* ------------------------------------------------------------------------- */
- tcgcFlag = ZFALSE;
- }//if
- }//for
- if (tcgcFlag == ZTRUE) {
- jam();
-/* ------------------------------------------------------------------------- */
-/*WE HAVE FOUND A COMPLETED GLOBAL CHECKPOINT OPERATION. WE NOW NEED TO SEND */
-/*GCP_SAVECONF, REMOVE THE GCP RECORD FROM THE LIST OF WAITING GCP RECORDS */
-/*ON THIS LOG PART AND RELEASE THE GCP RECORD. */
-// After changing the log implementation we first need to perform a FSSYNCREQ
-// on all log files where the last log word resided before proceeding.
-/* ------------------------------------------------------------------------- */
- UintR Ti;
- for (Ti = 0; Ti < 4; Ti++) {
- LogFileRecordPtr loopLogFilePtr;
- loopLogFilePtr.i = gcpPtr.p->gcpFilePtr[Ti];
- ptrCheckGuard(loopLogFilePtr, clogFileFileSize, logFileRecord);
- if (loopLogFilePtr.p->logFileStatus == LogFileRecord::OPEN) {
- jam();
- signal->theData[0] = loopLogFilePtr.p->fileRef;
- signal->theData[1] = cownref;
- signal->theData[2] = gcpPtr.p->gcpFilePtr[Ti];
- sendSignal(NDBFS_REF, GSN_FSSYNCREQ, signal, 3, JBA);
- } else {
- ndbrequire((loopLogFilePtr.p->logFileStatus ==
- LogFileRecord::CLOSED) ||
- (loopLogFilePtr.p->logFileStatus ==
- LogFileRecord::CLOSING_WRITE_LOG) ||
- (loopLogFilePtr.p->logFileStatus ==
- LogFileRecord::OPENING_WRITE_LOG));
- signal->theData[0] = loopLogFilePtr.i;
- execFSSYNCCONF(signal);
- }//if
- }//for
- return;
- }//if
- }//if
- }//if
-}//Dblqh::checkGcpCompleted()
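The nested page/word comparisons in checkGcpCompleted() boil down to a lexicographic test: has the position just written to disk reached the position recorded for the checkpoint? A compact sketch with hypothetical names:

#include <cstdint>

// Sketch only: true when the completed write at (writtenPage, writtenWord)
// has reached or passed the recorded GCP position (gcpPage, gcpWord).
static bool gcpPositionReached(uint32_t writtenPage, uint32_t writtenWord,
                               uint32_t gcpPage, uint32_t gcpWord)
{
  if (writtenPage != gcpPage)
    return writtenPage > gcpPage;
  return writtenWord >= gcpWord;
}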
-
-void
-Dblqh::execFSSYNCCONF(Signal* signal)
-{
- GcpRecordPtr localGcpPtr;
- LogFileRecordPtr localLogFilePtr;
- LogPartRecordPtr localLogPartPtr;
- localLogFilePtr.i = signal->theData[0];
- ptrCheckGuard(localLogFilePtr, clogFileFileSize, logFileRecord);
- localLogPartPtr.i = localLogFilePtr.p->logPartRec;
- localGcpPtr.i = ccurrentGcprec;
- ptrCheckGuard(localGcpPtr, cgcprecFileSize, gcpRecord);
- localGcpPtr.p->gcpSyncReady[localLogPartPtr.i] = ZTRUE;
- UintR Ti;
- for (Ti = 0; Ti < 4; Ti++) {
- jam();
- if (localGcpPtr.p->gcpSyncReady[Ti] == ZFALSE) {
- jam();
- return;
- }//if
- }//for
- GCPSaveConf * const saveConf = (GCPSaveConf *)&signal->theData[0];
- saveConf->dihPtr = localGcpPtr.p->gcpUserptr;
- saveConf->nodeId = getOwnNodeId();
- saveConf->gci = localGcpPtr.p->gcpId;
- sendSignal(localGcpPtr.p->gcpBlockref, GSN_GCP_SAVECONF, signal,
- GCPSaveConf::SignalLength, JBA);
- ccurrentGcprec = RNIL;
-}//Dblqh::execFSSYNCCONF()
-
-void
-Dblqh::execFSSYNCREF(Signal* signal)
-{
- jamEntry();
- systemErrorLab(signal);
- return;
-}//Dblqh::execFSSYNCREF()
-
-
-/* ######################################################################### */
-/* ####### FILE HANDLING MODULE ####### */
-/* */
-/* ######################################################################### */
-/* THIS MODULE HANDLES RESPONSE MESSAGES FROM THE FILE SYSTEM */
-/* ######################################################################### */
-/* ######################################################################### */
-/* SIGNAL RECEPTION MODULE */
-/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
-/* */
-/* THIS MODULE CHECKS THE STATE AND JUMPS TO THE PROPER PART OF THE FILE */
-/* HANDLING MODULE. */
-/* ######################################################################### */
-/* *************** */
-/* FSCLOSECONF > */
-/* *************** */
-void Dblqh::execFSCLOSECONF(Signal* signal)
-{
- jamEntry();
- logFilePtr.i = signal->theData[0];
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- switch (logFilePtr.p->logFileStatus) {
- case LogFileRecord::CLOSE_SR_INVALIDATE_PAGES:
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
- // Set the prev file to check if we shall close it.
- logFilePtr.i = logFilePtr.p->prevLogFile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- exitFromInvalidate(signal);
- return;
- break;
- case LogFileRecord::CLOSING_INIT:
- jam();
- closingInitLab(signal);
- return;
- break;
- case LogFileRecord::CLOSING_SR:
- jam();
- closingSrLab(signal);
- return;
- break;
- case LogFileRecord::CLOSING_EXEC_SR:
- jam();
- closeExecSrLab(signal);
- return;
- break;
- case LogFileRecord::CLOSING_EXEC_SR_COMPLETED:
- jam();
- closeExecSrCompletedLab(signal);
- return;
- break;
- case LogFileRecord::CLOSING_WRITE_LOG:
- jam();
- closeWriteLogLab(signal);
- return;
- break;
- case LogFileRecord::CLOSING_EXEC_LOG:
- jam();
- closeExecLogLab(signal);
- return;
- break;
- default:
- jam();
- systemErrorLab(signal);
- return;
- break;
- }//switch
-}//Dblqh::execFSCLOSECONF()
-
-/* ************>> */
-/* FSCLOSEREF > */
-/* ************>> */
-void Dblqh::execFSCLOSEREF(Signal* signal)
-{
- jamEntry();
- terrorCode = signal->theData[1];
- systemErrorLab(signal);
- return;
-}//Dblqh::execFSCLOSEREF()
-
-/* ************>> */
-/* FSOPENCONF > */
-/* ************>> */
-void Dblqh::execFSOPENCONF(Signal* signal)
-{
- jamEntry();
- initFsopenconf(signal);
- switch (logFilePtr.p->logFileStatus) {
- case LogFileRecord::OPEN_SR_INVALIDATE_PAGES:
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
- readFileInInvalidate(signal);
- return;
- break;
- case LogFileRecord::OPENING_INIT:
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
- openFileInitLab(signal);
- return;
- break;
- case LogFileRecord::OPEN_SR_FRONTPAGE:
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
- openSrFrontpageLab(signal);
- return;
- break;
- case LogFileRecord::OPEN_SR_LAST_FILE:
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
- openSrLastFileLab(signal);
- return;
- break;
- case LogFileRecord::OPEN_SR_NEXT_FILE:
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
- openSrNextFileLab(signal);
- return;
- break;
- case LogFileRecord::OPEN_EXEC_SR_START:
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
- openExecSrStartLab(signal);
- return;
- break;
- case LogFileRecord::OPEN_EXEC_SR_NEW_MBYTE:
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
- openExecSrNewMbyteLab(signal);
- return;
- break;
- case LogFileRecord::OPEN_SR_FOURTH_PHASE:
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
- openSrFourthPhaseLab(signal);
- return;
- break;
- case LogFileRecord::OPEN_SR_FOURTH_NEXT:
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
- openSrFourthNextLab(signal);
- return;
- break;
- case LogFileRecord::OPEN_SR_FOURTH_ZERO:
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
- openSrFourthZeroLab(signal);
- return;
- break;
- case LogFileRecord::OPENING_WRITE_LOG:
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
- return;
- break;
- case LogFileRecord::OPEN_EXEC_LOG:
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
- openExecLogLab(signal);
- return;
- break;
- default:
- jam();
- systemErrorLab(signal);
- return;
- break;
- }//switch
-}//Dblqh::execFSOPENCONF()
-
-/* ************> */
-/* FSOPENREF > */
-/* ************> */
-void Dblqh::execFSOPENREF(Signal* signal)
-{
- jamEntry();
- terrorCode = signal->theData[1];
- systemErrorLab(signal);
- return;
-}//Dblqh::execFSOPENREF()
-
-/* ************>> */
-/* FSREADCONF > */
-/* ************>> */
-void Dblqh::execFSREADCONF(Signal* signal)
-{
- jamEntry();
- initFsrwconf(signal);
-
- switch (lfoPtr.p->lfoState) {
- case LogFileOperationRecord::READ_SR_LAST_MBYTE:
- jam();
- releaseLfo(signal);
- readSrLastMbyteLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_SR_FRONTPAGE:
- jam();
- releaseLfo(signal);
- readSrFrontpageLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_SR_LAST_FILE:
- jam();
- releaseLfo(signal);
- readSrLastFileLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_SR_NEXT_FILE:
- jam();
- releaseLfo(signal);
- readSrNextFileLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_EXEC_SR:
- jam();
- readExecSrLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_EXEC_LOG:
- jam();
- readExecLogLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_SR_INVALIDATE_PAGES:
- jam();
- invalidateLogAfterLastGCI(signal);
- return;
- break;
- case LogFileOperationRecord::READ_SR_FOURTH_PHASE:
- jam();
- releaseLfo(signal);
- readSrFourthPhaseLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_SR_FOURTH_ZERO:
- jam();
- releaseLfo(signal);
- readSrFourthZeroLab(signal);
- return;
- break;
- default:
- jam();
- systemErrorLab(signal);
- return;
- break;
- }//switch
-}//Dblqh::execFSREADCONF()
-
-/* ************>> */
-/* FSREADREF  > */
-/* ************>> */
-void Dblqh::execFSREADREF(Signal* signal)
-{
- jamEntry();
- lfoPtr.i = signal->theData[0];
- ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
- terrorCode = signal->theData[1];
- switch (lfoPtr.p->lfoState) {
- case LogFileOperationRecord::READ_SR_LAST_MBYTE:
- jam();
- systemErrorLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_SR_FRONTPAGE:
- jam();
- systemErrorLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_SR_LAST_FILE:
- jam();
- systemErrorLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_SR_NEXT_FILE:
- jam();
- systemErrorLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_EXEC_SR:
- jam();
- systemErrorLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_EXEC_LOG:
- jam();
- systemErrorLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_SR_FOURTH_PHASE:
- jam();
- systemErrorLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_SR_FOURTH_ZERO:
- jam();
- systemErrorLab(signal);
- return;
- break;
- case LogFileOperationRecord::READ_SR_INVALIDATE_PAGES:
-    jam();
- systemErrorLab(signal);
- return;
- break;
- default:
- jam();
- systemErrorLab(signal);
- return;
- break;
- }//switch
- return;
-}//Dblqh::execFSREADREF()
-
-/* *************** */
-/* FSWRITECONF > */
-/* *************** */
-void Dblqh::execFSWRITECONF(Signal* signal)
-{
- jamEntry();
- initFsrwconf(signal);
- switch (lfoPtr.p->lfoState) {
- case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
- jam();
- invalidateLogAfterLastGCI(signal);
- return;
- break;
- case LogFileOperationRecord::WRITE_PAGE_ZERO:
- jam();
- writePageZeroLab(signal);
- return;
- break;
- case LogFileOperationRecord::LAST_WRITE_IN_FILE:
- jam();
- lastWriteInFileLab(signal);
- return;
- break;
- case LogFileOperationRecord::INIT_WRITE_AT_END:
- jam();
- initWriteEndLab(signal);
- return;
- break;
- case LogFileOperationRecord::INIT_FIRST_PAGE:
- jam();
- initFirstPageLab(signal);
- return;
- break;
- case LogFileOperationRecord::WRITE_GCI_ZERO:
- jam();
- writeGciZeroLab(signal);
- return;
- break;
- case LogFileOperationRecord::WRITE_DIRTY:
- jam();
- writeDirtyLab(signal);
- return;
- break;
- case LogFileOperationRecord::WRITE_INIT_MBYTE:
- jam();
- writeInitMbyteLab(signal);
- return;
- break;
- case LogFileOperationRecord::ACTIVE_WRITE_LOG:
- jam();
- writeLogfileLab(signal);
- return;
- break;
- case LogFileOperationRecord::FIRST_PAGE_WRITE_IN_LOGFILE:
- jam();
- firstPageWriteLab(signal);
- return;
- break;
- default:
- jam();
- systemErrorLab(signal);
- return;
- break;
- }//switch
-}//Dblqh::execFSWRITECONF()
-
-/* ************>> */
-/* FSWRITEREF > */
-/* ************>> */
-void Dblqh::execFSWRITEREF(Signal* signal)
-{
- jamEntry();
- lfoPtr.i = signal->theData[0];
- ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
- terrorCode = signal->theData[1];
- switch (lfoPtr.p->lfoState) {
- case LogFileOperationRecord::WRITE_PAGE_ZERO:
- jam();
- systemErrorLab(signal);
- break;
- case LogFileOperationRecord::LAST_WRITE_IN_FILE:
- jam();
- systemErrorLab(signal);
- break;
- case LogFileOperationRecord::INIT_WRITE_AT_END:
- jam();
- systemErrorLab(signal);
- break;
- case LogFileOperationRecord::INIT_FIRST_PAGE:
- jam();
- systemErrorLab(signal);
- break;
- case LogFileOperationRecord::WRITE_GCI_ZERO:
- jam();
- systemErrorLab(signal);
- break;
- case LogFileOperationRecord::WRITE_DIRTY:
- jam();
- systemErrorLab(signal);
- break;
- case LogFileOperationRecord::WRITE_INIT_MBYTE:
- jam();
- systemErrorLab(signal);
- break;
- case LogFileOperationRecord::ACTIVE_WRITE_LOG:
- jam();
- systemErrorLab(signal);
- break;
- case LogFileOperationRecord::FIRST_PAGE_WRITE_IN_LOGFILE:
- jam();
- systemErrorLab(signal);
- break;
- case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
- jam();
- systemErrorLab(signal);
- break;
- default:
- jam();
- systemErrorLab(signal);
- break;
- }//switch
-}//Dblqh::execFSWRITEREF()
-
-
-/* ========================================================================= */
-/* ======= INITIATE WHEN RECEIVING FSOPENCONF ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::initFsopenconf(Signal* signal)
-{
- logFilePtr.i = signal->theData[0];
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logFilePtr.p->fileRef = signal->theData[1];
- logPartPtr.i = logFilePtr.p->logPartRec;
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
- logFilePtr.p->currentMbyte = 0;
- logFilePtr.p->filePosition = 0;
- logFilePtr.p->logFilePagesToDiskWithoutSynch = 0;
-}//Dblqh::initFsopenconf()
-
-/* ========================================================================= */
-/* ======= INITIATE WHEN RECEIVING FSREADCONF AND FSWRITECONF ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::initFsrwconf(Signal* signal)
-{
- lfoPtr.i = signal->theData[0];
- ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
- logFilePtr.i = lfoPtr.p->logFileRec;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logPartPtr.i = logFilePtr.p->logPartRec;
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
- logPagePtr.i = lfoPtr.p->firstLfoPage;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
-}//Dblqh::initFsrwconf()
-
-/* ######################################################################### */
-/* NORMAL OPERATION MODULE */
-/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
-/* */
-/* THIS PART HANDLES THE NORMAL OPENING, CLOSING AND WRITING OF LOG FILES */
-/* DURING NORMAL OPERATION. */
-/* ######################################################################### */
-/*---------------------------------------------------------------------------*/
-/* THIS SIGNAL IS USED TO SUPERVISE THAT THE LOG RECORDS ARE NOT KEPT IN MAIN*/
-/* MEMORY FOR MORE THAN 1 SECOND TO ACHIEVE THE PROPER RELIABILITY. */
-/*---------------------------------------------------------------------------*/
-void Dblqh::timeSup(Signal* signal)
-{
- LogPageRecordPtr origLogPagePtr;
- Uint32 wordWritten;
-
- jamEntry();
- logPartPtr.i = signal->theData[0];
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
- logFilePtr.i = logPartPtr.p->currentLogfile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logPagePtr.i = logFilePtr.p->currentLogpage;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- if (logPartPtr.p->logPartTimer != logPartPtr.p->logTimer) {
- jam();
-/*--------------------------------------------------------------------------*/
-/* THIS LOG PART HAS NOT WRITTEN TO DISK DURING THE LAST SECOND. */
-/*--------------------------------------------------------------------------*/
- switch (logPartPtr.p->logPartState) {
- case LogPartRecord::FILE_CHANGE_PROBLEM:
- jam();
-/*--------------------------------------------------------------------------*/
-/* THIS LOG PART HAS PROBLEMS IN CHANGING FILES MAKING IT IMPOSSIBLE */
-// TO WRITE TO THE FILE CURRENTLY. WE WILL COME BACK LATER AND SEE IF
-// THE PROBLEM HAS BEEN FIXED.
-/*--------------------------------------------------------------------------*/
- case LogPartRecord::ACTIVE:
- jam();
-/*---------------------------------------------------------------------------*/
-/* AN OPERATION IS CURRENTLY ACTIVE IN WRITING THIS LOG PART. WE THUS CANNOT */
-/* WRITE ANYTHING TO DISK AT THIS MOMENT. WE WILL SEND A SIGNAL DELAYED FOR */
-/* 10 MS AND THEN TRY AGAIN. POSSIBLY THE LOG PART WILL HAVE BEEN WRITTEN */
-/* UNTIL THEN OR ELSE IT SHOULD BE FREE TO WRITE AGAIN. */
-/*---------------------------------------------------------------------------*/
- signal->theData[0] = ZTIME_SUPERVISION;
- signal->theData[1] = logPartPtr.i;
- sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
- return;
- break;
- case LogPartRecord::IDLE:
- case LogPartRecord::TAIL_PROBLEM:
- jam();
-/*---------------------------------------------------------------------------*/
-/* IDLE AND NOT WRITTEN TO DISK IN A SECOND. ALSO WHEN WE HAVE A TAIL PROBLEM*/
-/* WE HAVE TO WRITE TO DISK AT TIMES. WE WILL FIRST CHECK WHETHER ANYTHING */
-/* AT ALL HAS BEEN WRITTEN TO THE PAGES BEFORE WRITING TO DISK.             */
-/*---------------------------------------------------------------------------*/
-/* WE HAVE TO WRITE TO DISK IN ALL CASES SINCE THERE COULD BE INFORMATION */
-/* STILL IN THE LOG THAT WAS GENERATED BEFORE THE PREVIOUS TIME SUPERVISION */
-/* BUT AFTER THE LAST DISK WRITE. THIS PREVIOUSLY STOPPED ALL DISK WRITES */
-/* WHEN NO MORE LOG WRITES WERE PERFORMED (THIS HAPPENED WHEN LOG GOT FULL */
-/* AND AFTER LOADING THE INITIAL RECORDS IN INITIAL START). */
-/*---------------------------------------------------------------------------*/
- if (((logFilePtr.p->currentFilepage + 1) & (ZPAGES_IN_MBYTE -1)) == 0) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THIS IS THE LAST PAGE IN THIS MBYTE. WRITE NEXT LOG AND SWITCH TO NEXT */
-/* MBYTE. */
-/*---------------------------------------------------------------------------*/
- changeMbyte(signal);
- } else {
-/*---------------------------------------------------------------------------*/
-/* WRITE THE LOG PAGE TO DISK EVEN IF IT IS NOT FULL. KEEP PAGE AND WRITE A */
-/* COPY. THE ORIGINAL PAGE WILL BE WRITTEN AGAIN LATER ON. */
-/*---------------------------------------------------------------------------*/
- wordWritten = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] - 1;
- origLogPagePtr.i = logPagePtr.i;
- origLogPagePtr.p = logPagePtr.p;
- seizeLogpage(signal);
- MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[0],
- &origLogPagePtr.p->logPageWord[0],
- wordWritten + 1);
- ndbrequire(wordWritten < ZPAGE_SIZE);
- if (logFilePtr.p->noLogpagesInBuffer > 0) {
- jam();
- completedLogPage(signal, ZENFORCE_WRITE);
-/*---------------------------------------------------------------------------*/
-/*SINCE WE ARE ONLY WRITING PART OF THE LAST PAGE WE HAVE TO UPDATE THE WORD */
-/*WRITTEN TO REFLECT THE REAL LAST WORD WRITTEN. WE ALSO HAVE TO MOVE THE */
-/*FILE POSITION ONE STEP BACKWARDS SINCE WE ARE NOT WRITING THE LAST PAGE */
-/*COMPLETELY. IT WILL BE WRITTEN AGAIN. */
-/*---------------------------------------------------------------------------*/
- lfoPtr.p->lfoWordWritten = wordWritten;
- logFilePtr.p->filePosition = logFilePtr.p->filePosition - 1;
- } else {
- if (wordWritten == (ZPAGE_HEADER_SIZE - 1)) {
-/*---------------------------------------------------------------------------*/
-/*THIS IS POSSIBLE BUT VERY UNLIKELY. IF THE PAGE WAS COMPLETED AFTER THE LAST*/
-/*WRITE TO DISK THEN NO_LOG_PAGES_IN_BUFFER > 0 AND IF NOT WRITTEN SINCE LAST*/
-/*WRITE TO DISK THEN THE PREVIOUS PAGE MUST HAVE BEEN WRITTEN BY SOME */
-/*OPERATION AND THAT BECAME COMPLETELY FULL. IN ANY CASE WE NEED NOT WRITE AN*/
-/*EMPTY PAGE TO DISK. */
-/*---------------------------------------------------------------------------*/
- jam();
- releaseLogpage(signal);
- } else {
- jam();
- writeSinglePage(signal, logFilePtr.p->currentFilepage, wordWritten);
- lfoPtr.p->lfoState = LogFileOperationRecord::ACTIVE_WRITE_LOG;
- }//if
- }//if
- }//if
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- }//if
- logPartPtr.p->logTimer++;
- return;
-}//Dblqh::timeSup()
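timeSup() decides whether the current page is the last one of its megabyte with a mask test on currentFilepage; this only works because the number of pages per megabyte is a power of two. A hedged one-liner of that check, with a hypothetical pagesPerMbyte parameter standing in for ZPAGES_IN_MBYTE:

#include <cstdint>

// Sketch only: is currentFilepage the last page of its megabyte?
// Assumes pagesPerMbyte is a power of two, as the mask form requires.
static bool lastPageInMbyte(uint32_t currentFilepage, uint32_t pagesPerMbyte)
{
  return ((currentFilepage + 1) & (pagesPerMbyte - 1)) == 0;
}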
-
-void Dblqh::writeLogfileLab(Signal* signal)
-{
-/*---------------------------------------------------------------------------*/
-/* CHECK IF ANY GLOBAL CHECKPOINTS ARE COMPLETED DUE TO THIS COMPLETED DISK */
-/* WRITE. */
-/*---------------------------------------------------------------------------*/
- switch (logFilePtr.p->fileChangeState) {
- case LogFileRecord::NOT_ONGOING:
- jam();
- checkGcpCompleted(signal,
- ((lfoPtr.p->lfoPageNo + lfoPtr.p->noPagesRw) - 1),
- lfoPtr.p->lfoWordWritten);
- break;
-#if 0
- case LogFileRecord::BOTH_WRITES_ONGOING:
- jam();
- ndbout_c("not crashing!!");
- // Fall-through
-#endif
- case LogFileRecord::WRITE_PAGE_ZERO_ONGOING:
- case LogFileRecord::LAST_WRITE_ONGOING:
- jam();
- logFilePtr.p->lastPageWritten = (lfoPtr.p->lfoPageNo + lfoPtr.p->noPagesRw) - 1;
- logFilePtr.p->lastWordWritten = lfoPtr.p->lfoWordWritten;
- break;
- default:
- jam();
- systemErrorLab(signal);
- return;
- break;
- }//switch
- releaseLfoPages(signal);
- releaseLfo(signal);
- return;
-}//Dblqh::writeLogfileLab()
-
-void Dblqh::closeWriteLogLab(Signal* signal)
-{
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
- return;
-}//Dblqh::closeWriteLogLab()
-
-/* ######################################################################### */
-/* FILE CHANGE MODULE */
-/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
-/* */
-/*THIS PART OF THE FILE MODULE HANDLES WHEN WE ARE CHANGING LOG FILE DURING */
-/*NORMAL OPERATION. WE HAVE TO BE CAREFUL WHEN WE ARE CHANGING LOG FILE SO */
-/*THAT WE DO NOT COMPLICATE THE SYSTEM RESTART PROCESS TOO MUCH. */
-/*THE IDEA IS THAT WE START BY WRITING THE LAST WRITE IN THE OLD FILE AND WE */
-/*ALSO WRITE THE FIRST PAGE OF THE NEW FILE CONCURRENT WITH THAT. THIS FIRST */
-/*PAGE IN THE NEW FILE DOES NOT CONTAIN ANY LOG RECORDS OTHER THAN A DESCRIPTOR*/
-/*CONTAINING INFORMATION ABOUT GCI'S NEEDED AT SYSTEM RESTART AND A NEXT LOG */
-/*RECORD. */
-/* */
-/*WHEN BOTH OF THOSE WRITES HAVE COMPLETED WE ALSO WRITE PAGE ZERO IN FILE */
-/*ZERO. THE ONLY INFORMATION WHICH IS INTERESTING HERE IS THE NEW FILE NUMBER*/
-/* */
-/*IF OPTIMISATIONS OF THE LOG HANDLING ARE NEEDED THEN IT IS POSSIBLE TO    */
-/*AVOID WRITING THE FIRST PAGE OF THE NEW FILE IMMEDIATELY. THIS COMPLICATES*/
-/*THE SYSTEM RESTART AND ONE HAS TO TAKE SPECIAL CARE WITH FILE ZERO. IT IS */
-/*HOWEVER NO LARGE PROBLEM TO CHANGE INTO THIS SCENARIO. AVOIDING THE       */
-/*WRITING OF PAGE ZERO IS ALSO POSSIBLE BUT COMPLICATES THE DESIGN EVEN     */
-/*FURTHER. IT GETS FAIRLY COMPLEX TO FIND THE END OF THE LOG. SOME SORT OF  */
-/*BINARY SEARCH IS HOWEVER MOST LIKELY A GOOD METHODOLOGY FOR THIS.         */
-/* ######################################################################### */
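The module comment above describes two writes started concurrently (the last page of the old file and the first page of the new file) followed by an update of page zero in file 0. A hedged sketch of how the state advances when either write completes; the enum is illustrative and is not the LogFileRecord::fileChangeState definition:

// Sketch only: state progression for a log file change, assuming both
// writes start in BothWritesOngoing and page zero of file 0 is written last.
enum class FileChangeSketch {
  BothWritesOngoing,     // last write in old file and first write in new file
  LastWriteOngoing,      // first page written, last write still pending
  FirstWriteOngoing,     // last write done, first page still pending
  WritePageZeroOngoing,  // both done, now updating page 0 of file 0
  NotOngoing
};

static FileChangeSketch onWriteCompleted(FileChangeSketch s,
                                         bool firstPageCompleted)
{
  if (s == FileChangeSketch::BothWritesOngoing)
    return firstPageCompleted ? FileChangeSketch::LastWriteOngoing
                              : FileChangeSketch::FirstWriteOngoing;
  return FileChangeSketch::WritePageZeroOngoing;  // the other write finished
}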
-void Dblqh::firstPageWriteLab(Signal* signal)
-{
- releaseLfo(signal);
-/*---------------------------------------------------------------------------*/
-/* RELEASE PAGE ZERO IF THE FILE IS NOT FILE 0. */
-/*---------------------------------------------------------------------------*/
- Uint32 fileNo = logFilePtr.p->fileNo;
- if (fileNo != 0) {
- jam();
- releaseLogpage(signal);
- }//if
-/*---------------------------------------------------------------------------*/
-/* IF A NEW FILE HAS BEEN OPENED WE SHALL ALWAYS ALSO WRITE TO PAGE 0 IN    */
-/* FILE 0. THE AIM IS TO MAKE RESTARTS EASIER BY SPECIFYING WHICH IS THE */
-/* LAST FILE WHERE LOGGING HAS STARTED. */
-/*---------------------------------------------------------------------------*/
-/* FIRST CHECK WHETHER THE LAST WRITE IN THE PREVIOUS FILE HAS COMPLETED    */
-/*---------------------------------------------------------------------------*/
- if (logFilePtr.p->fileChangeState == LogFileRecord::BOTH_WRITES_ONGOING) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THE LAST WRITE WAS STILL ONGOING. */
-/*---------------------------------------------------------------------------*/
- logFilePtr.p->fileChangeState = LogFileRecord::LAST_WRITE_ONGOING;
- return;
- } else {
- jam();
- ndbrequire(logFilePtr.p->fileChangeState == LogFileRecord::FIRST_WRITE_ONGOING);
-/*---------------------------------------------------------------------------*/
-/* WRITE TO PAGE 0 IN FILE 0 NOW.                                           */
-/*---------------------------------------------------------------------------*/
- logFilePtr.p->fileChangeState = LogFileRecord::WRITE_PAGE_ZERO_ONGOING;
- if (fileNo == 0) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* IF THE NEW FILE WAS 0 THEN WE HAVE ALREADY WRITTEN PAGE ZERO IN FILE 0. */
-/*---------------------------------------------------------------------------*/
- logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING;
- return;
- } else {
- jam();
-/*---------------------------------------------------------------------------*/
-/* WRITE PAGE ZERO IN FILE ZERO. LOG_FILE_REC WILL REFER TO THE LOG FILE WE */
-/* HAVE JUST WRITTEN PAGE ZERO IN TO GET HOLD OF LOG_FILE_PTR FOR THIS */
-/* RECORD QUICKLY. THIS IS NEEDED TO GET HOLD OF THE FILE_CHANGE_STATE. */
-/* THE ONLY INFORMATION WE WANT TO CHANGE IS THE LAST FILE NUMBER IN THE */
-/* FILE DESCRIPTOR. THIS IS USED AT SYSTEM RESTART TO FIND THE END OF THE */
-/* LOG PART. */
-/*---------------------------------------------------------------------------*/
- Uint32 currLogFile = logFilePtr.i;
- logFilePtr.i = logPartPtr.p->firstLogfile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logPagePtr.i = logFilePtr.p->logPageZero;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] = fileNo;
- writeSinglePage(signal, 0, ZPAGE_SIZE - 1);
- lfoPtr.p->logFileRec = currLogFile;
- lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_PAGE_ZERO;
- return;
- }//if
- }//if
-}//Dblqh::firstPageWriteLab()
-
-void Dblqh::lastWriteInFileLab(Signal* signal)
-{
- LogFileRecordPtr locLogFilePtr;
-/*---------------------------------------------------------------------------*/
-/* CHECK IF ANY GLOBAL CHECKPOINTS ARE COMPLETED DUE TO THIS COMPLETED DISK */
-/* WRITE. */
-/*---------------------------------------------------------------------------*/
- checkGcpCompleted(signal,
- ((lfoPtr.p->lfoPageNo + lfoPtr.p->noPagesRw) - 1),
- (ZPAGE_SIZE - 1));
- releaseLfoPages(signal);
- releaseLfo(signal);
-/*---------------------------------------------------------------------------*/
-/* IF THE FILE IS NEITHER IN USE NOR THE NEXT FILE TO BE USED WE CLOSE IT.  */
-/*---------------------------------------------------------------------------*/
- locLogFilePtr.i = logPartPtr.p->currentLogfile;
- ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
- if (logFilePtr.i != locLogFilePtr.i) {
- if (logFilePtr.i != locLogFilePtr.p->nextLogFile) {
- if (logFilePtr.p->fileNo != 0) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THE FILE IS NOT FILE ZERO EITHER. WE WILL NOT CLOSE FILE ZERO SINCE WE */
-/* USE IT TO KEEP TRACK OF THE CURRENT LOG FILE BY WRITING PAGE ZERO IN */
-/* FILE ZERO. */
-/*---------------------------------------------------------------------------*/
-/* WE WILL CLOSE THE FILE. */
-/*---------------------------------------------------------------------------*/
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_WRITE_LOG;
- closeFile(signal, logFilePtr);
- }//if
- }//if
- }//if
-/*---------------------------------------------------------------------------*/
-/* IF A NEW FILE HAS BEEN OPENED WE SHALL ALWAYS ALSO WRITE TO PAGE 0 IN    */
-/* FILE 0. THE AIM IS TO MAKE RESTARTS EASIER BY SPECIFYING WHICH IS THE */
-/* LAST FILE WHERE LOGGING HAS STARTED. */
-/*---------------------------------------------------------------------------*/
-/* FIRST CHECK WHETHER THE FIRST WRITE IN THE NEW FILE HAS COMPLETED        */
-/* THIS STATE INFORMATION IS IN THE NEW LOG FILE AND THUS WE HAVE TO MOVE */
-/* THE LOG FILE POINTER TO THIS LOG FILE. */
-/*---------------------------------------------------------------------------*/
- logFilePtr.i = logFilePtr.p->nextLogFile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- if (logFilePtr.p->fileChangeState == LogFileRecord::BOTH_WRITES_ONGOING) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THE FIRST WRITE WAS STILL ONGOING. */
-/*---------------------------------------------------------------------------*/
- logFilePtr.p->fileChangeState = LogFileRecord::FIRST_WRITE_ONGOING;
- return;
- } else {
- ndbrequire(logFilePtr.p->fileChangeState == LogFileRecord::LAST_WRITE_ONGOING);
-/*---------------------------------------------------------------------------*/
-/* WRITE TO PAGE 0 IN FILE 0 NOW.                                           */
-/*---------------------------------------------------------------------------*/
- logFilePtr.p->fileChangeState = LogFileRecord::WRITE_PAGE_ZERO_ONGOING;
- Uint32 fileNo = logFilePtr.p->fileNo;
- if (fileNo == 0) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* IF THE NEW FILE WAS 0 THEN WE HAVE ALREADY WRITTEN PAGE ZERO IN FILE 0. */
-/*---------------------------------------------------------------------------*/
- logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING;
- return;
- } else {
- jam();
-/*---------------------------------------------------------------------------*/
-/* WRITE PAGE ZERO IN FILE ZERO. LOG_FILE_REC WILL REFER TO THE LOG FILE WE */
-/* HAVE JUST WRITTEN PAGE ZERO IN TO GET HOLD OF LOG_FILE_PTR FOR THIS */
-/* RECORD QUICKLY. THIS IS NEEDED TO GET HOLD OF THE FILE_CHANGE_STATE. */
-/* THE ONLY INFORMATION WE WANT TO CHANGE IS THE LAST FILE NUMBER IN THE */
-/* FILE DESCRIPTOR. THIS IS USED AT SYSTEM RESTART TO FIND THE END OF THE */
-/* LOG PART. */
-/*---------------------------------------------------------------------------*/
- Uint32 currLogFile = logFilePtr.i;
- logFilePtr.i = logPartPtr.p->firstLogfile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logPagePtr.i = logFilePtr.p->logPageZero;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] = fileNo;
- writeSinglePage(signal, 0, ZPAGE_SIZE - 1);
- lfoPtr.p->logFileRec = currLogFile;
- lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_PAGE_ZERO;
- return;
- }//if
- }//if
-}//Dblqh::lastWriteInFileLab()
-
-void Dblqh::writePageZeroLab(Signal* signal)
-{
- logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING;
-/*---------------------------------------------------------------------------*/
-/* PAGE WRITES TO THE CURRENT FILE COULD HAVE ARRIVED WHILE WE WERE         */
-/* WAITING FOR THIS DISK WRITE TO COMPLETE. THOSE WRITES COULD NOT CHECK    */
-/* FOR COMPLETED GLOBAL CHECKPOINTS. THUS WE SHOULD DO THAT NOW INSTEAD.    */
-/*---------------------------------------------------------------------------*/
- checkGcpCompleted(signal,
- logFilePtr.p->lastPageWritten,
- logFilePtr.p->lastWordWritten);
- releaseLfo(signal);
- return;
-}//Dblqh::writePageZeroLab()
-
-/* ######################################################################### */
-/* INITIAL START MODULE */
-/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
-/* */
-/*THIS MODULE INITIALISES ALL THE LOG FILES THAT ARE NEEDED AT A SYSTEM */
-/*RESTART AND WHICH ARE USED DURING NORMAL OPERATIONS. IT CREATES THE FILES */
-/*AND SETS A PROPER SIZE OF THEM AND INITIALISES THE FIRST PAGE IN EACH FILE */
-/* ######################################################################### */
-void Dblqh::openFileInitLab(Signal* signal)
-{
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN_INIT;
- seizeLogpage(signal);
- writeSinglePage(signal, (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE) - 1, ZPAGE_SIZE - 1);
- lfoPtr.p->lfoState = LogFileOperationRecord::INIT_WRITE_AT_END;
- return;
-}//Dblqh::openFileInitLab()
-
-void Dblqh::initWriteEndLab(Signal* signal)
-{
- releaseLfo(signal);
- initLogpage(signal);
- if (logFilePtr.p->fileNo == 0) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* PAGE ZERO IN FILE ZERO MUST SET LOG LAP TO ONE SINCE IT HAS STARTED */
-/* WRITING TO THE LOG, ALSO GLOBAL CHECKPOINTS ARE SET TO ZERO. */
-/*---------------------------------------------------------------------------*/
- logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 1;
- logPagePtr.p->logPageWord[ZPOS_MAX_GCI_STARTED] = 0;
- logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED] = 0;
- logFilePtr.p->logMaxGciStarted[0] = 0;
- logFilePtr.p->logMaxGciCompleted[0] = 0;
- }//if
-/*---------------------------------------------------------------------------*/
-/* REUSE CODE FOR INITIALISATION OF FIRST PAGE IN ALL LOG FILES. */
-/*---------------------------------------------------------------------------*/
- writeFileHeaderOpen(signal, ZINIT);
- return;
-}//Dblqh::initWriteEndLab()
-
-void Dblqh::initFirstPageLab(Signal* signal)
-{
- releaseLfo(signal);
- if (logFilePtr.p->fileNo == 0) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* IN FILE ZERO WE WILL INSERT A PAGE ONE WHERE WE WILL INSERT A COMPLETED */
-/* GCI RECORD FOR GCI = 0. */
-/*---------------------------------------------------------------------------*/
- initLogpage(signal);
- logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 1;
- logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE] = ZCOMPLETED_GCI_TYPE;
- logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + 1] = 1;
- writeSinglePage(signal, 1, ZPAGE_SIZE - 1);
- lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_GCI_ZERO;
- return;
- }//if
- logFilePtr.p->currentMbyte = 1;
- writeInitMbyte(signal);
- return;
-}//Dblqh::initFirstPageLab()
-
-void Dblqh::writeGciZeroLab(Signal* signal)
-{
- releaseLfo(signal);
- logFilePtr.p->currentMbyte = 1;
- writeInitMbyte(signal);
- return;
-}//Dblqh::writeGciZeroLab()
-
-void Dblqh::writeInitMbyteLab(Signal* signal)
-{
- releaseLfo(signal);
- logFilePtr.p->currentMbyte = logFilePtr.p->currentMbyte + 1;
- if (logFilePtr.p->currentMbyte == ZNO_MBYTES_IN_FILE) {
- jam();
- releaseLogpage(signal);
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_INIT;
- closeFile(signal, logFilePtr);
- return;
- }//if
- writeInitMbyte(signal);
- return;
-}//Dblqh::writeInitMbyteLab()
-
-void Dblqh::closingInitLab(Signal* signal)
-{
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
- logPartPtr.i = logFilePtr.p->logPartRec;
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
- if (logFilePtr.p->nextLogFile == logPartPtr.p->firstLogfile) {
- jam();
- checkInitCompletedLab(signal);
- return;
- } else {
- jam();
- logFilePtr.i = logFilePtr.p->nextLogFile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- openLogfileInit(signal);
- }//if
- return;
-}//Dblqh::closingInitLab()
-
-void Dblqh::checkInitCompletedLab(Signal* signal)
-{
- logPartPtr.p->logPartState = LogPartRecord::SR_FIRST_PHASE_COMPLETED;
-/*---------------------------------------------------------------------------*/
-/* WE HAVE NOW INITIALISED ALL FILES IN THIS LOG PART. WE CAN NOW SET THE */
-/* LOG LAP TO ONE SINCE WE WILL START WITH LOG LAP ONE. LOG LAP = ZERO      */
-/* MEANS THIS PART OF THE LOG IS NOT WRITTEN YET. */
-/*---------------------------------------------------------------------------*/
- logPartPtr.p->logLap = 1;
- logPartPtr.i = 0;
-CHECK_LOG_PARTS_LOOP:
- ptrAss(logPartPtr, logPartRecord);
- if (logPartPtr.p->logPartState != LogPartRecord::SR_FIRST_PHASE_COMPLETED) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THIS PART HAS STILL NOT COMPLETED. WAIT FOR THIS TO OCCUR. */
-/*---------------------------------------------------------------------------*/
- return;
- }//if
- if (logPartPtr.i == 3) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* ALL LOG PARTS ARE COMPLETED. NOW WE CAN CONTINUE WITH THE RESTART */
-/* PROCESSING. THE NEXT STEP IS TO PREPARE FOR EXECUTING OPERATIONS. THUS WE */
-/* NEED TO INITIALISE ALL NEEDED DATA AND TO OPEN FILE ZERO AND THE NEXT AND */
-/* TO SET THE CURRENT LOG PAGE TO BE PAGE 1 IN FILE ZERO. */
-/*---------------------------------------------------------------------------*/
- for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
- ptrAss(logPartPtr, logPartRecord);
- signal->theData[0] = ZINIT_FOURTH;
- signal->theData[1] = logPartPtr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- }//for
- return;
- } else {
- jam();
- logPartPtr.i = logPartPtr.i + 1;
- goto CHECK_LOG_PARTS_LOOP;
- }//if
-}//Dblqh::checkInitCompletedLab()
-
-/* ========================================================================= */
-/* ======= INITIATE LOG FILE OPERATION RECORD WHEN ALLOCATED ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::initLfo(Signal* signal)
-{
- lfoPtr.p->firstLfoPage = RNIL;
- lfoPtr.p->lfoState = LogFileOperationRecord::IDLE;
- lfoPtr.p->logFileRec = logFilePtr.i;
- lfoPtr.p->noPagesRw = 0;
- lfoPtr.p->lfoPageNo = ZNIL;
-}//Dblqh::initLfo()
-
-/* ========================================================================= */
-/* ======= INITIATE LOG FILE WHEN ALLOCATED ======= */
-/* */
-/* INPUT: TFILE_NO NUMBER OF THE FILE INITIATED */
-/* LOG_PART_PTR NUMBER OF LOG PART */
-/* SUBROUTINE SHORT NAME = IL */
-/* ========================================================================= */
-void Dblqh::initLogfile(Signal* signal, Uint32 fileNo)
-{
- UintR tilTmp;
- UintR tilIndex;
-
- logFilePtr.p->currentFilepage = 0;
- logFilePtr.p->currentLogpage = RNIL;
- logFilePtr.p->fileName[0] = (UintR)-1;
- logFilePtr.p->fileName[1] = (UintR)-1; /* = H'FFFFFFFF = -1 */
- logFilePtr.p->fileName[2] = fileNo; /* Sfile_no */
- tilTmp = 1; /* VERSION 1 OF FILE NAME */
- tilTmp = (tilTmp << 8) + 1; /* FRAGMENT LOG => .FRAGLOG AS EXTENSION */
- tilTmp = (tilTmp << 8) + (8 + logPartPtr.i); /* DIRECTORY = D(8+Part)/DBLQH */
- tilTmp = (tilTmp << 8) + 255; /* IGNORE Pxx PART OF FILE NAME */
- logFilePtr.p->fileName[3] = tilTmp;
-/* ========================================================================= */
-/* FILE NAME BECOMES /D2/DBLQH/Tpart_no/Sfile_no.FRAGLOG */
-/* ========================================================================= */
- logFilePtr.p->fileNo = fileNo;
- logFilePtr.p->filePosition = 0;
- logFilePtr.p->firstLfo = RNIL;
- logFilePtr.p->lastLfo = RNIL;
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
- logFilePtr.p->logPartRec = logPartPtr.i;
- logFilePtr.p->noLogpagesInBuffer = 0;
- logFilePtr.p->firstFilledPage = RNIL;
- logFilePtr.p->lastFilledPage = RNIL;
- logFilePtr.p->lastPageWritten = 0;
- logFilePtr.p->logPageZero = RNIL;
- logFilePtr.p->currentMbyte = 0;
- for (tilIndex = 0; tilIndex <= 15; tilIndex++) {
- logFilePtr.p->logMaxGciCompleted[tilIndex] = (UintR)-1;
- logFilePtr.p->logMaxGciStarted[tilIndex] = (UintR)-1;
- logFilePtr.p->logLastPrepRef[tilIndex] = 0;
- }//for
-}//Dblqh::initLogfile()
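
The word packed into fileName[3] above encodes four one-byte fields (version, file type, directory number and a marker telling NDBFS to ignore the Pxx part of the name). A minimal standalone sketch of the same packing and its inverse; the helper names are hypothetical and only illustrate the byte layout used above:

#include <cstdint>
#include <cassert>

// Hypothetical helpers illustrating the byte packing used for fileName[3]
// above: (version << 24) | (fileType << 16) | (directory << 8) | pxxPart.
static uint32_t packFileNameWord(uint32_t version, uint32_t fileType,
                                 uint32_t directory, uint32_t pxxPart)
{
  return (((version << 8 | fileType) << 8 | directory) << 8) | pxxPart;
}

static void unpackFileNameWord(uint32_t w, uint32_t out[4])
{
  out[0] = (w >> 24) & 0xFF;  // version
  out[1] = (w >> 16) & 0xFF;  // file type (1 = fragment log, ".FRAGLOG")
  out[2] = (w >>  8) & 0xFF;  // directory (8 + log part number above)
  out[3] =  w        & 0xFF;  // 255 = ignore the Pxx part of the name
}

int main()
{
  const uint32_t logPart = 2;
  uint32_t w = packFileNameWord(1, 1, 8 + logPart, 255);
  uint32_t f[4];
  unpackFileNameWord(w, f);
  assert(f[2] == 8 + logPart && f[3] == 255);
  return 0;
}
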
-
-/* ========================================================================= */
-/* ======= INITIATE LOG PAGE WHEN ALLOCATED ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::initLogpage(Signal* signal)
-{
- TcConnectionrecPtr ilpTcConnectptr;
-
- logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = logPartPtr.p->logLap;
- logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED] =
- logPartPtr.p->logPartNewestCompletedGCI;
- logPagePtr.p->logPageWord[ZPOS_MAX_GCI_STARTED] = cnewestGci;
- logPagePtr.p->logPageWord[ZPOS_VERSION] = NDB_VERSION;
- logPagePtr.p->logPageWord[ZPOS_NO_LOG_FILES] = logPartPtr.p->noLogFiles;
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
- ilpTcConnectptr.i = logPartPtr.p->firstLogTcrec;
- if (ilpTcConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(ilpTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF] =
- (ilpTcConnectptr.p->logStartFileNo << 16) +
- (ilpTcConnectptr.p->logStartPageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE);
- } else {
- jam();
- logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF] =
- (logFilePtr.p->fileNo << 16) +
- (logFilePtr.p->currentFilepage >> ZTWOLOG_NO_PAGES_IN_MBYTE);
- }//if
-}//Dblqh::initLogpage()
-
-/* ------------------------------------------------------------------------- */
-/* ------- OPEN LOG FILE FOR READ AND WRITE ------- */
-/* */
-/* SUBROUTINE SHORT NAME = OFR */
-/* ------------------------------------------------------------------------- */
-void Dblqh::openFileRw(Signal* signal, LogFileRecordPtr olfLogFilePtr)
-{
- signal->theData[0] = cownref;
- signal->theData[1] = olfLogFilePtr.i;
- signal->theData[2] = olfLogFilePtr.p->fileName[0];
- signal->theData[3] = olfLogFilePtr.p->fileName[1];
- signal->theData[4] = olfLogFilePtr.p->fileName[2];
- signal->theData[5] = olfLogFilePtr.p->fileName[3];
- signal->theData[6] = ZOPEN_READ_WRITE;
- sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
-}//Dblqh::openFileRw()
-
-/* ------------------------------------------------------------------------- */
-/* ------- OPEN LOG FILE DURING INITIAL START ------- */
-/* */
-/* SUBROUTINE SHORT NAME = OLI */
-/* ------------------------------------------------------------------------- */
-void Dblqh::openLogfileInit(Signal* signal)
-{
- logFilePtr.p->logFileStatus = LogFileRecord::OPENING_INIT;
- signal->theData[0] = cownref;
- signal->theData[1] = logFilePtr.i;
- signal->theData[2] = logFilePtr.p->fileName[0];
- signal->theData[3] = logFilePtr.p->fileName[1];
- signal->theData[4] = logFilePtr.p->fileName[2];
- signal->theData[5] = logFilePtr.p->fileName[3];
- signal->theData[6] = 0x302;
- sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
-}//Dblqh::openLogfileInit()
-
-/* OPEN FOR READ/WRITE, DO CREATE AND DO TRUNCATE FILE */
-/* ------------------------------------------------------------------------- */
-/* ------- OPEN NEXT LOG FILE ------- */
-/* */
-/* SUBROUTINE SHORT NAME = ONL */
-/* ------------------------------------------------------------------------- */
-void Dblqh::openNextLogfile(Signal* signal)
-{
- LogFileRecordPtr onlLogFilePtr;
-
- if (logPartPtr.p->noLogFiles > 2) {
- jam();
-/* -------------------------------------------------- */
-/* IF ONLY 1 OR 2 LOG FILES EXIST THEN THEY ARE */
-/* ALWAYS OPEN AND THUS IT IS NOT NECESSARY TO */
-/* OPEN THEM NOW. */
-/* -------------------------------------------------- */
- onlLogFilePtr.i = logFilePtr.p->nextLogFile;
- ptrCheckGuard(onlLogFilePtr, clogFileFileSize, logFileRecord);
- if (onlLogFilePtr.p->logFileStatus != LogFileRecord::CLOSED) {
- ndbrequire(onlLogFilePtr.p->fileNo == 0);
- return;
- }//if
- onlLogFilePtr.p->logFileStatus = LogFileRecord::OPENING_WRITE_LOG;
- signal->theData[0] = cownref;
- signal->theData[1] = onlLogFilePtr.i;
- signal->theData[2] = onlLogFilePtr.p->fileName[0];
- signal->theData[3] = onlLogFilePtr.p->fileName[1];
- signal->theData[4] = onlLogFilePtr.p->fileName[2];
- signal->theData[5] = onlLogFilePtr.p->fileName[3];
- signal->theData[6] = 2;
- sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
- }//if
-}//Dblqh::openNextLogfile()
-
- /* OPEN FOR READ/WRITE, DON'T CREATE AND DON'T TRUNCATE FILE */
-/* ------------------------------------------------------------------------- */
-/* ------- RELEASE LFO RECORD ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::releaseLfo(Signal* signal)
-{
-#ifdef VM_TRACE
- // Check that lfo record isn't already in free list
- LogFileOperationRecordPtr TlfoPtr;
- TlfoPtr.i = cfirstfreeLfo;
- while (TlfoPtr.i != RNIL){
- ptrCheckGuard(TlfoPtr, clfoFileSize, logFileOperationRecord);
- ndbrequire(TlfoPtr.i != lfoPtr.i);
- TlfoPtr.i = TlfoPtr.p->nextLfo;
- }
-#endif
- lfoPtr.p->nextLfo = cfirstfreeLfo;
- lfoPtr.p->lfoTimer = 0;
- cfirstfreeLfo = lfoPtr.i;
- lfoPtr.p->lfoState = LogFileOperationRecord::IDLE;
-}//Dblqh::releaseLfo()
-
-/* ------------------------------------------------------------------------- */
-/* ------- RELEASE ALL LOG PAGES CONNECTED TO A LFO RECORD ------- */
-/* */
-/* SUBROUTINE SHORT NAME = RLP */
-/* ------------------------------------------------------------------------- */
-void Dblqh::releaseLfoPages(Signal* signal)
-{
- LogPageRecordPtr rlpLogPagePtr;
-
- logPagePtr.i = lfoPtr.p->firstLfoPage;
-RLP_LOOP:
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- rlpLogPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE];
- releaseLogpage(signal);
- if (rlpLogPagePtr.i != RNIL) {
- jam();
- logPagePtr.i = rlpLogPagePtr.i;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- goto RLP_LOOP;
- }//if
- lfoPtr.p->firstLfoPage = RNIL;
-}//Dblqh::releaseLfoPages()
-
-/* ------------------------------------------------------------------------- */
-/* ------- RELEASE LOG PAGE ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::releaseLogpage(Signal* signal)
-{
-#ifdef VM_TRACE
- // Check that log page isn't already in free list
- LogPageRecordPtr TlogPagePtr;
- TlogPagePtr.i = cfirstfreeLogPage;
- while (TlogPagePtr.i != RNIL){
- ptrCheckGuard(TlogPagePtr, clogPageFileSize, logPageRecord);
- ndbrequire(TlogPagePtr.i != logPagePtr.i);
- TlogPagePtr.i = TlogPagePtr.p->logPageWord[ZNEXT_PAGE];
- }
-#endif
-
- cnoOfLogPages++;
- logPagePtr.p->logPageWord[ZNEXT_PAGE] = cfirstfreeLogPage;
- cfirstfreeLogPage = logPagePtr.i;
-}//Dblqh::releaseLogpage()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SEIZE LFO RECORD ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::seizeLfo(Signal* signal)
-{
- lfoPtr.i = cfirstfreeLfo;
- ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
- cfirstfreeLfo = lfoPtr.p->nextLfo;
- lfoPtr.p->nextLfo = RNIL;
- lfoPtr.p->lfoTimer = cLqhTimeOutCount;
-}//Dblqh::seizeLfo()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SEIZE LOG FILE RECORD ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::seizeLogfile(Signal* signal)
-{
- logFilePtr.i = cfirstfreeLogFile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
-/* ------------------------------------------------------------------------- */
-/* IF LIST IS EMPTY THEN A SYSTEM CRASH IS INVOKED SINCE LOG_FILE_PTR = RNIL */
-/* ------------------------------------------------------------------------- */
- cfirstfreeLogFile = logFilePtr.p->nextLogFile;
- logFilePtr.p->nextLogFile = RNIL;
-}//Dblqh::seizeLogfile()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SEIZE LOG PAGE RECORD ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::seizeLogpage(Signal* signal)
-{
- cnoOfLogPages--;
- logPagePtr.i = cfirstfreeLogPage;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
-/* ------------------------------------------------------------------------- */
-/* IF LIST IS EMPTY THEN A SYSTEM CRASH IS INVOKED SINCE LOG_PAGE_PTR = RNIL */
-/* ------------------------------------------------------------------------- */
- cfirstfreeLogPage = logPagePtr.p->logPageWord[ZNEXT_PAGE];
- logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
-}//Dblqh::seizeLogpage()
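
The seize/release pairs above (LFO records, log file records, log pages) all follow the same intrusive free-list idiom: records live in a fixed pool, the next-pointer is stored inside the record itself, a head index points at the first free record, and an empty list deliberately leads to a pointer-out-of-range crash. A minimal sketch of that idiom with hypothetical names; the pool size and the RNIL value are assumptions:

#include <cstdint>
#include <vector>
#include <cassert>

// Sketch (hypothetical names) of the intrusive free-list idiom used by
// seizeLogpage()/releaseLogpage() above: a fixed pool, a head index, and the
// "next" link stored inside each record. RNIL marks "no record".
static const uint32_t RNIL = 0xFFFFFF00;  // placeholder for the real RNIL value

struct PoolRec { uint32_t next; };

struct Pool {
  std::vector<PoolRec> recs;
  uint32_t firstFree;

  explicit Pool(uint32_t n) : recs(n), firstFree(0) {
    for (uint32_t i = 0; i < n; i++)
      recs[i].next = (i + 1 < n) ? i + 1 : RNIL;   // chain all records
  }
  uint32_t seize() {                               // like seizeLogpage()
    uint32_t i = firstFree;
    assert(i != RNIL);                             // empty list => crash, as above
    firstFree = recs[i].next;
    recs[i].next = RNIL;
    return i;
  }
  void release(uint32_t i) {                       // like releaseLogpage()
    recs[i].next = firstFree;                      // push on head of list
    firstFree = i;
  }
};

int main() {
  Pool p(4);
  uint32_t a = p.seize(), b = p.seize();
  p.release(a);
  p.release(b);
  return 0;
}
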
-
-/* ------------------------------------------------------------------------- */
-/* ------- WRITE FILE DESCRIPTOR INFORMATION ------- */
-/* */
-/* SUBROUTINE SHORT NAME: WFD */
-// Pointer handling:
-// logFilePtr in
-// logPartPtr in
-/* ------------------------------------------------------------------------- */
-void Dblqh::writeFileDescriptor(Signal* signal)
-{
- TcConnectionrecPtr wfdTcConnectptr;
- UintR twfdFileNo;
- UintR twfdMbyte;
-
-/* -------------------------------------------------- */
-/* START BY WRITING TO LOG FILE RECORD */
-/* -------------------------------------------------- */
- arrGuard(logFilePtr.p->currentMbyte, 16);
- logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] =
- logPartPtr.p->logPartNewestCompletedGCI;
- logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] = cnewestGci;
- wfdTcConnectptr.i = logPartPtr.p->firstLogTcrec;
- if (wfdTcConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(wfdTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- twfdFileNo = wfdTcConnectptr.p->logStartFileNo;
- twfdMbyte = wfdTcConnectptr.p->logStartPageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE;
- logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] =
- (twfdFileNo << 16) + twfdMbyte;
- } else {
- jam();
- logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] =
- (logFilePtr.p->fileNo << 16) + logFilePtr.p->currentMbyte;
- }//if
-}//Dblqh::writeFileDescriptor()
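
The last-prepare reference written above packs a log file number into the upper 16 bits and an MByte number into the lower 16 bits of one word. A small sketch of that packing and its inverse, using hypothetical helper names:

#include <cstdint>
#include <cassert>

// Sketch (hypothetical helpers) of the "last prepare reference" packing used
// above: file number in the upper 16 bits, MByte number in the lower 16 bits.
static uint32_t packPrepRef(uint32_t fileNo, uint32_t mbyte)
{
  return (fileNo << 16) + mbyte;
}

static void unpackPrepRef(uint32_t ref, uint32_t& fileNo, uint32_t& mbyte)
{
  fileNo = ref >> 16;
  mbyte  = ref & 0xFFFF;
}

int main()
{
  uint32_t fileNo, mbyte;
  unpackPrepRef(packPrepRef(7, 13), fileNo, mbyte);
  assert(fileNo == 7 && mbyte == 13);
  return 0;
}
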
-
-/* ------------------------------------------------------------------------- */
-/* ------- WRITE THE HEADER PAGE OF A NEW FILE ------- */
-/* */
-/* SUBROUTINE SHORT NAME: WMO */
-/* ------------------------------------------------------------------------- */
-void Dblqh::writeFileHeaderOpen(Signal* signal, Uint32 wmoType)
-{
- LogFileRecordPtr wmoLogFilePtr;
- UintR twmoNoLogDescriptors;
- UintR twmoLoop;
- UintR twmoIndex;
-
-/* -------------------------------------------------- */
-/* WRITE HEADER INFORMATION IN THE NEW FILE. */
-/* -------------------------------------------------- */
- logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_LOG_TYPE] = ZFD_TYPE;
- logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] =
- logFilePtr.p->fileNo;
- if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
- jam();
- twmoNoLogDescriptors = ZMAX_LOG_FILES_IN_PAGE_ZERO;
- } else {
- jam();
- twmoNoLogDescriptors = logPartPtr.p->noLogFiles;
- }//if
- logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD] =
- twmoNoLogDescriptors;
- wmoLogFilePtr.i = logFilePtr.i;
- twmoLoop = 0;
-WMO_LOOP:
- jam();
- if (twmoLoop < twmoNoLogDescriptors) {
- jam();
- ptrCheckGuard(wmoLogFilePtr, clogFileFileSize, logFileRecord);
- for (twmoIndex = 0; twmoIndex <= ZNO_MBYTES_IN_FILE - 1; twmoIndex++) {
- jam();
- arrGuard(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (twmoLoop * ZFD_PART_SIZE)) + twmoIndex, ZPAGE_SIZE);
- logPagePtr.p->logPageWord[((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (twmoLoop * ZFD_PART_SIZE)) + twmoIndex] =
- wmoLogFilePtr.p->logMaxGciCompleted[twmoIndex];
- arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (twmoLoop * ZFD_PART_SIZE)) + ZNO_MBYTES_IN_FILE) +
- twmoIndex, ZPAGE_SIZE);
- logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (twmoLoop * ZFD_PART_SIZE)) + ZNO_MBYTES_IN_FILE) + twmoIndex] =
- wmoLogFilePtr.p->logMaxGciStarted[twmoIndex];
- arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (twmoLoop * ZFD_PART_SIZE)) + (2 * ZNO_MBYTES_IN_FILE)) +
- twmoIndex, ZPAGE_SIZE);
- logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (twmoLoop * ZFD_PART_SIZE)) + (2 * ZNO_MBYTES_IN_FILE)) + twmoIndex] =
- wmoLogFilePtr.p->logLastPrepRef[twmoIndex];
- }//for
- wmoLogFilePtr.i = wmoLogFilePtr.p->prevLogFile;
- twmoLoop = twmoLoop + 1;
- goto WMO_LOOP;
- }//if
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
- (ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (ZFD_PART_SIZE * twmoNoLogDescriptors);
- arrGuard(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX], ZPAGE_SIZE);
- logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] =
- ZNEXT_LOG_RECORD_TYPE;
-/* ------------------------------------------------------- */
-/* THIS IS A SPECIAL WRITE OF THE FIRST PAGE IN THE */
-/* LOG FILE. THIS HAS SPECIAL SIGNIFICANCE TO FIND */
-/* THE END OF THE LOG AT SYSTEM RESTART. */
-/* ------------------------------------------------------- */
- writeSinglePage(signal, 0, ZPAGE_SIZE - 1);
- if (wmoType == ZINIT) {
- jam();
- lfoPtr.p->lfoState = LogFileOperationRecord::INIT_FIRST_PAGE;
- } else {
- jam();
- lfoPtr.p->lfoState = LogFileOperationRecord::FIRST_PAGE_WRITE_IN_LOGFILE;
- }//if
- logFilePtr.p->filePosition = 1;
- if (wmoType == ZNORMAL) {
- jam();
-/* -------------------------------------------------- */
-/* ALLOCATE A NEW PAGE SINCE THE CURRENT IS */
-/* WRITTEN. */
-/* -------------------------------------------------- */
- seizeLogpage(signal);
- initLogpage(signal);
- logFilePtr.p->currentLogpage = logPagePtr.i;
- logFilePtr.p->currentFilepage = logFilePtr.p->currentFilepage + 1;
- }//if
-}//Dblqh::writeFileHeaderOpen()
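
The loop above lays out one descriptor per log file in page zero: directly after the page header and FD header come, for each descriptor, three arrays of one word per MByte (logMaxGciCompleted, logMaxGciStarted, logLastPrepRef). A sketch of the resulting word offsets; the header sizes are illustrative assumptions, and only the 16-entry MByte arrays match the code above:

#include <cstdint>
#include <cassert>

// Sketch of the page-zero file-descriptor layout written above. The header
// constants are illustrative assumptions, not the real ZPAGE_HEADER_SIZE etc.
static const uint32_t PAGE_HEADER_SIZE  = 32;   // assumed
static const uint32_t FD_HEADER_SIZE    = 3;    // assumed
static const uint32_t NO_MBYTES_IN_FILE = 16;   // matches the 16-entry arrays above
static const uint32_t FD_PART_SIZE      = 3 * NO_MBYTES_IN_FILE;

// Word offset of one entry inside page zero:
//   descriptor = which log file (0 = this file, 1 = its predecessor, ...)
//   section    = 0: logMaxGciCompleted, 1: logMaxGciStarted, 2: logLastPrepRef
//   mbyte      = which MByte of that file
static uint32_t fdWordOffset(uint32_t descriptor, uint32_t section, uint32_t mbyte)
{
  return PAGE_HEADER_SIZE + FD_HEADER_SIZE
       + descriptor * FD_PART_SIZE
       + section * NO_MBYTES_IN_FILE
       + mbyte;
}

int main()
{
  // For the first descriptor, logMaxGciStarted of MByte 0 lands right after
  // the 16 logMaxGciCompleted words, exactly as in the loop above.
  assert(fdWordOffset(0, 1, 0) == fdWordOffset(0, 0, 0) + NO_MBYTES_IN_FILE);
  return 0;
}
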
-
-/* -------------------------------------------------- */
-/* THE NEW FILE POSITION WILL ALWAYS BE 1 SINCE */
-/* WE JUST WROTE THE FIRST PAGE IN THE LOG FILE */
-/* -------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* ------- WRITE A MBYTE HEADER DURING INITIAL START ------- */
-/* */
-/* SUBROUTINE SHORT NAME: WIM */
-/* ------------------------------------------------------------------------- */
-void Dblqh::writeInitMbyte(Signal* signal)
-{
- initLogpage(signal);
- writeSinglePage(signal, logFilePtr.p->currentMbyte * ZPAGES_IN_MBYTE, ZPAGE_SIZE - 1);
- lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_INIT_MBYTE;
-}//Dblqh::writeInitMbyte()
-
-/* ------------------------------------------------------------------------- */
-/* ------- WRITE A SINGLE PAGE INTO A FILE ------- */
-/* */
-/* INPUT: TWSP_PAGE_NO THE PAGE NUMBER WRITTEN */
-/* SUBROUTINE SHORT NAME: WSP */
-/* ------------------------------------------------------------------------- */
-void Dblqh::writeSinglePage(Signal* signal, Uint32 pageNo, Uint32 wordWritten)
-{
- seizeLfo(signal);
- initLfo(signal);
- lfoPtr.p->firstLfoPage = logPagePtr.i;
- logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
-
- // Calculate checksum for page
- logPagePtr.p->logPageWord[ZPOS_CHECKSUM] = calcPageCheckSum(logPagePtr);
-
- lfoPtr.p->lfoPageNo = pageNo;
- lfoPtr.p->lfoWordWritten = wordWritten;
- lfoPtr.p->noPagesRw = 1;
-/* -------------------------------------------------- */
-/* SET TIMER ON THIS LOG PART TO SIGNIFY THAT A */
-/* LOG RECORD HAS BEEN SENT AT THIS TIME. */
-/* -------------------------------------------------- */
- logPartPtr.p->logPartTimer = logPartPtr.p->logTimer;
- signal->theData[0] = logFilePtr.p->fileRef;
- signal->theData[1] = cownref;
- signal->theData[2] = lfoPtr.i;
- signal->theData[3] = ZLIST_OF_PAIRS_SYNCH;
- signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
- signal->theData[5] = 1; /* ONE PAGE WRITTEN */
- signal->theData[6] = logPagePtr.i;
- signal->theData[7] = pageNo;
- sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
-}//Dblqh::writeSinglePage()
-
-/* ##########################################################################
- * SYSTEM RESTART PHASE ONE MODULE
- * THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING.
- *
- * THIS MODULE CONTAINS THE CODE FOR THE FIRST PHASE OF THE SYSTEM RESTART.
- * THE AIM OF THIS PHASE IS TO FIND THE END OF THE LOG AND TO FIND
- * INFORMATION ABOUT WHERE GLOBAL CHECKPOINTS ARE COMPLETED AND STARTED
- * IN THE LOG. THIS INFORMATION IS NEEDED TO START PHASE THREE OF
- * THE SYSTEM RESTART.
- * ########################################################################## */
-/* --------------------------------------------------------------------------
- * A SYSTEM RESTART OR NODE RESTART IS ONGOING. WE HAVE NOW OPENED FILE 0.
- * NOW WE NEED TO READ PAGE 0 TO FIND WHICH LOG FILE WAS OPEN AT
- * CRASH TIME.
- * -------------------------------------------------------------------------- */
-void Dblqh::openSrFrontpageLab(Signal* signal)
-{
- readSinglePage(signal, 0);
- lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_FRONTPAGE;
- return;
-}//Dblqh::openSrFrontpageLab()
-
-/* -------------------------------------------------------------------------
- * WE HAVE NOW READ PAGE 0 IN FILE 0. CHECK THE LAST OPEN FILE. ACTUALLY THE
- * LAST OPEN FILE COULD BE THE NEXT ONE AFTER THAT, SO CHECK THAT FIRST. ONCE
- * THE LAST FILE IS FOUND WE CAN FIND ALL THE INFORMATION NEEDED ABOUT WHERE
- * TO START AND STOP READING THE LOG.
- * -------------------------------------------------------------------------- */
-void Dblqh::readSrFrontpageLab(Signal* signal)
-{
- Uint32 fileNo = logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO];
- if (fileNo == 0) {
- jam();
- /* ----------------------------------------------------------------------
- * FILE 0 WAS ALSO THE LAST FILE, SO WE DO NOT NEED TO READ IT AGAIN.
- * ---------------------------------------------------------------------- */
- readSrLastFileLab(signal);
- return;
- }//if
- /* ------------------------------------------------------------------------
- * CLOSE FILE 0 SO THAT WE HAVE CLOSED ALL FILES WHEN STARTING TO READ
- * THE FRAGMENT LOG. ALSO RELEASE PAGE ZERO.
- * ------------------------------------------------------------------------ */
- releaseLogpage(signal);
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR;
- closeFile(signal, logFilePtr);
- LogFileRecordPtr locLogFilePtr;
- findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr);
- locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_LAST_FILE;
- openFileRw(signal, locLogFilePtr);
- return;
-}//Dblqh::readSrFrontpageLab()
-
-void Dblqh::openSrLastFileLab(Signal* signal)
-{
- readSinglePage(signal, 0);
- lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_LAST_FILE;
- return;
-}//Dblqh::openSrLastFileLab()
-
-void Dblqh::readSrLastFileLab(Signal* signal)
-{
- logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP];
- if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
- jam();
- initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO);
- } else {
- jam();
- initGciInLogFileRec(signal, logPartPtr.p->noLogFiles);
- }//if
- releaseLogpage(signal);
- /* ------------------------------------------------------------------------
- * NOW WE HAVE FOUND THE LAST LOG FILE. WE ALSO NEED TO FIND THE LAST
- * MBYTE THAT WAS WRITTEN BEFORE THE SYSTEM CRASH.
- * ------------------------------------------------------------------------ */
- logPartPtr.p->lastLogfile = logFilePtr.i;
- readSinglePage(signal, 0);
- lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_LAST_MBYTE;
- logFilePtr.p->currentMbyte = 0;
- return;
-}//Dblqh::readSrLastFileLab()
-
-void Dblqh::readSrLastMbyteLab(Signal* signal)
-{
- if (logPartPtr.p->lastMbyte == ZNIL) {
- if (logPagePtr.p->logPageWord[ZPOS_LOG_LAP] < logPartPtr.p->logLap) {
- jam();
- logPartPtr.p->lastMbyte = logFilePtr.p->currentMbyte - 1;
- }//if
- }//if
- arrGuard(logFilePtr.p->currentMbyte, 16);
- logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] =
- logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED];
- logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] =
- logPagePtr.p->logPageWord[ZPOS_MAX_GCI_STARTED];
- logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] =
- logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF];
- releaseLogpage(signal);
- if (logFilePtr.p->currentMbyte < (ZNO_MBYTES_IN_FILE - 1)) {
- jam();
- logFilePtr.p->currentMbyte++;
- readSinglePage(signal, ZPAGES_IN_MBYTE * logFilePtr.p->currentMbyte);
- lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_LAST_MBYTE;
- return;
- } else {
- jam();
- /* ----------------------------------------------------------------------
- * THE LOG WAS IN THE LAST MBYTE WHEN THE CRASH OCCURRED SINCE ALL
- * LOG LAPS ARE EQUAL TO THE CURRENT LOG LAP.
- * ---------------------------------------------------------------------- */
- if (logPartPtr.p->lastMbyte == ZNIL) {
- jam();
- logPartPtr.p->lastMbyte = ZNO_MBYTES_IN_FILE - 1;
- }//if
- }//if
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR;
- closeFile(signal, logFilePtr);
- if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
- Uint32 fileNo;
- if (logFilePtr.p->fileNo >= ZMAX_LOG_FILES_IN_PAGE_ZERO) {
- jam();
- fileNo = logFilePtr.p->fileNo - ZMAX_LOG_FILES_IN_PAGE_ZERO;
- } else {
- jam();
- fileNo =
- (logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) -
- ZMAX_LOG_FILES_IN_PAGE_ZERO;
- }//if
- if (fileNo == 0) {
- jam();
- /* --------------------------------------------------------------------
- * AVOID USING FILE 0 AGAIN SINCE THAT IS PROBABLY CLOSING AT THE
- * MOMENT.
- * -------------------------------------------------------------------- */
- fileNo = 1;
- logPartPtr.p->srRemainingFiles =
- logPartPtr.p->noLogFiles - (ZMAX_LOG_FILES_IN_PAGE_ZERO - 1);
- } else {
- jam();
- logPartPtr.p->srRemainingFiles =
- logPartPtr.p->noLogFiles - ZMAX_LOG_FILES_IN_PAGE_ZERO;
- }//if
- LogFileRecordPtr locLogFilePtr;
- findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr);
- locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_NEXT_FILE;
- openFileRw(signal, locLogFilePtr);
- return;
- }//if
- /* ------------------------------------------------------------------------
- * THERE WAS NO NEED TO READ PAGE ZERO IN ANY OTHER FILES.
- * WE NOW HAVE ALL THE NEEDED INFORMATION ABOUT THE GCI'S THAT WE NEED.
- * NOW JUST WAIT FOR CLOSE OPERATIONS TO COMPLETE.
- * ------------------------------------------------------------------------ */
- return;
-}//Dblqh::readSrLastMbyteLab()
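
The search above relies on the log lap stored in the first page of every MByte: scanning the MBytes of the last file in order, the first page whose lap is lower than the log part's current lap lies beyond the crash point, so the MByte before it is the last one written. If no such page is found, the crash was in the last MByte of the file. A small sketch of that scan, assuming the per-MByte laps have already been read:

#include <cstdint>
#include <vector>

// Sketch of the scan in readSrLastMbyteLab() above: pageLapPerMbyte[m] is the
// log lap found in the first page of MByte m of the last log file.
static uint32_t findLastMbyte(const std::vector<uint32_t>& pageLapPerMbyte,
                              uint32_t currentLogLap)
{
  for (uint32_t mbyte = 0; mbyte < pageLapPerMbyte.size(); mbyte++) {
    if (pageLapPerMbyte[mbyte] < currentLogLap)
      return mbyte - 1;   // caller guarantees MByte 0 was written in this lap
  }
  return (uint32_t)pageLapPerMbyte.size() - 1;  // crash was in the last MByte
}

int main()
{
  // Laps 2,2,2,1,...: MBytes 0..2 were written in the current lap (2).
  std::vector<uint32_t> laps;
  laps.push_back(2); laps.push_back(2); laps.push_back(2);
  laps.push_back(1); laps.push_back(1); laps.push_back(1);
  return findLastMbyte(laps, 2) == 2 ? 0 : 1;
}
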
-
-void Dblqh::openSrNextFileLab(Signal* signal)
-{
- readSinglePage(signal, 0);
- lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_NEXT_FILE;
- return;
-}//Dblqh::openSrNextFileLab()
-
-void Dblqh::readSrNextFileLab(Signal* signal)
-{
- if (logPartPtr.p->srRemainingFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
- jam();
- initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO);
- } else {
- jam();
- initGciInLogFileRec(signal, logPartPtr.p->srRemainingFiles);
- }//if
- releaseLogpage(signal);
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR;
- closeFile(signal, logFilePtr);
- if (logPartPtr.p->srRemainingFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
- Uint32 fileNo;
- if (logFilePtr.p->fileNo >= ZMAX_LOG_FILES_IN_PAGE_ZERO) {
- jam();
- fileNo = logFilePtr.p->fileNo - ZMAX_LOG_FILES_IN_PAGE_ZERO;
- } else {
- jam();
- fileNo =
- (logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) -
- ZMAX_LOG_FILES_IN_PAGE_ZERO;
- }//if
- if (fileNo == 0) {
- jam();
- /* --------------------------------------------------------------------
- * AVOID USING FILE 0 AGAIN SINCE THAT IS PROBABLY CLOSING AT THE MOMENT.
- * -------------------------------------------------------------------- */
- fileNo = 1;
- logPartPtr.p->srRemainingFiles =
- logPartPtr.p->srRemainingFiles - (ZMAX_LOG_FILES_IN_PAGE_ZERO - 1);
- } else {
- jam();
- logPartPtr.p->srRemainingFiles =
- logPartPtr.p->srRemainingFiles - ZMAX_LOG_FILES_IN_PAGE_ZERO;
- }//if
- LogFileRecordPtr locLogFilePtr;
- findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr);
- locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_NEXT_FILE;
- openFileRw(signal, locLogFilePtr);
- }//if
- /* ------------------------------------------------------------------------
- * THERE WAS NO NEED TO READ PAGE ZERO IN ANY OTHER FILES.
- * WE NOW HAVE ALL THE NEEDED INFORMATION ABOUT THE GCI'S THAT WE NEED.
- * NOW JUST WAIT FOR CLOSE OPERATIONS TO COMPLETE.
- * ------------------------------------------------------------------------ */
- return;
-}//Dblqh::readSrNextFileLab()
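
Both readSrLastMbyteLab() and readSrNextFileLab() step backwards through the log files in groups of ZMAX_LOG_FILES_IN_PAGE_ZERO, wrapping around at file 0. A sketch of that wrap-around step; the group size is an assumed value, not the real constant:

#include <cstdint>
#include <cassert>

// Sketch of the wrap-around step above: page zero of each file describes at
// most MAX_IN_PAGE_ZERO files, so the next file to read is the current file
// number minus that amount, modulo the number of files in the log part.
static const uint32_t MAX_IN_PAGE_ZERO = 8;  // assumed, not the real constant

static uint32_t previousGroupFile(uint32_t fileNo, uint32_t noLogFiles)
{
  // Same result as the if/else in readSrLastMbyteLab()/readSrNextFileLab(),
  // assuming noLogFiles >= MAX_IN_PAGE_ZERO (the branch is only taken then).
  if (fileNo >= MAX_IN_PAGE_ZERO)
    return fileNo - MAX_IN_PAGE_ZERO;
  return (noLogFiles + fileNo) - MAX_IN_PAGE_ZERO;
}

int main()
{
  // 16 files, currently at file 3: the previous group starts at file 11.
  assert(previousGroupFile(3, 16) == 11);
  // Currently at file 12: the previous group starts at file 4.
  assert(previousGroupFile(12, 16) == 4);
  return 0;
}
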
-
-void Dblqh::closingSrLab(Signal* signal)
-{
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
- logPartPtr.i = logFilePtr.p->logPartRec;
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
- logFilePtr.i = logPartPtr.p->firstLogfile;
- do {
- jam();
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- if (logFilePtr.p->logFileStatus != LogFileRecord::CLOSED) {
- jam();
- /* --------------------------------------------------------------------
- * EXIT AND WAIT FOR REMAINING LOG FILES TO COMPLETE THEIR WORK.
- * -------------------------------------------------------------------- */
- return;
- }//if
- logFilePtr.i = logFilePtr.p->nextLogFile;
- } while (logFilePtr.i != logPartPtr.p->firstLogfile);
- /* ------------------------------------------------------------------------
- * ALL FILES IN THIS PART HAVE BEEN CLOSED. THIS INDICATES THAT THE FIRST
- * PHASE OF THE SYSTEM RESTART HAS BEEN CONCLUDED FOR THIS LOG PART.
- * CHECK IF ALL OTHER LOG PARTS ARE ALSO COMPLETED.
- * ------------------------------------------------------------------------ */
- logPartPtr.p->logPartState = LogPartRecord::SR_FIRST_PHASE_COMPLETED;
- for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
- jam();
- ptrAss(logPartPtr, logPartRecord);
- if (logPartPtr.p->logPartState != LogPartRecord::SR_FIRST_PHASE_COMPLETED) {
- jam();
- /* --------------------------------------------------------------------
- * EXIT AND WAIT FOR THE REST OF THE LOG PARTS TO COMPLETE.
- * -------------------------------------------------------------------- */
- return;
- }//if
- }//for
- /* ------------------------------------------------------------------------
- * THE FIRST PHASE HAS BEEN COMPLETED.
- * ------------------------------------------------------------------------ */
- signal->theData[0] = ZSR_PHASE3_START;
- signal->theData[1] = ZSR_PHASE1_COMPLETED;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- return;
-}//Dblqh::closingSrLab()
-
-/* ##########################################################################
- * ####### SYSTEM RESTART PHASE TWO MODULE #######
- *
- * THIS MODULE HANDLES THE SYSTEM RESTART WHERE LQH CONTROLS TUP AND ACC TO
- * ENSURE THAT THEY HAVE KNOWLEDGE OF ALL FRAGMENTS AND HAVE DONE THE NEEDED
- * READING OF DATA FROM FILE AND EXECUTION OF LOCAL LOGS. THIS PROCESS
- * EXECUTES CONCURRENTLY WITH PHASE ONE OF THE SYSTEM RESTART. THIS PHASE
- * FINDS THE INFORMATION ABOUT THE FRAGMENT LOG NEEDED TO EXECUTE THE FRAGMENT
- * LOG.
- * WHEN TUP AND ACC HAVE PREPARED ALL FRAGMENTS THEN LQH ORDERS THOSE LQH'S
- * THAT ARE RESPONSIBLE TO EXECUTE THE FRAGMENT LOGS TO DO SO. IT IS POSSIBLE
- * THAT ANOTHER NODE EXECUTES THE LOG FOR A FRAGMENT RESIDING AT THIS NODE.
- * ########################################################################## */
-/* ***************>> */
-/* START_FRAGREQ > */
-/* ***************>> */
-void Dblqh::execSTART_FRAGREQ(Signal* signal)
-{
- const StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0];
- jamEntry();
-
- tabptr.i = startFragReq->tableId;
- Uint32 fragId = startFragReq->fragId;
-
- ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
- if (!getFragmentrec(signal, fragId)) {
- startFragRefLab(signal);
- return;
- }//if
- tabptr.p->tableStatus = Tablerec::TABLE_DEFINED;
-
- initFragrecSr(signal);
- if (startFragReq->lcpNo == ZNIL) {
- jam();
- /* ----------------------------------------------------------------------
- * THERE WAS NO LOCAL CHECKPOINT AVAILABLE FOR THIS FRAGMENT. WE DO
- * NOT NEED TO READ IN THE LOCAL FRAGMENT. WE HAVE ALREADY ADDED THE
- * FRAGMENT AS AN EMPTY FRAGMENT AT THIS POINT. THUS WE CAN SIMPLY
- * EXIT AND THE FRAGMENT WILL PARTICIPATE IN THE EXECUTION OF THE LOG.
- * PUT FRAGMENT ON LIST OF COMPLETED FRAGMENTS FOR EXECUTION OF LOG.
- * ---------------------------------------------------------------------- */
- fragptr.p->nextFrag = cfirstCompletedFragSr;
- cfirstCompletedFragSr = fragptr.i;
- return;
- }//if
- if (cfirstWaitFragSr == RNIL) {
- jam();
- lcpPtr.i = 0;
- ptrAss(lcpPtr, lcpRecord);
- if (lcpPtr.p->lcpState == LcpRecord::LCP_IDLE) {
- jam();
- initLcpSr(signal, startFragReq->lcpNo,
- startFragReq->lcpId, tabptr.i,
- fragId, fragptr.i);
- signal->theData[0] = lcpPtr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
- signal->theData[3] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
- signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.fragmentId;
- sendSignal(fragptr.p->accBlockref, GSN_SR_FRAGIDREQ, signal, 5, JBB);
- return;
- }//if
- }//if
- fragptr.p->nextFrag = cfirstWaitFragSr;
- cfirstWaitFragSr = fragptr.i;
-}//Dblqh::execSTART_FRAGREQ()
-
-void Dblqh::startFragRefLab(Signal* signal)
-{
- const StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0];
- BlockReference userRef = startFragReq->userRef;
- Uint32 userPtr = startFragReq->userPtr;
- signal->theData[0] = userPtr;
- signal->theData[1] = terrorCode;
- signal->theData[2] = cownNodeid;
- sendSignal(userRef, GSN_START_FRAGREF, signal, 3, JBB);
- return;
-}//Dblqh::startFragRefLab()
-
-/* ***************>> */
-/* SR_FRAGIDCONF > */
-/* ***************>> */
-/* --------------------------------------------------------------------------
- * PRECONDITION: LCP_PTR:LCP_STATE = SR_WAIT_FRAGID
- * -------------------------------------------------------------------------- */
-void Dblqh::execSR_FRAGIDCONF(Signal* signal)
-{
- SrFragidConf * const srFragidConf = (SrFragidConf *)&signal->theData[0];
- jamEntry();
-
- lcpPtr.i = srFragidConf->lcpPtr;
- ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
- ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_SR_WAIT_FRAGID);
- /* ------------------------------------------------------------------------
- * NO ERROR CHECKING OF TNO_LOCFRAG VALUE. AN OUT-OF-BOUNDS VALUE WILL GIVE
- * AN INDEX OUT OF RANGE, WHICH CAUSES A SYSTEM RESTART, AND THAT IS DESIRED.
- * ------------------------------------------------------------------------ */
- lcpPtr.p->lcpAccptr = srFragidConf->accPtr;
- fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- fragptr.p->accFragptr[0] = srFragidConf->fragPtr[0];
- fragptr.p->accFragptr[1] = srFragidConf->fragPtr[1];
- Uint32 noLocFrag = srFragidConf->noLocFrag;
- ndbrequire(noLocFrag == 2);
- Uint32 fragid[2];
- Uint32 i;
- for (i = 0; i < noLocFrag; i++) {
- fragid[i] = srFragidConf->fragId[i];
- }//for
-
- for (i = 0; i < noLocFrag; i++) {
- jam();
- Uint32 fragId = fragid[i];
- /* ----------------------------------------------------------------------
- * THERE IS NO ERROR CHECKING ON PURPOSE. IT IS POSSIBLE TO CALCULATE HOW
- * MANY LOCAL LCP RECORDS THERE SHOULD BE, AND IT SHOULD NEVER HAPPEN THAT
- * NONE IS FREE. IF NONE IS FREE THE RESULT IS A POINTER OUT OF RANGE,
- * WHICH IS AN ERROR IN ITSELF. THIS REUSES THE ERROR HANDLING IN THE
- * AXE VM.
- * ---------------------------------------------------------------------- */
- seizeLcpLoc(signal);
- initLcpLocAcc(signal, fragId);
- lcpLocptr.p->lcpLocstate = LcpLocRecord::SR_ACC_STARTED;
- signal->theData[0] = lcpPtr.p->lcpAccptr;
- signal->theData[1] = lcpLocptr.i;
- signal->theData[2] = lcpLocptr.p->locFragid;
- signal->theData[3] = lcpPtr.p->currentFragment.lcpFragOrd.lcpId % MAX_LCP_STORED;
- sendSignal(fragptr.p->accBlockref, GSN_ACC_SRREQ, signal, 4, JBB);
- seizeLcpLoc(signal);
- initLcpLocTup(signal, fragId);
- lcpLocptr.p->lcpLocstate = LcpLocRecord::SR_TUP_STARTED;
- signal->theData[0] = lcpLocptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
- signal->theData[3] = lcpLocptr.p->locFragid;
- signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
- sendSignal(fragptr.p->tupBlockref, GSN_TUP_SRREQ, signal, 5, JBB);
- }//for
- lcpPtr.p->lcpState = LcpRecord::LCP_SR_STARTED;
- return;
-}//Dblqh::execSR_FRAGIDCONF()
-
-/* ***************> */
-/* SR_FRAGIDREF > */
-/* ***************> */
-void Dblqh::execSR_FRAGIDREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dblqh::execSR_FRAGIDREF()
-
-/* ************>> */
-/* ACC_SRCONF > */
-/* ************>> */
-/* --------------------------------------------------------------------------
- * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = SR_ACC_STARTED
- * -------------------------------------------------------------------------- */
-void Dblqh::execACC_SRCONF(Signal* signal)
-{
- jamEntry();
- lcpLocptr.i = signal->theData[0];
- ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- if (lcpLocptr.p->lcpLocstate != LcpLocRecord::SR_ACC_STARTED) {
- jam();
- systemErrorLab(signal);
- return;
- }//if
-
- lcpPtr.i = lcpLocptr.p->masterLcpRec;
- ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
- /* ------------------------------------------------------------------------
- * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS REFERENCE
- * WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
- * ------------------------------------------------------------------------ */
- lcpLocptr.p->lcpLocstate = LcpLocRecord::SR_ACC_COMPLETED;
- srCompletedLab(signal);
- return;
-}//Dblqh::execACC_SRCONF()
-
-/* ************> */
-/* ACC_SRREF > */
-/* ************> */
-void Dblqh::execACC_SRREF(Signal* signal)
-{
- jamEntry();
- terrorCode = signal->theData[1];
- systemErrorLab(signal);
- return;
-}//Dblqh::execACC_SRREF()
-
-/* ************>> */
-/* TUP_SRCONF > */
-/* ************>> */
-/* --------------------------------------------------------------------------
- * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = SR_TUP_STARTED
- * -------------------------------------------------------------------------- */
-void Dblqh::execTUP_SRCONF(Signal* signal)
-{
- jamEntry();
- lcpLocptr.i = signal->theData[0];
- ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- Uint32 tupFragPtr = signal->theData[1];
- ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::SR_TUP_STARTED);
-
- lcpPtr.i = lcpLocptr.p->masterLcpRec;
- ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
- /* ------------------------------------------------------------------------
- * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS REFERENCE
- * WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
- * ------------------------------------------------------------------------ */
- lcpLocptr.p->lcpLocstate = LcpLocRecord::SR_TUP_COMPLETED;
- fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if (lcpLocptr.i == lcpPtr.p->firstLcpLocTup) {
- jam();
- fragptr.p->tupFragptr[1] = tupFragPtr;
- } else {
- jam();
- fragptr.p->tupFragptr[0] = tupFragPtr;
- }//if
- srCompletedLab(signal);
- return;
-}//Dblqh::execTUP_SRCONF()
-
-void Dblqh::srCompletedLab(Signal* signal)
-{
- checkSrCompleted(signal);
- if (lcpPtr.p->lcpState == LcpRecord::LCP_SR_COMPLETED) {
- jam();
- /* ----------------------------------------------------------------------
- * THE SYSTEM RESTART OF THIS FRAGMENT HAS BEEN COMPLETED. IT IS NOW
- * TIME TO START A SYSTEM RESTART ON THE NEXT FRAGMENT OR CONTINUE
- * WITH THE NEXT STEP OF THE SYSTEM RESTART. THIS STEP IS TO EXECUTE
- * THE FRAGMENT LOGS.
- * ----------------------------------------------------------------------
- * WE RELEASE THE LOCAL LCP RECORDS.
- * --------------------------------------------------------------------- */
- releaseLocalLcps(signal);
- /* ----------------------------------------------------------------------
- * PUT FRAGMENT ON LIST OF FRAGMENTS WHICH HAVE BEEN STARTED AS PART OF
- * THE SYSTEM RESTART. THEY ARE NOW WAITING TO EXECUTE THE FRAGMENT LOG.
- * --------------------------------------------------------------------- */
- fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- fragptr.p->nextFrag = cfirstCompletedFragSr;
- cfirstCompletedFragSr = fragptr.i;
- if (cfirstWaitFragSr != RNIL) {
- jam();
- /* --------------------------------------------------------------------
- * ANOTHER FRAGMENT IS WAITING FOR SYSTEM RESTART. RESTART THIS
- * FRAGMENT AS WELL.
- * -------------------------------------------------------------------- */
- fragptr.i = cfirstWaitFragSr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- cfirstWaitFragSr = fragptr.p->nextFrag;
- /* --------------------------------------------------------------------
- * RETRIEVE DATA FROM THE FRAGMENT RECORD.
- * -------------------------------------------------------------------- */
- ndbrequire(fragptr.p->srChkpnr < MAX_LCP_STORED);
- initLcpSr(signal,
- fragptr.p->srChkpnr,
- fragptr.p->lcpId[fragptr.p->srChkpnr],
- fragptr.p->tabRef,
- fragptr.p->fragId,
- fragptr.i);
- signal->theData[0] = lcpPtr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
- signal->theData[3] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
- signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.fragmentId;
- sendSignal(fragptr.p->accBlockref, GSN_SR_FRAGIDREQ, signal, 5, JBB);
- return;
- } else {
- jam();
- /* --------------------------------------------------------------------
- * NO MORE FRAGMENTS ARE WAITING FOR SYSTEM RESTART.
- * -------------------------------------------------------------------- */
- lcpPtr.p->lcpState = LcpRecord::LCP_IDLE;
- if (cstartRecReq == ZTRUE) {
- jam();
- /* ----------------------------------------------------------------
- * WE HAVE ALSO RECEIVED AN INDICATION THAT NO MORE FRAGMENTS
- * NEED RESTART.
- * NOW IT IS TIME TO START EXECUTING THE UNDO LOG.
- * ----------------------------------------------------------------
- * WE ARE NOW IN A POSITION TO ORDER TUP AND ACC TO START
- * EXECUTING THEIR UNDO LOGS. THIS MUST BE DONE BEFORE THE
- * FRAGMENT LOGS CAN BE EXECUTED.
- * ---------------------------------------------------------------- */
- csrExecUndoLogState = EULS_STARTED;
- signal->theData[0] = caccBlockref;
- signal->theData[1] = cownref;
- sendSignal(caccBlockref, GSN_START_RECREQ, signal, 2, JBB);
- signal->theData[0] = ctupBlockref;
- signal->theData[1] = cownref;
- sendSignal(ctupBlockref, GSN_START_RECREQ, signal, 2, JBB);
- return;
- } else {
- jam();
- /* ----------------------------------------------------------------
- * WE HAVE NOT RECEIVED ALL FRAGMENTS YET, OR AT LEAST WE HAVE
- * NOT YET RECEIVED THE START_RECREQ SIGNAL. EXIT AND WAIT
- * FOR MORE.
- * ---------------------------------------------------------------- */
- return;
- }//if
- }//if
- }//if
- /*---------------*/
- /* ELSE */
- /*-------------------------------------------------------------------------
- * THE SYSTEM RESTART ON THIS FRAGMENT HAS NOT BEEN COMPLETED,
- * EXIT AND WAIT FOR MORE SIGNALS
- *-------------------------------------------------------------------------
- * DO NOTHING, EXIT IS EXECUTED BELOW
- *------------------------------------------------------------------------- */
- return;
-}//Dblqh::srCompletedLab()
-
-/* ************> */
-/* TUP_SRREF > */
-/* ************> */
-void Dblqh::execTUP_SRREF(Signal* signal)
-{
- jamEntry();
- terrorCode = signal->theData[1];
- systemErrorLab(signal);
- return;
-}//Dblqh::execTUP_SRREF()
-
-/* ***************> */
-/* START_RECREQ > */
-/* ***************> */
-void Dblqh::execSTART_RECREQ(Signal* signal)
-{
- CRASH_INSERTION(5027);
-
- jamEntry();
- StartRecReq * const req = (StartRecReq*)&signal->theData[0];
- cmasterDihBlockref = req->senderRef;
-
- crestartOldestGci = req->keepGci;
- crestartNewestGci = req->lastCompletedGci;
- cnewestGci = req->newestGci;
-
- ndbrequire(req->receivingNodeId == cownNodeid);
-
- cnewestCompletedGci = cnewestGci;
- cstartRecReq = ZTRUE;
- for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
- ptrAss(logPartPtr, logPartRecord);
- logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci;
- }//for
- /* ------------------------------------------------------------------------
- * WE HAVE TO SET THE OLDEST AND THE NEWEST GLOBAL CHECKPOINT IDENTITY
- * THAT WILL SURVIVE THIS SYSTEM RESTART. THIS IS NEEDED SO THAT WE CAN
- * SET THE LOG HEAD AND LOG TAIL PROPERLY BEFORE STARTING THE SYSTEM AGAIN.
- * WE ALSO NEED TO SET CNEWEST_GCI TO ENSURE THAT LOG RECORDS ARE EXECUTED
- * WITH A PROPER GCI.
- *------------------------------------------------------------------------ */
- if (cstartType == NodeState::ST_NODE_RESTART) {
- jam();
- signal->theData[0] = ZSR_PHASE3_START;
- signal->theData[1] = ZSR_PHASE2_COMPLETED;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- return;
- }//if
- if(cstartType == NodeState::ST_INITIAL_NODE_RESTART){
- jam();
- StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
- conf->startingNodeId = getOwnNodeId();
- sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
- StartRecConf::SignalLength, JBB);
- return;
- }//if
- if (cfirstWaitFragSr == RNIL) {
- /* ----------------------------------------------------------------------
- * THERE ARE NO FRAGMENTS WAITING TO BE RESTARTED.
- * --------------------------------------------------------------------- */
- lcpPtr.i = 0;
- ptrAss(lcpPtr, lcpRecord);
- if (lcpPtr.p->lcpState == LcpRecord::LCP_IDLE) {
- jam();
- /* --------------------------------------------------------------------
- * THERE ARE NO FRAGMENTS THAT ARE CURRENTLY PERFORMING THEIR
- * SYSTEM RESTART.
- * --------------------------------------------------------------------
- * WE ARE NOW IN A POSITION TO ORDER TUP AND ACC TO START EXECUTING
- * THEIR UNDO LOGS. THIS MUST BE DONE BEFORE THE FRAGMENT LOGS
- * CAN BE EXECUTED.
- * ------------------------------------------------------------------- */
- csrExecUndoLogState = EULS_STARTED;
- signal->theData[0] = caccBlockref;
- signal->theData[1] = cownref;
- sendSignal(caccBlockref, GSN_START_RECREQ, signal, 2, JBB);
- signal->theData[0] = ctupBlockref;
- signal->theData[1] = cownref;
- sendSignal(ctupBlockref, GSN_START_RECREQ, signal, 2, JBB);
- }//if
- }//if
- /* -----------------------------------------------------------------------
- * EXIT AND WAIT FOR COMPLETION OF ALL FRAGMENTS.
- * ----------------------------------------------------------------------- */
- return;
-}//Dblqh::execSTART_RECREQ()
-
-/* ***************>> */
-/* START_RECCONF > */
-/* ***************>> */
-void Dblqh::execSTART_RECCONF(Signal* signal)
-{
- jamEntry();
- BlockReference userRef = signal->theData[0];
- if (userRef == caccBlockref) {
- if (csrExecUndoLogState == EULS_STARTED) {
- jam();
- csrExecUndoLogState = EULS_ACC_COMPLETED;
- } else {
- ndbrequire(csrExecUndoLogState == EULS_TUP_COMPLETED);
- jam();
- csrExecUndoLogState = EULS_COMPLETED;
- /* --------------------------------------------------------------------
- * START THE FIRST PHASE OF EXECUTION OF THE LOG.
- * ------------------------------------------------------------------- */
- startExecSr(signal);
- }//if
- } else {
- ndbrequire(userRef == ctupBlockref);
- if (csrExecUndoLogState == EULS_STARTED) {
- jam();
- csrExecUndoLogState = EULS_TUP_COMPLETED;
- } else {
- ndbrequire(csrExecUndoLogState == EULS_ACC_COMPLETED);
- jam();
- csrExecUndoLogState = EULS_COMPLETED;
- /* --------------------------------------------------------------------
- * START THE FIRST PHASE OF EXECUTION OF THE LOG.
- * ------------------------------------------------------------------- */
- startExecSr(signal);
- }//if
- }//if
- return;
-}//Dblqh::execSTART_RECCONF()
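
START_RECCONF can arrive from ACC and TUP in either order, so the undo-log state walks from STARTED through one of the intermediate states to COMPLETED, and only the second confirmation triggers startExecSr(). A minimal sketch of that state machine with hypothetical names:

#include <cassert>

// Sketch (hypothetical names) of the undo-log completion tracking in
// execSTART_RECCONF() above: ACC and TUP each confirm once, in either order,
// and only the second confirmation moves the state to COMPLETED.
enum UndoLogState { STARTED, ACC_COMPLETED, TUP_COMPLETED, COMPLETED };
enum Sender { FROM_ACC, FROM_TUP };

static UndoLogState onStartRecConf(UndoLogState s, Sender from)
{
  if (from == FROM_ACC)
    return (s == STARTED) ? ACC_COMPLETED : COMPLETED;  // TUP already done
  else
    return (s == STARTED) ? TUP_COMPLETED : COMPLETED;  // ACC already done
}

int main()
{
  UndoLogState s = STARTED;
  s = onStartRecConf(s, FROM_TUP);
  assert(s == TUP_COMPLETED);
  s = onStartRecConf(s, FROM_ACC);
  assert(s == COMPLETED);   // second confirmation completes; order irrelevant
  return 0;
}
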
-
-/* ***************> */
-/* START_RECREF > */
-/* ***************> */
-void Dblqh::execSTART_RECREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}//Dblqh::execSTART_RECREF()
-
-/* ***************>> */
-/* START_EXEC_SR > */
-/* ***************>> */
-void Dblqh::execSTART_EXEC_SR(Signal* signal)
-{
- FragrecordPtr prevFragptr;
- jamEntry();
- fragptr.i = signal->theData[0];
- prevFragptr.i = signal->theData[1];
- if (fragptr.i == RNIL) {
- jam();
- ndbrequire(cnoOfNodes < MAX_NDB_NODES);
- /* ----------------------------------------------------------------------
- * NO MORE FRAGMENTS TO START EXECUTING THE LOG ON.
- * SEND EXEC_SRREQ TO ALL LQH TO INDICATE THAT THIS NODE WILL
- * NOT REQUEST ANY MORE FRAGMENTS TO EXECUTE THE FRAGMENT LOG ON.
- * ----------------------------------------------------------------------
- * WE NEED TO SEND THOSE SIGNALS EVEN IF WE HAVE NOT REQUESTED
- * THAT ANY FRAGMENTS PARTICIPATE IN THIS PHASE.
- * --------------------------------------------------------------------- */
- for (Uint32 i = 0; i < cnoOfNodes; i++) {
- jam();
- if (cnodeStatus[i] == ZNODE_UP) {
- jam();
- ndbrequire(cnodeData[i] < MAX_NDB_NODES);
- BlockReference ref = calcLqhBlockRef(cnodeData[i]);
- signal->theData[0] = cownNodeid;
- sendSignal(ref, GSN_EXEC_SRREQ, signal, 1, JBB);
- }//if
- }//for
- } else {
- jam();
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if (fragptr.p->srNoLognodes > csrPhasesCompleted) {
- jam();
- Uint32 index = csrPhasesCompleted;
- arrGuard(index, 4);
- BlockReference ref = calcLqhBlockRef(fragptr.p->srLqhLognode[index]);
- fragptr.p->srStatus = Fragrecord::SS_STARTED;
- /* --------------------------------------------------------------------
- * SINCE WE CAN HAVE SEVERAL LQH NODES PER FRAGMENT WE CALCULATE
- * THE LQH POINTER IN SUCH A WAY THAT WE CAN DEDUCE WHICH OF THE
- * LQH NODES HAS RESPONDED WHEN EXEC_FRAGCONF IS RECEIVED.
- * ------------------------------------------------------------------- */
- ExecFragReq * const execFragReq = (ExecFragReq *)&signal->theData[0];
- execFragReq->userPtr = fragptr.i;
- execFragReq->userRef = cownref;
- execFragReq->tableId = fragptr.p->tabRef;
- execFragReq->fragId = fragptr.p->fragId;
- execFragReq->startGci = fragptr.p->srStartGci[index];
- execFragReq->lastGci = fragptr.p->srLastGci[index];
- sendSignal(ref, GSN_EXEC_FRAGREQ, signal, ExecFragReq::SignalLength, JBB);
- prevFragptr.i = fragptr.i;
- fragptr.i = fragptr.p->nextFrag;
- } else {
- jam();
- /* --------------------------------------------------------------------
- * THIS FRAGMENT IS NOW FINISHED WITH THE SYSTEM RESTART. IT DOES
- * NOT NEED TO PARTICIPATE IN ANY MORE PHASES. REMOVE IT FROM THE
- * LIST OF COMPLETED FRAGMENTS TO EXECUTE THE LOG ON.
- * ALSO SEND START_FRAGCONF TO DIH AND SET THE STATE TO ACTIVE ON THE
- * FRAGMENT.
- * ------------------------------------------------------------------- */
- Uint32 next = fragptr.p->nextFrag;
- if (prevFragptr.i != RNIL) {
- jam();
- ptrCheckGuard(prevFragptr, cfragrecFileSize, fragrecord);
- prevFragptr.p->nextFrag = next;
- } else {
- jam();
- cfirstCompletedFragSr = next;
- }//if
-
- /**
- * Put fragment on list which has completed REDO log
- */
- fragptr.p->nextFrag = c_redo_log_complete_frags;
- c_redo_log_complete_frags = fragptr.i;
-
- fragptr.p->fragStatus = Fragrecord::FSACTIVE;
- fragptr.p->logFlag = Fragrecord::STATE_TRUE;
- signal->theData[0] = fragptr.p->srUserptr;
- signal->theData[1] = cownNodeid;
- sendSignal(fragptr.p->srBlockref, GSN_START_FRAGCONF, signal, 2, JBB);
- /* --------------------------------------------------------------------
- * WE HAVE TO ENSURE THAT THIS FRAGMENT IS NOT PUT BACK ON THE LIST BY
- * MISTAKE. WE DO THIS BY ALSO REMOVING IT AS PREVIOUS IN START_EXEC_SR.
- * THIS IS PERFORMED BY KEEPING PREV_FRAGPTR AS PREV_FRAGPTR BUT MOVING
- * FRAGPTR TO THE NEXT FRAGMENT IN THE LIST.
- * ------------------------------------------------------------------- */
- fragptr.i = next;
- }//if
- signal->theData[0] = fragptr.i;
- signal->theData[1] = prevFragptr.i;
- sendSignal(cownref, GSN_START_EXEC_SR, signal, 2, JBB);
- }//if
- return;
-}//Dblqh::execSTART_EXEC_SR()
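
Rather than walking the completed-fragment list in one long loop, execSTART_EXEC_SR() handles one fragment and then sends START_EXEC_SR back to itself with the next and previous list positions, so other signals can interleave. The sketch below imitates that pattern with an assumed toy scheduler; none of the names are real NDB APIs:

#include <cstddef>
#include <functional>
#include <queue>
#include <vector>

// Toy scheduler (an assumption for this sketch only): jobs queued with send()
// are run one at a time, like signals taken off a job buffer.
struct Scheduler {
  std::queue<std::function<void()> > jobs;
  void send(std::function<void()> f) { jobs.push(std::move(f)); }
  void run() {
    while (!jobs.empty()) {
      std::function<void()> f = jobs.front();
      jobs.pop();
      f();
    }
  }
};

// Handle one fragment, then re-post ourselves for the next one, mirroring the
// START_EXEC_SR-to-self pattern above. Reaching the end corresponds to the
// fragptr.i == RNIL case.
static void startExecOne(Scheduler& s, std::vector<int>& frags, std::size_t idx)
{
  if (idx == frags.size())
    return;                                   // no more fragments to start
  // ... send EXEC_FRAGREQ for frags[idx] in the real code ...
  s.send([&s, &frags, idx] { startExecOne(s, frags, idx + 1); });
}

int main()
{
  Scheduler s;
  std::vector<int> frags;
  frags.push_back(10);
  frags.push_back(11);
  frags.push_back(12);
  s.send([&s, &frags] { startExecOne(s, frags, 0); });
  s.run();
  return 0;
}
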
-
-/* ***************> */
-/* EXEC_FRAGREQ > */
-/* ***************> */
-/* --------------------------------------------------------------------------
- * THIS SIGNAL IS USED TO REQUEST THAT A FRAGMENT PARTICIPATES IN EXECUTING
- * THE LOG IN THIS NODE.
- * ------------------------------------------------------------------------- */
-void Dblqh::execEXEC_FRAGREQ(Signal* signal)
-{
- ExecFragReq * const execFragReq = (ExecFragReq *)&signal->theData[0];
- jamEntry();
- tabptr.i = execFragReq->tableId;
- Uint32 fragId = execFragReq->fragId;
- ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
- if (!getFragmentrec(signal, fragId)) {
- jam();
- if (!insertFragrec(signal, fragId)) {
- jam();
- sendExecFragRefLab(signal);
- return;
- }//if
- initFragrec(signal, tabptr.i, fragId, ZLOG_NODE);
- fragptr.p->execSrStatus = Fragrecord::ACTIVE_REMOVE_AFTER;
- } else {
- jam();
- if (fragptr.p->execSrStatus == Fragrecord::ACTIVE_REMOVE_AFTER) {
- jam();
- fragptr.p->execSrStatus = Fragrecord::ACTIVE_REMOVE_AFTER;
- } else {
- jam();
- }//if
- }//if
- ndbrequire(fragptr.p->execSrNoReplicas < 4);
- fragptr.p->execSrBlockref[fragptr.p->execSrNoReplicas] = execFragReq->userRef;
- fragptr.p->execSrUserptr[fragptr.p->execSrNoReplicas] = execFragReq->userPtr;
- fragptr.p->execSrStartGci[fragptr.p->execSrNoReplicas] = execFragReq->startGci;
- fragptr.p->execSrLastGci[fragptr.p->execSrNoReplicas] = execFragReq->lastGci;
- fragptr.p->execSrStatus = Fragrecord::ACTIVE;
- fragptr.p->execSrNoReplicas++;
- cnoFragmentsExecSr++;
- return;
-}//Dblqh::execEXEC_FRAGREQ()
-
-void Dblqh::sendExecFragRefLab(Signal* signal)
-{
- ExecFragReq * const execFragReq = (ExecFragReq *)&signal->theData[0];
- BlockReference retRef = execFragReq->userRef;
- Uint32 retPtr = execFragReq->userPtr;
-
- signal->theData[0] = retPtr;
- signal->theData[1] = terrorCode;
- sendSignal(retRef, GSN_EXEC_FRAGREF, signal, 2, JBB);
- return;
-}//Dblqh::sendExecFragRefLab()
-
-/* ***************>> */
-/* EXEC_FRAGCONF > */
-/* ***************>> */
-void Dblqh::execEXEC_FRAGCONF(Signal* signal)
-{
- jamEntry();
- fragptr.i = signal->theData[0];
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- fragptr.p->srStatus = Fragrecord::SS_COMPLETED;
- return;
-}//Dblqh::execEXEC_FRAGCONF()
-
-/* ***************> */
-/* EXEC_FRAGREF > */
-/* ***************> */
-void Dblqh::execEXEC_FRAGREF(Signal* signal)
-{
- jamEntry();
- terrorCode = signal->theData[1];
- systemErrorLab(signal);
- return;
-}//Dblqh::execEXEC_FRAGREF()
-
-/* *************** */
-/* EXEC_SRCONF > */
-/* *************** */
-void Dblqh::execEXEC_SRCONF(Signal* signal)
-{
- jamEntry();
- Uint32 nodeId = signal->theData[0];
- arrGuard(nodeId, MAX_NDB_NODES);
- cnodeExecSrState[nodeId] = ZEXEC_SR_COMPLETED;
- ndbrequire(cnoOfNodes < MAX_NDB_NODES);
- for (Uint32 i = 0; i < cnoOfNodes; i++) {
- jam();
- if (cnodeStatus[i] == ZNODE_UP) {
- jam();
- nodeId = cnodeData[i];
- arrGuard(nodeId, MAX_NDB_NODES);
- if (cnodeExecSrState[nodeId] != ZEXEC_SR_COMPLETED) {
- jam();
- /* ------------------------------------------------------------------
- * NOT ALL NODES HAVE REPORTED COMPLETION OF EXECUTING FRAGMENT
- * LOGS YET.
- * ----------------------------------------------------------------- */
- return;
- }//if
- }//if
- }//for
- /* ------------------------------------------------------------------------
- * CLEAR NODE SYSTEM RESTART EXECUTION STATE TO PREPARE FOR NEXT PHASE OF
- * LOG EXECUTION.
- * ----------------------------------------------------------------------- */
- for (nodeId = 0; nodeId < MAX_NDB_NODES; nodeId++) {
- cnodeExecSrState[nodeId] = ZSTART_SR;
- }//for
- /* ------------------------------------------------------------------------
- * NOW CHECK IF ALL FRAGMENTS IN THIS PHASE HAVE COMPLETED. IF SO START THE
- * NEXT PHASE.
- * ----------------------------------------------------------------------- */
- fragptr.i = cfirstCompletedFragSr;
- if (fragptr.i == RNIL) {
- jam();
- execSrCompletedLab(signal);
- return;
- }//if
- do {
- jam();
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- ndbrequire(fragptr.p->srStatus == Fragrecord::SS_COMPLETED);
- fragptr.i = fragptr.p->nextFrag;
- } while (fragptr.i != RNIL);
- execSrCompletedLab(signal);
- return;
-}//Dblqh::execEXEC_SRCONF()
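
execEXEC_SRCONF() above (and execEXEC_SRREQ() below) use the same barrier pattern: record the reporting node, scan every node that is up, and only when all of them have reported does the restart advance, after which the per-node state is cleared for the next phase. A minimal sketch of that pattern with hypothetical names and an assumed node limit:

#include <cstdint>
#include <cassert>

// Sketch (hypothetical names) of the barrier pattern in execEXEC_SRCONF():
// mark the reporting node as completed, then scan all nodes that are up; only
// when every live node has reported do we advance, and the per-node state is
// then reset for the next phase.
static const uint32_t MAX_NODES = 8;          // assumed, not the real limit

struct SrBarrier {
  bool nodeUp[MAX_NODES];
  bool completed[MAX_NODES];

  // Returns true when all live nodes have reported completion.
  bool report(uint32_t nodeId) {
    completed[nodeId] = true;
    for (uint32_t i = 0; i < MAX_NODES; i++)
      if (nodeUp[i] && !completed[i])
        return false;                         // still waiting for someone
    for (uint32_t i = 0; i < MAX_NODES; i++)
      completed[i] = false;                   // clear state for next phase
    return true;
  }
};

int main()
{
  SrBarrier b = {};
  b.nodeUp[1] = b.nodeUp[2] = true;
  assert(!b.report(1));
  assert(b.report(2));    // last live node reporting releases the barrier
  return 0;
}
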
-
-void Dblqh::execSrCompletedLab(Signal* signal)
-{
- csrPhasesCompleted++;
- /* ------------------------------------------------------------------------
- * ALL FRAGMENTS WERE COMPLETED. THIS PHASE IS COMPLETED. IT IS NOW TIME TO
- * START THE NEXT PHASE.
- * ----------------------------------------------------------------------- */
- if (csrPhasesCompleted >= 4) {
- jam();
- /* ----------------------------------------------------------------------
- * THIS WAS THE LAST PHASE. WE HAVE NOW COMPLETED THE EXECUTION OF THE
- * FRAGMENT LOGS IN ALL NODES. BEFORE WE SEND START_RECCONF TO THE
- * MASTER DIH TO INDICATE A COMPLETED SYSTEM RESTART IT IS NECESSARY
- * TO FIND THE HEAD AND THE TAIL OF THE LOG WHEN NEW OPERATIONS START
- * TO COME AGAIN.
- *
- * THE FIRST STEP IS TO FIND THE HEAD AND TAIL MBYTE OF EACH LOG PART.
- * TO DO THIS WE REUSE THE CONTINUEB SIGNAL SR_LOG_LIMITS. THEN WE
- * HAVE TO FIND THE ACTUAL PAGE NUMBER AND PAGE INDEX WHERE TO
- * CONTINUE WRITING THE LOG AFTER THE SYSTEM RESTART.
- * --------------------------------------------------------------------- */
- for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
- jam();
- ptrAss(logPartPtr, logPartRecord);
- logPartPtr.p->logPartState = LogPartRecord::SR_FOURTH_PHASE_STARTED;
- logPartPtr.p->logLastGci = crestartNewestGci;
- logPartPtr.p->logStartGci = crestartOldestGci;
- logPartPtr.p->logExecState = LogPartRecord::LES_SEARCH_STOP;
- if (logPartPtr.p->headFileNo == ZNIL) {
- jam();
- /* -----------------------------------------------------------------
- * IF WE HAVEN'T FOUND ANY HEAD OF THE LOG THEN WE ARE IN SERIOUS
- * TROUBLE. THIS SHOULD NOT OCCUR. IF IT OCCURS ANYWAY THEN WE
- * HAVE TO FIND A CURE FOR THIS PROBLEM.
- * ----------------------------------------------------------------- */
- systemErrorLab(signal);
- return;
- }//if
- signal->theData[0] = ZSR_LOG_LIMITS;
- signal->theData[1] = logPartPtr.i;
- signal->theData[2] = logPartPtr.p->lastLogfile;
- signal->theData[3] = logPartPtr.p->lastMbyte;
- sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
- }//for
- return;
- } else {
- jam();
- /* ----------------------------------------------------------------------
- * THERE ARE YET MORE PHASES TO RESTART.
- * WE MUST INITIALISE DATA FOR NEXT PHASE AND SEND START SIGNAL.
- * --------------------------------------------------------------------- */
- startExecSr(signal);
- }//if
- return;
-}//Dblqh::execSrCompletedLab()
-
-/* ************>> */
-/* EXEC_SRREQ > */
-/* ************>> */
-void Dblqh::execEXEC_SRREQ(Signal* signal)
-{
- jamEntry();
- Uint32 nodeId = signal->theData[0];
- ndbrequire(nodeId < MAX_NDB_NODES);
- cnodeSrState[nodeId] = ZEXEC_SR_COMPLETED;
- ndbrequire(cnoOfNodes < MAX_NDB_NODES);
- for (Uint32 i = 0; i < cnoOfNodes; i++) {
- jam();
- if (cnodeStatus[i] == ZNODE_UP) {
- jam();
- nodeId = cnodeData[i];
- if (cnodeSrState[nodeId] != ZEXEC_SR_COMPLETED) {
- jam();
- /* ------------------------------------------------------------------
- * NOT ALL NODES HAVE REPORTED COMPLETION OF SENDING EXEC_FRAGREQ YET.
- * ----------------------------------------------------------------- */
- return;
- }//if
- }//if
- }//for
- /* ------------------------------------------------------------------------
- * CLEAR NODE SYSTEM RESTART STATE TO PREPARE FOR NEXT PHASE OF LOG
- * EXECUTION
- * ----------------------------------------------------------------------- */
- for (nodeId = 0; nodeId < MAX_NDB_NODES; nodeId++) {
- cnodeSrState[nodeId] = ZSTART_SR;
- }//for
- if (csrPhasesCompleted != 0) {
- /* ----------------------------------------------------------------------
- * THE FIRST PHASE MUST ALWAYS EXECUTE THE LOG.
- * --------------------------------------------------------------------- */
- if (cnoFragmentsExecSr == 0) {
- jam();
- /* --------------------------------------------------------------------
- * THERE WERE NO FRAGMENTS THAT NEEDED TO EXECUTE THE LOG IN THIS PHASE.
- * ------------------------------------------------------------------- */
- srPhase3Comp(signal);
- return;
- }//if
- }//if
- /* ------------------------------------------------------------------------
- * NOW ALL NODES HAVE SENT ALL EXEC_FRAGREQ. NOW WE CAN START EXECUTING THE
- * LOG FROM THE MINIMUM GCI NEEDED UNTIL THE MAXIMUM GCI NEEDED.
- *
- * WE MUST FIRST CHECK IF THE FIRST PHASE OF THE SYSTEM RESTART HAS BEEN
- * COMPLETED. THIS HANDLING IS PERFORMED IN THE FILE SYSTEM MODULE
- * ----------------------------------------------------------------------- */
- signal->theData[0] = ZSR_PHASE3_START;
- signal->theData[1] = ZSR_PHASE2_COMPLETED;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- return;
-}//Dblqh::execEXEC_SRREQ()
-
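/* A minimal standalone sketch (not taken from the Dblqh sources; all names
 * and types below are hypothetical) of the check performed in execEXEC_SRREQ()
 * above: the next phase of log execution may only start once every node that
 * is up has reported completion of sending EXEC_FRAGREQ.
 */
#include <cstddef>

enum NodeStatus { NODE_UP, NODE_DOWN };
enum SrState    { START_SR, EXEC_SR_COMPLETED };

/* Returns true only when every node that is up has reported completion. */
static bool allUpNodesCompleted(const NodeStatus* status,
                                const SrState*    srState,
                                std::size_t       noOfNodes)
{
  for (std::size_t i = 0; i < noOfNodes; i++) {
    if (status[i] == NODE_UP && srState[i] != EXEC_SR_COMPLETED) {
      return false;   /* at least one node is still sending EXEC_FRAGREQ */
    }
  }
  return true;        /* safe to clear the states and start the next phase */
}
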
-/* ######################################################################### */
-/* SYSTEM RESTART PHASE THREE MODULE */
-/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
-/* */
-/* THIS MODULE IS CONCERNED WITH EXECUTING THE FRAGMENT LOG. IT DOES ALSO */
-/* CONTAIN SIGNAL RECEPTIONS LQHKEYCONF AND LQHKEYREF SINCE LQHKEYREQ IS USED*/
-/* TO EXECUTE THE LOG RECORDS. */
-/* */
-/* BEFORE IT STARTS IT HAS BEEN DECIDED WHERE TO START AND WHERE TO STOP */
-/* READING THE FRAGMENT LOG BY USING THE INFORMATION ABOUT GCI DISCOVERED IN */
-/* PHASE ONE OF THE SYSTEM RESTART. */
-/* ######################################################################### */
-/*---------------------------------------------------------------------------*/
-/* PHASE THREE OF THE SYSTEM RESTART CAN NOW START. ONE OF THE PHASES HAS    */
-/* COMPLETED.                                                                */
-/*---------------------------------------------------------------------------*/
-void Dblqh::srPhase3Start(Signal* signal)
-{
- UintR tsrPhaseStarted;
-
- jamEntry();
- tsrPhaseStarted = signal->theData[0];
- if (csrPhaseStarted == ZSR_NO_PHASE_STARTED) {
- jam();
- csrPhaseStarted = tsrPhaseStarted;
- if (cstartType == NodeState::ST_NODE_RESTART) {
- ndbrequire(cinitialStartOngoing == ZTRUE);
- cinitialStartOngoing = ZFALSE;
- checkStartCompletedLab(signal);
- }//if
- return;
- }//if
- ndbrequire(csrPhaseStarted != tsrPhaseStarted);
- ndbrequire(csrPhaseStarted != ZSR_BOTH_PHASES_STARTED);
-
- csrPhaseStarted = ZSR_BOTH_PHASES_STARTED;
- for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
- jam();
- ptrAss(logPartPtr, logPartRecord);
- logPartPtr.p->logPartState = LogPartRecord::SR_THIRD_PHASE_STARTED;
- logPartPtr.p->logStartGci = (UintR)-1;
- if (csrPhasesCompleted == 0) {
- jam();
- /* --------------------------------------------------------------------
-       * IN THE FIRST PHASE WE MUST ENSURE THAT IT REACHES THE END OF THE LOG.
- * ------------------------------------------------------------------- */
- logPartPtr.p->logLastGci = crestartNewestGci;
- } else {
- jam();
- logPartPtr.p->logLastGci = 2;
- }//if
- }//for
- if (cstartType == NodeState::ST_NODE_RESTART) {
- jam();
- /* ----------------------------------------------------------------------
- * FOR A NODE RESTART WE HAVE NO FRAGMENTS DEFINED YET.
- * THUS WE CAN SKIP THAT PART
- * --------------------------------------------------------------------- */
- signal->theData[0] = ZSR_GCI_LIMITS;
- signal->theData[1] = RNIL;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- } else {
- jam();
- signal->theData[0] = ZSR_GCI_LIMITS;
- signal->theData[1] = 0;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- }//if
- return;
-}//Dblqh::srPhase3Start()
-
-/* --------------------------------------------------------------------------
- * WE NOW NEED TO FIND THE LIMITS WITHIN WHICH TO EXECUTE
- * THE FRAGMENT LOG
- * ------------------------------------------------------------------------- */
-void Dblqh::srGciLimits(Signal* signal)
-{
- LogPartRecordPtr tmpLogPartPtr;
-
- jamEntry();
- fragptr.i = signal->theData[0];
- Uint32 loopCount = 0;
- logPartPtr.i = 0;
- ptrAss(logPartPtr, logPartRecord);
- while (fragptr.i < cfragrecFileSize) {
- jam();
- ptrAss(fragptr, fragrecord);
- if (fragptr.p->execSrStatus != Fragrecord::IDLE) {
- jam();
- ndbrequire(fragptr.p->execSrNoReplicas - 1 < 4);
- for (Uint32 i = 0; i < fragptr.p->execSrNoReplicas; i++) {
- jam();
- if (fragptr.p->execSrStartGci[i] < logPartPtr.p->logStartGci) {
- jam();
- logPartPtr.p->logStartGci = fragptr.p->execSrStartGci[i];
- }//if
- if (fragptr.p->execSrLastGci[i] > logPartPtr.p->logLastGci) {
- jam();
- logPartPtr.p->logLastGci = fragptr.p->execSrLastGci[i];
- }//if
- }//for
- }//if
- loopCount++;
- if (loopCount > 20) {
- jam();
- signal->theData[0] = ZSR_GCI_LIMITS;
- signal->theData[1] = fragptr.i + 1;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- return;
- } else {
- jam();
- fragptr.i++;
- }//if
- }//while
- if (logPartPtr.p->logStartGci == (UintR)-1) {
- jam();
- /* --------------------------------------------------------------------
-     * THERE WERE NO FRAGMENTS TO INSTALL, SO WE EXECUTE AS LITTLE OF THE
-     * LOG AS POSSIBLE TO REACH THE END OF THE LOG. WE DO THIS BY
-     * STARTING AT THE STOP GCI.
- * ------------------------------------------------------------------- */
- logPartPtr.p->logStartGci = logPartPtr.p->logLastGci;
- }//if
- for (tmpLogPartPtr.i = 1; tmpLogPartPtr.i < 4; tmpLogPartPtr.i++) {
- ptrAss(tmpLogPartPtr, logPartRecord);
- tmpLogPartPtr.p->logStartGci = logPartPtr.p->logStartGci;
- tmpLogPartPtr.p->logLastGci = logPartPtr.p->logLastGci;
- }//for
- for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
- jam();
- ptrAss(logPartPtr, logPartRecord);
- logPartPtr.p->logExecState = LogPartRecord::LES_SEARCH_STOP;
- signal->theData[0] = ZSR_LOG_LIMITS;
- signal->theData[1] = logPartPtr.i;
- signal->theData[2] = logPartPtr.p->lastLogfile;
- signal->theData[3] = logPartPtr.p->lastMbyte;
- sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
- }//for
-}//Dblqh::srGciLimits()
-
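/* A standalone sketch (hypothetical containers, not the Dblqh record arrays)
 * of the GCI-limit calculation in srGciLimits() above: the execution window
 * of the log is [minimum start GCI, maximum last GCI] over all replicas of
 * all fragments that take part in log execution; with no such fragments the
 * start is set equal to the stop.
 */
#include <cstddef>
#include <cstdint>
#include <limits>
#include <vector>

struct ReplicaSpan { uint32_t startGci; uint32_t lastGci; };
struct GciLimits   { uint32_t startGci; uint32_t lastGci; };

static GciLimits computeGciLimits(const std::vector<std::vector<ReplicaSpan> >& frags)
{
  GciLimits lim = { std::numeric_limits<uint32_t>::max(), 0 };
  for (std::size_t f = 0; f < frags.size(); f++) {
    for (std::size_t r = 0; r < frags[f].size(); r++) {
      if (frags[f][r].startGci < lim.startGci) lim.startGci = frags[f][r].startGci;
      if (frags[f][r].lastGci  > lim.lastGci)  lim.lastGci  = frags[f][r].lastGci;
    }
  }
  if (lim.startGci == std::numeric_limits<uint32_t>::max()) {
    lim.startGci = lim.lastGci;   /* no fragments: execute as little as possible */
  }
  return lim;
}
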
-/* --------------------------------------------------------------------------
- * IT IS NOW TIME TO FIND WHERE TO START EXECUTING THE LOG.
- * THIS SIGNAL IS SENT FOR EACH LOG PART AND STARTS THE EXECUTION
- * OF THE LOG FOR THIS PART.
- *-------------------------------------------------------------------------- */
-void Dblqh::srLogLimits(Signal* signal)
-{
- Uint32 tlastPrepRef;
- Uint32 tmbyte;
-
- jamEntry();
- logPartPtr.i = signal->theData[0];
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
- logFilePtr.i = signal->theData[1];
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- tmbyte = signal->theData[2];
- Uint32 loopCount = 0;
- /* ------------------------------------------------------------------------
- * WE ARE SEARCHING FOR THE START AND STOP MBYTE OF THE LOG THAT IS TO BE
- * EXECUTED.
- * ----------------------------------------------------------------------- */
- while(true) {
- ndbrequire(tmbyte < 16);
- if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_STOP) {
- if (logFilePtr.p->logMaxGciCompleted[tmbyte] < logPartPtr.p->logLastGci) {
- jam();
- /* --------------------------------------------------------------------
- * WE ARE STEPPING BACKWARDS FROM MBYTE TO MBYTE. THIS IS THE FIRST
- * MBYTE WHICH IS TO BE INCLUDED IN THE LOG EXECUTION. THE STOP GCI
-         * HAS NOT BEEN COMPLETED BEFORE THIS MBYTE. THUS THIS MBYTE HAS
- * TO BE EXECUTED.
- * ------------------------------------------------------------------- */
- logPartPtr.p->stopLogfile = logFilePtr.i;
- logPartPtr.p->stopMbyte = tmbyte;
- logPartPtr.p->logExecState = LogPartRecord::LES_SEARCH_START;
- }//if
- }//if
- /* ------------------------------------------------------------------------
- * WHEN WE HAVEN'T FOUND THE STOP MBYTE IT IS NOT NECESSARY TO LOOK FOR THE
- * START MBYTE. THE REASON IS THE FOLLOWING LOGIC CHAIN:
- * MAX_GCI_STARTED >= MAX_GCI_COMPLETED >= LAST_GCI >= START_GCI
-     * THUS MAX_GCI_STARTED >= START_GCI, SO THE START CONDITION
-     * (MAX_GCI_STARTED < START_GCI) CANNOT BE TRUE UNTIL THE STOP MBYTE
-     * HAS BEEN FOUND.
- * ----------------------------------------------------------------------- */
- if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_START) {
- if (logFilePtr.p->logMaxGciStarted[tmbyte] < logPartPtr.p->logStartGci) {
- jam();
- /* --------------------------------------------------------------------
- * WE HAVE NOW FOUND THE START OF THE EXECUTION OF THE LOG.
- * WE STILL HAVE TO MOVE IT BACKWARDS TO ALSO INCLUDE THE
- * PREPARE RECORDS WHICH WERE STARTED IN A PREVIOUS MBYTE.
- * ------------------------------------------------------------------- */
- tlastPrepRef = logFilePtr.p->logLastPrepRef[tmbyte];
- logPartPtr.p->startMbyte = tlastPrepRef & 65535;
- LogFileRecordPtr locLogFilePtr;
- findLogfile(signal, tlastPrepRef >> 16, logPartPtr, &locLogFilePtr);
- logPartPtr.p->startLogfile = locLogFilePtr.i;
- logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG;
- }//if
- }//if
- if (logPartPtr.p->logExecState != LogPartRecord::LES_EXEC_LOG) {
- if (tmbyte == 0) {
- jam();
- tmbyte = ZNO_MBYTES_IN_FILE - 1;
- logFilePtr.i = logFilePtr.p->prevLogFile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- } else {
- jam();
- tmbyte--;
- }//if
- if (logPartPtr.p->lastLogfile == logFilePtr.i) {
- ndbrequire(logPartPtr.p->lastMbyte != tmbyte);
- }//if
- if (loopCount > 20) {
- jam();
- signal->theData[0] = ZSR_LOG_LIMITS;
- signal->theData[1] = logPartPtr.i;
- signal->theData[2] = logFilePtr.i;
- signal->theData[3] = tmbyte;
- sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
- return;
- }//if
- loopCount++;
- } else {
- jam();
- break;
- }//if
- }//while
- /* ------------------------------------------------------------------------
- * WE HAVE NOW FOUND BOTH THE START AND THE STOP OF THE LOG. NOW START
- * EXECUTING THE LOG. THE FIRST ACTION IS TO OPEN THE LOG FILE WHERE TO
- * START EXECUTING THE LOG.
- * ----------------------------------------------------------------------- */
- if (logPartPtr.p->logPartState == LogPartRecord::SR_THIRD_PHASE_STARTED) {
- jam();
- logFilePtr.i = logPartPtr.p->startLogfile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN_EXEC_SR_START;
- openFileRw(signal, logFilePtr);
- } else {
- jam();
- ndbrequire(logPartPtr.p->logPartState == LogPartRecord::SR_FOURTH_PHASE_STARTED);
- /* --------------------------------------------------------------------
- * WE HAVE NOW FOUND THE TAIL MBYTE IN THE TAIL FILE.
- * SET THOSE PARAMETERS IN THE LOG PART.
- * WE HAVE ALSO FOUND THE HEAD MBYTE. WE STILL HAVE TO SEARCH
- * FOR THE PAGE NUMBER AND PAGE INDEX WHERE TO SET THE HEAD.
- * ------------------------------------------------------------------- */
- logFilePtr.i = logPartPtr.p->startLogfile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logPartPtr.p->logTailFileNo = logFilePtr.p->fileNo;
- logPartPtr.p->logTailMbyte = logPartPtr.p->startMbyte;
- /* --------------------------------------------------------------------
-     * WE ACTUALLY FOUND THE HEAD DURING EXECUTION OF THE LOG, SO WE USE
-     * THAT INFO HERE RATHER THAN THE MBYTE WE FOUND TO BE THE HEAD.
- * ------------------------------------------------------------------- */
- LogFileRecordPtr locLogFilePtr;
- findLogfile(signal, logPartPtr.p->headFileNo, logPartPtr, &locLogFilePtr);
- locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_PHASE;
- openFileRw(signal, locLogFilePtr);
- }//if
- return;
-}//Dblqh::srLogLimits()
-
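/* A simplified, self-contained sketch of the backward search in srLogLimits()
 * above.  It walks backwards over a flat array of per-mbyte metadata (a
 * hypothetical layout; the real code steps backwards through a chain of log
 * files with a real-time break every 20 steps) and returns the stop and start
 * mbyte indices for the execution window [startGci, lastGci].
 */
#include <cstddef>
#include <cstdint>
#include <utility>

struct MbyteInfo { uint32_t maxGciCompleted; uint32_t maxGciStarted; };

/* Returns {startMbyte, stopMbyte}; assumes the limits are always found. */
static std::pair<std::size_t, std::size_t>
findLogLimits(const MbyteInfo* mb, std::size_t lastMbyte,
              uint32_t startGci, uint32_t lastGci)
{
  std::size_t stop = lastMbyte;
  bool stopFound = false;
  for (std::size_t i = lastMbyte; ; i--) {
    if (!stopFound && mb[i].maxGciCompleted < lastGci) {
      stop = i;             /* first mbyte, counted from the end, that must be executed */
      stopFound = true;
    }
    if (stopFound && mb[i].maxGciStarted < startGci) {
      return std::make_pair(i, stop);   /* nothing before this mbyte is needed */
    }
    if (i == 0) break;      /* the search never goes past mbyte 0 */
  }
  return std::make_pair(static_cast<std::size_t>(0), stop);
}
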
-void Dblqh::openExecSrStartLab(Signal* signal)
-{
- logPartPtr.p->currentLogfile = logFilePtr.i;
- logFilePtr.p->currentMbyte = logPartPtr.p->startMbyte;
- /* ------------------------------------------------------------------------
- * WE NEED A TC CONNECT RECORD TO HANDLE EXECUTION OF LOG RECORDS.
- * ------------------------------------------------------------------------ */
- seizeTcrec();
- logPartPtr.p->logTcConrec = tcConnectptr.i;
- /* ------------------------------------------------------------------------
- * THE FIRST LOG RECORD TO EXECUTE IS ALWAYS AT A NEW MBYTE.
- * SET THE NUMBER OF PAGES IN THE MAIN MEMORY BUFFER TO ZERO AS AN INITIAL
-   * VALUE. THIS VALUE IS UPDATED BY THE SUBROUTINE READ_EXEC_SR, WHICH
-   * ALSO ENSURES THAT PAGES ARE RELEASED.
- * ----------------------------------------------------------------------- */
- logPartPtr.p->mmBufferSize = 0;
- readExecSrNewMbyte(signal);
- return;
-}//Dblqh::openExecSrStartLab()
-
-/* ---------------------------------------------------------------------------
- * WE WILL ALWAYS ENSURE THAT WE HAVE AT LEAST 16 KBYTE OF LOG PAGES WHEN WE
- * START READING A LOG RECORD. THE ONLY EXCEPTION IS WHEN WE COME CLOSE TO A
- * MBYTE BOUNDARY. SINCE WE KNOW THAT LOG RECORDS ARE NEVER WRITTEN ACROSS A
- * MBYTE BOUNDARY THIS IS NOT A PROBLEM.
- *
- * WE START BY READING 64 KBYTE BEFORE STARTING TO EXECUTE THE LOG RECORDS.
- * WHEN WE COME BELOW 64 KBYTE WE READ ANOTHER SET OF LOG PAGES. WHEN WE
- * GO BELOW 16 KBYTE WE WAIT UNTIL THE READ PAGES HAVE ENTERED THE BLOCK.
- * ------------------------------------------------------------------------- */
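
/* A small sketch of the buffering policy described above, with hypothetical
 * parameters: keep executing while enough pages are buffered, refill and wait
 * when the buffer runs low and the current mbyte still has unread pages, and
 * otherwise run to the end of the mbyte (log records never cross an mbyte
 * boundary).
 */
#include <cstdint>

enum ReadAheadAction { EXECUTE_BUFFERED, READ_AND_WAIT, RUN_TO_MBYTE_END };

static ReadAheadAction decideReadAhead(uint32_t pagesRead,      /* buffered, not yet executed */
                                       uint32_t pagesExecuted,  /* already executed in this mbyte */
                                       uint32_t minBufferPages, /* e.g. the 16 kByte threshold */
                                       uint32_t pagesInMbyte)
{
  if (pagesRead >= minBufferPages) {
    return EXECUTE_BUFFERED;      /* enough buffered pages: keep executing */
  }
  if (pagesRead + pagesExecuted < pagesInMbyte) {
    return READ_AND_WAIT;         /* more of this mbyte is still on disk */
  }
  return RUN_TO_MBYTE_END;        /* the whole mbyte is already in memory */
}
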
-/* --------------------------------------------------------------------------
- * NEW PAGES FROM THE LOG FILE HAVE ARRIVED DURING EXECUTION OF THE LOG.
- * ------------------------------------------------------------------------- */
-void Dblqh::readExecSrLab(Signal* signal)
-{
- buildLinkedLogPageList(signal);
- /* ------------------------------------------------------------------------
- * WE NEED TO SET THE CURRENT PAGE INDEX OF THE FIRST PAGE SINCE IT CAN BE
- * USED IMMEDIATELY WITHOUT ANY OTHER INITIALISATION. THE REST OF THE PAGES
- * WILL BE INITIALISED BY READ_LOGWORD.
- * ----------------------------------------------------------------------- */
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
- if (logPartPtr.p->logExecState ==
- LogPartRecord::LES_WAIT_READ_EXEC_SR_NEW_MBYTE) {
- jam();
- /* ----------------------------------------------------------------------
- * THIS IS THE FIRST READ DURING THE EXECUTION OF THIS MBYTE. SET THE
- * NEW CURRENT LOG PAGE TO THE FIRST OF THESE PAGES. CHANGE
- * LOG_EXEC_STATE TO ENSURE THAT WE START EXECUTION OF THE LOG.
- * --------------------------------------------------------------------- */
- logFilePtr.p->currentFilepage = logFilePtr.p->currentMbyte *
- ZPAGES_IN_MBYTE;
- logPartPtr.p->prevFilepage = logFilePtr.p->currentFilepage;
- logFilePtr.p->currentLogpage = lfoPtr.p->firstLfoPage;
- logPartPtr.p->prevLogpage = logFilePtr.p->currentLogpage;
- }//if
- moveToPageRef(signal);
- releaseLfo(signal);
- /* ------------------------------------------------------------------------
- * NOW WE HAVE COMPLETED THE RECEPTION OF THESE PAGES.
- * NOW CHECK IF WE NEED TO READ MORE PAGES.
- * ----------------------------------------------------------------------- */
- checkReadExecSr(signal);
- if (logPartPtr.p->logExecState == LogPartRecord::LES_EXEC_LOG) {
- jam();
- signal->theData[0] = ZEXEC_SR;
- signal->theData[1] = logPartPtr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- return;
- }//if
- return;
-}//Dblqh::readExecSrLab()
-
-void Dblqh::openExecSrNewMbyteLab(Signal* signal)
-{
- readExecSrNewMbyte(signal);
- return;
-}//Dblqh::openExecSrNewMbyteLab()
-
-void Dblqh::closeExecSrLab(Signal* signal)
-{
- LogFileRecordPtr locLogFilePtr;
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
- logPartPtr.i = logFilePtr.p->logPartRec;
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
- locLogFilePtr.i = logPartPtr.p->currentLogfile;
- ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
- locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_EXEC_SR_NEW_MBYTE;
- openFileRw(signal, locLogFilePtr);
- return;
-}//Dblqh::closeExecSrLab()
-
-void Dblqh::writeDirtyLab(Signal* signal)
-{
- releaseLfo(signal);
- signal->theData[0] = logPartPtr.i;
- execSr(signal);
- return;
-}//Dblqh::writeDirtyLab()
-
-/* --------------------------------------------------------------------------
- * EXECUTE A LOG RECORD WITHIN THE CURRENT MBYTE.
- * ------------------------------------------------------------------------- */
-void Dblqh::execSr(Signal* signal)
-{
- LogFileRecordPtr nextLogFilePtr;
- LogPageRecordPtr tmpLogPagePtr;
- Uint32 logWord;
-
- jamEntry();
- logPartPtr.i = signal->theData[0];
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
-
- do {
- jam();
- logFilePtr.i = logPartPtr.p->currentLogfile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logPagePtr.i = logPartPtr.p->prevLogpage;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- if (logPagePtr.p->logPageWord[ZPOS_DIRTY] == ZDIRTY) {
- jam();
- switch (logPartPtr.p->logExecState) {
- case LogPartRecord::LES_EXEC_LOG_COMPLETED:
- case LogPartRecord::LES_EXEC_LOG_NEW_FILE:
- case LogPartRecord::LES_EXEC_LOG_NEW_MBYTE:
- jam();
- /* ------------------------------------------------------------------
-       * IN THIS CASE WE HAVE COMPLETED EXECUTION OF THE CURRENT LOG PAGE
- * AND CAN WRITE IT TO DISK SINCE IT IS DIRTY.
- * ----------------------------------------------------------------- */
- writeDirty(signal);
- return;
- break;
- case LogPartRecord::LES_EXEC_LOG:
- jam();
- /* --------------------------------------------------------------------
- * IN THIS CASE WE ONLY WRITE THE PAGE TO DISK IF WE HAVE COMPLETED
- * EXECUTION OF LOG RECORDS BELONGING TO THIS LOG PAGE.
- * ------------------------------------------------------------------- */
- if (logFilePtr.p->currentLogpage != logPartPtr.p->prevLogpage) {
- jam();
- writeDirty(signal);
- return;
- }//if
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
- }//if
- if (logFilePtr.p->currentLogpage != logPartPtr.p->prevLogpage) {
- jam();
- logPartPtr.p->prevLogpage = logPagePtr.p->logPageWord[ZNEXT_PAGE];
- logPartPtr.p->prevFilepage++;
- continue;
- }//if
- switch (logPartPtr.p->logExecState) {
- case LogPartRecord::LES_EXEC_LOG_COMPLETED:
- jam();
- releaseMmPages(signal);
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_EXEC_SR_COMPLETED;
- closeFile(signal, logFilePtr);
- return;
- break;
- case LogPartRecord::LES_EXEC_LOG_NEW_MBYTE:
- jam();
- logFilePtr.p->currentMbyte++;
- readExecSrNewMbyte(signal);
- return;
- break;
- case LogPartRecord::LES_EXEC_LOG_NEW_FILE:
- jam();
- nextLogFilePtr.i = logFilePtr.p->nextLogFile;
- logPartPtr.p->currentLogfile = nextLogFilePtr.i;
- ptrCheckGuard(nextLogFilePtr, clogFileFileSize, logFileRecord);
- nextLogFilePtr.p->currentMbyte = 0;
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_EXEC_SR;
- closeFile(signal, logFilePtr);
- return;
- break;
- case LogPartRecord::LES_EXEC_LOG:
- jam();
- /*empty*/;
- break;
- default:
- jam();
- systemErrorLab(signal);
- return;
- break;
- }//switch
- logPagePtr.i = logFilePtr.p->currentLogpage;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- logPartPtr.p->savePageIndex = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- if (logPartPtr.p->execSrPagesRead < ZMIN_READ_BUFFER_SIZE) {
- /* --------------------------------------------------------------------
-       * THERE WERE FEWER THAN 16 KBYTE OF LOG PAGES REMAINING. WE WAIT
-       * UNTIL THE NEXT 64 KBYTE HAVE ARRIVED BEFORE WE CONTINUE.
- * ------------------------------------------------------------------- */
- if ((logPartPtr.p->execSrPagesRead +
- logPartPtr.p->execSrPagesExecuted) < ZPAGES_IN_MBYTE) {
- jam();
- /* ------------------------------------------------------------------
-         * WE ONLY STOP AND WAIT IF THERE ARE MORE PAGES TO READ. IF THERE
-         * ARE NOT, THEN WE ARE AT THE END OF THE MBYTE AND WE CONTINUE.
-         * THERE IS NO RISK THAT A LOG RECORD WE FIND WILL NOT HAVE BEEN
-         * READ AT THIS POINT, SINCE LOG RECORDS NEVER SPAN AN MBYTE BOUNDARY.
- * ----------------------------------------------------------------- */
- readExecSr(signal);
- logPartPtr.p->logExecState = LogPartRecord::LES_WAIT_READ_EXEC_SR;
- return;
- }//if
- }//if
- logWord = readLogword(signal);
- switch (logWord) {
-/* ========================================================================= */
-/* ========================================================================= */
- case ZPREP_OP_TYPE:
- {
- logWord = readLogword(signal);
- stepAhead(signal, logWord - 2);
- break;
- }
-/* ========================================================================= */
-/* ========================================================================= */
- case ZINVALID_COMMIT_TYPE:
- jam();
- stepAhead(signal, ZCOMMIT_LOG_SIZE - 1);
- break;
-/* ========================================================================= */
-/* ========================================================================= */
- case ZCOMMIT_TYPE:
- {
- CommitLogRecord commitLogRecord;
- jam();
- tcConnectptr.i = logPartPtr.p->logTcConrec;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- readCommitLog(signal, &commitLogRecord);
- if (tcConnectptr.p->gci > crestartNewestGci) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THIS LOG RECORD MUST BE IGNORED. IT IS PART OF A GLOBAL CHECKPOINT WHICH */
-/* WILL BE INVALIDATED BY THE SYSTEM RESTART. IF NOT INVALIDATED IT MIGHT BE */
-/* EXECUTED IN A FUTURE SYSTEM RESTART. */
-/*---------------------------------------------------------------------------*/
- tmpLogPagePtr.i = logPartPtr.p->prevLogpage;
- ptrCheckGuard(tmpLogPagePtr, clogPageFileSize, logPageRecord);
- arrGuard(logPartPtr.p->savePageIndex, ZPAGE_SIZE);
- tmpLogPagePtr.p->logPageWord[logPartPtr.p->savePageIndex] =
- ZINVALID_COMMIT_TYPE;
- tmpLogPagePtr.p->logPageWord[ZPOS_DIRTY] = ZDIRTY;
- } else {
- jam();
-/*---------------------------------------------------------------------------*/
-/* CHECK IF I AM SUPPOSED TO EXECUTE THIS LOG RECORD. IF I AM THEN SAVE PAGE */
-/* INDEX IN CURRENT LOG PAGE SINCE IT WILL BE OVERWRITTEN WHEN EXECUTING THE */
-/* LOG RECORD. */
-/*---------------------------------------------------------------------------*/
- logPartPtr.p->execSrExecuteIndex = 0;
- Uint32 result = checkIfExecLog(signal);
- if (result == ZOK) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* IN A NODE RESTART WE WILL NEVER END UP HERE SINCE NO FRAGMENTS HAVE BEEN */
-/* DEFINED YET. THUS NO EXTRA CHECKING FOR NODE RESTART IS NECESSARY. */
-/*---------------------------------------------------------------------------*/
- logPartPtr.p->savePageIndex =
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- tcConnectptr.p->fragmentptr = fragptr.i;
- findPageRef(signal, &commitLogRecord);
- logPartPtr.p->execSrLogPageIndex = commitLogRecord.startPageIndex;
- if (logPagePtr.i != RNIL) {
- jam();
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = commitLogRecord.startPageIndex;
- logPartPtr.p->execSrLogPage = logPagePtr.i;
- execLogRecord(signal);
- return;
- }//if
- logPartPtr.p->execSrStartPageNo = commitLogRecord.startPageNo;
- logPartPtr.p->execSrStopPageNo = commitLogRecord.stopPageNo;
- findLogfile(signal, commitLogRecord.fileNo, logPartPtr, &logFilePtr);
- logPartPtr.p->execSrExecLogFile = logFilePtr.i;
- if (logFilePtr.i == logPartPtr.p->currentLogfile) {
- jam();
- readExecLog(signal);
- lfoPtr.p->lfoState = LogFileOperationRecord::READ_EXEC_LOG;
- return;
- } else {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THE FILE IS CURRENTLY NOT OPEN. WE MUST OPEN IT BEFORE WE CAN READ FROM */
-/* THE FILE. */
-/*---------------------------------------------------------------------------*/
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN_EXEC_LOG;
- openFileRw(signal, logFilePtr);
- return;
- }//if
- }//if
- }//if
- break;
- }
-/* ========================================================================= */
-/* ========================================================================= */
- case ZABORT_TYPE:
- jam();
- stepAhead(signal, ZABORT_LOG_SIZE - 1);
- break;
-/* ========================================================================= */
-/* ========================================================================= */
- case ZFD_TYPE:
- jam();
-/*---------------------------------------------------------------------------*/
-/* THIS IS THE FIRST ITEM WE ENCOUNTER IN A NEW FILE. AT THIS MOMENT WE SHALL*/
-/* SIMPLY BYPASS IT. IT HAS NO SIGNIFICANCE WHEN EXECUTING THE LOG. ITS      */
-/* SIGNIFICANCE LIES IN FINDING THE START AND THE END OF THE LOG.            */
-/* WE HARDCODE THE PAGE INDEX SINCE THIS SHOULD NEVER BE FOUND AT ANY OTHER */
-/* PLACE THAN IN THE FIRST PAGE OF A NEW FILE IN THE FIRST POSITION AFTER THE*/
-/* HEADER. */
-/*---------------------------------------------------------------------------*/
- ndbrequire(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] ==
- (ZPAGE_HEADER_SIZE + ZPOS_NO_FD));
- {
- Uint32 noFdDescriptors =
- logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD];
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
- (ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (noFdDescriptors * ZFD_PART_SIZE);
- }
- break;
-/* ========================================================================= */
-/* ========================================================================= */
- case ZNEXT_LOG_RECORD_TYPE:
- jam();
- stepAhead(signal, ZPAGE_SIZE - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]);
- break;
-/* ========================================================================= */
-/* ========================================================================= */
- case ZNEXT_MBYTE_TYPE:
-/*---------------------------------------------------------------------------*/
-/* WE WILL SKIP A PART OF THE LOG FILE. THE NEXT RECORD ACTUALLY STARTS IN A */
-/* NEW MBYTE. THEREFORE WE WILL START UP A NEW MBYTE. THIS NEW MBYTE IS      */
-/* HOWEVER ONLY STARTED IF IT IS NOT AFTER THE STOP MBYTE. */
-/* IF WE HAVE REACHED THE END OF THE STOP MBYTE THEN THE EXECUTION OF THE LOG*/
-/* IS COMPLETED. */
-/*---------------------------------------------------------------------------*/
- if (logPartPtr.p->currentLogfile == logPartPtr.p->stopLogfile) {
- if (logFilePtr.p->currentMbyte == logPartPtr.p->stopMbyte) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* THIS WAS THE LAST MBYTE TO EXECUTE IN THIS LOG PART. WE SHOULD HAVE FOUND */
-/* A COMPLETED GCI RECORD OF THE LAST GCI BEFORE THIS. FOR SOME REASON THIS */
-/* RECORD WAS NOT AVAILABLE ON THE LOG. CRASH THE SYSTEM, A VERY SERIOUS */
-/* ERROR WHICH WE MUST REALLY WORK HARD TO AVOID. */
-/*---------------------------------------------------------------------------*/
-/*---------------------------------------------------------------------------*/
-/* SEND A SIGNAL TO THE SIGNAL LOG AND THEN CRASH THE SYSTEM. */
-/*---------------------------------------------------------------------------*/
- signal->theData[0] = RNIL;
- signal->theData[1] = logPartPtr.i;
- Uint32 tmp = logFilePtr.p->fileName[3];
- tmp = (tmp >> 8) & 0xff;// To get the Directory, DXX.
- signal->theData[2] = tmp;
- signal->theData[3] = logFilePtr.p->fileNo;
- signal->theData[4] = logFilePtr.p->currentFilepage;
- signal->theData[5] = logFilePtr.p->currentMbyte;
- signal->theData[6] = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- sendSignal(cownref, GSN_DEBUG_SIG, signal, 7, JBA);
- return;
- }//if
- }//if
-/*---------------------------------------------------------------------------*/
-/* START EXECUTION OF A NEW MBYTE IN THE LOG. */
-/*---------------------------------------------------------------------------*/
- if (logFilePtr.p->currentMbyte < (ZNO_MBYTES_IN_FILE - 1)) {
- jam();
- logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_NEW_MBYTE;
- } else {
- ndbrequire(logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1));
- jam();
-/*---------------------------------------------------------------------------*/
-/* WE HAVE TO CHANGE FILE. CLOSE THIS ONE AND THEN OPEN THE NEXT. */
-/*---------------------------------------------------------------------------*/
- logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_NEW_FILE;
- }//if
- break;
-/* ========================================================================= */
-/* ========================================================================= */
- case ZCOMPLETED_GCI_TYPE:
- jam();
- logWord = readLogword(signal);
- if (logWord == logPartPtr.p->logLastGci) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* IF IT IS THE LAST GCI TO SURVIVE THE SYSTEM RESTART THEN WE RECORD THE    */
-/* NEXT WORD AS THE NEW HEAD OF THE LOG. OTHERWISE WE SIMPLY IGNORE THIS     */
-/* LOG RECORD. */
-/*---------------------------------------------------------------------------*/
- if (csrPhasesCompleted == 0) {
- jam();
-/*---------------------------------------------------------------------------*/
-/* WE ONLY RECORD THE HEAD OF THE LOG IN THE FIRST ROUND OF LOG EXECUTION.   */
-/*---------------------------------------------------------------------------*/
- logPartPtr.p->headFileNo = logFilePtr.p->fileNo;
- logPartPtr.p->headPageNo = logFilePtr.p->currentFilepage;
- logPartPtr.p->headPageIndex =
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- }//if
-/*---------------------------------------------------------------------------*/
-/* THERE IS NO NEED TO EXECUTE PAST THIS POINT SINCE THE REMAINING LOG       */
-/* RECORDS ARE OF NO INTEREST. THUS CLOSE THE FILE AND START THE NEXT        */
-/* PHASE OF THE SYSTEM RESTART.                                              */
-/*---------------------------------------------------------------------------*/
- logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_COMPLETED;
- }//if
- break;
- default:
- jam();
-/* ========================================================================= */
-/* ========================================================================= */
-/*---------------------------------------------------------------------------*/
-/* SEND A SIGNAL TO THE SIGNAL LOG AND THEN CRASH THE SYSTEM. */
-/*---------------------------------------------------------------------------*/
- signal->theData[0] = RNIL;
- signal->theData[1] = logPartPtr.i;
- Uint32 tmp = logFilePtr.p->fileName[3];
- tmp = (tmp >> 8) & 0xff;// To get the Directory, DXX.
- signal->theData[2] = tmp;
- signal->theData[3] = logFilePtr.p->fileNo;
- signal->theData[4] = logFilePtr.p->currentMbyte;
- signal->theData[5] = logFilePtr.p->currentFilepage;
- signal->theData[6] = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- signal->theData[7] = logWord;
- sendSignal(cownref, GSN_DEBUG_SIG, signal, 8, JBA);
- return;
- break;
- }//switch
-/*---------------------------------------------------------------------------*/
-// We continue to execute log records until we find a proper one to execute or
-// that we reach a new page.
-/*---------------------------------------------------------------------------*/
- } while (1);
-}//Dblqh::execSr()
-
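/* A minimal sketch of the dispatch loop in execSr() above: read one type word
 * and either skip the record or act on it.  The record tags, sizes and layout
 * below are illustrative only and do not match the real ZCODES or page format.
 */
#include <cstddef>
#include <cstdint>

enum RecordType { PREP = 1, COMMIT = 2, ABORT = 3, COMPLETED_GCI = 4, NEXT_MBYTE = 5 };

struct LogCursor {
  const uint32_t* words;
  std::size_t     pos;
  uint32_t next()              { return words[pos++]; }
  void     skip(std::size_t n) { pos += n; }
};

/* Scans forward until a commit record is found; returns its position, or
 * SIZE_MAX when the NEXT_MBYTE marker is reached first.
 */
static std::size_t scanForCommit(LogCursor& c)
{
  for (;;) {
    switch (c.next()) {
    case PREP:          c.skip(c.next() - 2); break;  /* size word follows the type word */
    case ABORT:         c.skip(2);            break;  /* fixed-size record (illustrative) */
    case COMPLETED_GCI: c.skip(1);            break;  /* the GCI word follows */
    case COMMIT:        return c.pos;                 /* caller executes this record */
    case NEXT_MBYTE:    return SIZE_MAX;              /* end of this mbyte */
    default:            return SIZE_MAX;              /* unknown word: treat as corrupt */
    }
  }
}
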
-/*---------------------------------------------------------------------------*/
-/* THIS SIGNAL IS ONLY RECEIVED TO BE CAPTURED IN THE SIGNAL LOG. IT IS */
-/* ALSO USED TO CRASH THE SYSTEM AFTER SENDING A SIGNAL TO THE LOG. */
-/*---------------------------------------------------------------------------*/
-void Dblqh::execDEBUG_SIG(Signal* signal)
-{
-/*
-2.5 TEMPORARY VARIABLES
------------------------
-*/
- UintR tdebug;
-
- jamEntry();
- logPagePtr.i = signal->theData[0];
- tdebug = logPagePtr.p->logPageWord[0];
-
- char buf[100];
- BaseString::snprintf(buf, 100,
- "Error while reading REDO log.\n"
- "D=%d, F=%d Mb=%d FP=%d W1=%d W2=%d",
- signal->theData[2], signal->theData[3], signal->theData[4],
- signal->theData[5], signal->theData[6], signal->theData[7]);
-
- progError(__LINE__, ERR_SR_REDOLOG, buf);
-
- return;
-}//Dblqh::execDEBUG_SIG()
-
-/*---------------------------------------------------------------------------*/
-/*---------------------------------------------------------------------------*/
-void Dblqh::closeExecLogLab(Signal* signal)
-{
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
- signal->theData[0] = ZEXEC_SR;
- signal->theData[1] = logFilePtr.p->logPartRec;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- return;
-}//Dblqh::closeExecLogLab()
-
-void Dblqh::openExecLogLab(Signal* signal)
-{
- readExecLog(signal);
- lfoPtr.p->lfoState = LogFileOperationRecord::READ_EXEC_LOG;
- return;
-}//Dblqh::openExecLogLab()
-
-void Dblqh::readExecLogLab(Signal* signal)
-{
- buildLinkedLogPageList(signal);
- logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOGREC_FROM_FILE;
- logPartPtr.p->execSrLfoRec = lfoPtr.i;
- logPartPtr.p->execSrLogPage = logPagePtr.i;
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
- logPartPtr.p->execSrLogPageIndex;
- execLogRecord(signal);
- return;
-}//Dblqh::readExecLogLab()
-
-/*---------------------------------------------------------------------------*/
-/* THIS CODE IS USED TO EXECUTE A LOG RECORD WHEN ITS DATA HAS BEEN LOCATED  */
-/* AND TRANSFERRED INTO MEMORY. */
-/*---------------------------------------------------------------------------*/
-void Dblqh::execLogRecord(Signal* signal)
-{
- jamEntry();
-
- tcConnectptr.i = logPartPtr.p->logTcConrec;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- // Read a log record and prepare it for execution
- readLogHeader(signal);
- readKey(signal);
- readAttrinfo(signal);
- initReqinfoExecSr(signal);
- arrGuard(logPartPtr.p->execSrExecuteIndex, 4);
- BlockReference ref = fragptr.p->execSrBlockref[logPartPtr.p->execSrExecuteIndex];
- tcConnectptr.p->nextReplica = refToNode(ref);
- tcConnectptr.p->connectState = TcConnectionrec::LOG_CONNECTED;
- tcConnectptr.p->tcOprec = tcConnectptr.i;
- packLqhkeyreqLab(signal);
- return;
-}//Dblqh::execLogRecord()
-
-//----------------------------------------------------------------------------
-// This function invalidates log pages after the last GCI record in a
-// system/node restart. This is to ensure that the end of the log is
-// consistent. This function is executed last in start phase 3.
-// RT 450. EDTJAMO.
-//----------------------------------------------------------------------------
-void Dblqh::invalidateLogAfterLastGCI(Signal* signal) {
-
- jam();
- if (logPartPtr.p->logExecState != LogPartRecord::LES_EXEC_LOG_INVALIDATE) {
- jam();
- systemError(signal);
- }
-
- if (logFilePtr.p->fileNo != logPartPtr.p->invalidateFileNo) {
- jam();
- systemError(signal);
- }
-
- switch (lfoPtr.p->lfoState) {
- case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
- jam();
- releaseLfo(signal);
- releaseLogpage(signal);
- if (logPartPtr.p->invalidatePageNo < (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE - 1)) {
- // We continue in this file.
- logPartPtr.p->invalidatePageNo++;
- } else {
- // We continue in the next file.
- logFilePtr.i = logFilePtr.p->nextLogFile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logPartPtr.p->invalidateFileNo = logFilePtr.p->fileNo;
- // Page 0 is used for file descriptors.
- logPartPtr.p->invalidatePageNo = 1;
- if (logFilePtr.p->logFileStatus != LogFileRecord::OPEN) {
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_INVALIDATE_PAGES;
- openFileRw(signal, logFilePtr);
- return;
- break;
- }
- }
- // Read a page from the log file.
- readFileInInvalidate(signal);
- return;
- break;
-
- case LogFileOperationRecord::READ_SR_INVALIDATE_PAGES:
- jam();
- releaseLfo(signal);
- // Check if this page must be invalidated.
-    // If the log lap number on a page after the head of the log is the same
-    // as the current log lap number, we must invalidate this page. Otherwise
-    // it could be impossible to find the end of the log in a later
-    // system/node restart.
- if (logPagePtr.p->logPageWord[ZPOS_LOG_LAP] == logPartPtr.p->logLap) {
- // This page must be invalidated.
- logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 0;
- // Contact NDBFS. Real time break.
- writeSinglePage(signal, logPartPtr.p->invalidatePageNo, ZPAGE_SIZE - 1);
- lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES;
- } else {
- // We are done with invalidating. Finish start phase 3.4.
- exitFromInvalidate(signal);
- }
- return;
- break;
-
- default:
- jam();
- systemError(signal);
- return;
- break;
- }
-
- return;
-}//Dblqh::invalidateLogAfterLastGCI
-
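/* A self-contained, synchronous sketch of the invalidation rule applied in
 * invalidateLogAfterLastGCI() above (the real code works page by page through
 * NDBFS with real-time breaks): starting at the head of the log, every page
 * that still carries the current log lap number is cleared, so that a later
 * restart can find the true end of the log.
 */
#include <cstddef>
#include <cstdint>
#include <vector>

struct SimLogPage { uint32_t logLap; };

/* Returns the number of pages that were invalidated. */
static std::size_t invalidateAfterHead(std::vector<SimLogPage>& pages,
                                       std::size_t headPageNo,
                                       uint32_t currentLap)
{
  std::size_t cleared = 0;
  for (std::size_t p = headPageNo; p < pages.size(); p++) {
    if (pages[p].logLap != currentLap) {
      break;                    /* first page from an older lap: invalidation is done */
    }
    pages[p].logLap = 0;        /* mark the page as invalid */
    cleared++;
  }
  return cleared;
}
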
-void Dblqh::readFileInInvalidate(Signal* signal) {
- jam();
- // Contact NDBFS. Real time break.
- readSinglePage(signal, logPartPtr.p->invalidatePageNo);
- lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_INVALIDATE_PAGES;
-}
-
-void Dblqh::exitFromInvalidate(Signal* signal) {
- jam();
- // Close files if necessary. Current file and the next file should be
- // left open.
- if (logFilePtr.i != logPartPtr.p->currentLogfile) {
- LogFileRecordPtr currentLogFilePtr;
- LogFileRecordPtr nextAfterCurrentLogFilePtr;
-
- currentLogFilePtr.i = logPartPtr.p->currentLogfile;
- ptrCheckGuard(currentLogFilePtr, clogFileFileSize, logFileRecord);
-
- nextAfterCurrentLogFilePtr.i = currentLogFilePtr.p->nextLogFile;
-
- if (logFilePtr.i != nextAfterCurrentLogFilePtr.i) {
- // This file should be closed.
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSE_SR_INVALIDATE_PAGES;
- closeFile(signal, logFilePtr);
- // Return from this function and wait for close confirm. Then come back
- // and test the previous file for closing.
- return;
- }
- }
-
- // We are done with closing files, send completed signal and exit this phase.
- signal->theData[0] = ZSR_FOURTH_COMP;
- signal->theData[1] = logPartPtr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- return;
-}
-
-
-/*---------------------------------------------------------------------------*/
-/* THE EXECUTION OF A LOG RECORD IS COMPLETED. RELEASE PAGES IF THEY WERE */
-/* READ FROM DISK FOR THIS PARTICULAR OPERATION. */
-/*---------------------------------------------------------------------------*/
-void Dblqh::completedLab(Signal* signal)
-{
- Uint32 result = returnExecLog(signal);
-/*---------------------------------------------------------------------------*/
-/* ENTER COMPLETED WITH */
-/* LQH_CONNECTPTR */
-/*---------------------------------------------------------------------------*/
- if (result == ZOK) {
- jam();
- execLogRecord(signal);
- return;
- } else if (result == ZNOT_OK) {
- jam();
- signal->theData[0] = ZEXEC_SR;
- signal->theData[1] = logPartPtr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- } else {
- jam();
- /*empty*/;
- }//if
-/*---------------------------------------------------------------------------*/
-/* WE HAVE TO WAIT FOR CLOSING OF THE EXECUTED LOG FILE BEFORE PROCEEDING IN */
-/* RARE CASES. */
-/*---------------------------------------------------------------------------*/
- return;
-}//Dblqh::completedLab()
-
-/*---------------------------------------------------------------------------*/
-/* EXECUTION OF LOG RECORD WAS NOT SUCCESSFUL. CHECK IF IT IS OK ANYWAY, */
-/* THEN EXECUTE THE NEXT LOG RECORD. */
-/*---------------------------------------------------------------------------*/
-void Dblqh::logLqhkeyrefLab(Signal* signal)
-{
- Uint32 result = returnExecLog(signal);
- switch (tcConnectptr.p->operation) {
- case ZUPDATE:
- case ZDELETE:
- jam();
- ndbrequire(terrorCode == ZNO_TUPLE_FOUND);
- break;
- case ZINSERT:
- jam();
- ndbrequire(terrorCode == ZTUPLE_ALREADY_EXIST);
- break;
- default:
- ndbrequire(false);
- return;
- break;
- }//switch
- if (result == ZOK) {
- jam();
- execLogRecord(signal);
- return;
- } else if (result == ZNOT_OK) {
- jam();
- signal->theData[0] = ZEXEC_SR;
- signal->theData[1] = logPartPtr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- } else {
- jam();
- /*empty*/;
- }//if
- /* ------------------------------------------------------------------------
- * WE HAVE TO WAIT FOR CLOSING OF THE EXECUTED LOG FILE BEFORE
- * PROCEEDING IN RARE CASES.
- * ----------------------------------------------------------------------- */
- return;
-}//Dblqh::logLqhkeyrefLab()
-
-void Dblqh::closeExecSrCompletedLab(Signal* signal)
-{
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
- signal->theData[0] = logFilePtr.p->logPartRec;
- execLogComp(signal);
- return;
-}//Dblqh::closeExecSrCompletedLab()
-
-/* --------------------------------------------------------------------------
- * ONE OF THE LOG PARTS HAS COMPLETED EXECUTING THE LOG. CHECK IF ALL LOG
- * PARTS ARE COMPLETED. IF SO START SENDING EXEC_FRAGCONF AND EXEC_SRCONF.
- * ------------------------------------------------------------------------- */
-void Dblqh::execLogComp(Signal* signal)
-{
- logPartPtr.i = signal->theData[0];
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
- logPartPtr.p->logPartState = LogPartRecord::SR_THIRD_PHASE_COMPLETED;
- /* ------------------------------------------------------------------------
- * WE MUST RELEASE THE TC CONNECT RECORD HERE SO THAT IT CAN BE REUSED.
- * ----------------------------------------------------------------------- */
- tcConnectptr.i = logPartPtr.p->logTcConrec;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- releaseTcrecLog(signal, tcConnectptr);
- for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
- jam();
- ptrAss(logPartPtr, logPartRecord);
- if (logPartPtr.p->logPartState != LogPartRecord::SR_THIRD_PHASE_COMPLETED) {
- if (logPartPtr.p->logPartState != LogPartRecord::SR_THIRD_PHASE_STARTED) {
- jam();
- systemErrorLab(signal);
- return;
- } else {
- jam();
- /* ------------------------------------------------------------------
- * THIS LOG PART WAS NOT COMPLETED YET. EXIT AND WAIT FOR IT
- * TO COMPLETE
- * ----------------------------------------------------------------- */
- return;
- }//if
- }//if
- }//for
- /* ------------------------------------------------------------------------
- * ALL LOG PARTS HAVE COMPLETED THE EXECUTION OF THE LOG. WE CAN NOW START
- * SENDING THE EXEC_FRAGCONF SIGNALS TO ALL INVOLVED FRAGMENTS.
- * ----------------------------------------------------------------------- */
- if (cstartType != NodeState::ST_NODE_RESTART) {
- jam();
- signal->theData[0] = ZSEND_EXEC_CONF;
- signal->theData[1] = 0;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- } else {
- jam();
- /* ----------------------------------------------------------------------
- * FOR NODE RESTART WE CAN SKIP A NUMBER OF STEPS SINCE WE HAVE NO
- * FRAGMENTS DEFINED AT THIS POINT. OBVIOUSLY WE WILL NOT NEED TO
- * EXECUTE ANY MORE LOG STEPS EITHER AND THUS WE CAN IMMEDIATELY
- * START FINDING THE END AND THE START OF THE LOG.
- * --------------------------------------------------------------------- */
- csrPhasesCompleted = 3;
- execSrCompletedLab(signal);
- return;
- }//if
- return;
-}//Dblqh::execLogComp()
-
-/* --------------------------------------------------------------------------
- * GO THROUGH THE FRAGMENT RECORDS TO DEDUCE WHICH ONES SHALL BE SENT
- * EXEC_FRAGCONF AFTER COMPLETING THE EXECUTION OF THE LOG.
- * ------------------------------------------------------------------------- */
-void Dblqh::sendExecConf(Signal* signal)
-{
- jamEntry();
- fragptr.i = signal->theData[0];
- Uint32 loopCount = 0;
- while (fragptr.i < cfragrecFileSize) {
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if (fragptr.p->execSrStatus != Fragrecord::IDLE) {
- jam();
- ndbrequire(fragptr.p->execSrNoReplicas - 1 < 4);
- for (Uint32 i = 0; i < fragptr.p->execSrNoReplicas; i++) {
- jam();
- signal->theData[0] = fragptr.p->execSrUserptr[i];
- sendSignal(fragptr.p->execSrBlockref[i], GSN_EXEC_FRAGCONF,
- signal, 1, JBB);
- }//for
- if (fragptr.p->execSrStatus == Fragrecord::ACTIVE) {
- jam();
- fragptr.p->execSrStatus = Fragrecord::IDLE;
- } else {
- ndbrequire(fragptr.p->execSrStatus == Fragrecord::ACTIVE_REMOVE_AFTER);
- jam();
- Uint32 fragId = fragptr.p->fragId;
- tabptr.i = fragptr.p->tabRef;
- ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
- deleteFragrec(fragId);
- }//if
- fragptr.p->execSrNoReplicas = 0;
- }//if
- loopCount++;
- if (loopCount > 20) {
- jam();
- signal->theData[0] = ZSEND_EXEC_CONF;
- signal->theData[1] = fragptr.i + 1;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- return;
- } else {
- jam();
- fragptr.i++;
- }//if
- }//while
- /* ----------------------------------------------------------------------
- * WE HAVE NOW SENT ALL EXEC_FRAGCONF. NOW IT IS TIME TO SEND
- * EXEC_SRCONF TO ALL NODES.
- * --------------------------------------------------------------------- */
- srPhase3Comp(signal);
-}//Dblqh::sendExecConf()
-
-/* --------------------------------------------------------------------------
- * PHASE 3 HAS NOW COMPLETED. INFORM ALL OTHER NODES OF THIS EVENT.
- * ------------------------------------------------------------------------- */
-void Dblqh::srPhase3Comp(Signal* signal)
-{
- jamEntry();
- ndbrequire(cnoOfNodes < MAX_NDB_NODES);
- for (Uint32 i = 0; i < cnoOfNodes; i++) {
- jam();
- if (cnodeStatus[i] == ZNODE_UP) {
- jam();
- ndbrequire(cnodeData[i] < MAX_NDB_NODES);
- BlockReference ref = calcLqhBlockRef(cnodeData[i]);
- signal->theData[0] = cownNodeid;
- sendSignal(ref, GSN_EXEC_SRCONF, signal, 1, JBB);
- }//if
- }//for
- return;
-}//Dblqh::srPhase3Comp()
-
-/* ##########################################################################
- * SYSTEM RESTART PHASE FOUR MODULE
- * THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING.
- *
- * THIS MODULE SETS UP THE HEAD AND TAIL POINTERS OF THE LOG PARTS IN THE
- * FRAGMENT LOG. WHEN IT IS COMPLETED IT REPORTS TO THE MASTER DIH THAT
- * IT HAS COMPLETED THE PART OF THE SYSTEM RESTART WHERE THE DATABASE IS
- * LOADED.
- * IT ALSO OPENS THE CURRENT LOG FILE AND THE NEXT AND SETS UP THE FIRST
- * LOG PAGE WHERE NEW LOG DATA IS TO BE INSERTED WHEN THE SYSTEM STARTS
- * AGAIN.
- *
- * THIS PART IS ACTUALLY EXECUTED FOR ALL RESTART TYPES.
- * ######################################################################### */
-void Dblqh::initFourth(Signal* signal)
-{
- LogFileRecordPtr locLogFilePtr;
- jamEntry();
- logPartPtr.i = signal->theData[0];
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
- crestartNewestGci = 1;
- crestartOldestGci = 1;
- /* ------------------------------------------------------------------------
- * INITIALISE LOG PART AND LOG FILES AS NEEDED.
- * ----------------------------------------------------------------------- */
- logPartPtr.p->headFileNo = 0;
- logPartPtr.p->headPageNo = 1;
- logPartPtr.p->headPageIndex = ZPAGE_HEADER_SIZE + 2;
- logPartPtr.p->logPartState = LogPartRecord::SR_FOURTH_PHASE_STARTED;
- logPartPtr.p->logTailFileNo = 0;
- logPartPtr.p->logTailMbyte = 0;
- locLogFilePtr.i = logPartPtr.p->firstLogfile;
- ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
- locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_PHASE;
- openFileRw(signal, locLogFilePtr);
- return;
-}//Dblqh::initFourth()
-
-void Dblqh::openSrFourthPhaseLab(Signal* signal)
-{
- /* ------------------------------------------------------------------------
- * WE HAVE NOW OPENED THE HEAD LOG FILE WE WILL NOW START READING IT
- * FROM THE HEAD MBYTE TO FIND THE NEW HEAD OF THE LOG.
- * ----------------------------------------------------------------------- */
- readSinglePage(signal, logPartPtr.p->headPageNo);
- lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_FOURTH_PHASE;
- return;
-}//Dblqh::openSrFourthPhaseLab()
-
-void Dblqh::readSrFourthPhaseLab(Signal* signal)
-{
- if(c_diskless){
- jam();
- logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 1;
- }
-
- /* ------------------------------------------------------------------------
- * INITIALISE ALL LOG PART INFO AND LOG FILE INFO THAT IS NEEDED TO
- * START UP THE SYSTEM.
- * ------------------------------------------------------------------------
- * INITIALISE THE NEWEST GLOBAL CHECKPOINT IDENTITY AND THE NEWEST
-   * COMPLETED GLOBAL CHECKPOINT IDENTITY AS THE NEWEST THAT WAS RESTARTED.
- * ------------------------------------------------------------------------
- * INITIALISE THE HEAD PAGE INDEX IN THIS PAGE.
- * ASSIGN IT AS THE CURRENT LOGPAGE.
- * ASSIGN THE FILE AS THE CURRENT LOG FILE.
- * ASSIGN THE CURRENT FILE NUMBER FROM THE CURRENT LOG FILE AND THE NEXT
- * FILE NUMBER FROM THE NEXT LOG FILE.
- * ASSIGN THE CURRENT FILEPAGE FROM HEAD PAGE NUMBER.
- * ASSIGN THE CURRENT MBYTE BY DIVIDING PAGE NUMBER BY 128.
- * INITIALISE LOG LAP TO BE THE LOG LAP AS FOUND IN THE HEAD PAGE.
- * WE HAVE TO CALCULATE THE NUMBER OF REMAINING WORDS IN THIS MBYTE.
- * ----------------------------------------------------------------------- */
- cnewestGci = crestartNewestGci;
- cnewestCompletedGci = crestartNewestGci;
- logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci;
- logPartPtr.p->currentLogfile = logFilePtr.i;
- logFilePtr.p->filePosition = logPartPtr.p->headPageNo;
- logFilePtr.p->currentMbyte =
- logPartPtr.p->headPageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE;
- logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING;
- logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP];
- logFilePtr.p->currentFilepage = logPartPtr.p->headPageNo;
- logFilePtr.p->currentLogpage = logPagePtr.i;
-
- initLogpage(signal);
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPartPtr.p->headPageIndex;
- logFilePtr.p->remainingWordsInMbyte =
- ((
- ((logFilePtr.p->currentMbyte + 1) * ZPAGES_IN_MBYTE) -
- logFilePtr.p->currentFilepage) *
- (ZPAGE_SIZE - ZPAGE_HEADER_SIZE)) -
- (logPartPtr.p->headPageIndex - ZPAGE_HEADER_SIZE);
- /* ------------------------------------------------------------------------
- * THE NEXT STEP IS TO OPEN THE NEXT LOG FILE (IF THERE IS ONE).
- * ----------------------------------------------------------------------- */
- if (logFilePtr.p->nextLogFile != logFilePtr.i) {
- LogFileRecordPtr locLogFilePtr;
- jam();
- locLogFilePtr.i = logFilePtr.p->nextLogFile;
- ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
- locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_NEXT;
- openFileRw(signal, locLogFilePtr);
- } else {
- jam();
- /* ----------------------------------------------------------------------
- * THIS CAN ONLY OCCUR IF WE HAVE ONLY ONE LOG FILE. THIS LOG FILE MUST
- * BE LOG FILE ZERO AND THAT IS THE FILE WE CURRENTLY HAVE READ.
- * THUS WE CAN CONTINUE IMMEDIATELY TO READ PAGE ZERO IN FILE ZERO.
- * --------------------------------------------------------------------- */
- openSrFourthZeroSkipInitLab(signal);
- return;
- }//if
- return;
-}//Dblqh::readSrFourthPhaseLab()
-
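/* A sketch of the "remaining words in this mbyte" calculation performed in
 * readSrFourthPhaseLab() above, with illustrative parameters (the real
 * ZPAGE_SIZE, ZPAGE_HEADER_SIZE and ZPAGES_IN_MBYTE constants live in the
 * Dblqh headers): count the payload words of the pages left in the mbyte,
 * then subtract the words already used on the head page.
 */
#include <cstdint>

static uint32_t remainingWordsInMbyte(uint32_t currentMbyte,
                                      uint32_t currentFilepage,
                                      uint32_t headPageIndex,
                                      uint32_t pagesInMbyte,     /* e.g. 32 pages per mbyte */
                                      uint32_t pageSizeWords,    /* e.g. 8192 words per page */
                                      uint32_t pageHeaderWords)  /* e.g. 32 header words */
{
  const uint32_t pagesLeft   = (currentMbyte + 1) * pagesInMbyte - currentFilepage;
  const uint32_t payloadLeft = pagesLeft * (pageSizeWords - pageHeaderWords);
  return payloadLeft - (headPageIndex - pageHeaderWords);  /* minus what the head page has used */
}
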
-void Dblqh::openSrFourthNextLab(Signal* signal)
-{
- /* ------------------------------------------------------------------------
- * WE MUST ALSO HAVE FILE 0 OPEN ALL THE TIME.
- * ----------------------------------------------------------------------- */
- logFilePtr.i = logPartPtr.p->firstLogfile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- if (logFilePtr.p->logFileStatus == LogFileRecord::OPEN) {
- jam();
- openSrFourthZeroSkipInitLab(signal);
- return;
- } else {
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_ZERO;
- openFileRw(signal, logFilePtr);
- }//if
- return;
-}//Dblqh::openSrFourthNextLab()
-
-void Dblqh::openSrFourthZeroLab(Signal* signal)
-{
- openSrFourthZeroSkipInitLab(signal);
- return;
-}//Dblqh::openSrFourthZeroLab()
-
-void Dblqh::openSrFourthZeroSkipInitLab(Signal* signal)
-{
- if (logFilePtr.i == logPartPtr.p->currentLogfile) {
- if (logFilePtr.p->currentFilepage == 0) {
- jam();
- /* -------------------------------------------------------------------
- * THE HEADER PAGE IN THE LOG IS PAGE ZERO IN FILE ZERO.
- * THIS SHOULD NEVER OCCUR.
- * ------------------------------------------------------------------- */
- systemErrorLab(signal);
- return;
- }//if
- }//if
- readSinglePage(signal, 0);
- lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_FOURTH_ZERO;
- return;
-}//Dblqh::openSrFourthZeroSkipInitLab()
-
-void Dblqh::readSrFourthZeroLab(Signal* signal)
-{
- logFilePtr.p->logPageZero = logPagePtr.i;
- // --------------------------------------------------------------------
- // This is moved to invalidateLogAfterLastGCI(), RT453.
- // signal->theData[0] = ZSR_FOURTH_COMP;
- // signal->theData[1] = logPartPtr.i;
- // sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- // --------------------------------------------------------------------
-
- // Need to invalidate log pages after the head of the log. RT 453. EDTJAMO.
- // Set the start of the invalidation.
- logFilePtr.i = logPartPtr.p->currentLogfile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logPartPtr.p->invalidateFileNo = logPartPtr.p->headFileNo;
- logPartPtr.p->invalidatePageNo = logPartPtr.p->headPageNo;
-
- logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_INVALIDATE;
- seizeLfo(signal);
- initLfo(signal);
- // The state here is a little confusing, but simulates that we return
- // to invalidateLogAfterLastGCI() from an invalidate write and are ready
- // to read a page from file.
- lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES;
-
- invalidateLogAfterLastGCI(signal);
- return;
-}//Dblqh::readSrFourthZeroLab()
-
-/* --------------------------------------------------------------------------
- * ONE OF THE LOG PARTS HAS COMPLETED PHASE FOUR OF THE SYSTEM RESTART.
- * CHECK IF ALL LOG PARTS ARE COMPLETED. IF SO SEND START_RECCONF
- * ------------------------------------------------------------------------- */
-void Dblqh::srFourthComp(Signal* signal)
-{
- jamEntry();
- logPartPtr.i = signal->theData[0];
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
- logPartPtr.p->logPartState = LogPartRecord::SR_FOURTH_PHASE_COMPLETED;
- for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
- jam();
- ptrAss(logPartPtr, logPartRecord);
- if (logPartPtr.p->logPartState != LogPartRecord::SR_FOURTH_PHASE_COMPLETED) {
- if (logPartPtr.p->logPartState != LogPartRecord::SR_FOURTH_PHASE_STARTED) {
- jam();
- systemErrorLab(signal);
- return;
- } else {
- jam();
- /* ------------------------------------------------------------------
- * THIS LOG PART WAS NOT COMPLETED YET.
- * EXIT AND WAIT FOR IT TO COMPLETE
- * ----------------------------------------------------------------- */
- return;
- }//if
- }//if
- }//for
- /* ------------------------------------------------------------------------
- * ALL LOG PARTS HAVE COMPLETED PHASE FOUR OF THE SYSTEM RESTART.
- * WE CAN NOW SEND START_RECCONF TO THE MASTER DIH IF IT WAS A
- * SYSTEM RESTART. OTHERWISE WE WILL CONTINUE WITH AN INITIAL START.
- * SET LOG PART STATE TO IDLE TO
- * INDICATE THAT NOTHING IS GOING ON IN THE LOG PART.
- * ----------------------------------------------------------------------- */
- for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
- ptrAss(logPartPtr, logPartRecord);
- logPartPtr.p->logPartState = LogPartRecord::IDLE;
- }//for
-
- if ((cstartType == NodeState::ST_INITIAL_START) ||
- (cstartType == NodeState::ST_INITIAL_NODE_RESTART)) {
- jam();
-
- ndbrequire(cinitialStartOngoing == ZTRUE);
- cinitialStartOngoing = ZFALSE;
-
- checkStartCompletedLab(signal);
- return;
- } else if ((cstartType == NodeState::ST_NODE_RESTART) ||
- (cstartType == NodeState::ST_SYSTEM_RESTART)) {
- jam();
- StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
- conf->startingNodeId = getOwnNodeId();
- sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
- StartRecConf::SignalLength, JBB);
-
- if(cstartType == NodeState::ST_SYSTEM_RESTART){
- fragptr.i = c_redo_log_complete_frags;
- while(fragptr.i != RNIL){
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- signal->theData[0] = fragptr.p->tabRef;
- signal->theData[1] = fragptr.p->fragId;
- sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB);
- fragptr.i = fragptr.p->nextFrag;
- }
- }
- } else {
- ndbrequire(false);
- }//if
- return;
-}//Dblqh::srFourthComp()
-
-/* ######################################################################### */
-/* ####### ERROR MODULE ####### */
-/* */
-/* ######################################################################### */
-void Dblqh::warningHandlerLab(Signal* signal)
-{
- systemErrorLab(signal);
- return;
-}//Dblqh::warningHandlerLab()
-
-/*---------------------------------------------------------------------------*/
-/* AN ERROR OCCURRED THAT WE WILL NOT TREAT AS A SYSTEM ERROR. MOST OFTEN IT */
-/* WAS CAUSED BY AN ERRONEOUS SIGNAL SENT BY ANOTHER NODE. WE DO NOT WISH TO */
-/* CRASH BECAUSE OF FAULTS IN OTHER NODES. THUS WE ONLY REPORT A WARNING. */
-/* THIS IS CURRENTLY NOT IMPLEMENTED AND FOR THE MOMENT WE GENERATE A SYSTEM */
-/* ERROR SINCE WE WANT TO FIND FAULTS AS QUICKLY AS POSSIBLE IN A TEST PHASE.*/
-/* IN A LATER PHASE WE WILL CHANGE THIS TO BE A WARNING MESSAGE INSTEAD. */
-/*---------------------------------------------------------------------------*/
-/*---------------------------------------------------------------------------*/
-/* THIS TYPE OF ERROR SHOULD NOT GENERATE A SYSTEM ERROR IN A PRODUCT */
-/* RELEASE. THIS IS A TEMPORARY SOLUTION DURING TEST PHASE TO QUICKLY */
-/* FIND ERRORS. NORMALLY THIS SHOULD GENERATE A WARNING MESSAGE ONTO */
-/* SOME ERROR LOGGER. THIS WILL LATER BE IMPLEMENTED BY SOME SIGNAL. */
-/*---------------------------------------------------------------------------*/
-/* ------ SYSTEM ERROR SITUATIONS ------- */
-/* IN SITUATIONS WHERE THE STATE IS ERRONEOUS OR IF THE ERROR OCCURS IN */
-/* THE COMMIT, COMPLETE OR ABORT PHASE, WE PERFORM A CRASH OF THE AXE VM*/
-/*---------------------------------------------------------------------------*/
-
-void Dblqh::systemErrorLab(Signal* signal)
-{
- progError(0, 0);
-/*************************************************************************>*/
-/* WE WANT TO INVOKE AN IMMEDIATE ERROR HERE, WHICH IS ACHIEVED BY THE */
-/* PROGERROR CALL ABOVE. */
-/*************************************************************************>*/
-}//Dblqh::systemErrorLab()
-
-/* ------- ERROR SITUATIONS ------- */
-
-void Dblqh::aiStateErrorCheckLab(Signal* signal, Uint32* dataPtr, Uint32 length)
-{
- ndbrequire(tcConnectptr.p->abortState != TcConnectionrec::ABORT_IDLE);
- if (tcConnectptr.p->transactionState != TcConnectionrec::IDLE) {
- jam();
-/*************************************************************************>*/
-/* TRANSACTION ABORT IS ONGOING. IT CAN STILL BE A PART OF AN */
-/* OPERATION THAT SHOULD CONTINUE SINCE THE TUPLE HAS NOT ARRIVED */
-/* YET. THIS IS POSSIBLE IF ACTIVE CREATION OF THE FRAGMENT IS */
-/* ONGOING. */
-/*************************************************************************>*/
- if (tcConnectptr.p->activeCreat == ZTRUE) {
- jam();
-/*************************************************************************>*/
-/* ONGOING ABORTS DURING ACTIVE CREATION MUST SAVE THE ATTRIBUTE INFO*/
-/* SO THAT IT CAN BE SENT TO THE NEXT NODE IN THE COMMIT CHAIN. THIS */
-/* IS NEEDED SINCE ABORTS DURING CREATION OF A FRAGMENT ARE NOT */
-/* NECESSARILY REAL ERRORS. A MISSING TUPLE TO BE UPDATED SIMPLY MEANS THAT */
-/* IT HASN'T BEEN TRANSFERRED TO THE NEW REPLICA YET. */
-/*************************************************************************>*/
-/*************************************************************************>*/
-/* AFTER THIS ERROR THE ABORT MUST BE COMPLETED. TO ENSURE THIS SET */
-/* ACTIVE CREATION TO FALSE. THIS WILL ENSURE THAT THE ABORT IS */
-/* COMPLETED. */
-/*************************************************************************>*/
- if (saveTupattrbuf(signal, dataPtr, length) == ZOK) {
- jam();
- if (tcConnectptr.p->transactionState ==
- TcConnectionrec::WAIT_AI_AFTER_ABORT) {
- if (tcConnectptr.p->currTupAiLen == tcConnectptr.p->totReclenAi) {
- jam();
-/*************************************************************************>*/
-/* WE WERE WAITING FOR MORE ATTRIBUTE INFO AFTER A SUCCESSFUL ABORT */
-/* IN ACTIVE CREATION STATE. THE TRANSACTION SHOULD CONTINUE AS IF */
-/* IT WAS COMMITTED. NOW ALL INFO HAS ARRIVED AND WE CAN CONTINUE */
-/* WITH NORMAL PROCESSING AS IF THE TRANSACTION WAS PREPARED. */
-/* SINCE THE FRAGMENT IS UNDER CREATION WE KNOW THAT LOGGING IS */
-/* DISABLED. WE STILL HAVE TO CATER FOR DIRTY OPERATION OR NOT. */
-/*************************************************************************>*/
- tcConnectptr.p->abortState = TcConnectionrec::ABORT_IDLE;
- rwConcludedAiLab(signal);
- return;
- } else {
- ndbrequire(tcConnectptr.p->currTupAiLen < tcConnectptr.p->totReclenAi);
- jam();
- return; /* STILL WAITING FOR MORE ATTRIBUTE INFO */
- }//if
- }//if
- } else {
- jam();
-/*************************************************************************>*/
-/* AFTER THIS ERROR THE ABORT MUST BE COMPLETED. TO ENSURE THIS SET */
-/* ACTIVE CREATION TO FALSE. THIS WILL ENSURE THAT THE ABORT IS */
-/* COMPLETED AND THAT THE ERROR CODE IS PROPERLY SET */
-/*************************************************************************>*/
- tcConnectptr.p->errorCode = terrorCode;
- tcConnectptr.p->activeCreat = ZFALSE;
- if (tcConnectptr.p->transactionState ==
- TcConnectionrec::WAIT_AI_AFTER_ABORT) {
- jam();
-/*************************************************************************>*/
-/* ABORT IS ALREADY COMPLETED. WE NEED TO RESTART IT FROM WHERE IT */
-/* WAS INTERRUPTED. */
-/*************************************************************************>*/
- continueAbortLab(signal);
- return;
- } else {
- jam();
- return;
-/*************************************************************************>*/
-// Abort is ongoing. It will complete since we set the activeCreat = ZFALSE
-/*************************************************************************>*/
- }//if
- }//if
- }//if
- }//if
-/*************************************************************************>*/
-/* THE TRANSACTION HAS BEEN ABORTED. THUS IGNORE ALL SIGNALS BELONGING TO IT. */
-/*************************************************************************>*/
- return;
-}//Dblqh::aiStateErrorCheckLab()
-
-void Dblqh::takeOverErrorLab(Signal* signal)
-{
- terrorCode = ZTAKE_OVER_ERROR;
- abortErrorLab(signal);
- return;
-}//Dblqh::takeOverErrorLab()
-
-/* ##########################################################################
- * TEST MODULE
- * ######################################################################### */
-#ifdef VM_TRACE
-void Dblqh::execTESTSIG(Signal* signal)
-{
- jamEntry();
- Uint32 userpointer = signal->theData[0];
- BlockReference userblockref = signal->theData[1];
- Uint32 testcase = signal->theData[2];
-
- signal->theData[0] = userpointer;
- signal->theData[1] = cownref;
- signal->theData[2] = testcase;
- sendSignal(userblockref, GSN_TESTSIG, signal, 25, JBB);
- return;
-}//Dblqh::execTESTSIG()
-
-/* *************** */
-/* MEMCHECKREQ > */
-/* *************** */
-/* ************************************************************************>>
- * THIS SIGNAL IS PURELY FOR TESTING PURPOSES. IT CHECKS THE FREE LIST
- * AND REPORTS THE NUMBER OF FREE RECORDS.
- * THIS CAN BE DONE TO ENSURE THAT NO RECORDS HAVE BEEN LOST
- * ************************************************************************> */
-void Dblqh::execMEMCHECKREQ(Signal* signal)
-{
- Uint32* dataPtr = &signal->theData[0];
- jamEntry();
- BlockReference userblockref = signal->theData[0];
- Uint32 index = 0;
- for (Uint32 i = 0; i < 7; i++)
- dataPtr[i] = 0;
- addfragptr.i = cfirstfreeAddfragrec;
- while (addfragptr.i != RNIL) {
- ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
- addfragptr.i = addfragptr.p->nextAddfragrec;
- dataPtr[index]++;
- }//while
- index++;
- attrinbufptr.i = cfirstfreeAttrinbuf;
- while (attrinbufptr.i != RNIL) {
- ptrCheckGuard(attrinbufptr, cattrinbufFileSize, attrbuf);
- attrinbufptr.i = attrinbufptr.p->attrbuf[ZINBUF_NEXT];
- dataPtr[index]++;
- }//while
- index++;
- databufptr.i = cfirstfreeDatabuf;
- while (databufptr.i != RNIL) {
- ptrCheckGuard(databufptr, cdatabufFileSize, databuf);
- databufptr.i = databufptr.p->nextDatabuf;
- dataPtr[index]++;
- }//while
- index++;
- fragptr.i = cfirstfreeFragrec;
- while (fragptr.i != RNIL) {
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- fragptr.i = fragptr.p->nextFrag;
- dataPtr[index]++;
- }//while
- index++;
- for (tabptr.i = 0;
- tabptr.i < ctabrecFileSize;
- tabptr.i++) {
- ptrAss(tabptr, tablerec);
- if (tabptr.p->tableStatus == Tablerec::NOT_DEFINED) {
- dataPtr[index]++;
- }//if
- }//for
- index++;
- tcConnectptr.i = cfirstfreeTcConrec;
- while (tcConnectptr.i != RNIL) {
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- tcConnectptr.i = tcConnectptr.p->nextTcConnectrec;
- dataPtr[index]++;
- }//while
- sendSignal(userblockref, GSN_MEMCHECKCONF, signal, 10, JBB);
- return;
-}//Dblqh::execMEMCHECKREQ()
-
-#endif
-
-/* ************************************************************************* */
-/* ************************* STATEMENT BLOCKS ****************************** */
-/* ************************************************************************* */
-/* ========================================================================= */
-/* ====== BUILD LINKED LIST OF LOG PAGES AFTER RECEIVING FSREADCONF ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::buildLinkedLogPageList(Signal* signal)
-{
- LogPageRecordPtr bllLogPagePtr;
-
- arrGuard(lfoPtr.p->noPagesRw - 1, 16);
- arrGuard(lfoPtr.p->noPagesRw, 16);
- for (UintR tbllIndex = 0; tbllIndex < lfoPtr.p->noPagesRw; tbllIndex++) {
- jam();
- /* ----------------------------------------------------------------------
- * BUILD LINKED LIST BUT ALSO ENSURE THAT PAGE IS NOT SEEN AS DIRTY
- * INITIALLY.
- * --------------------------------------------------------------------- */
- bllLogPagePtr.i = lfoPtr.p->logPageArray[tbllIndex];
- ptrCheckGuard(bllLogPagePtr, clogPageFileSize, logPageRecord);
-
-// #if VM_TRACE
-// // Check logPage checksum before modifying it
-// Uint32 calcCheckSum = calcPageCheckSum(bllLogPagePtr);
-// Uint32 checkSum = bllLogPagePtr.p->logPageWord[ZPOS_CHECKSUM];
-// if (checkSum != calcCheckSum) {
-// ndbout << "Redolog: Checksum failure." << endl;
-// progError(__LINE__, ERR_NDBREQUIRE, "Redolog: Checksum failure.");
-// }
-// #endif
-
- bllLogPagePtr.p->logPageWord[ZNEXT_PAGE] =
- lfoPtr.p->logPageArray[tbllIndex + 1];
- bllLogPagePtr.p->logPageWord[ZPOS_DIRTY] = ZNOT_DIRTY;
- }//for
- bllLogPagePtr.i = lfoPtr.p->logPageArray[lfoPtr.p->noPagesRw - 1];
- ptrCheckGuard(bllLogPagePtr, clogPageFileSize, logPageRecord);
- bllLogPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
-}//Dblqh::buildLinkedLogPageList()
-
-/* =========================================================================
- * ======= CHANGE TO NEXT MBYTE IN LOG =======
- *
- * ========================================================================= */
-void Dblqh::changeMbyte(Signal* signal)
-{
- writeNextLog(signal);
- writeFileDescriptor(signal);
-}//Dblqh::changeMbyte()
-
-/* ========================================================================= */
-/* ====== CHECK IF THIS COMMIT LOG RECORD IS TO BE EXECUTED ======= */
-/* */
-/* SUBROUTINE SHORT NAME = CEL */
-/* ========================================================================= */
-Uint32 Dblqh::checkIfExecLog(Signal* signal)
-{
- tabptr.i = tcConnectptr.p->tableref;
- ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
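- // Execute this commit log record only if the fragment participates in log execution and
- // the operation's GCI falls within the start/last GCI interval of a replica not yet executed.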
- if (getFragmentrec(signal, tcConnectptr.p->fragmentid) &&
- (tabptr.p->schemaVersion == tcConnectptr.p->schemaVersion)) {
- if (fragptr.p->execSrStatus != Fragrecord::IDLE) {
- if (fragptr.p->execSrNoReplicas > logPartPtr.p->execSrExecuteIndex) {
- ndbrequire((fragptr.p->execSrNoReplicas - 1) < 4);
- for (Uint32 i = logPartPtr.p->execSrExecuteIndex;
- i < fragptr.p->execSrNoReplicas;
- i++) {
- jam();
- if (tcConnectptr.p->gci >= fragptr.p->execSrStartGci[i]) {
- if (tcConnectptr.p->gci <= fragptr.p->execSrLastGci[i]) {
- jam();
- logPartPtr.p->execSrExecuteIndex = i;
- return ZOK;
- }//if
- }//if
- }//for
- }//if
- }//if
- }//if
- return ZNOT_OK;
-}//Dblqh::checkIfExecLog()
-
-/* ========================================================================= */
-/* == CHECK IF THERE IS LESS THAN 192 KBYTE IN THE BUFFER PLUS INCOMING === */
-/* READS ALREADY STARTED. IF SO, START ANOTHER READ IF */
-/* THERE ARE MORE PAGES IN THIS MBYTE. */
-/* */
-/* ========================================================================= */
-void Dblqh::checkReadExecSr(Signal* signal)
-{
- logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG;
- logPartPtr.p->execSrPagesRead = logPartPtr.p->execSrPagesRead + 8;
- logPartPtr.p->execSrPagesReading = logPartPtr.p->execSrPagesReading - 8;
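- // A batch of eight log pages has finished reading; account for it before deciding whether to read ahead further.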
- if ((logPartPtr.p->execSrPagesRead + logPartPtr.p->execSrPagesReading) <
- ZREAD_AHEAD_SIZE) {
- jam();
- /* ----------------------------------------------------------------------
- * WE HAVE LESS THAN 64 KBYTE OF LOG PAGES REMAINING IN MEMORY OR ON
- * ITS WAY TO MAIN MEMORY. READ IN 8 MORE PAGES.
- * --------------------------------------------------------------------- */
- if ((logPartPtr.p->execSrPagesRead + logPartPtr.p->execSrPagesExecuted) <
- ZPAGES_IN_MBYTE) {
- jam();
- /* --------------------------------------------------------------------
- * THERE ARE MORE PAGES TO READ IN THIS MBYTE. READ THOSE FIRST.
- * IF >= ZPAGES_IN_MBYTE THEN THERE ARE NO MORE PAGES TO READ. THUS
- * WE PROCEED WITH EXECUTION OF THE LOG.
- * ------------------------------------------------------------------- */
- readExecSr(signal);
- logPartPtr.p->logExecState = LogPartRecord::LES_WAIT_READ_EXEC_SR;
- }//if
- }//if
-}//Dblqh::checkReadExecSr()
-
-/* ========================================================================= */
-/* ==== CHECK IF START OF NEW FRAGMENT IS COMPLETED AND WE CAN ======= */
-/* ==== GET THE START GCI ======= */
-/* */
-/* SUBROUTINE SHORT NAME = CTC */
-/* ========================================================================= */
-void Dblqh::checkScanTcCompleted(Signal* signal)
-{
- tcConnectptr.p->logWriteState = TcConnectionrec::NOT_STARTED;
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- fragptr.p->activeTcCounter = fragptr.p->activeTcCounter - 1;
- if (fragptr.p->activeTcCounter == 0) {
- jam();
- fragptr.p->startGci = cnewestGci + 1;
- tabptr.i = tcConnectptr.p->tableref;
- ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
- sendCopyActiveConf(signal, tcConnectptr.p->tableref);
- }//if
-}//Dblqh::checkScanTcCompleted()
-
-/* ==========================================================================
- * === CHECK IF ALL PARTS OF A SYSTEM RESTART ON A FRAGMENT ARE COMPLETED ===
- *
- * SUBROUTINE SHORT NAME = CSC
- * ========================================================================= */
-void Dblqh::checkSrCompleted(Signal* signal)
-{
- LcpLocRecordPtr cscLcpLocptr;
-
- terrorCode = ZOK;
- ptrGuard(lcpPtr);
- cscLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
-CSC_ACC_DOWHILE:
- ptrCheckGuard(cscLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- if (cscLcpLocptr.p->lcpLocstate != LcpLocRecord::SR_ACC_COMPLETED) {
- jam();
- if (cscLcpLocptr.p->lcpLocstate != LcpLocRecord::SR_ACC_STARTED) {
- jam();
- systemErrorLab(signal);
- return;
- }//if
- return;
- }//if
- cscLcpLocptr.i = cscLcpLocptr.p->nextLcpLoc;
- if (cscLcpLocptr.i != RNIL) {
- jam();
- goto CSC_ACC_DOWHILE;
- }//if
- cscLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
-CSC_TUP_DOWHILE:
- ptrCheckGuard(cscLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- if (cscLcpLocptr.p->lcpLocstate != LcpLocRecord::SR_TUP_COMPLETED) {
- jam();
- if (cscLcpLocptr.p->lcpLocstate != LcpLocRecord::SR_TUP_STARTED) {
- jam();
- systemErrorLab(signal);
- return;
- }//if
- return;
- }//if
- cscLcpLocptr.i = cscLcpLocptr.p->nextLcpLoc;
- if (cscLcpLocptr.i != RNIL) {
- jam();
- goto CSC_TUP_DOWHILE;
- }//if
- lcpPtr.p->lcpState = LcpRecord::LCP_SR_COMPLETED;
-}//Dblqh::checkSrCompleted()
-
-/* ------------------------------------------------------------------------- */
-/* ------ CLOSE A FILE DURING EXECUTION OF FRAGMENT LOG ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::closeFile(Signal* signal, LogFileRecordPtr clfLogFilePtr)
-{
- signal->theData[0] = clfLogFilePtr.p->fileRef;
- signal->theData[1] = cownref;
- signal->theData[2] = clfLogFilePtr.i;
- signal->theData[3] = ZCLOSE_NO_DELETE;
- sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
-}//Dblqh::closeFile()
-
-
-/* ---------------------------------------------------------------- */
-/* ---------------- A LOG PAGE HAS BEEN COMPLETED ----------------- */
-/* */
-/* SUBROUTINE SHORT NAME = CLP */
-// Input Pointers:
-// logFilePtr
-// logPagePtr
-// logPartPtr
-// Defines lfoPtr
-/* ---------------------------------------------------------------- */
-void Dblqh::completedLogPage(Signal* signal, Uint32 clpType)
-{
- LogPageRecordPtr clpLogPagePtr;
- LogPageRecordPtr wlpLogPagePtr;
- UintR twlpNoPages;
- UintR twlpType;
-
- if (logFilePtr.p->firstFilledPage == RNIL) {
- jam();
- logFilePtr.p->firstFilledPage = logPagePtr.i;
- } else {
- jam();
- clpLogPagePtr.i = logFilePtr.p->lastFilledPage;
- ptrCheckGuard(clpLogPagePtr, clogPageFileSize, logPageRecord);
- clpLogPagePtr.p->logPageWord[ZNEXT_PAGE] = logPagePtr.i;
- }//if
- logFilePtr.p->lastFilledPage = logPagePtr.i;
- logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
- logFilePtr.p->noLogpagesInBuffer = logFilePtr.p->noLogpagesInBuffer + 1;
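- // Pages are buffered until ZMAX_PAGES_WRITTEN of them have been collected;
- // only a last-write-in-file or an enforced write forces an earlier disk write.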
- if (logFilePtr.p->noLogpagesInBuffer != ZMAX_PAGES_WRITTEN) {
- if (clpType != ZLAST_WRITE_IN_FILE) {
- if (clpType != ZENFORCE_WRITE) {
- jam();
- return;
- }//if
- }//if
- }//if
- twlpType = clpType;
-/* ------------------------------------------------------------------------- */
-/* ------ WRITE A SET OF LOG PAGES TO DISK ------- */
-/* */
-/* SUBROUTINE SHORT NAME: WLP */
-/* ------------------------------------------------------------------------- */
- seizeLfo(signal);
- initLfo(signal);
- Uint32* dataPtr = &signal->theData[6];
- twlpNoPages = 0;
- wlpLogPagePtr.i = logFilePtr.p->firstFilledPage;
- do {
- dataPtr[twlpNoPages] = wlpLogPagePtr.i;
- twlpNoPages++;
- ptrCheckGuard(wlpLogPagePtr, clogPageFileSize, logPageRecord);
-
- // Calculate checksum for page
- wlpLogPagePtr.p->logPageWord[ZPOS_CHECKSUM] = calcPageCheckSum(wlpLogPagePtr);
- wlpLogPagePtr.i = wlpLogPagePtr.p->logPageWord[ZNEXT_PAGE];
- } while (wlpLogPagePtr.i != RNIL);
- ndbrequire(twlpNoPages < 9);
- dataPtr[twlpNoPages] = logFilePtr.p->filePosition;
-/* -------------------------------------------------- */
-/* SET TIMER ON THIS LOG PART TO SIGNIFY THAT A */
-/* LOG RECORD HAS BEEN SENT AT THIS TIME. */
-/* -------------------------------------------------- */
- logPartPtr.p->logPartTimer = logPartPtr.p->logTimer;
- signal->theData[0] = logFilePtr.p->fileRef;
- signal->theData[1] = cownref;
- signal->theData[2] = lfoPtr.i;
- logFilePtr.p->logFilePagesToDiskWithoutSynch += twlpNoPages;
- if (twlpType == ZLAST_WRITE_IN_FILE) {
- jam();
- logFilePtr.p->logFilePagesToDiskWithoutSynch = 0;
- signal->theData[3] = ZLIST_OF_MEM_PAGES_SYNCH;
- } else if (logFilePtr.p->logFilePagesToDiskWithoutSynch >
- MAX_REDO_PAGES_WITHOUT_SYNCH) {
- jam();
- logFilePtr.p->logFilePagesToDiskWithoutSynch = 0;
- signal->theData[3] = ZLIST_OF_MEM_PAGES_SYNCH;
- } else {
- jam();
- signal->theData[3] = ZLIST_OF_MEM_PAGES;
- }//if
- signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
- signal->theData[5] = twlpNoPages;
- sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 15, JBA);
- if (twlpType == ZNORMAL) {
- jam();
- lfoPtr.p->lfoState = LogFileOperationRecord::ACTIVE_WRITE_LOG;
- } else if (twlpType == ZLAST_WRITE_IN_FILE) {
- jam();
- lfoPtr.p->lfoState = LogFileOperationRecord::LAST_WRITE_IN_FILE;
- } else {
- ndbrequire(twlpType == ZENFORCE_WRITE);
- jam();
- lfoPtr.p->lfoState = LogFileOperationRecord::ACTIVE_WRITE_LOG;
- }//if
- /* ----------------------------------------------------------------------- */
- /* ------ MOVE PAGES FROM LOG FILE TO LFO RECORD ------- */
- /* */
- /* ----------------------------------------------------------------------- */
- /* -------------------------------------------------- */
- /* MOVE PAGES TO LFO RECORD AND REMOVE THEM */
- /* FROM LOG FILE RECORD. */
- /* -------------------------------------------------- */
- lfoPtr.p->firstLfoPage = logFilePtr.p->firstFilledPage;
- logFilePtr.p->firstFilledPage = RNIL;
- logFilePtr.p->lastFilledPage = RNIL;
- logFilePtr.p->noLogpagesInBuffer = 0;
-
- lfoPtr.p->noPagesRw = twlpNoPages;
- lfoPtr.p->lfoPageNo = logFilePtr.p->filePosition;
- lfoPtr.p->lfoWordWritten = ZPAGE_SIZE - 1;
- logFilePtr.p->filePosition += twlpNoPages;
-}//Dblqh::completedLogPage()
-
-/* ---------------------------------------------------------------- */
-/* ---------------- DELETE FRAGMENT RECORD ------------------------ */
-/* */
-/* SUBROUTINE SHORT NAME = DFR */
-/* ---------------------------------------------------------------- */
-void Dblqh::deleteFragrec(Uint32 fragId)
-{
- Uint32 indexFound= RNIL;
- fragptr.i = RNIL;
- for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
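- // Scan from the highest index downwards; (Uint32)~i becomes zero, ending the loop, once i wraps past zero.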
- jam();
- if (tabptr.p->fragid[i] == fragId) {
- fragptr.i = tabptr.p->fragrec[i];
- indexFound = i;
- break;
- }//if
- }//for
- if (fragptr.i != RNIL) {
- jam();
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- tabptr.p->fragid[indexFound] = ZNIL;
- tabptr.p->fragrec[indexFound] = RNIL;
- releaseFragrec();
- }//if
-}//Dblqh::deleteFragrec()
-
-/* ------------------------------------------------------------------------- */
-/* ------- FIND LOG FILE RECORD GIVEN FILE NUMBER ------- */
-/* */
-/* INPUT: TFLF_FILE_NO THE FILE NUMBER */
-/* FLF_LOG_PART_PTR THE LOG PART RECORD */
-/* OUTPUT: FLF_LOG_FILE_PTR THE FOUND LOG FILE RECORD */
-/* SUBROUTINE SHORT NAME = FLF */
-/* ------------------------------------------------------------------------- */
-void Dblqh::findLogfile(Signal* signal,
- Uint32 fileNo,
- LogPartRecordPtr flfLogPartPtr,
- LogFileRecordPtr* parLogFilePtr)
-{
- LogFileRecordPtr locLogFilePtr;
- locLogFilePtr.i = flfLogPartPtr.p->firstLogfile;
- Uint32 loopCount = 0;
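- // The ndbrequire below assumes that log files are linked in file number order, so the list position equals the file number.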
- while (true) {
- ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
- if (locLogFilePtr.p->fileNo == fileNo) {
- jam();
- ndbrequire(loopCount == fileNo);
- parLogFilePtr->i = locLogFilePtr.i;
- parLogFilePtr->p = locLogFilePtr.p;
- return;
- }//if
- locLogFilePtr.i = locLogFilePtr.p->nextLogFile;
- loopCount++;
- ndbrequire(loopCount < flfLogPartPtr.p->noLogFiles);
- }//while
-}//Dblqh::findLogfile()
-
-/* ------------------------------------------------------------------------- */
-/* ------ FIND PAGE REFERENCE IN MEMORY BUFFER AT LOG EXECUTION ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::findPageRef(Signal* signal, CommitLogRecord* commitLogRecord)
-{
- UintR tfprIndex;
-
- logPagePtr.i = RNIL;
- if (ERROR_INSERTED(5020)) {
- // Force system to read page from disk
- return;
- }
- pageRefPtr.i = logPartPtr.p->lastPageRef;
- do {
- ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
- if (commitLogRecord->fileNo == pageRefPtr.p->prFileNo) {
- if (commitLogRecord->startPageNo >= pageRefPtr.p->prPageNo) {
- if (commitLogRecord->startPageNo < (Uint16) (pageRefPtr.p->prPageNo + 8)) {
- jam();
- tfprIndex = commitLogRecord->startPageNo - pageRefPtr.p->prPageNo;
- logPagePtr.i = pageRefPtr.p->pageRef[tfprIndex];
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- return;
- }//if
- }//if
- }//if
- pageRefPtr.i = pageRefPtr.p->prPrev;
- } while (pageRefPtr.i != RNIL);
-}//Dblqh::findPageRef()
-
-/* ------------------------------------------------------------------------- */
-/* ------ GET FIRST OPERATION QUEUED FOR LOGGING ------- */
-/* */
-/* SUBROUTINE SHORT NAME = GFL */
-/* ------------------------------------------------------------------------- */
-void Dblqh::getFirstInLogQueue(Signal* signal)
-{
- TcConnectionrecPtr gflTcConnectptr;
-/* -------------------------------------------------- */
-/* GET THE FIRST FROM THE LOG QUEUE AND REMOVE */
-/* IT FROM THE QUEUE. */
-/* -------------------------------------------------- */
- gflTcConnectptr.i = logPartPtr.p->firstLogQueue;
- ptrCheckGuard(gflTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- logPartPtr.p->firstLogQueue = gflTcConnectptr.p->nextTcLogQueue;
- if (logPartPtr.p->firstLogQueue == RNIL) {
- jam();
- logPartPtr.p->lastLogQueue = RNIL;
- }//if
-}//Dblqh::getFirstInLogQueue()
-
-/* ---------------------------------------------------------------- */
-/* ---------------- GET FRAGMENT RECORD --------------------------- */
-/* INPUT: TFRAGID FRAGMENT ID LOOKING FOR */
-/* TABPTR TABLE ID */
-/* SUBROUTINE SHORT NAME = GFR */
-/* ---------------------------------------------------------------- */
-bool Dblqh::getFragmentrec(Signal* signal, Uint32 fragId)
-{
- for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (UintR)~i; i--) {
- jam();
- if (tabptr.p->fragid[i] == fragId) {
- fragptr.i = tabptr.p->fragrec[i];
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- return true;
- }//if
- }//for
- return false;
-}//Dblqh::getFragmentrec()
-
-/* ========================================================================= */
-/* ====== INITIATE ADD FRAGMENT RECORD ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::initialiseAddfragrec(Signal* signal)
-{
- if (caddfragrecFileSize != 0) {
- for (addfragptr.i = 0; addfragptr.i < caddfragrecFileSize; addfragptr.i++) {
- ptrAss(addfragptr, addFragRecord);
- addfragptr.p->addfragStatus = AddFragRecord::FREE;
- addfragptr.p->nextAddfragrec = addfragptr.i + 1;
- }//for
- addfragptr.i = caddfragrecFileSize - 1;
- ptrAss(addfragptr, addFragRecord);
- addfragptr.p->nextAddfragrec = RNIL;
- cfirstfreeAddfragrec = 0;
- } else {
- jam();
- cfirstfreeAddfragrec = RNIL;
- }//if
-}//Dblqh::initialiseAddfragrec()
-
-/* ========================================================================= */
-/* ====== INITIATE ATTRIBUTE IN AND OUT DATA BUFFER ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::initialiseAttrbuf(Signal* signal)
-{
- if (cattrinbufFileSize != 0) {
- for (attrinbufptr.i = 0;
- attrinbufptr.i < cattrinbufFileSize;
- attrinbufptr.i++) {
- refresh_watch_dog();
- ptrAss(attrinbufptr, attrbuf);
- attrinbufptr.p->attrbuf[ZINBUF_NEXT] = attrinbufptr.i + 1;
- }//for
- /* NEXT ATTRINBUF */
- attrinbufptr.i = cattrinbufFileSize - 1;
- ptrAss(attrinbufptr, attrbuf);
- attrinbufptr.p->attrbuf[ZINBUF_NEXT] = RNIL; /* NEXT ATTRINBUF */
- cfirstfreeAttrinbuf = 0;
- } else {
- jam();
- cfirstfreeAttrinbuf = RNIL;
- }//if
-}//Dblqh::initialiseAttrbuf()
-
-/* ========================================================================= */
-/* ====== INITIATE DATA BUFFER ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::initialiseDatabuf(Signal* signal)
-{
- if (cdatabufFileSize != 0) {
- for (databufptr.i = 0; databufptr.i < cdatabufFileSize; databufptr.i++) {
- refresh_watch_dog();
- ptrAss(databufptr, databuf);
- databufptr.p->nextDatabuf = databufptr.i + 1;
- }//for
- databufptr.i = cdatabufFileSize - 1;
- ptrAss(databufptr, databuf);
- databufptr.p->nextDatabuf = RNIL;
- cfirstfreeDatabuf = 0;
- } else {
- jam();
- cfirstfreeDatabuf = RNIL;
- }//if
-}//Dblqh::initialiseDatabuf()
-
-/* ========================================================================= */
-/* ====== INITIATE FRAGMENT RECORD ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::initialiseFragrec(Signal* signal)
-{
- if (cfragrecFileSize != 0) {
- for (fragptr.i = 0; fragptr.i < cfragrecFileSize; fragptr.i++) {
- refresh_watch_dog();
- ptrAss(fragptr, fragrecord);
- fragptr.p->fragStatus = Fragrecord::FREE;
- fragptr.p->fragActiveStatus = ZFALSE;
- fragptr.p->execSrStatus = Fragrecord::IDLE;
- fragptr.p->srStatus = Fragrecord::SS_IDLE;
- fragptr.p->nextFrag = fragptr.i + 1;
- }//for
- fragptr.i = cfragrecFileSize - 1;
- ptrAss(fragptr, fragrecord);
- fragptr.p->nextFrag = RNIL;
- cfirstfreeFragrec = 0;
- } else {
- jam();
- cfirstfreeFragrec = RNIL;
- }//if
-}//Dblqh::initialiseFragrec()
-
-/* ========================================================================= */
-/* ====== INITIATE GLOBAL CHECKPOINT RECORD ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::initialiseGcprec(Signal* signal)
-{
- UintR tigpIndex;
-
- if (cgcprecFileSize != 0) {
- for (gcpPtr.i = 0; gcpPtr.i < cgcprecFileSize; gcpPtr.i++) {
- ptrAss(gcpPtr, gcpRecord);
- for (tigpIndex = 0; tigpIndex <= 3; tigpIndex++) {
- gcpPtr.p->gcpLogPartState[tigpIndex] = ZIDLE;
- gcpPtr.p->gcpSyncReady[tigpIndex] = ZFALSE;
- }//for
- }//for
- }//if
-}//Dblqh::initialiseGcprec()
-
-/* ========================================================================= */
-/* ====== INITIATE LCP RECORD ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::initialiseLcpRec(Signal* signal)
-{
- if (clcpFileSize != 0) {
- for (lcpPtr.i = 0; lcpPtr.i < clcpFileSize; lcpPtr.i++) {
- ptrAss(lcpPtr, lcpRecord);
- lcpPtr.p->lcpState = LcpRecord::LCP_IDLE;
- lcpPtr.p->lcpQueued = false;
- lcpPtr.p->firstLcpLocAcc = RNIL;
- lcpPtr.p->firstLcpLocTup = RNIL;
- lcpPtr.p->reportEmpty = false;
- lcpPtr.p->lastFragmentFlag = false;
- }//for
- }//if
-}//Dblqh::initialiseLcpRec()
-
-/* ========================================================================= */
-/* ====== INITIATE LCP LOCAL RECORD ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::initialiseLcpLocrec(Signal* signal)
-{
- if (clcpLocrecFileSize != 0) {
- for (lcpLocptr.i = 0; lcpLocptr.i < clcpLocrecFileSize; lcpLocptr.i++) {
- ptrAss(lcpLocptr, lcpLocRecord);
- lcpLocptr.p->nextLcpLoc = lcpLocptr.i + 1;
- lcpLocptr.p->lcpLocstate = LcpLocRecord::IDLE;
- lcpLocptr.p->masterLcpRec = RNIL;
- lcpLocptr.p->waitingBlock = LcpLocRecord::NONE;
- }//for
- lcpLocptr.i = clcpLocrecFileSize - 1;
- ptrAss(lcpLocptr, lcpLocRecord);
- lcpLocptr.p->nextLcpLoc = RNIL;
- cfirstfreeLcpLoc = 0;
- } else {
- jam();
- cfirstfreeLcpLoc = RNIL;
- }//if
-}//Dblqh::initialiseLcpLocrec()
-
-/* ========================================================================= */
-/* ====== INITIATE LOG FILE OPERATION RECORD ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::initialiseLfo(Signal* signal)
-{
- if (clfoFileSize != 0) {
- for (lfoPtr.i = 0; lfoPtr.i < clfoFileSize; lfoPtr.i++) {
- ptrAss(lfoPtr, logFileOperationRecord);
- lfoPtr.p->lfoState = LogFileOperationRecord::IDLE;
- lfoPtr.p->lfoTimer = 0;
- lfoPtr.p->nextLfo = lfoPtr.i + 1;
- }//for
- lfoPtr.i = clfoFileSize - 1;
- ptrAss(lfoPtr, logFileOperationRecord);
- lfoPtr.p->nextLfo = RNIL;
- cfirstfreeLfo = 0;
- } else {
- jam();
- cfirstfreeLfo = RNIL;
- }//if
-}//Dblqh::initialiseLfo()
-
-/* ========================================================================= */
-/* ====== INITIATE LOG FILE RECORD ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::initialiseLogFile(Signal* signal)
-{
- if (clogFileFileSize != 0) {
- for (logFilePtr.i = 0; logFilePtr.i < clogFileFileSize; logFilePtr.i++) {
- ptrAss(logFilePtr, logFileRecord);
- logFilePtr.p->nextLogFile = logFilePtr.i + 1;
- logFilePtr.p->logFileStatus = LogFileRecord::LFS_IDLE;
- }//for
- logFilePtr.i = clogFileFileSize - 1;
- ptrAss(logFilePtr, logFileRecord);
- logFilePtr.p->nextLogFile = RNIL;
- cfirstfreeLogFile = 0;
- } else {
- jam();
- cfirstfreeLogFile = RNIL;
- }//if
-}//Dblqh::initialiseLogFile()
-
-/* ========================================================================= */
-/* ====== INITIATE LOG PAGES ======= */
-/* */
-/* ========================================================================= */
-void Dblqh::initialiseLogPage(Signal* signal)
-{
- if (clogPageFileSize != 0) {
- for (logPagePtr.i = 0; logPagePtr.i < clogPageFileSize; logPagePtr.i++) {
- refresh_watch_dog();
- ptrAss(logPagePtr, logPageRecord);
- logPagePtr.p->logPageWord[ZNEXT_PAGE] = logPagePtr.i + 1;
- }//for
- logPagePtr.i = clogPageFileSize - 1;
- ptrAss(logPagePtr, logPageRecord);
- logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
- cfirstfreeLogPage = 0;
- } else {
- jam();
- cfirstfreeLogPage = RNIL;
- }//if
- cnoOfLogPages = clogPageFileSize;
-}//Dblqh::initialiseLogPage()
-
-/* =========================================================================
- * ====== INITIATE LOG PART RECORD =======
- *
- * ========================================================================= */
-void Dblqh::initialiseLogPart(Signal* signal)
-{
- for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
- ptrAss(logPartPtr, logPartRecord);
- logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_FALSE;
- logPartPtr.p->LogLqhKeyReqSent = ZFALSE;
- logPartPtr.p->logPartNewestCompletedGCI = (UintR)-1;
- }//for
-}//Dblqh::initialiseLogPart()
-
-void Dblqh::initialisePageRef(Signal* signal)
-{
- if (cpageRefFileSize != 0) {
- for (pageRefPtr.i = 0;
- pageRefPtr.i < cpageRefFileSize;
- pageRefPtr.i++) {
- ptrAss(pageRefPtr, pageRefRecord);
- pageRefPtr.p->prNext = pageRefPtr.i + 1;
- }//for
- pageRefPtr.i = cpageRefFileSize - 1;
- ptrAss(pageRefPtr, pageRefRecord);
- pageRefPtr.p->prNext = RNIL;
- cfirstfreePageRef = 0;
- } else {
- jam();
- cfirstfreePageRef = RNIL;
- }//if
-}//Dblqh::initialisePageRef()
-
-/* ==========================================================================
- * ======= INITIATE RECORDS =======
- *
- * TAKES CARE OF INITIATION OF ALL RECORDS IN THIS BLOCK.
- * ========================================================================= */
-void Dblqh::initialiseRecordsLab(Signal* signal, Uint32 data,
- Uint32 retRef, Uint32 retData)
-{
- Uint32 i;
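- // Initialisation is performed in steps: each case initialises one group of records, after which a
- // CONTINUEB signal with data + 1 triggers the next step; the final step replies with READ_CONFIG_CONF.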
- switch (data) {
- case 0:
- jam();
- for (i = 0; i < MAX_NDB_NODES; i++) {
- cnodeSrState[i] = ZSTART_SR;
- cnodeExecSrState[i] = ZSTART_SR;
- }//for
- for (i = 0; i < 1024; i++) {
- ctransidHash[i] = RNIL;
- }//for
- for (i = 0; i < 4; i++) {
- cactiveCopy[i] = RNIL;
- }//for
- cnoActiveCopy = 0;
- cCounterAccCommitBlocked = 0;
- cCounterTupCommitBlocked = 0;
- caccCommitBlocked = false;
- ctupCommitBlocked = false;
- cCommitBlocked = false;
- ccurrentGcprec = RNIL;
- caddNodeState = ZFALSE;
- cstartRecReq = ZFALSE;
- cnewestGci = (UintR)-1;
- cnewestCompletedGci = (UintR)-1;
- crestartOldestGci = 0;
- crestartNewestGci = 0;
- cfirstWaitFragSr = RNIL;
- cfirstCompletedFragSr = RNIL;
- csrPhaseStarted = ZSR_NO_PHASE_STARTED;
- csrPhasesCompleted = 0;
- cmasterDihBlockref = 0;
- cnoFragmentsExecSr = 0;
- clcpCompletedState = LCP_IDLE;
- csrExecUndoLogState = EULS_IDLE;
- c_lcpId = 0;
- cnoOfFragsCheckpointed = 0;
- break;
- case 1:
- jam();
- initialiseAddfragrec(signal);
- break;
- case 2:
- jam();
- initialiseAttrbuf(signal);
- break;
- case 3:
- jam();
- initialiseDatabuf(signal);
- break;
- case 4:
- jam();
- initialiseFragrec(signal);
- break;
- case 5:
- jam();
- initialiseGcprec(signal);
- initialiseLcpRec(signal);
- initialiseLcpLocrec(signal);
- break;
- case 6:
- jam();
- initialiseLogPage(signal);
- break;
- case 7:
- jam();
- initialiseLfo(signal);
- break;
- case 8:
- jam();
- initialiseLogFile(signal);
- initialiseLogPart(signal);
- break;
- case 9:
- jam();
- initialisePageRef(signal);
- break;
- case 10:
- jam();
- initialiseScanrec(signal);
- break;
- case 11:
- jam();
- initialiseTabrec(signal);
- break;
- case 12:
- jam();
- initialiseTcNodeFailRec(signal);
- initialiseTcrec(signal);
- {
- ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = retData;
- sendSignal(retRef, GSN_READ_CONFIG_CONF, signal,
- ReadConfigConf::SignalLength, JBB);
- }
- return;
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
-
- signal->theData[0] = ZINITIALISE_RECORDS;
- signal->theData[1] = data + 1;
- signal->theData[2] = 0;
- signal->theData[3] = retRef;
- signal->theData[4] = retData;
- sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 5, JBB);
-
- return;
-}//Dblqh::initialiseRecordsLab()
-
-/* ==========================================================================
- * ======= INITIATE SCAN RECORDS =======
- *
- * ========================================================================= */
-void Dblqh::initialiseScanrec(Signal* signal)
-{
- ndbrequire(cscanrecFileSize > 1);
- DLList<ScanRecord> tmp(c_scanRecordPool);
- while (tmp.seize(scanptr)){
- //new (scanptr.p) ScanRecord();
- refresh_watch_dog();
- scanptr.p->scanType = ScanRecord::ST_IDLE;
- scanptr.p->scanState = ScanRecord::SCAN_FREE;
- scanptr.p->scanTcWaiting = ZFALSE;
- scanptr.p->nextHash = RNIL;
- scanptr.p->prevHash = RNIL;
- scanptr.p->scan_acc_index= 0;
- scanptr.p->scan_acc_attr_recs= 0;
- }
- tmp.release();
-}//Dblqh::initialiseScanrec()
-
-/* ==========================================================================
- * ======= INITIATE TABLE RECORD =======
- *
- * ========================================================================= */
-void Dblqh::initialiseTabrec(Signal* signal)
-{
- if (ctabrecFileSize != 0) {
- for (tabptr.i = 0; tabptr.i < ctabrecFileSize; tabptr.i++) {
- refresh_watch_dog();
- ptrAss(tabptr, tablerec);
- tabptr.p->tableStatus = Tablerec::NOT_DEFINED;
- tabptr.p->usageCount = 0;
- for (Uint32 i = 0; i <= (MAX_FRAG_PER_NODE - 1); i++) {
- tabptr.p->fragid[i] = ZNIL;
- tabptr.p->fragrec[i] = RNIL;
- }//for
- }//for
- }//if
-}//Dblqh::initialiseTabrec()
-
-/* ==========================================================================
- * ======= INITIATE TC CONNECTION RECORD =======
- *
- * ========================================================================= */
-void Dblqh::initialiseTcrec(Signal* signal)
-{
- if (ctcConnectrecFileSize != 0) {
- for (tcConnectptr.i = 0;
- tcConnectptr.i < ctcConnectrecFileSize;
- tcConnectptr.i++) {
- refresh_watch_dog();
- ptrAss(tcConnectptr, tcConnectionrec);
- tcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED;
- tcConnectptr.p->tcScanRec = RNIL;
- tcConnectptr.p->logWriteState = TcConnectionrec::NOT_STARTED;
- tcConnectptr.p->firstAttrinbuf = RNIL;
- tcConnectptr.p->lastAttrinbuf = RNIL;
- tcConnectptr.p->firstTupkeybuf = RNIL;
- tcConnectptr.p->lastTupkeybuf = RNIL;
- tcConnectptr.p->tcTimer = 0;
- tcConnectptr.p->nextTcConnectrec = tcConnectptr.i + 1;
- }//for
- tcConnectptr.i = ctcConnectrecFileSize - 1;
- ptrAss(tcConnectptr, tcConnectionrec);
- tcConnectptr.p->nextTcConnectrec = RNIL;
- cfirstfreeTcConrec = 0;
- } else {
- jam();
- cfirstfreeTcConrec = RNIL;
- }//if
-}//Dblqh::initialiseTcrec()
-
-/* ==========================================================================
- * ======= INITIATE TC NODE FAIL RECORD =======
- *
- * ========================================================================= */
-void Dblqh::initialiseTcNodeFailRec(Signal* signal)
-{
- if (ctcNodeFailrecFileSize != 0) {
- for (tcNodeFailptr.i = 0;
- tcNodeFailptr.i < ctcNodeFailrecFileSize;
- tcNodeFailptr.i++) {
- ptrAss(tcNodeFailptr, tcNodeFailRecord);
- tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_FALSE;
- }//for
- }//if
-}//Dblqh::initialiseTcNodeFailRec()
-
-/* ==========================================================================
- * ======= INITIATE FRAGMENT RECORD =======
- *
- * SUBROUTINE SHORT NAME = IF
- * ========================================================================= */
-void Dblqh::initFragrec(Signal* signal,
- Uint32 tableId,
- Uint32 fragId,
- Uint32 copyType)
-{
- new (fragptr.p) Fragrecord();
- fragptr.p->m_scanNumberMask.set(); // All is free
- fragptr.p->accBlockref = caccBlockref;
- fragptr.p->accBlockedList = RNIL;
- fragptr.p->activeList = RNIL;
- fragptr.p->firstWaitQueue = RNIL;
- fragptr.p->lastWaitQueue = RNIL;
- fragptr.p->fragStatus = Fragrecord::DEFINED;
- fragptr.p->fragCopy = copyType;
- fragptr.p->tupBlockref = ctupBlockref;
- fragptr.p->tuxBlockref = ctuxBlockref;
- fragptr.p->lcpRef = RNIL;
- fragptr.p->logFlag = Fragrecord::STATE_TRUE;
- fragptr.p->lcpFlag = Fragrecord::LCP_STATE_TRUE;
- for (Uint32 i = 0; i < MAX_LCP_STORED; i++) {
- fragptr.p->lcpId[i] = 0;
- }//for
- fragptr.p->maxGciCompletedInLcp = 0;
- fragptr.p->maxGciInLcp = 0;
- fragptr.p->copyFragState = ZIDLE;
- fragptr.p->nextFrag = RNIL;
- fragptr.p->newestGci = cnewestGci;
- fragptr.p->nextLcp = 0;
- fragptr.p->tabRef = tableId;
- fragptr.p->fragId = fragId;
- fragptr.p->srStatus = Fragrecord::SS_IDLE;
- fragptr.p->execSrStatus = Fragrecord::IDLE;
- fragptr.p->execSrNoReplicas = 0;
- fragptr.p->fragDistributionKey = 0;
- fragptr.p->activeTcCounter = 0;
- fragptr.p->tableFragptr = RNIL;
-}//Dblqh::initFragrec()
-
-/* ==========================================================================
- * ======= INITIATE FRAGMENT RECORD FOR SYSTEM RESTART =======
- *
- * SUBROUTINE SHORT NAME = IFS
- * ========================================================================= */
-void Dblqh::initFragrecSr(Signal* signal)
-{
- const StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0];
- Uint32 lcpNo = startFragReq->lcpNo;
- Uint32 noOfLogNodes = startFragReq->noOfLogNodes;
- ndbrequire(noOfLogNodes <= 4);
- fragptr.p->fragStatus = Fragrecord::CRASH_RECOVERING;
- fragptr.p->srBlockref = startFragReq->userRef;
- fragptr.p->srUserptr = startFragReq->userPtr;
- fragptr.p->srChkpnr = lcpNo;
- if (lcpNo == (MAX_LCP_STORED - 1)) {
- jam();
- fragptr.p->lcpId[lcpNo] = startFragReq->lcpId;
- fragptr.p->nextLcp = 0;
- } else if (lcpNo < (MAX_LCP_STORED - 1)) {
- jam();
- fragptr.p->lcpId[lcpNo] = startFragReq->lcpId;
- fragptr.p->nextLcp = lcpNo + 1;
- } else {
- ndbrequire(lcpNo == ZNIL);
- jam();
- fragptr.p->nextLcp = 0;
- }//if
- fragptr.p->srNoLognodes = noOfLogNodes;
- fragptr.p->logFlag = Fragrecord::STATE_FALSE;
- fragptr.p->srStatus = Fragrecord::SS_IDLE;
- if (noOfLogNodes > 0) {
- jam();
- for (Uint32 i = 0; i < noOfLogNodes; i++) {
- jam();
- fragptr.p->srStartGci[i] = startFragReq->startGci[i];
- fragptr.p->srLastGci[i] = startFragReq->lastGci[i];
- fragptr.p->srLqhLognode[i] = startFragReq->lqhLogNode[i];
- }//for
- fragptr.p->newestGci = startFragReq->lastGci[noOfLogNodes - 1];
- } else {
- fragptr.p->newestGci = cnewestGci;
- }//if
-}//Dblqh::initFragrecSr()
-
-/* ==========================================================================
- * ======= INITIATE INFORMATION ABOUT GLOBAL CHECKPOINTS =======
- * IN LOG FILE RECORDS
- *
- * INPUT: LOG_FILE_PTR CURRENT LOG FILE
- * TNO_FD_DESCRIPTORS THE NUMBER OF FILE DESCRIPTORS
- * TO READ FROM THE LOG PAGE
- * LOG_PAGE_PTR PAGE ZERO IN LOG FILE
- * SUBROUTINE SHORT NAME = IGL
- * ========================================================================= */
-void Dblqh::initGciInLogFileRec(Signal* signal, Uint32 noFdDescriptors)
-{
- LogFileRecordPtr iglLogFilePtr;
- UintR tiglLoop;
- UintR tiglIndex;
-
- tiglLoop = 0;
- iglLogFilePtr.i = logFilePtr.i;
- iglLogFilePtr.p = logFilePtr.p;
-IGL_LOOP:
- for (tiglIndex = 0; tiglIndex <= ZNO_MBYTES_IN_FILE - 1; tiglIndex++) {
- arrGuard(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (tiglLoop * ZFD_PART_SIZE)) + tiglIndex, ZPAGE_SIZE);
- iglLogFilePtr.p->logMaxGciCompleted[tiglIndex] =
- logPagePtr.p->logPageWord[((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
- arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + ZNO_MBYTES_IN_FILE) +
- (tiglLoop * ZFD_PART_SIZE)) + tiglIndex, ZPAGE_SIZE);
- iglLogFilePtr.p->logMaxGciStarted[tiglIndex] =
- logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- ZNO_MBYTES_IN_FILE) +
- (tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
- arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (2 * ZNO_MBYTES_IN_FILE)) + (tiglLoop * ZFD_PART_SIZE)) +
- tiglIndex, ZPAGE_SIZE);
- iglLogFilePtr.p->logLastPrepRef[tiglIndex] =
- logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (2 * ZNO_MBYTES_IN_FILE)) +
- (tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
- }//for
- tiglLoop = tiglLoop + 1;
- if (tiglLoop < noFdDescriptors) {
- jam();
- iglLogFilePtr.i = iglLogFilePtr.p->prevLogFile;
- ptrCheckGuard(iglLogFilePtr, clogFileFileSize, logFileRecord);
- goto IGL_LOOP;
- }//if
-}//Dblqh::initGciInLogFileRec()
-
-/* ==========================================================================
- * ======= INITIATE LCP RECORD WHEN USED FOR SYSTEM RESTART =======
- *
- * SUBROUTINE SHORT NAME = ILS
- * ========================================================================= */
-void Dblqh::initLcpSr(Signal* signal,
- Uint32 lcpNo,
- Uint32 lcpId,
- Uint32 tableId,
- Uint32 fragId,
- Uint32 fragPtr)
-{
- lcpPtr.p->lcpQueued = false;
- lcpPtr.p->currentFragment.fragPtrI = fragPtr;
- lcpPtr.p->currentFragment.lcpFragOrd.lcpNo = lcpNo;
- lcpPtr.p->currentFragment.lcpFragOrd.lcpId = lcpId;
- lcpPtr.p->currentFragment.lcpFragOrd.tableId = tableId;
- lcpPtr.p->currentFragment.lcpFragOrd.fragmentId = fragId;
- lcpPtr.p->lcpState = LcpRecord::LCP_SR_WAIT_FRAGID;
- lcpPtr.p->firstLcpLocAcc = RNIL;
- lcpPtr.p->firstLcpLocTup = RNIL;
- lcpPtr.p->lcpAccptr = RNIL;
-}//Dblqh::initLcpSr()
-
-/* ==========================================================================
- * ======= INITIATE LOG PART =======
- *
- * ========================================================================= */
-void Dblqh::initLogpart(Signal* signal)
-{
- logPartPtr.p->execSrLogPage = RNIL;
- logPartPtr.p->execSrLogPageIndex = ZNIL;
- logPartPtr.p->execSrExecuteIndex = 0;
- logPartPtr.p->noLogFiles = cnoLogFiles;
- logPartPtr.p->logLap = 0;
- logPartPtr.p->logTailFileNo = 0;
- logPartPtr.p->logTailMbyte = 0;
- logPartPtr.p->lastMbyte = ZNIL;
- logPartPtr.p->logPartState = LogPartRecord::SR_FIRST_PHASE;
- logPartPtr.p->logExecState = LogPartRecord::LES_IDLE;
- logPartPtr.p->firstLogTcrec = RNIL;
- logPartPtr.p->lastLogTcrec = RNIL;
- logPartPtr.p->firstLogQueue = RNIL;
- logPartPtr.p->lastLogQueue = RNIL;
- logPartPtr.p->gcprec = RNIL;
- logPartPtr.p->firstPageRef = RNIL;
- logPartPtr.p->lastPageRef = RNIL;
- logPartPtr.p->headFileNo = ZNIL;
- logPartPtr.p->headPageNo = ZNIL;
- logPartPtr.p->headPageIndex = ZNIL;
-}//Dblqh::initLogpart()
-
-/* ==========================================================================
- * ======= INITIATE LOG POINTERS =======
- *
- * ========================================================================= */
-void Dblqh::initLogPointers(Signal* signal)
-{
- logPartPtr.i = tcConnectptr.p->hashValue & 3;
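- // The two lowest bits of the operation's hash value select one of the four log parts.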
- ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
- logFilePtr.i = logPartPtr.p->currentLogfile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logPagePtr.i = logFilePtr.p->currentLogpage;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
-}//Dblqh::initLogPointers()
-
-/* ------------------------------------------------------------------------- */
-/* ------- INIT REQUEST INFO BEFORE EXECUTING A LOG RECORD ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::initReqinfoExecSr(Signal* signal)
-{
- UintR Treqinfo = 0;
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- LqhKeyReq::setKeyLen(Treqinfo, regTcPtr->primKeyLen);
-/* ------------------------------------------------------------------------- */
-/* NUMBER OF BACKUPS AND STANDBYS ARE ZERO AND NEED NOT BE SET. */
-/* REPLICA TYPE IS CLEARED BY SEND_LQHKEYREQ. */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* SET LAST REPLICA NUMBER TO ZERO (BIT 10-11) */
-/* ------------------------------------------------------------------------- */
-/* ------------------------------------------------------------------------- */
-/* SET DIRTY FLAG */
-/* ------------------------------------------------------------------------- */
- LqhKeyReq::setDirtyFlag(Treqinfo, 1);
-/* ------------------------------------------------------------------------- */
-/* SET SIMPLE TRANSACTION */
-/* ------------------------------------------------------------------------- */
- LqhKeyReq::setSimpleFlag(Treqinfo, 1);
-/* ------------------------------------------------------------------------- */
-/* SET OPERATION TYPE AND LOCK MODE (NEVER READ OPERATION OR SCAN IN LOG) */
-/* ------------------------------------------------------------------------- */
- LqhKeyReq::setLockType(Treqinfo, regTcPtr->operation);
- LqhKeyReq::setOperation(Treqinfo, regTcPtr->operation);
- regTcPtr->reqinfo = Treqinfo;
-/* ------------------------------------------------------------------------ */
-/* NO OF BACKUP IS SET TO ONE AND NUMBER OF STANDBY NODES IS SET TO ZERO. */
-/* THUS THE RECEIVING NODE WILL EXPECT THAT IT IS THE LAST NODE AND WILL */
-/* SEND COMPLETED AS THE RESPONSE SIGNAL SINCE DIRTY_OP BIT IS SET. */
-/* ------------------------------------------------------------------------ */
-/* ------------------------------------------------------------------------- */
-/* SET REPLICA TYPE TO PRIMARY AND NUMBER OF REPLICA TO ONE */
-/* ------------------------------------------------------------------------- */
- regTcPtr->lastReplicaNo = 0;
- regTcPtr->apiVersionNo = 0;
- regTcPtr->nextSeqNoReplica = 0;
- regTcPtr->opExec = 0;
- regTcPtr->storedProcId = ZNIL;
- regTcPtr->readlenAi = 0;
- regTcPtr->nodeAfterNext[0] = ZNIL;
- regTcPtr->nodeAfterNext[1] = ZNIL;
- regTcPtr->dirtyOp = ZFALSE;
- regTcPtr->tcBlockref = cownref;
-}//Dblqh::initReqinfoExecSr()
-
-/* --------------------------------------------------------------------------
- * ------- INSERT FRAGMENT -------
- *
- * ------------------------------------------------------------------------- */
-bool Dblqh::insertFragrec(Signal* signal, Uint32 fragId)
-{
- terrorCode = ZOK;
- if (cfirstfreeFragrec == RNIL) {
- jam();
- terrorCode = ZNO_FREE_FRAGMENTREC;
- return false;
- }//if
- seizeFragmentrec(signal);
- for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
- jam();
- if (tabptr.p->fragid[i] == ZNIL) {
- jam();
- tabptr.p->fragid[i] = fragId;
- tabptr.p->fragrec[i] = fragptr.i;
- return true;
- }//if
- }//for
- terrorCode = ZTOO_MANY_FRAGMENTS;
- return false;
-}//Dblqh::insertFragrec()
-
-/* --------------------------------------------------------------------------
- * ------- LINK OPERATION INTO WAIT QUEUE ON FRAGMENT -------
- *
- * SUBROUTINE SHORT NAME: LFQ
-// Input Pointers:
-// tcConnectptr
-// fragptr
-* ------------------------------------------------------------------------- */
-void Dblqh::linkFragQueue(Signal* signal)
-{
- TcConnectionrecPtr lfqTcConnectptr;
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- Fragrecord * const regFragPtr = fragptr.p;
- Uint32 tcIndex = tcConnectptr.i;
-
- lfqTcConnectptr.i = regFragPtr->lastWaitQueue;
- regTcPtr->nextTc = RNIL;
- regFragPtr->lastWaitQueue = tcIndex;
- regTcPtr->prevTc = lfqTcConnectptr.i;
- ndbrequire(regTcPtr->listState == TcConnectionrec::NOT_IN_LIST);
- regTcPtr->listState = TcConnectionrec::WAIT_QUEUE_LIST;
- if (lfqTcConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(lfqTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- lfqTcConnectptr.p->nextTc = tcIndex;
- } else {
- regFragPtr->firstWaitQueue = tcIndex;
- }//if
- return;
-}//Dblqh::linkFragQueue()
-
-/* -------------------------------------------------------------------------
- * ------- LINK OPERATION INTO WAITING FOR LOGGING -------
- *
- * SUBROUTINE SHORT NAME = LWL
-// Input Pointers:
-// tcConnectptr
-// logPartPtr
- * ------------------------------------------------------------------------- */
-void Dblqh::linkWaitLog(Signal* signal, LogPartRecordPtr regLogPartPtr)
-{
- TcConnectionrecPtr lwlTcConnectptr;
-
-/* -------------------------------------------------- */
-/* LINK ACTIVE OPERATION INTO QUEUE WAITING FOR */
-/* ACCESS TO THE LOG PART. */
-/* -------------------------------------------------- */
- lwlTcConnectptr.i = regLogPartPtr.p->lastLogQueue;
- if (lwlTcConnectptr.i == RNIL) {
- jam();
- regLogPartPtr.p->firstLogQueue = tcConnectptr.i;
- } else {
- jam();
- ptrCheckGuard(lwlTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- lwlTcConnectptr.p->nextTcLogQueue = tcConnectptr.i;
- }//if
- regLogPartPtr.p->lastLogQueue = tcConnectptr.i;
- tcConnectptr.p->nextTcLogQueue = RNIL;
- if (regLogPartPtr.p->LogLqhKeyReqSent == ZFALSE) {
- jam();
- regLogPartPtr.p->LogLqhKeyReqSent = ZTRUE;
- signal->theData[0] = ZLOG_LQHKEYREQ;
- signal->theData[1] = regLogPartPtr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- }//if
-}//Dblqh::linkWaitLog()
-
-/* --------------------------------------------------------------------------
- * ------- START THE NEXT OPERATION ON THIS LOG PART IF ANY -------
- * ------- OPERATIONS ARE QUEUED. -------
- *
- * SUBROUTINE SHORT NAME = LNS
-// Input Pointers:
-// tcConnectptr
-// logPartPtr
- * ------------------------------------------------------------------------- */
-void Dblqh::logNextStart(Signal* signal)
-{
- LogPartRecordPtr lnsLogPartPtr;
- UintR tlnsStillWaiting;
- LogPartRecord * const regLogPartPtr = logPartPtr.p;
-
- if ((regLogPartPtr->firstLogQueue == RNIL) &&
- (regLogPartPtr->logPartState == LogPartRecord::ACTIVE) &&
- (regLogPartPtr->waitWriteGciLog != LogPartRecord::WWGL_TRUE)) {
-// --------------------------------------------------------------------------
-// Optimised route for the common case
-// --------------------------------------------------------------------------
- regLogPartPtr->logPartState = LogPartRecord::IDLE;
- return;
- }//if
- if (regLogPartPtr->firstLogQueue != RNIL) {
- jam();
- if (regLogPartPtr->LogLqhKeyReqSent == ZFALSE) {
- jam();
- regLogPartPtr->LogLqhKeyReqSent = ZTRUE;
- signal->theData[0] = ZLOG_LQHKEYREQ;
- signal->theData[1] = logPartPtr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- }//if
- } else {
- if (regLogPartPtr->logPartState == LogPartRecord::ACTIVE) {
- jam();
- regLogPartPtr->logPartState = LogPartRecord::IDLE;
- } else {
- jam();
- }//if
- }//if
- if (regLogPartPtr->waitWriteGciLog != LogPartRecord::WWGL_TRUE) {
- jam();
- return;
- } else {
- jam();
-/* --------------------------------------------------------------------------
- * A COMPLETE GCI LOG RECORD IS WAITING TO BE WRITTEN. WE GIVE THIS HIGHEST
- * PRIORITY AND WRITE IT IMMEDIATELY. AFTER WRITING IT WE CHECK IF ANY MORE
- * LOG PARTS ARE WAITING. IF NOT WE SEND A SIGNAL THAT INITIALISES THE GCP
- * RECORD TO WAIT UNTIL ALL COMPLETE GCI LOG RECORDS HAVE REACHED TO DISK.
- * -------------------------------------------------------------------------- */
- writeCompletedGciLog(signal);
- logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_FALSE;
- tlnsStillWaiting = ZFALSE;
- for (lnsLogPartPtr.i = 0; lnsLogPartPtr.i < 4; lnsLogPartPtr.i++) {
- jam();
- ptrAss(lnsLogPartPtr, logPartRecord);
- if (lnsLogPartPtr.p->waitWriteGciLog == LogPartRecord::WWGL_TRUE) {
- jam();
- tlnsStillWaiting = ZTRUE;
- }//if
- }//for
- if (tlnsStillWaiting == ZFALSE) {
- jam();
- signal->theData[0] = ZINIT_GCP_REC;
- sendSignal(cownref, GSN_CONTINUEB, signal, 1, JBB);
- }//if
- }//if
-}//Dblqh::logNextStart()
-
-/* --------------------------------------------------------------------------
- * ------- MOVE PAGES FROM LFO RECORD TO PAGE REFERENCE RECORD -------
- * WILL ALWAYS MOVE 8 PAGES TO A PAGE REFERENCE RECORD.
- *
- * SUBROUTINE SHORT NAME = MPR
- * ------------------------------------------------------------------------- */
-void Dblqh::moveToPageRef(Signal* signal)
-{
- LogPageRecordPtr mprLogPagePtr;
- PageRefRecordPtr mprPageRefPtr;
- UintR tmprIndex;
-
-/* --------------------------------------------------------------------------
- * ------- INSERT PAGE REFERENCE RECORD -------
- *
- * INPUT: LFO_PTR LOG FILE OPERATION RECORD
- * LOG_PART_PTR LOG PART RECORD
- * PAGE_REF_PTR THE PAGE REFERENCE RECORD TO BE INSERTED.
- * ------------------------------------------------------------------------- */
- PageRefRecordPtr iprPageRefPtr;
-
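- // If keeping eight more pages would exceed the main memory buffer limit,
- // first release the pages of the oldest page reference record.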
- if ((logPartPtr.p->mmBufferSize + 8) >= ZMAX_MM_BUFFER_SIZE) {
- jam();
- pageRefPtr.i = logPartPtr.p->firstPageRef;
- ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
- releasePrPages(signal);
- removePageRef(signal);
- } else {
- jam();
- logPartPtr.p->mmBufferSize = logPartPtr.p->mmBufferSize + 8;
- }//if
- seizePageRef(signal);
- if (logPartPtr.p->firstPageRef == RNIL) {
- jam();
- logPartPtr.p->firstPageRef = pageRefPtr.i;
- } else {
- jam();
- iprPageRefPtr.i = logPartPtr.p->lastPageRef;
- ptrCheckGuard(iprPageRefPtr, cpageRefFileSize, pageRefRecord);
- iprPageRefPtr.p->prNext = pageRefPtr.i;
- }//if
- pageRefPtr.p->prPrev = logPartPtr.p->lastPageRef;
- logPartPtr.p->lastPageRef = pageRefPtr.i;
-
- pageRefPtr.p->prFileNo = logFilePtr.p->fileNo;
- pageRefPtr.p->prPageNo = lfoPtr.p->lfoPageNo;
- tmprIndex = 0;
- mprLogPagePtr.i = lfoPtr.p->firstLfoPage;
-MPR_LOOP:
- arrGuard(tmprIndex, 8);
- pageRefPtr.p->pageRef[tmprIndex] = mprLogPagePtr.i;
- tmprIndex = tmprIndex + 1;
- ptrCheckGuard(mprLogPagePtr, clogPageFileSize, logPageRecord);
- mprLogPagePtr.i = mprLogPagePtr.p->logPageWord[ZNEXT_PAGE];
- if (mprLogPagePtr.i != RNIL) {
- jam();
- goto MPR_LOOP;
- }//if
- mprPageRefPtr.i = pageRefPtr.p->prPrev;
- if (mprPageRefPtr.i != RNIL) {
- jam();
- ptrCheckGuard(mprPageRefPtr, cpageRefFileSize, pageRefRecord);
- mprLogPagePtr.i = mprPageRefPtr.p->pageRef[7];
- ptrCheckGuard(mprLogPagePtr, clogPageFileSize, logPageRecord);
- mprLogPagePtr.p->logPageWord[ZNEXT_PAGE] = pageRefPtr.p->pageRef[0];
- }//if
-}//Dblqh::moveToPageRef()
-
-/* ------------------------------------------------------------------------- */
-/* ------- READ THE ATTRINFO FROM THE LOG ------- */
-/* */
-/* SUBROUTINE SHORT NAME = RA */
-/* ------------------------------------------------------------------------- */
-void Dblqh::readAttrinfo(Signal* signal)
-{
- Uint32 remainingLen = tcConnectptr.p->totSendlenAi;
- if (remainingLen == 0) {
- jam();
- tcConnectptr.p->reclenAiLqhkey = 0;
- return;
- }//if
- Uint32 dataLen = remainingLen;
- if (remainingLen > 5)
- dataLen = 5;
- readLogData(signal, dataLen, &tcConnectptr.p->firstAttrinfo[0]);
- tcConnectptr.p->reclenAiLqhkey = dataLen;
- remainingLen -= dataLen;
- while (remainingLen > 0) {
- jam();
- dataLen = remainingLen;
- if (remainingLen > 22)
- dataLen = 22;
- seizeAttrinbuf(signal);
- readLogData(signal, dataLen, &attrinbufptr.p->attrbuf[0]);
- attrinbufptr.p->attrbuf[ZINBUF_DATA_LEN] = dataLen;
- remainingLen -= dataLen;
- }//while
-}//Dblqh::readAttrinfo()
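Editorial sketch (not part of the original file): the chunking pattern readAttrinfo() follows, using only the sizes visible above (up to 5 words inline in firstAttrinfo[], then up to 22 words per seized attrinbuf); the helper name and plain unsigned types are illustrative assumptions.

// How many attrinbufs a log record of totSendlenAi attrinfo words would need,
// mirroring the loop in readAttrinfo() above.
static unsigned attrinbufsNeeded(unsigned totSendlenAi)
{
  unsigned remaining = (totSendlenAi > 5) ? (totSendlenAi - 5) : 0; // after firstAttrinfo[]
  return (remaining + 22 - 1) / 22;                                 // ceiling of remaining / 22
}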
-
-/* ------------------------------------------------------------------------- */
-/* ------- READ COMMIT LOG ------- */
-/* */
-/* SUBROUTINE SHORT NAME = RCL */
-/* ------------------------------------------------------------------------- */
-void Dblqh::readCommitLog(Signal* signal, CommitLogRecord* commitLogRecord)
-{
- Uint32 trclPageIndex = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- if ((trclPageIndex + (ZCOMMIT_LOG_SIZE - 1)) < ZPAGE_SIZE) {
- jam();
- tcConnectptr.p->tableref = logPagePtr.p->logPageWord[trclPageIndex + 0];
- tcConnectptr.p->schemaVersion = logPagePtr.p->logPageWord[trclPageIndex + 1];
- tcConnectptr.p->fragmentid = logPagePtr.p->logPageWord[trclPageIndex + 2];
- commitLogRecord->fileNo = logPagePtr.p->logPageWord[trclPageIndex + 3];
- commitLogRecord->startPageNo = logPagePtr.p->logPageWord[trclPageIndex + 4];
- commitLogRecord->startPageIndex = logPagePtr.p->logPageWord[trclPageIndex + 5];
- commitLogRecord->stopPageNo = logPagePtr.p->logPageWord[trclPageIndex + 6];
- tcConnectptr.p->gci = logPagePtr.p->logPageWord[trclPageIndex + 7];
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
- (trclPageIndex + ZCOMMIT_LOG_SIZE) - 1;
- } else {
- jam();
- tcConnectptr.p->tableref = readLogword(signal);
- tcConnectptr.p->schemaVersion = readLogword(signal);
- tcConnectptr.p->fragmentid = readLogword(signal);
- commitLogRecord->fileNo = readLogword(signal);
- commitLogRecord->startPageNo = readLogword(signal);
- commitLogRecord->startPageIndex = readLogword(signal);
- commitLogRecord->stopPageNo = readLogword(signal);
- tcConnectptr.p->gci = readLogword(signal);
- }//if
- tcConnectptr.p->transid[0] = logPartPtr.i + 65536;
- tcConnectptr.p->transid[1] = (DBLQH << 20) + (cownNodeid << 8);
-}//Dblqh::readCommitLog()
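Editorial sketch (not part of the original file): the commit log record layout implied by this reader, and by writeCommitLog() further down, appears to be one type word followed by eight payload words, which would make ZCOMMIT_LOG_SIZE nine; the struct and the Uint32 alias below are illustrative only.

typedef unsigned int Uint32;      // stand-in for NDB's Uint32
struct CommitLogRecordLayout {    // illustrative, not a real NDB type
  Uint32 recordType;              // ZCOMMIT_TYPE, already consumed when readCommitLog() starts
  Uint32 tableId;
  Uint32 schemaVersion;
  Uint32 fragmentId;
  Uint32 fileNo;                  // where the corresponding prepare record starts
  Uint32 startPageNo;
  Uint32 startPageIndex;
  Uint32 stopPageNo;
  Uint32 gci;
};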
-
-/* ------------------------------------------------------------------------- */
-/* ------- READ LOG PAGES FROM DISK IN ORDER TO EXECUTE A LOG ------- */
-/* RECORD WHICH WAS NOT FOUND IN MAIN MEMORY. */
-/* */
-/* SUBROUTINE SHORT NAME = REL */
-/* ------------------------------------------------------------------------- */
-void Dblqh::readExecLog(Signal* signal)
-{
- UintR trelIndex;
- UintR trelI;
-
- seizeLfo(signal);
- initLfo(signal);
- trelI = logPartPtr.p->execSrStopPageNo - logPartPtr.p->execSrStartPageNo;
- arrGuard(trelI + 1, 16);
- lfoPtr.p->logPageArray[trelI + 1] = logPartPtr.p->execSrStartPageNo;
- for (trelIndex = logPartPtr.p->execSrStopPageNo; (trelIndex >= logPartPtr.p->execSrStartPageNo) &&
- (UintR)~trelIndex; trelIndex--) {
- jam();
- seizeLogpage(signal);
- arrGuard(trelI, 16);
- lfoPtr.p->logPageArray[trelI] = logPagePtr.i;
- trelI--;
- }//for
- lfoPtr.p->lfoPageNo = logPartPtr.p->execSrStartPageNo;
- lfoPtr.p->noPagesRw = (logPartPtr.p->execSrStopPageNo -
- logPartPtr.p->execSrStartPageNo) + 1;
- lfoPtr.p->firstLfoPage = lfoPtr.p->logPageArray[0];
- signal->theData[0] = logFilePtr.p->fileRef;
- signal->theData[1] = cownref;
- signal->theData[2] = lfoPtr.i;
- signal->theData[3] = ZLIST_OF_MEM_PAGES; // edtjamo TR509 //ZLIST_OF_PAIRS;
- signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
- signal->theData[5] = lfoPtr.p->noPagesRw;
- signal->theData[6] = lfoPtr.p->logPageArray[0];
- signal->theData[7] = lfoPtr.p->logPageArray[1];
- signal->theData[8] = lfoPtr.p->logPageArray[2];
- signal->theData[9] = lfoPtr.p->logPageArray[3];
- signal->theData[10] = lfoPtr.p->logPageArray[4];
- signal->theData[11] = lfoPtr.p->logPageArray[5];
- signal->theData[12] = lfoPtr.p->logPageArray[6];
- signal->theData[13] = lfoPtr.p->logPageArray[7];
- signal->theData[14] = lfoPtr.p->logPageArray[8];
- signal->theData[15] = lfoPtr.p->logPageArray[9];
- sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 16, JBA);
-}//Dblqh::readExecLog()
-
-/* ------------------------------------------------------------------------- */
-/* ------- POSITION AT A NEW MBYTE AND READ 64 KBYTES OF THE FRAGMENT LOG ------- */
-/* */
-/* SUBROUTINE SHORT NAME = RES */
-/* ------------------------------------------------------------------------- */
-void Dblqh::readExecSrNewMbyte(Signal* signal)
-{
- logFilePtr.p->currentFilepage = logFilePtr.p->currentMbyte * ZPAGES_IN_MBYTE;
- logFilePtr.p->filePosition = logFilePtr.p->currentMbyte * ZPAGES_IN_MBYTE;
- logPartPtr.p->execSrPagesRead = 0;
- logPartPtr.p->execSrPagesReading = 0;
- logPartPtr.p->execSrPagesExecuted = 0;
- readExecSr(signal);
- logPartPtr.p->logExecState = LogPartRecord::LES_WAIT_READ_EXEC_SR_NEW_MBYTE;
-}//Dblqh::readExecSrNewMbyte()
-
-/* ------------------------------------------------------------------------- */
-/* ------- READ 64 KBYTES WHEN EXECUTING THE FRAGMENT LOG ------- */
-/* */
-/* SUBROUTINE SHORT NAME = RES */
-/* ------------------------------------------------------------------------- */
-void Dblqh::readExecSr(Signal* signal)
-{
- UintR tresPageid;
- UintR tresIndex;
-
- tresPageid = logFilePtr.p->filePosition;
- seizeLfo(signal);
- initLfo(signal);
- for (tresIndex = 7; (UintR)~tresIndex; tresIndex--) {
- jam();
-/* ------------------------------------------------------------------------- */
-/* GO BACKWARDS SINCE WE INSERT AT THE BEGINNING AND WE WANT THAT FIRST PAGE */
-/* SHALL BE FIRST AND LAST PAGE LAST. */
-/* ------------------------------------------------------------------------- */
- seizeLogpage(signal);
- lfoPtr.p->logPageArray[tresIndex] = logPagePtr.i;
- }//for
- lfoPtr.p->lfoState = LogFileOperationRecord::READ_EXEC_SR;
- lfoPtr.p->lfoPageNo = tresPageid;
- logFilePtr.p->filePosition = logFilePtr.p->filePosition + 8;
- logPartPtr.p->execSrPagesReading = logPartPtr.p->execSrPagesReading + 8;
- lfoPtr.p->noPagesRw = 8;
- lfoPtr.p->firstLfoPage = lfoPtr.p->logPageArray[0];
- signal->theData[0] = logFilePtr.p->fileRef;
- signal->theData[1] = cownref;
- signal->theData[2] = lfoPtr.i;
- signal->theData[3] = ZLIST_OF_MEM_PAGES;
- signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
- signal->theData[5] = 8;
- signal->theData[6] = lfoPtr.p->logPageArray[0];
- signal->theData[7] = lfoPtr.p->logPageArray[1];
- signal->theData[8] = lfoPtr.p->logPageArray[2];
- signal->theData[9] = lfoPtr.p->logPageArray[3];
- signal->theData[10] = lfoPtr.p->logPageArray[4];
- signal->theData[11] = lfoPtr.p->logPageArray[5];
- signal->theData[12] = lfoPtr.p->logPageArray[6];
- signal->theData[13] = lfoPtr.p->logPageArray[7];
- signal->theData[14] = tresPageid;
- sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 15, JBA);
-}//Dblqh::readExecSr()
-
-/* ------------------------------------------------------------------------- */
-/* ------------ READ THE PRIMARY KEY FROM THE LOG ---------------- */
-/* */
-/* SUBROUTINE SHORT NAME = RK */
-/* --------------------------------------------------------------------------*/
-void Dblqh::readKey(Signal* signal)
-{
- Uint32 remainingLen = tcConnectptr.p->primKeyLen;
- ndbrequire(remainingLen != 0);
- Uint32 dataLen = remainingLen;
- if (remainingLen > 4)
- dataLen = 4;
- readLogData(signal, dataLen, &tcConnectptr.p->tupkeyData[0]);
- remainingLen -= dataLen;
- while (remainingLen > 0) {
- jam();
- seizeTupkeybuf(signal);
- dataLen = remainingLen;
- if (dataLen > 4)
- dataLen = 4;
- readLogData(signal, dataLen, &databufptr.p->data[0]);
- remainingLen -= dataLen;
- }//while
-}//Dblqh::readKey()
-
-/* ------------------------------------------------------------------------- */
-/* ------------ READ A NUMBER OF WORDS FROM LOG INTO A DATA BUFFER ---------------- */
-/* */
-/* SUBROUTINE SHORT NAME = RLD */
-/* --------------------------------------------------------------------------*/
-void Dblqh::readLogData(Signal* signal, Uint32 noOfWords, Uint32* dataPtr)
-{
- ndbrequire(noOfWords < 32);
- Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- if ((logPos + noOfWords) >= ZPAGE_SIZE) {
- for (Uint32 i = 0; i < noOfWords; i++)
- dataPtr[i] = readLogwordExec(signal);
- } else {
- MEMCOPY_NO_WORDS(dataPtr, &logPagePtr.p->logPageWord[logPos], noOfWords);
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + noOfWords;
- }//if
-}//Dblqh::readLogData()
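// Note on the branch above: the ">=" means the word-by-word path is taken not
// only when the data crosses a page boundary, but also when it would end exactly
// at ZPAGE_SIZE; readLogwordExec() then performs the page switch, so
// ZCURR_PAGE_INDEX is never left sitting at the page end without the page
// having been advanced.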
-
-/* ------------------------------------------------------------------------- */
-/* ------------ READ THE LOG HEADER OF A PREPARE LOG RECORD ---------------- */
-/* */
-/* SUBROUTINE SHORT NAME = RLH */
-/* --------------------------------------------------------------------------*/
-void Dblqh::readLogHeader(Signal* signal)
-{
- Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- if ((logPos + ZLOG_HEAD_SIZE) < ZPAGE_SIZE) {
- jam();
- tcConnectptr.p->hashValue = logPagePtr.p->logPageWord[logPos + 2];
- tcConnectptr.p->operation = logPagePtr.p->logPageWord[logPos + 3];
- tcConnectptr.p->totSendlenAi = logPagePtr.p->logPageWord[logPos + 4];
- tcConnectptr.p->primKeyLen = logPagePtr.p->logPageWord[logPos + 5];
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + ZLOG_HEAD_SIZE;
- } else {
- jam();
- readLogwordExec(signal); /* IGNORE PREPARE LOG RECORD TYPE */
- readLogwordExec(signal); /* IGNORE LOG RECORD SIZE */
- tcConnectptr.p->hashValue = readLogwordExec(signal);
- tcConnectptr.p->operation = readLogwordExec(signal);
- tcConnectptr.p->totSendlenAi = readLogwordExec(signal);
- tcConnectptr.p->primKeyLen = readLogwordExec(signal);
- }//if
-}//Dblqh::readLogHeader()
-
-/* ------------------------------------------------------------------------- */
-/* ------- READ A WORD FROM THE LOG ------- */
-/* */
-/* OUTPUT: TLOG_WORD */
-/* SUBROUTINE SHORT NAME = RLW */
-/* ------------------------------------------------------------------------- */
-Uint32 Dblqh::readLogword(Signal* signal)
-{
- Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- ndbrequire(logPos < ZPAGE_SIZE);
- Uint32 logWord = logPagePtr.p->logPageWord[logPos];
- logPos++;
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos;
- if (logPos >= ZPAGE_SIZE) {
- jam();
- logPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE];
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
- logFilePtr.p->currentLogpage = logPagePtr.i;
- logFilePtr.p->currentFilepage++;
- logPartPtr.p->execSrPagesRead--;
- logPartPtr.p->execSrPagesExecuted++;
- }//if
- return logWord;
-}//Dblqh::readLogword()
-
-/* ------------------------------------------------------------------------- */
-/* ------- READ A WORD FROM THE LOG WHEN EXECUTING A LOG RECORD ------- */
-/* */
-/* OUTPUT: TLOG_WORD */
-/* SUBROUTINE SHORT NAME = RWE */
-/* ------------------------------------------------------------------------- */
-Uint32 Dblqh::readLogwordExec(Signal* signal)
-{
- Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- ndbrequire(logPos < ZPAGE_SIZE);
- Uint32 logWord = logPagePtr.p->logPageWord[logPos];
- logPos++;
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos;
- if (logPos >= ZPAGE_SIZE) {
- jam();
- logPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE];
- if (logPagePtr.i != RNIL){
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
- } else {
- // Reading word at the last pos in the last page
- // Don't step forward to next page!
- jam();
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]++;
- }
- }//if
- return logWord;
-}//Dblqh::readLogwordExec()
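// Note: compared with readLogword() above, readLogwordExec() skips the
// currentLogpage/currentFilepage/execSrPages* bookkeeping, and when the word
// just read was the last one of the last page (ZNEXT_PAGE == RNIL) it simply
// steps ZCURR_PAGE_INDEX past the page end instead of following a next-page link.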
-
-/* ------------------------------------------------------------------------- */
-/* ------- READ A SINGLE PAGE FROM THE LOG ------- */
-/* */
-/* INPUT: TRSP_PAGE_NO */
-/* SUBROUTINE SHORT NAME = RSP */
-/* ------------------------------------------------------------------------- */
-void Dblqh::readSinglePage(Signal* signal, Uint32 pageNo)
-{
- seizeLfo(signal);
- initLfo(signal);
- seizeLogpage(signal);
- lfoPtr.p->firstLfoPage = logPagePtr.i;
- lfoPtr.p->lfoPageNo = pageNo;
- lfoPtr.p->noPagesRw = 1;
- signal->theData[0] = logFilePtr.p->fileRef;
- signal->theData[1] = cownref;
- signal->theData[2] = lfoPtr.i;
- signal->theData[3] = ZLIST_OF_PAIRS;
- signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
- signal->theData[5] = 1;
- signal->theData[6] = logPagePtr.i;
- signal->theData[7] = pageNo;
- sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
-}//Dblqh::readSinglePage()
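// Summary of the FSREADREQ payload as used by readExecLog(), readExecSr() and
// readSinglePage() above (inferred from these call sites only, not from the
// NDBFS headers):
//   theData[0]  file reference            (logFilePtr.p->fileRef)
//   theData[1]  block reference to reply  (cownref)
//   theData[2]  caller's operation record (lfoPtr.i)
//   theData[3]  page-list format flag     (ZLIST_OF_PAIRS, ZLIST_OF_MEM_PAGES, ...)
//   theData[4]  ZVAR_NO_LOG_PAGE_WORD in every call in this block
//   theData[5]  number of pages
//   theData[6+] log page i-values, followed (depending on the format flag)
//               by the file page number(s)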
-
-/* --------------------------------------------------------------------------
- * ------- RELEASE OPERATION FROM ACTIVE LIST ON FRAGMENT -------
- *
- * SUBROUTINE SHORT NAME = RAC
- * ------------------------------------------------------------------------- */
-void Dblqh::releaseAccList(Signal* signal)
-{
- TcConnectionrecPtr racTcNextConnectptr;
- TcConnectionrecPtr racTcPrevConnectptr;
-
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- racTcPrevConnectptr.i = tcConnectptr.p->prevTc;
- racTcNextConnectptr.i = tcConnectptr.p->nextTc;
- if (tcConnectptr.p->listState != TcConnectionrec::ACC_BLOCK_LIST) {
- jam();
- systemError(signal);
- }//if
- tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
- if (racTcNextConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(racTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- racTcNextConnectptr.p->prevTc = racTcPrevConnectptr.i;
- }//if
- if (racTcPrevConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(racTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- racTcPrevConnectptr.p->nextTc = tcConnectptr.p->nextTc;
- } else {
- jam();
- /* ---------------------------------------------------------------------
- * OPERATION RECORD IS FIRST IN ACTIVE LIST
- * THIS MEANS THAT THERE EXISTS NO PREVIOUS TC THAT NEEDS TO BE UPDATED.
- * --------------------------------------------------------------------- */
- fragptr.p->accBlockedList = racTcNextConnectptr.i;
- }//if
-}//Dblqh::releaseAccList()
-
-/* --------------------------------------------------------------------------
- * ------- REMOVE COPY FRAGMENT FROM ACTIVE COPY LIST -------
- *
- * ------------------------------------------------------------------------- */
-void Dblqh::releaseActiveCopy(Signal* signal)
-{
- /* MUST BE 8 BIT */
- UintR tracFlag;
- UintR tracIndex;
-
- tracFlag = ZFALSE;
- for (tracIndex = 0; tracIndex < 4; tracIndex++) {
- if (tracFlag == ZFALSE) {
- jam();
- if (cactiveCopy[tracIndex] == fragptr.i) {
- jam();
- tracFlag = ZTRUE;
- }//if
- } else {
- if (tracIndex < 3) {
- jam();
- cactiveCopy[tracIndex - 1] = cactiveCopy[tracIndex];
- } else {
- jam();
- cactiveCopy[3] = RNIL;
- }//if
- }//if
- }//for
- ndbrequire(tracFlag == ZTRUE);
- cnoActiveCopy--;
-}//Dblqh::releaseActiveCopy()
-
-/* --------------------------------------------------------------------------
- * ------- RELEASE OPERATION FROM ACTIVE LIST ON FRAGMENT -------
- *
- * SUBROUTINE SHORT NAME = RAL
- * ------------------------------------------------------------------------- */
-void Dblqh::releaseActiveList(Signal* signal)
-{
- TcConnectionrecPtr ralTcNextConnectptr;
- TcConnectionrecPtr ralTcPrevConnectptr;
- ralTcPrevConnectptr.i = tcConnectptr.p->prevTc;
- ralTcNextConnectptr.i = tcConnectptr.p->nextTc;
- ndbrequire(tcConnectptr.p->listState == TcConnectionrec::IN_ACTIVE_LIST);
- tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
- if (ralTcNextConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(ralTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- ralTcNextConnectptr.p->prevTc = ralTcPrevConnectptr.i;
- }//if
- if (ralTcPrevConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(ralTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- ralTcPrevConnectptr.p->nextTc = tcConnectptr.p->nextTc;
- } else {
- jam();
- /* ----------------------------------------------------------------------
- * OPERATION RECORD IS FIRST IN ACTIVE LIST
- * THIS MEANS THAT THERE EXISTS NO PREVIOUS TC THAT NEEDS TO BE UPDATED.
- * --------------------------------------------------------------------- */
- fragptr.p->activeList = ralTcNextConnectptr.i;
- }//if
-}//Dblqh::releaseActiveList()
-
-/* --------------------------------------------------------------------------
- * ------- RELEASE ADD FRAGMENT RECORD -------
- *
- * ------------------------------------------------------------------------- */
-void Dblqh::releaseAddfragrec(Signal* signal)
-{
- addfragptr.p->addfragStatus = AddFragRecord::FREE;
- addfragptr.p->nextAddfragrec = cfirstfreeAddfragrec;
- cfirstfreeAddfragrec = addfragptr.i;
-}//Dblqh::releaseAddfragrec()
-
-/* --------------------------------------------------------------------------
- * ------- RELEASE FRAGMENT RECORD -------
- *
- * ------------------------------------------------------------------------- */
-void Dblqh::releaseFragrec()
-{
- fragptr.p->fragStatus = Fragrecord::FREE;
- fragptr.p->nextFrag = cfirstfreeFragrec;
- cfirstfreeFragrec = fragptr.i;
-}//Dblqh::releaseFragrec()
-
-/* --------------------------------------------------------------------------
- * ------- RELEASE LCP LOCAL RECORD -------
- *
- * ------------------------------------------------------------------------- */
-void Dblqh::releaseLcpLoc(Signal* signal)
-{
- lcpLocptr.p->lcpLocstate = LcpLocRecord::IDLE;
- lcpLocptr.p->nextLcpLoc = cfirstfreeLcpLoc;
- cfirstfreeLcpLoc = lcpLocptr.i;
-}//Dblqh::releaseLcpLoc()
-
-/* --------------------------------------------------------------------------
- * ------- RELEASE A PAGE REFERENCE RECORD. -------
- *
- * ------------------------------------------------------------------------- */
-void Dblqh::releasePageRef(Signal* signal)
-{
- pageRefPtr.p->prNext = cfirstfreePageRef;
- cfirstfreePageRef = pageRefPtr.i;
-}//Dblqh::releasePageRef()
-
-/* --------------------------------------------------------------------------
- * --- RELEASE ALL PAGES IN THE MM BUFFER AFTER EXECUTING THE LOG ON IT. ----
- *
- * ------------------------------------------------------------------------- */
-void Dblqh::releaseMmPages(Signal* signal)
-{
-RMP_LOOP:
- jam();
- pageRefPtr.i = logPartPtr.p->firstPageRef;
- if (pageRefPtr.i != RNIL) {
- jam();
- ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
- releasePrPages(signal);
- removePageRef(signal);
- goto RMP_LOOP;
- }//if
-}//Dblqh::releaseMmPages()
-
-/* --------------------------------------------------------------------------
- * ------- RELEASE A SET OF PAGES AFTER EXECUTING THE LOG ON IT. -------
- *
- * ------------------------------------------------------------------------- */
-void Dblqh::releasePrPages(Signal* signal)
-{
- UintR trppIndex;
-
- for (trppIndex = 0; trppIndex <= 7; trppIndex++) {
- jam();
- logPagePtr.i = pageRefPtr.p->pageRef[trppIndex];
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- releaseLogpage(signal);
- }//for
-}//Dblqh::releasePrPages()
-
-/* --------------------------------------------------------------------------
- * ------- RELEASE OPERATION FROM WAIT QUEUE LIST ON FRAGMENT -------
- *
- * SUBROUTINE SHORT NAME : RWA
- * ------------------------------------------------------------------------- */
-void Dblqh::releaseWaitQueue(Signal* signal)
-{
- TcConnectionrecPtr rwaTcNextConnectptr;
- TcConnectionrecPtr rwaTcPrevConnectptr;
-
- fragptr.i = tcConnectptr.p->fragmentptr;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- rwaTcPrevConnectptr.i = tcConnectptr.p->prevTc;
- rwaTcNextConnectptr.i = tcConnectptr.p->nextTc;
- if (tcConnectptr.p->listState != TcConnectionrec::WAIT_QUEUE_LIST) {
- jam();
- systemError(signal);
- }//if
- tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
- if (rwaTcNextConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(rwaTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- rwaTcNextConnectptr.p->prevTc = rwaTcPrevConnectptr.i;
- } else {
- jam();
- fragptr.p->lastWaitQueue = rwaTcPrevConnectptr.i;
- }//if
- if (rwaTcPrevConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(rwaTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- rwaTcPrevConnectptr.p->nextTc = rwaTcNextConnectptr.i;
- } else {
- jam();
- fragptr.p->firstWaitQueue = rwaTcNextConnectptr.i;
- }//if
-}//Dblqh::releaseWaitQueue()
-
-/* --------------------------------------------------------------------------
- * ------- REMOVE OPERATION RECORD FROM THE LOG PART'S LIST OF -------
- * NOT YET COMPLETED OPERATIONS IN THE LOG.
- *
- * SUBROUTINE SHORT NAME = RLO
- * ------------------------------------------------------------------------- */
-void Dblqh::removeLogTcrec(Signal* signal)
-{
- TcConnectionrecPtr rloTcNextConnectptr;
- TcConnectionrecPtr rloTcPrevConnectptr;
- rloTcPrevConnectptr.i = tcConnectptr.p->prevLogTcrec;
- rloTcNextConnectptr.i = tcConnectptr.p->nextLogTcrec;
- if (rloTcNextConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(rloTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- rloTcNextConnectptr.p->prevLogTcrec = rloTcPrevConnectptr.i;
- } else {
- jam();
- logPartPtr.p->lastLogTcrec = rloTcPrevConnectptr.i;
- }//if
- if (rloTcPrevConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(rloTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- rloTcPrevConnectptr.p->nextLogTcrec = rloTcNextConnectptr.i;
- } else {
- jam();
- logPartPtr.p->firstLogTcrec = rloTcNextConnectptr.i;
- }//if
-}//Dblqh::removeLogTcrec()
-
-/* --------------------------------------------------------------------------
- * ------- REMOVE PAGE REFERENCE RECORD FROM LIST IN THIS LOG PART -------
- *
- * SUBROUTINE SHORT NAME = RPR
- * ------------------------------------------------------------------------- */
-void Dblqh::removePageRef(Signal* signal)
-{
- PageRefRecordPtr rprPageRefPtr;
-
- pageRefPtr.i = logPartPtr.p->firstPageRef;
- if (pageRefPtr.i != RNIL) {
- jam();
- ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
- if (pageRefPtr.p->prNext == RNIL) {
- jam();
- logPartPtr.p->lastPageRef = RNIL;
- logPartPtr.p->firstPageRef = RNIL;
- } else {
- jam();
- logPartPtr.p->firstPageRef = pageRefPtr.p->prNext;
- rprPageRefPtr.i = pageRefPtr.p->prNext;
- ptrCheckGuard(rprPageRefPtr, cpageRefFileSize, pageRefRecord);
- rprPageRefPtr.p->prPrev = RNIL;
- }//if
- releasePageRef(signal);
- }//if
-}//Dblqh::removePageRef()
-
-/* ------------------------------------------------------------------------- */
-/* ------- RETURN FROM EXECUTION OF LOG ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-Uint32 Dblqh::returnExecLog(Signal* signal)
-{
- tcConnectptr.p->connectState = TcConnectionrec::CONNECTED;
- initLogPointers(signal);
- logPartPtr.p->execSrExecuteIndex++;
- Uint32 result = checkIfExecLog(signal);
- if (result == ZOK) {
- jam();
-/* ------------------------------------------------------------------------- */
-/* THIS LOG RECORD WILL BE EXECUTED AGAIN TOWARDS ANOTHER NODE. */
-/* ------------------------------------------------------------------------- */
- logPagePtr.i = logPartPtr.p->execSrLogPage;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
- logPartPtr.p->execSrLogPageIndex;
- } else {
- jam();
-/* ------------------------------------------------------------------------- */
-/* NO MORE EXECUTION OF THIS LOG RECORD. */
-/* ------------------------------------------------------------------------- */
- if (logPartPtr.p->logExecState ==
- LogPartRecord::LES_EXEC_LOGREC_FROM_FILE) {
- jam();
-/* ------------------------------------------------------------------------- */
-/* THE LOG RECORD WAS READ FROM DISK. RELEASE ITS PAGES IMMEDIATELY. */
-/* ------------------------------------------------------------------------- */
- lfoPtr.i = logPartPtr.p->execSrLfoRec;
- ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
- releaseLfoPages(signal);
- releaseLfo(signal);
- logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG;
- if (logPartPtr.p->execSrExecLogFile != logPartPtr.p->currentLogfile) {
- jam();
- LogFileRecordPtr clfLogFilePtr;
- clfLogFilePtr.i = logPartPtr.p->execSrExecLogFile;
- ptrCheckGuard(clfLogFilePtr, clogFileFileSize, logFileRecord);
- clfLogFilePtr.p->logFileStatus = LogFileRecord::CLOSING_EXEC_LOG;
- closeFile(signal, clfLogFilePtr);
- result = ZCLOSE_FILE;
- }//if
- }//if
- logPartPtr.p->execSrExecuteIndex = 0;
- logPartPtr.p->execSrLogPage = RNIL;
- logPartPtr.p->execSrLogPageIndex = ZNIL;
- logPagePtr.i = logFilePtr.p->currentLogpage;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPartPtr.p->savePageIndex;
- }//if
- return result;
-}//Dblqh::returnExecLog()
-
-/* --------------------------------------------------------------------------
- * ------- SEIZE ADD FRAGMENT RECORD ------
- *
- * ------------------------------------------------------------------------- */
-void Dblqh::seizeAddfragrec(Signal* signal)
-{
- addfragptr.i = cfirstfreeAddfragrec;
- ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
- cfirstfreeAddfragrec = addfragptr.p->nextAddfragrec;
-}//Dblqh::seizeAddfragrec()
-
-/* --------------------------------------------------------------------------
- * ------- SEIZE FRAGMENT RECORD -------
- *
- * ------------------------------------------------------------------------- */
-void Dblqh::seizeFragmentrec(Signal* signal)
-{
- fragptr.i = cfirstfreeFragrec;
- ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- cfirstfreeFragrec = fragptr.p->nextFrag;
- fragptr.p->nextFrag = RNIL;
-}//Dblqh::seizeFragmentrec()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SEIZE A PAGE REFERENCE RECORD. ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dblqh::seizePageRef(Signal* signal)
-{
- pageRefPtr.i = cfirstfreePageRef;
- ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
- cfirstfreePageRef = pageRefPtr.p->prNext;
- pageRefPtr.p->prNext = RNIL;
-}//Dblqh::seizePageRef()
-
-/* --------------------------------------------------------------------------
- * ------- SEND ABORTED -------
- *
- * ------------------------------------------------------------------------- */
-void Dblqh::sendAborted(Signal* signal)
-{
- UintR TlastInd;
- if (tcConnectptr.p->nextReplica == ZNIL) {
- TlastInd = ZTRUE;
- } else {
- TlastInd = ZFALSE;
- }//if
- signal->theData[0] = tcConnectptr.p->tcOprec;
- signal->theData[1] = tcConnectptr.p->transid[0];
- signal->theData[2] = tcConnectptr.p->transid[1];
- signal->theData[3] = cownNodeid;
- signal->theData[4] = TlastInd;
- sendSignal(tcConnectptr.p->tcBlockref, GSN_ABORTED, signal, 5, JBB);
- return;
-}//Dblqh::sendAborted()
-
-/* --------------------------------------------------------------------------
- * ------- SEND LQH_TRANSCONF -------
- *
- * ------------------------------------------------------------------------- */
-void Dblqh::sendLqhTransconf(Signal* signal, LqhTransConf::OperationStatus stat)
-{
- tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec;
- ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
-
- Uint32 reqInfo = 0;
- LqhTransConf::setReplicaType(reqInfo, tcConnectptr.p->replicaType);
- LqhTransConf::setReplicaNo(reqInfo, tcConnectptr.p->seqNoReplica);
- LqhTransConf::setLastReplicaNo(reqInfo, tcConnectptr.p->lastReplicaNo);
- LqhTransConf::setSimpleFlag(reqInfo, tcConnectptr.p->opSimple);
- LqhTransConf::setDirtyFlag(reqInfo, tcConnectptr.p->dirtyOp);
- LqhTransConf::setOperation(reqInfo, tcConnectptr.p->operation);
-
- LqhTransConf * const lqhTransConf = (LqhTransConf *)&signal->theData[0];
- lqhTransConf->tcRef = tcNodeFailptr.p->newTcRef;
- lqhTransConf->lqhNodeId = cownNodeid;
- lqhTransConf->operationStatus = stat;
- lqhTransConf->lqhConnectPtr = tcConnectptr.i;
- lqhTransConf->transId1 = tcConnectptr.p->transid[0];
- lqhTransConf->transId2 = tcConnectptr.p->transid[1];
- lqhTransConf->oldTcOpRec = tcConnectptr.p->tcOprec;
- lqhTransConf->requestInfo = reqInfo;
- lqhTransConf->gci = tcConnectptr.p->gci;
- lqhTransConf->nextNodeId1 = tcConnectptr.p->nextReplica;
- lqhTransConf->nextNodeId2 = tcConnectptr.p->nodeAfterNext[0];
- lqhTransConf->nextNodeId3 = tcConnectptr.p->nodeAfterNext[1];
- lqhTransConf->apiRef = tcConnectptr.p->applRef;
- lqhTransConf->apiOpRec = tcConnectptr.p->applOprec;
- lqhTransConf->tableId = tcConnectptr.p->tableref;
- sendSignal(tcNodeFailptr.p->newTcBlockref, GSN_LQH_TRANSCONF,
- signal, LqhTransConf::SignalLength, JBB);
- tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1;
- signal->theData[0] = ZLQH_TRANS_NEXT;
- signal->theData[1] = tcNodeFailptr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
-}//Dblqh::sendLqhTransconf()
-
-/* --------------------------------------------------------------------------
- * ------- START ANOTHER PHASE OF LOG EXECUTION -------
- * RESET THE VARIABLES NEEDED BY THIS PROCESS AND SEND THE START SIGNAL
- *
- * ------------------------------------------------------------------------- */
-void Dblqh::startExecSr(Signal* signal)
-{
- cnoFragmentsExecSr = 0;
- signal->theData[0] = cfirstCompletedFragSr;
- signal->theData[1] = RNIL;
- sendSignal(cownref, GSN_START_EXEC_SR, signal, 2, JBB);
-}//Dblqh::startExecSr()
-
-/* ¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤
- * ¤¤¤¤¤¤¤ LOG MODULE ¤¤¤¤¤¤¤
- * ¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤ */
-/* --------------------------------------------------------------------------
- * ------- STEP FORWARD IN FRAGMENT LOG DURING LOG EXECUTION -------
- *
- * ------------------------------------------------------------------------- */
-void Dblqh::stepAhead(Signal* signal, Uint32 stepAheadWords)
-{
- UintR tsaPos;
-
- tsaPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- while ((stepAheadWords + tsaPos) >= ZPAGE_SIZE) {
- jam();
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_SIZE;
- stepAheadWords = stepAheadWords - (ZPAGE_SIZE - tsaPos);
- logFilePtr.p->currentLogpage = logPagePtr.p->logPageWord[ZNEXT_PAGE];
- logPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE];
- logFilePtr.p->currentFilepage++;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
- logPartPtr.p->execSrPagesRead--;
- logPartPtr.p->execSrPagesExecuted++;
- tsaPos = ZPAGE_HEADER_SIZE;
- }//while
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = stepAheadWords + tsaPos;
-}//Dblqh::stepAhead()
-
-/* --------------------------------------------------------------------------
- * ------- WRITE A ABORT LOG RECORD -------
- *
- * SUBROUTINE SHORT NAME: WAL
- * ------------------------------------------------------------------------- */
-void Dblqh::writeAbortLog(Signal* signal)
-{
- if ((ZABORT_LOG_SIZE + ZNEXT_LOG_SIZE) >
- logFilePtr.p->remainingWordsInMbyte) {
- jam();
- changeMbyte(signal);
- }//if
- logFilePtr.p->remainingWordsInMbyte =
- logFilePtr.p->remainingWordsInMbyte - ZABORT_LOG_SIZE;
- writeLogWord(signal, ZABORT_TYPE);
- writeLogWord(signal, tcConnectptr.p->transid[0]);
- writeLogWord(signal, tcConnectptr.p->transid[1]);
-}//Dblqh::writeAbortLog()
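// Note: the record written here is ZABORT_TYPE plus the two transid words,
// which suggests ZABORT_LOG_SIZE is three words; the subtraction above reserves
// exactly that much of the current MByte before the words are written.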
-
-/* --------------------------------------------------------------------------
- * ------- WRITE A COMMIT LOG RECORD -------
- *
- * SUBROUTINE SHORT NAME: WCL
- * ------------------------------------------------------------------------- */
-void Dblqh::writeCommitLog(Signal* signal, LogPartRecordPtr regLogPartPtr)
-{
- LogFileRecordPtr regLogFilePtr;
- LogPageRecordPtr regLogPagePtr;
- TcConnectionrec * const regTcPtr = tcConnectptr.p;
- regLogFilePtr.i = regLogPartPtr.p->currentLogfile;
- ptrCheckGuard(regLogFilePtr, clogFileFileSize, logFileRecord);
- regLogPagePtr.i = regLogFilePtr.p->currentLogpage;
- Uint32 twclTmp = regLogFilePtr.p->remainingWordsInMbyte;
- ptrCheckGuard(regLogPagePtr, clogPageFileSize, logPageRecord);
- logPartPtr = regLogPartPtr;
- logFilePtr = regLogFilePtr;
- logPagePtr = regLogPagePtr;
- if ((ZCOMMIT_LOG_SIZE + ZNEXT_LOG_SIZE) > twclTmp) {
- jam();
- changeMbyte(signal);
- twclTmp = logFilePtr.p->remainingWordsInMbyte;
- }//if
-
- Uint32 twclLogPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- Uint32 tableId = regTcPtr->tableref;
- Uint32 schemaVersion = regTcPtr->schemaVersion;
- Uint32 fragId = regTcPtr->fragmentid;
- Uint32 fileNo = regTcPtr->logStartFileNo;
- Uint32 startPageNo = regTcPtr->logStartPageNo;
- Uint32 pageIndex = regTcPtr->logStartPageIndex;
- Uint32 stopPageNo = regTcPtr->logStopPageNo;
- Uint32 gci = regTcPtr->gci;
- logFilePtr.p->remainingWordsInMbyte = twclTmp - ZCOMMIT_LOG_SIZE;
-
- if ((twclLogPos + ZCOMMIT_LOG_SIZE) >= ZPAGE_SIZE) {
- writeLogWord(signal, ZCOMMIT_TYPE);
- writeLogWord(signal, tableId);
- writeLogWord(signal, schemaVersion);
- writeLogWord(signal, fragId);
- writeLogWord(signal, fileNo);
- writeLogWord(signal, startPageNo);
- writeLogWord(signal, pageIndex);
- writeLogWord(signal, stopPageNo);
- writeLogWord(signal, gci);
- } else {
- Uint32* dataPtr = &logPagePtr.p->logPageWord[twclLogPos];
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = twclLogPos + ZCOMMIT_LOG_SIZE;
- dataPtr[0] = ZCOMMIT_TYPE;
- dataPtr[1] = tableId;
- dataPtr[2] = schemaVersion;
- dataPtr[3] = fragId;
- dataPtr[4] = fileNo;
- dataPtr[5] = startPageNo;
- dataPtr[6] = pageIndex;
- dataPtr[7] = stopPageNo;
- dataPtr[8] = gci;
- }//if
- TcConnectionrecPtr rloTcNextConnectptr;
- TcConnectionrecPtr rloTcPrevConnectptr;
- rloTcPrevConnectptr.i = regTcPtr->prevLogTcrec;
- rloTcNextConnectptr.i = regTcPtr->nextLogTcrec;
- if (rloTcNextConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(rloTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- rloTcNextConnectptr.p->prevLogTcrec = rloTcPrevConnectptr.i;
- } else {
- regLogPartPtr.p->lastLogTcrec = rloTcPrevConnectptr.i;
- }//if
- if (rloTcPrevConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(rloTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- rloTcPrevConnectptr.p->nextLogTcrec = rloTcNextConnectptr.i;
- } else {
- regLogPartPtr.p->firstLogTcrec = rloTcNextConnectptr.i;
- }//if
-}//Dblqh::writeCommitLog()
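// Note: the list manipulation at the end of writeCommitLog() is the same unlink
// that removeLogTcrec() performs, written out inline against regLogPartPtr,
// presumably so the commit path avoids the extra call.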
-
-/* --------------------------------------------------------------------------
- * ------- WRITE A COMPLETED GCI LOG RECORD -------
- *
- * SUBROUTINE SHORT NAME: WCG
- * Input Pointers:
- *   logFilePtr
- *   logPartPtr
- * ------------------------------------------------------------------------- */
-void Dblqh::writeCompletedGciLog(Signal* signal)
-{
- if ((ZCOMPLETED_GCI_LOG_SIZE + ZNEXT_LOG_SIZE) >
- logFilePtr.p->remainingWordsInMbyte) {
- jam();
- changeMbyte(signal);
- }//if
- logFilePtr.p->remainingWordsInMbyte =
- logFilePtr.p->remainingWordsInMbyte - ZCOMPLETED_GCI_LOG_SIZE;
- writeLogWord(signal, ZCOMPLETED_GCI_TYPE);
- writeLogWord(signal, cnewestCompletedGci);
- logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci;
-}//Dblqh::writeCompletedGciLog()
-
-/* --------------------------------------------------------------------------
- * ------- WRITE A DIRTY PAGE DURING LOG EXECUTION -------
- *
- * SUBROUTINE SHORT NAME: WD
- * ------------------------------------------------------------------------- */
-void Dblqh::writeDirty(Signal* signal)
-{
- logPagePtr.p->logPageWord[ZPOS_DIRTY] = ZNOT_DIRTY;
-
- // Calculate checksum for page
- logPagePtr.p->logPageWord[ZPOS_CHECKSUM] = calcPageCheckSum(logPagePtr);
-
- seizeLfo(signal);
- initLfo(signal);
- lfoPtr.p->lfoPageNo = logPartPtr.p->prevFilepage;
- lfoPtr.p->noPagesRw = 1;
- lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_DIRTY;
- lfoPtr.p->firstLfoPage = logPagePtr.i;
- signal->theData[0] = logFilePtr.p->fileRef;
- signal->theData[1] = cownref;
- signal->theData[2] = lfoPtr.i;
- signal->theData[3] = ZLIST_OF_PAIRS_SYNCH;
- signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
- signal->theData[5] = 1;
- signal->theData[6] = logPagePtr.i;
- signal->theData[7] = logPartPtr.p->prevFilepage;
- sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
-}//Dblqh::writeDirty()
-
-/* --------------------------------------------------------------------------
- * ------- WRITE A WORD INTO THE LOG, CHECK FOR NEW PAGE -------
- *
- * SUBROUTINE SHORT NAME: WLW
- * ------------------------------------------------------------------------- */
-void Dblqh::writeLogWord(Signal* signal, Uint32 data)
-{
- Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
- ndbrequire(logPos < ZPAGE_SIZE);
- logPagePtr.p->logPageWord[logPos] = data;
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + 1;
- if ((logPos + 1) == ZPAGE_SIZE) {
- jam();
- completedLogPage(signal, ZNORMAL);
- seizeLogpage(signal);
- initLogpage(signal);
- logFilePtr.p->currentLogpage = logPagePtr.i;
- logFilePtr.p->currentFilepage++;
- }//if
-}//Dblqh::writeLogWord()
-
-/* --------------------------------------------------------------------------
- * ------- WRITE A NEXT LOG RECORD AND CHANGE TO NEXT MBYTE -------
- *
- * SUBROUTINE SHORT NAME: WNL
- * Input Pointers:
- *   logFilePtr (Redefines)
- *   logPagePtr (Redefines)
- *   logPartPtr
- * ------------------------------------------------------------------------- */
-void Dblqh::writeNextLog(Signal* signal)
-{
- LogFileRecordPtr wnlNextLogFilePtr;
- UintR twnlNextFileNo;
- UintR twnlNewMbyte;
- UintR twnlRemWords;
- UintR twnlNextMbyte;
-
-/* -------------------------------------------------- */
-/* CALCULATE THE NEW NUMBER OF REMAINING WORDS */
-/* AS 128*2036 WHERE 128 * 8 KBYTE = 1 MBYTE */
-/* AND 2036 IS THE NUMBER OF WORDS IN A PAGE */
-/* THAT IS USED FOR LOG INFORMATION. */
-/* -------------------------------------------------- */
- twnlRemWords = ZPAGE_SIZE - ZPAGE_HEADER_SIZE;
- twnlRemWords = twnlRemWords * ZPAGES_IN_MBYTE;
- wnlNextLogFilePtr.i = logFilePtr.p->nextLogFile;
- ptrCheckGuard(wnlNextLogFilePtr, clogFileFileSize, logFileRecord);
-/* -------------------------------------------------- */
-/* WRITE THE NEXT LOG RECORD. */
-/* -------------------------------------------------- */
- ndbrequire(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] < ZPAGE_SIZE);
- logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] =
- ZNEXT_MBYTE_TYPE;
- if (logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)) {
- jam();
-/* -------------------------------------------------- */
-/* CALCULATE THE NEW REMAINING WORDS WHEN */
-/* CHANGING LOG FILE IS PERFORMED */
-/* -------------------------------------------------- */
- twnlRemWords = twnlRemWords - (ZPAGE_SIZE - ZPAGE_HEADER_SIZE);
-/* -------------------------------------------------- */
-/* ENSURE THAT THE LOG PAGES ARE WRITTEN AFTER */
-/* WE HAVE CHANGED MBYTE. */
-/* -------------------------------------------------- */
-/* ENSURE LAST PAGE IN PREVIOUS MBYTE IS */
-/* WRITTEN AND THAT THE STATE OF THE WRITE IS */
-/* PROPERLY SET. */
-/* -------------------------------------------------- */
-/* WE HAVE TO CHANGE LOG FILE */
-/* -------------------------------------------------- */
- completedLogPage(signal, ZLAST_WRITE_IN_FILE);
- if (wnlNextLogFilePtr.p->fileNo == 0) {
- jam();
-/* -------------------------------------------------- */
-/* WE HAVE FINALISED A LOG LAP, START FROM LOG */
-/* FILE 0 AGAIN */
-/* -------------------------------------------------- */
- logPartPtr.p->logLap++;
- }//if
- logPartPtr.p->currentLogfile = wnlNextLogFilePtr.i;
- logFilePtr.i = wnlNextLogFilePtr.i;
- logFilePtr.p = wnlNextLogFilePtr.p;
- twnlNewMbyte = 0;
- } else {
- jam();
-/* -------------------------------------------------- */
-/* INCREMENT THE CURRENT MBYTE */
-/* SET PAGE INDEX TO PAGE HEADER SIZE */
-/* -------------------------------------------------- */
- completedLogPage(signal, ZENFORCE_WRITE);
- twnlNewMbyte = logFilePtr.p->currentMbyte + 1;
- }//if
-/* -------------------------------------------------- */
-/* CHANGE TO NEW LOG FILE IF NECESSARY */
-/* UPDATE THE FILE POSITION TO THE NEW MBYTE */
-/* FOUND IN PAGE PART OF TNEXT_LOG_PTR */
-/* ALLOCATE AND INITIATE A NEW PAGE SINCE WE */
-/* HAVE SENT THE PREVIOUS PAGE TO DISK. */
-/* SET THE NEW NUMBER OF REMAINING WORDS IN THE */
-/* NEW MBYTE ALLOCATED. */
-/* -------------------------------------------------- */
- logFilePtr.p->currentMbyte = twnlNewMbyte;
- logFilePtr.p->filePosition = twnlNewMbyte * ZPAGES_IN_MBYTE;
- logFilePtr.p->currentFilepage = twnlNewMbyte * ZPAGES_IN_MBYTE;
- logFilePtr.p->remainingWordsInMbyte = twnlRemWords;
- seizeLogpage(signal);
- if (logFilePtr.p->currentMbyte == 0) {
- jam();
- logFilePtr.p->lastPageWritten = 0;
- if (logFilePtr.p->fileNo == 0) {
- jam();
- releaseLogpage(signal);
- logPagePtr.i = logFilePtr.p->logPageZero;
- ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
- }//if
- }//if
- initLogpage(signal);
- logFilePtr.p->currentLogpage = logPagePtr.i;
- if (logFilePtr.p->currentMbyte == 0) {
- jam();
-/* -------------------------------------------------- */
-/* THIS IS A NEW FILE, WRITE THE FILE DESCRIPTOR*/
-/* ALSO OPEN THE NEXT LOG FILE TO ENSURE THAT */
-/* THIS FILE IS OPEN WHEN ITS TURN COMES. */
-/* -------------------------------------------------- */
- writeFileHeaderOpen(signal, ZNORMAL);
- openNextLogfile(signal);
- logFilePtr.p->fileChangeState = LogFileRecord::BOTH_WRITES_ONGOING;
- }//if
- if (logFilePtr.p->fileNo == logPartPtr.p->logTailFileNo) {
- if (logFilePtr.p->currentMbyte == logPartPtr.p->logTailMbyte) {
- jam();
-/* -------------------------------------------------- */
-/* THE HEAD AND TAIL HAVE MET. THIS SHOULD NEVER */
-/* OCCUR. IT CAN HAPPEN IF THE LOCAL CHECKPOINTS */
-/* TAKE FAR TOO LONG. ONLY VERY SERIOUS TIMING */
-/* PROBLEMS CAN INVOKE THIS SYSTEM CRASH. */
-/* -------------------------------------------------- */
- systemError(signal);
- }//if
- }//if
- if (logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)) {
- jam();
- twnlNextMbyte = 0;
- if (logFilePtr.p->fileChangeState != LogFileRecord::NOT_ONGOING) {
- jam();
- logPartPtr.p->logPartState = LogPartRecord::FILE_CHANGE_PROBLEM;
- }//if
- twnlNextFileNo = wnlNextLogFilePtr.p->fileNo;
- } else {
- jam();
- twnlNextMbyte = logFilePtr.p->currentMbyte + 1;
- twnlNextFileNo = logFilePtr.p->fileNo;
- }//if
- if (twnlNextFileNo == logPartPtr.p->logTailFileNo) {
- if (logPartPtr.p->logTailMbyte == twnlNextMbyte) {
- jam();
-/* -------------------------------------------------- */
-/* THE NEXT MBYTE WILL BE THE TAIL. WE MUST */
-/* STOP LOGGING NEW OPERATIONS. THIS OPERATION */
-/* IS ALLOWED TO PASS. COMMIT, NEXT, COMPLETED */
-/* GCI, ABORT AND FRAGMENT SPLIT RECORDS ARE */
-/* ALSO ALLOWED. OPERATIONS ARE ALLOWED AGAIN */
-/* WHEN THE TAIL IS MOVED FORWARD AS A RESULT */
-/* OF A START_LCP_ROUND SIGNAL FROM DBDIH. */
-/* -------------------------------------------------- */
- logPartPtr.p->logPartState = LogPartRecord::TAIL_PROBLEM;
- }//if
- }//if
-}//Dblqh::writeNextLog()
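// A quick check of the arithmetic in the comment at the top of writeNextLog(),
// using only the figures quoted there (2036 usable words per page, 128 pages
// per MByte; the actual constants may differ):
//   2036 * 128 = 260608 words of log payload per MByte (the initial
//   twnlRemWords); when the MByte switch also changes file, one further page's
//   worth (ZPAGE_SIZE - ZPAGE_HEADER_SIZE) is subtracted, as in the
//   ZLAST_WRITE_IN_FILE branch.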
-
-void
-Dblqh::execDUMP_STATE_ORD(Signal* signal)
-{
- DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0];
- if(dumpState->args[0] == DumpStateOrd::CommitAckMarkersSize){
- infoEvent("LQH: m_commitAckMarkerPool: %d free size: %d",
- m_commitAckMarkerPool.getNoOfFree(),
- m_commitAckMarkerPool.getSize());
- }
- if(dumpState->args[0] == DumpStateOrd::CommitAckMarkersDump){
- infoEvent("LQH: m_commitAckMarkerPool: %d free size: %d",
- m_commitAckMarkerPool.getNoOfFree(),
- m_commitAckMarkerPool.getSize());
-
- CommitAckMarkerIterator iter;
- for(m_commitAckMarkerHash.first(iter); iter.curr.i != RNIL;
- m_commitAckMarkerHash.next(iter)){
- infoEvent("CommitAckMarker: i = %d (0x%x, 0x%x)"
- " ApiRef: 0x%x apiOprec: 0x%x TcNodeId: %d",
- iter.curr.i,
- iter.curr.p->transid1,
- iter.curr.p->transid2,
- iter.curr.p->apiRef,
- iter.curr.p->apiOprec,
- iter.curr.p->tcNodeId);
- }
- }
-
- // Dump info about number of log pages
- if(dumpState->args[0] == DumpStateOrd::LqhDumpNoLogPages){
- infoEvent("LQH: Log pages : %d Free: %d",
- clogPageFileSize,
- cnoOfLogPages);
- }
-
-  // Dump all defined tables that LQH knows about
- if(dumpState->args[0] == DumpStateOrd::LqhDumpAllDefinedTabs){
- for(Uint32 i = 0; i<ctabrecFileSize; i++){
- TablerecPtr tabPtr;
- tabPtr.i = i;
- ptrAss(tabPtr, tablerec);
- if(tabPtr.p->tableStatus != Tablerec::NOT_DEFINED){
- infoEvent("Table %d Status: %d Usage: %d",
- i, tabPtr.p->tableStatus, tabPtr.p->usageCount);
- }
- }
- return;
- }
-
- // Dump all ScanRecords
- if (dumpState->args[0] == DumpStateOrd::LqhDumpAllScanRec){
- Uint32 recordNo = 0;
- if (signal->length() == 1)
- infoEvent("LQH: Dump all ScanRecords - size: %d",
- cscanrecFileSize);
- else if (signal->length() == 2)
- recordNo = dumpState->args[1];
- else
- return;
-
- dumpState->args[0] = DumpStateOrd::LqhDumpOneScanRec;
- dumpState->args[1] = recordNo;
- execDUMP_STATE_ORD(signal);
-
- if (recordNo < cscanrecFileSize-1){
- dumpState->args[0] = DumpStateOrd::LqhDumpAllScanRec;
- dumpState->args[1] = recordNo+1;
- sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
- }
- return;
- }
-
- // Dump all active ScanRecords
- if (dumpState->args[0] == DumpStateOrd::LqhDumpAllActiveScanRec){
- Uint32 recordNo = 0;
- if (signal->length() == 1)
- infoEvent("LQH: Dump active ScanRecord - size: %d",
- cscanrecFileSize);
- else if (signal->length() == 2)
- recordNo = dumpState->args[1];
- else
- return;
-
- ScanRecordPtr sp;
- sp.i = recordNo;
-    c_scanRecordPool.getPtr(sp);
- if (sp.p->scanState != ScanRecord::SCAN_FREE){
- dumpState->args[0] = DumpStateOrd::LqhDumpOneScanRec;
- dumpState->args[1] = recordNo;
- execDUMP_STATE_ORD(signal);
- }
-
- if (recordNo < cscanrecFileSize-1){
- dumpState->args[0] = DumpStateOrd::LqhDumpAllActiveScanRec;
- dumpState->args[1] = recordNo+1;
- sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
- }
- return;
- }
-
- if(dumpState->args[0] == DumpStateOrd::LqhDumpOneScanRec){
- Uint32 recordNo = RNIL;
- if (signal->length() == 2)
- recordNo = dumpState->args[1];
- else
- return;
-
- if (recordNo >= cscanrecFileSize)
- return;
-
- ScanRecordPtr sp;
- sp.i = recordNo;
- c_scanRecordPool.getPtr(sp);
- infoEvent("Dblqh::ScanRecord[%d]: state=%d, type=%d, "
- "complStatus=%d, scanNodeId=%d",
- sp.i,
- sp.p->scanState,
- sp.p->scanType,
- sp.p->scanCompletedStatus,
- sp.p->scanNodeId);
- infoEvent(" apiBref=0x%x, scanAccPtr=%d",
- sp.p->scanApiBlockref,
- sp.p->scanAccPtr);
-    infoEvent(" copyptr=%d, ailen=%d, currBatchRows=%d, maxBatchRows=%d",
- sp.p->copyPtr,
- sp.p->scanAiLength,
- sp.p->m_curr_batch_size_rows,
- sp.p->m_max_batch_size_rows);
- infoEvent(" errCnt=%d, localFid=%d, schV=%d",
- sp.p->scanErrorCounter,
- sp.p->scanLocalFragid,
- sp.p->scanSchemaVersion);
- infoEvent(" stpid=%d, flag=%d, lhold=%d, lmode=%d, num=%d",
- sp.p->scanStoredProcId,
- sp.p->scanFlag,
- sp.p->scanLockHold,
- sp.p->scanLockMode,
- sp.p->scanNumber);
- infoEvent(" relCount=%d, TCwait=%d, TCRec=%d, KIflag=%d",
- sp.p->scanReleaseCounter,
- sp.p->scanTcWaiting,
- sp.p->scanTcrec,
- sp.p->scanKeyinfoFlag);
- return;
- }
- if(dumpState->args[0] == DumpStateOrd::LqhDumpLcpState){
-
- infoEvent("== LQH LCP STATE ==");
- infoEvent(" clcpCompletedState=%d, c_lcpId=%d, cnoOfFragsCheckpointed=%d",
- clcpCompletedState,
- c_lcpId,
- cnoOfFragsCheckpointed);
-
- LcpRecordPtr TlcpPtr;
- // Print information about the current local checkpoint
- TlcpPtr.i = 0;
- ptrAss(TlcpPtr, lcpRecord);
- infoEvent(" lcpState=%d firstLcpLocTup=%d firstLcpLocAcc=%d",
- TlcpPtr.p->lcpState,
- TlcpPtr.p->firstLcpLocTup,
- TlcpPtr.p->firstLcpLocAcc);
- infoEvent(" lcpAccptr=%d lastFragmentFlag=%d",
- TlcpPtr.p->lcpAccptr,
- TlcpPtr.p->lastFragmentFlag);
- infoEvent("currentFragment.fragPtrI=%d",
- TlcpPtr.p->currentFragment.fragPtrI);
- infoEvent("currentFragment.lcpFragOrd.tableId=%d",
- TlcpPtr.p->currentFragment.lcpFragOrd.tableId);
- infoEvent(" lcpQueued=%d reportEmpty=%d",
- TlcpPtr.p->lcpQueued,
- TlcpPtr.p->reportEmpty);
- char buf[8*_NDB_NODE_BITMASK_SIZE+1];
-    infoEvent(" m_EMPTY_LCP_REQ=%s",
- TlcpPtr.p->m_EMPTY_LCP_REQ.getText(buf));
-
- return;
- }
-
-
-
-}//Dblqh::execDUMP_STATE_ORD()
-
-void Dblqh::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
-
- switch (var) {
-
- case NoOfConcurrentCheckpointsAfterRestart:
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfConcurrentCheckpointsDuringRestart:
- // Valid only during start so value not set.
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-}//execSET_VAR_REQ()
-
-
-/* **************************************************************** */
-/* ---------------------------------------------------------------- */
-/* ---------------------- TRIGGER HANDLING ------------------------ */
-/* ---------------------------------------------------------------- */
-/* */
-/* All trigger signals from TRIX are forwarded to TUP */
-/* ---------------------------------------------------------------- */
-/* **************************************************************** */
-
-// Trigger signals
-void
-Dblqh::execCREATE_TRIG_REQ(Signal* signal)
-{
- jamEntry();
- NodeId myNodeId = getOwnNodeId();
- BlockReference tupref = calcTupBlockRef(myNodeId);
-
- sendSignal(tupref, GSN_CREATE_TRIG_REQ, signal, CreateTrigReq::SignalLength, JBB);
-}
-
-void
-Dblqh::execCREATE_TRIG_CONF(Signal* signal)
-{
- jamEntry();
- NodeId myNodeId = getOwnNodeId();
- BlockReference dictref = calcDictBlockRef(myNodeId);
-
- sendSignal(dictref, GSN_CREATE_TRIG_CONF, signal, CreateTrigConf::SignalLength, JBB);
-}
-
-void
-Dblqh::execCREATE_TRIG_REF(Signal* signal)
-{
- jamEntry();
- NodeId myNodeId = getOwnNodeId();
- BlockReference dictref = calcDictBlockRef(myNodeId);
-
- sendSignal(dictref, GSN_CREATE_TRIG_REF, signal, CreateTrigRef::SignalLength, JBB);
-}
-
-void
-Dblqh::execDROP_TRIG_REQ(Signal* signal)
-{
- jamEntry();
- NodeId myNodeId = getOwnNodeId();
- BlockReference tupref = calcTupBlockRef(myNodeId);
-
- sendSignal(tupref, GSN_DROP_TRIG_REQ, signal, DropTrigReq::SignalLength, JBB);
-}
-
-void
-Dblqh::execDROP_TRIG_CONF(Signal* signal)
-{
- jamEntry();
- NodeId myNodeId = getOwnNodeId();
- BlockReference dictref = calcDictBlockRef(myNodeId);
-
- sendSignal(dictref, GSN_DROP_TRIG_CONF, signal, DropTrigConf::SignalLength, JBB);
-}
-
-void
-Dblqh::execDROP_TRIG_REF(Signal* signal)
-{
- jamEntry();
- NodeId myNodeId = getOwnNodeId();
- BlockReference dictref = calcDictBlockRef(myNodeId);
-
- sendSignal(dictref, GSN_DROP_TRIG_REF, signal, DropTrigRef::SignalLength, JBB);
-}
-
-Uint32 Dblqh::calcPageCheckSum(LogPageRecordPtr logP){
- Uint32 checkSum = 37;
-#ifdef VM_TRACE
- for (Uint32 i = (ZPOS_CHECKSUM+1); i<ZPAGE_SIZE; i++)
- checkSum = logP.p->logPageWord[i] ^ checkSum;
-#endif
- return checkSum;
- }
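Editorial sketch (not part of the original file): a hypothetical verifier matching the XOR scheme above, with Uint32 as in the surrounding code; note that outside VM_TRACE builds the loop in calcPageCheckSum() is compiled out, so the stored checksum is simply the seed value 37.

// Recompute the XOR over the words after the checksum slot and compare.
// 'page' is assumed to point at ZPAGE_SIZE words of one log page.
static bool logPageChecksumOk(const Uint32* page,
                              Uint32 zposChecksum, Uint32 zpageSize)
{
  Uint32 sum = 37;                          // same seed as calcPageCheckSum()
  for (Uint32 i = zposChecksum + 1; i < zpageSize; i++)
    sum ^= page[i];
  return sum == page[zposChecksum];
}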
-
diff --git a/ndb/src/kernel/blocks/dblqh/Makefile.am b/ndb/src/kernel/blocks/dblqh/Makefile.am
deleted file mode 100644
index 854860b269c..00000000000
--- a/ndb/src/kernel/blocks/dblqh/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-#SUBDIRS = redoLogReader
-
-noinst_LIBRARIES = libdblqh.a
-
-libdblqh_a_SOURCES = DblqhInit.cpp DblqhMain.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libdblqh.dsp
-
-libdblqh.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libdblqh_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
deleted file mode 100644
index 2983b02de67..00000000000
--- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ /dev/null
@@ -1,1948 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef DBTC_H
-#define DBTC_H
-
-#include <ndb_limits.h>
-#include <pc.hpp>
-#include <SimulatedBlock.hpp>
-#include <DLHashTable.hpp>
-#include <SLList.hpp>
-#include <DLList.hpp>
-#include <DLFifoList.hpp>
-#include <DataBuffer.hpp>
-#include <Bitmask.hpp>
-#include <AttributeList.hpp>
-#include <signaldata/AttrInfo.hpp>
-#include <signaldata/LqhTransConf.hpp>
-#include <signaldata/LqhKey.hpp>
-#include <signaldata/TrigAttrInfo.hpp>
-#include <signaldata/TcIndx.hpp>
-#include <signaldata/TransIdAI.hpp>
-#include <signaldata/EventReport.hpp>
-#include <trigger_definitions.h>
-#include <SignalCounter.hpp>
-
-#ifdef DBTC_C
-/*
- * 2.2 LOCAL SYMBOLS
- * -----------------
- */
-#define Z8NIL 255
-#define ZAPI_CONNECT_FILESIZE 20
-#define ZATTRBUF_FILESIZE 4000
-#define ZCLOSED 2
-#define ZCOMMITING 0 /* VALUE FOR TRANSTATUS */
-#define ZCOMMIT_SETUP 2
-#define ZCONTINUE_ABORT_080 4
-#define ZDATABUF_FILESIZE 4000
-#define ZGCP_FILESIZE 10
-#define ZINBUF_DATA_LEN 24 /* POSITION OF 'DATA LENGTH'-VARIABLE. */
-#define ZINBUF_NEXT 27 /* POSITION OF 'NEXT'-VARIABLE. */
-#define ZINBUF_PREV 26 /* POSITION OF 'PREVIOUS'-VARIABLE. */
-#define ZINTSPH1 1
-#define ZINTSPH2 2
-#define ZINTSPH3 3
-#define ZINTSPH6 6
-#define ZLASTPHASE 255
-#define ZMAX_DATA_IN_LQHKEYREQ 12
-#define ZNODEBUF_FILESIZE 2000
-#define ZNR_OF_SEIZE 10
-#define ZSCANREC_FILE_SIZE 100
-#define ZSCAN_FRAGREC_FILE_SIZE 400
-#define ZSCAN_OPREC_FILE_SIZE 400
-#define ZSEND_ATTRINFO 0
-#define ZSPH1 1
-#define ZTABREC_FILESIZE 16
-#define ZTAKE_OVER_ACTIVE 1
-#define ZTAKE_OVER_IDLE 0
-#define ZTC_CONNECT_FILESIZE 200
-#define ZTCOPCONF_SIZE 6
-
-// ----------------------------------------
-// Error Codes for Scan
-// ----------------------------------------
-#define ZNO_CONCURRENCY_ERROR 242
-#define ZTOO_HIGH_CONCURRENCY_ERROR 244
-#define ZNO_SCANREC_ERROR 245
-#define ZNO_FRAGMENT_ERROR 246
-#define ZSCAN_AI_LEN_ERROR 269
-#define ZSCAN_LQH_ERROR 270
-#define ZSCAN_FRAG_LQH_ERROR 274
-
-#define ZSCANTIME_OUT_ERROR 296
-#define ZSCANTIME_OUT_ERROR2 297
-
-// ----------------------------------------
-// Error Codes for transactions
-// ----------------------------------------
-#define ZSTATE_ERROR 202
-#define ZLENGTH_ERROR 207 // Also Scan
-#define ZERO_KEYLEN_ERROR 208
-#define ZSIGNAL_ERROR 209
-#define ZGET_ATTRBUF_ERROR 217 // Also Scan
-#define ZGET_DATAREC_ERROR 218
-#define ZMORE_AI_IN_TCKEYREQ_ERROR 220
-#define ZCOMMITINPROGRESS 230
-#define ZROLLBACKNOTALLOWED 232
-#define ZNO_FREE_TC_CONNECTION 233 // Also Scan
-#define ZABORTINPROGRESS 237
-#define ZPREPAREINPROGRESS 238
-#define ZWRONG_SCHEMA_VERSION_ERROR 241 // Also Scan
-#define ZSCAN_NODE_ERROR 250
-#define ZTRANS_STATUS_ERROR 253
-#define ZTIME_OUT_ERROR 266
-#define ZSIMPLE_READ_WITHOUT_AI 271
-#define ZNO_AI_WITH_UPDATE 272
-#define ZSEIZE_API_COPY_ERROR 275
-#define ZSCANINPROGRESS 276
-#define ZABORT_ERROR 277
-#define ZCOMMIT_TYPE_ERROR 278
-
-#define ZNO_FREE_TC_MARKER 279
-#define ZNODE_SHUTDOWN_IN_PROGRESS 280
-#define ZCLUSTER_SHUTDOWN_IN_PROGRESS 281
-#define ZWRONG_STATE 282
-#define ZCLUSTER_IN_SINGLEUSER_MODE 299
-
-#define ZDROP_TABLE_IN_PROGRESS 283
-#define ZNO_SUCH_TABLE 284
-#define ZUNKNOWN_TABLE_ERROR 285
-#define ZNODEFAIL_BEFORE_COMMIT 286
-#define ZINDEX_CORRUPT_ERROR 287
-
-// ----------------------------------------
-// Seize error
-// ----------------------------------------
-#define ZNO_FREE_API_CONNECTION 219
-#define ZSYSTEM_NOT_STARTED_ERROR 203
-
-// ----------------------------------------
-// Release errors
-// ----------------------------------------
-#define ZINVALID_CONNECTION 229
-
-
-#define ZNOT_FOUND 626
-#define ZALREADYEXIST 630
-#define ZINCONSISTENTHASHINDEX 892
-#define ZNOTUNIQUE 893
-#endif
-
-class Dbtc: public SimulatedBlock {
-public:
- enum ConnectionState {
- CS_CONNECTED = 0,
- CS_DISCONNECTED = 1,
- CS_STARTED = 2,
- CS_RECEIVING = 3,
- CS_PREPARED = 4,
- CS_START_PREPARING = 5,
- CS_REC_PREPARING = 6,
- CS_RESTART = 7,
- CS_ABORTING = 8,
- CS_COMPLETING = 9,
- CS_COMPLETE_SENT = 10,
- CS_PREPARE_TO_COMMIT = 11,
- CS_COMMIT_SENT = 12,
- CS_START_COMMITTING = 13,
- CS_COMMITTING = 14,
- CS_REC_COMMITTING = 15,
- CS_WAIT_ABORT_CONF = 16,
- CS_WAIT_COMPLETE_CONF = 17,
- CS_WAIT_COMMIT_CONF = 18,
- CS_FAIL_ABORTING = 19,
- CS_FAIL_ABORTED = 20,
- CS_FAIL_PREPARED = 21,
- CS_FAIL_COMMITTING = 22,
- CS_FAIL_COMMITTED = 23,
- CS_FAIL_COMPLETED = 24,
- CS_START_SCAN = 25
- };
-
- enum OperationState {
- OS_CONNECTING_DICT = 0,
- OS_CONNECTED = 1,
- OS_OPERATING = 2,
- OS_PREPARED = 3,
- OS_COMMITTING = 4,
- OS_COMMITTED = 5,
- OS_COMPLETING = 6,
- OS_COMPLETED = 7,
- OS_RESTART = 8,
- OS_ABORTING = 9,
- OS_ABORT_SENT = 10,
- OS_TAKE_OVER = 11,
- OS_WAIT_DIH = 12,
- OS_WAIT_KEYINFO = 13,
- OS_WAIT_ATTR = 14,
- OS_WAIT_COMMIT_CONF = 15,
- OS_WAIT_ABORT_CONF = 16,
- OS_WAIT_COMPLETE_CONF = 17,
- OS_WAIT_SCAN = 18
- };
-
- enum AbortState {
- AS_IDLE = 0,
- AS_ACTIVE = 1
- };
-
- enum HostState {
- HS_ALIVE = 0,
- HS_DEAD = 1
- };
-
- enum LqhTransState {
- LTS_IDLE = 0,
- LTS_ACTIVE = 1
- };
-
- enum TakeOverState {
- TOS_NOT_DEFINED = 0,
- TOS_IDLE = 1,
- TOS_ACTIVE = 2,
- TOS_COMPLETED = 3,
- TOS_NODE_FAILED = 4
- };
-
- enum FailState {
- FS_IDLE = 0,
- FS_LISTENING = 1,
- FS_COMPLETING = 2
- };
-
- enum SystemStartState {
- SSS_TRUE = 0,
- SSS_FALSE = 1
- };
-
- enum TimeOutCheckState {
- TOCS_TRUE = 0,
- TOCS_FALSE = 1
- };
-
- enum ReturnSignal {
- RS_NO_RETURN = 0,
- RS_TCKEYCONF = 1,
- RS_TC_COMMITCONF = 3,
- RS_TCROLLBACKCONF = 4,
- RS_TCROLLBACKREP = 5
- };
-
- enum IndexOperationState {
- IOS_NOOP = 0,
- IOS_INDEX_ACCESS = 1,
- IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF = 2,
- IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI = 3,
- IOS_INDEX_OPERATION = 4
- };
-
- enum IndexState {
- IS_BUILDING = 0, // build in progress, start state at create
- IS_ONLINE = 1 // ready to use
- };
-
-
- /**--------------------------------------------------------------------------
- * LOCAL SYMBOLS PER 'SYMBOL-VALUED' VARIABLE
- *
- *
- * NSYMB ZAPI_CONNECT_FILESIZE = 20
- * NSYMB ZTC_CONNECT_FILESIZE = 200
- * NSYMB ZHOST_FILESIZE = 16
- * NSYMB ZDATABUF_FILESIZE = 4000
- * NSYMB ZATTRBUF_FILESIZE = 4000
- * NSYMB ZGCP_FILESIZE = 10
- *
- *
- * ABORTED CODES
- * TPHASE NSYMB ZSPH1 = 1
- * NSYMB ZLASTPHASE = 255
- *
- *
- * LQH_TRANS
- * NSYMB ZTRANS_ABORTED = 1
- * NSYMB ZTRANS_PREPARED = 2
- * NSYMB ZTRANS_COMMITTED = 3
- * NSYMB ZCOMPLETED_LQH_TRANS = 4
- * NSYMB ZTRANS_COMPLETED = 5
- *
- *
- * TAKE OVER
- * NSYMB ZTAKE_OVER_IDLE = 0
- * NSYMB ZTAKE_OVER_ACTIVE = 1
- *
- * ATTRBUF (ATTRBUF_RECORD)
- * NSYMB ZINBUF_DATA_LEN = 24
- * NSYMB ZINBUF_NEXTFREE = 25 (NOT USED )
- * NSYMB ZINBUF_PREV = 26
- * NSYMB ZINBUF_NEXT = 27
- -------------------------------------------------------------------------*/
- /*
- 2.3 RECORDS AND FILESIZES
- -------------------------
- */
- /* **************************************************************** */
- /* ---------------------------------------------------------------- */
- /* ------------------- TRIGGER AND INDEX DATA --------------------- */
- /* ---------------------------------------------------------------- */
- /* **************************************************************** */
- /* ********* DEFINED TRIGGER DATA ********* */
- /* THIS RECORD FORMS LISTS OF ACTIVE */
- /* TRIGGERS FOR EACH TABLE. */
- /* THE RECORDS ARE MANAGED BY A TRIGGER */
- /* POOL WHERE A TRIGGER RECORD IS SEIZED */
- /* WHEN A TRIGGER IS ACTIVATED AND RELEASED */
- /* WHEN THE TRIGGER IS DEACTIVATED. */
- /* **************************************** */
- struct TcDefinedTriggerData {
- /**
- * Trigger id, used to identify the trigger
- */
- UintR triggerId;
-
- /**
- * Trigger type, defines what the trigger is used for
- */
- TriggerType::Value triggerType;
-
- /**
-     * Trigger event, defines for which operation the trigger fires
- */
- TriggerEvent::Value triggerEvent;
-
- /**
- * Attribute mask, defines what attributes are to be monitored
- * Can be seen as a compact representation of SQL column name list
- */
- Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask;
-
- /**
- * Next ptr (used in pool/list)
- */
- union {
- Uint32 nextPool;
- Uint32 nextList;
- };
-
- /**
-     * Index id, only used by secondary_index triggers. This is the same as
-     * the index table id in DICT.
- **/
- Uint32 indexId;
-
- /**
- * Prev pointer (used in list)
- */
- Uint32 prevList;
-
- inline void print(NdbOut & s) const {
- s << "[DefinedTriggerData = " << triggerId << "]";
- }
- };
- typedef Ptr<TcDefinedTriggerData> DefinedTriggerPtr;
-
- /**
- * Pool of trigger data record
- */
- ArrayPool<TcDefinedTriggerData> c_theDefinedTriggerPool;
-
- /**
- * The list of active triggers
- */
- DLList<TcDefinedTriggerData> c_theDefinedTriggers;
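The defined-trigger records above follow the block's intrusive pool/list pattern: the nextPool/nextList union and prevList let the same record sit either on the ArrayPool free list or on the DLList of active triggers, so a record is seized when a trigger is activated and released when it is deactivated. A minimal sketch of that seize/release idea, using simplified stand-in types (Rec, Pool and RNIL_IDX are inventions for illustration, not the real NDB templates):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Simplified stand-ins for the ArrayPool/DLList idea: records live in one flat
// array, and the same next/prev indices serve either the free list (pool) or
// the doubly linked list of active records. RNIL_IDX marks "no record".
static const uint32_t RNIL_IDX = 0xFFFFFFFF;

struct Rec {
  uint32_t triggerId;
  uint32_t next;   // nextPool while free, nextList while active
  uint32_t prev;   // prevList while active
};

struct Pool {
  std::vector<Rec> recs;
  uint32_t firstFree;    // singly linked free list
  uint32_t firstActive;  // doubly linked active list

  explicit Pool(uint32_t n)
    : recs(n), firstFree(n ? 0 : RNIL_IDX), firstActive(RNIL_IDX) {
    for (uint32_t i = 0; i < n; i++)
      recs[i].next = (i + 1 < n) ? i + 1 : RNIL_IDX;
  }

  // Seize: take a record off the free list and put it first on the active list.
  uint32_t seize(uint32_t triggerId) {
    uint32_t i = firstFree;
    assert(i != RNIL_IDX);
    firstFree = recs[i].next;
    recs[i].triggerId = triggerId;
    recs[i].prev = RNIL_IDX;
    recs[i].next = firstActive;
    if (firstActive != RNIL_IDX) recs[firstActive].prev = i;
    firstActive = i;
    return i;
  }

  // Release: unlink from the active list and push the record back on the free list.
  void release(uint32_t i) {
    if (recs[i].prev != RNIL_IDX) recs[recs[i].prev].next = recs[i].next;
    else firstActive = recs[i].next;
    if (recs[i].next != RNIL_IDX) recs[recs[i].next].prev = recs[i].prev;
    recs[i].next = firstFree;
    firstFree = i;
  }
};
```

The point of sharing the index fields is that activation and deactivation never allocate: they only relink indices inside a preallocated array.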
-
- typedef DataBuffer<11> AttributeBuffer;
-
- AttributeBuffer::DataBufferPool c_theAttributeBufferPool;
-
- UintR c_transactionBufferSpace;
-
-
- /* ********** FIRED TRIGGER DATA ********** */
- /* THIS RECORD FORMS LISTS OF FIRED */
- /* TRIGGERS FOR A TRANSACTION. */
- /* THE RECORDS ARE MANAGED BY A TRIGGER */
- /* POOL WHERE A TRIGGER RECORD IS SEIZED */
- /* WHEN A TRIGGER IS ACTIVATED AND RELEASED */
- /* WHEN THE TRIGGER IS DEACTIVATED. */
- /* **************************************** */
- struct TcFiredTriggerData {
- TcFiredTriggerData() {}
-
- /**
- * Trigger id, used to identify the trigger
- **/
- Uint32 triggerId;
-
- /**
- * The operation that fired the trigger
- */
- Uint32 fireingOperation;
-
- /**
- * Used for scrapping in case of node failure
- */
- Uint32 nodeId;
-
- /**
- * Trigger attribute info, primary key value(s)
- */
- AttributeBuffer::Head keyValues;
-
- /**
- * Trigger attribute info, attribute value(s) before operation
- */
- AttributeBuffer::Head beforeValues;
-
- /**
- * Trigger attribute info, attribute value(s) after operation
- */
- AttributeBuffer::Head afterValues;
-
- /**
- * Next ptr (used in pool/list)
- */
- union {
- Uint32 nextPool;
- Uint32 nextList;
- Uint32 nextHash;
- };
-
- /**
- * Prev pointer (used in list)
- */
- union {
- Uint32 prevList;
- Uint32 prevHash;
- };
-
- inline void print(NdbOut & s) const {
- s << "[FiredTriggerData = " << triggerId << "]";
- }
-
- inline Uint32 hashValue() const {
- return fireingOperation ^ nodeId;
- }
-
- inline bool equal(const TcFiredTriggerData & rec) const {
- return fireingOperation == rec.fireingOperation && nodeId == rec.nodeId;
- }
- };
- typedef Ptr<TcFiredTriggerData> FiredTriggerPtr;
-
- /**
- * Pool of trigger data record
- */
- ArrayPool<TcFiredTriggerData> c_theFiredTriggerPool;
- DLHashTable<TcFiredTriggerData> c_firedTriggerHash;
- AttributeBuffer::DataBufferPool c_theTriggerAttrInfoPool;
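Fired triggers are kept in a DLHashTable keyed by the hashValue()/equal() pair defined on TcFiredTriggerData (firing operation index XOR node id), so the block can find the fired-trigger record that belongs to a given operation when its trigger data arrives. A rough illustration with a simplified separate-chaining table (FiredTriggerHash is an invention here, not the real DLHashTable):

```cpp
#include <cstddef>
#include <cstdint>
#include <list>
#include <vector>

struct FiredTrigger {
  uint32_t triggerId;
  uint32_t fireingOperation;  // spelling kept from the original record
  uint32_t nodeId;

  uint32_t hashValue() const { return fireingOperation ^ nodeId; }
  bool equal(const FiredTrigger& o) const {
    return fireingOperation == o.fireingOperation && nodeId == o.nodeId;
  }
};

// Minimal separate-chaining table keyed the same way a DLHashTable would be:
// bucket index from hashValue(), identity from equal().
class FiredTriggerHash {
public:
  explicit FiredTriggerHash(size_t buckets) : table_(buckets ? buckets : 1) {}

  void insert(const FiredTrigger& t) {
    table_[t.hashValue() % table_.size()].push_back(t);
  }

  const FiredTrigger* find(uint32_t op, uint32_t node) const {
    FiredTrigger key{0, op, node};
    for (const FiredTrigger& t : table_[key.hashValue() % table_.size()])
      if (t.equal(key)) return &t;
    return nullptr;
  }

private:
  std::vector<std::list<FiredTrigger>> table_;
};
```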
-
- Uint32 c_maxNumberOfDefinedTriggers;
- Uint32 c_maxNumberOfFiredTriggers;
-
- struct AttrInfoRecord {
- /**
- * Pre-allocated AttrInfo signal
- */
- AttrInfo attrInfo;
-
- /**
- * Next ptr (used in pool/list)
- */
- union {
- Uint32 nextPool;
- Uint32 nextList;
- };
- /**
- * Prev pointer (used in list)
- */
- Uint32 prevList;
- };
-
-
- /* ************* INDEX DATA *************** */
- /* THIS RECORD FORMS LISTS OF ACTIVE */
- /* INDEX FOR EACH TABLE. */
-  /* THE RECORDS ARE MANAGED BY AN INDEX     */
- /* POOL WHERE AN INDEX RECORD IS SEIZED */
- /* WHEN AN INDEX IS CREATED AND RELEASED */
- /* WHEN THE INDEX IS DROPPED. */
- /* **************************************** */
- struct TcIndexData {
- /**
- * IndexState
- */
- IndexState indexState;
-
- /**
- * Index id, same as index table id in DICT
- */
- Uint32 indexId;
-
- /**
- * Index attribute list. Only the length is used in v21x.
- */
- AttributeList attributeList;
-
- /**
- * Primary table id, the primary table to be indexed
- */
- Uint32 primaryTableId;
-
- /**
- * Primary key position in secondary table
- */
- Uint32 primaryKeyPos;
-
- /**
- * Next ptr (used in pool/list)
- */
- union {
- Uint32 nextPool;
- Uint32 nextList;
- };
- /**
- * Prev pointer (used in list)
- */
- Uint32 prevList;
- };
-
- typedef Ptr<TcIndexData> TcIndexDataPtr;
-
- /**
- * Pool of index data record
- */
- ArrayPool<TcIndexData> c_theIndexPool;
-
- /**
- * The list of defined indexes
- */
- ArrayList<TcIndexData> c_theIndexes;
- UintR c_maxNumberOfIndexes;
-
- struct TcIndexOperation {
- TcIndexOperation(AttributeBuffer::DataBufferPool & abp) :
- indexOpState(IOS_NOOP),
- expectedKeyInfo(0),
- keyInfo(abp),
- expectedAttrInfo(0),
- attrInfo(abp),
- expectedTransIdAI(0),
- transIdAI(abp),
- indexReadTcConnect(RNIL)
- {}
-
- ~TcIndexOperation()
- {
- }
-
- // Index data
- Uint32 indexOpId;
- IndexOperationState indexOpState; // Used to mark on-going TcKeyReq
- Uint32 expectedKeyInfo;
- AttributeBuffer keyInfo; // For accumulating IndxKeyInfo
- Uint32 expectedAttrInfo;
- AttributeBuffer attrInfo; // For accumulating IndxAttrInfo
- Uint32 expectedTransIdAI;
- AttributeBuffer transIdAI; // For accumulating TransId_AI
-
- TcKeyReq tcIndxReq;
- UintR connectionIndex;
- UintR indexReadTcConnect; //
-
- /**
- * Next ptr (used in pool/list)
- */
- union {
- Uint32 nextPool;
- Uint32 nextList;
- };
- /**
- * Prev pointer (used in list)
- */
- Uint32 prevList;
- };
-
- typedef Ptr<TcIndexOperation> TcIndexOperationPtr;
-
- /**
- * Pool of index data record
- */
- ArrayPool<TcIndexOperation> c_theIndexOperationPool;
-
- UintR c_maxNumberOfIndexOperations;
-
- /************************** API CONNECT RECORD ***********************
- * The API connect record contains the connection record to which the
- * application connects.
- *
- * The application can send one operation at a time. It can send a
- * new operation immediately after sending the previous operation.
- * Thereby several operations can be active in one transaction within TC.
- * This is achieved by using the API connect record.
- * Each active operation is handled by the TC connect record.
- * As soon as the TC connect record has sent the
- * request to the LQH it is ready to receive new operations.
- * The LQH connect record takes care of waiting for an operation to
- * complete.
- * When an operation has completed on the LQH connect record,
- * a new operation can be started on this LQH connect record.
- *******************************************************************
- *
- * API CONNECT RECORD ALIGNED TO BE 256 BYTES
- ********************************************************************/
-
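As the comment above describes, one API connect record (one open transaction) can own several concurrently outstanding operations, each represented by a TC connect record linked into the transaction's operation list. A schematic sketch of that ownership, with simplified stand-in types and field names (Op, Trans, NO_OP and addOperation are inventions for illustration):

```cpp
#include <cstdint>
#include <vector>

static const uint32_t NO_OP = 0xFFFFFFFF;

// Stand-in for a TC connect record: one in-flight operation of a transaction.
struct Op {
  uint32_t clientData;  // caller's operation pointer
  uint32_t nextOp;      // next operation in the same transaction
};

// Stand-in for an API connect record: one open transaction.
struct Trans {
  uint32_t transid[2];
  uint32_t firstOp = NO_OP;
  uint32_t lastOp = NO_OP;
};

// Append a newly seized operation to the transaction's operation list. The
// transaction accepts it immediately; it does not wait for the previous
// operation to complete, which is what allows several operations in flight.
inline void addOperation(Trans& t, std::vector<Op>& ops, uint32_t opIdx) {
  ops[opIdx].nextOp = NO_OP;
  if (t.firstOp == NO_OP) t.firstOp = opIdx;
  else ops[t.lastOp].nextOp = opIdx;
  t.lastOp = opIdx;
}
```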
- /*******************************************************************>*/
-  // We break out the API timer to optimise for scanning rather than
-  // for fast access.
- /*******************************************************************>*/
- inline void setApiConTimer(Uint32 apiConPtrI, Uint32 value, Uint32 line){
- c_apiConTimer[apiConPtrI] = value;
- c_apiConTimer_line[apiConPtrI] = line;
- }
-
- inline Uint32 getApiConTimer(Uint32 apiConPtrI) const {
- return c_apiConTimer[apiConPtrI];
- }
- UintR* c_apiConTimer;
- UintR* c_apiConTimer_line;
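Keeping the timers in the two flat arrays above, instead of inside each 256-byte ApiConnectRecord, means the periodic timeout check only walks a small contiguous region of memory; the companion line array records where the timer was last set, which helps when debugging stuck transactions. A hedged structure-of-arrays sketch (the expiry rule used here is hypothetical):

```cpp
#include <cstdint>
#include <vector>

// Flat, parallel timer arrays: index i belongs to API connect record i.
struct ApiConTimers {
  std::vector<uint32_t> value;  // 0 = timer not running
  std::vector<uint32_t> line;   // source line that last set the timer (debug aid)

  explicit ApiConTimers(uint32_t n) : value(n, 0), line(n, 0) {}

  void set(uint32_t i, uint32_t v, uint32_t l) { value[i] = v; line[i] = l; }

  // Scan for timed-out transactions: only these two dense arrays are touched,
  // not the full API connect records.
  std::vector<uint32_t> expired(uint32_t now, uint32_t limit) const {
    std::vector<uint32_t> out;
    for (uint32_t i = 0; i < value.size(); i++)
      if (value[i] != 0 && now - value[i] > limit)
        out.push_back(i);
    return out;
  }
};
```

The trade-off is the classic one between structure-of-arrays (cheap periodic scans) and array-of-structures (cheap access to all fields of one record), and the comment above states that the scan case is the one being optimised.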
-
- struct ApiConnectRecord {
- ApiConnectRecord(ArrayPool<TcFiredTriggerData> & firedTriggerPool,
- ArrayPool<TcIndexOperation> & seizedIndexOpPool):
- theFiredTriggers(firedTriggerPool),
- isIndexOp(false),
- theSeizedIndexOperations(seizedIndexOpPool)
- {}
-
- //---------------------------------------------------
- // First 16 byte cache line. Hot variables.
- //---------------------------------------------------
- ConnectionState apiConnectstate;
- UintR transid[2];
- UintR firstTcConnect;
-
- //---------------------------------------------------
- // Second 16 byte cache line. Hot variables.
- //---------------------------------------------------
- UintR lqhkeyconfrec;
- UintR cachePtr;
- UintR currSavePointId;
- UintR counter;
-
- //---------------------------------------------------
- // Third 16 byte cache line. First and second cache
- // line plus this will be enough for copy API records.
- // Variables used in late phases.
- //---------------------------------------------------
- UintR nextGcpConnect;
- UintR prevGcpConnect;
- UintR gcpPointer;
- UintR ndbapiConnect;
-
- //---------------------------------------------------
- // Fourth 16 byte cache line. Only used in late phases.
- // Plus 4 bytes of error handling.
- //---------------------------------------------------
- UintR nextApiConnect;
- BlockReference ndbapiBlockref;
- UintR apiCopyRecord;
- UintR globalcheckpointid;
-
- //---------------------------------------------------
- // Second 64 byte cache line starts. First 16 byte
- // cache line in this one. Variables primarily used
- // in early phase.
- //---------------------------------------------------
- UintR lastTcConnect;
- UintR lqhkeyreqrec;
- AbortState abortState;
- Uint32 buddyPtr;
- Uint8 m_exec_flag;
- Uint8 unused2;
- Uint8 takeOverRec;
- Uint8 currentReplicaNo;
-
- //---------------------------------------------------
-    // Error handling variables. If the cache line is 32 bytes,
-    // this ensures that the cache line is still only read in
-    // early phases.
- //---------------------------------------------------
- union {
- UintR apiScanRec;
- UintR commitAckMarker;
- };
- UintR currentTcConnect;
- BlockReference tcBlockref;
- Uint16 returncode;
- Uint16 takeOverInd;
-
- //---------------------------------------------------
- // Second 64 byte cache line. Third 16 byte cache line
- // in this one. Variables primarily used in early phase
- // and checked in late phase.
- // Fourth cache line is the tcSendArray that is used
-    // when two or three operations are responded to in
-    // parallel. The first two entries in tcSendArray are
-    // part of the third cache line.
- //---------------------------------------------------
- //---------------------------------------------------
- // timeOutCounter is used waiting for ABORTCONF, COMMITCONF
- // and COMPLETECONF
- //---------------------------------------------------
- UintR failureNr;
-    Uint8 tckeyrec; // Changed from R
-    Uint8 tcindxrec;
-    Uint8 apiFailState; // Changed from R
- ReturnSignal returnsignal;
- Uint8 timeOutCounter;
-
- UintR tcSendArray[6];
-
- // Trigger data
-
- /**
- * The list of fired triggers
- */
- DLFifoList<TcFiredTriggerData> theFiredTriggers;
-
- bool triggerPending; // Used to mark waiting for a CONTINUEB
-
- // Index data
-
- bool isIndexOp; // Used to mark on-going TcKeyReq as indx table access
- bool indexOpReturn;
- UintR noIndexOp; // No outstanding index ops
-
- // Index op return context
- UintR indexOp;
- UintR clientData;
- UintR attrInfoLen;
-
- UintR accumulatingIndexOp;
- UintR executingIndexOp;
- UintR tcIndxSendArray[6];
- ArrayList<TcIndexOperation> theSeizedIndexOperations;
- };
-
- typedef Ptr<ApiConnectRecord> ApiConnectRecordPtr;
-
-
- /************************** TC CONNECT RECORD ************************/
- /* *******************************************************************/
- /* TC CONNECT RECORD KEEPS ALL INFORMATION TO CARRY OUT A TRANSACTION*/
- /* THE TRANSACTION CONTROLLER ESTABLISHES CONNECTIONS TO DIFFERENT */
- /* BLOCKS TO CARRY OUT THE TRANSACTION. THERE CAN BE SEVERAL RECORDS */
- /* PER ACTIVE TRANSACTION. THE TC CONNECT RECORD COOPERATES WITH THE */
- /* API CONNECT RECORD FOR COMMUNICATION WITH THE API AND WITH THE */
- /* LQH CONNECT RECORD FOR COMMUNICATION WITH THE LQH'S INVOLVED IN */
- /* THE TRANSACTION. TC CONNECT RECORD IS PERMANENTLY CONNECTED TO A */
- /* RECORD IN DICT AND ONE IN DIH. IT CONTAINS A LIST OF ACTIVE LQH */
- /* CONNECT RECORDS AND A LIST OF STARTED BUT NOT ACTIVE LQH CONNECT */
- /* RECORDS. IT DOES ALSO CONTAIN A LIST OF ALL OPERATIONS THAT ARE */
- /* EXECUTED WITH THE TC CONNECT RECORD. */
- /*******************************************************************>*/
- /* TC_CONNECT RECORD ALIGNED TO BE 128 BYTES */
- /*******************************************************************>*/
- struct TcConnectRecord {
- //---------------------------------------------------
- // First 16 byte cache line. Those variables are only
- // used in error cases.
- //---------------------------------------------------
- UintR tcOprec; /* TC OPREC of operation being taken over */
- Uint16 failData[4]; /* Failed nodes when taking over an operation */
- UintR nextTcFailHash;
-
- //---------------------------------------------------
- // Second 16 byte cache line. Those variables are used
- // from LQHKEYCONF to sending COMMIT and COMPLETED.
- //---------------------------------------------------
- UintR lastLqhCon; /* Connect record in last replicas Lqh record */
- Uint16 lastLqhNodeId; /* Node id of last replicas Lqh */
- Uint16 m_execAbortOption;/* TcKeyReq::ExecuteAbortOption */
- UintR commitAckMarker; /* CommitMarker I value */
-
- //---------------------------------------------------
- // Third 16 byte cache line. The hottest variables.
- //---------------------------------------------------
- OperationState tcConnectstate; /* THE STATE OF THE CONNECT*/
- UintR apiConnect; /* POINTER TO API CONNECT RECORD */
- UintR nextTcConnect; /* NEXT TC RECORD*/
- Uint8 dirtyOp;
- Uint8 lastReplicaNo; /* NUMBER OF THE LAST REPLICA IN THE OPERATION */
- Uint8 noOfNodes; /* TOTAL NUMBER OF NODES IN OPERATION */
- Uint8 operation; /* OPERATION TYPE */
- /* 0 = READ REQUEST */
- /* 1 = UPDATE REQUEST */
- /* 2 = INSERT REQUEST */
- /* 3 = DELETE REQUEST */
-
- //---------------------------------------------------
- // Fourth 16 byte cache line. The mildly hot variables.
- // tcNodedata expands 4 Bytes into the next cache line
- // with indexes almost never used.
- //---------------------------------------------------
- UintR clientData; /* SENDERS OPERATION POINTER */
- UintR dihConnectptr; /* CONNECTION TO DIH BLOCK ON THIS NODE */
- UintR prevTcConnect; /* DOUBLY LINKED LIST OF TC CONNECT RECORDS*/
- UintR savePointId;
-
- Uint16 tcNodedata[4];
-
- // Trigger data
- FiredTriggerPtr accumulatingTriggerData;
- UintR noFiredTriggers;
- UintR noReceivedTriggers;
- UintR triggerExecutionCount;
- UintR triggeringOperation;
- UintR savedState[LqhKeyConf::SignalLength];
-
- // Index data
- bool isIndexOp; // Used to mark on-going TcKeyReq as index table access
- UintR indexOp;
- UintR currentIndexId;
- UintR attrInfoLen;
- };
-
- friend struct TcConnectRecord;
-
- typedef Ptr<TcConnectRecord> TcConnectRecordPtr;
-
- // ********************** CACHE RECORD **************************************
- //---------------------------------------------------------------------------
-  // This record is used between reception of TCKEYREQ and sending of LQHKEYREQ.
-  // It is separated so as to improve the cache hit rate and also to minimise
- // the necessary memory storage in NDB Cluster.
- //---------------------------------------------------------------------------
-
- struct CacheRecord {
- //---------------------------------------------------
- // First 16 byte cache line. Variables used by
- // ATTRINFO processing.
- //---------------------------------------------------
- UintR firstAttrbuf; /* POINTER TO LINKED LIST OF ATTRIBUTE BUFFERS */
- UintR lastAttrbuf; /* POINTER TO LINKED LIST OF ATTRIBUTE BUFFERS */
- UintR currReclenAi;
- Uint16 attrlength; /* ATTRIBUTE INFORMATION LENGTH */
- Uint16 save1;
-
- //---------------------------------------------------
- // Second 16 byte cache line. Variables initiated by
- // TCKEYREQ and used in LQHKEYREQ.
- //---------------------------------------------------
- UintR attrinfo15[4];
-
- //---------------------------------------------------
- // Third 16 byte cache line. Variables initiated by
- // TCKEYREQ and used in LQHKEYREQ.
- //---------------------------------------------------
- UintR attrinfo0;
- UintR schemaVersion;/* SCHEMA VERSION USED IN TRANSACTION */
- UintR tableref; /* POINTER TO THE TABLE IN WHICH THE FRAGMENT EXISTS*/
- Uint16 apiVersionNo;
- Uint16 keylen; /* KEY LENGTH SENT BY REQUEST SIGNAL */
-
- //---------------------------------------------------
- // Fourth 16 byte cache line. Variables initiated by
- // TCKEYREQ and used in LQHKEYREQ.
- //---------------------------------------------------
- UintR keydata[4]; /* RECEIVES FIRST 16 BYTES OF TUPLE KEY */
-
- //---------------------------------------------------
- // First 16 byte cache line in second 64 byte cache
- // line. Diverse use.
- //---------------------------------------------------
- UintR fragmentid; /* THE COMPUTED FRAGMENT ID */
- UintR hashValue; /* THE HASH VALUE USED TO LOCATE FRAGMENT */
-
- Uint8 distributionKeyIndicator;
- Uint8 m_special_hash; // collation or distribution key
- Uint8 unused2;
- Uint8 lenAiInTckeyreq; /* LENGTH OF ATTRIBUTE INFORMATION IN TCKEYREQ */
-
- Uint8 fragmentDistributionKey; /* DIH generation no */
-
- /**
- * EXECUTION MODE OF OPERATION
- * 0 = NORMAL EXECUTION, 1 = INTERPRETED EXECUTION
- */
- Uint8 opExec;
-
- /**
- * LOCK TYPE OF OPERATION IF READ OPERATION
- * 0 = READ LOCK, 1 = WRITE LOCK
- */
- Uint8 opLock;
-
- /**
- * IS THE OPERATION A SIMPLE TRANSACTION
- * 0 = NO, 1 = YES
- */
- Uint8 opSimple;
-
- //---------------------------------------------------
- // Second 16 byte cache line in second 64 byte cache
- // line. Diverse use.
- //---------------------------------------------------
- UintR distributionKey;
- UintR nextCacheRec;
- UintR unused3;
- Uint32 scanInfo;
-
- //---------------------------------------------------
- // Third 16 byte cache line in second 64
- // byte cache line. Diverse use.
- //---------------------------------------------------
- Uint32 unused4;
- Uint32 scanTakeOverInd;
- UintR firstKeybuf; /* POINTER THE LINKED LIST OF KEY BUFFERS */
- UintR lastKeybuf; /* VARIABLE POINTING TO THE LAST KEY BUFFER */
-
- //---------------------------------------------------
- // Fourth 16 byte cache line in second 64
- // byte cache line. Not used currently.
- //---------------------------------------------------
- UintR packedCacheVar[4];
- };
-
- typedef Ptr<CacheRecord> CacheRecordPtr;
-
- /* ************************ HOST RECORD ********************************** */
- /********************************************************/
- /* THIS RECORD CONTAINS ALIVE-STATUS ON ALL NODES IN THE*/
- /* SYSTEM */
- /********************************************************/
- /* THIS RECORD IS ALIGNED TO BE 128 BYTES. */
- /********************************************************/
- struct HostRecord {
- HostState hostStatus;
- LqhTransState lqhTransStatus;
- TakeOverState takeOverStatus;
- bool inPackedList;
- UintR noOfPackedWordsLqh;
- UintR packedWordsLqh[26];
- UintR noOfWordsTCKEYCONF;
- UintR packedWordsTCKEYCONF[30];
- UintR noOfWordsTCINDXCONF;
- UintR packedWordsTCINDXCONF[30];
- BlockReference hostLqhBlockRef;
- }; /* p2c: size = 128 bytes */
-
- typedef Ptr<HostRecord> HostRecordPtr;
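The packedWordsLqh / packedWordsTCKEYCONF / packedWordsTCINDXCONF arrays in HostRecord buffer several small signals per destination node so they can be shipped together as one packed signal. A minimal sketch of that batching idea, assuming a hypothetical sendNow callback in place of the real send machinery:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Per-destination buffer that batches small signals into one send, in the
// spirit of packedWordsLqh[26] above. SendFn is a hypothetical callback.
struct PackedBuffer {
  static const uint32_t kCapacity = 26;
  uint32_t noOfWords = 0;
  uint32_t words[kCapacity];

  template <class SendFn>
  void append(const uint32_t* sig, uint32_t len, SendFn sendNow) {
    assert(len <= kCapacity);
    if (noOfWords + len > kCapacity)  // no room left: ship what we have first
      flush(sendNow);
    std::memcpy(words + noOfWords, sig, len * sizeof(uint32_t));
    noOfWords += len;
  }

  template <class SendFn>
  void flush(SendFn sendNow) {
    if (noOfWords != 0) sendNow(words, noOfWords);
    noOfWords = 0;
  }
};
```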
-
- /* *********** TABLE RECORD ********************************************* */
-
- /********************************************************/
- /* THIS RECORD CONTAINS THE CURRENT SCHEMA VERSION OF */
- /* ALL TABLES IN THE SYSTEM. */
- /********************************************************/
- struct TableRecord {
- Uint32 currentSchemaVersion;
- Uint8 enabled;
- Uint8 dropping;
- Uint8 tableType;
- Uint8 storedTable;
-
- Uint8 noOfKeyAttr;
- Uint8 hasCharAttr;
- Uint8 noOfDistrKeys;
-
- struct KeyAttr {
- Uint32 attributeDescriptor;
- CHARSET_INFO* charsetInfo;
- } keyAttr[MAX_ATTRIBUTES_IN_INDEX];
-
- bool checkTable(Uint32 schemaVersion) const {
- return enabled && !dropping && (schemaVersion == currentSchemaVersion);
- }
-
- Uint32 getErrorCode(Uint32 schemaVersion) const;
-
- struct DropTable {
- Uint32 senderRef;
- Uint32 senderData;
- SignalCounter waitDropTabCount;
- } dropTable;
- };
- typedef Ptr<TableRecord> TableRecordPtr;
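TableRecord::checkTable() is how TC verifies, before executing an operation, that the schema version carried by the request still matches the current table definition and that the table is neither disabled nor being dropped. A small usage sketch (the validate helper is an invention; 241 is the ZWRONG_SCHEMA_VERSION_ERROR code defined earlier in this header, used here only as an example outcome):

```cpp
#include <cstdint>

struct TableRec {
  uint32_t currentSchemaVersion;
  uint8_t enabled;
  uint8_t dropping;

  bool checkTable(uint32_t schemaVersion) const {
    return enabled && !dropping && (schemaVersion == currentSchemaVersion);
  }
};

// Hypothetical request check; the real code picks among several error codes.
enum class TcError : uint32_t { None = 0, WrongSchemaVersion = 241 };

inline TcError validate(const TableRec& tab, uint32_t reqSchemaVersion) {
  return tab.checkTable(reqSchemaVersion) ? TcError::None
                                          : TcError::WrongSchemaVersion;
}
```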
-
- /**
- * There is max 16 ScanFragRec's for
- * each scan started in TC. Each ScanFragRec is used by
- * a scan fragment "process" that scans one fragment at a time.
- * It will receive max 16 tuples in each request
- */
- struct ScanFragRec {
- ScanFragRec(){
- stopFragTimer();
- lqhBlockref = 0;
- scanFragState = IDLE;
- scanRec = RNIL;
- }
- /**
- * ScanFragState
- * WAIT_GET_PRIMCONF : Waiting for DIGETPRIMCONF when starting a new
- * fragment scan
- * LQH_ACTIVE : The scan process has sent a command to LQH and is
- * waiting for the response
- * LQH_ACTIVE_CLOSE : The scan process has sent close to LQH and is
- * waiting for the response
-     *  DELIVERED : The result has been delivered; this scan frag process
-     *  is waiting for a SCAN_NEXTREQ telling it to continue scanning
- * RETURNING_FROM_DELIVERY : SCAN_NEXTREQ received and continuing scan
- * soon
- * QUEUED_FOR_DELIVERY : Result queued in TC and waiting for delivery
- * to API
-     *  COMPLETED : The fragment scan process has completed and finally
- * sent a SCAN_PROCCONF
- */
- enum ScanFragState {
- IDLE = 0,
- WAIT_GET_PRIMCONF = 1,
- LQH_ACTIVE = 2,
- DELIVERED = 4,
- QUEUED_FOR_DELIVERY = 6,
- COMPLETED = 7
- };
- // Timer for checking timeout of this fragment scan
- Uint32 scanFragTimer;
-
- // Id of the current scanned fragment
- Uint32 scanFragId;
-
- // Blockreference of LQH
- BlockReference lqhBlockref;
-
-    // getNodeInfo.m_connectCount, set at seize and used so that
-    // we don't accidentally kill a starting node
- Uint32 m_connectCount;
-
- // State of this fragment scan
- ScanFragState scanFragState;
-
- // Id of the ScanRecord this fragment scan belongs to
- Uint32 scanRec;
-
- // The value of fragmentCompleted in the last received SCAN_FRAGCONF
- Uint8 m_scan_frag_conf_status;
-
- inline void startFragTimer(Uint32 timeVal){
- scanFragTimer = timeVal;
- }
- inline void stopFragTimer(void){
- scanFragTimer = 0;
- }
-
- Uint32 m_ops;
- Uint32 m_chksum;
- Uint32 m_apiPtr;
- Uint32 m_totalLen;
- union {
- Uint32 nextPool;
- Uint32 nextList;
- };
- Uint32 prevList;
- };
-
- typedef Ptr<ScanFragRec> ScanFragRecPtr;
- typedef LocalDLList<ScanFragRec> ScanFragList;
-
- /**
- * Each scan allocates one ScanRecord to store information
- * about the current scan
- *
- */
- struct ScanRecord {
- ScanRecord() {}
- /** NOTE! This is the old comment for ScanState. - MASV
- * STATE TRANSITIONS OF SCAN_STATE. SCAN_STATE IS THE STATE
- * VARIABLE OF THE RECEIVE AND DELIVERY PROCESS.
- * THE PROCESS HAS THREE STEPS IT GOES THROUGH.
- * 1) THE INITIAL STATES WHEN RECEIVING DATA FOR THE SCAN.
- * - WAIT_SCAN_TAB_INFO
- * - WAIT_AI
- * - WAIT_FRAGMENT_COUNT
- * 2) THE EXECUTION STATES WHEN THE SCAN IS PERFORMED.
- * - SCAN_NEXT_ORDERED
- * - DELIVERED
- * - QUEUED_DELIVERED
- * 3) THE CLOSING STATE WHEN THE SCAN PROCESS IS CLOSING UP
- * EVERYTHING.
- * - CLOSING_SCAN
- * INITIAL START WHEN SCAN_TABREQ RECEIVED
- * -> WAIT_SCAN_TAB_INFO (IF ANY SCAN_TABINFO TO BE RECEIVED)
- * -> WAIT_AI (IF NO SCAN_TAB_INFO BUT ATTRINFO IS RECEIVED)
- * -> WAIT_FRAGMENT_COUNT (IF NEITHER SCAN_TABINFO OR ATTRINFO
- * RECEIVED)
- *
- * WAIT_SCAN_TAB_INFO TRANSITIONS:
- * -> WAIT_SCAN_TABINFO (WHEN MORE SCAN_TABINFO RECEIVED)
- * -> WAIT_AI (WHEN ATTRINFO RECEIVED AFTER RECEIVING ALL
- * SCAN_TABINFO)
- * -> WAIT_FRAGMENT_COUNT (WHEN NO ATTRINFO RECEIVED AFTER
- * RECEIVING ALL SCAN_TABINFO )
- * WAIT_AI TRANSITIONS:
- * -> WAIT_AI (WHEN MORE ATTRINFO RECEIVED)
- * -> WAIT_FRAGMENT_COUNT (WHEN ALL ATTRINFO RECEIVED)
- *
- * WAIT_FRAGMENT_COUNT TRANSITIONS:
- * -> SCAN_NEXT_ORDERED
- *
- * SCAN_NEXT_ORDERED TRANSITIONS:
- * -> DELIVERED (WHEN FIRST SCAN_FRAGCONF ARRIVES WITH OPERATIONS
- * TO REPORT IN IT)
- * -> CLOSING_SCAN (WHEN SCAN IS CLOSED BY SCAN_NEXTREQ OR BY SOME
- * ERROR)
- *
- * DELIVERED TRANSITIONS:
- * -> SCAN_NEXT_ORDERED (IF SCAN_NEXTREQ ARRIVES BEFORE ANY NEW
- * OPERATIONS TO REPORT ARRIVES)
- * -> QUEUED_DELIVERED (IF NEW OPERATION TO REPORT ARRIVES BEFORE
- * SCAN_NEXTREQ)
- * -> CLOSING_SCAN (WHEN SCAN IS CLOSED BY SCAN_NEXTREQ OR BY SOME
- * ERROR)
- *
- * QUEUED_DELIVERED TRANSITIONS:
- * -> DELIVERED (WHEN SCAN_NEXTREQ ARRIVES AND QUEUED OPERATIONS
- * TO REPORT ARE SENT TO THE APPLICATION)
- * -> CLOSING_SCAN (WHEN SCAN IS CLOSED BY SCAN_NEXTREQ OR BY
- * SOME ERROR)
- */
- enum ScanState {
- IDLE = 0,
- WAIT_SCAN_TAB_INFO = 1,
- WAIT_AI = 2,
- WAIT_FRAGMENT_COUNT = 3,
- RUNNING = 4,
- CLOSING_SCAN = 5
- };
-
- // State of this scan
- ScanState scanState;
-
- DLList<ScanFragRec>::Head m_running_scan_frags; // Currently in LQH
- union { Uint32 m_queued_count; Uint32 scanReceivedOperations; };
- DLList<ScanFragRec>::Head m_queued_scan_frags; // In TC !sent to API
- DLList<ScanFragRec>::Head m_delivered_scan_frags;// Delivered to API
-
- // Id of the next fragment to be scanned. Used by scan fragment
- // processes when they are ready for the next fragment
- Uint32 scanNextFragId;
-
- // Total number of fragments in the table we are scanning
- Uint32 scanNoFrag;
-
- // Index of next ScanRecords when in free list
- Uint32 nextScan;
-
- // Length of expected attribute information
- union { Uint32 scanAiLength; Uint32 m_booked_fragments_count; };
-
- Uint32 scanKeyLen;
-
- // Reference to ApiConnectRecord
- Uint32 scanApiRec;
-
- // Reference to TcConnectRecord
- Uint32 scanTcrec;
-
- // Number of scan frag processes that belong to this scan
- Uint32 scanParallel;
-
- // Schema version used by this scan
- Uint32 scanSchemaVersion;
-
- // Index of stored procedure belonging to this scan
- Uint32 scanStoredProcId;
-
- // The index of table that is scanned
- Uint32 scanTableref;
-
- // Number of operation records per scanned fragment
- // Number of operations in first batch
- // Max number of bytes per batch
- union {
- Uint16 first_batch_size_rows;
- Uint16 batch_size_rows;
- };
- Uint32 batch_byte_size;
-
- Uint32 scanRequestInfo; // ScanFrag format
-
- // Close is ordered
- bool m_close_scan_req;
- };
- typedef Ptr<ScanRecord> ScanRecordPtr;
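The long ScanState comment above is easiest to read as a transition table. The sketch below encodes exactly the transitions that comment documents; note that the live enum folds SCAN_NEXT_ORDERED, DELIVERED and QUEUED_DELIVERED into RUNNING, so this is an illustration of the documented protocol, not of the current code:

```cpp
// States and transitions as described in the old ScanState comment above.
enum class DocScanState {
  WAIT_SCAN_TAB_INFO, WAIT_AI, WAIT_FRAGMENT_COUNT,
  SCAN_NEXT_ORDERED, DELIVERED, QUEUED_DELIVERED, CLOSING_SCAN
};

inline bool legalTransition(DocScanState from, DocScanState to) {
  using S = DocScanState;
  switch (from) {
  case S::WAIT_SCAN_TAB_INFO:   // more SCAN_TABINFO, then ATTRINFO or fragment count
    return to == S::WAIT_SCAN_TAB_INFO || to == S::WAIT_AI || to == S::WAIT_FRAGMENT_COUNT;
  case S::WAIT_AI:              // more ATTRINFO until all received
    return to == S::WAIT_AI || to == S::WAIT_FRAGMENT_COUNT;
  case S::WAIT_FRAGMENT_COUNT:
    return to == S::SCAN_NEXT_ORDERED;
  case S::SCAN_NEXT_ORDERED:    // first SCAN_FRAGCONF with rows, or close
    return to == S::DELIVERED || to == S::CLOSING_SCAN;
  case S::DELIVERED:
    return to == S::SCAN_NEXT_ORDERED || to == S::QUEUED_DELIVERED || to == S::CLOSING_SCAN;
  case S::QUEUED_DELIVERED:
    return to == S::DELIVERED || to == S::CLOSING_SCAN;
  case S::CLOSING_SCAN:
    return false;
  }
  return false;
}
```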
-
- /* **********************************************************************$ */
- /* ******$ DATA BUFFER ******$ */
- /* */
- /* THIS BUFFER IS USED AS A GENERAL DATA STORAGE. */
- /* **********************************************************************$ */
- struct DatabufRecord {
- UintR data[4];
- /* 4 * 1 WORD = 4 WORD */
- UintR nextDatabuf;
- }; /* p2c: size = 20 bytes */
-
- typedef Ptr<DatabufRecord> DatabufRecordPtr;
-
- /* **********************************************************************$ */
- /* ******$ ATTRIBUTE INFORMATION RECORD ******$ */
- /*
- * CAN CONTAIN ONE (1) ATTRINFO SIGNAL. ONE SIGNAL CONTAINS 24 ATTR.
- * INFO WORDS. BUT 32 ELEMENTS ARE USED TO MAKE PLEX HAPPY.
-   * SOME OF THE ELEMENTS ARE USED FOR THE FOLLOWING THINGS:
-   * DATA LENGTH IN THIS RECORD IS STORED IN THE ELEMENT INDEXED BY
-   * ZINBUF_DATA_LEN.
-   * NEXT FREE ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_NEXTFREE.
-   * PREVIOUS ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_PREV
-   * (NOT USED YET).
- * NEXT ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_NEXT. */
- /* ******************************************************************** */
- struct AttrbufRecord {
- UintR attrbuf[32];
- }; /* p2c: size = 128 bytes */
-
- typedef Ptr<AttrbufRecord> AttrbufRecordPtr;
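Each AttrbufRecord is a fixed 32-word slab whose tail elements double as bookkeeping (data length at ZINBUF_DATA_LEN, next pointer at ZINBUF_NEXT), so ATTRINFO payload larger than one signal is stored as a chain of such slabs. A sketch of walking a chain, with NIL and the flat bufs vector standing in for RNIL and the real attrbuf array:

```cpp
#include <cstdint>
#include <vector>

static const uint32_t INBUF_DATA_LEN = 24;          // ZINBUF_DATA_LEN above
static const uint32_t INBUF_NEXT     = 27;          // ZINBUF_NEXT above
static const uint32_t NIL            = 0xFFFFFFFF;  // stand-in for RNIL

struct Attrbuf { uint32_t attrbuf[32]; };

// Collect all attribute-info words stored in a chain of attrbufs. `bufs`
// stands in for the global attrbuf array; `first` is the head of the chain.
inline std::vector<uint32_t> collectAttrInfo(const std::vector<Attrbuf>& bufs,
                                             uint32_t first) {
  std::vector<uint32_t> out;
  for (uint32_t i = first; i != NIL; i = bufs[i].attrbuf[INBUF_NEXT]) {
    uint32_t len = bufs[i].attrbuf[INBUF_DATA_LEN];  // payload words in this slab
    for (uint32_t w = 0; w < len && w < INBUF_DATA_LEN; w++)
      out.push_back(bufs[i].attrbuf[w]);
  }
  return out;
}
```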
-
- /*************************************************************************>*/
- /* GLOBAL CHECKPOINT INFORMATION RECORD */
- /* */
- /* THIS RECORD IS USED TO STORE THE GLOBALCHECKPOINT NUMBER AND A
- * COUNTER DURING THE COMPLETION PHASE OF THE TRANSACTION */
- /*************************************************************************>*/
- /* */
- /* GCP RECORD ALIGNED TO BE 32 BYTES */
- /*************************************************************************>*/
- struct GcpRecord {
- UintR gcpUnused1[2]; /* p2c: Not used */
- UintR firstApiConnect;
- UintR lastApiConnect;
- UintR gcpId;
- UintR nextGcp;
- UintR gcpUnused2; /* p2c: Not used */
- Uint16 gcpNomoretransRec;
- }; /* p2c: size = 32 bytes */
-
- typedef Ptr<GcpRecord> GcpRecordPtr;
-
- /*************************************************************************>*/
- /* TC_FAIL_RECORD */
- /* THIS RECORD IS USED WHEN HANDLING TAKE OVER OF ANOTHER FAILED
- * TC NODE. */
- /*************************************************************************>*/
- struct TcFailRecord {
- Uint16 queueList[MAX_NDB_NODES];
- Uint8 takeOverProcState[MAX_NDB_NODES];
- UintR completedTakeOver;
- UintR currentHashIndexTakeOver;
- FailState failStatus;
- Uint16 queueIndex;
- Uint16 takeOverNode;
- }; /* p2c: size = 64 bytes */
-
- typedef Ptr<TcFailRecord> TcFailRecordPtr;
-
-public:
- Dbtc(const class Configuration &);
- virtual ~Dbtc();
-
-private:
- BLOCK_DEFINES(Dbtc);
-
- // Transit signals
- void execPACKED_SIGNAL(Signal* signal);
- void execABORTED(Signal* signal);
- void execATTRINFO(Signal* signal);
- void execCONTINUEB(Signal* signal);
- void execKEYINFO(Signal* signal);
- void execSCAN_NEXTREQ(Signal* signal);
- void execSCAN_PROCREQ(Signal* signal);
- void execSCAN_PROCCONF(Signal* signal);
- void execTAKE_OVERTCREQ(Signal* signal);
- void execTAKE_OVERTCCONF(Signal* signal);
- void execLQHKEYREF(Signal* signal);
- void execTRANSID_AI_R(Signal* signal);
- void execKEYINFO20_R(Signal* signal);
-
- // Received signals
- void execDUMP_STATE_ORD(Signal* signal);
- void execSEND_PACKED(Signal* signal);
- void execCOMPLETED(Signal* signal);
- void execCOMMITTED(Signal* signal);
- void execDIGETNODESREF(Signal* signal);
- void execDIGETPRIMCONF(Signal* signal);
- void execDIGETPRIMREF(Signal* signal);
- void execDISEIZECONF(Signal* signal);
- void execDIVERIFYCONF(Signal* signal);
- void execDI_FCOUNTCONF(Signal* signal);
- void execDI_FCOUNTREF(Signal* signal);
- void execGCP_NOMORETRANS(Signal* signal);
- void execLQHKEYCONF(Signal* signal);
- void execNDB_STTOR(Signal* signal);
- void execREAD_NODESCONF(Signal* signal);
- void execREAD_NODESREF(Signal* signal);
- void execSTTOR(Signal* signal);
- void execTC_COMMITREQ(Signal* signal);
- void execTC_CLOPSIZEREQ(Signal* signal);
- void execTCGETOPSIZEREQ(Signal* signal);
- void execTCKEYREQ(Signal* signal);
- void execTCRELEASEREQ(Signal* signal);
- void execTCSEIZEREQ(Signal* signal);
- void execTCROLLBACKREQ(Signal* signal);
- void execTC_HBREP(Signal* signal);
- void execTC_SCHVERREQ(Signal* signal);
- void execSCAN_TABREQ(Signal* signal);
- void execSCAN_TABINFO(Signal* signal);
- void execSCAN_FRAGCONF(Signal* signal);
- void execSCAN_FRAGREF(Signal* signal);
- void execREAD_CONFIG_REQ(Signal* signal);
- void execLQH_TRANSCONF(Signal* signal);
- void execCOMPLETECONF(Signal* signal);
- void execCOMMITCONF(Signal* signal);
- void execABORTCONF(Signal* signal);
- void execNODE_FAILREP(Signal* signal);
- void execINCL_NODEREQ(Signal* signal);
- void execTIME_SIGNAL(Signal* signal);
- void execAPI_FAILREQ(Signal* signal);
- void execSCAN_HBREP(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
-
- void execABORT_ALL_REQ(Signal* signal);
-
- void execCREATE_TRIG_REQ(Signal* signal);
- void execDROP_TRIG_REQ(Signal* signal);
- void execFIRE_TRIG_ORD(Signal* signal);
- void execTRIG_ATTRINFO(Signal* signal);
- void execCREATE_INDX_REQ(Signal* signal);
- void execDROP_INDX_REQ(Signal* signal);
- void execTCINDXREQ(Signal* signal);
- void execINDXKEYINFO(Signal* signal);
- void execINDXATTRINFO(Signal* signal);
- void execALTER_INDX_REQ(Signal* signal);
-
- // Index table lookup
- void execTCKEYCONF(Signal* signal);
- void execTCKEYREF(Signal* signal);
- void execTRANSID_AI(Signal* signal);
- void execTCROLLBACKREP(Signal* signal);
-
- void execCREATE_TAB_REQ(Signal* signal);
- void execPREP_DROP_TAB_REQ(Signal* signal);
- void execDROP_TAB_REQ(Signal* signal);
- void execWAIT_DROP_TAB_REF(Signal* signal);
- void execWAIT_DROP_TAB_CONF(Signal* signal);
- void checkWaitDropTabFailedLqh(Signal*, Uint32 nodeId, Uint32 tableId);
- void execALTER_TAB_REQ(Signal* signal);
- void set_timeout_value(Uint32 timeOut);
- void set_appl_timeout_value(Uint32 timeOut);
- void set_no_parallel_takeover(Uint32);
- void updateBuddyTimer(ApiConnectRecordPtr);
-
- // Statement blocks
- void updatePackedList(Signal* signal, HostRecord* ahostptr,
- Uint16 ahostIndex);
- void clearTcNodeData(Signal* signal,
- UintR TLastLqhIndicator,
- UintR Tstart);
- void errorReport(Signal* signal, int place);
- void warningReport(Signal* signal, int place);
- void printState(Signal* signal, int place);
- int seizeTcRecord(Signal* signal);
- int seizeCacheRecord(Signal* signal);
- void TCKEY_abort(Signal* signal, int place);
- void copyFromToLen(UintR* sourceBuffer, UintR* destBuffer, UintR copyLen);
- void reportNodeFailed(Signal* signal, Uint32 nodeId);
- void sendPackedTCKEYCONF(Signal* signal,
- HostRecord * ahostptr,
- UintR hostId);
- void sendPackedTCINDXCONF(Signal* signal,
- HostRecord * ahostptr,
- UintR hostId);
- void sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr);
- void sendCommitLqh(Signal* signal,
- TcConnectRecord * const regTcPtr);
- void sendCompleteLqh(Signal* signal,
- TcConnectRecord * const regTcPtr);
- void sendTCKEY_FAILREF(Signal* signal, const ApiConnectRecord *);
- void sendTCKEY_FAILCONF(Signal* signal, ApiConnectRecord *);
- void checkStartTimeout(Signal* signal);
- void checkStartFragTimeout(Signal* signal);
- void timeOutFoundFragLab(Signal* signal, Uint32 TscanConPtr);
- void timeOutLoopStartFragLab(Signal* signal, Uint32 TscanConPtr);
- int releaseAndAbort(Signal* signal);
- void findApiConnectFail(Signal* signal);
- void findTcConnectFail(Signal* signal);
- void initApiConnectFail(Signal* signal);
- void initTcConnectFail(Signal* signal);
- void initTcFail(Signal* signal);
- void releaseTakeOver(Signal* signal);
- void setupFailData(Signal* signal);
- void updateApiStateFail(Signal* signal);
- void updateTcStateFail(Signal* signal);
- void handleApiFailState(Signal* signal, UintR anApiConnectptr);
- void handleFailedApiNode(Signal* signal,
- UintR aFailedNode,
- UintR anApiConnectPtr);
- void handleScanStop(Signal* signal, UintR aFailedNode);
- void initScanTcrec(Signal* signal);
- void initScanrec(ScanRecordPtr, const class ScanTabReq*,
- const UintR scanParallel,
- const UintR noOprecPerFrag);
- void initScanfragrec(Signal* signal);
- void releaseScanResources(ScanRecordPtr);
- ScanRecordPtr seizeScanrec(Signal* signal);
- void sendScanFragReq(Signal*, ScanRecord*, ScanFragRec*);
- void sendScanTabConf(Signal* signal, ScanRecordPtr);
- void close_scan_req(Signal*, ScanRecordPtr, bool received_req);
- void close_scan_req_send_conf(Signal*, ScanRecordPtr);
-
- void checkGcp(Signal* signal);
- void commitGciHandling(Signal* signal, UintR Tgci);
- void copyApi(Signal* signal);
- void DIVER_node_fail_handling(Signal* signal, UintR Tgci);
- void gcpTcfinished(Signal* signal);
- void handleGcp(Signal* signal);
- void hash(Signal* signal);
- bool handle_special_hash(Uint32 dstHash[4],
- Uint32* src, Uint32 srcLen,
- Uint32 tabPtrI, bool distr);
-
- void initApiConnect(Signal* signal);
- void initApiConnectRec(Signal* signal,
- ApiConnectRecord * const regApiPtr,
- bool releaseIndexOperations = false);
- void initattrbuf(Signal* signal);
- void initdatabuf(Signal* signal);
- void initgcp(Signal* signal);
- void inithost(Signal* signal);
- void initialiseScanrec(Signal* signal);
- void initialiseScanFragrec(Signal* signal);
- void initialiseScanOprec(Signal* signal);
- void initTable(Signal* signal);
- void initialiseTcConnect(Signal* signal);
- void linkApiToGcp(Signal* signal);
- void linkGciInGcilist(Signal* signal);
- void linkKeybuf(Signal* signal);
- void linkTcInConnectionlist(Signal* signal);
- void releaseAbortResources(Signal* signal);
- void releaseApiCon(Signal* signal, UintR aApiConnectPtr);
- void releaseApiConCopy(Signal* signal);
- void releaseApiConnectFail(Signal* signal);
- void releaseAttrinfo();
- void releaseGcp(Signal* signal);
- void releaseKeys();
- void releaseSimpleRead(Signal*, ApiConnectRecordPtr, TcConnectRecord*);
- void releaseDirtyWrite(Signal* signal);
- void releaseTcCon();
- void releaseTcConnectFail(Signal* signal);
- void releaseTransResources(Signal* signal);
- void saveAttrbuf(Signal* signal);
- void seizeApiConnect(Signal* signal);
- void seizeApiConnectCopy(Signal* signal);
- void seizeApiConnectFail(Signal* signal);
- void seizeDatabuf(Signal* signal);
- void seizeGcp(Signal* signal);
- void seizeTcConnect(Signal* signal);
- void seizeTcConnectFail(Signal* signal);
- void sendApiCommit(Signal* signal);
- void sendAttrinfo(Signal* signal,
- UintR TattrinfoPtr,
- AttrbufRecord * const regAttrPtr,
- UintR TBref);
- void sendContinueTimeOutControl(Signal* signal, Uint32 TapiConPtr);
- void sendKeyinfo(Signal* signal, BlockReference TBRef, Uint32 len);
- void sendlqhkeyreq(Signal* signal, BlockReference TBRef);
- void sendSystemError(Signal* signal);
- void sendtckeyconf(Signal* signal, UintR TcommitFlag);
- void sendTcIndxConf(Signal* signal, UintR TcommitFlag);
- void unlinkApiConnect(Signal* signal);
- void unlinkGcp(Signal* signal);
- void unlinkReadyTcCon(Signal* signal);
- void handleFailedOperation(Signal* signal,
- const LqhKeyRef * const lqhKeyRef,
- bool gotLqhKeyRef);
- void markOperationAborted(ApiConnectRecord * const regApiPtr,
- TcConnectRecord * const regTcPtr);
- void clearCommitAckMarker(ApiConnectRecord * const regApiPtr,
- TcConnectRecord * const regTcPtr);
- // Trigger and index handling
- bool saveINDXKEYINFO(Signal* signal,
- TcIndexOperation* indexOp,
- const Uint32 *src,
- Uint32 len);
- bool receivedAllINDXKEYINFO(TcIndexOperation* indexOp);
- bool saveINDXATTRINFO(Signal* signal,
- TcIndexOperation* indexOp,
- const Uint32 *src,
- Uint32 len);
- bool receivedAllINDXATTRINFO(TcIndexOperation* indexOp);
- bool saveTRANSID_AI(Signal* signal,
- TcIndexOperation* indexOp,
- const Uint32 *src,
- Uint32 len);
- bool receivedAllTRANSID_AI(TcIndexOperation* indexOp);
- void readIndexTable(Signal* signal,
- ApiConnectRecord* regApiPtr,
- TcIndexOperation* indexOp);
- void executeIndexOperation(Signal* signal,
- ApiConnectRecord* regApiPtr,
- TcIndexOperation* indexOp);
- bool seizeIndexOperation(ApiConnectRecord* regApiPtr,
- TcIndexOperationPtr& indexOpPtr);
- void releaseIndexOperation(ApiConnectRecord* regApiPtr,
- TcIndexOperation* indexOp);
- void releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr);
- void setupIndexOpReturn(ApiConnectRecord* regApiPtr,
- TcConnectRecord* regTcPtr);
-
- void saveTriggeringOpState(Signal* signal,
- TcConnectRecord* trigOp);
- void restoreTriggeringOpState(Signal* signal,
- TcConnectRecord* trigOp);
- void continueTriggeringOp(Signal* signal,
- TcConnectRecord* trigOp);
-
- void scheduleFiredTrigger(ApiConnectRecordPtr* transPtr,
- TcConnectRecordPtr* opPtr);
- void executeTriggers(Signal* signal, ApiConnectRecordPtr* transPtr);
- void executeTrigger(Signal* signal,
- TcFiredTriggerData* firedTriggerData,
- ApiConnectRecordPtr* transPtr,
- TcConnectRecordPtr* opPtr);
- void executeIndexTrigger(Signal* signal,
- TcDefinedTriggerData* definedTriggerData,
- TcFiredTriggerData* firedTriggerData,
- ApiConnectRecordPtr* transPtr,
- TcConnectRecordPtr* opPtr);
- void insertIntoIndexTable(Signal* signal,
- TcFiredTriggerData* firedTriggerData,
- ApiConnectRecordPtr* transPtr,
- TcConnectRecordPtr* opPtr,
- TcIndexData* indexData,
- bool holdOperation = false);
- void deleteFromIndexTable(Signal* signal,
- TcFiredTriggerData* firedTriggerData,
- ApiConnectRecordPtr* transPtr,
- TcConnectRecordPtr* opPtr,
- TcIndexData* indexData,
- bool holdOperation = false);
- void releaseFiredTriggerData(DLFifoList<TcFiredTriggerData>* triggers);
- // Generated statement blocks
- void warningHandlerLab(Signal* signal);
- void systemErrorLab(Signal* signal);
- void sendSignalErrorRefuseLab(Signal* signal);
- void scanTabRefLab(Signal* signal, Uint32 errCode);
- void diFcountReqLab(Signal* signal, ScanRecordPtr);
- void signalErrorRefuseLab(Signal* signal);
- void abort080Lab(Signal* signal);
- void packKeyData000Lab(Signal* signal, BlockReference TBRef, Uint32 len);
- void abortScanLab(Signal* signal, ScanRecordPtr, Uint32 errCode);
- void sendAbortedAfterTimeout(Signal* signal, int Tcheck);
- void abort010Lab(Signal* signal);
- void abort015Lab(Signal* signal);
- void packLqhkeyreq(Signal* signal, BlockReference TBRef);
- void packLqhkeyreq040Lab(Signal* signal,
- UintR anAttrBufIndex,
- BlockReference TBRef);
- void packLqhkeyreq040Lab(Signal* signal);
- void returnFromQueuedDeliveryLab(Signal* signal);
- void startTakeOverLab(Signal* signal);
- void toCompleteHandlingLab(Signal* signal);
- void toCommitHandlingLab(Signal* signal);
- void toAbortHandlingLab(Signal* signal);
- void abortErrorLab(Signal* signal);
- void nodeTakeOverCompletedLab(Signal* signal);
- void ndbsttorry010Lab(Signal* signal);
- void commit020Lab(Signal* signal);
- void complete010Lab(Signal* signal);
- void releaseAtErrorLab(Signal* signal);
- void seizeDatabuferrorLab(Signal* signal);
- void scanAttrinfoLab(Signal* signal, UintR Tlen);
- void seizeAttrbuferrorLab(Signal* signal);
- void attrinfoDihReceivedLab(Signal* signal);
- void aiErrorLab(Signal* signal);
- void attrinfo020Lab(Signal* signal);
- void scanReleaseResourcesLab(Signal* signal);
- void scanCompletedLab(Signal* signal);
- void scanError(Signal* signal, ScanRecordPtr, Uint32 errorCode);
- void diverify010Lab(Signal* signal);
- void intstartphase2x010Lab(Signal* signal);
- void intstartphase3x010Lab(Signal* signal);
- void sttorryLab(Signal* signal);
- void abortBeginErrorLab(Signal* signal);
- void tabStateErrorLab(Signal* signal);
- void wrongSchemaVersionErrorLab(Signal* signal);
- void noFreeConnectionErrorLab(Signal* signal);
- void tckeyreq050Lab(Signal* signal);
- void timeOutFoundLab(Signal* signal, UintR anAdd);
- void completeTransAtTakeOverLab(Signal* signal, UintR TtakeOverInd);
- void completeTransAtTakeOverDoLast(Signal* signal, UintR TtakeOverInd);
- void completeTransAtTakeOverDoOne(Signal* signal, UintR TtakeOverInd);
- void timeOutLoopStartLab(Signal* signal, Uint32 apiConnectPtr);
- void initialiseRecordsLab(Signal* signal, UintR Tdata0, Uint32, Uint32);
- void tckeyreq020Lab(Signal* signal);
- void intstartphase2x020Lab(Signal* signal);
- void intstartphase1x010Lab(Signal* signal);
- void startphase1x010Lab(Signal* signal);
-
- void lqhKeyConf_checkTransactionState(Signal * signal,
- ApiConnectRecord * const regApiPtr);
-
- void checkDropTab(Signal* signal);
-
- void checkScanActiveInFailedLqh(Signal* signal,
- Uint32 scanPtrI,
- Uint32 failedNodeId);
- void checkScanFragList(Signal*, Uint32 failedNodeId, ScanRecord * scanP,
- LocalDLList<ScanFragRec>::Head&);
-
- // Initialisation
- void initData();
- void initRecords();
-
- // Transit signals
-
-
- ApiConnectRecord *apiConnectRecord;
- ApiConnectRecordPtr apiConnectptr;
- UintR capiConnectFilesize;
-
- TcConnectRecord *tcConnectRecord;
- TcConnectRecordPtr tcConnectptr;
- UintR ctcConnectFilesize;
-
- CacheRecord *cacheRecord;
- CacheRecordPtr cachePtr;
- UintR ccacheFilesize;
-
- AttrbufRecord *attrbufRecord;
- AttrbufRecordPtr attrbufptr;
- UintR cattrbufFilesize;
-
- HostRecord *hostRecord;
- HostRecordPtr hostptr;
- UintR chostFilesize;
-
- GcpRecord *gcpRecord;
- GcpRecordPtr gcpPtr;
- UintR cgcpFilesize;
-
- TableRecord *tableRecord;
- UintR ctabrecFilesize;
-
- UintR thashValue;
- UintR tdistrHashValue;
-
- UintR ttransid_ptr;
- UintR cfailure_nr;
- UintR coperationsize;
- UintR ctcTimer;
-
- ApiConnectRecordPtr tmpApiConnectptr;
- UintR tcheckGcpId;
-
- struct TransCounters {
- enum { Off, Timer, Started } c_trans_status;
- UintR cattrinfoCount;
- UintR ctransCount;
- UintR ccommitCount;
- UintR creadCount;
- UintR csimpleReadCount;
- UintR cwriteCount;
- UintR cabortCount;
- UintR cconcurrentOp;
- Uint32 c_scan_count;
- Uint32 c_range_scan_count;
- void reset () {
- cattrinfoCount = ctransCount = ccommitCount = creadCount =
- csimpleReadCount = cwriteCount = cabortCount =
- c_scan_count = c_range_scan_count = 0;
- }
- Uint32 report(Signal* signal){
- signal->theData[0] = NDB_LE_TransReportCounters;
- signal->theData[1] = ctransCount;
- signal->theData[2] = ccommitCount;
- signal->theData[3] = creadCount;
- signal->theData[4] = csimpleReadCount;
- signal->theData[5] = cwriteCount;
- signal->theData[6] = cattrinfoCount;
- signal->theData[7] = cconcurrentOp;
- signal->theData[8] = cabortCount;
- signal->theData[9] = c_scan_count;
- signal->theData[10] = c_range_scan_count;
- return 11;
- }
- } c_counters;
-
- Uint16 cownNodeid;
- Uint16 terrorCode;
-
- UintR cfirstfreeAttrbuf;
- UintR cfirstfreeTcConnect;
- UintR cfirstfreeApiConnectCopy;
- UintR cfirstfreeCacheRec;
-
- UintR cfirstgcp;
- UintR clastgcp;
- UintR cfirstfreeGcp;
- UintR cfirstfreeScanrec;
-
- TableRecordPtr tabptr;
- UintR cfirstfreeApiConnectFail;
- UintR cfirstfreeApiConnect;
-
- UintR cfirstfreeDatabuf;
- BlockReference cdihblockref;
- BlockReference cownref; /* OWN BLOCK REFERENCE */
-
- ApiConnectRecordPtr timeOutptr;
-
- ScanRecord *scanRecord;
- UintR cscanrecFileSize;
-
- UnsafeArrayPool<ScanFragRec> c_scan_frag_pool;
- ScanFragRecPtr scanFragptr;
-
- UintR cscanFragrecFileSize;
- UintR cdatabufFilesize;
-
- BlockReference cdictblockref;
- BlockReference cerrorBlockref;
- BlockReference clqhblockref;
- BlockReference cndbcntrblockref;
-
- Uint16 csignalKey;
- Uint16 csystemnodes;
- Uint16 cnodes[4];
- NodeId cmasterNodeId;
- UintR cnoParallelTakeOver;
- TimeOutCheckState ctimeOutCheckFragActive;
-
- UintR ctimeOutCheckFragCounter;
- UintR ctimeOutCheckCounter;
- UintR ctimeOutValue;
- UintR ctimeOutCheckDelay;
- Uint32 ctimeOutCheckHeartbeat;
- Uint32 ctimeOutCheckLastHeartbeat;
- Uint32 ctimeOutMissedHeartbeats;
- Uint32 c_appl_timeout_value;
-
- SystemStartState csystemStart;
- TimeOutCheckState ctimeOutCheckActive;
-
- BlockReference capiFailRef;
- UintR cpackedListIndex;
- Uint16 cpackedList[MAX_NODES];
- UintR capiConnectClosing[MAX_NODES];
- UintR con_lineNodes;
-
- DatabufRecord *databufRecord;
- DatabufRecordPtr databufptr;
- DatabufRecordPtr tmpDatabufptr;
-
- UintR treqinfo;
- UintR ttransid1;
- UintR ttransid2;
-
- UintR tabortInd;
-
- NodeId tnodeid;
- BlockReference tblockref;
-
- LqhTransConf::OperationStatus ttransStatus;
- UintR ttcOprec;
- NodeId tfailedNodeId;
- Uint8 tcurrentReplicaNo;
- Uint8 tpad1;
-
- UintR tgci;
- UintR tapplRef;
- UintR tapplOprec;
-
- UintR tindex;
- UintR tmaxData;
- UintR tmp;
-
- UintR tnodes;
- BlockReference tusersblkref;
- UintR tuserpointer;
- UintR tloadCode;
-
- UintR tconfig1;
- UintR tconfig2;
-
- UintR cdata[32];
- UintR ctransidFailHash[512];
- UintR ctcConnectFailHash[1024];
-
- /**
- * Commit Ack handling
- */
-public:
- struct CommitAckMarker {
- Uint32 transid1;
- Uint32 transid2;
- union { Uint32 nextPool; Uint32 nextHash; };
- Uint32 prevHash;
- Uint32 apiConnectPtr;
- Uint16 apiNodeId;
- Uint16 noOfLqhs;
- Uint16 lqhNodeId[MAX_REPLICAS];
-
- inline bool equal(const CommitAckMarker & p) const {
- return ((p.transid1 == transid1) && (p.transid2 == transid2));
- }
-
- inline Uint32 hashValue() const {
- return transid1;
- }
- };
-private:
- typedef Ptr<CommitAckMarker> CommitAckMarkerPtr;
- typedef DLHashTable<CommitAckMarker>::Iterator CommitAckMarkerIterator;
-
- ArrayPool<CommitAckMarker> m_commitAckMarkerPool;
- DLHashTable<CommitAckMarker> m_commitAckMarkerHash;
-
- void execTC_COMMIT_ACK(Signal* signal);
- void sendRemoveMarkers(Signal*, const CommitAckMarker *);
- void sendRemoveMarker(Signal* signal,
- NodeId nodeId,
- Uint32 transid1,
- Uint32 transid2);
- void removeMarkerForFailedAPI(Signal* signal, Uint32 nodeId, Uint32 bucket);
-
- bool getAllowStartTransaction() const {
- if(getNodeState().getSingleUserMode())
- return true;
- return getNodeState().startLevel < NodeState::SL_STOPPING_2;
- }
-
- void checkAbortAllTimeout(Signal* signal, Uint32 sleepTime);
- struct AbortAllRecord {
- AbortAllRecord(){ clientRef = 0; }
- Uint32 clientData;
- BlockReference clientRef;
-
- Uint32 oldTimeOutValue;
- };
- AbortAllRecord c_abortRec;
-
- /************************** API CONNECT RECORD ***********************/
- /* *******************************************************************/
- /* THE API CONNECT RECORD CONTAINS THE CONNECTION RECORD TO WHICH THE*/
- /* APPLICATION CONNECTS. THE APPLICATION CAN SEND ONE OPERATION AT A */
- /* TIME. IT CAN SEND A NEW OPERATION IMMEDIATELY AFTER SENDING THE */
- /* PREVIOUS OPERATION. THEREBY SEVERAL OPERATIONS CAN BE ACTIVE IN */
- /* ONE TRANSACTION WITHIN TC. THIS IS ACHIEVED BY USING THE API */
- /* CONNECT RECORD. EACH ACTIVE OPERATION IS HANDLED BY THE TC */
- /* CONNECT RECORD. AS SOON AS THE TC CONNECT RECORD HAS SENT THE */
- /* REQUEST TO THE LQH IT IS READY TO RECEIVE NEW OPERATIONS. THE */
- /* LQH CONNECT RECORD TAKES CARE OF WAITING FOR AN OPERATION TO */
- /* COMPLETE. WHEN AN OPERATION HAS COMPLETED ON THE LQH CONNECT */
- /* RECORD A NEW OPERATION CAN BE STARTED ON THIS LQH CONNECT RECORD. */
- /*******************************************************************>*/
- /* */
- /* API CONNECT RECORD ALIGNED TO BE 256 BYTES */
- /*******************************************************************>*/
- /************************** TC CONNECT RECORD ************************/
- /* *******************************************************************/
- /* TC CONNECT RECORD KEEPS ALL INFORMATION TO CARRY OUT A TRANSACTION*/
- /* THE TRANSACTION CONTROLLER ESTABLISHES CONNECTIONS TO DIFFERENT */
- /* BLOCKS TO CARRY OUT THE TRANSACTION. THERE CAN BE SEVERAL RECORDS */
- /* PER ACTIVE TRANSACTION. THE TC CONNECT RECORD COOPERATES WITH THE */
- /* API CONNECT RECORD FOR COMMUNICATION WITH THE API AND WITH THE */
- /* LQH CONNECT RECORD FOR COMMUNICATION WITH THE LQH'S INVOLVED IN */
- /* THE TRANSACTION. TC CONNECT RECORD IS PERMANENTLY CONNECTED TO A */
- /* RECORD IN DICT AND ONE IN DIH. IT CONTAINS A LIST OF ACTIVE LQH */
- /* CONNECT RECORDS AND A LIST OF STARTED BUT NOT ACTIVE LQH CONNECT */
- /* RECORDS. IT DOES ALSO CONTAIN A LIST OF ALL OPERATIONS THAT ARE */
- /* EXECUTED WITH THE TC CONNECT RECORD. */
- /*******************************************************************>*/
- /* TC_CONNECT RECORD ALIGNED TO BE 128 BYTES */
- /*******************************************************************>*/
- UintR cfirstfreeTcConnectFail;
-
- /* POINTER FOR THE LQH RECORD*/
- /* ************************ HOST RECORD ********************************* */
- /********************************************************/
- /* THIS RECORD CONTAINS ALIVE-STATUS ON ALL NODES IN THE*/
- /* SYSTEM */
- /********************************************************/
- /* THIS RECORD IS ALIGNED TO BE 8 BYTES. */
- /********************************************************/
- /* ************************ TABLE RECORD ******************************** */
- /********************************************************/
- /* THIS RECORD CONTAINS THE CURRENT SCHEMA VERSION OF */
- /* ALL TABLES IN THE SYSTEM. */
- /********************************************************/
- /*-------------------------------------------------------------------------*/
- /* THE TC CONNECTION USED BY THIS SCAN. */
- /*-------------------------------------------------------------------------*/
- /*-------------------------------------------------------------------------*/
- /* LENGTH READ FOR A PARTICULAR SCANNED OPERATION. */
- /*-------------------------------------------------------------------------*/
- /*-------------------------------------------------------------------------*/
- /* REFERENCE TO THE SCAN RECORD FOR THIS SCAN PROCESS. */
- /*-------------------------------------------------------------------------*/
- /* *********************************************************************** */
- /* ******$ DATA BUFFER ******$ */
- /* */
- /* THIS BUFFER IS USED AS A GENERAL DATA STORAGE. */
- /* *********************************************************************** */
- /* *********************************************************************** */
- /* ******$ ATTRIBUTE INFORMATION RECORD ******$ */
- /*
- CAN CONTAIN ONE (1) ATTRINFO SIGNAL. ONE SIGNAL CONTAINS 24 ATTR.
- INFO WORDS. BUT 32 ELEMENTS ARE USED TO MAKE PLEX HAPPY.
-     SOME OF THE ELEMENTS ARE USED FOR THE FOLLOWING THINGS:
-     DATA LENGTH IN THIS RECORD IS STORED IN THE ELEMENT INDEXED BY
-     ZINBUF_DATA_LEN.
-     NEXT FREE ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_NEXTFREE.
-     PREVIOUS ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_PREV
-     (NOT USED YET).
- NEXT ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_NEXT.
- */
- /* ********************************************************************** */
- /**************************************************************************/
- /* GLOBAL CHECKPOINT INFORMATION RECORD */
- /* */
- /* THIS RECORD IS USED TO STORE THE GCP NUMBER AND A COUNTER */
- /* DURING THE COMPLETION PHASE OF THE TRANSACTION */
- /**************************************************************************/
- /* */
- /* GCP RECORD ALIGNED TO BE 32 BYTES */
- /**************************************************************************/
- /**************************************************************************/
- /* TC_FAIL_RECORD */
- /* THIS RECORD IS USED WHEN HANDLING TAKE OVER OF ANOTHER FAILED TC NODE.*/
- /**************************************************************************/
- TcFailRecord *tcFailRecord;
- TcFailRecordPtr tcNodeFailptr;
- /**************************************************************************/
- // Temporary variables that must not be used for storage between
- // signals. They can only be used within a signal to transfer values
- // between subroutines. In the long run these variables should be
- // removed and replaced by communication through stack variables.
- /**************************************************************************/
-};
-#endif
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
deleted file mode 100644
index e4cce29ba30..00000000000
--- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ /dev/null
@@ -1,13096 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#define DBTC_C
-
-#include "Dbtc.hpp"
-#include "md5_hash.hpp"
-#include <RefConvert.hpp>
-#include <ndb_limits.h>
-#include <my_sys.h>
-
-#include <signaldata/EventReport.hpp>
-#include <signaldata/TcKeyReq.hpp>
-#include <signaldata/TcKeyConf.hpp>
-#include <signaldata/TcKeyRef.hpp>
-#include <signaldata/KeyInfo.hpp>
-#include <signaldata/AttrInfo.hpp>
-#include <signaldata/TransIdAI.hpp>
-#include <signaldata/TcRollbackRep.hpp>
-#include <signaldata/NodeFailRep.hpp>
-#include <signaldata/ReadNodesConf.hpp>
-#include <signaldata/NFCompleteRep.hpp>
-#include <signaldata/LqhKey.hpp>
-#include <signaldata/TcCommit.hpp>
-#include <signaldata/TcContinueB.hpp>
-#include <signaldata/TcKeyFailConf.hpp>
-#include <signaldata/AbortAll.hpp>
-#include <signaldata/ScanFrag.hpp>
-#include <signaldata/ScanTab.hpp>
-#include <signaldata/PrepDropTab.hpp>
-#include <signaldata/DropTab.hpp>
-#include <signaldata/AlterTab.hpp>
-#include <signaldata/CreateTrig.hpp>
-#include <signaldata/DropTrig.hpp>
-#include <signaldata/FireTrigOrd.hpp>
-#include <signaldata/TrigAttrInfo.hpp>
-#include <signaldata/CreateIndx.hpp>
-#include <signaldata/DropIndx.hpp>
-#include <signaldata/AlterIndx.hpp>
-#include <signaldata/ScanTab.hpp>
-#include <signaldata/SystemError.hpp>
-#include <signaldata/DumpStateOrd.hpp>
-#include <signaldata/DisconnectRep.hpp>
-#include <signaldata/TcHbRep.hpp>
-
-#include <signaldata/PrepDropTab.hpp>
-#include <signaldata/DropTab.hpp>
-#include <signaldata/TcIndx.hpp>
-#include <signaldata/IndxKeyInfo.hpp>
-#include <signaldata/IndxAttrInfo.hpp>
-#include <signaldata/PackedSignal.hpp>
-#include <AttributeHeader.hpp>
-#include <signaldata/DictTabInfo.hpp>
-#include <AttributeDescriptor.hpp>
-#include <SectionReader.hpp>
-
-#include <NdbOut.hpp>
-#include <DebuggerNames.hpp>
-
-// Use DEBUG to print messages that should be
-// seen only when we debug the product
-#ifdef VM_TRACE
-#define DEBUG(x) ndbout << "DBTC: "<< x << endl;
-#else
-#define DEBUG(x)
-#endif
-
-#define INTERNAL_TRIGGER_TCKEYREQ_JBA 0
-
-#ifdef VM_TRACE
-NdbOut &
-operator<<(NdbOut& out, Dbtc::ConnectionState state){
- switch(state){
- case Dbtc::CS_CONNECTED: out << "CS_CONNECTED"; break;
- case Dbtc::CS_DISCONNECTED: out << "CS_DISCONNECTED"; break;
- case Dbtc::CS_STARTED: out << "CS_STARTED"; break;
- case Dbtc::CS_RECEIVING: out << "CS_RECEIVING"; break;
- case Dbtc::CS_PREPARED: out << "CS_PREPARED"; break;
- case Dbtc::CS_START_PREPARING: out << "CS_START_PREPARING"; break;
- case Dbtc::CS_REC_PREPARING: out << "CS_REC_PREPARING"; break;
- case Dbtc::CS_RESTART: out << "CS_RESTART"; break;
- case Dbtc::CS_ABORTING: out << "CS_ABORTING"; break;
- case Dbtc::CS_COMPLETING: out << "CS_COMPLETING"; break;
- case Dbtc::CS_COMPLETE_SENT: out << "CS_COMPLETE_SENT"; break;
- case Dbtc::CS_PREPARE_TO_COMMIT: out << "CS_PREPARE_TO_COMMIT"; break;
- case Dbtc::CS_COMMIT_SENT: out << "CS_COMMIT_SENT"; break;
- case Dbtc::CS_START_COMMITTING: out << "CS_START_COMMITTING"; break;
- case Dbtc::CS_COMMITTING: out << "CS_COMMITTING"; break;
- case Dbtc::CS_REC_COMMITTING: out << "CS_REC_COMMITTING"; break;
- case Dbtc::CS_WAIT_ABORT_CONF: out << "CS_WAIT_ABORT_CONF"; break;
- case Dbtc::CS_WAIT_COMPLETE_CONF: out << "CS_WAIT_COMPLETE_CONF"; break;
- case Dbtc::CS_WAIT_COMMIT_CONF: out << "CS_WAIT_COMMIT_CONF"; break;
- case Dbtc::CS_FAIL_ABORTING: out << "CS_FAIL_ABORTING"; break;
- case Dbtc::CS_FAIL_ABORTED: out << "CS_FAIL_ABORTED"; break;
- case Dbtc::CS_FAIL_PREPARED: out << "CS_FAIL_PREPARED"; break;
- case Dbtc::CS_FAIL_COMMITTING: out << "CS_FAIL_COMMITTING"; break;
- case Dbtc::CS_FAIL_COMMITTED: out << "CS_FAIL_COMMITTED"; break;
- case Dbtc::CS_FAIL_COMPLETED: out << "CS_FAIL_COMPLETED"; break;
- case Dbtc::CS_START_SCAN: out << "CS_START_SCAN"; break;
- default:
- out << "Unknown: " << (int)state; break;
- }
- return out;
-}
-NdbOut &
-operator<<(NdbOut& out, Dbtc::OperationState state){
- out << (int)state;
- return out;
-}
-NdbOut &
-operator<<(NdbOut& out, Dbtc::AbortState state){
- out << (int)state;
- return out;
-}
-NdbOut &
-operator<<(NdbOut& out, Dbtc::ReturnSignal state){
- out << (int)state;
- return out;
-}
-NdbOut &
-operator<<(NdbOut& out, Dbtc::ScanRecord::ScanState state){
- out << (int)state;
- return out;
-}
-NdbOut &
-operator<<(NdbOut& out, Dbtc::ScanFragRec::ScanFragState state){
- out << (int)state;
- return out;
-}
-#endif
-
-void
-Dbtc::updateBuddyTimer(ApiConnectRecordPtr apiPtr)
-{
- if (apiPtr.p->buddyPtr != RNIL) {
- jam();
- ApiConnectRecordPtr buddyApiPtr;
- buddyApiPtr.i = apiPtr.p->buddyPtr;
- ptrCheckGuard(buddyApiPtr, capiConnectFilesize, apiConnectRecord);
- if (getApiConTimer(buddyApiPtr.i) != 0) {
- if ((apiPtr.p->transid[0] == buddyApiPtr.p->transid[0]) &&
- (apiPtr.p->transid[1] == buddyApiPtr.p->transid[1])) {
- jam();
- setApiConTimer(buddyApiPtr.i, ctcTimer, __LINE__);
- } else {
- jam();
- // Not a buddy anymore since not the same transid
- apiPtr.p->buddyPtr = RNIL;
- }//if
- }//if
- }//if
-}
-
-void Dbtc::execCONTINUEB(Signal* signal)
-{
- UintR tcase;
-
- jamEntry();
- tcase = signal->theData[0];
- UintR Tdata0 = signal->theData[1];
- UintR Tdata1 = signal->theData[2];
- UintR Tdata2 = signal->theData[3];
- switch (tcase) {
- case TcContinueB::ZRETURN_FROM_QUEUED_DELIVERY:
- jam();
- ndbrequire(false);
- return;
- case TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER:
- jam();
- tcNodeFailptr.i = Tdata0;
- ptrCheckGuard(tcNodeFailptr, 1, tcFailRecord);
- completeTransAtTakeOverLab(signal, Tdata1);
- return;
- case TcContinueB::ZCONTINUE_TIME_OUT_CONTROL:
- jam();
- timeOutLoopStartLab(signal, Tdata0);
- return;
- case TcContinueB::ZNODE_TAKE_OVER_COMPLETED:
- jam();
- tnodeid = Tdata0;
- tcNodeFailptr.i = 0;
- ptrAss(tcNodeFailptr, tcFailRecord);
- nodeTakeOverCompletedLab(signal);
- return;
- case TcContinueB::ZINITIALISE_RECORDS:
- jam();
- initialiseRecordsLab(signal, Tdata0, Tdata2, signal->theData[4]);
- return;
- case TcContinueB::ZSEND_COMMIT_LOOP:
- jam();
- apiConnectptr.i = Tdata0;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- tcConnectptr.i = Tdata1;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- commit020Lab(signal);
- return;
- case TcContinueB::ZSEND_COMPLETE_LOOP:
- jam();
- apiConnectptr.i = Tdata0;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- tcConnectptr.i = Tdata1;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- complete010Lab(signal);
- return;
- case TcContinueB::ZHANDLE_FAILED_API_NODE:
- jam();
- handleFailedApiNode(signal, Tdata0, Tdata1);
- return;
- case TcContinueB::ZTRANS_EVENT_REP:
- jam();
- /* -------------------------------------------------------------------- */
-    // Report information about transaction activity once every 5 seconds.
- /* -------------------------------------------------------------------- */
- if (c_counters.c_trans_status == TransCounters::Timer){
- Uint32 len = c_counters.report(signal);
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, len, JBB);
-
- c_counters.reset();
- signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP;
- sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 1);
- }
- return;
- case TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL:
- jam();
- timeOutLoopStartFragLab(signal, Tdata0);
- return;
- case TcContinueB::ZABORT_BREAK:
- jam();
- tcConnectptr.i = Tdata0;
- apiConnectptr.i = Tdata1;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- apiConnectptr.p->counter--;
- abort015Lab(signal);
- return;
- case TcContinueB::ZABORT_TIMEOUT_BREAK:
- jam();
- tcConnectptr.i = Tdata0;
- apiConnectptr.i = Tdata1;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- apiConnectptr.p->counter--;
- sendAbortedAfterTimeout(signal, 1);
- return;
- case TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS:
- jam();
- removeMarkerForFailedAPI(signal, Tdata0, Tdata1);
- return;
- case TcContinueB::ZWAIT_ABORT_ALL:
- jam();
- checkAbortAllTimeout(signal, Tdata0);
- return;
- case TcContinueB::ZCHECK_SCAN_ACTIVE_FAILED_LQH:
- jam();
- checkScanActiveInFailedLqh(signal, Tdata0, Tdata1);
- return;
- case TcContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH:
- jam();
- checkWaitDropTabFailedLqh(signal, Tdata0, Tdata1);
- return;
- case TcContinueB::TRIGGER_PENDING:
- jam();
- ApiConnectRecordPtr transPtr;
- transPtr.i = Tdata0;
- ptrCheckGuard(transPtr, capiConnectFilesize, apiConnectRecord);
- transPtr.p->triggerPending = false;
- executeTriggers(signal, &transPtr);
- return;
- case TcContinueB::DelayTCKEYCONF:
- jam();
- apiConnectptr.i = Tdata0;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- sendtckeyconf(signal, Tdata1);
- return;
- default:
- ndbrequire(false);
- }//switch
-}
-
-void Dbtc::execDIGETNODESREF(Signal* signal)
-{
- jamEntry();
- terrorCode = signal->theData[1];
- releaseAtErrorLab(signal);
-}
-
-void Dbtc::execINCL_NODEREQ(Signal* signal)
-{
- jamEntry();
- tblockref = signal->theData[0];
- hostptr.i = signal->theData[1];
- ptrCheckGuard(hostptr, chostFilesize, hostRecord);
- hostptr.p->hostStatus = HS_ALIVE;
- hostptr.p->takeOverStatus = TOS_IDLE;
- signal->theData[0] = cownref;
- sendSignal(tblockref, GSN_INCL_NODECONF, signal, 1, JBB);
-}
-
-void Dbtc::execREAD_NODESREF(Signal* signal)
-{
- jamEntry();
- ndbrequire(false);
-}
-
-void Dbtc::execTC_SCHVERREQ(Signal* signal)
-{
- jamEntry();
- if (! assembleFragments(signal)) {
- jam();
- return;
- }
- tabptr.i = signal->theData[0];
- ptrCheckGuard(tabptr, ctabrecFilesize, tableRecord);
- tabptr.p->currentSchemaVersion = signal->theData[1];
- tabptr.p->storedTable = (bool)signal->theData[2];
- BlockReference retRef = signal->theData[3];
- tabptr.p->tableType = (Uint8)signal->theData[4];
- BlockReference retPtr = signal->theData[5];
- Uint32 noOfKeyAttr = signal->theData[6];
- ndbrequire(noOfKeyAttr <= MAX_ATTRIBUTES_IN_INDEX);
- Uint32 hasCharAttr = 0;
- Uint32 noOfDistrKeys = 0;
- SegmentedSectionPtr s0Ptr;
- signal->getSection(s0Ptr, 0);
- SectionReader r0(s0Ptr, getSectionSegmentPool());
- Uint32 i = 0;
- while (i < noOfKeyAttr) {
- jam();
- Uint32 attributeDescriptor = ~0;
- Uint32 csNumber = ~0;
- if (! r0.getWord(&attributeDescriptor) ||
- ! r0.getWord(&csNumber)) {
- jam();
- break;
- }
- CHARSET_INFO* cs = 0;
- if (csNumber != 0) {
- cs = all_charsets[csNumber];
- ndbrequire(cs != 0);
- hasCharAttr = 1;
- }
-
- noOfDistrKeys += AttributeDescriptor::getDKey(attributeDescriptor);
- tabptr.p->keyAttr[i].attributeDescriptor = attributeDescriptor;
- tabptr.p->keyAttr[i].charsetInfo = cs;
- i++;
- }
- ndbrequire(i == noOfKeyAttr);
- releaseSections(signal);
-
- ndbrequire(tabptr.p->enabled == false);
- tabptr.p->enabled = true;
- tabptr.p->dropping = false;
- tabptr.p->noOfKeyAttr = noOfKeyAttr;
- tabptr.p->hasCharAttr = hasCharAttr;
- tabptr.p->noOfDistrKeys = noOfDistrKeys;
-
- signal->theData[0] = tabptr.i;
- signal->theData[1] = retPtr;
- sendSignal(retRef, GSN_TC_SCHVERCONF, signal, 2, JBB);
-}//Dbtc::execTC_SCHVERREQ()
-
-void
-Dbtc::execPREP_DROP_TAB_REQ(Signal* signal)
-{
- jamEntry();
-
- PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
-
- TableRecordPtr tabPtr;
- tabPtr.i = req->tableId;
- ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
-
- Uint32 senderRef = req->senderRef;
- Uint32 senderData = req->senderData;
-
- if(!tabPtr.p->enabled){
- jam();
- PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = senderData;
- ref->tableId = tabPtr.i;
- ref->errorCode = PrepDropTabRef::NoSuchTable;
- sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal,
- PrepDropTabRef::SignalLength, JBB);
- return;
- }
-
- if(tabPtr.p->dropping){
- jam();
- PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = senderData;
- ref->tableId = tabPtr.i;
- ref->errorCode = PrepDropTabRef::DropInProgress;
- sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal,
- PrepDropTabRef::SignalLength, JBB);
- return;
- }
-
- tabPtr.p->dropping = true;
- tabPtr.p->dropTable.senderRef = senderRef;
- tabPtr.p->dropTable.senderData = senderData;
-
- {
- WaitDropTabReq * req = (WaitDropTabReq*)signal->getDataPtrSend();
- req->tableId = tabPtr.i;
- req->senderRef = reference();
-
- HostRecordPtr hostPtr;
- tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor();
- for (hostPtr.i = 1; hostPtr.i < MAX_NDB_NODES; hostPtr.i++) {
- jam();
- ptrAss(hostPtr, hostRecord);
- if (hostPtr.p->hostStatus == HS_ALIVE) {
- jam();
- tabPtr.p->dropTable.waitDropTabCount.setWaitingFor(hostPtr.i);
- sendSignal(calcLqhBlockRef(hostPtr.i), GSN_WAIT_DROP_TAB_REQ,
- signal, WaitDropTabReq::SignalLength, JBB);
- }//if
- }//for
-
- ndbrequire(tabPtr.p->dropTable.waitDropTabCount.done() != true);
- }
-}
-
-void
-Dbtc::execWAIT_DROP_TAB_CONF(Signal* signal)
-{
- jamEntry();
- WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr();
-
- TableRecordPtr tabPtr;
- tabPtr.i = conf->tableId;
- ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
-
- ndbrequire(tabPtr.p->dropping == true);
- Uint32 nodeId = refToNode(conf->senderRef);
- tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId);
-
- if(!tabPtr.p->dropTable.waitDropTabCount.done()){
- jam();
- return;
- }
-
- {
- PrepDropTabConf* conf = (PrepDropTabConf*)signal->getDataPtrSend();
- conf->tableId = tabPtr.i;
- conf->senderRef = reference();
- conf->senderData = tabPtr.p->dropTable.senderData;
- sendSignal(tabPtr.p->dropTable.senderRef, GSN_PREP_DROP_TAB_CONF, signal,
- PrepDropTabConf::SignalLength, JBB);
- tabPtr.p->dropTable.senderRef = 0;
- }
-}
-
-void
-Dbtc::execWAIT_DROP_TAB_REF(Signal* signal)
-{
- jamEntry();
- WaitDropTabRef * ref = (WaitDropTabRef*)signal->getDataPtr();
-
- TableRecordPtr tabPtr;
- tabPtr.i = ref->tableId;
- ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
-
- ndbrequire(tabPtr.p->dropping == true);
- Uint32 nodeId = refToNode(ref->senderRef);
- tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId);
-
- ndbrequire(ref->errorCode == WaitDropTabRef::NoSuchTable ||
- ref->errorCode == WaitDropTabRef::NF_FakeErrorREF);
-
- if(!tabPtr.p->dropTable.waitDropTabCount.done()){
- jam();
- return;
- }
-
- {
- PrepDropTabConf* conf = (PrepDropTabConf*)signal->getDataPtrSend();
- conf->tableId = tabPtr.i;
- conf->senderRef = reference();
- conf->senderData = tabPtr.p->dropTable.senderData;
- sendSignal(tabPtr.p->dropTable.senderRef, GSN_PREP_DROP_TAB_CONF, signal,
- PrepDropTabConf::SignalLength, JBB);
- tabPtr.p->dropTable.senderRef = 0;
- }
-}
-
-void
-Dbtc::checkWaitDropTabFailedLqh(Signal* signal, Uint32 nodeId, Uint32 tableId)
-{
-
- TableRecordPtr tabPtr;
- tabPtr.i = tableId;
-
- WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr();
- conf->tableId = tableId;
-
- const Uint32 RT_BREAK = 16;
- for(Uint32 i = 0; i<RT_BREAK && tabPtr.i < ctabrecFilesize; i++, tabPtr.i++){
- jam();
- ptrAss(tabPtr, tableRecord);
- if(tabPtr.p->enabled && tabPtr.p->dropping){
- if(tabPtr.p->dropTable.waitDropTabCount.isWaitingFor(nodeId)){
- jam();
- conf->senderRef = calcLqhBlockRef(nodeId);
- execWAIT_DROP_TAB_CONF(signal);
- tabPtr.i++;
- break;
- }
- }
- }
-
- if(tabPtr.i == ctabrecFilesize){
- /**
- * Finished
- */
- jam();
- return;
- }
-
- signal->theData[0] = TcContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH;
- signal->theData[1] = nodeId;
- signal->theData[2] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
-}
-
-void
-Dbtc::execDROP_TAB_REQ(Signal* signal)
-{
- jamEntry();
-
- DropTabReq* req = (DropTabReq*)signal->getDataPtr();
-
- TableRecordPtr tabPtr;
- tabPtr.i = req->tableId;
- ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
-
- Uint32 senderRef = req->senderRef;
- Uint32 senderData = req->senderData;
- DropTabReq::RequestType rt = (DropTabReq::RequestType)req->requestType;
-
- if(!tabPtr.p->enabled && rt == DropTabReq::OnlineDropTab){
- jam();
- DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = senderData;
- ref->tableId = tabPtr.i;
- ref->errorCode = DropTabRef::NoSuchTable;
- sendSignal(senderRef, GSN_DROP_TAB_REF, signal,
- DropTabRef::SignalLength, JBB);
- return;
- }
-
- if(!tabPtr.p->dropping && rt == DropTabReq::OnlineDropTab){
- jam();
- DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = senderData;
- ref->tableId = tabPtr.i;
- ref->errorCode = DropTabRef::DropWoPrep;
- sendSignal(senderRef, GSN_DROP_TAB_REF, signal,
- DropTabRef::SignalLength, JBB);
- return;
- }
-
- tabPtr.p->enabled = false;
- tabPtr.p->dropping = false;
-
- DropTabConf * conf = (DropTabConf*)signal->getDataPtrSend();
- conf->tableId = tabPtr.i;
- conf->senderRef = reference();
- conf->senderData = senderData;
- sendSignal(senderRef, GSN_DROP_TAB_CONF, signal,
- PrepDropTabConf::SignalLength, JBB);
-}
-
-void Dbtc::execALTER_TAB_REQ(Signal * signal)
-{
- AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr();
- const Uint32 senderRef = req->senderRef;
- const Uint32 senderData = req->senderData;
- const Uint32 changeMask = req->changeMask;
- const Uint32 tableId = req->tableId;
- const Uint32 tableVersion = req->tableVersion;
- const Uint32 gci = req->gci;
- AlterTabReq::RequestType requestType =
- (AlterTabReq::RequestType) req->requestType;
-
- TableRecordPtr tabPtr;
- tabPtr.i = req->tableId;
- ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
- tabPtr.p->currentSchemaVersion = tableVersion;
-
- // Request handled successfully
- AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = senderData;
- conf->changeMask = changeMask;
- conf->tableId = tableId;
- conf->tableVersion = tableVersion;
- conf->gci = gci;
- conf->requestType = requestType;
- sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal,
- AlterTabConf::SignalLength, JBB);
-}
-
-/* ***************************************************************************/
-/* START / RESTART */
-/* ***************************************************************************/
-void Dbtc::execREAD_CONFIG_REQ(Signal* signal)
-{
- const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
- Uint32 ref = req->senderRef;
- Uint32 senderData = req->senderData;
- ndbrequire(req->noOfParameters == 0);
-
- jamEntry();
-
- const ndb_mgm_configuration_iterator * p =
- theConfiguration.getOwnConfigIterator();
- ndbrequire(p != 0);
-
- UintR apiConnect;
- UintR tcConnect;
- UintR tables;
- UintR localScan;
- UintR tcScan;
-
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_API_CONNECT, &apiConnect));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_TC_CONNECT, &tcConnect));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_TABLE, &tables));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_LOCAL_SCAN, &localScan));
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_SCAN, &tcScan));
-
- ccacheFilesize = (apiConnect/3) + 1;
- capiConnectFilesize = apiConnect;
- ctcConnectFilesize = tcConnect;
- ctabrecFilesize = tables;
- cscanrecFileSize = tcScan;
- cscanFragrecFileSize = localScan;
-
- initRecords();
- initialiseRecordsLab(signal, 0, ref, senderData);
-
- Uint32 val = 3000;
- ndb_mgm_get_int_parameter(p, CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT, &val);
- set_timeout_value(val);
-
- val = 3000;
- ndb_mgm_get_int_parameter(p, CFG_DB_TRANSACTION_INACTIVE_TIMEOUT, &val);
- set_appl_timeout_value(val);
-
- val = 1;
- //ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_TRANSACTION_TAKEOVER, &val);
- set_no_parallel_takeover(val);
-
- ctimeOutCheckDelay = 50; // 500ms
-}//Dbtc::execREAD_CONFIG_REQ()
-
-void Dbtc::execSTTOR(Signal* signal)
-{
- Uint16 tphase;
-
- jamEntry();
- /* START CASE */
- tphase = signal->theData[1];
- csignalKey = signal->theData[6];
- switch (tphase) {
- case ZSPH1:
- jam();
- startphase1x010Lab(signal);
- return;
- default:
- jam();
- sttorryLab(signal); /* START PHASE 255 */
- return;
- }//switch
-}//Dbtc::execSTTOR()
-
-void Dbtc::sttorryLab(Signal* signal)
-{
- signal->theData[0] = csignalKey;
- signal->theData[1] = 3; /* BLOCK CATEGORY */
- signal->theData[2] = 2; /* SIGNAL VERSION NUMBER */
- signal->theData[3] = ZSPH1;
- signal->theData[4] = 255;
- sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
-}//Dbtc::sttorryLab()
-
-/* ***************************************************************************/
-/* INTERNAL START / RESTART */
-/*****************************************************************************/
-void Dbtc::execNDB_STTOR(Signal* signal)
-{
- Uint16 tndbstartphase;
- Uint16 tstarttype;
-
- jamEntry();
- tusersblkref = signal->theData[0];
- tnodeid = signal->theData[1];
- tndbstartphase = signal->theData[2]; /* START PHASE */
- tstarttype = signal->theData[3]; /* START TYPE */
- switch (tndbstartphase) {
- case ZINTSPH1:
- jam();
- intstartphase1x010Lab(signal);
- return;
- case ZINTSPH2:
- jam();
- intstartphase2x010Lab(signal);
- return;
- case ZINTSPH3:
- jam();
- intstartphase3x010Lab(signal); /* SEIZE CONNECT RECORD IN EACH LQH*/
-// Start transaction event reporting.
- c_counters.c_trans_status = TransCounters::Timer;
- c_counters.reset();
- signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP;
- sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 1);
- return;
- case ZINTSPH6:
- jam();
- csystemStart = SSS_TRUE;
- break;
- default:
- jam();
- break;
- }//switch
- ndbsttorry010Lab(signal);
- return;
-}//Dbtc::execNDB_STTOR()
-
-void Dbtc::ndbsttorry010Lab(Signal* signal)
-{
- signal->theData[0] = cownref;
- sendSignal(cndbcntrblockref, GSN_NDB_STTORRY, signal, 1, JBB);
-}//Dbtc::ndbsttorry010Lab()
-
-void
-Dbtc::set_timeout_value(Uint32 timeOut)
-{
- timeOut = timeOut / 10;
- if (timeOut < 2) {
- jam();
- timeOut = 100;
- }//if
- ctimeOutValue = timeOut;
-}
-
-void
-Dbtc::set_appl_timeout_value(Uint32 timeOut)
-{
- if (timeOut)
- {
- timeOut /= 10;
- if (timeOut < ctimeOutValue) {
- jam();
- c_appl_timeout_value = ctimeOutValue;
- }//if
- }
- c_appl_timeout_value = timeOut;
-}
-
-void
-Dbtc::set_no_parallel_takeover(Uint32 noParallelTakeOver)
-{
- if (noParallelTakeOver == 0) {
- jam();
- noParallelTakeOver = 1;
- } else if (noParallelTakeOver > MAX_NDB_NODES) {
- jam();
- noParallelTakeOver = MAX_NDB_NODES;
- }//if
- cnoParallelTakeOver = noParallelTakeOver;
-}
-
-/* ***************************************************************************/
-/* S T A R T P H A S E 1 X */
-/* INITIALISE BLOCKREF AND BLOCKNUMBERS */
-/* ***************************************************************************/
-void Dbtc::startphase1x010Lab(Signal* signal)
-{
- csystemStart = SSS_FALSE;
- ctimeOutCheckCounter = 0;
- ctimeOutCheckFragCounter = 0;
- ctimeOutMissedHeartbeats = 0;
- ctimeOutCheckHeartbeat = 0;
- ctimeOutCheckLastHeartbeat = 0;
- ctimeOutCheckActive = TOCS_FALSE;
- ctimeOutCheckFragActive = TOCS_FALSE;
- sttorryLab(signal);
-}//Dbtc::startphase1x010Lab()
-
-/*****************************************************************************/
-/* I N T S T A R T P H A S E 1 X */
-/* INITIALISE ALL RECORDS. */
-/*****************************************************************************/
-void Dbtc::intstartphase1x010Lab(Signal* signal)
-{
- cownNodeid = tnodeid;
- cownref = calcTcBlockRef(cownNodeid);
- clqhblockref = calcLqhBlockRef(cownNodeid);
- cdihblockref = calcDihBlockRef(cownNodeid);
- cdictblockref = calcDictBlockRef(cownNodeid);
- cndbcntrblockref = calcNdbCntrBlockRef(cownNodeid);
- cerrorBlockref = calcNdbCntrBlockRef(cownNodeid);
- coperationsize = 0;
- cfailure_nr = 0;
- ndbsttorry010Lab(signal);
-}//Dbtc::intstartphase1x010Lab()
-
-/*****************************************************************************/
-/* I N T S T A R T P H A S E 2 X */
-/* SET-UP LOCAL CONNECTIONS. */
-/*****************************************************************************/
-void Dbtc::intstartphase2x010Lab(Signal* signal)
-{
- tcConnectptr.i = cfirstfreeTcConnect;
- intstartphase2x020Lab(signal);
-}//Dbtc::intstartphase2x010Lab()
-
-void Dbtc::intstartphase2x020Lab(Signal* signal)
-{
- if (tcConnectptr.i == RNIL) {
- jam();
- ndbsttorry010Lab(signal);
- return;
- }//if
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- tcConnectptr.p->tcConnectstate = OS_CONNECTING_DICT;
-/* ****************** */
-/* DISEIZEREQ < */
-/* ****************** */
- signal->theData[0] = tcConnectptr.i;
- signal->theData[1] = cownref;
- sendSignal(cdihblockref, GSN_DISEIZEREQ, signal, 2, JBB);
-}//Dbtc::intstartphase2x020Lab()
-
-void Dbtc::execDISEIZECONF(Signal* signal)
-{
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- tcConnectptr.p->dihConnectptr = signal->theData[1];
- tcConnectptr.i = tcConnectptr.p->nextTcConnect;
- intstartphase2x020Lab(signal);
-}//Dbtc::execDISEIZECONF()
-
-/*****************************************************************************/
-/* I N T S T A R T P H A S E 3 X */
-/* PREPARE DISTRIBUTED CONNECTIONS */
-/*****************************************************************************/
-void Dbtc::intstartphase3x010Lab(Signal* signal)
-{
- signal->theData[0] = cownref;
- sendSignal(cndbcntrblockref, GSN_READ_NODESREQ, signal, 1, JBB);
-}//Dbtc::intstartphase3x010Lab()
-
-void Dbtc::execREAD_NODESCONF(Signal* signal)
-{
- UintR guard0;
-
- jamEntry();
-
- ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
-
- csystemnodes = readNodes->noOfNodes;
- cmasterNodeId = readNodes->masterNodeId;
-
- con_lineNodes = 0;
- arrGuard(csystemnodes, MAX_NDB_NODES);
- guard0 = csystemnodes - 1;
- arrGuard(guard0, MAX_NDB_NODES); // Check not zero nodes
-
- for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
- jam();
- if (NodeBitmask::get(readNodes->allNodes, i)) {
- hostptr.i = i;
- ptrCheckGuard(hostptr, chostFilesize, hostRecord);
-
- hostptr.p->takeOverStatus = TOS_IDLE;
-
- if (NodeBitmask::get(readNodes->inactiveNodes, i)) {
- jam();
- hostptr.p->hostStatus = HS_DEAD;
- } else {
- jam();
- con_lineNodes++;
- hostptr.p->hostStatus = HS_ALIVE;
- }//if
- }//if
- }//for
- ndbsttorry010Lab(signal);
-}//Dbtc::execREAD_NODESCONF()
-
-/*****************************************************************************/
-/* A P I _ F A I L R E Q */
-// An API node has failed for some reason. We need to disconnect all API
-// connections to the API node. This also includes
-/*****************************************************************************/
-void Dbtc::execAPI_FAILREQ(Signal* signal)
-{
- /***************************************************************************
- * Set the block reference to return API_FAILCONF to. Set the number of api
- * connects currently closing to one to indicate that we are still in the
- * process of going through the api connect records. Thus checking for zero
- * can only be true after all api connect records have been checked.
- **************************************************************************/
- jamEntry();
- capiFailRef = signal->theData[1];
- arrGuard(signal->theData[0], MAX_NODES);
- capiConnectClosing[signal->theData[0]] = 1;
- handleFailedApiNode(signal, signal->theData[0], (UintR)0);
-}
-
-void
-Dbtc::handleFailedApiNode(Signal* signal,
- UintR TapiFailedNode,
- UintR TapiConnectPtr)
-{
- UintR TloopCount = 0;
- arrGuard(TapiFailedNode, MAX_NODES);
- apiConnectptr.i = TapiConnectPtr;
- do {
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- const UintR TapiNode = refToNode(apiConnectptr.p->ndbapiBlockref);
- if (TapiNode == TapiFailedNode) {
-#ifdef VM_TRACE
- if (apiConnectptr.p->apiFailState != ZFALSE) {
- ndbout << "Error in previous API fail handling discovered" << endl
- << " apiConnectptr.i = " << apiConnectptr.i << endl
- << " apiConnectstate = " << apiConnectptr.p->apiConnectstate
- << endl
- << " ndbapiBlockref = " << hex
- << apiConnectptr.p->ndbapiBlockref << endl
- << " apiNode = " << refToNode(apiConnectptr.p->ndbapiBlockref)
- << endl;
- if (apiConnectptr.p->lastTcConnect != RNIL){
- jam();
- tcConnectptr.i = apiConnectptr.p->lastTcConnect;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- ndbout << " tcConnectptr.i = " << tcConnectptr.i << endl
- << " tcConnectstate = " << tcConnectptr.p->tcConnectstate
- << endl;
- }
- }//if
-#endif
-
- apiConnectptr.p->returnsignal = RS_NO_RETURN;
- /***********************************************************************/
- // The connected node is the failed node.
- /**********************************************************************/
- switch(apiConnectptr.p->apiConnectstate) {
- case CS_DISCONNECTED:
- /*********************************************************************/
- // These states do not need any special handling.
- // Simply continue with the next.
- /*********************************************************************/
- jam();
- break;
- case CS_ABORTING:
- /*********************************************************************/
- // This could actually mean that the API connection is already
- // ready to release if the abortState is IDLE.
- /*********************************************************************/
- if (apiConnectptr.p->abortState == AS_IDLE) {
- jam();
- releaseApiCon(signal, apiConnectptr.i);
- } else {
- jam();
- capiConnectClosing[TapiFailedNode]++;
- apiConnectptr.p->apiFailState = ZTRUE;
- }//if
- break;
- case CS_WAIT_ABORT_CONF:
- case CS_WAIT_COMMIT_CONF:
- case CS_START_COMMITTING:
- case CS_PREPARE_TO_COMMIT:
- case CS_COMMITTING:
- case CS_COMMIT_SENT:
- /*********************************************************************/
- // These states indicate that an abort process or commit process is
- // already ongoing. We will set a state in the api record indicating
- // that the API node has failed.
- // Also we will increase the number of outstanding api records to
- // wait for before we can respond with API_FAILCONF.
- /*********************************************************************/
- jam();
- capiConnectClosing[TapiFailedNode]++;
- apiConnectptr.p->apiFailState = ZTRUE;
- break;
- case CS_START_SCAN:
- /*********************************************************************/
- // The api record was performing a scan operation. We need to check
- // on the scan state. Since completing a scan process might involve
- // sending several signals we will increase the loop count by 64.
- /*********************************************************************/
- jam();
-
- apiConnectptr.p->apiFailState = ZTRUE;
- capiConnectClosing[TapiFailedNode]++;
-
- ScanRecordPtr scanPtr;
- scanPtr.i = apiConnectptr.p->apiScanRec;
- ptrCheckGuard(scanPtr, cscanrecFileSize, scanRecord);
- close_scan_req(signal, scanPtr, true);
-
- TloopCount += 64;
- break;
- case CS_CONNECTED:
- /*********************************************************************/
- // The api record is connected to failed node. We need to release the
- // connection and set it in a disconnected state.
- /*********************************************************************/
- jam();
- releaseApiCon(signal, apiConnectptr.i);
- break;
- case CS_REC_COMMITTING:
- case CS_RECEIVING:
- case CS_STARTED:
- /*********************************************************************/
- // The api record was in the process of performing a transaction but
- // had not yet sent all information.
- // We need to initiate an ABORT since the API will not provide any
- // more information.
- // Since the abort can send many signals we will insert a real-time
- // break after checking this record.
- /*********************************************************************/
- jam();
- apiConnectptr.p->apiFailState = ZTRUE;
- capiConnectClosing[TapiFailedNode]++;
- abort010Lab(signal);
- TloopCount = 256;
- break;
- case CS_PREPARED:
- jam();
- case CS_REC_PREPARING:
- jam();
- case CS_START_PREPARING:
- jam();
- /*********************************************************************/
- // Not implemented yet.
- /*********************************************************************/
- systemErrorLab(signal);
- break;
- case CS_RESTART:
- jam();
- case CS_COMPLETING:
- jam();
- case CS_COMPLETE_SENT:
- jam();
- case CS_WAIT_COMPLETE_CONF:
- jam();
- case CS_FAIL_ABORTING:
- jam();
- case CS_FAIL_ABORTED:
- jam();
- case CS_FAIL_PREPARED:
- jam();
- case CS_FAIL_COMMITTING:
- jam();
- case CS_FAIL_COMMITTED:
- /*********************************************************************/
- // These states are only valid on copy and fail API connections.
- /*********************************************************************/
- default:
- jam();
- systemErrorLab(signal);
- break;
- }//switch
- } else {
- jam();
- }//if
- apiConnectptr.i++;
- if (apiConnectptr.i > ((capiConnectFilesize / 3) - 1)) {
- jam();
- /**
- * Finished with scanning connection record
- *
- * Now scan markers
- */
- removeMarkerForFailedAPI(signal, TapiFailedNode, 0);
- return;
- }//if
- } while (TloopCount++ < 256);
- signal->theData[0] = TcContinueB::ZHANDLE_FAILED_API_NODE;
- signal->theData[1] = TapiFailedNode;
- signal->theData[2] = apiConnectptr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
-}//Dbtc::handleFailedApiNode()
-
-void
-Dbtc::removeMarkerForFailedAPI(Signal* signal,
- Uint32 nodeId,
- Uint32 startBucket)
-{
- TcFailRecordPtr node_fail_ptr;
- node_fail_ptr.i = 0;
- ptrAss(node_fail_ptr, tcFailRecord);
- if(node_fail_ptr.p->failStatus != FS_IDLE) {
- jam();
- DEBUG("Restarting removeMarkerForFailedAPI");
- /**
- * TC take-over in progress
- * needs to restart as this
- * creates new markers
- */
- signal->theData[0] = TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS;
- signal->theData[1] = nodeId;
- signal->theData[2] = 0;
- sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 500, 3);
- return;
- }
-
- CommitAckMarkerIterator iter;
- m_commitAckMarkerHash.next(startBucket, iter);
-
- const Uint32 RT_BREAK = 256;
- for(Uint32 i = 0; i<RT_BREAK || iter.bucket == startBucket; i++){
- jam();
-
- if(iter.curr.i == RNIL){
- jam();
- /**
- * Done with iteration
- */
- capiConnectClosing[nodeId]--;
- if (capiConnectClosing[nodeId] == 0) {
- jam();
- /********************************************************************/
- // No outstanding ABORT or COMMIT's of this failed API node.
- // We can respond with API_FAILCONF
- /********************************************************************/
- signal->theData[0] = nodeId;
- signal->theData[1] = cownref;
- sendSignal(capiFailRef, GSN_API_FAILCONF, signal, 2, JBB);
- }
- return;
- }
-
- if(iter.curr.p->apiNodeId == nodeId){
- jam();
-
- /**
- * Check that the record is not still in use
- *
- */
- ApiConnectRecordPtr apiConnectPtr;
- apiConnectPtr.i = iter.curr.p->apiConnectPtr;
- ptrCheckGuard(apiConnectPtr, capiConnectFilesize, apiConnectRecord);
- if(apiConnectPtr.p->commitAckMarker == iter.curr.i){
- jam();
- /**
- * The record is still active
- *
- * Don't remove it, but continueb instead
- */
- break;
- }
- sendRemoveMarkers(signal, iter.curr.p);
- m_commitAckMarkerHash.release(iter.curr);
-
- break;
- }
- m_commitAckMarkerHash.next(iter);
- }
-
- signal->theData[0] = TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS;
- signal->theData[1] = nodeId;
- signal->theData[2] = iter.bucket;
- sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
-}
-
-void Dbtc::handleApiFailState(Signal* signal, UintR TapiConnectptr)
-{
- ApiConnectRecordPtr TlocalApiConnectptr;
- UintR TfailedApiNode;
-
- TlocalApiConnectptr.i = TapiConnectptr;
- ptrCheckGuard(TlocalApiConnectptr, capiConnectFilesize, apiConnectRecord);
- TfailedApiNode = refToNode(TlocalApiConnectptr.p->ndbapiBlockref);
- arrGuard(TfailedApiNode, MAX_NODES);
- capiConnectClosing[TfailedApiNode]--;
- releaseApiCon(signal, TapiConnectptr);
- TlocalApiConnectptr.p->apiFailState = ZFALSE;
- if (capiConnectClosing[TfailedApiNode] == 0) {
- jam();
- signal->theData[0] = TfailedApiNode;
- signal->theData[1] = cownref;
- sendSignal(capiFailRef, GSN_API_FAILCONF, signal, 2, JBB);
- }//if
-}//Dbtc::handleApiFailState()
-
-/****************************************************************************
- * T C S E I Z E R E Q
- * THE APPLICATION SENDS A REQUEST TO SEIZE A CONNECT RECORD TO CARRY OUT A
- * TRANSACTION
- * THE TC BLOCK TAKES OUT A CONNECT RECORD FROM THE FREE LIST AND ESTABLISHES ALL
- * NECESSARY CONNECTIONS BEFORE REPLYING TO THE APPLICATION BLOCK
- ****************************************************************************/
-void Dbtc::execTCSEIZEREQ(Signal* signal)
-{
- UintR tapiPointer;
- BlockReference tapiBlockref; /* SENDER BLOCK REFERENCE*/
-
- jamEntry();
- tapiPointer = signal->theData[0]; /* REQUEST SENDERS CONNECT RECORD POINTER*/
- tapiBlockref = signal->theData[1]; /* SENDERS BLOCK REFERENCE*/
-
- const NodeState::StartLevel sl =
- (NodeState::StartLevel)getNodeState().startLevel;
-
- const NodeId senderNodeId = refToNode(tapiBlockref);
- const bool local = senderNodeId == getOwnNodeId() || senderNodeId == 0;
-
- if(!(senderNodeId == getNodeState().getSingleUserApi()) &&
- !getNodeState().getSingleUserMode()) {
- if(!(sl==NodeState::SL_SINGLEUSER &&
- senderNodeId == getNodeState().getSingleUserApi())) {
- if (!(sl == NodeState::SL_STARTED ||
- (sl == NodeState::SL_STARTING && local == true))) {
- jam();
-
- Uint32 errCode;
- if(!(sl == NodeState::SL_SINGLEUSER && local))
- {
- switch(sl){
- case NodeState::SL_STARTING:
- errCode = ZSYSTEM_NOT_STARTED_ERROR;
- break;
- case NodeState::SL_STOPPING_1:
- case NodeState::SL_STOPPING_2:
- case NodeState::SL_STOPPING_3:
- case NodeState::SL_STOPPING_4:
- if(getNodeState().stopping.systemShutdown)
- errCode = ZCLUSTER_SHUTDOWN_IN_PROGRESS;
- else
- errCode = ZNODE_SHUTDOWN_IN_PROGRESS;
- break;
- case NodeState::SL_SINGLEUSER:
- errCode = ZCLUSTER_IN_SINGLEUSER_MODE;
- break;
- default:
- errCode = ZWRONG_STATE;
- break;
- }
- signal->theData[0] = tapiPointer;
- signal->theData[1] = errCode;
- sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB);
- return;
- }//if (!(sl == SL_SINGLEUSER))
- } //if
- }
- }
-
- seizeApiConnect(signal);
- if (terrorCode == ZOK) {
- jam();
- apiConnectptr.p->ndbapiConnect = tapiPointer;
- apiConnectptr.p->ndbapiBlockref = tapiBlockref;
- signal->theData[0] = apiConnectptr.p->ndbapiConnect;
- signal->theData[1] = apiConnectptr.i;
- sendSignal(tapiBlockref, GSN_TCSEIZECONF, signal, 2, JBB);
- return;
- }
-
- signal->theData[0] = tapiPointer;
- signal->theData[1] = terrorCode;
- sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB);
-}//Dbtc::execTCSEIZEREQ()
-
-/****************************************************************************/
-/* T C R E L E A S E Q */
-/* REQUEST TO RELEASE A CONNECT RECORD */
-/****************************************************************************/
-void Dbtc::execTCRELEASEREQ(Signal* signal)
-{
- UintR tapiPointer;
- BlockReference tapiBlockref; /* SENDER BLOCK REFERENCE*/
-
- jamEntry();
- tapiPointer = signal->theData[0]; /* REQUEST SENDERS CONNECT RECORD POINTER*/
- tapiBlockref = signal->theData[1];/* SENDERS BLOCK REFERENCE*/
- tuserpointer = signal->theData[2];
- if (tapiPointer >= capiConnectFilesize) {
- jam();
- signal->theData[0] = tuserpointer;
- signal->theData[1] = ZINVALID_CONNECTION;
- signal->theData[2] = __LINE__;
- sendSignal(tapiBlockref, GSN_TCRELEASEREF, signal, 3, JBB);
- return;
- } else {
- jam();
- apiConnectptr.i = tapiPointer;
- }//if
- ptrAss(apiConnectptr, apiConnectRecord);
- if (apiConnectptr.p->apiConnectstate == CS_DISCONNECTED) {
- jam();
- signal->theData[0] = tuserpointer;
- sendSignal(tapiBlockref, GSN_TCRELEASECONF, signal, 1, JBB);
- } else {
- if (tapiBlockref == apiConnectptr.p->ndbapiBlockref) {
- if (apiConnectptr.p->apiConnectstate == CS_CONNECTED ||
- (apiConnectptr.p->apiConnectstate == CS_ABORTING &&
- apiConnectptr.p->abortState == AS_IDLE) ||
- (apiConnectptr.p->apiConnectstate == CS_STARTED &&
- apiConnectptr.p->firstTcConnect == RNIL))
- {
- jam(); /* JUST REPLY OK */
- releaseApiCon(signal, apiConnectptr.i);
- signal->theData[0] = tuserpointer;
- sendSignal(tapiBlockref,
- GSN_TCRELEASECONF, signal, 1, JBB);
- } else {
- jam();
- signal->theData[0] = tuserpointer;
- signal->theData[1] = ZINVALID_CONNECTION;
- signal->theData[2] = __LINE__;
- signal->theData[3] = apiConnectptr.p->apiConnectstate;
- sendSignal(tapiBlockref,
- GSN_TCRELEASEREF, signal, 4, JBB);
- }
- } else {
- jam();
- signal->theData[0] = tuserpointer;
- signal->theData[1] = ZINVALID_CONNECTION;
- signal->theData[2] = __LINE__;
- signal->theData[3] = tapiBlockref;
- signal->theData[4] = apiConnectptr.p->ndbapiBlockref;
- sendSignal(tapiBlockref, GSN_TCRELEASEREF, signal, 5, JBB);
- }//if
- }//if
-}//Dbtc::execTCRELEASEREQ()
-
-/****************************************************************************/
-// Error Handling for TCKEYREQ messages
-/****************************************************************************/
-void Dbtc::signalErrorRefuseLab(Signal* signal)
-{
- ptrGuard(apiConnectptr);
- if (apiConnectptr.p->apiConnectstate != CS_DISCONNECTED) {
- jam();
- apiConnectptr.p->abortState = AS_IDLE;
- apiConnectptr.p->apiConnectstate = CS_ABORTING;
- }//if
- sendSignalErrorRefuseLab(signal);
-}//Dbtc::signalErrorRefuseLab()
-
-void Dbtc::sendSignalErrorRefuseLab(Signal* signal)
-{
- ndbassert(false);
- ptrGuard(apiConnectptr);
- if (apiConnectptr.p->apiConnectstate != CS_DISCONNECTED) {
- jam();
- ndbrequire(false);
- signal->theData[0] = apiConnectptr.p->ndbapiConnect;
- signal->theData[1] = signal->theData[ttransid_ptr];
- signal->theData[2] = signal->theData[ttransid_ptr + 1];
- signal->theData[3] = ZSIGNAL_ERROR;
- sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREP,
- signal, 4, JBB);
- }
-}//Dbtc::sendSignalErrorRefuseLab()
-
-void Dbtc::abortBeginErrorLab(Signal* signal)
-{
- apiConnectptr.p->transid[0] = signal->theData[ttransid_ptr];
- apiConnectptr.p->transid[1] = signal->theData[ttransid_ptr + 1];
- abortErrorLab(signal);
-}//Dbtc::abortBeginErrorLab()
-
-void Dbtc::printState(Signal* signal, int place)
-{
-#ifdef VM_TRACE // Change to if 0 to disable these printouts
- ndbout << "-- Dbtc::printState -- " << endl;
- ndbout << "Received from place = " << place
- << " apiConnectptr.i = " << apiConnectptr.i
- << " apiConnectstate = " << apiConnectptr.p->apiConnectstate << endl;
- ndbout << "ctcTimer = " << ctcTimer
- << " ndbapiBlockref = " << hex <<apiConnectptr.p->ndbapiBlockref
- << " Transid = " << apiConnectptr.p->transid[0]
- << " " << apiConnectptr.p->transid[1] << endl;
- ndbout << " apiTimer = " << getApiConTimer(apiConnectptr.i)
- << " counter = " << apiConnectptr.p->counter
- << " lqhkeyconfrec = " << apiConnectptr.p->lqhkeyconfrec
- << " lqhkeyreqrec = " << apiConnectptr.p->lqhkeyreqrec << endl;
- ndbout << "abortState = " << apiConnectptr.p->abortState
- << " apiScanRec = " << apiConnectptr.p->apiScanRec
- << " returncode = " << apiConnectptr.p->returncode << endl;
- ndbout << "tckeyrec = " << apiConnectptr.p->tckeyrec
- << " returnsignal = " << apiConnectptr.p->returnsignal
- << " apiFailState = " << apiConnectptr.p->apiFailState << endl;
- if (apiConnectptr.p->cachePtr != RNIL) {
- jam();
- CacheRecord *localCacheRecord = cacheRecord;
- UintR TcacheFilesize = ccacheFilesize;
- UintR TcachePtr = apiConnectptr.p->cachePtr;
- if (TcachePtr < TcacheFilesize) {
- jam();
- CacheRecord * const regCachePtr = &localCacheRecord[TcachePtr];
- ndbout << "currReclenAi = " << regCachePtr->currReclenAi
- << " attrlength = " << regCachePtr->attrlength
- << " tableref = " << regCachePtr->tableref
- << " keylen = " << regCachePtr->keylen << endl;
- } else {
- jam();
- systemErrorLab(signal);
- }//if
- }//if
-#endif
- return;
-}//Dbtc::printState()
-
-void
-Dbtc::TCKEY_abort(Signal* signal, int place)
-{
- switch (place) {
- case 0:
- jam();
- terrorCode = ZSTATE_ERROR;
- apiConnectptr.p->firstTcConnect = RNIL;
- printState(signal, 4);
- abortBeginErrorLab(signal);
- return;
- case 1:
- jam();
- printState(signal, 3);
- sendSignalErrorRefuseLab(signal);
- return;
- case 2:{
- printState(signal, 6);
- const TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0];
- const Uint32 t1 = tcKeyReq->transId1;
- const Uint32 t2 = tcKeyReq->transId2;
- signal->theData[0] = apiConnectptr.p->ndbapiConnect;
- signal->theData[1] = t1;
- signal->theData[2] = t2;
- signal->theData[3] = ZABORT_ERROR;
- ndbrequire(false);
- sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREP,
- signal, 4, JBB);
- return;
- }
- case 3:
- jam();
- printState(signal, 7);
- noFreeConnectionErrorLab(signal);
- return;
- case 4:
- jam();
- terrorCode = ZERO_KEYLEN_ERROR;
- releaseAtErrorLab(signal);
- return;
- case 5:
- jam();
- terrorCode = ZNO_AI_WITH_UPDATE;
- releaseAtErrorLab(signal);
- return;
- case 6:
- jam();
- warningHandlerLab(signal);
- return;
-
- case 7:
- jam();
- tabStateErrorLab(signal);
- return;
-
- case 8:
- jam();
- wrongSchemaVersionErrorLab(signal);
- return;
-
- case 9:
- jam();
- terrorCode = ZSTATE_ERROR;
- releaseAtErrorLab(signal);
- return;
-
- case 10:
- jam();
- systemErrorLab(signal);
- return;
-
- case 11:
- jam();
- terrorCode = ZMORE_AI_IN_TCKEYREQ_ERROR;
- releaseAtErrorLab(signal);
- return;
-
- case 12:
- jam();
- terrorCode = ZSIMPLE_READ_WITHOUT_AI;
- releaseAtErrorLab(signal);
- return;
-
- case 13:
- jam();
- switch (tcConnectptr.p->tcConnectstate) {
- case OS_WAIT_KEYINFO:
- jam();
- printState(signal, 8);
- terrorCode = ZSTATE_ERROR;
- abortErrorLab(signal);
- return;
- default:
- jam();
- /********************************************************************/
- /* MISMATCH BETWEEN STATE ON API CONNECTION AND THIS */
- /* PARTICULAR TC CONNECT RECORD. THIS MUST BE CAUSED BY NDB */
- /* INTERNAL ERROR. */
- /********************************************************************/
- systemErrorLab(signal);
- return;
- }//switch
- return;
-
- case 15:
- jam();
- terrorCode = ZSCAN_NODE_ERROR;
- releaseAtErrorLab(signal);
- return;
-
- case 16:
- jam();
- systemErrorLab(signal);
- return;
-
- case 17:
- jam();
- systemErrorLab(signal);
- return;
-
- case 18:
- jam();
- warningHandlerLab(signal);
- return;
-
- case 19:
- jam();
- return;
-
- case 20:
- jam();
- warningHandlerLab(signal);
- return;
-
- case 21:
- jam();
- systemErrorLab(signal);
- return;
-
- case 22:
- jam();
- systemErrorLab(signal);
- return;
-
- case 23:
- jam();
- systemErrorLab(signal);
- return;
-
- case 24:
- jam();
- seizeAttrbuferrorLab(signal);
- return;
-
- case 25:
- jam();
- warningHandlerLab(signal);
- return;
-
- case 26:
- jam();
- return;
-
- case 27:
- systemErrorLab(signal);
- jam();
- return;
-
- case 28:
- jam();
- // NOT USED
- return;
-
- case 29:
- jam();
- systemErrorLab(signal);
- return;
-
- case 30:
- jam();
- systemErrorLab(signal);
- return;
-
- case 31:
- jam();
- systemErrorLab(signal);
- return;
-
- case 32:
- jam();
- systemErrorLab(signal);
- return;
-
- case 33:
- jam();
- systemErrorLab(signal);
- return;
-
- case 34:
- jam();
- systemErrorLab(signal);
- return;
-
- case 35:
- jam();
- systemErrorLab(signal);
- return;
-
- case 36:
- jam();
- systemErrorLab(signal);
- return;
-
- case 37:
- jam();
- systemErrorLab(signal);
- return;
-
- case 38:
- jam();
- systemErrorLab(signal);
- return;
-
- case 39:
- jam();
- systemErrorLab(signal);
- return;
-
- case 40:
- jam();
- systemErrorLab(signal);
- return;
-
- case 41:
- jam();
- systemErrorLab(signal);
- return;
-
- case 42:
- jam();
- systemErrorLab(signal);
- return;
-
- case 43:
- jam();
- systemErrorLab(signal);
- return;
-
- case 44:
- jam();
- systemErrorLab(signal);
- return;
-
- case 45:
- jam();
- systemErrorLab(signal);
- return;
-
- case 46:
- jam();
- systemErrorLab(signal);
- return;
-
- case 47:
- jam();
- terrorCode = apiConnectptr.p->returncode;
- releaseAtErrorLab(signal);
- return;
-
- case 48:
- jam();
- terrorCode = ZCOMMIT_TYPE_ERROR;
- releaseAtErrorLab(signal);
- return;
-
- case 49:
- jam();
- abortErrorLab(signal);
- return;
-
- case 50:
- jam();
- systemErrorLab(signal);
- return;
-
- case 51:
- jam();
- abortErrorLab(signal);
- return;
-
- case 52:
- jam();
- abortErrorLab(signal);
- return;
-
- case 53:
- jam();
- abortErrorLab(signal);
- return;
-
- case 54:
- jam();
- abortErrorLab(signal);
- return;
-
- case 55:
- jam();
- printState(signal, 5);
- sendSignalErrorRefuseLab(signal);
- return;
-
- case 56:{
- jam();
- terrorCode = ZNO_FREE_TC_MARKER;
- abortErrorLab(signal);
- return;
- }
- case 57:{
- jam();
- /**
- * Initialize object before starting error handling
- */
- initApiConnectRec(signal, apiConnectptr.p, true);
- switch(getNodeState().startLevel){
- case NodeState::SL_STOPPING_2:
- case NodeState::SL_STOPPING_3:
- case NodeState::SL_STOPPING_4:
- if(getNodeState().stopping.systemShutdown)
- terrorCode = ZCLUSTER_SHUTDOWN_IN_PROGRESS;
- else
- terrorCode = ZNODE_SHUTDOWN_IN_PROGRESS;
- break;
- case NodeState::SL_SINGLEUSER:
- terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE;
- break;
- default:
- terrorCode = ZWRONG_STATE;
- break;
- }
- abortErrorLab(signal);
- return;
- }
-
- case 58:{
- jam();
- releaseAtErrorLab(signal);
- return;
- }
-
- case 59:{
- jam();
- terrorCode = ZABORTINPROGRESS;
- abortErrorLab(signal);
- return;
- }
-
- default:
- jam();
- systemErrorLab(signal);
- return;
- }//switch
-}
-
-void Dbtc::execKEYINFO(Signal* signal)
-{
- UintR compare_transid1, compare_transid2;
- jamEntry();
- apiConnectptr.i = signal->theData[0];
- tmaxData = 20;
- if (apiConnectptr.i >= capiConnectFilesize) {
- jam();
- warningHandlerLab(signal);
- return;
- }//if
- ptrAss(apiConnectptr, apiConnectRecord);
- ttransid_ptr = 1;
- compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
- compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
- jam();
- printState(signal, 10);
- sendSignalErrorRefuseLab(signal);
- return;
- }//if
- switch (apiConnectptr.p->apiConnectstate) {
- case CS_RECEIVING:
- case CS_REC_COMMITTING:
- case CS_START_SCAN:
- jam();
- /*empty*/;
- break;
- /* OK */
- case CS_ABORTING:
- jam();
- return; /* IGNORE */
- case CS_CONNECTED:
- jam();
- /****************************************************************>*/
- /* MOST LIKELY CAUSED BY A MISSED SIGNAL. SEND REFUSE AND */
- /* SET STATE TO ABORTING. */
- /****************************************************************>*/
- printState(signal, 11);
- signalErrorRefuseLab(signal);
- return;
- case CS_STARTED:
- jam();
- /****************************************************************>*/
- /* MOST LIKELY CAUSED BY A MISSED SIGNAL. SEND REFUSE AND */
- /* SET STATE TO ABORTING. SINCE A TRANSACTION WAS STARTED */
- /* WE ALSO NEED TO ABORT THIS TRANSACTION. */
- /****************************************************************>*/
- terrorCode = ZSIGNAL_ERROR;
- printState(signal, 2);
- abortErrorLab(signal);
- return;
- default:
- jam();
- warningHandlerLab(signal);
- return;
- }//switch
-
- CacheRecord *localCacheRecord = cacheRecord;
- UintR TcacheFilesize = ccacheFilesize;
- UintR TcachePtr = apiConnectptr.p->cachePtr;
- UintR TtcTimer = ctcTimer;
- CacheRecord * const regCachePtr = &localCacheRecord[TcachePtr];
- if (TcachePtr >= TcacheFilesize) {
- TCKEY_abort(signal, 42);
- return;
- }//if
- setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
- cachePtr.i = TcachePtr;
- cachePtr.p = regCachePtr;
-
- tcConnectptr.i = apiConnectptr.p->lastTcConnect;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- switch (tcConnectptr.p->tcConnectstate) {
- case OS_WAIT_KEYINFO:
- jam();
- tckeyreq020Lab(signal);
- return;
- case OS_WAIT_SCAN:
- break;
- default:
- jam();
- terrorCode = ZSTATE_ERROR;
- abortErrorLab(signal);
- return;
- }//switch
-
- UintR TdataPos = 0;
- UintR TkeyLen = regCachePtr->keylen;
- UintR Tlen = regCachePtr->save1;
-
- do {
- if (cfirstfreeDatabuf == RNIL) {
- jam();
- seizeDatabuferrorLab(signal);
- return;
- }//if
- linkKeybuf(signal);
- arrGuard(TdataPos, 19);
- databufptr.p->data[0] = signal->theData[TdataPos + 3];
- databufptr.p->data[1] = signal->theData[TdataPos + 4];
- databufptr.p->data[2] = signal->theData[TdataPos + 5];
- databufptr.p->data[3] = signal->theData[TdataPos + 6];
- Tlen = Tlen + 4;
- TdataPos = TdataPos + 4;
- if (Tlen < TkeyLen) {
- jam();
- if (TdataPos >= tmaxData) {
- jam();
- /*----------------------------------------------------*/
- /** EXIT AND WAIT FOR SIGNAL KEYINFO OR KEYINFO9 **/
- /** WHEN EITHER OF THE SIGNALS IS RECEIVED A JUMP **/
- /** TO LABEL "KEYINFO_LABEL" IS DONE. THEN THE **/
- /** PROGRAM RETURNS TO LABEL TCKEYREQ020 **/
- /*----------------------------------------------------*/
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- regCachePtr->save1 = Tlen;
- return;
- }//if
- } else {
- jam();
- return;
- }//if
- } while (1);
- return;
-}//Dbtc::execKEYINFO()
-
-/*---------------------------------------------------------------------------*/
-/* */
-/* MORE THAN FOUR WORDS OF KEY DATA. WE NEED TO PACK THIS IN KEYINFO SIGNALS.*/
-/* WE WILL ALWAYS PACK 4 WORDS AT A TIME. */
-/*---------------------------------------------------------------------------*/
-void Dbtc::packKeyData000Lab(Signal* signal,
- BlockReference TBRef,
- Uint32 totalLen)
-{
- CacheRecord * const regCachePtr = cachePtr.p;
-
- jam();
- Uint32 len = 0;
- databufptr.i = regCachePtr->firstKeybuf;
- signal->theData[0] = tcConnectptr.i;
- signal->theData[1] = apiConnectptr.p->transid[0];
- signal->theData[2] = apiConnectptr.p->transid[1];
- Uint32 * dst = signal->theData+3;
- ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
-
- do {
- jam();
- databufptr.i = databufptr.p->nextDatabuf;
- dst[len + 0] = databufptr.p->data[0];
- dst[len + 1] = databufptr.p->data[1];
- dst[len + 2] = databufptr.p->data[2];
- dst[len + 3] = databufptr.p->data[3];
- len += 4;
- if (totalLen <= 4) {
- jam();
- /*---------------------------------------------------------------------*/
- /* LAST PACK OF KEY DATA HAS BEEN SENT */
- /*---------------------------------------------------------------------*/
- /* THERE WAS UNSENT INFORMATION, SEND IT. */
- /*---------------------------------------------------------------------*/
- sendSignal(TBRef, GSN_KEYINFO, signal, 3 + len, JBB);
- return;
- } else if(len == KeyInfo::DataLength){
- jam();
- len = 0;
- sendSignal(TBRef, GSN_KEYINFO, signal, 3 + KeyInfo::DataLength, JBB);
- }
- totalLen -= 4;
- ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
- } while (1);
-}//Dbtc::packKeyData000Lab()
-
-void Dbtc::tckeyreq020Lab(Signal* signal)
-{
- CacheRecord * const regCachePtr = cachePtr.p;
- UintR TdataPos = 0;
- UintR TkeyLen = regCachePtr->keylen;
- UintR Tlen = regCachePtr->save1;
-
- do {
- if (cfirstfreeDatabuf == RNIL) {
- jam();
- seizeDatabuferrorLab(signal);
- return;
- }//if
- linkKeybuf(signal);
- arrGuard(TdataPos, 19);
- databufptr.p->data[0] = signal->theData[TdataPos + 3];
- databufptr.p->data[1] = signal->theData[TdataPos + 4];
- databufptr.p->data[2] = signal->theData[TdataPos + 5];
- databufptr.p->data[3] = signal->theData[TdataPos + 6];
- Tlen = Tlen + 4;
- TdataPos = TdataPos + 4;
- if (Tlen < TkeyLen) {
- jam();
- if (TdataPos >= tmaxData) {
- jam();
- /*----------------------------------------------------*/
- /** EXIT AND WAIT FOR SIGNAL KEYINFO OR KEYINFO9 **/
- /** WHEN EITHER OF THE SIGNALS IS RECEIVED A JUMP **/
- /** TO LABEL "KEYINFO_LABEL" IS DONE. THEN THE **/
- /** PROGRAM RETURNS TO LABEL TCKEYREQ020 **/
- /*----------------------------------------------------*/
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- regCachePtr->save1 = Tlen;
- tcConnectptr.p->tcConnectstate = OS_WAIT_KEYINFO;
- return;
- }//if
- } else {
- jam();
- tckeyreq050Lab(signal);
- return;
- }//if
- } while (1);
- return;
-}//Dbtc::tckeyreq020Lab()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SAVE ATTRIBUTE INFORMATION IN OPERATION RECORD ------- */
-/* ------------------------------------------------------------------------- */
-void Dbtc::saveAttrbuf(Signal* signal)
-{
- CacheRecord * const regCachePtr = cachePtr.p;
- UintR TfirstfreeAttrbuf = cfirstfreeAttrbuf;
- UintR TattrbufFilesize = cattrbufFilesize;
- UintR TTcfirstAttrbuf = regCachePtr->firstAttrbuf;
- UintR Tlen = signal->length() - 3;
- AttrbufRecord *localAttrbufRecord = attrbufRecord;
-
- AttrbufRecord * const regAttrPtr = &localAttrbufRecord[TfirstfreeAttrbuf];
- if (TfirstfreeAttrbuf >= TattrbufFilesize) {
- TCKEY_abort(signal, 21);
- return;
- }//if
- UintR Tnext = regAttrPtr->attrbuf[ZINBUF_NEXT];
- if (TTcfirstAttrbuf == RNIL) {
- jam();
- regCachePtr->firstAttrbuf = TfirstfreeAttrbuf;
- } else {
- AttrbufRecordPtr saAttrbufptr;
-
- saAttrbufptr.i = regCachePtr->lastAttrbuf;
- jam();
- if (saAttrbufptr.i >= TattrbufFilesize) {
- TCKEY_abort(signal, 22);
- return;
- }//if
- saAttrbufptr.p = &localAttrbufRecord[saAttrbufptr.i];
- saAttrbufptr.p->attrbuf[ZINBUF_NEXT] = TfirstfreeAttrbuf;
- }//if
-
- cfirstfreeAttrbuf = Tnext;
- regAttrPtr->attrbuf[ZINBUF_NEXT] = RNIL;
- regCachePtr->lastAttrbuf = TfirstfreeAttrbuf;
- regAttrPtr->attrbuf[ZINBUF_DATA_LEN] = Tlen;
-
- UintR Tdata1 = signal->theData[3];
- UintR Tdata2 = signal->theData[4];
- UintR Tdata3 = signal->theData[5];
- UintR Tdata4 = signal->theData[6];
- UintR Tdata5 = signal->theData[7];
- UintR Tdata6 = signal->theData[8];
- UintR Tdata7 = signal->theData[9];
- UintR Tdata8 = signal->theData[10];
-
- regAttrPtr->attrbuf[0] = Tdata1;
- regAttrPtr->attrbuf[1] = Tdata2;
- regAttrPtr->attrbuf[2] = Tdata3;
- regAttrPtr->attrbuf[3] = Tdata4;
- regAttrPtr->attrbuf[4] = Tdata5;
- regAttrPtr->attrbuf[5] = Tdata6;
- regAttrPtr->attrbuf[6] = Tdata7;
- regAttrPtr->attrbuf[7] = Tdata8;
-
- if (Tlen > 8) {
-
- Tdata1 = signal->theData[11];
- Tdata2 = signal->theData[12];
- Tdata3 = signal->theData[13];
- Tdata4 = signal->theData[14];
- Tdata5 = signal->theData[15];
- Tdata6 = signal->theData[16];
- Tdata7 = signal->theData[17];
-
- regAttrPtr->attrbuf[8] = Tdata1;
- regAttrPtr->attrbuf[9] = Tdata2;
- regAttrPtr->attrbuf[10] = Tdata3;
- regAttrPtr->attrbuf[11] = Tdata4;
- regAttrPtr->attrbuf[12] = Tdata5;
- regAttrPtr->attrbuf[13] = Tdata6;
- regAttrPtr->attrbuf[14] = Tdata7;
- jam();
- if (Tlen > 15) {
-
- Tdata1 = signal->theData[18];
- Tdata2 = signal->theData[19];
- Tdata3 = signal->theData[20];
- Tdata4 = signal->theData[21];
- Tdata5 = signal->theData[22];
- Tdata6 = signal->theData[23];
- Tdata7 = signal->theData[24];
-
- jam();
- regAttrPtr->attrbuf[15] = Tdata1;
- regAttrPtr->attrbuf[16] = Tdata2;
- regAttrPtr->attrbuf[17] = Tdata3;
- regAttrPtr->attrbuf[18] = Tdata4;
- regAttrPtr->attrbuf[19] = Tdata5;
- regAttrPtr->attrbuf[20] = Tdata6;
- regAttrPtr->attrbuf[21] = Tdata7;
- }//if
- }//if
-}//Dbtc::saveAttrbuf()
-
-void Dbtc::execATTRINFO(Signal* signal)
-{
- UintR compare_transid1, compare_transid2;
- UintR Tdata1 = signal->theData[0];
- UintR Tlength = signal->length();
- UintR TapiConnectFilesize = capiConnectFilesize;
- ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
-
- jamEntry();
- apiConnectptr.i = Tdata1;
- ttransid_ptr = 1;
- if (Tdata1 >= TapiConnectFilesize) {
- DEBUG("Drop ATTRINFO, wrong apiConnectptr");
- TCKEY_abort(signal, 18);
- return;
- }//if
-
- UintR Tdata2 = signal->theData[1];
- UintR Tdata3 = signal->theData[2];
- ApiConnectRecord * const regApiPtr = &localApiConnectRecord[Tdata1];
- compare_transid1 = regApiPtr->transid[0] ^ Tdata2;
- compare_transid2 = regApiPtr->transid[1] ^ Tdata3;
- apiConnectptr.p = regApiPtr;
- compare_transid1 = compare_transid1 | compare_transid2;
-
- if (compare_transid1 != 0) {
- DEBUG("Drop ATTRINFO, wrong transid, lenght="<<Tlength
- << " transid("<<hex<<Tdata2<<", "<<Tdata3);
- TCKEY_abort(signal, 19);
- return;
- }//if
- if (Tlength < 4) {
- DEBUG("Drop ATTRINFO, wrong length = " << Tlength);
- TCKEY_abort(signal, 20);
- return;
- }
- Tlength -= 3;
- UintR TcompREC_COMMIT = (regApiPtr->apiConnectstate == CS_REC_COMMITTING);
- UintR TcompRECEIVING = (regApiPtr->apiConnectstate == CS_RECEIVING);
- UintR TcompBOTH = TcompREC_COMMIT | TcompRECEIVING;
-
- if (TcompBOTH) {
- jam();
- if (ERROR_INSERTED(8015)) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- if (ERROR_INSERTED(8016)) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- CacheRecord *localCacheRecord = cacheRecord;
- UintR TcacheFilesize = ccacheFilesize;
- UintR TcachePtr = regApiPtr->cachePtr;
- UintR TtcTimer = ctcTimer;
- CacheRecord * const regCachePtr = &localCacheRecord[TcachePtr];
- if (TcachePtr >= TcacheFilesize) {
- TCKEY_abort(signal, 43);
- return;
- }//if
- UintR TfirstfreeAttrbuf = cfirstfreeAttrbuf;
- UintR TcurrReclenAi = regCachePtr->currReclenAi;
- UintR TattrLen = regCachePtr->attrlength;
-
- setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
- cachePtr.i = TcachePtr;
- cachePtr.p = regCachePtr;
- TcurrReclenAi = TcurrReclenAi + Tlength;
- regCachePtr->currReclenAi = TcurrReclenAi;
- int TattrlengthRemain = TattrLen - TcurrReclenAi;
-
- if (TfirstfreeAttrbuf == RNIL) {
- DEBUG("No more attrinfo buffers");
- TCKEY_abort(signal, 24);
- return;
- }//if
- saveAttrbuf(signal);
- if (TattrlengthRemain == 0) {
- /****************************************************************>*/
- /* HERE WE HAVE FOUND THAT THE LAST SIGNAL BELONGING TO THIS */
- /* OPERATION HAS BEEN RECEIVED. THIS MEANS THAT WE CAN NOW REUSE */
- /* THE API CONNECT RECORD. HOWEVER IF PREPARE OR COMMIT HAS BEEN */
- /* RECEIVED THEN IT IS NOT ALLOWED TO RECEIVE ANY FURTHER */
- /* OPERATIONS. */
- /****************************************************************>*/
- UintR TlastConnect = regApiPtr->lastTcConnect;
- if (TcompRECEIVING) {
- jam();
- regApiPtr->apiConnectstate = CS_STARTED;
- } else {
- jam();
- regApiPtr->apiConnectstate = CS_START_COMMITTING;
- }//if
- tcConnectptr.i = TlastConnect;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- attrinfoDihReceivedLab(signal);
- } else if (TattrlengthRemain < 0) {
- jam();
- DEBUG("ATTRINFO wrong total length="<<Tlength
- <<", TattrlengthRemain="<<TattrlengthRemain
- <<", TattrLen="<<TattrLen
- <<", TcurrReclenAi="<<TcurrReclenAi);
- tcConnectptr.i = regApiPtr->lastTcConnect;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- aiErrorLab(signal);
- }//if
- return;
- } else if (regApiPtr->apiConnectstate == CS_START_SCAN) {
- jam();
- scanAttrinfoLab(signal, Tlength);
- return;
- } else {
- switch (regApiPtr->apiConnectstate) {
- case CS_ABORTING:
- jam();
- /* JUST IGNORE THE SIGNAL*/
- // DEBUG("Drop ATTRINFO, CS_ABORTING");
- return;
- case CS_CONNECTED:
- jam();
- /* MOST LIKELY CAUSED BY A MISSED SIGNAL.*/
- // DEBUG("Drop ATTRINFO, CS_CONNECTED");
- return;
- case CS_STARTED:
- jam();
- /****************************************************************>*/
- /* MOST LIKELY CAUSED BY A MISSED SIGNAL. SEND REFUSE AND */
- /* SET STATE TO ABORTING. SINCE A TRANSACTION WAS STARTED */
- /* WE ALSO NEED TO ABORT THIS TRANSACTION. */
- /****************************************************************>*/
- terrorCode = ZSIGNAL_ERROR;
- printState(signal, 1);
- abortErrorLab(signal);
- return;
- default:
- jam();
- /****************************************************************>*/
- /* SIGNAL RECEIVED IN AN UNEXPECTED STATE. WE IGNORE SIGNAL */
- /* SINCE WE DO NOT REALLY KNOW WHERE THE ERROR OCCURRED. */
- /****************************************************************>*/
- DEBUG("Drop ATTRINFO, illegal state="<<regApiPtr->apiConnectstate);
- printState(signal, 9);
- return;
- }//switch
- }//if
-}//Dbtc::execATTRINFO()
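
execATTRINFO (and execTCKEYREQ below) validates an incoming signal by XOR-ing the two transaction-id words carried in the signal with the ones stored in the connect record and OR-ing the two results: any non-zero value means at least one word differs and the signal is dropped. A small self-contained sketch of that branch-free comparison (TransId is an illustrative struct, not an NDB type):

#include <cassert>
#include <cstdint>

struct TransId {
  uint32_t id[2];
};

// Returns true when both 32-bit transaction-id words match.
// XOR yields 0 for equal words; OR-ing the two XOR results lets a single
// comparison against zero decide the outcome, as in the Dbtc code above.
static bool sameTransaction(const TransId& stored, uint32_t sigId1, uint32_t sigId2)
{
  uint32_t diff1 = stored.id[0] ^ sigId1;
  uint32_t diff2 = stored.id[1] ^ sigId2;
  return (diff1 | diff2) == 0;
}

int main()
{
  TransId t = {{0x1234, 0xabcd}};
  assert(sameTransaction(t, 0x1234, 0xabcd));
  assert(!sameTransaction(t, 0x1234, 0xabce));
  return 0;
}
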
-
-/* *********************************************************************>> */
-/* */
-/* MODULE: HASH MODULE */
-/* DESCRIPTION: CONTAINS THE HASH VALUE CALCULATION */
-/* *********************************************************************> */
-void Dbtc::hash(Signal* signal)
-{
- DatabufRecordPtr locDatabufptr;
- UintR ti;
- UintR Tdata0;
- UintR Tdata1;
- UintR Tdata2;
- UintR Tdata3;
- UintR* Tdata32;
-
- CacheRecord * const regCachePtr = cachePtr.p;
- Tdata32 = signal->theData;
-
- Tdata0 = regCachePtr->keydata[0];
- Tdata1 = regCachePtr->keydata[1];
- Tdata2 = regCachePtr->keydata[2];
- Tdata3 = regCachePtr->keydata[3];
- Tdata32[0] = Tdata0;
- Tdata32[1] = Tdata1;
- Tdata32[2] = Tdata2;
- Tdata32[3] = Tdata3;
- if (regCachePtr->keylen > 4) {
- locDatabufptr.i = regCachePtr->firstKeybuf;
- ti = 4;
- while (locDatabufptr.i != RNIL) {
- ptrCheckGuard(locDatabufptr, cdatabufFilesize, databufRecord);
- Tdata0 = locDatabufptr.p->data[0];
- Tdata1 = locDatabufptr.p->data[1];
- Tdata2 = locDatabufptr.p->data[2];
- Tdata3 = locDatabufptr.p->data[3];
- Tdata32[ti ] = Tdata0;
- Tdata32[ti + 1] = Tdata1;
- Tdata32[ti + 2] = Tdata2;
- Tdata32[ti + 3] = Tdata3;
- locDatabufptr.i = locDatabufptr.p->nextDatabuf;
- ti += 4;
- }//while
- }//if
-
- UintR keylen = (UintR)regCachePtr->keylen;
- Uint32 distKey = regCachePtr->distributionKeyIndicator;
-
- Uint32 tmp[4];
- if(!regCachePtr->m_special_hash)
- {
- md5_hash(tmp, (Uint64*)&Tdata32[0], keylen);
- }
- else
- {
- handle_special_hash(tmp, Tdata32, keylen, regCachePtr->tableref, !distKey);
- }
-
- thashValue = tmp[0];
- if (distKey){
- jam();
- tdistrHashValue = regCachePtr->distributionKey;
- } else {
- jam();
- tdistrHashValue = tmp[1];
- }//if
-}//Dbtc::hash()
-
-bool
-Dbtc::handle_special_hash(Uint32 dstHash[4], Uint32* src, Uint32 srcLen,
- Uint32 tabPtrI,
- bool distr)
-{
- Uint64 Tmp[MAX_KEY_SIZE_IN_WORDS * 4 * MAX_XFRM_MULTIPLY];
- const Uint32 dstSize = sizeof(Tmp) / 4;
- const TableRecord* tabPtrP = &tableRecord[tabPtrI];
- const Uint32 noOfKeyAttr = tabPtrP->noOfKeyAttr;
- Uint32 noOfDistrKeys = tabPtrP->noOfDistrKeys;
- const bool hasCharAttr = tabPtrP->hasCharAttr;
-
- Uint32 *dst = (Uint32*)Tmp;
- Uint32 dstPos = 0;
- Uint32 srcPos = 0;
- Uint32 keyPartLen[MAX_ATTRIBUTES_IN_INDEX];
- if(hasCharAttr){
- Uint32 i = 0;
- while (i < noOfKeyAttr) {
- const TableRecord::KeyAttr& keyAttr = tabPtrP->keyAttr[i];
-
- Uint32 srcBytes =
- AttributeDescriptor::getSizeInBytes(keyAttr.attributeDescriptor);
- Uint32 srcWords = (srcBytes + 3) / 4;
- Uint32 dstWords = ~0;
- uchar* dstPtr = (uchar*)&dst[dstPos];
- const uchar* srcPtr = (const uchar*)&src[srcPos];
- CHARSET_INFO* cs = keyAttr.charsetInfo;
-
- if (cs == NULL) {
- jam();
- memcpy(dstPtr, srcPtr, srcWords << 2);
- dstWords = srcWords;
- } else {
- jam();
- Uint32 typeId =
- AttributeDescriptor::getType(keyAttr.attributeDescriptor);
- Uint32 lb, len;
- bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
- ndbrequire(ok);
- Uint32 xmul = cs->strxfrm_multiply;
- if (xmul == 0)
- xmul = 1;
- /*
- * Varchar is really Char. End spaces do not matter. To get
- * same hash we blank-pad to maximum length via strnxfrm.
- * TODO use MySQL charset-aware hash function instead
- */
- Uint32 dstLen = xmul * (srcBytes - lb);
- ndbrequire(dstLen <= ((dstSize - dstPos) << 2));
- int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
- ndbrequire(n != -1);
- while ((n & 3) != 0) {
- dstPtr[n++] = 0;
- }
- dstWords = (n >> 2);
- }
- dstPos += dstWords;
- srcPos += srcWords;
- keyPartLen[i++] = dstWords;
- }
- }
- else
- {
- dst = src;
- dstPos = srcLen;
- }
-
- md5_hash(dstHash, (Uint64*)dst, dstPos);
-
- if(distr && noOfDistrKeys)
- {
- jam();
- src = dst;
- dstPos = 0;
- Uint32 i = 0;
- if(hasCharAttr)
- {
- while (i < noOfKeyAttr && noOfDistrKeys)
- {
- const TableRecord::KeyAttr& keyAttr = tabPtrP->keyAttr[i];
- Uint32 len = keyPartLen[i];
- if(AttributeDescriptor::getDKey(keyAttr.attributeDescriptor))
- {
- noOfDistrKeys--;
- memmove(dst+dstPos, src, len << 2);
- dstPos += len;
- }
- src += len;
- i++;
- }
- }
- else
- {
- while (i < noOfKeyAttr && noOfDistrKeys)
- {
- const TableRecord::KeyAttr& keyAttr = tabPtrP->keyAttr[i];
- Uint32 len =
- AttributeDescriptor::getSizeInBytes(keyAttr.attributeDescriptor);
- len = (len + 3) / 4;
- if(AttributeDescriptor::getDKey(keyAttr.attributeDescriptor))
- {
- noOfDistrKeys--;
- memmove(dst+dstPos, src, len << 2);
- dstPos += len;
- }
- src += len;
- i++;
- }
- }
- Uint32 tmp[4];
- md5_hash(tmp, (Uint64*)dst, dstPos);
- dstHash[1] = tmp[1];
- }
- return true; // success
-}
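
handle_special_hash first normalizes any character key parts (so trailing-space padding and collation rules cannot change the hash), hashes the full normalized key for thashValue, and then re-hashes only the parts flagged as distribution keys to obtain the value used for fragment selection. A simplified sketch of that two-pass idea, assuming a plain FNV-1a hash as a stand-in for the internal md5_hash and a hypothetical KeyPart descriptor:

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct KeyPart {
  std::string normalized;  // already charset-normalized bytes
  bool isDistributionKey;  // corresponds to AttributeDescriptor::getDKey()
};

// FNV-1a, used here only as a placeholder for the real md5_hash().
static uint64_t fnv1a(const std::string& bytes)
{
  uint64_t h = 1469598103934665603ull;
  for (unsigned char c : bytes) { h ^= c; h *= 1099511628211ull; }
  return h;
}

// First pass: hash the whole key. Second pass: hash only distribution-key parts.
static void hashKey(const std::vector<KeyPart>& parts,
                    uint64_t& keyHash, uint64_t& distrHash)
{
  std::string all, distr;
  for (const KeyPart& p : parts) {
    all += p.normalized;
    if (p.isDistributionKey)
      distr += p.normalized;
  }
  keyHash = fnv1a(all);
  distrHash = distr.empty() ? keyHash : fnv1a(distr);
}

int main()
{
  std::vector<KeyPart> key = {{"ABC   ", true}, {"42", false}};
  uint64_t kh, dh;
  hashKey(key, kh, dh);
  std::printf("keyHash=%llx distrHash=%llx\n",
              (unsigned long long)kh, (unsigned long long)dh);
  return 0;
}
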
-
-/*
-INIT_API_CONNECT_REC
----------------------------
-*/
-/* ========================================================================= */
-/* ======= INIT_API_CONNECT_REC ======= */
-/* */
-/* ========================================================================= */
-void Dbtc::initApiConnectRec(Signal* signal,
- ApiConnectRecord * const regApiPtr,
- bool releaseIndexOperations)
-{
- const TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0];
- UintR TfailureNr = cfailure_nr;
- UintR TtransCount = c_counters.ctransCount;
- UintR Ttransid0 = tcKeyReq->transId1;
- UintR Ttransid1 = tcKeyReq->transId2;
-
- regApiPtr->m_exec_flag = 0;
- regApiPtr->returncode = 0;
- regApiPtr->returnsignal = RS_TCKEYCONF;
- ndbassert(regApiPtr->firstTcConnect == RNIL);
- regApiPtr->firstTcConnect = RNIL;
- regApiPtr->lastTcConnect = RNIL;
- regApiPtr->globalcheckpointid = 0;
- regApiPtr->lqhkeyconfrec = 0;
- regApiPtr->lqhkeyreqrec = 0;
- regApiPtr->tckeyrec = 0;
- regApiPtr->tcindxrec = 0;
- regApiPtr->failureNr = TfailureNr;
- regApiPtr->transid[0] = Ttransid0;
- regApiPtr->transid[1] = Ttransid1;
- regApiPtr->commitAckMarker = RNIL;
- regApiPtr->buddyPtr = RNIL;
- regApiPtr->currSavePointId = 0;
- // Trigger data
- releaseFiredTriggerData(&regApiPtr->theFiredTriggers),
- // Index data
- regApiPtr->indexOpReturn = false;
- regApiPtr->noIndexOp = 0;
- if(releaseIndexOperations)
- releaseAllSeizedIndexOperations(regApiPtr);
-
- c_counters.ctransCount = TtransCount + 1;
-}//Dbtc::initApiConnectRec()
-
-int
-Dbtc::seizeTcRecord(Signal* signal)
-{
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- TcConnectRecord *localTcConnectRecord = tcConnectRecord;
- UintR TfirstfreeTcConnect = cfirstfreeTcConnect;
- UintR TtcConnectFilesize = ctcConnectFilesize;
- tcConnectptr.i = TfirstfreeTcConnect;
- if (TfirstfreeTcConnect >= TtcConnectFilesize) {
- int place = 3;
- if (TfirstfreeTcConnect != RNIL) {
- place = 10;
- }//if
- TCKEY_abort(signal, place);
- return 1;
- }//if
- //--------------------------------------------------------------------------
- // Optimised version of ptrAss(tcConnectptr, tcConnectRecord)
- //--------------------------------------------------------------------------
- TcConnectRecord * const regTcPtr =
- &localTcConnectRecord[TfirstfreeTcConnect];
-
- UintR TconcurrentOp = c_counters.cconcurrentOp;
- UintR TlastTcConnect = regApiPtr->lastTcConnect;
- UintR TtcConnectptrIndex = tcConnectptr.i;
- TcConnectRecordPtr tmpTcConnectptr;
-
- cfirstfreeTcConnect = regTcPtr->nextTcConnect;
- tcConnectptr.p = regTcPtr;
-
- c_counters.cconcurrentOp = TconcurrentOp + 1;
- regTcPtr->prevTcConnect = TlastTcConnect;
- regTcPtr->nextTcConnect = RNIL;
- regTcPtr->accumulatingTriggerData.i = RNIL;
- regTcPtr->accumulatingTriggerData.p = NULL;
- regTcPtr->noFiredTriggers = 0;
- regTcPtr->noReceivedTriggers = 0;
- regTcPtr->triggerExecutionCount = 0;
- regTcPtr->triggeringOperation = RNIL;
- regTcPtr->isIndexOp = false;
- regTcPtr->indexOp = RNIL;
- regTcPtr->currentIndexId = RNIL;
-
- regApiPtr->lastTcConnect = TtcConnectptrIndex;
-
- if (TlastTcConnect == RNIL) {
- jam();
- regApiPtr->firstTcConnect = TtcConnectptrIndex;
- } else {
- tmpTcConnectptr.i = TlastTcConnect;
- jam();
- ptrCheckGuard(tmpTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
- tmpTcConnectptr.p->nextTcConnect = TtcConnectptrIndex;
- }//if
- return 0;
-}//Dbtc::seizeTcRecord()
-
-int
-Dbtc::seizeCacheRecord(Signal* signal)
-{
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- UintR TfirstfreeCacheRec = cfirstfreeCacheRec;
- UintR TcacheFilesize = ccacheFilesize;
- CacheRecord *localCacheRecord = cacheRecord;
- if (TfirstfreeCacheRec >= TcacheFilesize) {
- TCKEY_abort(signal, 41);
- return 1;
- }//if
- CacheRecord * const regCachePtr = &localCacheRecord[TfirstfreeCacheRec];
-
- regApiPtr->cachePtr = TfirstfreeCacheRec;
- cfirstfreeCacheRec = regCachePtr->nextCacheRec;
- cachePtr.i = TfirstfreeCacheRec;
- cachePtr.p = regCachePtr;
-
-#ifdef VM_TRACE
- // This is a good place to check that resources have
- // been properly released from CacheRecord
- ndbrequire(regCachePtr->firstKeybuf == RNIL);
- ndbrequire(regCachePtr->lastKeybuf == RNIL);
-#endif
- regCachePtr->firstKeybuf = RNIL;
- regCachePtr->lastKeybuf = RNIL;
- regCachePtr->firstAttrbuf = RNIL;
- regCachePtr->lastAttrbuf = RNIL;
- regCachePtr->currReclenAi = 0;
- return 0;
-}//Dbtc::seizeCacheRecord()
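
seizeTcRecord and seizeCacheRecord both pull a record off a singly linked free list kept inside a preallocated array: a global cfirstfree* index names the head, each free record's next pointer names its successor, and RNIL marks the empty list. A compact sketch of that pool pattern with illustrative names (RNIL modelled as an out-of-range sentinel):

#include <cstdint>
#include <cstdio>
#include <vector>

static const uint32_t RNIL_LIKE = 0xffffffff;  // sentinel, like RNIL

struct Record {
  uint32_t next;   // index of the next free record, or RNIL_LIKE
  int payload;
};

struct Pool {
  std::vector<Record> slots;
  uint32_t firstFree;

  explicit Pool(size_t n) : slots(n), firstFree(0) {
    for (size_t i = 0; i < n; i++)   // chain all records into the free list
      slots[i].next = (i + 1 < n) ? uint32_t(i + 1) : RNIL_LIKE;
  }

  // Take the head of the free list; returns RNIL_LIKE when exhausted
  // (the Dbtc code aborts the request with TCKEY_abort in that case).
  uint32_t seize() {
    uint32_t i = firstFree;
    if (i == RNIL_LIKE) return RNIL_LIKE;
    firstFree = slots[i].next;
    slots[i].next = RNIL_LIKE;
    return i;
  }

  // Push a record back on the head of the free list.
  void release(uint32_t i) {
    slots[i].next = firstFree;
    firstFree = i;
  }
};

int main()
{
  Pool pool(4);
  uint32_t a = pool.seize();
  uint32_t b = pool.seize();
  std::printf("seized %u and %u\n", a, b);
  pool.release(a);
  std::printf("after release, next seize gives %u\n", pool.seize());
  return 0;
}
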
-
-/*****************************************************************************/
-/* T C K E Y R E Q */
-/* AFTER HAVING ESTABLISHED THE CONNECT, THE APPLICATION BLOCK SENDS AN */
-/* OPERATION REQUEST TO TC. ALL NECESSARY INFORMATION TO CARRY OUT REQUEST */
-/* IS FURNISHED IN PARAMETERS. TC STORES THIS INFORMATION AND ENQUIRES */
-/* FROM DIH ABOUT THE NODES WHICH MAY HAVE THE REQUESTED DATA */
-/*****************************************************************************/
-void Dbtc::execTCKEYREQ(Signal* signal)
-{
- UintR compare_transid1, compare_transid2;
- UintR titcLenAiInTckeyreq;
- UintR TkeyLength;
- const TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtr();
- UintR Treqinfo;
-
- jamEntry();
- /*-------------------------------------------------------------------------
- * Common error routines are used for several signals, they need to know
- * where to find the transaction identifier in the signal.
- *-------------------------------------------------------------------------*/
- const UintR TapiIndex = tcKeyReq->apiConnectPtr;
- const UintR TapiMaxIndex = capiConnectFilesize;
- const UintR TtabIndex = tcKeyReq->tableId;
- const UintR TtabMaxIndex = ctabrecFilesize;
- ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
-
- ttransid_ptr = 6;
- apiConnectptr.i = TapiIndex;
- if (TapiIndex >= TapiMaxIndex) {
- TCKEY_abort(signal, 6);
- return;
- }//if
- if (TtabIndex >= TtabMaxIndex) {
- TCKEY_abort(signal, 7);
- return;
- }//if
-
- Treqinfo = tcKeyReq->requestInfo;
- //--------------------------------------------------------------------------
- // Optimised version of ptrAss(tabptr, tableRecord)
- // Optimised version of ptrAss(apiConnectptr, apiConnectRecord)
- //--------------------------------------------------------------------------
- ApiConnectRecord * const regApiPtr = &localApiConnectRecord[TapiIndex];
- apiConnectptr.p = regApiPtr;
-
- Uint32 TstartFlag = tcKeyReq->getStartFlag(Treqinfo);
- Uint32 TexecFlag = TcKeyReq::getExecuteFlag(Treqinfo);
-
- bool isIndexOp = regApiPtr->isIndexOp;
- bool isIndexOpReturn = regApiPtr->indexOpReturn;
- regApiPtr->isIndexOp = false; // Reset marker
- regApiPtr->m_exec_flag |= TexecFlag;
- switch (regApiPtr->apiConnectstate) {
- case CS_CONNECTED:{
- if (TstartFlag == 1 && getAllowStartTransaction() == true){
- //---------------------------------------------------------------------
- // Initialise API connect record if transaction is started.
- //---------------------------------------------------------------------
- jam();
- initApiConnectRec(signal, regApiPtr);
- regApiPtr->m_exec_flag = TexecFlag;
- } else {
- if(getAllowStartTransaction() == true){
- /*------------------------------------------------------------------
- * WE EXPECTED A START TRANSACTION. SINCE NO OPERATIONS HAVE BEEN
- * RECEIVED WE INDICATE THIS BY SETTING FIRST_TC_CONNECT TO RNIL TO
- * ENSURE PROPER OPERATION OF THE COMMON ABORT HANDLING.
- *-----------------------------------------------------------------*/
- TCKEY_abort(signal, 0);
- return;
- } else {
- /**
- * getAllowStartTransaction() == false
- */
- TCKEY_abort(signal, 57);
- return;
- }//if
- }
- }
- break;
- case CS_STARTED:
- if(TstartFlag == 1 && regApiPtr->firstTcConnect == RNIL)
- {
- /**
- * If last operation in last transaction was a simple/dirty read
- * it does not have to be committed or rolled back; hence,
- * the state will be CS_STARTED
- */
- jam();
- initApiConnectRec(signal, regApiPtr);
- regApiPtr->m_exec_flag = TexecFlag;
- } else {
- //----------------------------------------------------------------------
- // Transaction is started already.
- // Check that the operation is on the same transaction.
- //-----------------------------------------------------------------------
- compare_transid1 = regApiPtr->transid[0] ^ tcKeyReq->transId1;
- compare_transid2 = regApiPtr->transid[1] ^ tcKeyReq->transId2;
- jam();
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
- TCKEY_abort(signal, 1);
- return;
- }//if
- }
- break;
- case CS_ABORTING:
- if (regApiPtr->abortState == AS_IDLE) {
- if (TstartFlag == 1) {
- //--------------------------------------------------------------------
- // Previous transaction had been aborted and the abort was completed.
- // It is then OK to start a new transaction again.
- //--------------------------------------------------------------------
- jam();
- initApiConnectRec(signal, regApiPtr);
- regApiPtr->m_exec_flag = TexecFlag;
- } else if(TexecFlag) {
- TCKEY_abort(signal, 59);
- return;
- } else {
- //--------------------------------------------------------------------
- // The current transaction was aborted successfully.
- // We will not do anything before we receive an operation
- // with a start indicator. We will ignore this signal.
- //--------------------------------------------------------------------
- jam();
- DEBUG("Drop TCKEYREQ - apiConnectState=CS_ABORTING, ==AS_IDLE");
- return;
- }//if
- } else {
- //----------------------------------------------------------------------
- // Previous transaction is still aborting
- //----------------------------------------------------------------------
- jam();
- if (TstartFlag == 1) {
- //--------------------------------------------------------------------
- // If a new transaction tries to start while the old is
- // still aborting, we will report this to the starting API.
- //--------------------------------------------------------------------
- TCKEY_abort(signal, 2);
- return;
- } else if(TexecFlag) {
- TCKEY_abort(signal, 59);
- return;
- }
- //----------------------------------------------------------------------
- // Ignore signals without start indicator set when aborting transaction.
- //----------------------------------------------------------------------
- DEBUG("Drop TCKEYREQ - apiConnectState=CS_ABORTING, !=AS_IDLE");
- return;
- }//if
- break;
- case CS_START_COMMITTING:
- jam();
- if(isIndexOpReturn || TcKeyReq::getExecutingTrigger(Treqinfo)){
- break;
- }
- default:
- jam();
- /*----------------------------------------------------------------------
- * IN THIS CASE THE NDBAPI IS AN UNTRUSTED ENTITY THAT HAS SENT A SIGNAL
- * WHEN IT WAS NOT EXPECTED TO.
- * WE MIGHT BE IN A PROCESS TO RECEIVE, PREPARE,
- * COMMIT OR COMPLETE AND OBVIOUSLY THIS IS NOT A DESIRED EVENT.
- * WE WILL ALWAYS COMPLETE THE ABORT HANDLING BEFORE WE ALLOW
- * ANYTHING TO HAPPEN ON THIS CONNECTION AGAIN.
- * THUS THERE IS NO ACTION FROM THE API THAT CAN SPEED UP THIS PROCESS.
- *---------------------------------------------------------------------*/
- TCKEY_abort(signal, 55);
- return;
- }//switch
-
- TableRecordPtr localTabptr;
- localTabptr.i = TtabIndex;
- localTabptr.p = &tableRecord[TtabIndex];
- if (localTabptr.p->checkTable(tcKeyReq->tableSchemaVersion)) {
- ;
- } else {
- /*-----------------------------------------------------------------------*/
- /* THE API IS WORKING WITH AN OLD SCHEMA VERSION. IT NEEDS REPLACEMENT. */
- /* COULD ALSO BE THAT THE TABLE IS NOT DEFINED. */
- /*-----------------------------------------------------------------------*/
- TCKEY_abort(signal, 8);
- return;
- }//if
-
- //-------------------------------------------------------------------------
- // Error Insertion for testing purposes. Test to see what happens when no
- // more TC records available.
- //-------------------------------------------------------------------------
- if (ERROR_INSERTED(8032)) {
- TCKEY_abort(signal, 3);
- return;
- }//if
-
- if (seizeTcRecord(signal) != 0) {
- return;
- }//if
-
- if (seizeCacheRecord(signal) != 0) {
- return;
- }//if
-
- TcConnectRecord * const regTcPtr = tcConnectptr.p;
- CacheRecord * const regCachePtr = cachePtr.p;
-
- /*
- INIT_TC_CONNECT_REC
- -------------------------
- */
- /* ---------------------------------------------------------------------- */
- /* ------- INIT OPERATION RECORD WITH SIGNAL DATA AND RNILS ------- */
- /* */
- /* ---------------------------------------------------------------------- */
-
- UintR TapiVersionNo = tcKeyReq->getAPIVersion(tcKeyReq->attrLen);
- UintR Tlqhkeyreqrec = regApiPtr->lqhkeyreqrec;
- regApiPtr->lqhkeyreqrec = Tlqhkeyreqrec + 1;
- regCachePtr->apiVersionNo = TapiVersionNo;
-
- UintR TapiConnectptrIndex = apiConnectptr.i;
- UintR TsenderData = tcKeyReq->senderData;
- UintR TattrLen = tcKeyReq->getAttrinfoLen(tcKeyReq->attrLen);
- UintR TattrinfoCount = c_counters.cattrinfoCount;
-
- regTcPtr->apiConnect = TapiConnectptrIndex;
- regTcPtr->clientData = TsenderData;
- regTcPtr->commitAckMarker = RNIL;
- regTcPtr->isIndexOp = isIndexOp;
- regTcPtr->indexOp = regApiPtr->executingIndexOp;
- regTcPtr->savePointId = regApiPtr->currSavePointId;
- regApiPtr->executingIndexOp = RNIL;
-
- if (TcKeyReq::getExecutingTrigger(Treqinfo)) {
- // Save the TcOperationPtr for the firing operation
- regTcPtr->triggeringOperation = TsenderData;
- }
-
- if (TexecFlag){
- Uint32 currSPId = regApiPtr->currSavePointId;
- regApiPtr->currSavePointId = ++currSPId;
- }
-
- regCachePtr->attrlength = TattrLen;
- c_counters.cattrinfoCount = TattrinfoCount + TattrLen;
-
- UintR TtabptrIndex = localTabptr.i;
- UintR TtableSchemaVersion = tcKeyReq->tableSchemaVersion;
- Uint8 TOperationType = tcKeyReq->getOperationType(Treqinfo);
- regCachePtr->tableref = TtabptrIndex;
- regCachePtr->schemaVersion = TtableSchemaVersion;
- regTcPtr->operation = TOperationType;
-
- Uint8 TSimpleFlag = tcKeyReq->getSimpleFlag(Treqinfo);
- Uint8 TDirtyFlag = tcKeyReq->getDirtyFlag(Treqinfo);
- Uint8 TInterpretedFlag = tcKeyReq->getInterpretedFlag(Treqinfo);
- Uint8 TDistrKeyFlag = tcKeyReq->getDistributionKeyFlag(Treqinfo);
- Uint8 TexecuteFlag = TexecFlag;
-
- regCachePtr->opSimple = TSimpleFlag;
- regCachePtr->opExec = TInterpretedFlag;
- regTcPtr->dirtyOp = TDirtyFlag;
- regCachePtr->distributionKeyIndicator = TDistrKeyFlag;
-
- //-------------------------------------------------------------
- // The next step is to read the up to three conditional words.
- //-------------------------------------------------------------
- Uint32 TkeyIndex;
- Uint32* TOptionalDataPtr = (Uint32*)&tcKeyReq->scanInfo;
- {
- Uint32 TDistrGHIndex = tcKeyReq->getScanIndFlag(Treqinfo);
- Uint32 TDistrKeyIndex = TDistrGHIndex;
-
- Uint32 TscanInfo = tcKeyReq->getTakeOverScanInfo(TOptionalDataPtr[0]);
-
- regCachePtr->scanTakeOverInd = TDistrGHIndex;
- regCachePtr->scanInfo = TscanInfo;
-
- regCachePtr->distributionKey = TOptionalDataPtr[TDistrKeyIndex];
-
- TkeyIndex = TDistrKeyIndex + TDistrKeyFlag;
- }
- Uint32* TkeyDataPtr = &TOptionalDataPtr[TkeyIndex];
-
- UintR Tdata1 = TkeyDataPtr[0];
- UintR Tdata2 = TkeyDataPtr[1];
- UintR Tdata3 = TkeyDataPtr[2];
- UintR Tdata4 = TkeyDataPtr[3];
- UintR Tdata5;
-
- regCachePtr->keydata[0] = Tdata1;
- regCachePtr->keydata[1] = Tdata2;
- regCachePtr->keydata[2] = Tdata3;
- regCachePtr->keydata[3] = Tdata4;
-
- TkeyLength = tcKeyReq->getKeyLength(Treqinfo);
- Uint32 TAIDataIndex;
- if (TkeyLength > 8) {
- TAIDataIndex = TkeyIndex + 8;
- } else {
- if (TkeyLength == 0) {
- TCKEY_abort(signal, 4);
- return;
- }//if
- TAIDataIndex = TkeyIndex + TkeyLength;
- }//if
- Uint32* TAIDataPtr = &TOptionalDataPtr[TAIDataIndex];
-
- titcLenAiInTckeyreq = tcKeyReq->getAIInTcKeyReq(Treqinfo);
- regCachePtr->keylen = TkeyLength;
- regCachePtr->lenAiInTckeyreq = titcLenAiInTckeyreq;
- regCachePtr->currReclenAi = titcLenAiInTckeyreq;
- regCachePtr->m_special_hash =
- localTabptr.p->hasCharAttr | (localTabptr.p->noOfDistrKeys > 0);
- Tdata1 = TAIDataPtr[0];
- Tdata2 = TAIDataPtr[1];
- Tdata3 = TAIDataPtr[2];
- Tdata4 = TAIDataPtr[3];
- Tdata5 = TAIDataPtr[4];
-
- regCachePtr->attrinfo0 = Tdata1;
- regCachePtr->attrinfo15[0] = Tdata2;
- regCachePtr->attrinfo15[1] = Tdata3;
- regCachePtr->attrinfo15[2] = Tdata4;
- regCachePtr->attrinfo15[3] = Tdata5;
-
- if (TOperationType == ZREAD) {
- Uint32 TreadCount = c_counters.creadCount;
- jam();
- regCachePtr->opLock = 0;
- c_counters.creadCount = TreadCount + 1;
- } else if(TOperationType == ZREAD_EX){
- Uint32 TreadCount = c_counters.creadCount;
- jam();
- TOperationType = ZREAD;
- regTcPtr->operation = ZREAD;
- regCachePtr->opLock = ZUPDATE;
- c_counters.creadCount = TreadCount + 1;
- } else {
- if(regApiPtr->commitAckMarker == RNIL){
- jam();
- CommitAckMarkerPtr tmp;
- if(!m_commitAckMarkerHash.seize(tmp)){
- TCKEY_abort(signal, 56);
- return;
- } else {
- regTcPtr->commitAckMarker = tmp.i;
- regApiPtr->commitAckMarker = tmp.i;
- tmp.p->transid1 = tcKeyReq->transId1;
- tmp.p->transid2 = tcKeyReq->transId2;
- tmp.p->apiNodeId = refToNode(regApiPtr->ndbapiBlockref);
- tmp.p->apiConnectPtr = TapiIndex;
- tmp.p->noOfLqhs = 0;
- m_commitAckMarkerHash.add(tmp);
- }
- }
-
- UintR TwriteCount = c_counters.cwriteCount;
- UintR Toperationsize = coperationsize;
- /* --------------------------------------------------------------------
- * IF THIS IS A TEMPORARY TABLE, DON'T UPDATE coperationsize.
- * THIS VARIABLE CONTROLS THE INTERVAL BETWEEN LCP'S AND
- * TEMP TABLES DON'T PARTICIPATE.
- * -------------------------------------------------------------------- */
- if (localTabptr.p->storedTable) {
- coperationsize = ((Toperationsize + TattrLen) + TkeyLength) + 17;
- }
- c_counters.cwriteCount = TwriteCount + 1;
- switch (TOperationType) {
- case ZUPDATE:
- jam();
- if (TattrLen == 0) {
- //TCKEY_abort(signal, 5);
- //return;
- }//if
- /*---------------------------------------------------------------------*/
- // The missing break is intentional since we want to set the opLock
- // variable for updates as well
- /*---------------------------------------------------------------------*/
- case ZINSERT:
- case ZDELETE:
- jam();
- regCachePtr->opLock = TOperationType;
- break;
- case ZWRITE:
- jam();
- // A write operation is originally an insert operation.
- regCachePtr->opLock = ZINSERT;
- break;
- default:
- TCKEY_abort(signal, 9);
- return;
- }//switch
- }//if
-
- Uint32 TabortOption = tcKeyReq->getAbortOption(Treqinfo);
- regTcPtr->m_execAbortOption = TabortOption;
-
- /*-------------------------------------------------------------------------
- * Check error handling per operation
- * If CommitFlag is set state accordingly and check for early abort
- *------------------------------------------------------------------------*/
- if (tcKeyReq->getCommitFlag(Treqinfo) == 1) {
- ndbrequire(TexecuteFlag);
- regApiPtr->apiConnectstate = CS_REC_COMMITTING;
- } else {
- /* ---------------------------------------------------------------------
- * PREPARE TRANSACTION IS NOT IMPLEMENTED YET.
- * ---------------------------------------------------------------------
- * ELSIF (TREQINFO => 3) (*) 1 = 1 THEN
- * IF PREPARE TRANSACTION THEN
- * API_CONNECTPTR:API_CONNECTSTATE = REC_PREPARING
- * SET STATE TO PREPARING
- * --------------------------------------------------------------------- */
- if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
- jam();
- // Trigger execution at commit
- regApiPtr->apiConnectstate = CS_REC_COMMITTING;
- } else {
- jam();
- regApiPtr->apiConnectstate = CS_RECEIVING;
- }//if
- }//if
- if (TkeyLength <= 4) {
- tckeyreq050Lab(signal);
- return;
- } else {
- if (cfirstfreeDatabuf != RNIL) {
- jam();
- linkKeybuf(signal);
- Tdata1 = TkeyDataPtr[4];
- Tdata2 = TkeyDataPtr[5];
- Tdata3 = TkeyDataPtr[6];
- Tdata4 = TkeyDataPtr[7];
-
- DatabufRecord * const regDataPtr = databufptr.p;
- regDataPtr->data[0] = Tdata1;
- regDataPtr->data[1] = Tdata2;
- regDataPtr->data[2] = Tdata3;
- regDataPtr->data[3] = Tdata4;
- } else {
- jam();
- seizeDatabuferrorLab(signal);
- return;
- }//if
- if (TkeyLength <= 8) {
- jam();
- tckeyreq050Lab(signal);
- return;
- } else {
- jam();
- /* --------------------------------------------------------------------
- * THE TCKEYREQ DIDN'T CONTAIN ALL KEY DATA,
- * SAVE STATE AND WAIT FOR KEYINFO
- * --------------------------------------------------------------------*/
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- regCachePtr->save1 = 8;
- regTcPtr->tcConnectstate = OS_WAIT_KEYINFO;
- return;
- }//if
- }//if
- return;
-}//Dbtc::execTCKEYREQ()
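
The tail of execTCKEYREQ dispatches on the key length: up to 4 words already fit in the cache record's keydata, up to 8 words need one extra data buffer, and anything longer forces the operation to save its state and wait for follow-up KEYINFO signals. A tiny sketch of that decision, with made-up names standing in for the cache record and buffer chain:

#include <cstdio>
#include <cstdint>

// Illustrative outcomes of the key-length dispatch at the end of execTCKEYREQ.
enum class KeyAction {
  Inline,        // <= 4 words: already held inline in the cache record
  OneKeyBuffer,  // <= 8 words: link one data buffer, then continue
  WaitKeyInfo    // >  8 words: save state and wait for KEYINFO signals
};

static KeyAction classifyKeyLength(uint32_t keyLenWords)
{
  if (keyLenWords <= 4) return KeyAction::Inline;
  if (keyLenWords <= 8) return KeyAction::OneKeyBuffer;
  return KeyAction::WaitKeyInfo;
}

int main()
{
  for (uint32_t len : {2u, 6u, 17u})
    std::printf("keylen=%u -> action %d\n", len, int(classifyKeyLength(len)));
  return 0;
}
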
-
-void Dbtc::tckeyreq050Lab(Signal* signal)
-{
- UintR tnoOfBackup;
- UintR tnoOfStandby;
- UintR tnodeinfo;
-
- hash(signal); /* NOW IT IS TIME TO CALCULATE THE HASH VALUE*/
-
- CacheRecord * const regCachePtr = cachePtr.p;
- TcConnectRecord * const regTcPtr = tcConnectptr.p;
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
-
- UintR TtcTimer = ctcTimer;
- UintR ThashValue = thashValue;
- UintR TdistrHashValue = tdistrHashValue;
- UintR TdihConnectptr = regTcPtr->dihConnectptr;
- UintR Ttableref = regCachePtr->tableref;
-
- TableRecordPtr localTabptr;
- localTabptr.i = Ttableref;
- localTabptr.p = &tableRecord[localTabptr.i];
- Uint32 schemaVersion = regCachePtr->schemaVersion;
- if(localTabptr.p->checkTable(schemaVersion)){
- ;
- } else {
- terrorCode = localTabptr.p->getErrorCode(schemaVersion);
- TCKEY_abort(signal, 58);
- return;
- }
-
- setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
- regCachePtr->hashValue = ThashValue;
-
- signal->theData[0] = TdihConnectptr;
- signal->theData[1] = Ttableref;
- signal->theData[2] = TdistrHashValue;
-
- /*-------------------------------------------------------------*/
- /* FOR EFFICIENCY REASONS WE AVOID THE SIGNAL SENDING HERE AND */
- /* PROCEED IMMEDIATELY TO DIH. IN MULTI-THREADED VERSIONS WE */
- /* HAVE TO INSERT A MUTEX ON DIH TO ENSURE PROPER OPERATION. */
- /* SINCE THIS SIGNAL AND DIVERIFYREQ ARE THE ONLY SIGNALS SENT */
- /* TO DIH IN TRAFFIC IT SHOULD BE OK (3% OF THE EXECUTION TIME */
- /* IS SPENT IN DIH AND EVEN LESS IN REPLICATED NDB). */
- /*-------------------------------------------------------------*/
- EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal, 3);
- UintR TerrorIndicator = signal->theData[0];
- jamEntry();
- if (TerrorIndicator != 0) {
- execDIGETNODESREF(signal);
- return;
- }
-
- if(ERROR_INSERTED(8050) && signal->theData[3] != getOwnNodeId())
- {
- ndbassert(false);
- signal->theData[1] = 626;
- execDIGETNODESREF(signal);
- return;
- }
-
- /****************>>*/
- /* DIGETNODESCONF >*/
- /* ***************>*/
-
- UintR Tdata1 = signal->theData[1];
- UintR Tdata2 = signal->theData[2];
- UintR Tdata3 = signal->theData[3];
- UintR Tdata4 = signal->theData[4];
- UintR Tdata5 = signal->theData[5];
- UintR Tdata6 = signal->theData[6];
-
- regCachePtr->fragmentid = Tdata1;
- tnodeinfo = Tdata2;
-
- regTcPtr->tcNodedata[0] = Tdata3;
- regTcPtr->tcNodedata[1] = Tdata4;
- regTcPtr->tcNodedata[2] = Tdata5;
- regTcPtr->tcNodedata[3] = Tdata6;
-
- Uint8 Toperation = regTcPtr->operation;
- Uint8 Tdirty = regTcPtr->dirtyOp;
- tnoOfBackup = tnodeinfo & 3;
- tnoOfStandby = (tnodeinfo >> 8) & 3;
-
- regCachePtr->fragmentDistributionKey = (tnodeinfo >> 16) & 255;
- if (Toperation == ZREAD) {
- if (Tdirty == 1) {
- jam();
- /*-------------------------------------------------------------*/
- /* A SIMPLE READ CAN SELECT ANY OF THE PRIMARY AND */
- /* BACKUP NODES TO READ. WE WILL TRY TO SELECT THIS */
- /* NODE IF POSSIBLE TO AVOID UNNECESSARY COMMUNICATION */
- /* WITH SIMPLE READS. */
- /*-------------------------------------------------------------*/
- arrGuard(tnoOfBackup, 4);
- UintR Tindex;
- UintR TownNode = cownNodeid;
- for (Tindex = 1; Tindex <= tnoOfBackup; Tindex++) {
- UintR Tnode = regTcPtr->tcNodedata[Tindex];
- jam();
- if (Tnode == TownNode) {
- jam();
- regTcPtr->tcNodedata[0] = Tnode;
- }//if
- }//for
- if(ERROR_INSERTED(8048) || ERROR_INSERTED(8049))
- {
- for (Tindex = 0; Tindex <= tnoOfBackup; Tindex++)
- {
- UintR Tnode = regTcPtr->tcNodedata[Tindex];
- jam();
- if (Tnode != TownNode) {
- jam();
- regTcPtr->tcNodedata[0] = Tnode;
- ndbout_c("Choosing %d", Tnode);
- }//if
- }//for
- }
- }//if
- jam();
- regTcPtr->lastReplicaNo = 0;
- regTcPtr->noOfNodes = 1;
- } else {
- UintR TlastReplicaNo;
- jam();
- TlastReplicaNo = tnoOfBackup + tnoOfStandby;
- regTcPtr->lastReplicaNo = (Uint8)TlastReplicaNo;
- regTcPtr->noOfNodes = (Uint8)(TlastReplicaNo + 1);
- }//if
- if (regCachePtr->lenAiInTckeyreq == regCachePtr->attrlength) {
- /****************************************************************>*/
- /* HERE WE HAVE FOUND THAT THE LAST SIGNAL BELONGING TO THIS */
- /* OPERATION HAS BEEN RECEIVED. THIS MEANS THAT WE CAN NOW REUSE */
- /* THE API CONNECT RECORD. HOWEVER IF PREPARE OR COMMIT HAS BEEN */
- /* RECEIVED THEN IT IS NOT ALLOWED TO RECEIVE ANY FURTHER */
- /* OPERATIONS. WE KNOW THAT WE WILL WAIT FOR DICT NEXT. IT IS NOT */
- /* POSSIBLE FOR THE TC CONNECTION TO BE READY YET. */
- /****************************************************************>*/
- switch (regApiPtr->apiConnectstate) {
- case CS_RECEIVING:
- jam();
- regApiPtr->apiConnectstate = CS_STARTED;
- break;
- case CS_REC_COMMITTING:
- jam();
- regApiPtr->apiConnectstate = CS_START_COMMITTING;
- break;
- default:
- jam();
- systemErrorLab(signal);
- return;
- }//switch
- attrinfoDihReceivedLab(signal);
- return;
- } else {
- if (regCachePtr->lenAiInTckeyreq < regCachePtr->attrlength) {
- TtcTimer = ctcTimer;
- jam();
- setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
- regTcPtr->tcConnectstate = OS_WAIT_ATTR;
- return;
- } else {
- TCKEY_abort(signal, 11);
- return;
- }//if
- }//if
- return;
-}//Dbtc::tckeyreq050Lab()
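
For dirty/simple reads, tckeyreq050Lab scans the replica list returned by DIGETNODESREQ and, if this TC node itself holds a replica, puts it first so the read does not leave the node. A minimal sketch of that preference, using plain node-id integers instead of the tcNodedata[] array:

#include <cstdio>
#include <cstdint>
#include <vector>

// Prefer the own node when it appears among the replicas, mirroring the
// loop over tcNodedata[] for dirty reads in tckeyreq050Lab.
static uint32_t pickReadNode(const std::vector<uint32_t>& replicas, uint32_t ownNode)
{
  uint32_t chosen = replicas.front();   // default: primary replica
  for (uint32_t node : replicas)
    if (node == ownNode)
      chosen = node;                    // local replica wins
  return chosen;
}

int main()
{
  std::vector<uint32_t> replicas = {3, 5, 7};
  std::printf("read sent to node %u\n", pickReadNode(replicas, 5));  // -> 5
  std::printf("read sent to node %u\n", pickReadNode(replicas, 9));  // -> 3
  return 0;
}
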
-
-void Dbtc::attrinfoDihReceivedLab(Signal* signal)
-{
- CacheRecord * const regCachePtr = cachePtr.p;
- TcConnectRecord * const regTcPtr = tcConnectptr.p;
- Uint16 Tnode = regTcPtr->tcNodedata[0];
-
- TableRecordPtr localTabptr;
- localTabptr.i = regCachePtr->tableref;
- localTabptr.p = &tableRecord[localTabptr.i];
-
- if(localTabptr.p->checkTable(regCachePtr->schemaVersion)){
- ;
- } else {
- terrorCode = localTabptr.p->getErrorCode(regCachePtr->schemaVersion);
- TCKEY_abort(signal, 58);
- return;
- }
- arrGuard(Tnode, MAX_NDB_NODES);
- packLqhkeyreq(signal, calcLqhBlockRef(Tnode));
-}//Dbtc::attrinfoDihReceivedLab()
-
-void Dbtc::packLqhkeyreq(Signal* signal,
- BlockReference TBRef)
-{
- CacheRecord * const regCachePtr = cachePtr.p;
- UintR Tkeylen = regCachePtr->keylen;
- UintR TfirstAttrbuf = regCachePtr->firstAttrbuf;
- sendlqhkeyreq(signal, TBRef);
- if (Tkeylen > 4) {
- packKeyData000Lab(signal, TBRef, Tkeylen - 4);
- releaseKeys();
- }//if
- packLqhkeyreq040Lab(signal,
- TfirstAttrbuf,
- TBRef);
-}//Dbtc::packLqhkeyreq()
-
-void Dbtc::sendlqhkeyreq(Signal* signal,
- BlockReference TBRef)
-{
- UintR tslrAttrLen;
- UintR Tdata10;
- TcConnectRecord * const regTcPtr = tcConnectptr.p;
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- CacheRecord * const regCachePtr = cachePtr.p;
-#ifdef ERROR_INSERT
- if (ERROR_INSERTED(8002)) {
- systemErrorLab(signal);
- }//if
- if (ERROR_INSERTED(8007)) {
- if (apiConnectptr.p->apiConnectstate == CS_STARTED) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- }//if
- if (ERROR_INSERTED(8008)) {
- if (apiConnectptr.p->apiConnectstate == CS_START_COMMITTING) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- }//if
- if (ERROR_INSERTED(8009)) {
- if (apiConnectptr.p->apiConnectstate == CS_STARTED) {
- return;
- }//if
- }//if
- if (ERROR_INSERTED(8010)) {
- if (apiConnectptr.p->apiConnectstate == CS_START_COMMITTING) {
- return;
- }//if
- }//if
-#endif
-
- tslrAttrLen = 0;
- LqhKeyReq::setAttrLen(tslrAttrLen, regCachePtr->attrlength);
- /* ---------------------------------------------------------------------- */
- // Bit16 == 0 since StoredProcedures are not yet supported.
- /* ---------------------------------------------------------------------- */
- LqhKeyReq::setDistributionKey(tslrAttrLen, regCachePtr->fragmentDistributionKey);
- LqhKeyReq::setScanTakeOverFlag(tslrAttrLen, regCachePtr->scanTakeOverInd);
-
- Tdata10 = 0;
- LqhKeyReq::setKeyLen(Tdata10, regCachePtr->keylen);
- LqhKeyReq::setLastReplicaNo(Tdata10, regTcPtr->lastReplicaNo);
- LqhKeyReq::setLockType(Tdata10, regCachePtr->opLock);
- /* ---------------------------------------------------------------------- */
- // Indicate Application Reference is present in bit 15
- /* ---------------------------------------------------------------------- */
- LqhKeyReq::setApplicationAddressFlag(Tdata10, 1);
- LqhKeyReq::setDirtyFlag(Tdata10, regTcPtr->dirtyOp);
- LqhKeyReq::setInterpretedFlag(Tdata10, regCachePtr->opExec);
- LqhKeyReq::setSimpleFlag(Tdata10, regCachePtr->opSimple);
- LqhKeyReq::setOperation(Tdata10, regTcPtr->operation);
- /* -----------------------------------------------------------------------
- * Sequential Number of first LQH = 0, bit 22-23
- * IF ATTRIBUTE INFORMATION IS SENT IN TCKEYREQ,
- * IT IS ALSO SENT IN LQHKEYREQ
- * ----------------------------------------------------------------------- */
- LqhKeyReq::setAIInLqhKeyReq(Tdata10, regCachePtr->lenAiInTckeyreq);
- /* -----------------------------------------------------------------------
- * Bit 27 == 0 since TC record is the same as the client record.
- * Bit 28 == 0 since readLenAi can only be set after reading in LQH.
- * ----------------------------------------------------------------------- */
- //LqhKeyReq::setAPIVersion(Tdata10, regCachePtr->apiVersionNo);
- Uint32 commitAckMarker = regTcPtr->commitAckMarker;
- if(commitAckMarker != RNIL){
- jam();
-
- LqhKeyReq::setMarkerFlag(Tdata10, 1);
-
- CommitAckMarker * tmp;
- tmp = m_commitAckMarkerHash.getPtr(commitAckMarker);
-
- /**
- * Populate LQH array
- */
- const Uint32 noOfLqhs = regTcPtr->noOfNodes;
- tmp->noOfLqhs = noOfLqhs;
- for(Uint32 i = 0; i<noOfLqhs; i++){
- tmp->lqhNodeId[i] = regTcPtr->tcNodedata[i];
- }
- }
-
- /* ************************************************************> */
- /* NO READ LENGTH SENT FROM TC. SEQUENTIAL NUMBER IS 1 AND IT */
- /* IS SENT TO A PRIMARY NODE. */
- /* ************************************************************> */
- UintR sig0, sig1, sig2, sig3, sig4, sig5, sig6;
-
- LqhKeyReq * const lqhKeyReq = (LqhKeyReq *)signal->getDataPtrSend();
-
- sig0 = tcConnectptr.i;
- sig2 = regCachePtr->hashValue;
- sig4 = cownref;
- sig5 = regTcPtr->savePointId;
-
- lqhKeyReq->clientConnectPtr = sig0;
- lqhKeyReq->attrLen = tslrAttrLen;
- lqhKeyReq->hashValue = sig2;
- lqhKeyReq->requestInfo = Tdata10;
- lqhKeyReq->tcBlockref = sig4;
- lqhKeyReq->savePointId = sig5;
-
- sig0 = regCachePtr->tableref + (regCachePtr->schemaVersion << 16);
- sig1 = regCachePtr->fragmentid + (regTcPtr->tcNodedata[1] << 16);
- sig2 = regApiPtr->transid[0];
- sig3 = regApiPtr->transid[1];
- sig4 = regApiPtr->ndbapiBlockref;
- sig5 = regTcPtr->clientData;
- sig6 = regCachePtr->scanInfo;
-
- lqhKeyReq->tableSchemaVersion = sig0;
- lqhKeyReq->fragmentData = sig1;
- lqhKeyReq->transId1 = sig2;
- lqhKeyReq->transId2 = sig3;
- lqhKeyReq->scanInfo = sig6;
-
- lqhKeyReq->variableData[0] = sig4;
- lqhKeyReq->variableData[1] = sig5;
-
- UintR nextPos = 2;
-
- if (regTcPtr->lastReplicaNo > 1) {
- sig0 = (UintR)regTcPtr->tcNodedata[2] +
- (UintR)(regTcPtr->tcNodedata[3] << 16);
- lqhKeyReq->variableData[nextPos] = sig0;
- nextPos++;
- }//if
-
- sig0 = regCachePtr->keydata[0];
- sig1 = regCachePtr->keydata[1];
- sig2 = regCachePtr->keydata[2];
- sig3 = regCachePtr->keydata[3];
- UintR Tkeylen = regCachePtr->keylen;
-
- lqhKeyReq->variableData[nextPos + 0] = sig0;
- lqhKeyReq->variableData[nextPos + 1] = sig1;
- lqhKeyReq->variableData[nextPos + 2] = sig2;
- lqhKeyReq->variableData[nextPos + 3] = sig3;
-
- if (Tkeylen < 4) {
- nextPos += Tkeylen;
- } else {
- nextPos += 4;
- }//if
-
- sig0 = regCachePtr->attrinfo0;
- sig1 = regCachePtr->attrinfo15[0];
- sig2 = regCachePtr->attrinfo15[1];
- sig3 = regCachePtr->attrinfo15[2];
- sig4 = regCachePtr->attrinfo15[3];
- UintR TlenAi = regCachePtr->lenAiInTckeyreq;
-
- lqhKeyReq->variableData[nextPos + 0] = sig0;
- lqhKeyReq->variableData[nextPos + 1] = sig1;
- lqhKeyReq->variableData[nextPos + 2] = sig2;
- lqhKeyReq->variableData[nextPos + 3] = sig3;
- lqhKeyReq->variableData[nextPos + 4] = sig4;
-
- nextPos += TlenAi;
-
- // Reset trigger count
- regTcPtr->accumulatingTriggerData.i = RNIL;
- regTcPtr->accumulatingTriggerData.p = NULL;
- regTcPtr->noFiredTriggers = 0;
- regTcPtr->triggerExecutionCount = 0;
-
- sendSignal(TBRef, GSN_LQHKEYREQ, signal,
- nextPos + LqhKeyReq::FixedSignalLength, JBB);
-}//Dbtc::sendlqhkeyreq()
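
sendlqhkeyreq assembles the LQHKEYREQ requestInfo word with a family of LqhKeyReq::setXxx helpers, each of which masks and shifts a small field into a fixed bit position of a single 32-bit word. A hedged sketch of that style of bit-field packing; the field offsets below are invented for illustration and do not reflect the real LqhKeyReq layout, and the setters assume the field bits start out as zero:

#include <cassert>
#include <cstdint>

// Invented layout: keyLen in bits 0..9, lastReplicaNo in bits 10..11,
// dirty flag in bit 12.
static void setKeyLen(uint32_t& w, uint32_t len)      { w |= (len & 0x3ff); }
static void setLastReplica(uint32_t& w, uint32_t n)   { w |= (n & 0x3) << 10; }
static void setDirtyFlag(uint32_t& w, uint32_t d)     { w |= (d & 0x1) << 12; }

static uint32_t getKeyLen(uint32_t w)      { return w & 0x3ff; }
static uint32_t getLastReplica(uint32_t w) { return (w >> 10) & 0x3; }
static uint32_t getDirtyFlag(uint32_t w)   { return (w >> 12) & 0x1; }

int main()
{
  uint32_t requestInfo = 0;
  setKeyLen(requestInfo, 12);
  setLastReplica(requestInfo, 2);
  setDirtyFlag(requestInfo, 1);
  assert(getKeyLen(requestInfo) == 12);
  assert(getLastReplica(requestInfo) == 2);
  assert(getDirtyFlag(requestInfo) == 1);
  return 0;
}
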
-
-void Dbtc::packLqhkeyreq040Lab(Signal* signal,
- UintR anAttrBufIndex,
- BlockReference TBRef)
-{
- TcConnectRecord * const regTcPtr = tcConnectptr.p;
- CacheRecord * const regCachePtr = cachePtr.p;
-#ifdef ERROR_INSERT
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- if (ERROR_INSERTED(8009)) {
- if (regApiPtr->apiConnectstate == CS_STARTED) {
- attrbufptr.i = RNIL;
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- }//if
- if (ERROR_INSERTED(8010)) {
- if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
- attrbufptr.i = RNIL;
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- }//if
-#endif
-
- UintR TattrbufFilesize = cattrbufFilesize;
- AttrbufRecord *localAttrbufRecord = attrbufRecord;
- while (1) {
- if (anAttrBufIndex == RNIL) {
- UintR TtcTimer = ctcTimer;
- UintR Tread = (regTcPtr->operation == ZREAD);
- UintR Tsimple = (regCachePtr->opSimple == ZTRUE);
- UintR Tboth = Tread & Tsimple;
- setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
- jam();
- /*--------------------------------------------------------------------
- * WE HAVE SENT ALL THE SIGNALS OF THIS OPERATION. SET STATE AND EXIT.
- *---------------------------------------------------------------------*/
- releaseAttrinfo();
- if (Tboth) {
- jam();
- releaseSimpleRead(signal, apiConnectptr, tcConnectptr.p);
- return;
- }//if
- regTcPtr->tcConnectstate = OS_OPERATING;
- return;
- }//if
- if (anAttrBufIndex < TattrbufFilesize) {
- AttrbufRecord * const regAttrPtr = &localAttrbufRecord[anAttrBufIndex];
- anAttrBufIndex = regAttrPtr->attrbuf[ZINBUF_NEXT];
- sendAttrinfo(signal,
- tcConnectptr.i,
- regAttrPtr,
- TBRef);
- } else {
- TCKEY_abort(signal, 17);
- return;
- }//if
- }//while
-}//Dbtc::packLqhkeyreq040Lab()
-
-/* ========================================================================= */
-/* ------- RELEASE ALL ATTRINFO RECORDS IN AN OPERATION RECORD ------- */
-/* ========================================================================= */
-void Dbtc::releaseAttrinfo()
-{
- UintR Tmp;
- AttrbufRecordPtr Tattrbufptr;
- CacheRecord * const regCachePtr = cachePtr.p;
- UintR TattrbufFilesize = cattrbufFilesize;
- UintR TfirstfreeAttrbuf = cfirstfreeAttrbuf;
- Tattrbufptr.i = regCachePtr->firstAttrbuf;
- AttrbufRecord *localAttrbufRecord = attrbufRecord;
-
- while (Tattrbufptr.i < TattrbufFilesize) {
- Tattrbufptr.p = &localAttrbufRecord[Tattrbufptr.i];
- Tmp = Tattrbufptr.p->attrbuf[ZINBUF_NEXT];
- Tattrbufptr.p->attrbuf[ZINBUF_NEXT] = TfirstfreeAttrbuf;
- TfirstfreeAttrbuf = Tattrbufptr.i;
- Tattrbufptr.i = Tmp;
- jam();
- }//while
- if (Tattrbufptr.i == RNIL) {
-//---------------------------------------------------
-// Now we will release the cache record at the same
-// time as releasing the attrinfo records.
-//---------------------------------------------------
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- UintR TfirstfreeCacheRec = cfirstfreeCacheRec;
- UintR TCacheIndex = cachePtr.i;
- cfirstfreeAttrbuf = TfirstfreeAttrbuf;
- regCachePtr->nextCacheRec = TfirstfreeCacheRec;
- cfirstfreeCacheRec = TCacheIndex;
- regApiPtr->cachePtr = RNIL;
- return;
- }//if
- systemErrorLab(0);
- return;
-}//Dbtc::releaseAttrinfo()
-
-/* ========================================================================= */
-/* ------- RELEASE ALL RECORDS CONNECTED TO A SIMPLE OPERATION ------- */
-/* ========================================================================= */
-void Dbtc::releaseSimpleRead(Signal* signal,
- ApiConnectRecordPtr regApiPtr,
- TcConnectRecord* regTcPtr)
-{
- Uint32 Ttckeyrec = regApiPtr.p->tckeyrec;
- Uint32 TclientData = regTcPtr->clientData;
- Uint32 Tnode = regTcPtr->tcNodedata[0];
- Uint32 Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec;
- Uint32 TsimpleReadCount = c_counters.csimpleReadCount;
- ConnectionState state = regApiPtr.p->apiConnectstate;
-
- regApiPtr.p->tcSendArray[Ttckeyrec] = TclientData;
- regApiPtr.p->tcSendArray[Ttckeyrec + 1] = TcKeyConf::SimpleReadBit | Tnode;
- regApiPtr.p->tckeyrec = Ttckeyrec + 2;
-
- unlinkReadyTcCon(signal);
- releaseTcCon();
-
- /**
- * No LQHKEYCONF in Simple/Dirty read
- * Therefore decrese no LQHKEYCONF(REF) we are waiting for
- */
- c_counters.csimpleReadCount = TsimpleReadCount + 1;
- regApiPtr.p->lqhkeyreqrec = --Tlqhkeyreqrec;
-
- if(Tlqhkeyreqrec == 0)
- {
- /**
- * Special case of lqhKeyConf_checkTransactionState:
- * - commit with zero operations: handle only for simple read
- */
- sendtckeyconf(signal, state == CS_START_COMMITTING);
- regApiPtr.p->apiConnectstate =
- (state == CS_START_COMMITTING ? CS_CONNECTED : state);
- setApiConTimer(regApiPtr.i, 0, __LINE__);
-
- return;
- }
-
- /**
- * Emulate LQHKEYCONF
- */
- lqhKeyConf_checkTransactionState(signal, regApiPtr.p);
-}//Dbtc::releaseSimpleRead()
-
-/* ------------------------------------------------------------------------- */
-/* ------- CHECK IF ALL TC CONNECTIONS ARE COMPLETED ------- */
-/* ------------------------------------------------------------------------- */
-void Dbtc::unlinkReadyTcCon(Signal* signal)
-{
- TcConnectRecordPtr urtTcConnectptr;
-
- TcConnectRecord * const regTcPtr = tcConnectptr.p;
- TcConnectRecord *localTcConnectRecord = tcConnectRecord;
- UintR TtcConnectFilesize = ctcConnectFilesize;
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- if (regTcPtr->prevTcConnect != RNIL) {
- jam();
- urtTcConnectptr.i = regTcPtr->prevTcConnect;
- ptrCheckGuard(urtTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
- urtTcConnectptr.p->nextTcConnect = regTcPtr->nextTcConnect;
- } else {
- jam();
- regApiPtr->firstTcConnect = regTcPtr->nextTcConnect;
- }//if
- if (regTcPtr->nextTcConnect != RNIL) {
- jam();
- urtTcConnectptr.i = regTcPtr->nextTcConnect;
- ptrCheckGuard(urtTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
- urtTcConnectptr.p->prevTcConnect = regTcPtr->prevTcConnect;
- } else {
- jam();
- regApiPtr->lastTcConnect = tcConnectptr.p->prevTcConnect;
- }//if
-}//Dbtc::unlinkReadyTcCon()
-
-void Dbtc::releaseTcCon()
-{
- TcConnectRecord * const regTcPtr = tcConnectptr.p;
- UintR TfirstfreeTcConnect = cfirstfreeTcConnect;
- UintR TconcurrentOp = c_counters.cconcurrentOp;
- UintR TtcConnectptrIndex = tcConnectptr.i;
-
- regTcPtr->tcConnectstate = OS_CONNECTED;
- regTcPtr->nextTcConnect = TfirstfreeTcConnect;
- regTcPtr->apiConnect = RNIL;
- regTcPtr->isIndexOp = false;
- regTcPtr->indexOp = RNIL;
- cfirstfreeTcConnect = TtcConnectptrIndex;
- c_counters.cconcurrentOp = TconcurrentOp - 1;
-}//Dbtc::releaseTcCon()
-
-void Dbtc::execPACKED_SIGNAL(Signal* signal)
-{
- LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
-
- UintR Ti;
- UintR Tstep = 0;
- UintR Tlength;
- UintR TpackedData[28];
- UintR Tdata1, Tdata2, Tdata3, Tdata4;
-
- jamEntry();
- Tlength = signal->length();
- if (Tlength > 25) {
- jam();
- systemErrorLab(signal);
- return;
- }//if
- Uint32* TpackDataPtr;
- for (Ti = 0; Ti < Tlength; Ti += 4) {
- Uint32* TsigDataPtr = &signal->theData[Ti];
- Tdata1 = TsigDataPtr[0];
- Tdata2 = TsigDataPtr[1];
- Tdata3 = TsigDataPtr[2];
- Tdata4 = TsigDataPtr[3];
-
- TpackDataPtr = &TpackedData[Ti];
- TpackDataPtr[0] = Tdata1;
- TpackDataPtr[1] = Tdata2;
- TpackDataPtr[2] = Tdata3;
- TpackDataPtr[3] = Tdata4;
- }//for
- while (Tlength > Tstep) {
-
- TpackDataPtr = &TpackedData[Tstep];
- Tdata1 = TpackDataPtr[0];
- Tdata2 = TpackDataPtr[1];
- Tdata3 = TpackDataPtr[2];
-
- lqhKeyConf->connectPtr = Tdata1 & 0x0FFFFFFF;
- lqhKeyConf->opPtr = Tdata2;
- lqhKeyConf->userRef = Tdata3;
-
- switch (Tdata1 >> 28) {
- case ZCOMMITTED:
- signal->header.theLength = 3;
- execCOMMITTED(signal);
- Tstep += 3;
- break;
- case ZCOMPLETED:
- signal->header.theLength = 3;
- execCOMPLETED(signal);
- Tstep += 3;
- break;
- case ZLQHKEYCONF:
- jam();
- Tdata1 = TpackDataPtr[3];
- Tdata2 = TpackDataPtr[4];
- Tdata3 = TpackDataPtr[5];
- Tdata4 = TpackDataPtr[6];
-
- lqhKeyConf->readLen = Tdata1;
- lqhKeyConf->transId1 = Tdata2;
- lqhKeyConf->transId2 = Tdata3;
- lqhKeyConf->noFiredTriggers = Tdata4;
- signal->header.theLength = LqhKeyConf::SignalLength;
- execLQHKEYCONF(signal);
- Tstep += LqhKeyConf::SignalLength;
- break;
- default:
- systemErrorLab(signal);
- return;
- }//switch
- }//while
- return;
-}//Dbtc::execPACKED_SIGNAL()
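
execPACKED_SIGNAL walks one incoming signal that carries several packed sub-signals back to back: the top four bits of each sub-signal's first word select its type (COMMITTED, COMPLETED, LQHKEYCONF), the low 28 bits are the connect pointer, and the type determines how many words to consume before moving on. A self-contained sketch of that framing; the type codes and word counts here are illustrative only:

#include <cstdio>
#include <cstdint>
#include <vector>

// Illustrative type codes carried in the top 4 bits of the first word.
enum { COMMITTED_LIKE = 1, COMPLETED_LIKE = 2, LQHKEYCONF_LIKE = 3 };

static void unpack(const std::vector<uint32_t>& words)
{
  size_t step = 0;
  while (step < words.size()) {
    uint32_t first = words[step];
    uint32_t type = first >> 28;           // sub-signal selector
    uint32_t connectPtr = first & 0x0fffffff;
    switch (type) {
    case COMMITTED_LIKE:
    case COMPLETED_LIKE:
      std::printf("short sub-signal, type=%u connectPtr=%u\n", type, connectPtr);
      step += 3;                            // 3-word sub-signal
      break;
    case LQHKEYCONF_LIKE:
      std::printf("LQHKEYCONF-like sub-signal, connectPtr=%u\n", connectPtr);
      step += 7;                            // 7-word sub-signal
      break;
    default:
      std::printf("unknown type %u, stop\n", type);
      return;
    }
  }
}

int main()
{
  std::vector<uint32_t> packed = {
    (uint32_t(COMMITTED_LIKE) << 28) | 11, 0, 0,
    (uint32_t(LQHKEYCONF_LIKE) << 28) | 42, 0, 0, 0, 0, 0, 0,
  };
  unpack(packed);
  return 0;
}
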
-
-void Dbtc::execLQHKEYCONF(Signal* signal)
-{
- const LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
- UintR compare_transid1, compare_transid2;
- BlockReference tlastLqhBlockref;
- UintR tlastLqhConnect;
- UintR treadlenAi;
- UintR TtcConnectptrIndex;
- UintR TtcConnectFilesize = ctcConnectFilesize;
-
- tlastLqhConnect = lqhKeyConf->connectPtr;
- TtcConnectptrIndex = lqhKeyConf->opPtr;
- tlastLqhBlockref = lqhKeyConf->userRef;
- treadlenAi = lqhKeyConf->readLen;
- TcConnectRecord *localTcConnectRecord = tcConnectRecord;
-
- /*------------------------------------------------------------------------
- * NUMBER OF EXTERNAL TRIGGERS FIRED IN DATA[6]
- * OPERATION IS NOW COMPLETED. CHECK FOR CORRECT OPERATION POINTER
- * TO ENSURE NO CRASHES BECAUSE OF ERRONEOUS NODES. CHECK STATE OF
- * OPERATION. THEN SET OPERATION STATE AND RETRIEVE ALL POINTERS
- * OF THIS OPERATION. PUT COMPLETED OPERATION IN LIST OF COMPLETED
- * OPERATIONS ON THE LQH CONNECT RECORD.
- *------------------------------------------------------------------------
- * THIS SIGNAL ALWAYS ARRIVES BEFORE THE ABORTED SIGNAL SINCE IT USES
- * THE SAME PATH BACK TO TC AS THE ABORTED SIGNAL DOES. WE DO HOWEVER HAVE A
- * PROBLEM WHEN WE ENCOUNTER A TIME-OUT WAITING FOR THE ABORTED SIGNAL.
- * THEN THIS SIGNAL MIGHT ARRIVE WHEN THE TC CONNECT RECORD HAS BEEN REUSED
- * BY ANOTHER TRANSACTION, THUS WE CHECK THE TRANSACTION ID OF THE SIGNAL
- * BEFORE ACCEPTING THIS SIGNAL.
- * Due to packing of LQHKEYCONF the ABORTED signal can now arrive before
- * this.
- * This is more reason to ignore the signal if not all states are correct.
- *------------------------------------------------------------------------*/
- if (TtcConnectptrIndex >= TtcConnectFilesize) {
- TCKEY_abort(signal, 25);
- return;
- }//if
- TcConnectRecord* const regTcPtr = &localTcConnectRecord[TtcConnectptrIndex];
- OperationState TtcConnectstate = regTcPtr->tcConnectstate;
- tcConnectptr.i = TtcConnectptrIndex;
- tcConnectptr.p = regTcPtr;
- if (TtcConnectstate != OS_OPERATING) {
- warningReport(signal, 23);
- return;
- }//if
- ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
- UintR TapiConnectptrIndex = regTcPtr->apiConnect;
- UintR TapiConnectFilesize = capiConnectFilesize;
- UintR Ttrans1 = lqhKeyConf->transId1;
- UintR Ttrans2 = lqhKeyConf->transId2;
- Uint32 noFired = lqhKeyConf->noFiredTriggers;
-
- if (TapiConnectptrIndex >= TapiConnectFilesize) {
- TCKEY_abort(signal, 29);
- return;
- }//if
- ApiConnectRecord * const regApiPtr =
- &localApiConnectRecord[TapiConnectptrIndex];
- apiConnectptr.i = TapiConnectptrIndex;
- apiConnectptr.p = regApiPtr;
- compare_transid1 = regApiPtr->transid[0] ^ Ttrans1;
- compare_transid2 = regApiPtr->transid[1] ^ Ttrans2;
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
- warningReport(signal, 24);
- return;
- }//if
-
-#ifdef ERROR_INSERT
- if (ERROR_INSERTED(8029)) {
- systemErrorLab(signal);
- }//if
- if (ERROR_INSERTED(8003)) {
- if (regApiPtr->apiConnectstate == CS_STARTED) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- }//if
- if (ERROR_INSERTED(8004)) {
- if (regApiPtr->apiConnectstate == CS_RECEIVING) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- }//if
- if (ERROR_INSERTED(8005)) {
- if (regApiPtr->apiConnectstate == CS_REC_COMMITTING) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- }//if
- if (ERROR_INSERTED(8006)) {
- if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- }//if
- if (ERROR_INSERTED(8023)) {
- SET_ERROR_INSERT_VALUE(8024);
- return;
- }//if
-#endif
- UintR TtcTimer = ctcTimer;
- regTcPtr->lastLqhCon = tlastLqhConnect;
- regTcPtr->lastLqhNodeId = refToNode(tlastLqhBlockref);
- regTcPtr->noFiredTriggers = noFired;
-
- UintR Ttckeyrec = (UintR)regApiPtr->tckeyrec;
- UintR TclientData = regTcPtr->clientData;
- UintR TdirtyOp = regTcPtr->dirtyOp;
- ConnectionState TapiConnectstate = regApiPtr->apiConnectstate;
- if (Ttckeyrec > (ZTCOPCONF_SIZE - 2)) {
- TCKEY_abort(signal, 30);
- return;
- }
- if (TapiConnectstate == CS_ABORTING) {
- warningReport(signal, 27);
- return;
- }//if
-
- setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
-
- if (regTcPtr->isIndexOp) {
- jam();
- // This was an internal TCKEYREQ
- // will be returned unpacked
- regTcPtr->attrInfoLen = treadlenAi;
- } else {
- if (noFired == 0 && regTcPtr->triggeringOperation == RNIL) {
- jam();
- /*
- * Skip counting triggering operations the first round
- * since they will enter execLQHKEYCONF a second time.
- * Also skip counting internally generated TcKeyReq.
- */
- regApiPtr->tcSendArray[Ttckeyrec] = TclientData;
- regApiPtr->tcSendArray[Ttckeyrec + 1] = treadlenAi;
- regApiPtr->tckeyrec = Ttckeyrec + 2;
- }//if
- }//if
- if (TdirtyOp == ZTRUE) {
- UintR Tlqhkeyreqrec = regApiPtr->lqhkeyreqrec;
- jam();
- releaseDirtyWrite(signal);
- regApiPtr->lqhkeyreqrec = Tlqhkeyreqrec - 1;
- } else {
- jam();
- if (noFired == 0) {
- jam();
- // No triggers to execute
- UintR Tlqhkeyconfrec = regApiPtr->lqhkeyconfrec;
- regApiPtr->lqhkeyconfrec = Tlqhkeyconfrec + 1;
- regTcPtr->tcConnectstate = OS_PREPARED;
- }
- }//if
-
- /**
- * And now decide what to do next
- */
- if (regTcPtr->triggeringOperation != RNIL) {
- jam();
- // This operation was created by a trigger executing operation
- // Restart it if we have executed all its triggers
- TcConnectRecordPtr opPtr;
-
- opPtr.i = regTcPtr->triggeringOperation;
- ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
- opPtr.p->triggerExecutionCount--;
- if (opPtr.p->triggerExecutionCount == 0) {
- /*
- We have completed the current trigger execution.
- Continue the triggering operation.
- */
- jam();
- continueTriggeringOp(signal, opPtr.p);
- }
- } else if (noFired == 0) {
- // This operation did not fire any triggers, finish operation
- jam();
- if (regTcPtr->isIndexOp) {
- jam();
- setupIndexOpReturn(regApiPtr, regTcPtr);
- }
- lqhKeyConf_checkTransactionState(signal, regApiPtr);
- } else {
- // We have fired triggers
- jam();
- saveTriggeringOpState(signal, regTcPtr);
- if (regTcPtr->noReceivedTriggers == noFired) {
- ApiConnectRecordPtr transPtr;
-
- // We have received all data
- jam();
- transPtr.i = TapiConnectptrIndex;
- transPtr.p = regApiPtr;
- executeTriggers(signal, &transPtr);
- }
- // else wait for more trigger data
- }
-}//Dbtc::execLQHKEYCONF()
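The transaction-id validation near the top of execLQHKEYCONF() XORs both 32-bit transid words and ORs the two results, so a single comparison against zero accepts or rejects the whole id. A minimal standalone sketch of that pattern (hypothetical names, no NDB types):

// Editor's sketch, not part of the original source.
#include <cassert>
#include <cstdint>

static bool sameTransId(const uint32_t stored[2], uint32_t t1, uint32_t t2)
{
  uint32_t diff1 = stored[0] ^ t1;   // zero iff the first words are equal
  uint32_t diff2 = stored[1] ^ t2;   // zero iff the second words are equal
  return (diff1 | diff2) == 0;       // one comparison instead of two
}

int main()
{
  const uint32_t stored[2] = { 0x12345678u, 0x9abcdef0u };
  assert(sameTransId(stored, 0x12345678u, 0x9abcdef0u));
  assert(!sameTransId(stored, 0x12345678u, 0x9abcdef1u));
  return 0;
}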
-
-void Dbtc::setupIndexOpReturn(ApiConnectRecord* regApiPtr,
- TcConnectRecord* regTcPtr)
-{
- regApiPtr->indexOpReturn = true;
- regApiPtr->indexOp = regTcPtr->indexOp;
- regApiPtr->clientData = regTcPtr->clientData;
- regApiPtr->attrInfoLen = regTcPtr->attrInfoLen;
-}
-
-/**
- * lqhKeyConf_checkTransactionState
- *
- * This function checks state variables and
- * decides if it should wait for more LQHKEYCONF signals
- * or if it should start committing
- */
-void
-Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
- ApiConnectRecord * const apiConnectPtrP)
-{
-/*---------------------------------------------------------------*/
-/* IF THE COMMIT FLAG IS SET IN SIGNAL TCKEYREQ THEN DBTC HAS TO */
-/* SEND TCKEYCONF FOR ALL OPERATIONS EXCEPT THE LAST ONE. WHEN */
-/* THE TRANSACTION THEN IS COMMITTED TCKEYCONF IS SENT FOR THE */
-/* WHOLE TRANSACTION */
-/* IF THE COMMIT FLAG IS NOT RECEIVED DBTC WILL SEND TCKEYCONF   */
-/* FOR ALL OPERATIONS, AND THEN WAIT FOR THE API TO CONCLUDE THE */
-/* TRANSACTION */
-/*---------------------------------------------------------------*/
- ConnectionState TapiConnectstate = apiConnectPtrP->apiConnectstate;
- UintR Tlqhkeyconfrec = apiConnectPtrP->lqhkeyconfrec;
- UintR Tlqhkeyreqrec = apiConnectPtrP->lqhkeyreqrec;
- int TnoOfOutStanding = Tlqhkeyreqrec - Tlqhkeyconfrec;
-
- switch (TapiConnectstate) {
- case CS_START_COMMITTING:
- if (TnoOfOutStanding == 0) {
- jam();
- diverify010Lab(signal);
- return;
- } else if (TnoOfOutStanding > 0) {
- if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
- jam();
- sendtckeyconf(signal, 0);
- return;
- } else if (apiConnectPtrP->indexOpReturn) {
- jam();
- sendtckeyconf(signal, 0);
- return;
- }//if
- jam();
- return;
- } else {
- TCKEY_abort(signal, 44);
- return;
- }//if
- return;
- case CS_STARTED:
- case CS_RECEIVING:
- if (TnoOfOutStanding == 0) {
- jam();
- sendtckeyconf(signal, 2);
- return;
- } else {
- if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
- jam();
- sendtckeyconf(signal, 0);
- return;
- } else if (apiConnectPtrP->indexOpReturn) {
- jam();
- sendtckeyconf(signal, 0);
- return;
- }//if
- jam();
- }//if
- return;
- case CS_REC_COMMITTING:
- if (TnoOfOutStanding > 0) {
- if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
- jam();
- sendtckeyconf(signal, 0);
- return;
- } else if (apiConnectPtrP->indexOpReturn) {
- jam();
- sendtckeyconf(signal, 0);
- return;
- }//if
- jam();
- return;
- }//if
- TCKEY_abort(signal, 45);
- return;
- case CS_CONNECTED:
- jam();
-/*---------------------------------------------------------------*/
-/* WE HAVE CONCLUDED THE TRANSACTION SINCE IT CONSISTED ONLY    */
-/* OF DIRTY WRITES AND ALL OF THOSE WERE COMPLETED.             */
-/* ENSURE TCKEYREC IS ZERO TO PREVENT ERRORS.                   */
-/*---------------------------------------------------------------*/
- apiConnectPtrP->tckeyrec = 0;
- return;
- default:
- TCKEY_abort(signal, 46);
- return;
- }//switch
-}//Dbtc::lqhKeyConf_checkTransactionState()
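The state dispatch above ultimately hinges on the number of still-unanswered LQHKEYREQ signals, lqhkeyreqrec minus lqhkeyconfrec. A heavily simplified sketch of the CS_START_COMMITTING branch, with invented names and ignoring the indexOpReturn and abort paths:

// Editor's sketch under the stated assumptions.
#include <cassert>

enum Action { START_COMMIT, FLUSH_TCKEYCONF, KEEP_WAITING };

static Action decide(unsigned lqhkeyreqrec, unsigned lqhkeyconfrec, bool confBufferFull)
{
  const int outstanding = (int)lqhkeyreqrec - (int)lqhkeyconfrec;
  if (outstanding == 0) return START_COMMIT;   // all operations confirmed
  if (confBufferFull)   return FLUSH_TCKEYCONF; // tckeyrec hit ZTCOPCONF_SIZE
  return KEEP_WAITING;                          // wait for more LQHKEYCONF
}

int main()
{
  assert(decide(3, 3, false) == START_COMMIT);
  assert(decide(3, 1, true)  == FLUSH_TCKEYCONF);
  assert(decide(3, 1, false) == KEEP_WAITING);
  return 0;
}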
-
-void Dbtc::sendtckeyconf(Signal* signal, UintR TcommitFlag)
-{
- if(ERROR_INSERTED(8049)){
- CLEAR_ERROR_INSERT_VALUE;
- signal->theData[0] = TcContinueB::DelayTCKEYCONF;
- signal->theData[1] = apiConnectptr.i;
- signal->theData[2] = TcommitFlag;
- sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 3000, 3);
- return;
- }
-
- HostRecordPtr localHostptr;
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- const UintR TopWords = (UintR)regApiPtr->tckeyrec;
- localHostptr.i = refToNode(regApiPtr->ndbapiBlockref);
- const Uint32 type = getNodeInfo(localHostptr.i).m_type;
- const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
- const BlockNumber TblockNum = refToBlock(regApiPtr->ndbapiBlockref);
- const Uint32 Tmarker = (regApiPtr->commitAckMarker == RNIL) ? 0 : 1;
- ptrAss(localHostptr, hostRecord);
- UintR TcurrLen = localHostptr.p->noOfWordsTCKEYCONF;
- UintR confInfo = 0;
- TcKeyConf::setCommitFlag(confInfo, TcommitFlag == 1);
- TcKeyConf::setMarkerFlag(confInfo, Tmarker);
- const UintR TpacketLen = 6 + TopWords;
- regApiPtr->tckeyrec = 0;
-
- if (regApiPtr->indexOpReturn) {
- jam();
- // Return internally generated TCKEY
- TcKeyConf * const tcKeyConf = (TcKeyConf *)signal->getDataPtrSend();
- TcKeyConf::setNoOfOperations(confInfo, 1);
- tcKeyConf->apiConnectPtr = regApiPtr->indexOp;
- tcKeyConf->gci = regApiPtr->globalcheckpointid;
- tcKeyConf->confInfo = confInfo;
- tcKeyConf->transId1 = regApiPtr->transid[0];
- tcKeyConf->transId2 = regApiPtr->transid[1];
- tcKeyConf->operations[0].apiOperationPtr = regApiPtr->clientData;
- tcKeyConf->operations[0].attrInfoLen = regApiPtr->attrInfoLen;
- Uint32 sigLen = TcKeyConf::StaticLength + TcKeyConf::OperationLength;
- EXECUTE_DIRECT(DBTC, GSN_TCKEYCONF, signal, sigLen);
- regApiPtr->indexOpReturn = false;
- if (TopWords == 0) {
- jam();
- return; // No queued TcKeyConf
- }//if
- }//if
- if(TcommitFlag){
- jam();
- regApiPtr->m_exec_flag = 0;
- }
- TcKeyConf::setNoOfOperations(confInfo, (TopWords >> 1));
- if ((TpacketLen > 25) || !is_api){
- TcKeyConf * const tcKeyConf = (TcKeyConf *)signal->getDataPtrSend();
-
- jam();
- tcKeyConf->apiConnectPtr = regApiPtr->ndbapiConnect;
- tcKeyConf->gci = regApiPtr->globalcheckpointid;
- tcKeyConf->confInfo = confInfo;
- tcKeyConf->transId1 = regApiPtr->transid[0];
- tcKeyConf->transId2 = regApiPtr->transid[1];
- copyFromToLen(&regApiPtr->tcSendArray[0],
- (UintR*)&tcKeyConf->operations,
- (UintR)ZTCOPCONF_SIZE);
- sendSignal(regApiPtr->ndbapiBlockref,
- GSN_TCKEYCONF, signal, (TpacketLen - 1), JBB);
- return;
- } else if (((TcurrLen + TpacketLen) > 25) && (TcurrLen > 0)) {
- jam();
- sendPackedTCKEYCONF(signal, localHostptr.p, localHostptr.i);
- TcurrLen = 0;
- } else {
- jam();
- updatePackedList(signal, localHostptr.p, localHostptr.i);
- }//if
- // -------------------------------------------------------------------------
- // The header word contains the block reference of the receiver plus the
- // real signal length minus 3. Since TpacketLen is the real signal length
- // plus one additional word for the header itself, we subtract 4.
- // -------------------------------------------------------------------------
- UintR Tpack0 = (TblockNum << 16) + (TpacketLen - 4);
- UintR Tpack1 = regApiPtr->ndbapiConnect;
- UintR Tpack2 = regApiPtr->globalcheckpointid;
- UintR Tpack3 = confInfo;
- UintR Tpack4 = regApiPtr->transid[0];
- UintR Tpack5 = regApiPtr->transid[1];
-
- localHostptr.p->noOfWordsTCKEYCONF = TcurrLen + TpacketLen;
-
- localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 0] = Tpack0;
- localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 1] = Tpack1;
- localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 2] = Tpack2;
- localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 3] = Tpack3;
- localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 4] = Tpack4;
- localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 5] = Tpack5;
-
- UintR Ti;
- for (Ti = 6; Ti < TpacketLen; Ti++) {
- localHostptr.p->packedWordsTCKEYCONF[TcurrLen + Ti] =
- regApiPtr->tcSendArray[Ti - 6];
- }//for
-}//Dbtc::sendtckeyconf()
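A small sketch of the packed header word built at the end of sendtckeyconf(), assuming only the layout implied by the arithmetic above (receiver block number in the upper 16 bits, real signal length minus 3 in the low bits); the helper names are invented:

// Editor's sketch; the decode helpers mirror the assumed encoding.
#include <cassert>
#include <cstdint>

static uint32_t packHeader(uint32_t blockNum, uint32_t realSigLen)
{
  return (blockNum << 16) | (realSigLen - 3);  // same as (TblockNum << 16) + (TpacketLen - 4)
}

static uint32_t headerBlockNum(uint32_t header) { return header >> 16; }
static uint32_t headerSigLen(uint32_t header)   { return (header & 0xFFFFu) + 3; }

int main()
{
  const uint32_t header = packHeader(311, 11);  // invented block number and length
  assert(headerBlockNum(header) == 311);
  assert(headerSigLen(header) == 11);
  return 0;
}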
-
-void Dbtc::copyFromToLen(UintR* sourceBuffer, UintR* destBuffer, UintR Tlen)
-{
- UintR Tindex = 0;
- UintR Ti;
- while (Tlen >= 4) {
- UintR Tdata0 = sourceBuffer[Tindex + 0];
- UintR Tdata1 = sourceBuffer[Tindex + 1];
- UintR Tdata2 = sourceBuffer[Tindex + 2];
- UintR Tdata3 = sourceBuffer[Tindex + 3];
- Tlen -= 4;
- destBuffer[Tindex + 0] = Tdata0;
- destBuffer[Tindex + 1] = Tdata1;
- destBuffer[Tindex + 2] = Tdata2;
- destBuffer[Tindex + 3] = Tdata3;
- Tindex += 4;
- }//while
- for (Ti = 0; Ti < Tlen; Ti++, Tindex++) {
- destBuffer[Tindex] = sourceBuffer[Tindex];
- }//for
-}//Dbtc::copyFromToLen()
-
-void Dbtc::execSEND_PACKED(Signal* signal)
-{
- HostRecordPtr Thostptr;
- HostRecord *localHostRecord = hostRecord;
- UintR i;
- UintR TpackedListIndex = cpackedListIndex;
- jamEntry();
- for (i = 0; i < TpackedListIndex; i++) {
- Thostptr.i = cpackedList[i];
- ptrAss(Thostptr, localHostRecord);
- arrGuard(Thostptr.i - 1, MAX_NODES - 1);
- UintR TnoOfPackedWordsLqh = Thostptr.p->noOfPackedWordsLqh;
- UintR TnoOfWordsTCKEYCONF = Thostptr.p->noOfWordsTCKEYCONF;
- UintR TnoOfWordsTCINDXCONF = Thostptr.p->noOfWordsTCINDXCONF;
- jam();
- if (TnoOfPackedWordsLqh > 0) {
- jam();
- sendPackedSignalLqh(signal, Thostptr.p);
- }//if
- if (TnoOfWordsTCKEYCONF > 0) {
- jam();
- sendPackedTCKEYCONF(signal, Thostptr.p, (Uint32)Thostptr.i);
- }//if
- if (TnoOfWordsTCINDXCONF > 0) {
- jam();
- sendPackedTCINDXCONF(signal, Thostptr.p, (Uint32)Thostptr.i);
- }//if
- Thostptr.p->inPackedList = false;
- }//for
- cpackedListIndex = 0;
- return;
-}//Dbtc::execSEND_PACKED()
-
-void
-Dbtc::updatePackedList(Signal* signal, HostRecord* ahostptr, Uint16 ahostIndex)
-{
- if (ahostptr->inPackedList == false) {
- UintR TpackedListIndex = cpackedListIndex;
- jam();
- ahostptr->inPackedList = true;
- cpackedList[TpackedListIndex] = ahostIndex;
- cpackedListIndex = TpackedListIndex + 1;
- }//if
-}//Dbtc::updatePackedList()
-
-void Dbtc::sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr)
-{
- UintR Tj;
- UintR TnoOfWords = ahostptr->noOfPackedWordsLqh;
- for (Tj = 0; Tj < TnoOfWords; Tj += 4) {
- UintR sig0 = ahostptr->packedWordsLqh[Tj + 0];
- UintR sig1 = ahostptr->packedWordsLqh[Tj + 1];
- UintR sig2 = ahostptr->packedWordsLqh[Tj + 2];
- UintR sig3 = ahostptr->packedWordsLqh[Tj + 3];
- signal->theData[Tj + 0] = sig0;
- signal->theData[Tj + 1] = sig1;
- signal->theData[Tj + 2] = sig2;
- signal->theData[Tj + 3] = sig3;
- }//for
- ahostptr->noOfPackedWordsLqh = 0;
- sendSignal(ahostptr->hostLqhBlockRef,
- GSN_PACKED_SIGNAL,
- signal,
- TnoOfWords,
- JBB);
-}//Dbtc::sendPackedSignalLqh()
-
-void Dbtc::sendPackedTCKEYCONF(Signal* signal,
- HostRecord * ahostptr,
- UintR hostId)
-{
- UintR Tj;
- UintR TnoOfWords = ahostptr->noOfWordsTCKEYCONF;
- BlockReference TBref = numberToRef(API_PACKED, hostId);
- for (Tj = 0; Tj < ahostptr->noOfWordsTCKEYCONF; Tj += 4) {
- UintR sig0 = ahostptr->packedWordsTCKEYCONF[Tj + 0];
- UintR sig1 = ahostptr->packedWordsTCKEYCONF[Tj + 1];
- UintR sig2 = ahostptr->packedWordsTCKEYCONF[Tj + 2];
- UintR sig3 = ahostptr->packedWordsTCKEYCONF[Tj + 3];
- signal->theData[Tj + 0] = sig0;
- signal->theData[Tj + 1] = sig1;
- signal->theData[Tj + 2] = sig2;
- signal->theData[Tj + 3] = sig3;
- }//for
- ahostptr->noOfWordsTCKEYCONF = 0;
- sendSignal(TBref, GSN_TCKEYCONF, signal, TnoOfWords, JBB);
-}//Dbtc::sendPackedTCKEYCONF()
-
-void Dbtc::sendPackedTCINDXCONF(Signal* signal,
- HostRecord * ahostptr,
- UintR hostId)
-{
- UintR Tj;
- UintR TnoOfWords = ahostptr->noOfWordsTCINDXCONF;
- BlockReference TBref = numberToRef(API_PACKED, hostId);
- for (Tj = 0; Tj < ahostptr->noOfWordsTCINDXCONF; Tj += 4) {
- UintR sig0 = ahostptr->packedWordsTCINDXCONF[Tj + 0];
- UintR sig1 = ahostptr->packedWordsTCINDXCONF[Tj + 1];
- UintR sig2 = ahostptr->packedWordsTCINDXCONF[Tj + 2];
- UintR sig3 = ahostptr->packedWordsTCINDXCONF[Tj + 3];
- signal->theData[Tj + 0] = sig0;
- signal->theData[Tj + 1] = sig1;
- signal->theData[Tj + 2] = sig2;
- signal->theData[Tj + 3] = sig3;
- }//for
- ahostptr->noOfWordsTCINDXCONF = 0;
- sendSignal(TBref, GSN_TCINDXCONF, signal, TnoOfWords, JBB);
-}//Dbtc::sendPackedTCINDXCONF()
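All three senders above follow the same buffering pattern: append small fixed-size records to a per-host word array and flush the array as one signal before the 25-word limit would be exceeded. A self-contained sketch of that pattern with hypothetical types (the real code checks slightly different thresholds per record kind):

// Editor's sketch; PackedBuffer and the vector stand-in are assumptions.
#include <cassert>
#include <cstdint>
#include <vector>

struct PackedBuffer {
  static constexpr unsigned kMaxWords = 25;     // same limit as a short signal
  uint32_t words[kMaxWords];
  unsigned used = 0;
  std::vector<std::vector<uint32_t>> sent;      // stands in for sendSignal()

  void flush() {
    if (used) { sent.emplace_back(words, words + used); used = 0; }
  }
  void append(const uint32_t* rec, unsigned len) {
    if (used + len > kMaxWords) flush();        // would overflow: send first
    for (unsigned i = 0; i < len; i++) words[used + i] = rec[i];
    used += len;
  }
};

int main() {
  PackedBuffer buf;
  const uint32_t commitRec[4] = {1, 2, 3, 4};
  for (int i = 0; i < 7; i++) buf.append(commitRec, 4);  // 28 words in total
  buf.flush();
  assert(buf.sent.size() == 2);                 // 24 words first, then the rest
  assert(buf.sent[0].size() == 24 && buf.sent[1].size() == 4);
  return 0;
}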
-
-/*
-4.3.11 DIVERIFY
----------------
-*/
-/*****************************************************************************/
-/* D I V E R I F Y */
-/* */
-/*****************************************************************************/
-void Dbtc::diverify010Lab(Signal* signal)
-{
- UintR TfirstfreeApiConnectCopy = cfirstfreeApiConnectCopy;
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- signal->theData[0] = apiConnectptr.i;
- if (ERROR_INSERTED(8022)) {
- jam();
- systemErrorLab(signal);
- }//if
- if (TfirstfreeApiConnectCopy != RNIL) {
- seizeApiConnectCopy(signal);
- regApiPtr->apiConnectstate = CS_PREPARE_TO_COMMIT;
- /*-----------------------------------------------------------------------
- * WE COME HERE ONLY IF THE TRANSACTION IS PREPARED ON ALL TC CONNECTIONS.
- * THUS WE CAN START THE COMMIT PHASE BY SENDING DIVERIFY ON ALL TC
- * CONNECTIONS AND THEN WHEN ALL DIVERIFYCONF HAVE BEEN RECEIVED THE
- * COMMIT MESSAGE CAN BE SENT TO ALL INVOLVED PARTS.
- *-----------------------------------------------------------------------*/
- EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, signal, 1);
- if (signal->theData[2] == 0) {
- execDIVERIFYCONF(signal);
- }
- return;
- } else {
- /*-----------------------------------------------------------------------
- * There were no free copy connections available. We must abort the
- * transaction since otherwise we will have a problem with the report
- * to the application.
- * This should practically never happen, but if it does we do not want to
- * crash, and we do not want to add code to handle it fully since it is
- * difficult to test and would be complex to handle for a problem that
- * practically never occurs.
- *-----------------------------------------------------------------------*/
- terrorCode = ZSEIZE_API_COPY_ERROR;
- abortErrorLab(signal);
- return;
- }//if
-}//Dbtc::diverify010Lab()
-
-/* ------------------------------------------------------------------------- */
-/* ------- SEIZE_API_CONNECT ------- */
-/* SEIZE CONNECT RECORD FOR A REQUEST */
-/* ------------------------------------------------------------------------- */
-void Dbtc::seizeApiConnectCopy(Signal* signal)
-{
- ApiConnectRecordPtr locApiConnectptr;
-
- ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
- UintR TapiConnectFilesize = capiConnectFilesize;
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
-
- locApiConnectptr.i = cfirstfreeApiConnectCopy;
- ptrCheckGuard(locApiConnectptr, TapiConnectFilesize, localApiConnectRecord);
- cfirstfreeApiConnectCopy = locApiConnectptr.p->nextApiConnect;
- locApiConnectptr.p->nextApiConnect = RNIL;
- regApiPtr->apiCopyRecord = locApiConnectptr.i;
- regApiPtr->triggerPending = false;
- regApiPtr->isIndexOp = false;
-}//Dbtc::seizeApiConnectCopy()
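Seizing the copy record above is a plain pop from an intrusive free list: take the head, advance cfirstfreeApiConnectCopy to its nextApiConnect, and clear the link. A tiny sketch with an invented record type:

// Editor's sketch; Record and RNIL_SKETCH are assumptions, not NDB types.
#include <cassert>
#include <cstdint>

static const uint32_t RNIL_SKETCH = 0xFFFFFFFFu;  // "no record" marker

struct Record { uint32_t next; };

static uint32_t seize(Record* pool, uint32_t& freeHead)
{
  uint32_t i = freeHead;          // caller must check freeHead != RNIL_SKETCH
  freeHead = pool[i].next;        // unlink from the free list
  pool[i].next = RNIL_SKETCH;
  return i;
}

int main()
{
  Record pool[3] = { {1}, {2}, {RNIL_SKETCH} };
  uint32_t freeHead = 0;
  assert(seize(pool, freeHead) == 0 && freeHead == 1);
  assert(seize(pool, freeHead) == 1 && freeHead == 2);
  return 0;
}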
-
-void Dbtc::execDIVERIFYCONF(Signal* signal)
-{
- UintR TapiConnectptrIndex = signal->theData[0];
- UintR TapiConnectFilesize = capiConnectFilesize;
- UintR Tgci = signal->theData[1];
- ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
-
- jamEntry();
- if (ERROR_INSERTED(8017)) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- if (TapiConnectptrIndex >= TapiConnectFilesize) {
- TCKEY_abort(signal, 31);
- return;
- }//if
- ApiConnectRecord * const regApiPtr =
- &localApiConnectRecord[TapiConnectptrIndex];
- ConnectionState TapiConnectstate = regApiPtr->apiConnectstate;
- UintR TApifailureNr = regApiPtr->failureNr;
- UintR Tfailure_nr = cfailure_nr;
- apiConnectptr.i = TapiConnectptrIndex;
- apiConnectptr.p = regApiPtr;
- if (TapiConnectstate != CS_PREPARE_TO_COMMIT) {
- TCKEY_abort(signal, 32);
- return;
- }//if
- /*--------------------------------------------------------------------------
- * THIS IS THE COMMIT POINT. IF WE ARRIVE HERE THE TRANSACTION IS COMMITTED
- * UNLESS EVERYTHING CRASHES BEFORE WE HAVE BEEN ABLE TO REPORT THE COMMIT
- * DECISION. THERE IS NO TURNING BACK FROM THIS DECISION FROM HERE ON.
- * WE WILL INSERT THE TRANSACTION INTO ITS PROPER QUEUE OF
- * TRANSACTIONS FOR ITS GLOBAL CHECKPOINT.
- *-------------------------------------------------------------------------*/
- if (TApifailureNr != Tfailure_nr) {
- DIVER_node_fail_handling(signal, Tgci);
- return;
- }//if
- commitGciHandling(signal, Tgci);
-
- /**************************************************************************
- * C O M M I T
- * THE TRANSACTION HAS NOW BEEN VERIFIED AND THE COMMIT PHASE CAN START
- **************************************************************************/
-
- UintR TtcConnectptrIndex = regApiPtr->firstTcConnect;
- UintR TtcConnectFilesize = ctcConnectFilesize;
- TcConnectRecord *localTcConnectRecord = tcConnectRecord;
-
- regApiPtr->counter = regApiPtr->lqhkeyconfrec;
- regApiPtr->apiConnectstate = CS_COMMITTING;
- if (TtcConnectptrIndex >= TtcConnectFilesize) {
- TCKEY_abort(signal, 33);
- return;
- }//if
- TcConnectRecord* const regTcPtr = &localTcConnectRecord[TtcConnectptrIndex];
- tcConnectptr.i = TtcConnectptrIndex;
- tcConnectptr.p = regTcPtr;
- commit020Lab(signal);
-}//Dbtc::execDIVERIFYCONF()
-
-/*--------------------------------------------------------------------------*/
-/* COMMIT_GCI_HANDLING */
-/* SET UP GLOBAL CHECKPOINT DATA STRUCTURE AT THE COMMIT POINT. */
-/*--------------------------------------------------------------------------*/
-void Dbtc::commitGciHandling(Signal* signal, UintR Tgci)
-{
- GcpRecordPtr localGcpPointer;
-
- UintR TgcpFilesize = cgcpFilesize;
- UintR Tfirstgcp = cfirstgcp;
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- GcpRecord *localGcpRecord = gcpRecord;
-
- regApiPtr->globalcheckpointid = Tgci;
- if (Tfirstgcp != RNIL) {
- /* IF THIS GLOBAL CHECKPOINT ALREADY EXISTS */
- localGcpPointer.i = Tfirstgcp;
- ptrCheckGuard(localGcpPointer, TgcpFilesize, localGcpRecord);
- do {
- if (regApiPtr->globalcheckpointid == localGcpPointer.p->gcpId) {
- jam();
- gcpPtr.i = localGcpPointer.i;
- gcpPtr.p = localGcpPointer.p;
- linkApiToGcp(signal);
- return;
- } else {
- localGcpPointer.i = localGcpPointer.p->nextGcp;
- jam();
- if (localGcpPointer.i != RNIL) {
- jam();
- ptrCheckGuard(localGcpPointer, TgcpFilesize, localGcpRecord);
- continue;
- }//if
- }//if
- seizeGcp(signal);
- linkApiToGcp(signal);
- return;
- } while (1);
- } else {
- jam();
- seizeGcp(signal);
- linkApiToGcp(signal);
- }//if
-}//Dbtc::commitGciHandling()
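commitGciHandling() is a find-or-create over the list of GCP records: reuse the record whose gcpId equals the transaction's GCI, otherwise seize a new record and append it at the tail. A simplified sketch using a std::list in place of the intrusive list (names invented):

// Editor's sketch under the stated assumptions.
#include <cassert>
#include <cstdint>
#include <list>

struct Gcp { uint32_t gcpId; unsigned txnCount = 0; };

static Gcp& findOrCreate(std::list<Gcp>& gcps, uint32_t gci)
{
  for (Gcp& g : gcps)
    if (g.gcpId == gci) return g;   // this global checkpoint already exists
  gcps.push_back(Gcp{gci});         // otherwise seize a fresh record
  return gcps.back();
}

int main()
{
  std::list<Gcp> gcps;
  findOrCreate(gcps, 42).txnCount++;
  findOrCreate(gcps, 42).txnCount++;
  findOrCreate(gcps, 43).txnCount++;
  assert(gcps.size() == 2 && gcps.front().txnCount == 2);
  return 0;
}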
-
-/* --------------------------------------------------------------------------*/
-/* -LINK AN API CONNECT RECORD IN STATE PREPARED INTO THE LIST WITH GLOBAL - */
-/* CHECKPOINTS. WHEN THE TRANSACTION IS COMPLETED THE API CONNECT RECORD IS */
-/* LINKED OUT OF THE LIST. */
-/*---------------------------------------------------------------------------*/
-void Dbtc::linkApiToGcp(Signal* signal)
-{
- ApiConnectRecordPtr localApiConnectptr;
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- GcpRecord * const regGcpPtr = gcpPtr.p;
- UintR TapiConnectptrIndex = apiConnectptr.i;
- ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
-
- regApiPtr->nextGcpConnect = RNIL;
- if (regGcpPtr->firstApiConnect == RNIL) {
- regGcpPtr->firstApiConnect = TapiConnectptrIndex;
- jam();
- } else {
- UintR TapiConnectFilesize = capiConnectFilesize;
- localApiConnectptr.i = regGcpPtr->lastApiConnect;
- jam();
- ptrCheckGuard(localApiConnectptr,
- TapiConnectFilesize, localApiConnectRecord);
- localApiConnectptr.p->nextGcpConnect = TapiConnectptrIndex;
- }//if
- UintR TlastApiConnect = regGcpPtr->lastApiConnect;
- regApiPtr->gcpPointer = gcpPtr.i;
- regApiPtr->prevGcpConnect = TlastApiConnect;
- regGcpPtr->lastApiConnect = TapiConnectptrIndex;
-}//Dbtc::linkApiToGcp()
-
-void Dbtc::seizeGcp(Signal* signal)
-{
- GcpRecordPtr tmpGcpPointer;
- GcpRecordPtr localGcpPointer;
-
- UintR Tfirstgcp = cfirstgcp;
- UintR Tglobalcheckpointid = apiConnectptr.p->globalcheckpointid;
- UintR TgcpFilesize = cgcpFilesize;
- GcpRecord *localGcpRecord = gcpRecord;
-
- localGcpPointer.i = cfirstfreeGcp;
- ptrCheckGuard(localGcpPointer, TgcpFilesize, localGcpRecord);
- UintR TfirstfreeGcp = localGcpPointer.p->nextGcp;
- localGcpPointer.p->gcpId = Tglobalcheckpointid;
- localGcpPointer.p->nextGcp = RNIL;
- localGcpPointer.p->firstApiConnect = RNIL;
- localGcpPointer.p->lastApiConnect = RNIL;
- localGcpPointer.p->gcpNomoretransRec = ZFALSE;
- cfirstfreeGcp = TfirstfreeGcp;
-
- if (Tfirstgcp == RNIL) {
- jam();
- cfirstgcp = localGcpPointer.i;
- } else {
- tmpGcpPointer.i = clastgcp;
- jam();
- ptrCheckGuard(tmpGcpPointer, TgcpFilesize, localGcpRecord);
- tmpGcpPointer.p->nextGcp = localGcpPointer.i;
- }//if
- clastgcp = localGcpPointer.i;
- gcpPtr = localGcpPointer;
-}//Dbtc::seizeGcp()
-
-/*---------------------------------------------------------------------------*/
-// Send COMMIT messages to all LQH operations involved in the transaction.
-/*---------------------------------------------------------------------------*/
-void Dbtc::commit020Lab(Signal* signal)
-{
- TcConnectRecordPtr localTcConnectptr;
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- UintR TtcConnectFilesize = ctcConnectFilesize;
- TcConnectRecord *localTcConnectRecord = tcConnectRecord;
-
- localTcConnectptr.p = tcConnectptr.p;
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- UintR Tcount = 0;
- do {
- /*-----------------------------------------------------------------------
- * WE ARE NOW READY TO RELEASE ALL OPERATIONS ON THE LQH
- *-----------------------------------------------------------------------*/
- /* *********< */
- /* COMMIT < */
- /* *********< */
- localTcConnectptr.i = localTcConnectptr.p->nextTcConnect;
- localTcConnectptr.p->tcConnectstate = OS_COMMITTING;
- sendCommitLqh(signal, localTcConnectptr.p);
-
- if (localTcConnectptr.i != RNIL) {
- Tcount = Tcount + 1;
- if (Tcount < 16) {
- ptrCheckGuard(localTcConnectptr,
- TtcConnectFilesize, localTcConnectRecord);
- jam();
- continue;
- } else {
- jam();
- if (ERROR_INSERTED(8014)) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- signal->theData[0] = TcContinueB::ZSEND_COMMIT_LOOP;
- signal->theData[1] = apiConnectptr.i;
- signal->theData[2] = localTcConnectptr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
- return;
- }//if
- } else {
- jam();
- regApiPtr->apiConnectstate = CS_COMMIT_SENT;
- return;
- }//if
- } while (1);
-}//Dbtc::commit020Lab()
-
-void Dbtc::sendCommitLqh(Signal* signal,
- TcConnectRecord * const regTcPtr)
-{
- HostRecordPtr Thostptr;
- UintR ThostFilesize = chostFilesize;
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- Thostptr.i = regTcPtr->lastLqhNodeId;
- ptrCheckGuard(Thostptr, ThostFilesize, hostRecord);
- if (Thostptr.p->noOfPackedWordsLqh > 21) {
- jam();
- sendPackedSignalLqh(signal, Thostptr.p);
- } else {
- jam();
- updatePackedList(signal, Thostptr.p, Thostptr.i);
- }//if
- UintR Tindex = Thostptr.p->noOfPackedWordsLqh;
- UintR* TDataPtr = &Thostptr.p->packedWordsLqh[Tindex];
- UintR Tdata1 = regTcPtr->lastLqhCon;
- UintR Tdata2 = regApiPtr->globalcheckpointid;
- UintR Tdata3 = regApiPtr->transid[0];
- UintR Tdata4 = regApiPtr->transid[1];
-
- TDataPtr[0] = Tdata1 | (ZCOMMIT << 28);
- TDataPtr[1] = Tdata2;
- TDataPtr[2] = Tdata3;
- TDataPtr[3] = Tdata4;
- Thostptr.p->noOfPackedWordsLqh = Tindex + 4;
-}//Dbtc::sendCommitLqh()
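The packed COMMIT record above encodes the record kind in the top four bits of its first word and the LQH connect pointer in the low 28 bits; sendCompleteLqh() and sendRemoveMarker() below use the same (ptr | (TYPE << 28)) scheme. A sketch of that packing with illustrative constant values (not the real ZCOMMIT/ZCOMPLETE numbers):

// Editor's sketch; the enum values are assumptions.
#include <cassert>
#include <cstdint>

enum PackedType : uint32_t { COMMIT = 1, COMPLETE = 2, REMOVE_MARKER = 3 };

static uint32_t packFirstWord(PackedType t, uint32_t connectPtr)
{
  return connectPtr | (uint32_t(t) << 28);   // type in bits 28..31, pointer below
}

int main()
{
  const uint32_t w = packFirstWord(COMMIT, 0x0ABCDEFu);
  assert((w >> 28) == COMMIT);               // type recovered from the top 4 bits
  assert((w & 0x0FFFFFFFu) == 0x0ABCDEFu);   // connect pointer in the low 28 bits
  return 0;
}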
-
-void
-Dbtc::DIVER_node_fail_handling(Signal* signal, UintR Tgci)
-{
- /*------------------------------------------------------------------------
- * AT LEAST ONE NODE HAS FAILED DURING THE TRANSACTION. WE NEED TO CHECK IF
- * THIS IS SO SERIOUS THAT WE NEED TO ABORT THE TRANSACTION. IN BOTH THE
- * ABORT AND THE COMMIT CASES WE NEED TO SET-UP THE DATA FOR THE
- * ABORT/COMMIT/COMPLETE HANDLING AS ALSO USED BY TAKE OVER FUNCTIONALITY.
- *------------------------------------------------------------------------*/
- tabortInd = ZFALSE;
- setupFailData(signal);
- if (tabortInd == ZFALSE) {
- jam();
- commitGciHandling(signal, Tgci);
- toCommitHandlingLab(signal);
- } else {
- jam();
- apiConnectptr.p->returnsignal = RS_TCROLLBACKREP;
- apiConnectptr.p->returncode = ZNODEFAIL_BEFORE_COMMIT;
- toAbortHandlingLab(signal);
- }//if
- return;
-}//Dbtc::DIVER_node_fail_handling()
-
-
-/* ------------------------------------------------------------------------- */
-/* ------- ENTER COMMITTED ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dbtc::execCOMMITTED(Signal* signal)
-{
- TcConnectRecordPtr localTcConnectptr;
- ApiConnectRecordPtr localApiConnectptr;
-
- UintR TtcConnectFilesize = ctcConnectFilesize;
- UintR TapiConnectFilesize = capiConnectFilesize;
- TcConnectRecord *localTcConnectRecord = tcConnectRecord;
- ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
-
-#ifdef ERROR_INSERT
- if (ERROR_INSERTED(8018)) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- if (ERROR_INSERTED(8030)) {
- systemErrorLab(signal);
- }//if
- if (ERROR_INSERTED(8025)) {
- SET_ERROR_INSERT_VALUE(8026);
- return;
- }//if
- if (ERROR_INSERTED(8041)) {
- CLEAR_ERROR_INSERT_VALUE;
- sendSignalWithDelay(cownref, GSN_COMMITTED, signal, 2000, 3);
- return;
- }//if
- if (ERROR_INSERTED(8042)) {
- SET_ERROR_INSERT_VALUE(8046);
- sendSignalWithDelay(cownref, GSN_COMMITTED, signal, 2000, 4);
- return;
- }//if
-#endif
- localTcConnectptr.i = signal->theData[0];
- jamEntry();
- ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
- localApiConnectptr.i = localTcConnectptr.p->apiConnect;
- if (localTcConnectptr.p->tcConnectstate != OS_COMMITTING) {
- warningReport(signal, 4);
- return;
- }//if
- ptrCheckGuard(localApiConnectptr, TapiConnectFilesize,
- localApiConnectRecord);
- UintR Tcounter = localApiConnectptr.p->counter - 1;
- ConnectionState TapiConnectstate = localApiConnectptr.p->apiConnectstate;
- UintR Tdata1 = localApiConnectptr.p->transid[0] - signal->theData[1];
- UintR Tdata2 = localApiConnectptr.p->transid[1] - signal->theData[2];
- Tdata1 = Tdata1 | Tdata2;
- bool TcheckCondition =
- (TapiConnectstate != CS_COMMIT_SENT) || (Tcounter != 0);
-
- setApiConTimer(localApiConnectptr.i, ctcTimer, __LINE__);
- localApiConnectptr.p->counter = Tcounter;
- localTcConnectptr.p->tcConnectstate = OS_COMMITTED;
- if (Tdata1 != 0) {
- warningReport(signal, 5);
- return;
- }//if
- if (TcheckCondition) {
- jam();
- /*-------------------------------------------------------*/
- // We have not sent all COMMIT requests yet. We could be
- // in the state that all sent are COMMITTED but we are
- // still waiting for a CONTINUEB to send the rest of the
- // COMMIT requests.
- /*-------------------------------------------------------*/
- return;
- }//if
- if (ERROR_INSERTED(8020)) {
- jam();
- systemErrorLab(signal);
- }//if
- /*-------------------------------------------------------*/
- /* THE ENTIRE TRANSACTION IS NOW COMMITTED */
- /* NOW WE NEED TO SEND THE RESPONSE TO THE APPLICATION. */
- /* THE APPLICATION CAN THEN REUSE THE API CONNECTION AND */
- /* THEREFORE WE NEED TO MOVE THE API CONNECTION TO A */
- /* NEW API CONNECT RECORD. */
- /*-------------------------------------------------------*/
-
- apiConnectptr = localApiConnectptr;
- sendApiCommit(signal);
-
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- localTcConnectptr.i = regApiPtr->firstTcConnect;
- UintR Tlqhkeyconfrec = regApiPtr->lqhkeyconfrec;
- ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
- regApiPtr->counter = Tlqhkeyconfrec;
-
- tcConnectptr = localTcConnectptr;
- complete010Lab(signal);
- return;
-
-}//Dbtc::execCOMMITTED()
-
-/*-------------------------------------------------------*/
-/* SEND_API_COMMIT */
-/* SEND COMMIT DECISION TO THE API. */
-/*-------------------------------------------------------*/
-void Dbtc::sendApiCommit(Signal* signal)
-{
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
-
- if (regApiPtr->returnsignal == RS_TCKEYCONF) {
- sendtckeyconf(signal, 1);
- } else if (regApiPtr->returnsignal == RS_TC_COMMITCONF) {
- jam();
- TcCommitConf * const commitConf = (TcCommitConf *)&signal->theData[0];
- if(regApiPtr->commitAckMarker == RNIL){
- jam();
- commitConf->apiConnectPtr = regApiPtr->ndbapiConnect;
- } else {
- jam();
- commitConf->apiConnectPtr = regApiPtr->ndbapiConnect | 1;
- }
- commitConf->transId1 = regApiPtr->transid[0];
- commitConf->transId2 = regApiPtr->transid[1];
- commitConf->gci = regApiPtr->globalcheckpointid;
- sendSignal(regApiPtr->ndbapiBlockref, GSN_TC_COMMITCONF, signal,
- TcCommitConf::SignalLength, JBB);
- } else if (regApiPtr->returnsignal == RS_NO_RETURN) {
- jam();
- } else {
- TCKEY_abort(signal, 37);
- return;
- }//if
- UintR TapiConnectFilesize = capiConnectFilesize;
- UintR TcommitCount = c_counters.ccommitCount;
- UintR TapiIndex = apiConnectptr.i;
- UintR TnewApiIndex = regApiPtr->apiCopyRecord;
- UintR TapiFailState = regApiPtr->apiFailState;
- ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
-
- tmpApiConnectptr.p = apiConnectptr.p;
- tmpApiConnectptr.i = TapiIndex;
- c_counters.ccommitCount = TcommitCount + 1;
- apiConnectptr.i = TnewApiIndex;
- ptrCheckGuard(apiConnectptr, TapiConnectFilesize, localApiConnectRecord);
- copyApi(signal);
- if (TapiFailState != ZTRUE) {
- return;
- } else {
- jam();
- handleApiFailState(signal, tmpApiConnectptr.i);
- return;
- }//if
-}//Dbtc::sendApiCommit()
-
-/* ========================================================================= */
-/* ======= COPY_API ======= */
-/* COPY THE API RECORD AND ALSO RESET THE OLD API RECORD SO THAT IT        */
-/* IS PREPARED TO RECEIVE A NEW TRANSACTION. */
-/*===========================================================================*/
-void Dbtc::copyApi(Signal* signal)
-{
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- ApiConnectRecord * const regTmpApiPtr = tmpApiConnectptr.p;
-
- UintR TndbapiConnect = regTmpApiPtr->ndbapiConnect;
- UintR TfirstTcConnect = regTmpApiPtr->firstTcConnect;
- UintR Ttransid1 = regTmpApiPtr->transid[0];
- UintR Ttransid2 = regTmpApiPtr->transid[1];
- UintR Tlqhkeyconfrec = regTmpApiPtr->lqhkeyconfrec;
- UintR TgcpPointer = regTmpApiPtr->gcpPointer;
- UintR TgcpFilesize = cgcpFilesize;
- UintR TcommitAckMarker = regTmpApiPtr->commitAckMarker;
- GcpRecord *localGcpRecord = gcpRecord;
-
- regApiPtr->ndbapiBlockref = regTmpApiPtr->ndbapiBlockref;
- regApiPtr->ndbapiConnect = TndbapiConnect;
- regApiPtr->firstTcConnect = TfirstTcConnect;
- regApiPtr->apiConnectstate = CS_COMPLETING;
- regApiPtr->transid[0] = Ttransid1;
- regApiPtr->transid[1] = Ttransid2;
- regApiPtr->lqhkeyconfrec = Tlqhkeyconfrec;
- regApiPtr->commitAckMarker = TcommitAckMarker;
-
- gcpPtr.i = TgcpPointer;
- ptrCheckGuard(gcpPtr, TgcpFilesize, localGcpRecord);
- unlinkApiConnect(signal);
- linkApiToGcp(signal);
- setApiConTimer(tmpApiConnectptr.i, 0, __LINE__);
- regTmpApiPtr->apiConnectstate = CS_CONNECTED;
- regTmpApiPtr->commitAckMarker = RNIL;
- regTmpApiPtr->firstTcConnect = RNIL;
- regTmpApiPtr->lastTcConnect = RNIL;
-}//Dbtc::copyApi()
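copyApi() is what lets the API connection start a new transaction immediately after the commit is reported: the fields still needed by the complete phase move onto the copy record seized earlier, and the original record is reset. A heavily simplified sketch with an invented record type:

// Editor's sketch; Txn and its fields are assumptions.
#include <cassert>
#include <cstdint>

struct Txn {
  uint32_t transid[2] = {0, 0};
  uint32_t firstOp = 0xFFFFFFFFu;   // RNIL-style "empty" marker
  bool busy = false;
};

static void moveToCopy(Txn& original, Txn& copy)
{
  copy = original;                  // complete phase continues on the copy
  original.firstOp = 0xFFFFFFFFu;   // original is ready for a new transaction
  original.busy = false;
}

int main()
{
  Txn api;  api.transid[0] = 7; api.transid[1] = 11; api.firstOp = 3; api.busy = true;
  Txn copy;
  moveToCopy(api, copy);
  assert(copy.firstOp == 3 && copy.busy);
  assert(api.firstOp == 0xFFFFFFFFu && !api.busy);
  return 0;
}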
-
-void Dbtc::unlinkApiConnect(Signal* signal)
-{
- ApiConnectRecordPtr localApiConnectptr;
- ApiConnectRecord * const regTmpApiPtr = tmpApiConnectptr.p;
- UintR TapiConnectFilesize = capiConnectFilesize;
- UintR TprevGcpConnect = regTmpApiPtr->prevGcpConnect;
- UintR TnextGcpConnect = regTmpApiPtr->nextGcpConnect;
- ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
-
- if (TprevGcpConnect == RNIL) {
- gcpPtr.p->firstApiConnect = TnextGcpConnect;
- jam();
- } else {
- localApiConnectptr.i = TprevGcpConnect;
- jam();
- ptrCheckGuard(localApiConnectptr,
- TapiConnectFilesize, localApiConnectRecord);
- localApiConnectptr.p->nextGcpConnect = TnextGcpConnect;
- }//if
- if (TnextGcpConnect == RNIL) {
- gcpPtr.p->lastApiConnect = TprevGcpConnect;
- jam();
- } else {
- localApiConnectptr.i = TnextGcpConnect;
- jam();
- ptrCheckGuard(localApiConnectptr,
- TapiConnectFilesize, localApiConnectRecord);
- localApiConnectptr.p->prevGcpConnect = TprevGcpConnect;
- }//if
-}//Dbtc::unlinkApiConnect()
-
-void Dbtc::complete010Lab(Signal* signal)
-{
- TcConnectRecordPtr localTcConnectptr;
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- UintR TtcConnectFilesize = ctcConnectFilesize;
- TcConnectRecord *localTcConnectRecord = tcConnectRecord;
-
- localTcConnectptr.p = tcConnectptr.p;
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- UintR TapiConnectptrIndex = apiConnectptr.i;
- UintR Tcount = 0;
- do {
- localTcConnectptr.p->apiConnect = TapiConnectptrIndex;
- localTcConnectptr.p->tcConnectstate = OS_COMPLETING;
-
- /* ************ */
- /* COMPLETE < */
- /* ************ */
- const Uint32 nextTcConnect = localTcConnectptr.p->nextTcConnect;
- sendCompleteLqh(signal, localTcConnectptr.p);
- localTcConnectptr.i = nextTcConnect;
- if (localTcConnectptr.i != RNIL) {
- Tcount++;
- if (Tcount < 16) {
- ptrCheckGuard(localTcConnectptr,
- TtcConnectFilesize, localTcConnectRecord);
- jam();
- continue;
- } else {
- jam();
- if (ERROR_INSERTED(8013)) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- signal->theData[0] = TcContinueB::ZSEND_COMPLETE_LOOP;
- signal->theData[1] = apiConnectptr.i;
- signal->theData[2] = localTcConnectptr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
- return;
- }//if
- } else {
- jam();
- regApiPtr->apiConnectstate = CS_COMPLETE_SENT;
- return;
- }//if
- } while (1);
-}//Dbtc::complete010Lab()
-
-void Dbtc::sendCompleteLqh(Signal* signal,
- TcConnectRecord * const regTcPtr)
-{
- HostRecordPtr Thostptr;
- UintR ThostFilesize = chostFilesize;
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- Thostptr.i = regTcPtr->lastLqhNodeId;
- ptrCheckGuard(Thostptr, ThostFilesize, hostRecord);
- if (Thostptr.p->noOfPackedWordsLqh > 22) {
- jam();
- sendPackedSignalLqh(signal, Thostptr.p);
- } else {
- jam();
- updatePackedList(signal, Thostptr.p, Thostptr.i);
- }//if
-
- UintR Tindex = Thostptr.p->noOfPackedWordsLqh;
- UintR* TDataPtr = &Thostptr.p->packedWordsLqh[Tindex];
- UintR Tdata1 = regTcPtr->lastLqhCon | (ZCOMPLETE << 28);
- UintR Tdata2 = regApiPtr->transid[0];
- UintR Tdata3 = regApiPtr->transid[1];
-
- TDataPtr[0] = Tdata1;
- TDataPtr[1] = Tdata2;
- TDataPtr[2] = Tdata3;
- Thostptr.p->noOfPackedWordsLqh = Tindex + 3;
-}//Dbtc::sendCompleteLqh()
-
-void
-Dbtc::execTC_COMMIT_ACK(Signal* signal){
- jamEntry();
-
- CommitAckMarker key;
- key.transid1 = signal->theData[0];
- key.transid2 = signal->theData[1];
-
- CommitAckMarkerPtr removedMarker;
- m_commitAckMarkerHash.release(removedMarker, key);
- if (removedMarker.i == RNIL) {
- jam();
- warningHandlerLab(signal);
- return;
- }//if
- sendRemoveMarkers(signal, removedMarker.p);
-}
-
-void
-Dbtc::sendRemoveMarkers(Signal* signal, const CommitAckMarker * marker){
- jam();
- const Uint32 noOfLqhs = marker->noOfLqhs;
- const Uint32 transId1 = marker->transid1;
- const Uint32 transId2 = marker->transid2;
-
- for(Uint32 i = 0; i<noOfLqhs; i++){
- jam();
- const NodeId nodeId = marker->lqhNodeId[i];
- sendRemoveMarker(signal, nodeId, transId1, transId2);
- }
-}
-
-void
-Dbtc::sendRemoveMarker(Signal* signal,
- NodeId nodeId,
- Uint32 transid1,
- Uint32 transid2){
- /**
- * Seize host ptr
- */
- HostRecordPtr hostPtr;
- const UintR ThostFilesize = chostFilesize;
- hostPtr.i = nodeId;
- ptrCheckGuard(hostPtr, ThostFilesize, hostRecord);
-
- if (hostPtr.p->noOfPackedWordsLqh > (25 - 3)){
- jam();
- sendPackedSignalLqh(signal, hostPtr.p);
- } else {
- jam();
- updatePackedList(signal, hostPtr.p, hostPtr.i);
- }//if
-
- UintR numWord = hostPtr.p->noOfPackedWordsLqh;
- UintR* dataPtr = &hostPtr.p->packedWordsLqh[numWord];
-
- dataPtr[0] = (ZREMOVE_MARKER << 28);
- dataPtr[1] = transid1;
- dataPtr[2] = transid2;
- hostPtr.p->noOfPackedWordsLqh = numWord + 3;
-}
-
-void Dbtc::execCOMPLETED(Signal* signal)
-{
- TcConnectRecordPtr localTcConnectptr;
- ApiConnectRecordPtr localApiConnectptr;
-
- UintR TtcConnectFilesize = ctcConnectFilesize;
- UintR TapiConnectFilesize = capiConnectFilesize;
- TcConnectRecord *localTcConnectRecord = tcConnectRecord;
- ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
-
-#ifdef ERROR_INSERT
- if (ERROR_INSERTED(8031)) {
- systemErrorLab(signal);
- }//if
- if (ERROR_INSERTED(8019)) {
- CLEAR_ERROR_INSERT_VALUE;
- return;
- }//if
- if (ERROR_INSERTED(8027)) {
- SET_ERROR_INSERT_VALUE(8028);
- return;
- }//if
- if (ERROR_INSERTED(8043)) {
- CLEAR_ERROR_INSERT_VALUE;
- sendSignalWithDelay(cownref, GSN_COMPLETED, signal, 2000, 3);
- return;
- }//if
- if (ERROR_INSERTED(8044)) {
- SET_ERROR_INSERT_VALUE(8047);
- sendSignalWithDelay(cownref, GSN_COMPLETED, signal, 2000, 3);
- return;
- }//if
-#endif
- localTcConnectptr.i = signal->theData[0];
- jamEntry();
- ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
- bool Tcond1 = (localTcConnectptr.p->tcConnectstate != OS_COMPLETING);
- localApiConnectptr.i = localTcConnectptr.p->apiConnect;
- if (Tcond1) {
- warningReport(signal, 6);
- return;
- }//if
- ptrCheckGuard(localApiConnectptr, TapiConnectFilesize,
- localApiConnectRecord);
- UintR Tdata1 = localApiConnectptr.p->transid[0] - signal->theData[1];
- UintR Tdata2 = localApiConnectptr.p->transid[1] - signal->theData[2];
- UintR Tcounter = localApiConnectptr.p->counter - 1;
- ConnectionState TapiConnectstate = localApiConnectptr.p->apiConnectstate;
- Tdata1 = Tdata1 | Tdata2;
- bool TcheckCondition =
- (TapiConnectstate != CS_COMPLETE_SENT) || (Tcounter != 0);
- if (Tdata1 != 0) {
- warningReport(signal, 7);
- return;
- }//if
- setApiConTimer(localApiConnectptr.i, ctcTimer, __LINE__);
- localApiConnectptr.p->counter = Tcounter;
- localTcConnectptr.p->tcConnectstate = OS_COMPLETED;
- localTcConnectptr.p->noOfNodes = 0; // == releaseNodes(signal)
- if (TcheckCondition) {
- jam();
- /*-------------------------------------------------------*/
- // We have not sent all COMPLETE requests yet. We could be
- // in the state that all sent are COMPLETED but we are
- // still waiting for a CONTINUEB to send the rest of the
- // COMPLETE requests.
- /*-------------------------------------------------------*/
- return;
- }//if
- if (ERROR_INSERTED(8021)) {
- jam();
- systemErrorLab(signal);
- }//if
- apiConnectptr = localApiConnectptr;
- releaseTransResources(signal);
-}//Dbtc::execCOMPLETED()
-
-/*---------------------------------------------------------------------------*/
-/* RELEASE_TRANS_RESOURCES */
-/* RELEASE ALL RESOURCES THAT ARE CONNECTED TO THIS TRANSACTION. */
-/*---------------------------------------------------------------------------*/
-void Dbtc::releaseTransResources(Signal* signal)
-{
- TcConnectRecordPtr localTcConnectptr;
- UintR TtcConnectFilesize = ctcConnectFilesize;
- TcConnectRecord *localTcConnectRecord = tcConnectRecord;
-
- localTcConnectptr.i = apiConnectptr.p->firstTcConnect;
- do {
- jam();
- ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
- UintR rtrTcConnectptrIndex = localTcConnectptr.p->nextTcConnect;
- tcConnectptr.i = localTcConnectptr.i;
- tcConnectptr.p = localTcConnectptr.p;
- localTcConnectptr.i = rtrTcConnectptrIndex;
- releaseTcCon();
- } while (localTcConnectptr.i != RNIL);
- handleGcp(signal);
- releaseFiredTriggerData(&apiConnectptr.p->theFiredTriggers);
- releaseAllSeizedIndexOperations(apiConnectptr.p);
- releaseApiConCopy(signal);
-}//Dbtc::releaseTransResources()
-
-/* *********************************************************************>> */
-/* MODULE: HANDLE_GCP */
-/* DESCRIPTION: PERFORMS GLOBAL CHECKPOINT HANDLING AT THE COMPLETION  */
-/* OF THE COMMIT PHASE AND THE ABORT PHASE. WE MUST ENSURE THAT TC */
-/* SENDS GCP_TCFINISHED WHEN ALL TRANSACTIONS BELONGING TO A CERTAIN */
-/* GLOBAL CHECKPOINT HAVE COMPLETED. */
-/* *********************************************************************>> */
-void Dbtc::handleGcp(Signal* signal)
-{
- GcpRecord *localGcpRecord = gcpRecord;
- GcpRecordPtr localGcpPtr;
- UintR TapiConnectptrIndex = apiConnectptr.i;
- UintR TgcpFilesize = cgcpFilesize;
- localGcpPtr.i = apiConnectptr.p->gcpPointer;
- tmpApiConnectptr.i = TapiConnectptrIndex;
- tmpApiConnectptr.p = apiConnectptr.p;
- ptrCheckGuard(localGcpPtr, TgcpFilesize, localGcpRecord);
- gcpPtr.i = localGcpPtr.i;
- gcpPtr.p = localGcpPtr.p;
- unlinkApiConnect(signal);
- if (localGcpPtr.p->firstApiConnect == RNIL) {
- if (localGcpPtr.p->gcpNomoretransRec == ZTRUE) {
- jam();
- tcheckGcpId = localGcpPtr.p->gcpId;
- gcpTcfinished(signal);
- unlinkGcp(signal);
- }//if
- }//if
-}//Dbtc::handleGcp()
-
-void Dbtc::releaseApiConCopy(Signal* signal)
-{
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- UintR TfirstfreeApiConnectCopyOld = cfirstfreeApiConnectCopy;
- cfirstfreeApiConnectCopy = apiConnectptr.i;
- regApiPtr->nextApiConnect = TfirstfreeApiConnectCopyOld;
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
- regApiPtr->apiConnectstate = CS_RESTART;
-}//Dbtc::releaseApiConCopy()
-
-/* ========================================================================= */
-/* ------- RELEASE ALL RECORDS CONNECTED TO A DIRTY WRITE OPERATION ------- */
-/* ========================================================================= */
-void Dbtc::releaseDirtyWrite(Signal* signal)
-{
- unlinkReadyTcCon(signal);
- releaseTcCon();
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
- if (regApiPtr->firstTcConnect == RNIL) {
- jam();
- regApiPtr->apiConnectstate = CS_CONNECTED;
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
- sendtckeyconf(signal, 1);
- }//if
- }//if
-}//Dbtc::releaseDirtyWrite()
-
-/*****************************************************************************
- * L Q H K E Y R E F
- * WHEN LQHKEYREF IS RECEIVED DBTC WILL CHECK IF COMMIT FLAG WAS SENT FROM THE
- * APPLICATION. IF SO, THE WHOLE TRANSACTION WILL BE ROLLED BACK AND SIGNAL
- * TCROLLBACKREP WILL BE SENT TO THE API.
- *
- * OTHERWISE TC WILL CHECK THE ERRORCODE. IF THE ERRORCODE IS INDICATING THAT
- * THE "ROW IS NOT FOUND" FOR UPDATE/READ/DELETE OPERATIONS AND "ROW ALREADY
- * EXISTS" FOR INSERT OPERATIONS, DBTC WILL RELEASE THE OPERATION AND THEN
- * SEND RETURN SIGNAL TCKEYREF TO THE USER. THE USER THEN HAS TO SEND
- * SIGNAL TC_COMMITREQ OR TC_ROLLBACKREQ TO CONCLUDE THE TRANSACTION.
- * IF ANY TCKEYREQ WITH COMMIT IS RECEIVED AND API_CONNECTSTATE EQUALS
- * "REC_LQHREFUSE",
- * THE OPERATION WILL BE TREATED AS AN OPERATION WITHOUT COMMIT. WHEN ANY
- * OTHER FAULTCODE IS RECEIVED THE WHOLE TRANSACTION MUST BE ROLLED BACK
- *****************************************************************************/
-void Dbtc::execLQHKEYREF(Signal* signal)
-{
- const LqhKeyRef * const lqhKeyRef = (LqhKeyRef *)signal->getDataPtr();
- jamEntry();
-
- UintR compare_transid1, compare_transid2;
- UintR TtcConnectFilesize = ctcConnectFilesize;
- /*-------------------------------------------------------------------------
- *
- * RELEASE NODE BUFFER(S) TO INDICATE THAT THIS OPERATION HAS NO
- * TRANSACTION PARTS ACTIVE ANYMORE.
- * LQHKEYREF HAS CLEARED ALL PARTS ON ITS PATH BACK TO TC.
- *-------------------------------------------------------------------------*/
- if (lqhKeyRef->connectPtr < TtcConnectFilesize) {
- /*-----------------------------------------------------------------------
- * WE HAVE TO CHECK THAT THE TRANSACTION IS STILL VALID. FIRST WE CHECK
- * THAT THE LQH IS STILL CONNECTED TO A TC, IF THIS HOLDS TRUE THEN THE
- * TC MUST BE CONNECTED TO AN API CONNECT RECORD.
- * WE MUST ENSURE THAT THE TRANSACTION ID OF THIS API CONNECT
- * RECORD IS STILL THE SAME AS THE ONE LQHKEYREF REFERS TO.
- * IF NOT SIMPLY EXIT AND FORGET THE SIGNAL SINCE THE TRANSACTION IS
- * ALREADY COMPLETED (ABORTED).
- *-----------------------------------------------------------------------*/
- tcConnectptr.i = lqhKeyRef->connectPtr;
- Uint32 errCode = terrorCode = lqhKeyRef->errorCode;
- ptrAss(tcConnectptr, tcConnectRecord);
- TcConnectRecord * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->tcConnectstate == OS_OPERATING) {
- apiConnectptr.i = regTcPtr->apiConnect;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- compare_transid1 = regApiPtr->transid[0] ^ lqhKeyRef->transId1;
- compare_transid2 = regApiPtr->transid[1] ^ lqhKeyRef->transId2;
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
- warningReport(signal, 25);
- return;
- }//if
-
- const ConnectionState state = regApiPtr->apiConnectstate;
- const Uint32 triggeringOp = regTcPtr->triggeringOperation;
- if (triggeringOp != RNIL) {
- jam();
- // This operation was created by a trigger executing operation
- TcConnectRecordPtr opPtr;
- TcConnectRecord *localTcConnectRecord = tcConnectRecord;
-
- const Uint32 currentIndexId = regTcPtr->currentIndexId;
- ndbassert(currentIndexId != 0); // Only index triggers so far
-
- opPtr.i = triggeringOp;
- ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
-
- // The operation executed an index trigger
- const Uint32 opType = regTcPtr->operation;
- if (errCode == ZALREADYEXIST)
- errCode = terrorCode = ZNOTUNIQUE;
- else if (!(opType == ZDELETE && errCode == ZNOT_FOUND)) {
- jam();
- /**
- * "Normal path"
- */
- // fall-through
- } else {
- jam();
- /** ZDELETE && NOT_FOUND */
- TcIndexData* indexData = c_theIndexes.getPtr(currentIndexId);
- if(indexData->indexState == IS_BUILDING && state != CS_ABORTING){
- jam();
- /**
- * Ignore error
- */
- regApiPtr->lqhkeyconfrec++;
-
- unlinkReadyTcCon(signal);
- releaseTcCon();
-
- opPtr.p->triggerExecutionCount--;
- if (opPtr.p->triggerExecutionCount == 0) {
- /**
- * We have completed current trigger execution
- * Continue triggering operation
- */
- jam();
- continueTriggeringOp(signal, opPtr.p);
- }
- return;
- }
- }
- }
-
- Uint32 marker = regTcPtr->commitAckMarker;
- markOperationAborted(regApiPtr, regTcPtr);
-
- if(regApiPtr->apiConnectstate == CS_ABORTING){
- /**
- * We're already aborting, so don't send an "extra" TCKEYREF
- */
- jam();
- return;
- }
-
- const Uint32 abort = regTcPtr->m_execAbortOption;
- if (abort == TcKeyReq::AbortOnError || triggeringOp != RNIL) {
- /**
- * No error is allowed on this operation
- */
- TCKEY_abort(signal, 49);
- return;
- }//if
-
- if (marker != RNIL){
- /**
- * This was an insert/update/delete/write which failed and which
- * contained the commit-ack marker.
- * It is currently unsupported to place a new marker.
- */
- TCKEY_abort(signal, 49);
- return;
- }
-
- /* *************** */
- /* TCKEYREF < */
- /* *************** */
- TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend();
- tcKeyRef->transId[0] = regApiPtr->transid[0];
- tcKeyRef->transId[1] = regApiPtr->transid[1];
- tcKeyRef->errorCode = terrorCode;
- bool isIndexOp = regTcPtr->isIndexOp;
- Uint32 indexOp = tcConnectptr.p->indexOp;
- Uint32 clientData = regTcPtr->clientData;
- unlinkReadyTcCon(signal); /* LINK TC CONNECT RECORD OUT OF LIST */
- releaseTcCon(); /* RELEASE THE TC CONNECT RECORD */
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- if (isIndexOp) {
- jam();
- regApiPtr->lqhkeyreqrec--; // Compensate for extra during read
- tcKeyRef->connectPtr = indexOp;
- EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength);
- apiConnectptr.i = regTcPtr->apiConnect;
- apiConnectptr.p = regApiPtr;
- } else {
- jam();
- tcKeyRef->connectPtr = clientData;
- sendSignal(regApiPtr->ndbapiBlockref,
- GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB);
- }//if
-
- /*---------------------------------------------------------------------
- * SINCE WE ARE NOT ABORTING WE NEED TO UPDATE THE COUNT OF HOW MANY
- * LQHKEYREQ THAT HAVE RETURNED.
- * IF NO MORE OUTSTANDING LQHKEYREQ'S THEN WE NEED TO
- * TCKEYCONF (IF THERE IS ANYTHING TO SEND).
- *---------------------------------------------------------------------*/
- regApiPtr->lqhkeyreqrec--;
- if (regApiPtr->lqhkeyconfrec == regApiPtr->lqhkeyreqrec) {
- if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
- if(regApiPtr->lqhkeyconfrec) {
- jam();
- diverify010Lab(signal);
- } else {
- jam();
- sendtckeyconf(signal, 1);
- regApiPtr->apiConnectstate = CS_CONNECTED;
- }
- return;
- } else if (regApiPtr->tckeyrec > 0 || regApiPtr->m_exec_flag) {
- jam();
- sendtckeyconf(signal, 2);
- return;
- }
- }//if
- return;
-
- } else {
- warningReport(signal, 26);
- }//if
- } else {
- errorReport(signal, 6);
- }//if
- return;
-}//Dbtc::execLQHKEYREF()
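For an operation generated by a unique-index trigger, the raw LQH error is remapped near the top of execLQHKEYREF(): "row already exists" on the index table is reported to the application as "not unique". A sketch of that mapping with illustrative error identifiers (not the real NDB error codes):

// Editor's sketch; the Err values are assumptions.
#include <cassert>

enum Err { ALREADY_EXIST, NOT_UNIQUE, NOT_FOUND, OTHER };

static Err mapIndexTriggerError(Err lqhError)
{
  // A duplicate on the unique-index table surfaces as a uniqueness violation
  // on the base operation; everything else is passed through unchanged.
  return lqhError == ALREADY_EXIST ? NOT_UNIQUE : lqhError;
}

int main()
{
  assert(mapIndexTriggerError(ALREADY_EXIST) == NOT_UNIQUE);
  assert(mapIndexTriggerError(NOT_FOUND) == NOT_FOUND);
  return 0;
}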
-
-void Dbtc::clearCommitAckMarker(ApiConnectRecord * const regApiPtr,
- TcConnectRecord * const regTcPtr)
-{
- const Uint32 commitAckMarker = regTcPtr->commitAckMarker;
- if (regApiPtr->commitAckMarker == RNIL)
- ndbassert(commitAckMarker == RNIL);
- if (commitAckMarker != RNIL)
- ndbassert(regApiPtr->commitAckMarker != RNIL);
- if(commitAckMarker != RNIL){
- jam();
- m_commitAckMarkerHash.release(commitAckMarker);
- regTcPtr->commitAckMarker = RNIL;
- regApiPtr->commitAckMarker = RNIL;
- }
-}
-
-void Dbtc::markOperationAborted(ApiConnectRecord * const regApiPtr,
- TcConnectRecord * const regTcPtr)
-{
- /*------------------------------------------------------------------------
- * RELEASE NODES TO INDICATE THAT THE OPERATION IS ALREADY ABORTED IN THE
- * LQH'S. ALSO SET THE STATE TO ABORTING TO INDICATE THE ABORT IS
- * ALREADY COMPLETED.
- *------------------------------------------------------------------------*/
- regTcPtr->noOfNodes = 0; // == releaseNodes(signal)
- regTcPtr->tcConnectstate = OS_ABORTING;
- clearCommitAckMarker(regApiPtr, regTcPtr);
-}
-
-/*--------------------------------------*/
-/* EXIT AND WAIT FOR SIGNAL TC_COMMITREQ */
-/* OR TCROLLBACKREQ FROM THE USER TO */
-/* CONTINUE THE TRANSACTION */
-/*--------------------------------------*/
-void Dbtc::execTC_COMMITREQ(Signal* signal)
-{
- UintR compare_transid1, compare_transid2;
-
- jamEntry();
- apiConnectptr.i = signal->theData[0];
- if (apiConnectptr.i < capiConnectFilesize) {
- ptrAss(apiConnectptr, apiConnectRecord);
- compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
- compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
- jam();
- return;
- }//if
-
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
-
- const Uint32 apiConnectPtr = regApiPtr->ndbapiConnect;
- const Uint32 apiBlockRef = regApiPtr->ndbapiBlockref;
- const Uint32 transId1 = regApiPtr->transid[0];
- const Uint32 transId2 = regApiPtr->transid[1];
- Uint32 errorCode = 0;
-
- regApiPtr->m_exec_flag = 1;
- switch (regApiPtr->apiConnectstate) {
- case CS_STARTED:
- tcConnectptr.i = regApiPtr->firstTcConnect;
- if (tcConnectptr.i != RNIL) {
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- if (regApiPtr->lqhkeyconfrec == regApiPtr->lqhkeyreqrec) {
- jam();
- /*******************************************************************/
- // The proper case where the application is waiting for commit or
- // abort order.
- // Start the commit order.
- /*******************************************************************/
- regApiPtr->returnsignal = RS_TC_COMMITCONF;
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- diverify010Lab(signal);
- return;
- } else {
- jam();
- /*******************************************************************/
- // The transaction is started but not all operations are completed.
- // It is not possible to commit the transaction in this state.
- // We will abort it instead.
- /*******************************************************************/
- regApiPtr->returnsignal = RS_NO_RETURN;
- errorCode = ZTRANS_STATUS_ERROR;
- abort010Lab(signal);
- }//if
- } else {
- jam();
- /**
- * No operations, accept commit
- */
- TcCommitConf * const commitConf = (TcCommitConf *)&signal->theData[0];
- commitConf->apiConnectPtr = apiConnectPtr;
- commitConf->transId1 = transId1;
- commitConf->transId2 = transId2;
- commitConf->gci = 0;
- sendSignal(apiBlockRef, GSN_TC_COMMITCONF, signal,
- TcCommitConf::SignalLength, JBB);
-
- regApiPtr->returnsignal = RS_NO_RETURN;
- releaseAbortResources(signal);
- return;
- }//if
- break;
- case CS_RECEIVING:
- jam();
- /***********************************************************************/
- // A transaction is still receiving data. We cannot commit an unfinished
- // transaction. We will abort it instead.
- /***********************************************************************/
- regApiPtr->returnsignal = RS_NO_RETURN;
- errorCode = ZPREPAREINPROGRESS;
- abort010Lab(signal);
- break;
-
- case CS_START_COMMITTING:
- case CS_COMMITTING:
- case CS_COMMIT_SENT:
- case CS_COMPLETING:
- case CS_COMPLETE_SENT:
- case CS_REC_COMMITTING:
- case CS_PREPARE_TO_COMMIT:
- jam();
- /***********************************************************************/
- // The transaction is already performing a commit but it is not concluded
- // yet.
- /***********************************************************************/
- errorCode = ZCOMMITINPROGRESS;
- break;
- case CS_ABORTING:
- jam();
- errorCode = ZABORTINPROGRESS;
- break;
- case CS_START_SCAN:
- jam();
- /***********************************************************************/
- // The transaction is a scan. Scans cannot commit
- /***********************************************************************/
- errorCode = ZSCANINPROGRESS;
- break;
- case CS_PREPARED:
- jam();
- return;
- case CS_START_PREPARING:
- jam();
- return;
- case CS_REC_PREPARING:
- jam();
- return;
- break;
- default:
- warningHandlerLab(signal);
- return;
- }//switch
- TcCommitRef * const commitRef = (TcCommitRef*)&signal->theData[0];
- commitRef->apiConnectPtr = apiConnectPtr;
- commitRef->transId1 = transId1;
- commitRef->transId2 = transId2;
- commitRef->errorCode = errorCode;
- sendSignal(apiBlockRef, GSN_TC_COMMITREF, signal,
- TcCommitRef::SignalLength, JBB);
- return;
- } else /** apiConnectptr.i >= capiConnectFilesize */ {
- jam();
- warningHandlerLab(signal);
- return;
- }
-}//Dbtc::execTC_COMMITREQ()
-
-void Dbtc::execTCROLLBACKREQ(Signal* signal)
-{
- UintR compare_transid1, compare_transid2;
-
- jamEntry();
- apiConnectptr.i = signal->theData[0];
- if (apiConnectptr.i >= capiConnectFilesize) {
- goto TC_ROLL_warning;
- }//if
- ptrAss(apiConnectptr, apiConnectRecord);
- compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
- compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
- jam();
- return;
- }//if
-
- apiConnectptr.p->m_exec_flag = 1;
- switch (apiConnectptr.p->apiConnectstate) {
- case CS_STARTED:
- case CS_RECEIVING:
- jam();
- apiConnectptr.p->returnsignal = RS_TCROLLBACKCONF;
- abort010Lab(signal);
- return;
- case CS_CONNECTED:
- jam();
- signal->theData[0] = apiConnectptr.p->ndbapiConnect;
- signal->theData[1] = apiConnectptr.p->transid[0];
- signal->theData[2] = apiConnectptr.p->transid[1];
- sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKCONF,
- signal, 3, JBB);
- break;
- case CS_START_SCAN:
- case CS_PREPARE_TO_COMMIT:
- case CS_COMMITTING:
- case CS_COMMIT_SENT:
- case CS_COMPLETING:
- case CS_COMPLETE_SENT:
- case CS_WAIT_COMMIT_CONF:
- case CS_WAIT_COMPLETE_CONF:
- case CS_RESTART:
- case CS_DISCONNECTED:
- case CS_START_COMMITTING:
- case CS_REC_COMMITTING:
- jam();
- /* ***************< */
- /* TC_ROLLBACKREF < */
- /* ***************< */
- signal->theData[0] = apiConnectptr.p->ndbapiConnect;
- signal->theData[1] = apiConnectptr.p->transid[0];
- signal->theData[2] = apiConnectptr.p->transid[1];
- signal->theData[3] = ZROLLBACKNOTALLOWED;
- signal->theData[4] = apiConnectptr.p->apiConnectstate;
- sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREF,
- signal, 5, JBB);
- break;
- /* SEND A REFUSAL SIGNAL*/
- case CS_ABORTING:
- jam();
- if (apiConnectptr.p->abortState == AS_IDLE) {
- jam();
- signal->theData[0] = apiConnectptr.p->ndbapiConnect;
- signal->theData[1] = apiConnectptr.p->transid[0];
- signal->theData[2] = apiConnectptr.p->transid[1];
- sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKCONF,
- signal, 3, JBB);
- } else {
- jam();
- apiConnectptr.p->returnsignal = RS_TCROLLBACKCONF;
- }//if
- break;
- case CS_WAIT_ABORT_CONF:
- jam();
- apiConnectptr.p->returnsignal = RS_TCROLLBACKCONF;
- break;
- case CS_START_PREPARING:
- jam();
- case CS_PREPARED:
- jam();
- case CS_REC_PREPARING:
- jam();
- default:
- goto TC_ROLL_system_error;
- break;
- }//switch
- return;
-
-TC_ROLL_warning:
- jam();
- warningHandlerLab(signal);
- return;
-
-TC_ROLL_system_error:
- jam();
- systemErrorLab(signal);
- return;
-}//Dbtc::execTCROLLBACKREQ()
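
The transaction-id filter at the top of execTCROLLBACKREQ (and again in execABORTED further down) is easy to misread. A minimal standalone sketch of the same branch-free check, using plain uint32_t and a hypothetical helper name of my own rather than the block's UintR members:

#include <cstdint>

// Sketch (not part of the patch): XOR each 32-bit half of the transaction id
// against the stored id and OR the results; any mismatching bit leaves the
// OR non-zero, so one comparison covers the full 64-bit id.
static bool sameTransId(uint32_t storedId1, uint32_t storedId2,
                        uint32_t signalId1, uint32_t signalId2)
{
  const uint32_t diff1 = storedId1 ^ signalId1;
  const uint32_t diff2 = storedId2 ^ signalId2;
  return (diff1 | diff2) == 0;   // zero only when both halves match
}
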
-
-void Dbtc::execTC_HBREP(Signal* signal)
-{
- const TcHbRep * const tcHbRep =
- (TcHbRep *)signal->getDataPtr();
-
- jamEntry();
- apiConnectptr.i = tcHbRep->apiConnectPtr;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
-
- if (apiConnectptr.p->transid[0] == tcHbRep->transId1 &&
- apiConnectptr.p->transid[1] == tcHbRep->transId2){
-
- if (getApiConTimer(apiConnectptr.i) != 0){
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- } else {
- DEBUG("TCHBREP received when timer was off apiConnectptr.i="
- << apiConnectptr.i);
- }
- }
-}//Dbtc::execTC_HBREP()
-
-/*
-4.3.15 ABORT
------------
-*/
-/*****************************************************************************/
-/* A B O R T */
-/* */
-/*****************************************************************************/
-void Dbtc::warningReport(Signal* signal, int place)
-{
- switch (place) {
- case 0:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "ABORTED to not active TC record" << endl;
-#endif
- break;
- case 1:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "ABORTED to TC record active with new transaction" << endl;
-#endif
- break;
- case 2:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "ABORTED to active TC record not expecting ABORTED" << endl;
-#endif
- break;
- case 3:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "ABORTED to TC rec active with trans but wrong node" << endl;
- ndbout << "This is ok when aborting in node failure situations" << endl;
-#endif
- break;
- case 4:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received COMMITTED in wrong state in Dbtc" << endl;
-#endif
- break;
- case 5:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received COMMITTED with wrong transid in Dbtc" << endl;
-#endif
- break;
- case 6:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received COMPLETED in wrong state in Dbtc" << endl;
-#endif
- break;
- case 7:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received COMPLETED with wrong transid in Dbtc" << endl;
-#endif
- break;
- case 8:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received COMMITCONF with tc-rec in wrong state in Dbtc" << endl;
-#endif
- break;
- case 9:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received COMMITCONF with api-rec in wrong state in Dbtc" <<endl;
-#endif
- break;
- case 10:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received COMMITCONF with wrong transid in Dbtc" << endl;
-#endif
- break;
- case 11:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received COMMITCONF from wrong nodeid in Dbtc" << endl;
-#endif
- break;
- case 12:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received COMPLETECONF, tc-rec in wrong state in Dbtc" << endl;
-#endif
- break;
- case 13:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received COMPLETECONF, api-rec in wrong state in Dbtc" << endl;
-#endif
- break;
- case 14:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received COMPLETECONF with wrong transid in Dbtc" << endl;
-#endif
- break;
- case 15:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received COMPLETECONF from wrong nodeid in Dbtc" << endl;
-#endif
- break;
- case 16:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received ABORTCONF, tc-rec in wrong state in Dbtc" << endl;
-#endif
- break;
- case 17:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received ABORTCONF, api-rec in wrong state in Dbtc" << endl;
-#endif
- break;
- case 18:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received ABORTCONF with wrong transid in Dbtc" << endl;
-#endif
- break;
- case 19:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received ABORTCONF from wrong nodeid in Dbtc" << endl;
-#endif
- break;
- case 20:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Time-out waiting for ABORTCONF in Dbtc" << endl;
-#endif
- break;
- case 21:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Time-out waiting for COMMITCONF in Dbtc" << endl;
-#endif
- break;
- case 22:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Time-out waiting for COMPLETECONF in Dbtc" << endl;
-#endif
- break;
- case 23:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received LQHKEYCONF in wrong tc-state in Dbtc" << endl;
-#endif
- break;
- case 24:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received LQHKEYREF to wrong transid in Dbtc" << endl;
-#endif
- break;
- case 25:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received LQHKEYREF in wrong state in Dbtc" << endl;
-#endif
- break;
- case 26:
- jam();
-#ifdef ABORT_TRACE
- ndbout << "Received LQHKEYCONF to wrong transid in Dbtc" << endl;
-#endif
- break;
- case 27:
- jam();
- // printState(signal, 27);
-#ifdef ABORT_TRACE
- ndbout << "Received LQHKEYCONF in wrong api-state in Dbtc" << endl;
-#endif
- break;
- default:
- jam();
- break;
- }//switch
- return;
-}//Dbtc::warningReport()
-
-void Dbtc::errorReport(Signal* signal, int place)
-{
- switch (place) {
- case 0:
- jam();
- break;
- case 1:
- jam();
- break;
- case 2:
- jam();
- break;
- case 3:
- jam();
- break;
- case 4:
- jam();
- break;
- case 5:
- jam();
- break;
- case 6:
- jam();
- break;
- default:
- jam();
- break;
- }//switch
- systemErrorLab(signal);
- return;
-}//Dbtc::errorReport()
-
-/* ------------------------------------------------------------------------- */
-/* ------- ENTER ABORTED ------- */
-/* */
-/*-------------------------------------------------------------------------- */
-void Dbtc::execABORTED(Signal* signal)
-{
- UintR compare_transid1, compare_transid2;
-
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- UintR Tnodeid = signal->theData[3];
- UintR TlastLqhInd = signal->theData[4];
-
- if (ERROR_INSERTED(8040)) {
- CLEAR_ERROR_INSERT_VALUE;
- sendSignalWithDelay(cownref, GSN_ABORTED, signal, 2000, 5);
- return;
- }//if
- /*------------------------------------------------------------------------
- * ONE PARTICIPANT IN THE TRANSACTION HAS REPORTED THAT IT IS ABORTED.
- *------------------------------------------------------------------------*/
- if (tcConnectptr.i >= ctcConnectFilesize) {
- errorReport(signal, 0);
- return;
- }//if
- /*-------------------------------------------------------------------------
- * WE HAVE TO CHECK THAT THIS IS NOT AN OLD SIGNAL BELONGING TO A
- * TRANSACTION ALREADY ABORTED. THIS CAN HAPPEN WHEN TIME-OUT OCCURS
- * IN TC WAITING FOR ABORTED.
- *-------------------------------------------------------------------------*/
- ptrAss(tcConnectptr, tcConnectRecord);
- if (tcConnectptr.p->tcConnectstate != OS_ABORT_SENT) {
- warningReport(signal, 2);
- return;
- /*-----------------------------------------------------------------------*/
- // ABORTED reported on an operation not expecting ABORT.
- /*-----------------------------------------------------------------------*/
- }//if
- apiConnectptr.i = tcConnectptr.p->apiConnect;
- if (apiConnectptr.i >= capiConnectFilesize) {
- warningReport(signal, 0);
- return;
- }//if
- ptrAss(apiConnectptr, apiConnectRecord);
- compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
- compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
- warningReport(signal, 1);
- return;
- }//if
- if (ERROR_INSERTED(8024)) {
- jam();
- systemErrorLab(signal);
- }//if
-
- /**
- * Release marker
- */
- clearCommitAckMarker(apiConnectptr.p, tcConnectptr.p);
-
- Uint32 i;
- Uint32 Tfound = 0;
- for (i = 0; i < tcConnectptr.p->noOfNodes; i++) {
- jam();
- if (tcConnectptr.p->tcNodedata[i] == Tnodeid) {
- /*---------------------------------------------------------------------
- * We have received ABORTED from one of the participants in this
- * operation in this aborted transaction.
- * Record all nodes that have completed abort.
- * If the last indicator is set it means that no more replicas have
- * heard of the operation and they are thus also considered aborted.
- *---------------------------------------------------------------------*/
- jam();
- Tfound = 1;
- clearTcNodeData(signal, TlastLqhInd, i);
- }//if
- }//for
- if (Tfound == 0) {
- warningReport(signal, 3);
- return;
- }
- for (i = 0; i < tcConnectptr.p->noOfNodes; i++) {
- if (tcConnectptr.p->tcNodedata[i] != 0) {
- /*--------------------------------------------------------------------
- * There are still outstanding ABORTED's to wait for.
- *--------------------------------------------------------------------*/
- jam();
- return;
- }//if
- }//for
- tcConnectptr.p->noOfNodes = 0;
- tcConnectptr.p->tcConnectstate = OS_ABORTING;
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- apiConnectptr.p->counter--;
- if (apiConnectptr.p->counter > 0) {
- jam();
- /*----------------------------------------------------------------------
- * WE ARE STILL WAITING FOR MORE PARTICIPANTS TO SEND ABORTED.
- *----------------------------------------------------------------------*/
- return;
- }//if
- /*------------------------------------------------------------------------*/
- /* */
- /* WE HAVE NOW COMPLETED THE ABORT PROCESS. WE HAVE RECEIVED ABORTED */
- /* FROM ALL PARTICIPANTS IN THE TRANSACTION. WE CAN NOW RELEASE ALL */
- /* RESOURCES CONNECTED TO THE TRANSACTION AND SEND THE ABORT RESPONSE */
- /*------------------------------------------------------------------------*/
- releaseAbortResources(signal);
-}//Dbtc::execABORTED()
-
-void Dbtc::clearTcNodeData(Signal* signal,
- UintR TLastLqhIndicator,
- UintR Tstart)
-{
- UintR Ti;
- if (TLastLqhIndicator == ZTRUE) {
- for (Ti = Tstart ; Ti < tcConnectptr.p->noOfNodes; Ti++) {
- jam();
- tcConnectptr.p->tcNodedata[Ti] = 0;
- }//for
- } else {
- jam();
- tcConnectptr.p->tcNodedata[Tstart] = 0;
- }//if
-}//clearTcNodeData()
-
-void Dbtc::abortErrorLab(Signal* signal)
-{
- ptrGuard(apiConnectptr);
- ApiConnectRecord * transP = apiConnectptr.p;
- if (transP->apiConnectstate == CS_ABORTING && transP->abortState != AS_IDLE){
- jam();
- return;
- }
- transP->returnsignal = RS_TCROLLBACKREP;
- if(transP->returncode == 0){
- jam();
- transP->returncode = terrorCode;
- }
- abort010Lab(signal);
-}//Dbtc::abortErrorLab()
-
-void Dbtc::abort010Lab(Signal* signal)
-{
- ApiConnectRecord * transP = apiConnectptr.p;
- if (transP->apiConnectstate == CS_ABORTING && transP->abortState != AS_IDLE){
- jam();
- return;
- }
- transP->apiConnectstate = CS_ABORTING;
- /*------------------------------------------------------------------------*/
- /* AN ABORT DECISION HAS BEEN TAKEN FOR SOME REASON. WE NEED TO ABORT */
- /* ALL PARTICIPANTS IN THE TRANSACTION. */
- /*------------------------------------------------------------------------*/
- transP->abortState = AS_ACTIVE;
- transP->counter = 0;
-
- if (transP->firstTcConnect == RNIL) {
- jam();
- /*-----------------------------------------------------------------------*/
- /* WE HAVE NO PARTICIPANTS IN THE TRANSACTION. */
- /*-----------------------------------------------------------------------*/
- releaseAbortResources(signal);
- return;
- }//if
- tcConnectptr.i = transP->firstTcConnect;
- abort015Lab(signal);
-}//Dbtc::abort010Lab()
-
-/*--------------------------------------------------------------------------*/
-/* */
-/* WE WILL ABORT ONE NODE PER OPERATION AT A TIME. THIS IS TO KEEP */
-/* ERROR HANDLING OF THIS PROCESS FAIRLY SIMPLE AND TRACTABLE. */
-/* EVEN IF NO NODE WITH THIS PARTICULAR NODE NUMBER NEEDS TO BE ABORTED */
-/* WE MUST ENSURE THAT ALL NODES ARE CHECKED. THUS A FAULTY NODE DOES */
-/* NOT MEAN THAT ALL NODES IN AN OPERATION ARE ABORTED. FOR THIS REASON */
-/* WE SET TCONTINUE_ABORT TO TRUE WHEN A FAULTY NODE IS DETECTED. */
-/*--------------------------------------------------------------------------*/
-void Dbtc::abort015Lab(Signal* signal)
-{
- Uint32 TloopCount = 0;
-ABORT020:
- jam();
- TloopCount++;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- switch (tcConnectptr.p->tcConnectstate) {
- case OS_WAIT_DIH:
- case OS_WAIT_KEYINFO:
- case OS_WAIT_ATTR:
- jam();
- /*----------------------------------------------------------------------*/
- /* WE ARE STILL WAITING FOR MORE KEYINFO/ATTRINFO. WE HAVE NOT CONTACTED*/
- /* ANY LQH YET AND SO WE CAN SIMPLY SET STATE TO ABORTING. */
- /*----------------------------------------------------------------------*/
- tcConnectptr.p->noOfNodes = 0; // == releaseAbort(signal)
- tcConnectptr.p->tcConnectstate = OS_ABORTING;
- break;
- case OS_CONNECTED:
- jam();
- /*-----------------------------------------------------------------------
- * WE ARE STILL IN THE INITIAL PHASE OF THIS OPERATION.
- * NEED NOT BOTHER ABOUT ANY LQH ABORTS.
- *-----------------------------------------------------------------------*/
- tcConnectptr.p->noOfNodes = 0; // == releaseAbort(signal)
- tcConnectptr.p->tcConnectstate = OS_ABORTING;
- break;
- case OS_PREPARED:
- jam();
- case OS_OPERATING:
- jam();
- /*----------------------------------------------------------------------
- * WE HAVE SENT LQHKEYREQ AND ARE IN SOME STATE OF EITHER STILL
- * SENDING THE OPERATION, WAITING FOR REPLIES, WAITING FOR MORE
- * ATTRINFO OR OPERATION IS PREPARED. WE NEED TO ABORT ALL LQH'S.
- *----------------------------------------------------------------------*/
- releaseAndAbort(signal);
- tcConnectptr.p->tcConnectstate = OS_ABORT_SENT;
- TloopCount += 127;
- break;
- case OS_ABORTING:
- jam();
- break;
- case OS_ABORT_SENT:
- jam();
- DEBUG("ABORT_SENT state in abort015Lab(), not expected");
- systemErrorLab(signal);
- return;
- default:
- jam();
- DEBUG("tcConnectstate = " << tcConnectptr.p->tcConnectstate);
- systemErrorLab(signal);
- return;
- }//switch
-
- if (tcConnectptr.p->nextTcConnect != RNIL) {
- jam();
- tcConnectptr.i = tcConnectptr.p->nextTcConnect;
- if (TloopCount < 1024) {
- goto ABORT020;
- } else {
- jam();
- /*---------------------------------------------------------------------
- * Reset timer to avoid time-out in real-time break.
- * Increase counter to ensure that we don't think that all ABORTED have
- * been received before all have been sent.
- *---------------------------------------------------------------------*/
- apiConnectptr.p->counter++;
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- signal->theData[0] = TcContinueB::ZABORT_BREAK;
- signal->theData[1] = tcConnectptr.i;
- signal->theData[2] = apiConnectptr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
- return;
- }//if
- }//if
- if (apiConnectptr.p->counter > 0) {
- jam();
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- return;
- }//if
- /*-----------------------------------------------------------------------
- * WE HAVE NOW COMPLETED THE ABORT PROCESS. WE HAVE RECEIVED ABORTED
- * FROM ALL PARTICIPANTS IN THE TRANSACTION. WE CAN NOW RELEASE ALL
- * RESOURCES CONNECTED TO THE TRANSACTION AND SEND THE ABORT RESPONSE
- *------------------------------------------------------------------------*/
- releaseAbortResources(signal);
-}//Dbtc::abort015Lab()
-
-/*--------------------------------------------------------------------------*/
-/* RELEASE KEY AND ATTRINFO OBJECTS AND SEND ABORT TO THE LQH BLOCK. */
-/*--------------------------------------------------------------------------*/
-int Dbtc::releaseAndAbort(Signal* signal)
-{
- HostRecordPtr localHostptr;
- UintR TnoLoops = tcConnectptr.p->noOfNodes;
-
- apiConnectptr.p->counter++;
- bool prevAlive = false;
- for (Uint32 Ti = 0; Ti < TnoLoops ; Ti++) {
- localHostptr.i = tcConnectptr.p->tcNodedata[Ti];
- ptrCheckGuard(localHostptr, chostFilesize, hostRecord);
- if (localHostptr.p->hostStatus == HS_ALIVE) {
- jam();
- if (prevAlive) {
- // if previous is alive, its LQH forwards abort to this node
- jam();
- continue;
- }
- /* ************< */
- /* ABORT < */
- /* ************< */
- tblockref = calcLqhBlockRef(localHostptr.i);
- signal->theData[0] = tcConnectptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = apiConnectptr.p->transid[0];
- signal->theData[3] = apiConnectptr.p->transid[1];
- sendSignal(tblockref, GSN_ABORT, signal, 4, JBB);
- prevAlive = true;
- } else {
- jam();
- signal->theData[0] = tcConnectptr.i;
- signal->theData[1] = apiConnectptr.p->transid[0];
- signal->theData[2] = apiConnectptr.p->transid[1];
- signal->theData[3] = localHostptr.i;
- signal->theData[4] = ZFALSE;
- sendSignal(cownref, GSN_ABORTED, signal, 5, JBB);
- prevAlive = false;
- }//if
- }//for
- return 1;
-}//Dbtc::releaseAndAbort()
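
The prevAlive bookkeeping in releaseAndAbort is the subtle part: ABORT is only sent to the first live replica in each run of live replicas, because that LQH forwards the abort along the chain, while every dead replica gets a locally generated ABORTED so the abort protocol still terminates. A small sketch with hypothetical types, not the original code, that isolates just this decision:

#include <cstdint>
#include <vector>

// Sketch: decide, per replica, whether to send ABORT, rely on the previous
// live replica's LQH to forward it, or fake an ABORTED for a dead replica.
enum class AbortAction { SendAbort, SkipChained, FakeAborted };

static std::vector<AbortAction> planAbort(const std::vector<bool>& replicaAlive)
{
  std::vector<AbortAction> plan;
  bool prevAlive = false;
  for (bool alive : replicaAlive) {
    if (alive) {
      plan.push_back(prevAlive ? AbortAction::SkipChained   // chained via LQH
                               : AbortAction::SendAbort);   // first live node
      prevAlive = true;
    } else {
      plan.push_back(AbortAction::FakeAborted);             // dead replica
      prevAlive = false;
    }
  }
  return plan;
}
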
-
-/* ------------------------------------------------------------------------- */
-/* ------- ENTER TIME_SIGNAL ------- */
-/* */
-/* ------------------------------------------------------------------------- */
-void Dbtc::execTIME_SIGNAL(Signal* signal)
-{
-
- jamEntry();
- ctcTimer++;
- if (csystemStart != SSS_TRUE) {
- jam();
- return;
- }//if
- checkStartTimeout(signal);
- checkStartFragTimeout(signal);
-}//Dbtc::execTIME_SIGNAL()
-
-/*------------------------------------------------*/
-/* Start timeout handling if not already going on */
-/*------------------------------------------------*/
-void Dbtc::checkStartTimeout(Signal* signal)
-{
- ctimeOutCheckCounter++;
- if (ctimeOutCheckActive == TOCS_TRUE) {
- jam();
- // Check heartbeat of timeout loop
- if(ctimeOutCheckHeartbeat > ctimeOutCheckLastHeartbeat){
- jam();
- ctimeOutMissedHeartbeats = 0;
- }else{
- jam();
- ctimeOutMissedHeartbeats++;
- if (ctimeOutMissedHeartbeats > 100){
- jam();
- systemErrorLab(signal);
- }
- }
- ctimeOutCheckLastHeartbeat = ctimeOutCheckHeartbeat;
- return;
- }//if
- if (ctimeOutCheckCounter < ctimeOutCheckDelay) {
- jam();
- /*------------------------------------------------------------------*/
- /* */
- /* NO TIME-OUT CHECKED THIS TIME. WAIT MORE. */
- /*------------------------------------------------------------------*/
- return;
- }//if
- ctimeOutCheckActive = TOCS_TRUE;
- ctimeOutCheckCounter = 0;
- timeOutLoopStartLab(signal, 0); // 0 is first api connect record
- return;
-}//Dbtc::checkStartTimeout()
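
The heartbeat bookkeeping above (ctimeOutCheckHeartbeat versus ctimeOutCheckLastHeartbeat) is a watchdog on the timeout scan itself. A minimal sketch of that pattern with hypothetical names, not part of the patch:

#include <cstdint>

// Sketch: the timeout scan bumps a heartbeat counter as it makes progress;
// the caller only escalates after the counter has stalled for more than
// 100 consecutive TIME_SIGNAL ticks.
struct TimeoutLoopWatchdog
{
  uint32_t lastHeartbeat = 0;
  uint32_t missed = 0;

  // Returns false when the loop must be considered stuck.
  bool stillAlive(uint32_t heartbeat)
  {
    if (heartbeat > lastHeartbeat)
      missed = 0;          // progress was made since the last tick
    else
      missed++;            // no progress this tick
    lastHeartbeat = heartbeat;
    return missed <= 100;
  }
};
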
-
-/*----------------------------------------------------------------*/
-/* Start fragment (scan) timeout handling if not already going on */
-/*----------------------------------------------------------------*/
-void Dbtc::checkStartFragTimeout(Signal* signal)
-{
- ctimeOutCheckFragCounter++;
- if (ctimeOutCheckFragActive == TOCS_TRUE) {
- jam();
- return;
- }//if
- if (ctimeOutCheckFragCounter < ctimeOutCheckDelay) {
- jam();
- /*------------------------------------------------------------------*/
- /* NO TIME-OUT CHECKED THIS TIME. WAIT MORE. */
- /*------------------------------------------------------------------*/
- return;
- }//if
-
- // Go through the fragment records and look for timeout in a scan.
- ctimeOutCheckFragActive = TOCS_TRUE;
- ctimeOutCheckFragCounter = 0;
- timeOutLoopStartFragLab(signal, 0); // 0 means first scan record
-}//checkStartFragTimeout()
-
-/*------------------------------------------------------------------*/
-/* IT IS NOW TIME TO CHECK WHETHER ANY TRANSACTIONS HAVE */
-/* BEEN DELAYED FOR SO LONG THAT WE ARE FORCED TO PERFORM */
-/* SOME ACTION, EITHER ABORT OR RESEND OR REMOVE A NODE FROM */
-/* THE WAITING PART OF A PROTOCOL. */
-/*
-The algorithm used here is to check 1024 transactions at a time before
-doing a real-time break.
-To avoid aborting both transactions in a deadlock detected by time-out
-we insert a random extra time-out of up to 630 ms by using the lowest
-six bits of the api connect reference.
-We spread it out from 0 to 630 ms if the base time-out is larger than 3 sec,
-from 0 to 70 ms if the base time-out is smaller than 300 ms,
-and otherwise from 0 to 310 ms.
-*/
-/*------------------------------------------------------------------*/
-void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr)
-{
- Uint32 end_ptr, time_passed, time_out_value, mask_value;
- const Uint32 api_con_sz= capiConnectFilesize;
- const Uint32 tc_timer= ctcTimer;
- const Uint32 time_out_param= ctimeOutValue;
-
- ctimeOutCheckHeartbeat = tc_timer;
-
- if (api_con_ptr + 1024 < api_con_sz) {
- jam();
- end_ptr= api_con_ptr + 1024;
- } else {
- jam();
- end_ptr= api_con_sz;
- }
- if (time_out_param > 300) {
- jam();
- mask_value= 63;
- } else if (time_out_param < 30) {
- jam();
- mask_value= 7;
- } else {
- jam();
- mask_value= 31;
- }
- for ( ; api_con_ptr < end_ptr; api_con_ptr++) {
- Uint32 api_timer= getApiConTimer(api_con_ptr);
- jam();
- if (api_timer != 0) {
- time_out_value= time_out_param + (api_con_ptr & mask_value);
- time_passed= tc_timer - api_timer;
- if (time_passed > time_out_value) {
- jam();
- timeOutFoundLab(signal, api_con_ptr);
- return;
- }
- }
- }
- if (api_con_ptr == api_con_sz) {
- jam();
- /*------------------------------------------------------------------*/
- /* */
- /* WE HAVE NOW CHECKED ALL TRANSACTIONS FOR TIME-OUT AND ALSO */
- /* STARTED TIME-OUT HANDLING OF THOSE WE FOUND. WE ARE NOW */
- /* READY AND CAN WAIT FOR THE NEXT TIME-OUT CHECK. */
- /*------------------------------------------------------------------*/
- ctimeOutCheckActive = TOCS_FALSE;
- } else {
- jam();
- sendContinueTimeOutControl(signal, api_con_ptr);
- }
- return;
-}//Dbtc::timeOutLoopStartLab()
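
A small standalone sketch of the jitter computation described in the comment before timeOutLoopStartLab, assuming one TC timer tick corresponds to 10 ms (which is what the 630/310/70 ms figures imply); the function name is hypothetical:

#include <cstdint>

// Sketch: the extra time-out is taken from the low bits of the api-connect
// index, so two transactions deadlocking each other rarely expire in the
// same tick.
static uint32_t jitteredTimeOutTicks(uint32_t baseTimeOutTicks,
                                     uint32_t apiConIndex)
{
  uint32_t mask;
  if (baseTimeOutTicks > 300)        // base > 3 s    -> jitter 0..630 ms
    mask = 63;
  else if (baseTimeOutTicks < 30)    // base < 300 ms -> jitter 0..70 ms
    mask = 7;
  else                               // otherwise     -> jitter 0..310 ms
    mask = 31;
  return baseTimeOutTicks + (apiConIndex & mask);
}
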
-
-void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr)
-{
- sendContinueTimeOutControl(signal, TapiConPtr + 1);
-
- apiConnectptr.i = TapiConPtr;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- /*------------------------------------------------------------------*/
- /* */
- /* THIS TRANSACTION HAS EXPERIENCED A TIME-OUT AND WE NEED TO */
- /* FIND OUT WHAT WE NEED TO DO BASED ON THE STATE INFORMATION.*/
- /*------------------------------------------------------------------*/
- DEBUG("[ H'" << hex << apiConnectptr.p->transid[0]
- << " H'" << apiConnectptr.p->transid[1] << "] " << dec
- << "Time-out in state = " << apiConnectptr.p->apiConnectstate
- << " apiConnectptr.i = " << apiConnectptr.i
- << " - exec: " << apiConnectptr.p->m_exec_flag
- << " - place: " << c_apiConTimer_line[apiConnectptr.i]);
- switch (apiConnectptr.p->apiConnectstate) {
- case CS_STARTED:
- ndbrequire(c_apiConTimer_line[apiConnectptr.i] != 3615);
- if(apiConnectptr.p->lqhkeyreqrec == apiConnectptr.p->lqhkeyconfrec){
- jam();
- /*
- We are waiting for application to continue the transaction. In this
- particular state we will use the application timeout parameter rather
- than the shorter Deadlock detection timeout.
- */
- if (c_appl_timeout_value == 0 ||
- (ctcTimer - getApiConTimer(apiConnectptr.i)) <= c_appl_timeout_value) {
- jam();
- return;
- }//if
- }
- apiConnectptr.p->returnsignal = RS_TCROLLBACKREP;
- apiConnectptr.p->returncode = ZTIME_OUT_ERROR;
- abort010Lab(signal);
- return;
- case CS_RECEIVING:
- case CS_REC_COMMITTING:
- case CS_START_COMMITTING:
- jam();
- /*------------------------------------------------------------------*/
- /* WE ARE STILL IN THE PREPARE PHASE AND THE TRANSACTION HAS */
- /* NOT YET REACHED ITS COMMIT POINT. THUS IT IS NOW OK TO */
- /* START ABORTING THE TRANSACTION. ALSO START CHECKING THE */
- /* REMAINING TRANSACTIONS. */
- /*------------------------------------------------------------------*/
- terrorCode = ZTIME_OUT_ERROR;
- abortErrorLab(signal);
- return;
- case CS_COMMITTING:
- jam();
- /*------------------------------------------------------------------*/
- // We are simply waiting for a signal in the job buffer. Only extreme
- // conditions should get us here. We ignore it.
- /*------------------------------------------------------------------*/
- case CS_COMPLETING:
- jam();
- /*------------------------------------------------------------------*/
- // We are simply waiting for a signal in the job buffer. Only extreme
- // conditions should get us here. We ignore it.
- /*------------------------------------------------------------------*/
- case CS_PREPARE_TO_COMMIT:
- jam();
- /*------------------------------------------------------------------*/
- /* WE ARE WAITING FOR DIH TO COMMIT THE TRANSACTION. WE SIMPLY*/
- /* KEEP WAITING SINCE THERE IS NO BETTER IDEA ON WHAT TO DO. */
- /* IF IT IS BLOCKED THEN NO TRANSACTION WILL PASS THIS GATE. */
- // To guard against strange bugs we crash the system if we have exceeded
- // the time-out period by a factor of 10 and by at least 5 seconds.
- /*------------------------------------------------------------------*/
- if (((ctcTimer - getApiConTimer(apiConnectptr.i)) > (10 * ctimeOutValue)) &&
- ((ctcTimer - getApiConTimer(apiConnectptr.i)) > 500)) {
- jam();
- systemErrorLab(signal);
- }//if
- break;
- case CS_COMMIT_SENT:
- jam();
- /*------------------------------------------------------------------*/
- /* WE HAVE SENT COMMIT TO A NUMBER OF NODES. WE ARE CURRENTLY */
- /* WAITING FOR THEIR REPLY. WITH NODE RECOVERY SUPPORTED WE */
- /* WILL CHECK FOR CRASHED NODES AND RESEND THE COMMIT SIGNAL */
- /* TO THOSE NODES THAT HAVE MISSED THE COMMIT SIGNAL DUE TO */
- /* A NODE FAILURE. */
- /*------------------------------------------------------------------*/
- tabortInd = ZCOMMIT_SETUP;
- setupFailData(signal);
- toCommitHandlingLab(signal);
- return;
- case CS_COMPLETE_SENT:
- jam();
- /*--------------------------------------------------------------------*/
- /* WE HAVE SENT COMPLETE TO A NUMBER OF NODES. WE ARE CURRENTLY */
- /* WAITING FOR THEIR REPLY. WITH NODE RECOVERY SUPPORTED WE */
- /* WILL CHECK FOR CRASHED NODES AND RESEND THE COMPLETE SIGNAL */
- /* TO THOSE NODES THAT HAVE MISSED THE COMPLETE SIGNAL DUE TO */
- /* A NODE FAILURE. */
- /*--------------------------------------------------------------------*/
- tabortInd = ZCOMMIT_SETUP;
- setupFailData(signal);
- toCompleteHandlingLab(signal);
- return;
- case CS_ABORTING:
- jam();
- /*------------------------------------------------------------------*/
- /* TIME-OUT DURING ABORT. WE NEED TO SEND ABORTED FOR ALL */
- /* NODES THAT HAVE FAILED BEFORE SENDING ABORTED. */
- /*------------------------------------------------------------------*/
- tcConnectptr.i = apiConnectptr.p->firstTcConnect;
- sendAbortedAfterTimeout(signal, 0);
- break;
- case CS_START_SCAN:{
- jam();
- ScanRecordPtr scanPtr;
- scanPtr.i = apiConnectptr.p->apiScanRec;
- ptrCheckGuard(scanPtr, cscanrecFileSize, scanRecord);
- scanError(signal, scanPtr, ZSCANTIME_OUT_ERROR);
- break;
- }
- case CS_WAIT_ABORT_CONF:
- jam();
- tcConnectptr.i = apiConnectptr.p->currentTcConnect;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- arrGuard(apiConnectptr.p->currentReplicaNo, 4);
- hostptr.i = tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo];
- ptrCheckGuard(hostptr, chostFilesize, hostRecord);
- if (hostptr.p->hostStatus == HS_ALIVE) {
- /*------------------------------------------------------------------*/
- // Time-out waiting for ABORTCONF. We will resend the ABORTREQ just in
- // case.
- /*------------------------------------------------------------------*/
- warningReport(signal, 20);
- apiConnectptr.p->timeOutCounter++;
- if (apiConnectptr.p->timeOutCounter > 3) {
- /*------------------------------------------------------------------*/
- // More than three time-outs are not acceptable. We will shoot down
- // the node that is not responding.
- /*------------------------------------------------------------------*/
- reportNodeFailed(signal, hostptr.i);
- }//if
- apiConnectptr.p->currentReplicaNo++;
- }//if
- tcurrentReplicaNo = (Uint8)Z8NIL;
- toAbortHandlingLab(signal);
- return;
- case CS_WAIT_COMMIT_CONF:
- jam();
- tcConnectptr.i = apiConnectptr.p->currentTcConnect;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- arrGuard(apiConnectptr.p->currentReplicaNo, 4);
- hostptr.i = tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo];
- ptrCheckGuard(hostptr, chostFilesize, hostRecord);
- if (hostptr.p->hostStatus == HS_ALIVE) {
- /*------------------------------------------------------------------*/
- // Time-out waiting for COMMITCONF. We will resend the COMMITREQ just in
- // case.
- /*------------------------------------------------------------------*/
- warningReport(signal, 21);
- apiConnectptr.p->timeOutCounter++;
- if (apiConnectptr.p->timeOutCounter > 3) {
- /*------------------------------------------------------------------*/
- // More than three time-outs are not acceptable. We will shoot down
- // the node that is not responding.
- /*------------------------------------------------------------------*/
- reportNodeFailed(signal, hostptr.i);
- }//if
- apiConnectptr.p->currentReplicaNo++;
- }//if
- tcurrentReplicaNo = (Uint8)Z8NIL;
- toCommitHandlingLab(signal);
- return;
- case CS_WAIT_COMPLETE_CONF:
- jam();
- tcConnectptr.i = apiConnectptr.p->currentTcConnect;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- arrGuard(apiConnectptr.p->currentReplicaNo, 4);
- hostptr.i = tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo];
- ptrCheckGuard(hostptr, chostFilesize, hostRecord);
- if (hostptr.p->hostStatus == HS_ALIVE) {
- /*------------------------------------------------------------------*/
- // Time-out waiting for COMPLETECONF. We will resend the COMPLETEREQ
- // just in case.
- /*------------------------------------------------------------------*/
- warningReport(signal, 22);
- apiConnectptr.p->timeOutCounter++;
- if (apiConnectptr.p->timeOutCounter > 100) {
- /*------------------------------------------------------------------*/
- // More than 100 time-outs are not acceptable. We will shoot down
- // the node that is not responding.
- /*------------------------------------------------------------------*/
- reportNodeFailed(signal, hostptr.i);
- }//if
- apiConnectptr.p->currentReplicaNo++;
- }//if
- tcurrentReplicaNo = (Uint8)Z8NIL;
- toCompleteHandlingLab(signal);
- return;
- case CS_FAIL_PREPARED:
- jam();
- case CS_FAIL_COMMITTING:
- jam();
- case CS_FAIL_COMMITTED:
- jam();
- case CS_REC_PREPARING:
- jam();
- case CS_START_PREPARING:
- jam();
- case CS_PREPARED:
- jam();
- case CS_RESTART:
- jam();
- case CS_FAIL_ABORTED:
- jam();
- case CS_DISCONNECTED:
- jam();
- default:
- jam();
- /*------------------------------------------------------------------*/
- /* AN IMPOSSIBLE STATE IS SET. CRASH THE SYSTEM. */
- /*------------------------------------------------------------------*/
- DEBUG("State = " << apiConnectptr.p->apiConnectstate);
- systemErrorLab(signal);
- return;
- }//switch
- return;
-}//Dbtc::timeOutFoundLab()
-
-void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
-{
- ApiConnectRecord * transP = apiConnectptr.p;
- if(transP->abortState == AS_IDLE){
- jam();
- warningEvent("TC: %d: %d state=%d abort==IDLE place: %d fop=%d t: %d",
- __LINE__,
- apiConnectptr.i,
- transP->apiConnectstate,
- c_apiConTimer_line[apiConnectptr.i],
- transP->firstTcConnect,
- c_apiConTimer[apiConnectptr.i]
- );
- ndbout_c("TC: %d: %d state=%d abort==IDLE place: %d fop=%d t: %d",
- __LINE__,
- apiConnectptr.i,
- transP->apiConnectstate,
- c_apiConTimer_line[apiConnectptr.i],
- transP->firstTcConnect,
- c_apiConTimer[apiConnectptr.i]
- );
- ndbrequire(false);
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
- return;
- }
-
- OperationState tmp[16];
-
- Uint32 TloopCount = 0;
- do {
- jam();
- if (tcConnectptr.i == RNIL) {
- jam();
- if (Tcheck == 0) {
- jam();
- /*------------------------------------------------------------------
- * All nodes had already reported ABORTED for all tcConnect records.
- * Crash, since receiving a time-out in that situation is an
- * error.
- *------------------------------------------------------------------*/
- char buf[96]; buf[0] = 0;
- char buf2[96];
- BaseString::snprintf(buf, sizeof(buf), "TC %d: %d ops:",
- __LINE__, apiConnectptr.i);
- for(Uint32 i = 0; i<TloopCount; i++){
- BaseString::snprintf(buf2, sizeof(buf2), "%s %d", buf, tmp[i]);
- BaseString::snprintf(buf, sizeof(buf), buf2);
- }
- warningEvent(buf);
- ndbout_c(buf);
- ndbrequire(false);
- }
- releaseAbortResources(signal);
- return;
- }//if
- TloopCount++;
- if (TloopCount >= 1024) {
- jam();
- /*------------------------------------------------------------------*/
- // Insert a real-time break for large transactions to avoid blowing
- // away the job buffer.
- /*------------------------------------------------------------------*/
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- apiConnectptr.p->counter++;
- signal->theData[0] = TcContinueB::ZABORT_TIMEOUT_BREAK;
- signal->theData[1] = tcConnectptr.i;
- signal->theData[2] = apiConnectptr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
- return;
- }//if
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- if(TloopCount < 16){
- jam();
- tmp[TloopCount-1] = tcConnectptr.p->tcConnectstate;
- }
-
- if (tcConnectptr.p->tcConnectstate == OS_ABORT_SENT) {
- jam();
- /*------------------------------------------------------------------*/
- // We have sent an ABORT signal to this node but not yet received any
- // reply. We have to send an ABORTED signal on our own in some cases.
- // If the node is declared as up and running and still do not respond
- // in time to the ABORT signal we will declare it as dead.
- /*------------------------------------------------------------------*/
- UintR Ti = 0;
- arrGuard(tcConnectptr.p->noOfNodes, 4);
- for (Ti = 0; Ti < tcConnectptr.p->noOfNodes; Ti++) {
- jam();
- if (tcConnectptr.p->tcNodedata[Ti] != 0) {
- TloopCount += 31;
- Tcheck = 1;
- hostptr.i = tcConnectptr.p->tcNodedata[Ti];
- ptrCheckGuard(hostptr, chostFilesize, hostRecord);
- if (hostptr.p->hostStatus == HS_ALIVE) {
- jam();
- /*---------------------------------------------------------------
- * A backup replica has not sent ABORTED.
- * Could be that a node before it has crashed.
- * Send an ABORT signal specifically to this node.
- * We will not send to any more nodes after this
- * to avoid race problems.
- * To also ensure that we use this message also as a heartbeat
- * we will move this node to the primary replica seat.
- * The primary replica and any failed node after it will
- * be removed from the node list. Update also number of nodes.
- * Finally break the loop to ensure we don't mess
- * things up by executing another loop.
- * We also update the timer to ensure we don't get time-out
- * too early.
- *--------------------------------------------------------------*/
- BlockReference TBRef = calcLqhBlockRef(hostptr.i);
- signal->theData[0] = tcConnectptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = apiConnectptr.p->transid[0];
- signal->theData[3] = apiConnectptr.p->transid[1];
- sendSignal(TBRef, GSN_ABORT, signal, 4, JBB);
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- break;
- } else {
- jam();
- /*--------------------------------------------------------------
- * The node we are waiting for is dead. We will send ABORTED to
- * ourselves on behalf of the failed node.
- *--------------------------------------------------------------*/
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- signal->theData[0] = tcConnectptr.i;
- signal->theData[1] = apiConnectptr.p->transid[0];
- signal->theData[2] = apiConnectptr.p->transid[1];
- signal->theData[3] = hostptr.i;
- signal->theData[4] = ZFALSE;
- sendSignal(cownref, GSN_ABORTED, signal, 5, JBB);
- }//if
- }//if
- }//for
- }//if
- tcConnectptr.i = tcConnectptr.p->nextTcConnect;
- } while (1);
-}//Dbtc::sendAbortedAfterTimeout()
-
-void Dbtc::reportNodeFailed(Signal* signal, Uint32 nodeId)
-{
- DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0];
- rep->nodeId = nodeId;
- rep->err = DisconnectRep::TcReportNodeFailed;
- sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal,
- DisconnectRep::SignalLength, JBB);
-}//Dbtc::reportNodeFailed()
-
-/*-------------------------------------------------*/
-/* Timeout-loop for scanned fragments. */
-/*-------------------------------------------------*/
-void Dbtc::timeOutLoopStartFragLab(Signal* signal, Uint32 TscanConPtr)
-{
- ScanFragRecPtr timeOutPtr[8];
- UintR tfragTimer[8];
- UintR texpiredTime[8];
- UintR TloopCount = 0;
- Uint32 TtcTimer = ctcTimer;
-
- while ((TscanConPtr + 8) < cscanFragrecFileSize) {
- jam();
- timeOutPtr[0].i = TscanConPtr + 0;
- timeOutPtr[1].i = TscanConPtr + 1;
- timeOutPtr[2].i = TscanConPtr + 2;
- timeOutPtr[3].i = TscanConPtr + 3;
- timeOutPtr[4].i = TscanConPtr + 4;
- timeOutPtr[5].i = TscanConPtr + 5;
- timeOutPtr[6].i = TscanConPtr + 6;
- timeOutPtr[7].i = TscanConPtr + 7;
-
- c_scan_frag_pool.getPtrForce(timeOutPtr[0]);
- c_scan_frag_pool.getPtrForce(timeOutPtr[1]);
- c_scan_frag_pool.getPtrForce(timeOutPtr[2]);
- c_scan_frag_pool.getPtrForce(timeOutPtr[3]);
- c_scan_frag_pool.getPtrForce(timeOutPtr[4]);
- c_scan_frag_pool.getPtrForce(timeOutPtr[5]);
- c_scan_frag_pool.getPtrForce(timeOutPtr[6]);
- c_scan_frag_pool.getPtrForce(timeOutPtr[7]);
-
- tfragTimer[0] = timeOutPtr[0].p->scanFragTimer;
- tfragTimer[1] = timeOutPtr[1].p->scanFragTimer;
- tfragTimer[2] = timeOutPtr[2].p->scanFragTimer;
- tfragTimer[3] = timeOutPtr[3].p->scanFragTimer;
- tfragTimer[4] = timeOutPtr[4].p->scanFragTimer;
- tfragTimer[5] = timeOutPtr[5].p->scanFragTimer;
- tfragTimer[6] = timeOutPtr[6].p->scanFragTimer;
- tfragTimer[7] = timeOutPtr[7].p->scanFragTimer;
-
- texpiredTime[0] = TtcTimer - tfragTimer[0];
- texpiredTime[1] = TtcTimer - tfragTimer[1];
- texpiredTime[2] = TtcTimer - tfragTimer[2];
- texpiredTime[3] = TtcTimer - tfragTimer[3];
- texpiredTime[4] = TtcTimer - tfragTimer[4];
- texpiredTime[5] = TtcTimer - tfragTimer[5];
- texpiredTime[6] = TtcTimer - tfragTimer[6];
- texpiredTime[7] = TtcTimer - tfragTimer[7];
-
- for (Uint32 Ti = 0; Ti < 8; Ti++) {
- jam();
- if (tfragTimer[Ti] != 0) {
-
- if (texpiredTime[Ti] > ctimeOutValue) {
- jam();
- DEBUG("Fragment timeout found:"<<
- " ctimeOutValue=" <<ctimeOutValue
- <<", texpiredTime="<<texpiredTime[Ti]<<endl
- <<" tfragTimer="<<tfragTimer[Ti]
- <<", ctcTimer="<<ctcTimer);
- timeOutFoundFragLab(signal, TscanConPtr + Ti);
- return;
- }//if
- }//if
- }//for
- TscanConPtr += 8;
- /*----------------------------------------------------------------*/
- /* We split the process up, checking roughly 1024 fragment records */
- /* at a time, to maintain real-time behaviour. */
- /*----------------------------------------------------------------*/
- if (TloopCount++ > 128 ) {
- jam();
- signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL;
- signal->theData[1] = TscanConPtr;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- return;
- }//if
- }//while
- for ( ; TscanConPtr < cscanFragrecFileSize; TscanConPtr++){
- jam();
- timeOutPtr[0].i = TscanConPtr;
- c_scan_frag_pool.getPtrForce(timeOutPtr[0]);
- if (timeOutPtr[0].p->scanFragTimer != 0) {
- texpiredTime[0] = ctcTimer - timeOutPtr[0].p->scanFragTimer;
- if (texpiredTime[0] > ctimeOutValue) {
- jam();
- DEBUG("Fragment timeout found:"<<
- " ctimeOutValue=" <<ctimeOutValue
- <<", texpiredTime="<<texpiredTime[0]<<endl
- <<" tfragTimer="<<tfragTimer[0]
- <<", ctcTimer="<<ctcTimer);
- timeOutFoundFragLab(signal, TscanConPtr);
- return;
- }//if
- }//if
- }//for
- ctimeOutCheckFragActive = TOCS_FALSE;
-
- return;
-}//timeOutLoopStartFragLab()
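
For readability, a compact sketch of what the 8-way unrolled loop above computes, using hypothetical stand-in types rather than the ScanFragRec pool:

#include <cstdint>
#include <vector>

// Sketch: find the first scan-fragment record whose timer is running and has
// exceeded the time-out value, taking a real-time break roughly every 1024
// records.
struct FragTimerSketch { uint32_t scanFragTimer; };

// Returns the index of an expired record, -1 if none was found in the scanned
// range, or -2 (with *resumeAt set) when a real-time break is needed.
static int findExpiredFrag(const std::vector<FragTimerSketch>& frags,
                           uint32_t start, uint32_t tcTimer,
                           uint32_t timeOutValue, uint32_t* resumeAt)
{
  for (uint32_t i = start; i < frags.size(); i++) {
    if (i - start >= 1024) { *resumeAt = i; return -2; }   // real-time break
    const uint32_t t = frags[i].scanFragTimer;
    if (t != 0 && tcTimer - t > timeOutValue)
      return (int)i;                                       // expired fragment
  }
  return -1;                                               // none expired
}
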
-
-/*--------------------------------------------------------------------------*/
-/*Handle the heartbeat signal from LQH in a scan process */
-// (Set timer on fragrec.)
-/*--------------------------------------------------------------------------*/
-void Dbtc::execSCAN_HBREP(Signal* signal)
-{
- jamEntry();
-
- scanFragptr.i = signal->theData[0];
- c_scan_frag_pool.getPtr(scanFragptr);
- switch (scanFragptr.p->scanFragState){
- case ScanFragRec::LQH_ACTIVE:
- break;
- default:
- DEBUG("execSCAN_HBREP: scanFragState="<<scanFragptr.p->scanFragState);
- systemErrorLab(signal);
- break;
- }
-
- ScanRecordPtr scanptr;
- scanptr.i = scanFragptr.p->scanRec;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
-
- apiConnectptr.i = scanptr.p->scanApiRec;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
-
- if (!(apiConnectptr.p->transid[0] == signal->theData[1] &&
- apiConnectptr.p->transid[1] == signal->theData[2])){
- jam();
- /**
- * Send signal back to sender so that the crash occurs there
- */
- // Save original transid
- signal->theData[3] = signal->theData[0];
- signal->theData[4] = signal->theData[1];
- // Set transid to illegal values
- signal->theData[1] = RNIL;
- signal->theData[2] = RNIL;
-
- sendSignal(signal->senderBlockRef(), GSN_SCAN_HBREP, signal, 5, JBA);
- DEBUG("SCAN_HBREP with wrong transid("
- <<signal->theData[3]<<", "<<signal->theData[4]<<")");
- return;
- }//if
-
- // Update timer on ScanFragRec
- if (scanFragptr.p->scanFragTimer != 0){
- updateBuddyTimer(apiConnectptr);
- scanFragptr.p->startFragTimer(ctcTimer);
- } else {
- ndbassert(false);
- DEBUG("SCAN_HBREP when scanFragTimer was turned off");
- }
-}//execSCAN_HBREP()
-
-/*--------------------------------------------------------------------------*/
-/* Timeout has occurred on a fragment, which means a scan has timed out. */
-/* If this is true we have an error in LQH/ACC. */
-/*--------------------------------------------------------------------------*/
-void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr)
-{
- ScanFragRecPtr ptr;
- c_scan_frag_pool.getPtr(ptr, TscanConPtr);
- DEBUG(TscanConPtr << " timeOutFoundFragLab: scanFragState = "<< ptr.p->scanFragState);
-
- /*-------------------------------------------------------------------------*/
- // The scan fragment has expired its timeout. Check its state to decide
- // what to do.
- /*-------------------------------------------------------------------------*/
- switch (ptr.p->scanFragState) {
- case ScanFragRec::WAIT_GET_PRIMCONF:
- jam();
- ndbrequire(false);
- break;
- case ScanFragRec::LQH_ACTIVE:{
- jam();
-
- /**
- * The LQH expired its timeout, try to close it
- */
- Uint32 nodeId = refToNode(ptr.p->lqhBlockref);
- Uint32 connectCount = getNodeInfo(nodeId).m_connectCount;
- ScanRecordPtr scanptr;
- scanptr.i = ptr.p->scanRec;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
-
- if(connectCount != ptr.p->m_connectCount){
- jam();
- /**
- * The node has died
- */
- ptr.p->scanFragState = ScanFragRec::COMPLETED;
- ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
-
- run.release(ptr);
- ptr.p->stopFragTimer();
- }
-
- scanError(signal, scanptr, ZSCAN_FRAG_LQH_ERROR);
- break;
- }
- case ScanFragRec::DELIVERED:
- jam();
- case ScanFragRec::IDLE:
- jam();
- case ScanFragRec::QUEUED_FOR_DELIVERY:
- jam();
- /*-----------------------------------------------------------------------
- * Should never occur. In a debug version we crash here; in a release
- * version we simply stop the fragment timer and continue.
- *-----------------------------------------------------------------------*/
-#ifdef VM_TRACE
- systemErrorLab(signal);
-#endif
- scanFragptr.p->stopFragTimer();
- break;
- default:
- jam();
- /*-----------------------------------------------------------------------
- * Non-existent state. Crash.
- *-----------------------------------------------------------------------*/
- systemErrorLab(signal);
- break;
- }//switch
-
- signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL;
- signal->theData[1] = TscanConPtr + 1;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- return;
-}//timeOutFoundFragLab()
-
-
-/*
- 4.3.16 GCP_NOMORETRANS
- ----------------------
-*/
-/*****************************************************************************
- * G C P _ N O M O R E T R A N S
- *
- * WHEN DBTC RECEIVES SIGNAL GCP_NOMORETRANS A CHECK IS DONE TO FIND OUT IF
- * THERE ARE ANY GLOBAL CHECKPOINTS GOING ON - CFIRSTGCP /= RNIL. DBTC THEN
- * SEARCHES THE GCP_RECORD FILE TO FIND OUT IF THERE ARE ANY TRANSACTIONS NOT
- * CONCLUDED WITH THIS SPECIFIC CHECKPOINT - GCP_PTR:GCP_ID = TCHECK_GCP_ID.
- * FOR EACH TRANSACTION WHERE API_CONNECTSTATE EQUALS PREPARED, COMMITTING,
- * COMMITTED OR COMPLETING SIGNAL CONTINUEB IS SENT WITH A DELAY OF 100 MS,
- * THE COUNTER GCP_PTR:OUTSTANDINGAPI IS INCREASED. WHEN CONTINUEB IS RECEIVED
- * THE COUNTER IS DECREASED AND A CHECK IS DONE TO FIND OUT IF ALL
- * TRANSACTIONS ARE CONCLUDED. IF SO, SIGNAL GCP_TCFINISHED IS SENT.
- *****************************************************************************/
-void Dbtc::execGCP_NOMORETRANS(Signal* signal)
-{
- jamEntry();
- tcheckGcpId = signal->theData[1];
- if (cfirstgcp != RNIL) {
- jam();
- /* A GLOBAL CHECKPOINT IS GOING ON */
- gcpPtr.i = cfirstgcp; /* SET POINTER TO FIRST GCP IN QUEUE*/
- ptrCheckGuard(gcpPtr, cgcpFilesize, gcpRecord);
- if (gcpPtr.p->gcpId == tcheckGcpId) {
- jam();
- if (gcpPtr.p->firstApiConnect != RNIL) {
- jam();
- gcpPtr.p->gcpNomoretransRec = ZTRUE;
- } else {
- jam();
- gcpTcfinished(signal);
- unlinkGcp(signal);
- }//if
- } else {
- jam();
- /*------------------------------------------------------------*/
- /* IF IT IS NOT THE FIRST THEN THERE SHOULD BE NO */
- /* RECORD FOR THIS GLOBAL CHECKPOINT. WE ALWAYS REMOVE */
- /* THE GLOBAL CHECKPOINTS IN ORDER. */
- /*------------------------------------------------------------*/
- gcpTcfinished(signal);
- }//if
- } else {
- jam();
- gcpTcfinished(signal);
- }//if
- return;
-}//Dbtc::execGCP_NOMORETRANS()
-
-/*****************************************************************************/
-/* */
-/* TAKE OVER MODULE */
-/* */
-/*****************************************************************************/
-/* */
-/* THIS PART OF TC TAKES OVER THE COMMIT/ABORT OF TRANSACTIONS WHERE THE */
-/* NODE ACTING AS TC HAS FAILED. IT STARTS BY QUERYING ALL NODES ABOUT */
-/* ANY OPERATIONS PARTICIPATING IN A TRANSACTION WHERE THE TC NODE HAS */
-/* FAILED. */
-/* */
-/* AFTER RECEIVING INFORMATION FROM ALL NODES ABOUT OPERATION STATUS THIS */
-/* CODE WILL ENSURE THAT ALL AFFECTED TRANSACTIONS ARE PROPERLY ABORTED OR*/
-/* COMMITTED. THE ORIGINATING APPLICATION NODE WILL ALSO BE CONTACTED. */
-/* IF THE ORIGINATING APPLICATION ALSO FAILED THEN THERE IS CURRENTLY NO */
-/* WAY TO FIND OUT WHETHER A TRANSACTION WAS PERFORMED OR NOT. */
-/*****************************************************************************/
-void Dbtc::execNODE_FAILREP(Signal* signal)
-{
- HostRecordPtr tmpHostptr;
- jamEntry();
-
- NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
-
- cfailure_nr = nodeFail->failNo;
- const Uint32 tnoOfNodes = nodeFail->noOfNodes;
- const Uint32 tnewMasterId = nodeFail->masterNodeId;
-
- arrGuard(tnoOfNodes, MAX_NDB_NODES);
- int index = 0;
- for (unsigned i = 1; i< MAX_NDB_NODES; i++) {
- if(NodeBitmask::get(nodeFail->theNodes, i)){
- cdata[index] = i;
- index++;
- }//if
- }//for
-
- tcNodeFailptr.i = 0;
- ptrAss(tcNodeFailptr, tcFailRecord);
- Uint32 tindex;
- for (tindex = 0; tindex < tnoOfNodes; tindex++) {
- jam();
- hostptr.i = cdata[tindex];
- ptrCheckGuard(hostptr, chostFilesize, hostRecord);
- /*------------------------------------------------------------*/
- /* SET STATUS OF THE FAILED NODE TO DEAD SINCE IT HAS */
- /* FAILED. */
- /*------------------------------------------------------------*/
- hostptr.p->hostStatus = HS_DEAD;
-
- if (hostptr.p->takeOverStatus == TOS_COMPLETED) {
- jam();
- /*------------------------------------------------------------*/
- /* A VERY UNUSUAL SITUATION. THE TAKE OVER WAS COMPLETED*/
- /* EVEN BEFORE WE HEARD ABOUT THE NODE FAILURE REPORT. */
- /* HOWEVER UNUSUAL THIS SITUATION IS POSSIBLE. */
- /*------------------------------------------------------------*/
- /* RELEASE THE CURRENTLY UNUSED LQH CONNECTIONS. THE */
- /* REMAINING WILL BE RELEASED WHEN THE TRANSACTION THAT */
- /* USED THEM IS COMPLETED. */
- /*------------------------------------------------------------*/
- {
- NFCompleteRep * const nfRep = (NFCompleteRep *)&signal->theData[0];
- nfRep->blockNo = DBTC;
- nfRep->nodeId = cownNodeid;
- nfRep->failedNodeId = hostptr.i;
- }
- sendSignal(cdihblockref, GSN_NF_COMPLETEREP, signal,
- NFCompleteRep::SignalLength, JBB);
- } else {
- ndbrequire(hostptr.p->takeOverStatus == TOS_IDLE);
- hostptr.p->takeOverStatus = TOS_NODE_FAILED;
- }//if
-
- if (tcNodeFailptr.p->failStatus == FS_LISTENING) {
- jam();
- /*------------------------------------------------------------*/
- /* THE CURRENT TAKE OVER CAN BE AFFECTED BY THIS NODE */
- /* FAILURE. */
- /*------------------------------------------------------------*/
- if (hostptr.p->lqhTransStatus == LTS_ACTIVE) {
- jam();
- /*------------------------------------------------------------*/
- /* WE WERE WAITING FOR THE FAILED NODE IN THE TAKE OVER */
- /* PROTOCOL FOR TC. */
- /*------------------------------------------------------------*/
- signal->theData[0] = TcContinueB::ZNODE_TAKE_OVER_COMPLETED;
- signal->theData[1] = hostptr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
- }//if
- }//if
-
- }//for
-
- const bool masterFailed = (cmasterNodeId != tnewMasterId);
- cmasterNodeId = tnewMasterId;
-
- if(getOwnNodeId() == cmasterNodeId && masterFailed){
- /**
- * Master has failed and I'm the new master
- */
- jam();
-
- for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
- jam();
- ptrAss(hostptr, hostRecord);
- if (hostptr.p->hostStatus != HS_ALIVE) {
- jam();
- if (hostptr.p->takeOverStatus == TOS_COMPLETED) {
- jam();
- /*------------------------------------------------------------*/
- /* SEND TAKE OVER CONFIRMATION TO ALL ALIVE NODES IF */
- /* TAKE OVER IS COMPLETED. THIS IS PERFORMED TO ENSURE */
- /* THAT ALL NODES AGREE ON THE IDLE STATE OF THE TAKE */
- /* OVER. THIS MIGHT BE MISSED IN AN ERROR SITUATION IF */
- /* MASTER FAILS AFTER SENDING CONFIRMATION TO NEW */
- /* MASTER BUT BEFORE SENDING IT TO ANOTHER NODE */
- /* WHICH WAS NOT MASTER. IF THIS NODE LATER BECOMES */
- /* MASTER IT MIGHT START A NEW TAKE OVER EVEN AFTER THE */
- /* CRASHED NODE HAS ALREADY RECOVERED. */
- /*------------------------------------------------------------*/
- for(tmpHostptr.i = 1; tmpHostptr.i < MAX_NDB_NODES;tmpHostptr.i++) {
- jam();
- ptrAss(tmpHostptr, hostRecord);
- if (tmpHostptr.p->hostStatus == HS_ALIVE) {
- jam();
- tblockref = calcTcBlockRef(tmpHostptr.i);
- signal->theData[0] = hostptr.i;
- sendSignal(tblockref, GSN_TAKE_OVERTCCONF, signal, 1, JBB);
- }//if
- }//for
- }//if
- }//if
- }//for
- }
-
- if(getOwnNodeId() == cmasterNodeId){
- jam();
- for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
- jam();
- ptrAss(hostptr, hostRecord);
- if (hostptr.p->hostStatus != HS_ALIVE) {
- jam();
- if (hostptr.p->takeOverStatus == TOS_NODE_FAILED) {
- jam();
- /*------------------------------------------------------------*/
- /* CONCLUDE ALL ACTIVITIES THE FAILED TC DID CONTROL */
- /* SINCE WE ARE THE MASTER. THIS COULD HAVE BEEN STARTED*/
- /* BY A PREVIOUS MASTER BUT HAS NOT BEEN CONCLUDED YET. */
- /*------------------------------------------------------------*/
- hostptr.p->takeOverStatus = TOS_ACTIVE;
- signal->theData[0] = hostptr.i;
- sendSignal(cownref, GSN_TAKE_OVERTCREQ, signal, 1, JBB);
- }//if
- }//if
- }//for
- }//if
- for (tindex = 0; tindex < tnoOfNodes; tindex++) {
- jam();
- hostptr.i = cdata[tindex];
- ptrCheckGuard(hostptr, chostFilesize, hostRecord);
- /*------------------------------------------------------------*/
- /* LOOP THROUGH AND ABORT ALL SCANS THAT WERE */
- /* CONTROLLED BY THIS TC AND ACTIVE IN THE FAILED */
- /* NODE'S LQH */
- /*------------------------------------------------------------*/
- checkScanActiveInFailedLqh(signal, 0, hostptr.i);
- checkWaitDropTabFailedLqh(signal, hostptr.i, 0); // nodeid, tableid
- }//for
-
-}//Dbtc::execNODE_FAILREP()
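
The failed-node list at the top of execNODE_FAILREP is built by walking the NodeBitmask bit by bit. A self-contained sketch of that decoding, assuming the usual 32-bit-word bitmask layout; the helper is hypothetical:

#include <cstdint>
#include <vector>

// Sketch: collect the id of every node whose bit is set in the failure
// report, starting from node id 1 as the block does.
static std::vector<uint32_t> failedNodeIds(const uint32_t* words,
                                           uint32_t maxNodes)
{
  std::vector<uint32_t> nodes;
  for (uint32_t id = 1; id < maxNodes; id++) {
    if (words[id >> 5] & (1u << (id & 31)))   // test bit `id`
      nodes.push_back(id);
  }
  return nodes;
}
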
-
-void Dbtc::checkScanActiveInFailedLqh(Signal* signal,
- Uint32 scanPtrI,
- Uint32 failedNodeId){
-
- ScanRecordPtr scanptr;
- for (scanptr.i = scanPtrI; scanptr.i < cscanrecFileSize; scanptr.i++) {
- jam();
- ptrAss(scanptr, scanRecord);
- bool found = false;
- if (scanptr.p->scanState != ScanRecord::IDLE){
- jam();
- ScanFragRecPtr ptr;
- ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
-
- for(run.first(ptr); !ptr.isNull(); ){
- jam();
- ScanFragRecPtr curr = ptr;
- run.next(ptr);
- if (curr.p->scanFragState == ScanFragRec::LQH_ACTIVE &&
- refToNode(curr.p->lqhBlockref) == failedNodeId){
- jam();
-
- run.release(curr);
- curr.p->scanFragState = ScanFragRec::COMPLETED;
- curr.p->stopFragTimer();
- found = true;
- }
- }
- }
- if(found){
- jam();
- scanError(signal, scanptr, ZSCAN_LQH_ERROR);
- }
-
- // Send CONTINUEB to continue later
- signal->theData[0] = TcContinueB::ZCHECK_SCAN_ACTIVE_FAILED_LQH;
- signal->theData[1] = scanptr.i + 1; // Check next scanptr
- signal->theData[2] = failedNodeId;
- sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
- return;
- }//for
-}
-
-void
-Dbtc::checkScanFragList(Signal* signal,
- Uint32 failedNodeId,
- ScanRecord * scanP,
- ScanFragList::Head & head){
-
- DEBUG("checkScanActiveInFailedLqh: scanFragError");
-}
-
-void Dbtc::execTAKE_OVERTCCONF(Signal* signal)
-{
- jamEntry();
- tfailedNodeId = signal->theData[0];
- hostptr.i = tfailedNodeId;
- ptrCheckGuard(hostptr, chostFilesize, hostRecord);
- switch (hostptr.p->takeOverStatus) {
- case TOS_IDLE:
- jam();
- /*------------------------------------------------------------*/
- /* THIS MESSAGE ARRIVED EVEN BEFORE THE NODE_FAILREP */
- /* MESSAGE. THIS IS POSSIBLE IN EXTREME SITUATIONS. */
- /* WE SET THE STATE TO TAKE_OVER_COMPLETED AND WAIT */
- /* FOR THE NODE_FAILREP MESSAGE. */
- /*------------------------------------------------------------*/
- hostptr.p->takeOverStatus = TOS_COMPLETED;
- break;
- case TOS_NODE_FAILED:
- case TOS_ACTIVE:
- jam();
- /*------------------------------------------------------------*/
- /* WE ARE NOT MASTER AND THE TAKE OVER IS ACTIVE OR WE */
- /* ARE MASTER AND THE TAKE OVER IS ACTIVE. IN BOTH CASES */
- /* WE SET THE STATE TO TAKE_OVER_COMPLETED. */
- /*------------------------------------------------------------*/
- /* RELEASE THE CURRENTLY UNUSED LQH CONNECTIONS. THE */
- /* REMAINING WILL BE RELEASED WHEN THE TRANSACTION THAT */
- /* USED THEM IS COMPLETED. */
- /*------------------------------------------------------------*/
- hostptr.p->takeOverStatus = TOS_COMPLETED;
- {
- NFCompleteRep * const nfRep = (NFCompleteRep *)&signal->theData[0];
- nfRep->blockNo = DBTC;
- nfRep->nodeId = cownNodeid;
- nfRep->failedNodeId = hostptr.i;
- }
- sendSignal(cdihblockref, GSN_NF_COMPLETEREP, signal,
- NFCompleteRep::SignalLength, JBB);
- break;
- case TOS_COMPLETED:
- jam();
- /*------------------------------------------------------------*/
- /* WE HAVE ALREADY RECEIVED THE CONF SIGNAL. IT IS MOST */
- /* LIKELY SENT FROM A NEW MASTER WHICH WASN'T SURE IF */
- /* THIS NODE HEARD THE CONF SIGNAL FROM THE OLD MASTER. */
- /* WE SIMPLY IGNORE THE MESSAGE. */
- /*------------------------------------------------------------*/
- /*empty*/;
- break;
- default:
- jam();
- systemErrorLab(signal);
- return;
- }//switch
-}//Dbtc::execTAKE_OVERTCCONF()
-
-void Dbtc::execTAKE_OVERTCREQ(Signal* signal)
-{
- jamEntry();
- tfailedNodeId = signal->theData[0];
- tcNodeFailptr.i = 0;
- ptrAss(tcNodeFailptr, tcFailRecord);
- if (tcNodeFailptr.p->failStatus != FS_IDLE) {
- jam();
- /*------------------------------------------------------------*/
- /* WE CAN CURRENTLY ONLY HANDLE ONE TAKE OVER AT A TIME */
- /*------------------------------------------------------------*/
- /* IF MORE THAN ONE TAKE OVER IS REQUESTED WE WILL */
- /* QUEUE THE TAKE OVER AND START IT AS SOON AS THE */
- /* PREVIOUS ARE COMPLETED. */
- /*------------------------------------------------------------*/
- arrGuard(tcNodeFailptr.p->queueIndex, MAX_NDB_NODES);
- tcNodeFailptr.p->queueList[tcNodeFailptr.p->queueIndex] = tfailedNodeId;
- tcNodeFailptr.p->queueIndex = tcNodeFailptr.p->queueIndex + 1;
- return;
- }//if
- startTakeOverLab(signal);
-}//Dbtc::execTAKE_OVERTCREQ()
-
-/*------------------------------------------------------------*/
-/* INITIALISE THE HASH TABLES FOR STORING TRANSACTIONS */
-/* AND OPERATIONS DURING TC TAKE OVER. */
-/*------------------------------------------------------------*/
-void Dbtc::startTakeOverLab(Signal* signal)
-{
- for (tindex = 0; tindex <= 511; tindex++) {
- ctransidFailHash[tindex] = RNIL;
- }//for
- for (tindex = 0; tindex <= 1023; tindex++) {
- ctcConnectFailHash[tindex] = RNIL;
- }//for
- tcNodeFailptr.p->failStatus = FS_LISTENING;
- tcNodeFailptr.p->takeOverNode = tfailedNodeId;
- for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
- jam();
- ptrAss(hostptr, hostRecord);
- if (hostptr.p->hostStatus == HS_ALIVE) {
- jam();
- tblockref = calcLqhBlockRef(hostptr.i);
- hostptr.p->lqhTransStatus = LTS_ACTIVE;
- signal->theData[0] = tcNodeFailptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = tfailedNodeId;
- sendSignal(tblockref, GSN_LQH_TRANSREQ, signal, 3, JBB);
- }//if
- }//for
-}//Dbtc::startTakeOverLab()
-
-/*------------------------------------------------------------*/
-/* A REPORT OF AN OPERATION WHERE TC FAILED HAS ARRIVED.*/
-/*------------------------------------------------------------*/
-void Dbtc::execLQH_TRANSCONF(Signal* signal)
-{
- jamEntry();
- LqhTransConf * const lqhTransConf = (LqhTransConf *)&signal->theData[0];
-
- tcNodeFailptr.i = lqhTransConf->tcRef;
- ptrCheckGuard(tcNodeFailptr, 1, tcFailRecord);
- tnodeid = lqhTransConf->lqhNodeId;
- ttransStatus = (LqhTransConf::OperationStatus)lqhTransConf->operationStatus;
- ttransid1 = lqhTransConf->transId1;
- ttransid2 = lqhTransConf->transId2;
- ttcOprec = lqhTransConf->oldTcOpRec;
- treqinfo = lqhTransConf->requestInfo;
- tgci = lqhTransConf->gci;
- cnodes[0] = lqhTransConf->nextNodeId1;
- cnodes[1] = lqhTransConf->nextNodeId2;
- cnodes[2] = lqhTransConf->nextNodeId3;
- const Uint32 ref = tapplRef = lqhTransConf->apiRef;
- tapplOprec = lqhTransConf->apiOpRec;
- const Uint32 tableId = lqhTransConf->tableId;
-
- if (ttransStatus == LqhTransConf::LastTransConf){
- jam();
- /*------------------------------------------------------------*/
- /* A NODE HAS REPORTED COMPLETION OF TAKE OVER REPORTING*/
- /*------------------------------------------------------------*/
- nodeTakeOverCompletedLab(signal);
- return;
- }//if
- if (ttransStatus == LqhTransConf::Marker){
- jam();
- treqinfo = 0;
- LqhTransConf::setMarkerFlag(treqinfo, 1);
- } else {
- TableRecordPtr tabPtr;
- tabPtr.i = tableId;
- ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
- switch((DictTabInfo::TableType)tabPtr.p->tableType){
- case DictTabInfo::SystemTable:
- case DictTabInfo::UserTable:
- break;
- default:
- tapplRef = 0;
- tapplOprec = 0;
- }
- }
-
- findApiConnectFail(signal);
-
- if(apiConnectptr.p->ndbapiBlockref == 0 && tapplRef != 0){
- apiConnectptr.p->ndbapiBlockref = ref;
- apiConnectptr.p->ndbapiConnect = tapplOprec;
- }
-
- if (ttransStatus != LqhTransConf::Marker){
- jam();
- findTcConnectFail(signal);
- }
-}//Dbtc::execLQH_TRANSCONF()
-
-/*------------------------------------------------------------*/
-/* A NODE HAS REPORTED COMPLETION OF TAKE OVER REPORTING*/
-/*------------------------------------------------------------*/
-void Dbtc::nodeTakeOverCompletedLab(Signal* signal)
-{
- Uint32 guard0;
-
- hostptr.i = tnodeid;
- ptrCheckGuard(hostptr, chostFilesize, hostRecord);
- hostptr.p->lqhTransStatus = LTS_IDLE;
- for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
- jam();
- ptrAss(hostptr, hostRecord);
- if (hostptr.p->hostStatus == HS_ALIVE) {
- if (hostptr.p->lqhTransStatus == LTS_ACTIVE) {
- jam();
- /*------------------------------------------------------------*/
- /* NOT ALL NODES ARE COMPLETED WITH REPORTING IN THE */
- /* TAKE OVER. */
- /*------------------------------------------------------------*/
- return;
- }//if
- }//if
- }//for
- /*------------------------------------------------------------*/
- /* ALL NODES HAVE REPORTED ON THE STATUS OF THE VARIOUS */
-  /*       OPERATIONS THAT WERE CONTROLLED BY THE FAILED TC. WE  */
- /* ARE NOW IN A POSITION TO COMPLETE ALL OF THOSE */
- /* TRANSACTIONS EITHER IN A SUCCESSFUL WAY OR IN AN */
- /* UNSUCCESSFUL WAY. WE WILL ALSO REPORT THIS CONCLUSION*/
- /* TO THE APPLICATION IF THAT IS STILL ALIVE. */
- /*------------------------------------------------------------*/
- tcNodeFailptr.p->currentHashIndexTakeOver = 0;
- tcNodeFailptr.p->completedTakeOver = 0;
- tcNodeFailptr.p->failStatus = FS_COMPLETING;
- guard0 = cnoParallelTakeOver - 1;
- /*------------------------------------------------------------*/
- /* WE WILL COMPLETE THE TRANSACTIONS BY STARTING A */
- /* NUMBER OF PARALLEL ACTIVITIES. EACH ACTIVITY WILL */
- /* COMPLETE ONE TRANSACTION AT A TIME AND IN THAT */
- /* TRANSACTION IT WILL COMPLETE ONE OPERATION AT A TIME.*/
- /* WHEN ALL ACTIVITIES ARE COMPLETED THEN THE TAKE OVER */
- /* IS COMPLETED. */
- /*------------------------------------------------------------*/
- arrGuard(guard0, MAX_NDB_NODES);
- for (tindex = 0; tindex <= guard0; tindex++) {
- jam();
- tcNodeFailptr.p->takeOverProcState[tindex] = ZTAKE_OVER_ACTIVE;
- signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
- signal->theData[1] = tcNodeFailptr.i;
- signal->theData[2] = tindex;
- sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
- }//for
-}//Dbtc::nodeTakeOverCompletedLab()
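
nodeTakeOverCompletedLab() fans the take-over work out into cnoParallelTakeOver independent activities by sending one CONTINUEB signal per activity back to this block; each activity then completes one transaction at a time. A simplified sketch of that fan-out pattern, with a plain queue standing in for the signal layer (the types below are assumptions, not the real Signal/sendSignal API):

#include <cstdint>
#include <queue>

struct SelfMsg {
  uint32_t continueType;   // would be TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER
  uint32_t failRecord;     // which take-over record the activity works on
  uint32_t activityIndex;  // 0 .. parallelism-1
};

// Post one self-message per parallel activity; each message later resumes
// exactly one activity, which processes one transaction per resumption.
void startParallelActivities(std::queue<SelfMsg>& selfQueue,
                             uint32_t failRecord, uint32_t parallelism)
{
  for (uint32_t i = 0; i < parallelism; i++)
    selfQueue.push(SelfMsg{0, failRecord, i});
}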
-
-/*------------------------------------------------------------*/
-/* COMPLETE A NEW TRANSACTION FROM THE HASH TABLE OF */
-/* TRANSACTIONS TO COMPLETE. */
-/*------------------------------------------------------------*/
-void Dbtc::completeTransAtTakeOverLab(Signal* signal, UintR TtakeOverInd)
-{
- jam();
- while (tcNodeFailptr.p->currentHashIndexTakeOver < 512){
- jam();
- apiConnectptr.i =
- ctransidFailHash[tcNodeFailptr.p->currentHashIndexTakeOver];
- if (apiConnectptr.i != RNIL) {
- jam();
- /*------------------------------------------------------------*/
- /* WE HAVE FOUND A TRANSACTION THAT NEEDS TO BE */
- /* COMPLETED. REMOVE IT FROM THE HASH TABLE SUCH THAT */
- /* NOT ANOTHER ACTIVITY ALSO TRIES TO COMPLETE THIS */
- /* TRANSACTION. */
- /*------------------------------------------------------------*/
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- ctransidFailHash[tcNodeFailptr.p->currentHashIndexTakeOver] =
- apiConnectptr.p->nextApiConnect;
-
- completeTransAtTakeOverDoOne(signal, TtakeOverInd);
- // One transaction taken care of, return from this function
- // and wait for the next CONTINUEB to continue processing
- break;
-
- } else {
- if (tcNodeFailptr.p->currentHashIndexTakeOver < 511){
- jam();
- tcNodeFailptr.p->currentHashIndexTakeOver++;
- } else {
- jam();
- completeTransAtTakeOverDoLast(signal, TtakeOverInd);
- tcNodeFailptr.p->currentHashIndexTakeOver++;
- }//if
- }//if
- }//while
-}//Dbtc::completeTransAtTakeOverLab()
-
-
-
-
-void Dbtc::completeTransAtTakeOverDoLast(Signal* signal, UintR TtakeOverInd)
-{
- Uint32 guard0;
- /*------------------------------------------------------------*/
- /* THERE ARE NO MORE TRANSACTIONS TO COMPLETE. THIS */
- /* ACTIVITY IS COMPLETED. */
- /*------------------------------------------------------------*/
- arrGuard(TtakeOverInd, MAX_NDB_NODES);
- if (tcNodeFailptr.p->takeOverProcState[TtakeOverInd] != ZTAKE_OVER_ACTIVE) {
- jam();
- systemErrorLab(signal);
- return;
- }//if
- tcNodeFailptr.p->takeOverProcState[TtakeOverInd] = ZTAKE_OVER_IDLE;
- tcNodeFailptr.p->completedTakeOver++;
-
- if (tcNodeFailptr.p->completedTakeOver == cnoParallelTakeOver) {
- jam();
- /*------------------------------------------------------------*/
- /* WE WERE THE LAST ACTIVITY THAT WAS COMPLETED. WE NEED*/
- /* TO REPORT THE COMPLETION OF THE TAKE OVER TO ALL */
- /* NODES THAT ARE ALIVE. */
- /*------------------------------------------------------------*/
- for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
- jam();
- ptrAss(hostptr, hostRecord);
- if (hostptr.p->hostStatus == HS_ALIVE) {
- jam();
- tblockref = calcTcBlockRef(hostptr.i);
- signal->theData[0] = tcNodeFailptr.p->takeOverNode;
- sendSignal(tblockref, GSN_TAKE_OVERTCCONF, signal, 1, JBB);
- }//if
- }//for
- if (tcNodeFailptr.p->queueIndex > 0) {
- jam();
- /*------------------------------------------------------------*/
- /* THERE ARE MORE NODES TO TAKE OVER. WE NEED TO START */
- /* THE TAKE OVER. */
- /*------------------------------------------------------------*/
- tfailedNodeId = tcNodeFailptr.p->queueList[0];
- guard0 = tcNodeFailptr.p->queueIndex - 1;
- arrGuard(guard0 + 1, MAX_NDB_NODES);
- for (tindex = 0; tindex <= guard0; tindex++) {
- jam();
- tcNodeFailptr.p->queueList[tindex] =
- tcNodeFailptr.p->queueList[tindex + 1];
- }//for
- tcNodeFailptr.p->queueIndex--;
- startTakeOverLab(signal);
- return;
- } else {
- jam();
- tcNodeFailptr.p->failStatus = FS_IDLE;
- }//if
- }//if
- return;
-}//Dbtc::completeTransAtTakeOverDoLast()
-
-void Dbtc::completeTransAtTakeOverDoOne(Signal* signal, UintR TtakeOverInd)
-{
- apiConnectptr.p->takeOverRec = (Uint8)tcNodeFailptr.i;
- apiConnectptr.p->takeOverInd = TtakeOverInd;
-
- switch (apiConnectptr.p->apiConnectstate) {
- case CS_FAIL_COMMITTED:
- jam();
- /*------------------------------------------------------------*/
- /* ALL PARTS OF THE TRANSACTIONS REPORTED COMMITTED. WE */
- /* HAVE THUS COMPLETED THE COMMIT PHASE. WE CAN REPORT */
- /* COMMITTED TO THE APPLICATION AND CONTINUE WITH THE */
- /* COMPLETE PHASE. */
- /*------------------------------------------------------------*/
- sendTCKEY_FAILCONF(signal, apiConnectptr.p);
- tcConnectptr.i = apiConnectptr.p->firstTcConnect;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- apiConnectptr.p->currentTcConnect = tcConnectptr.i;
- apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
- tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
- toCompleteHandlingLab(signal);
- return;
- case CS_FAIL_COMMITTING:
- jam();
- /*------------------------------------------------------------*/
- /* AT LEAST ONE PART WAS ONLY PREPARED AND AT LEAST ONE */
- /* PART WAS COMMITTED. COMPLETE THE COMMIT PHASE FIRST. */
- /* THEN CONTINUE AS AFTER COMMITTED. */
- /*------------------------------------------------------------*/
- tcConnectptr.i = apiConnectptr.p->firstTcConnect;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- apiConnectptr.p->currentTcConnect = tcConnectptr.i;
- apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
- tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
- toCommitHandlingLab(signal);
- return;
- case CS_FAIL_ABORTING:
- case CS_FAIL_PREPARED:
- jam();
- /*------------------------------------------------------------*/
- /* WE WILL ABORT THE TRANSACTION IF IT IS IN A PREPARED */
- /* STATE IN THIS VERSION. IN LATER VERSIONS WE WILL */
- /* HAVE TO ADD CODE FOR HANDLING OF PREPARED-TO-COMMIT */
- /* TRANSACTIONS. THESE ARE NOT ALLOWED TO ABORT UNTIL WE*/
- /* HAVE HEARD FROM THE TRANSACTION COORDINATOR. */
- /* */
-    /*       IT IS ACTUALLY POSSIBLE TO COMMIT TRANSACTIONS THAT  */
-    /*       ARE PREPARED. WE WILL LEAVE THIS PROBLEM UNTIL       */
- /* LATER VERSIONS. */
- /*------------------------------------------------------------*/
- tcConnectptr.i = apiConnectptr.p->firstTcConnect;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- apiConnectptr.p->currentTcConnect = tcConnectptr.i;
- apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
- tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
- toAbortHandlingLab(signal);
- return;
- case CS_FAIL_ABORTED:
- jam();
- sendTCKEY_FAILREF(signal, apiConnectptr.p);
-
- signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
- signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec;
- signal->theData[2] = apiConnectptr.p->takeOverInd;
- sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
- releaseTakeOver(signal);
- break;
- case CS_FAIL_COMPLETED:
- jam();
- sendTCKEY_FAILCONF(signal, apiConnectptr.p);
-
- signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
- signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec;
- signal->theData[2] = apiConnectptr.p->takeOverInd;
- sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
- releaseApiConnectFail(signal);
- break;
- default:
- jam();
- systemErrorLab(signal);
- return;
- }//switch
-}//Dbtc::completeTransAtTakeOverDoOne()
-
-void
-Dbtc::sendTCKEY_FAILREF(Signal* signal, const ApiConnectRecord * regApiPtr){
- jam();
-
- const Uint32 ref = regApiPtr->ndbapiBlockref;
- if(ref != 0){
- signal->theData[0] = regApiPtr->ndbapiConnect;
- signal->theData[1] = regApiPtr->transid[0];
- signal->theData[2] = regApiPtr->transid[1];
-
- sendSignal(ref, GSN_TCKEY_FAILREF, signal, 3, JBB);
- }
-}
-
-void
-Dbtc::sendTCKEY_FAILCONF(Signal* signal, ApiConnectRecord * regApiPtr){
- jam();
- TcKeyFailConf * const failConf = (TcKeyFailConf *)&signal->theData[0];
-
- const Uint32 ref = regApiPtr->ndbapiBlockref;
- const Uint32 marker = regApiPtr->commitAckMarker;
- if(ref != 0){
- failConf->apiConnectPtr = regApiPtr->ndbapiConnect | (marker != RNIL);
- failConf->transId1 = regApiPtr->transid[0];
- failConf->transId2 = regApiPtr->transid[1];
-
- sendSignal(regApiPtr->ndbapiBlockref,
- GSN_TCKEY_FAILCONF, signal, TcKeyFailConf::SignalLength, JBB);
- }
- regApiPtr->commitAckMarker = RNIL;
-}
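
sendTCKEY_FAILCONF() reports whether a commit ack marker exists by OR-ing the boolean into the connect pointer word it returns to the api, i.e. the flag travels in the least significant bit. A small sketch of that pack/unpack convention (the helper names are illustrative and assume the connect pointer itself always has its low bit clear):

#include <cstdint>

// Pack the "has commit ack marker" flag into bit 0 of the connect pointer word.
static uint32_t packFailConfPtr(uint32_t connectPtr, bool hasMarker)
{
  return connectPtr | (hasMarker ? 1u : 0u);
}

// Receiver side: split the word back into the flag and the connect pointer.
static void unpackFailConfPtr(uint32_t word,
                              uint32_t& connectPtr, bool& hasMarker)
{
  hasMarker  = (word & 1u) != 0;
  connectPtr = word & ~1u;
}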
-
-/*------------------------------------------------------------*/
-/* THIS PART HANDLES THE ABORT PHASE IN THE CASE OF A */
-/* NODE FAILURE BEFORE THE COMMIT DECISION. */
-/*------------------------------------------------------------*/
-/* ABORT REQUEST SUCCESSFULLY COMPLETED ON TNODEID */
-/*------------------------------------------------------------*/
-void Dbtc::execABORTCONF(Signal* signal)
-{
- UintR compare_transid1, compare_transid2;
-
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- tnodeid = signal->theData[2];
- if (ERROR_INSERTED(8045)) {
- CLEAR_ERROR_INSERT_VALUE;
- sendSignalWithDelay(cownref, GSN_ABORTCONF, signal, 2000, 5);
- return;
- }//if
- if (tcConnectptr.i >= ctcConnectFilesize) {
- errorReport(signal, 5);
- return;
- }//if
- ptrAss(tcConnectptr, tcConnectRecord);
- if (tcConnectptr.p->tcConnectstate != OS_WAIT_ABORT_CONF) {
- warningReport(signal, 16);
- return;
- }//if
- apiConnectptr.i = tcConnectptr.p->apiConnect;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- if (apiConnectptr.p->apiConnectstate != CS_WAIT_ABORT_CONF) {
- warningReport(signal, 17);
- return;
- }//if
- compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[3];
- compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[4];
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
- warningReport(signal, 18);
- return;
- }//if
- arrGuard(apiConnectptr.p->currentReplicaNo, 4);
- if (tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo] !=
- tnodeid) {
- warningReport(signal, 19);
- return;
- }//if
- tcurrentReplicaNo = (Uint8)Z8NIL;
- tcConnectptr.p->tcConnectstate = OS_ABORTING;
- toAbortHandlingLab(signal);
-}//Dbtc::execABORTCONF()
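
execABORTCONF(), execCOMMITCONF() and execCOMPLETECONF() all validate the incoming transaction id the same way: both 32-bit halves are XOR-ed against the stored id and the results are OR-ed together, so a single comparison against zero covers both words. A standalone sketch of that check (not the DBTC code itself):

#include <cstdint>

// True iff both halves of the stored transaction id match the received ones.
// Any differing bit in either half makes the OR-ed value non-zero.
static bool sameTransId(const uint32_t stored[2],
                        uint32_t received1, uint32_t received2)
{
  const uint32_t diff = (stored[0] ^ received1) | (stored[1] ^ received2);
  return diff == 0;
}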
-
-void Dbtc::toAbortHandlingLab(Signal* signal)
-{
- do {
- if (tcurrentReplicaNo != (Uint8)Z8NIL) {
- jam();
- arrGuard(tcurrentReplicaNo, 4);
- const LqhTransConf::OperationStatus stat =
- (LqhTransConf::OperationStatus)
- tcConnectptr.p->failData[tcurrentReplicaNo];
- switch(stat){
- case LqhTransConf::InvalidStatus:
- case LqhTransConf::Aborted:
- jam();
- /*empty*/;
- break;
- case LqhTransConf::Prepared:
- jam();
- hostptr.i = tcConnectptr.p->tcNodedata[tcurrentReplicaNo];
- ptrCheckGuard(hostptr, chostFilesize, hostRecord);
- if (hostptr.p->hostStatus == HS_ALIVE) {
- jam();
- tblockref = calcLqhBlockRef(hostptr.i);
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- tcConnectptr.p->tcConnectstate = OS_WAIT_ABORT_CONF;
- apiConnectptr.p->apiConnectstate = CS_WAIT_ABORT_CONF;
- apiConnectptr.p->timeOutCounter = 0;
- signal->theData[0] = tcConnectptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = apiConnectptr.p->transid[0];
- signal->theData[3] = apiConnectptr.p->transid[1];
- signal->theData[4] = apiConnectptr.p->tcBlockref;
- signal->theData[5] = tcConnectptr.p->tcOprec;
- sendSignal(tblockref, GSN_ABORTREQ, signal, 6, JBB);
- return;
- }//if
- break;
- default:
- jam();
- systemErrorLab(signal);
- return;
- }//switch
- }//if
- if (apiConnectptr.p->currentReplicaNo > 0) {
- jam();
- /*------------------------------------------------------------*/
- /* THERE IS STILL ANOTHER REPLICA THAT NEEDS TO BE */
- /* ABORTED. */
- /*------------------------------------------------------------*/
- apiConnectptr.p->currentReplicaNo--;
- tcurrentReplicaNo = apiConnectptr.p->currentReplicaNo;
- } else {
- /*------------------------------------------------------------*/
-      /*       THE LAST REPLICA IN THIS OPERATION HAS BEEN HANDLED. */
- /*------------------------------------------------------------*/
- tcConnectptr.i = tcConnectptr.p->nextTcConnect;
- if (tcConnectptr.i == RNIL) {
- /*------------------------------------------------------------*/
- /* WE HAVE COMPLETED THE ABORT PHASE. WE CAN NOW REPORT */
- /* THE ABORT STATUS TO THE APPLICATION AND CONTINUE */
- /* WITH THE NEXT TRANSACTION. */
- /*------------------------------------------------------------*/
- if (apiConnectptr.p->takeOverRec != (Uint8)Z8NIL) {
- jam();
- sendTCKEY_FAILREF(signal, apiConnectptr.p);
- const Uint32 marker = apiConnectptr.p->commitAckMarker;
- if(marker != RNIL){
- jam();
-
- CommitAckMarkerPtr tmp;
- tmp.i = marker;
- tmp.p = m_commitAckMarkerHash.getPtr(tmp.i);
-
- m_commitAckMarkerHash.release(tmp);
- apiConnectptr.p->commitAckMarker = RNIL;
- }
-
- /*------------------------------------------------------------*/
- /* WE HAVE COMPLETED THIS TRANSACTION NOW AND CAN */
- /* CONTINUE THE PROCESS WITH THE NEXT TRANSACTION. */
- /*------------------------------------------------------------*/
- signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
- signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec;
- signal->theData[2] = apiConnectptr.p->takeOverInd;
- sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
- releaseTakeOver(signal);
- } else {
- jam();
- releaseAbortResources(signal);
- }//if
- return;
- }//if
- apiConnectptr.p->currentTcConnect = tcConnectptr.i;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
- tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
- }//if
- } while (1);
-}//Dbtc::toAbortHandlingLab()
-
-/*------------------------------------------------------------*/
-/* THIS PART HANDLES THE COMMIT PHASE IN THE CASE OF A */
-/* NODE FAILURE IN THE MIDDLE OF THE COMMIT PHASE. */
-/*------------------------------------------------------------*/
-/* COMMIT REQUEST SUCCESSFULLY COMPLETED ON TNODEID */
-/*------------------------------------------------------------*/
-void Dbtc::execCOMMITCONF(Signal* signal)
-{
- UintR compare_transid1, compare_transid2;
-
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- tnodeid = signal->theData[1];
- if (ERROR_INSERTED(8046)) {
- CLEAR_ERROR_INSERT_VALUE;
- sendSignalWithDelay(cownref, GSN_COMMITCONF, signal, 2000, 4);
- return;
- }//if
- if (tcConnectptr.i >= ctcConnectFilesize) {
- errorReport(signal, 4);
- return;
- }//if
- ptrAss(tcConnectptr, tcConnectRecord);
- if (tcConnectptr.p->tcConnectstate != OS_WAIT_COMMIT_CONF) {
- warningReport(signal, 8);
- return;
- }//if
- apiConnectptr.i = tcConnectptr.p->apiConnect;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- if (apiConnectptr.p->apiConnectstate != CS_WAIT_COMMIT_CONF) {
- warningReport(signal, 9);
- return;
- }//if
- compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[2];
- compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[3];
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
- warningReport(signal, 10);
- return;
- }//if
- arrGuard(apiConnectptr.p->currentReplicaNo, 4);
- if (tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo] !=
- tnodeid) {
- warningReport(signal, 11);
- return;
- }//if
- if (ERROR_INSERTED(8026)) {
- jam();
- systemErrorLab(signal);
- }//if
- tcurrentReplicaNo = (Uint8)Z8NIL;
- tcConnectptr.p->tcConnectstate = OS_COMMITTED;
- toCommitHandlingLab(signal);
-}//Dbtc::execCOMMITCONF()
-
-void Dbtc::toCommitHandlingLab(Signal* signal)
-{
- do {
- if (tcurrentReplicaNo != (Uint8)Z8NIL) {
- jam();
- arrGuard(tcurrentReplicaNo, 4);
- switch (tcConnectptr.p->failData[tcurrentReplicaNo]) {
- case LqhTransConf::InvalidStatus:
- jam();
- /*empty*/;
- break;
- case LqhTransConf::Committed:
- jam();
- /*empty*/;
- break;
- case LqhTransConf::Prepared:
- jam();
- /*------------------------------------------------------------*/
- /* THE NODE WAS PREPARED AND IS WAITING FOR ABORT OR */
- /* COMMIT REQUEST FROM TC. */
- /*------------------------------------------------------------*/
- hostptr.i = tcConnectptr.p->tcNodedata[tcurrentReplicaNo];
- ptrCheckGuard(hostptr, chostFilesize, hostRecord);
- if (hostptr.p->hostStatus == HS_ALIVE) {
- jam();
- tblockref = calcLqhBlockRef(hostptr.i);
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- apiConnectptr.p->apiConnectstate = CS_WAIT_COMMIT_CONF;
- apiConnectptr.p->timeOutCounter = 0;
- tcConnectptr.p->tcConnectstate = OS_WAIT_COMMIT_CONF;
- signal->theData[0] = tcConnectptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = apiConnectptr.p->globalcheckpointid;
- signal->theData[3] = apiConnectptr.p->transid[0];
- signal->theData[4] = apiConnectptr.p->transid[1];
- signal->theData[5] = apiConnectptr.p->tcBlockref;
- signal->theData[6] = tcConnectptr.p->tcOprec;
- sendSignal(tblockref, GSN_COMMITREQ, signal, 7, JBB);
- return;
- }//if
- break;
- default:
- jam();
- systemErrorLab(signal);
- return;
- break;
- }//switch
- }//if
- if (apiConnectptr.p->currentReplicaNo > 0) {
- jam();
- /*------------------------------------------------------------*/
- /* THERE IS STILL ANOTHER REPLICA THAT NEEDS TO BE */
- /* COMMITTED. */
- /*------------------------------------------------------------*/
- apiConnectptr.p->currentReplicaNo--;
- tcurrentReplicaNo = apiConnectptr.p->currentReplicaNo;
- } else {
- /*------------------------------------------------------------*/
-      /*       THE LAST REPLICA IN THIS OPERATION HAS COMMITTED.    */
- /*------------------------------------------------------------*/
- tcConnectptr.i = tcConnectptr.p->nextTcConnect;
- if (tcConnectptr.i == RNIL) {
- /*------------------------------------------------------------*/
- /* WE HAVE COMPLETED THE COMMIT PHASE. WE CAN NOW REPORT*/
- /* THE COMMIT STATUS TO THE APPLICATION AND CONTINUE */
- /* WITH THE COMPLETE PHASE. */
- /*------------------------------------------------------------*/
- if (apiConnectptr.p->takeOverRec != (Uint8)Z8NIL) {
- jam();
- sendTCKEY_FAILCONF(signal, apiConnectptr.p);
- } else {
- jam();
- sendApiCommit(signal);
- }//if
- apiConnectptr.p->currentTcConnect = apiConnectptr.p->firstTcConnect;
- tcConnectptr.i = apiConnectptr.p->firstTcConnect;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
- apiConnectptr.p->currentReplicaNo = tcurrentReplicaNo;
- toCompleteHandlingLab(signal);
- return;
- }//if
- apiConnectptr.p->currentTcConnect = tcConnectptr.i;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
- tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
- }//if
- } while (1);
-}//Dbtc::toCommitHandlingLab()
-
-/*------------------------------------------------------------*/
-/*       COMMON PART TO HANDLE COMPLETE PHASE WHEN ANY NODE  */
-/*       HAS FAILED.                                          */
-/*------------------------------------------------------------*/
-/*       THE NODE WITH TNODEID HAS COMPLETED THE OPERATION    */
-/*------------------------------------------------------------*/
-void Dbtc::execCOMPLETECONF(Signal* signal)
-{
- UintR compare_transid1, compare_transid2;
-
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- tnodeid = signal->theData[1];
- if (ERROR_INSERTED(8047)) {
- CLEAR_ERROR_INSERT_VALUE;
- sendSignalWithDelay(cownref, GSN_COMPLETECONF, signal, 2000, 4);
- return;
- }//if
- if (tcConnectptr.i >= ctcConnectFilesize) {
- errorReport(signal, 3);
- return;
- }//if
- ptrAss(tcConnectptr, tcConnectRecord);
- if (tcConnectptr.p->tcConnectstate != OS_WAIT_COMPLETE_CONF) {
- warningReport(signal, 12);
- return;
- }//if
- apiConnectptr.i = tcConnectptr.p->apiConnect;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- if (apiConnectptr.p->apiConnectstate != CS_WAIT_COMPLETE_CONF) {
- warningReport(signal, 13);
- return;
- }//if
- compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[2];
- compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[3];
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
- warningReport(signal, 14);
- return;
- }//if
- arrGuard(apiConnectptr.p->currentReplicaNo, 4);
- if (tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo] !=
- tnodeid) {
- warningReport(signal, 15);
- return;
- }//if
- if (ERROR_INSERTED(8028)) {
- jam();
- systemErrorLab(signal);
- }//if
- tcConnectptr.p->tcConnectstate = OS_COMPLETED;
- tcurrentReplicaNo = (Uint8)Z8NIL;
- toCompleteHandlingLab(signal);
-}//Dbtc::execCOMPLETECONF()
-
-void Dbtc::toCompleteHandlingLab(Signal* signal)
-{
- do {
- if (tcurrentReplicaNo != (Uint8)Z8NIL) {
- jam();
- arrGuard(tcurrentReplicaNo, 4);
- switch (tcConnectptr.p->failData[tcurrentReplicaNo]) {
- case LqhTransConf::InvalidStatus:
- jam();
- /*empty*/;
- break;
- default:
- jam();
- /*------------------------------------------------------------*/
- /* THIS NODE DID NOT REPORT ANYTHING FOR THIS OPERATION */
- /* IT MUST HAVE FAILED. */
- /*------------------------------------------------------------*/
- /*------------------------------------------------------------*/
- /* SEND COMPLETEREQ TO THE NEXT REPLICA. */
- /*------------------------------------------------------------*/
- hostptr.i = tcConnectptr.p->tcNodedata[tcurrentReplicaNo];
- ptrCheckGuard(hostptr, chostFilesize, hostRecord);
- if (hostptr.p->hostStatus == HS_ALIVE) {
- jam();
- tblockref = calcLqhBlockRef(hostptr.i);
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- tcConnectptr.p->tcConnectstate = OS_WAIT_COMPLETE_CONF;
- apiConnectptr.p->apiConnectstate = CS_WAIT_COMPLETE_CONF;
- apiConnectptr.p->timeOutCounter = 0;
- tcConnectptr.p->apiConnect = apiConnectptr.i;
- signal->theData[0] = tcConnectptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = apiConnectptr.p->transid[0];
- signal->theData[3] = apiConnectptr.p->transid[1];
- signal->theData[4] = apiConnectptr.p->tcBlockref;
- signal->theData[5] = tcConnectptr.p->tcOprec;
- sendSignal(tblockref, GSN_COMPLETEREQ, signal, 6, JBB);
- return;
- }//if
- break;
- }//switch
- }//if
- if (apiConnectptr.p->currentReplicaNo != 0) {
- jam();
- /*------------------------------------------------------------*/
- /* THERE ARE STILL MORE REPLICAS IN THIS OPERATION. WE */
- /* NEED TO CONTINUE WITH THOSE REPLICAS. */
- /*------------------------------------------------------------*/
- apiConnectptr.p->currentReplicaNo--;
- tcurrentReplicaNo = apiConnectptr.p->currentReplicaNo;
- } else {
- tcConnectptr.i = tcConnectptr.p->nextTcConnect;
- if (tcConnectptr.i == RNIL) {
- /*------------------------------------------------------------*/
- /* WE HAVE COMPLETED THIS TRANSACTION NOW AND CAN */
- /* CONTINUE THE PROCESS WITH THE NEXT TRANSACTION. */
- /*------------------------------------------------------------*/
- if (apiConnectptr.p->takeOverRec != (Uint8)Z8NIL) {
- jam();
- signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
- signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec;
- signal->theData[2] = apiConnectptr.p->takeOverInd;
- sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
- releaseTakeOver(signal);
- } else {
- jam();
- releaseTransResources(signal);
- }//if
- return;
- }//if
- /*------------------------------------------------------------*/
- /* WE HAVE COMPLETED AN OPERATION AND THERE ARE MORE TO */
- /* COMPLETE. TAKE THE NEXT OPERATION AND START WITH THE */
- /* FIRST REPLICA SINCE IT IS THE COMPLETE PHASE. */
- /*------------------------------------------------------------*/
- apiConnectptr.p->currentTcConnect = tcConnectptr.i;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
- apiConnectptr.p->currentReplicaNo = tcurrentReplicaNo;
- }//if
- } while (1);
-}//Dbtc::toCompleteHandlingLab()
-
-/*------------------------------------------------------------*/
-/* */
-/* FIND THE API CONNECT RECORD FOR THIS TRANSACTION */
-/* DURING TAKE OVER FROM A FAILED TC. IF NONE EXISTS */
-/* YET THEN SEIZE A NEW API CONNECT RECORD AND LINK IT */
-/* INTO THE HASH TABLE. */
-/*------------------------------------------------------------*/
-void Dbtc::findApiConnectFail(Signal* signal)
-{
- ApiConnectRecordPtr fafPrevApiConnectptr;
- ApiConnectRecordPtr fafNextApiConnectptr;
- UintR tfafHashNumber;
-
- tfafHashNumber = ttransid1 & 511;
- fafPrevApiConnectptr.i = RNIL;
- ptrNull(fafPrevApiConnectptr);
- arrGuard(tfafHashNumber, 512);
- fafNextApiConnectptr.i = ctransidFailHash[tfafHashNumber];
- ptrCheck(fafNextApiConnectptr, capiConnectFilesize, apiConnectRecord);
-FAF_LOOP:
- jam();
- if (fafNextApiConnectptr.i == RNIL) {
- jam();
- if (cfirstfreeApiConnectFail == RNIL) {
- jam();
- systemErrorLab(signal);
- return;
- }//if
- seizeApiConnectFail(signal);
- if (fafPrevApiConnectptr.i == RNIL) {
- jam();
- ctransidFailHash[tfafHashNumber] = apiConnectptr.i;
- } else {
- jam();
- ptrGuard(fafPrevApiConnectptr);
- fafPrevApiConnectptr.p->nextApiConnect = apiConnectptr.i;
- }//if
- apiConnectptr.p->nextApiConnect = RNIL;
- initApiConnectFail(signal);
- } else {
- jam();
- fafPrevApiConnectptr.i = fafNextApiConnectptr.i;
- fafPrevApiConnectptr.p = fafNextApiConnectptr.p;
- apiConnectptr.i = fafNextApiConnectptr.i;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- fafNextApiConnectptr.i = apiConnectptr.p->nextApiConnect;
- ptrCheck(fafNextApiConnectptr, capiConnectFilesize, apiConnectRecord);
- if ((apiConnectptr.p->transid[1] != ttransid2) ||
- (apiConnectptr.p->transid[0] != ttransid1)) {
- goto FAF_LOOP;
- }//if
- updateApiStateFail(signal);
- }//if
-}//Dbtc::findApiConnectFail()
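
findApiConnectFail() locates the transaction's api connect record in a 512-bucket hash table keyed on transid1 & 511, following the nextApiConnect chain and seizing a fresh record when no match is found. A simplified sketch of the lookup side of that structure (the record type and pool below are assumptions; the real code also links a newly seized record at the tail of the chain):

#include <cstdint>
#include <vector>

static const uint32_t NIL = 0xFFFFFFFF;   // stands in for RNIL

struct FailConnectRec {
  uint32_t transid[2];
  uint32_t nextApiConnect;                // next record in the same bucket
};

// Walk the bucket chain for (t1, t2); return the record index or NIL.
uint32_t lookupFailConnect(const std::vector<FailConnectRec>& pool,
                           const uint32_t buckets[512],
                           uint32_t t1, uint32_t t2)
{
  for (uint32_t i = buckets[t1 & 511]; i != NIL; i = pool[i].nextApiConnect)
    if (pool[i].transid[0] == t1 && pool[i].transid[1] == t2)
      return i;
  return NIL;                             // caller would seize + link a new one
}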
-
-/*----------------------------------------------------------*/
-/* FIND THE TC CONNECT AND IF NOT FOUND ALLOCATE A NEW */
-/*----------------------------------------------------------*/
-void Dbtc::findTcConnectFail(Signal* signal)
-{
- UintR tftfHashNumber;
-
- tftfHashNumber = (ttransid1 ^ ttcOprec) & 1023;
- tcConnectptr.i = ctcConnectFailHash[tftfHashNumber];
- do {
- if (tcConnectptr.i == RNIL) {
- jam();
- if (cfirstfreeTcConnectFail == RNIL) {
- jam();
- systemErrorLab(signal);
- return;
- }//if
- seizeTcConnectFail(signal);
- linkTcInConnectionlist(signal);
- tcConnectptr.p->nextTcFailHash = ctcConnectFailHash[tftfHashNumber];
- ctcConnectFailHash[tftfHashNumber] = tcConnectptr.i;
- initTcConnectFail(signal);
- return;
- } else {
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- if (tcConnectptr.p->tcOprec != ttcOprec) {
- jam(); /* FRAGMENTID = TC_OPREC HERE, LOOP ANOTHER TURN */
- tcConnectptr.i = tcConnectptr.p->nextTcFailHash;
- } else {
- updateTcStateFail(signal);
- return;
- }//if
- }//if
- } while (1);
-}//Dbtc::findTcConnectFail()
-
-/*----------------------------------------------------------*/
-/* INITIALISE AN API CONNECT FAIL RECORD */
-/*----------------------------------------------------------*/
-void Dbtc::initApiConnectFail(Signal* signal)
-{
- apiConnectptr.p->transid[0] = ttransid1;
- apiConnectptr.p->transid[1] = ttransid2;
- apiConnectptr.p->firstTcConnect = RNIL;
- apiConnectptr.p->currSavePointId = 0;
- apiConnectptr.p->lastTcConnect = RNIL;
- tblockref = calcTcBlockRef(tcNodeFailptr.p->takeOverNode);
-
- apiConnectptr.p->tcBlockref = tblockref;
- apiConnectptr.p->ndbapiBlockref = 0;
- apiConnectptr.p->ndbapiConnect = 0;
- apiConnectptr.p->buddyPtr = RNIL;
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
- switch(ttransStatus){
- case LqhTransConf::Committed:
- jam();
- apiConnectptr.p->globalcheckpointid = tgci;
- apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTED;
- break;
- case LqhTransConf::Prepared:
- jam();
- apiConnectptr.p->apiConnectstate = CS_FAIL_PREPARED;
- break;
- case LqhTransConf::Aborted:
- jam();
- apiConnectptr.p->apiConnectstate = CS_FAIL_ABORTED;
- break;
- case LqhTransConf::Marker:
- jam();
- apiConnectptr.p->apiConnectstate = CS_FAIL_COMPLETED;
- break;
- default:
- jam();
- systemErrorLab(signal);
-  }//switch
- apiConnectptr.p->commitAckMarker = RNIL;
- if(LqhTransConf::getMarkerFlag(treqinfo)){
- jam();
- CommitAckMarkerPtr tmp;
- m_commitAckMarkerHash.seize(tmp);
-
- ndbrequire(tmp.i != RNIL);
-
- apiConnectptr.p->commitAckMarker = tmp.i;
- tmp.p->transid1 = ttransid1;
- tmp.p->transid2 = ttransid2;
- tmp.p->apiNodeId = refToNode(tapplRef);
- tmp.p->noOfLqhs = 1;
- tmp.p->lqhNodeId[0] = tnodeid;
- tmp.p->apiConnectPtr = apiConnectptr.i;
- m_commitAckMarkerHash.add(tmp);
- }
-}//Dbtc::initApiConnectFail()
-
-/*------------------------------------------------------------*/
-/* INITIALISE AT TC CONNECT AT TAKE OVER WHEN ALLOCATING*/
-/* THE TC CONNECT RECORD. */
-/*------------------------------------------------------------*/
-void Dbtc::initTcConnectFail(Signal* signal)
-{
- tcConnectptr.p->apiConnect = apiConnectptr.i;
- tcConnectptr.p->tcOprec = ttcOprec;
- Uint32 treplicaNo = LqhTransConf::getReplicaNo(treqinfo);
- for (Uint32 i = 0; i < MAX_REPLICAS; i++) {
- tcConnectptr.p->failData[i] = LqhTransConf::InvalidStatus;
- }//for
- tcConnectptr.p->tcNodedata[treplicaNo] = tnodeid;
- tcConnectptr.p->failData[treplicaNo] = ttransStatus;
- tcConnectptr.p->lastReplicaNo = LqhTransConf::getLastReplicaNo(treqinfo);
- tcConnectptr.p->dirtyOp = LqhTransConf::getDirtyFlag(treqinfo);
-
-}//Dbtc::initTcConnectFail()
-
-/*----------------------------------------------------------*/
-/* INITIALISE TC NODE FAIL RECORD. */
-/*----------------------------------------------------------*/
-void Dbtc::initTcFail(Signal* signal)
-{
- tcNodeFailptr.i = 0;
- ptrAss(tcNodeFailptr, tcFailRecord);
- tcNodeFailptr.p->queueIndex = 0;
- tcNodeFailptr.p->failStatus = FS_IDLE;
-}//Dbtc::initTcFail()
-
-/*----------------------------------------------------------*/
-/* RELEASE_TAKE_OVER */
-/*----------------------------------------------------------*/
-void Dbtc::releaseTakeOver(Signal* signal)
-{
- TcConnectRecordPtr rtoNextTcConnectptr;
-
- rtoNextTcConnectptr.i = apiConnectptr.p->firstTcConnect;
- do {
- jam();
- tcConnectptr.i = rtoNextTcConnectptr.i;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- rtoNextTcConnectptr.i = tcConnectptr.p->nextTcConnect;
- releaseTcConnectFail(signal);
- } while (rtoNextTcConnectptr.i != RNIL);
- releaseApiConnectFail(signal);
-}//Dbtc::releaseTakeOver()
-
-/*---------------------------------------------------------------------------*/
-/* SETUP_FAIL_DATA */
-/* SETUP DATA TO REUSE TAKE OVER CODE FOR HANDLING ABORT/COMMIT IN NODE */
-/* FAILURE SITUATIONS. */
-/*---------------------------------------------------------------------------*/
-void Dbtc::setupFailData(Signal* signal)
-{
- tcConnectptr.i = apiConnectptr.p->firstTcConnect;
- do {
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- switch (tcConnectptr.p->tcConnectstate) {
- case OS_PREPARED:
- case OS_COMMITTING:
- jam();
- for (tindex = 0; tindex <= tcConnectptr.p->lastReplicaNo; tindex++) {
- jam();
- /*-------------------------------------------------------------------
- * KEYDATA IS USED TO KEEP AN INDICATION OF STATE IN LQH.
- * IN THIS CASE ALL LQH'S ARE PREPARED AND WAITING FOR
- * COMMIT/ABORT DECISION.
- *------------------------------------------------------------------*/
- arrGuard(tindex, 4);
- tcConnectptr.p->failData[tindex] = LqhTransConf::Prepared;
- }//for
- break;
- case OS_COMMITTED:
- case OS_COMPLETING:
- jam();
- for (tindex = 0; tindex <= tcConnectptr.p->lastReplicaNo; tindex++) {
- jam();
- /*-------------------------------------------------------------------
- * KEYDATA IS USED TO KEEP AN INDICATION OF STATE IN LQH.
- * IN THIS CASE ALL LQH'S ARE COMMITTED AND WAITING FOR
- * COMPLETE MESSAGE.
- *------------------------------------------------------------------*/
- arrGuard(tindex, 4);
- tcConnectptr.p->failData[tindex] = LqhTransConf::Committed;
- }//for
- break;
- case OS_COMPLETED:
- jam();
- for (tindex = 0; tindex <= tcConnectptr.p->lastReplicaNo; tindex++) {
- jam();
- /*-------------------------------------------------------------------
- * KEYDATA IS USED TO KEEP AN INDICATION OF STATE IN LQH.
- * IN THIS CASE ALL LQH'S ARE COMPLETED.
- *-------------------------------------------------------------------*/
- arrGuard(tindex, 4);
- tcConnectptr.p->failData[tindex] = LqhTransConf::InvalidStatus;
- }//for
- break;
- default:
- jam();
- sendSystemError(signal);
- break;
- }//switch
- if (tabortInd != ZCOMMIT_SETUP) {
- jam();
- for (UintR Ti = 0; Ti <= tcConnectptr.p->lastReplicaNo; Ti++) {
- hostptr.i = tcConnectptr.p->tcNodedata[Ti];
- ptrCheckGuard(hostptr, chostFilesize, hostRecord);
- if (hostptr.p->hostStatus != HS_ALIVE) {
- jam();
- /*-----------------------------------------------------------------
- * FAILURE OF ANY INVOLVED NODE ALWAYS INVOKES AN ABORT DECISION.
- *-----------------------------------------------------------------*/
- tabortInd = ZTRUE;
- }//if
- }//for
- }//if
- tcConnectptr.p->tcConnectstate = OS_TAKE_OVER;
- tcConnectptr.p->tcOprec = tcConnectptr.i;
- tcConnectptr.i = tcConnectptr.p->nextTcConnect;
- } while (tcConnectptr.i != RNIL);
- apiConnectptr.p->tcBlockref = cownref;
- apiConnectptr.p->currentTcConnect = apiConnectptr.p->firstTcConnect;
- tcConnectptr.i = apiConnectptr.p->firstTcConnect;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
- tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
-}//Dbtc::setupFailData()
-
-/*----------------------------------------------------------*/
-/* UPDATE THE STATE OF THE API CONNECT FOR THIS PART. */
-/*----------------------------------------------------------*/
-void Dbtc::updateApiStateFail(Signal* signal)
-{
- if(LqhTransConf::getMarkerFlag(treqinfo)){
- jam();
- const Uint32 marker = apiConnectptr.p->commitAckMarker;
- if(marker == RNIL){
- jam();
-
- CommitAckMarkerPtr tmp;
- m_commitAckMarkerHash.seize(tmp);
- ndbrequire(tmp.i != RNIL);
-
- apiConnectptr.p->commitAckMarker = tmp.i;
- tmp.p->transid1 = ttransid1;
- tmp.p->transid2 = ttransid2;
- tmp.p->apiNodeId = refToNode(tapplRef);
- tmp.p->noOfLqhs = 1;
- tmp.p->lqhNodeId[0] = tnodeid;
- tmp.p->apiConnectPtr = apiConnectptr.i;
- m_commitAckMarkerHash.add(tmp);
- } else {
- jam();
-
- CommitAckMarkerPtr tmp;
- tmp.i = marker;
- tmp.p = m_commitAckMarkerHash.getPtr(marker);
-
- const Uint32 noOfLqhs = tmp.p->noOfLqhs;
- ndbrequire(noOfLqhs < MAX_REPLICAS);
- tmp.p->lqhNodeId[noOfLqhs] = tnodeid;
- tmp.p->noOfLqhs = (noOfLqhs + 1);
- }
- }
-
- switch (ttransStatus) {
- case LqhTransConf::Committed:
- jam();
- switch (apiConnectptr.p->apiConnectstate) {
- case CS_FAIL_COMMITTING:
- case CS_FAIL_COMMITTED:
- jam();
- ndbrequire(tgci == apiConnectptr.p->globalcheckpointid);
- break;
- case CS_FAIL_PREPARED:
- jam();
- apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTING;
- apiConnectptr.p->globalcheckpointid = tgci;
- break;
- case CS_FAIL_COMPLETED:
- jam();
- apiConnectptr.p->globalcheckpointid = tgci;
- apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTED;
- break;
- default:
- jam();
- systemErrorLab(signal);
- break;
- }//switch
- break;
- case LqhTransConf::Prepared:
- jam();
- switch (apiConnectptr.p->apiConnectstate) {
- case CS_FAIL_COMMITTED:
- jam();
- apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTING;
- break;
- case CS_FAIL_ABORTED:
- jam();
- apiConnectptr.p->apiConnectstate = CS_FAIL_ABORTING;
- break;
- case CS_FAIL_COMMITTING:
- case CS_FAIL_PREPARED:
- case CS_FAIL_ABORTING:
- jam();
- /*empty*/;
- break;
- default:
- jam();
- systemErrorLab(signal);
- break;
- }//switch
- break;
- case LqhTransConf::Aborted:
- jam();
- switch (apiConnectptr.p->apiConnectstate) {
- case CS_FAIL_COMMITTING:
- case CS_FAIL_COMMITTED:
- jam();
- systemErrorLab(signal);
- break;
- case CS_FAIL_PREPARED:
- jam();
- apiConnectptr.p->apiConnectstate = CS_FAIL_ABORTING;
- break;
- case CS_FAIL_ABORTING:
- case CS_FAIL_ABORTED:
- jam();
- /*empty*/;
- break;
- default:
- jam();
- systemErrorLab(signal);
- break;
- }//switch
- break;
- case LqhTransConf::Marker:
- jam();
- break;
- default:
- jam();
- systemErrorLab(signal);
- break;
- }//switch
-}//Dbtc::updateApiStateFail()
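
updateApiStateFail() folds each LQH report into the transaction's CS_FAIL_* state: a Committed report, for example, moves a prepared transaction to committing, while an Aborted report moves it to aborting. A condensed sketch of that merge (local enum names stand in for the DBTC and LqhTransConf values; the systemError cases for impossible combinations are omitted):

enum class Report    { Committed, Prepared, Aborted, Marker };
enum class FailState { Prepared, Committing, Committed,
                       Aborting, Aborted, Completed };

// Merge one per-operation report from an LQH into the transaction-level state.
FailState mergeFailState(FailState s, Report r)
{
  switch (r) {
  case Report::Committed:
    if (s == FailState::Prepared)  return FailState::Committing;
    if (s == FailState::Completed) return FailState::Committed;
    return s;                                  // already committing/committed
  case Report::Prepared:
    if (s == FailState::Committed) return FailState::Committing;
    if (s == FailState::Aborted)   return FailState::Aborting;
    return s;                                  // prepared/committing/aborting
  case Report::Aborted:
    if (s == FailState::Prepared)  return FailState::Aborting;
    return s;                                  // already aborting/aborted
  case Report::Marker:
  default:
    return s;                                  // markers do not change state
  }
}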
-
-/*------------------------------------------------------------*/
-/* UPDATE_TC_STATE_FAIL */
-/* */
-/* WE NEED TO UPDATE THE STATUS OF TC_CONNECT RECORD AND*/
-/* WE ALSO NEED TO CHECK THAT THERE IS CONSISTENCY */
-/* BETWEEN THE DIFFERENT REPLICAS. */
-/*------------------------------------------------------------*/
-void Dbtc::updateTcStateFail(Signal* signal)
-{
- const Uint8 treplicaNo = LqhTransConf::getReplicaNo(treqinfo);
- const Uint8 tlastReplicaNo = LqhTransConf::getLastReplicaNo(treqinfo);
- const Uint8 tdirtyOp = LqhTransConf::getDirtyFlag(treqinfo);
-
- TcConnectRecord * regTcPtr = tcConnectptr.p;
-
- ndbrequire(regTcPtr->apiConnect == apiConnectptr.i);
- ndbrequire(regTcPtr->failData[treplicaNo] == LqhTransConf::InvalidStatus);
- ndbrequire(regTcPtr->lastReplicaNo == tlastReplicaNo);
- ndbrequire(regTcPtr->dirtyOp == tdirtyOp);
-
- regTcPtr->tcNodedata[treplicaNo] = tnodeid;
- regTcPtr->failData[treplicaNo] = ttransStatus;
-}//Dbtc::updateTcStateFail()
-
-void Dbtc::execTCGETOPSIZEREQ(Signal* signal)
-{
- jamEntry();
- CRASH_INSERTION(8000);
-
- UintR Tuserpointer = signal->theData[0]; /* DBDIH POINTER */
- BlockReference Tusersblkref = signal->theData[1];/* DBDIH BLOCK REFERENCE */
- signal->theData[0] = Tuserpointer;
- signal->theData[1] = coperationsize;
- sendSignal(Tusersblkref, GSN_TCGETOPSIZECONF, signal, 2, JBB);
-}//Dbtc::execTCGETOPSIZEREQ()
-
-void Dbtc::execTC_CLOPSIZEREQ(Signal* signal)
-{
- jamEntry();
- CRASH_INSERTION(8001);
-
- tuserpointer = signal->theData[0];
- tusersblkref = signal->theData[1];
- /* DBDIH BLOCK REFERENCE */
- coperationsize = 0;
- signal->theData[0] = tuserpointer;
- sendSignal(tusersblkref, GSN_TC_CLOPSIZECONF, signal, 1, JBB);
-}//Dbtc::execTC_CLOPSIZEREQ()
-
-/* ######################################################################### */
-/* ####### ERROR MODULE ####### */
-/* ######################################################################### */
-void Dbtc::tabStateErrorLab(Signal* signal)
-{
- terrorCode = ZSTATE_ERROR;
- releaseAtErrorLab(signal);
-}//Dbtc::tabStateErrorLab()
-
-void Dbtc::wrongSchemaVersionErrorLab(Signal* signal)
-{
- const TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0];
-
- TableRecordPtr tabPtr;
- tabPtr.i = tcKeyReq->tableId;
- const Uint32 schemVer = tcKeyReq->tableSchemaVersion;
- ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
-
- terrorCode = tabPtr.p->getErrorCode(schemVer);
-
- abortErrorLab(signal);
-}//Dbtc::wrongSchemaVersionErrorLab()
-
-void Dbtc::noFreeConnectionErrorLab(Signal* signal)
-{
- terrorCode = ZNO_FREE_TC_CONNECTION;
- abortErrorLab(signal); /* RECORD. OTHERWISE GOTO ERRORHANDLING */
-}//Dbtc::noFreeConnectionErrorLab()
-
-void Dbtc::aiErrorLab(Signal* signal)
-{
- terrorCode = ZLENGTH_ERROR;
- abortErrorLab(signal);
-}//Dbtc::aiErrorLab()
-
-void Dbtc::seizeAttrbuferrorLab(Signal* signal)
-{
- terrorCode = ZGET_ATTRBUF_ERROR;
- abortErrorLab(signal);
-}//Dbtc::seizeAttrbuferrorLab()
-
-void Dbtc::seizeDatabuferrorLab(Signal* signal)
-{
- terrorCode = ZGET_DATAREC_ERROR;
- releaseAtErrorLab(signal);
-}//Dbtc::seizeDatabuferrorLab()
-
-void Dbtc::releaseAtErrorLab(Signal* signal)
-{
- ptrGuard(tcConnectptr);
- tcConnectptr.p->tcConnectstate = OS_ABORTING;
- /*-------------------------------------------------------------------------*
- * A FAILURE OF THIS OPERATION HAS OCCURRED. THIS FAILURE WAS EITHER A
- * FAULTY PARAMETER OR A RESOURCE THAT WAS NOT AVAILABLE.
- * WE WILL ABORT THE ENTIRE TRANSACTION SINCE THIS IS THE SAFEST PATH
- * TO HANDLE THIS PROBLEM.
- * SINCE WE HAVE NOT YET CONTACTED ANY LQH WE SET NUMBER OF NODES TO ZERO
- * WE ALSO SET THE STATE TO ABORTING TO INDICATE THAT WE ARE NOT EXPECTING
- * ANY SIGNALS.
- *-------------------------------------------------------------------------*/
- tcConnectptr.p->noOfNodes = 0;
- abortErrorLab(signal);
-}//Dbtc::releaseAtErrorLab()
-
-void Dbtc::warningHandlerLab(Signal* signal)
-{
- ndbassert(false);
-}//Dbtc::warningHandlerLab()
-
-void Dbtc::systemErrorLab(Signal* signal)
-{
- progError(0, 0);
-}//Dbtc::systemErrorLab()
-
-
-/* ######################################################################### *
- * ####### SCAN MODULE ####### *
- * ######################################################################### *
-
- The application orders a scan of a table. We divide the scan into a scan on
- each fragment. The scan uses the primary replicas since the scan might be
- used for an update in a separate transaction.
-
- Scans are always done as a separate transaction. Locks from the scan
- can be overtaken by another transaction. Scans can never lock the entire
- table. Locks are released immediately after the read has been verified
- by the application. There is not even an option to leave the locks.
- The reason is that this would hurt real-time behaviour too much.
-
- -# The first step in handling a scan of a table is to receive all signals
- defining the scan. If failures occur during this step we release all
-    resources and reply with SCAN_TABREF providing the error code.
- If system load is too high, the request will not be allowed.
-
- -# The second step retrieves the number of fragments that exist in the
-    table. It also ensures that the table actually exists. After this,
- the scan is ready to be parallelised. The idea is that the receiving
- process (hereafter called delivery process) will start up a number
- of scan processes. Each of these scan processes will
- independently scan one fragment at a time. The delivery
- process object is the scan record and the scan process object is
- the scan fragment record plus the scan operation record.
-
- -# The third step is thus performed in parallel. In the third step each
- scan process retrieves the primary replica of the fragment it will
- scan. Then it starts the scan as soon as the load on that node permits.
-
-  The LQH returns either when it has retrieved the maximum number of tuples
-  or when it has retrieved at least one tuple and is prevented by a lock from
-  retrieving the next tuple. This ensures that a scan process can never be
-  involved in a deadlock situation.
-
- When the scan process receives a number of tuples to report to the
- application it checks the state of the delivery process. Only one delivery
- at a time is handled by the application. Thus if the delivery process
-  has already sent a number of tuples to the application, this set of tuples
-  is queued.
-
- When the application requests the next set of tuples it is immediately
- delivered if any are queued, otherwise it waits for the next scan
- process that is ready to deliver.
-
-
- ERROR HANDLING
-
- As already mentioned it is rather easy to handle errors before the scan
- processes have started. In this case it is enough to release the resources
- and send SCAN_TAB_REF.
-
-  If an error occurs in any of the scan processes then we have to stop all
-  scan processes. We do, however, only stop the delivery process and ask
-  the api to order us to close the scan. The reason is that we could easily
-  run into difficult timing problems, since the application and this
-  block are out of sync; we therefore always start by reporting the error to
-  the application and then wait for a close request. This error report uses
-  the SCAN_TABREF signal with a special error code that the api must check for.
-
-
- CLOSING AN ACTIVE SCAN
-
- The application can close a scan for several reasons before it is completed.
- One reason was mentioned above where an error in a scan process led to a
- request to close the scan. Another reason could simply be that the
- application found what it looked for and is thus not interested in the
- rest of the scan.
-
- IT COULD ALSO BE DEPENDENT ON INTERNAL ERRORS IN THE API.
-
- When a close scan request is received, all scan processes are stopped and all
- resources belonging to those scan processes are released. Stopping the scan
- processes most often includes communication with an LQH where the local scan
-  is controlled. Finally all resources belonging to the scan are released and
-  the SCAN_TABCONF is sent with an indication that the scan is closed.
-
-
- CLOSING A COMPLETED SCAN
-
- When all scan processes are completed then a report is sent to the
- application which indicates that no more tuples can be fetched.
-  The application will then send a close scan request, and the same actions
-  as when closing an active scan are performed.
- In this case it will of course not find any active scan processes.
- It will even find all scan processes already released.
-
- The reason for requiring the api to close the scan is the same as above.
-  It is to avoid any timing problems caused by the api and this block
-  being out of sync.
-
- * ######################################################################## */
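
The delivery model described above can be condensed into a small sketch: fragment scan processes hand completed batches to the delivery side, which forwards exactly one batch at a time to the application and queues the rest (the types below are illustrative; DBTC keeps this state in the scan record and its queued fragment list, not in a std::queue):

#include <cstdint>
#include <queue>

struct Batch { uint32_t fragId; uint32_t rows; };

class ScanDelivery {
  std::queue<Batch> m_queued;   // batches waiting for the application
  bool m_outstanding = false;   // one delivery to the application at a time
public:
  void batchReady(const Batch& b) {        // a scan process finished a batch
    if (m_outstanding) { m_queued.push(b); return; }
    m_outstanding = true;
    deliver(b);
  }
  void nextRequested() {                   // application asks for more tuples
    if (m_queued.empty()) { m_outstanding = false; return; }
    deliver(m_queued.front());
    m_queued.pop();
  }
private:
  void deliver(const Batch&) { /* would send SCAN_TABCONF to the api here */ }
};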
-void Dbtc::execSCAN_TABREQ(Signal* signal)
-{
- const ScanTabReq * const scanTabReq = (ScanTabReq *)&signal->theData[0];
- const Uint32 ri = scanTabReq->requestInfo;
- const Uint32 aiLength = (scanTabReq->attrLenKeyLen & 0xFFFF);
- const Uint32 keyLen = scanTabReq->attrLenKeyLen >> 16;
- const Uint32 schemaVersion = scanTabReq->tableSchemaVersion;
- const Uint32 transid1 = scanTabReq->transId1;
- const Uint32 transid2 = scanTabReq->transId2;
- const Uint32 tmpXX = scanTabReq->buddyConPtr;
- const Uint32 buddyPtr = (tmpXX == 0xFFFFFFFF ? RNIL : tmpXX);
- Uint32 currSavePointId = 0;
-
- Uint32 scanConcurrency = scanTabReq->getParallelism(ri);
- Uint32 noOprecPerFrag = ScanTabReq::getScanBatch(ri);
- Uint32 scanParallel = scanConcurrency;
- Uint32 errCode;
- ScanRecordPtr scanptr;
-
- jamEntry();
-
- SegmentedSectionPtr api_op_ptr;
- signal->getSection(api_op_ptr, 0);
- copy(&cdata[0], api_op_ptr);
- releaseSections(signal);
-
- apiConnectptr.i = scanTabReq->apiConnectPtr;
- tabptr.i = scanTabReq->tableId;
-
- if (apiConnectptr.i >= capiConnectFilesize)
- {
- jam();
- warningHandlerLab(signal);
- return;
- }//if
-
- ptrAss(apiConnectptr, apiConnectRecord);
- ApiConnectRecord * transP = apiConnectptr.p;
-
- if (transP->apiConnectstate != CS_CONNECTED) {
- jam();
- // could be left over from TCKEYREQ rollback
- if (transP->apiConnectstate == CS_ABORTING &&
- transP->abortState == AS_IDLE) {
- jam();
- } else if(transP->apiConnectstate == CS_STARTED &&
- transP->firstTcConnect == RNIL){
- jam();
- // left over from simple/dirty read
- } else {
- jam();
- errCode = ZSTATE_ERROR;
- goto SCAN_TAB_error_no_state_change;
- }
- }
-
- if(tabptr.i >= ctabrecFilesize)
- {
- errCode = ZUNKNOWN_TABLE_ERROR;
- goto SCAN_TAB_error;
- }
-
- ptrAss(tabptr, tableRecord);
- if ((aiLength == 0) ||
- (!tabptr.p->checkTable(schemaVersion)) ||
- (scanConcurrency == 0) ||
- (cfirstfreeTcConnect == RNIL) ||
- (cfirstfreeScanrec == RNIL)) {
- goto SCAN_error_check;
- }
- if (buddyPtr != RNIL) {
- jam();
- ApiConnectRecordPtr buddyApiPtr;
- buddyApiPtr.i = buddyPtr;
- ptrCheckGuard(buddyApiPtr, capiConnectFilesize, apiConnectRecord);
- if ((transid1 == buddyApiPtr.p->transid[0]) &&
- (transid2 == buddyApiPtr.p->transid[1])) {
- jam();
-
- if (buddyApiPtr.p->apiConnectstate == CS_ABORTING) {
- // transaction has been aborted
- jam();
- errCode = buddyApiPtr.p->returncode;
- goto SCAN_TAB_error;
- }//if
- currSavePointId = buddyApiPtr.p->currSavePointId;
- buddyApiPtr.p->currSavePointId++;
- }
- }
-
- seizeTcConnect(signal);
- tcConnectptr.p->apiConnect = apiConnectptr.i;
- tcConnectptr.p->tcConnectstate = OS_WAIT_SCAN;
- apiConnectptr.p->lastTcConnect = tcConnectptr.i;
-
- seizeCacheRecord(signal);
- cachePtr.p->keylen = keyLen;
- cachePtr.p->save1 = 0;
- cachePtr.p->distributionKey = scanTabReq->distributionKey;
- cachePtr.p->distributionKeyIndicator= ScanTabReq::getDistributionKeyFlag(ri);
- scanptr = seizeScanrec(signal);
-
- ndbrequire(transP->apiScanRec == RNIL);
- ndbrequire(scanptr.p->scanApiRec == RNIL);
-
- initScanrec(scanptr, scanTabReq, scanParallel, noOprecPerFrag);
-
- transP->apiScanRec = scanptr.i;
- transP->returncode = 0;
- transP->transid[0] = transid1;
- transP->transid[1] = transid2;
- transP->buddyPtr = buddyPtr;
-
- // The scan is started
- transP->apiConnectstate = CS_START_SCAN;
- transP->currSavePointId = currSavePointId;
-
- /**********************************************************
- * We start the timer on scanRec to be able to discover a
-   * timeout in the API; the API is now in charge!
- ***********************************************************/
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- updateBuddyTimer(apiConnectptr);
-
- /***********************************************************
- * WE HAVE NOW RECEIVED ALL REFERENCES TO SCAN OBJECTS IN
-   * THE API. WE ARE NOW READY TO RECEIVE THE ATTRIBUTE INFO,
-   * IF THERE IS ANY TO RECEIVE.
- **********************************************************/
- scanptr.p->scanState = ScanRecord::WAIT_AI;
- return;
-
- SCAN_error_check:
- if (aiLength == 0) {
-    jam();
- errCode = ZSCAN_AI_LEN_ERROR;
- goto SCAN_TAB_error;
- }//if
- if (!tabptr.p->checkTable(schemaVersion)){
- jam();
- errCode = tabptr.p->getErrorCode(schemaVersion);
- goto SCAN_TAB_error;
- }//if
- if (scanConcurrency == 0) {
- jam();
- errCode = ZNO_CONCURRENCY_ERROR;
- goto SCAN_TAB_error;
- }//if
- if (cfirstfreeTcConnect == RNIL) {
- jam();
- errCode = ZNO_FREE_TC_CONNECTION;
- goto SCAN_TAB_error;
- }//if
- ndbrequire(cfirstfreeScanrec == RNIL);
- jam();
- errCode = ZNO_SCANREC_ERROR;
- goto SCAN_TAB_error;
-
-SCAN_TAB_error:
- jam();
- /**
-   * Prepare for upcoming ATTRINFO/KEYINFO
- */
- transP->apiConnectstate = CS_ABORTING;
- transP->abortState = AS_IDLE;
- transP->transid[0] = transid1;
- transP->transid[1] = transid2;
-
-SCAN_TAB_error_no_state_change:
-
- ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
- ref->apiConnectPtr = transP->ndbapiConnect;
- ref->transId1 = transid1;
- ref->transId2 = transid2;
- ref->errorCode = errCode;
- ref->closeNeeded = 0;
- sendSignal(transP->ndbapiBlockref, GSN_SCAN_TABREF,
- signal, ScanTabRef::SignalLength, JBB);
- return;
-}//Dbtc::execSCAN_TABREQ()
-
-void Dbtc::initScanrec(ScanRecordPtr scanptr,
- const ScanTabReq * scanTabReq,
- UintR scanParallel,
- UintR noOprecPerFrag)
-{
- const UintR ri = scanTabReq->requestInfo;
- scanptr.p->scanTcrec = tcConnectptr.i;
- scanptr.p->scanApiRec = apiConnectptr.i;
- scanptr.p->scanAiLength = scanTabReq->attrLenKeyLen & 0xFFFF;
- scanptr.p->scanKeyLen = scanTabReq->attrLenKeyLen >> 16;
- scanptr.p->scanTableref = tabptr.i;
- scanptr.p->scanSchemaVersion = scanTabReq->tableSchemaVersion;
- scanptr.p->scanParallel = scanParallel;
- scanptr.p->first_batch_size_rows = scanTabReq->first_batch_size;
- scanptr.p->batch_byte_size = scanTabReq->batch_byte_size;
- scanptr.p->batch_size_rows = noOprecPerFrag;
-
- Uint32 tmp = 0;
- ScanFragReq::setLockMode(tmp, ScanTabReq::getLockMode(ri));
- ScanFragReq::setHoldLockFlag(tmp, ScanTabReq::getHoldLockFlag(ri));
- ScanFragReq::setKeyinfoFlag(tmp, ScanTabReq::getKeyinfoFlag(ri));
- ScanFragReq::setReadCommittedFlag(tmp,ScanTabReq::getReadCommittedFlag(ri));
- ScanFragReq::setRangeScanFlag(tmp, ScanTabReq::getRangeScanFlag(ri));
- ScanFragReq::setDescendingFlag(tmp, ScanTabReq::getDescendingFlag(ri));
- ScanFragReq::setTupScanFlag(tmp, ScanTabReq::getTupScanFlag(ri));
- ScanFragReq::setAttrLen(tmp, scanTabReq->attrLenKeyLen & 0xFFFF);
-
- scanptr.p->scanRequestInfo = tmp;
- scanptr.p->scanStoredProcId = scanTabReq->storedProcId;
- scanptr.p->scanState = ScanRecord::RUNNING;
- scanptr.p->m_queued_count = 0;
-
- ScanFragList list(c_scan_frag_pool,
- scanptr.p->m_running_scan_frags);
- for (Uint32 i = 0; i < scanParallel; i++) {
- jam();
- ScanFragRecPtr ptr;
- ndbrequire(list.seize(ptr));
- ptr.p->scanRec = scanptr.i;
- ptr.p->scanFragId = 0;
- ptr.p->m_apiPtr = cdata[i];
- }//for
-
- (* (ScanTabReq::getRangeScanFlag(ri) ?
- &c_counters.c_range_scan_count :
- &c_counters.c_scan_count))++;
-}//Dbtc::initScanrec()
-
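initScanrec above copies each API-level request flag into the fragment-level request word through per-flag getter/setter helpers. As a rough illustration of that style of bit packing (not the real ScanTabReq/ScanFragReq layout, whose bit positions live in the signal headers), a minimal standalone sketch:

#include <cassert>
#include <cstdint>

// Hypothetical bit positions for illustration only; the real layouts are
// defined in the ScanTabReq / ScanFragReq signal headers.
enum { LOCK_MODE_BIT = 0, HOLD_LOCK_BIT = 1, RANGE_SCAN_BIT = 2 };

static void setFlag(uint32_t& word, unsigned bit, uint32_t val)
{
  word = (word & ~(1u << bit)) | ((val & 1u) << bit);
}

static uint32_t getFlag(uint32_t word, unsigned bit)
{
  return (word >> bit) & 1u;
}

int main()
{
  uint32_t apiInfo = 0, fragInfo = 0;
  setFlag(apiInfo, RANGE_SCAN_BIT, 1);
  // Copy one flag from the API request word to the fragment request word,
  // in the same spirit as initScanrec's setRangeScanFlag(getRangeScanFlag()).
  setFlag(fragInfo, RANGE_SCAN_BIT, getFlag(apiInfo, RANGE_SCAN_BIT));
  assert(getFlag(fragInfo, RANGE_SCAN_BIT) == 1);
  return 0;
}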
-void Dbtc::scanTabRefLab(Signal* signal, Uint32 errCode)
-{
- ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
- ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
- ref->transId1 = apiConnectptr.p->transid[0];
- ref->transId2 = apiConnectptr.p->transid[1];
- ref->errorCode = errCode;
- ref->closeNeeded = 0;
- sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABREF,
- signal, ScanTabRef::SignalLength, JBB);
-}//Dbtc::scanTabRefLab()
-
-/*---------------------------------------------------------------------------*/
-/* */
-/* RECEPTION OF ATTRINFO FOR SCAN TABLE REQUEST. */
-/*---------------------------------------------------------------------------*/
-void Dbtc::scanAttrinfoLab(Signal* signal, UintR Tlen)
-{
- ScanRecordPtr scanptr;
- scanptr.i = apiConnectptr.p->apiScanRec;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- cachePtr.i = apiConnectptr.p->cachePtr;
- ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
- CacheRecord * const regCachePtr = cachePtr.p;
- ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_AI);
-
- regCachePtr->currReclenAi = regCachePtr->currReclenAi + Tlen;
- if (regCachePtr->currReclenAi < scanptr.p->scanAiLength) {
- if (cfirstfreeAttrbuf == RNIL) {
- goto scanAttrinfo_attrbuf_error;
- }//if
- saveAttrbuf(signal);
- } else {
- if (regCachePtr->currReclenAi > scanptr.p->scanAiLength) {
- goto scanAttrinfo_len_error;
- } else {
- /* CURR_RECLEN_AI = SCAN_AI_LENGTH */
- if (cfirstfreeAttrbuf == RNIL) {
- goto scanAttrinfo_attrbuf2_error;
- }//if
- saveAttrbuf(signal);
- /**************************************************
- * WE HAVE NOW RECEIVED ALL INFORMATION CONCERNING
- * THIS SCAN. WE ARE READY TO START THE ACTUAL
- * EXECUTION OF THE SCAN QUERY
- **************************************************/
- diFcountReqLab(signal, scanptr);
- return;
- }//if
- }//if
- return;
-
-scanAttrinfo_attrbuf_error:
- jam();
- abortScanLab(signal, scanptr, ZGET_ATTRBUF_ERROR);
- return;
-
-scanAttrinfo_attrbuf2_error:
- jam();
- abortScanLab(signal, scanptr, ZGET_ATTRBUF_ERROR);
- return;
-
-scanAttrinfo_len_error:
- jam();
- abortScanLab(signal, scanptr, ZLENGTH_ERROR);
- return;
-}//Dbtc::scanAttrinfoLab()
-
-void Dbtc::diFcountReqLab(Signal* signal, ScanRecordPtr scanptr)
-{
- /**
-   * Check that the table is not being dropped
- */
- TableRecordPtr tabPtr;
- tabPtr.i = scanptr.p->scanTableref;
- tabPtr.p = &tableRecord[tabPtr.i];
- if (tabPtr.p->checkTable(scanptr.p->scanSchemaVersion)){
- ;
- } else {
- abortScanLab(signal, scanptr,
- tabPtr.p->getErrorCode(scanptr.p->scanSchemaVersion));
- return;
- }
-
- scanptr.p->scanNextFragId = 0;
- scanptr.p->m_booked_fragments_count= 0;
- scanptr.p->scanState = ScanRecord::WAIT_FRAGMENT_COUNT;
-
- if(!cachePtr.p->distributionKeyIndicator)
- {
- jam();
- /*************************************************
-    * THE FIRST RECEIVE STEP HAS COMPLETED SUCCESSFULLY.
-    * WE MUST NOW GET THE NUMBER OF FRAGMENTS IN THE TABLE.
- ***************************************************/
- signal->theData[0] = tcConnectptr.p->dihConnectptr;
- signal->theData[1] = scanptr.p->scanTableref;
- sendSignal(cdihblockref, GSN_DI_FCOUNTREQ, signal, 2, JBB);
- }
- else
- {
- signal->theData[0] = tcConnectptr.p->dihConnectptr;
- signal->theData[1] = tabPtr.i;
- signal->theData[2] = cachePtr.p->distributionKey;
- EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal, 3);
- UintR TerrorIndicator = signal->theData[0];
- jamEntry();
- if (TerrorIndicator != 0) {
- signal->theData[0] = tcConnectptr.i;
- //signal->theData[1] Contains error
- execDI_FCOUNTREF(signal);
- return;
- }
-
- UintR Tdata1 = signal->theData[1];
- scanptr.p->scanNextFragId = Tdata1;
-
- signal->theData[0] = tcConnectptr.i;
- signal->theData[1] = 1; // Frag count
- execDI_FCOUNTCONF(signal);
- }
- return;
-}//Dbtc::diFcountReqLab()
-
-/********************************************************************
- * execDI_FCOUNTCONF
- *
- * WE HAVE ASKED DIH ABOUT THE NUMBER OF FRAGMENTS IN THIS TABLE.
- * WE WILL NOW START A NUMBER OF PARALLEL SCAN PROCESSES. EACH OF
- * THESE WILL SCAN ONE FRAGMENT AT A TIME. THEY WILL CONTINUE THIS
- * UNTIL THERE ARE NO MORE FRAGMENTS TO SCAN OR UNTIL THE APPLICATION
- * CLOSES THE SCAN.
- ********************************************************************/
-void Dbtc::execDI_FCOUNTCONF(Signal* signal)
-{
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- Uint32 tfragCount = signal->theData[1];
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- apiConnectptr.i = tcConnectptr.p->apiConnect;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- ScanRecordPtr scanptr;
- scanptr.i = apiConnectptr.p->apiScanRec;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
- ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_FRAGMENT_COUNT);
- if (apiConnectptr.p->apiFailState == ZTRUE) {
- jam();
- releaseScanResources(scanptr);
- handleApiFailState(signal, apiConnectptr.i);
- return;
- }//if
- if (tfragCount == 0) {
- jam();
- abortScanLab(signal, scanptr, ZNO_FRAGMENT_ERROR);
- return;
- }//if
-
- /**
-   * Check that the table is not being dropped
- */
- TableRecordPtr tabPtr;
- tabPtr.i = scanptr.p->scanTableref;
- tabPtr.p = &tableRecord[tabPtr.i];
- if (tabPtr.p->checkTable(scanptr.p->scanSchemaVersion)){
- ;
- } else {
- abortScanLab(signal, scanptr,
- tabPtr.p->getErrorCode(scanptr.p->scanSchemaVersion));
- return;
- }
-
- scanptr.p->scanParallel = tfragCount;
- scanptr.p->scanNoFrag = tfragCount;
- scanptr.p->scanState = ScanRecord::RUNNING;
-
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
- updateBuddyTimer(apiConnectptr);
-
- ScanFragRecPtr ptr;
- ScanFragList list(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
- for (list.first(ptr); !ptr.isNull() && tfragCount;
- list.next(ptr), tfragCount--){
- jam();
-
- ptr.p->lqhBlockref = 0;
- ptr.p->startFragTimer(ctcTimer);
- ptr.p->scanFragId = scanptr.p->scanNextFragId++;
- ptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
- ptr.p->startFragTimer(ctcTimer);
-
- signal->theData[0] = tcConnectptr.p->dihConnectptr;
- signal->theData[1] = ptr.i;
- signal->theData[2] = scanptr.p->scanTableref;
- signal->theData[3] = ptr.p->scanFragId;
- sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
- }//for
-
- ScanFragList queued(c_scan_frag_pool, scanptr.p->m_queued_scan_frags);
- for (; !ptr.isNull();)
- {
- ptr.p->m_ops = 0;
- ptr.p->m_totalLen = 0;
- ptr.p->m_scan_frag_conf_status = 1;
- ptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY;
- ptr.p->stopFragTimer();
-
- ScanFragRecPtr tmp = ptr;
- list.next(ptr);
- list.remove(tmp);
- queued.add(tmp);
- scanptr.p->m_queued_count++;
- }
-}//Dbtc::execDI_FCOUNTCONF()
-
-/******************************************************
- * execDI_FCOUNTREF
- ******************************************************/
-void Dbtc::execDI_FCOUNTREF(Signal* signal)
-{
- jamEntry();
- tcConnectptr.i = signal->theData[0];
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- const Uint32 errCode = signal->theData[1];
- apiConnectptr.i = tcConnectptr.p->apiConnect;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- ScanRecordPtr scanptr;
- scanptr.i = apiConnectptr.p->apiScanRec;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
- ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_FRAGMENT_COUNT);
- if (apiConnectptr.p->apiFailState == ZTRUE) {
- jam();
- releaseScanResources(scanptr);
- handleApiFailState(signal, apiConnectptr.i);
- return;
- }//if
- abortScanLab(signal, scanptr, errCode);
-}//Dbtc::execDI_FCOUNTREF()
-
-void Dbtc::abortScanLab(Signal* signal, ScanRecordPtr scanptr, Uint32 errCode)
-{
- scanTabRefLab(signal, errCode);
- releaseScanResources(scanptr);
-}//Dbtc::abortScanLab()
-
-void Dbtc::releaseScanResources(ScanRecordPtr scanPtr)
-{
- if (apiConnectptr.p->cachePtr != RNIL) {
- cachePtr.i = apiConnectptr.p->cachePtr;
- ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
- releaseKeys();
- releaseAttrinfo();
- }//if
- tcConnectptr.i = scanPtr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- releaseTcCon();
-
- ndbrequire(scanPtr.p->m_running_scan_frags.isEmpty());
- ndbrequire(scanPtr.p->m_queued_scan_frags.isEmpty());
- ndbrequire(scanPtr.p->m_delivered_scan_frags.isEmpty());
-
- ndbassert(scanPtr.p->scanApiRec == apiConnectptr.i);
- ndbassert(apiConnectptr.p->apiScanRec == scanPtr.i);
-
- // link into free list
- scanPtr.p->nextScan = cfirstfreeScanrec;
- scanPtr.p->scanState = ScanRecord::IDLE;
- scanPtr.p->scanTcrec = RNIL;
- scanPtr.p->scanApiRec = RNIL;
- cfirstfreeScanrec = scanPtr.i;
-
- apiConnectptr.p->apiScanRec = RNIL;
- apiConnectptr.p->apiConnectstate = CS_CONNECTED;
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
-}//Dbtc::releaseScanResources()
-
-
-/****************************************************************
- * execDIGETPRIMCONF
- *
- * WE HAVE RECEIVED THE PRIMARY NODE OF THIS FRAGMENT.
- * WE ARE NOW READY TO ASK FOR PERMISSION TO LOAD THIS
- * SPECIFIC NODE WITH A SCAN OPERATION.
- ****************************************************************/
-void Dbtc::execDIGETPRIMCONF(Signal* signal)
-{
- jamEntry();
- // tcConnectptr.i in theData[0] is not used
- scanFragptr.i = signal->theData[1];
- c_scan_frag_pool.getPtr(scanFragptr);
-
- tnodeid = signal->theData[2];
- arrGuard(tnodeid, MAX_NDB_NODES);
-
- ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::WAIT_GET_PRIMCONF);
- scanFragptr.p->stopFragTimer();
-
- ScanRecordPtr scanptr;
- scanptr.i = scanFragptr.p->scanRec;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
-
- /**
-   * This must be false; otherwise select count(*) can "pass"
-   * transactions committing on backup fragments and
-   * return an incorrect row count
- */
- if(false && ScanFragReq::getReadCommittedFlag(scanptr.p->scanRequestInfo))
- {
- jam();
- Uint32 max = 3+signal->theData[6];
- Uint32 nodeid = getOwnNodeId();
- for(Uint32 i = 3; i<max; i++)
- if(signal->theData[i] == nodeid)
- {
- jam();
- tnodeid = nodeid;
- break;
- }
- }
-
- {
- /**
- * Check table
- */
- TableRecordPtr tabPtr;
- tabPtr.i = scanptr.p->scanTableref;
- ptrAss(tabPtr, tableRecord);
- Uint32 schemaVersion = scanptr.p->scanSchemaVersion;
- if(tabPtr.p->checkTable(schemaVersion) == false){
- jam();
- ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
-
- run.release(scanFragptr);
- scanError(signal, scanptr, tabPtr.p->getErrorCode(schemaVersion));
- return;
- }
- }
-
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- apiConnectptr.i = scanptr.p->scanApiRec;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- cachePtr.i = apiConnectptr.p->cachePtr;
- ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
- switch (scanptr.p->scanState) {
- case ScanRecord::CLOSING_SCAN:
- jam();
- updateBuddyTimer(apiConnectptr);
- {
- ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
-
- run.release(scanFragptr);
- }
- close_scan_req_send_conf(signal, scanptr);
- return;
- default:
- jam();
- /*empty*/;
- break;
- }//switch
- Uint32 ref = calcLqhBlockRef(tnodeid);
- scanFragptr.p->lqhBlockref = ref;
- scanFragptr.p->m_connectCount = getNodeInfo(tnodeid).m_connectCount;
- sendScanFragReq(signal, scanptr.p, scanFragptr.p);
- if(ERROR_INSERTED(8035))
- globalTransporterRegistry.performSend();
- attrbufptr.i = cachePtr.p->firstAttrbuf;
- while (attrbufptr.i != RNIL) {
- jam();
- ptrCheckGuard(attrbufptr, cattrbufFilesize, attrbufRecord);
- sendAttrinfo(signal,
- scanFragptr.i,
- attrbufptr.p,
- ref);
- attrbufptr.i = attrbufptr.p->attrbuf[ZINBUF_NEXT];
- if(ERROR_INSERTED(8035))
- globalTransporterRegistry.performSend();
- }//while
- scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
- scanFragptr.p->startFragTimer(ctcTimer);
- updateBuddyTimer(apiConnectptr);
- /*********************************************
- * WE HAVE NOW STARTED A FRAGMENT SCAN. NOW
- * WAIT FOR THE FIRST SCANNED RECORDS
- *********************************************/
-}//Dbtc::execDIGETPRIMCONF
-
-/***************************************************
- * execDIGETPRIMREF
- *
- * WE ARE NOW FORCED TO STOP THE SCAN. THIS ERROR
- * IS NOT RECOVERABLE SINCE THERE IS A PROBLEM WITH
- * FINDING A PRIMARY REPLICA OF A CERTAIN FRAGMENT.
- ***************************************************/
-void Dbtc::execDIGETPRIMREF(Signal* signal)
-{
- jamEntry();
- // tcConnectptr.i in theData[0] is not used.
- scanFragptr.i = signal->theData[1];
- const Uint32 errCode = signal->theData[2];
- c_scan_frag_pool.getPtr(scanFragptr);
- ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::WAIT_GET_PRIMCONF);
-
- ScanRecordPtr scanptr;
- scanptr.i = scanFragptr.p->scanRec;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
-
- ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
-
- run.release(scanFragptr);
-
- scanError(signal, scanptr, errCode);
-}//Dbtc::execDIGETPRIMREF()
-
-/**
- * Dbtc::execSCAN_FRAGREF
- * Our attempt to scan a fragment was refused.
- * Set the error code and close all other fragment
- * scans belonging to this scan.
- */
-void Dbtc::execSCAN_FRAGREF(Signal* signal)
-{
- const ScanFragRef * const ref = (ScanFragRef *)&signal->theData[0];
-
- jamEntry();
- const Uint32 errCode = ref->errorCode;
-
- scanFragptr.i = ref->senderData;
- c_scan_frag_pool.getPtr(scanFragptr);
-
- ScanRecordPtr scanptr;
- scanptr.i = scanFragptr.p->scanRec;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
-
- apiConnectptr.i = scanptr.p->scanApiRec;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
-
- Uint32 transid1 = apiConnectptr.p->transid[0] ^ ref->transId1;
- Uint32 transid2 = apiConnectptr.p->transid[1] ^ ref->transId2;
- transid1 = transid1 | transid2;
- if (transid1 != 0) {
- jam();
- systemErrorLab(signal);
- }//if
-
- /**
-   * Set the error code, close the connection to this LQH fragment,
-   * stop the fragment timer and call scanError to start
-   * closing the other fragment scans
- */
- ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::LQH_ACTIVE);
- {
- scanFragptr.p->scanFragState = ScanFragRec::COMPLETED;
- ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
-
- run.release(scanFragptr);
- scanFragptr.p->stopFragTimer();
- }
- scanError(signal, scanptr, errCode);
-}//Dbtc::execSCAN_FRAGREF()
-
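Both execSCAN_FRAGREF above and execSCAN_FRAGCONF below validate the transaction id by XOR-ing each word against the stored id and OR-ing the two results; the combined value is zero only when both 32-bit words match. A minimal sketch of that check, with made-up values:

#include <cassert>
#include <cstdint>

// (a0 ^ b0) | (a1 ^ b1) is zero exactly when a0 == b0 and a1 == b1,
// so a single comparison against zero validates both id words.
static bool sameTransId(uint32_t stored0, uint32_t stored1,
                        uint32_t recv0, uint32_t recv1)
{
  return (((stored0 ^ recv0) | (stored1 ^ recv1)) == 0);
}

int main()
{
  assert(sameTransId(0x12345678u, 0x9abcdef0u, 0x12345678u, 0x9abcdef0u));
  assert(!sameTransId(0x12345678u, 0x9abcdef0u, 0x12345678u, 0x9abcdef1u));
  return 0;
}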
-/**
- * Dbtc::scanError
- *
- * Called when an error occurs during a scan
- */
-void Dbtc::scanError(Signal* signal, ScanRecordPtr scanptr, Uint32 errorCode)
-{
- jam();
- ScanRecord* scanP = scanptr.p;
-
- DEBUG("scanError, errorCode = "<< errorCode <<
- ", scanState = " << scanptr.p->scanState);
-
- apiConnectptr.i = scanP->scanApiRec;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- ndbrequire(apiConnectptr.p->apiScanRec == scanptr.i);
-
- if(scanP->scanState == ScanRecord::CLOSING_SCAN){
- jam();
- close_scan_req_send_conf(signal, scanptr);
- return;
- }
-
- ndbrequire(scanP->scanState == ScanRecord::RUNNING);
-
- /**
-   * Close the scan without having received an order to do so
- */
- close_scan_req(signal, scanptr, false);
-
- const bool apiFail = (apiConnectptr.p->apiFailState == ZTRUE);
- if(apiFail){
- jam();
- return;
- }
-
- ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
- ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
- ref->transId1 = apiConnectptr.p->transid[0];
- ref->transId2 = apiConnectptr.p->transid[1];
- ref->errorCode = errorCode;
- ref->closeNeeded = 1;
- sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABREF,
- signal, ScanTabRef::SignalLength, JBB);
-}//Dbtc::scanError()
-
-/************************************************************
- * execSCAN_FRAGCONF
- *
- * A NUMBER OF OPERATIONS HAVE BEEN COMPLETED IN THIS
- * FRAGMENT. TAKE CARE OF AND ISSUE FURTHER ACTIONS.
- ************************************************************/
-void Dbtc::execSCAN_FRAGCONF(Signal* signal)
-{
- Uint32 transid1, transid2, total_len;
- jamEntry();
-
- const ScanFragConf * const conf = (ScanFragConf*)&signal->theData[0];
- const Uint32 noCompletedOps = conf->completedOps;
- const Uint32 status = conf->fragmentCompleted;
-
- scanFragptr.i = conf->senderData;
- c_scan_frag_pool.getPtr(scanFragptr);
-
- ScanRecordPtr scanptr;
- scanptr.i = scanFragptr.p->scanRec;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
-
- apiConnectptr.i = scanptr.p->scanApiRec;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
-
- transid1 = apiConnectptr.p->transid[0] ^ conf->transId1;
- transid2 = apiConnectptr.p->transid[1] ^ conf->transId2;
- total_len= conf->total_len;
- transid1 = transid1 | transid2;
- if (transid1 != 0) {
- jam();
- systemErrorLab(signal);
- }//if
-
- ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::LQH_ACTIVE);
-
- if(scanptr.p->scanState == ScanRecord::CLOSING_SCAN){
- jam();
- if(status == 0){
- /**
-       * We have already started closing (we sent a close request), so ignore this
- */
- return;
- } else {
- jam();
- ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
-
- run.release(scanFragptr);
- scanFragptr.p->stopFragTimer();
- scanFragptr.p->scanFragState = ScanFragRec::COMPLETED;
- }
- close_scan_req_send_conf(signal, scanptr);
- return;
- }
-
- if(noCompletedOps == 0 && status != 0 &&
- scanptr.p->scanNextFragId+scanptr.p->m_booked_fragments_count < scanptr.p->scanNoFrag){
- /**
- * Start on next fragment
- */
- scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
- scanFragptr.p->startFragTimer(ctcTimer);
-
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- scanFragptr.p->scanFragId = scanptr.p->scanNextFragId++;
- signal->theData[0] = tcConnectptr.p->dihConnectptr;
- signal->theData[1] = scanFragptr.i;
- signal->theData[2] = scanptr.p->scanTableref;
- signal->theData[3] = scanFragptr.p->scanFragId;
- sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
- return;
- }
- /*
- Uint32 totalLen = 0;
- for(Uint32 i = 0; i<noCompletedOps; i++){
- Uint32 tmp = conf->opReturnDataLen[i];
- totalLen += tmp;
- }
- */
- {
- ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
- ScanFragList queued(c_scan_frag_pool, scanptr.p->m_queued_scan_frags);
-
- run.remove(scanFragptr);
- queued.add(scanFragptr);
- scanptr.p->m_queued_count++;
- }
-
- scanFragptr.p->m_scan_frag_conf_status = status;
- scanFragptr.p->m_ops = noCompletedOps;
- scanFragptr.p->m_totalLen = total_len;
- scanFragptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY;
- scanFragptr.p->stopFragTimer();
-
- if(scanptr.p->m_queued_count > /** Min */ 0){
- jam();
- sendScanTabConf(signal, scanptr);
- }
-}//Dbtc::execSCAN_FRAGCONF()
-
-/****************************************************************************
- * execSCAN_NEXTREQ
- *
- * THE APPLICATION HAS PROCESSED THE TUPLES TRANSFERRED AND IS NOW READY FOR
- * MORE. THIS SIGNAL IS ALSO USED TO CLOSE THE SCAN.
- ****************************************************************************/
-void Dbtc::execSCAN_NEXTREQ(Signal* signal)
-{
- const ScanNextReq * const req = (ScanNextReq *)&signal->theData[0];
- const UintR transid1 = req->transId1;
- const UintR transid2 = req->transId2;
- const UintR stopScan = req->stopScan;
-
- jamEntry();
-
- apiConnectptr.i = req->apiConnectPtr;
- if (apiConnectptr.i >= capiConnectFilesize) {
- jam();
- warningHandlerLab(signal);
- return;
- }//if
- ptrAss(apiConnectptr, apiConnectRecord);
-
- /**
- * Check transid
- */
- const UintR ctransid1 = apiConnectptr.p->transid[0] ^ transid1;
- const UintR ctransid2 = apiConnectptr.p->transid[1] ^ transid2;
- if ((ctransid1 | ctransid2) != 0){
- ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
- ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
- ref->transId1 = transid1;
- ref->transId2 = transid2;
- ref->errorCode = ZSTATE_ERROR;
- ref->closeNeeded = 0;
- sendSignal(signal->senderBlockRef(), GSN_SCAN_TABREF,
- signal, ScanTabRef::SignalLength, JBB);
- DEBUG("Wrong transid");
- return;
- }
-
- /**
- * Check state of API connection
- */
- if (apiConnectptr.p->apiConnectstate != CS_START_SCAN) {
- jam();
- if (apiConnectptr.p->apiConnectstate == CS_CONNECTED) {
- jam();
- /*********************************************************************
- * The application sends a SCAN_NEXTREQ after experiencing a time-out.
- * We will send a SCAN_TABREF to indicate a time-out occurred.
- *********************************************************************/
- DEBUG("scanTabRefLab: ZSCANTIME_OUT_ERROR2");
- ndbout_c("apiConnectptr(%d) -> abort", apiConnectptr.i);
- ndbrequire(false); //B2 indication of strange things going on
- scanTabRefLab(signal, ZSCANTIME_OUT_ERROR2);
- return;
- }
- DEBUG("scanTabRefLab: ZSTATE_ERROR");
- DEBUG(" apiConnectstate="<<apiConnectptr.p->apiConnectstate);
- ndbrequire(false); //B2 indication of strange things going on
- scanTabRefLab(signal, ZSTATE_ERROR);
- return;
- }//if
-
- /*******************************************************
- * START THE ACTUAL LOGIC OF SCAN_NEXTREQ.
- ********************************************************/
- // Stop the timer that is used to check for timeout in the API
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
- ScanRecordPtr scanptr;
- scanptr.i = apiConnectptr.p->apiScanRec;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
- ScanRecord* scanP = scanptr.p;
-
- const Uint32 len = signal->getLength() - 4;
-
- if (stopScan == ZTRUE) {
- jam();
- /*********************************************************************
- * APPLICATION IS CLOSING THE SCAN.
- **********************************************************************/
- close_scan_req(signal, scanptr, true);
- return;
- }//if
-
- if (scanptr.p->scanState == ScanRecord::CLOSING_SCAN){
- jam();
- /**
- * The scan is closing (typically due to error)
- * but the API hasn't understood it yet
- *
- * Wait for API close request
- */
- return;
- }
-
-  // Copy op ptrs so I don't overwrite them when sending...
- memcpy(signal->getDataPtrSend()+25, signal->getDataPtr()+4, 4 * len);
-
- ScanFragNextReq tmp;
- tmp.closeFlag = ZFALSE;
- tmp.transId1 = apiConnectptr.p->transid[0];
- tmp.transId2 = apiConnectptr.p->transid[1];
- tmp.batch_size_rows = scanP->batch_size_rows;
- tmp.batch_size_bytes = scanP->batch_byte_size;
-
- ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags);
- ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags);
- for(Uint32 i = 0 ; i<len; i++){
- jam();
- scanFragptr.i = signal->theData[i+25];
- c_scan_frag_pool.getPtr(scanFragptr);
- ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::DELIVERED);
-
- scanFragptr.p->startFragTimer(ctcTimer);
- scanFragptr.p->m_ops = 0;
-
- if(scanFragptr.p->m_scan_frag_conf_status)
- {
- /**
-       * The last fragment scan was complete; start on the next fragment
- */
- jam();
- ndbrequire(scanptr.p->scanNextFragId < scanptr.p->scanNoFrag);
- jam();
- ndbassert(scanptr.p->m_booked_fragments_count);
- scanptr.p->m_booked_fragments_count--;
- scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
-
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- scanFragptr.p->scanFragId = scanptr.p->scanNextFragId++;
- signal->theData[0] = tcConnectptr.p->dihConnectptr;
- signal->theData[1] = scanFragptr.i;
- signal->theData[2] = scanptr.p->scanTableref;
- signal->theData[3] = scanFragptr.p->scanFragId;
- sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
- }
- else
- {
- jam();
- scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
- ScanFragNextReq * req = (ScanFragNextReq*)signal->getDataPtrSend();
- * req = tmp;
- req->senderData = scanFragptr.i;
- sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
- ScanFragNextReq::SignalLength, JBB);
- }
- delivered.remove(scanFragptr);
- running.add(scanFragptr);
- }//for
-
-}//Dbtc::execSCAN_NEXTREQ()
-
-void
-Dbtc::close_scan_req(Signal* signal, ScanRecordPtr scanPtr, bool req_received){
-
- ScanRecord* scanP = scanPtr.p;
- ndbrequire(scanPtr.p->scanState != ScanRecord::IDLE);
- scanPtr.p->scanState = ScanRecord::CLOSING_SCAN;
- scanPtr.p->m_close_scan_req = req_received;
-
- /**
- * Queue : Action
- * ============= : =================
- * completed : -
- * running : close -> LQH
- * delivered w/ : close -> LQH
- * delivered wo/ : move to completed
- * queued w/ : close -> LQH
- * queued wo/ : move to completed
- */
-
- ScanFragNextReq * nextReq = (ScanFragNextReq*)&signal->theData[0];
- nextReq->closeFlag = ZTRUE;
- nextReq->transId1 = apiConnectptr.p->transid[0];
- nextReq->transId2 = apiConnectptr.p->transid[1];
-
- {
- ScanFragRecPtr ptr;
- ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags);
- ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags);
- ScanFragList queued(c_scan_frag_pool, scanP->m_queued_scan_frags);
-
- // Close running
- for(running.first(ptr); !ptr.isNull(); ){
- ScanFragRecPtr curr = ptr; // Remove while iterating...
- running.next(ptr);
-
- if(curr.p->scanFragState == ScanFragRec::WAIT_GET_PRIMCONF){
- jam();
- continue;
- }
- ndbrequire(curr.p->scanFragState == ScanFragRec::LQH_ACTIVE);
-
- curr.p->startFragTimer(ctcTimer);
- curr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
- nextReq->senderData = curr.i;
- sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
- ScanFragNextReq::SignalLength, JBB);
- }
-
- // Close delivered
- for(delivered.first(ptr); !ptr.isNull(); ){
- jam();
- ScanFragRecPtr curr = ptr; // Remove while iterating...
- delivered.next(ptr);
-
- ndbrequire(curr.p->scanFragState == ScanFragRec::DELIVERED);
- delivered.remove(curr);
-
- if(curr.p->m_ops > 0 && curr.p->m_scan_frag_conf_status == 0){
- jam();
- running.add(curr);
- curr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
- curr.p->startFragTimer(ctcTimer);
- nextReq->senderData = curr.i;
- sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
- ScanFragNextReq::SignalLength, JBB);
-
- } else {
- jam();
- c_scan_frag_pool.release(curr);
- curr.p->scanFragState = ScanFragRec::COMPLETED;
- curr.p->stopFragTimer();
- }
- }//for
-
- /**
-     * All queued fragments with data should be closed
- */
- for(queued.first(ptr); !ptr.isNull(); ){
- jam();
- ndbrequire(ptr.p->scanFragState == ScanFragRec::QUEUED_FOR_DELIVERY);
- ScanFragRecPtr curr = ptr; // Remove while iterating...
- queued.next(ptr);
-
- queued.remove(curr);
- scanP->m_queued_count--;
-
- if(curr.p->m_ops > 0){
- jam();
- running.add(curr);
- curr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
- curr.p->startFragTimer(ctcTimer);
- nextReq->senderData = curr.i;
- sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
- ScanFragNextReq::SignalLength, JBB);
- } else {
- jam();
- c_scan_frag_pool.release(curr);
- curr.p->scanFragState = ScanFragRec::COMPLETED;
- curr.p->stopFragTimer();
- }
- }
- }
- close_scan_req_send_conf(signal, scanPtr);
-}
-
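The per-fragment decisions in close_scan_req above reduce to two small predicates: a delivered fragment needs an explicit close at its LQH only if it returned rows and did not report the fragment as finished, while a queued fragment needs one whenever it returned rows. A compilable sketch of those predicates, with hypothetical field names mirroring m_ops and m_scan_frag_conf_status:

#include <cassert>
#include <cstdint>

// Hypothetical mirror of the two fields consulted by close_scan_req.
struct FragInfo {
  uint32_t ops;            // rows returned in the last batch (m_ops)
  bool     fragCompleted;  // m_scan_frag_conf_status != 0
};

// Delivered list: close at LQH only if rows were returned and the
// fragment scan was not already reported complete.
static bool closeDelivered(const FragInfo& f)
{
  return f.ops > 0 && !f.fragCompleted;
}

// Queued list: close at LQH whenever rows were returned.
static bool closeQueued(const FragInfo& f)
{
  return f.ops > 0;
}

int main()
{
  assert(closeDelivered({5, false}) && !closeDelivered({5, true}));
  assert(closeQueued({1, true}) && !closeQueued({0, false}));
  return 0;
}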
-void
-Dbtc::close_scan_req_send_conf(Signal* signal, ScanRecordPtr scanPtr){
-
- jam();
-
- ndbrequire(scanPtr.p->m_queued_scan_frags.isEmpty());
- ndbrequire(scanPtr.p->m_delivered_scan_frags.isEmpty());
- //ndbrequire(scanPtr.p->m_running_scan_frags.isEmpty());
-
-#if 0
- {
- ScanFragList comp(c_scan_frag_pool, scanPtr.p->m_completed_scan_frags);
- ScanFragRecPtr ptr;
- for(comp.first(ptr); !ptr.isNull(); comp.next(ptr)){
- ndbrequire(ptr.p->scanFragTimer == 0);
- ndbrequire(ptr.p->scanFragState == ScanFragRec::COMPLETED);
- }
- }
-#endif
-
- if(!scanPtr.p->m_running_scan_frags.isEmpty()){
- jam();
- return;
- }
-
- const bool apiFail = (apiConnectptr.p->apiFailState == ZTRUE);
-
- if(!scanPtr.p->m_close_scan_req){
- jam();
- /**
-     * The API hasn't ordered closing yet
- */
- return;
- }
-
- Uint32 ref = apiConnectptr.p->ndbapiBlockref;
- if(!apiFail && ref){
- jam();
- ScanTabConf * conf = (ScanTabConf*)&signal->theData[0];
- conf->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
- conf->requestInfo = ScanTabConf::EndOfData;
- conf->transId1 = apiConnectptr.p->transid[0];
- conf->transId2 = apiConnectptr.p->transid[1];
- sendSignal(ref, GSN_SCAN_TABCONF, signal, ScanTabConf::SignalLength, JBB);
- }
-
- releaseScanResources(scanPtr);
-
- if(apiFail){
- jam();
- /**
- * API has failed
- */
- handleApiFailState(signal, apiConnectptr.i);
- }
-}
-
-Dbtc::ScanRecordPtr
-Dbtc::seizeScanrec(Signal* signal) {
- ScanRecordPtr scanptr;
- scanptr.i = cfirstfreeScanrec;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
- cfirstfreeScanrec = scanptr.p->nextScan;
- scanptr.p->nextScan = RNIL;
- ndbrequire(scanptr.p->scanState == ScanRecord::IDLE);
- return scanptr;
-}//Dbtc::seizeScanrec()
-
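seizeScanrec above and releaseScanResources earlier are the two halves of an intrusive free list over the preallocated scan record array: idle records are chained through their nextScan field and cfirstfreeScanrec holds the list head. A compilable miniature of the same pattern, with hypothetical names:

#include <cassert>
#include <cstdint>
#include <vector>

static const uint32_t NIL = 0xffffffffu;   // stands in for RNIL

struct Rec { uint32_t next = NIL; };

struct Pool {
  std::vector<Rec> recs;
  uint32_t firstFree;

  explicit Pool(uint32_t n) : recs(n), firstFree(n ? 0 : NIL) {
    // Chain all records together, as initialiseScanrec() does.
    for (uint32_t i = 0; i + 1 < n; i++) recs[i].next = i + 1;
  }
  uint32_t seize() {                       // cf. seizeScanrec()
    assert(firstFree != NIL);
    uint32_t i = firstFree;
    firstFree = recs[i].next;
    recs[i].next = NIL;
    return i;
  }
  void release(uint32_t i) {               // cf. releaseScanResources()
    recs[i].next = firstFree;
    firstFree = i;
  }
};

int main() {
  Pool p(4);
  uint32_t a = p.seize();
  uint32_t b = p.seize();
  p.release(a);
  assert(p.seize() == a);                  // freed records are reused LIFO
  (void)b;
  return 0;
}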
-void Dbtc::sendScanFragReq(Signal* signal,
- ScanRecord* scanP,
- ScanFragRec* scanFragP)
-{
- ScanFragReq * const req = (ScanFragReq *)&signal->theData[0];
- Uint32 requestInfo = scanP->scanRequestInfo;
- ScanFragReq::setScanPrio(requestInfo, 1);
- apiConnectptr.i = scanP->scanApiRec;
- req->tableId = scanP->scanTableref;
- req->schemaVersion = scanP->scanSchemaVersion;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- req->senderData = scanFragptr.i;
- req->requestInfo = requestInfo;
- req->fragmentNoKeyLen = scanFragP->scanFragId | (scanP->scanKeyLen << 16);
- req->resultRef = apiConnectptr.p->ndbapiBlockref;
- req->savePointId = apiConnectptr.p->currSavePointId;
- req->transId1 = apiConnectptr.p->transid[0];
- req->transId2 = apiConnectptr.p->transid[1];
- req->clientOpPtr = scanFragP->m_apiPtr;
- req->batch_size_rows= scanP->batch_size_rows;
- req->batch_size_bytes= scanP->batch_byte_size;
- sendSignal(scanFragP->lqhBlockref, GSN_SCAN_FRAGREQ, signal,
- ScanFragReq::SignalLength, JBB);
- if(scanP->scanKeyLen > 0)
- {
- tcConnectptr.i = scanFragptr.i;
- packKeyData000Lab(signal, scanFragP->lqhBlockref, scanP->scanKeyLen);
- }
- updateBuddyTimer(apiConnectptr);
- scanFragP->startFragTimer(ctcTimer);
-}//Dbtc::sendScanFragReq()
-
-
-void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) {
- jam();
- Uint32* ops = signal->getDataPtrSend()+4;
- Uint32 op_count = scanPtr.p->m_queued_count;
- if(4 + 3 * op_count > 25){
- jam();
- ops += 21;
- }
-
- int left = scanPtr.p->scanNoFrag - scanPtr.p->scanNextFragId;
- Uint32 booked = scanPtr.p->m_booked_fragments_count;
-
- ScanTabConf * conf = (ScanTabConf*)&signal->theData[0];
- conf->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
- conf->requestInfo = op_count;
- conf->transId1 = apiConnectptr.p->transid[0];
- conf->transId2 = apiConnectptr.p->transid[1];
- ScanFragRecPtr ptr;
- {
- ScanFragList queued(c_scan_frag_pool, scanPtr.p->m_queued_scan_frags);
- ScanFragList delivered(c_scan_frag_pool,scanPtr.p->m_delivered_scan_frags);
- for(queued.first(ptr); !ptr.isNull(); ){
- ndbrequire(ptr.p->scanFragState == ScanFragRec::QUEUED_FOR_DELIVERY);
- ScanFragRecPtr curr = ptr; // Remove while iterating...
- queued.next(ptr);
-
- bool done = curr.p->m_scan_frag_conf_status && (left <= (int)booked);
- if(curr.p->m_scan_frag_conf_status)
- booked++;
-
- * ops++ = curr.p->m_apiPtr;
- * ops++ = done ? RNIL : curr.i;
- * ops++ = (curr.p->m_totalLen << 10) + curr.p->m_ops;
-
- queued.remove(curr);
- if(!done){
- delivered.add(curr);
- curr.p->scanFragState = ScanFragRec::DELIVERED;
- curr.p->stopFragTimer();
- } else {
- c_scan_frag_pool.release(curr);
- curr.p->scanFragState = ScanFragRec::COMPLETED;
- curr.p->stopFragTimer();
- }
- }
- }
-
- scanPtr.p->m_booked_fragments_count = booked;
- if(scanPtr.p->m_delivered_scan_frags.isEmpty() &&
- scanPtr.p->m_running_scan_frags.isEmpty())
- {
- conf->requestInfo = op_count | ScanTabConf::EndOfData;
- releaseScanResources(scanPtr);
- }
-
- if(4 + 3 * op_count > 25){
- jam();
- LinearSectionPtr ptr[3];
- ptr[0].p = signal->getDataPtrSend()+25;
- ptr[0].sz = 3 * op_count;
- sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABCONF, signal,
- ScanTabConf::SignalLength, JBB, ptr, 1);
- } else {
- jam();
- sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABCONF, signal,
- ScanTabConf::SignalLength + 3 * op_count, JBB);
- }
- scanPtr.p->m_queued_count = 0;
-}//Dbtc::sendScanTabConf()
-
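sendScanTabConf above emits three words per queued fragment, and the third word multiplexes the batch's total length and its row count into a single 32-bit value: the length shifted up by 10 bits plus the operation count. A small sketch of that packing, assuming the 10-bit split used above:

#include <cassert>
#include <cstdint>

// Third word per fragment in SCAN_TABCONF: (totalLen << 10) + ops,
// i.e. the row count occupies the low 10 bits.
static uint32_t packOpInfo(uint32_t totalLen, uint32_t ops)
{
  assert(ops < (1u << 10));
  return (totalLen << 10) + ops;
}

static void unpackOpInfo(uint32_t word, uint32_t& totalLen, uint32_t& ops)
{
  ops = word & ((1u << 10) - 1);
  totalLen = word >> 10;
}

int main()
{
  uint32_t len = 0, ops = 0;
  unpackOpInfo(packOpInfo(1234, 17), len, ops);
  assert(len == 1234 && ops == 17);
  return 0;
}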
-
-void Dbtc::gcpTcfinished(Signal* signal)
-{
- signal->theData[1] = tcheckGcpId;
- sendSignal(cdihblockref, GSN_GCP_TCFINISHED, signal, 2, JBB);
-}//Dbtc::gcpTcfinished()
-
-void Dbtc::initApiConnect(Signal* signal)
-{
- Uint32 tiacTmp;
- Uint32 guard4;
-
- tiacTmp = capiConnectFilesize / 3;
- ndbrequire(tiacTmp > 0);
- guard4 = tiacTmp + 1;
- for (cachePtr.i = 0; cachePtr.i < guard4; cachePtr.i++) {
- refresh_watch_dog();
- ptrAss(cachePtr, cacheRecord);
- cachePtr.p->firstAttrbuf = RNIL;
- cachePtr.p->lastAttrbuf = RNIL;
- cachePtr.p->firstKeybuf = RNIL;
- cachePtr.p->lastKeybuf = RNIL;
- cachePtr.p->nextCacheRec = cachePtr.i + 1;
- }//for
- cachePtr.i = tiacTmp;
- ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
- cachePtr.p->nextCacheRec = RNIL;
- cfirstfreeCacheRec = 0;
-
- guard4 = tiacTmp - 1;
- for (apiConnectptr.i = 0; apiConnectptr.i <= guard4; apiConnectptr.i++) {
- refresh_watch_dog();
- jam();
- ptrAss(apiConnectptr, apiConnectRecord);
- apiConnectptr.p->apiConnectstate = CS_DISCONNECTED;
- apiConnectptr.p->apiFailState = ZFALSE;
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
- apiConnectptr.p->takeOverRec = (Uint8)Z8NIL;
- apiConnectptr.p->cachePtr = RNIL;
- apiConnectptr.p->nextApiConnect = apiConnectptr.i + 1;
- apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref
- apiConnectptr.p->commitAckMarker = RNIL;
- apiConnectptr.p->firstTcConnect = RNIL;
- apiConnectptr.p->lastTcConnect = RNIL;
- apiConnectptr.p->triggerPending = false;
- apiConnectptr.p->isIndexOp = false;
- apiConnectptr.p->accumulatingIndexOp = RNIL;
- apiConnectptr.p->executingIndexOp = RNIL;
- apiConnectptr.p->buddyPtr = RNIL;
- apiConnectptr.p->currSavePointId = 0;
- }//for
- apiConnectptr.i = tiacTmp - 1;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- apiConnectptr.p->nextApiConnect = RNIL;
- cfirstfreeApiConnect = 0;
- guard4 = (2 * tiacTmp) - 1;
- for (apiConnectptr.i = tiacTmp; apiConnectptr.i <= guard4; apiConnectptr.i++)
- {
- refresh_watch_dog();
- jam();
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- apiConnectptr.p->apiConnectstate = CS_RESTART;
- apiConnectptr.p->apiFailState = ZFALSE;
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
- apiConnectptr.p->takeOverRec = (Uint8)Z8NIL;
- apiConnectptr.p->cachePtr = RNIL;
- apiConnectptr.p->nextApiConnect = apiConnectptr.i + 1;
- apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref
- apiConnectptr.p->commitAckMarker = RNIL;
- apiConnectptr.p->firstTcConnect = RNIL;
- apiConnectptr.p->lastTcConnect = RNIL;
- apiConnectptr.p->triggerPending = false;
- apiConnectptr.p->isIndexOp = false;
- apiConnectptr.p->accumulatingIndexOp = RNIL;
- apiConnectptr.p->executingIndexOp = RNIL;
- apiConnectptr.p->buddyPtr = RNIL;
- apiConnectptr.p->currSavePointId = 0;
- }//for
- apiConnectptr.i = (2 * tiacTmp) - 1;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- apiConnectptr.p->nextApiConnect = RNIL;
- cfirstfreeApiConnectCopy = tiacTmp;
- guard4 = (3 * tiacTmp) - 1;
- for (apiConnectptr.i = 2 * tiacTmp; apiConnectptr.i <= guard4;
- apiConnectptr.i++) {
- refresh_watch_dog();
- jam();
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
- apiConnectptr.p->apiFailState = ZFALSE;
- apiConnectptr.p->apiConnectstate = CS_RESTART;
- apiConnectptr.p->takeOverRec = (Uint8)Z8NIL;
- apiConnectptr.p->cachePtr = RNIL;
- apiConnectptr.p->nextApiConnect = apiConnectptr.i + 1;
- apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref
- apiConnectptr.p->commitAckMarker = RNIL;
- apiConnectptr.p->firstTcConnect = RNIL;
- apiConnectptr.p->lastTcConnect = RNIL;
- apiConnectptr.p->triggerPending = false;
- apiConnectptr.p->isIndexOp = false;
- apiConnectptr.p->accumulatingIndexOp = RNIL;
- apiConnectptr.p->executingIndexOp = RNIL;
- apiConnectptr.p->buddyPtr = RNIL;
- apiConnectptr.p->currSavePointId = 0;
- }//for
- apiConnectptr.i = (3 * tiacTmp) - 1;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- apiConnectptr.p->nextApiConnect = RNIL;
- cfirstfreeApiConnectFail = 2 * tiacTmp;
-}//Dbtc::initApiConnect()
-
-void Dbtc::initattrbuf(Signal* signal)
-{
- ndbrequire(cattrbufFilesize > 0);
- for (attrbufptr.i = 0; attrbufptr.i < cattrbufFilesize; attrbufptr.i++) {
- refresh_watch_dog();
- jam();
- ptrAss(attrbufptr, attrbufRecord);
- attrbufptr.p->attrbuf[ZINBUF_NEXT] = attrbufptr.i + 1; /* NEXT ATTRBUF */
- }//for
- attrbufptr.i = cattrbufFilesize - 1;
- ptrAss(attrbufptr, attrbufRecord);
- attrbufptr.p->attrbuf[ZINBUF_NEXT] = RNIL; /* NEXT ATTRBUF */
- cfirstfreeAttrbuf = 0;
-}//Dbtc::initattrbuf()
-
-void Dbtc::initdatabuf(Signal* signal)
-{
- ndbrequire(cdatabufFilesize > 0);
- for (databufptr.i = 0; databufptr.i < cdatabufFilesize; databufptr.i++) {
- refresh_watch_dog();
- ptrAss(databufptr, databufRecord);
- databufptr.p->nextDatabuf = databufptr.i + 1;
- }//for
- databufptr.i = cdatabufFilesize - 1;
- ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
- databufptr.p->nextDatabuf = RNIL;
- cfirstfreeDatabuf = 0;
-}//Dbtc::initdatabuf()
-
-void Dbtc::initgcp(Signal* signal)
-{
- ndbrequire(cgcpFilesize > 0);
- for (gcpPtr.i = 0; gcpPtr.i < cgcpFilesize; gcpPtr.i++) {
- ptrAss(gcpPtr, gcpRecord);
- gcpPtr.p->nextGcp = gcpPtr.i + 1;
- }//for
- gcpPtr.i = cgcpFilesize - 1;
- ptrCheckGuard(gcpPtr, cgcpFilesize, gcpRecord);
- gcpPtr.p->nextGcp = RNIL;
- cfirstfreeGcp = 0;
- cfirstgcp = RNIL;
- clastgcp = RNIL;
-}//Dbtc::initgcp()
-
-void Dbtc::inithost(Signal* signal)
-{
- cpackedListIndex = 0;
- ndbrequire(chostFilesize > 0);
- for (hostptr.i = 0; hostptr.i < chostFilesize; hostptr.i++) {
- jam();
- ptrAss(hostptr, hostRecord);
- hostptr.p->hostStatus = HS_DEAD;
- hostptr.p->inPackedList = false;
- hostptr.p->takeOverStatus = TOS_NOT_DEFINED;
- hostptr.p->lqhTransStatus = LTS_IDLE;
- hostptr.p->noOfWordsTCKEYCONF = 0;
- hostptr.p->noOfWordsTCINDXCONF = 0;
- hostptr.p->noOfPackedWordsLqh = 0;
- hostptr.p->hostLqhBlockRef = calcLqhBlockRef(hostptr.i);
- }//for
-}//Dbtc::inithost()
-
-void Dbtc::initialiseRecordsLab(Signal* signal, UintR Tdata0,
- Uint32 retRef, Uint32 retData)
-{
- switch (Tdata0) {
- case 0:
- jam();
- initApiConnect(signal);
- break;
- case 1:
- jam();
- initattrbuf(signal);
- break;
- case 2:
- jam();
- initdatabuf(signal);
- break;
- case 3:
- jam();
- initgcp(signal);
- break;
- case 4:
- jam();
- inithost(signal);
- break;
- case 5:
- jam();
- // UNUSED Free to initialise something
- break;
- case 6:
- jam();
- initTable(signal);
- break;
- case 7:
- jam();
- initialiseScanrec(signal);
- break;
- case 8:
- jam();
- initialiseScanOprec(signal);
- break;
- case 9:
- jam();
- initialiseScanFragrec(signal);
- break;
- case 10:
- jam();
- initialiseTcConnect(signal);
- break;
- case 11:
- jam();
- initTcFail(signal);
-
- {
- ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = retData;
- sendSignal(retRef, GSN_READ_CONFIG_CONF, signal,
- ReadConfigConf::SignalLength, JBB);
- }
- return;
- break;
- default:
- jam();
- systemErrorLab(signal);
- return;
- break;
- }//switch
-
- signal->theData[0] = TcContinueB::ZINITIALISE_RECORDS;
- signal->theData[1] = Tdata0 + 1;
- signal->theData[2] = 0;
- signal->theData[3] = retRef;
- signal->theData[4] = retData;
- sendSignal(DBTC_REF, GSN_CONTINUEB, signal, 5, JBB);
-}
-
-/* ========================================================================= */
-/* ======= INITIALISE_SCANREC ======= */
-/* */
-/* ========================================================================= */
-void Dbtc::initialiseScanrec(Signal* signal)
-{
- ScanRecordPtr scanptr;
- ndbrequire(cscanrecFileSize > 0);
- for (scanptr.i = 0; scanptr.i < cscanrecFileSize; scanptr.i++) {
- refresh_watch_dog();
- jam();
- ptrAss(scanptr, scanRecord);
- new (scanptr.p) ScanRecord();
- scanptr.p->scanState = ScanRecord::IDLE;
- scanptr.p->scanApiRec = RNIL;
- scanptr.p->nextScan = scanptr.i + 1;
- }//for
- scanptr.i = cscanrecFileSize - 1;
- ptrAss(scanptr, scanRecord);
- scanptr.p->nextScan = RNIL;
- cfirstfreeScanrec = 0;
-}//Dbtc::initialiseScanrec()
-
-void Dbtc::initialiseScanFragrec(Signal* signal)
-{
-}//Dbtc::initialiseScanFragrec()
-
-void Dbtc::initialiseScanOprec(Signal* signal)
-{
-}//Dbtc::initialiseScanOprec()
-
-void Dbtc::initTable(Signal* signal)
-{
-
- ndbrequire(ctabrecFilesize > 0);
- for (tabptr.i = 0; tabptr.i < ctabrecFilesize; tabptr.i++) {
- refresh_watch_dog();
- ptrAss(tabptr, tableRecord);
- tabptr.p->currentSchemaVersion = 0;
- tabptr.p->storedTable = true;
- tabptr.p->tableType = 0;
- tabptr.p->enabled = false;
- tabptr.p->dropping = false;
- tabptr.p->noOfKeyAttr = 0;
- tabptr.p->hasCharAttr = 0;
- tabptr.p->noOfDistrKeys = 0;
- for (unsigned k = 0; k < MAX_ATTRIBUTES_IN_INDEX; k++) {
- tabptr.p->keyAttr[k].attributeDescriptor = 0;
- tabptr.p->keyAttr[k].charsetInfo = 0;
- }
- }//for
-}//Dbtc::initTable()
-
-void Dbtc::initialiseTcConnect(Signal* signal)
-{
- ndbrequire(ctcConnectFilesize >= 2);
-
-  // Place half of the tcConnect records in the cfirstfreeTcConnectFail list
- Uint32 titcTmp = ctcConnectFilesize / 2;
- for (tcConnectptr.i = 0; tcConnectptr.i < titcTmp; tcConnectptr.i++) {
- refresh_watch_dog();
- jam();
- ptrAss(tcConnectptr, tcConnectRecord);
- tcConnectptr.p->tcConnectstate = OS_RESTART;
- tcConnectptr.p->apiConnect = RNIL;
- tcConnectptr.p->noOfNodes = 0;
- tcConnectptr.p->nextTcConnect = tcConnectptr.i + 1;
- }//for
- tcConnectptr.i = titcTmp - 1;
- ptrAss(tcConnectptr, tcConnectRecord);
- tcConnectptr.p->nextTcConnect = RNIL;
- cfirstfreeTcConnectFail = 0;
-
- // Place other half in cfirstfreeTcConnect list
- for (tcConnectptr.i = titcTmp; tcConnectptr.i < ctcConnectFilesize;
- tcConnectptr.i++) {
- refresh_watch_dog();
- jam();
- ptrAss(tcConnectptr, tcConnectRecord);
- tcConnectptr.p->tcConnectstate = OS_RESTART;
- tcConnectptr.p->apiConnect = RNIL;
- tcConnectptr.p->noOfNodes = 0;
- tcConnectptr.p->nextTcConnect = tcConnectptr.i + 1;
- }//for
- tcConnectptr.i = ctcConnectFilesize - 1;
- ptrAss(tcConnectptr, tcConnectRecord);
- tcConnectptr.p->nextTcConnect = RNIL;
- cfirstfreeTcConnect = titcTmp;
- c_counters.cconcurrentOp = 0;
-}//Dbtc::initialiseTcConnect()
-
-/* ------------------------------------------------------------------------- */
-/* ---- LINK A GLOBAL CHECKPOINT RECORD INTO THE LIST WITH TRANSACTIONS */
-/* WAITING FOR COMPLETION. */
-/* ------------------------------------------------------------------------- */
-void Dbtc::linkGciInGcilist(Signal* signal)
-{
- GcpRecordPtr tmpGcpPointer;
- if (cfirstgcp == RNIL) {
- jam();
- cfirstgcp = gcpPtr.i;
- } else {
- jam();
- tmpGcpPointer.i = clastgcp;
- ptrCheckGuard(tmpGcpPointer, cgcpFilesize, gcpRecord);
- tmpGcpPointer.p->nextGcp = gcpPtr.i;
- }//if
- clastgcp = gcpPtr.i;
-}//Dbtc::linkGciInGcilist()
-
-/* ------------------------------------------------------------------------- */
-/* ------- LINK SECONDARY KEY BUFFER IN OPERATION RECORD ------- */
-/* ------------------------------------------------------------------------- */
-void Dbtc::linkKeybuf(Signal* signal)
-{
- seizeDatabuf(signal);
- tmpDatabufptr.i = cachePtr.p->lastKeybuf;
- cachePtr.p->lastKeybuf = databufptr.i;
- if (tmpDatabufptr.i == RNIL) {
- jam();
- cachePtr.p->firstKeybuf = databufptr.i;
- } else {
- jam();
- ptrCheckGuard(tmpDatabufptr, cdatabufFilesize, databufRecord);
- tmpDatabufptr.p->nextDatabuf = databufptr.i;
- }//if
-}//Dbtc::linkKeybuf()
-
-/* ------------------------------------------------------------------------- */
-/* ------- LINK A TC CONNECT RECORD INTO THE API LIST OF TC CONNECTIONS --- */
-/* ------------------------------------------------------------------------- */
-void Dbtc::linkTcInConnectionlist(Signal* signal)
-{
- /* POINTER FOR THE CONNECT_RECORD */
- TcConnectRecordPtr ltcTcConnectptr;
-
- tcConnectptr.p->nextTcConnect = RNIL;
- ltcTcConnectptr.i = apiConnectptr.p->lastTcConnect;
- ptrCheck(ltcTcConnectptr, ctcConnectFilesize, tcConnectRecord);
- apiConnectptr.p->lastTcConnect = tcConnectptr.i;
- if (ltcTcConnectptr.i == RNIL) {
- jam();
- apiConnectptr.p->firstTcConnect = tcConnectptr.i;
- } else {
- jam();
- ptrGuard(ltcTcConnectptr);
- ltcTcConnectptr.p->nextTcConnect = tcConnectptr.i;
- }//if
-}//Dbtc::linkTcInConnectionlist()
-
-/*---------------------------------------------------------------------------*/
-/* RELEASE_ABORT_RESOURCES */
-/* THIS CODE RELEASES ALL RESOURCES AFTER AN ABORT OF A TRANSACTION AND ALSO */
-/* SENDS THE ABORT DECISION TO THE APPLICATION. */
-/*---------------------------------------------------------------------------*/
-void Dbtc::releaseAbortResources(Signal* signal)
-{
- TcConnectRecordPtr rarTcConnectptr;
-
- c_counters.cabortCount++;
- if (apiConnectptr.p->cachePtr != RNIL) {
- cachePtr.i = apiConnectptr.p->cachePtr;
- ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
- releaseAttrinfo();
- releaseKeys();
- }//if
- tcConnectptr.i = apiConnectptr.p->firstTcConnect;
- while (tcConnectptr.i != RNIL) {
- jam();
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- // Clear any markers that were set in CS_RECEIVING state
- clearCommitAckMarker(apiConnectptr.p, tcConnectptr.p);
- rarTcConnectptr.i = tcConnectptr.p->nextTcConnect;
- releaseTcCon();
- tcConnectptr.i = rarTcConnectptr.i;
- }//while
- apiConnectptr.p->firstTcConnect = RNIL;
- apiConnectptr.p->lastTcConnect = RNIL;
-
- // MASV let state be CS_ABORTING until all
- // signals in the "air" have been received. Reset to CS_CONNECTED
-  // will be done when a TCKEYREQ with start flag is received
- // or releaseApiCon is called
- // apiConnectptr.p->apiConnectstate = CS_CONNECTED;
- apiConnectptr.p->apiConnectstate = CS_ABORTING;
- apiConnectptr.p->abortState = AS_IDLE;
-
- if(apiConnectptr.p->m_exec_flag || apiConnectptr.p->apiFailState == ZTRUE){
- jam();
- bool ok = false;
- Uint32 blockRef = apiConnectptr.p->ndbapiBlockref;
- ReturnSignal ret = apiConnectptr.p->returnsignal;
- apiConnectptr.p->returnsignal = RS_NO_RETURN;
- apiConnectptr.p->m_exec_flag = 0;
- switch(ret){
- case RS_TCROLLBACKCONF:
- jam();
- ok = true;
- signal->theData[0] = apiConnectptr.p->ndbapiConnect;
- signal->theData[1] = apiConnectptr.p->transid[0];
- signal->theData[2] = apiConnectptr.p->transid[1];
- sendSignal(blockRef, GSN_TCROLLBACKCONF, signal, 3, JBB);
- break;
- case RS_TCROLLBACKREP:{
- jam();
- ok = true;
- TcRollbackRep * const tcRollbackRep =
- (TcRollbackRep *) signal->getDataPtr();
-
- tcRollbackRep->connectPtr = apiConnectptr.p->ndbapiConnect;
- tcRollbackRep->transId[0] = apiConnectptr.p->transid[0];
- tcRollbackRep->transId[1] = apiConnectptr.p->transid[1];
- tcRollbackRep->returnCode = apiConnectptr.p->returncode;
- sendSignal(blockRef, GSN_TCROLLBACKREP, signal,
- TcRollbackRep::SignalLength, JBB);
- }
- break;
- case RS_NO_RETURN:
- jam();
- ok = true;
- break;
- case RS_TCKEYCONF:
- case RS_TC_COMMITCONF:
- break;
- }
- if(!ok){
- jam();
- ndbout_c("returnsignal = %d", apiConnectptr.p->returnsignal);
- sendSystemError(signal);
- }//if
-
- }
- setApiConTimer(apiConnectptr.i, 0,
- 100000+c_apiConTimer_line[apiConnectptr.i]);
- if (apiConnectptr.p->apiFailState == ZTRUE) {
- jam();
- handleApiFailState(signal, apiConnectptr.i);
- return;
- }//if
-}//Dbtc::releaseAbortResources()
-
-void Dbtc::releaseApiCon(Signal* signal, UintR TapiConnectPtr)
-{
- ApiConnectRecordPtr TlocalApiConnectptr;
-
- TlocalApiConnectptr.i = TapiConnectPtr;
- ptrCheckGuard(TlocalApiConnectptr, capiConnectFilesize, apiConnectRecord);
- TlocalApiConnectptr.p->nextApiConnect = cfirstfreeApiConnect;
- cfirstfreeApiConnect = TlocalApiConnectptr.i;
- setApiConTimer(TlocalApiConnectptr.i, 0, __LINE__);
- TlocalApiConnectptr.p->apiConnectstate = CS_DISCONNECTED;
- ndbassert(TlocalApiConnectptr.p->apiScanRec == RNIL);
- TlocalApiConnectptr.p->ndbapiBlockref = 0;
-}//Dbtc::releaseApiCon()
-
-void Dbtc::releaseApiConnectFail(Signal* signal)
-{
- apiConnectptr.p->apiConnectstate = CS_RESTART;
- apiConnectptr.p->takeOverRec = (Uint8)Z8NIL;
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
- apiConnectptr.p->nextApiConnect = cfirstfreeApiConnectFail;
- cfirstfreeApiConnectFail = apiConnectptr.i;
-}//Dbtc::releaseApiConnectFail()
-
-void Dbtc::releaseGcp(Signal* signal)
-{
- ptrGuard(gcpPtr);
- gcpPtr.p->nextGcp = cfirstfreeGcp;
- cfirstfreeGcp = gcpPtr.i;
-}//Dbtc::releaseGcp()
-
-void Dbtc::releaseKeys()
-{
- UintR Tmp;
- databufptr.i = cachePtr.p->firstKeybuf;
- while (databufptr.i != RNIL) {
- jam();
- ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
- Tmp = databufptr.p->nextDatabuf;
- databufptr.p->nextDatabuf = cfirstfreeDatabuf;
- cfirstfreeDatabuf = databufptr.i;
- databufptr.i = Tmp;
- }//while
- cachePtr.p->firstKeybuf = RNIL;
- cachePtr.p->lastKeybuf = RNIL;
-}//Dbtc::releaseKeys()
-
-void Dbtc::releaseTcConnectFail(Signal* signal)
-{
- ptrGuard(tcConnectptr);
- tcConnectptr.p->nextTcConnect = cfirstfreeTcConnectFail;
- cfirstfreeTcConnectFail = tcConnectptr.i;
-}//Dbtc::releaseTcConnectFail()
-
-void Dbtc::seizeApiConnect(Signal* signal)
-{
- if (cfirstfreeApiConnect != RNIL) {
- jam();
- terrorCode = ZOK;
- apiConnectptr.i = cfirstfreeApiConnect; /* ASSIGN A FREE RECORD FROM */
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- cfirstfreeApiConnect = apiConnectptr.p->nextApiConnect;
- apiConnectptr.p->nextApiConnect = RNIL;
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
- apiConnectptr.p->apiConnectstate = CS_CONNECTED; /* STATE OF CONNECTION */
- apiConnectptr.p->triggerPending = false;
- apiConnectptr.p->isIndexOp = false;
- } else {
- jam();
- terrorCode = ZNO_FREE_API_CONNECTION;
- }//if
-}//Dbtc::seizeApiConnect()
-
-void Dbtc::seizeApiConnectFail(Signal* signal)
-{
- apiConnectptr.i = cfirstfreeApiConnectFail;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- cfirstfreeApiConnectFail = apiConnectptr.p->nextApiConnect;
-}//Dbtc::seizeApiConnectFail()
-
-void Dbtc::seizeDatabuf(Signal* signal)
-{
- databufptr.i = cfirstfreeDatabuf;
- ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
- cfirstfreeDatabuf = databufptr.p->nextDatabuf;
- databufptr.p->nextDatabuf = RNIL;
-}//Dbtc::seizeDatabuf()
-
-void Dbtc::seizeTcConnect(Signal* signal)
-{
- tcConnectptr.i = cfirstfreeTcConnect;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- cfirstfreeTcConnect = tcConnectptr.p->nextTcConnect;
- c_counters.cconcurrentOp++;
- tcConnectptr.p->isIndexOp = false;
-}//Dbtc::seizeTcConnect()
-
-void Dbtc::seizeTcConnectFail(Signal* signal)
-{
- tcConnectptr.i = cfirstfreeTcConnectFail;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- cfirstfreeTcConnectFail = tcConnectptr.p->nextTcConnect;
-}//Dbtc::seizeTcConnectFail()
-
-void Dbtc::sendAttrinfo(Signal* signal,
- UintR TattrinfoPtr,
- AttrbufRecord * const regAttrPtr,
- UintR TBref)
-{
- UintR TdataPos;
- UintR sig0, sig1, sig2, sig3, sig4, sig5, sig6, sig7;
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- TdataPos = regAttrPtr->attrbuf[ZINBUF_DATA_LEN];
- sig0 = TattrinfoPtr;
- sig1 = regApiPtr->transid[0];
- sig2 = regApiPtr->transid[1];
-
- signal->theData[0] = sig0;
- signal->theData[1] = sig1;
- signal->theData[2] = sig2;
-
- sig0 = regAttrPtr->attrbuf[0];
- sig1 = regAttrPtr->attrbuf[1];
- sig2 = regAttrPtr->attrbuf[2];
- sig3 = regAttrPtr->attrbuf[3];
- sig4 = regAttrPtr->attrbuf[4];
- sig5 = regAttrPtr->attrbuf[5];
- sig6 = regAttrPtr->attrbuf[6];
- sig7 = regAttrPtr->attrbuf[7];
-
- signal->theData[3] = sig0;
- signal->theData[4] = sig1;
- signal->theData[5] = sig2;
- signal->theData[6] = sig3;
- signal->theData[7] = sig4;
- signal->theData[8] = sig5;
- signal->theData[9] = sig6;
- signal->theData[10] = sig7;
-
- if (TdataPos > 8) {
- sig0 = regAttrPtr->attrbuf[8];
- sig1 = regAttrPtr->attrbuf[9];
- sig2 = regAttrPtr->attrbuf[10];
- sig3 = regAttrPtr->attrbuf[11];
- sig4 = regAttrPtr->attrbuf[12];
- sig5 = regAttrPtr->attrbuf[13];
- sig6 = regAttrPtr->attrbuf[14];
-
- jam();
- signal->theData[11] = sig0;
- signal->theData[12] = sig1;
- signal->theData[13] = sig2;
- signal->theData[14] = sig3;
- signal->theData[15] = sig4;
- signal->theData[16] = sig5;
- signal->theData[17] = sig6;
-
- if (TdataPos > 15) {
-
- sig0 = regAttrPtr->attrbuf[15];
- sig1 = regAttrPtr->attrbuf[16];
- sig2 = regAttrPtr->attrbuf[17];
- sig3 = regAttrPtr->attrbuf[18];
- sig4 = regAttrPtr->attrbuf[19];
- sig5 = regAttrPtr->attrbuf[20];
- sig6 = regAttrPtr->attrbuf[21];
-
- jam();
- signal->theData[18] = sig0;
- signal->theData[19] = sig1;
- signal->theData[20] = sig2;
- signal->theData[21] = sig3;
- signal->theData[22] = sig4;
- signal->theData[23] = sig5;
- signal->theData[24] = sig6;
- }//if
- }//if
- sendSignal(TBref, GSN_ATTRINFO, signal, TdataPos + 3, JBB);
-}//Dbtc::sendAttrinfo()
-
-void Dbtc::sendContinueTimeOutControl(Signal* signal, Uint32 TapiConPtr)
-{
- signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_CONTROL;
- signal->theData[1] = TapiConPtr;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
-}//Dbtc::sendContinueTimeOutControl()
-
-void Dbtc::sendKeyinfo(Signal* signal, BlockReference TBRef, Uint32 len)
-{
- signal->theData[0] = tcConnectptr.i;
- signal->theData[1] = apiConnectptr.p->transid[0];
- signal->theData[2] = apiConnectptr.p->transid[1];
- signal->theData[3] = cdata[0];
- signal->theData[4] = cdata[1];
- signal->theData[5] = cdata[2];
- signal->theData[6] = cdata[3];
- signal->theData[7] = cdata[4];
- signal->theData[8] = cdata[5];
- signal->theData[9] = cdata[6];
- signal->theData[10] = cdata[7];
- signal->theData[11] = cdata[8];
- signal->theData[12] = cdata[9];
- signal->theData[13] = cdata[10];
- signal->theData[14] = cdata[11];
- signal->theData[15] = cdata[12];
- signal->theData[16] = cdata[13];
- signal->theData[17] = cdata[14];
- signal->theData[18] = cdata[15];
- signal->theData[19] = cdata[16];
- signal->theData[20] = cdata[17];
- signal->theData[21] = cdata[18];
- signal->theData[22] = cdata[19];
- sendSignal(TBRef, GSN_KEYINFO, signal, 3 + len, JBB);
-}//Dbtc::sendKeyinfo()
-
-void Dbtc::sendSystemError(Signal* signal)
-{
- progError(0, 0);
-}//Dbtc::sendSystemError()
-
-/* ========================================================================= */
-/* ------- LINK ACTUAL GCP OUT OF LIST ------- */
-/* ------------------------------------------------------------------------- */
-void Dbtc::unlinkGcp(Signal* signal)
-{
- if (cfirstgcp == gcpPtr.i) {
- jam();
- cfirstgcp = gcpPtr.p->nextGcp;
- if (gcpPtr.i == clastgcp) {
- jam();
- clastgcp = RNIL;
- }//if
- } else {
- jam();
- /* --------------------------------------------------------------------
- * WE ARE TRYING TO REMOVE A GLOBAL CHECKPOINT WHICH WAS NOT THE OLDEST.
- * THIS IS A SYSTEM ERROR.
- * ------------------------------------------------------------------- */
- sendSystemError(signal);
- }//if
- gcpPtr.p->nextGcp = cfirstfreeGcp;
- cfirstfreeGcp = gcpPtr.i;
-}//Dbtc::unlinkGcp()
-
-void
-Dbtc::execDUMP_STATE_ORD(Signal* signal)
-{
- DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0];
- if(signal->theData[0] == DumpStateOrd::CommitAckMarkersSize){
- infoEvent("TC: m_commitAckMarkerPool: %d free size: %d",
- m_commitAckMarkerPool.getNoOfFree(),
- m_commitAckMarkerPool.getSize());
- }
- if(signal->theData[0] == DumpStateOrd::CommitAckMarkersDump){
- infoEvent("TC: m_commitAckMarkerPool: %d free size: %d",
- m_commitAckMarkerPool.getNoOfFree(),
- m_commitAckMarkerPool.getSize());
-
- CommitAckMarkerIterator iter;
- for(m_commitAckMarkerHash.first(iter); iter.curr.i != RNIL;
- m_commitAckMarkerHash.next(iter)){
- infoEvent("CommitAckMarker: i = %d (0x%x, 0x%x)"
- " Api: %d Lghs(%d): %d %d %d %d bucket = %d",
- iter.curr.i,
- iter.curr.p->transid1,
- iter.curr.p->transid2,
- iter.curr.p->apiNodeId,
- iter.curr.p->noOfLqhs,
- iter.curr.p->lqhNodeId[0],
- iter.curr.p->lqhNodeId[1],
- iter.curr.p->lqhNodeId[2],
- iter.curr.p->lqhNodeId[3],
- iter.bucket);
- }
- }
- // Dump all ScanFragRecs
- if (dumpState->args[0] == DumpStateOrd::TcDumpAllScanFragRec){
- Uint32 recordNo = 0;
- if (signal->getLength() == 1)
- infoEvent("TC: Dump all ScanFragRec - size: %d",
- cscanFragrecFileSize);
- else if (signal->getLength() == 2)
- recordNo = dumpState->args[1];
- else
- return;
-
- dumpState->args[0] = DumpStateOrd::TcDumpOneScanFragRec;
- dumpState->args[1] = recordNo;
- execDUMP_STATE_ORD(signal);
-
- if (recordNo < cscanFragrecFileSize-1){
- dumpState->args[0] = DumpStateOrd::TcDumpAllScanFragRec;
- dumpState->args[1] = recordNo+1;
- sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
- }
- }
-
- // Dump one ScanFragRec
- if (dumpState->args[0] == DumpStateOrd::TcDumpOneScanFragRec){
- Uint32 recordNo = RNIL;
- if (signal->getLength() == 2)
- recordNo = dumpState->args[1];
- else
- return;
-
- if (recordNo >= cscanFragrecFileSize)
- return;
-
- ScanFragRecPtr sfp;
- sfp.i = recordNo;
- c_scan_frag_pool.getPtr(sfp);
- infoEvent("Dbtc::ScanFragRec[%d]: state=%d fragid=%d",
- sfp.i,
- sfp.p->scanFragState,
- sfp.p->scanFragId);
- infoEvent(" nodeid=%d, timer=%d",
- refToNode(sfp.p->lqhBlockref),
- sfp.p->scanFragTimer);
- }
-
- // Dump all ScanRecords
- if (dumpState->args[0] == DumpStateOrd::TcDumpAllScanRec){
- Uint32 recordNo = 0;
- if (signal->getLength() == 1)
- infoEvent("TC: Dump all ScanRecord - size: %d",
- cscanrecFileSize);
- else if (signal->getLength() == 2)
- recordNo = dumpState->args[1];
- else
- return;
-
- dumpState->args[0] = DumpStateOrd::TcDumpOneScanRec;
- dumpState->args[1] = recordNo;
- execDUMP_STATE_ORD(signal);
-
- if (recordNo < cscanrecFileSize-1){
- dumpState->args[0] = DumpStateOrd::TcDumpAllScanRec;
- dumpState->args[1] = recordNo+1;
- sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
- }
- }
-
- // Dump all active ScanRecords
- if (dumpState->args[0] == DumpStateOrd::TcDumpAllActiveScanRec){
- Uint32 recordNo = 0;
- if (signal->getLength() == 1)
- infoEvent("TC: Dump active ScanRecord - size: %d",
- cscanrecFileSize);
- else if (signal->getLength() == 2)
- recordNo = dumpState->args[1];
- else
- return;
-
- ScanRecordPtr sp;
- sp.i = recordNo;
- ptrAss(sp, scanRecord);
- if (sp.p->scanState != ScanRecord::IDLE){
- dumpState->args[0] = DumpStateOrd::TcDumpOneScanRec;
- dumpState->args[1] = recordNo;
- execDUMP_STATE_ORD(signal);
- }
-
- if (recordNo < cscanrecFileSize-1){
- dumpState->args[0] = DumpStateOrd::TcDumpAllActiveScanRec;
- dumpState->args[1] = recordNo+1;
- sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
- }
- }
-
- // Dump one ScanRecord
- // and associated ScanFragRec and ApiConnectRecord
- if (dumpState->args[0] == DumpStateOrd::TcDumpOneScanRec){
- Uint32 recordNo = RNIL;
- if (signal->getLength() == 2)
- recordNo = dumpState->args[1];
- else
- return;
-
- if (recordNo >= cscanrecFileSize)
- return;
-
- ScanRecordPtr sp;
- sp.i = recordNo;
- ptrAss(sp, scanRecord);
- infoEvent("Dbtc::ScanRecord[%d]: state=%d"
- "nextfrag=%d, nofrag=%d",
- sp.i,
- sp.p->scanState,
- sp.p->scanNextFragId,
- sp.p->scanNoFrag);
- infoEvent(" ailen=%d, para=%d, receivedop=%d, noOprePperFrag=%d",
- sp.p->scanAiLength,
- sp.p->scanParallel,
- sp.p->scanReceivedOperations,
- sp.p->batch_size_rows);
- infoEvent(" schv=%d, tab=%d, sproc=%d",
- sp.p->scanSchemaVersion,
- sp.p->scanTableref,
- sp.p->scanStoredProcId);
- infoEvent(" apiRec=%d, next=%d",
- sp.p->scanApiRec, sp.p->nextScan);
-
- if (sp.p->scanState != ScanRecord::IDLE){
- // Request dump of ScanFragRec
- ScanFragRecPtr sfptr;
-#define DUMP_SFR(x){\
- ScanFragList list(c_scan_frag_pool, x);\
- for(list.first(sfptr); !sfptr.isNull(); list.next(sfptr)){\
- dumpState->args[0] = DumpStateOrd::TcDumpOneScanFragRec; \
- dumpState->args[1] = sfptr.i;\
- execDUMP_STATE_ORD(signal);\
- }}
-
- DUMP_SFR(sp.p->m_running_scan_frags);
- DUMP_SFR(sp.p->m_queued_scan_frags);
- DUMP_SFR(sp.p->m_delivered_scan_frags);
-
- // Request dump of ApiConnectRecord
- dumpState->args[0] = DumpStateOrd::TcDumpOneApiConnectRec;
- dumpState->args[1] = sp.p->scanApiRec;
- execDUMP_STATE_ORD(signal);
- }
-
- }
-
- // Dump all ApiConnectRecord(s)
- if (dumpState->args[0] == DumpStateOrd::TcDumpAllApiConnectRec){
- Uint32 recordNo = 0;
- if (signal->getLength() == 1)
- infoEvent("TC: Dump all ApiConnectRecord - size: %d",
- capiConnectFilesize);
- else if (signal->getLength() == 2)
- recordNo = dumpState->args[1];
- else
- return;
-
- dumpState->args[0] = DumpStateOrd::TcDumpOneApiConnectRec;
- dumpState->args[1] = recordNo;
- execDUMP_STATE_ORD(signal);
-
- if (recordNo < capiConnectFilesize-1){
- dumpState->args[0] = DumpStateOrd::TcDumpAllApiConnectRec;
- dumpState->args[1] = recordNo+1;
- sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
- }
- }
-
- // Dump one ApiConnectRecord
- if (dumpState->args[0] == DumpStateOrd::TcDumpOneApiConnectRec){
- Uint32 recordNo = RNIL;
- if (signal->getLength() == 2)
- recordNo = dumpState->args[1];
- else
- return;
-
- if (recordNo >= capiConnectFilesize)
- return;
-
- ApiConnectRecordPtr ap;
- ap.i = recordNo;
- ptrAss(ap, apiConnectRecord);
- infoEvent("Dbtc::ApiConnectRecord[%d]: state=%d, abortState=%d, "
- "apiFailState=%d",
- ap.i,
- ap.p->apiConnectstate,
- ap.p->abortState,
- ap.p->apiFailState);
- infoEvent(" transid(0x%x, 0x%x), apiBref=0x%x, scanRec=%d",
- ap.p->transid[0],
- ap.p->transid[1],
- ap.p->ndbapiBlockref,
- ap.p->apiScanRec);
- infoEvent(" ctcTimer=%d, apiTimer=%d, counter=%d, retcode=%d, "
- "retsig=%d",
- ctcTimer, getApiConTimer(ap.i),
- ap.p->counter,
- ap.p->returncode,
- ap.p->returnsignal);
- infoEvent(" lqhkeyconfrec=%d, lqhkeyreqrec=%d, "
- "tckeyrec=%d",
- ap.p->lqhkeyconfrec,
- ap.p->lqhkeyreqrec,
- ap.p->tckeyrec);
- infoEvent(" next=%d ",
- ap.p->nextApiConnect);
- }
-
- if (dumpState->args[0] == DumpStateOrd::TcSetTransactionTimeout){
- jam();
- if(signal->getLength() > 1){
- set_timeout_value(signal->theData[1]);
- }
- }
-
- if (dumpState->args[0] == DumpStateOrd::TcSetApplTransactionTimeout){
- jam();
- if(signal->getLength() > 1){
- set_appl_timeout_value(signal->theData[1]);
- }
- }
-
- if (dumpState->args[0] == DumpStateOrd::StartTcTimer){
- c_counters.c_trans_status = TransCounters::Started;
- c_counters.reset();
- }
-
- if (dumpState->args[0] == DumpStateOrd::StopTcTimer){
- c_counters.c_trans_status = TransCounters::Off;
- Uint32 len = c_counters.report(signal);
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, len, JBB);
- c_counters.reset();
- }
-
- if (dumpState->args[0] == DumpStateOrd::StartPeriodicTcTimer){
- c_counters.c_trans_status = TransCounters::Timer;
- c_counters.reset();
- signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP;
- sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 1);
- }
-}//Dbtc::execDUMP_STATE_ORD()
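/* Illustrative standalone sketch, not NDB code: each "dump all" branch in
 * execDUMP_STATE_ORD() above dumps a single record and then re-sends
 * DUMP_STATE_ORD to the block itself with recordNo + 1, so a full dump is
 * spread over many short signal executions instead of one long loop.  The
 * queue, Signal type and handler below are hypothetical. */
#include <cstdio>
#include <cstdint>
#include <queue>

struct Signal { uint32_t arg0; };
static std::queue<Signal> jobBuffer;           // stands in for the JBB job buffer

static const uint32_t kRecords = 5;

void dumpAllHandler(Signal s) {
  std::printf("dump record %u\n", s.arg0);     // the "dump one record" step
  if (s.arg0 + 1 < kRecords)                   // continue with the next record
    jobBuffer.push(Signal{s.arg0 + 1});        // analogous to sendSignal(reference(), ...)
}

int main() {
  jobBuffer.push(Signal{0});
  while (!jobBuffer.empty()) {                 // toy signal scheduler
    Signal s = jobBuffer.front(); jobBuffer.pop();
    dumpAllHandler(s);
  }
  return 0;
}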
-
-void Dbtc::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
-
- switch (var) {
-
- case TransactionInactiveTime:
- jam();
- set_appl_timeout_value(val);
- break;
- case TransactionDeadlockDetectionTimeout:
- set_timeout_value(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfConcurrentProcessesHandleTakeover:
- set_no_parallel_takeover(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-}
-
-void Dbtc::execABORT_ALL_REQ(Signal* signal)
-{
- jamEntry();
- AbortAllReq * req = (AbortAllReq*)&signal->theData[0];
- AbortAllRef * ref = (AbortAllRef*)&signal->theData[0];
-
- const Uint32 senderData = req->senderData;
- const BlockReference senderRef = req->senderRef;
-
- if(getAllowStartTransaction() == true && !getNodeState().getSingleUserMode()){
- jam();
-
- ref->senderData = senderData;
- ref->errorCode = AbortAllRef::InvalidState;
- sendSignal(senderRef, GSN_ABORT_ALL_REF, signal,
- AbortAllRef::SignalLength, JBB);
- return;
- }
-
- if(c_abortRec.clientRef != 0){
- jam();
-
- ref->senderData = senderData;
- ref->errorCode = AbortAllRef::AbortAlreadyInProgress;
- sendSignal(senderRef, GSN_ABORT_ALL_REF, signal,
- AbortAllRef::SignalLength, JBB);
- return;
- }
-
- if(refToNode(senderRef) != getOwnNodeId()){
- jam();
-
- ref->senderData = senderData;
- ref->errorCode = AbortAllRef::FunctionNotImplemented;
- sendSignal(senderRef, GSN_ABORT_ALL_REF, signal,
- AbortAllRef::SignalLength, JBB);
- return;
- }
-
- c_abortRec.clientRef = senderRef;
- c_abortRec.clientData = senderData;
- c_abortRec.oldTimeOutValue = ctimeOutValue;
-
- ctimeOutValue = 0;
- const Uint32 sleepTime = (2 * 10 * ctimeOutCheckDelay + 199) / 200;
-
- checkAbortAllTimeout(signal, (sleepTime == 0 ? 1 : sleepTime));
-}
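/* Illustrative standalone sketch, not NDB code: the sleepTime expression in
 * execABORT_ALL_REQ() above is a ceiling division -- (x + 199) / 200 rounds a
 * duration up to whole 200 ms ticks, and checkAbortAllTimeout() then counts
 * those ticks down with one 200 ms CONTINUEB per tick.  Reading
 * 2 * 10 * ctimeOutCheckDelay as "two check periods expressed in milliseconds"
 * is an assumption; the rounding behaviour is what the sketch verifies. */
#include <cassert>
#include <cstdint>

// Round a millisecond duration up to a number of 200 ms delay ticks.
static uint32_t msToTicks(uint32_t ms) { return (ms + 199) / 200; }

int main() {
  assert(msToTicks(0)   == 0);   // nothing to wait for
  assert(msToTicks(1)   == 1);   // any remainder still costs a full tick
  assert(msToTicks(200) == 1);
  assert(msToTicks(201) == 2);
  // e.g. a 1000 ms window (2 * 10 * ctimeOutCheckDelay with a delay of 50 units)
  assert(msToTicks(2 * 10 * 50) == 5);
  return 0;
}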
-
-void Dbtc::checkAbortAllTimeout(Signal* signal, Uint32 sleepTime)
-{
-
- ndbrequire(c_abortRec.clientRef != 0);
-
- if(sleepTime > 0){
- jam();
-
- sleepTime -= 1;
- signal->theData[0] = TcContinueB::ZWAIT_ABORT_ALL;
- signal->theData[1] = sleepTime;
- sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 200, 2);
- return;
- }
-
- AbortAllConf * conf = (AbortAllConf*)&signal->theData[0];
- conf->senderData = c_abortRec.clientData;
- sendSignal(c_abortRec.clientRef, GSN_ABORT_ALL_CONF, signal,
- AbortAllConf::SignalLength, JBB);
-
- ctimeOutValue = c_abortRec.oldTimeOutValue;
- c_abortRec.clientRef = 0;
-}
-
-/* **************************************************************** */
-/* ---------------------------------------------------------------- */
-/* ------------------ TRIGGER AND INDEX HANDLING ------------------ */
-/* ---------------------------------------------------------------- */
-/* **************************************************************** */
-
-void Dbtc::execCREATE_TRIG_REQ(Signal* signal)
-{
- jamEntry();
- CreateTrigReq * const createTrigReq =
- (CreateTrigReq *)&signal->theData[0];
- TcDefinedTriggerData* triggerData;
- DefinedTriggerPtr triggerPtr;
- BlockReference sender = signal->senderBlockRef();
-
- releaseSections(signal);
-
- triggerPtr.i = createTrigReq->getTriggerId();
- if (ERROR_INSERTED(8033) ||
- !c_theDefinedTriggers.seizeId(triggerPtr,
- createTrigReq->getTriggerId())) {
- CLEAR_ERROR_INSERT_VALUE;
- // Failed to allocate trigger record
- CreateTrigRef * const createTrigRef =
- (CreateTrigRef *)&signal->theData[0];
-
- createTrigRef->setConnectionPtr(createTrigReq->getConnectionPtr());
- createTrigRef->setErrorCode(CreateTrigRef::TooManyTriggers);
- sendSignal(sender, GSN_CREATE_TRIG_REF,
- signal, CreateTrigRef::SignalLength, JBB);
- return;
- }
-
- triggerData = triggerPtr.p;
- triggerData->triggerId = createTrigReq->getTriggerId();
- triggerData->triggerType = createTrigReq->getTriggerType();
- triggerData->triggerEvent = createTrigReq->getTriggerEvent();
- triggerData->attributeMask = createTrigReq->getAttributeMask();
- if (triggerData->triggerType == TriggerType::SECONDARY_INDEX)
- triggerData->indexId = createTrigReq->getIndexId();
- CreateTrigConf * const createTrigConf =
- (CreateTrigConf *)&signal->theData[0];
-
- createTrigConf->setConnectionPtr(createTrigReq->getConnectionPtr());
- sendSignal(sender, GSN_CREATE_TRIG_CONF,
- signal, CreateTrigConf::SignalLength, JBB);
-}
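/* Illustrative standalone sketch, not NDB code: execCREATE_TRIG_REQ() above
 * follows the usual REQ -> CONF/REF shape: try to seize a record for the
 * requested id, answer with a REF carrying an error code if that fails,
 * otherwise fill the record and answer with a CONF.  The pool, reply type and
 * error code below are hypothetical stand-ins for that protocol shape. */
#include <cassert>
#include <cstdint>
#include <map>

struct Reply { bool isConf; uint32_t errorCode; };   // CONF when isConf, else REF
static const uint32_t kTooManyRecords = 293;         // hypothetical error code

struct Pool {
  std::map<uint32_t, uint32_t> records;              // id -> payload
  bool seizeId(uint32_t id) { return records.emplace(id, 0u).second; }
};

Reply handleCreate(Pool& pool, uint32_t id, uint32_t payload) {
  if (!pool.seizeId(id))
    return Reply{false, kTooManyRecords};            // would send *_REF
  pool.records[id] = payload;                        // fill the seized record
  return Reply{true, 0};                             // would send *_CONF
}

int main() {
  Pool pool;
  assert(handleCreate(pool, 11, 42).isConf);
  Reply r = handleCreate(pool, 11, 43);              // duplicate id cannot be seized
  assert(!r.isConf && r.errorCode == kTooManyRecords);
  return 0;
}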
-
-
-void Dbtc::execDROP_TRIG_REQ(Signal* signal)
-{
- jamEntry();
- DropTrigReq * const dropTrigReq = (DropTrigReq *)&signal->theData[0];
- BlockReference sender = signal->senderBlockRef();
-
- if ((c_theDefinedTriggers.getPtr(dropTrigReq->getTriggerId())) == NULL) {
- jam();
- // Failed to find trigger record
- DropTrigRef * const dropTrigRef = (DropTrigRef *)&signal->theData[0];
-
- dropTrigRef->setConnectionPtr(dropTrigReq->getConnectionPtr());
- dropTrigRef->setErrorCode(DropTrigRef::TriggerNotFound);
- sendSignal(sender, GSN_DROP_TRIG_REF,
- signal, DropTrigRef::SignalLength, JBB);
- return;
- }
-
- // Release trigger record
- c_theDefinedTriggers.release(dropTrigReq->getTriggerId());
-
- DropTrigConf * const dropTrigConf = (DropTrigConf *)&signal->theData[0];
-
- dropTrigConf->setConnectionPtr(dropTrigReq->getConnectionPtr());
- sendSignal(sender, GSN_DROP_TRIG_CONF,
- signal, DropTrigConf::SignalLength, JBB);
-}
-
-void Dbtc::execCREATE_INDX_REQ(Signal* signal)
-{
- jamEntry();
- CreateIndxReq * const createIndxReq =
- (CreateIndxReq *)signal->getDataPtr();
- TcIndexData* indexData;
- TcIndexDataPtr indexPtr;
- BlockReference sender = signal->senderBlockRef();
-
- if (ERROR_INSERTED(8034) ||
- !c_theIndexes.seizeId(indexPtr, createIndxReq->getIndexId())) {
- CLEAR_ERROR_INSERT_VALUE;
- // Failed to allocate index record
- CreateIndxRef * const createIndxRef =
- (CreateIndxRef *)&signal->theData[0];
-
- createIndxRef->setConnectionPtr(createIndxReq->getConnectionPtr());
- createIndxRef->setErrorCode(CreateIndxRef::TooManyIndexes);
- releaseSections(signal);
- sendSignal(sender, GSN_CREATE_INDX_REF,
- signal, CreateIndxRef::SignalLength, JBB);
- return;
- }
- indexData = indexPtr.p;
- // Indexes always start in state IS_BUILDING
- // Will become IS_ONLINE in execALTER_INDX_REQ
- indexData->indexState = IS_BUILDING;
- indexData->indexId = indexPtr.i;
- indexData->primaryTableId = createIndxReq->getTableId();
-
- // So far we only need the attribute count
- SegmentedSectionPtr ssPtr;
- signal->getSection(ssPtr, CreateIndxReq::ATTRIBUTE_LIST_SECTION);
- SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
- r0.reset(); // undo implicit first()
- if (!r0.getWord(&indexData->attributeList.sz) ||
- !r0.getWords(indexData->attributeList.id, indexData->attributeList.sz)) {
- ndbrequire(false);
- }
- indexData->primaryKeyPos = indexData->attributeList.sz;
-
- releaseSections(signal);
-
- CreateIndxConf * const createIndxConf =
- (CreateIndxConf *)&signal->theData[0];
-
- createIndxConf->setConnectionPtr(createIndxReq->getConnectionPtr());
- createIndxConf->setTableId(createIndxReq->getTableId());
- createIndxConf->setIndexId(createIndxReq->getIndexId());
- sendSignal(sender, GSN_CREATE_INDX_CONF,
- signal, CreateIndxConf::SignalLength, JBB);
-}
-
-void Dbtc::execALTER_INDX_REQ(Signal* signal)
-{
- jamEntry();
- AlterIndxReq * const alterIndxReq = (AlterIndxReq *)signal->getDataPtr();
- TcIndexData* indexData;
- //BlockReference sender = signal->senderBlockRef();
- BlockReference sender = (BlockReference) alterIndxReq->getUserRef();
- Uint32 connectionPtr = alterIndxReq->getConnectionPtr();
- AlterIndxReq::RequestType requestType = alterIndxReq->getRequestType();
- Uint32 tableId = alterIndxReq->getTableId();
- Uint32 indexId = alterIndxReq->getIndexId();
- bool online = (alterIndxReq->getOnline() == 1) ? true : false;
-
- if ((indexData = c_theIndexes.getPtr(indexId)) == NULL) {
- jam();
- // Failed to find index record
- AlterIndxRef * const alterIndxRef =
- (AlterIndxRef *)signal->getDataPtrSend();
-
- alterIndxRef->setUserRef(reference());
- alterIndxRef->setConnectionPtr(connectionPtr);
- alterIndxRef->setRequestType(requestType);
- alterIndxRef->setTableId(tableId);
- alterIndxRef->setIndexId(indexId);
- alterIndxRef->setErrorCode(AlterIndxRef::IndexNotFound);
- alterIndxRef->setErrorLine(__LINE__);
- alterIndxRef->setErrorNode(getOwnNodeId());
- sendSignal(sender, GSN_ALTER_INDX_REF,
- signal, AlterIndxRef::SignalLength, JBB);
- return;
- }
- // Found index record, alter its state
- if (online) {
- jam();
- indexData->indexState = IS_ONLINE;
- } else {
- jam();
- indexData->indexState = IS_BUILDING;
- }//if
- AlterIndxConf * const alterIndxConf =
- (AlterIndxConf *)signal->getDataPtrSend();
-
- alterIndxConf->setUserRef(reference());
- alterIndxConf->setConnectionPtr(connectionPtr);
- alterIndxConf->setRequestType(requestType);
- alterIndxConf->setTableId(tableId);
- alterIndxConf->setIndexId(indexId);
- sendSignal(sender, GSN_ALTER_INDX_CONF,
- signal, AlterIndxConf::SignalLength, JBB);
-}
-
-void Dbtc::execFIRE_TRIG_ORD(Signal* signal)
-{
- jamEntry();
- FireTrigOrd * const fireOrd = (FireTrigOrd *)signal->getDataPtr();
- ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
- ApiConnectRecordPtr transPtr;
- TcConnectRecord *localTcConnectRecord = tcConnectRecord;
- TcConnectRecordPtr opPtr;
-
- /**
- * TODO
- * Check transid,
- * Fix overload i.e invalid word count
- */
- TcFiredTriggerData key;
- key.fireingOperation = fireOrd->getConnectionPtr();
- key.nodeId = refToNode(signal->getSendersBlockRef());
- FiredTriggerPtr trigPtr;
- if(c_firedTriggerHash.find(trigPtr, key)){
-
- c_firedTriggerHash.remove(trigPtr);
-
- bool ok = trigPtr.p->keyValues.getSize() == fireOrd->m_noPrimKeyWords;
- ok &= trigPtr.p->afterValues.getSize() == fireOrd->m_noAfterValueWords;
- ok &= trigPtr.p->beforeValues.getSize() == fireOrd->m_noBeforeValueWords;
- if(ok){
- opPtr.i = key.fireingOperation;
- ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
- transPtr.i = opPtr.p->apiConnect;
- transPtr.p = &localApiConnectRecord[transPtr.i];
-
- opPtr.p->noReceivedTriggers++;
- opPtr.p->triggerExecutionCount++;
-
- // Insert fired trigger in execution queue
- transPtr.p->theFiredTriggers.add(trigPtr);
- if (opPtr.p->noReceivedTriggers == opPtr.p->noFiredTriggers) {
- executeTriggers(signal, &transPtr);
- }
- return;
- }
- jam();
- c_theFiredTriggerPool.release(trigPtr);
- }
- jam();
- /**
- * Failed to find record or invalid word counts
- */
- ndbrequire(false);
-}
-
-void Dbtc::execTRIG_ATTRINFO(Signal* signal)
-{
- jamEntry();
- TrigAttrInfo * const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtr();
- Uint32 attrInfoLength = signal->getLength() - TrigAttrInfo::StaticLength;
- const Uint32 *src = trigAttrInfo->getData();
- FiredTriggerPtr firedTrigPtr;
-
- TcFiredTriggerData key;
- key.fireingOperation = trigAttrInfo->getConnectionPtr();
- key.nodeId = refToNode(signal->getSendersBlockRef());
- if(!c_firedTriggerHash.find(firedTrigPtr, key)){
- jam();
- if(!c_firedTriggerHash.seize(firedTrigPtr)){
- jam();
- /**
- * Will be handled when FIRE_TRIG_ORD arrives
- */
- ndbout_c("op: %d node: %d failed to seize",
- key.fireingOperation, key.nodeId);
- return;
- }
- ndbrequire(firedTrigPtr.p->keyValues.getSize() == 0 &&
- firedTrigPtr.p->beforeValues.getSize() == 0 &&
- firedTrigPtr.p->afterValues.getSize() == 0);
-
- firedTrigPtr.p->nodeId = refToNode(signal->getSendersBlockRef());
- firedTrigPtr.p->fireingOperation = key.fireingOperation;
- firedTrigPtr.p->triggerId = trigAttrInfo->getTriggerId();
- c_firedTriggerHash.add(firedTrigPtr);
- }
-
- AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
- switch (trigAttrInfo->getAttrInfoType()) {
- case(TrigAttrInfo::PRIMARY_KEY):
- jam();
- {
- LocalDataBuffer<11> buf(pool, firedTrigPtr.p->keyValues);
- buf.append(src, attrInfoLength);
- }
- break;
- case(TrigAttrInfo::BEFORE_VALUES):
- jam();
- {
- LocalDataBuffer<11> buf(pool, firedTrigPtr.p->beforeValues);
- buf.append(src, attrInfoLength);
- }
- break;
- case(TrigAttrInfo::AFTER_VALUES):
- jam();
- {
- LocalDataBuffer<11> buf(pool, firedTrigPtr.p->afterValues);
- buf.append(src, attrInfoLength);
- }
- break;
- default:
- ndbrequire(false);
- }
-}
-
-void Dbtc::execDROP_INDX_REQ(Signal* signal)
-{
- jamEntry();
- DropIndxReq * const dropIndxReq = (DropIndxReq *)signal->getDataPtr();
- TcIndexData* indexData;
- BlockReference sender = signal->senderBlockRef();
-
- if ((indexData = c_theIndexes.getPtr(dropIndxReq->getIndexId())) == NULL) {
- jam();
- // Failed to find index record
- DropIndxRef * const dropIndxRef =
- (DropIndxRef *)signal->getDataPtrSend();
-
- dropIndxRef->setConnectionPtr(dropIndxReq->getConnectionPtr());
- dropIndxRef->setErrorCode(DropIndxRef::IndexNotFound);
- sendSignal(sender, GSN_DROP_INDX_REF,
- signal, DropIndxRef::SignalLength, JBB);
- return;
- }
- // Release index record
- c_theIndexes.release(dropIndxReq->getIndexId());
-
- DropIndxConf * const dropIndxConf =
- (DropIndxConf *)signal->getDataPtrSend();
-
- dropIndxConf->setConnectionPtr(dropIndxReq->getConnectionPtr());
- sendSignal(sender, GSN_DROP_INDX_CONF,
- signal, DropIndxConf::SignalLength, JBB);
-}
-
-void Dbtc::execTCINDXREQ(Signal* signal)
-{
- jamEntry();
-
- TcKeyReq * const tcIndxReq = (TcKeyReq *)signal->getDataPtr();
- const UintR TapiIndex = tcIndxReq->apiConnectPtr;
- Uint32 tcIndxRequestInfo = tcIndxReq->requestInfo;
- Uint32 startFlag = tcIndxReq->getStartFlag(tcIndxRequestInfo);
- Uint32 * dataPtr = &tcIndxReq->scanInfo;
- Uint32 indexBufSize = 8; // Maximum for index in TCINDXREQ
- Uint32 attrBufSize = 5; // Maximum for attrInfo in TCINDXREQ
- ApiConnectRecordPtr transPtr;
- transPtr.i = TapiIndex;
- if (transPtr.i >= capiConnectFilesize) {
- jam();
- warningHandlerLab(signal);
- return;
- }//if
- ptrAss(transPtr, apiConnectRecord);
- ApiConnectRecord * const regApiPtr = transPtr.p;
- // Seize index operation
- TcIndexOperationPtr indexOpPtr;
- if ((startFlag == 1) &&
- (regApiPtr->apiConnectstate == CS_CONNECTED ||
- (regApiPtr->apiConnectstate == CS_STARTED &&
- regApiPtr->firstTcConnect == RNIL)) ||
- (regApiPtr->apiConnectstate == CS_ABORTING &&
- regApiPtr->abortState == AS_IDLE)) {
- jam();
- // This is a newly started transaction, clean-up
- releaseAllSeizedIndexOperations(regApiPtr);
-
- regApiPtr->transid[0] = tcIndxReq->transId1;
- regApiPtr->transid[1] = tcIndxReq->transId2;
- }//if
-
- if (ERROR_INSERTED(8036) || !seizeIndexOperation(regApiPtr, indexOpPtr)) {
- jam();
- // Failed to allocate index operation
- terrorCode = 288;
- regApiPtr->m_exec_flag |= TcKeyReq::getExecuteFlag(tcIndxRequestInfo);
- apiConnectptr = transPtr;
- abortErrorLab(signal);
- return;
- }
- TcIndexOperation* indexOp = indexOpPtr.p;
- indexOp->indexOpId = indexOpPtr.i;
-
- // Save original signal
- indexOp->tcIndxReq = *tcIndxReq;
- indexOp->connectionIndex = TapiIndex;
- regApiPtr->accumulatingIndexOp = indexOp->indexOpId;
-
- // If operation is readTupleExclusive or updateTuple then read index
- // table with exclusive lock
- Uint32 indexLength = TcKeyReq::getKeyLength(tcIndxRequestInfo);
- Uint32 attrLength = tcIndxReq->attrLen;
- indexOp->expectedKeyInfo = indexLength;
- Uint32 includedIndexLength = MIN(indexLength, indexBufSize);
- indexOp->expectedAttrInfo = attrLength;
- Uint32 includedAttrLength = MIN(attrLength, attrBufSize);
- if (saveINDXKEYINFO(signal,
- indexOp,
- dataPtr,
- includedIndexLength)) {
- jam();
- // We have received all we need
- readIndexTable(signal, regApiPtr, indexOp);
- return;
- }
- dataPtr += includedIndexLength;
- if (saveINDXATTRINFO(signal,
- indexOp,
- dataPtr,
- includedAttrLength)) {
- jam();
- // We have received all we need
- readIndexTable(signal, regApiPtr, indexOp);
- return;
- }
-}
-
-
-void Dbtc::sendTcIndxConf(Signal* signal, UintR TcommitFlag)
-{
- HostRecordPtr localHostptr;
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- const UintR TopWords = (UintR)regApiPtr->tcindxrec;
- localHostptr.i = refToNode(regApiPtr->ndbapiBlockref);
- const Uint32 type = getNodeInfo(localHostptr.i).m_type;
- const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
- const BlockNumber TblockNum = refToBlock(regApiPtr->ndbapiBlockref);
- const Uint32 Tmarker = (regApiPtr->commitAckMarker == RNIL ? 0 : 1);
- ptrAss(localHostptr, hostRecord);
- UintR TcurrLen = localHostptr.p->noOfWordsTCINDXCONF;
- UintR confInfo = 0;
- TcIndxConf::setNoOfOperations(confInfo, (TopWords >> 1));
- TcIndxConf::setCommitFlag(confInfo, TcommitFlag == 1);
- TcIndxConf::setMarkerFlag(confInfo, Tmarker);
- const UintR TpacketLen = 6 + TopWords;
- regApiPtr->tcindxrec = 0;
-
- if(TcommitFlag || (regApiPtr->lqhkeyreqrec == regApiPtr->lqhkeyconfrec)){
- jam();
- regApiPtr->m_exec_flag = 0;
- }
-
- if ((TpacketLen > 25) || !is_api){
- TcIndxConf * const tcIndxConf = (TcIndxConf *)signal->getDataPtrSend();
-
- jam();
- tcIndxConf->apiConnectPtr = regApiPtr->ndbapiConnect;
- tcIndxConf->gci = regApiPtr->globalcheckpointid;
- tcIndxConf->confInfo = confInfo;
- tcIndxConf->transId1 = regApiPtr->transid[0];
- tcIndxConf->transId2 = regApiPtr->transid[1];
- copyFromToLen(&regApiPtr->tcIndxSendArray[0],
- (UintR*)&tcIndxConf->operations,
- (UintR)ZTCOPCONF_SIZE);
- sendSignal(regApiPtr->ndbapiBlockref,
- GSN_TCINDXCONF, signal, (TpacketLen - 1), JBB);
- return;
- } else if (((TcurrLen + TpacketLen) > 25) && (TcurrLen > 0)) {
- jam();
- sendPackedTCINDXCONF(signal, localHostptr.p, localHostptr.i);
- TcurrLen = 0;
- } else {
- jam();
- updatePackedList(signal, localHostptr.p, localHostptr.i);
- }//if
-// -------------------------------------------------------------------------
-// The header contains the block reference of the receiver plus the real signal
-// length - 3. Since we store the real signal length plus one additional word
-// for the header itself, we have to subtract 4 here.
-// -------------------------------------------------------------------------
- UintR Tpack0 = (TblockNum << 16) + (TpacketLen - 4);
- UintR Tpack1 = regApiPtr->ndbapiConnect;
- UintR Tpack2 = regApiPtr->globalcheckpointid;
- UintR Tpack3 = confInfo;
- UintR Tpack4 = regApiPtr->transid[0];
- UintR Tpack5 = regApiPtr->transid[1];
-
- localHostptr.p->noOfWordsTCINDXCONF = TcurrLen + TpacketLen;
-
- localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 0] = Tpack0;
- localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 1] = Tpack1;
- localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 2] = Tpack2;
- localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 3] = Tpack3;
- localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 4] = Tpack4;
- localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 5] = Tpack5;
-
- UintR Ti;
- for (Ti = 6; Ti < TpacketLen; Ti++) {
- localHostptr.p->packedWordsTCINDXCONF[TcurrLen + Ti] =
- regApiPtr->tcIndxSendArray[Ti - 6];
- }//for
-}//Dbtc::sendTcIndxConf()
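/* Illustrative standalone sketch, not NDB code: the packed-signal header word
 * built in sendTcIndxConf() above keeps the receiver's block number in the
 * upper 16 bits and a length field (TpacketLen - 4, per the comment in the
 * code) in the lower 16 bits.  The helpers below only demonstrate that bit
 * layout; the names and the example block number are hypothetical. */
#include <cassert>
#include <cstdint>

static uint32_t packHeader(uint32_t blockNum, uint32_t packetLen) {
  return (blockNum << 16) + (packetLen - 4);   // same shape as Tpack0 above
}
static uint32_t headerBlockNum(uint32_t w) { return w >> 16; }
static uint32_t headerLenField(uint32_t w) { return w & 0xFFFF; }

int main() {
  const uint32_t block = 245;                  // hypothetical receiver block number
  const uint32_t packetLen = 6 + 8;            // 6 fixed words + 8 operation words
  uint32_t w = packHeader(block, packetLen);
  assert(headerBlockNum(w) == block);
  assert(headerLenField(w) == packetLen - 4);
  return 0;
}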
-
-void Dbtc::execINDXKEYINFO(Signal* signal)
-{
- jamEntry();
- Uint32 keyInfoLength = signal->getLength() - IndxKeyInfo::HeaderLength;
- IndxKeyInfo * const indxKeyInfo = (IndxKeyInfo *)signal->getDataPtr();
- const Uint32 *src = indxKeyInfo->getData();
- const UintR TconnectIndex = indxKeyInfo->connectPtr;
- ApiConnectRecordPtr transPtr;
- transPtr.i = TconnectIndex;
- if (transPtr.i >= capiConnectFilesize) {
- jam();
- warningHandlerLab(signal);
- return;
- }//if
- ptrAss(transPtr, apiConnectRecord);
- ApiConnectRecord * const regApiPtr = transPtr.p;
- TcIndexOperationPtr indexOpPtr;
- TcIndexOperation* indexOp;
-
- if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
- {
- indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
- if (saveINDXKEYINFO(signal,
- indexOp,
- src,
- keyInfoLength)) {
- jam();
- // We have received all we need
- readIndexTable(signal, regApiPtr, indexOp);
- }
- }
-}
-
-void Dbtc::execINDXATTRINFO(Signal* signal)
-{
- jamEntry();
- Uint32 attrInfoLength = signal->getLength() - IndxAttrInfo::HeaderLength;
- IndxAttrInfo * const indxAttrInfo = (IndxAttrInfo *)signal->getDataPtr();
- const Uint32 *src = indxAttrInfo->getData();
- const UintR TconnectIndex = indxAttrInfo->connectPtr;
- ApiConnectRecordPtr transPtr;
- transPtr.i = TconnectIndex;
- if (transPtr.i >= capiConnectFilesize) {
- jam();
- warningHandlerLab(signal);
- return;
- }//if
- ptrAss(transPtr, apiConnectRecord);
- ApiConnectRecord * const regApiPtr = transPtr.p;
- TcIndexOperationPtr indexOpPtr;
- TcIndexOperation* indexOp;
-
- if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
- {
- indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
- if (saveINDXATTRINFO(signal,
- indexOp,
- src,
- attrInfoLength)) {
- jam();
- // We have received all we need
- readIndexTable(signal, regApiPtr, indexOp);
- }
- }
-}
-
-/**
- * Save signal INDXKEYINFO
- * Return true if we have received all needed data
- */
-bool Dbtc::saveINDXKEYINFO(Signal* signal,
- TcIndexOperation* indexOp,
- const Uint32 *src,
- Uint32 len)
-{
- if (!indexOp->keyInfo.append(src, len)) {
- jam();
- // Failed to seize keyInfo, abort transaction
-#ifdef VM_TRACE
- ndbout_c("Dbtc::saveINDXKEYINFO: Failed to seize keyinfo\n");
-#endif
- // Abort transaction
- apiConnectptr.i = indexOp->connectionIndex;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- releaseIndexOperation(apiConnectptr.p, indexOp);
- terrorCode = 4000;
- abortErrorLab(signal);
- return false;
- }
- if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
- jam();
- return true;
- }
- return false;
-}
-
-bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
-{
- return (indexOp->keyInfo.getSize() == indexOp->expectedKeyInfo);
-}
-
-/**
- * Save signal INDXATTRINFO
- * Return true if we have received all needed data
- */
-bool Dbtc::saveINDXATTRINFO(Signal* signal,
- TcIndexOperation* indexOp,
- const Uint32 *src,
- Uint32 len)
-{
- if (!indexOp->attrInfo.append(src, len)) {
- jam();
-#ifdef VM_TRACE
- ndbout_c("Dbtc::saveINDXATTRINFO: Failed to seize attrInfo\n");
-#endif
- apiConnectptr.i = indexOp->connectionIndex;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- releaseIndexOperation(apiConnectptr.p, indexOp);
- terrorCode = 4000;
- abortErrorLab(signal);
- return false;
- }
- if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
- jam();
- return true;
- }
- return false;
-}
-
-bool Dbtc::receivedAllINDXATTRINFO(TcIndexOperation* indexOp)
-{
- return (indexOp->attrInfo.getSize() == indexOp->expectedAttrInfo);
-}
-
-bool Dbtc::saveTRANSID_AI(Signal* signal,
- TcIndexOperation* indexOp,
- const Uint32 *src,
- Uint32 len)
-{
- Uint32 currentTransIdAILength = indexOp->transIdAI.getSize();
-
- if (currentTransIdAILength == 0) {
- jam();
- // Read the first AttributeHeader to get the expected size
- // of the single key attribute
- AttributeHeader* head = (AttributeHeader *) src;
- indexOp->expectedTransIdAI = head->getHeaderSize() + head->getDataSize();
- }
- if (!indexOp->transIdAI.append(src, len)) {
- jam();
-#ifdef VM_TRACE
- ndbout_c("Dbtc::saveTRANSID_AI: Failed to seize transIdAI\n");
-#endif
- apiConnectptr.i = indexOp->connectionIndex;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- releaseIndexOperation(apiConnectptr.p, indexOp);
- terrorCode = 4000;
- abortErrorLab(signal);
- return false;
- }
- return true;
-}
-
-bool Dbtc::receivedAllTRANSID_AI(TcIndexOperation* indexOp)
-{
- return (indexOp->transIdAI.getSize() == indexOp->expectedTransIdAI);
-}
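/* Illustrative standalone sketch, not NDB code: saveTRANSID_AI() above learns
 * how much data to expect from the first AttributeHeader (header size plus
 * data size) and then appends TRANSID_AI chunks until the accumulated size
 * matches, which receivedAllTRANSID_AI() checks.  The toy header layout below
 * (data size in the low 16 bits of one header word) is hypothetical and not
 * the real AttributeHeader encoding; only the accumulate-until-expected flow
 * is the point. */
#include <cassert>
#include <cstdint>
#include <vector>

struct Accumulator {
  std::vector<uint32_t> words;
  uint32_t expected = 0;

  void append(const uint32_t* src, uint32_t len) {
    if (expected == 0)                       // first chunk: derive expected size
      expected = 1 /*header word*/ + (src[0] & 0xFFFF) /*toy data size*/;
    words.insert(words.end(), src, src + len);
  }
  bool complete() const { return words.size() == expected; }
};

int main() {
  // One key attribute of 5 data words, delivered in two chunks of 3 words each.
  uint32_t chunk1[3] = {0x00000005, 1, 2};
  uint32_t chunk2[3] = {3, 4, 5};
  Accumulator acc;
  acc.append(chunk1, 3);
  assert(!acc.complete());                   // 3 of 6 expected words so far
  acc.append(chunk2, 3);
  assert(acc.complete());                    // header + 5 data words received
  return 0;
}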
-
-/**
- * Receive signal TCINDXCONF
- * This can be either the return of reading an index table
- * or performing an index operation
- */
-void Dbtc::execTCKEYCONF(Signal* signal)
-{
- TcKeyConf * const tcKeyConf = (TcKeyConf *)signal->getDataPtr();
- TcIndexOperationPtr indexOpPtr;
-
- jamEntry();
- indexOpPtr.i = tcKeyConf->apiConnectPtr;
- TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
- Uint32 confInfo = tcKeyConf->confInfo;
-
- /**
- * Check on TCKEYCONF whether the transaction was committed
- */
- Uint32 Tcommit = TcKeyConf::getCommitFlag(confInfo);
-
- indexOpPtr.p = indexOp;
- if (!indexOp) {
- jam();
- // Missing index operation
- return;
- }
- const UintR TconnectIndex = indexOp->connectionIndex;
- ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
- apiConnectptr.p = regApiPtr;
- apiConnectptr.i = TconnectIndex;
- switch(indexOp->indexOpState) {
- case(IOS_NOOP): {
- jam();
- // Should never happen, abort
- TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
-
- tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
- tcIndxRef->transId[0] = regApiPtr->transid[0];
- tcIndxRef->transId[1] = regApiPtr->transid[1];
- tcIndxRef->errorCode = 4349;
- sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcKeyRef::SignalLength, JBB);
- return;
- }
- case(IOS_INDEX_ACCESS): {
- jam();
- // Wait for TRANSID_AI
- indexOp->indexOpState = IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI;
- break;
- }
- case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI): {
- jam();
- // Double TCKEYCONF, should never happen, abort
- TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
-
- tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
- tcIndxRef->transId[0] = regApiPtr->transid[0];
- tcIndxRef->transId[1] = regApiPtr->transid[1];
- tcIndxRef->errorCode = 4349;
- sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcKeyRef::SignalLength, JBB);
- return;
- }
- case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): {
- jam();
- // Continue with index operation
- executeIndexOperation(signal, regApiPtr, indexOp);
- break;
- }
- case(IOS_INDEX_OPERATION): {
- // We are done, send TCINDXCONF
- jam();
- Uint32 Ttcindxrec = regApiPtr->tcindxrec;
- // Copy reply from TcKeyConf
-
- ndbassert(regApiPtr->noIndexOp);
- regApiPtr->noIndexOp--; // Decrease count
- regApiPtr->tcIndxSendArray[Ttcindxrec] = indexOp->tcIndxReq.senderData;
- regApiPtr->tcIndxSendArray[Ttcindxrec + 1] =
- tcKeyConf->operations[0].attrInfoLen;
- regApiPtr->tcindxrec = Ttcindxrec + 2;
- if (regApiPtr->noIndexOp == 0) {
- jam();
- sendTcIndxConf(signal, Tcommit);
- } else if (regApiPtr->tcindxrec == ZTCOPCONF_SIZE) {
- jam();
- sendTcIndxConf(signal, 0);
- }
- releaseIndexOperation(regApiPtr, indexOp);
- break;
- }
- }
-}
-
-void Dbtc::execTCKEYREF(Signal* signal)
-{
- TcKeyRef * const tcKeyRef = (TcKeyRef *)signal->getDataPtr();
- TcIndexOperationPtr indexOpPtr;
-
- jamEntry();
- indexOpPtr.i = tcKeyRef->connectPtr;
- TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
- indexOpPtr.p = indexOp;
- if (!indexOp) {
- jam();
- // Missing index operation
- return;
- }
- const UintR TconnectIndex = indexOp->connectionIndex;
- ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
- Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
- Uint32 commitFlg = TcKeyReq::getCommitFlag(tcKeyRequestInfo);
-
- switch(indexOp->indexOpState) {
- case(IOS_NOOP): {
- jam();
- // Should never happen, abort
- break;
- }
- case(IOS_INDEX_ACCESS):
- case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI):
- case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): {
- jam();
- // If we fail index access for a non-read operation during commit
- // we abort transaction
- if (commitFlg == 1) {
- jam();
- releaseIndexOperation(regApiPtr, indexOp);
- apiConnectptr.i = indexOp->connectionIndex;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- terrorCode = tcKeyRef->errorCode;
- abortErrorLab(signal);
- break;
- }
- /**
- * Increase count as it will be decreased below...
- * (the code below is written to handle a failing lookup on the "real" table,
- * not a lookup on the index table)
- */
- regApiPtr->noIndexOp++;
- // else continue
- }
- case(IOS_INDEX_OPERATION): {
- // Send TCINDXREF
-
- jam();
- TcKeyReq * const tcIndxReq = &indexOp->tcIndxReq;
- TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
-
- ndbassert(regApiPtr->noIndexOp);
- regApiPtr->noIndexOp--; // Decrease count
- tcIndxRef->connectPtr = tcIndxReq->senderData;
- tcIndxRef->transId[0] = tcKeyRef->transId[0];
- tcIndxRef->transId[1] = tcKeyRef->transId[1];
- tcIndxRef->errorCode = tcKeyRef->errorCode;
- sendSignal(regApiPtr->ndbapiBlockref,
- GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB);
- return;
- }
- }
-}
-
-void Dbtc::execTRANSID_AI_R(Signal* signal){
- TransIdAI * const transIdAI = (TransIdAI *)signal->getDataPtr();
- Uint32 sigLen = signal->length();
- Uint32 dataLen = sigLen - TransIdAI::HeaderLength - 1;
- Uint32 recBlockref = transIdAI->attrData[dataLen];
-
- jamEntry();
-
- /**
- * Forward signal to final destination
- * Truncate last word since that was used to hold the final dest.
- */
- sendSignal(recBlockref, GSN_TRANSID_AI,
- signal, sigLen - 1, JBB);
-}
-
-void Dbtc::execKEYINFO20_R(Signal* signal){
- KeyInfo20 * const keyInfo = (KeyInfo20 *)signal->getDataPtr();
- Uint32 sigLen = signal->length();
- Uint32 dataLen = sigLen - KeyInfo20::HeaderLength - 1;
- Uint32 recBlockref = keyInfo->keyData[dataLen];
-
- jamEntry();
-
- /**
- * Forward signal to final destination
- * Truncate last word since that was used to hold the final dest.
- */
- sendSignal(recBlockref, GSN_KEYINFO20,
- signal, sigLen - 1, JBB);
-}
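/* Illustrative standalone sketch, not NDB code: in execTRANSID_AI_R() and
 * execKEYINFO20_R() above the sender has appended the final destination block
 * reference as the last signal word; the router reads that word and forwards
 * the signal with length - 1, so the routing word never reaches the
 * destination.  The types and the example reference value are hypothetical. */
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

struct RoutedSignal { std::vector<uint32_t> words; };

// Returns the destination and the payload that should be forwarded to it.
static std::pair<uint32_t, RoutedSignal> route(const RoutedSignal& in) {
  uint32_t dest = in.words.back();                    // last word = final dest
  RoutedSignal out{std::vector<uint32_t>(in.words.begin(), in.words.end() - 1)};
  return {dest, out};                                 // "sigLen - 1" forwarding
}

int main() {
  RoutedSignal s{{7, 8, 9, /*destination ref*/ 0x00F50002}};
  auto [dest, fwd] = route(s);
  assert(dest == 0x00F50002);
  assert(fwd.words.size() == 3 && fwd.words[2] == 9); // routing word stripped
  return 0;
}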
-
-
-void Dbtc::execTRANSID_AI(Signal* signal)
-{
- TransIdAI * const transIdAI = (TransIdAI *)signal->getDataPtr();
-
- jamEntry();
- TcIndexOperationPtr indexOpPtr;
- indexOpPtr.i = transIdAI->connectPtr;
- TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
- indexOpPtr.p = indexOp;
- if (!indexOp) {
- jam();
- // Missing index operation
- }
- const UintR TconnectIndex = indexOp->connectionIndex;
- // ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
- ApiConnectRecordPtr transPtr;
-
- transPtr.i = TconnectIndex;
- ptrCheckGuard(transPtr, capiConnectFilesize, apiConnectRecord);
- ApiConnectRecord * const regApiPtr = transPtr.p;
-
- // Accumulate attribute data
- if (!saveTRANSID_AI(signal,
- indexOp,
- transIdAI->getData(),
- signal->getLength() - TransIdAI::HeaderLength)) {
- jam();
- // Failed to allocate space for TransIdAI
- TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
-
- tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
- tcIndxRef->transId[0] = regApiPtr->transid[0];
- tcIndxRef->transId[1] = regApiPtr->transid[1];
- tcIndxRef->errorCode = 4000;
- sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcKeyRef::SignalLength, JBB);
- return;
- }
-
- switch(indexOp->indexOpState) {
- case(IOS_NOOP): {
- jam();
- // Should never happen, abort
- TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
-
- tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
- tcIndxRef->transId[0] = regApiPtr->transid[0];
- tcIndxRef->transId[1] = regApiPtr->transid[1];
- tcIndxRef->errorCode = 4349;
- sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcKeyRef::SignalLength, JBB);
- return;
- break;
- }
- case(IOS_INDEX_ACCESS): {
- jam();
- // Check if all TRANSID_AI have been received
- if (receivedAllTRANSID_AI(indexOp)) {
- jam();
- // All TRANSID_AI received, wait for TCKEYCONF
- indexOp->indexOpState = IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF;
- }
- break;
- }
- case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): {
- jam();
-#ifdef VM_TRACE
- ndbout_c("Dbtc::execTRANSID_AI: Too many TRANSID_AI, ignore for now\n");
-#endif
- /*
- // Too many TRANSID_AI
- TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
-
- tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
- tcIndxRef->transId[0] = regApiPtr->transid[0];
- tcIndxRef->transId[1] = regApiPtr->transid[1];
- tcIndxRef->errorCode = 4349;
- sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcKeyRef::SignalLength, JBB);
- */
- break;
- }
- case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI): {
- jam();
- // Check if all TRANSID_AI have been received
- if (receivedAllTRANSID_AI(indexOp)) {
- jam();
- // Continue with index operation
- executeIndexOperation(signal, regApiPtr, indexOp);
- }
- // else continue waiting for more TRANSID_AI
- break;
- }
- case(IOS_INDEX_OPERATION): {
- // Should never receive TRANSID_AI in this state!!
- jam();
- TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
-
- tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
- tcIndxRef->transId[0] = regApiPtr->transid[0];
- tcIndxRef->transId[1] = regApiPtr->transid[1];
- tcIndxRef->errorCode = 4349;
- sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcKeyRef::SignalLength, JBB);
- return;
- }
- }
-}
-
-void Dbtc::execTCROLLBACKREP(Signal* signal)
-{
- TcRollbackRep* tcRollbackRep = (TcRollbackRep *)signal->getDataPtr();
- jamEntry();
- TcIndexOperationPtr indexOpPtr;
- indexOpPtr.i = tcRollbackRep->connectPtr;
- TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
- indexOpPtr.p = indexOp;
- tcRollbackRep = (TcRollbackRep *)signal->getDataPtrSend();
- tcRollbackRep->connectPtr = indexOp->tcIndxReq.senderData;
- sendSignal(apiConnectptr.p->ndbapiBlockref,
- GSN_TCROLLBACKREP, signal, TcRollbackRep::SignalLength, JBB);
-}
-
-/**
- * Read index table with the index attributes as PK
- */
-void Dbtc::readIndexTable(Signal* signal,
- ApiConnectRecord* regApiPtr,
- TcIndexOperation* indexOp)
-{
- Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
- Uint32 dataPos = 0;
- TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
- Uint32 * dataPtr = &tcKeyReq->scanInfo;
- Uint32 tcKeyLength = TcKeyReq::StaticLength;
- Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
- AttributeBuffer::DataBufferIterator keyIter;
- Uint32 keyLength = TcKeyReq::getKeyLength(tcKeyRequestInfo);
- TcIndexData* indexData;
- Uint32 transId1 = indexOp->tcIndxReq.transId1;
- Uint32 transId2 = indexOp->tcIndxReq.transId2;
-
- const Operation_t opType =
- (Operation_t)TcKeyReq::getOperationType(tcKeyRequestInfo);
-
- // Find index table
- if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq.tableId)) == NULL) {
- jam();
- // Failed to find index record
- TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
-
- tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
- tcIndxRef->transId[0] = regApiPtr->transid[0];
- tcIndxRef->transId[1] = regApiPtr->transid[1];
- tcIndxRef->errorCode = 4000;
- sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcKeyRef::SignalLength, JBB);
- return;
- }
- tcKeyReq->transId1 = transId1;
- tcKeyReq->transId2 = transId2;
- tcKeyReq->tableId = indexData->indexId;
- tcKeyLength += MIN(keyLength, keyBufSize);
- tcKeyReq->tableSchemaVersion = indexOp->tcIndxReq.tableSchemaVersion;
- TcKeyReq::setOperationType(tcKeyRequestInfo,
- opType == ZREAD ? ZREAD : ZREAD_EX);
- TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 1); // Always send one AttrInfo
- TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, 0);
- BlockReference originalReceiver = regApiPtr->ndbapiBlockref;
- regApiPtr->ndbapiBlockref = reference(); // Send result to me
- tcKeyReq->senderData = indexOp->indexOpId;
- indexOp->indexOpState = IOS_INDEX_ACCESS;
- regApiPtr->executingIndexOp = regApiPtr->accumulatingIndexOp;
- regApiPtr->accumulatingIndexOp = RNIL;
- regApiPtr->isIndexOp = true;
-
- Uint32 remainingKey = indexOp->keyInfo.getSize();
- bool moreKeyData = indexOp->keyInfo.first(keyIter);
- // *********** KEYINFO in TCKEYREQ ***********
- while((dataPos < keyBufSize) &&
- (remainingKey-- != 0)) {
- *dataPtr++ = *keyIter.data;
- dataPos++;
- moreKeyData = indexOp->keyInfo.next(keyIter);
- }
- // *********** ATTRINFO in TCKEYREQ ***********
- tcKeyReq->attrLen = 1; // Primary key is stored as one attribute
- AttributeHeader::init(dataPtr, indexData->primaryKeyPos, 0);
- tcKeyLength++;
- tcKeyReq->requestInfo = tcKeyRequestInfo;
-
- ndbassert(TcKeyReq::getDirtyFlag(tcKeyRequestInfo) == 0);
- ndbassert(TcKeyReq::getSimpleFlag(tcKeyRequestInfo) == 0);
- EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
-
- /**
- * "Fool" TC not to start commiting transaction since it always will
- * have one outstanding lqhkeyreq
- * This is later decreased when the index read is complete
- */
- regApiPtr->lqhkeyreqrec++;
-
- /**
- * Remember ptr to index read operation
- * (used to set correct save point id on index operation later)
- */
- indexOp->indexReadTcConnect = regApiPtr->lastTcConnect;
-
- jamEntry();
- // *********** KEYINFO ***********
- if (moreKeyData) {
- jam();
- // Send KEYINFO sequence
- KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
-
- keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
- keyInfo->transId[0] = transId1;
- keyInfo->transId[1] = transId2;
- dataPtr = (Uint32 *) &keyInfo->keyData;
- dataPos = 0;
- while(remainingKey-- != 0) {// If we have not read complete key
- *dataPtr++ = *keyIter.data;
- dataPos++;
- if (dataPos == KeyInfo::DataLength) {
- // Flush KEYINFO
- EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + KeyInfo::DataLength);
- jamEntry();
- dataPos = 0;
- dataPtr = (Uint32 *) &keyInfo->keyData;
- }
- moreKeyData = indexOp->keyInfo.next(keyIter);
- }
- if (dataPos != 0) {
- // Flush last KEYINFO
- EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + dataPos);
- jamEntry();
- }
- }
-
- regApiPtr->ndbapiBlockref = originalReceiver; // reset original receiver
-}
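/* Illustrative standalone sketch, not NDB code: readIndexTable() above puts the
 * first key words in TCKEYREQ itself and streams the rest as KEYINFO signals,
 * flushing whenever the signal's data area is full and once more for the
 * remainder.  The chunk size of 20 words mirrors KeyInfo::DataLength as used in
 * the code; the callback and function names below are hypothetical. */
#include <cassert>
#include <cstdint>
#include <functional>
#include <vector>

static const size_t kChunkWords = 20;     // KeyInfo::DataLength-sized chunks

// Stream 'key' in chunks of at most kChunkWords, calling flush() per chunk.
static void streamKey(const std::vector<uint32_t>& key,
                      const std::function<void(size_t)>& flush) {
  size_t pos = 0;
  for (uint32_t w : key) {
    (void)w;                              // a real sender would copy w into the signal
    if (++pos == kChunkWords) { flush(pos); pos = 0; }
  }
  if (pos != 0) flush(pos);               // flush the final, partial chunk
}

int main() {
  std::vector<size_t> sent;
  streamKey(std::vector<uint32_t>(45, 0),
            [&](size_t n) { sent.push_back(n); });
  assert((sent == std::vector<size_t>{20, 20, 5}));   // 45 words -> 20 + 20 + 5
  return 0;
}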
-
-/**
- * Execute the index operation with the result from
- * the index table read as PK
- */
-void Dbtc::executeIndexOperation(Signal* signal,
- ApiConnectRecord* regApiPtr,
- TcIndexOperation* indexOp) {
-
- Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
- Uint32 attrBufSize = 5;
- Uint32 dataPos = 0;
- TcKeyReq * const tcIndxReq = &indexOp->tcIndxReq;
- TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
- Uint32 * dataPtr = &tcKeyReq->scanInfo;
- Uint32 tcKeyLength = TcKeyReq::StaticLength;
- Uint32 tcKeyRequestInfo = tcIndxReq->requestInfo;
- TcIndexData* indexData;
- AttributeBuffer::DataBufferIterator attrIter;
- AttributeBuffer::DataBufferIterator aiIter;
- bool moreKeyData = indexOp->transIdAI.first(aiIter);
-
- // Find index table
- if ((indexData = c_theIndexes.getPtr(tcIndxReq->tableId)) == NULL) {
- jam();
- // Failed to find index record
- TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
-
- tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
- tcIndxRef->transId[0] = regApiPtr->transid[0];
- tcIndxRef->transId[1] = regApiPtr->transid[1];
- tcIndxRef->errorCode = 4349;
- sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcKeyRef::SignalLength, JBB);
- return;
- }
- // Find schema version of primary table
- TableRecordPtr tabPtr;
- tabPtr.i = indexData->primaryTableId;
- ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
-
- tcKeyReq->apiConnectPtr = tcIndxReq->apiConnectPtr;
- tcKeyReq->attrLen = tcIndxReq->attrLen;
- tcKeyReq->tableId = indexData->primaryTableId;
- tcKeyReq->tableSchemaVersion = tabPtr.p->currentSchemaVersion;
- tcKeyReq->transId1 = regApiPtr->transid[0];
- tcKeyReq->transId2 = regApiPtr->transid[1];
- tcKeyReq->senderData = tcIndxReq->senderData; // Needed for TRANSID_AI to API
- indexOp->indexOpState = IOS_INDEX_OPERATION;
- regApiPtr->isIndexOp = true;
- regApiPtr->executingIndexOp = indexOp->indexOpId;
- regApiPtr->noIndexOp++; // Increase count
-
- // Filter out AttributeHeaders since these should not be in the key
- AttributeHeader* attrHeader = (AttributeHeader *) aiIter.data;
-
- Uint32 headerSize = attrHeader->getHeaderSize();
- Uint32 keySize = attrHeader->getDataSize();
- TcKeyReq::setKeyLength(tcKeyRequestInfo, keySize);
- // Skip header
- if (headerSize == 1) {
- jam();
- moreKeyData = indexOp->transIdAI.next(aiIter);
- } else {
- jam();
- moreKeyData = indexOp->transIdAI.next(aiIter, headerSize - 1);
- }//if
- while(// If we have not read complete key
- (keySize != 0) &&
- (dataPos < keyBufSize)) {
- *dataPtr++ = *aiIter.data;
- dataPos++;
- keySize--;
- moreKeyData = indexOp->transIdAI.next(aiIter);
- }
- tcKeyLength += dataPos;
-
- Uint32 attributesLength = indexOp->attrInfo.getSize();
- if (attributesLength <= attrBufSize) {
- jam();
- // ATTRINFO fits in TCKEYREQ
- // Pack ATTRINFO IN TCKEYREQ
- TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, indexOp->attrInfo.getSize());
- // Insert IndxAttrInfo
- for(bool moreAttrData = indexOp->attrInfo.first(attrIter);
- moreAttrData;
- moreAttrData = indexOp->attrInfo.next(attrIter)) {
- *dataPtr++ = *attrIter.data;
- }
- tcKeyLength += attributesLength;
- } else {
- jam();
- // No ATTRINFO in TCKEYREQ
- TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 0);
- }
-
- TcKeyReq::setCommitFlag(tcKeyRequestInfo, 0);
- TcKeyReq::setExecuteFlag(tcKeyRequestInfo, 0);
- TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, 0);
- tcKeyReq->requestInfo = tcKeyRequestInfo;
-
- ndbassert(TcKeyReq::getDirtyFlag(tcKeyRequestInfo) == 0);
- ndbassert(TcKeyReq::getSimpleFlag(tcKeyRequestInfo) == 0);
-
- /**
- * Decrease lqhkeyreqrec to compensate for addition
- * during read of index table
- * I.e. let TC start committing when the other operations have completed
- */
- regApiPtr->lqhkeyreqrec--;
-
- /**
- * Fix savepoint id -
- * fix so that index operation has the same savepoint id
- * as the read of the index table (TCINDXREQ)
- */
- TcConnectRecordPtr tmp;
- tmp.i = indexOp->indexReadTcConnect;
- ptrCheckGuard(tmp, ctcConnectFilesize, tcConnectRecord);
- const Uint32 currSavePointId = regApiPtr->currSavePointId;
- regApiPtr->currSavePointId = tmp.p->savePointId;
- EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
- regApiPtr->currSavePointId = currSavePointId;
-
- jamEntry();
- // *********** KEYINFO ***********
- if (moreKeyData) {
- jam();
- // Send KEYINFO sequence
- KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
-
- keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
- keyInfo->transId[0] = regApiPtr->transid[0];
- keyInfo->transId[1] = regApiPtr->transid[1];
- dataPtr = (Uint32 *) &keyInfo->keyData;
- dataPos = 0;
- // Pack any part of a key attribute that did not fit in TCKEYREQ
- while(keySize-- != 0) {// If we have not read complete key
- *dataPtr++ = *aiIter.data;
- dataPos++;
- if (dataPos == KeyInfo::DataLength) {
- // Flush KEYINFO
- EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + KeyInfo::DataLength);
- jamEntry();
- dataPos = 0;
- dataPtr = (Uint32 *) &keyInfo->keyData;
- }
- moreKeyData = indexOp->transIdAI.next(aiIter);
- }
- if (dataPos != 0) {
- // Flush last KEYINFO
- EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + dataPos);
- jamEntry();
- }
- }
-
- // *********** ATTRINFO ***********
- if (attributesLength > attrBufSize) {
- jam();
- // No ATTRINFO in TcKeyReq
- TcKeyReq::setAIInTcKeyReq(tcKeyReq->requestInfo, 0);
- // Send ATTRINFO sequence
- AttrInfo * const attrInfo = (AttrInfo *)signal->getDataPtrSend();
- Uint32 attrInfoPos = 0;
-
- attrInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
- attrInfo->transId[0] = regApiPtr->transid[0];
- attrInfo->transId[1] = regApiPtr->transid[1];
- dataPtr = (Uint32 *) &attrInfo->attrData;
-
-
- // Insert attribute values (insert key values of primary table)
- for(bool moreAttrData = indexOp->attrInfo.first(attrIter);
- moreAttrData;
- moreAttrData = indexOp->attrInfo.next(attrIter)) {
- *dataPtr++ = *attrIter.data;
- attrInfoPos++;
- if (attrInfoPos == AttrInfo::DataLength) {
- // Flush ATTRINFO
- EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
- AttrInfo::HeaderLength + AttrInfo::DataLength);
- jamEntry();
- attrInfoPos = 0;
- dataPtr = (Uint32 *) &attrInfo->attrData;
- }
- }
- if (attrInfoPos != 0) {
- // Send last ATTRINFO
- EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
- AttrInfo::HeaderLength + attrInfoPos);
- jamEntry();
- }
- }
-}
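/* Illustrative standalone sketch, not NDB code: executeIndexOperation() above
 * briefly overrides currSavePointId with the savepoint of the index-table
 * read, runs the TCKEYREQ directly, and then restores the old value.  A scope
 * guard expresses the same save/override/restore shape; the Transaction type
 * and guard name here are hypothetical, not part of Dbtc. */
#include <cassert>
#include <cstdint>

struct Transaction { uint32_t currSavePointId = 0; };

class SavePointOverride {           // restore on scope exit, even on early return
  Transaction& t_;
  uint32_t saved_;
public:
  SavePointOverride(Transaction& t, uint32_t tmp)
    : t_(t), saved_(t.currSavePointId) { t_.currSavePointId = tmp; }
  ~SavePointOverride() { t_.currSavePointId = saved_; }
};

int main() {
  Transaction trans;
  trans.currSavePointId = 7;
  {
    SavePointOverride guard(trans, 3);   // use the index read's savepoint id
    assert(trans.currSavePointId == 3);  // nested request sees the override
  }
  assert(trans.currSavePointId == 7);    // restored afterwards
  return 0;
}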
-
-bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr,
- TcIndexOperationPtr& indexOpPtr)
-{
- return regApiPtr->theSeizedIndexOperations.seize(indexOpPtr);
-}
-
-void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
- TcIndexOperation* indexOp)
-{
- indexOp->indexOpState = IOS_NOOP;
- indexOp->expectedKeyInfo = 0;
- indexOp->keyInfo.release();
- indexOp->expectedAttrInfo = 0;
- indexOp->attrInfo.release();
- indexOp->expectedTransIdAI = 0;
- indexOp->transIdAI.release();
- regApiPtr->theSeizedIndexOperations.release(indexOp->indexOpId);
-}
-
-void Dbtc::releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr)
-{
- TcIndexOperationPtr seizedIndexOpPtr;
-
- regApiPtr->theSeizedIndexOperations.first(seizedIndexOpPtr);
- while(seizedIndexOpPtr.i != RNIL) {
- jam();
- TcIndexOperation* indexOp = seizedIndexOpPtr.p;
-
- indexOp->indexOpState = IOS_NOOP;
- indexOp->expectedKeyInfo = 0;
- indexOp->keyInfo.release();
- indexOp->expectedAttrInfo = 0;
- indexOp->attrInfo.release();
- indexOp->expectedTransIdAI = 0;
- indexOp->transIdAI.release();
- regApiPtr->theSeizedIndexOperations.next(seizedIndexOpPtr);
- }
- regApiPtr->theSeizedIndexOperations.release();
-}
-
-void Dbtc::saveTriggeringOpState(Signal* signal, TcConnectRecord* trigOp)
-{
- LqhKeyConf * lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
- copyFromToLen((UintR*)lqhKeyConf,
- &trigOp->savedState[0],
- LqhKeyConf::SignalLength);
-}
-
-void Dbtc::continueTriggeringOp(Signal* signal, TcConnectRecord* trigOp)
-{
- LqhKeyConf * lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
- copyFromToLen(&trigOp->savedState[0],
- (UintR*)lqhKeyConf,
- LqhKeyConf::SignalLength);
-
- lqhKeyConf->noFiredTriggers = 0;
- trigOp->noReceivedTriggers = 0;
-
- // All triggers executed successfully, continue operation
- execLQHKEYCONF(signal);
-}
-
-void Dbtc::scheduleFiredTrigger(ApiConnectRecordPtr* transPtr,
- TcConnectRecordPtr* opPtr)
-{
- // Set initial values for the trigger firing operation
- opPtr->p->triggerExecutionCount++;
-
- // Insert fired trigger in execution queue
- transPtr->p->theFiredTriggers.add(opPtr->p->accumulatingTriggerData);
- opPtr->p->accumulatingTriggerData.i = RNIL;
- opPtr->p->accumulatingTriggerData.p = NULL;
-}
-
-void Dbtc::executeTriggers(Signal* signal, ApiConnectRecordPtr* transPtr)
-{
- ApiConnectRecord* regApiPtr = transPtr->p;
- TcConnectRecord *localTcConnectRecord = tcConnectRecord;
- TcConnectRecordPtr opPtr;
- FiredTriggerPtr trigPtr;
-
- if (!regApiPtr->theFiredTriggers.isEmpty()) {
- jam();
- if ((regApiPtr->apiConnectstate == CS_STARTED) ||
- (regApiPtr->apiConnectstate == CS_START_COMMITTING)) {
- jam();
- regApiPtr->theFiredTriggers.first(trigPtr);
- while (trigPtr.i != RNIL) {
- jam();
- // Execute all ready triggers in parallel
- opPtr.i = trigPtr.p->fireingOperation;
- ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
- FiredTriggerPtr nextTrigPtr = trigPtr;
- regApiPtr->theFiredTriggers.next(nextTrigPtr);
- if (opPtr.p->noReceivedTriggers == opPtr.p->noFiredTriggers) {
- jam();
- // Firing operation is ready to have a trigger executing
- executeTrigger(signal, trigPtr.p, transPtr, &opPtr);
- // Should allow for interleaving here by sending a CONTINUEB and
- // return
- // Release trigger records
- AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
- LocalDataBuffer<11> tmp1(pool, trigPtr.p->keyValues);
- tmp1.release();
- LocalDataBuffer<11> tmp2(pool, trigPtr.p->beforeValues);
- tmp2.release();
- LocalDataBuffer<11> tmp3(pool, trigPtr.p->afterValues);
- tmp3.release();
- regApiPtr->theFiredTriggers.release(trigPtr.i);
- }
- trigPtr = nextTrigPtr;
- }
- return;
- // No more triggers, continue transaction after last executed trigger has
- // returned (in execLQHKEYCONF or execLQHKEYREF)
- } else {
- // Wait until transaction is ready to execute a trigger
- jam();
- if (!regApiPtr->triggerPending) {
- jam();
- regApiPtr->triggerPending = true;
- signal->theData[0] = TcContinueB::TRIGGER_PENDING;
- signal->theData[1] = transPtr->i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
- }
- // else
- // We are already waiting for a pending trigger (CONTINUEB)
- }
- }
-}
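/* Illustrative standalone sketch, not NDB code: when the transaction is not in
 * a state where triggers may run, executeTriggers() above sets triggerPending
 * and queues a single CONTINUEB; later calls see the flag and do not queue a
 * second one.  The toy below shows only that "at most one deferred retry"
 * flag pattern; the Txn type and queue are hypothetical. */
#include <cassert>
#include <queue>

struct Txn { bool ready = false; bool triggerPending = false; int executed = 0; };
static std::queue<Txn*> continueB;            // stands in for delayed CONTINUEB jobs

void executeTriggersSketch(Txn& t) {
  if (t.ready) { t.executed++; return; }      // state allows execution: run now
  if (!t.triggerPending) {                    // otherwise defer exactly once
    t.triggerPending = true;
    continueB.push(&t);
  }
}

int main() {
  Txn t;
  executeTriggersSketch(t);
  executeTriggersSketch(t);                   // second call must not enqueue again
  assert(continueB.size() == 1 && t.executed == 0);
  t.ready = true; t.triggerPending = false;   // transaction reached a runnable state
  executeTriggersSketch(*continueB.front()); continueB.pop();
  assert(t.executed == 1);
  return 0;
}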
-
-void Dbtc::executeTrigger(Signal* signal,
- TcFiredTriggerData* firedTriggerData,
- ApiConnectRecordPtr* transPtr,
- TcConnectRecordPtr* opPtr)
-{
- TcDefinedTriggerData* definedTriggerData;
-
- if ((definedTriggerData =
- c_theDefinedTriggers.getPtr(firedTriggerData->triggerId))
- != NULL) {
- switch(definedTriggerData->triggerType) {
- case(TriggerType::SECONDARY_INDEX):
- jam();
- executeIndexTrigger(signal, definedTriggerData, firedTriggerData,
- transPtr, opPtr);
- break;
- default:
- ndbrequire(false);
- }
- }
-}
-
-void Dbtc::executeIndexTrigger(Signal* signal,
- TcDefinedTriggerData* definedTriggerData,
- TcFiredTriggerData* firedTriggerData,
- ApiConnectRecordPtr* transPtr,
- TcConnectRecordPtr* opPtr)
-{
- TcIndexData* indexData;
-
- indexData = c_theIndexes.getPtr(definedTriggerData->indexId);
- ndbassert(indexData != NULL);
-
- switch (definedTriggerData->triggerEvent) {
- case(TriggerEvent::TE_INSERT): {
- jam();
- insertIntoIndexTable(signal, firedTriggerData, transPtr, opPtr, indexData);
- break;
- }
- case(TriggerEvent::TE_DELETE): {
- jam();
- deleteFromIndexTable(signal, firedTriggerData, transPtr, opPtr, indexData);
- break;
- }
- case(TriggerEvent::TE_UPDATE): {
- jam();
- deleteFromIndexTable(signal, firedTriggerData, transPtr, opPtr,
- indexData, true); // Hold the triggering operation
- insertIntoIndexTable(signal, firedTriggerData, transPtr, opPtr, indexData);
- break;
- }
- default:
- ndbrequire(false);
- }
-}
-
-void Dbtc::releaseFiredTriggerData(DLFifoList<TcFiredTriggerData>* triggers)
-{
- FiredTriggerPtr trigPtr;
-
- triggers->first(trigPtr);
- while (trigPtr.i != RNIL) {
- jam();
- // Release trigger records
-
- AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
- LocalDataBuffer<11> tmp1(pool, trigPtr.p->keyValues);
- tmp1.release();
- LocalDataBuffer<11> tmp2(pool, trigPtr.p->beforeValues);
- tmp2.release();
- LocalDataBuffer<11> tmp3(pool, trigPtr.p->afterValues);
- tmp3.release();
-
- triggers->next(trigPtr);
- }
- triggers->release();
-}
-
-void Dbtc::insertIntoIndexTable(Signal* signal,
- TcFiredTriggerData* firedTriggerData,
- ApiConnectRecordPtr* transPtr,
- TcConnectRecordPtr* opPtr,
- TcIndexData* indexData,
- bool holdOperation)
-{
- ApiConnectRecord* regApiPtr = transPtr->p;
- TcConnectRecord* opRecord = opPtr->p;
- TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
- Uint32 tcKeyRequestInfo = 0;
- Uint32 tcKeyLength = TcKeyReq::StaticLength;
- TableRecordPtr indexTabPtr;
- AttributeBuffer::DataBufferIterator iter;
- Uint32 attrId = 0;
- Uint32 keyLength = 0;
- Uint32 totalPrimaryKeyLength = 0;
- Uint32 hops;
-
- indexTabPtr.i = indexData->indexId;
- ptrCheckGuard(indexTabPtr, ctabrecFilesize, tableRecord);
- tcKeyReq->apiConnectPtr = transPtr->i;
- tcKeyReq->senderData = opPtr->i;
- if (holdOperation) {
- jam();
- opRecord->triggerExecutionCount++;
- }//if
- // Calculate key length and renumber attribute ids
- AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
- LocalDataBuffer<11> afterValues(pool, firedTriggerData->afterValues);
- bool skipNull = false;
- for(bool moreKeyAttrs = afterValues.first(iter); moreKeyAttrs; attrId++) {
- jam();
- AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
-
- // Filter out NULL valued attributes
- if (attrHeader->isNULL()) {
- skipNull = true;
- break;
- }
- attrHeader->setAttributeId(attrId);
- keyLength += attrHeader->getDataSize();
- hops = attrHeader->getHeaderSize() + attrHeader->getDataSize();
- moreKeyAttrs = afterValues.next(iter, hops);
- }
- if (skipNull) {
- jam();
- opRecord->triggerExecutionCount--;
- if (opRecord->triggerExecutionCount == 0) {
- /*
- We have completed current trigger execution
- Continue triggering operation
- */
- jam();
- continueTriggeringOp(signal, opRecord);
- }//if
- return;
- }//if
-
- // Calculate total length of primary key to be stored in index table
- LocalDataBuffer<11> keyValues(pool, firedTriggerData->keyValues);
- for(bool moreAttrData = keyValues.first(iter); moreAttrData; ) {
- jam();
- AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
-
- totalPrimaryKeyLength += attrHeader->getDataSize();
- hops = attrHeader->getHeaderSize() + attrHeader->getDataSize();
- moreAttrData = keyValues.next(iter, hops);
- }
- AttributeHeader pkAttrHeader(attrId, totalPrimaryKeyLength);
-
- TcKeyReq::setKeyLength(tcKeyRequestInfo, keyLength);
- tcKeyReq->attrLen = afterValues.getSize() +
- pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize();
- tcKeyReq->tableId = indexData->indexId;
- TcKeyReq::setOperationType(tcKeyRequestInfo, ZINSERT);
- TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, true);
- tcKeyReq->tableSchemaVersion = indexTabPtr.p->currentSchemaVersion;
- tcKeyReq->transId1 = regApiPtr->transid[0];
- tcKeyReq->transId2 = regApiPtr->transid[1];
- Uint32 * dataPtr = &tcKeyReq->scanInfo;
- // Write first part of key in TCKEYREQ
- Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
- Uint32 attrBufSize = 5; // Maximum for ATTRINFO in TCKEYREQ
- Uint32 dataPos = 0;
- // Filter out AttributeHeaders since these should not be in the key
- bool moreKeyData = afterValues.first(iter);
- Uint32 headerSize = 0, keyAttrSize = 0, dataSize = 0, headAndData = 0;
-
- while (moreKeyData && (dataPos < keyBufSize)) {
- /*
- * If we have not read complete key
- * and it fits in the signal
- */
- jam();
- AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
-
- headerSize = attrHeader->getHeaderSize();
- keyAttrSize = attrHeader->getDataSize();
- headAndData = headerSize + attrHeader->getDataSize();
- // Skip header
- if (headerSize == 1) {
- jam();
- moreKeyData = afterValues.next(iter);
- } else {
- jam();
- moreKeyData = afterValues.next(iter, headerSize - 1);
- }//if
- while((keyAttrSize != 0) && (dataPos < keyBufSize)) {
- // If we have not read complete key
- jam();
- *dataPtr++ = *iter.data;
- dataPos++;
- keyAttrSize--;
- moreKeyData = afterValues.next(iter);
- }
- if (keyAttrSize != 0) {
- jam();
- break;
- }//if
- }
-
- tcKeyLength += dataPos;
- Uint32 attributesLength = afterValues.getSize() +
- pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize();
- if (attributesLength <= attrBufSize) {
- jam();
- // ATTRINFO fits in TCKEYREQ
- // Pack ATTRINFO IN TCKEYREQ as one attribute
- TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, attributesLength);
- bool moreAttrData;
- // Insert primary key attributes (insert after values of primary table)
- for(moreAttrData = afterValues.first(iter);
- moreAttrData;
- moreAttrData = afterValues.next(iter)) {
- *dataPtr++ = *iter.data;
- }
- // Insert attribute values (insert key values of primary table)
- // as one attribute
- pkAttrHeader.insertHeader(dataPtr);
- dataPtr += pkAttrHeader.getHeaderSize();
- moreAttrData = keyValues.first(iter);
- while(moreAttrData) {
- jam();
- AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
-
- headerSize = attrHeader->getHeaderSize();
- dataSize = attrHeader->getDataSize();
- // Skip header
- if (headerSize == 1) {
- jam();
- moreAttrData = keyValues.next(iter);
- } else {
- jam();
- moreAttrData = keyValues.next(iter, headerSize - 1);
- }//if
- // Copy attribute data
- while(dataSize-- != 0) {
- *dataPtr++ = *iter.data;
- moreAttrData = keyValues.next(iter);
- }
- }
- tcKeyLength += attributesLength;
- } else {
- jam();
- // No ATTRINFO in TCKEYREQ
- TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 0);
- }
- tcKeyReq->requestInfo = tcKeyRequestInfo;
-
- /**
- * Fix savepoint id:
- * ensure the insert has the same savepoint id as the triggering operation
- */
- const Uint32 currSavePointId = regApiPtr->currSavePointId;
- regApiPtr->currSavePointId = opRecord->savePointId;
- EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
- regApiPtr->currSavePointId = currSavePointId;
- tcConnectptr.p->currentIndexId = indexData->indexId;
- jamEntry();
-
- // *********** KEYINFO ***********
- if (moreKeyData) {
- jam();
- // Send KEYINFO sequence
- KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
-
- keyInfo->connectPtr = transPtr->i;
- keyInfo->transId[0] = regApiPtr->transid[0];
- keyInfo->transId[1] = regApiPtr->transid[1];
- dataPtr = (Uint32 *) &keyInfo->keyData;
- dataPos = 0;
- // Pack any part of a key attribute that did not fit in TCKEYREQ
- while((keyAttrSize != 0) && (dataPos < KeyInfo::DataLength)) {
- // If we have not read complete key
- *dataPtr++ = *iter.data;
- dataPos++;
- keyAttrSize--;
- if (dataPos == KeyInfo::DataLength) {
- jam();
- // Flush KEYINFO
-#if INTERNAL_TRIGGER_TCKEYREQ_JBA
- sendSignal(reference(), GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + KeyInfo::DataLength, JBA);
-#else
- EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + KeyInfo::DataLength);
- jamEntry();
-#endif
- dataPtr = (Uint32 *) &keyInfo->keyData;
- dataPos = 0;
- }
- moreKeyData = afterValues.next(iter);
- }
-
- while(moreKeyData) {
- jam();
- AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
-
- headerSize = attrHeader->getHeaderSize();
- keyAttrSize = attrHeader->getDataSize();
- headAndData = headerSize + attrHeader->getDataSize();
- // Skip header
- if (headerSize == 1) {
- jam();
- moreKeyData = afterValues.next(iter);
- } else {
- jam();
- moreKeyData = afterValues.next(iter, headerSize - 1);
- }//if
- while (keyAttrSize-- != 0) {
- *dataPtr++ = *iter.data;
- dataPos++;
- if (dataPos == KeyInfo::DataLength) {
- jam();
- // Flush KEYINFO
-#if INTERNAL_TRIGGER_TCKEYREQ_JBA
- sendSignal(reference(), GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + KeyInfo::DataLength, JBA);
-#else
- EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + KeyInfo::DataLength);
- jamEntry();
-#endif
- dataPtr = (Uint32 *) &keyInfo->keyData;
- dataPos = 0;
- }
- moreKeyData = afterValues.next(iter);
- }
- }
- if (dataPos != 0) {
- jam();
- // Flush last KEYINFO
-#if INTERNAL_TRIGGER_TCKEYREQ_JBA
- sendSignal(reference(), GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + dataPos, JBA);
-#else
- EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + dataPos);
- jamEntry();
-#endif
- }
- }
-
- // *********** ATTRINFO ***********
- if (attributesLength > attrBufSize) {
- jam();
- // No ATTRINFO in TcKeyReq
- TcKeyReq::setAIInTcKeyReq(tcKeyReq->requestInfo, 0);
- // Send ATTRINFO sequence
- AttrInfo * const attrInfo = (AttrInfo *)signal->getDataPtrSend();
- Uint32 attrInfoPos = 0;
-
- attrInfo->connectPtr = transPtr->i;
- attrInfo->transId[0] = regApiPtr->transid[0];
- attrInfo->transId[1] = regApiPtr->transid[1];
- dataPtr = (Uint32 *) &attrInfo->attrData;
-
- bool moreAttrData;
- // Insert primary key attributes (insert after values of primary table)
- for(moreAttrData = afterValues.first(iter);
- moreAttrData;
- moreAttrData = afterValues.next(iter)) {
- *dataPtr++ = *iter.data;
- attrInfoPos++;
- if (attrInfoPos == AttrInfo::DataLength) {
- jam();
- // Flush ATTRINFO
-#if INTERNAL_TRIGGER_TCKEYREQ_JBA
- sendSignal(reference(), GSN_ATTRINFO, signal,
- AttrInfo::HeaderLength + AttrInfo::DataLength, JBA);
-#else
- EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
- AttrInfo::HeaderLength + AttrInfo::DataLength);
- jamEntry();
-#endif
- dataPtr = (Uint32 *) &attrInfo->attrData;
- attrInfoPos = 0;
- }
- }
- // Insert attribute values (insert key values of primary table)
- // as one attribute
- pkAttrHeader.insertHeader(dataPtr);
- dataPtr += pkAttrHeader.getHeaderSize();
- attrInfoPos += pkAttrHeader.getHeaderSize();
- moreAttrData = keyValues.first(iter);
- while(moreAttrData) {
- jam();
- AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
-
- headerSize = attrHeader->getHeaderSize();
- dataSize = attrHeader->getDataSize();
- // Skip header
- if (headerSize == 1) {
- jam();
- moreAttrData = keyValues.next(iter);
- } else {
- jam();
- moreAttrData = keyValues.next(iter, headerSize - 1);
- }//if
- while(dataSize-- != 0) { // Copy attribute data word by word
- if (attrInfoPos == AttrInfo::DataLength) {
- jam();
- // Flush ATTRINFO
-#if INTERNAL_TRIGGER_TCKEYREQ_JBA
- sendSignal(reference(), GSN_ATTRINFO, signal,
- AttrInfo::HeaderLength + AttrInfo::DataLength, JBA);
-#else
- EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
- AttrInfo::HeaderLength + AttrInfo::DataLength);
- jamEntry();
-#endif
- dataPtr = (Uint32 *) &attrInfo->attrData;
- attrInfoPos = 0;
- }
- *dataPtr++ = *iter.data;
- attrInfoPos++;
- moreAttrData = keyValues.next(iter);
- }
- }
- if (attrInfoPos != 0) {
- jam();
- // Flush last ATTRINFO
-#if INTERNAL_TRIGGER_TCKEYREQ_JBA
- sendSignal(reference(), GSN_ATTRINFO, signal,
- AttrInfo::HeaderLength + attrInfoPos, JBA);
-#else
- EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
- AttrInfo::HeaderLength + attrInfoPos);
- jamEntry();
-#endif
- }
- }
-}
-
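The currSavePointId save/restore around EXECUTE_DIRECT above (and again in deleteFromIndexTable below) is what makes the generated index operation run under the triggering operation's savepoint. A minimal sketch of the same pattern; the type and function names here are hypothetical stand-ins, not the real DBTC records:

    // Sketch only: illustrates the save/execute/restore pattern used above.
    // ApiConnect, TcConnect and executeTcKeyReq are hypothetical stand-ins,
    // not the actual DBTC types or entry points.
    struct ApiConnect { unsigned currSavePointId; };
    struct TcConnect  { unsigned savePointId; };

    void runTriggeredOp(ApiConnect& trans, const TcConnect& triggeringOp,
                        void (*executeTcKeyReq)(ApiConnect&))
    {
      const unsigned saved = trans.currSavePointId;     // remember caller's savepoint
      trans.currSavePointId = triggeringOp.savePointId; // run as the triggering op
      executeTcKeyReq(trans);                           // direct, synchronous execution
      trans.currSavePointId = saved;                    // restore before returning
    }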
-void Dbtc::deleteFromIndexTable(Signal* signal,
- TcFiredTriggerData* firedTriggerData,
- ApiConnectRecordPtr* transPtr,
- TcConnectRecordPtr* opPtr,
- TcIndexData* indexData,
- bool holdOperation)
-{
- ApiConnectRecord* regApiPtr = transPtr->p;
- TcConnectRecord* opRecord = opPtr->p;
- TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
- Uint32 tcKeyRequestInfo = 0;
- Uint32 tcKeyLength = 12; // Static length
- TableRecordPtr indexTabPtr;
- AttributeBuffer::DataBufferIterator iter;
- Uint32 attrId = 0;
- Uint32 keyLength = 0;
- Uint32 hops;
-
- indexTabPtr.i = indexData->indexId;
- ptrCheckGuard(indexTabPtr, ctabrecFilesize, tableRecord);
- tcKeyReq->apiConnectPtr = transPtr->i;
- tcKeyReq->senderData = opPtr->i;
- if (holdOperation) {
- jam();
- opRecord->triggerExecutionCount++;
- }//if
- // Calculate key length and renumber attribute ids
- AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
- LocalDataBuffer<11> beforeValues(pool, firedTriggerData->beforeValues);
- bool skipNull = false;
- for(bool moreKeyAttrs = beforeValues.first(iter);
- (moreKeyAttrs);
- attrId++) {
- jam();
- AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
-
- // Filter out NULL valued attributes
- if (attrHeader->isNULL()) {
- skipNull = true;
- break;
- }
- attrHeader->setAttributeId(attrId);
- keyLength += attrHeader->getDataSize();
- hops = attrHeader->getHeaderSize() + attrHeader->getDataSize();
- moreKeyAttrs = beforeValues.next(iter, hops);
- }
-
- if (skipNull) {
- jam();
- opRecord->triggerExecutionCount--;
- if (opRecord->triggerExecutionCount == 0) {
- /*
- We have completed current trigger execution
- Continue triggering operation
- */
- jam();
- continueTriggeringOp(signal, opRecord);
- }//if
- return;
- }//if
-
- TcKeyReq::setKeyLength(tcKeyRequestInfo, keyLength);
- tcKeyReq->attrLen = 0;
- tcKeyReq->tableId = indexData->indexId;
- TcKeyReq::setOperationType(tcKeyRequestInfo, ZDELETE);
- TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, true);
- tcKeyReq->tableSchemaVersion = indexTabPtr.p->currentSchemaVersion;
- tcKeyReq->transId1 = regApiPtr->transid[0];
- tcKeyReq->transId2 = regApiPtr->transid[1];
- Uint32 * dataPtr = &tcKeyReq->scanInfo;
- // Write first part of key in TCKEYREQ
- Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
- Uint32 dataPos = 0;
- // Filter out AttributeHeaders since these should not be in the key
- bool moreKeyData = beforeValues.first(iter);
- Uint32 headerSize = 0, keyAttrSize = 0, headAndData = 0;
-
- while (moreKeyData &&
- (dataPos < keyBufSize)) {
- /*
- If we have not read complete key
- and it fits in the signal
- */
- jam();
- AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
-
- headerSize = attrHeader->getHeaderSize();
- keyAttrSize = attrHeader->getDataSize();
- headAndData = headerSize + attrHeader->getDataSize();
- // Skip header
- if (headerSize == 1) {
- jam();
- moreKeyData = beforeValues.next(iter);
- } else {
- jam();
- moreKeyData = beforeValues.next(iter, headerSize - 1);
- }//if
- while((keyAttrSize != 0) &&
- (dataPos < keyBufSize)) {
- // If we have not read complete key
- jam();
- *dataPtr++ = *iter.data;
- dataPos++;
- keyAttrSize--;
- moreKeyData = beforeValues.next(iter);
- }
- if (keyAttrSize != 0) {
- jam();
- break;
- }//if
- }
-
- tcKeyLength += dataPos;
- tcKeyReq->requestInfo = tcKeyRequestInfo;
-
- /**
- * Fix savepoint id:
- * ensure the delete has the same savepoint id as the triggering operation
- */
- const Uint32 currSavePointId = regApiPtr->currSavePointId;
- regApiPtr->currSavePointId = opRecord->savePointId;
- EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
- regApiPtr->currSavePointId = currSavePointId;
- tcConnectptr.p->currentIndexId = indexData->indexId;
- jamEntry();
-
- // *********** KEYINFO ***********
- if (moreKeyData) {
- jam();
- // Send KEYINFO sequence
- KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
-
- keyInfo->connectPtr = transPtr->i;
- keyInfo->transId[0] = regApiPtr->transid[0];
- keyInfo->transId[1] = regApiPtr->transid[1];
- dataPtr = (Uint32 *) &keyInfo->keyData;
- dataPos = 0;
- // Pack any part of a key attribute that did not fit in TCKEYREQ
- while((keyAttrSize != 0) &&
- (dataPos < KeyInfo::DataLength)) {
- // If we have not read complete key
- *dataPtr++ = *iter.data;
- dataPos++;
- keyAttrSize--;
- if (dataPos == KeyInfo::DataLength) {
- jam();
- // Flush KEYINFO
-#if INTERNAL_TRIGGER_TCKEYREQ_JBA
- sendSignal(reference(), GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + KeyInfo::DataLength, JBA);
-#else
- EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + KeyInfo::DataLength);
- jamEntry();
-#endif
- dataPtr = (Uint32 *) &keyInfo->keyData;
- dataPos = 0;
- }
- moreKeyData = beforeValues.next(iter);
- }
-
- while(moreKeyData) {
- jam();
- AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
-
- headerSize = attrHeader->getHeaderSize();
- keyAttrSize = attrHeader->getDataSize();
- headAndData = headerSize + attrHeader->getDataSize();
- // Skip header
- if (headerSize == 1) {
- jam();
- moreKeyData = beforeValues.next(iter);
- } else {
- jam();
- moreKeyData = beforeValues.next(iter,
- headerSize - 1);
- }//if
- while (keyAttrSize-- != 0) {
- *dataPtr++ = *iter.data;
- dataPos++;
- if (dataPos == KeyInfo::DataLength) {
- jam();
- // Flush KEYINFO
-#if INTERNAL_TRIGGER_TCKEYREQ_JBA
- sendSignal(reference(), GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + KeyInfo::DataLength, JBA);
-#else
- EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + KeyInfo::DataLength);
- jamEntry();
-#endif
- dataPtr = (Uint32 *) &keyInfo->keyData;
- dataPos = 0;
- }
- moreKeyData = beforeValues.next(iter);
- }
- }
- if (dataPos != 0) {
- jam();
- // Flush last KEYINFO
-#if INTERNAL_TRIGGER_TCKEYREQ_JBA
- sendSignal(reference(), GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + dataPos, JBA);
-#else
- EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + dataPos);
- jamEntry();
-#endif
- }
- }
-}
-
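Both routines above stream KEYINFO and ATTRINFO in fixed-size chunks: copy words into the signal buffer, flush whenever it reaches KeyInfo::DataLength (or AttrInfo::DataLength), and flush the partly filled remainder at the end. A rough sketch of that fill-and-flush loop, assuming a generic flush callback in place of sendSignal/EXECUTE_DIRECT:

    #include <cstddef>
    #include <functional>

    // Sketch only: generic fill-and-flush chunking as used for KEYINFO/ATTRINFO.
    // 'DataLength' and 'flush' are placeholders for the signal-specific values.
    void sendInChunks(const unsigned* words, std::size_t count,
                      std::size_t DataLength,
                      const std::function<void(const unsigned*, std::size_t)>& flush)
    {
      unsigned buf[32];            // working buffer for one chunk
      if (DataLength == 0 || DataLength > 32)
        return;                    // out of range for this sketch
      std::size_t pos = 0;
      for (std::size_t i = 0; i < count; i++) {
        buf[pos++] = words[i];
        if (pos == DataLength) {   // buffer full: flush a complete chunk
          flush(buf, pos);
          pos = 0;
        }
      }
      if (pos != 0)                // flush the last, partially filled chunk
        flush(buf, pos);
    }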
-Uint32
-Dbtc::TableRecord::getErrorCode(Uint32 schemaVersion) const {
- if(!enabled)
- return ZNO_SUCH_TABLE;
- if(dropping)
- return ZDROP_TABLE_IN_PROGRESS;
- if(schemaVersion != currentSchemaVersion)
- return ZWRONG_SCHEMA_VERSION_ERROR;
- ErrorReporter::handleAssert("Dbtc::TableRecord::getErrorCode",
- __FILE__, __LINE__);
- return 0;
-}
-
diff --git a/ndb/src/kernel/blocks/dbtc/Makefile.am b/ndb/src/kernel/blocks/dbtc/Makefile.am
deleted file mode 100644
index 98ee2639bac..00000000000
--- a/ndb/src/kernel/blocks/dbtc/Makefile.am
+++ /dev/null
@@ -1,23 +0,0 @@
-noinst_LIBRARIES = libdbtc.a
-
-libdbtc_a_SOURCES = DbtcInit.cpp DbtcMain.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libdbtc.dsp
-
-libdbtc.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libdbtc_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
deleted file mode 100644
index f985e44d307..00000000000
--- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ /dev/null
@@ -1,2469 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef DBTUP_H
-#define DBTUP_H
-
-#include <pc.hpp>
-#include <SimulatedBlock.hpp>
-#include <ndb_limits.h>
-#include <trigger_definitions.h>
-#include <ArrayList.hpp>
-#include <AttributeHeader.hpp>
-#include <Bitmask.hpp>
-#include <signaldata/TupKey.hpp>
-#include <signaldata/CreateTrig.hpp>
-#include <signaldata/DropTrig.hpp>
-#include <signaldata/TrigAttrInfo.hpp>
-#include <signaldata/BuildIndx.hpp>
-
-#define ZWORDS_ON_PAGE 8192 /* NUMBER OF WORDS ON A PAGE. */
-#define ZATTRBUF_SIZE 32 /* SIZE OF ATTRIBUTE RECORD BUFFER */
-#define ZMIN_PAGE_LIMIT_TUPKEYREQ 5
-#define ZTUP_VERSION_BITS 15
-
-#ifdef DBTUP_C
-//------------------------------------------------------------------
-// Jam Handling:
-//
-// When DBTUP reports lines through jam in the trace files they have to
-// be interpreted. For example, 5024 means line 24 in DbtupCommit.cpp:
-// 5000 is added to the line number because the line is located in the
-// file DbtupCommit.cpp. The following is the exhaustive list of the
-// values added in the various files. ndbrequire and ptrCheckGuard still
-// only report the line number in the file they are currently located in.
-//
-// DbtupExecQuery.cpp 0
-// DbtupBuffer.cpp 2000
-// DbtupRoutines.cpp 3000
-// DbtupCommit.cpp 5000
-// DbtupFixAlloc.cpp 6000
-// DbtupTrigger.cpp 7000
-// DbtupAbort.cpp 9000
-// DbtupLCP.cpp 10000
-// DbtupUndoLog.cpp 12000
-// DbtupPageMap.cpp 14000
-// DbtupPagMan.cpp 16000
-// DbtupStoredProcDef.cpp 18000
-// DbtupMeta.cpp 20000
-// DbtupTabDesMan.cpp 22000
-// DbtupGen.cpp 24000
-// DbtupSystemRestart.cpp 26000
-// DbtupIndex.cpp 28000
-// DbtupDebug.cpp 30000
-// DbtupScan.cpp 32000
-//------------------------------------------------------------------
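Given the offsets listed above, a jam value from a trace file can be mapped back to a source file and line. A hedged sketch of that decoding; the table is copied from the comment, and the lookup itself is just an illustration, not code from the tree:

    #include <cstdio>

    // Sketch only: decode a DBTUP jam value into (source file, line number)
    // using the per-file offsets from the comment above.
    struct JamFile { unsigned base; const char* name; };

    static const JamFile jamFiles[] = {
      {     0, "DbtupExecQuery.cpp" }, {  2000, "DbtupBuffer.cpp" },
      {  3000, "DbtupRoutines.cpp"  }, {  5000, "DbtupCommit.cpp" },
      {  6000, "DbtupFixAlloc.cpp"  }, {  7000, "DbtupTrigger.cpp" },
      {  9000, "DbtupAbort.cpp"     }, { 10000, "DbtupLCP.cpp" },
      { 12000, "DbtupUndoLog.cpp"   }, { 14000, "DbtupPageMap.cpp" },
      { 16000, "DbtupPagMan.cpp"    }, { 18000, "DbtupStoredProcDef.cpp" },
      { 20000, "DbtupMeta.cpp"      }, { 22000, "DbtupTabDesMan.cpp" },
      { 24000, "DbtupGen.cpp"       }, { 26000, "DbtupSystemRestart.cpp" },
      { 28000, "DbtupIndex.cpp"     }, { 30000, "DbtupDebug.cpp" },
      { 32000, "DbtupScan.cpp"      },
    };

    void printJam(unsigned jamValue)
    {
      // Pick the file with the largest base offset not exceeding jamValue.
      const JamFile* best = &jamFiles[0];
      for (const JamFile& f : jamFiles)
        if (f.base <= jamValue && f.base >= best->base)
          best = &f;
      std::printf("%s line %u\n", best->name, jamValue - best->base);
    }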
-
-/*
-2.2 LOCAL SYMBOLS
------------------
-*/
-/* ---------------------------------------------------------------- */
-/* S I Z E O F R E C O R D S */
-/* ---------------------------------------------------------------- */
-#define ZNO_OF_ATTRBUFREC 10000 /* SIZE OF ATTRIBUTE INFO FILE */
-#define ZNO_OF_CONCURRENT_OPEN_OP 40 /* NUMBER OF CONCURRENT OPENS */
-#define ZNO_OF_CONCURRENT_WRITE_OP 80 /* NUMBER OF CONCURRENT DISK WRITES*/
-#define ZNO_OF_FRAGOPREC 20 /* NUMBER OF CONCURRENT ADD FRAG. */
-#define ZNO_OF_LCP_REC 10 /* NUMBER OF CONCURRENT CHECKPOINTS*/
-#define TOT_PAGE_RECORD_SPACE 262144 /* SIZE OF PAGE RECORD FILE. */
-#define ZNO_OF_PAGE TOT_PAGE_RECORD_SPACE/ZWORDS_ON_PAGE
-#define ZNO_OF_PAGE_RANGE_REC 128 /* SIZE OF PAGE RANGE FILE */
-#define ZNO_OF_PARALLELL_UNDO_FILES 16 /* NUMBER OF PARALLEL UNDO FILES */
-#define ZNO_OF_RESTART_INFO_REC 10 /* MAXIMUM PARALLEL RESTART INFOS */
- /* 24 SEGMENTS WITH 8 PAGES IN EACH*/
- /* PLUS ONE UNDO BUFFER CACHE */
-// Undo record identifiers are 32-bits with page index 13-bits
-#define ZUNDO_RECORD_ID_PAGE_INDEX 13 /* 13 BITS = 8192 WORDS/PAGE */
-#define ZUNDO_RECORD_ID_PAGE_INDEX_MASK (ZWORDS_ON_PAGE - 1) /* 1111111111111 */
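Per the two defines above, an undo record identifier packs a page reference and a 13-bit page index (8192 words per page) into a single 32-bit word. A small sketch of the split, assuming the page index occupies the low 13 bits:

    // Sketch only: split a 32-bit undo record id into page id and page index,
    // assuming the 13-bit page index sits in the low bits (mask = 8192 - 1).
    const unsigned UNDO_PAGE_INDEX_BITS = 13;
    const unsigned UNDO_PAGE_INDEX_MASK = (1u << UNDO_PAGE_INDEX_BITS) - 1; // 0x1FFF

    inline unsigned undoPageIndex(unsigned undoRecordId)
    { return undoRecordId & UNDO_PAGE_INDEX_MASK; }   // word offset within the page

    inline unsigned undoPageId(unsigned undoRecordId)
    { return undoRecordId >> UNDO_PAGE_INDEX_BITS; }  // which undo page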
-
-// Trigger constants
-#define ZDEFAULT_MAX_NO_TRIGGERS_PER_TABLE 16
-
-/* ---------------------------------------------------------------- */
-// VARIABLE NUMBERS OF PAGE_WORD, UNDO_WORD AND LOGIC_WORD FOR
-// COMMUNICATION WITH FILE SYSTEM
-/* ---------------------------------------------------------------- */
-#define ZBASE_ADDR_PAGE_WORD 1 /* BASE ADDRESS OF PAGE_WORD VAR */
-#define ZBASE_ADDR_UNDO_WORD 2 /* BASE ADDRESS OF UNDO_WORD VAR */
-#define ZBASE_ADDR_LOGIC_WORD 3 /* BASE ADDRESS OF LOGIC_WORD VAR */
-
-/* ---------------------------------------------------------------- */
-// NUMBER OF PAGES SENT TO DISK IN DATA BUFFER AND UNDO BUFFER WHEN
-// OPTIMUM PERFORMANCE IS ACHIEVED.
-/* ---------------------------------------------------------------- */
-#define ZUB_SEGMENT_SIZE 8 /* SEGMENT SIZE OF UNDO BUFFER */
-#define ZDB_SEGMENT_SIZE 8 /* SEGMENT SIZE OF DATA BUFFER */
-
-/* ---------------------------------------------------------------- */
-/* AN ATTRIBUTE MAY BE NULL, DYNAMIC OR NORMAL. A NORMAL ATTRIBUTE */
-/* IS AN ATTRIBUTE THAT IS NEITHER NULL NOR DYNAMIC. A NULL ATTRIBUTE */
-/* MAY HAVE NO VALUE. A DYNAMIC ATTRIBUTE IS A NULL ATTRIBUTE THAT */
-/* DOES NOT HAVE TO BE A MEMBER OF EVERY TUPLE IN A CERTAIN TABLE. */
-/* ---------------------------------------------------------------- */
-/**
- * #defines moved into include/kernel/Interpreter.hpp
- */
-#define ZMAX_REGISTER 21
-#define ZINSERT_DELETE 0
-/* ---------------------------------------------------------------- */
-/* THE MINIMUM SIZE OF AN 'EMPTY' TUPLE HEADER IN R-WORDS */
-/* ---------------------------------------------------------------- */
-#define ZTUP_HEAD_MINIMUM_SIZE 2
- /* THE TUPLE HEADER FIELD 'SIZE OF NULL ATTR. FIELD' SPECIFIES */
- /* THE SIZE OF THE TUPLE HEADER FIELD 'NULL ATTR. FIELD'. */
- /* THE TUPLE HEADER FIELD 'TYPE' SPECIFIES THE TYPE OF THE TUPLE */
- /* HEADER. */
- /* TUPLE ATTRIBUTE INDEX CLUSTERS, ATTRIBUTE */
- /* CLUSTERS AND A DYNAMIC ATTRIBUTE HEADER. */
- /* IT MAY ALSO CONTAIN SHORT ATTRIBUTES AND */
- /* POINTERS TO LONG ATTRIBUTE HEADERS. */
- /* TUPLE ATTRIBUTE INDEX CLUSTERS, ATTRIBUTE */
- /* CLUSTERS AND A DYNAMIC ATTRIBUTE HEADER. */
-
-#define ZTH_TYPE3 2 /* TUPLE HEADER THAT MAY HAVE A POINTER TO */
- /* A DYNAMIC ATTRIBUTE HEADER. IT MAY ALSO */
- /* CONTAIN SHORT ATTRIBUTES AND POINTERS */
- /* TO LONG ATTRIBUTE HEADERS. */
-
- /* DATA STRUCTURE TYPES */
- /* WHEN ATTRIBUTE INFO IS SENT WITH AN ATTRINFO-SIGNAL THE */
- /* VARIABLE TYPE IS SPECIFIED. THIS MUST BE DONE TO BE ABLE TO */
- /* KNOW HOW MUCH DATA OF AN ATTRIBUTE TO READ FROM ATTRINFO. */
-#define ZFIXED_ARRAY 2 /* ZFIXED ARRAY FIELD. */
-#define ZNON_ARRAY 1 /* NORMAL FIELD. */
-#define ZVAR_ARRAY 0 /* VARIABLE ARRAY FIELD */
-#define ZNOT_STORE 3 /* THE ATTR IS STORED IN THE INDEX BLOCK */
-#define ZMAX_SMALL_VAR_ARRAY 256
-
- /* PLEASE OBSERVE THAT THESE CONSTANTS CORRESPOND TO THE NUMBER */
- /* OF BITS NEEDED TO REPRESENT THEM D O N O T C H A N G E */
-#define Z1BIT_VAR 0 /* 1 BIT VARIABLE. */
-#define Z2BIT_VAR 1 /* 2 BIT VARIABLE. */
-#define Z4BIT_VAR 2 /* 4 BIT VARIABLE. */
-#define Z8BIT_VAR 3 /* 8 BIT VARIABLE. */
-#define Z16BIT_VAR 4 /* 16 BIT VARIABLE. */
-#define Z32BIT_VAR 5 /* 32 BIT VARIABLE. */
-#define Z64BIT_VAR 6 /* 64 BIT VARIABLE. */
-#define Z128BIT_VAR 7 /* 128 BIT VARIABLE. */
-
- /* WHEN A REQUEST CANNOT BE EXECUTED BECAUSE OF AN ERROR, THE */
- /* ERROR MUST BE IDENTIFIED BY MEANS OF AN ERROR CODE AND SENT TO */
- /* THE REQUESTER. */
-#define ZGET_OPREC_ERROR 804 // TUP_SEIZEREF
-
-#define ZEXIST_FRAG_ERROR 816 // Add fragment
-#define ZFULL_FRAGRECORD_ERROR 817 // Add fragment
-#define ZNO_FREE_PAGE_RANGE_ERROR 818 // Add fragment
-#define ZNOFREE_FRAGOP_ERROR 830 // Add fragment
-#define ZTOO_LARGE_TUPLE_ERROR 851 // Add fragment
-#define ZNO_FREE_TAB_ENTRY_ERROR 852 // Add fragment
-#define ZNO_PAGES_ALLOCATED_ERROR 881 // Add fragment
-
-#define ZGET_REALPID_ERROR 809
-#define ZNOT_IMPLEMENTED_ERROR 812
-#define ZSEIZE_ATTRINBUFREC_ERROR 805
-#define ZTOO_MUCH_ATTRINFO_ERROR 823
-#define ZMEM_NOTABDESCR_ERROR 826
-#define ZMEM_NOMEM_ERROR 827
-#define ZAI_INCONSISTENCY_ERROR 829
-#define ZNO_ILLEGAL_NULL_ATTR 839
-#define ZNOT_NULL_ATTR 840
-#define ZNO_INSTRUCTION_ERROR 871
-#define ZOUTSIDE_OF_PROGRAM_ERROR 876
-#define ZSTORED_PROC_ID_ERROR 877
-#define ZREGISTER_INIT_ERROR 878
-#define ZATTRIBUTE_ID_ERROR 879
-#define ZTRY_TO_READ_TOO_MUCH_ERROR 880
-#define ZTOTAL_LEN_ERROR 882
-#define ZATTR_INTERPRETER_ERROR 883
-#define ZSTACK_OVERFLOW_ERROR 884
-#define ZSTACK_UNDERFLOW_ERROR 885
-#define ZTOO_MANY_INSTRUCTIONS_ERROR 886
-#define ZTRY_TO_UPDATE_ERROR 888
-#define ZCALL_ERROR 890
-#define ZTEMPORARY_RESOURCE_FAILURE 891
-
-#define ZSTORED_SEIZE_ATTRINBUFREC_ERROR 873 // Part of Scan
-
-#define ZREAD_ONLY_CONSTRAINT_VIOLATION 893
-#define ZVAR_SIZED_NOT_SUPPORTED 894
-#define ZINCONSISTENT_NULL_ATTRIBUTE_COUNT 895
-#define ZTUPLE_CORRUPTED_ERROR 896
-#define ZTRY_UPDATE_PRIMARY_KEY 897
-#define ZMUST_BE_ABORTED_ERROR 898
-#define ZTUPLE_DELETED_ERROR 626
-#define ZINSERT_ERROR 630
-
-#define ZINVALID_CHAR_FORMAT 744
-
-
- /* SOME WORD POSITIONS OF FIELDS IN SOME HEADERS */
-#define ZPAGE_STATE_POS 0 /* POSITION OF PAGE STATE */
-#define ZPAGE_NEXT_POS 1 /* POSITION OF THE NEXT POINTER WHEN IN FREELIST */
-#define ZPAGE_PREV_POS 2 /* POSITION OF THE PREVIOUS POINTER WHEN IN FREELIST */
-#define ZFREELIST_HEADER_POS 3 /* POSITION OF THE FIRST FREELIST */
-#define ZPAGE_FRAG_PAGE_ID_POS 4 /* POSITION OF FRAG PAGE ID WHEN USED*/
-#define ZPAGE_NEXT_CLUST_POS 5 /* POSITION OF NEXT FREE SET OF PAGES */
-#define ZPAGE_FIRST_CLUST_POS 2 /* POSITION OF THE POINTER TO THE FIRST PAGE IN A CLUSTER */
-#define ZPAGE_LAST_CLUST_POS 6 /* POSITION OF THE POINTER TO THE LAST PAGE IN A CLUSTER */
-#define ZPAGE_PREV_CLUST_POS 7 /* POSITION OF THE PREVIOUS POINTER */
-#define ZPAGE_HEADER_SIZE 32 /* NUMBER OF WORDS IN MEM PAGEHEADER */
-#define ZDISK_PAGE_HEADER_SIZE 32 /* NUMBER OF WORDS IN DISK PAGEHEADER */
-#define ZNO_OF_FREE_BLOCKS 3 /* NO OF FREE BLOCK IN THE DISK PAGE */
-#define ZDISK_PAGE_ID 8 /* ID OF THE PAGE ON THE DISK */
-#define ZBLOCK_LIST 9
-#define ZCOPY_OF_PAGE 10
-#define ZPAGE_PHYSICAL_INDEX 11
-#define ZNEXT_IN_PAGE_USED_LIST 12
-#define ZPREV_IN_PAGE_USED_LIST 13
-#define ZDISK_USED_TYPE 14
-#define ZFREE_COMMON 1 /* PAGE STATE, PAGE IN COMMON AREA */
-#define ZEMPTY_MM 2 /* PAGE STATE, PAGE IN EMPTY LIST */
-#define ZTH_MM_FREE 3 /* PAGE STATE, TUPLE HEADER PAGE WITH FREE AREA */
-#define ZTH_MM_FULL 4 /* PAGE STATE, TUPLE HEADER PAGE WHICH IS FULL */
-#define ZAC_MM_FREE 5 /* PAGE STATE, ATTRIBUTE CLUSTER PAGE WITH FREE AREA */
-#define ZTH_MM_FREE_COPY 7 /* PAGE STATE, TH COPY PAGE WITH FREE AREA */
-#define ZTH_MM_FULL_COPY 8 /* PAGE STATE, TH COPY PAGE WHICH IS FULL */
-#define ZAC_MM_FREE_COPY 9 /* PAGE STATE, AC COPY PAGE WITH FREE AREA */
-#define ZMAX_NO_COPY_PAGES 4 /* THE MAXIMUM NUMBER OF COPY PAGES ALLOWED PER FRAGMENT */
-
- /* CONSTANTS USED TO HANDLE TABLE DESCRIPTOR RECORDS */
- /* ALL POSITIONS AND SIZES IS BASED ON R-WORDS (32-BIT ON APZ 212) */
-#define ZTD_HEADER 0 /* HEADER POSITION */
-#define ZTD_DATASIZE 1 /* SIZE OF THE DATA IN THIS CHUNK */
-#define ZTD_SIZE 2 /* TOTAL SIZE OF TABLE DESCRIPTOR */
-
- /* TRAILER POSITIONS FROM END OF TABLE DESCRIPTOR RECORD */
-#define ZTD_TR_SIZE 1 /* SIZE DESCRIPTOR POS FROM END+1 */
-#define ZTD_TR_TYPE 2
-#define ZTD_TRAILER_SIZE 2 /* TOTAL SIZE OF TABLE TRAILER */
-#define ZAD_SIZE 2 /* TOTAL SIZE OF ATTR DESCRIPTOR */
-#define ZAD_LOG_SIZE 1 /* TWO LOG OF TOTAL SIZE OF ATTR DESCRIPTOR */
-
- /* CONSTANTS USED TO HANDLE TABLE DESCRIPTOR AS A FREELIST */
-#define ZTD_FL_HEADER 0 /* HEADER POSITION */
-#define ZTD_FL_SIZE 1 /* TOTAL SIZE OF THIS FREELIST ENTRY */
-#define ZTD_FL_PREV 2 /* PREVIOUS RECORD IN FREELIST */
-#define ZTD_FL_NEXT 3 /* NEXT RECORD IN FREELIST */
-#define ZTD_FREE_SIZE 16 /* SIZE NEEDED TO HOLD ONE FL ENTRY */
-
- /* CONSTANTS USED IN LSB OF TABLE DESCRIPTOR HEADER DESCRIBING USAGE */
-#define ZTD_TYPE_FREE 0 /* RECORD LINKED INTO FREELIST */
-#define ZTD_TYPE_NORMAL 1 /* RECORD USED AS TABLE DESCRIPTOR */
- /* ATTRIBUTE OPERATION CONSTANTS */
-#define ZLEAF 1
-#define ZNON_LEAF 2
-
- /* ATTRINBUFREC VARIABLE POSITIONS. */
-#define ZBUF_PREV 29 /* POSITION OF 'PREV'-VARIABLE (USED BY INTERPRETED EXEC) */
-#define ZBUF_DATA_LEN 30 /* POSITION OF 'DATA LENGTH'-VARIABLE. */
-#define ZBUF_NEXT 31 /* POSITION OF 'NEXT'-VARIABLE. */
-#define ZSAVE_BUF_NEXT 28
-#define ZSAVE_BUF_DATA_LEN 27
-
- /* RETURN POINTS. */
- /* RESTART PHASES */
-#define ZSTARTPHASE1 1
-#define ZSTARTPHASE2 2
-#define ZSTARTPHASE3 3
-#define ZSTARTPHASE4 4
-#define ZSTARTPHASE6 6
-
-#define ZADDFRAG 0
-
- /* CHECKPOINT RECORD TYPES */
-#define ZLCPR_TYPE_INSERT_TH 0 /* INSERT TUPLE HEADER */
-#define ZLCPR_TYPE_DELETE_TH 1 /* DELETE TUPLE HEADER */
-#define ZLCPR_TYPE_UPDATE_TH 2 /* DON'T CREATE IT, JUST UPDATE */
-#define ZLCPR_TYPE_INSERT_TH_NO_DATA 3 /* INSERT TUPLE HEADER */
-#define ZLCPR_ABORT_UPDATE 4 /* UNDO AN UPDATE OPERATION THAT WAS ACTIVE IN LCP */
-#define ZLCPR_ABORT_INSERT 5 /* UNDO AN INSERT OPERATION THAT WAS ACTIVE IN LCP */
-#define ZTABLE_DESCRIPTOR 6 /* TABLE DESCRIPTOR */
-#define ZINDICATE_NO_OP_ACTIVE 7 /* ENSURE THAT NO OPERATION ACTIVE AFTER RESTART */
-#define ZLCPR_UNDO_LOG_PAGE_HEADER 8 /* CHANGE IN PAGE HEADER IS UNDO LOGGED */
-#define ZLCPR_TYPE_UPDATE_GCI 9 /* Update GCI at commit time */
-#define ZNO_CHECKPOINT_RECORDS 10 /* NUMBER OF CHECKPOINTRECORD TYPES */
-
- /* RESULT CODES */
- /* ELEMENT POSITIONS IN SYSTEM RESTART INFO PAGE OF THE DATA FILE */
-#define ZSRI_NO_OF_FRAG_PAGES_POS 10 /* NUMBER OF FRAGMENT PAGES WHEN CHECKPOINT STARTED */
-#define ZSRI_TUP_RESERVED_SIZE_POS 11 /* RESERVED SIZE OF THE TUPLE WHEN CP STARTED */
-#define ZSRI_TUP_FIXED_AREA_POS 12 /* SIZE OF THE TUPLE FIXED AREA WHEN CP STARTED */
-#define ZSRI_TAB_DESCR_SIZE 13 /* SIZE OF THE TABLE DESCRIPTOR WHEN CP STARTED */
-#define ZSRI_NO_OF_ATTRIBUTES_POS 14 /* NUMBER OF ATTRIBUTES */
-#define ZSRI_UNDO_LOG_END_REC_ID 15 /* LAST UNDO LOG RECORD ID FOR THIS CHECKPOINT */
-#define ZSRI_UNDO_LOG_END_PAGE_ID 16 /* LAST USED LOG PAGE ID FOR THIS CHECKPOINT */
-#define ZSRI_TH_FREE_FIRST 17 /* FIRST FREE PAGE OF TUPLE HEADERS */
-#define ZSRI_TH_FREE_COPY_FIRST 18 /* FIRST FREE PAGE OF TUPLE HEADER COPIES */
-#define ZSRI_EMPTY_PRIM_PAGE 27 /* FIRST EMPTY PAGE */
-#define ZSRI_NO_COPY_PAGES_ALLOC 28 /* NO COPY PAGES IN FRAGMENT AT LOCAL CHECKPOINT */
-#define ZSRI_UNDO_FILE_VER 29 /* CHECK POINT ID OF THE UNDO FILE */
-#define ZSRI_NO_OF_INDEX_ATTR 30 /* No of index attributes */
-#define ZNO_OF_PAGES_CLUSTER_REC 0
-
-//------------------------------------------------------------
-// TUP_CONTINUEB codes
-//------------------------------------------------------------
-#define ZSTART_EXEC_UNDO_LOG 0
-#define ZCONT_START_SAVE_CL 1
-#define ZCONT_SAVE_DP 2
-#define ZCONT_EXECUTE_LC 3
-#define ZCONT_LOAD_DP 4
-#define ZLOAD_BAL_LCP_TIMER 5
-#define ZINITIALISE_RECORDS 6
-#define ZREL_FRAG 7
-#define ZREPORT_MEMORY_USAGE 8
-#define ZBUILD_INDEX 9
-
-#define ZINDEX_STORAGE 0
-#define ZDATA_WORD_AT_DISK_PAGE 2030
-#define ZALLOC_DISK_PAGE_LAST_INDEX 2047
-#define ZWORD_IN_BLOCK 127 /* NO OF WORD IN A BLOCK */
-#define ZNO_DISK_PAGES_FILE_REC 100
-#define ZMASK_PAGE_INDEX 0x7ff
-#define ZBIT_PAGE_INDEX 11 /* 8 KBYT PAGE = 2048 WORDS */
-#define ZSCAN_PROCEDURE 0
-#define ZCOPY_PROCEDURE 2
-#define ZSTORED_PROCEDURE_DELETE 3
-#define ZSTORED_PROCEDURE_FREE 0xffff
-#define ZMIN_PAGE_LIMIT_TUP_COMMITREQ 2
-#define ZUNDO_PAGE_HEADER_SIZE 2 /* SIZE OF UNDO PAGE HEADER */
-#endif
-
-class Dbtup: public SimulatedBlock {
-public:
-
- typedef bool (Dbtup::* ReadFunction)(Uint32*,
- AttributeHeader*,
- Uint32,
- Uint32);
- typedef bool (Dbtup::* UpdateFunction)(Uint32*,
- Uint32,
- Uint32);
-// State values
-enum State {
- NOT_INITIALIZED = 0,
- COMMON_AREA_PAGES = 1,
- UNDO_RESTART_PAGES = 2,
- UNDO_PAGES = 3,
- READ_ONE_PAGE = 4,
- CHECKPOINT_DATA_READ = 7,
- CHECKPOINT_DATA_READ_PAGE_ZERO = 8,
- CHECKPOINT_DATA_WRITE = 9,
- CHECKPOINT_DATA_WRITE_LAST = 10,
- CHECKPOINT_DATA_WRITE_FLUSH = 11,
- CHECKPOINT_UNDO_READ = 12,
- CHECKPOINT_UNDO_READ_FIRST = 13,
- CHECKPOINT_UNDO_WRITE = 14,
- CHECKPOINT_UNDO_WRITE_FLUSH = 15,
- CHECKPOINT_TD_READ = 16,
- IDLE = 17,
- ACTIVE = 18,
- SYSTEM_RESTART = 19,
- NO_OTHER_OP = 20,
- COMMIT_DELETE = 21,
- TO_BE_COMMITTED = 22,
- ABORTED = 23,
- ALREADY_ABORTED_INSERT = 24,
- ALREADY_ABORTED = 25,
- ABORT_INSERT = 26,
- ABORT_UPDATE = 27,
- INIT = 28,
- INITIAL_READ = 29,
- INTERPRETED_EXECUTION = 30,
- FINAL_READ = 31,
- FINAL_UPDATE = 32,
- DISCONNECTED = 33,
- DEFINED = 34,
- ERROR_WAIT_TUPKEYREQ = 35,
- STARTED = 36,
- NOT_DEFINED = 37,
- COMPLETED = 38,
- WAIT_ABORT = 39,
- NORMAL_PAGE = 40,
- COPY_PAGE = 41,
- DELETE_BLOCK = 42,
- WAIT_STORED_PROCEDURE_ATTR_INFO = 43,
- DATA_FILE_READ = 45,
- DATA_FILE_WRITE = 46,
- LCP_DATA_FILE_READ = 47,
- LCP_DATA_FILE_WRITE = 48,
- LCP_DATA_FILE_WRITE_WITH_UNDO = 49,
- LCP_DATA_FILE_CLOSE = 50,
- LCP_UNDO_FILE_READ = 51,
- LCP_UNDO_FILE_CLOSE = 52,
- LCP_UNDO_FILE_WRITE = 53,
- OPENING_DATA_FILE = 54,
- INITIATING_RESTART_INFO = 55,
- INITIATING_FRAGMENT = 56,
- OPENING_UNDO_FILE = 57,
- READING_RESTART_INFO = 58,
- INIT_UNDO_SEGMENTS = 59,
- READING_TAB_DESCR = 60,
- READING_DATA_PAGES = 61,
- WAIT_COPY_PROCEDURE = 62,
- TOO_MUCH_AI = 63,
- SAME_PAGE = 64,
- DEFINING = 65,
- TUPLE_BLOCKED = 66,
- ERROR_WAIT_STORED_PROCREQ = 67
-};
-
-// Records
-/* ************** ATTRIBUTE INFO BUFFER RECORD ****************** */
-/* THIS RECORD IS USED AS A BUFFER FOR INCOMING AND OUTGOING DATA */
-/* ************************************************************** */
-struct Attrbufrec {
- Uint32 attrbuf[ZATTRBUF_SIZE];
-}; /* p2c: size = 128 bytes */
-
-typedef Ptr<Attrbufrec> AttrbufrecPtr;
-
-/* ********** CHECKPOINT INFORMATION ************ */
-/* THIS RECORD HOLDS INFORMATION NEEDED TO */
-/* PERFORM A CHECKPOINT. IT'S POSSIBLE TO RUN */
-/* MULTIPLE CHECKPOINTS AT A TIME. THIS RECORD */
-/* MAKES IT POSSIBLE TO DISTINGUISH BETWEEN THE */
-/* DIFFERENT CHECKPOINTS. */
-/* ********************************************** */
-struct CheckpointInfo {
- Uint32 lcpNextRec; /* NEXT RECORD IN FREELIST */
- Uint32 lcpCheckpointVersion; /* VERSION OF THE CHECKPOINT */
- Uint32 lcpLocalLogInfoP; /* POINTER TO A LOCAL LOG INFO RECORD */
- Uint32 lcpUserptr; /* USERPOINTER TO THE BLOCK REQUESTING THE CP */
- Uint32 lcpFragmentP; /* FRAGMENT POINTER TO WHICH THE CHECKPOINT APPLIES */
- Uint32 lcpFragmentId; /* FRAGMENT ID */
- Uint32 lcpTabPtr; /* TABLE POINTER */
- Uint32 lcpDataBufferSegmentP; /* POINTER TO A DISK BUFFER SEGMENT POINTER (DATA) */
- Uint32 lcpDataFileHandle; /* FILE HANDLES FOR DATA FILE. LOG FILE HANDLE IN LOCAL_LOG_INFO_RECORD */
- /* FILE HANDLE TO THE OPEN DATA FILE */
- Uint32 lcpNoOfPages;
- Uint32 lcpThFreeFirst;
- Uint32 lcpThFreeCopyFirst;
- Uint32 lcpEmptyPrimPage;
- Uint32 lcpNoCopyPagesAlloc;
- Uint32 lcpTmpOperPtr; /* TEMPORARY STORAGE OF OPER_PTR DURING SAVE */
- BlockReference lcpBlockref; /* BLOCKREFERENCE TO THE BLOCK REQUESTING THE CP */
-};
-typedef Ptr<CheckpointInfo> CheckpointInfoPtr;
-
-/* *********** DISK BUFFER SEGMENT INFO ********* */
-/* THIS RECORD HOLDS INFORMATION NEEDED DURING */
-/* A WRITE OF THE DATA BUFFER TO DISK. WHEN THE */
-/* WRITE SIGNAL IS SENT A POINTER TO THIS RECORD */
-/* IS INCLUDED. WHEN THE WRITE IS COMPLETED AND */
-/* CONFIRMED THE PTR TO THIS RECORD IS RETURNED */
-/* AND THE BUFFER PAGES COULD EASILY BE LOCATED */
-/* AND DEALLOCATED. THE CHECKPOINT_INFO_VERSION */
-/* KEEPS TRACK OF THE CHECKPOINT_INFO_RECORD THAT */
-/* INITIATED THE WRITE AND THE CP_PAGE_TO_DISK */
-/* ELEMENT COULD BE INCREASED BY THE NUMBER OF */
-/* PAGES WRITTEN. */
-/* ********************************************** */
-struct DiskBufferSegmentInfo {
- Uint32 pdxDataPage[16]; /* ARRAY OF DATA BUFFER PAGES */
- Uint32 pdxUndoBufferSet[2];
- Uint32 pdxNextRec;
- State pdxBuffertype;
- State pdxOperation;
- /*---------------------------------------------------------------------------*/
- /* PDX_FLAGS BITS AND THEIR USAGE: */
- /* BIT 0 1 COMMENT */
- /*---------------------------------------------------------------------------*/
- /* 0 SEGMENT INVALID SEGMENT VALID USED DURING READS */
- /* 1-15 NOT USED */
- /*---------------------------------------------------------------------------*/
- Uint32 pdxCheckpointInfoP; /* USED DURING LOCAL CHKP */
- Uint32 pdxRestartInfoP; /* USED DURING RESTART */
- Uint32 pdxLocalLogInfoP; /* POINTS TO A LOCAL LOG INFO */
- Uint32 pdxFilePage; /* START PAGE IN FILE */
- Uint32 pdxNumDataPages; /* NUMBER OF DATA PAGES */
-};
-typedef Ptr<DiskBufferSegmentInfo> DiskBufferSegmentInfoPtr;
-
-struct Fragoperrec {
- bool definingFragment;
- Uint32 nextFragoprec;
- Uint32 lqhPtrFrag;
- Uint32 fragidFrag;
- Uint32 tableidFrag;
- Uint32 fragPointer;
- Uint32 attributeCount;
- Uint32 currNullBit;
- Uint32 noOfNullBits;
- Uint32 noOfNewAttrCount;
- Uint32 charsetIndex;
- BlockReference lqhBlockrefFrag;
- bool inUse;
-};
-typedef Ptr<Fragoperrec> FragoperrecPtr;
-
- // Position for use by scan
- struct PagePos {
- Uint32 m_fragId; // "base" fragment id
- Uint32 m_fragBit; // two fragments in 5.0
- Uint32 m_pageId;
- Uint32 m_tupleNo;
- bool m_match;
- };
-
- // Tup scan op (compare Dbtux::ScanOp)
- struct ScanOp {
- enum {
- Undef = 0,
- First = 1, // before first entry
- Locked = 4, // at current entry (no lock needed)
- Next = 5, // looking for next entry
- Last = 6, // after last entry
- Invalid = 9 // cannot return REF to LQH currently
- };
- Uint16 m_state;
- Uint16 m_lockwait; // unused
- Uint32 m_userPtr; // scanptr.i in LQH
- Uint32 m_userRef;
- Uint32 m_tableId;
- Uint32 m_fragId; // "base" fragment id
- Uint32 m_fragPtrI[2];
- Uint32 m_transId1;
- Uint32 m_transId2;
- PagePos m_scanPos;
- union {
- Uint32 nextPool;
- Uint32 nextList;
- };
- Uint32 prevList;
- };
- typedef Ptr<ScanOp> ScanOpPtr;
- ArrayPool<ScanOp> c_scanOpPool;
-
- void scanFirst(Signal* signal, ScanOpPtr scanPtr);
- void scanNext(Signal* signal, ScanOpPtr scanPtr);
- void scanClose(Signal* signal, ScanOpPtr scanPtr);
- void releaseScanOp(ScanOpPtr& scanPtr);
-
-struct Fragrecord {
- Uint32 nextStartRange;
- Uint32 currentPageRange;
- Uint32 rootPageRange;
- Uint32 noOfPages;
- Uint32 emptyPrimPage;
-
- Uint32 firstusedOprec;
- Uint32 lastusedOprec;
-
- Uint32 thFreeFirst;
- Uint32 thFreeCopyFirst;
- Uint32 noCopyPagesAlloc;
-
- Uint32 checkpointVersion;
- Uint32 minPageNotWrittenInCheckpoint;
- Uint32 maxPageWrittenInCheckpoint;
- State fragStatus;
- Uint32 fragTableId;
- Uint32 fragmentId;
- Uint32 nextfreefrag;
-
- DLList<ScanOp> m_scanList;
- Fragrecord(ArrayPool<ScanOp> & scanOpPool) : m_scanList(scanOpPool) {}
-};
-typedef Ptr<Fragrecord> FragrecordPtr;
-
- /* ************ LOCAL LOG FILE INFO ************* */
- /* THIS RECORD HOLDS INFORMATION NEEDED DURING */
- /* CHECKPOINT AND RESTART. THERE ARE FOUR */
- /* PARALLEL UNDO LOG FILES, EACH ONE REPRESENTED */
- /* BY AN ENTITY OF THIS RECORD. */
- /* EACH FILE IS SHARED BETWEEN FOUR */
- /* TABLES AND HAS ITS OWN PAGE POINTERS AND */
- /* WORD POINTERS. */
- /* ********************************************** */
-struct LocalLogInfo {
- Uint32 lliActiveLcp; /* NUMBER OF ACTIVE LOCAL CHECKPOINTS ON THIS FILE */
- Uint32 lliEndPageId; /* PAGE IDENTIFIER OF LAST PAGE WITH LOG DATA */
- Uint32 lliPrevRecordId; /* PREVIOUS RECORD IN THIS LOGFILE */
- Uint32 lliLogFilePage; /* PAGE IN LOGFILE */
- Uint32 lliNumFragments; /* NO OF FRAGMENTS RESTARTING FROM THIS LOCAL LOG */
- Uint32 lliUndoBufferSegmentP; /* POINTER TO A DISK BUFFER SEGMENT POINTER (UNDO) */
- Uint32 lliUndoFileHandle; /* FILE HANDLE OF UNDO LOG FILE */
- Uint32 lliUndoPage; /* UNDO PAGE IN BUFFER */
- Uint32 lliUndoWord;
- Uint32 lliUndoPagesToDiskWithoutSynch;
-};
-typedef Ptr<LocalLogInfo> LocalLogInfoPtr;
-
-struct Operationrec {
-// Easy to remove (2 words)
- Uint32 attroutbufLen;
- Uint32 logSize;
-
-// Needed (20 words)
- State tupleState;
- Uint32 prevActiveOp;
- Uint32 nextActiveOp;
- Uint32 nextOprecInList;
- Uint32 prevOprecInList;
- Uint32 tableRef;
- Uint32 fragId;
- Uint32 fragmentPtr;
- Uint32 fragPageId;
- Uint32 realPageId;
- bool undoLogged;
- Uint32 realPageIdC;
- Uint32 fragPageIdC;
- Uint32 firstAttrinbufrec;
- Uint32 lastAttrinbufrec;
- Uint32 attrinbufLen;
- Uint32 currentAttrinbufLen;
- Uint32 userpointer;
- State transstate;
- Uint32 savePointId;
-
-// Easy to remove (3 words)
- Uint32 tcOperationPtr;
- Uint32 transid1;
- Uint32 transid2;
-
-// Needed (2 words)
- Uint16 pageIndex;
- Uint16 pageOffset;
- Uint16 pageOffsetC;
- Uint16 pageIndexC;
-// Hard to remove
- Uint16 tupVersion;
-
-// Easy to remove (1.5 word)
- BlockReference recBlockref;
- BlockReference userblockref;
- Uint16 storedProcedureId;
-
- Uint8 inFragList;
- Uint8 inActiveOpList;
- Uint8 deleteInsertFlag;
-
-// Needed (1 word)
- Uint8 dirtyOp;
- Uint8 interpretedExec;
- Uint8 optype;
- Uint8 opSimple;
-
-// Used by triggers
- Uint32 primaryReplica;
- BlockReference coordinatorTC;
- Uint32 tcOpIndex;
- Uint32 gci;
- Uint32 noFiredTriggers;
- union {
- Uint32 hashValue; // only used in TUP_COMMITREQ
- Uint32 lastRow;
- };
- Bitmask<MAXNROFATTRIBUTESINWORDS> changeMask;
-};
-typedef Ptr<Operationrec> OperationrecPtr;
-
-struct Page {
- Uint32 pageWord[ZWORDS_ON_PAGE];
-};
-typedef Ptr<Page> PagePtr;
-
- /* ****************************** PAGE RANGE RECORD ************************** */
- /* PAGE RANGES AND BASE PAGE ID. EACH RANGE HAS A CORRESPONDING BASE PAGE ID */
- /* THAT IS USED TO CALCULATE REAL PAGE ID FROM A FRAGMENT PAGE ID AND A TABLE */
- /* REFERENCE. */
- /* THE PAGE RANGES ARE ORGANISED IN A B-TREE FASHION WHERE THE VARIABLE TYPE */
- /* SPECIFIES IF A LEAF NODE HAS BEEN REACHED. IF A LEAF NODE HAS BEEN REACHED */
- /* THEN BASE_PAGE_ID IS THE BASE_PAGE_ID OF THE SET OF PAGES THAT WAS */
- /* ALLOCATED IN THAT RANGE. OTHERWISE BASE_PAGE_ID IS THE POINTER TO THE NEXT */
- /* PAGE_RANGE RECORD. */
- /* *************************************************************************** */
-struct PageRange {
- Uint32 startRange[4]; /* START OF RANGE */
- Uint32 endRange[4]; /* END OF THIS RANGE */
- Uint32 basePageId[4]; /* BASE PAGE ID. */
-/*---- VARIABLE BASE_PAGE_ID2 (4) 8 DS NEEDED WHEN SUPPORTING 40 BIT PAGE ID -------*/
- Uint8 type[4]; /* TYPE OF BASE PAGE ID */
- Uint32 nextFree; /* NEXT FREE PAGE RANGE RECORD */
- Uint32 parentPtr; /* THE PARENT TO THE PAGE RANGE REC IN THE B-TREE */
- Uint8 currentIndexPos;
-};
-typedef Ptr<PageRange> PageRangePtr;
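The comment above describes how a fragment page id is translated into a real page id by locating the range that contains it and adding that range's base page id. A simplified, flat (non-B-tree) sketch of that translation with hypothetical names; the real code walks the PageRange tree rather than a vector:

    #include <cstdint>
    #include <vector>

    // Sketch only: flat version of the fragment-page-id -> real-page-id mapping
    // that the PageRange B-tree implements. An RNIL-style "not found" result is
    // returned as UINT32_MAX here.
    struct FlatPageRange {
      uint32_t startRange;   // first fragment page id covered by this range
      uint32_t endRange;     // last fragment page id covered by this range
      uint32_t basePageId;   // real page id corresponding to startRange
    };

    uint32_t realPageId(const std::vector<FlatPageRange>& ranges, uint32_t fragPageId)
    {
      for (const FlatPageRange& r : ranges)
        if (fragPageId >= r.startRange && fragPageId <= r.endRange)
          return r.basePageId + (fragPageId - r.startRange);
      return UINT32_MAX;     // no range covers this fragment page id
    }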
-
- /* *********** PENDING UNDO WRITE INFO ********** */
- /* THIS RECORD HOLDS INFORMATION NEEDED DURING */
- /* A FILE OPEN OPERATION */
- /* IF THE FILE OPEN IS A PART OF A CHECKPOINT THE */
- /* CHECKPOINT_INFO_P WILL HOLD A POINTER TO THE */
- /* CHECKPOINT_INFOR_PTR RECORD */
- /* IF IT IS A PART OF RESTART THE PFO_RESTART_INFO*/
- /* ELEMENT WILL POINT TO A RESTART INFO RECORD */
- /* ********************************************** */
-struct PendingFileOpenInfo {
- Uint32 pfoNextRec;
- State pfoOpenType;
- Uint32 pfoCheckpointInfoP;
- Uint32 pfoRestartInfoP;
-};
-typedef Ptr<PendingFileOpenInfo> PendingFileOpenInfoPtr;
-
-struct RestartInfoRecord {
- Uint32 sriNextRec;
- State sriState; /* BLOCKREFERENCE TO THE REQUESTING BLOCK */
- Uint32 sriUserptr; /* USERPOINTER TO THE REQUESTING BLOCK */
- Uint32 sriDataBufferSegmentP; /* POINTER TO A DISK BUFFER SEGMENT POINTER (DATA) */
- Uint32 sriDataFileHandle; /* FILE HANDLE TO THE OPEN DATA FILE */
- Uint32 sriCheckpointVersion; /* CHECKPOINT VERSION TO RESTART FROM */
- Uint32 sriFragid; /* FRAGMENT ID */
- Uint32 sriFragP; /* FRAGMENT POINTER */
- Uint32 sriTableId; /* TABLE ID */
- Uint32 sriLocalLogInfoP; /* POINTER TO A LOCAL LOG INFO RECORD */
- Uint32 sriNumDataPages; /* NUMBER OF DATA PAGES TO READ */
- Uint32 sriCurDataPageFromBuffer; /* THE CHECKPOINT IS COMPLETED */
- BlockReference sriBlockref;
-};
-typedef Ptr<RestartInfoRecord> RestartInfoRecordPtr;
-
- /* ************* TRIGGER DATA ************* */
- /* THIS RECORD FORMS LISTS OF ACTIVE */
- /* TRIGGERS FOR EACH TABLE. */
- /* THE RECORDS ARE MANAGED BY A TRIGGER */
- /* POOL WHERE A TRIGGER RECORD IS SEIZED */
- /* WHEN A TRIGGER IS ACTIVATED AND RELEASED */
- /* WHEN THE TRIGGER IS DEACTIVATED. */
- /* **************************************** */
-struct TupTriggerData {
-
- /**
- * Trigger id, used by DICT/TRIX to identify the trigger
- */
- Uint32 triggerId;
-
- /**
- * Index id is needed for ordered index.
- */
- Uint32 indexId;
-
- /**
- * Trigger type etc, defines what the trigger is used for
- */
- TriggerType::Value triggerType;
- TriggerActionTime::Value triggerActionTime;
- TriggerEvent::Value triggerEvent;
- /**
- * Receiver block
- */
- Uint32 m_receiverBlock;
-
- /**
- * Monitor all replicas, i.e. trigger will fire on all nodes where tuples
- * are stored
- */
- bool monitorReplicas;
-
- /**
- * Monitor all attributes, the trigger monitors all changes to attributes
- * in the table
- */
- bool monitorAllAttributes;
-
- /**
- * Send only changed attributes at trigger firing time.
- */
- bool sendOnlyChangedAttributes;
-
- /**
- * Send also before values at trigger firing time.
- */
- bool sendBeforeValues;
-
- /**
- * Attribute mask, defines what attributes are to be monitored
- * Can be seen as a compact representation of SQL column name list
- */
- Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask;
-
- /**
- * Next ptr (used in pool/list)
- */
- union {
- Uint32 nextPool;
- Uint32 nextList;
- };
-
- /**
- * Prev pointer (used in list)
- */
- Uint32 prevList;
-
- inline void print(NdbOut & s) const { s << "[TriggerData = " << triggerId << "]"; };
-};
-
-typedef Ptr<TupTriggerData> TriggerPtr;
-
-/**
- * Pool of trigger data record
- */
-ArrayPool<TupTriggerData> c_triggerPool;
-
- /* ************ TABLE RECORD ************ */
- /* THIS RECORD FORMS A LIST OF TABLE */
- /* REFERENCE INFORMATION. ONE RECORD */
- /* PER TABLE REFERENCE. */
- /* ************************************** */
-struct Tablerec {
- Tablerec(ArrayPool<TupTriggerData> & triggerPool) :
- afterInsertTriggers(triggerPool),
- afterDeleteTriggers(triggerPool),
- afterUpdateTriggers(triggerPool),
- subscriptionInsertTriggers(triggerPool),
- subscriptionDeleteTriggers(triggerPool),
- subscriptionUpdateTriggers(triggerPool),
- constraintUpdateTriggers(triggerPool),
- tuxCustomTriggers(triggerPool)
- {}
-
- Bitmask<MAXNROFATTRIBUTESINWORDS> notNullAttributeMask;
-
- ReadFunction* readFunctionArray;
- UpdateFunction* updateFunctionArray;
- CHARSET_INFO** charsetArray;
-
- Uint32 readKeyArray;
- Uint32 tabDescriptor;
- Uint32 attributeGroupDescriptor;
-
- bool GCPIndicator;
- bool checksumIndicator;
-
- Uint16 tupheadsize;
- Uint16 noOfAttr;
- Uint16 noOfKeyAttr;
- Uint16 noOfCharsets;
- Uint16 noOfNewAttr;
- Uint16 noOfNullAttr;
- Uint16 noOfAttributeGroups;
-
- Uint8 tupChecksumIndex;
- Uint8 tupNullIndex;
- Uint8 tupNullWords;
- Uint8 tupGCPIndex;
-
- // Lists of trigger data for active triggers
- ArrayList<TupTriggerData> afterInsertTriggers;
- ArrayList<TupTriggerData> afterDeleteTriggers;
- ArrayList<TupTriggerData> afterUpdateTriggers;
- ArrayList<TupTriggerData> subscriptionInsertTriggers;
- ArrayList<TupTriggerData> subscriptionDeleteTriggers;
- ArrayList<TupTriggerData> subscriptionUpdateTriggers;
- ArrayList<TupTriggerData> constraintUpdateTriggers;
-
- // List of ordered indexes
- ArrayList<TupTriggerData> tuxCustomTriggers;
-
- Uint32 fragid[2 * MAX_FRAG_PER_NODE];
- Uint32 fragrec[2 * MAX_FRAG_PER_NODE];
-
- struct {
- Uint32 tabUserPtr;
- Uint32 tabUserRef;
- } m_dropTable;
- State tableStatus;
-};
-
-typedef Ptr<Tablerec> TablerecPtr;
-
-struct storedProc {
- Uint32 storedLinkFirst;
- Uint32 storedLinkLast;
- Uint32 storedCounter;
- Uint32 nextPool;
- Uint16 storedCode;
- Uint16 storedProcLength;
-};
-
-typedef Ptr<storedProc> StoredProcPtr;
-
-ArrayPool<storedProc> c_storedProcPool;
-
-/* **************************** TABLE_DESCRIPTOR RECORD ******************************** */
-/* THIS VARIABLE IS USED TO STORE TABLE DESCRIPTIONS. A TABLE DESCRIPTION IS STORED AS A */
-/* CONTIGUOUS ARRAY IN THIS VARIABLE. WHEN A NEW TABLE IS ADDED A CHUNK IS ALLOCATED IN */
-/* THIS RECORD. WHEN ATTRIBUTES ARE ADDED TO THE TABLE, A NEW CHUNK OF PROPER SIZE IS */
-/* ALLOCATED AND ALL DATA IS COPIED TO THIS NEW CHUNK AND THEN THE OLD CHUNK IS PUT IN */
-/* THE FREE LIST. EACH TABLE IS DESCRIBED BY A NUMBER OF TABLE DESCRIPTIVE ATTRIBUTES */
-/* AND A NUMBER OF ATTRIBUTE DESCRIPTORS AS SHOWN IN FIGURE BELOW */
-/* */
-/* WHEN ALLOCATING A TABLE DESCRIPTOR THE SIZE IS ALWAYS A MULTIPLE OF 16 WORDS. */
-/* */
-/* ---------------------------------------------- */
-/* | TRAILER USED FOR ALLOC/DEALLOC | */
-/* ---------------------------------------------- */
-/* | TABLE DESCRIPTIVE ATTRIBUTES | */
-/* ---------------------------------------------- */
-/* | ATTRIBUTE DESCRIPTION 1 | */
-/* ---------------------------------------------- */
-/* | ATTRIBUTE DESCRIPTION 2 | */
-/* ---------------------------------------------- */
-/* | | */
-/* | | */
-/* | | */
-/* ---------------------------------------------- */
-/* | ATTRIBUTE DESCRIPTION N | */
-/* ---------------------------------------------- */
-/* */
-/* THE TABLE DESCRIPTIVE ATTRIBUTES CONTAINS THE FOLLOWING ATTRIBUTES: */
-/* */
-/* ---------------------------------------------- */
-/* | HEADER (TYPE OF INFO) | */
-/* ---------------------------------------------- */
-/* | SIZE OF WHOLE CHUNK (INCL. TRAILER) | */
-/* ---------------------------------------------- */
-/* | TABLE IDENTITY | */
-/* ---------------------------------------------- */
-/* | FRAGMENT IDENTITY | */
-/* ---------------------------------------------- */
-/* | NUMBER OF ATTRIBUTES | */
-/* ---------------------------------------------- */
-/* | SIZE OF FIXED ATTRIBUTES | */
-/* ---------------------------------------------- */
-/* | NUMBER OF NULL FIELDS | */
-/* ---------------------------------------------- */
-/* | NOT USED | */
-/* ---------------------------------------------- */
-/* */
-/* THESE ATTRIBUTES ARE ALL ONE R-VARIABLE IN THE RECORD. */
-/* NORMALLY ONLY ONE TABLE DESCRIPTOR IS USED. DURING SCHEMA CHANGES THERE COULD */
-/* HOWEVER EXIST MORE THAN ONE TABLE DESCRIPTION SINCE THE SCHEMA CHANGE OF VARIOUS */
-/* FRAGMENTS IS NOT SYNCHRONISED. THIS MEANS THAT THE SCHEMA HAS CHANGED */
-/* IN ALL FRAGMENTS, BUT THE FRAGMENTS HAVE NOT REMOVED THE ATTRIBUTES IN THE SAME */
-/* TIME-FRAME. THEREBY SOME ATTRIBUTE INFORMATION MIGHT DIFFER BETWEEN FRAGMENTS. */
-/* EXAMPLES OF ATTRIBUTES THAT MIGHT DIFFER ARE SIZE OF FIXED ATTRIBUTES, NUMBER OF */
-/* ATTRIBUTES, FIELD START WORD, START BIT. */
-/* */
-/* AN ATTRIBUTE DESCRIPTION CONTAINS THE FOLLOWING ATTRIBUTES: */
-/* */
-/* ---------------------------------------------- */
-/* | Field Type, 4 bits (LSB Bits) | */
-/* ---------------------------------------------- */
-/* | Attribute Size, 4 bits | */
-/* ---------------------------------------------- */
-/* | NULL indicator 1 bit | */
-/* ---------------------------------------------- */
-/* | Indicator if TUP stores attr. 1 bit | */
-/* ---------------------------------------------- */
-/* | Not used 6 bits | */
-/* ---------------------------------------------- */
-/* | No. of elements in fixed array 16 bits | */
-/* ---------------------------------------------- */
-/* ---------------------------------------------- */
-/* | Field Start Word, 21 bits (LSB Bits) | */
-/* ---------------------------------------------- */
-/* | NULL Bit, 11 bits | */
-/* ---------------------------------------------- */
-/* */
-/* THE ATTRIBUTE SIZE CAN BE 1,2,4,8,16,32,64 AND 128 BITS. */
-/* */
-/* THE UNUSED PARTS OF THE RECORDS ARE PUT IN A LINKED LIST OF FREE PARTS. EACH OF */
-/* THOSE FREE PARTS HAVE THREE RECORDS ASSIGNED AS SHOWN IN THIS STRUCTURE */
-/* ALL FREE PARTS ARE SET INTO A CHUNK LIST WHERE EACH CHUNK IS AT LEAST 16 WORDS */
-/* */
-/* ---------------------------------------------- */
-/* | HEADER = RNIL | */
-/* ---------------------------------------------- */
-/* | SIZE OF FREE AREA | */
-/* ---------------------------------------------- */
-/* | POINTER TO PREVIOUS FREE AREA | */
-/* ---------------------------------------------- */
-/* | POINTER TO NEXT FREE AREA | */
-/* ---------------------------------------------- */
-/* */
-/* IF THE POINTER TO THE NEXT AREA IS RNIL THEN THIS IS THE LAST FREE AREA. */
-/* */
-/*****************************************************************************************/
-struct TableDescriptor {
- Uint32 tabDescr;
-};
-typedef Ptr<TableDescriptor> TableDescriptorPtr;
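The layout comment above says a chunk holds the table-descriptive attributes, one attribute description per attribute and a trailer used for alloc/dealloc, and that the allocated size is always a multiple of 16 words. A hedged sketch of that size computation; the per-attribute and trailer word counts follow ZAD_SIZE and ZTD_TRAILER_SIZE, everything else is an assumption:

    // Sketch only: size in words of a table descriptor chunk, per the layout
    // comment above. descriptiveWords = the table-descriptive attribute section.
    const unsigned AD_WORDS         = 2;   // ZAD_SIZE: words per attribute description
    const unsigned TD_TRAILER_WORDS = 2;   // ZTD_TRAILER_SIZE: alloc/dealloc trailer
    const unsigned TD_CHUNK_STEP    = 16;  // chunks are multiples of 16 words

    unsigned tableDescriptorChunkWords(unsigned noOfAttributes, unsigned descriptiveWords)
    {
      unsigned raw = descriptiveWords + noOfAttributes * AD_WORDS + TD_TRAILER_WORDS;
      // Round up to the next multiple of 16 words, as the layout comment requires.
      return ((raw + TD_CHUNK_STEP - 1) / TD_CHUNK_STEP) * TD_CHUNK_STEP;
    }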
-
-struct HostBuffer {
- bool inPackedList;
- Uint32 packetLenTA;
- Uint32 noOfPacketsTA;
- Uint32 packetBufferTA[30];
-};
-typedef Ptr<HostBuffer> HostBufferPtr;
-
- /* **************** UNDO PAGE RECORD ******************* */
- /* THIS RECORD FORMS AN UNDO PAGE CONTAINING A NUMBER OF */
- /* DATA WORDS. CURRENTLY THERE ARE 8192 WORDS ON A PAGE */
- /* EACH OF 32 BITS (4 BYTES), WHICH FORMS AN UNDO PAGE */
- /* WITH A TOTAL OF 32768 BYTES (32 KB) */
- /* ***************************************************** */
-struct UndoPage {
- Uint32 undoPageWord[ZWORDS_ON_PAGE]; /* 32 KB */
-};
-typedef Ptr<UndoPage> UndoPagePtr;
-
- /*
- * Build index operation record.
- */
- struct BuildIndexRec {
- // request cannot use signal class due to extra members
- Uint32 m_request[BuildIndxReq::SignalLength];
- Uint32 m_triggerPtrI; // the index trigger
- Uint32 m_fragNo; // fragment number under Tablerec
- Uint32 m_pageId; // logical fragment page id
- Uint32 m_tupleNo; // tuple number on page (pageIndex >> 1)
- BuildIndxRef::ErrorCode m_errorCode;
- union {
- Uint32 nextPool;
- Uint32 nextList;
- };
- Uint32 prevList;
- };
- typedef Ptr<BuildIndexRec> BuildIndexPtr;
- ArrayPool<BuildIndexRec> c_buildIndexPool;
- ArrayList<BuildIndexRec> c_buildIndexList;
- Uint32 c_noOfBuildIndexRec;
-
-public:
- Dbtup(const class Configuration &);
- virtual ~Dbtup();
-
- /*
- * TUX uses logical tuple address when talking to ACC and LQH.
- */
- void tuxGetTupAddr(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32& tupAddr);
-
- /*
- * TUX index in TUP has single Uint32 array attribute which stores an
- * index node. TUX reads and writes the node directly via pointer.
- */
- int tuxAllocNode(Signal* signal, Uint32 fragPtrI, Uint32& pageId, Uint32& pageOffset, Uint32*& node);
- void tuxFreeNode(Signal* signal, Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* node);
- void tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& node);
-
- /*
- * TUX reads primary table attributes for index keys. Tuple is
- * specified by location of original tuple and version number. Input
- * is attribute ids in AttributeHeader format. Output is attribute
- * data with headers. Uses readAttributes with xfrm option set.
- * Returns number of words or negative (-terrorCode) on error.
- */
- int tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut);
-
- /*
- * TUX reads primary key without headers into an array of words. Used
- * for md5 summing and when returning keyinfo. Returns number of
- * words or negative (-terrorCode) on error.
- */
- int tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut, bool xfrmFlag);
-
- /*
- * ACC reads primary key without headers into an array of words. At
- * this point in ACC deconstruction, ACC still uses logical references
- * to fragment and tuple.
- */
- int accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag);
-
- /*
- * TUX checks if tuple is visible to scan.
- */
- bool tuxQueryTh(Uint32 fragPtrI, Uint32 tupAddr, Uint32 tupVersion, Uint32 transId1, Uint32 transId2, Uint32 savePointId);
-
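-  /*
-   * A minimal usage sketch (illustrative only, not part of the original
-   * interface): how a TUX-side caller might drive tuxReadAttrs(), which
-   * returns the number of words written or a negative value (-terrorCode)
-   * on error. The attribute ids and buffer size below are hypothetical;
-   * the input ids use the same AttributeHeader format as readAttributes.
-   *
-   *   Uint32 attrIds[2];
-   *   AttributeHeader::init(&attrIds[0], 0, 0);  // request attribute id 0
-   *   AttributeHeader::init(&attrIds[1], 1, 0);  // request attribute id 1
-   *   Uint32 dataOut[64];                        // arbitrary sketch size
-   *   int ret = tup->tuxReadAttrs(fragPtrI, pageId, pageOffset, tupVersion,
-   *                               attrIds, 2, dataOut);
-   *   if (ret < 0) {
-   *     // -ret is the TUP error code (terrorCode)
-   *   }
-   */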
-private:
- BLOCK_DEFINES(Dbtup);
-
- // Transit signals
- void execDEBUG_SIG(Signal* signal);
- void execCONTINUEB(Signal* signal);
-
- // Received signals
- void execDUMP_STATE_ORD(Signal* signal);
- void execSEND_PACKED(Signal* signal);
- void execSTTOR(Signal* signal);
- void execTUP_LCPREQ(Signal* signal);
- void execEND_LCPREQ(Signal* signal);
- void execSTART_RECREQ(Signal* signal);
- void execMEMCHECKREQ(Signal* signal);
- void execTUPSEIZEREQ(Signal* signal);
- void execTUPRELEASEREQ(Signal* signal);
- void execSTORED_PROCREQ(Signal* signal);
- void execTUPFRAGREQ(Signal* signal);
- void execTUP_ADD_ATTRREQ(Signal* signal);
- void execTUP_COMMITREQ(Signal* signal);
- void execTUP_ABORTREQ(Signal* signal);
- void execTUP_SRREQ(Signal* signal);
- void execTUP_PREPLCPREQ(Signal* signal);
- void execFSOPENCONF(Signal* signal);
- void execFSOPENREF(Signal* signal);
- void execFSCLOSECONF(Signal* signal);
- void execFSCLOSEREF(Signal* signal);
- void execFSWRITECONF(Signal* signal);
- void execFSWRITEREF(Signal* signal);
- void execFSREADCONF(Signal* signal);
- void execFSREADREF(Signal* signal);
- void execNDB_STTOR(Signal* signal);
- void execREAD_CONFIG_REQ(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
- void execDROP_TAB_REQ(Signal* signal);
- void execALTER_TAB_REQ(Signal* signal);
- void execFSREMOVECONF(Signal* signal);
- void execFSREMOVEREF(Signal* signal);
- void execTUP_ALLOCREQ(Signal* signal);
- void execTUP_DEALLOCREQ(Signal* signal);
- void execTUP_WRITELOG_REQ(Signal* signal);
-
- // Ordered index related
- void execBUILDINDXREQ(Signal* signal);
- void buildIndex(Signal* signal, Uint32 buildPtrI);
- void buildIndexReply(Signal* signal, const BuildIndexRec* buildRec);
-
- // Tup scan
- void execACC_SCANREQ(Signal* signal);
- void execNEXT_SCANREQ(Signal* signal);
- void execACC_CHECK_SCAN(Signal* signal);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
-// Methods to handle execution of TUPKEYREQ + ATTRINFO.
-//
-// Module Execution Manager
-//
-// The TUPKEYREQ signal is central to this block. This signal is used
-// by everybody that needs to read data residing in DBTUP. The data is
-// read using an interpreter approach.
-//
-// Operations only needing to read execute a simplified version of the
-// interpreter where the only instruction is "read attribute and send".
-// Operations only needing to update the record (insert or update)
-// execute a simplified version of the interpreter where the only
-// instruction is "write attribute".
-//
-// Currently TUPKEYREQ is used in the following situations.
-// 1) Normal transaction execution. Can be any of the types described
-// below.
-// 2) Execution of fragment redo log during system restart.
-// In this situation there will only be normal updates, inserts
-// and deletes performed.
-// 3) A special type of normal transaction execution is to write the
-// records arriving from the primary replica in the node restart
-//    processing. These are always normal write operations, which are
-//    translated to inserts or updates before arriving at TUP.
-// 4) Scan processing. The scan processing will use normal reads or
-// interpreted reads in their execution. There will be one TUPKEYREQ
-// signal for each record processed.
-// 5) Copy fragment processing. This is a special type of scan used in the
-//    primary replica at system restart. It reads the entire fragment and
-//    converts the reads into writes towards the starting node. In this
-//    special case LQH acts as an API node and also receives the ATTRINFO
-//    sent in the TRANSID_AI signals.
-//
-// Signal Diagram:
-//
-// In Signals:
-// -----------
-//
-// Logically there is one request, TUPKEYREQ, which requests to read/write
-// data of one tuple in the database. Since the definition of what to read
-// and write can be bigger than the maximum signal size, we segment the
-// signal. The definition of what to read/write and any interpreted program
-// is sent before the TUPKEYREQ signal.
-//
-// ---> ATTRINFO
-// ...
-// ---> ATTRINFO
-// ---> TUPKEYREQ
-// The number of ATTRINFO signals can be anything from 0 upwards.
-// The total size of the ATTRINFO is not allowed to exceed 16384 words.
-// There is always one and only one TUPKEYREQ.
-//
-// Response Signals (successful case):
-//
-// Simple/Dirty Read Operation
-// ---------------------------
-//
-// <---- TRANSID_AI (to API)
-// ...
-// <---- TRANSID_AI (to API)
-// <---- READCONF (to API)
-// <---- TUPKEYCONF (to LQH)
-// There is always exactly one READCONF sent last. The number of
-// TRANSID_AI signals depends on how much was read. The maximum size
-// of the ATTRINFO sent back is 16384 words. The signals are sent
-// directly to the application with an address provided by the
-// TUPKEYREQ signal.
-// A positive response signal is also sent to LQH.
-//
-// Normal Read Operation
-// ---------------------
-//
-// <---- TRANSID_AI (to API)
-// ...
-// <---- TRANSID_AI (to API)
-// <---- TUPKEYCONF (to LQH)
-// The number of TRANSID_AI signals depends on how much was read.
-// The maximum size of the ATTRINFO sent back is 16384 words. The
-// signals are sent directly to the application with an address
-// provided by the TUPKEYREQ signal.
-// A positive response signal is also sent to LQH.
-//
-// Normal update/insert/delete operation
-// -------------------------------------
-//
-// <---- TUPKEYCONF
-// After successful updating of the tuple LQH is informed of this.
-//
-// Delete with read
-// ----------------
-//
-// Will behave as a normal read although it also prepares the
-// deletion of the tuple.
-//
-// Interpreted Update
-// ------------------
-//
-// <---- TRANSID_AI (to API)
-// ...
-// <---- TRANSID_AI (to API)
-// <---- TUP_ATTRINFO (to LQH)
-// ...
-// <---- TUP_ATTRINFO (to LQH)
-// <---- TUPKEYCONF (to LQH)
-//
-// The interpreted Update contains five sections:
-// The first section performs read Attribute operations
-// that send results back to the API.
-//
-// The second section executes the interpreted program
-// where data from attributes can be updated and it
-// can also read attribute values into the registers.
-//
-// The third section performs unconditional updates of
-// attributes.
-//
-// The fourth section can read the attributes to be sent to the
-// API after updating the record.
-//
-// The fifth section contains subroutines used by the interpreter
-// in the second section.
-//
-// All types of interpreted programs contain the same five sections.
-// The only difference is that only interpreted updates can update
-// attributes. Interpreted inserts are not allowed.
-//
-// Interpreted Updates have to send back the information about the
-// attributes they have updated. This information will be shipped to
-// the log and also to any other replicas. Thus interpreted updates
-// are only performed in the primary replica. The fragment redo log
-// in LQH will contain information so that normal update/inserts/deletes
-// can be performed using TUPKEYREQ.
-//
-// Interpreted Read
-// ----------------
-//
-// From a signalling point of view the Interpreted Read behaves as
-// a Normal Read. The Interpreted Read is often used by scans.
-//
-// Interpreted Delete
-// ------------------
-//
-// <---- TUPKEYCONF
-// After successful preparation to delete the tuple, LQH is informed
-// of this.
-//
-// Interpreted Delete with Read
-// ----------------------------
-//
-// From a signalling point of view an interpreted delete with read
-// behaves as a normal read.
-//
-// Continuation after successful case:
-//
-// After a read of any kind the operation record is ready to be used
-// again by a new operation.
-//
-// Any update, insert or delete waits for one of two messages:
-// a commit specifying that the operation is to be performed for real,
-// or an abort specifying that the operation is to be rolled back and
-// the record restored to its original format.
-//
-// This is handled by the module Transaction Manager.
-//
-// Response Signals (unsuccessful case):
-//
-// <---- TUPKEYREF (to LQH)
-// A signal is sent back to LQH informing about the unsuccessful
-// operation. In this case TUP waits for an abort signal to arrive
-// before the operation record is ready for the next operation.
-// This is handled by the Transaction Manager.
-//------------------------------------------------------------------
-//------------------------------------------------------------------
-
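-// A condensed sketch of the protocol described above (illustrative only,
-// not part of the original block; the flow is paraphrased from the text):
-//
-//   sender side (e.g. LQH):
-//     send 0..n ATTRINFO signals carrying the read/write/interpreter
-//     definition (at most 16384 words in total), then exactly one
-//     TUPKEYREQ for the tuple.
-//   receiver side (this block):
-//     execATTRINFO() collects the definition for the operation record,
-//     execTUPKEYREQ() executes it and answers with TUPKEYCONF (plus
-//     TRANSID_AI data to the API for reads) or TUPKEYREF on error.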
-// *****************************************************************
-// Signal Reception methods.
-// *****************************************************************
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void execTUPKEYREQ(Signal* signal);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void execATTRINFO(Signal* signal);
-
-// Trigger signals
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void execCREATE_TRIG_REQ(Signal* signal);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void execDROP_TRIG_REQ(Signal* signal);
-
-// *****************************************************************
-// Support methods for ATTRINFO.
-// *****************************************************************
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void handleATTRINFOforTUPKEYREQ(Signal* signal,
- Uint32 length,
- Operationrec * const regOperPtr);
-
-// *****************************************************************
-// Setting up the environment for reads, inserts, updates and deletes.
-// *****************************************************************
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- int handleReadReq(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTabPtr,
- Page* pagePtr);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- int handleUpdateReq(Signal* signal,
- Operationrec* const regOperPtr,
- Fragrecord* const regFragPtr,
- Tablerec* const regTabPtr,
- Page* const pagePtr);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- int handleInsertReq(Signal* signal,
- Operationrec* const regOperPtr,
- Fragrecord* const regFragPtr,
- Tablerec* const regTabPtr,
- Page* const pagePtr);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- int handleDeleteReq(Signal* signal,
- Operationrec* const regOperPtr,
- Fragrecord* const regFragPtr,
- Tablerec* const regTabPtr,
- Page* const pagePtr);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- int updateStartLab(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTabPtr,
- Page* const pagePtr);
-
-// *****************************************************************
-// Interpreter Handling methods.
-// *****************************************************************
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- int interpreterStartLab(Signal* signal,
- Page* const pagePtr,
- Uint32 TupHeadOffset);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- int interpreterNextLab(Signal* signal,
- Page* const pagePtr,
- Uint32 TupHeadOffset,
- Uint32* logMemory,
- Uint32* mainProgram,
- Uint32 TmainProgLen,
- Uint32* subroutineProg,
- Uint32 TsubroutineLen,
- Uint32 * tmpArea,
- Uint32 tmpAreaSz);
-
-// *****************************************************************
-// Signal Sending methods.
-// *****************************************************************
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void sendReadAttrinfo(Signal* signal,
- Uint32 TnoOfData,
- const Operationrec * const regOperPtr);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void sendLogAttrinfo(Signal* signal,
- Uint32 TlogSize,
- Operationrec * const regOperPtr);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void sendTUPKEYCONF(Signal* signal, Operationrec *
- const regOperPtr,
- Uint32 TlogSize);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
-// *****************************************************************
-// The methods that perform the actual read and update of attributes
-// in the tuple.
-// *****************************************************************
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- int readAttributes(Page* const pagePtr,
- Uint32 TupHeadOffset,
- const Uint32* inBuffer,
- Uint32 inBufLen,
- Uint32* outBuffer,
- Uint32 TmaxRead,
- bool xfrmFlag);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- int readAttributesWithoutHeader(Page* const pagePtr,
- Uint32 TupHeadOffset,
- Uint32* inBuffer,
- Uint32 inBufLen,
- Uint32* outBuffer,
- Uint32* attrBuffer,
- Uint32 TmaxRead);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- int updateAttributes(Page* const pagePtr,
- Uint32 TupHeadOffset,
- Uint32* inBuffer,
- Uint32 inBufLen);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readFixedSizeTHOneWordNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateFixedSizeTHOneWordNotNULL(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readFixedSizeTHTwoWordNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateFixedSizeTHTwoWordNotNULL(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readFixedSizeTHManyWordNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readFixedSizeTHOneWordNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateFixedSizeTHOneWordNULLable(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readFixedSizeTHTwoWordNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateFixedSizeTHTwoWordNULLable(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readFixedSizeTHManyWordNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readFixedSizeTHZeroWordNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateFixedSizeTHManyWordNULLable(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readVariableSizedAttr(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateVariableSizedAttr(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readVarSizeUnlimitedNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateVarSizeUnlimitedNotNULL(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readVarSizeUnlimitedNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateVarSizeUnlimitedNULLable(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readBigVarSizeNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateBigVarSizeNotNULL(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readBigVarSizeNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateBigVarSizeNULLable(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readSmallVarSizeNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateSmallVarSizeNotNULL(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readSmallVarSizeNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateSmallVarSizeNULLable(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readDynFixedSize(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateDynFixedSize(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readDynVarSizeUnlimited(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateDynVarSizeUnlimited(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readDynBigVarSize(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateDynBigVarSize(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool readDynSmallVarSize(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool updateDynSmallVarSize(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
-
-
- bool readBitsNULLable(Uint32* outBuffer, AttributeHeader*, Uint32, Uint32);
- bool updateBitsNULLable(Uint32* inBuffer, Uint32, Uint32);
- bool readBitsNotNULL(Uint32* outBuffer, AttributeHeader*, Uint32, Uint32);
- bool updateBitsNotNULL(Uint32* inBuffer, Uint32, Uint32);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- bool nullFlagCheck(Uint32 attrDes2);
- Uint32 read_psuedo(Uint32 attrId, Uint32* outBuffer);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void setUpQueryRoutines(Tablerec* const regTabPtr);
-
-// *****************************************************************
-// Service methods.
-// *****************************************************************
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void copyAttrinfo(Signal* signal, Operationrec * const regOperPtr, Uint32* inBuffer);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void initOpConnection(Operationrec* regOperPtr, Fragrecord*);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void initOperationrec(Signal* signal);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- int initStoredOperationrec(Operationrec* const regOperPtr,
- Uint32 storedId);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void insertActiveOpList(Signal* signal,
- OperationrecPtr regOperPtr,
- Page * const pagePtr,
- Uint32 pageOffset);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void linkOpIntoFragList(OperationrecPtr regOperPtr,
- Fragrecord* const regFragPtr);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void bufferTRANSID_AI(Signal* signal, BlockReference aRef, Uint32 Tlen);
-
-//------------------------------------------------------------------
-// Trigger handling routines
-//------------------------------------------------------------------
- ArrayList<TupTriggerData>* findTriggerList(Tablerec* table,
- TriggerType::Value ttype,
- TriggerActionTime::Value ttime,
- TriggerEvent::Value tevent);
-
- bool createTrigger(Tablerec* table, const CreateTrigReq* req);
-
- Uint32 dropTrigger(Tablerec* table, const DropTrigReq* req);
-
- void checkImmediateTriggersAfterInsert(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const tablePtr);
-
- void checkImmediateTriggersAfterUpdate(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const tablePtr);
-
- void checkImmediateTriggersAfterDelete(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const tablePtr);
-
-#if 0
- void checkDeferredTriggers(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTablePtr);
-#endif
- void checkDetachedTriggers(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTablePtr);
-
- void fireImmediateTriggers(Signal* signal,
- ArrayList<TupTriggerData>& triggerList,
- Operationrec* const regOperPtr);
-
- void fireDeferredTriggers(Signal* signal,
- ArrayList<TupTriggerData>& triggerList,
- Operationrec* const regOperPtr);
-
- void fireDetachedTriggers(Signal* signal,
- ArrayList<TupTriggerData>& triggerList,
- Operationrec* const regOperPtr);
-
- void executeTriggers(Signal* signal,
- ArrayList<TupTriggerData>& triggerList,
- Operationrec* const regOperPtr);
-
- void executeTrigger(Signal* signal,
- TupTriggerData* const trigPtr,
- Operationrec* const regOperPtr);
-
- bool readTriggerInfo(TupTriggerData* const trigPtr,
- Operationrec* const regOperPtr,
- Uint32* const keyBuffer,
- Uint32& noPrimKey,
- Uint32* const mainBuffer,
- Uint32& noMainWords,
- Uint32* const copyBuffer,
- Uint32& noCopyWords);
-
- void sendTrigAttrInfo(Signal* signal,
- Uint32* data,
- Uint32 dataLen,
- bool executeDirect,
- BlockReference receiverReference);
-
- Uint32 setAttrIds(Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask,
- Uint32 noOfAttributes,
- Uint32* inBuffer);
-
- void sendFireTrigOrd(Signal* signal,
- Operationrec * const regOperPtr,
- TupTriggerData* const trigPtr,
- Uint32 noPrimKeySignals,
- Uint32 noBeforeSignals,
- Uint32 noAfterSignals);
-
- bool primaryKey(Tablerec* const, Uint32);
-
- // these set terrorCode and return non-zero on error
-
- int executeTuxInsertTriggers(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTabPtr);
-
- int executeTuxUpdateTriggers(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTabPtr);
-
- int executeTuxDeleteTriggers(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTabPtr);
-
- int addTuxEntries(Signal* signal,
- Operationrec* regOperPtr,
- Tablerec* regTabPtr);
-
- // these crash the node on error
-
- void executeTuxCommitTriggers(Signal* signal,
- Operationrec* regOperPtr,
- Tablerec* const regTabPtr);
-
- void executeTuxAbortTriggers(Signal* signal,
- Operationrec* regOperPtr,
- Tablerec* const regTabPtr);
-
- void removeTuxEntries(Signal* signal,
- Operationrec* regOperPtr,
- Tablerec* regTabPtr);
-
-// *****************************************************************
-// Error Handling routines.
-// *****************************************************************
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- int TUPKEY_abort(Signal* signal, int error_type);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void tupkeyErrorLab(Signal* signal);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
-// Methods to handle execution of TUP_COMMITREQ + TUP_ABORTREQ.
-//
-// Module Transaction Manager
-//
-// The Transaction Manager module is responsible for the commit
-// and abort of operations started by the Execution Manager.
-//
-// Commit Operation:
-// ----------------
-//
-// Failures in commit processing are not allowed since they would
-// leave the database in an unreliable state. Thus the only way
-// to handle a failure in commit processing is to crash the node.
-//
-// TUP_COMMITREQ can only be received in the wait state after a
-// successful TUPKEYREQ which was not a read operation.
-//
-// Commit of Delete:
-// -----------------
-//
-// This will actually perform the deletion of the record unless
-// other operations are also connected to the record. In that case
-// we will set the delete state on the record that becomes the owner
-// of the record.
-//
-// Commit of Update:
-// ----------------
-//
-// We will release the copy record where the original record was kept.
-// Here, too, we will take special care if more operations are updating
-// the record simultaneously.
-//
-// Commit of Insert:
-// -----------------
-//
-// Will simply reset the state of the operation record.
-//
-// Signal Diagram:
-// ---> TUP_COMMITREQ (from LQH)
-// <---- TUP_COMMITCONF (to LQH)
-//
-//
-// Abort Operation:
-// ----------------
-//
-// Signal Diagram:
-// ---> TUP_ABORTREQ (from LQH)
-// <---- TUP_ABORTCONF (to LQH)
-//
-// Failures in abort processing are not allowed since they would
-// leave the database in an unreliable state. Thus the only way
-// to handle a failure in abort processing is to crash the node.
-//
-// Abort messages can arrive at any time. They can arrive even before
-// anything at all of the operation has arrived. They can arrive after
-// a number of ATTRINFO signals have been received but before TUPKEYREQ
-// has been received. They will arrive after we have sent TUPKEYREF in
-// response to TUPKEYREQ, and finally they can arrive after successfully
-// performing the TUPKEYREQ in all cases, including the read case.
-//------------------------------------------------------------------
-//------------------------------------------------------------------
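-// A condensed sketch of the commit/abort exchange described above
-// (illustrative only, not part of the original block):
-//
-//   LQH ---> TUP_COMMITREQ   handled by execTUP_COMMITREQ(); a failure
-//                            here crashes the node, otherwise
-//                            LQH <--- TUP_COMMITCONF is sent.
-//   LQH ---> TUP_ABORTREQ    handled by execTUP_ABORTREQ(), which rolls
-//                            the operation back; LQH <--- TUP_ABORTCONF.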
-
-#if 0
- void checkPages(Fragrecord* const regFragPtr);
-#endif
- void printoutTuplePage(Uint32 fragid, Uint32 pageid, Uint32 printLimit);
-
- bool checkUpdateOfPrimaryKey(Uint32* updateBuffer, Tablerec* const regTabPtr);
-
- void setNullBits(Page* const regPage, Tablerec* const regTabPtr, Uint32 pageOffset);
- bool checkNullAttributes(Operationrec* const, Tablerec* const);
- bool getPage(PagePtr& pagePtr,
- Operationrec* const regOperPtr,
- Fragrecord* const regFragPtr,
- Tablerec* const regTabPtr);
-
- bool getPageLastCommitted(Operationrec* const regOperPtr,
- Operationrec* const leaderOpPtr);
-
- bool getPageThroughSavePoint(Operationrec* const regOperPtr,
- Operationrec* const leaderOpPtr);
-
- Uint32 calculateChecksum(Page* const pagePtr, Uint32 tupHeadOffset, Uint32 tupHeadSize);
- void setChecksum(Page* const pagePtr, Uint32 tupHeadOffset, Uint32 tupHeadSize);
-
- void commitSimple(Signal* signal,
- Operationrec* const regOperPtr,
- Fragrecord* const regFragPtr,
- Tablerec* const regTabPtr);
-
- void commitRecord(Signal* signal,
- Operationrec* const regOperPtr,
- Fragrecord* const regFragPtr,
- Tablerec* const regTabPtr);
-
- void setTupleStatesSetOpType(Operationrec* const regOperPtr,
- Page* const pagePtr,
- Uint32& opType,
- OperationrecPtr& firstOpPtr);
-
- void findBeforeValueOperation(OperationrecPtr& befOpPtr,
- OperationrecPtr firstOpPtr);
-
- void calculateChangeMask(Page* const PagePtr,
- Tablerec* const regTabPtr,
- Uint32 pageOffset,
- Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask);
-
- void updateGcpId(Signal* signal,
- Operationrec* const regOperPtr,
- Fragrecord* const regFragPtr,
- Tablerec* const regTabPtr);
-
- void abortUpdate(Signal* signal,
- Operationrec* const regOperPtr,
- Fragrecord* const regFragPtr,
- Tablerec* const regTabPtr);
- void commitUpdate(Signal* signal,
- Operationrec* const regOperPtr,
- Fragrecord* const regFragPtr,
- Tablerec* const regTabPtr);
-
- void setTupleStateOnPreviousOps(Uint32 prevOpIndex);
- void copyMem(Signal* signal, Uint32 sourceIndex, Uint32 destIndex);
-
- void freeAllAttrBuffers(Operationrec* const regOperPtr);
- void freeAttrinbufrec(Uint32 anAttrBufRec);
- void removeActiveOpList(Operationrec* const regOperPtr);
-
- void updatePackedList(Signal* signal, Uint16 ahostIndex);
-
- void setUpDescriptorReferences(Uint32 descriptorReference,
- Tablerec* const regTabPtr,
- const Uint32* offset);
- void setUpKeyArray(Tablerec* const regTabPtr);
- bool addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex);
- void deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId);
- void abortAddFragOp(Signal* signal);
- void releaseTabDescr(Tablerec* const regTabPtr);
- void getFragmentrec(FragrecordPtr& regFragPtr, Uint32 fragId, Tablerec* const regTabPtr);
-
- void initialiseRecordsLab(Signal* signal, Uint32 switchData, Uint32, Uint32);
- void initializeAttrbufrec();
- void initializeCheckpointInfoRec();
- void initializeDiskBufferSegmentRecord();
- void initializeFragoperrec();
- void initializeFragrecord();
- void initializeHostBuffer();
- void initializeLocalLogInfo();
- void initializeOperationrec();
- void initializePendingFileOpenInfoRecord();
- void initializeRestartInfoRec();
- void initializeTablerec();
- void initializeTabDescr();
- void initializeUndoPage();
-
- void initTab(Tablerec* const regTabPtr);
-
- void startphase3Lab(Signal* signal, Uint32 config1, Uint32 config2);
-
- void fragrefuseLab(Signal* signal, FragoperrecPtr fragOperPtr);
- void fragrefuse1Lab(Signal* signal, FragoperrecPtr fragOperPtr);
- void fragrefuse2Lab(Signal* signal, FragoperrecPtr fragOperPtr, FragrecordPtr regFragPtr);
- void fragrefuse3Lab(Signal* signal,
- FragoperrecPtr fragOperPtr,
- FragrecordPtr regFragPtr,
- Tablerec* const regTabPtr,
- Uint32 fragId);
- void fragrefuse4Lab(Signal* signal,
- FragoperrecPtr fragOperPtr,
- FragrecordPtr regFragPtr,
- Tablerec* const regTabPtr,
- Uint32 fragId);
- void addattrrefuseLab(Signal* signal,
- FragrecordPtr regFragPtr,
- FragoperrecPtr fragOperPtr,
- Tablerec* const regTabPtr,
- Uint32 fragId);
-
-
- void checkLcpActiveBufferPage(Uint32 minPageNotWrittenInCheckpoint, DiskBufferSegmentInfoPtr dbsiPtr);
- void lcpWriteListDataPageSegment(Signal* signal,
- DiskBufferSegmentInfoPtr dbsiPtr,
- CheckpointInfoPtr ciPtr,
- bool flushFlag);
- void lcpFlushLogLab(Signal* signal, CheckpointInfoPtr ciPtr);
- void lcpClosedDataFileLab(Signal* signal, CheckpointInfoPtr ciPtr);
- void lcpEndconfLab(Signal* signal);
- void lcpSaveDataPageLab(Signal* signal, Uint32 ciIndex);
- void lcpCompletedLab(Signal* signal, Uint32 ciIndex);
- void lcpFlushRestartInfoLab(Signal* signal, Uint32 ciIndex);
- void lcpSaveCopyListLab(Signal* signal, CheckpointInfoPtr ciPtr);
-
- void sendFSREMOVEREQ(Signal* signal, TablerecPtr tabPtr);
- void releaseFragment(Signal* signal, Uint32 tableId);
-
- void allocDataBufferSegment(Signal* signal, DiskBufferSegmentInfoPtr& dbsiPtr);
- void allocRestartUndoBufferSegment(Signal* signal, DiskBufferSegmentInfoPtr& dbsiPtr, LocalLogInfoPtr lliPtr);
- void freeDiskBufferSegmentRecord(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr);
- void freeUndoBufferPages(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr);
-
- void releaseCheckpointInfoRecord(CheckpointInfoPtr ciPtr);
- void releaseDiskBufferSegmentRecord(DiskBufferSegmentInfoPtr dbsiPtr);
- void releaseFragoperrec(FragoperrecPtr fragOperPtr);
- void releaseFragrec(FragrecordPtr regFragPtr);
- void releasePendingFileOpenInfoRecord(PendingFileOpenInfoPtr pfoPtr);
- void releaseRestartInfoRecord(RestartInfoRecordPtr riPtr);
-
- void seizeDiskBufferSegmentRecord(DiskBufferSegmentInfoPtr& dbsiPtr);
- void seizeCheckpointInfoRecord(CheckpointInfoPtr& ciPtr);
- void seizeFragoperrec(FragoperrecPtr& fragOperPtr);
- void seizeFragrecord(FragrecordPtr& regFragPtr);
- void seizeOpRec(OperationrecPtr& regOperPtr);
- void seizePendingFileOpenInfoRecord(PendingFileOpenInfoPtr& pfoiPtr);
- void seizeRestartInfoRecord(RestartInfoRecordPtr& riPtr);
-
- // Initialisation
- void initData();
- void initRecords();
-
- void rfrClosedDataFileLab(Signal* signal, Uint32 restartIndex);
- void rfrCompletedLab(Signal* signal, RestartInfoRecordPtr riPtr);
- void rfrInitRestartInfoLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr);
- void rfrLoadDataPagesLab(Signal* signal, RestartInfoRecordPtr riPtr, DiskBufferSegmentInfoPtr dbsiPtr);
- void rfrReadFirstUndoSegment(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr);
- void rfrReadNextDataSegment(Signal* signal, RestartInfoRecordPtr riPtr, DiskBufferSegmentInfoPtr dbsiPtr);
- void rfrReadNextUndoSegment(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr);
- void rfrReadRestartInfoLab(Signal* signal, RestartInfoRecordPtr riPtr);
- void rfrReadSecondUndoLogLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr);
-
- void startExecUndoLogLab(Signal* signal, Uint32 lliIndex);
- void readExecUndoLogLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr);
- void closeExecUndoLogLab(Signal* signal, LocalLogInfoPtr lliPtr);
- void endExecUndoLogLab(Signal* signal, Uint32 lliIndex);
-
- struct XlcStruct {
- Uint32 PageId;
- Uint32 PageIndex;
- Uint32 LogRecordType;
- Uint32 FragId;
- FragrecordPtr FragPtr;
- LocalLogInfoPtr LliPtr;
- DiskBufferSegmentInfoPtr DbsiPtr;
- UndoPagePtr UPPtr;
- TablerecPtr TabPtr;
- };
-
- void xlcGetNextRecordLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr);
- void xlcRestartCompletedLab(Signal* signal);
-
- void xlcCopyData(XlcStruct& xlcStruct, Uint32 pageOffset, Uint32 noOfWords, PagePtr pagePtr);
- void xlcGetLogHeader(XlcStruct& xlcStruct);
- Uint32 xlcGetLogWord(XlcStruct& xlcStruct);
-
- void xlcAbortInsert(Signal* signal, XlcStruct& xlcStruct);
- void xlcAbortUpdate(Signal* signal, XlcStruct& xlcStruct);
- void xlcDeleteTh(XlcStruct& xlcStruct);
- void xlcIndicateNoOpActive(XlcStruct& xlcStruct);
- void xlcInsertTh(XlcStruct& xlcStruct);
- void xlcTableDescriptor(XlcStruct& xlcStruct);
- void xlcUndoLogPageHeader(XlcStruct& xlcStruct);
- void xlcUpdateTh(XlcStruct& xlcStruct);
- void xlcUpdateGCI(XlcStruct& xlcStruct);
-
-
- void cprAddData(Signal* signal,
- Fragrecord* const regFragPtr,
- Uint32 pageIndex,
- Uint32 noOfWords,
- Uint32 startOffset);
- void cprAddGCIUpdate(Signal* signal,
- Uint32 prevGCI,
- Fragrecord* const regFragPtr);
- void cprAddLogHeader(Signal* signal,
- LocalLogInfo* const lliPtr,
- Uint32 recordType,
- Uint32 tableId,
- Uint32 fragId);
- void cprAddUndoLogPageHeader(Signal* signal,
- Page* const regPagePtr,
- Fragrecord* const regFragPtr);
- void cprAddUndoLogRecord(Signal* signal,
- Uint32 recordType,
- Uint32 pageId,
- Uint32 pageIndex,
- Uint32 tableId,
- Uint32 fragId,
- Uint32 localLogIndex);
- void cprAddAbortUpdate(Signal* signal,
- LocalLogInfo* const lliPtr,
- Operationrec* const regOperPtr);
- void cprAddUndoLogWord(Signal* signal,
- LocalLogInfo* const lliPtr,
- Uint32 undoWord);
- bool isUndoLoggingNeeded(Fragrecord* const regFragPtr, Uint32 pageId);
- bool isUndoLoggingActive(Fragrecord* const regFragPtr);
- bool isUndoLoggingBlocked(Fragrecord* const regFragPtr);
- bool isPageUndoLogged(Fragrecord* const regFragPtr, Uint32 pageId);
-
- void seizeUndoBufferSegment(Signal* signal, UndoPagePtr& regUndoPagePtr);
- void lcpWriteUndoSegment(Signal* signal, LocalLogInfo* const lliPtr, bool flushFlag);
-
-
- void deleteScanProcedure(Signal* signal, Operationrec* regOperPtr);
- void copyProcedure(Signal* signal,
- TablerecPtr regTabPtr,
- Operationrec* regOperPtr);
- void scanProcedure(Signal* signal,
- Operationrec* regOperPtr,
- Uint32 lenAttrInfo);
- void storedSeizeAttrinbufrecErrorLab(Signal* signal,
- Operationrec* regOperPtr);
- bool storedProcedureAttrInfo(Signal* signal,
- Operationrec* regOperPtr,
- Uint32 length,
- Uint32 firstWord,
- bool copyProc);
-
-//-----------------------------------------------------------------------------
-// Table Descriptor Memory Manager
-//-----------------------------------------------------------------------------
-
-// Public methods
- Uint32 getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset);
- Uint32 allocTabDescr(const Tablerec* regTabPtr, Uint32* offset);
- void freeTabDescr(Uint32 retRef, Uint32 retNo);
- Uint32 getTabDescrWord(Uint32 index);
- void setTabDescrWord(Uint32 index, Uint32 word);
-
-// Private methods
- Uint32 sizeOfReadFunction();
- void removeTdArea(Uint32 tabDesRef, Uint32 list);
- void insertTdArea(Uint32 sizeOfChunk, Uint32 tabDesRef, Uint32 list);
- Uint32 itdaMergeTabDescr(Uint32 retRef, Uint32 retNo);
-
-//------------------------------------------------------------------------------------------------------
-// Page Memory Manager
-//------------------------------------------------------------------------------------------------------
-
-// Public methods
- void allocConsPages(Uint32 noOfPagesToAllocate,
- Uint32& noOfPagesAllocated,
- Uint32& allocPageRef);
- void returnCommonArea(Uint32 retPageRef, Uint32 retNo);
- void initializePage();
-
-// Private methods
- void removeCommonArea(Uint32 remPageRef, Uint32 list);
- void insertCommonArea(Uint32 insPageRef, Uint32 list);
- void findFreeLeftNeighbours(Uint32& allocPageRef, Uint32& noPagesAllocated, Uint32 noPagesToAllocate);
- void findFreeRightNeighbours(Uint32& allocPageRef, Uint32& noPagesAllocated, Uint32 noPagesToAllocate);
- Uint32 nextHigherTwoLog(Uint32 input);
-
-// Private data
- Uint32 cfreepageList[16];
-
-//------------------------------------------------------------------------------------------------------
-// Page Mapper, convert logical page id's to physical page id's
-// The page mapper also handles the pages allocated to the fragment.
-//------------------------------------------------------------------------------------------------------
-//
-// Public methods
- Uint32 getRealpid(Fragrecord* const regFragPtr, Uint32 logicalPageId);
- Uint32 getNoOfPages(Fragrecord* const regFragPtr);
- void initPageRangeSize(Uint32 size);
- bool insertPageRangeTab(Fragrecord* const regFragPtr,
- Uint32 startPageId,
- Uint32 noPages);
- void releaseFragPages(Fragrecord* const regFragPtr);
- void initFragRange(Fragrecord* const regFragPtr);
- void initializePageRange();
- Uint32 getEmptyPage(Fragrecord* const regFragPtr);
- Uint32 allocFragPages(Fragrecord* const regFragPtr, Uint32 noOfPagesAllocated);
-
-// Private methods
- Uint32 leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr currPageRangePtr);
- void releasePagerange(PageRangePtr regPRPtr);
- void seizePagerange(PageRangePtr& regPageRangePtr);
- void errorHandler(Uint32 errorCode);
- void allocMoreFragPages(Fragrecord* const regFragPtr);
-
-// Private data
- Uint32 cfirstfreerange;
- PageRange *pageRange;
- Uint32 c_noOfFreePageRanges;
- Uint32 cnoOfPageRangeRec;
-
-//------------------------------------------------------------------------------------------------------
-// Fixed Allocator
-// Allocates and deallocates tuples of fixed size on a fragment.
-//------------------------------------------------------------------------------------------------------
-//
-// Public methods
- bool allocTh(Fragrecord* const regFragPtr,
- Tablerec* const regTabPtr,
- Uint32 pageType,
- Signal* signal,
- Uint32& pageOffset,
- PagePtr& pagePtr);
-
- void freeThSr(Tablerec* const regTabPtr,
- Page* const regPagePtr,
- Uint32 freePageOffset);
-
- void freeTh(Fragrecord* const regFragPtr,
- Tablerec* const regTabPtr,
- Signal* signal,
- Page* const regPagePtr,
- Uint32 freePageOffset);
-
- void getThAtPageSr(Page* const regPagePtr,
- Uint32& pageOffset);
-
-// Private methods
- void convertThPage(Uint32 Tupheadsize,
- Page* const regPagePtr);
-
- void getThAtPage(Fragrecord* const regFragPtr,
- Page* const regPagePtr,
- Signal* signal,
- Uint32& pageOffset);
-
- void getEmptyPageThCopy(Fragrecord* const regFragPtr,
- Signal* signal,
- Page* const regPagePtr);
-
- void getEmptyPageTh(Fragrecord* const regFragPtr,
- Signal* signal,
- Page* const regPagePtr);
-
-//------------------------------------------------------------------------------------------------------
-// Temporary variables used for storing commonly used variables in certain modules
-//------------------------------------------------------------------------------------------------------
-
- FragrecordPtr fragptr;
- OperationrecPtr operPtr;
- TablerecPtr tabptr;
-
-// readAttributes and updateAttributes module
- Uint32 tCheckOffset;
- Uint32 tMaxRead;
- Uint32 tOutBufIndex;
- Uint32* tTupleHeader;
- bool tXfrmFlag;
-
-// updateAttributes module
- Uint32 tInBufIndex;
- Uint32 tInBufLen;
-
- Uint32 terrorCode;
-
-//------------------------------------------------------------------------------------------------------
-// Common stored variables. Variables that have a valid value always.
-//------------------------------------------------------------------------------------------------------
- Uint32 cnoOfLcpRec;
- Uint32 cnoOfParallellUndoFiles;
- Uint32 cnoOfUndoPage;
-
- Attrbufrec *attrbufrec;
- Uint32 cfirstfreeAttrbufrec;
- Uint32 cnoOfAttrbufrec;
- Uint32 cnoFreeAttrbufrec;
-
- CheckpointInfo *checkpointInfo;
- Uint32 cfirstfreeLcp;
-
- DiskBufferSegmentInfo *diskBufferSegmentInfo;
- Uint32 cfirstfreePdx;
- Uint32 cnoOfConcurrentWriteOp;
-
- Fragoperrec *fragoperrec;
- Uint32 cfirstfreeFragopr;
- Uint32 cnoOfFragoprec;
-
- Fragrecord *fragrecord;
- Uint32 cfirstfreefrag;
- Uint32 cnoOfFragrec;
-
- HostBuffer *hostBuffer;
-
- LocalLogInfo *localLogInfo;
- Uint32 cnoOfLocalLogInfo;
-
- Uint32 cfirstfreeOprec;
- Operationrec *operationrec;
- Uint32 cnoOfOprec;
-
- Page *page;
- Uint32 cnoOfPage;
- Uint32 cnoOfAllocatedPages;
-
- PendingFileOpenInfo *pendingFileOpenInfo;
- Uint32 cfirstfreePfo;
- Uint32 cnoOfConcurrentOpenOp;
-
- RestartInfoRecord *restartInfoRecord;
- Uint32 cfirstfreeSri;
- Uint32 cnoOfRestartInfoRec;
-
- Tablerec *tablerec;
- Uint32 cnoOfTablerec;
-
- TableDescriptor *tableDescriptor;
- Uint32 cnoOfTabDescrRec;
-
- UndoPage *undoPage;
- Uint32 cfirstfreeUndoSeg;
- Int32 cnoFreeUndoSeg;
-
-
-
- Uint32 cnoOfDataPagesToDiskWithoutSynch;
-
- Uint32 cdata[32];
- Uint32 cdataPages[16];
- Uint32 cpackedListIndex;
- Uint32 cpackedList[MAX_NODES];
- Uint32 cfreeTdList[16];
- Uint32 clastBitMask;
- Uint32 clblPageCounter;
- Uint32 clblPagesPerTick;
- Uint32 clblPagesPerTickAfterSr;
- BlockReference clqhBlockref;
- Uint32 clqhUserpointer;
- Uint32 cminusOne;
- BlockReference cndbcntrRef;
- Uint32 cundoFileVersion;
- BlockReference cownref;
- Uint32 cownNodeId;
- Uint32 czero;
-
- // A little bit bigger to cover overwrites in copy algorithms (16384 real size).
-#define ZATTR_BUFFER_SIZE 16384
- Uint32 clogMemBuffer[ZATTR_BUFFER_SIZE + 16];
- Uint32 coutBuffer[ZATTR_BUFFER_SIZE + 16];
- Uint32 cinBuffer[ZATTR_BUFFER_SIZE + 16];
- Uint32 totNoOfPagesAllocated;
-
- // Trigger variables
- Uint32 c_maxTriggersPerTable;
-
- // Counters for num UNDO log records executed
- Uint32 cSrUndoRecords[9];
-
- STATIC_CONST(MAX_PARALLELL_TUP_SRREQ = 2);
- Uint32 c_sr_free_page_0;
-
- Uint32 c_errorInsert4000TableId;
-
- void initGlobalTemporaryVars();
- void reportMemoryUsage(Signal* signal, int incDec);
-
-
-#ifdef VM_TRACE
- struct Th {
- Uint32 data[1];
- };
- friend class NdbOut& operator<<(NdbOut&, const Operationrec&);
- friend class NdbOut& operator<<(NdbOut&, const Th&);
-#endif
-};
-
-inline
-bool Dbtup::isUndoLoggingNeeded(Fragrecord* const regFragPtr,
- Uint32 pageId)
-{
- if ((regFragPtr->checkpointVersion != RNIL) &&
- (pageId >= regFragPtr->minPageNotWrittenInCheckpoint) &&
- (pageId < regFragPtr->maxPageWrittenInCheckpoint)) {
- return true;
- }//if
- return false;
-}//Dbtup::isUndoLoggingNeeded()
-
-inline
-bool Dbtup::isUndoLoggingActive(Fragrecord* const regFragPtr)
-{
- if (regFragPtr->checkpointVersion != RNIL) {
- return true;
- }//if
- return false;
-}//Dbtup::isUndoLoggingActive()
-
-inline
-bool Dbtup::isUndoLoggingBlocked(Fragrecord* const regFragPtr)
-{
- if ((regFragPtr->checkpointVersion != RNIL) &&
- (cnoFreeUndoSeg < ZMIN_PAGE_LIMIT_TUPKEYREQ)) {
- return true;
- }//if
- return false;
-}//Dbtup::isUndoLoggingBlocked()
-
-inline
-bool Dbtup::isPageUndoLogged(Fragrecord* const regFragPtr,
- Uint32 pageId)
-{
- if ((pageId >= regFragPtr->minPageNotWrittenInCheckpoint) &&
- (pageId < regFragPtr->maxPageWrittenInCheckpoint)) {
- return true;
- }//if
- return false;
-}//Dbtup::isPageUndoLogged()
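-/*
- * Usage sketch for the predicates above (illustrative only; the argument
- * names recordType/tableId/fragId/localLogIndex are placeholders and do
- * not come from the original call sites):
- *
- *   if (isUndoLoggingNeeded(regFragPtr, pageId)) {
- *     cprAddUndoLogRecord(signal, recordType, pageId, pageIndex,
- *                         tableId, fragId, localLogIndex);
- *   }
- */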
-
-#endif
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
deleted file mode 100644
index 3170d23499a..00000000000
--- a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
+++ /dev/null
@@ -1,1186 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-#define DBTUP_C
-#include "Dbtup.hpp"
-#include <RefConvert.hpp>
-#include <ndb_limits.h>
-#include <pc.hpp>
-#include <AttributeDescriptor.hpp>
-#include "AttributeOffset.hpp"
-#include <AttributeHeader.hpp>
-
-#define ljam() { jamLine(3000 + __LINE__); }
-#define ljamEntry() { jamEntryLine(3000 + __LINE__); }
-
-void
-Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
-{
- Uint32 startDescriptor = regTabPtr->tabDescriptor;
- ndbrequire((startDescriptor + (regTabPtr->noOfAttr << ZAD_LOG_SIZE)) <= cnoOfTabDescrRec);
- for (Uint32 i = 0; i < regTabPtr->noOfAttr; i++) {
- Uint32 attrDescriptorStart = startDescriptor + (i << ZAD_LOG_SIZE);
- Uint32 attrDescriptor = tableDescriptor[attrDescriptorStart].tabDescr;
- Uint32 attrOffset = tableDescriptor[attrDescriptorStart + 1].tabDescr;
- if (!AttributeDescriptor::getDynamic(attrDescriptor)) {
- if ((AttributeDescriptor::getArrayType(attrDescriptor) == ZNON_ARRAY) ||
- (AttributeDescriptor::getArrayType(attrDescriptor) == ZFIXED_ARRAY)) {
- if (!AttributeDescriptor::getNullable(attrDescriptor)) {
- if (AttributeDescriptor::getSize(attrDescriptor) == 0){
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNotNULL;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNotNULL;
- } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1){
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHOneWordNotNULL;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHOneWordNotNULL;
- } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 2) {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHTwoWordNotNULL;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHTwoWordNotNULL;
- } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) > 2) {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNotNULL;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNotNULL;
- } else {
- ndbrequire(false);
- }//if
- // replace functions for char attribute
- if (AttributeOffset::getCharsetFlag(attrOffset)) {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNotNULL;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNotNULL;
- }
- } else {
- if (AttributeDescriptor::getSize(attrDescriptor) == 0){
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNULLable;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNULLable;
- } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1){
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHOneWordNULLable;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
- } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 2) {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHTwoWordNULLable;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
- } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) > 2) {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNULLable;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
- } else {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHZeroWordNULLable;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
- }//if
- // replace functions for char attribute
- if (AttributeOffset::getCharsetFlag(attrOffset)) {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNULLable;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
- }
- }//if
- } else if (AttributeDescriptor::getArrayType(attrDescriptor) == ZVAR_ARRAY) {
- if (!AttributeDescriptor::getNullable(attrDescriptor)) {
- if (AttributeDescriptor::getArraySize(attrDescriptor) == 0) {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readVarSizeUnlimitedNotNULL;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateVarSizeUnlimitedNotNULL;
- } else if (AttributeDescriptor::getArraySize(attrDescriptor) > ZMAX_SMALL_VAR_ARRAY) {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readBigVarSizeNotNULL;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateBigVarSizeNotNULL;
- } else {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readSmallVarSizeNotNULL;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateSmallVarSizeNotNULL;
- }//if
- } else {
- if (AttributeDescriptor::getArraySize(attrDescriptor) == 0) {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readVarSizeUnlimitedNULLable;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateVarSizeUnlimitedNULLable;
- } else if (AttributeDescriptor::getArraySize(attrDescriptor) > ZMAX_SMALL_VAR_ARRAY) {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readBigVarSizeNULLable;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateBigVarSizeNULLable;
- } else {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readSmallVarSizeNULLable;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateSmallVarSizeNULLable;
- }//if
- }//if
- } else {
- ndbrequire(false);
- }//if
- } else {
- if ((AttributeDescriptor::getArrayType(attrDescriptor) == ZNON_ARRAY) ||
- (AttributeDescriptor::getArrayType(attrDescriptor) == ZFIXED_ARRAY)) {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readDynFixedSize;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateDynFixedSize;
- } else if (AttributeDescriptor::getType(attrDescriptor) == ZVAR_ARRAY) {
- if (AttributeDescriptor::getArraySize(attrDescriptor) == 0) {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readDynVarSizeUnlimited;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateDynVarSizeUnlimited;
- } else if (AttributeDescriptor::getArraySize(attrDescriptor) > ZMAX_SMALL_VAR_ARRAY) {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readDynBigVarSize;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateDynBigVarSize;
- } else {
- ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readDynSmallVarSize;
- regTabPtr->updateFunctionArray[i] = &Dbtup::updateDynSmallVarSize;
- }//if
- } else {
- ndbrequire(false);
- }//if
- }//if
- }//for
-}//Dbtup::setUpQueryRoutines()
-
-/* ---------------------------------------------------------------- */
-/* THIS ROUTINE IS USED TO READ A NUMBER OF ATTRIBUTES IN THE */
-/* DATABASE AND PLACE THE RESULT IN ATTRINFO RECORDS. */
-//
-// In addition to the parameters used in the call it also relies on the
-// following variables being set up properly.
-//
-// operPtr.p Operation record pointer
-// fragptr.p Fragment record pointer
-// tabptr.p Table record pointer
-/* ---------------------------------------------------------------- */
-int Dbtup::readAttributes(Page* const pagePtr,
- Uint32 tupHeadOffset,
- const Uint32* inBuffer,
- Uint32 inBufLen,
- Uint32* outBuffer,
- Uint32 maxRead,
- bool xfrmFlag)
-{
- Tablerec* const regTabPtr = tabptr.p;
- Uint32 numAttributes = regTabPtr->noOfAttr;
- Uint32 attrDescriptorStart = regTabPtr->tabDescriptor;
- Uint32 inBufIndex = 0;
-
- ndbrequire(attrDescriptorStart + (numAttributes << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
-
- tOutBufIndex = 0;
- tCheckOffset = regTabPtr->tupheadsize;
- tMaxRead = maxRead;
- tTupleHeader = &pagePtr->pageWord[tupHeadOffset];
- tXfrmFlag = xfrmFlag;
-
- ndbrequire(tupHeadOffset + tCheckOffset <= ZWORDS_ON_PAGE);
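- // Each entry consumed from inBuffer is an AttributeHeader naming the attribute
- // to read; a fresh header is written to outBuffer and the per-attribute read
- // function appends the value and fills in its size.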
- while (inBufIndex < inBufLen) {
- Uint32 tmpAttrBufIndex = tOutBufIndex;
- AttributeHeader ahIn(inBuffer[inBufIndex]);
- inBufIndex++;
- Uint32 attributeId = ahIn.getAttributeId();
- Uint32 attrDescriptorIndex = attrDescriptorStart + (attributeId << ZAD_LOG_SIZE);
- ljam();
-
- AttributeHeader::init(&outBuffer[tmpAttrBufIndex], attributeId, 0);
- AttributeHeader* ahOut = (AttributeHeader*)&outBuffer[tmpAttrBufIndex];
- tOutBufIndex = tmpAttrBufIndex + 1;
- if (attributeId < numAttributes) {
- Uint32 attributeDescriptor = tableDescriptor[attrDescriptorIndex].tabDescr;
- Uint32 attributeOffset = tableDescriptor[attrDescriptorIndex + 1].tabDescr;
- ReadFunction f = regTabPtr->readFunctionArray[attributeId];
- if ((this->*f)(outBuffer,
- ahOut,
- attributeDescriptor,
- attributeOffset)) {
- continue;
- } else {
- return -1;
- }//if
- } else if(attributeId & AttributeHeader::PSUEDO){
- Uint32 sz = read_psuedo(attributeId,
- outBuffer+tmpAttrBufIndex+1);
- AttributeHeader::init(&outBuffer[tmpAttrBufIndex], attributeId, sz);
- tOutBufIndex = tmpAttrBufIndex + 1 + sz;
- } else {
- terrorCode = ZATTRIBUTE_ID_ERROR;
- return -1;
- }//if
- }//while
- return tOutBufIndex;
-}//Dbtup::readAttributes()
-
-#if 0
-int Dbtup::readAttributesWithoutHeader(Page* const pagePtr,
- Uint32 tupHeadOffset,
- Uint32* inBuffer,
- Uint32 inBufLen,
- Uint32* outBuffer,
- Uint32* attrBuffer,
- Uint32 maxRead)
-{
- Tablerec* const regTabPtr = tabptr.p;
- Uint32 numAttributes = regTabPtr->noOfAttr;
- Uint32 attrDescriptorStart = regTabPtr->tabDescriptor;
- Uint32 inBufIndex = 0;
- Uint32 attrBufIndex = 0;
-
- ndbrequire(attrDescriptorStart + (numAttributes << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
-
- tOutBufIndex = 0;
- tCheckOffset = regTabPtr->tupheadsize;
- tMaxRead = maxRead;
- tTupleHeader = &pagePtr->pageWord[tupHeadOffset];
-
- ndbrequire(tupHeadOffset + tCheckOffset <= ZWORDS_ON_PAGE);
- while (inBufIndex < inBufLen) {
- AttributeHeader ahIn(inBuffer[inBufIndex]);
- inBufIndex++;
- Uint32 attributeId = ahIn.getAttributeId();
- Uint32 attrDescriptorIndex = attrDescriptorStart + (attributeId << ZAD_LOG_SIZE);
- ljam();
-
- AttributeHeader::init(&attrBuffer[attrBufIndex], attributeId, 0);
- AttributeHeader* ahOut = (AttributeHeader*)&attrBuffer[attrBufIndex];
- attrBufIndex++;
- if (attributeId < numAttributes) {
- Uint32 attributeDescriptor = tableDescriptor[attrDescriptorIndex].tabDescr;
- Uint32 attributeOffset = tableDescriptor[attrDescriptorIndex + 1].tabDescr;
- ReadFunction f = regTabPtr->readFunctionArray[attributeId];
- if ((this->*f)(outBuffer,
- ahOut,
- attributeDescriptor,
- attributeOffset)) {
- continue;
- } else {
- return -1;
- }//if
- } else {
- terrorCode = ZATTRIBUTE_ID_ERROR;
- return -1;
- }//if
- }//while
- ndbrequire(attrBufIndex == inBufLen);
- return tOutBufIndex;
-}//Dbtup::readAttributesWithoutHeader()
-#endif
-
-bool
-Dbtup::readFixedSizeTHOneWordNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- Uint32 indexBuf = tOutBufIndex;
- Uint32 readOffset = AttributeOffset::getOffset(attrDes2);
- Uint32 const wordRead = tTupleHeader[readOffset];
- Uint32 newIndexBuf = indexBuf + 1;
- Uint32 maxRead = tMaxRead;
-
- ndbrequire(readOffset < tCheckOffset);
- if (newIndexBuf <= maxRead) {
- ljam();
- outBuffer[indexBuf] = wordRead;
- ahOut->setDataSize(1);
- tOutBufIndex = newIndexBuf;
- return true;
- } else {
- ljam();
- terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
- return false;
- }//if
-}//Dbtup::readFixedSizeTHOneWordNotNULL()
-
-bool
-Dbtup::readFixedSizeTHTwoWordNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- Uint32 indexBuf = tOutBufIndex;
- Uint32 readOffset = AttributeOffset::getOffset(attrDes2);
- Uint32 const wordReadFirst = tTupleHeader[readOffset];
- Uint32 const wordReadSecond = tTupleHeader[readOffset + 1];
- Uint32 newIndexBuf = indexBuf + 2;
- Uint32 maxRead = tMaxRead;
-
- ndbrequire(readOffset + 1 < tCheckOffset);
- if (newIndexBuf <= maxRead) {
- ljam();
- ahOut->setDataSize(2);
- outBuffer[indexBuf] = wordReadFirst;
- outBuffer[indexBuf + 1] = wordReadSecond;
- tOutBufIndex = newIndexBuf;
- return true;
- } else {
- ljam();
- terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
- return false;
- }//if
-}//Dbtup::readFixedSizeTHTwoWordNotNULL()
-
-bool
-Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- Uint32 indexBuf = tOutBufIndex;
- Uint32 charsetFlag = AttributeOffset::getCharsetFlag(attrDes2);
- Uint32 readOffset = AttributeOffset::getOffset(attrDes2);
- Uint32 attrNoOfWords = AttributeDescriptor::getSizeInWords(attrDescriptor);
- Uint32 maxRead = tMaxRead;
-
- ndbrequire((readOffset + attrNoOfWords - 1) < tCheckOffset);
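- // Plain word copy unless this is a charset attribute and the caller asked for
- // the transformed (xfrm) form; then the value is normalised with strnxfrm and
- // zero-padded to a word boundary before being returned.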
- if (! charsetFlag || ! tXfrmFlag) {
- Uint32 newIndexBuf = indexBuf + attrNoOfWords;
- if (newIndexBuf <= maxRead) {
- ljam();
- ahOut->setDataSize(attrNoOfWords);
- MEMCOPY_NO_WORDS(&outBuffer[indexBuf],
- &tTupleHeader[readOffset],
- attrNoOfWords);
- tOutBufIndex = newIndexBuf;
- return true;
- } else {
- ljam();
- terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
- }//if
- } else {
- ljam();
- Tablerec* regTabPtr = tabptr.p;
- Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(attrDescriptor);
- uchar* dstPtr = (uchar*)&outBuffer[indexBuf];
- const uchar* srcPtr = (uchar*)&tTupleHeader[readOffset];
- Uint32 i = AttributeOffset::getCharsetPos(attrDes2);
- ndbrequire(i < regTabPtr->noOfCharsets);
- CHARSET_INFO* cs = regTabPtr->charsetArray[i];
- Uint32 typeId = AttributeDescriptor::getType(attrDescriptor);
- Uint32 lb, len;
- bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
- if (ok) {
- Uint32 xmul = cs->strxfrm_multiply;
- if (xmul == 0)
- xmul = 1;
- // see comment in DbtcMain.cpp
- Uint32 dstLen = xmul * (srcBytes - lb);
- Uint32 maxIndexBuf = indexBuf + (dstLen >> 2);
- if (maxIndexBuf <= maxRead) {
- ljam();
- int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
- ndbrequire(n != -1);
- while ((n & 3) != 0) {
- dstPtr[n++] = 0;
- }
- Uint32 dstWords = (n >> 2);
- ahOut->setDataSize(dstWords);
- Uint32 newIndexBuf = indexBuf + dstWords;
- ndbrequire(newIndexBuf <= maxRead);
- tOutBufIndex = newIndexBuf;
- return true;
- } else {
- ljam();
- terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
- }
- } else {
- ljam();
- terrorCode = ZTUPLE_CORRUPTED_ERROR;
- }
- }
- return false;
-}//Dbtup::readFixedSizeTHManyWordNotNULL()
-
-bool
-Dbtup::readFixedSizeTHOneWordNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- if (!nullFlagCheck(attrDes2)) {
- ljam();
- return readFixedSizeTHOneWordNotNULL(outBuffer,
- ahOut,
- attrDescriptor,
- attrDes2);
- } else {
- ljam();
- ahOut->setNULL();
- return true;
- }//if
-}//Dbtup::readFixedSizeTHOneWordNULLable()
-
-bool
-Dbtup::readFixedSizeTHTwoWordNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- if (!nullFlagCheck(attrDes2)) {
- ljam();
- return readFixedSizeTHTwoWordNotNULL(outBuffer,
- ahOut,
- attrDescriptor,
- attrDes2);
- } else {
- ljam();
- ahOut->setNULL();
- return true;
- }//if
-}//Dbtup::readFixedSizeTHTwoWordNULLable()
-
-bool
-Dbtup::readFixedSizeTHManyWordNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- if (!nullFlagCheck(attrDes2)) {
- ljam();
- return readFixedSizeTHManyWordNotNULL(outBuffer,
- ahOut,
- attrDescriptor,
- attrDes2);
- } else {
- ljam();
- ahOut->setNULL();
- return true;
- }//if
-}//Dbtup::readFixedSizeTHManyWordNULLable()
-
-bool
-Dbtup::readFixedSizeTHZeroWordNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- if (nullFlagCheck(attrDes2)) {
- ljam();
- ahOut->setNULL();
- }//if
- return true;
-}//Dbtup::readFixedSizeTHZeroWordNULLable()
-
-bool
-Dbtup::nullFlagCheck(Uint32 attrDes2)
-{
- Tablerec* const regTabPtr = tabptr.p;
- Uint32 nullFlagOffsetInTuple = AttributeOffset::getNullFlagOffset(attrDes2);
- ndbrequire(nullFlagOffsetInTuple < regTabPtr->tupNullWords);
- nullFlagOffsetInTuple += regTabPtr->tupNullIndex;
- ndbrequire(nullFlagOffsetInTuple < tCheckOffset);
-
- return (AttributeOffset::isNULL(tTupleHeader[nullFlagOffsetInTuple], attrDes2));
-}//Dbtup::nullFlagCheck()
-
-bool
-Dbtup::readVariableSizedAttr(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::readVariableSizedAttr()
-
-bool
-Dbtup::readVarSizeUnlimitedNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::readVarSizeUnlimitedNotNULL()
-
-bool
-Dbtup::readVarSizeUnlimitedNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::readVarSizeUnlimitedNULLable()
-
-bool
-Dbtup::readBigVarSizeNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::readBigVarSizeNotNULL()
-
-bool
-Dbtup::readBigVarSizeNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::readBigVarSizeNULLable()
-
-bool
-Dbtup::readSmallVarSizeNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::readSmallVarSizeNotNULL()
-
-bool
-Dbtup::readSmallVarSizeNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::readSmallVarSizeNULLable()
-
-bool
-Dbtup::readDynFixedSize(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::readDynFixedSize()
-
-bool
-Dbtup::readDynVarSizeUnlimited(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::readDynVarSizeUnlimited()
-
-bool
-Dbtup::readDynBigVarSize(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::readDynBigVarSize()
-
-bool
-Dbtup::readDynSmallVarSize(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::readDynSmallVarSize()
-
-/* ---------------------------------------------------------------------- */
-/* THIS ROUTINE IS USED TO UPDATE A NUMBER OF ATTRIBUTES. IT IS */
-/* USED BY THE INSERT ROUTINE, THE UPDATE ROUTINE AND IT CAN BE */
-/* CALLED SEVERAL TIMES FROM THE INTERPRETER. */
-// In addition to the parameters used in the call it also relies on the
-// following variables being set up properly.
-//
-// pagep.p Page record pointer
-// fragptr.p Fragment record pointer
-// operPtr.p Operation record pointer
-// tabptr.p Table record pointer
-/* ---------------------------------------------------------------------- */
-int Dbtup::updateAttributes(Page* const pagePtr,
- Uint32 tupHeadOffset,
- Uint32* inBuffer,
- Uint32 inBufLen)
-{
- Tablerec* const regTabPtr = tabptr.p;
- Operationrec* const regOperPtr = operPtr.p;
- Uint32 numAttributes = regTabPtr->noOfAttr;
- Uint32 attrDescriptorStart = regTabPtr->tabDescriptor;
- ndbrequire(attrDescriptorStart + (numAttributes << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
-
- tCheckOffset = regTabPtr->tupheadsize;
- tTupleHeader = &pagePtr->pageWord[tupHeadOffset];
- Uint32 inBufIndex = 0;
- tInBufIndex = 0;
- tInBufLen = inBufLen;
-
- ndbrequire(tupHeadOffset + tCheckOffset <= ZWORDS_ON_PAGE);
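- // Each entry is an AttributeHeader followed by the new value. An attempt to
- // change a primary key attribute outside an insert is rejected, and every
- // attribute touched is recorded in the operation's change mask.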
- while (inBufIndex < inBufLen) {
- AttributeHeader ahIn(inBuffer[inBufIndex]);
- Uint32 attributeId = ahIn.getAttributeId();
- Uint32 attrDescriptorIndex = attrDescriptorStart + (attributeId << ZAD_LOG_SIZE);
- if (attributeId < numAttributes) {
- Uint32 attrDescriptor = tableDescriptor[attrDescriptorIndex].tabDescr;
- Uint32 attributeOffset = tableDescriptor[attrDescriptorIndex + 1].tabDescr;
- if ((AttributeDescriptor::getPrimaryKey(attrDescriptor)) &&
- (regOperPtr->optype != ZINSERT)) {
- if (checkUpdateOfPrimaryKey(&inBuffer[inBufIndex], regTabPtr)) {
- ljam();
- terrorCode = ZTRY_UPDATE_PRIMARY_KEY;
- return -1;
- }//if
- }//if
- UpdateFunction f = regTabPtr->updateFunctionArray[attributeId];
- ljam();
- regOperPtr->changeMask.set(attributeId);
- if ((this->*f)(inBuffer,
- attrDescriptor,
- attributeOffset)) {
- inBufIndex = tInBufIndex;
- continue;
- } else {
- ljam();
- return -1;
- }//if
- } else {
- ljam();
- terrorCode = ZATTRIBUTE_ID_ERROR;
- return -1;
- }//if
- }//while
- return 0;
-}//Dbtup::updateAttributes()
-
-bool
-Dbtup::checkUpdateOfPrimaryKey(Uint32* updateBuffer, Tablerec* const regTabPtr)
-{
- Uint32 keyReadBuffer[MAX_KEY_SIZE_IN_WORDS];
- Uint32 attributeHeader;
- AttributeHeader* ahOut = (AttributeHeader*)&attributeHeader;
- AttributeHeader ahIn(*updateBuffer);
- Uint32 attributeId = ahIn.getAttributeId();
- Uint32 attrDescriptorIndex = regTabPtr->tabDescriptor + (attributeId << ZAD_LOG_SIZE);
- Uint32 attrDescriptor = tableDescriptor[attrDescriptorIndex].tabDescr;
- Uint32 attributeOffset = tableDescriptor[attrDescriptorIndex + 1].tabDescr;
- ReadFunction f = regTabPtr->readFunctionArray[attributeId];
-
- AttributeHeader::init(&attributeHeader, attributeId, 0);
- tOutBufIndex = 0;
- tMaxRead = MAX_KEY_SIZE_IN_WORDS;
-
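- // Read the stored key attribute without charset transformation and report a
- // mismatch if the supplied value differs in size or content.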
- bool tmp = tXfrmFlag;
- tXfrmFlag = false;
- ndbrequire((this->*f)(&keyReadBuffer[0], ahOut, attrDescriptor, attributeOffset));
- tXfrmFlag = tmp;
- ndbrequire(tOutBufIndex == ahOut->getDataSize());
- if (ahIn.getDataSize() != ahOut->getDataSize()) {
- ljam();
- return true;
- }//if
- if (memcmp(&keyReadBuffer[0], &updateBuffer[1], tOutBufIndex << 2) != 0) {
- ljam();
- return true;
- }//if
- return false;
-}//Dbtup::checkUpdateOfPrimaryKey()
-
-#if 0
-void Dbtup::checkPages(Fragrecord* const regFragPtr)
-{
- Uint32 noPages = getNoOfPages(regFragPtr);
- for (Uint32 i = 0; i < noPages ; i++) {
- PagePtr pagePtr;
- pagePtr.i = getRealpid(regFragPtr, i);
- ptrCheckGuard(pagePtr, cnoOfPage, page);
- ndbrequire(pagePtr.p->pageWord[1] != (RNIL - 1));
- }
-}
-#endif
-
-bool
-Dbtup::updateFixedSizeTHOneWordNotNULL(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- Uint32 indexBuf = tInBufIndex;
- Uint32 inBufLen = tInBufLen;
- Uint32 updateOffset = AttributeOffset::getOffset(attrDes2);
- AttributeHeader ahIn(inBuffer[indexBuf]);
- Uint32 nullIndicator = ahIn.isNULL();
- Uint32 newIndex = indexBuf + 2;
- ndbrequire(updateOffset < tCheckOffset);
-
- if (newIndex <= inBufLen) {
- Uint32 updateWord = inBuffer[indexBuf + 1];
- if (!nullIndicator) {
- ljam();
- tInBufIndex = newIndex;
- tTupleHeader[updateOffset] = updateWord;
- return true;
- } else {
- ljam();
- terrorCode = ZNOT_NULL_ATTR;
- return false;
- }//if
- } else {
- ljam();
- terrorCode = ZAI_INCONSISTENCY_ERROR;
- return false;
- }//if
-}//Dbtup::updateFixedSizeTHOneWordNotNULL()
-
-bool
-Dbtup::updateFixedSizeTHTwoWordNotNULL(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- Uint32 indexBuf = tInBufIndex;
- Uint32 inBufLen = tInBufLen;
- Uint32 updateOffset = AttributeOffset::getOffset(attrDes2);
- AttributeHeader ahIn(inBuffer[indexBuf]);
- Uint32 nullIndicator = ahIn.isNULL();
- Uint32 newIndex = indexBuf + 3;
- ndbrequire((updateOffset + 1) < tCheckOffset);
-
- if (newIndex <= inBufLen) {
- Uint32 updateWord1 = inBuffer[indexBuf + 1];
- Uint32 updateWord2 = inBuffer[indexBuf + 2];
- if (!nullIndicator) {
- ljam();
- tInBufIndex = newIndex;
- tTupleHeader[updateOffset] = updateWord1;
- tTupleHeader[updateOffset + 1] = updateWord2;
- return true;
- } else {
- ljam();
- terrorCode = ZNOT_NULL_ATTR;
- return false;
- }//if
- } else {
- ljam();
- terrorCode = ZAI_INCONSISTENCY_ERROR;
- return false;
- }//if
-}//Dbtup::updateFixedSizeTHTwoWordNotNULL()
-
-bool
-Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- Uint32 indexBuf = tInBufIndex;
- Uint32 inBufLen = tInBufLen;
- Uint32 updateOffset = AttributeOffset::getOffset(attrDes2);
- Uint32 charsetFlag = AttributeOffset::getCharsetFlag(attrDes2);
- AttributeHeader ahIn(inBuffer[indexBuf]);
- Uint32 nullIndicator = ahIn.isNULL();
- Uint32 noOfWords = AttributeDescriptor::getSizeInWords(attrDescriptor);
- Uint32 newIndex = indexBuf + noOfWords + 1;
- ndbrequire((updateOffset + noOfWords - 1) < tCheckOffset);
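- // For charset attributes the length prefix is validated and, except for TEXT,
- // the characters are checked for well-formedness before the words are copied
- // into the tuple.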
-
- if (newIndex <= inBufLen) {
- if (!nullIndicator) {
- ljam();
- if (charsetFlag) {
- ljam();
- Tablerec* regTabPtr = tabptr.p;
- Uint32 typeId = AttributeDescriptor::getType(attrDescriptor);
- Uint32 bytes = AttributeDescriptor::getSizeInBytes(attrDescriptor);
- Uint32 i = AttributeOffset::getCharsetPos(attrDes2);
- ndbrequire(i < regTabPtr->noOfCharsets);
- // not const in MySQL
- CHARSET_INFO* cs = regTabPtr->charsetArray[i];
- int not_used;
- const char* ssrc = (const char*)&inBuffer[tInBufIndex + 1];
- Uint32 lb, len;
- if (! NdbSqlUtil::get_var_length(typeId, ssrc, bytes, lb, len)) {
- ljam();
- terrorCode = ZINVALID_CHAR_FORMAT;
- return false;
- }
- // fast fix bug#7340
- if (typeId != NDB_TYPE_TEXT &&
- (*cs->cset->well_formed_len)(cs, ssrc + lb, ssrc + lb + len, ZNIL, &not_used) != len) {
- ljam();
- terrorCode = ZINVALID_CHAR_FORMAT;
- return false;
- }
- }
- tInBufIndex = newIndex;
- MEMCOPY_NO_WORDS(&tTupleHeader[updateOffset],
- &inBuffer[indexBuf + 1],
- noOfWords);
- return true;
- } else {
- ljam();
- terrorCode = ZNOT_NULL_ATTR;
- return false;
- }//if
- } else {
- ljam();
- terrorCode = ZAI_INCONSISTENCY_ERROR;
- return false;
- }//if
-}//Dbtup::updateFixedSizeTHManyWordNotNULL()
-
-bool
-Dbtup::updateFixedSizeTHManyWordNULLable(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- Tablerec* const regTabPtr = tabptr.p;
- AttributeHeader ahIn(inBuffer[tInBufIndex]);
- Uint32 nullIndicator = ahIn.isNULL();
- Uint32 nullFlagOffset = AttributeOffset::getNullFlagOffset(attrDes2);
- Uint32 nullFlagBitOffset = AttributeOffset::getNullFlagBitOffset(attrDes2);
- Uint32 nullWordOffset = nullFlagOffset + regTabPtr->tupNullIndex;
- ndbrequire((nullFlagOffset < regTabPtr->tupNullWords) &&
- (nullWordOffset < tCheckOffset));
- Uint32 nullBits = tTupleHeader[nullWordOffset];
-
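- // Maintain the attribute's NULL bit: clear it and delegate to the NOT NULL
- // routine when a value is supplied, or set it and consume only the header
- // word when the new value is NULL.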
- if (!nullIndicator) {
- nullBits &= (~(1 << nullFlagBitOffset));
- ljam();
- tTupleHeader[nullWordOffset] = nullBits;
- return updateFixedSizeTHManyWordNotNULL(inBuffer,
- attrDescriptor,
- attrDes2);
- } else {
- Uint32 newIndex = tInBufIndex + 1;
- if (newIndex <= tInBufLen) {
- nullBits |= (1 << nullFlagBitOffset);
- ljam();
- tTupleHeader[nullWordOffset] = nullBits;
- tInBufIndex = newIndex;
- return true;
- } else {
- ljam();
- terrorCode = ZAI_INCONSISTENCY_ERROR;
- return false;
- }//if
- }//if
-}//Dbtup::updateFixedSizeTHManyWordNULLable()
-
-bool
-Dbtup::updateVariableSizedAttr(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::updateVariableSizedAttr()
-
-bool
-Dbtup::updateVarSizeUnlimitedNotNULL(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::updateVarSizeUnlimitedNotNULL()
-
-bool
-Dbtup::updateVarSizeUnlimitedNULLable(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::updateVarSizeUnlimitedNULLable()
-
-bool
-Dbtup::updateBigVarSizeNotNULL(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::updateBigVarSizeNotNULL()
-
-bool
-Dbtup::updateBigVarSizeNULLable(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::updateBigVarSizeNULLable()
-
-bool
-Dbtup::updateSmallVarSizeNotNULL(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::updateSmallVarSizeNotNULL()
-
-bool
-Dbtup::updateSmallVarSizeNULLable(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::updateSmallVarSizeNULLable()
-
-bool
-Dbtup::updateDynFixedSize(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::updateDynFixedSize()
-
-bool
-Dbtup::updateDynVarSizeUnlimited(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::updateDynVarSizeUnlimited()
-
-bool
-Dbtup::updateDynBigVarSize(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::updateDynBigVarSize()
-
-bool
-Dbtup::updateDynSmallVarSize(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- ljam();
- terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
- return false;
-}//Dbtup::updateDynSmallVarSize()
-
-Uint32
-Dbtup::read_psuedo(Uint32 attrId, Uint32* outBuffer){
- Uint32 tmp[sizeof(SignalHeader)+25];
- Signal * signal = (Signal*)&tmp;
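- // Pseudo columns are computed rather than stored: FRAGMENT, FRAGMENT_MEMORY
- // and ROW_SIZE come from local records, while ROW_COUNT, COMMIT_COUNT and
- // RANGE_NO are fetched from DBLQH via a direct READ_PSUEDO request.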
- switch(attrId){
- case AttributeHeader::FRAGMENT:
- * outBuffer = operPtr.p->fragId >> 1; // remove "hash" bit
- return 1;
- case AttributeHeader::FRAGMENT_MEMORY:
- {
- Uint64 tmp= fragptr.p->noOfPages;
- tmp*= 32768;
- memcpy(outBuffer,&tmp,8);
- }
- return 2;
- case AttributeHeader::ROW_SIZE:
- * outBuffer = tabptr.p->tupheadsize << 2;
- return 1;
- case AttributeHeader::ROW_COUNT:
- case AttributeHeader::COMMIT_COUNT:
- signal->theData[0] = operPtr.p->userpointer;
- signal->theData[1] = attrId;
-
- EXECUTE_DIRECT(DBLQH, GSN_READ_PSUEDO_REQ, signal, 2);
- outBuffer[0] = signal->theData[0];
- outBuffer[1] = signal->theData[1];
- return 2;
- case AttributeHeader::RANGE_NO:
- signal->theData[0] = operPtr.p->userpointer;
- signal->theData[1] = attrId;
-
- EXECUTE_DIRECT(DBLQH, GSN_READ_PSUEDO_REQ, signal, 2);
- outBuffer[0] = signal->theData[0];
- return 1;
- default:
- return 0;
- }
-}
-
-bool
-Dbtup::readBitsNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- Tablerec* const regTabPtr = tabptr.p;
- Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
- Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
- Uint32 indexBuf = tOutBufIndex;
- Uint32 newIndexBuf = indexBuf + ((bitCount + 31) >> 5);
- Uint32 maxRead = tMaxRead;
-
- if (newIndexBuf <= maxRead) {
- ljam();
- ahOut->setDataSize((bitCount + 31) >> 5);
- tOutBufIndex = newIndexBuf;
-
- BitmaskImpl::getField(regTabPtr->tupNullWords,
- tTupleHeader+regTabPtr->tupNullIndex,
- pos,
- bitCount,
- outBuffer+indexBuf);
-
- return true;
- } else {
- ljam();
- terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
- return false;
- }//if
-}
-
-bool
-Dbtup::readBitsNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- Tablerec* const regTabPtr = tabptr.p;
- Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
- Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
-
- Uint32 indexBuf = tOutBufIndex;
- Uint32 newIndexBuf = indexBuf + ((bitCount + 31) >> 5);
- Uint32 maxRead = tMaxRead;
-
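- // Bit 'pos' in the null area is the NULL indicator for this attribute; the
- // value bits follow immediately after it, starting at pos + 1.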
- if(BitmaskImpl::get(regTabPtr->tupNullWords,
- tTupleHeader+regTabPtr->tupNullIndex,
- pos))
- {
- ljam();
- ahOut->setNULL();
- return true;
- }
-
-
- if (newIndexBuf <= maxRead) {
- ljam();
- ahOut->setDataSize((bitCount + 31) >> 5);
- tOutBufIndex = newIndexBuf;
- BitmaskImpl::getField(regTabPtr->tupNullWords,
- tTupleHeader+regTabPtr->tupNullIndex,
- pos+1,
- bitCount,
- outBuffer+indexBuf);
- return true;
- } else {
- ljam();
- terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
- return false;
- }//if
-}
-
-bool
-Dbtup::updateBitsNotNULL(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- Tablerec* const regTabPtr = tabptr.p;
- Uint32 indexBuf = tInBufIndex;
- Uint32 inBufLen = tInBufLen;
- AttributeHeader ahIn(inBuffer[indexBuf]);
- Uint32 nullIndicator = ahIn.isNULL();
- Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
- Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
- Uint32 newIndex = indexBuf + 1 + ((bitCount + 31) >> 5);
-
- if (newIndex <= inBufLen) {
- if (!nullIndicator) {
- BitmaskImpl::setField(regTabPtr->tupNullWords,
- tTupleHeader+regTabPtr->tupNullIndex,
- pos,
- bitCount,
- inBuffer+indexBuf+1);
- tInBufIndex = newIndex;
- return true;
- } else {
- ljam();
- terrorCode = ZNOT_NULL_ATTR;
- return false;
- }//if
- } else {
- ljam();
- terrorCode = ZAI_INCONSISTENCY_ERROR;
- return false;
- }//if
-}
-
-bool
-Dbtup::updateBitsNULLable(Uint32* inBuffer,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- Tablerec* const regTabPtr = tabptr.p;
- AttributeHeader ahIn(inBuffer[tInBufIndex]);
- Uint32 indexBuf = tInBufIndex;
- Uint32 nullIndicator = ahIn.isNULL();
- Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
- Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
-
- if (!nullIndicator) {
- BitmaskImpl::clear(regTabPtr->tupNullWords,
- tTupleHeader+regTabPtr->tupNullIndex,
- pos);
- BitmaskImpl::setField(regTabPtr->tupNullWords,
- tTupleHeader+regTabPtr->tupNullIndex,
- pos+1,
- bitCount,
- inBuffer+indexBuf+1);
-
- Uint32 newIndex = indexBuf + 1 + ((bitCount + 31) >> 5);
- tInBufIndex = newIndex;
- return true;
- } else {
- Uint32 newIndex = tInBufIndex + 1;
- if (newIndex <= tInBufLen) {
- ljam();
- BitmaskImpl::set(regTabPtr->tupNullWords,
- tTupleHeader+regTabPtr->tupNullIndex,
- pos);
-
- tInBufIndex = newIndex;
- return true;
- } else {
- ljam();
- terrorCode = ZAI_INCONSISTENCY_ERROR;
- return false;
- }//if
- }//if
-}
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
deleted file mode 100644
index 2b65a8402c2..00000000000
--- a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
+++ /dev/null
@@ -1,1152 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-#define DBTUP_C
-#include "Dbtup.hpp"
-#include <RefConvert.hpp>
-#include <ndb_limits.h>
-#include <pc.hpp>
-#include <AttributeDescriptor.hpp>
-#include "AttributeOffset.hpp"
-#include <AttributeHeader.hpp>
-#include <signaldata/FireTrigOrd.hpp>
-#include <signaldata/CreateTrig.hpp>
-#include <signaldata/TuxMaint.hpp>
-
-#define ljam() { jamLine(7000 + __LINE__); }
-#define ljamEntry() { jamEntryLine(7000 + __LINE__); }
-
-/* **************************************************************** */
-/* ---------------------------------------------------------------- */
-/* ----------------------- TRIGGER HANDLING ----------------------- */
-/* ---------------------------------------------------------------- */
-/* **************************************************************** */
-
-ArrayList<Dbtup::TupTriggerData>*
-Dbtup::findTriggerList(Tablerec* table,
- TriggerType::Value ttype,
- TriggerActionTime::Value ttime,
- TriggerEvent::Value tevent)
-{
- ArrayList<TupTriggerData>* tlist = NULL;
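- // Map (trigger type, action time, event) onto the corresponding per-table
- // trigger list; unsupported combinations leave tlist NULL.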
- switch (ttype) {
- case TriggerType::SUBSCRIPTION:
- case TriggerType::SUBSCRIPTION_BEFORE:
- switch (tevent) {
- case TriggerEvent::TE_INSERT:
- ljam();
- if (ttime == TriggerActionTime::TA_DETACHED)
- tlist = &table->subscriptionInsertTriggers;
- break;
- case TriggerEvent::TE_UPDATE:
- ljam();
- if (ttime == TriggerActionTime::TA_DETACHED)
- tlist = &table->subscriptionUpdateTriggers;
- break;
- case TriggerEvent::TE_DELETE:
- ljam();
- if (ttime == TriggerActionTime::TA_DETACHED)
- tlist = &table->subscriptionDeleteTriggers;
- break;
- default:
- break;
- }
- break;
- case TriggerType::SECONDARY_INDEX:
- switch (tevent) {
- case TriggerEvent::TE_INSERT:
- ljam();
- if (ttime == TriggerActionTime::TA_AFTER)
- tlist = &table->afterInsertTriggers;
- break;
- case TriggerEvent::TE_UPDATE:
- ljam();
- if (ttime == TriggerActionTime::TA_AFTER)
- tlist = &table->afterUpdateTriggers;
- break;
- case TriggerEvent::TE_DELETE:
- ljam();
- if (ttime == TriggerActionTime::TA_AFTER)
- tlist = &table->afterDeleteTriggers;
- break;
- default:
- break;
- }
- break;
- case TriggerType::ORDERED_INDEX:
- switch (tevent) {
- case TriggerEvent::TE_CUSTOM:
- ljam();
- if (ttime == TriggerActionTime::TA_CUSTOM)
- tlist = &table->tuxCustomTriggers;
- break;
- default:
- break;
- }
- break;
- case TriggerType::READ_ONLY_CONSTRAINT:
- switch (tevent) {
- case TriggerEvent::TE_UPDATE:
- ljam();
- if (ttime == TriggerActionTime::TA_AFTER)
- tlist = &table->constraintUpdateTriggers;
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- return tlist;
-}
-
-// Trigger signals
-void
-Dbtup::execCREATE_TRIG_REQ(Signal* signal)
-{
- ljamEntry();
- BlockReference senderRef = signal->getSendersBlockRef();
- const CreateTrigReq reqCopy = *(const CreateTrigReq*)signal->getDataPtr();
- const CreateTrigReq* const req = &reqCopy;
-
- // Find table
- TablerecPtr tabPtr;
- tabPtr.i = req->getTableId();
- ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
-
- // Create trigger and associate it with the table
- if (createTrigger(tabPtr.p, req)) {
- ljam();
- // Send conf
- CreateTrigConf* const conf = (CreateTrigConf*)signal->getDataPtrSend();
- conf->setUserRef(reference());
- conf->setConnectionPtr(req->getConnectionPtr());
- conf->setRequestType(req->getRequestType());
- conf->setTableId(req->getTableId());
- conf->setIndexId(req->getIndexId());
- conf->setTriggerId(req->getTriggerId());
- conf->setTriggerInfo(req->getTriggerInfo());
- sendSignal(senderRef, GSN_CREATE_TRIG_CONF,
- signal, CreateTrigConf::SignalLength, JBB);
- } else {
- ljam();
- // Send ref
- CreateTrigRef* const ref = (CreateTrigRef*)signal->getDataPtrSend();
- ref->setUserRef(reference());
- ref->setConnectionPtr(req->getConnectionPtr());
- ref->setRequestType(req->getRequestType());
- ref->setTableId(req->getTableId());
- ref->setIndexId(req->getIndexId());
- ref->setTriggerId(req->getTriggerId());
- ref->setTriggerInfo(req->getTriggerInfo());
- ref->setErrorCode(CreateTrigRef::TooManyTriggers);
- sendSignal(senderRef, GSN_CREATE_TRIG_REF,
- signal, CreateTrigRef::SignalLength, JBB);
- }
-}//Dbtup::execCREATE_TRIG_REQ()
-
-void
-Dbtup::execDROP_TRIG_REQ(Signal* signal)
-{
- ljamEntry();
- BlockReference senderRef = signal->getSendersBlockRef();
- const DropTrigReq reqCopy = *(const DropTrigReq*)signal->getDataPtr();
- const DropTrigReq* const req = &reqCopy;
-
- // Find table
- TablerecPtr tabPtr;
- tabPtr.i = req->getTableId();
- ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
-
- // Drop trigger
- Uint32 r = dropTrigger(tabPtr.p, req);
- if (r == 0){
- // Send conf
- DropTrigConf* const conf = (DropTrigConf*)signal->getDataPtrSend();
- conf->setUserRef(senderRef);
- conf->setConnectionPtr(req->getConnectionPtr());
- conf->setRequestType(req->getRequestType());
- conf->setTableId(req->getTableId());
- conf->setIndexId(req->getIndexId());
- conf->setTriggerId(req->getTriggerId());
- sendSignal(senderRef, GSN_DROP_TRIG_CONF,
- signal, DropTrigConf::SignalLength, JBB);
- } else {
- // Send ref
- DropTrigRef* const ref = (DropTrigRef*)signal->getDataPtrSend();
- ref->setUserRef(senderRef);
- ref->setConnectionPtr(req->getConnectionPtr());
- ref->setRequestType(req->getRequestType());
- ref->setTableId(req->getTableId());
- ref->setIndexId(req->getIndexId());
- ref->setTriggerId(req->getTriggerId());
- ref->setErrorCode((DropTrigRef::ErrorCode)r);
- ref->setErrorLine(__LINE__);
- ref->setErrorNode(refToNode(reference()));
- sendSignal(senderRef, GSN_DROP_TRIG_REF,
- signal, DropTrigRef::SignalLength, JBB);
- }
-}//Dbtup::execDROP_TRIG_REQ()
-
-/* ---------------------------------------------------------------- */
-/* ------------------------- createTrigger ------------------------ */
-/* */
-/* Creates a new trigger record by fetching one from the trigger */
-/* pool and associating it with the given table.                   */
-/* Trigger type can be one of secondary_index, subscription, */
-/* constraint(NYI), foreign_key(NYI), schema_upgrade(NYI), */
-/* api_trigger(NYI) or sql_trigger(NYI). */
-/* Note that this method only checks for total number of allowed */
-/* triggers. Checking the number of allowed triggers per table is */
-/* done by TRIX. */
-/* */
-/* ---------------------------------------------------------------- */
-bool
-Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req)
-{
- if (ERROR_INSERTED(4003)) {
- CLEAR_ERROR_INSERT_VALUE;
- return false;
- }
- TriggerType::Value ttype = req->getTriggerType();
- TriggerActionTime::Value ttime = req->getTriggerActionTime();
- TriggerEvent::Value tevent = req->getTriggerEvent();
-
- ArrayList<TupTriggerData>* tlist = findTriggerList(table, ttype, ttime, tevent);
- ndbrequire(tlist != NULL);
-
- TriggerPtr tptr;
- if (!tlist->seize(tptr))
- return false;
-
- // Set trigger id
- tptr.p->triggerId = req->getTriggerId();
-
- // ndbout_c("Create TupTrigger %u = %u %u %u %u", tptr.p->triggerId, table, ttype, ttime, tevent);
-
- // Set index id
- tptr.p->indexId = req->getIndexId();
-
- // Set trigger type etc
- tptr.p->triggerType = ttype;
- tptr.p->triggerActionTime = ttime;
- tptr.p->triggerEvent = tevent;
-
- tptr.p->sendBeforeValues = true;
- if ((tptr.p->triggerType == TriggerType::SUBSCRIPTION) &&
- ((tptr.p->triggerEvent == TriggerEvent::TE_UPDATE) ||
- (tptr.p->triggerEvent == TriggerEvent::TE_DELETE))) {
- ljam();
- tptr.p->sendBeforeValues = false;
- }
- tptr.p->sendOnlyChangedAttributes = false;
- if (((tptr.p->triggerType == TriggerType::SUBSCRIPTION) ||
- (tptr.p->triggerType == TriggerType::SUBSCRIPTION_BEFORE)) &&
- (tptr.p->triggerEvent == TriggerEvent::TE_UPDATE)) {
- ljam();
- tptr.p->sendOnlyChangedAttributes = true;
- }
-
- // Set monitor all
- tptr.p->monitorAllAttributes = req->getMonitorAllAttributes();
- tptr.p->monitorReplicas = req->getMonitorReplicas();
- tptr.p->m_receiverBlock = refToBlock(req->getReceiverRef());
-
- tptr.p->attributeMask.clear();
- if (tptr.p->monitorAllAttributes) {
- ljam();
- for(Uint32 i = 0; i < table->noOfAttr; i++) {
- if (!primaryKey(table, i)) {
- ljam();
- tptr.p->attributeMask.set(i);
- }
- }
- } else {
- // Set attribute mask
- ljam();
- tptr.p->attributeMask = req->getAttributeMask();
- }
- return true;
-}//Dbtup::createTrigger()
-
-bool
-Dbtup::primaryKey(Tablerec* const regTabPtr, Uint32 attrId)
-{
- Uint32 attrDescriptorStart = regTabPtr->tabDescriptor;
- Uint32 attrDescriptor = getTabDescrWord(attrDescriptorStart + (attrId * ZAD_SIZE));
- return (bool)AttributeDescriptor::getPrimaryKey(attrDescriptor);
-}//Dbtup::primaryKey()
-
-/* ---------------------------------------------------------------- */
-/* -------------------------- dropTrigger ------------------------- */
-/* */
-/* Deletes a trigger record by disassociating it from the given    */
-/* table and returning it to the trigger pool. */
-/* Trigger type can be one of secondary_index, subscription, */
-/* constraint(NYI), foreign_key(NYI), schema_upgrade(NYI), */
-/* api_trigger(NYI) or sql_trigger(NYI). */
-/* */
-/* ---------------------------------------------------------------- */
-Uint32
-Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req)
-{
- Uint32 triggerId = req->getTriggerId();
-
- TriggerType::Value ttype = req->getTriggerType();
- TriggerActionTime::Value ttime = req->getTriggerActionTime();
- TriggerEvent::Value tevent = req->getTriggerEvent();
-
- // ndbout_c("Drop TupTrigger %u = %u %u %u %u", triggerId, table, ttype, ttime, tevent);
-
- ArrayList<TupTriggerData>* tlist = findTriggerList(table, ttype, ttime, tevent);
- ndbrequire(tlist != NULL);
-
- Ptr<TupTriggerData> ptr;
- for (tlist->first(ptr); !ptr.isNull(); tlist->next(ptr)) {
- ljam();
- if (ptr.p->triggerId == triggerId) {
- ljam();
- tlist->release(ptr.i);
- return 0;
- }
- }
- return DropTrigRef::TriggerNotFound;
-}//Dbtup::dropTrigger()
-
-/* ---------------------------------------------------------------- */
-/* -------------- checkImmediateTriggersAfterOp ------------------ */
-/* */
-/* Called after an insert, delete, or update operation takes */
-/* place. Fetches before tuple for deletes and updates and */
-/* after tuple for inserts and updates. */
-/* Executes immediate triggers by sending FIRETRIGORD */
-/* */
-/* ---------------------------------------------------------------- */
-void Dbtup::checkImmediateTriggersAfterInsert(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTablePtr)
-{
- if(refToBlock(regOperPtr->coordinatorTC) == DBLQH) {
- return;
- }
-
- if ((regOperPtr->primaryReplica) &&
- (!(regTablePtr->afterInsertTriggers.isEmpty()))) {
- ljam();
- fireImmediateTriggers(signal,
- regTablePtr->afterInsertTriggers,
- regOperPtr);
- }//if
-}//Dbtup::checkImmediateTriggersAfterInsert()
-
-void Dbtup::checkImmediateTriggersAfterUpdate(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTablePtr)
-{
- if(refToBlock(regOperPtr->coordinatorTC) == DBLQH) {
- return;
- }
-
- if ((regOperPtr->primaryReplica) &&
- (!(regTablePtr->afterUpdateTriggers.isEmpty()))) {
- ljam();
- fireImmediateTriggers(signal,
- regTablePtr->afterUpdateTriggers,
- regOperPtr);
- }//if
- if ((regOperPtr->primaryReplica) &&
- (!(regTablePtr->constraintUpdateTriggers.isEmpty()))) {
- ljam();
- fireImmediateTriggers(signal,
- regTablePtr->constraintUpdateTriggers,
- regOperPtr);
- }//if
-}//Dbtup::checkImmediateTriggersAfterUpdate()
-
-void Dbtup::checkImmediateTriggersAfterDelete(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTablePtr)
-{
- if(refToBlock(regOperPtr->coordinatorTC) == DBLQH) {
- return;
- }
-
- if ((regOperPtr->primaryReplica) &&
- (!(regTablePtr->afterDeleteTriggers.isEmpty()))) {
- ljam();
- executeTriggers(signal,
- regTablePtr->afterDeleteTriggers,
- regOperPtr);
- }//if
-}//Dbtup::checkImmediateTriggersAfterDelete()
-
-#if 0
-/* ---------------------------------------------------------------- */
-/* --------------------- checkDeferredTriggers -------------------- */
-/* */
-/* Called before commit after an insert, delete, or update */
-/* operation. Fetches before tuple for deletes and updates and */
-/* after tuple for inserts and updates. */
-/* Executes deferred triggers by sending FIRETRIGORD */
-/* */
-/* ---------------------------------------------------------------- */
-void Dbtup::checkDeferredTriggers(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTablePtr)
-{
- ljam();
- // NYI
-}//Dbtup::checkDeferredTriggers()
-#endif
-
-/* ---------------------------------------------------------------- */
-/* --------------------- checkDetachedTriggers -------------------- */
-/* */
-/* Called at commit after an insert, delete, or update operation. */
-/* Fetches before tuple for deletes and updates and */
-/* after tuple for inserts and updates. */
-/* Executes detached triggers by sending FIRETRIGORD */
-/* */
-/* ---------------------------------------------------------------- */
-void Dbtup::checkDetachedTriggers(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTablePtr)
-{
- switch(regOperPtr->optype) {
- case(ZINSERT):
- ljam();
- if (regTablePtr->subscriptionInsertTriggers.isEmpty()) {
- // Table has no active triggers monitoring inserts at commit
- ljam();
- return;
- }//if
-
- // If any fired immediate insert trigger then fetch after tuple
- fireDetachedTriggers(signal,
- regTablePtr->subscriptionInsertTriggers,
- regOperPtr);
- break;
- case(ZDELETE):
- ljam();
- if (regTablePtr->subscriptionDeleteTriggers.isEmpty()) {
- // Table has no active triggers monitoring deletes at commit
- ljam();
- return;
- }//if
-
- // Execute any after delete triggers by sending
- // FIRETRIGORD with the before tuple
- executeTriggers(signal,
- regTablePtr->subscriptionDeleteTriggers,
- regOperPtr);
- break;
- case(ZUPDATE):
- ljam();
- if (regTablePtr->subscriptionUpdateTriggers.isEmpty()) {
- // Table has no active triggers monitoring updates at commit
- ljam();
- return;
- }//if
-
- // If any fired immediate update trigger then fetch after tuple
- // and send two FIRETRIGORD one with before tuple and one with after tuple
- fireDetachedTriggers(signal,
- regTablePtr->subscriptionUpdateTriggers,
- regOperPtr);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
-}//Dbtup::checkDetachedTriggers()
-
-void
-Dbtup::fireImmediateTriggers(Signal* signal,
- ArrayList<TupTriggerData>& triggerList,
- Operationrec* const regOperPtr)
-{
- TriggerPtr trigPtr;
- triggerList.first(trigPtr);
- while (trigPtr.i != RNIL) {
- ljam();
- if (trigPtr.p->monitorAllAttributes ||
- trigPtr.p->attributeMask.overlaps(regOperPtr->changeMask)) {
- ljam();
- executeTrigger(signal,
- trigPtr.p,
- regOperPtr);
- }//if
- triggerList.next(trigPtr);
- }//while
-}//Dbtup::fireImmediateTriggers()
-
-#if 0
-void
-Dbtup::fireDeferredTriggers(Signal* signal,
- ArrayList<TupTriggerData>& triggerList,
- Operationrec* const regOperPtr)
-{
- TriggerPtr trigPtr;
- triggerList.first(trigPtr);
- while (trigPtr.i != RNIL) {
- ljam();
- if (trigPtr.p->monitorAllAttributes ||
- trigPtr.p->attributeMask.overlaps(regOperPtr->changeMask)) {
- ljam();
- executeTrigger(signal,
- trigPtr,
- regOperPtr);
- }//if
- triggerList.next(trigPtr);
- }//while
-}//Dbtup::fireDeferredTriggers()
-#endif
-
-void
-Dbtup::fireDetachedTriggers(Signal* signal,
- ArrayList<TupTriggerData>& triggerList,
- Operationrec* const regOperPtr)
-{
- TriggerPtr trigPtr;
- triggerList.first(trigPtr);
- while (trigPtr.i != RNIL) {
- ljam();
- if ((trigPtr.p->monitorReplicas || regOperPtr->primaryReplica) &&
- (trigPtr.p->monitorAllAttributes ||
- trigPtr.p->attributeMask.overlaps(regOperPtr->changeMask))) {
- ljam();
- executeTrigger(signal,
- trigPtr.p,
- regOperPtr);
- }//if
- triggerList.next(trigPtr);
- }//while
-}//Dbtup::fireDetachedTriggers()
-
-void Dbtup::executeTriggers(Signal* signal,
- ArrayList<TupTriggerData>& triggerList,
- Operationrec* regOperPtr)
-{
- TriggerPtr trigPtr;
- triggerList.first(trigPtr);
- while (trigPtr.i != RNIL) {
- ljam();
- executeTrigger(signal,
- trigPtr.p,
- regOperPtr);
- triggerList.next(trigPtr);
-
- }//while
-}//Dbtup::executeTriggers()
-
-void Dbtup::executeTrigger(Signal* signal,
- TupTriggerData* const trigPtr,
- Operationrec* const regOperPtr)
-{
-
- /**
- * The block below does not work together with GREP.
- * I have 2 db nodes (2 replicas) -> one node group.
- * I want to have FIRETRIG_ORD sent to all SumaParticipants,
- * from all nodes in the node group described above. However,
- * only one of the nodes in the node group actually sends the
- * FIRE_TRIG_ORD, and the other node enters this "hack" below.
- * I don't really know what the code snippet below does, but it
- * does not work with GREP the way Lars and I want it.
- * We need to have triggers fired from both the primary and the
- * backup replica, not only the primary as it is now.
- *
- * Note: In Suma, I have changed triggers to be created with
- * setMonitorReplicas(true).
- * /Johan
- *
- * See RT 709
- */
- // XXX quick fix to NR, should fix in LQHKEYREQ instead
- /*
- if (refToBlock(regOperPtr->coordinatorTC) == DBLQH) {
- jam();
- return;
- }
- */
- BlockReference ref = trigPtr->m_receiverBlock;
- Uint32* const keyBuffer = &cinBuffer[0];
- Uint32* const mainBuffer = &coutBuffer[0];
- Uint32* const copyBuffer = &clogMemBuffer[0];
-
- Uint32 noPrimKey, noMainWords, noCopyWords;
-
- if (ref == BACKUP) {
- ljam();
- /*
- In order for the implementation of BACKUP to work even when changing
- primaries in the middle of the backup we need to set the trigger on
- all replicas. This check determines whether this is the node where the
- trigger should be fired. The check should preferably have been placed
- entirely in the BACKUP block, but it was about five times simpler
- to put it here and also much faster for the backup (a small overhead
- for everybody else).
- */
- signal->theData[0] = trigPtr->triggerId;
- signal->theData[1] = regOperPtr->fragId;
- EXECUTE_DIRECT(BACKUP, GSN_BACKUP_TRIG_REQ, signal, 2);
- ljamEntry();
- if (signal->theData[0] == 0) {
- ljam();
- return;
- }//if
- }//if
- if (!readTriggerInfo(trigPtr,
- regOperPtr,
- keyBuffer,
- noPrimKey,
- mainBuffer,
- noMainWords,
- copyBuffer,
- noCopyWords)) {
- ljam();
- return;
- }//if
-//--------------------------------------------------------------------
-// Now all data for this trigger has been read. It is now time to send
-// the trigger information consisting of two or three sets of TRIG_
-// ATTRINFO signals and one FIRE_TRIG_ORD signal.
-// We start by setting common header info for all TRIG_ATTRINFO signals.
-//--------------------------------------------------------------------
- bool executeDirect;
- TrigAttrInfo* const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtrSend();
- trigAttrInfo->setConnectionPtr(regOperPtr->tcOpIndex);
- trigAttrInfo->setTriggerId(trigPtr->triggerId);
-
- switch(trigPtr->triggerType) {
- case (TriggerType::SECONDARY_INDEX):
- ljam();
- ref = regOperPtr->coordinatorTC;
- executeDirect = false;
- break;
- case (TriggerType::SUBSCRIPTION):
- case (TriggerType::SUBSCRIPTION_BEFORE):
- ljam();
- // Since only backup uses subscription triggers we send to backup directly for now
- ref = trigPtr->m_receiverBlock;
- executeDirect = true;
- break;
- case (TriggerType::READ_ONLY_CONSTRAINT):
- terrorCode = ZREAD_ONLY_CONSTRAINT_VIOLATION;
- // XXX should return status and abort the rest
- return;
- default:
- ndbrequire(false);
- executeDirect= false; // remove warning
- }//switch
-
- regOperPtr->noFiredTriggers++;
-
- trigAttrInfo->setAttrInfoType(TrigAttrInfo::PRIMARY_KEY);
- sendTrigAttrInfo(signal, keyBuffer, noPrimKey, executeDirect, ref);
-
- Uint32 noAfter = 0;
- Uint32 noBefore = 0;
- switch(regOperPtr->optype) {
- case(ZINSERT):
- ljam();
- // Send AttrInfo signals with new attribute values
- trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
- sendTrigAttrInfo(signal, mainBuffer, noMainWords, executeDirect, ref);
- noAfter = noMainWords;
- break;
- case(ZDELETE):
- if (trigPtr->sendBeforeValues) {
- ljam();
- trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES);
- sendTrigAttrInfo(signal, mainBuffer, noMainWords, executeDirect, ref);
- noBefore = noMainWords;
- }//if
- break;
- case(ZUPDATE):
- ljam();
- if (trigPtr->sendBeforeValues) {
- ljam();
- trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES);
- sendTrigAttrInfo(signal, copyBuffer, noCopyWords, executeDirect, ref);
- noBefore = noCopyWords;
- }//if
- trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
- sendTrigAttrInfo(signal, mainBuffer, noMainWords, executeDirect, ref);
- noAfter = noMainWords;
- break;
- default:
- ndbrequire(false);
- }//switch
- sendFireTrigOrd(signal,
- regOperPtr,
- trigPtr,
- noPrimKey,
- noBefore,
- noAfter);
-}//Dbtup::executeTrigger()
-
-Uint32 Dbtup::setAttrIds(Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask,
- Uint32 noOfAttributes,
- Uint32* inBuffer)
-{
- Uint32 bufIndx = 0;
- for (Uint32 i = 0; i < noOfAttributes; i++) {
- ljam();
- if (attributeMask.get(i)) {
- ljam();
- AttributeHeader::init(&inBuffer[bufIndx++], i, 0);
- }//if
- }//for
- return bufIndx;
-}//Dbtup::setAttrIds()
-
-bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
- Operationrec* const regOperPtr,
- Uint32* const keyBuffer,
- Uint32& noPrimKey,
- Uint32* const mainBuffer,
- Uint32& noMainWords,
- Uint32* const copyBuffer,
- Uint32& noCopyWords)
-{
- noCopyWords = 0;
- noMainWords = 0;
- Uint32 readBuffer[MAX_ATTRIBUTES_IN_TABLE];
- PagePtr pagep;
-
-//---------------------------------------------------------------------------
-// Set-up variables needed by readAttributes operPtr.p, tabptr.p
-//---------------------------------------------------------------------------
- operPtr.p = regOperPtr;
- tabptr.i = regOperPtr->tableRef;
- ptrCheckGuard(tabptr, cnoOfTablerec, tablerec);
- Tablerec* const regTabPtr = tabptr.p;
-//--------------------------------------------------------------------
-// Initialise pagep and tuple offset for read of main tuple
-//--------------------------------------------------------------------
- Uint32 tupheadoffset = regOperPtr->pageOffset;
- pagep.i = regOperPtr->realPageId;
- ptrCheckGuard(pagep, cnoOfPage, page);
-
-//--------------------------------------------------------------------
-// Read Primary Key Values
-//--------------------------------------------------------------------
- int ret= readAttributes(pagep.p,
- tupheadoffset,
- &tableDescriptor[regTabPtr->readKeyArray].tabDescr,
- regTabPtr->noOfKeyAttr,
- keyBuffer,
- ZATTR_BUFFER_SIZE,
- false);
- ndbrequire(ret != -1);
- noPrimKey= ret;
-
- Uint32 numAttrsToRead;
- if ((regOperPtr->optype == ZUPDATE) &&
- (trigPtr->sendOnlyChangedAttributes)) {
- ljam();
-//--------------------------------------------------------------------
-// Update that sends only changed information
-//--------------------------------------------------------------------
- Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask;
- attributeMask = trigPtr->attributeMask;
- attributeMask.bitAND(regOperPtr->changeMask);
- numAttrsToRead = setAttrIds(attributeMask, regTabPtr->noOfAttr, &readBuffer[0]);
-
- } else if ((regOperPtr->optype == ZDELETE) &&
- (!trigPtr->sendBeforeValues)) {
- ljam();
-//--------------------------------------------------------------------
-// Delete without sending before values only read Primary Key
-//--------------------------------------------------------------------
- return true;
- } else {
- ljam();
-//--------------------------------------------------------------------
-// All others send all attributes that are monitored
-//--------------------------------------------------------------------
- numAttrsToRead = setAttrIds(trigPtr->attributeMask, regTabPtr->noOfAttr, &readBuffer[0]);
- }//if
- ndbrequire(numAttrsToRead < MAX_ATTRIBUTES_IN_TABLE);
-//--------------------------------------------------------------------
-// Read Main tuple values
-//--------------------------------------------------------------------
- if ((regOperPtr->optype != ZDELETE) ||
- (trigPtr->sendBeforeValues)) {
- ljam();
- int ret= readAttributes(pagep.p,
- tupheadoffset,
- &readBuffer[0],
- numAttrsToRead,
- mainBuffer,
- ZATTR_BUFFER_SIZE,
- false);
- ndbrequire(ret != -1);
- noMainWords= ret;
- } else {
- ljam();
- noMainWords = 0;
- }//if
-//--------------------------------------------------------------------
-// Read Copy tuple values for UPDATE's
-//--------------------------------------------------------------------
-// Initialise pagep and tuple offset for read of copy tuple
-//--------------------------------------------------------------------
- if ((regOperPtr->optype == ZUPDATE) &&
- (trigPtr->sendBeforeValues)) {
- ljam();
-
- tupheadoffset = regOperPtr->pageOffsetC;
- pagep.i = regOperPtr->realPageIdC;
- ptrCheckGuard(pagep, cnoOfPage, page);
-
- int ret= readAttributes(pagep.p,
- tupheadoffset,
- &readBuffer[0],
- numAttrsToRead,
- copyBuffer,
- ZATTR_BUFFER_SIZE,
- false);
-
- ndbrequire(ret != -1);
- noCopyWords = ret;
- if ((noMainWords == noCopyWords) &&
- (memcmp(mainBuffer, copyBuffer, noMainWords << 2) == 0)) {
-//--------------------------------------------------------------------
-// Although a trigger was fired it was not necessary since the old
-// value and the new value were exactly the same
-//--------------------------------------------------------------------
- ljam();
- return false;
- }//if
- }//if
- return true;
-}//Dbtup::readTriggerInfo()
-
-void Dbtup::sendTrigAttrInfo(Signal* signal,
- Uint32* data,
- Uint32 dataLen,
- bool executeDirect,
- BlockReference receiverReference)
-{
- TrigAttrInfo* const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtrSend();
- Uint32 sigLen;
- Uint32 dataIndex = 0;
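- // Ship the attribute data in TRIG_ATTRINFO-sized chunks, either via
- // EXECUTE_DIRECT or as ordinary signals to the receiver reference.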
- do {
- sigLen = dataLen - dataIndex;
- if (sigLen > TrigAttrInfo::DataLength) {
- ljam();
- sigLen = TrigAttrInfo::DataLength;
- }//if
- MEMCOPY_NO_WORDS(trigAttrInfo->getData(),
- data + dataIndex,
- sigLen);
- if (executeDirect) {
- ljam();
- EXECUTE_DIRECT(receiverReference,
- GSN_TRIG_ATTRINFO,
- signal,
- TrigAttrInfo::StaticLength + sigLen);
- ljamEntry();
- } else {
- ljam();
- sendSignal(receiverReference,
- GSN_TRIG_ATTRINFO,
- signal,
- TrigAttrInfo::StaticLength + sigLen,
- JBB);
- }//if
- dataIndex += sigLen;
- } while (dataLen != dataIndex);
-}//Dbtup::sendTrigAttrInfo()
-
-void Dbtup::sendFireTrigOrd(Signal* signal,
- Operationrec * const regOperPtr,
- TupTriggerData* const trigPtr,
- Uint32 noPrimKeyWords,
- Uint32 noBeforeValueWords,
- Uint32 noAfterValueWords)
-{
- FireTrigOrd* const fireTrigOrd = (FireTrigOrd *)signal->getDataPtrSend();
-
- fireTrigOrd->setConnectionPtr(regOperPtr->tcOpIndex);
- fireTrigOrd->setTriggerId(trigPtr->triggerId);
-
- switch(regOperPtr->optype) {
- case(ZINSERT):
- ljam();
- fireTrigOrd->setTriggerEvent(TriggerEvent::TE_INSERT);
- break;
- case(ZDELETE):
- ljam();
- fireTrigOrd->setTriggerEvent(TriggerEvent::TE_DELETE);
- break;
- case(ZUPDATE):
- ljam();
- fireTrigOrd->setTriggerEvent(TriggerEvent::TE_UPDATE);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
-
- fireTrigOrd->setNoOfPrimaryKeyWords(noPrimKeyWords);
- fireTrigOrd->setNoOfBeforeValueWords(noBeforeValueWords);
- fireTrigOrd->setNoOfAfterValueWords(noAfterValueWords);
-
- switch(trigPtr->triggerType) {
- case (TriggerType::SECONDARY_INDEX):
- ljam();
- sendSignal(regOperPtr->coordinatorTC, GSN_FIRE_TRIG_ORD,
- signal, FireTrigOrd::SignalLength, JBB);
- break;
- case (TriggerType::SUBSCRIPTION_BEFORE): // Only Suma
- ljam();
- // Since only backup uses subscription triggers we
- // send to backup directly for now
- fireTrigOrd->setGCI(regOperPtr->gci);
- fireTrigOrd->setHashValue(regOperPtr->hashValue);
- EXECUTE_DIRECT(trigPtr->m_receiverBlock,
- GSN_FIRE_TRIG_ORD,
- signal,
- FireTrigOrd::SignalWithHashValueLength);
- break;
- case (TriggerType::SUBSCRIPTION):
- ljam();
- // Since only backup uses subscription triggers we
- // send to backup directly for now
- fireTrigOrd->setGCI(regOperPtr->gci);
- EXECUTE_DIRECT(trigPtr->m_receiverBlock,
- GSN_FIRE_TRIG_ORD,
- signal,
- FireTrigOrd::SignalWithGCILength);
- break;
- default:
- ndbrequire(false);
- break;
- }//switch
-}//Dbtup::sendFireTrigOrd()
-
-/*
- * Ordered index triggers.
- *
- * Insert: add entry to index
- * Update: add entry to index, delay remove until commit
- * Delete: do nothing, delay remove until commit
- * Commit: remove entry delayed from update and delete
- * Abort : remove entry added by insert and update
- *
- * See Notes.txt for the details.
- */
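The lifecycle above can be illustrated with a minimal standalone C++ sketch; the toy types below (ToyIndex, pendingRemove, pendingUndo) are purely illustrative and do not appear anywhere in the NDB sources.

#include <set>
#include <vector>
#include <cstdint>
#include <cassert>

// Toy model of the documented lifecycle: add on insert/update, delay the
// remove of the old entry until commit, undo additions on abort.
struct ToyIndex {
  std::set<uint64_t> entries;            // currently visible index entries
  std::vector<uint64_t> pendingRemove;   // removals delayed until commit
  std::vector<uint64_t> pendingUndo;     // additions to take back on abort

  void onInsert(uint64_t key) {                      // Insert: add entry now
    entries.insert(key); pendingUndo.push_back(key);
  }
  void onUpdate(uint64_t oldKey, uint64_t newKey) {  // Update: add new, delay old
    entries.insert(newKey); pendingUndo.push_back(newKey);
    pendingRemove.push_back(oldKey);
  }
  void onDelete(uint64_t key) {                      // Delete: nothing yet
    pendingRemove.push_back(key);
  }
  void onCommit() {                                  // Commit: apply delayed removes
    for (uint64_t k : pendingRemove) entries.erase(k);
    pendingRemove.clear(); pendingUndo.clear();
  }
  void onAbort() {                                   // Abort: take back additions
    for (uint64_t k : pendingUndo) entries.erase(k);
    pendingUndo.clear(); pendingRemove.clear();
  }
};

int main() {
  ToyIndex ix;
  ix.onInsert(1);
  ix.onUpdate(1, 2);                       // old key 1 stays visible until commit
  assert(ix.entries.count(1) && ix.entries.count(2));
  ix.onCommit();
  assert(!ix.entries.count(1) && ix.entries.count(2));
  return 0;
}

The point of the pairing is that an aborted transaction must leave the index exactly as it found it, while a committed one must not remove the entry it just added.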
-
-int
-Dbtup::executeTuxInsertTriggers(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTabPtr)
-{
- TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
- PagePtr pagePtr;
- pagePtr.i = regOperPtr->realPageId;
- ptrCheckGuard(pagePtr, cnoOfPage, page);
- Uint32 tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffset + 1];
- ndbrequire(tupVersion == regOperPtr->tupVersion);
- // fill in constant part
- req->tableId = regOperPtr->tableRef;
- req->fragId = regOperPtr->fragId;
- req->pageId = regOperPtr->realPageId;
- req->pageOffset = regOperPtr->pageOffset;
- req->tupVersion = tupVersion;
- req->opInfo = TuxMaintReq::OpAdd;
- return addTuxEntries(signal, regOperPtr, regTabPtr);
-}
-
-int
-Dbtup::executeTuxUpdateTriggers(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTabPtr)
-{
- TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
- PagePtr pagePtr;
- pagePtr.i = regOperPtr->realPageId;
- ptrCheckGuard(pagePtr, cnoOfPage, page);
- Uint32 tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffset + 1];
- ndbrequire(tupVersion == regOperPtr->tupVersion);
- // fill in constant part
- req->tableId = regOperPtr->tableRef;
- req->fragId = regOperPtr->fragId;
- req->pageId = regOperPtr->realPageId;
- req->pageOffset = regOperPtr->pageOffset;
- req->tupVersion = tupVersion;
- req->opInfo = TuxMaintReq::OpAdd;
- return addTuxEntries(signal, regOperPtr, regTabPtr);
-}
-
-int
-Dbtup::addTuxEntries(Signal* signal,
- Operationrec* regOperPtr,
- Tablerec* regTabPtr)
-{
- TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
- const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
- TriggerPtr triggerPtr;
- Uint32 failPtrI;
- triggerList.first(triggerPtr);
- while (triggerPtr.i != RNIL) {
- ljam();
- req->indexId = triggerPtr.p->indexId;
- req->errorCode = RNIL;
- EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
- signal, TuxMaintReq::SignalLength);
- ljamEntry();
- if (req->errorCode != 0) {
- ljam();
- terrorCode = req->errorCode;
- failPtrI = triggerPtr.i;
- goto fail;
- }
- triggerList.next(triggerPtr);
- }
- return 0;
-fail:
- req->opInfo = TuxMaintReq::OpRemove;
- triggerList.first(triggerPtr);
- while (triggerPtr.i != failPtrI) {
- ljam();
- req->indexId = triggerPtr.p->indexId;
- req->errorCode = RNIL;
- EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
- signal, TuxMaintReq::SignalLength);
- ljamEntry();
- ndbrequire(req->errorCode == 0);
- triggerList.next(triggerPtr);
- }
-#ifdef VM_TRACE
- ndbout << "aborted partial tux update: op " << hex << regOperPtr << endl;
-#endif
- return -1;
-}
-
-int
-Dbtup::executeTuxDeleteTriggers(Signal* signal,
- Operationrec* const regOperPtr,
- Tablerec* const regTabPtr)
-{
- // do nothing
- return 0;
-}
-
-void
-Dbtup::executeTuxCommitTriggers(Signal* signal,
- Operationrec* regOperPtr,
- Tablerec* const regTabPtr)
-{
- TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
- // get version
- Uint32 tupVersion;
- if (regOperPtr->optype == ZINSERT) {
- if (! regOperPtr->deleteInsertFlag)
- return;
- ljam();
- PagePtr pagePtr;
- pagePtr.i = regOperPtr->realPageIdC;
- ptrCheckGuard(pagePtr, cnoOfPage, page);
- tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffsetC + 1];
- ndbrequire(tupVersion != regOperPtr->tupVersion);
- } else if (regOperPtr->optype == ZUPDATE) {
- ljam();
- PagePtr pagePtr;
- pagePtr.i = regOperPtr->realPageIdC;
- ptrCheckGuard(pagePtr, cnoOfPage, page);
- tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffsetC + 1];
- ndbrequire(tupVersion != regOperPtr->tupVersion);
- } else if (regOperPtr->optype == ZDELETE) {
- if (regOperPtr->deleteInsertFlag)
- return;
- ljam();
- PagePtr pagePtr;
- pagePtr.i = regOperPtr->realPageId;
- ptrCheckGuard(pagePtr, cnoOfPage, page);
- tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffset + 1];
- ndbrequire(tupVersion == regOperPtr->tupVersion);
- } else {
- ndbrequire(false);
- tupVersion= 0; // remove warning
- }
- // fill in constant part
- req->tableId = regOperPtr->tableRef;
- req->fragId = regOperPtr->fragId;
- req->pageId = regOperPtr->realPageId;
- req->pageOffset = regOperPtr->pageOffset;
- req->tupVersion = tupVersion;
- req->opInfo = TuxMaintReq::OpRemove;
- removeTuxEntries(signal, regOperPtr, regTabPtr);
-}
-
-void
-Dbtup::executeTuxAbortTriggers(Signal* signal,
- Operationrec* regOperPtr,
- Tablerec* const regTabPtr)
-{
- TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
- // get version
- Uint32 tupVersion;
- if (regOperPtr->optype == ZINSERT) {
- ljam();
- tupVersion = regOperPtr->tupVersion;
- } else if (regOperPtr->optype == ZUPDATE) {
- ljam();
- tupVersion = regOperPtr->tupVersion;
- } else if (regOperPtr->optype == ZDELETE) {
- ljam();
- return;
- } else {
- ndbrequire(false);
- tupVersion= 0; // remove warning
- }
- // fill in constant part
- req->tableId = regOperPtr->tableRef;
- req->fragId = regOperPtr->fragId;
- req->pageId = regOperPtr->realPageId;
- req->pageOffset = regOperPtr->pageOffset;
- req->tupVersion = tupVersion;
- req->opInfo = TuxMaintReq::OpRemove;
- removeTuxEntries(signal, regOperPtr, regTabPtr);
-}
-
-void
-Dbtup::removeTuxEntries(Signal* signal,
- Operationrec* regOperPtr,
- Tablerec* regTabPtr)
-{
- TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
- const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
- TriggerPtr triggerPtr;
- triggerList.first(triggerPtr);
- while (triggerPtr.i != RNIL) {
- ljam();
- req->indexId = triggerPtr.p->indexId;
-    req->errorCode = RNIL;
- EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
- signal, TuxMaintReq::SignalLength);
- ljamEntry();
- // must succeed
- ndbrequire(req->errorCode == 0);
- triggerList.next(triggerPtr);
- }
-}
diff --git a/ndb/src/kernel/blocks/dbtup/Makefile.am b/ndb/src/kernel/blocks/dbtup/Makefile.am
deleted file mode 100644
index 2d14ad41025..00000000000
--- a/ndb/src/kernel/blocks/dbtup/Makefile.am
+++ /dev/null
@@ -1,42 +0,0 @@
-noinst_LIBRARIES = libdbtup.a
-
-libdbtup_a_SOURCES = \
- DbtupExecQuery.cpp \
- DbtupBuffer.cpp \
- DbtupRoutines.cpp \
- DbtupCommit.cpp \
- DbtupFixAlloc.cpp \
- DbtupTrigger.cpp \
- DbtupAbort.cpp \
- DbtupLCP.cpp \
- DbtupUndoLog.cpp \
- DbtupPageMap.cpp \
- DbtupPagMan.cpp \
- DbtupStoredProcDef.cpp \
- DbtupMeta.cpp \
- DbtupTabDesMan.cpp \
- DbtupGen.cpp \
- DbtupSystemRestart.cpp \
- DbtupIndex.cpp \
- DbtupScan.cpp \
- DbtupDebug.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libdbtup.dsp
-
-libdbtup.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libdbtup_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbtux/Makefile.am b/ndb/src/kernel/blocks/dbtux/Makefile.am
deleted file mode 100644
index b5951e8ed37..00000000000
--- a/ndb/src/kernel/blocks/dbtux/Makefile.am
+++ /dev/null
@@ -1,34 +0,0 @@
-noinst_LIBRARIES = libdbtux.a
-
-libdbtux_a_SOURCES = \
- DbtuxGen.cpp \
- DbtuxMeta.cpp \
- DbtuxMaint.cpp \
- DbtuxNode.cpp \
- DbtuxTree.cpp \
- DbtuxScan.cpp \
- DbtuxSearch.cpp \
- DbtuxCmp.cpp \
- DbtuxDebug.cpp
-
-INCLUDES_LOC = -I$(top_srcdir)/ndb/src/kernel/blocks/dbtup
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libdbtux.dsp
-
-libdbtux.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libdbtux_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbutil/Makefile.am b/ndb/src/kernel/blocks/dbutil/Makefile.am
deleted file mode 100644
index 925356c2f76..00000000000
--- a/ndb/src/kernel/blocks/dbutil/Makefile.am
+++ /dev/null
@@ -1,23 +0,0 @@
-noinst_LIBRARIES = libdbutil.a
-
-libdbutil_a_SOURCES = DbUtil.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libdbutil.dsp
-
-libdbutil.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libdbutil_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/grep/Makefile.am b/ndb/src/kernel/blocks/grep/Makefile.am
deleted file mode 100644
index 6d2b422784b..00000000000
--- a/ndb/src/kernel/blocks/grep/Makefile.am
+++ /dev/null
@@ -1,23 +0,0 @@
-noinst_LIBRARIES = libgrep.a
-
-libgrep_a_SOURCES = Grep.cpp GrepInit.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libgrep.dsp
-
-libgrep.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libgrep_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/ndbcntr/Makefile.am b/ndb/src/kernel/blocks/ndbcntr/Makefile.am
deleted file mode 100644
index 3f24675b2b3..00000000000
--- a/ndb/src/kernel/blocks/ndbcntr/Makefile.am
+++ /dev/null
@@ -1,26 +0,0 @@
-noinst_LIBRARIES = libndbcntr.a
-
-libndbcntr_a_SOURCES = \
- NdbcntrInit.cpp \
- NdbcntrSysTable.cpp \
- NdbcntrMain.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libndbcntr.dsp
-
-libndbcntr.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libndbcntr_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/ndbfs/Makefile.am b/ndb/src/kernel/blocks/ndbfs/Makefile.am
deleted file mode 100644
index a22386f8612..00000000000
--- a/ndb/src/kernel/blocks/ndbfs/Makefile.am
+++ /dev/null
@@ -1,27 +0,0 @@
-noinst_LIBRARIES = libndbfs.a
-
-libndbfs_a_SOURCES = \
- AsyncFile.cpp \
- Ndbfs.cpp VoidFs.cpp \
- Filename.cpp \
- CircularIndex.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libndbfs.dsp
-
-libndbfs.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libndbfs_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/qmgr/Makefile.am b/ndb/src/kernel/blocks/qmgr/Makefile.am
deleted file mode 100644
index 278af2a7865..00000000000
--- a/ndb/src/kernel/blocks/qmgr/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-noinst_LIBRARIES = libqmgr.a
-
-libqmgr_a_SOURCES = \
- QmgrInit.cpp \
- QmgrMain.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libqmgr.dsp
-
-libqmgr.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libqmgr_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/suma/Makefile.am b/ndb/src/kernel/blocks/suma/Makefile.am
deleted file mode 100644
index 5a74dbb74eb..00000000000
--- a/ndb/src/kernel/blocks/suma/Makefile.am
+++ /dev/null
@@ -1,23 +0,0 @@
-noinst_LIBRARIES = libsuma.a
-
-libsuma_a_SOURCES = Suma.cpp SumaInit.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libsuma.dsp
-
-libsuma.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libsuma_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/trix/Makefile.am b/ndb/src/kernel/blocks/trix/Makefile.am
deleted file mode 100644
index 343063a6283..00000000000
--- a/ndb/src/kernel/blocks/trix/Makefile.am
+++ /dev/null
@@ -1,23 +0,0 @@
-noinst_LIBRARIES = libtrix.a
-
-libtrix_a_SOURCES = Trix.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libtrix.dsp
-
-libtrix.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libtrix_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/error/Makefile.am b/ndb/src/kernel/error/Makefile.am
deleted file mode 100644
index 54f3de2d76d..00000000000
--- a/ndb/src/kernel/error/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-noinst_LIBRARIES = liberror.a
-
-liberror_a_SOURCES = TimeModule.cpp \
- ErrorReporter.cpp \
- ErrorMessages.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: liberror.dsp
-
-liberror.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(liberror_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/vm/Makefile.am b/ndb/src/kernel/vm/Makefile.am
deleted file mode 100644
index d9e57ce9dd6..00000000000
--- a/ndb/src/kernel/vm/Makefile.am
+++ /dev/null
@@ -1,44 +0,0 @@
-#SUBDIRS = testCopy testDataBuffer testSimplePropertiesSection
-#ifneq ($(USE_EDITLINE), N)
-#DIRS += testLongSig
-#endif
-
-noinst_LIBRARIES = libkernel.a
-
-libkernel_a_SOURCES = \
- SimulatedBlock.cpp \
- FastScheduler.cpp \
- TimeQueue.cpp \
- VMSignal.cpp \
- ThreadConfig.cpp \
- TransporterCallback.cpp \
- Emulator.cpp \
- Configuration.cpp \
- WatchDog.cpp \
- SimplePropertiesSection.cpp \
- SectionReader.cpp \
- MetaData.cpp \
- Mutex.cpp SafeCounter.cpp \
- SuperPool.cpp
-
-INCLUDES_LOC = -I$(top_srcdir)/ndb/src/mgmapi
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_kernel.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libkernel.dsp
-
-libkernel.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libkernel_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/mgmapi/Makefile.am b/ndb/src/mgmapi/Makefile.am
deleted file mode 100644
index db730bf8c89..00000000000
--- a/ndb/src/mgmapi/Makefile.am
+++ /dev/null
@@ -1,30 +0,0 @@
-
-noinst_LTLIBRARIES = libmgmapi.la
-
-libmgmapi_la_SOURCES = mgmapi.cpp ndb_logevent.cpp mgmapi_configuration.cpp LocalConfig.cpp
-
-INCLUDES_LOC = -I$(top_srcdir)/ndb/include/mgmapi
-
-DEFS_LOC = -DNO_DEBUG_MESSAGES -DNDB_PORT="\"@ndb_port@\""
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_util.mk.am
-
-#ndbtest_PROGRAMS = ndb_test_mgmapi
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libmgmapi.dsp
-
-libmgmapi.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libmgmapi_la_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/mgmclient/Makefile.am b/ndb/src/mgmclient/Makefile.am
deleted file mode 100644
index c63e8d1bff8..00000000000
--- a/ndb/src/mgmclient/Makefile.am
+++ /dev/null
@@ -1,58 +0,0 @@
-
-noinst_LTLIBRARIES = libndbmgmclient.la
-ndbtools_PROGRAMS = ndb_mgm
-
-libndbmgmclient_la_SOURCES = CommandInterpreter.cpp
-libndbmgmclient_la_LIBADD = ../mgmapi/libmgmapi.la \
- ../common/logger/liblogger.la \
- ../common/portlib/libportlib.la \
- ../common/util/libgeneral.la \
- ../common/portlib/libportlib.la
-
-
-ndb_mgm_SOURCES = main.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_ndbapi.mk.am
-
-INCLUDES += -I$(top_srcdir)/ndb/include/mgmapi \
- -I$(top_srcdir)/ndb/src/common/mgmcommon
-
-LDADD_LOC = $(noinst_LTLIBRARIES) \
- ../common/portlib/libportlib.la \
- @readline_link@ \
- $(top_builddir)/dbug/libdbug.a \
- $(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a \
- @TERMCAP_LIB@ @NDB_SCI_LIBS@
-
-ndb_mgm_LDFLAGS = @ndb_bin_am_ldflags@
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: ndb_mgm.dsp libndbmgmclient.dsp
-
-ndb_mgm.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(ndbtools_PROGRAMS)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(ndb_mgm_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
-
-libndbmgmclient.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libndbmgmclient_la_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB
diff --git a/ndb/src/mgmsrv/Makefile.am b/ndb/src/mgmsrv/Makefile.am
deleted file mode 100644
index 7fd3fa66b43..00000000000
--- a/ndb/src/mgmsrv/Makefile.am
+++ /dev/null
@@ -1,60 +0,0 @@
-MYSQLDATAdir = $(localstatedir)
-MYSQLSHAREdir = $(pkgdatadir)
-MYSQLBASEdir= $(prefix)
-#MYSQLCLUSTERdir= $(prefix)/mysql-cluster
-MYSQLCLUSTERdir= .
-
-ndbbin_PROGRAMS = ndb_mgmd
-
-ndb_mgmd_SOURCES = \
- MgmtSrvr.cpp \
- MgmtSrvrGeneralSignalHandling.cpp \
- main.cpp \
- Services.cpp \
- convertStrToInt.cpp \
- SignalQueue.cpp \
- MgmtSrvrConfig.cpp \
- ConfigInfo.cpp \
- InitConfigFileParser.cpp \
- Config.cpp
-
-INCLUDES_LOC = -I$(top_srcdir)/ndb/src/ndbapi \
- -I$(top_srcdir)/ndb/src/mgmapi \
- -I$(top_srcdir)/ndb/src/common/mgmcommon \
- -I$(top_srcdir)/ndb/src/mgmclient
-
-LDADD_LOC = $(top_srcdir)/ndb/src/mgmclient/CommandInterpreter.o \
- $(top_builddir)/ndb/src/libndbclient.la \
- $(top_builddir)/dbug/libdbug.a \
- $(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a \
- @readline_link@ \
- @NDB_SCI_LIBS@ \
- @TERMCAP_LIB@
-
-DEFS_LOC = -DDEFAULT_MYSQL_HOME="\"$(MYSQLBASEdir)\"" \
- -DDATADIR="\"$(MYSQLDATAdir)\"" \
- -DSHAREDIR="\"$(MYSQLSHAREdir)\"" \
- -DMYSQLCLUSTERDIR="\"$(MYSQLCLUSTERdir)\""
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_ndbapi.mk.am
-
-ndb_mgmd_LDFLAGS = @ndb_bin_am_ldflags@
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: ndb_mgmd.dsp
-
-ndb_mgmd.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(ndbbin_PROGRAMS)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(ndb_mgmd_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
diff --git a/ndb/src/ndbapi/Makefile.am b/ndb/src/ndbapi/Makefile.am
deleted file mode 100644
index b734e058b87..00000000000
--- a/ndb/src/ndbapi/Makefile.am
+++ /dev/null
@@ -1,62 +0,0 @@
-#SUBDIRS = signal-sender
-
-noinst_LTLIBRARIES = libndbapi.la
-
-libndbapi_la_SOURCES = \
- TransporterFacade.cpp \
- ClusterMgr.cpp \
- Ndb.cpp \
- NdbPoolImpl.cpp \
- NdbPool.cpp \
- Ndblist.cpp \
- Ndbif.cpp \
- Ndbinit.cpp \
- Ndberr.cpp \
- ndberror.c \
- NdbErrorOut.cpp \
- NdbTransaction.cpp \
- NdbTransactionScan.cpp \
- NdbOperation.cpp \
- NdbOperationSearch.cpp \
- NdbOperationScan.cpp \
- NdbOperationInt.cpp \
- NdbOperationDefine.cpp \
- NdbOperationExec.cpp \
- NdbScanOperation.cpp NdbScanFilter.cpp \
- NdbIndexOperation.cpp \
- NdbEventOperation.cpp \
- NdbEventOperationImpl.cpp \
- NdbApiSignal.cpp \
- NdbRecAttr.cpp \
- NdbUtil.cpp \
- NdbReceiver.cpp \
- NdbDictionary.cpp \
- NdbDictionaryImpl.cpp \
- DictCache.cpp \
- ndb_cluster_connection.cpp \
- NdbBlob.cpp
-
-INCLUDES_LOC = -I$(top_srcdir)/ndb/src/mgmapi
-
-# Ndbapi cannot handle -O3
-NDB_CXXFLAGS_RELEASE_LOC = -O2
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_ndbapi.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libndbapi.dsp
-
-libndbapi.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libndbapi_la_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/ndbapi/NdbBlob.cpp b/ndb/src/ndbapi/NdbBlob.cpp
deleted file mode 100644
index d06d6b4ef4d..00000000000
--- a/ndb/src/ndbapi/NdbBlob.cpp
+++ /dev/null
@@ -1,1589 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include <Ndb.hpp>
-#include <NdbDictionaryImpl.hpp>
-#include <NdbTransaction.hpp>
-#include <NdbOperation.hpp>
-#include <NdbIndexOperation.hpp>
-#include <NdbRecAttr.hpp>
-#include <NdbBlob.hpp>
-#include "NdbBlobImpl.hpp"
-#include <NdbScanOperation.hpp>
-
-/*
- * Reading the index table directly (as a table) is faster but there are
- * bugs or limitations. Keep the code and make it possible to choose.
- */
-static const bool g_ndb_blob_ok_to_read_index_table = false;
-
-// state (inline)
-
-inline void
-NdbBlob::setState(State newState)
-{
- DBUG_ENTER("NdbBlob::setState");
- DBUG_PRINT("info", ("this=%p newState=%u", this, newState));
- theState = newState;
- DBUG_VOID_RETURN;
-}
-
-// define blob table
-
-int
-NdbBlob::getBlobTableName(char* btname, Ndb* anNdb, const char* tableName, const char* columnName)
-{
- NdbTableImpl* t = anNdb->theDictionary->m_impl.getTable(tableName);
- if (t == NULL)
- return -1;
- NdbColumnImpl* c = t->getColumn(columnName);
- if (c == NULL)
- return -1;
- getBlobTableName(btname, t, c);
- return 0;
-}
-
-void
-NdbBlob::getBlobTableName(char* btname, const NdbTableImpl* t, const NdbColumnImpl* c)
-{
- assert(t != 0 && c != 0 && c->getBlobType());
- memset(btname, 0, NdbBlobImpl::BlobTableNameSize);
- sprintf(btname, "NDB$BLOB_%d_%d", (int)t->m_tableId, (int)c->m_attrId);
-}
-
-void
-NdbBlob::getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnImpl* c)
-{
- char btname[NdbBlobImpl::BlobTableNameSize];
- getBlobTableName(btname, t, c);
- bt.setName(btname);
- bt.setLogging(t->getLogging());
- bt.setFragmentType(t->getFragmentType());
- { NdbDictionary::Column bc("PK");
- bc.setType(NdbDictionary::Column::Unsigned);
- assert(t->m_keyLenInWords != 0);
- bc.setLength(t->m_keyLenInWords);
- bc.setPrimaryKey(true);
- bc.setDistributionKey(true);
- bt.addColumn(bc);
- }
- { NdbDictionary::Column bc("DIST");
- bc.setType(NdbDictionary::Column::Unsigned);
- bc.setPrimaryKey(true);
- bc.setDistributionKey(true);
- bt.addColumn(bc);
- }
- { NdbDictionary::Column bc("PART");
- bc.setType(NdbDictionary::Column::Unsigned);
- bc.setPrimaryKey(true);
- bc.setDistributionKey(false);
- bt.addColumn(bc);
- }
- { NdbDictionary::Column bc("DATA");
- switch (c->m_type) {
- case NdbDictionary::Column::Blob:
- bc.setType(NdbDictionary::Column::Binary);
- break;
- case NdbDictionary::Column::Text:
- bc.setType(NdbDictionary::Column::Char);
- break;
- default:
- assert(false);
- break;
- }
- bc.setLength(c->getPartSize());
- bt.addColumn(bc);
- }
-}
-
-// initialization
-
-NdbBlob::NdbBlob()
-{
- init();
-}
-
-void
-NdbBlob::init()
-{
- theState = Idle;
- theNdb = NULL;
- theNdbCon = NULL;
- theNdbOp = NULL;
- theTable = NULL;
- theAccessTable = NULL;
- theBlobTable = NULL;
- theColumn = NULL;
- theFillChar = 0;
- theInlineSize = 0;
- thePartSize = 0;
- theStripeSize = 0;
- theGetFlag = false;
- theGetBuf = NULL;
- theSetFlag = false;
- theSetBuf = NULL;
- theGetSetBytes = 0;
- thePendingBlobOps = 0;
- theActiveHook = NULL;
- theActiveHookArg = NULL;
- theHead = NULL;
- theInlineData = NULL;
- theHeadInlineRecAttr = NULL;
- theHeadInlineReadOp = NULL;
- theHeadInlineUpdateFlag = false;
- theNullFlag = -1;
- theLength = 0;
- thePos = 0;
- theNext = NULL;
-}
-
-void
-NdbBlob::release()
-{
- setState(Idle);
-}
-
-// buffers
-
-NdbBlob::Buf::Buf() :
- data(NULL),
- size(0),
- maxsize(0)
-{
-}
-
-NdbBlob::Buf::~Buf()
-{
- delete [] data;
-}
-
-void
-NdbBlob::Buf::alloc(unsigned n)
-{
- size = n;
- if (maxsize < n) {
- delete [] data;
- // align to Uint64
- if (n % 8 != 0)
- n += 8 - n % 8;
- data = new char [n];
- maxsize = n;
- }
-#ifdef VM_TRACE
- memset(data, 'X', maxsize);
-#endif
-}
-
-void
-NdbBlob::Buf::copyfrom(const NdbBlob::Buf& src)
-{
- assert(size == src.size);
- memcpy(data, src.data, size);
-}
-
-// classify operations (inline)
-
-inline bool
-NdbBlob::isTableOp()
-{
- return theTable == theAccessTable;
-}
-
-inline bool
-NdbBlob::isIndexOp()
-{
- return theTable != theAccessTable;
-}
-
-inline bool
-NdbBlob::isKeyOp()
-{
- return
- theNdbOp->theOperationType == NdbOperation::InsertRequest ||
- theNdbOp->theOperationType == NdbOperation::UpdateRequest ||
- theNdbOp->theOperationType == NdbOperation::WriteRequest ||
- theNdbOp->theOperationType == NdbOperation::ReadRequest ||
- theNdbOp->theOperationType == NdbOperation::ReadExclusive ||
- theNdbOp->theOperationType == NdbOperation::DeleteRequest;
-}
-
-inline bool
-NdbBlob::isReadOp()
-{
- return
- theNdbOp->theOperationType == NdbOperation::ReadRequest ||
- theNdbOp->theOperationType == NdbOperation::ReadExclusive;
-}
-
-inline bool
-NdbBlob::isInsertOp()
-{
- return
- theNdbOp->theOperationType == NdbOperation::InsertRequest;
-}
-
-inline bool
-NdbBlob::isUpdateOp()
-{
- return
- theNdbOp->theOperationType == NdbOperation::UpdateRequest;
-}
-
-inline bool
-NdbBlob::isWriteOp()
-{
- return
- theNdbOp->theOperationType == NdbOperation::WriteRequest;
-}
-
-inline bool
-NdbBlob::isDeleteOp()
-{
- return
- theNdbOp->theOperationType == NdbOperation::DeleteRequest;
-}
-
-inline bool
-NdbBlob::isScanOp()
-{
- return
- theNdbOp->theOperationType == NdbOperation::OpenScanRequest ||
- theNdbOp->theOperationType == NdbOperation::OpenRangeScanRequest;
-}
-
-// computations (inline)
-
-inline Uint32
-NdbBlob::getPartNumber(Uint64 pos)
-{
- assert(thePartSize != 0 && pos >= theInlineSize);
- return (pos - theInlineSize) / thePartSize;
-}
-
-inline Uint32
-NdbBlob::getPartCount()
-{
- if (theLength <= theInlineSize)
- return 0;
- return 1 + getPartNumber(theLength - 1);
-}
-
-inline Uint32
-NdbBlob::getDistKey(Uint32 part)
-{
- assert(theStripeSize != 0);
- return (part / theStripeSize) % theStripeSize;
-}
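As a hedged worked example of the arithmetic above (the sizes 256/2000/4 are assumed for illustration, not defaults taken from this code): a byte position of 5000 falls in part (5000 - 256) / 2000 = 2, a blob of length 5000 spans 1 + getPartNumber(4999) = 3 parts, and with a stripe size of 4, parts 0..3 map to distribution key 0.

#include <cstdint>
#include <cassert>

int main() {
  const uint64_t inlineSize = 256, partSize = 2000, stripeSize = 4;  // assumed sizes
  auto partNumber = [&](uint64_t pos)  { return (pos - inlineSize) / partSize; };
  auto partCount  = [&](uint64_t len)  { return len <= inlineSize ? 0 : 1 + partNumber(len - 1); };
  auto distKey    = [&](uint64_t part) { return (part / stripeSize) % stripeSize; };

  assert(partNumber(5000) == 2);  // byte 5000 lies in the third part (0-based part 2)
  assert(partCount(5000) == 3);   // 256 inline bytes plus 3 parts cover 5000 bytes
  assert(distKey(2) == 0);        // parts 0..3 map to distribution key 0
  assert(distKey(5) == 1);        // parts 4..7 map to distribution key 1
  return 0;
}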
-
-// getters and setters
-
-int
-NdbBlob::getTableKeyValue(NdbOperation* anOp)
-{
- DBUG_ENTER("NdbBlob::getTableKeyValue");
- Uint32* data = (Uint32*)theKeyBuf.data;
- unsigned pos = 0;
- for (unsigned i = 0; i < theTable->m_columns.size(); i++) {
- NdbColumnImpl* c = theTable->m_columns[i];
- assert(c != NULL);
- if (c->m_pk) {
- unsigned len = c->m_attrSize * c->m_arraySize;
- if (anOp->getValue_impl(c, (char*)&data[pos]) == NULL) {
- setErrorCode(anOp);
- DBUG_RETURN(-1);
- }
- // odd bytes receive no data and must be zeroed
- while (len % 4 != 0) {
- char* p = (char*)&data[pos] + len++;
- *p = 0;
- }
- pos += len / 4;
- }
- }
- assert(pos == theKeyBuf.size / 4);
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::setTableKeyValue(NdbOperation* anOp)
-{
- DBUG_ENTER("NdbBlob::setTableKeyValue");
- DBUG_DUMP("info", theKeyBuf.data, 4 * theTable->m_keyLenInWords);
- const Uint32* data = (const Uint32*)theKeyBuf.data;
- const unsigned columns = theTable->m_columns.size();
- unsigned pos = 0;
- for (unsigned i = 0; i < columns; i++) {
- NdbColumnImpl* c = theTable->m_columns[i];
- assert(c != NULL);
- if (c->m_pk) {
- unsigned len = c->m_attrSize * c->m_arraySize;
- if (anOp->equal_impl(c, (const char*)&data[pos], len) == -1) {
- setErrorCode(anOp);
- DBUG_RETURN(-1);
- }
- pos += (len + 3) / 4;
- }
- }
- assert(pos == theKeyBuf.size / 4);
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::setAccessKeyValue(NdbOperation* anOp)
-{
- DBUG_ENTER("NdbBlob::setAccessKeyValue");
- DBUG_DUMP("info", theAccessKeyBuf.data, 4 * theAccessTable->m_keyLenInWords);
- const Uint32* data = (const Uint32*)theAccessKeyBuf.data;
- const unsigned columns = theAccessTable->m_columns.size();
- unsigned pos = 0;
- for (unsigned i = 0; i < columns; i++) {
- NdbColumnImpl* c = theAccessTable->m_columns[i];
- assert(c != NULL);
- if (c->m_pk) {
- unsigned len = c->m_attrSize * c->m_arraySize;
- if (anOp->equal_impl(c, (const char*)&data[pos], len) == -1) {
- setErrorCode(anOp);
- DBUG_RETURN(-1);
- }
- pos += (len + 3) / 4;
- }
- }
- assert(pos == theAccessKeyBuf.size / 4);
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::setPartKeyValue(NdbOperation* anOp, Uint32 part)
-{
- DBUG_ENTER("NdbBlob::setPartKeyValue");
- DBUG_PRINT("info", ("dist=%u part=%u key=", getDistKey(part), part));
- DBUG_DUMP("info", theKeyBuf.data, 4 * theTable->m_keyLenInWords);
- Uint32* data = (Uint32*)theKeyBuf.data;
- unsigned size = theTable->m_keyLenInWords;
-  // TODO use attr ids once compatibility with 4.1.7 is no longer needed
- if (anOp->equal("PK", theKeyBuf.data) == -1 ||
- anOp->equal("DIST", getDistKey(part)) == -1 ||
- anOp->equal("PART", part) == -1) {
- setErrorCode(anOp);
- DBUG_RETURN(-1);
- }
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::getHeadInlineValue(NdbOperation* anOp)
-{
- DBUG_ENTER("NdbBlob::getHeadInlineValue");
- theHeadInlineRecAttr = anOp->getValue_impl(theColumn, theHeadInlineBuf.data);
- if (theHeadInlineRecAttr == NULL) {
- setErrorCode(anOp);
- DBUG_RETURN(-1);
- }
- DBUG_RETURN(0);
-}
-
-void
-NdbBlob::getHeadFromRecAttr()
-{
- DBUG_ENTER("NdbBlob::getHeadFromRecAttr");
- assert(theHeadInlineRecAttr != NULL);
- theNullFlag = theHeadInlineRecAttr->isNULL();
- assert(theNullFlag != -1);
- theLength = ! theNullFlag ? theHead->length : 0;
- DBUG_VOID_RETURN;
-}
-
-int
-NdbBlob::setHeadInlineValue(NdbOperation* anOp)
-{
- DBUG_ENTER("NdbBlob::setHeadInlineValue");
- theHead->length = theLength;
- if (theLength < theInlineSize)
- memset(theInlineData + theLength, 0, theInlineSize - theLength);
- assert(theNullFlag != -1);
- const char* aValue = theNullFlag ? 0 : theHeadInlineBuf.data;
- if (anOp->setValue(theColumn, aValue, theHeadInlineBuf.size) == -1) {
- setErrorCode(anOp);
- DBUG_RETURN(-1);
- }
- theHeadInlineUpdateFlag = false;
- DBUG_RETURN(0);
-}
-
-// getValue/setValue
-
-int
-NdbBlob::getValue(void* data, Uint32 bytes)
-{
- DBUG_ENTER("NdbBlob::getValue");
- DBUG_PRINT("info", ("data=%p bytes=%u", data, bytes));
- if (theGetFlag || theState != Prepared) {
- setErrorCode(NdbBlobImpl::ErrState);
- DBUG_RETURN(-1);
- }
- if (! isReadOp() && ! isScanOp()) {
- setErrorCode(NdbBlobImpl::ErrUsage);
- DBUG_RETURN(-1);
- }
- if (data == NULL && bytes != 0) {
- setErrorCode(NdbBlobImpl::ErrUsage);
- DBUG_RETURN(-1);
- }
- theGetFlag = true;
- theGetBuf = static_cast<char*>(data);
- theGetSetBytes = bytes;
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::setValue(const void* data, Uint32 bytes)
-{
- DBUG_ENTER("NdbBlob::setValue");
- DBUG_PRINT("info", ("data=%p bytes=%u", data, bytes));
- if (theSetFlag || theState != Prepared) {
- setErrorCode(NdbBlobImpl::ErrState);
- DBUG_RETURN(-1);
- }
- if (! isInsertOp() && ! isUpdateOp() && ! isWriteOp()) {
- setErrorCode(NdbBlobImpl::ErrUsage);
- DBUG_RETURN(-1);
- }
- if (data == NULL && bytes != 0) {
- setErrorCode(NdbBlobImpl::ErrUsage);
- DBUG_RETURN(-1);
- }
- theSetFlag = true;
- theSetBuf = static_cast<const char*>(data);
- theGetSetBytes = bytes;
- if (isInsertOp()) {
- // write inline part now
- if (theSetBuf != NULL) {
- Uint32 n = theGetSetBytes;
- if (n > theInlineSize)
- n = theInlineSize;
- assert(thePos == 0);
- if (writeDataPrivate(theSetBuf, n) == -1)
- DBUG_RETURN(-1);
- } else {
- theNullFlag = true;
- theLength = 0;
- }
- if (setHeadInlineValue(theNdbOp) == -1)
- DBUG_RETURN(-1);
- }
- DBUG_RETURN(0);
-}
-
-// activation hook
-
-int
-NdbBlob::setActiveHook(ActiveHook activeHook, void* arg)
-{
- DBUG_ENTER("NdbBlob::setActiveHook");
- DBUG_PRINT("info", ("hook=%p arg=%p", (void*)activeHook, arg));
- if (theState != Prepared) {
- setErrorCode(NdbBlobImpl::ErrState);
- DBUG_RETURN(-1);
- }
- theActiveHook = activeHook;
- theActiveHookArg = arg;
- DBUG_RETURN(0);
-}
-
-// misc operations
-
-int
-NdbBlob::getNull(bool& isNull)
-{
- DBUG_ENTER("NdbBlob::getNull");
- if (theState == Prepared && theSetFlag) {
- isNull = (theSetBuf == NULL);
- DBUG_RETURN(0);
- }
- if (theNullFlag == -1) {
- setErrorCode(NdbBlobImpl::ErrState);
- DBUG_RETURN(-1);
- }
- isNull = theNullFlag;
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::setNull()
-{
- DBUG_ENTER("NdbBlob::setNull");
- if (theNullFlag == -1) {
- if (theState == Prepared) {
- DBUG_RETURN(setValue(0, 0));
- }
- setErrorCode(NdbBlobImpl::ErrState);
- DBUG_RETURN(-1);
- }
- if (theNullFlag)
- DBUG_RETURN(0);
- if (deleteParts(0, getPartCount()) == -1)
- DBUG_RETURN(-1);
- theNullFlag = true;
- theLength = 0;
- theHeadInlineUpdateFlag = true;
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::getLength(Uint64& len)
-{
- DBUG_ENTER("NdbBlob::getLength");
- if (theState == Prepared && theSetFlag) {
- len = theGetSetBytes;
- DBUG_RETURN(0);
- }
- if (theNullFlag == -1) {
- setErrorCode(NdbBlobImpl::ErrState);
- DBUG_RETURN(-1);
- }
- len = theLength;
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::truncate(Uint64 length)
-{
- DBUG_ENTER("NdbBlob::truncate");
- DBUG_PRINT("info", ("length=%llu", length));
- if (theNullFlag == -1) {
- setErrorCode(NdbBlobImpl::ErrState);
- DBUG_RETURN(-1);
- }
- if (theLength > length) {
- if (length > theInlineSize) {
- Uint32 part1 = getPartNumber(length - 1);
- Uint32 part2 = getPartNumber(theLength - 1);
- assert(part2 >= part1);
- if (part2 > part1 && deleteParts(part1 + 1, part2 - part1) == -1)
- DBUG_RETURN(-1);
- } else {
- if (deleteParts(0, getPartCount()) == -1)
- DBUG_RETURN(-1);
- }
- theLength = length;
- theHeadInlineUpdateFlag = true;
- if (thePos > length)
- thePos = length;
- }
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::getPos(Uint64& pos)
-{
- DBUG_ENTER("NdbBlob::getPos");
- if (theNullFlag == -1) {
- setErrorCode(NdbBlobImpl::ErrState);
- DBUG_RETURN(-1);
- }
- pos = thePos;
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::setPos(Uint64 pos)
-{
- DBUG_ENTER("NdbBlob::setPos");
- DBUG_PRINT("info", ("pos=%llu", pos));
- if (theNullFlag == -1) {
- setErrorCode(NdbBlobImpl::ErrState);
- DBUG_RETURN(-1);
- }
- if (pos > theLength) {
- setErrorCode(NdbBlobImpl::ErrSeek);
- DBUG_RETURN(-1);
- }
- thePos = pos;
- DBUG_RETURN(0);
-}
-
-// read/write
-
-int
-NdbBlob::readData(void* data, Uint32& bytes)
-{
- if (theState != Active) {
- setErrorCode(NdbBlobImpl::ErrState);
- return -1;
- }
- char* buf = static_cast<char*>(data);
- return readDataPrivate(buf, bytes);
-}
-
-int
-NdbBlob::readDataPrivate(char* buf, Uint32& bytes)
-{
- DBUG_ENTER("NdbBlob::readDataPrivate");
- DBUG_PRINT("info", ("bytes=%u", bytes));
- assert(thePos <= theLength);
- Uint64 pos = thePos;
- if (bytes > theLength - pos)
- bytes = theLength - pos;
- Uint32 len = bytes;
- if (len > 0) {
- // inline part
- if (pos < theInlineSize) {
- Uint32 n = theInlineSize - pos;
- if (n > len)
- n = len;
- memcpy(buf, theInlineData + pos, n);
- pos += n;
- buf += n;
- len -= n;
- }
- }
- if (len > 0 && thePartSize == 0) {
- setErrorCode(NdbBlobImpl::ErrSeek);
- DBUG_RETURN(-1);
- }
- if (len > 0) {
- assert(pos >= theInlineSize);
- Uint32 off = (pos - theInlineSize) % thePartSize;
- // partial first block
- if (off != 0) {
- DBUG_PRINT("info", ("partial first block pos=%llu len=%u", pos, len));
- Uint32 part = (pos - theInlineSize) / thePartSize;
- if (readParts(thePartBuf.data, part, 1) == -1)
- DBUG_RETURN(-1);
- // need result now
- if (executePendingBlobReads() == -1)
- DBUG_RETURN(-1);
- Uint32 n = thePartSize - off;
- if (n > len)
- n = len;
- memcpy(buf, thePartBuf.data + off, n);
- pos += n;
- buf += n;
- len -= n;
- }
- }
- if (len > 0) {
- assert((pos - theInlineSize) % thePartSize == 0);
- // complete blocks in the middle
- if (len >= thePartSize) {
- Uint32 part = (pos - theInlineSize) / thePartSize;
- Uint32 count = len / thePartSize;
- if (readParts(buf, part, count) == -1)
- DBUG_RETURN(-1);
- Uint32 n = thePartSize * count;
- pos += n;
- buf += n;
- len -= n;
- }
- }
- if (len > 0) {
- // partial last block
- DBUG_PRINT("info", ("partial last block pos=%llu len=%u", pos, len));
- assert((pos - theInlineSize) % thePartSize == 0 && len < thePartSize);
- Uint32 part = (pos - theInlineSize) / thePartSize;
- if (readParts(thePartBuf.data, part, 1) == -1)
- DBUG_RETURN(-1);
- // need result now
- if (executePendingBlobReads() == -1)
- DBUG_RETURN(-1);
- memcpy(buf, thePartBuf.data, len);
- Uint32 n = len;
- pos += n;
- buf += n;
- len -= n;
- }
- assert(len == 0);
- thePos = pos;
- assert(thePos <= theLength);
- DBUG_RETURN(0);
-}
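To make the split in readDataPrivate concrete, a small sketch with assumed sizes (inline 256 bytes, part 2000 bytes; both values are illustrative): a 5000-byte read from position 0 is served from the inline data, then from complete parts, then from a partial last part. The same three-way split drives writeDataPrivate below.

#include <cstdint>
#include <cassert>

int main() {
  const uint64_t inlineSize = 256, partSize = 2000;  // assumed sizes
  uint64_t pos = 0, len = 5000;                      // read 5000 bytes from the start

  uint64_t fromInline = (len < inlineSize - pos) ? len : inlineSize - pos;
  pos += fromInline; len -= fromInline;              // 256 bytes come from the inline data

  uint64_t off = (pos - inlineSize) % partSize;      // 0 here, so no partial first part
  uint64_t fullParts = len / partSize;               // 2 complete parts of 2000 bytes each
  uint64_t lastPart  = len % partSize;               // 744 bytes from a partial last part

  assert(fromInline == 256 && off == 0 && fullParts == 2 && lastPart == 744);
  return 0;
}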
-
-int
-NdbBlob::writeData(const void* data, Uint32 bytes)
-{
- if (theState != Active) {
- setErrorCode(NdbBlobImpl::ErrState);
- return -1;
- }
- const char* buf = static_cast<const char*>(data);
- return writeDataPrivate(buf, bytes);
-}
-
-int
-NdbBlob::writeDataPrivate(const char* buf, Uint32 bytes)
-{
- DBUG_ENTER("NdbBlob::writeDataPrivate");
- DBUG_PRINT("info", ("bytes=%u", bytes));
- assert(thePos <= theLength);
- Uint64 pos = thePos;
- Uint32 len = bytes;
- // any write makes blob not NULL
- if (theNullFlag) {
- theNullFlag = false;
- theHeadInlineUpdateFlag = true;
- }
- if (len > 0) {
- // inline part
- if (pos < theInlineSize) {
- Uint32 n = theInlineSize - pos;
- if (n > len)
- n = len;
- memcpy(theInlineData + pos, buf, n);
- theHeadInlineUpdateFlag = true;
- pos += n;
- buf += n;
- len -= n;
- }
- }
- if (len > 0 && thePartSize == 0) {
- setErrorCode(NdbBlobImpl::ErrSeek);
- DBUG_RETURN(-1);
- }
- if (len > 0) {
- assert(pos >= theInlineSize);
- Uint32 off = (pos - theInlineSize) % thePartSize;
- // partial first block
- if (off != 0) {
- DBUG_PRINT("info", ("partial first block pos=%llu len=%u", pos, len));
- // flush writes to guarantee correct read
- if (executePendingBlobWrites() == -1)
- DBUG_RETURN(-1);
- Uint32 part = (pos - theInlineSize) / thePartSize;
- if (readParts(thePartBuf.data, part, 1) == -1)
- DBUG_RETURN(-1);
- // need result now
- if (executePendingBlobReads() == -1)
- DBUG_RETURN(-1);
- Uint32 n = thePartSize - off;
- if (n > len) {
- memset(thePartBuf.data + off + len, theFillChar, n - len);
- n = len;
- }
- memcpy(thePartBuf.data + off, buf, n);
- if (updateParts(thePartBuf.data, part, 1) == -1)
- DBUG_RETURN(-1);
- pos += n;
- buf += n;
- len -= n;
- }
- }
- if (len > 0) {
- assert((pos - theInlineSize) % thePartSize == 0);
- // complete blocks in the middle
- if (len >= thePartSize) {
- Uint32 part = (pos - theInlineSize) / thePartSize;
- Uint32 count = len / thePartSize;
- for (unsigned i = 0; i < count; i++) {
- if (part + i < getPartCount()) {
- if (updateParts(buf, part + i, 1) == -1)
- DBUG_RETURN(-1);
- } else {
- if (insertParts(buf, part + i, 1) == -1)
- DBUG_RETURN(-1);
- }
- Uint32 n = thePartSize;
- pos += n;
- buf += n;
- len -= n;
- }
- }
- }
- if (len > 0) {
- // partial last block
- DBUG_PRINT("info", ("partial last block pos=%llu len=%u", pos, len));
- assert((pos - theInlineSize) % thePartSize == 0 && len < thePartSize);
- Uint32 part = (pos - theInlineSize) / thePartSize;
- if (theLength > pos + len) {
- // flush writes to guarantee correct read
- if (executePendingBlobWrites() == -1)
- DBUG_RETURN(-1);
- if (readParts(thePartBuf.data, part, 1) == -1)
- DBUG_RETURN(-1);
- // need result now
- if (executePendingBlobReads() == -1)
- DBUG_RETURN(-1);
- memcpy(thePartBuf.data, buf, len);
- if (updateParts(thePartBuf.data, part, 1) == -1)
- DBUG_RETURN(-1);
- } else {
- memcpy(thePartBuf.data, buf, len);
- memset(thePartBuf.data + len, theFillChar, thePartSize - len);
- if (part < getPartCount()) {
- if (updateParts(thePartBuf.data, part, 1) == -1)
- DBUG_RETURN(-1);
- } else {
- if (insertParts(thePartBuf.data, part, 1) == -1)
- DBUG_RETURN(-1);
- }
- }
- Uint32 n = len;
- pos += n;
- buf += n;
- len -= n;
- }
- assert(len == 0);
- if (theLength < pos) {
- theLength = pos;
- theHeadInlineUpdateFlag = true;
- }
- thePos = pos;
- assert(thePos <= theLength);
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::readParts(char* buf, Uint32 part, Uint32 count)
-{
- DBUG_ENTER("NdbBlob::readParts");
- DBUG_PRINT("info", ("part=%u count=%u", part, count));
- Uint32 n = 0;
- while (n < count) {
- NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
- if (tOp == NULL ||
- tOp->committedRead() == -1 ||
- setPartKeyValue(tOp, part + n) == -1 ||
- tOp->getValue((Uint32)3, buf) == NULL) {
- setErrorCode(tOp);
- DBUG_RETURN(-1);
- }
- tOp->m_abortOption = NdbTransaction::AbortOnError;
- buf += thePartSize;
- n++;
- thePendingBlobOps |= (1 << NdbOperation::ReadRequest);
- theNdbCon->thePendingBlobOps |= (1 << NdbOperation::ReadRequest);
- }
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::insertParts(const char* buf, Uint32 part, Uint32 count)
-{
- DBUG_ENTER("NdbBlob::insertParts");
- DBUG_PRINT("info", ("part=%u count=%u", part, count));
- Uint32 n = 0;
- while (n < count) {
- NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
- if (tOp == NULL ||
- tOp->insertTuple() == -1 ||
- setPartKeyValue(tOp, part + n) == -1 ||
- tOp->setValue((Uint32)3, buf) == -1) {
- setErrorCode(tOp);
- DBUG_RETURN(-1);
- }
- tOp->m_abortOption = NdbTransaction::AbortOnError;
- buf += thePartSize;
- n++;
- thePendingBlobOps |= (1 << NdbOperation::InsertRequest);
- theNdbCon->thePendingBlobOps |= (1 << NdbOperation::InsertRequest);
- }
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::updateParts(const char* buf, Uint32 part, Uint32 count)
-{
- DBUG_ENTER("NdbBlob::updateParts");
- DBUG_PRINT("info", ("part=%u count=%u", part, count));
- Uint32 n = 0;
- while (n < count) {
- NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
- if (tOp == NULL ||
- tOp->updateTuple() == -1 ||
- setPartKeyValue(tOp, part + n) == -1 ||
- tOp->setValue((Uint32)3, buf) == -1) {
- setErrorCode(tOp);
- DBUG_RETURN(-1);
- }
- tOp->m_abortOption = NdbTransaction::AbortOnError;
- buf += thePartSize;
- n++;
- thePendingBlobOps |= (1 << NdbOperation::UpdateRequest);
- theNdbCon->thePendingBlobOps |= (1 << NdbOperation::UpdateRequest);
- }
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::deleteParts(Uint32 part, Uint32 count)
-{
- DBUG_ENTER("NdbBlob::deleteParts");
- DBUG_PRINT("info", ("part=%u count=%u", part, count));
- Uint32 n = 0;
- while (n < count) {
- NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
- if (tOp == NULL ||
- tOp->deleteTuple() == -1 ||
- setPartKeyValue(tOp, part + n) == -1) {
- setErrorCode(tOp);
- DBUG_RETURN(-1);
- }
- tOp->m_abortOption = NdbTransaction::AbortOnError;
- n++;
- thePendingBlobOps |= (1 << NdbOperation::DeleteRequest);
- theNdbCon->thePendingBlobOps |= (1 << NdbOperation::DeleteRequest);
- }
- DBUG_RETURN(0);
-}
-
-/*
- * The number of blob parts is not known. Used to check for a race condition
- * when writeTuple is used for insert. Deletes all parts found.
- */
-int
-NdbBlob::deletePartsUnknown(Uint32 part)
-{
- DBUG_ENTER("NdbBlob::deletePartsUnknown");
- DBUG_PRINT("info", ("part=%u count=all", part));
- static const unsigned maxbat = 256;
- static const unsigned minbat = 1;
- unsigned bat = minbat;
- NdbOperation* tOpList[maxbat];
- Uint32 count = 0;
- while (true) {
- Uint32 n;
- n = 0;
- while (n < bat) {
- NdbOperation*& tOp = tOpList[n]; // ref
- tOp = theNdbCon->getNdbOperation(theBlobTable);
- if (tOp == NULL ||
- tOp->deleteTuple() == -1 ||
- setPartKeyValue(tOp, part + count + n) == -1) {
- setErrorCode(tOp);
- DBUG_RETURN(-1);
- }
- tOp->m_abortOption= NdbTransaction::AO_IgnoreError;
- n++;
- }
- DBUG_PRINT("info", ("bat=%u", bat));
- if (theNdbCon->executeNoBlobs(NdbTransaction::NoCommit) == -1)
- DBUG_RETURN(-1);
- n = 0;
- while (n < bat) {
- NdbOperation* tOp = tOpList[n];
- if (tOp->theError.code != 0) {
- if (tOp->theError.code != 626) {
- setErrorCode(tOp);
- DBUG_RETURN(-1);
- }
- // first non-existent part
- DBUG_PRINT("info", ("count=%u", count));
- DBUG_RETURN(0);
- }
- n++;
- count++;
- }
- bat *= 4;
- if (bat > maxbat)
- bat = maxbat;
- }
-}
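The probing pattern above, batch sizes growing 1, 4, 16, 64 and capped at 256 until the first missing part is hit (error 626 in the real code), can be sketched in isolation; the existingParts constant below is an assumed stand-in for what the data nodes would report.

#include <cstdio>

int main() {
  const unsigned maxbat = 256;          // same cap as in deletePartsUnknown
  const unsigned existingParts = 23;    // assumed number of parts actually present
  unsigned bat = 1, count = 0;
  bool done = false;
  while (!done) {
    std::printf("probe a batch of %u deletes starting at part %u\n", bat, count);
    for (unsigned n = 0; n < bat && !done; n++) {
      if (count >= existingParts)
        done = true;                    // first non-existent part (error 626 in the real code)
      else
        count++;                        // this part existed and its delete succeeded
    }
    bat *= 4;
    if (bat > maxbat)
      bat = maxbat;
  }
  std::printf("deleted %u parts\n", count);
  return 0;
}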
-
-// pending ops
-
-int
-NdbBlob::executePendingBlobReads()
-{
- DBUG_ENTER("NdbBlob::executePendingBlobReads");
- Uint8 flags = (1 << NdbOperation::ReadRequest);
- if (thePendingBlobOps & flags) {
- if (theNdbCon->executeNoBlobs(NdbTransaction::NoCommit) == -1)
- DBUG_RETURN(-1);
- thePendingBlobOps = 0;
- theNdbCon->thePendingBlobOps = 0;
- }
- DBUG_RETURN(0);
-}
-
-int
-NdbBlob::executePendingBlobWrites()
-{
- DBUG_ENTER("NdbBlob::executePendingBlobWrites");
- Uint8 flags = 0xFF & ~(1 << NdbOperation::ReadRequest);
- if (thePendingBlobOps & flags) {
- if (theNdbCon->executeNoBlobs(NdbTransaction::NoCommit) == -1)
- DBUG_RETURN(-1);
- thePendingBlobOps = 0;
- theNdbCon->thePendingBlobOps = 0;
- }
- DBUG_RETURN(0);
-}
-
-// callbacks
-
-int
-NdbBlob::invokeActiveHook()
-{
- DBUG_ENTER("NdbBlob::invokeActiveHook");
- assert(theState == Active && theActiveHook != NULL);
- int ret = (*theActiveHook)(this, theActiveHookArg);
- if (ret != 0) {
- // no error is set on blob level
- DBUG_RETURN(-1);
- }
- DBUG_RETURN(0);
-}
-
-// blob handle maintenance
-
-/*
- * Prepare blob handle linked to an operation. Checks blob table.
- * Allocates buffers. For a key operation, fetches key data from signal
- * data. For a read operation, adds a read of head+inline.
- */
-int
-NdbBlob::atPrepare(NdbTransaction* aCon, NdbOperation* anOp, const NdbColumnImpl* aColumn)
-{
- DBUG_ENTER("NdbBlob::atPrepare");
- DBUG_PRINT("info", ("this=%p op=%p con=%p", this, anOp, aCon));
- assert(theState == Idle);
- // ndb api stuff
- theNdb = anOp->theNdb;
- theNdbCon = aCon; // for scan, this is the real transaction (m_transConnection)
- theNdbOp = anOp;
- theTable = anOp->m_currentTable;
- theAccessTable = anOp->m_accessTable;
- theColumn = aColumn;
- NdbDictionary::Column::Type partType = NdbDictionary::Column::Undefined;
- switch (theColumn->getType()) {
- case NdbDictionary::Column::Blob:
- partType = NdbDictionary::Column::Binary;
- theFillChar = 0x0;
- break;
- case NdbDictionary::Column::Text:
- partType = NdbDictionary::Column::Char;
- theFillChar = 0x20;
- break;
- default:
- setErrorCode(NdbBlobImpl::ErrUsage);
- DBUG_RETURN(-1);
- }
- // sizes
- theInlineSize = theColumn->getInlineSize();
- thePartSize = theColumn->getPartSize();
- theStripeSize = theColumn->getStripeSize();
- // sanity check
- assert((NDB_BLOB_HEAD_SIZE << 2) == sizeof(Head));
- assert(theColumn->m_attrSize * theColumn->m_arraySize == sizeof(Head) + theInlineSize);
- if (thePartSize > 0) {
- const NdbDictionary::Table* bt = NULL;
- const NdbDictionary::Column* bc = NULL;
- if (theStripeSize == 0 ||
- (bt = theColumn->getBlobTable()) == NULL ||
- (bc = bt->getColumn("DATA")) == NULL ||
- bc->getType() != partType ||
- bc->getLength() != (int)thePartSize) {
- setErrorCode(NdbBlobImpl::ErrTable);
- DBUG_RETURN(-1);
- }
- theBlobTable = &NdbTableImpl::getImpl(*bt);
- }
- // buffers
- theKeyBuf.alloc(theTable->m_keyLenInWords << 2);
- theAccessKeyBuf.alloc(theAccessTable->m_keyLenInWords << 2);
- theHeadInlineBuf.alloc(sizeof(Head) + theInlineSize);
- theHeadInlineCopyBuf.alloc(sizeof(Head) + theInlineSize);
- thePartBuf.alloc(thePartSize);
- theHead = (Head*)theHeadInlineBuf.data;
- theInlineData = theHeadInlineBuf.data + sizeof(Head);
- // handle different operation types
- bool supportedOp = false;
- if (isKeyOp()) {
- if (isTableOp()) {
- // get table key
- Uint32* data = (Uint32*)theKeyBuf.data;
- unsigned size = theTable->m_keyLenInWords;
- if (theNdbOp->getKeyFromTCREQ(data, size) == -1) {
- setErrorCode(NdbBlobImpl::ErrUsage);
- DBUG_RETURN(-1);
- }
- }
- if (isIndexOp()) {
- // get index key
- Uint32* data = (Uint32*)theAccessKeyBuf.data;
- unsigned size = theAccessTable->m_keyLenInWords;
- if (theNdbOp->getKeyFromTCREQ(data, size) == -1) {
- setErrorCode(NdbBlobImpl::ErrUsage);
- DBUG_RETURN(-1);
- }
- }
- if (isReadOp()) {
- // add read of head+inline in this op
- if (getHeadInlineValue(theNdbOp) == -1)
- DBUG_RETURN(-1);
- }
- if (isInsertOp()) {
- // becomes NULL unless set before execute
- theNullFlag = true;
- theLength = 0;
- }
- if (isWriteOp()) {
- // becomes NULL unless set before execute
- theNullFlag = true;
- theLength = 0;
- theHeadInlineUpdateFlag = true;
- }
- supportedOp = true;
- }
- if (isScanOp()) {
- // add read of head+inline in this op
- if (getHeadInlineValue(theNdbOp) == -1)
- DBUG_RETURN(-1);
- supportedOp = true;
- }
- if (! supportedOp) {
- setErrorCode(NdbBlobImpl::ErrUsage);
- DBUG_RETURN(-1);
- }
- setState(Prepared);
- DBUG_RETURN(0);
-}
-
-/*
- * Called before execution of a prepared operation. May add new operations before
- * this one. May ask that this operation and all before it (a "batch")
- * are executed immediately in no-commit mode. In this case remaining
- * prepared operations are saved in a separate list. They are added
- * back after postExecute.
- */
-int
-NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch)
-{
- DBUG_ENTER("NdbBlob::preExecute");
- DBUG_PRINT("info", ("this=%p op=%p con=%p", this, theNdbOp, theNdbCon));
- if (theState == Invalid)
- DBUG_RETURN(-1);
- assert(theState == Prepared);
- // handle different operation types
- assert(isKeyOp());
- if (isReadOp()) {
- if (theGetFlag && theGetSetBytes > theInlineSize) {
- // need blob head before proceeding
- batch = true;
- }
- }
- if (isInsertOp()) {
- if (theSetFlag && theGetSetBytes > theInlineSize) {
- // add ops to write rest of a setValue
- assert(theSetBuf != NULL);
- const char* buf = theSetBuf + theInlineSize;
- Uint32 bytes = theGetSetBytes - theInlineSize;
- assert(thePos == theInlineSize);
- if (writeDataPrivate(buf, bytes) == -1)
- DBUG_RETURN(-1);
- if (theHeadInlineUpdateFlag) {
- // add an operation to update head+inline
- NdbOperation* tOp = theNdbCon->getNdbOperation(theTable);
- if (tOp == NULL ||
- tOp->updateTuple() == -1 ||
- setTableKeyValue(tOp) == -1 ||
- setHeadInlineValue(tOp) == -1) {
- setErrorCode(NdbBlobImpl::ErrAbort);
- DBUG_RETURN(-1);
- }
- DBUG_PRINT("info", ("add op to update head+inline"));
- }
- }
- }
- if (isTableOp()) {
- if (isUpdateOp() || isWriteOp() || isDeleteOp()) {
- // add operation before this one to read head+inline
- NdbOperation* tOp = theNdbCon->getNdbOperation(theTable, theNdbOp);
- if (tOp == NULL ||
- tOp->readTuple() == -1 ||
- setTableKeyValue(tOp) == -1 ||
- getHeadInlineValue(tOp) == -1) {
- setErrorCode(tOp);
- DBUG_RETURN(-1);
- }
- if (isWriteOp()) {
- tOp->m_abortOption = NdbTransaction::AO_IgnoreError;
- }
- theHeadInlineReadOp = tOp;
- // execute immediately
- batch = true;
- DBUG_PRINT("info", ("add op before to read head+inline"));
- }
- }
- if (isIndexOp()) {
- // add op before this one to read table key
- NdbBlob* tFirstBlob = theNdbOp->theBlobList;
- if (this == tFirstBlob) {
- // first blob does it for all
- if (g_ndb_blob_ok_to_read_index_table) {
- Uint32 pkAttrId = theAccessTable->getNoOfColumns() - 1;
- NdbOperation* tOp = theNdbCon->getNdbOperation(theAccessTable, theNdbOp);
- if (tOp == NULL ||
- tOp->readTuple() == -1 ||
- setAccessKeyValue(tOp) == -1 ||
- tOp->getValue(pkAttrId, theKeyBuf.data) == NULL) {
- setErrorCode(tOp);
- DBUG_RETURN(-1);
- }
- } else {
- NdbIndexOperation* tOp = theNdbCon->getNdbIndexOperation(theAccessTable->m_index, theTable, theNdbOp);
- if (tOp == NULL ||
- tOp->readTuple() == -1 ||
- setAccessKeyValue(tOp) == -1 ||
- getTableKeyValue(tOp) == -1) {
- setErrorCode(tOp);
- DBUG_RETURN(-1);
- }
- }
- }
- DBUG_PRINT("info", ("added op before to read table key"));
- if (isUpdateOp() || isDeleteOp()) {
- // add op before this one to read head+inline via index
- NdbIndexOperation* tOp = theNdbCon->getNdbIndexOperation(theAccessTable->m_index, theTable, theNdbOp);
- if (tOp == NULL ||
- tOp->readTuple() == -1 ||
- setAccessKeyValue(tOp) == -1 ||
- getHeadInlineValue(tOp) == -1) {
- setErrorCode(tOp);
- DBUG_RETURN(-1);
- }
- if (isWriteOp()) {
- tOp->m_abortOption = NdbTransaction::AO_IgnoreError;
- }
- theHeadInlineReadOp = tOp;
- // execute immediately
- batch = true;
- DBUG_PRINT("info", ("added index op before to read head+inline"));
- }
- if (isWriteOp()) {
- // XXX until IgnoreError fixed for index op
- batch = true;
- }
- }
- if (isWriteOp()) {
- if (theSetFlag) {
- // write head+inline now
- theNullFlag = true;
- theLength = 0;
- if (theSetBuf != NULL) {
- Uint32 n = theGetSetBytes;
- if (n > theInlineSize)
- n = theInlineSize;
- assert(thePos == 0);
- if (writeDataPrivate(theSetBuf, n) == -1)
- DBUG_RETURN(-1);
- }
- if (setHeadInlineValue(theNdbOp) == -1)
- DBUG_RETURN(-1);
- // the read op before us may overwrite
- theHeadInlineCopyBuf.copyfrom(theHeadInlineBuf);
- }
- }
- if (theActiveHook != NULL) {
- // need blob head for callback
- batch = true;
- }
- DBUG_PRINT("info", ("batch=%u", batch));
- DBUG_RETURN(0);
-}
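
The batching above is driven from the application side by a setValue() larger than the blob's inline size. A minimal sketch of such an insert, assuming a connected Ndb* named ndb and a hypothetical table "t1" with an Unsigned primary key "a" and a BLOB column "b" (all names and sizes are illustrative, not taken from this source):

/*
 * Illustrative only: "t1", "a", "b" and the caller-supplied buffer are
 * hypothetical.
 */
static int insert_blob_row(Ndb* ndb, const char* buf, Uint32 len)
{
  NdbTransaction* trans = ndb->startTransaction();
  if (trans == NULL)
    return -1;
  NdbOperation* op = trans->getNdbOperation("t1");
  if (op == NULL || op->insertTuple() == -1 || op->equal("a", (Uint32)1) == -1) {
    ndb->closeTransaction(trans);
    return -1;
  }
  NdbBlob* bh = op->getBlobHandle("b");
  // a setValue larger than the inline size is what makes preExecute
  // add the extra part-write operations and the head+inline update
  if (bh == NULL || bh->setValue(buf, len) == -1 ||
      trans->execute(NdbTransaction::Commit) == -1) {
    ndb->closeTransaction(trans);
    return -1;
  }
  ndb->closeTransaction(trans);
  return 0;
}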
-
-/*
- * After execute, for any operation. If already Active, this routine
- * has been done previously. Operations which requested a no-commit
- * batch can add new operations after this one. They are added before
- * any remaining prepared operations.
- */
-int
-NdbBlob::postExecute(NdbTransaction::ExecType anExecType)
-{
- DBUG_ENTER("NdbBlob::postExecute");
- DBUG_PRINT("info", ("this=%p op=%p con=%p anExecType=%u", this, theNdbOp, theNdbCon, anExecType));
- if (theState == Invalid)
- DBUG_RETURN(-1);
- if (theState == Active) {
- setState(anExecType == NdbTransaction::NoCommit ? Active : Closed);
- DBUG_PRINT("info", ("skip active"));
- DBUG_RETURN(0);
- }
- assert(theState == Prepared);
- setState(anExecType == NdbTransaction::NoCommit ? Active : Closed);
- assert(isKeyOp());
- if (isIndexOp()) {
- NdbBlob* tFirstBlob = theNdbOp->theBlobList;
- if (this != tFirstBlob) {
- // copy key from first blob
- assert(theKeyBuf.size == tFirstBlob->theKeyBuf.size);
- memcpy(theKeyBuf.data, tFirstBlob->theKeyBuf.data, tFirstBlob->theKeyBuf.size);
- }
- }
- if (isReadOp()) {
- getHeadFromRecAttr();
- if (setPos(0) == -1)
- DBUG_RETURN(-1);
- if (theGetFlag) {
- assert(theGetSetBytes == 0 || theGetBuf != 0);
- assert(theGetSetBytes <= theInlineSize ||
- anExecType == NdbTransaction::NoCommit);
- Uint32 bytes = theGetSetBytes;
- if (readDataPrivate(theGetBuf, bytes) == -1)
- DBUG_RETURN(-1);
- }
- }
- if (isUpdateOp()) {
- assert(anExecType == NdbTransaction::NoCommit);
- getHeadFromRecAttr();
- if (theSetFlag) {
- // setValue overwrites everything
- if (theSetBuf != NULL) {
- if (truncate(0) == -1)
- DBUG_RETURN(-1);
- assert(thePos == 0);
- if (writeDataPrivate(theSetBuf, theGetSetBytes) == -1)
- DBUG_RETURN(-1);
- } else {
- if (setNull() == -1)
- DBUG_RETURN(-1);
- }
- }
- }
- if (isWriteOp() && isTableOp()) {
- assert(anExecType == NdbTransaction::NoCommit);
- if (theHeadInlineReadOp->theError.code == 0) {
- int tNullFlag = theNullFlag;
- Uint64 tLength = theLength;
- Uint64 tPos = thePos;
- getHeadFromRecAttr();
- DBUG_PRINT("info", ("tuple found"));
- if (truncate(0) == -1)
- DBUG_RETURN(-1);
- // restore previous head+inline
- theHeadInlineBuf.copyfrom(theHeadInlineCopyBuf);
- theNullFlag = tNullFlag;
- theLength = tLength;
- thePos = tPos;
- } else {
- if (theHeadInlineReadOp->theError.code != 626) {
- setErrorCode(theHeadInlineReadOp);
- DBUG_RETURN(-1);
- }
- DBUG_PRINT("info", ("tuple not found"));
- /*
- * Read found no tuple but it is possible that a tuple was
- * created after the read by another transaction. Delete all
- * blob parts which may exist.
- */
- if (deletePartsUnknown(0) == -1)
- DBUG_RETURN(-1);
- }
- if (theSetFlag && theGetSetBytes > theInlineSize) {
- assert(theSetBuf != NULL);
- const char* buf = theSetBuf + theInlineSize;
- Uint32 bytes = theGetSetBytes - theInlineSize;
- assert(thePos == theInlineSize);
- if (writeDataPrivate(buf, bytes) == -1)
- DBUG_RETURN(-1);
- }
- }
- if (isWriteOp() && isIndexOp()) {
- // XXX until IgnoreError fixed for index op
- if (deletePartsUnknown(0) == -1)
- DBUG_RETURN(-1);
- if (theSetFlag && theGetSetBytes > theInlineSize) {
- assert(theSetBuf != NULL);
- const char* buf = theSetBuf + theInlineSize;
- Uint32 bytes = theGetSetBytes - theInlineSize;
- assert(thePos == theInlineSize);
- if (writeDataPrivate(buf, bytes) == -1)
- DBUG_RETURN(-1);
- }
- }
- if (isDeleteOp()) {
- assert(anExecType == NdbTransaction::NoCommit);
- getHeadFromRecAttr();
- if (deleteParts(0, getPartCount()) == -1)
- DBUG_RETURN(-1);
- }
- setState(anExecType == NdbTransaction::NoCommit ? Active : Closed);
- // activation callback
- if (theActiveHook != NULL) {
- if (invokeActiveHook() == -1)
- DBUG_RETURN(-1);
- }
- if (anExecType == NdbTransaction::NoCommit && theHeadInlineUpdateFlag) {
- NdbOperation* tOp = theNdbCon->getNdbOperation(theTable);
- if (tOp == NULL ||
- tOp->updateTuple() == -1 ||
- setTableKeyValue(tOp) == -1 ||
- setHeadInlineValue(tOp) == -1) {
- setErrorCode(NdbBlobImpl::ErrAbort);
- DBUG_RETURN(-1);
- }
- tOp->m_abortOption = NdbTransaction::AbortOnError;
- DBUG_PRINT("info", ("added op to update head+inline"));
- }
- DBUG_RETURN(0);
-}
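
The activation callback invoked at the end of postExecute corresponds to the public setActiveHook() interface. A rough sketch of a hook that reads the blob once the handle becomes Active; the buffer size and column name are hypothetical:

/*
 * Illustrative only: buffer size is arbitrary and error handling is
 * trimmed; a nonzero return from the hook is reported as an error.
 */
static int my_active_hook(NdbBlob* bh, void* arg)
{
  (void)arg;                        // unused in this sketch
  Uint64 length = 0;
  if (bh->getLength(length) == -1)
    return -1;
  char buf[256];
  Uint32 bytes = sizeof(buf);
  // read the first chunk; a real hook would loop until all data is read
  if (length > 0 && bh->readData(buf, bytes) == -1)
    return -1;
  return 0;
}

/* registration, before execute:
 *   NdbBlob* bh = op->getBlobHandle("b");     // hypothetical column
 *   bh->setActiveHook(my_active_hook, NULL);
 */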
-
-/*
- * Before commit of completed operation. For write add operation to
- * update head+inline.
- */
-int
-NdbBlob::preCommit()
-{
- DBUG_ENTER("NdbBlob::preCommit");
- DBUG_PRINT("info", ("this=%p op=%p con=%p", this, theNdbOp, theNdbCon));
- if (theState == Invalid)
- DBUG_RETURN(-1);
- assert(theState == Active);
- assert(isKeyOp());
- if (isInsertOp() || isUpdateOp() || isWriteOp()) {
- if (theHeadInlineUpdateFlag) {
- // add an operation to update head+inline
- NdbOperation* tOp = theNdbCon->getNdbOperation(theTable);
- if (tOp == NULL ||
- tOp->updateTuple() == -1 ||
- setTableKeyValue(tOp) == -1 ||
- setHeadInlineValue(tOp) == -1) {
- setErrorCode(NdbBlobImpl::ErrAbort);
- DBUG_RETURN(-1);
- }
- tOp->m_abortOption = NdbTransaction::AbortOnError;
- DBUG_PRINT("info", ("added op to update head+inline"));
- }
- }
- DBUG_RETURN(0);
-}
-
-/*
- * After next scan result. Handle like read op above.
- */
-int
-NdbBlob::atNextResult()
-{
- DBUG_ENTER("NdbBlob::atNextResult");
- DBUG_PRINT("info", ("this=%p op=%p con=%p", this, theNdbOp, theNdbCon));
- if (theState == Invalid)
- DBUG_RETURN(-1);
- assert(isScanOp());
- // get primary key
- { Uint32* data = (Uint32*)theKeyBuf.data;
- unsigned size = theTable->m_keyLenInWords;
- if (((NdbScanOperation*)theNdbOp)->getKeyFromKEYINFO20(data, size) == -1) {
- setErrorCode(NdbBlobImpl::ErrUsage);
- DBUG_RETURN(-1);
- }
- }
- getHeadFromRecAttr();
- if (setPos(0) == -1)
- DBUG_RETURN(-1);
- if (theGetFlag) {
- assert(theGetSetBytes == 0 || theGetBuf != 0);
- Uint32 bytes = theGetSetBytes;
- if (readDataPrivate(theGetBuf, bytes) == -1)
- DBUG_RETURN(-1);
- }
- setState(Active);
- // activation callback
- if (theActiveHook != NULL) {
- if (invokeActiveHook() == -1)
- DBUG_RETURN(-1);
- }
- DBUG_RETURN(0);
-}
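
atNextResult is what lets a scan reposition its blob handles on each fetched row. A hedged sketch of the corresponding application loop, assuming a connected Ndb* named ndb and a hypothetical table "t1" with a BLOB column "b":

/*
 * Illustrative only: table and column names are hypothetical.
 */
static int scan_blobs(Ndb* ndb)
{
  NdbTransaction* trans = ndb->startTransaction();
  if (trans == NULL)
    return -1;
  NdbScanOperation* sop = trans->getNdbScanOperation("t1");
  if (sop == NULL || sop->readTuples() == -1) {
    ndb->closeTransaction(trans);
    return -1;
  }
  NdbBlob* bh = sop->getBlobHandle("b");
  if (bh == NULL || trans->execute(NdbTransaction::NoCommit) == -1) {
    ndb->closeTransaction(trans);
    return -1;
  }
  int ret;
  while ((ret = sop->nextResult(true)) == 0) {
    Uint64 length = 0;
    if (bh->getLength(length) == -1)
      break;                            // error on this row
    // read the current row's blob data with bh->readData(...) here
  }
  ndb->closeTransaction(trans);
  return ret == 1 ? 0 : -1;             // 1 == no more rows
}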
-
-// misc
-
-const NdbDictionary::Column*
-NdbBlob::getColumn()
-{
- return theColumn;
-}
-
-// errors
-
-void
-NdbBlob::setErrorCode(int anErrorCode, bool invalidFlag)
-{
- DBUG_ENTER("NdbBlob::setErrorCode");
- DBUG_PRINT("info", ("this=%p code=%u", this, anErrorCode));
- theError.code = anErrorCode;
- // conditionally copy error to operation level
- if (theNdbOp != NULL && theNdbOp->theError.code == 0)
- theNdbOp->setErrorCode(theError.code);
- if (invalidFlag)
- setState(Invalid);
- DBUG_VOID_RETURN;
-}
-
-void
-NdbBlob::setErrorCode(NdbOperation* anOp, bool invalidFlag)
-{
- int code = 0;
- if (anOp != NULL && (code = anOp->theError.code) != 0)
- ;
- else if ((code = theNdbCon->theError.code) != 0)
- ;
- else if ((code = theNdb->theError.code) != 0)
- ;
- else
- code = NdbBlobImpl::ErrUnknown;
- setErrorCode(code, invalidFlag);
-}
-
-void
-NdbBlob::setErrorCode(NdbTransaction* aCon, bool invalidFlag)
-{
- int code = 0;
- if (theNdbCon != NULL && (code = theNdbCon->theError.code) != 0)
- ;
- else if ((code = theNdb->theError.code) != 0)
- ;
- else
- code = NdbBlobImpl::ErrUnknown;
- setErrorCode(code, invalidFlag);
-}
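
These error paths surface to applications through the NdbError objects carried by the blob handle, the operation and the transaction; blob errors are copied up to the operation level as shown above. A small illustrative helper (the handles are hypothetical):

/*
 * Illustrative only: "trans" and "bh" come from an executed,
 * hypothetical transaction.
 */
static void report_blob_error(NdbTransaction* trans, NdbBlob* bh)
{
  const NdbError& blobErr  = bh->getNdbError();
  const NdbError& transErr = trans->getNdbError();
  ndbout_c("blob error %d (%s), transaction error %d (%s)",
           blobErr.code, blobErr.message,
           transErr.code, transErr.message);
}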
-
-// info about all blobs in this operation
-
-NdbBlob*
-NdbBlob::blobsFirstBlob()
-{
- return theNdbOp->theBlobList;
-}
-
-NdbBlob*
-NdbBlob::blobsNextBlob()
-{
- return theNext;
-}
-
-// debug
-
-#ifdef VM_TRACE
-inline int
-NdbBlob::getOperationType() const
-{
- return theNdbOp != NULL ? theNdbOp->theOperationType : -1;
-}
-
-NdbOut&
-operator<<(NdbOut& out, const NdbBlob& blob)
-{
- ndbout << dec << "o=" << blob.getOperationType();
- ndbout << dec << " s=" << (Uint32) blob.theState;
- ndbout << dec << " n=" << blob.theNullFlag;;
- ndbout << dec << " l=" << blob.theLength;
- ndbout << dec << " p=" << blob.thePos;
- ndbout << dec << " u=" << (Uint32)blob.theHeadInlineUpdateFlag;
- ndbout << dec << " g=" << (Uint32)blob.theGetSetBytes;
- return out;
-}
-#endif
diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp
deleted file mode 100644
index 79b6fb4c0e8..00000000000
--- a/ndb/src/ndbapi/NdbDictionary.cpp
+++ /dev/null
@@ -1,1054 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include <NdbDictionary.hpp>
-#include "NdbDictionaryImpl.hpp"
-#include <NdbOut.hpp>
-
-/*****************************************************************
- * Column facade
- */
-NdbDictionary::Column::Column(const char * name)
- : m_impl(* new NdbColumnImpl(* this))
-{
- setName(name);
-}
-
-NdbDictionary::Column::Column(const NdbDictionary::Column & org)
- : m_impl(* new NdbColumnImpl(* this))
-{
- m_impl = org.m_impl;
-}
-
-NdbDictionary::Column::Column(NdbColumnImpl& impl)
- : m_impl(impl)
-{
-}
-
-NdbDictionary::Column::~Column(){
- NdbColumnImpl * tmp = &m_impl;
- if(this != tmp){
- delete tmp;
- }
-}
-
-NdbDictionary::Column&
-NdbDictionary::Column::operator=(const NdbDictionary::Column& column)
-{
- m_impl = column.m_impl;
-
- return *this;
-}
-
-void
-NdbDictionary::Column::setName(const char * name){
- m_impl.m_name.assign(name);
-}
-
-const char*
-NdbDictionary::Column::getName() const {
- return m_impl.m_name.c_str();
-}
-
-void
-NdbDictionary::Column::setType(Type t){
- m_impl.init(t);
-}
-
-NdbDictionary::Column::Type
-NdbDictionary::Column::getType() const {
- return m_impl.m_type;
-}
-
-void
-NdbDictionary::Column::setPrecision(int val){
- m_impl.m_precision = val;
-}
-
-int
-NdbDictionary::Column::getPrecision() const {
- return m_impl.m_precision;
-}
-
-void
-NdbDictionary::Column::setScale(int val){
- m_impl.m_scale = val;
-}
-
-int
-NdbDictionary::Column::getScale() const{
- return m_impl.m_scale;
-}
-
-void
-NdbDictionary::Column::setLength(int length){
- m_impl.m_length = length;
-}
-
-int
-NdbDictionary::Column::getLength() const{
- return m_impl.m_length;
-}
-
-void
-NdbDictionary::Column::setInlineSize(int size)
-{
- m_impl.m_precision = size;
-}
-
-void
-NdbDictionary::Column::setCharset(CHARSET_INFO* cs)
-{
- m_impl.m_cs = cs;
-}
-
-CHARSET_INFO*
-NdbDictionary::Column::getCharset() const
-{
- return m_impl.m_cs;
-}
-
-int
-NdbDictionary::Column::getInlineSize() const
-{
- return m_impl.m_precision;
-}
-
-void
-NdbDictionary::Column::setPartSize(int size)
-{
- m_impl.m_scale = size;
-}
-
-int
-NdbDictionary::Column::getPartSize() const
-{
- return m_impl.m_scale;
-}
-
-void
-NdbDictionary::Column::setStripeSize(int size)
-{
- m_impl.m_length = size;
-}
-
-int
-NdbDictionary::Column::getStripeSize() const
-{
- return m_impl.m_length;
-}
-
-int
-NdbDictionary::Column::getSize() const{
- return m_impl.m_attrSize;
-}
-
-void
-NdbDictionary::Column::setNullable(bool val){
- m_impl.m_nullable = val;
-}
-
-bool
-NdbDictionary::Column::getNullable() const {
- return m_impl.m_nullable;
-}
-
-void
-NdbDictionary::Column::setPrimaryKey(bool val){
- m_impl.m_pk = val;
-}
-
-bool
-NdbDictionary::Column::getPrimaryKey() const {
- return m_impl.m_pk;
-}
-
-void
-NdbDictionary::Column::setPartitionKey(bool val){
- m_impl.m_distributionKey = val;
-}
-
-bool
-NdbDictionary::Column::getPartitionKey() const{
- return m_impl.m_distributionKey;
-}
-
-const NdbDictionary::Table *
-NdbDictionary::Column::getBlobTable() const {
- NdbTableImpl * t = m_impl.m_blobTable;
- if (t)
- return t->m_facade;
- return 0;
-}
-
-void
-NdbDictionary::Column::setAutoIncrement(bool val){
- m_impl.m_autoIncrement = val;
-}
-
-bool
-NdbDictionary::Column::getAutoIncrement() const {
- return m_impl.m_autoIncrement;
-}
-
-void
-NdbDictionary::Column::setAutoIncrementInitialValue(Uint64 val){
- m_impl.m_autoIncrementInitialValue = val;
-}
-
-void
-NdbDictionary::Column::setDefaultValue(const char* defaultValue)
-{
- m_impl.m_defaultValue.assign(defaultValue);
-}
-
-const char*
-NdbDictionary::Column::getDefaultValue() const
-{
- return m_impl.m_defaultValue.c_str();
-}
-
-int
-NdbDictionary::Column::getColumnNo() const {
- return m_impl.m_attrId;
-}
-
-bool
-NdbDictionary::Column::equal(const NdbDictionary::Column & col) const {
- return m_impl.equal(col.m_impl);
-}
-
-int
-NdbDictionary::Column::getSizeInBytes() const
-{
- return m_impl.m_attrSize * m_impl.m_arraySize;
-}
-
-/*****************************************************************
- * Table facade
- */
-NdbDictionary::Table::Table(const char * name)
- : m_impl(* new NdbTableImpl(* this))
-{
- setName(name);
-}
-
-NdbDictionary::Table::Table(const NdbDictionary::Table & org)
- : NdbDictionary::Object(),
- m_impl(* new NdbTableImpl(* this))
-{
- m_impl.assign(org.m_impl);
-}
-
-NdbDictionary::Table::Table(NdbTableImpl & impl)
- : m_impl(impl)
-{
-}
-
-NdbDictionary::Table::~Table(){
- NdbTableImpl * tmp = &m_impl;
- if(this != tmp){
- delete tmp;
- }
-}
-
-NdbDictionary::Table&
-NdbDictionary::Table::operator=(const NdbDictionary::Table& table)
-{
- m_impl.assign(table.m_impl);
-
- m_impl.m_facade = this;
- return *this;
-}
-
-void
-NdbDictionary::Table::setName(const char * name){
- m_impl.setName(name);
-}
-
-const char *
-NdbDictionary::Table::getName() const {
- return m_impl.getName();
-}
-
-int
-NdbDictionary::Table::getTableId() const {
- return m_impl.m_tableId;
-}
-
-void
-NdbDictionary::Table::addColumn(const Column & c){
- NdbColumnImpl* col = new NdbColumnImpl;
- (* col) = NdbColumnImpl::getImpl(c);
- m_impl.m_columns.push_back(col);
- if(c.getPrimaryKey()){
- m_impl.m_noOfKeys++;
- }
- if (col->getBlobType()) {
- m_impl.m_noOfBlobs++;
- }
- m_impl.buildColumnHash();
-}
-
-const NdbDictionary::Column*
-NdbDictionary::Table::getColumn(const char * name) const {
- return m_impl.getColumn(name);
-}
-
-const NdbDictionary::Column*
-NdbDictionary::Table::getColumn(const int attrId) const {
- return m_impl.getColumn(attrId);
-}
-
-NdbDictionary::Column*
-NdbDictionary::Table::getColumn(const char * name)
-{
- return m_impl.getColumn(name);
-}
-
-NdbDictionary::Column*
-NdbDictionary::Table::getColumn(const int attrId)
-{
- return m_impl.getColumn(attrId);
-}
-
-void
-NdbDictionary::Table::setLogging(bool val){
- m_impl.m_logging = val;
-}
-
-bool
-NdbDictionary::Table::getLogging() const {
- return m_impl.m_logging;
-}
-
-void
-NdbDictionary::Table::setFragmentType(FragmentType ft){
- m_impl.m_fragmentType = ft;
-}
-
-NdbDictionary::Object::FragmentType
-NdbDictionary::Table::getFragmentType() const {
- return m_impl.m_fragmentType;
-}
-
-void
-NdbDictionary::Table::setKValue(int kValue){
- m_impl.m_kvalue = kValue;
-}
-
-int
-NdbDictionary::Table::getKValue() const {
- return m_impl.m_kvalue;
-}
-
-void
-NdbDictionary::Table::setMinLoadFactor(int lf){
- m_impl.m_minLoadFactor = lf;
-}
-
-int
-NdbDictionary::Table::getMinLoadFactor() const {
- return m_impl.m_minLoadFactor;
-}
-
-void
-NdbDictionary::Table::setMaxLoadFactor(int lf){
- m_impl.m_maxLoadFactor = lf;
-}
-
-int
-NdbDictionary::Table::getMaxLoadFactor() const {
- return m_impl.m_maxLoadFactor;
-}
-
-int
-NdbDictionary::Table::getNoOfColumns() const {
- return m_impl.m_columns.size();
-}
-
-int
-NdbDictionary::Table::getNoOfPrimaryKeys() const {
- return m_impl.m_noOfKeys;
-}
-
-const char*
-NdbDictionary::Table::getPrimaryKey(int no) const {
- int count = 0;
- for (unsigned i = 0; i < m_impl.m_columns.size(); i++) {
- if (m_impl.m_columns[i]->m_pk) {
- if (count++ == no)
- return m_impl.m_columns[i]->m_name.c_str();
- }
- }
- return 0;
-}
-
-const void*
-NdbDictionary::Table::getFrmData() const {
- return m_impl.m_frm.get_data();
-}
-
-Uint32
-NdbDictionary::Table::getFrmLength() const {
- return m_impl.m_frm.length();
-}
-
-void
-NdbDictionary::Table::setFrm(const void* data, Uint32 len){
- m_impl.m_frm.assign(data, len);
-}
-
-NdbDictionary::Object::Status
-NdbDictionary::Table::getObjectStatus() const {
- return m_impl.m_status;
-}
-
-int
-NdbDictionary::Table::getObjectVersion() const {
- return m_impl.m_version;
-}
-
-bool
-NdbDictionary::Table::equal(const NdbDictionary::Table & col) const {
- return m_impl.equal(col.m_impl);
-}
-
-int
-NdbDictionary::Table::getRowSizeInBytes() const {
- int sz = 0;
- for(int i = 0; i<getNoOfColumns(); i++){
- const NdbDictionary::Column * c = getColumn(i);
- sz += (c->getSizeInBytes()+ 3) / 4;
- }
- return sz * 4;
-}
-
-int
-NdbDictionary::Table::getReplicaCount() const {
- return m_impl.m_replicaCount;
-}
-
-int
-NdbDictionary::Table::createTableInDb(Ndb* pNdb, bool equalOk) const {
- const NdbDictionary::Table * pTab =
- pNdb->getDictionary()->getTable(getName());
- if(pTab != 0 && equal(* pTab))
- return 0;
- if(pTab != 0 && !equal(* pTab))
- return -1;
- return pNdb->getDictionary()->createTable(* this);
-}
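
A short sketch of this facade in use: defining a table with a primary key and a blob column and creating it through the dictionary. Names and sizes are illustrative; note how the blob sizes map onto precision, scale and length as shown above:

/*
 * Illustrative only: "t1", "a", "b" and the size values are hypothetical.
 */
static int create_example_table(NdbDictionary::Dictionary* dict)
{
  NdbDictionary::Table tab("t1");

  NdbDictionary::Column pk("a");
  pk.setType(NdbDictionary::Column::Unsigned);
  pk.setPrimaryKey(true);
  pk.setNullable(false);
  tab.addColumn(pk);

  NdbDictionary::Column blob("b");
  blob.setType(NdbDictionary::Column::Blob);
  blob.setInlineSize(256);   // stored in m_precision
  blob.setPartSize(2000);    // stored in m_scale
  blob.setStripeSize(4);     // stored in m_length
  tab.addColumn(blob);

  return dict->createTable(tab);   // 0 on success, -1 on error
}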
-
-/*****************************************************************
- * Index facade
- */
-NdbDictionary::Index::Index(const char * name)
- : m_impl(* new NdbIndexImpl(* this))
-{
- setName(name);
-}
-
-NdbDictionary::Index::Index(NdbIndexImpl & impl)
- : m_impl(impl)
-{
-}
-
-NdbDictionary::Index::~Index(){
- NdbIndexImpl * tmp = &m_impl;
- if(this != tmp){
- delete tmp;
- }
-}
-
-void
-NdbDictionary::Index::setName(const char * name){
- m_impl.setName(name);
-}
-
-const char *
-NdbDictionary::Index::getName() const {
- return m_impl.getName();
-}
-
-void
-NdbDictionary::Index::setTable(const char * table){
- m_impl.setTable(table);
-}
-
-const char *
-NdbDictionary::Index::getTable() const {
- return m_impl.getTable();
-}
-
-unsigned
-NdbDictionary::Index::getNoOfColumns() const {
- return m_impl.m_columns.size();
-}
-
-int
-NdbDictionary::Index::getNoOfIndexColumns() const {
- return m_impl.m_columns.size();
-}
-
-const NdbDictionary::Column *
-NdbDictionary::Index::getColumn(unsigned no) const {
- if(no < m_impl.m_columns.size())
- return m_impl.m_columns[no];
- return NULL;
-}
-
-const char *
-NdbDictionary::Index::getIndexColumn(int no) const {
- const NdbDictionary::Column* col = getColumn(no);
-
- if (col)
- return col->getName();
- else
- return NULL;
-}
-
-void
-NdbDictionary::Index::addColumn(const Column & c){
- NdbColumnImpl* col = new NdbColumnImpl;
- (* col) = NdbColumnImpl::getImpl(c);
- m_impl.m_columns.push_back(col);
-}
-
-void
-NdbDictionary::Index::addColumnName(const char * name){
- const Column c(name);
- addColumn(c);
-}
-
-void
-NdbDictionary::Index::addIndexColumn(const char * name){
- const Column c(name);
- addColumn(c);
-}
-
-void
-NdbDictionary::Index::addColumnNames(unsigned noOfNames, const char ** names){
- for(unsigned i = 0; i < noOfNames; i++) {
- const Column c(names[i]);
- addColumn(c);
- }
-}
-
-void
-NdbDictionary::Index::addIndexColumns(int noOfNames, const char ** names){
- for(int i = 0; i < noOfNames; i++) {
- const Column c(names[i]);
- addColumn(c);
- }
-}
-
-void
-NdbDictionary::Index::setType(NdbDictionary::Index::Type t){
- m_impl.m_type = t;
-}
-
-NdbDictionary::Index::Type
-NdbDictionary::Index::getType() const {
- return m_impl.m_type;
-}
-
-void
-NdbDictionary::Index::setLogging(bool val){
- m_impl.m_logging = val;
-}
-
-bool
-NdbDictionary::Index::getLogging() const {
- return m_impl.m_logging;
-}
-
-NdbDictionary::Object::Status
-NdbDictionary::Index::getObjectStatus() const {
- return m_impl.m_status;
-}
-
-int
-NdbDictionary::Index::getObjectVersion() const {
- return m_impl.m_version;
-}
-
-/*****************************************************************
- * Event facade
- */
-NdbDictionary::Event::Event(const char * name)
- : m_impl(* new NdbEventImpl(* this))
-{
- setName(name);
-}
-
-NdbDictionary::Event::Event(const char * name, const Table& table)
- : m_impl(* new NdbEventImpl(* this))
-{
- setName(name);
- setTable(table);
-}
-
-NdbDictionary::Event::Event(NdbEventImpl & impl)
- : m_impl(impl)
-{
-}
-
-NdbDictionary::Event::~Event()
-{
- NdbEventImpl * tmp = &m_impl;
- if(this != tmp){
- delete tmp;
- }
-}
-
-void
-NdbDictionary::Event::setName(const char * name)
-{
- m_impl.setName(name);
-}
-
-const char *
-NdbDictionary::Event::getName() const
-{
- return m_impl.getName();
-}
-
-void
-NdbDictionary::Event::setTable(const Table& table)
-{
- m_impl.setTable(table);
-}
-
-void
-NdbDictionary::Event::setTable(const char * table)
-{
- m_impl.setTable(table);
-}
-
-const char*
-NdbDictionary::Event::getTableName() const
-{
- return m_impl.getTableName();
-}
-
-void
-NdbDictionary::Event::addTableEvent(const TableEvent t)
-{
- m_impl.addTableEvent(t);
-}
-
-void
-NdbDictionary::Event::setDurability(EventDurability d)
-{
- m_impl.setDurability(d);
-}
-
-NdbDictionary::Event::EventDurability
-NdbDictionary::Event::getDurability() const
-{
- return m_impl.getDurability();
-}
-
-void
-NdbDictionary::Event::addColumn(const Column & c){
- NdbColumnImpl* col = new NdbColumnImpl;
- (* col) = NdbColumnImpl::getImpl(c);
- m_impl.m_columns.push_back(col);
-}
-
-void
-NdbDictionary::Event::addEventColumn(unsigned attrId)
-{
- m_impl.m_attrIds.push_back(attrId);
-}
-
-void
-NdbDictionary::Event::addEventColumn(const char * name)
-{
- const Column c(name);
- addColumn(c);
-}
-
-void
-NdbDictionary::Event::addEventColumns(int n, const char ** names)
-{
- for (int i = 0; i < n; i++)
- addEventColumn(names[i]);
-}
-
-int NdbDictionary::Event::getNoOfEventColumns() const
-{
- return m_impl.getNoOfEventColumns();
-}
-
-NdbDictionary::Object::Status
-NdbDictionary::Event::getObjectStatus() const
-{
- return m_impl.m_status;
-}
-
-int
-NdbDictionary::Event::getObjectVersion() const
-{
- return m_impl.m_version;
-}
-
-void NdbDictionary::Event::print()
-{
- m_impl.print();
-}
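
And a sketch for the event facade: subscribing to inserts and updates on a hypothetical table "t1", reporting column "a". Consumption of the event through NdbEventOperation is not shown:

/*
 * Illustrative only: event, table and column names are hypothetical.
 */
static int create_example_event(NdbDictionary::Dictionary* dict)
{
  NdbDictionary::Event ev("t1_event");
  ev.setTable("t1");
  ev.addTableEvent(NdbDictionary::Event::TE_INSERT);
  ev.addTableEvent(NdbDictionary::Event::TE_UPDATE);
  ev.addEventColumn("a");
  return dict->createEvent(ev);   // 0 on success, -1 on error
}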
-
-/*****************************************************************
- * Dictionary facade
- */
-NdbDictionary::Dictionary::Dictionary(Ndb & ndb)
- : m_impl(* new NdbDictionaryImpl(ndb, *this))
-{
-}
-
-NdbDictionary::Dictionary::Dictionary(NdbDictionaryImpl & impl)
- : m_impl(impl)
-{
-}
-NdbDictionary::Dictionary::~Dictionary(){
- NdbDictionaryImpl * tmp = &m_impl;
- if(this != tmp){
- delete tmp;
- }
-}
-
-int
-NdbDictionary::Dictionary::createTable(const Table & t){
- return m_impl.createTable(NdbTableImpl::getImpl(t));
-}
-
-int
-NdbDictionary::Dictionary::dropTable(Table & t){
- return m_impl.dropTable(NdbTableImpl::getImpl(t));
-}
-
-int
-NdbDictionary::Dictionary::dropTable(const char * name){
- return m_impl.dropTable(name);
-}
-
-int
-NdbDictionary::Dictionary::alterTable(const Table & t){
- return m_impl.alterTable(NdbTableImpl::getImpl(t));
-}
-
-const NdbDictionary::Table *
-NdbDictionary::Dictionary::getTable(const char * name, void **data) const
-{
- NdbTableImpl * t = m_impl.getTable(name, data);
- if(t)
- return t->m_facade;
- return 0;
-}
-
-void NdbDictionary::Dictionary::set_local_table_data_size(unsigned sz)
-{
- m_impl.m_local_table_data_size= sz;
-}
-
-const NdbDictionary::Table *
-NdbDictionary::Dictionary::getTable(const char * name) const
-{
- return getTable(name, 0);
-}
-
-void
-NdbDictionary::Dictionary::invalidateTable(const char * name){
- DBUG_ENTER("NdbDictionaryImpl::invalidateTable");
- NdbTableImpl * t = m_impl.getTable(name);
- if(t)
- m_impl.invalidateObject(* t);
- DBUG_VOID_RETURN;
-}
-
-void
-NdbDictionary::Dictionary::removeCachedTable(const char * name){
- NdbTableImpl * t = m_impl.getTable(name);
- if(t)
- m_impl.removeCachedObject(* t);
-}
-
-int
-NdbDictionary::Dictionary::createIndex(const Index & ind)
-{
- return m_impl.createIndex(NdbIndexImpl::getImpl(ind));
-}
-
-int
-NdbDictionary::Dictionary::dropIndex(const char * indexName,
- const char * tableName)
-{
- return m_impl.dropIndex(indexName, tableName);
-}
-
-const NdbDictionary::Index *
-NdbDictionary::Dictionary::getIndex(const char * indexName,
- const char * tableName) const
-{
- NdbIndexImpl * i = m_impl.getIndex(indexName, tableName);
- if(i)
- return i->m_facade;
- return 0;
-}
-
-void
-NdbDictionary::Dictionary::invalidateIndex(const char * indexName,
- const char * tableName){
- DBUG_ENTER("NdbDictionaryImpl::invalidateIndex");
- NdbIndexImpl * i = m_impl.getIndex(indexName, tableName);
- if(i) {
- assert(i->m_table != 0);
- m_impl.invalidateObject(* i->m_table);
- }
- DBUG_VOID_RETURN;
-}
-
-void
-NdbDictionary::Dictionary::removeCachedIndex(const char * indexName,
- const char * tableName){
- NdbIndexImpl * i = m_impl.getIndex(indexName, tableName);
- if(i) {
- assert(i->m_table != 0);
- m_impl.removeCachedObject(* i->m_table);
- }
-}
-
-const NdbDictionary::Table *
-NdbDictionary::Dictionary::getIndexTable(const char * indexName,
- const char * tableName) const
-{
- NdbIndexImpl * i = m_impl.getIndex(indexName, tableName);
- NdbTableImpl * t = m_impl.getTable(tableName);
- if(i && t) {
- NdbTableImpl * it = m_impl.getIndexTable(i, t);
- return it->m_facade;
- }
- return 0;
-}
-
-
-int
-NdbDictionary::Dictionary::createEvent(const Event & ev)
-{
- return m_impl.createEvent(NdbEventImpl::getImpl(ev));
-}
-
-int
-NdbDictionary::Dictionary::dropEvent(const char * eventName)
-{
- return m_impl.dropEvent(eventName);
-}
-
-const NdbDictionary::Event *
-NdbDictionary::Dictionary::getEvent(const char * eventName)
-{
- NdbEventImpl * t = m_impl.getEvent(eventName);
- if(t)
- return t->m_facade;
- return 0;
-}
-
-int
-NdbDictionary::Dictionary::listObjects(List& list, Object::Type type)
-{
- return m_impl.listObjects(list, type);
-}
-
-int
-NdbDictionary::Dictionary::listObjects(List& list, Object::Type type) const
-{
- return m_impl.listObjects(list, type);
-}
-
-int
-NdbDictionary::Dictionary::listIndexes(List& list, const char * tableName)
-{
- const NdbDictionary::Table* tab= getTable(tableName);
- if(tab == 0)
- {
- return -1;
- }
- return m_impl.listIndexes(list, tab->getTableId());
-}
-
-int
-NdbDictionary::Dictionary::listIndexes(List& list,
- const char * tableName) const
-{
- const NdbDictionary::Table* tab= getTable(tableName);
- if(tab == 0)
- {
- return -1;
- }
- return m_impl.listIndexes(list, tab->getTableId());
-}
-
-const struct NdbError &
-NdbDictionary::Dictionary::getNdbError() const {
- return m_impl.getNdbError();
-}
-
-// printers
-
-NdbOut&
-operator<<(NdbOut& out, const NdbDictionary::Column& col)
-{
- const CHARSET_INFO *cs = col.getCharset();
- const char *csname = cs ? cs->name : "?";
- out << col.getName() << " ";
- switch (col.getType()) {
- case NdbDictionary::Column::Tinyint:
- out << "Tinyint";
- break;
- case NdbDictionary::Column::Tinyunsigned:
- out << "Tinyunsigned";
- break;
- case NdbDictionary::Column::Smallint:
- out << "Smallint";
- break;
- case NdbDictionary::Column::Smallunsigned:
- out << "Smallunsigned";
- break;
- case NdbDictionary::Column::Mediumint:
- out << "Mediumint";
- break;
- case NdbDictionary::Column::Mediumunsigned:
- out << "Mediumunsigned";
- break;
- case NdbDictionary::Column::Int:
- out << "Int";
- break;
- case NdbDictionary::Column::Unsigned:
- out << "Unsigned";
- break;
- case NdbDictionary::Column::Bigint:
- out << "Bigint";
- break;
- case NdbDictionary::Column::Bigunsigned:
- out << "Bigunsigned";
- break;
- case NdbDictionary::Column::Float:
- out << "Float";
- break;
- case NdbDictionary::Column::Double:
- out << "Double";
- break;
- case NdbDictionary::Column::Olddecimal:
- out << "Olddecimal(" << col.getPrecision() << "," << col.getScale() << ")";
- break;
- case NdbDictionary::Column::Olddecimalunsigned:
- out << "Olddecimalunsigned(" << col.getPrecision() << "," << col.getScale() << ")";
- break;
- case NdbDictionary::Column::Decimal:
- out << "Decimal(" << col.getPrecision() << "," << col.getScale() << ")";
- break;
- case NdbDictionary::Column::Decimalunsigned:
- out << "Decimalunsigned(" << col.getPrecision() << "," << col.getScale() << ")";
- break;
- case NdbDictionary::Column::Char:
- out << "Char(" << col.getLength() << ";" << csname << ")";
- break;
- case NdbDictionary::Column::Varchar:
- out << "Varchar(" << col.getLength() << ";" << csname << ")";
- break;
- case NdbDictionary::Column::Binary:
- out << "Binary(" << col.getLength() << ")";
- break;
- case NdbDictionary::Column::Varbinary:
- out << "Varbinary(" << col.getLength() << ")";
- break;
- case NdbDictionary::Column::Datetime:
- out << "Datetime";
- break;
- case NdbDictionary::Column::Date:
- out << "Date";
- break;
- case NdbDictionary::Column::Blob:
- out << "Blob(" << col.getInlineSize() << "," << col.getPartSize()
- << ";" << col.getStripeSize() << ")";
- break;
- case NdbDictionary::Column::Text:
- out << "Text(" << col.getInlineSize() << "," << col.getPartSize()
- << ";" << col.getStripeSize() << ";" << csname << ")";
- break;
- case NdbDictionary::Column::Time:
- out << "Time";
- break;
- case NdbDictionary::Column::Year:
- out << "Year";
- break;
- case NdbDictionary::Column::Timestamp:
- out << "Timestamp";
- break;
- case NdbDictionary::Column::Undefined:
- out << "Undefined";
- break;
- case NdbDictionary::Column::Bit:
- out << "Bit(" << col.getLength() << ")";
- break;
- case NdbDictionary::Column::Longvarchar:
- out << "Longvarchar(" << col.getLength() << ";" << csname << ")";
- break;
- case NdbDictionary::Column::Longvarbinary:
- out << "Longvarbinary(" << col.getLength() << ")";
- break;
- default:
- out << "Type" << (Uint32)col.getType();
- break;
- }
- // show unusual (non-MySQL) array size
- if (col.getLength() != 1) {
- switch (col.getType()) {
- case NdbDictionary::Column::Char:
- case NdbDictionary::Column::Varchar:
- case NdbDictionary::Column::Binary:
- case NdbDictionary::Column::Varbinary:
- case NdbDictionary::Column::Blob:
- case NdbDictionary::Column::Text:
- case NdbDictionary::Column::Bit:
- case NdbDictionary::Column::Longvarchar:
- case NdbDictionary::Column::Longvarbinary:
- break;
- default:
- out << " [" << col.getLength() << "]";
- break;
- }
- }
- if (col.getPrimaryKey())
- out << " PRIMARY KEY";
- else if (! col.getNullable())
- out << " NOT NULL";
- else
- out << " NULL";
-
- if(col.getDistributionKey())
- out << " DISTRIBUTION KEY";
-
- return out;
-}
-
-const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT = 0;
-const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT_MEMORY = 0;
-const NdbDictionary::Column * NdbDictionary::Column::ROW_COUNT = 0;
-const NdbDictionary::Column * NdbDictionary::Column::COMMIT_COUNT = 0;
-const NdbDictionary::Column * NdbDictionary::Column::ROW_SIZE = 0;
-const NdbDictionary::Column * NdbDictionary::Column::RANGE_NO = 0;
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
deleted file mode 100644
index 34d1614d043..00000000000
--- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ /dev/null
@@ -1,3163 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include "NdbDictionaryImpl.hpp"
-#include "API.hpp"
-#include <NdbOut.hpp>
-#include "NdbApiSignal.hpp"
-#include "TransporterFacade.hpp"
-#include <signaldata/GetTabInfo.hpp>
-#include <signaldata/DictTabInfo.hpp>
-#include <signaldata/CreateTable.hpp>
-#include <signaldata/CreateIndx.hpp>
-#include <signaldata/CreateEvnt.hpp>
-#include <signaldata/SumaImpl.hpp>
-#include <signaldata/DropTable.hpp>
-#include <signaldata/AlterTable.hpp>
-#include <signaldata/DropIndx.hpp>
-#include <signaldata/ListTables.hpp>
-#include <SimpleProperties.hpp>
-#include <Bitmask.hpp>
-#include <AttributeList.hpp>
-#include <NdbEventOperation.hpp>
-#include "NdbEventOperationImpl.hpp"
-#include <NdbBlob.hpp>
-#include "NdbBlobImpl.hpp"
-#include <AttributeHeader.hpp>
-#include <my_sys.h>
-
-#define DEBUG_PRINT 0
-#define INCOMPATIBLE_VERSION -2
-
-//#define EVENT_DEBUG
-
-/**
- * Column
- */
-NdbColumnImpl::NdbColumnImpl()
- : NdbDictionary::Column(* this), m_attrId(-1), m_facade(this)
-{
- init();
-}
-
-NdbColumnImpl::NdbColumnImpl(NdbDictionary::Column & f)
- : NdbDictionary::Column(* this), m_attrId(-1), m_facade(&f)
-{
- init();
-}
-
-NdbColumnImpl&
-NdbColumnImpl::operator=(const NdbColumnImpl& col)
-{
- m_attrId = col.m_attrId;
- m_name = col.m_name;
- m_type = col.m_type;
- m_precision = col.m_precision;
- m_cs = col.m_cs;
- m_scale = col.m_scale;
- m_length = col.m_length;
- m_pk = col.m_pk;
- m_distributionKey = col.m_distributionKey;
- m_nullable = col.m_nullable;
- m_autoIncrement = col.m_autoIncrement;
- m_autoIncrementInitialValue = col.m_autoIncrementInitialValue;
- m_defaultValue = col.m_defaultValue;
- m_attrSize = col.m_attrSize;
- m_arraySize = col.m_arraySize;
- m_keyInfoPos = col.m_keyInfoPos;
- m_blobTable = col.m_blobTable;
- // Do not copy m_facade !!
-
- return *this;
-}
-
-void
-NdbColumnImpl::init(Type t)
-{
- // do not use default_charset_info as it may not be initialized yet
- // use binary collation until NDB tests can handle charsets
- CHARSET_INFO* default_cs = &my_charset_bin;
- m_type = t;
- switch (m_type) {
- case Tinyint:
- case Tinyunsigned:
- case Smallint:
- case Smallunsigned:
- case Mediumint:
- case Mediumunsigned:
- case Int:
- case Unsigned:
- case Bigint:
- case Bigunsigned:
- case Float:
- case Double:
- m_precision = 0;
- m_scale = 0;
- m_length = 1;
- m_cs = NULL;
- break;
- case Olddecimal:
- case Olddecimalunsigned:
- case Decimal:
- case Decimalunsigned:
- m_precision = 10;
- m_scale = 0;
- m_length = 1;
- m_cs = NULL;
- break;
- case Char:
- case Varchar:
- m_precision = 0;
- m_scale = 0;
- m_length = 1;
- m_cs = default_cs;
- break;
- case Binary:
- case Varbinary:
- case Datetime:
- case Date:
- m_precision = 0;
- m_scale = 0;
- m_length = 1;
- m_cs = NULL;
- break;
- case Blob:
- m_precision = 256;
- m_scale = 8000;
- m_length = 4;
- m_cs = NULL;
- break;
- case Text:
- m_precision = 256;
- m_scale = 8000;
- m_length = 4;
- m_cs = default_cs;
- break;
- case Time:
- case Year:
- case Timestamp:
- m_precision = 0;
- m_scale = 0;
- m_length = 1;
- m_cs = NULL;
- break;
- case Bit:
- m_precision = 0;
- m_scale = 0;
- m_length = 1;
- m_cs = NULL;
- break;
- case Longvarchar:
- m_precision = 0;
- m_scale = 0;
- m_length = 1; // legal
- m_cs = default_cs;
- break;
- case Longvarbinary:
- m_precision = 0;
- m_scale = 0;
- m_length = 1; // legal
- m_cs = NULL;
- break;
- default:
- case Undefined:
- assert(false);
- break;
- }
- m_pk = false;
- m_nullable = false;
- m_distributionKey = false;
- m_keyInfoPos = 0;
- // next 2 are set at run time
- m_attrSize = 0;
- m_arraySize = 0;
- m_autoIncrement = false;
- m_autoIncrementInitialValue = 1;
- m_blobTable = NULL;
-}
-
-NdbColumnImpl::~NdbColumnImpl()
-{
-}
-
-bool
-NdbColumnImpl::equal(const NdbColumnImpl& col) const
-{
- DBUG_ENTER("NdbColumnImpl::equal");
- if(strcmp(m_name.c_str(), col.m_name.c_str()) != 0){
- DBUG_RETURN(false);
- }
- if(m_type != col.m_type){
- DBUG_RETURN(false);
- }
- if(m_pk != col.m_pk){
- DBUG_RETURN(false);
- }
- if(m_nullable != col.m_nullable){
- DBUG_RETURN(false);
- }
-#ifdef ndb_dictionary_dkey_fixed
- if(m_pk){
- if(m_distributionKey != col.m_distributionKey){
- DBUG_RETURN(false);
- }
- }
-#endif
- if (m_precision != col.m_precision ||
- m_scale != col.m_scale ||
- m_length != col.m_length ||
- m_cs != col.m_cs) {
- DBUG_RETURN(false);
- }
- if (m_autoIncrement != col.m_autoIncrement){
- DBUG_RETURN(false);
- }
- if(strcmp(m_defaultValue.c_str(), col.m_defaultValue.c_str()) != 0){
- DBUG_RETURN(false);
- }
-
- DBUG_RETURN(true);
-}
-
-NdbDictionary::Column *
-NdbColumnImpl::create_psuedo(const char * name){
- NdbDictionary::Column * col = new NdbDictionary::Column();
- col->setName(name);
- if(!strcmp(name, "NDB$FRAGMENT")){
- col->setType(NdbDictionary::Column::Unsigned);
- col->m_impl.m_attrId = AttributeHeader::FRAGMENT;
- col->m_impl.m_attrSize = 4;
- col->m_impl.m_arraySize = 1;
- } else if(!strcmp(name, "NDB$FRAGMENT_MEMORY")){
- col->setType(NdbDictionary::Column::Bigunsigned);
- col->m_impl.m_attrId = AttributeHeader::FRAGMENT_MEMORY;
- col->m_impl.m_attrSize = 8;
- col->m_impl.m_arraySize = 1;
- } else if(!strcmp(name, "NDB$ROW_COUNT")){
- col->setType(NdbDictionary::Column::Bigunsigned);
- col->m_impl.m_attrId = AttributeHeader::ROW_COUNT;
- col->m_impl.m_attrSize = 8;
- col->m_impl.m_arraySize = 1;
- } else if(!strcmp(name, "NDB$COMMIT_COUNT")){
- col->setType(NdbDictionary::Column::Bigunsigned);
- col->m_impl.m_attrId = AttributeHeader::COMMIT_COUNT;
- col->m_impl.m_attrSize = 8;
- col->m_impl.m_arraySize = 1;
- } else if(!strcmp(name, "NDB$ROW_SIZE")){
- col->setType(NdbDictionary::Column::Unsigned);
- col->m_impl.m_attrId = AttributeHeader::ROW_SIZE;
- col->m_impl.m_attrSize = 4;
- col->m_impl.m_arraySize = 1;
- } else if(!strcmp(name, "NDB$RANGE_NO")){
- col->setType(NdbDictionary::Column::Unsigned);
- col->m_impl.m_attrId = AttributeHeader::RANGE_NO;
- col->m_impl.m_attrSize = 4;
- col->m_impl.m_arraySize = 1;
- } else {
- abort();
- }
- return col;
-}
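
A hedged sketch of how these pseudo columns are consumed: a scan that reads NDB$ROW_COUNT once per fragment to estimate the table's row count, the same kind of trick the handler layer uses for table statistics. The table name is hypothetical and interpret_exit_last_row() is assumed from the regular NdbOperation interface:

/*
 * Illustrative only: "t1" is hypothetical; interpret_exit_last_row()
 * limits the scan to one result row per fragment.
 */
static int count_rows(Ndb* ndb, Uint64& rows)
{
  rows = 0;
  NdbTransaction* trans = ndb->startTransaction();
  if (trans == NULL)
    return -1;
  NdbScanOperation* sop = trans->getNdbScanOperation("t1");
  Uint64 fragRows = 0;
  if (sop == NULL ||
      sop->readTuples() == -1 ||
      sop->interpret_exit_last_row() == -1 ||
      sop->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&fragRows) == NULL ||
      trans->execute(NdbTransaction::NoCommit) == -1) {
    ndb->closeTransaction(trans);
    return -1;
  }
  int ret;
  while ((ret = sop->nextResult(true)) == 0)
    rows += fragRows;                    // one result row per fragment
  ndb->closeTransaction(trans);
  return ret == 1 ? 0 : -1;
}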
-
-/**
- * NdbTableImpl
- */
-
-NdbTableImpl::NdbTableImpl()
- : NdbDictionary::Table(* this), m_facade(this)
-{
- init();
-}
-
-NdbTableImpl::NdbTableImpl(NdbDictionary::Table & f)
- : NdbDictionary::Table(* this), m_facade(&f)
-{
- init();
-}
-
-NdbTableImpl::~NdbTableImpl()
-{
- if (m_index != 0) {
- delete m_index;
- m_index = 0;
- }
- for (unsigned i = 0; i < m_columns.size(); i++)
- delete m_columns[i];
-}
-
-void
-NdbTableImpl::init(){
- m_changeMask= 0;
- m_tableId= RNIL;
- m_frm.clear();
- m_fragmentType= NdbDictionary::Object::FragAllSmall;
- m_hashValueMask= 0;
- m_hashpointerValue= 0;
- m_logging= true;
- m_kvalue= 6;
- m_minLoadFactor= 78;
- m_maxLoadFactor= 80;
- m_keyLenInWords= 0;
- m_fragmentCount= 0;
- m_dictionary= NULL;
- m_index= NULL;
- m_indexType= NdbDictionary::Index::Undefined;
- m_noOfKeys= 0;
- m_noOfDistributionKeys= 0;
- m_noOfBlobs= 0;
- m_replicaCount= 0;
-}
-
-bool
-NdbTableImpl::equal(const NdbTableImpl& obj) const
-{
- DBUG_ENTER("NdbTableImpl::equal");
- if ((m_internalName.c_str() == NULL) ||
- (strcmp(m_internalName.c_str(), "") == 0) ||
- (obj.m_internalName.c_str() == NULL) ||
- (strcmp(obj.m_internalName.c_str(), "") == 0)) {
- // Shallow equal
- if(strcmp(getName(), obj.getName()) != 0){
- DBUG_PRINT("info",("name %s != %s",getName(),obj.getName()));
- DBUG_RETURN(false);
- }
-  } else {
-    // Deep equal
-    if(strcmp(m_internalName.c_str(), obj.m_internalName.c_str()) != 0){
-      DBUG_PRINT("info",("m_internalName %s != %s",
-                         m_internalName.c_str(),obj.m_internalName.c_str()));
-      DBUG_RETURN(false);
-    }
-  }
- if(m_fragmentType != obj.m_fragmentType){
- DBUG_PRINT("info",("m_fragmentType %d != %d",m_fragmentType,obj.m_fragmentType));
- DBUG_RETURN(false);
- }
- if(m_columns.size() != obj.m_columns.size()){
- DBUG_PRINT("info",("m_columns.size %d != %d",m_columns.size(),obj.m_columns.size()));
- DBUG_RETURN(false);
- }
-
- for(unsigned i = 0; i<obj.m_columns.size(); i++){
- if(!m_columns[i]->equal(* obj.m_columns[i])){
- DBUG_PRINT("info",("m_columns [%d] != [%d]",i,i));
- DBUG_RETURN(false);
- }
- }
-
- if(m_logging != obj.m_logging){
- DBUG_PRINT("info",("m_logging %d != %d",m_logging,obj.m_logging));
- DBUG_RETURN(false);
- }
-
- if(m_kvalue != obj.m_kvalue){
- DBUG_PRINT("info",("m_kvalue %d != %d",m_kvalue,obj.m_kvalue));
- DBUG_RETURN(false);
- }
-
- if(m_minLoadFactor != obj.m_minLoadFactor){
- DBUG_PRINT("info",("m_minLoadFactor %d != %d",m_minLoadFactor,obj.m_minLoadFactor));
- DBUG_RETURN(false);
- }
-
- if(m_maxLoadFactor != obj.m_maxLoadFactor){
- DBUG_PRINT("info",("m_maxLoadFactor %d != %d",m_maxLoadFactor,obj.m_maxLoadFactor));
- DBUG_RETURN(false);
- }
-
- DBUG_RETURN(true);
-}
-
-void
-NdbTableImpl::assign(const NdbTableImpl& org)
-{
- m_tableId = org.m_tableId;
- m_internalName.assign(org.m_internalName);
- m_externalName.assign(org.m_externalName);
- m_newExternalName.assign(org.m_newExternalName);
- m_frm.assign(org.m_frm.get_data(), org.m_frm.length());
- m_fragmentType = org.m_fragmentType;
- m_fragmentCount = org.m_fragmentCount;
-
- for(unsigned i = 0; i<org.m_columns.size(); i++){
- NdbColumnImpl * col = new NdbColumnImpl();
- const NdbColumnImpl * iorg = org.m_columns[i];
- (* col) = (* iorg);
- m_columns.push_back(col);
- }
-
- m_logging = org.m_logging;
- m_kvalue = org.m_kvalue;
- m_minLoadFactor = org.m_minLoadFactor;
- m_maxLoadFactor = org.m_maxLoadFactor;
-
- if (m_index != 0)
- delete m_index;
- m_index = org.m_index;
-
- m_noOfDistributionKeys = org.m_noOfDistributionKeys;
- m_noOfKeys = org.m_noOfKeys;
- m_keyLenInWords = org.m_keyLenInWords;
- m_noOfBlobs = org.m_noOfBlobs;
-
- m_version = org.m_version;
- m_status = org.m_status;
-}
-
-void NdbTableImpl::setName(const char * name)
-{
- m_newExternalName.assign(name);
-}
-
-const char *
-NdbTableImpl::getName() const
-{
- if (m_newExternalName.empty())
- return m_externalName.c_str();
- else
- return m_newExternalName.c_str();
-}
-
-
-void
-NdbTableImpl::buildColumnHash(){
- const Uint32 size = m_columns.size();
-
- int i;
- for(i = 31; i >= 0; i--){
- if(((1 << i) & size) != 0){
- m_columnHashMask = (1 << (i + 1)) - 1;
- break;
- }
- }
-
- Vector<Uint32> hashValues;
- Vector<Vector<Uint32> > chains; chains.fill(size, hashValues);
- for(i = 0; i< (int) size; i++){
- Uint32 hv = Hash(m_columns[i]->getName()) & 0xFFFE;
- Uint32 bucket = hv & m_columnHashMask;
- bucket = (bucket < size ? bucket : bucket - size);
- assert(bucket < size);
- hashValues.push_back(hv);
- chains[bucket].push_back(i);
- }
-
- m_columnHash.clear();
- Uint32 tmp = 1;
- m_columnHash.fill((unsigned)size-1, tmp); // Default no chaining
-
- Uint32 pos = 0; // In overflow vector
- for(i = 0; i< (int) size; i++){
- Uint32 sz = chains[i].size();
- if(sz == 1){
- Uint32 col = chains[i][0];
- Uint32 hv = hashValues[col];
- Uint32 bucket = hv & m_columnHashMask;
- bucket = (bucket < size ? bucket : bucket - size);
- m_columnHash[bucket] = (col << 16) | hv | 1;
- } else if(sz > 1){
- Uint32 col = chains[i][0];
- Uint32 hv = hashValues[col];
- Uint32 bucket = hv & m_columnHashMask;
- bucket = (bucket < size ? bucket : bucket - size);
- m_columnHash[bucket] = (sz << 16) | (((size - bucket) + pos) << 1);
- for(size_t j = 0; j<sz; j++, pos++){
- Uint32 col = chains[i][j];
- Uint32 hv = hashValues[col];
- m_columnHash.push_back((col << 16) | hv);
- }
- }
- }
-
- m_columnHash.push_back(0); // Overflow when looping in end of array
-
-#if 0
- for(size_t i = 0; i<m_columnHash.size(); i++){
- Uint32 tmp = m_columnHash[i];
- int col = -1;
- if(i < size && (tmp & 1) == 1){
- col = (tmp >> 16);
- } else if(i >= size){
- col = (tmp >> 16);
- }
- ndbout_c("m_columnHash[%d] %s = %x",
- i, col > 0 ? m_columns[col]->getName() : "" , m_columnHash[i]);
- }
-#endif
-}
-
-Uint32
-NdbTableImpl::get_nodes(Uint32 hashValue, const Uint16 ** nodes) const
-{
- if(m_replicaCount > 0)
- {
- Uint32 fragmentId = hashValue & m_hashValueMask;
- if(fragmentId < m_hashpointerValue)
- {
- fragmentId = hashValue & ((m_hashValueMask << 1) + 1);
- }
- Uint32 pos = fragmentId * m_replicaCount;
- if(pos + m_replicaCount <= m_fragments.size())
- {
- * nodes = m_fragments.getBase()+pos;
- return m_replicaCount;
- }
- }
- return 0;
-}
-
-/**
- * NdbIndexImpl
- */
-
-NdbIndexImpl::NdbIndexImpl() :
- NdbDictionary::Index(* this),
- m_facade(this)
-{
- init();
-}
-
-NdbIndexImpl::NdbIndexImpl(NdbDictionary::Index & f) :
- NdbDictionary::Index(* this),
- m_facade(&f)
-{
- init();
-}
-
-void NdbIndexImpl::init()
-{
- m_indexId= RNIL;
- m_type= NdbDictionary::Index::Undefined;
- m_logging= true;
- m_table= NULL;
-}
-
-NdbIndexImpl::~NdbIndexImpl(){
- for (unsigned i = 0; i < m_columns.size(); i++)
- delete m_columns[i];
-}
-
-void NdbIndexImpl::setName(const char * name)
-{
- m_externalName.assign(name);
-}
-
-const char *
-NdbIndexImpl::getName() const
-{
- return m_externalName.c_str();
-}
-
-void
-NdbIndexImpl::setTable(const char * table)
-{
- m_tableName.assign(table);
-}
-
-const char *
-NdbIndexImpl::getTable() const
-{
- return m_tableName.c_str();
-}
-
-const NdbTableImpl *
-NdbIndexImpl::getIndexTable() const
-{
- return m_table;
-}
-
-/**
- * NdbEventImpl
- */
-
-NdbEventImpl::NdbEventImpl() :
- NdbDictionary::Event(* this),
- m_facade(this)
-{
- init();
-}
-
-NdbEventImpl::NdbEventImpl(NdbDictionary::Event & f) :
- NdbDictionary::Event(* this),
- m_facade(&f)
-{
- init();
-}
-
-void NdbEventImpl::init()
-{
- m_eventId= RNIL;
- m_eventKey= RNIL;
- m_tableId= RNIL;
- mi_type= 0;
- m_dur= NdbDictionary::Event::ED_UNDEFINED;
- m_tableImpl= NULL;
- m_bufferId= RNIL;
- eventOp= NULL;
-}
-
-NdbEventImpl::~NdbEventImpl()
-{
- for (unsigned i = 0; i < m_columns.size(); i++)
- delete m_columns[i];
-}
-
-void NdbEventImpl::setName(const char * name)
-{
- m_externalName.assign(name);
-}
-
-const char *NdbEventImpl::getName() const
-{
- return m_externalName.c_str();
-}
-
-void
-NdbEventImpl::setTable(const NdbDictionary::Table& table)
-{
- m_tableImpl= &NdbTableImpl::getImpl(table);
- m_tableName.assign(m_tableImpl->getName());
-}
-
-void
-NdbEventImpl::setTable(const char * table)
-{
- m_tableName.assign(table);
-}
-
-const char *
-NdbEventImpl::getTableName() const
-{
- return m_tableName.c_str();
-}
-
-void
-NdbEventImpl::addTableEvent(const NdbDictionary::Event::TableEvent t = NdbDictionary::Event::TE_ALL)
-{
- switch (t) {
- case NdbDictionary::Event::TE_INSERT : mi_type |= 1; break;
- case NdbDictionary::Event::TE_DELETE : mi_type |= 2; break;
- case NdbDictionary::Event::TE_UPDATE : mi_type |= 4; break;
- default: mi_type = 4 | 2 | 1; // all types
- }
-}
-
-void
-NdbEventImpl::setDurability(NdbDictionary::Event::EventDurability d)
-{
- m_dur = d;
-}
-
-NdbDictionary::Event::EventDurability
-NdbEventImpl::getDurability() const
-{
- return m_dur;
-}
-
-int NdbEventImpl::getNoOfEventColumns() const
-{
- return m_attrIds.size() + m_columns.size();
-}
-
-/**
- * NdbDictionaryImpl
- */
-
-NdbDictionaryImpl::NdbDictionaryImpl(Ndb &ndb)
- : NdbDictionary::Dictionary(* this),
- m_facade(this),
- m_receiver(m_error),
- m_ndb(ndb)
-{
- m_globalHash = 0;
- m_local_table_data_size= 0;
-}
-
-NdbDictionaryImpl::NdbDictionaryImpl(Ndb &ndb,
- NdbDictionary::Dictionary & f)
- : NdbDictionary::Dictionary(* this),
- m_facade(&f),
- m_receiver(m_error),
- m_ndb(ndb)
-{
- m_globalHash = 0;
- m_local_table_data_size= 0;
-}
-
-static int f_dictionary_count = 0;
-
-NdbDictionaryImpl::~NdbDictionaryImpl()
-{
- NdbElement_t<Ndb_local_table_info> * curr = m_localHash.m_tableHash.getNext(0);
- if(m_globalHash){
- while(curr != 0){
- m_globalHash->lock();
- m_globalHash->release(curr->theData->m_table_impl);
- Ndb_local_table_info::destroy(curr->theData);
- m_globalHash->unlock();
-
- curr = m_localHash.m_tableHash.getNext(curr);
- }
-
- m_globalHash->lock();
- if(--f_dictionary_count == 0){
- delete NdbDictionary::Column::FRAGMENT;
- delete NdbDictionary::Column::FRAGMENT_MEMORY;
- delete NdbDictionary::Column::ROW_COUNT;
- delete NdbDictionary::Column::COMMIT_COUNT;
- delete NdbDictionary::Column::ROW_SIZE;
- delete NdbDictionary::Column::RANGE_NO;
- NdbDictionary::Column::FRAGMENT= 0;
- NdbDictionary::Column::FRAGMENT_MEMORY= 0;
- NdbDictionary::Column::ROW_COUNT= 0;
- NdbDictionary::Column::COMMIT_COUNT= 0;
- NdbDictionary::Column::ROW_SIZE= 0;
- NdbDictionary::Column::RANGE_NO= 0;
- }
- m_globalHash->unlock();
- } else {
- assert(curr == 0);
- }
-}
-
-Ndb_local_table_info *
-NdbDictionaryImpl::fetchGlobalTableImpl(const BaseString& internalTableName)
-{
- NdbTableImpl *impl;
-
- m_globalHash->lock();
- impl = m_globalHash->get(internalTableName.c_str());
- m_globalHash->unlock();
-
- if (impl == 0){
- impl = m_receiver.getTable(internalTableName,
- m_ndb.usingFullyQualifiedNames());
- m_globalHash->lock();
- m_globalHash->put(internalTableName.c_str(), impl);
- m_globalHash->unlock();
-
- if(impl == 0){
- return 0;
- }
- }
-
- Ndb_local_table_info *info=
- Ndb_local_table_info::create(impl, m_local_table_data_size);
-
- m_localHash.put(internalTableName.c_str(), info);
-
- m_ndb.theFirstTupleId[impl->getTableId()] = ~0;
- m_ndb.theLastTupleId[impl->getTableId()] = ~0;
-
- return info;
-}
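
Seen from the application, this cache means a stale table definition has to be invalidated explicitly after an external schema change; a minimal sketch with a hypothetical table name:

/*
 * Illustrative only: "t1" is hypothetical.
 */
static const NdbDictionary::Table* refetch_table(Ndb* ndb)
{
  NdbDictionary::Dictionary* dict = ndb->getDictionary();
  dict->invalidateTable("t1");           // drop any cached version
  return dict->getTable("t1");           // fetched again from DICT
}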
-
-#if 0
-bool
-NdbDictionaryImpl::setTransporter(class TransporterFacade * tf)
-{
- if(tf != 0){
- m_globalHash = &tf->m_globalDictCache;
- return m_receiver.setTransporter(tf);
- }
-
- return false;
-}
-#endif
-
-bool
-NdbDictionaryImpl::setTransporter(class Ndb* ndb,
- class TransporterFacade * tf)
-{
- m_globalHash = &tf->m_globalDictCache;
- if(m_receiver.setTransporter(ndb, tf)){
- m_globalHash->lock();
- if(f_dictionary_count++ == 0){
- NdbDictionary::Column::FRAGMENT=
- NdbColumnImpl::create_psuedo("NDB$FRAGMENT");
- NdbDictionary::Column::FRAGMENT_MEMORY=
- NdbColumnImpl::create_psuedo("NDB$FRAGMENT_MEMORY");
- NdbDictionary::Column::ROW_COUNT=
- NdbColumnImpl::create_psuedo("NDB$ROW_COUNT");
- NdbDictionary::Column::COMMIT_COUNT=
- NdbColumnImpl::create_psuedo("NDB$COMMIT_COUNT");
- NdbDictionary::Column::ROW_SIZE=
- NdbColumnImpl::create_psuedo("NDB$ROW_SIZE");
- NdbDictionary::Column::RANGE_NO=
- NdbColumnImpl::create_psuedo("NDB$RANGE_NO");
- }
- m_globalHash->unlock();
- return true;
- }
- return false;
-}
-
-NdbTableImpl *
-NdbDictionaryImpl::getIndexTable(NdbIndexImpl * index,
- NdbTableImpl * table)
-{
- const BaseString internalName(
- m_ndb.internalize_index_name(table, index->getName()));
- return getTable(m_ndb.externalizeTableName(internalName.c_str()));
-}
-
-#if 0
-bool
-NdbDictInterface::setTransporter(class TransporterFacade * tf)
-{
- if(tf == 0)
- return false;
-
- Guard g(tf->theMutexPtr);
-
- m_blockNumber = tf->open(this,
- execSignal,
- execNodeStatus);
-
- if ( m_blockNumber == -1 ) {
- m_error.code= 4105;
- return false; // no more free blocknumbers
- }//if
- Uint32 theNode = tf->ownId();
- m_reference = numberToRef(m_blockNumber, theNode);
- m_transporter = tf;
- m_waiter.m_mutex = tf->theMutexPtr;
-
- return true;
-}
-#endif
-
-bool
-NdbDictInterface::setTransporter(class Ndb* ndb, class TransporterFacade * tf)
-{
- m_reference = ndb->getReference();
- m_transporter = tf;
- m_waiter.m_mutex = tf->theMutexPtr;
-
- return true;
-}
-
-NdbDictInterface::~NdbDictInterface()
-{
-}
-
-void
-NdbDictInterface::execSignal(void* dictImpl,
- class NdbApiSignal* signal,
- class LinearSectionPtr ptr[3])
-{
- NdbDictInterface * tmp = (NdbDictInterface*)dictImpl;
-
- const Uint32 gsn = signal->readSignalNumber();
- switch(gsn){
- case GSN_GET_TABINFOREF:
- tmp->execGET_TABINFO_REF(signal, ptr);
- break;
- case GSN_GET_TABINFO_CONF:
- tmp->execGET_TABINFO_CONF(signal, ptr);
- break;
- case GSN_CREATE_TABLE_REF:
- tmp->execCREATE_TABLE_REF(signal, ptr);
- break;
- case GSN_CREATE_TABLE_CONF:
- tmp->execCREATE_TABLE_CONF(signal, ptr);
- break;
- case GSN_DROP_TABLE_REF:
- tmp->execDROP_TABLE_REF(signal, ptr);
- break;
- case GSN_DROP_TABLE_CONF:
- tmp->execDROP_TABLE_CONF(signal, ptr);
- break;
- case GSN_ALTER_TABLE_REF:
- tmp->execALTER_TABLE_REF(signal, ptr);
- break;
- case GSN_ALTER_TABLE_CONF:
- tmp->execALTER_TABLE_CONF(signal, ptr);
- break;
- case GSN_CREATE_INDX_REF:
- tmp->execCREATE_INDX_REF(signal, ptr);
- break;
- case GSN_CREATE_INDX_CONF:
- tmp->execCREATE_INDX_CONF(signal, ptr);
- break;
- case GSN_DROP_INDX_REF:
- tmp->execDROP_INDX_REF(signal, ptr);
- break;
- case GSN_DROP_INDX_CONF:
- tmp->execDROP_INDX_CONF(signal, ptr);
- break;
- case GSN_CREATE_EVNT_REF:
- tmp->execCREATE_EVNT_REF(signal, ptr);
- break;
- case GSN_CREATE_EVNT_CONF:
- tmp->execCREATE_EVNT_CONF(signal, ptr);
- break;
- case GSN_SUB_START_CONF:
- tmp->execSUB_START_CONF(signal, ptr);
- break;
- case GSN_SUB_START_REF:
- tmp->execSUB_START_REF(signal, ptr);
- break;
- case GSN_SUB_TABLE_DATA:
- tmp->execSUB_TABLE_DATA(signal, ptr);
- break;
- case GSN_SUB_GCP_COMPLETE_REP:
- tmp->execSUB_GCP_COMPLETE_REP(signal, ptr);
- break;
- case GSN_SUB_STOP_CONF:
- tmp->execSUB_STOP_CONF(signal, ptr);
- break;
- case GSN_SUB_STOP_REF:
- tmp->execSUB_STOP_REF(signal, ptr);
- break;
- case GSN_DROP_EVNT_REF:
- tmp->execDROP_EVNT_REF(signal, ptr);
- break;
- case GSN_DROP_EVNT_CONF:
- tmp->execDROP_EVNT_CONF(signal, ptr);
- break;
- case GSN_LIST_TABLES_CONF:
- tmp->execLIST_TABLES_CONF(signal, ptr);
- break;
- default:
- abort();
- }
-}
-
-void
-NdbDictInterface::execNodeStatus(void* dictImpl, Uint32 aNode,
- bool alive, bool nfCompleted)
-{
- NdbDictInterface * tmp = (NdbDictInterface*)dictImpl;
-
- if(!alive && !nfCompleted){
- return;
- }
-
- if (!alive && nfCompleted){
- tmp->m_waiter.nodeFail(aNode);
- }
-}
-
-int
-NdbDictInterface::dictSignal(NdbApiSignal* signal,
- LinearSectionPtr ptr[3],int noLSP,
- const int useMasterNodeId,
- const Uint32 RETRIES,
- const WaitSignalType wst,
- const int theWait,
- const int *errcodes,
- const int noerrcodes,
- const int temporaryMask)
-{
- DBUG_ENTER("NdbDictInterface::dictSignal");
- DBUG_PRINT("enter", ("useMasterNodeId: %d", useMasterNodeId));
- for(Uint32 i = 0; i<RETRIES; i++){
- //if (useMasterNodeId == 0)
- m_buffer.clear();
-
- // Protected area
- m_transporter->lock_mutex();
- Uint32 aNodeId;
- if (useMasterNodeId) {
- if ((m_masterNodeId == 0) ||
- (!m_transporter->get_node_alive(m_masterNodeId))) {
- m_masterNodeId = m_transporter->get_an_alive_node();
- }//if
- aNodeId = m_masterNodeId;
- } else {
- aNodeId = m_transporter->get_an_alive_node();
- }
- if(aNodeId == 0){
- m_error.code= 4009;
- m_transporter->unlock_mutex();
- DBUG_RETURN(-1);
- }
- {
- int r;
- if (ptr) {
-#ifdef EVENT_DEBUG
- printf("Long signal %d ptr", noLSP);
- for (int q=0;q<noLSP;q++) {
- printf(" sz %d", ptr[q].sz);
- }
- printf("\n");
-#endif
- r = m_transporter->sendFragmentedSignal(signal, aNodeId, ptr, noLSP);
- } else {
-#ifdef EVENT_DEBUG
- printf("Short signal\n");
-#endif
- r = m_transporter->sendSignal(signal, aNodeId);
- }
- if(r != 0){
- m_transporter->unlock_mutex();
- continue;
- }
- }
-
- m_error.code= 0;
-
- m_waiter.m_node = aNodeId;
- m_waiter.m_state = wst;
-
- m_waiter.wait(theWait);
- m_transporter->unlock_mutex();
- // End of Protected area
-
- if(m_waiter.m_state == NO_WAIT && m_error.code == 0){
- // Normal return
- DBUG_RETURN(0);
- }
-
- /**
- * Handle error codes
- */
- if(m_waiter.m_state == WAIT_NODE_FAILURE)
- continue;
-
- if(m_waiter.m_state == WST_WAIT_TIMEOUT)
- {
- m_error.code = 4008;
- DBUG_RETURN(-1);
- }
-
- if ( (temporaryMask & m_error.code) != 0 ) {
- continue;
- }
- if (errcodes) {
- int doContinue = 0;
- for (int j=0; j < noerrcodes; j++)
- if(m_error.code == errcodes[j]) {
- doContinue = 1;
- break;
- }
- if (doContinue)
- continue;
- }
-
- DBUG_RETURN(-1);
- }
- DBUG_RETURN(-1);
-}
-#if 0
-/*
- Get dictionary information for a table using table id as reference
-
- DESCRIPTION
- Sends a GET_TABINFOREQ signal containing the table id
- */
-NdbTableImpl *
-NdbDictInterface::getTable(int tableId, bool fullyQualifiedNames)
-{
- NdbApiSignal tSignal(m_reference);
- GetTabInfoReq* const req = CAST_PTR(GetTabInfoReq, tSignal.getDataPtrSend());
-
- req->senderRef = m_reference;
- req->senderData = 0;
- req->requestType =
- GetTabInfoReq::RequestById | GetTabInfoReq::LongSignalConf;
- req->tableId = tableId;
- tSignal.theReceiversBlockNumber = DBDICT;
- tSignal.theVerId_signalNumber = GSN_GET_TABINFOREQ;
- tSignal.theLength = GetTabInfoReq::SignalLength;
-
- return getTable(&tSignal, 0, 0, fullyQualifiedNames);
-}
-#endif
-
-
-/*
- Get dictionary information for a table using table name as the reference
-
- DESCRIPTION
- Send GET_TABINFOREQ signal with the table name in the first
- long section part
-*/
-
-NdbTableImpl *
-NdbDictInterface::getTable(const BaseString& name, bool fullyQualifiedNames)
-{
- NdbApiSignal tSignal(m_reference);
- GetTabInfoReq* const req = CAST_PTR(GetTabInfoReq, tSignal.getDataPtrSend());
-
- const Uint32 namelen= name.length() + 1; // NULL terminated
- const Uint32 namelen_words= (namelen + 3) >> 2; // Size in words
-
- req->senderRef= m_reference;
- req->senderData= 0;
- req->requestType=
- GetTabInfoReq::RequestByName | GetTabInfoReq::LongSignalConf;
- req->tableNameLen= namelen;
- tSignal.theReceiversBlockNumber= DBDICT;
- tSignal.theVerId_signalNumber= GSN_GET_TABINFOREQ;
- tSignal.theLength= GetTabInfoReq::SignalLength;
-
- // Copy name to m_buffer to get a word sized buffer
- m_buffer.clear();
- m_buffer.grow(namelen_words*4);
- m_buffer.append(name.c_str(), namelen);
-
- LinearSectionPtr ptr[1];
- ptr[0].p= (Uint32*)m_buffer.get_data();
- ptr[0].sz= namelen_words;
-
- return getTable(&tSignal, ptr, 1, fullyQualifiedNames);
-}
-
-
-NdbTableImpl *
-NdbDictInterface::getTable(class NdbApiSignal * signal,
- LinearSectionPtr ptr[3],
- Uint32 noOfSections, bool fullyQualifiedNames)
-{
- int errCodes[] = {GetTabInfoRef::Busy };
-
- int r = dictSignal(signal,ptr,noOfSections,
- 0/*do not use masternode id*/,
- 100,
- WAIT_GET_TAB_INFO_REQ,
- WAITFOR_RESPONSE_TIMEOUT,
- errCodes, 1);
- if (r) return 0;
-
- NdbTableImpl * rt = 0;
- m_error.code= parseTableInfo(&rt,
- (Uint32*)m_buffer.get_data(),
- m_buffer.length() / 4, fullyQualifiedNames);
- if (rt)
- rt->buildColumnHash();
- return rt;
-}
-
-void
-NdbDictInterface::execGET_TABINFO_CONF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
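- // GET_TABINFO_CONF may arrive as a fragmented signal: the DICT_TAB_INFO
- // sections are accumulated in m_buffer and the waiter is only woken up
- // once the last fragment has been received.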
- const GetTabInfoConf* conf = CAST_CONSTPTR(GetTabInfoConf, signal->getDataPtr());
- if(signal->isFirstFragment()){
- m_fragmentId = signal->getFragmentId();
- m_buffer.grow(4 * conf->totalLen);
- } else {
- if(m_fragmentId != signal->getFragmentId()){
- abort();
- }
- }
-
- const Uint32 i = GetTabInfoConf::DICT_TAB_INFO;
- m_buffer.append(ptr[i].p, 4 * ptr[i].sz);
-
- if(!signal->isLastFragment()){
- return;
- }
-
- m_waiter.signal(NO_WAIT);
-}
-
-void
-NdbDictInterface::execGET_TABINFO_REF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- const GetTabInfoRef* ref = CAST_CONSTPTR(GetTabInfoRef, signal->getDataPtr());
-
- m_error.code= ref->errorCode;
- m_waiter.signal(NO_WAIT);
-}
-
-/*****************************************************************
- * Pack/Unpack tables
- */
-struct ApiKernelMapping {
- Int32 kernelConstant;
- Int32 apiConstant;
-};
-
-Uint32
-getApiConstant(Int32 kernelConstant, const ApiKernelMapping map[], Uint32 def)
-{
- int i = 0;
- while(map[i].kernelConstant != kernelConstant){
- if(map[i].kernelConstant == -1 &&
- map[i].apiConstant == -1){
- return def;
- }
- i++;
- }
- return map[i].apiConstant;
-}
-
-Uint32
-getKernelConstant(Int32 apiConstant, const ApiKernelMapping map[], Uint32 def)
-{
- int i = 0;
- while(map[i].apiConstant != apiConstant){
- if(map[i].kernelConstant == -1 &&
- map[i].apiConstant == -1){
- return def;
- }
- i++;
- }
- return map[i].kernelConstant;
-}
-
-static const
-ApiKernelMapping
-fragmentTypeMapping[] = {
- { DictTabInfo::AllNodesSmallTable, NdbDictionary::Object::FragAllSmall },
- { DictTabInfo::AllNodesMediumTable, NdbDictionary::Object::FragAllMedium },
- { DictTabInfo::AllNodesLargeTable, NdbDictionary::Object::FragAllLarge },
- { DictTabInfo::SingleFragment, NdbDictionary::Object::FragSingle },
- { -1, -1 }
-};
-
-static const
-ApiKernelMapping
-objectTypeMapping[] = {
- { DictTabInfo::SystemTable, NdbDictionary::Object::SystemTable },
- { DictTabInfo::UserTable, NdbDictionary::Object::UserTable },
- { DictTabInfo::UniqueHashIndex, NdbDictionary::Object::UniqueHashIndex },
- { DictTabInfo::OrderedIndex, NdbDictionary::Object::OrderedIndex },
- { DictTabInfo::HashIndexTrigger, NdbDictionary::Object::HashIndexTrigger },
- { DictTabInfo::IndexTrigger, NdbDictionary::Object::IndexTrigger },
- { DictTabInfo::SubscriptionTrigger,NdbDictionary::Object::SubscriptionTrigger },
- { DictTabInfo::ReadOnlyConstraint ,NdbDictionary::Object::ReadOnlyConstraint },
- { -1, -1 }
-};
-
-static const
-ApiKernelMapping
-objectStateMapping[] = {
- { DictTabInfo::StateOffline, NdbDictionary::Object::StateOffline },
- { DictTabInfo::StateBuilding, NdbDictionary::Object::StateBuilding },
- { DictTabInfo::StateDropping, NdbDictionary::Object::StateDropping },
- { DictTabInfo::StateOnline, NdbDictionary::Object::StateOnline },
- { DictTabInfo::StateBroken, NdbDictionary::Object::StateBroken },
- { -1, -1 }
-};
-
-static const
-ApiKernelMapping
-objectStoreMapping[] = {
- { DictTabInfo::StoreTemporary, NdbDictionary::Object::StoreTemporary },
- { DictTabInfo::StorePermanent, NdbDictionary::Object::StorePermanent },
- { -1, -1 }
-};
-
-static const
-ApiKernelMapping
-indexTypeMapping[] = {
- { DictTabInfo::UniqueHashIndex, NdbDictionary::Index::UniqueHashIndex },
- { DictTabInfo::OrderedIndex, NdbDictionary::Index::OrderedIndex },
- { -1, -1 }
-};
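-
-#if 0
-// Illustrative only (hypothetical helper, not called anywhere): the
-// sentinel-terminated mapping tables above are scanned linearly, returning
-// the supplied default once the {-1, -1} terminator is reached.
-static void
-exampleMappingLookup()
-{
-  Uint32 apiFrag = getApiConstant(DictTabInfo::AllNodesSmallTable,
-                                  fragmentTypeMapping,
-                                  (Uint32)NdbDictionary::Object::FragUndefined);
-  Uint32 kernelFrag = getKernelConstant(NdbDictionary::Object::FragAllSmall,
-                                        fragmentTypeMapping,
-                                        DictTabInfo::AllNodesSmallTable);
-  (void)apiFrag;
-  (void)kernelFrag;
-}
-#endif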
-
-int
-NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
- const Uint32 * data, Uint32 len,
- bool fullyQualifiedNames)
-{
- DBUG_ENTER("NdbDictInterface::parseTableInfo");
-
- SimplePropertiesLinearReader it(data, len);
- DictTabInfo::Table tableDesc; tableDesc.init();
- SimpleProperties::UnpackStatus s;
- s = SimpleProperties::unpack(it, &tableDesc,
- DictTabInfo::TableMapping,
- DictTabInfo::TableMappingSize,
- true, true);
-
- if(s != SimpleProperties::Break){
- DBUG_RETURN(703);
- }
- const char * internalName = tableDesc.TableName;
- const char * externalName = Ndb::externalizeTableName(internalName, fullyQualifiedNames);
-
- NdbTableImpl * impl = new NdbTableImpl();
- impl->m_tableId = tableDesc.TableId;
- impl->m_version = tableDesc.TableVersion;
- impl->m_status = NdbDictionary::Object::Retrieved;
- impl->m_internalName.assign(internalName);
- impl->m_externalName.assign(externalName);
-
- impl->m_frm.assign(tableDesc.FrmData, tableDesc.FrmLen);
-
- impl->m_fragmentType = (NdbDictionary::Object::FragmentType)
- getApiConstant(tableDesc.FragmentType,
- fragmentTypeMapping,
- (Uint32)NdbDictionary::Object::FragUndefined);
-
- impl->m_logging = tableDesc.TableLoggedFlag;
- impl->m_kvalue = tableDesc.TableKValue;
- impl->m_minLoadFactor = tableDesc.MinLoadFactor;
- impl->m_maxLoadFactor = tableDesc.MaxLoadFactor;
-
- impl->m_indexType = (NdbDictionary::Index::Type)
- getApiConstant(tableDesc.TableType,
- indexTypeMapping,
- NdbDictionary::Index::Undefined);
-
- if(impl->m_indexType == NdbDictionary::Index::Undefined){
- } else {
- const char * externalPrimary =
- Ndb::externalizeTableName(tableDesc.PrimaryTable, fullyQualifiedNames);
- impl->m_primaryTable.assign(externalPrimary);
- }
-
- Uint32 keyInfoPos = 0;
- Uint32 keyCount = 0;
- Uint32 blobCount = 0;
- Uint32 distKeys = 0;
-
- Uint32 i;
- for(i = 0; i < tableDesc.NoOfAttributes; i++) {
- DictTabInfo::Attribute attrDesc; attrDesc.init();
- s = SimpleProperties::unpack(it,
- &attrDesc,
- DictTabInfo::AttributeMapping,
- DictTabInfo::AttributeMappingSize,
- true, true);
- if(s != SimpleProperties::Break){
- delete impl;
- DBUG_RETURN(703);
- }
-
- NdbColumnImpl * col = new NdbColumnImpl();
- col->m_attrId = attrDesc.AttributeId;
- col->setName(attrDesc.AttributeName);
-
- // check type and compute attribute size and array size
- if (! attrDesc.translateExtType()) {
- delete impl;
- DBUG_RETURN(703);
- }
- col->m_type = (NdbDictionary::Column::Type)attrDesc.AttributeExtType;
- col->m_precision = (attrDesc.AttributeExtPrecision & 0xFFFF);
- col->m_scale = attrDesc.AttributeExtScale;
- col->m_length = attrDesc.AttributeExtLength;
- // charset in upper half of precision
- unsigned cs_number = (attrDesc.AttributeExtPrecision >> 16);
- // charset is defined exactly for char types
- if (col->getCharType() != (cs_number != 0)) {
- delete impl;
- DBUG_RETURN(703);
- }
- if (col->getCharType()) {
- col->m_cs = get_charset(cs_number, MYF(0));
- if (col->m_cs == NULL) {
- delete impl;
- DBUG_RETURN(743);
- }
- }
- col->m_attrSize = (1 << attrDesc.AttributeSize) / 8;
- col->m_arraySize = attrDesc.AttributeArraySize;
- if(attrDesc.AttributeSize == 0)
- {
- col->m_attrSize = 4;
- col->m_arraySize = (attrDesc.AttributeArraySize + 31) >> 5;
- }
-
- col->m_pk = attrDesc.AttributeKeyFlag;
- col->m_distributionKey = attrDesc.AttributeDKey;
- col->m_nullable = attrDesc.AttributeNullableFlag;
- col->m_autoIncrement = (attrDesc.AttributeAutoIncrement ? true : false);
- col->m_autoIncrementInitialValue = ~0;
- col->m_defaultValue.assign(attrDesc.AttributeDefaultValue);
-
- if(attrDesc.AttributeKeyFlag){
- col->m_keyInfoPos = keyInfoPos + 1;
- keyInfoPos += ((col->m_attrSize * col->m_arraySize + 3) / 4);
- keyCount++;
-
- if(attrDesc.AttributeDKey)
- distKeys++;
- } else {
- col->m_keyInfoPos = 0;
- }
- if (col->getBlobType())
- blobCount++;
- NdbColumnImpl * null = 0;
- impl->m_columns.fill(attrDesc.AttributeId, null);
- if(impl->m_columns[attrDesc.AttributeId] != 0){
- delete col;
- delete impl;
- DBUG_RETURN(703);
- }
- impl->m_columns[attrDesc.AttributeId] = col;
- it.next();
- }
-
- impl->m_noOfKeys = keyCount;
- impl->m_keyLenInWords = keyInfoPos;
- impl->m_noOfBlobs = blobCount;
- impl->m_noOfDistributionKeys = distKeys;
-
- if(tableDesc.FragmentDataLen > 0)
- {
- Uint32 replicaCount = tableDesc.FragmentData[0];
- Uint32 fragCount = tableDesc.FragmentData[1];
-
- impl->m_replicaCount = replicaCount;
- impl->m_fragmentCount = fragCount;
-
- for(i = 0; i<(fragCount*replicaCount); i++)
- {
- impl->m_fragments.push_back(tableDesc.FragmentData[i+2]);
- }
-
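- // Find the highest set bit of fragCount: the bits below it form the hash
- // value mask and the remainder (fragCount minus that power of two) becomes
- // the hash pointer value used when mapping hash values to fragments.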
- Uint32 topBit = (1 << 31);
- for(; topBit && !(fragCount & topBit); ){
- topBit >>= 1;
- }
- impl->m_hashValueMask = topBit - 1;
- impl->m_hashpointerValue = fragCount - (impl->m_hashValueMask + 1);
- }
- else
- {
- impl->m_fragmentCount = tableDesc.FragmentCount;
- impl->m_replicaCount = 0;
- impl->m_hashValueMask = 0;
- impl->m_hashpointerValue = 0;
- }
-
- if(distKeys == 0)
- {
- for(i = 0; i < tableDesc.NoOfAttributes; i++)
- {
- if(impl->m_columns[i]->getPrimaryKey())
- impl->m_columns[i]->m_distributionKey = true;
- }
- }
-
- * ret = impl;
-
- DBUG_RETURN(0);
-}
-
-/*****************************************************************
- * Create table and alter table
- */
-int
-NdbDictionaryImpl::createTable(NdbTableImpl &t)
-{
- if (m_receiver.createTable(m_ndb, t) != 0)
- return -1;
- if (t.m_noOfBlobs == 0)
- return 0;
- // update table def from DICT
- Ndb_local_table_info *info=
- get_local_table_info(t.m_internalName,false);
- if (info == NULL) {
- m_error.code= 709;
- return -1;
- }
- if (createBlobTables(*(info->m_table_impl)) != 0) {
- int save_code = m_error.code;
- (void)dropTable(t);
- m_error.code= save_code;
- return -1;
- }
- return 0;
-}
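-
-#if 0
-// Illustrative only: how an application typically reaches createTable()
-// above through the public NdbDictionary facade. myNdb is assumed to be a
-// connected Ndb object; the table and column names are examples.
-static int
-exampleCreateTable(Ndb * myNdb)
-{
-  NdbDictionary::Table tab("example_tab");
-  NdbDictionary::Column col("pk");
-  col.setType(NdbDictionary::Column::Unsigned);
-  col.setPrimaryKey(true);
-  tab.addColumn(col);
-  return myNdb->getDictionary()->createTable(tab);
-}
-#endif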
-
-int
-NdbDictionaryImpl::createBlobTables(NdbTableImpl &t)
-{
- for (unsigned i = 0; i < t.m_columns.size(); i++) {
- NdbColumnImpl & c = *t.m_columns[i];
- if (! c.getBlobType() || c.getPartSize() == 0)
- continue;
- NdbTableImpl bt;
- NdbBlob::getBlobTable(bt, &t, &c);
- if (createTable(bt) != 0)
- return -1;
- // Save BLOB table handle
- Ndb_local_table_info *info=
- get_local_table_info(bt.m_internalName, false);
- if (info == 0) {
- return -1;
- }
- c.m_blobTable = info->m_table_impl;
- }
-
- return 0;
-}
-
-int
-NdbDictionaryImpl::addBlobTables(NdbTableImpl &t)
-{
- unsigned n= t.m_noOfBlobs;
- // optimized for the blob column being the last one
- // and not looking for more than one unless necessary
- for (unsigned i = t.m_columns.size(); i > 0 && n > 0;) {
- i--;
- NdbColumnImpl & c = *t.m_columns[i];
- if (! c.getBlobType() || c.getPartSize() == 0)
- continue;
- n--;
- char btname[NdbBlobImpl::BlobTableNameSize];
- NdbBlob::getBlobTableName(btname, &t, &c);
- // Save BLOB table handle
- NdbTableImpl * cachedBlobTable = getTable(btname);
- if (cachedBlobTable == 0) {
- return -1;
- }
- c.m_blobTable = cachedBlobTable;
- }
-
- return 0;
-}
-
-int
-NdbDictInterface::createTable(Ndb & ndb,
- NdbTableImpl & impl)
-{
- return createOrAlterTable(ndb, impl, false);
-}
-
-int NdbDictionaryImpl::alterTable(NdbTableImpl &impl)
-{
- BaseString internalName(impl.m_internalName);
- const char * originalInternalName = internalName.c_str();
-
- DBUG_ENTER("NdbDictionaryImpl::alterTable");
- if(!get_local_table_info(internalName, false)){
- m_error.code= 709;
- DBUG_RETURN(-1);
- }
- // Alter the table
- int ret = m_receiver.alterTable(m_ndb, impl);
- if(ret == 0){
- // Remove cached information and let it be refreshed at next access
- if (m_localHash.get(originalInternalName) != NULL) {
- m_localHash.drop(originalInternalName);
- m_globalHash->lock();
- NdbTableImpl * cachedImpl = m_globalHash->get(originalInternalName);
- // If in local cache it must be in global
- if (!cachedImpl)
- abort();
- cachedImpl->m_status = NdbDictionary::Object::Invalid;
- m_globalHash->drop(cachedImpl);
- m_globalHash->unlock();
- }
- }
- DBUG_RETURN(ret);
-}
-
-int
-NdbDictInterface::alterTable(Ndb & ndb,
- NdbTableImpl & impl)
-{
- return createOrAlterTable(ndb, impl, true);
-}
-
-int
-NdbDictInterface::createOrAlterTable(Ndb & ndb,
- NdbTableImpl & impl,
- bool alter)
-{
- DBUG_ENTER("NdbDictInterface::createOrAlterTable");
- unsigned i;
- if((unsigned)impl.getNoOfPrimaryKeys() > NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY){
- m_error.code= 4317;
- DBUG_RETURN(-1);
- }
- unsigned sz = impl.m_columns.size();
- if (sz > NDB_MAX_ATTRIBUTES_IN_TABLE){
- m_error.code= 4318;
- DBUG_RETURN(-1);
- }
-
- if (!impl.m_newExternalName.empty()) {
- impl.m_externalName.assign(impl.m_newExternalName);
- AlterTableReq::setNameFlag(impl.m_changeMask, true);
- }
-
- //validate();
- //aggregate();
-
- const BaseString internalName(
- ndb.internalize_table_name(impl.m_externalName.c_str()));
- impl.m_internalName.assign(internalName);
- UtilBufferWriter w(m_buffer);
- DictTabInfo::Table tmpTab; tmpTab.init();
- BaseString::snprintf(tmpTab.TableName,
- sizeof(tmpTab.TableName),
- "%s", internalName.c_str());
-
- bool haveAutoIncrement = false;
- Uint64 autoIncrementValue = 0;
- Uint32 distKeys= 0;
- for(i = 0; i<sz; i++){
- const NdbColumnImpl * col = impl.m_columns[i];
- if(col == 0)
- continue;
- if (col->m_autoIncrement) {
- if (haveAutoIncrement) {
- m_error.code= 4335;
- DBUG_RETURN(-1);
- }
- haveAutoIncrement = true;
- autoIncrementValue = col->m_autoIncrementInitialValue;
- }
- if (col->m_distributionKey)
- distKeys++;
- }
-
- // Check max length of frm data
- if (impl.m_frm.length() > MAX_FRM_DATA_SIZE){
- m_error.code= 1229;
- DBUG_RETURN(-1);
- }
- tmpTab.FrmLen = impl.m_frm.length();
- memcpy(tmpTab.FrmData, impl.m_frm.get_data(), impl.m_frm.length());
-
- tmpTab.TableLoggedFlag = impl.m_logging;
- tmpTab.TableKValue = impl.m_kvalue;
- tmpTab.MinLoadFactor = impl.m_minLoadFactor;
- tmpTab.MaxLoadFactor = impl.m_maxLoadFactor;
- tmpTab.TableType = DictTabInfo::UserTable;
- tmpTab.NoOfAttributes = sz;
-
- tmpTab.FragmentType = getKernelConstant(impl.m_fragmentType,
- fragmentTypeMapping,
- DictTabInfo::AllNodesSmallTable);
- tmpTab.TableVersion = rand();
-
- SimpleProperties::UnpackStatus s;
- s = SimpleProperties::pack(w,
- &tmpTab,
- DictTabInfo::TableMapping,
- DictTabInfo::TableMappingSize, true);
-
- if(s != SimpleProperties::Eof){
- abort();
- }
-
- if (distKeys == impl.m_noOfKeys)
- distKeys= 0;
- impl.m_noOfDistributionKeys= distKeys;
-
- for(i = 0; i<sz; i++){
- const NdbColumnImpl * col = impl.m_columns[i];
- if(col == 0)
- continue;
-
- DictTabInfo::Attribute tmpAttr; tmpAttr.init();
- BaseString::snprintf(tmpAttr.AttributeName, sizeof(tmpAttr.AttributeName),
- "%s", col->m_name.c_str());
- tmpAttr.AttributeId = i;
- tmpAttr.AttributeKeyFlag = col->m_pk;
- tmpAttr.AttributeNullableFlag = col->m_nullable;
- tmpAttr.AttributeDKey = distKeys ? col->m_distributionKey : 0;
-
- tmpAttr.AttributeExtType = (Uint32)col->m_type;
- tmpAttr.AttributeExtPrecision = ((unsigned)col->m_precision & 0xFFFF);
- tmpAttr.AttributeExtScale = col->m_scale;
- tmpAttr.AttributeExtLength = col->m_length;
-
- // check type and compute attribute size and array size
- if (! tmpAttr.translateExtType()) {
- m_error.code= 703;
- DBUG_RETURN(-1);
- }
- // charset is defined exactly for char types
- if (col->getCharType() != (col->m_cs != NULL)) {
- m_error.code= 703;
- DBUG_RETURN(-1);
- }
- // primary key type check
- if (col->m_pk && ! NdbSqlUtil::usable_in_pk(col->m_type, col->m_cs)) {
- m_error.code= (col->m_cs != 0 ? 743 : 739);
- DBUG_RETURN(-1);
- }
- // distribution key not supported for Char attribute
- if (distKeys && col->m_distributionKey && col->m_cs != NULL) {
- m_error.code= 745;
- DBUG_RETURN(-1);
- }
- // charset in upper half of precision
- if (col->getCharType()) {
- tmpAttr.AttributeExtPrecision |= (col->m_cs->number << 16);
- }
-
- tmpAttr.AttributeAutoIncrement = col->m_autoIncrement;
- BaseString::snprintf(tmpAttr.AttributeDefaultValue,
- sizeof(tmpAttr.AttributeDefaultValue),
- "%s", col->m_defaultValue.c_str());
- s = SimpleProperties::pack(w,
- &tmpAttr,
- DictTabInfo::AttributeMapping,
- DictTabInfo::AttributeMappingSize, true);
- w.add(DictTabInfo::AttributeEnd, 1);
- }
-
- NdbApiSignal tSignal(m_reference);
- tSignal.theReceiversBlockNumber = DBDICT;
-
- LinearSectionPtr ptr[1];
- ptr[0].p = (Uint32*)m_buffer.get_data();
- ptr[0].sz = m_buffer.length() / 4;
- int ret;
- if (alter)
- {
- AlterTableReq * const req =
- CAST_PTR(AlterTableReq, tSignal.getDataPtrSend());
-
- req->senderRef = m_reference;
- req->senderData = 0;
- req->changeMask = impl.m_changeMask;
- req->tableId = impl.m_tableId;
- req->tableVersion = impl.m_version;
- tSignal.theVerId_signalNumber = GSN_ALTER_TABLE_REQ;
- tSignal.theLength = AlterTableReq::SignalLength;
- ret= alterTable(&tSignal, ptr);
- }
- else
- {
- CreateTableReq * const req =
- CAST_PTR(CreateTableReq, tSignal.getDataPtrSend());
-
- req->senderRef = m_reference;
- req->senderData = 0;
- tSignal.theVerId_signalNumber = GSN_CREATE_TABLE_REQ;
- tSignal.theLength = CreateTableReq::SignalLength;
- ret= createTable(&tSignal, ptr);
-
- if (ret)
- DBUG_RETURN(ret);
-
- if (haveAutoIncrement) {
- if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(),
- autoIncrementValue)) {
- if (ndb.theError.code == 0) {
- m_error.code= 4336;
- ndb.theError = m_error;
- } else
- m_error= ndb.theError;
- ret = -1; // error code set in initialize_autoincrement
- }
- }
- }
- DBUG_RETURN(ret);
-}
-
-int
-NdbDictInterface::createTable(NdbApiSignal* signal, LinearSectionPtr ptr[3])
-{
-#if DEBUG_PRINT
- ndbout_c("BufferLen = %d", ptr[0].sz);
- SimplePropertiesLinearReader r(ptr[0].p, ptr[0].sz);
- r.printAll(ndbout);
-#endif
- const int noErrCodes = 2;
- int errCodes[noErrCodes] =
- {CreateTableRef::Busy,
- CreateTableRef::NotMaster};
- return dictSignal(signal,ptr,1,
- 1/*use masternode id*/,
- 100,
- WAIT_CREATE_INDX_REQ,
- WAITFOR_RESPONSE_TIMEOUT,
- errCodes,noErrCodes);
-}
-
-
-void
-NdbDictInterface::execCREATE_TABLE_CONF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
-#if 0
- const CreateTableConf* const conf=
- CAST_CONSTPTR(CreateTableConf, signal->getDataPtr());
- Uint32 tableId= conf->tableId;
- Uint32 tableVersion= conf->tableVersion;
-#endif
- m_waiter.signal(NO_WAIT);
-}
-
-void
-NdbDictInterface::execCREATE_TABLE_REF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- const CreateTableRef* const ref=
- CAST_CONSTPTR(CreateTableRef, signal->getDataPtr());
- m_error.code= ref->errorCode;
- m_masterNodeId = ref->masterNodeId;
- m_waiter.signal(NO_WAIT);
-}
-
-int
-NdbDictInterface::alterTable(NdbApiSignal* signal, LinearSectionPtr ptr[3])
-{
-#if DEBUG_PRINT
- ndbout_c("BufferLen = %d", ptr[0].sz);
- SimplePropertiesLinearReader r(ptr[0].p, ptr[0].sz);
- r.printAll(ndbout);
-#endif
- const int noErrCodes = 2;
- int errCodes[noErrCodes] =
- {AlterTableRef::NotMaster,
- AlterTableRef::Busy};
- int r = dictSignal(signal,ptr,1,
- 1/*use masternode id*/,
- 100,WAIT_ALTER_TAB_REQ,
- WAITFOR_RESPONSE_TIMEOUT,
- errCodes, noErrCodes);
- if(m_error.code == AlterTableRef::InvalidTableVersion) {
- // Clear caches and try again
- return INCOMPATIBLE_VERSION;
- }
-
- return r;
-}
-
-void
-NdbDictInterface::execALTER_TABLE_CONF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- //AlterTableConf* const conf = CAST_CONSTPTR(AlterTableConf, signal->getDataPtr());
- m_waiter.signal(NO_WAIT);
-}
-
-void
-NdbDictInterface::execALTER_TABLE_REF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- const AlterTableRef * const ref =
- CAST_CONSTPTR(AlterTableRef, signal->getDataPtr());
- m_error.code= ref->errorCode;
- m_masterNodeId = ref->masterNodeId;
- m_waiter.signal(NO_WAIT);
-}
-
-/*****************************************************************
- * Drop table
- */
-int
-NdbDictionaryImpl::dropTable(const char * name)
-{
- DBUG_ENTER("NdbDictionaryImpl::dropTable");
- DBUG_PRINT("enter",("name: %s", name));
- NdbTableImpl * tab = getTable(name);
- if(tab == 0){
- DBUG_RETURN(-1);
- }
- int ret = dropTable(* tab);
- // If the table stored in the cache is incompatible with the one in the
- // kernel, we must clear the cache and try again
- if (ret == INCOMPATIBLE_VERSION) {
- const BaseString internalTableName(m_ndb.internalize_table_name(name));
-
- DBUG_PRINT("info",("INCOMPATIBLE_VERSION internal_name: %s", internalTableName.c_str()));
- m_localHash.drop(internalTableName.c_str());
- m_globalHash->lock();
- tab->m_status = NdbDictionary::Object::Invalid;
- m_globalHash->drop(tab);
- m_globalHash->unlock();
- DBUG_RETURN(dropTable(name));
- }
-
- DBUG_RETURN(ret);
-}
-
-int
-NdbDictionaryImpl::dropTable(NdbTableImpl & impl)
-{
- int res;
- const char * name = impl.getName();
- if(impl.m_status == NdbDictionary::Object::New){
- return dropTable(name);
- }
-
- if (impl.m_indexType != NdbDictionary::Index::Undefined) {
- m_receiver.m_error.code= 1228;
- return -1;
- }
-
- List list;
- if ((res = listIndexes(list, impl.m_tableId)) == -1){
- return -1;
- }
- for (unsigned i = 0; i < list.count; i++) {
- const List::Element& element = list.elements[i];
- if ((res = dropIndex(element.name, name)) == -1)
- {
- return -1;
- }
- }
-
- if (impl.m_noOfBlobs != 0) {
- if (dropBlobTables(impl) != 0){
- return -1;
- }
- }
-
- int ret = m_receiver.dropTable(impl);
- if(ret == 0 || m_error.code == 709){
- const char * internalTableName = impl.m_internalName.c_str();
-
-
- m_localHash.drop(internalTableName);
- m_globalHash->lock();
- impl.m_status = NdbDictionary::Object::Invalid;
- m_globalHash->drop(&impl);
- m_globalHash->unlock();
-
- return 0;
- }
-
- return ret;
-}
-
-int
-NdbDictionaryImpl::dropBlobTables(NdbTableImpl & t)
-{
- DBUG_ENTER("NdbDictionaryImpl::dropBlobTables");
- for (unsigned i = 0; i < t.m_columns.size(); i++) {
- NdbColumnImpl & c = *t.m_columns[i];
- if (! c.getBlobType() || c.getPartSize() == 0)
- continue;
- char btname[NdbBlobImpl::BlobTableNameSize];
- NdbBlob::getBlobTableName(btname, &t, &c);
- if (dropTable(btname) != 0) {
- if (m_error.code != 709){
- DBUG_PRINT("exit",("error %u - exiting",m_error.code));
- DBUG_RETURN(-1);
- }
- DBUG_PRINT("info",("error %u - continuing",m_error.code));
- }
- }
- DBUG_RETURN(0);
-}
-
-int
-NdbDictInterface::dropTable(const NdbTableImpl & impl)
-{
- NdbApiSignal tSignal(m_reference);
- tSignal.theReceiversBlockNumber = DBDICT;
- tSignal.theVerId_signalNumber = GSN_DROP_TABLE_REQ;
- tSignal.theLength = DropTableReq::SignalLength;
-
- DropTableReq * const req = CAST_PTR(DropTableReq, tSignal.getDataPtrSend());
- req->senderRef = m_reference;
- req->senderData = 0;
- req->tableId = impl.m_tableId;
- req->tableVersion = impl.m_version;
-
- return dropTable(&tSignal, 0);
-}
-
-int
-NdbDictInterface::dropTable(NdbApiSignal* signal, LinearSectionPtr ptr[3])
-{
- const int noErrCodes = 3;
- int errCodes[noErrCodes] =
- {DropTableRef::NoDropTableRecordAvailable,
- DropTableRef::NotMaster,
- DropTableRef::Busy};
- int r = dictSignal(signal,NULL,0,
- 1/*use masternode id*/,
- 100,WAIT_DROP_TAB_REQ,
- WAITFOR_RESPONSE_TIMEOUT,
- errCodes, noErrCodes);
- if(m_error.code == DropTableRef::InvalidTableVersion) {
- // Clear caches and try again
- return INCOMPATIBLE_VERSION;
- }
- return r;
-}
-
-void
-NdbDictInterface::execDROP_TABLE_CONF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- DBUG_ENTER("NdbDictInterface::execDROP_TABLE_CONF");
- //DropTableConf* const conf = CAST_CONSTPTR(DropTableConf, signal->getDataPtr());
-
- m_waiter.signal(NO_WAIT);
- DBUG_VOID_RETURN;
-}
-
-void
-NdbDictInterface::execDROP_TABLE_REF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- DBUG_ENTER("NdbDictInterface::execDROP_TABLE_REF");
- const DropTableRef* const ref = CAST_CONSTPTR(DropTableRef, signal->getDataPtr());
- m_error.code= ref->errorCode;
- m_masterNodeId = ref->masterNodeId;
- m_waiter.signal(NO_WAIT);
- DBUG_VOID_RETURN;
-}
-
-int
-NdbDictionaryImpl::invalidateObject(NdbTableImpl & impl)
-{
- const char * internalTableName = impl.m_internalName.c_str();
- DBUG_ENTER("NdbDictionaryImpl::invalidateObject");
- DBUG_PRINT("enter", ("internal_name: %s", internalTableName));
- m_localHash.drop(internalTableName);
- m_globalHash->lock();
- impl.m_status = NdbDictionary::Object::Invalid;
- m_globalHash->drop(&impl);
- m_globalHash->unlock();
- DBUG_RETURN(0);
-}
-
-int
-NdbDictionaryImpl::removeCachedObject(NdbTableImpl & impl)
-{
- const char * internalTableName = impl.m_internalName.c_str();
-
- m_localHash.drop(internalTableName);
- m_globalHash->lock();
- m_globalHash->release(&impl);
- m_globalHash->unlock();
- return 0;
-}
-
-/*****************************************************************
- * Get index info
- */
-NdbIndexImpl*
-NdbDictionaryImpl::getIndexImpl(const char * externalName,
- const BaseString& internalName)
-{
- Ndb_local_table_info * info = get_local_table_info(internalName,
- false);
- if(info == 0){
- m_error.code = 4243;
- return 0;
- }
- NdbTableImpl * tab = info->m_table_impl;
-
- if(tab->m_indexType == NdbDictionary::Index::Undefined){
- // Not an index
- m_error.code = 4243;
- return 0;
- }
-
- NdbTableImpl* prim = getTable(tab->m_primaryTable.c_str());
- if(prim == 0){
- m_error.code = 4243;
- return 0;
- }
-
- /**
- * Create index impl
- */
- NdbIndexImpl* idx;
- if(NdbDictInterface::create_index_obj_from_table(&idx, tab, prim) == 0){
- idx->m_table = tab;
- idx->m_externalName.assign(externalName);
- idx->m_internalName.assign(internalName);
- // TODO Assign idx to tab->m_index
- // Don't do it right now since assign can't assign a table with an index
- // tab->m_index = idx;
- return idx;
- }
- return 0;
-}
-
-int
-NdbDictInterface::create_index_obj_from_table(NdbIndexImpl** dst,
- NdbTableImpl* tab,
- const NdbTableImpl* prim){
- NdbIndexImpl *idx = new NdbIndexImpl();
- idx->m_version = tab->m_version;
- idx->m_status = tab->m_status;
- idx->m_indexId = tab->m_tableId;
- idx->m_externalName.assign(tab->getName());
- idx->m_tableName.assign(prim->m_externalName);
- NdbDictionary::Index::Type type = idx->m_type = tab->m_indexType;
- idx->m_logging = tab->m_logging;
- // skip last attribute (NDB$PK or NDB$TNODE)
-
- const Uint32 distKeys = prim->m_noOfDistributionKeys;
- Uint32 keyCount = (distKeys ? distKeys : prim->m_noOfKeys);
-
- unsigned i;
- for(i = 0; i+1<tab->m_columns.size(); i++){
- NdbColumnImpl* org = tab->m_columns[i];
-
- NdbColumnImpl* col = new NdbColumnImpl;
- // Copy column definition
- *col = * org;
- idx->m_columns.push_back(col);
-
- /**
- * reverse map
- */
- const NdbColumnImpl* primCol = prim->getColumn(col->getName());
- int key_id = primCol->getColumnNo();
- int fill = -1;
- idx->m_key_ids.fill(key_id, fill);
- idx->m_key_ids[key_id] = i;
- col->m_keyInfoPos = key_id;
-
- if(type == NdbDictionary::Index::OrderedIndex &&
- (primCol->m_distributionKey ||
- (distKeys == 0 && primCol->getPrimaryKey())))
- {
- keyCount--;
- org->m_distributionKey = 1;
- }
- }
-
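- // For an ordered index, keyCount reaches zero only when the index covers
- // every distribution key of the primary table (or every primary key column
- // when no explicit distribution keys exist); only then is the index treated
- // as distribution aware, otherwise the distribution key flags copied above
- // are cleared.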
- if(keyCount == 0)
- {
- tab->m_noOfDistributionKeys = (distKeys ? distKeys : prim->m_noOfKeys);
- }
- else
- {
- for(i = 0; i+1<tab->m_columns.size(); i++)
- tab->m_columns[i]->m_distributionKey = 0;
- }
-
- * dst = idx;
- return 0;
-}
-
-/*****************************************************************
- * Create index
- */
-int
-NdbDictionaryImpl::createIndex(NdbIndexImpl &ix)
-{
- NdbTableImpl* tab = getTable(ix.getTable());
- if(tab == 0){
- m_error.code = 4249;
- return -1;
- }
-
- return m_receiver.createIndex(m_ndb, ix, * tab);
-}
-
-int
-NdbDictInterface::createIndex(Ndb & ndb,
- NdbIndexImpl & impl,
- const NdbTableImpl & table)
-{
- //validate();
- //aggregate();
- unsigned i;
- UtilBufferWriter w(m_buffer);
- const size_t len = strlen(impl.m_externalName.c_str()) + 1;
- if(len > MAX_TAB_NAME_SIZE) {
- m_error.code = 4241;
- return -1;
- }
- const BaseString internalName(
- ndb.internalize_index_name(&table, impl.getName()));
- impl.m_internalName.assign(internalName);
-
- w.add(DictTabInfo::TableName, internalName.c_str());
- w.add(DictTabInfo::TableLoggedFlag, impl.m_logging);
-
- NdbApiSignal tSignal(m_reference);
- tSignal.theReceiversBlockNumber = DBDICT;
- tSignal.theVerId_signalNumber = GSN_CREATE_INDX_REQ;
- tSignal.theLength = CreateIndxReq::SignalLength;
-
- CreateIndxReq * const req = CAST_PTR(CreateIndxReq, tSignal.getDataPtrSend());
-
- req->setUserRef(m_reference);
- req->setConnectionPtr(0);
- req->setRequestType(CreateIndxReq::RT_USER);
-
- Uint32 it = getKernelConstant(impl.m_type,
- indexTypeMapping,
- DictTabInfo::UndefTableType);
-
- if(it == DictTabInfo::UndefTableType){
- m_error.code = 4250;
- return -1;
- }
- req->setIndexType((DictTabInfo::TableType) it);
-
- req->setTableId(table.m_tableId);
- req->setOnline(true);
- AttributeList attributeList;
- attributeList.sz = impl.m_columns.size();
- for(i = 0; i<attributeList.sz; i++){
- const NdbColumnImpl* col =
- table.getColumn(impl.m_columns[i]->m_name.c_str());
- if(col == 0){
- m_error.code = 4247;
- return -1;
- }
- // Copy column definition
- *impl.m_columns[i] = *col;
-
- // index key type check
- if ((it == DictTabInfo::UniqueHashIndex &&
- ! NdbSqlUtil::usable_in_hash_index(col->m_type, col->m_cs)) ||
- (it == DictTabInfo::OrderedIndex &&
- ! NdbSqlUtil::usable_in_ordered_index(col->m_type, col->m_cs))) {
- m_error.code = 743;
- return -1;
- }
- attributeList.id[i] = col->m_attrId;
- }
- LinearSectionPtr ptr[2];
- ptr[0].p = (Uint32*)&attributeList;
- ptr[0].sz = 1 + attributeList.sz;
- ptr[1].p = (Uint32*)m_buffer.get_data();
- ptr[1].sz = m_buffer.length() >> 2; //BUG?
- return createIndex(&tSignal, ptr);
-}
-
-int
-NdbDictInterface::createIndex(NdbApiSignal* signal,
- LinearSectionPtr ptr[3])
-{
- const int noErrCodes = 2;
- int errCodes[noErrCodes] = {CreateIndxRef::Busy, CreateIndxRef::NotMaster};
- return dictSignal(signal,ptr,2,
- 1 /*use masternode id*/,
- 100,
- WAIT_CREATE_INDX_REQ,
- -1,
- errCodes,noErrCodes);
-}
-
-void
-NdbDictInterface::execCREATE_INDX_CONF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- //CreateTableConf* const conf = CAST_CONSTPTR(CreateTableConf, signal->getDataPtr());
-
- m_waiter.signal(NO_WAIT);
-}
-
-void
-NdbDictInterface::execCREATE_INDX_REF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- const CreateIndxRef* const ref = CAST_CONSTPTR(CreateIndxRef, signal->getDataPtr());
- m_error.code = ref->getErrorCode();
- if(m_error.code == ref->NotMaster)
- m_masterNodeId= ref->masterNodeId;
- m_waiter.signal(NO_WAIT);
-}
-
-/*****************************************************************
- * Drop index
- */
-int
-NdbDictionaryImpl::dropIndex(const char * indexName,
- const char * tableName)
-{
- NdbIndexImpl * idx = getIndex(indexName, tableName);
- if (idx == 0) {
- m_error.code = 4243;
- return -1;
- }
- int ret = dropIndex(*idx, tableName);
- // If the index stored in the cache is incompatible with the one in the
- // kernel, we must clear the cache and try again
- if (ret == INCOMPATIBLE_VERSION) {
- const BaseString internalIndexName((tableName)
- ?
- m_ndb.internalize_index_name(getTable(tableName), indexName)
- :
- m_ndb.internalize_table_name(indexName)); // Index is also a table
-
- m_localHash.drop(internalIndexName.c_str());
- m_globalHash->lock();
- idx->m_table->m_status = NdbDictionary::Object::Invalid;
- m_globalHash->drop(idx->m_table);
- m_globalHash->unlock();
- return dropIndex(indexName, tableName);
- }
-
- return ret;
-}
-
-int
-NdbDictionaryImpl::dropIndex(NdbIndexImpl & impl, const char * tableName)
-{
- const char * indexName = impl.getName();
- if (tableName || m_ndb.usingFullyQualifiedNames()) {
- NdbTableImpl * timpl = impl.m_table;
-
- if (timpl == 0) {
- m_error.code = 709;
- return -1;
- }
-
- const BaseString internalIndexName((tableName)
- ?
- m_ndb.internalize_index_name(getTable(tableName), indexName)
- :
- m_ndb.internalize_table_name(indexName)); // Index is also a table
-
- if(impl.m_status == NdbDictionary::Object::New){
- return dropIndex(indexName, tableName);
- }
-
- int ret = m_receiver.dropIndex(impl, *timpl);
- if(ret == 0){
- m_localHash.drop(internalIndexName.c_str());
- m_globalHash->lock();
- impl.m_table->m_status = NdbDictionary::Object::Invalid;
- m_globalHash->drop(impl.m_table);
- m_globalHash->unlock();
- }
- return ret;
- }
-
- m_error.code = 4243;
- return -1;
-}
-
-int
-NdbDictInterface::dropIndex(const NdbIndexImpl & impl,
- const NdbTableImpl & timpl)
-{
- NdbApiSignal tSignal(m_reference);
- tSignal.theReceiversBlockNumber = DBDICT;
- tSignal.theVerId_signalNumber = GSN_DROP_INDX_REQ;
- tSignal.theLength = DropIndxReq::SignalLength;
-
- DropIndxReq * const req = CAST_PTR(DropIndxReq, tSignal.getDataPtrSend());
- req->setUserRef(m_reference);
- req->setConnectionPtr(0);
- req->setRequestType(DropIndxReq::RT_USER);
- req->setTableId(~0); // DICT overwrites
- req->setIndexId(timpl.m_tableId);
- req->setIndexVersion(timpl.m_version);
-
- return dropIndex(&tSignal, 0);
-}
-
-int
-NdbDictInterface::dropIndex(NdbApiSignal* signal, LinearSectionPtr ptr[3])
-{
- const int noErrCodes = 2;
- int errCodes[noErrCodes] = {DropIndxRef::Busy, DropIndxRef::NotMaster};
- int r = dictSignal(signal,NULL,0,
- 1/*Use masternode id*/,
- 100,
- WAIT_DROP_INDX_REQ,
- WAITFOR_RESPONSE_TIMEOUT,
- errCodes,noErrCodes);
- if(m_error.code == DropIndxRef::InvalidIndexVersion) {
- // Clear caches and try again
- return INCOMPATIBLE_VERSION;
- }
- return r;
-}
-
-void
-NdbDictInterface::execDROP_INDX_CONF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- m_waiter.signal(NO_WAIT);
-}
-
-void
-NdbDictInterface::execDROP_INDX_REF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- const DropIndxRef* const ref = CAST_CONSTPTR(DropIndxRef, signal->getDataPtr());
- m_error.code = ref->getErrorCode();
- if(m_error.code == ref->NotMaster)
- m_masterNodeId= ref->masterNodeId;
- m_waiter.signal(NO_WAIT);
-}
-
-/*****************************************************************
- * Create event
- */
-
-int
-NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
-{
- int i;
- NdbTableImpl* tab = getTable(evnt.getTableName());
-
- if(tab == 0){
-#ifdef EVENT_DEBUG
- ndbout_c("NdbDictionaryImpl::createEvent: table not found: %s",
- evnt.getTableName());
-#endif
- return -1;
- }
-
- evnt.m_tableId = tab->m_tableId;
- evnt.m_tableImpl = tab;
-#ifdef EVENT_DEBUG
- ndbout_c("Event on tableId=%d", evnt.m_tableId);
-#endif
-
- NdbTableImpl &table = *evnt.m_tableImpl;
-
-
- int attributeList_sz = evnt.m_attrIds.size();
-
- for (i = 0; i < attributeList_sz; i++) {
- NdbColumnImpl *col_impl = table.getColumn(evnt.m_attrIds[i]);
- if (col_impl) {
- evnt.m_facade->addColumn(*(col_impl->m_facade));
- } else {
- ndbout_c("Attr id %u in table %s not found", evnt.m_attrIds[i],
- evnt.getTableName());
- m_error.code= 4713;
- return -1;
- }
- }
-
- evnt.m_attrIds.clear();
-
- attributeList_sz = evnt.m_columns.size();
-#ifdef EVENT_DEBUG
- ndbout_c("creating event %s", evnt.m_externalName.c_str());
- ndbout_c("no of columns %d", evnt.m_columns.size());
-#endif
- int pk_count = 0;
- evnt.m_attrListBitmask.clear();
-
- for(i = 0; i<attributeList_sz; i++){
- const NdbColumnImpl* col =
- table.getColumn(evnt.m_columns[i]->m_name.c_str());
- if(col == 0){
- m_error.code= 4247;
- return -1;
- }
- // Copy column definition
- *evnt.m_columns[i] = *col;
-
- if(col->m_pk){
- pk_count++;
- }
-
- evnt.m_attrListBitmask.set(col->m_attrId);
- }
-
- // Sort index attributes according to primary table (using insertion sort)
- for(i = 1; i < attributeList_sz; i++) {
- NdbColumnImpl* temp = evnt.m_columns[i];
- unsigned int j = i;
- while((j > 0) && (evnt.m_columns[j - 1]->m_attrId > temp->m_attrId)) {
- evnt.m_columns[j] = evnt.m_columns[j - 1];
- j--;
- }
- evnt.m_columns[j] = temp;
- }
- // Check for illegal duplicate attributes
- for(i = 1; i<attributeList_sz; i++) {
- if (evnt.m_columns[i-1]->m_attrId == evnt.m_columns[i]->m_attrId) {
- m_error.code= 4258;
- return -1;
- }
- }
-
-#ifdef EVENT_DEBUG
- char buf[128] = {0};
- evnt.m_attrListBitmask.getText(buf);
- ndbout_c("createEvent: mask = %s", buf);
-#endif
-
- // NdbDictInterface m_receiver;
- return m_receiver.createEvent(m_ndb, evnt, 0 /* getFlag unset */);
-}
-
-int
-NdbDictInterface::createEvent(class Ndb & ndb,
- NdbEventImpl & evnt,
- int getFlag)
-{
- NdbApiSignal tSignal(m_reference);
- tSignal.theReceiversBlockNumber = DBDICT;
- tSignal.theVerId_signalNumber = GSN_CREATE_EVNT_REQ;
- if (getFlag)
- tSignal.theLength = CreateEvntReq::SignalLengthGet;
- else
- tSignal.theLength = CreateEvntReq::SignalLengthCreate;
-
- CreateEvntReq * const req = CAST_PTR(CreateEvntReq, tSignal.getDataPtrSend());
-
- req->setUserRef(m_reference);
- req->setUserData(0);
-
- if (getFlag) {
- // getting event from Dictionary
- req->setRequestType(CreateEvntReq::RT_USER_GET);
- } else {
- // creating event in Dictionary
- req->setRequestType(CreateEvntReq::RT_USER_CREATE);
- req->setTableId(evnt.m_tableId);
- req->setAttrListBitmask(evnt.m_attrListBitmask);
- req->setEventType(evnt.mi_type);
- }
-
- UtilBufferWriter w(m_buffer);
-
- const size_t len = strlen(evnt.m_externalName.c_str()) + 1;
- if(len > MAX_TAB_NAME_SIZE) {
- m_error.code= 4241;
- return -1;
- }
-
- w.add(SimpleProperties::StringValue, evnt.m_externalName.c_str());
-
- if (getFlag == 0)
- {
- const BaseString internal_tabname(
- ndb.internalize_table_name(evnt.m_tableName.c_str()));
- w.add(SimpleProperties::StringValue,
- internal_tabname.c_str());
- }
-
- LinearSectionPtr ptr[1];
- ptr[0].p = (Uint32*)m_buffer.get_data();
- ptr[0].sz = (m_buffer.length()+3) >> 2;
-
- int ret = createEvent(&tSignal, ptr, 1);
-
- if (ret) {
- return ret;
- }
-
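- // execCREATE_EVNT_CONF stored the reply in m_buffer as a length word,
- // followed by the CreateEvntConf signal body of that length, optionally
- // followed by the internal table name.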
- char *dataPtr = (char *)m_buffer.get_data();
- unsigned int lenCreateEvntConf = *((unsigned int *)dataPtr);
- dataPtr += sizeof(lenCreateEvntConf);
- CreateEvntConf const * evntConf = (CreateEvntConf *)dataPtr;
- dataPtr += lenCreateEvntConf;
-
- // NdbEventImpl *evntImpl = (NdbEventImpl *)evntConf->getUserData();
-
- if (getFlag) {
- evnt.m_tableId = evntConf->getTableId();
- evnt.m_attrListBitmask = evntConf->getAttrListBitmask();
- evnt.mi_type = evntConf->getEventType();
- evnt.setTable(dataPtr);
- } else {
- if (evnt.m_tableId != evntConf->getTableId() ||
- //evnt.m_attrListBitmask != evntConf->getAttrListBitmask() ||
- evnt.mi_type != evntConf->getEventType()) {
- ndbout_c("ERROR*************");
- return 1;
- }
- }
-
- evnt.m_eventId = evntConf->getEventId();
- evnt.m_eventKey = evntConf->getEventKey();
-
- return ret;
-}
-
-int
-NdbDictInterface::createEvent(NdbApiSignal* signal,
- LinearSectionPtr ptr[3], int noLSP)
-{
- const int noErrCodes = 1;
- int errCodes[noErrCodes] = {CreateEvntRef::Busy};
- return dictSignal(signal,ptr,noLSP,
- 1 /*use masternode id*/,
- 100,
- WAIT_CREATE_INDX_REQ /*WAIT_CREATE_EVNT_REQ*/,
- -1,
- errCodes,noErrCodes, CreateEvntRef::Temporary);
-}
-
-int
-NdbDictionaryImpl::executeSubscribeEvent(NdbEventImpl & ev)
-{
- // NdbDictInterface m_receiver;
- return m_receiver.executeSubscribeEvent(m_ndb, ev);
-}
-
-int
-NdbDictInterface::executeSubscribeEvent(class Ndb & ndb,
- NdbEventImpl & evnt)
-{
- DBUG_ENTER("NdbDictInterface::executeSubscribeEvent");
- NdbApiSignal tSignal(m_reference);
- // tSignal.theReceiversBlockNumber = SUMA;
- tSignal.theReceiversBlockNumber = DBDICT;
- tSignal.theVerId_signalNumber = GSN_SUB_START_REQ;
- tSignal.theLength = SubStartReq::SignalLength2;
-
- SubStartReq * sumaStart = CAST_PTR(SubStartReq, tSignal.getDataPtrSend());
-
- sumaStart->subscriptionId = evnt.m_eventId;
- sumaStart->subscriptionKey = evnt.m_eventKey;
- sumaStart->part = SubscriptionData::TableData;
- sumaStart->subscriberData = evnt.m_bufferId & 0xFF;
- sumaStart->subscriberRef = m_reference;
-
- DBUG_RETURN(executeSubscribeEvent(&tSignal, NULL));
-}
-
-int
-NdbDictInterface::executeSubscribeEvent(NdbApiSignal* signal,
- LinearSectionPtr ptr[3])
-{
- return dictSignal(signal,NULL,0,
- 1 /*use masternode id*/,
- 100,
- WAIT_CREATE_INDX_REQ /*WAIT_CREATE_EVNT_REQ*/,
- -1,
- NULL,0);
-}
-
-int
-NdbDictionaryImpl::stopSubscribeEvent(NdbEventImpl & ev)
-{
- // NdbDictInterface m_receiver;
- return m_receiver.stopSubscribeEvent(m_ndb, ev);
-}
-
-int
-NdbDictInterface::stopSubscribeEvent(class Ndb & ndb,
- NdbEventImpl & evnt)
-{
- DBUG_ENTER("NdbDictInterface::stopSubscribeEvent");
-
- NdbApiSignal tSignal(m_reference);
- // tSignal.theReceiversBlockNumber = SUMA;
- tSignal.theReceiversBlockNumber = DBDICT;
- tSignal.theVerId_signalNumber = GSN_SUB_STOP_REQ;
- tSignal.theLength = SubStopReq::SignalLength;
-
- SubStopReq * sumaStop = CAST_PTR(SubStopReq, tSignal.getDataPtrSend());
-
- sumaStop->subscriptionId = evnt.m_eventId;
- sumaStop->subscriptionKey = evnt.m_eventKey;
- sumaStop->subscriberData = evnt.m_bufferId & 0xFF;
- sumaStop->part = (Uint32) SubscriptionData::TableData;
- sumaStop->subscriberRef = m_reference;
-
- DBUG_RETURN(stopSubscribeEvent(&tSignal, NULL));
-}
-
-int
-NdbDictInterface::stopSubscribeEvent(NdbApiSignal* signal,
- LinearSectionPtr ptr[3])
-{
- return dictSignal(signal,NULL,0,
- 1 /*use masternode id*/,
- 100,
- WAIT_CREATE_INDX_REQ /*WAIT_SUB_STOP_REQ*/,
- -1,
- NULL,0);
-}
-
-NdbEventImpl *
-NdbDictionaryImpl::getEvent(const char * eventName)
-{
- NdbEventImpl *ev = new NdbEventImpl();
-
- if (ev == NULL) {
- return NULL;
- }
-
- ev->setName(eventName);
-
- int ret = m_receiver.createEvent(m_ndb, *ev, 1 /* getFlag set */);
-
- if (ret) {
- delete ev;
- return NULL;
- }
-
- // The event stores only the internal table name, so externalize it here
- ev->setTable(m_ndb.externalizeTableName(ev->getTableName()));
- ev->m_tableImpl = getTable(ev->getTableName());
-
- // get the columns from the attrListBitmask
-
- NdbTableImpl &table = *ev->m_tableImpl;
- AttributeMask & mask = ev->m_attrListBitmask;
- int attributeList_sz = mask.count();
- int id = -1;
-
-#ifdef EVENT_DEBUG
- ndbout_c("NdbDictionaryImpl::getEvent attributeList_sz = %d",
- attributeList_sz);
- char buf[128] = {0};
- mask.getText(buf);
- ndbout_c("mask = %s", buf);
-#endif
-
- for(int i = 0; i < attributeList_sz; i++) {
- id++; while (!mask.get(id)) id++;
-
- const NdbColumnImpl* col = table.getColumn(id);
- if(col == 0) {
-#ifdef EVENT_DEBUG
- ndbout_c("NdbDictionaryImpl::getEvent could not find column id %d", id);
-#endif
- m_error.code= 4247;
- delete ev;
- return NULL;
- }
- NdbColumnImpl* new_col = new NdbColumnImpl;
- // Copy column definition
- *new_col = *col;
-
- ev->m_columns.push_back(new_col);
- }
-
- return ev;
-}
-
-void
-NdbDictInterface::execCREATE_EVNT_CONF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- DBUG_ENTER("NdbDictInterface::execCREATE_EVNT_CONF");
-
- m_buffer.clear();
- unsigned int len = signal->getLength() << 2;
- m_buffer.append((char *)&len, sizeof(len));
- m_buffer.append(signal->getDataPtr(), len);
-
- if (signal->m_noOfSections > 0) {
- m_buffer.append((char *)ptr[0].p, strlen((char *)ptr[0].p)+1);
- }
-
- const CreateEvntConf * const createEvntConf=
- CAST_CONSTPTR(CreateEvntConf, signal->getDataPtr());
-
- Uint32 subscriptionId = createEvntConf->getEventId();
- Uint32 subscriptionKey = createEvntConf->getEventKey();
-
- DBUG_PRINT("info",("subscriptionId=%d,subscriptionKey=%d",
- subscriptionId,subscriptionKey));
- m_waiter.signal(NO_WAIT);
- DBUG_VOID_RETURN;
-}
-
-void
-NdbDictInterface::execCREATE_EVNT_REF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- DBUG_ENTER("NdbDictInterface::execCREATE_EVNT_REF");
-
- const CreateEvntRef* const ref=
- CAST_CONSTPTR(CreateEvntRef, signal->getDataPtr());
- m_error.code= ref->getErrorCode();
- DBUG_PRINT("error",("error=%d,line=%d,node=%d",ref->getErrorCode(),
- ref->getErrorLine(),ref->getErrorNode()));
- m_waiter.signal(NO_WAIT);
- DBUG_VOID_RETURN;
-}
-
-void
-NdbDictInterface::execSUB_STOP_CONF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- DBUG_ENTER("NdbDictInterface::execSUB_STOP_CONF");
- const SubStopConf * const subStopConf=
- CAST_CONSTPTR(SubStopConf, signal->getDataPtr());
-
- Uint32 subscriptionId = subStopConf->subscriptionId;
- Uint32 subscriptionKey = subStopConf->subscriptionKey;
- Uint32 subscriberData = subStopConf->subscriberData;
-
- DBUG_PRINT("info",("subscriptionId=%d,subscriptionKey=%d,subscriberData=%d",
- subscriptionId,subscriptionKey,subscriberData));
- m_waiter.signal(NO_WAIT);
- DBUG_VOID_RETURN;
-}
-
-void
-NdbDictInterface::execSUB_STOP_REF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- DBUG_ENTER("NdbDictInterface::execSUB_STOP_REF");
- const SubStopRef * const subStopRef=
- CAST_CONSTPTR(SubStopRef, signal->getDataPtr());
-
- Uint32 subscriptionId = subStopRef->subscriptionId;
- Uint32 subscriptionKey = subStopRef->subscriptionKey;
- Uint32 subscriberData = subStopRef->subscriberData;
- m_error.code= subStopRef->errorCode;
-
- DBUG_PRINT("error",("subscriptionId=%d,subscriptionKey=%d,subscriberData=%d,error=%d",
- subscriptionId,subscriptionKey,subscriberData,m_error.code));
- m_waiter.signal(NO_WAIT);
- DBUG_VOID_RETURN;
-}
-
-void
-NdbDictInterface::execSUB_START_CONF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- DBUG_ENTER("NdbDictInterface::execSUB_START_CONF");
- const SubStartConf * const subStartConf=
- CAST_CONSTPTR(SubStartConf, signal->getDataPtr());
-
- Uint32 subscriptionId = subStartConf->subscriptionId;
- Uint32 subscriptionKey = subStartConf->subscriptionKey;
- SubscriptionData::Part part =
- (SubscriptionData::Part)subStartConf->part;
- Uint32 subscriberData = subStartConf->subscriberData;
-
- switch(part) {
- case SubscriptionData::MetaData: {
- DBUG_PRINT("error",("SubscriptionData::MetaData"));
- m_error.code= 1;
- break;
- }
- case SubscriptionData::TableData: {
- DBUG_PRINT("info",("SubscriptionData::TableData"));
- break;
- }
- default: {
- DBUG_PRINT("error",("wrong data"));
- m_error.code= 2;
- break;
- }
- }
- DBUG_PRINT("info",("subscriptionId=%d,subscriptionKey=%d,subscriberData=%d",
- subscriptionId,subscriptionKey,subscriberData));
- m_waiter.signal(NO_WAIT);
- DBUG_VOID_RETURN;
-}
-
-void
-NdbDictInterface::execSUB_START_REF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- DBUG_ENTER("NdbDictInterface::execSUB_START_REF");
- const SubStartRef * const subStartRef=
- CAST_CONSTPTR(SubStartRef, signal->getDataPtr());
- m_error.code= subStartRef->errorCode;
- m_waiter.signal(NO_WAIT);
- DBUG_VOID_RETURN;
-}
-void
-NdbDictInterface::execSUB_GCP_COMPLETE_REP(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- const SubGcpCompleteRep * const rep=
- CAST_CONSTPTR(SubGcpCompleteRep, signal->getDataPtr());
-
- const Uint32 gci = rep->gci;
- // const Uint32 senderRef = rep->senderRef;
- const Uint32 subscriberData = rep->subscriberData;
-
- const Uint32 bufferId = subscriberData;
-
- const Uint32 ref = signal->theSendersBlockRef;
-
- NdbApiSignal tSignal(m_reference);
- SubGcpCompleteAcc * acc=
- CAST_PTR(SubGcpCompleteAcc, tSignal.getDataPtrSend());
-
- acc->rep = *rep;
-
- tSignal.theReceiversBlockNumber = refToBlock(ref);
- tSignal.theVerId_signalNumber = GSN_SUB_GCP_COMPLETE_ACC;
- tSignal.theLength = SubGcpCompleteAcc::SignalLength;
-
- Uint32 aNodeId = refToNode(ref);
-
- // m_transporter->lock_mutex();
- int r;
- r = m_transporter->sendSignal(&tSignal, aNodeId);
- // m_transporter->unlock_mutex();
-
- NdbGlobalEventBufferHandle::latestGCI(bufferId, gci);
-}
-
-void
-NdbDictInterface::execSUB_TABLE_DATA(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
-#ifdef EVENT_DEBUG
- const char * FNAME = "NdbDictInterface::execSUB_TABLE_DATA";
-#endif
- //TODO
- const SubTableData * const sdata = CAST_CONSTPTR(SubTableData, signal->getDataPtr());
-
- // const Uint32 gci = sdata->gci;
- // const Uint32 operation = sdata->operation;
- // const Uint32 tableId = sdata->tableId;
- // const Uint32 noOfAttrs = sdata->noOfAttributes;
- // const Uint32 dataLen = sdata->dataSize;
- const Uint32 subscriberData = sdata->subscriberData;
- // const Uint32 logType = sdata->logType;
-
- for (int i=signal->m_noOfSections;i < 3; i++) {
- ptr[i].p = NULL;
- ptr[i].sz = 0;
- }
-#ifdef EVENT_DEBUG
- ndbout_c("%s: senderData %d, gci %d, operation %d, tableId %d, noOfAttrs %d, dataLen %d",
- FNAME, subscriberData, gci, operation, tableId, noOfAttrs, dataLen);
- ndbout_c("ptr[0] %u %u ptr[1] %u %u ptr[2] %u %u\n",
- ptr[0].p,ptr[0].sz,ptr[1].p,ptr[1].sz,ptr[2].p,ptr[2].sz);
-#endif
- const Uint32 bufferId = subscriberData;
-
- NdbGlobalEventBufferHandle::insertDataL(bufferId,
- sdata, ptr);
-}
-
-/*****************************************************************
- * Drop event
- */
-int
-NdbDictionaryImpl::dropEvent(const char * eventName)
-{
- NdbEventImpl *ev= new NdbEventImpl();
- ev->setName(eventName);
- int ret= m_receiver.dropEvent(*ev);
- delete ev;
-
- // printf("__________________RET %u\n", ret);
- return ret;
-}
-
-int
-NdbDictInterface::dropEvent(const NdbEventImpl &evnt)
-{
- NdbApiSignal tSignal(m_reference);
- tSignal.theReceiversBlockNumber = DBDICT;
- tSignal.theVerId_signalNumber = GSN_DROP_EVNT_REQ;
- tSignal.theLength = DropEvntReq::SignalLength;
-
- DropEvntReq * const req = CAST_PTR(DropEvntReq, tSignal.getDataPtrSend());
-
- req->setUserRef(m_reference);
- req->setUserData(0);
-
- UtilBufferWriter w(m_buffer);
-
- w.add(SimpleProperties::StringValue, evnt.m_externalName.c_str());
-
- LinearSectionPtr ptr[1];
- ptr[0].p = (Uint32*)m_buffer.get_data();
- ptr[0].sz = (m_buffer.length()+3) >> 2;
-
- return dropEvent(&tSignal, ptr, 1);
-}
-
-int
-NdbDictInterface::dropEvent(NdbApiSignal* signal,
- LinearSectionPtr ptr[3], int noLSP)
-{
- //TODO
- const int noErrCodes = 1;
- int errCodes[noErrCodes] = {DropEvntRef::Busy};
- return dictSignal(signal,ptr,noLSP,
- 1 /*use masternode id*/,
- 100,
- WAIT_CREATE_INDX_REQ /*WAIT_CREATE_EVNT_REQ*/,
- -1,
- errCodes,noErrCodes, DropEvntRef::Temporary);
-}
-void
-NdbDictInterface::execDROP_EVNT_CONF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- DBUG_ENTER("NdbDictInterface::execDROP_EVNT_CONF");
- m_waiter.signal(NO_WAIT);
- DBUG_VOID_RETURN;
-}
-
-void
-NdbDictInterface::execDROP_EVNT_REF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- DBUG_ENTER("NdbDictInterface::execDROP_EVNT_REF");
- const DropEvntRef* const ref=
- CAST_CONSTPTR(DropEvntRef, signal->getDataPtr());
- m_error.code= ref->getErrorCode();
-
- DBUG_PRINT("info",("ErrorCode=%u Errorline=%u ErrorNode=%u",
- ref->getErrorCode(), ref->getErrorLine(), ref->getErrorNode()));
-
- m_waiter.signal(NO_WAIT);
- DBUG_VOID_RETURN;
-}
-
-/*****************************************************************
- * List objects or indexes
- */
-int
-NdbDictionaryImpl::listObjects(List& list, NdbDictionary::Object::Type type)
-{
- ListTablesReq req;
- req.requestData = 0;
- req.setTableType(getKernelConstant(type, objectTypeMapping, 0));
- req.setListNames(true);
- return m_receiver.listObjects(list, req.requestData, m_ndb.usingFullyQualifiedNames());
-}
-
-int
-NdbDictionaryImpl::listIndexes(List& list, Uint32 indexId)
-{
- ListTablesReq req;
- req.requestData = 0;
- req.setTableId(indexId);
- req.setListNames(true);
- req.setListIndexes(true);
- return m_receiver.listObjects(list, req.requestData, m_ndb.usingFullyQualifiedNames());
-}
-
-int
-NdbDictInterface::listObjects(NdbDictionary::Dictionary::List& list,
- Uint32 requestData, bool fullyQualifiedNames)
-{
- NdbApiSignal tSignal(m_reference);
- ListTablesReq* const req = CAST_PTR(ListTablesReq, tSignal.getDataPtrSend());
- req->senderRef = m_reference;
- req->senderData = 0;
- req->requestData = requestData;
- tSignal.theReceiversBlockNumber = DBDICT;
- tSignal.theVerId_signalNumber = GSN_LIST_TABLES_REQ;
- tSignal.theLength = ListTablesReq::SignalLength;
- if (listObjects(&tSignal) != 0)
- return -1;
- // count
- const Uint32* data = (const Uint32*)m_buffer.get_data();
- const unsigned length = m_buffer.length() / 4;
- list.count = 0;
- bool ok = true;
- unsigned pos, count;
- pos = count = 0;
- while (pos < length) {
- // table id - name length - name
- pos++;
- if (pos >= length) {
- ok = false;
- break;
- }
- Uint32 n = (data[pos++] + 3) >> 2;
- pos += n;
- if (pos > length) {
- ok = false;
- break;
- }
- count++;
- }
- if (! ok) {
- // bad signal data
- m_error.code= 4213;
- return -1;
- }
- list.count = count;
- list.elements = new NdbDictionary::Dictionary::List::Element[count];
- pos = count = 0;
- while (pos < length) {
- NdbDictionary::Dictionary::List::Element& element = list.elements[count];
- Uint32 d = data[pos++];
- element.id = ListTablesConf::getTableId(d);
- element.type = (NdbDictionary::Object::Type)
- getApiConstant(ListTablesConf::getTableType(d), objectTypeMapping, 0);
- element.state = (NdbDictionary::Object::State)
- getApiConstant(ListTablesConf::getTableState(d), objectStateMapping, 0);
- element.store = (NdbDictionary::Object::Store)
- getApiConstant(ListTablesConf::getTableStore(d), objectStoreMapping, 0);
- // table or index name
- Uint32 n = (data[pos++] + 3) >> 2;
- BaseString databaseName;
- BaseString schemaName;
- BaseString objectName;
- if ((element.type == NdbDictionary::Object::UniqueHashIndex) ||
- (element.type == NdbDictionary::Object::OrderedIndex)) {
- char * indexName = new char[n << 2];
- memcpy(indexName, &data[pos], n << 2);
- databaseName = Ndb::getDatabaseFromInternalName(indexName);
- schemaName = Ndb::getSchemaFromInternalName(indexName);
- objectName = BaseString(Ndb::externalizeIndexName(indexName, fullyQualifiedNames));
- delete [] indexName;
- } else if ((element.type == NdbDictionary::Object::SystemTable) ||
- (element.type == NdbDictionary::Object::UserTable)) {
- char * tableName = new char[n << 2];
- memcpy(tableName, &data[pos], n << 2);
- databaseName = Ndb::getDatabaseFromInternalName(tableName);
- schemaName = Ndb::getSchemaFromInternalName(tableName);
- objectName = BaseString(Ndb::externalizeTableName(tableName, fullyQualifiedNames));
- delete [] tableName;
- }
- else {
- char * otherName = new char[n << 2];
- memcpy(otherName, &data[pos], n << 2);
- objectName = BaseString(otherName);
- delete [] otherName;
- }
- element.database = new char[databaseName.length() + 1];
- strcpy(element.database, databaseName.c_str());
- element.schema = new char[schemaName.length() + 1];
- strcpy(element.schema, schemaName.c_str());
- element.name = new char[objectName.length() + 1];
- strcpy(element.name, objectName.c_str());
- pos += n;
- count++;
- }
- return 0;
-}
-
-int
-NdbDictInterface::listObjects(NdbApiSignal* signal)
-{
- const Uint32 RETRIES = 100;
- for (Uint32 i = 0; i < RETRIES; i++) {
- m_buffer.clear();
- // begin protected
- m_transporter->lock_mutex();
- Uint16 aNodeId = m_transporter->get_an_alive_node();
- if (aNodeId == 0) {
- m_error.code= 4009;
- m_transporter->unlock_mutex();
- return -1;
- }
- if (m_transporter->sendSignal(signal, aNodeId) != 0) {
- m_transporter->unlock_mutex();
- continue;
- }
- m_error.code= 0;
- m_waiter.m_node = aNodeId;
- m_waiter.m_state = WAIT_LIST_TABLES_CONF;
- m_waiter.wait(WAITFOR_RESPONSE_TIMEOUT);
- m_transporter->unlock_mutex();
- // end protected
- if (m_waiter.m_state == NO_WAIT && m_error.code == 0)
- return 0;
- if (m_waiter.m_state == WAIT_NODE_FAILURE)
- continue;
- return -1;
- }
- return -1;
-}
-
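listObjects(NdbApiSignal*) above retries a bounded number of times: pick an alive node, send under the transporter mutex, wait for the CONF, and go around again only when the send fails or the node dies while waiting. A compilable sketch of just that retry shape, with the mutex/waiter plumbing reduced to a callback, might be:

#include <cstdint>
#include <functional>
#include <iostream>

enum class WaitOutcome { Confirmed, SendFailed, NodeFailed, Error };

// Hypothetical one-shot "send and wait" step; in the real code this is the
// mutex-protected sendSignal + NdbWaiter::wait sequence.
static int sendWithRetries(const std::function<WaitOutcome()>& sendAndWait,
                           uint32_t retries = 100)
{
  for (uint32_t i = 0; i < retries; i++) {
    switch (sendAndWait()) {
    case WaitOutcome::Confirmed:  return 0;    // CONF arrived, buffer is complete
    case WaitOutcome::SendFailed:              // transient: try another round
    case WaitOutcome::NodeFailed: continue;    // node died while waiting: retry
    case WaitOutcome::Error:      return -1;   // REF or timeout: give up at once
    }
  }
  return -1;                                   // retry budget exhausted
}

int main() {
  int attempts = 0;
  auto flakey = [&]() {
    return ++attempts < 3 ? WaitOutcome::NodeFailed : WaitOutcome::Confirmed;
  };
  std::cout << "result=" << sendWithRetries(flakey)
            << " after " << attempts << " attempts\n";
  return 0;
}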
-void
-NdbDictInterface::execLIST_TABLES_CONF(NdbApiSignal* signal,
- LinearSectionPtr ptr[3])
-{
- const unsigned off = ListTablesConf::HeaderLength;
- const unsigned len = (signal->getLength() - off);
- m_buffer.append(signal->getDataPtr() + off, len << 2);
- if (signal->getLength() < ListTablesConf::SignalLength) {
- // last signal has less than full length
- m_waiter.signal(NO_WAIT);
- }
-}
-
-template class Vector<int>;
-template class Vector<Uint16>;
-template class Vector<Uint32>;
-template class Vector<Vector<Uint32> >;
-template class Vector<NdbTableImpl*>;
-template class Vector<NdbColumnImpl*>;
-
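execLIST_TABLES_CONF above appends each signal's payload to a growing buffer and treats a shorter-than-full signal as the end of the stream. Reduced to standard C++ (the full-fragment size is an assumed value here), the accumulation looks roughly like:

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Append one fragment to the accumulated payload; report whether it was the
// last one.  The real protocol uses ListTablesConf::SignalLength as the "full"
// size; any shorter signal terminates the stream.
static bool appendFragment(std::vector<uint32_t>& buffer,
                           const std::vector<uint32_t>& fragment,
                           std::size_t fullFragmentWords)
{
  buffer.insert(buffer.end(), fragment.begin(), fragment.end());
  return fragment.size() < fullFragmentWords;   // short fragment => done
}

int main() {
  const std::size_t full = 22;                  // assumed full payload size
  std::vector<uint32_t> buffer;
  bool done = appendFragment(buffer, std::vector<uint32_t>(full, 1), full);
  assert(!done);
  done = appendFragment(buffer, std::vector<uint32_t>(5, 2), full);
  assert(done && buffer.size() == full + 5);
  std::cout << "accumulated " << buffer.size() << " words\n";
  return 0;
}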
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
deleted file mode 100644
index 754d0000718..00000000000
--- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ /dev/null
@@ -1,706 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef NdbDictionaryImpl_H
-#define NdbDictionaryImpl_H
-
-#include <ndb_types.h>
-#include <kernel_types.h>
-#include <NdbError.hpp>
-#include <BaseString.hpp>
-#include <Vector.hpp>
-#include <UtilBuffer.hpp>
-#include <NdbDictionary.hpp>
-#include <Bitmask.hpp>
-#include <AttributeList.hpp>
-#include <Ndb.hpp>
-#include "NdbWaiter.hpp"
-#include "DictCache.hpp"
-
-class NdbDictObjectImpl {
-public:
- Uint32 m_version;
- NdbDictionary::Object::Status m_status;
-
- bool change();
-protected:
- NdbDictObjectImpl() :
- m_status(NdbDictionary::Object::New) {
- }
-};
-
-/**
- * Column
- */
-class NdbColumnImpl : public NdbDictionary::Column {
-public:
- NdbColumnImpl();
- NdbColumnImpl(NdbDictionary::Column &); // This is not a copy constructor
- ~NdbColumnImpl();
- NdbColumnImpl& operator=(const NdbColumnImpl&);
- void init(Type t = Unsigned);
-
- int m_attrId;
- BaseString m_name;
- NdbDictionary::Column::Type m_type;
- int m_precision;
- int m_scale;
- int m_length;
- CHARSET_INFO * m_cs; // not const in MySQL
-
- bool m_pk;
- bool m_distributionKey;
- bool m_nullable;
- bool m_autoIncrement;
- Uint64 m_autoIncrementInitialValue;
- BaseString m_defaultValue;
- NdbTableImpl * m_blobTable;
-
- /**
- * Internal types and sizes, and aggregates
- */
- Uint32 m_attrSize; // element size (size when arraySize==1)
- Uint32 m_arraySize; // length or length+2 for Var* types
- Uint32 m_keyInfoPos;
- // TODO: use bits in attr desc 2
- bool getInterpretableType() const ;
- bool getCharType() const;
- bool getStringType() const;
- bool getBlobType() const;
-
- /**
- * Equality/assign
- */
- bool equal(const NdbColumnImpl&) const;
-
- static NdbColumnImpl & getImpl(NdbDictionary::Column & t);
- static const NdbColumnImpl & getImpl(const NdbDictionary::Column & t);
- NdbDictionary::Column * m_facade;
-
- static NdbDictionary::Column * create_psuedo(const char *);
-};
-
-class NdbTableImpl : public NdbDictionary::Table, public NdbDictObjectImpl {
-public:
- NdbTableImpl();
- NdbTableImpl(NdbDictionary::Table &);
- ~NdbTableImpl();
-
- void init();
- void setName(const char * name);
- const char * getName() const;
-
- Uint32 m_changeMask;
- Uint32 m_tableId;
- BaseString m_internalName;
- BaseString m_externalName;
- BaseString m_newExternalName; // Used for alter table
- UtilBuffer m_frm;
- NdbDictionary::Object::FragmentType m_fragmentType;
-
- /**
- * Columns and the column-name hash
- */
- Uint32 m_columnHashMask;
- Vector<Uint32> m_columnHash;
- Vector<NdbColumnImpl *> m_columns;
- void buildColumnHash();
-
- /**
- * Fragment info
- */
- Uint32 m_hashValueMask;
- Uint32 m_hashpointerValue;
- Vector<Uint16> m_fragments;
-
- bool m_logging;
- int m_kvalue;
- int m_minLoadFactor;
- int m_maxLoadFactor;
- Uint16 m_keyLenInWords;
- Uint16 m_fragmentCount;
-
- NdbDictionaryImpl * m_dictionary;
- NdbIndexImpl * m_index;
- NdbColumnImpl * getColumn(unsigned attrId);
- NdbColumnImpl * getColumn(const char * name);
- const NdbColumnImpl * getColumn(unsigned attrId) const;
- const NdbColumnImpl * getColumn(const char * name) const;
-
- /**
- * Index only stuff
- */
- BaseString m_primaryTable;
- NdbDictionary::Index::Type m_indexType;
-
- /**
- * Aggregates
- */
- Uint8 m_noOfKeys;
- Uint8 m_noOfDistributionKeys;
- Uint8 m_noOfBlobs;
-
- Uint8 m_replicaCount;
-
- /**
- * Equality/assign
- */
- bool equal(const NdbTableImpl&) const;
- void assign(const NdbTableImpl&);
-
- static NdbTableImpl & getImpl(NdbDictionary::Table & t);
- static NdbTableImpl & getImpl(const NdbDictionary::Table & t);
- NdbDictionary::Table * m_facade;
-
- /**
- * Return the number of nodes (replicas) for a hash value via *nodes
- */
- Uint32 get_nodes(Uint32 hashValue, const Uint16** nodes) const ;
-};
-
-class NdbIndexImpl : public NdbDictionary::Index, public NdbDictObjectImpl {
-public:
- NdbIndexImpl();
- NdbIndexImpl(NdbDictionary::Index &);
- ~NdbIndexImpl();
-
- void init();
- void setName(const char * name);
- const char * getName() const;
- void setTable(const char * table);
- const char * getTable() const;
- const NdbTableImpl * getIndexTable() const;
-
- Uint32 m_indexId;
- BaseString m_internalName;
- BaseString m_externalName;
- BaseString m_tableName;
- Vector<NdbColumnImpl *> m_columns;
- Vector<int> m_key_ids;
- NdbDictionary::Index::Type m_type;
-
- bool m_logging;
-
- NdbTableImpl * m_table;
-
- static NdbIndexImpl & getImpl(NdbDictionary::Index & t);
- static NdbIndexImpl & getImpl(const NdbDictionary::Index & t);
- NdbDictionary::Index * m_facade;
-};
-
-class NdbEventImpl : public NdbDictionary::Event, public NdbDictObjectImpl {
-public:
- NdbEventImpl();
- NdbEventImpl(NdbDictionary::Event &);
- ~NdbEventImpl();
-
- void init();
- void setName(const char * name);
- const char * getName() const;
- void setTable(const NdbDictionary::Table& table);
- void setTable(const char * table);
- const char * getTableName() const;
- void addTableEvent(const NdbDictionary::Event::TableEvent t);
- void setDurability(NdbDictionary::Event::EventDurability d);
- NdbDictionary::Event::EventDurability getDurability() const;
- void addEventColumn(const NdbColumnImpl &c);
- int getNoOfEventColumns() const;
-
- void print() {
- ndbout_c("NdbEventImpl: id=%d, key=%d",
- m_eventId,
- m_eventKey);
- };
-
- Uint32 m_eventId;
- Uint32 m_eventKey;
- Uint32 m_tableId;
- AttributeMask m_attrListBitmask;
- //BaseString m_internalName;
- BaseString m_externalName;
- Uint32 mi_type;
- NdbDictionary::Event::EventDurability m_dur;
-
-
- NdbTableImpl *m_tableImpl;
- BaseString m_tableName;
- Vector<NdbColumnImpl *> m_columns;
- Vector<unsigned> m_attrIds;
-
- int m_bufferId;
-
- NdbEventOperation *eventOp;
-
- static NdbEventImpl & getImpl(NdbDictionary::Event & t);
- static NdbEventImpl & getImpl(const NdbDictionary::Event & t);
- NdbDictionary::Event * m_facade;
-};
-
-
-class NdbDictInterface {
-public:
- NdbDictInterface(NdbError& err) : m_error(err) {
- m_reference = 0;
- m_masterNodeId = 0;
- m_transporter= NULL;
- }
- ~NdbDictInterface();
-
- bool setTransporter(class Ndb * ndb, class TransporterFacade * tf);
- bool setTransporter(class TransporterFacade * tf);
-
- // Abstracts the work common to all the create/drop/list operations below
- int
- dictSignal(NdbApiSignal* signal,
- LinearSectionPtr ptr[3], int noLPTR,
- const int useMasterNodeId,
- const Uint32 RETRIES,
- const WaitSignalType wst,
- const int theWait,
- const int *errcodes,
- const int noerrcodes,
- const int temporaryMask = 0);
-
- int createOrAlterTable(class Ndb & ndb, NdbTableImpl &, bool alter);
-
- int createTable(class Ndb & ndb, NdbTableImpl &);
- int createTable(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
-
- int alterTable(class Ndb & ndb, NdbTableImpl &);
- int alterTable(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
-
- int createIndex(class Ndb & ndb,
- NdbIndexImpl &,
- const NdbTableImpl &);
- int createIndex(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
-
- int createEvent(class Ndb & ndb, NdbEventImpl &, int getFlag);
- int createEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3], int noLSP);
-
- int dropTable(const NdbTableImpl &);
- int dropTable(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
-
- int dropIndex(const NdbIndexImpl &, const NdbTableImpl &);
- int dropIndex(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
-
- int dropEvent(const NdbEventImpl &);
- int dropEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3], int noLSP);
-
- int executeSubscribeEvent(class Ndb & ndb, NdbEventImpl &);
- int executeSubscribeEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
-
- int stopSubscribeEvent(class Ndb & ndb, NdbEventImpl &);
- int stopSubscribeEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
-
- int listObjects(NdbDictionary::Dictionary::List& list, Uint32 requestData, bool fullyQualifiedNames);
- int listObjects(NdbApiSignal* signal);
-
-/* NdbTableImpl * getTable(int tableId, bool fullyQualifiedNames); */
- NdbTableImpl * getTable(const BaseString& name, bool fullyQualifiedNames);
- NdbTableImpl * getTable(class NdbApiSignal * signal,
- LinearSectionPtr ptr[3],
- Uint32 noOfSections, bool fullyQualifiedNames);
-
- static int parseTableInfo(NdbTableImpl ** dst,
- const Uint32 * data, Uint32 len,
- bool fullyQualifiedNames);
-
- static int create_index_obj_from_table(NdbIndexImpl ** dst,
- NdbTableImpl* index_table,
- const NdbTableImpl* primary_table);
-
- NdbError & m_error;
-private:
- Uint32 m_reference;
- Uint32 m_masterNodeId;
-
- NdbWaiter m_waiter;
- class TransporterFacade * m_transporter;
-
- friend class Ndb;
- static void execSignal(void* dictImpl,
- class NdbApiSignal* signal,
- struct LinearSectionPtr ptr[3]);
-
- static void execNodeStatus(void* dictImpl, Uint32,
- bool alive, bool nfCompleted);
-
- void execGET_TABINFO_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execGET_TABINFO_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execCREATE_TABLE_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execCREATE_TABLE_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execALTER_TABLE_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execALTER_TABLE_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
-
- void execCREATE_INDX_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execCREATE_INDX_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execDROP_INDX_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execDROP_INDX_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
-
- void execCREATE_EVNT_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execCREATE_EVNT_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execSUB_START_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execSUB_START_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execSUB_TABLE_DATA(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execSUB_GCP_COMPLETE_REP(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execSUB_STOP_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execSUB_STOP_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execDROP_EVNT_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execDROP_EVNT_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
-
- void execDROP_TABLE_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execDROP_TABLE_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execLIST_TABLES_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
-
- Uint32 m_fragmentId;
- UtilBuffer m_buffer;
-};
-
-class NdbDictionaryImpl : public NdbDictionary::Dictionary {
-public:
- NdbDictionaryImpl(Ndb &ndb);
- NdbDictionaryImpl(Ndb &ndb, NdbDictionary::Dictionary & f);
- ~NdbDictionaryImpl();
-
- bool setTransporter(class Ndb * ndb, class TransporterFacade * tf);
- bool setTransporter(class TransporterFacade * tf);
-
- int createTable(NdbTableImpl &t);
- int createBlobTables(NdbTableImpl &);
- int addBlobTables(NdbTableImpl &);
- int alterTable(NdbTableImpl &t);
- int dropTable(const char * name);
- int dropTable(NdbTableImpl &);
- int dropBlobTables(NdbTableImpl &);
- int invalidateObject(NdbTableImpl &);
- int removeCachedObject(NdbTableImpl &);
-
- int createIndex(NdbIndexImpl &ix);
- int dropIndex(const char * indexName,
- const char * tableName);
- int dropIndex(NdbIndexImpl &, const char * tableName);
- NdbTableImpl * getIndexTable(NdbIndexImpl * index,
- NdbTableImpl * table);
-
- int createEvent(NdbEventImpl &);
- int dropEvent(const char * eventName);
-
- int executeSubscribeEvent(NdbEventImpl &);
- int stopSubscribeEvent(NdbEventImpl &);
-
- int listObjects(List& list, NdbDictionary::Object::Type type);
- int listIndexes(List& list, Uint32 indexId);
-
- NdbTableImpl * getTable(const char * tableName, void **data= 0);
- Ndb_local_table_info* get_local_table_info(
- const BaseString& internalTableName, bool do_add_blob_tables);
- NdbIndexImpl * getIndex(const char * indexName,
- const char * tableName);
- NdbEventImpl * getEvent(const char * eventName);
- NdbEventImpl * getEventImpl(const char * internalName);
-
- const NdbError & getNdbError() const;
- NdbError m_error;
- Uint32 m_local_table_data_size;
-
- LocalDictCache m_localHash;
- GlobalDictCache * m_globalHash;
-
- static NdbDictionaryImpl & getImpl(NdbDictionary::Dictionary & t);
- static const NdbDictionaryImpl & getImpl(const NdbDictionary::Dictionary &t);
- NdbDictionary::Dictionary * m_facade;
-
- NdbDictInterface m_receiver;
- Ndb & m_ndb;
-private:
- NdbIndexImpl * getIndexImpl(const char * name,
- const BaseString& internalName);
- Ndb_local_table_info * fetchGlobalTableImpl(const BaseString& internalName);
-};
-
-inline
-NdbEventImpl &
-NdbEventImpl::getImpl(const NdbDictionary::Event & t){
- return t.m_impl;
-}
-
-inline
-NdbEventImpl &
-NdbEventImpl::getImpl(NdbDictionary::Event & t){
- return t.m_impl;
-}
-
-inline
-NdbColumnImpl &
-NdbColumnImpl::getImpl(NdbDictionary::Column & t){
- return t.m_impl;
-}
-
-inline
-const NdbColumnImpl &
-NdbColumnImpl::getImpl(const NdbDictionary::Column & t){
- return t.m_impl;
-}
-
-inline
-bool
-NdbColumnImpl::getInterpretableType() const {
- return (m_type == NdbDictionary::Column::Unsigned ||
- m_type == NdbDictionary::Column::Bigunsigned);
-}
-
-inline
-bool
-NdbColumnImpl::getCharType() const {
- return (m_type == NdbDictionary::Column::Char ||
- m_type == NdbDictionary::Column::Varchar ||
- m_type == NdbDictionary::Column::Text ||
- m_type == NdbDictionary::Column::Longvarchar);
-}
-
-inline
-bool
-NdbColumnImpl::getStringType() const {
- return (m_type == NdbDictionary::Column::Char ||
- m_type == NdbDictionary::Column::Varchar ||
- m_type == NdbDictionary::Column::Longvarchar ||
- m_type == NdbDictionary::Column::Binary ||
- m_type == NdbDictionary::Column::Varbinary ||
- m_type == NdbDictionary::Column::Longvarbinary);
-}
-
-inline
-bool
-NdbColumnImpl::getBlobType() const {
- return (m_type == NdbDictionary::Column::Blob ||
- m_type == NdbDictionary::Column::Text);
-}
-
-inline
-NdbTableImpl &
-NdbTableImpl::getImpl(NdbDictionary::Table & t){
- return t.m_impl;
-}
-
-inline
-NdbTableImpl &
-NdbTableImpl::getImpl(const NdbDictionary::Table & t){
- return t.m_impl;
-}
-
-inline
-NdbColumnImpl *
-NdbTableImpl::getColumn(unsigned attrId){
- if(m_columns.size() > attrId){
- return m_columns[attrId];
- }
- return 0;
-}
-
-inline
-Uint32
-Hash( const char* str ){
- Uint32 h = 0;
- Uint32 len = strlen(str);
- while(len >= 4){
- h = (h << 5) + h + str[0];
- h = (h << 5) + h + str[1];
- h = (h << 5) + h + str[2];
- h = (h << 5) + h + str[3];
- len -= 4;
- str += 4;
- }
-
- switch(len){
- case 3:
- h = (h << 5) + h + *str++;
- case 2:
- h = (h << 5) + h + *str++;
- case 1:
- h = (h << 5) + h + *str++;
- }
- return h + h;
-}
-
-
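Hash() above is, as far as I can read it, a 33-multiplier hash ((h << 5) + h is h * 33) unrolled four characters at a time, with the tail handled by the fall-through switch; the result is then doubled so the low bit is always zero, which the column-hash lookup below uses as a flag bit. A straight loop that should compute the same value:

#include <cstdint>
#include <cstring>
#include <iostream>

// A straight-line restatement of the unrolled hash above: h = h*33 + c for
// every character, then doubled so bit 0 is always free for flag use.
static uint32_t simpleHash(const char* str)
{
  uint32_t h = 0;
  for (std::size_t i = 0, n = std::strlen(str); i < n; i++)
    h = (h << 5) + h + str[i];
  return h + h;
}

int main() {
  std::cout << std::hex << simpleHash("column_name") << "\n";
  return 0;
}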
-inline
-NdbColumnImpl *
-NdbTableImpl::getColumn(const char * name){
-
- Uint32 sz = m_columns.size();
- NdbColumnImpl** cols = m_columns.getBase();
- const Uint32 * hashtable = m_columnHash.getBase();
-
- if(sz > 5 && false){
- Uint32 hashValue = Hash(name) & 0xFFFE;
- Uint32 bucket = hashValue & m_columnHashMask;
- bucket = (bucket < sz ? bucket : bucket - sz);
- hashtable += bucket;
- Uint32 tmp = * hashtable;
- if((tmp & 1) == 1 ){ // No chaining
- sz = 1;
- } else {
- sz = (tmp >> 16);
- hashtable += (tmp & 0xFFFE) >> 1;
- tmp = * hashtable;
- }
- do {
- if(hashValue == (tmp & 0xFFFE)){
- NdbColumnImpl* col = cols[tmp >> 16];
- if(strncmp(name, col->m_name.c_str(), col->m_name.length()) == 0){
- return col;
- }
- }
- hashtable++;
- tmp = * hashtable;
- } while(--sz > 0);
-#if 0
- Uint32 dir = m_columnHash[bucket];
- Uint32 pos = bucket + ((dir & 0xFFFE) >> 1);
- Uint32 cnt = dir >> 16;
- ndbout_c("col: %s hv: %x bucket: %d dir: %x pos: %d cnt: %d tmp: %d -> 0",
- name, hashValue, bucket, dir, pos, cnt, tmp);
-#endif
- return 0;
- } else {
- for(Uint32 i = 0; i<sz; i++){
- NdbColumnImpl* col = * cols++;
- if(col != 0 && strcmp(name, col->m_name.c_str()) == 0)
- return col;
- }
- }
- return 0;
-}
-
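Each word of the column hash table above is read as a packed record: bit 0 marks a single-entry bucket, the rest of the low half holds the (already even) hash value compared against Hash(name) & 0xFFFE, and the high 16 bits hold a column index or chain descriptor. Note also that the "sz > 5 && false" guard compiles the hashed path out, so the linear scan always runs. A tiny illustration of that kind of packing (my own layout for the example, not necessarily the exact one produced by buildColumnHash):

#include <cassert>
#include <cstdint>
#include <iostream>

// Illustrative packing only: 16-bit column index in the high half, an even
// 15-bit-ish hash in the low half, and bit 0 as a "single entry, no chain" flag.
static uint32_t packEntry(uint16_t columnIndex, uint32_t evenHash, bool single)
{
  return (uint32_t(columnIndex) << 16) | (evenHash & 0xFFFEu) | (single ? 1u : 0u);
}

int main() {
  const uint32_t w = packEntry(12, 0x4ABC, true);
  assert((w & 1) == 1);                 // single-entry bucket
  assert((w & 0xFFFE) == 0x4ABC);       // hash compares like Hash(name) & 0xFFFE
  assert((w >> 16) == 12);              // index into the column array
  std::cout << std::hex << w << "\n";
  return 0;
}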
-inline
-const NdbColumnImpl *
-NdbTableImpl::getColumn(unsigned attrId) const {
- if(m_columns.size() > attrId){
- return m_columns[attrId];
- }
- return 0;
-}
-
-inline
-const NdbColumnImpl *
-NdbTableImpl::getColumn(const char * name) const {
- Uint32 sz = m_columns.size();
- NdbColumnImpl* const * cols = m_columns.getBase();
- for(Uint32 i = 0; i<sz; i++, cols++){
- NdbColumnImpl* col = * cols;
- if(col != 0 && strcmp(name, col->m_name.c_str()) == 0)
- return col;
- }
- return 0;
-}
-
-inline
-NdbIndexImpl &
-NdbIndexImpl::getImpl(NdbDictionary::Index & t){
- return t.m_impl;
-}
-
-inline
-NdbIndexImpl &
-NdbIndexImpl::getImpl(const NdbDictionary::Index & t){
- return t.m_impl;
-}
-
-inline
-NdbDictionaryImpl &
-NdbDictionaryImpl::getImpl(NdbDictionary::Dictionary & t){
- return t.m_impl;
-}
-
-inline
-const NdbDictionaryImpl &
-NdbDictionaryImpl::getImpl(const NdbDictionary::Dictionary & t){
- return t.m_impl;
-}
-
-/*****************************************************************
- * Inlined getters
- */
-
-inline
-NdbTableImpl *
-NdbDictionaryImpl::getTable(const char * table_name, void **data)
-{
- const BaseString internal_tabname(m_ndb.internalize_table_name(table_name));
- Ndb_local_table_info *info=
- get_local_table_info(internal_tabname, true);
- if (info == 0)
- return 0;
-
- if (data)
- *data= info->m_local_data;
-
- return info->m_table_impl;
-}
-
-inline
-Ndb_local_table_info *
-NdbDictionaryImpl::get_local_table_info(const BaseString& internalTableName,
- bool do_add_blob_tables)
-{
- Ndb_local_table_info *info= m_localHash.get(internalTableName.c_str());
- if (info == 0) {
- info= fetchGlobalTableImpl(internalTableName);
- if (info == 0) {
- return 0;
- }
- }
- if (do_add_blob_tables && info->m_table_impl->m_noOfBlobs)
- addBlobTables(*(info->m_table_impl));
-
- return info; // autoincrement already initialized
-}
-
-inline
-NdbIndexImpl *
-NdbDictionaryImpl::getIndex(const char * index_name,
- const char * table_name)
-{
- if (table_name || m_ndb.usingFullyQualifiedNames())
- {
- const BaseString internal_indexname(
- (table_name)
- ?
- m_ndb.internalize_index_name(getTable(table_name), index_name)
- :
- m_ndb.internalize_table_name(index_name)); // Index is also a table
-
- if (internal_indexname.length())
- {
- Ndb_local_table_info * info=
- get_local_table_info(internal_indexname, false);
- if (info)
- {
- NdbTableImpl * tab= info->m_table_impl;
- if (tab->m_index == 0)
- tab->m_index= getIndexImpl(index_name, internal_indexname);
- if (tab->m_index != 0)
- tab->m_index->m_table= tab;
- return tab->m_index;
- }
- }
- }
-
- m_error.code= 4243;
- return 0;
-}
-
-#endif
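getTable()/get_local_table_info() above consult the per-connection LocalDictCache first and only fall back to fetchGlobalTableImpl() (the shared cache and, ultimately, a kernel round trip) on a miss. A minimal sketch of that lookup order with standard containers, where fetchFromGlobal is a hypothetical stand-in for the global fetch:

#include <iostream>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>

struct TableInfo { std::string name; int columns; };

// Local (uncontended) cache in front of a slower shared/global lookup.
class LocalDictCacheSketch {
public:
  const TableInfo* get(const std::string& internalName) {
    auto it = m_local.find(internalName);
    if (it != m_local.end())
      return it->second.get();                      // fast path, no locking
    std::optional<TableInfo> fetched = fetchFromGlobal(internalName);
    if (!fetched)
      return nullptr;                               // unknown table
    auto ins = m_local.emplace(internalName,
                               std::make_unique<TableInfo>(*fetched));
    return ins.first->second.get();
  }
private:
  // Stand-in for fetchGlobalTableImpl: shared cache and/or kernel round trip.
  std::optional<TableInfo> fetchFromGlobal(const std::string& name) {
    if (name == "sys/def/t1") return TableInfo{name, 3};
    return std::nullopt;
  }
  std::unordered_map<std::string, std::unique_ptr<TableInfo>> m_local;
};

int main() {
  LocalDictCacheSketch cache;
  const TableInfo* t = cache.get("sys/def/t1");
  std::cout << (t ? t->name : std::string("miss")) << "\n";
  return 0;
}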
diff --git a/ndb/test/ndbapi/Makefile.am b/ndb/test/ndbapi/Makefile.am
deleted file mode 100644
index 1d2dfb3f948..00000000000
--- a/ndb/test/ndbapi/Makefile.am
+++ /dev/null
@@ -1,159 +0,0 @@
-
-SUBDIRS = bank
-
-ndbtest_PROGRAMS = \
-flexBench \
-drop_all_tabs \
-create_all_tabs \
-create_tab \
-flexAsynch \
-flexBench \
-flexHammer \
-flexTT \
-testBackup \
-testBasic \
-testBasicAsynch \
-testBlobs \
-testDataBuffers \
-testDict \
-testIndex \
-testMgm \
-testNdbApi \
-testNodeRestart \
-testOIBasic \
-testOperations \
-testRestartGci \
-testScan \
-testScanInterpreter \
-testScanPerf \
-testSystemRestart \
-testTimeout \
-testTransactions \
-testDeadlock \
-test_event ndbapi_slow_select testReadPerf testLcp \
-testPartitioning \
-testBitfield \
-DbCreate DbAsyncGenerator \
-test_event_multi_table
-
-#flexTimedAsynch
-#testBlobs
-#flex_bench_mysql
-
-create_all_tabs_SOURCES = create_all_tabs.cpp
-create_tab_SOURCES = create_tab.cpp
-drop_all_tabs_SOURCES = drop_all_tabs.cpp
-flexAsynch_SOURCES = flexAsynch.cpp
-flexBench_SOURCES = flexBench.cpp
-flexHammer_SOURCES = flexHammer.cpp
-flexTT_SOURCES = flexTT.cpp
-#flexTimedAsynch_SOURCES = flexTimedAsynch.cpp
-#flex_bench_mysql_SOURCES = flex_bench_mysql.cpp
-testBackup_SOURCES = testBackup.cpp
-testBasic_SOURCES = testBasic.cpp
-testBasicAsynch_SOURCES = testBasicAsynch.cpp
-testBlobs_SOURCES = testBlobs.cpp
-testDataBuffers_SOURCES = testDataBuffers.cpp
-testDict_SOURCES = testDict.cpp
-testIndex_SOURCES = testIndex.cpp
-testMgm_SOURCES = testMgm.cpp
-testNdbApi_SOURCES = testNdbApi.cpp
-testNodeRestart_SOURCES = testNodeRestart.cpp
-testOIBasic_SOURCES = testOIBasic.cpp
-testOperations_SOURCES = testOperations.cpp
-testRestartGci_SOURCES = testRestartGci.cpp
-testScan_SOURCES = testScan.cpp ScanFunctions.hpp
-testScanInterpreter_SOURCES = testScanInterpreter.cpp ScanFilter.hpp ScanInterpretTest.hpp
-testScanPerf_SOURCES = testScanPerf.cpp
-testSystemRestart_SOURCES = testSystemRestart.cpp
-testTimeout_SOURCES = testTimeout.cpp
-testTransactions_SOURCES = testTransactions.cpp
-testDeadlock_SOURCES = testDeadlock.cpp
-test_event_SOURCES = test_event.cpp
-ndbapi_slow_select_SOURCES = slow_select.cpp
-testReadPerf_SOURCES = testReadPerf.cpp
-testLcp_SOURCES = testLcp.cpp
-testPartitioning_SOURCES = testPartitioning.cpp
-testBitfield_SOURCES = testBitfield.cpp
-DbCreate_SOURCES = bench/mainPopulate.cpp bench/dbPopulate.cpp bench/userInterface.cpp bench/dbPopulate.h bench/userInterface.h bench/testData.h bench/testDefinitions.h bench/ndb_schema.hpp bench/ndb_error.hpp
-DbAsyncGenerator_SOURCES = bench/mainAsyncGenerator.cpp bench/asyncGenerator.cpp bench/ndb_async2.cpp bench/dbGenerator.h bench/macros.h bench/userInterface.h bench/testData.h bench/testDefinitions.h bench/ndb_schema.hpp bench/ndb_error.hpp
-test_event_multi_table_SOURCES = test_event_multi_table.cpp
-
-INCLUDES_LOC = -I$(top_srcdir)/ndb/include/kernel
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_ndbapitest.mk.am
-
-##testDict_INCLUDES = $(INCLUDES) -I$(top_srcdir)/ndb/include/kernel
-##testIndex_INCLUDES = $(INCLUDES) -I$(top_srcdir)/ndb/include/kernel
-##testSystemRestart_INCLUDES = $(INCLUDES) -I$(top_srcdir)/ndb/include/kernel
-##testTransactions_INCLUDES = $(INCLUDES) -I$(top_srcdir)/ndb/include/kernel
-testBackup_LDADD = $(LDADD) bank/libbank.a
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-
-
-windoze-dsp: flexBench.dsp testBasic.dsp testBlobs.dsp \
- testScan.dsp
-
-flexBench.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ flexBench
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(flexBench_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
-
-testBasic.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ testBasic
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(testBasic_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
-
-testOIBasic.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ testOIBasic
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(testOIBasic_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
-
-testBlobs.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ testBlobs
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(testBlobs_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
-
-testScan.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ testScan
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(testScan_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
diff --git a/ndb/test/ndbapi/bank/Makefile.am b/ndb/test/ndbapi/bank/Makefile.am
deleted file mode 100644
index d4f82a7f9c4..00000000000
--- a/ndb/test/ndbapi/bank/Makefile.am
+++ /dev/null
@@ -1,24 +0,0 @@
-
-ndbtest_PROGRAMS = testBank bankSumAccounts bankValidateAllGLs bankMakeGL bankTransactionMaker bankCreator bankTimer
-
-noinst_LIBRARIES = libbank.a
-
-libbank_a_SOURCES = Bank.cpp BankLoad.cpp Bank.hpp
-
-testBank_SOURCES = testBank.cpp
-bankSumAccounts_SOURCES = bankSumAccounts.cpp
-bankValidateAllGLs_SOURCES = bankValidateAllGLs.cpp
-bankMakeGL_SOURCES = bankMakeGL.cpp
-bankTransactionMaker_SOURCES = bankTransactionMaker.cpp
-bankCreator_SOURCES = bankCreator.cpp
-bankTimer_SOURCES = bankTimer.cpp
-
-LDADD_LOC = $(noinst_LIBRARIES)
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_ndbapitest.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp:
diff --git a/ndb/test/run-test/Makefile.am b/ndb/test/run-test/Makefile.am
deleted file mode 100644
index 60d64a7697f..00000000000
--- a/ndb/test/run-test/Makefile.am
+++ /dev/null
@@ -1,34 +0,0 @@
-
-testdir=$(prefix)/mysql-test/ndb
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_util.mk.am
-include $(top_srcdir)/ndb/config/type_mgmapiclient.mk.am
-
-test_PROGRAMS = atrt
-test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
- conf-daily-basic-ndbmaster.txt \
- conf-daily-basic-shark.txt \
- conf-daily-devel-ndbmaster.txt \
- conf-daily-sql-ndbmaster.txt \
- conf-daily-basic-dl145a.txt
-test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
- atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh
-
-atrt_SOURCES = main.cpp run-test.hpp
-INCLUDES_LOC = -I$(top_srcdir)/ndb/test/include
-LDADD_LOC = $(top_builddir)/ndb/test/src/libNDBT.a \
- $(top_builddir)/ndb/src/libndbclient.la \
- $(top_builddir)/dbug/libdbug.a \
- $(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
-
-wrappersdir=$(prefix)/bin
-wrappers_SCRIPTS=atrt-testBackup atrt-mysql-test-run
-
-EXTRA_DIST = $(test_DATA) $(test_SCRIPTS) $(wrappers_SCRIPTS) README.ATRT
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp:
diff --git a/ndb/test/run-test/README b/ndb/test/run-test/README
deleted file mode 100644
index d5da8f05c17..00000000000
--- a/ndb/test/run-test/README
+++ /dev/null
@@ -1,43 +0,0 @@
-run-test/README
-
-This document describes how atrt works and how to use it.
-
-atrt is a test program driver.
-atrt supports fully distributed tests and utilizes ndb_cpcd.
-
-=================================
-atrt has the following main loop:
-
-/**
- * Pseudo code for atrt
- */
- read config file (default d.txt)
- contact each ndb_cpcd
- start each ndb_mgmd
- connect to each ndb_mgmd
- for each read(test case)
- do
- if previous test failed (or is first test)
- stop each ndbd
- start each ndbd
- wait for ndbd to get started
-
- start each mysqld
-
- start each test prg
-
- wait while all is running and max time not elapsed
-
- stop each mysqld
-
- stop each test prg
-
- gather result
-
- done
-/**
- * End of pseudo code
- */
-
-=================================
-
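Restated as compilable C++ (all process control reduced to stubs; the real driver talks to ndb_cpcd and ndb_mgmd and also enforces a maximum run time per test), the main loop above is roughly:

#include <iostream>
#include <string>
#include <vector>

// Stubs standing in for the real ndb_cpcd / ndb_mgmd driven process control.
static void stopNdbd()    { std::cout << "stop ndbd\n"; }
static void startNdbd()   { std::cout << "start ndbd\n"; }
static void startMysqld() { std::cout << "start mysqld\n"; }
static void stopMysqld()  { std::cout << "stop mysqld\n"; }
static bool runTest(const std::string& t)
{ std::cout << "run " << t << "\n"; return t != "testTimeout"; }
static void gatherResult() { std::cout << "gather result\n"; }

int main() {
  const std::vector<std::string> testCases = {"testBasic", "testTimeout", "testScan"};
  bool previousFailed = true;                 // first test also gets a fresh cluster
  for (const std::string& tc : testCases) {
    if (previousFailed) {                     // restart data nodes only after a failure
      stopNdbd();
      startNdbd();
    }
    startMysqld();
    const bool ok = runTest(tc);              // the real driver also caps the run time
    stopMysqld();
    gatherResult();
    previousFailed = !ok;
  }
  return 0;
}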
diff --git a/ndb/test/src/Makefile.am b/ndb/test/src/Makefile.am
deleted file mode 100644
index 289633b060a..00000000000
--- a/ndb/test/src/Makefile.am
+++ /dev/null
@@ -1,35 +0,0 @@
-
-noinst_LIBRARIES = libNDBT.a
-
-libNDBT_a_SOURCES = \
- NDBT_ReturnCodes.cpp \
- NDBT_Error.cpp NDBT_Tables.cpp NDBT_ResultRow.cpp \
- NDBT_Test.cpp HugoCalculator.cpp \
- HugoOperations.cpp HugoTransactions.cpp \
- HugoAsynchTransactions.cpp UtilTransactions.cpp \
- NdbRestarter.cpp NdbRestarts.cpp NDBT_Output.cpp \
- NdbBackup.cpp NdbConfig.cpp NdbGrep.cpp NDBT_Table.cpp \
- NdbSchemaCon.cpp NdbSchemaOp.cpp getarg.c \
- CpcClient.cpp
-
-INCLUDES_LOC = -I$(top_srcdir)/ndb/src/common/mgmcommon -I$(top_srcdir)/ndb/include/mgmcommon -I$(top_srcdir)/ndb/include/kernel -I$(top_srcdir)/ndb/src/mgmapi
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_ndbapitest.mk.am
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: libNDBT.dsp
-
-libNDBT.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-lib.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-lib.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(libNDBT_a_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LIB
diff --git a/ndb/test/tools/Makefile.am b/ndb/test/tools/Makefile.am
deleted file mode 100644
index 873136e254d..00000000000
--- a/ndb/test/tools/Makefile.am
+++ /dev/null
@@ -1,30 +0,0 @@
-
-ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index ndb_cpcc
-
-# transproxy
-
-hugoFill_SOURCES = hugoFill.cpp
-hugoLoad_SOURCES = hugoLoad.cpp
-hugoLockRecords_SOURCES = hugoLockRecords.cpp
-hugoPkDelete_SOURCES = hugoPkDelete.cpp
-hugoPkRead_SOURCES = hugoPkRead.cpp
-hugoPkReadRecord_SOURCES = hugoPkReadRecord.cpp
-hugoPkUpdate_SOURCES = hugoPkUpdate.cpp
-hugoScanRead_SOURCES = hugoScanRead.cpp
-hugoScanUpdate_SOURCES = hugoScanUpdate.cpp
-restart_SOURCES = restart.cpp
-# transproxy_SOURCES = transproxy.cpp
-verify_index_SOURCES = verify_index.cpp
-copy_tab_SOURCES = copy_tab.cpp
-create_index_SOURCES = create_index.cpp
-ndb_cpcc_SOURCES = cpcc.cpp
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_ndbapitest.mk.am
-
-ndb_cpcc_LDADD = $(LDADD)
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp:
diff --git a/ndb/tools/Makefile.am b/ndb/tools/Makefile.am
deleted file mode 100644
index 1008b166dfc..00000000000
--- a/ndb/tools/Makefile.am
+++ /dev/null
@@ -1,157 +0,0 @@
-
-ndbtools_PROGRAMS = \
- ndb_test_platform \
- ndb_waiter \
- ndb_drop_table \
- ndb_delete_all \
- ndb_desc \
- ndb_drop_index \
- ndb_show_tables \
- ndb_select_all \
- ndb_select_count \
- ndb_restore
-
-tools_common_sources = ../test/src/NDBT_ReturnCodes.cpp \
- ../test/src/NDBT_Table.cpp \
- ../test/src/NDBT_Output.cpp
-
-ndb_test_platform_SOURCES = ndb_test_platform.cpp
-ndb_waiter_SOURCES = waiter.cpp $(tools_common_sources)
-ndb_delete_all_SOURCES = delete_all.cpp $(tools_common_sources)
-ndb_desc_SOURCES = desc.cpp $(tools_common_sources)
-ndb_drop_index_SOURCES = drop_index.cpp $(tools_common_sources)
-ndb_drop_table_SOURCES = drop_tab.cpp $(tools_common_sources)
-ndb_show_tables_SOURCES = listTables.cpp $(tools_common_sources)
-ndb_select_all_SOURCES = select_all.cpp \
- ../test/src/NDBT_ResultRow.cpp \
- $(tools_common_sources)
-ndb_select_count_SOURCES = select_count.cpp $(tools_common_sources)
-ndb_restore_SOURCES = restore/restore_main.cpp \
- restore/consumer.cpp \
- restore/consumer_restore.cpp \
- restore/consumer_printer.cpp \
- restore/Restore.cpp \
- ../test/src/NDBT_ResultRow.cpp $(tools_common_sources)
-
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_ndbapitools.mk.am
-
-ndb_test_platform_LDFLAGS = @ndb_bin_am_ldflags@
-ndb_waiter_LDFLAGS = @ndb_bin_am_ldflags@
-ndb_drop_table_LDFLAGS = @ndb_bin_am_ldflags@
-ndb_delete_all_LDFLAGS = @ndb_bin_am_ldflags@
-ndb_desc_LDFLAGS = @ndb_bin_am_ldflags@
-ndb_drop_index_LDFLAGS = @ndb_bin_am_ldflags@
-ndb_show_tables_LDFLAGS = @ndb_bin_am_ldflags@
-ndb_select_all_LDFLAGS = @ndb_bin_am_ldflags@
-ndb_select_count_LDFLAGS = @ndb_bin_am_ldflags@
-ndb_restore_LDFLAGS = @ndb_bin_am_ldflags@
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
-
-windoze-dsp: \
- ndb_waiter.dsp \
- ndb_drop_table.dsp \
- ndb_delete_all.dsp \
- ndb_desc.dsp \
- ndb_drop_index.dsp \
- ndb_show_tables.dsp \
- ndb_select_all.dsp \
- ndb_select_count.dsp
-
-ndb_waiter.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ ndb_waiter
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(ndb_waiter_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
-
-ndb_drop_table.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ ndb_drop_table
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(ndb_drop_table_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
-
-ndb_delete_all.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ ndb_delete_all
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(ndb_delete_all_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
-
-ndb_desc.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ ndb_desc
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(ndb_desc_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
-
-ndb_drop_index.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ ndb_drop_index
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(ndb_drop_index_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
-
-ndb_show_tables.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ ndb_show_tables
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(ndb_show_tables_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
-
-ndb_select_all.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ ndb_select_all
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(ndb_select_all_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
-
-ndb_select_count.dsp: Makefile \
- $(top_srcdir)/ndb/config/win-prg.am \
- $(top_srcdir)/ndb/config/win-name \
- $(top_srcdir)/ndb/config/win-includes \
- $(top_srcdir)/ndb/config/win-sources \
- $(top_srcdir)/ndb/config/win-libraries
- cat $(top_srcdir)/ndb/config/win-prg.am > $@
- @$(top_srcdir)/ndb/config/win-name $@ ndb_select_count
- @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
- @$(top_srcdir)/ndb/config/win-sources $@ $(ndb_select_count_SOURCES)
- @$(top_srcdir)/ndb/config/win-libraries $@ LINK $(LDADD)
diff --git a/ndb/tools/restore/Restore.cpp b/ndb/tools/restore/Restore.cpp
deleted file mode 100644
index b53255820db..00000000000
--- a/ndb/tools/restore/Restore.cpp
+++ /dev/null
@@ -1,940 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include "Restore.hpp"
-#include <NdbTCP.h>
-#include <OutputStream.hpp>
-#include <Bitmask.hpp>
-
-#include <AttributeHeader.hpp>
-#include <trigger_definitions.h>
-#include <SimpleProperties.hpp>
-#include <signaldata/DictTabInfo.hpp>
-
-Uint16 Twiddle16(Uint16 in); // Byte swap 16-bit data
-Uint32 Twiddle32(Uint32 in); // Byte swap 32-bit data
-Uint64 Twiddle64(Uint64 in); // Byte swap 64-bit data
-
-bool
-BackupFile::Twiddle(const AttributeDesc* attr_desc, AttributeData* attr_data, Uint32 arraySize){
- Uint32 i;
-
- if(m_hostByteOrder)
- return true;
-
- if(arraySize == 0){
- arraySize = attr_desc->arraySize;
- }
-
- switch(attr_desc->size){
- case 8:
-
- return true;
- case 16:
- for(i = 0; i<arraySize; i++){
- attr_data->u_int16_value[i] = Twiddle16(attr_data->u_int16_value[i]);
- }
- return true;
- case 32:
- for(i = 0; i<arraySize; i++){
- attr_data->u_int32_value[i] = Twiddle32(attr_data->u_int32_value[i]);
- }
- return true;
- case 64:
- for(i = 0; i<arraySize; i++){
- attr_data->u_int64_value[i] = Twiddle64(attr_data->u_int64_value[i]);
- }
- return true;
- default:
- return false;
- } // switch
-
-} // Twiddle
-
-FilteredNdbOut err(* new FileOutputStream(stderr), 0, 0);
-FilteredNdbOut info(* new FileOutputStream(stdout), 1, 1);
-FilteredNdbOut debug(* new FileOutputStream(stdout), 2, 0);
-
-// Magic constants used to decide the byte order of the data
-const Uint32 magicByteOrder = 0x12345678;
-const Uint32 swappedMagicByteOrder = 0x78563412;
-
-RestoreMetaData::RestoreMetaData(const char* path, Uint32 nodeId, Uint32 bNo) {
-
- debug << "RestoreMetaData constructor" << endl;
- setCtlFile(nodeId, bNo, path);
-}
-
-RestoreMetaData::~RestoreMetaData(){
- for(Uint32 i= 0; i < allTables.size(); i++)
- delete allTables[i];
- allTables.clear();
-}
-
-TableS *
-RestoreMetaData::getTable(Uint32 tableId) const {
- for(Uint32 i= 0; i < allTables.size(); i++)
- if(allTables[i]->getTableId() == tableId)
- return allTables[i];
- return NULL;
-}
-
-Uint32
-RestoreMetaData::getStopGCP() const {
- return m_stopGCP;
-}
-
-int
-RestoreMetaData::loadContent()
-{
- Uint32 noOfTables = readMetaTableList();
- if(noOfTables == 0) {
- return 1;
- }
- for(Uint32 i = 0; i<noOfTables; i++){
- if(!readMetaTableDesc()){
- return 0;
- }
- }
- if(!readGCPEntry())
- return 0;
- return 1;
-}
-
-Uint32
-RestoreMetaData::readMetaTableList() {
-
- Uint32 sectionInfo[2];
-
- if (buffer_read(&sectionInfo, sizeof(sectionInfo), 1) != 1){
- err << "readMetaTableList read header error" << endl;
- return 0;
- }
- sectionInfo[0] = ntohl(sectionInfo[0]);
- sectionInfo[1] = ntohl(sectionInfo[1]);
-
- const Uint32 tabCount = sectionInfo[1] - 2;
-
- void *tmp;
- if (buffer_get_ptr(&tmp, 4, tabCount) != tabCount){
- err << "readMetaTableList read tabCount error" << endl;
- return 0;
- }
-
- return tabCount;
-}
-
-bool
-RestoreMetaData::readMetaTableDesc() {
-
- Uint32 sectionInfo[2];
-
- // Read section header
- if (buffer_read(&sectionInfo, sizeof(sectionInfo), 1) != 1){
- err << "readMetaTableDesc read header error" << endl;
- return false;
- } // if
- sectionInfo[0] = ntohl(sectionInfo[0]);
- sectionInfo[1] = ntohl(sectionInfo[1]);
-
- assert(sectionInfo[0] == BackupFormat::TABLE_DESCRIPTION);
-
- // Read dictTabInfo buffer
- const Uint32 len = (sectionInfo[1] - 2);
- void *ptr;
- if (buffer_get_ptr(&ptr, 4, len) != len){
- err << "readMetaTableDesc read error" << endl;
- return false;
- } // if
-
- return parseTableDescriptor((Uint32*)ptr, len);
-}
-
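readMetaTableList()/readMetaTableDesc() above read each section as two big-endian words, a type and a length that counts the header itself, so the payload is length - 2 words. A self-contained sketch of that decode, with a portable be32 helper standing in for ntohl and an arbitrary type code for the example:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Portable stand-in for ntohl(): interpret 4 bytes as a big-endian word.
static uint32_t be32(const unsigned char* p)
{
  return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16)
       | (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);
}

struct Section { uint32_t type; std::vector<uint32_t> payload; };

// Decode one [type][length][payload...] section; length counts the two
// header words, so the payload is length - 2 words.
static bool readSection(const std::vector<unsigned char>& buf, Section& out)
{
  if (buf.size() < 8) return false;
  out.type = be32(&buf[0]);
  const uint32_t length = be32(&buf[4]);
  if (length < 2 || buf.size() < std::size_t(length) * 4) return false;
  out.payload.clear();
  for (uint32_t i = 2; i < length; i++)
    out.payload.push_back(be32(&buf[i * 4]));
  return true;
}

int main() {
  // type 3 is just an arbitrary code for the example; length 3 => one payload word.
  const std::vector<unsigned char> buf = {0,0,0,3, 0,0,0,3, 0x11,0x22,0x33,0x44};
  Section s;
  assert(readSection(buf, s) && s.type == 3 && s.payload.size() == 1
         && s.payload[0] == 0x11223344u);
  std::cout << "section type " << s.type << ", "
            << s.payload.size() << " payload word(s)\n";
  return 0;
}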
-bool
-RestoreMetaData::readGCPEntry() {
-
- Uint32 data[4];
-
- BackupFormat::CtlFile::GCPEntry * dst =
- (BackupFormat::CtlFile::GCPEntry *)&data[0];
-
- if(buffer_read(dst, 4, 4) != 4){
- err << "readGCPEntry read error" << endl;
- return false;
- }
-
- dst->SectionType = ntohl(dst->SectionType);
- dst->SectionLength = ntohl(dst->SectionLength);
-
- if(dst->SectionType != BackupFormat::GCP_ENTRY){
- err << "readGCPEntry invalid format" << endl;
- return false;
- }
-
- dst->StartGCP = ntohl(dst->StartGCP);
- dst->StopGCP = ntohl(dst->StopGCP);
-
- m_startGCP = dst->StartGCP;
- m_stopGCP = dst->StopGCP;
- return true;
-}
-
-TableS::TableS(Uint32 version, NdbTableImpl* tableImpl)
- : m_dictTable(tableImpl)
-{
- m_dictTable = tableImpl;
- m_noOfNullable = m_nullBitmaskSize = 0;
- m_auto_val_id= ~(Uint32)0;
- m_max_auto_val= 0;
- backupVersion = version;
-
- for (int i = 0; i < tableImpl->getNoOfColumns(); i++)
- createAttr(tableImpl->getColumn(i));
-}
-
-TableS::~TableS()
-{
- for (Uint32 i= 0; i < allAttributesDesc.size(); i++)
- delete allAttributesDesc[i];
-}
-
-// Parse dictTabInfo buffer and push back to vector storage
-bool
-RestoreMetaData::parseTableDescriptor(const Uint32 * data, Uint32 len)
-{
- NdbTableImpl* tableImpl = 0;
- int ret = NdbDictInterface::parseTableInfo(&tableImpl, data, len, false);
-
- if (ret != 0) {
- err << "parseTableInfo " << " failed" << endl;
- return false;
- }
- if(tableImpl == 0)
- return false;
-
- debug << "parseTableInfo " << tableImpl->getName() << " done" << endl;
-
- TableS * table = new TableS(m_fileHeader.NdbVersion, tableImpl);
- if(table == NULL) {
- return false;
- }
-
- debug << "Parsed table id " << table->getTableId() << endl;
- debug << "Parsed table #attr " << table->getNoOfAttributes() << endl;
- debug << "Parsed table schema version not used " << endl;
-
- debug << "Pushing table " << table->getTableName() << endl;
- debug << " with " << table->getNoOfAttributes() << " attributes" << endl;
-
- allTables.push_back(table);
-
- return true;
-}
-
-// Constructor
-RestoreDataIterator::RestoreDataIterator(const RestoreMetaData & md, void (* _free_data_callback)())
- : BackupFile(_free_data_callback), m_metaData(md)
-{
- debug << "RestoreDataIterator constructor" << endl;
- setDataFile(md, 0);
-}
-
-TupleS & TupleS::operator=(const TupleS& tuple)
-{
- prepareRecord(*tuple.m_currentTable);
-
- if (allAttrData)
- memcpy(allAttrData, tuple.allAttrData, getNoOfAttributes()*sizeof(AttributeData));
-
- return *this;
-}
-int TupleS::getNoOfAttributes() const {
- if (m_currentTable == 0)
- return 0;
- return m_currentTable->getNoOfAttributes();
-}
-
-TableS * TupleS::getTable() const {
- return m_currentTable;
-}
-
-const AttributeDesc * TupleS::getDesc(int i) const {
- return m_currentTable->allAttributesDesc[i];
-}
-
-AttributeData * TupleS::getData(int i) const{
- return &(allAttrData[i]);
-}
-
-bool
-TupleS::prepareRecord(TableS & tab){
- if (allAttrData) {
- if (getNoOfAttributes() == tab.getNoOfAttributes())
- {
- m_currentTable = &tab;
- return true;
- }
- delete [] allAttrData;
- m_currentTable= 0;
- }
-
- allAttrData = new AttributeData[tab.getNoOfAttributes()];
- if (allAttrData == 0)
- return false;
-
- m_currentTable = &tab;
-
- return true;
-}
-
-const TupleS *
-RestoreDataIterator::getNextTuple(int & res)
-{
- Uint32 dataLength = 0;
- // Read record length
- if (buffer_read(&dataLength, sizeof(dataLength), 1) != 1){
- err << "getNextTuple:Error reading length of data part" << endl;
- res = -1;
- return NULL;
- } // if
-
- // Convert length from network byte order
- dataLength = ntohl(dataLength);
- const Uint32 dataLenBytes = 4 * dataLength;
-
- if (dataLength == 0) {
- // Zero length for last tuple
- // End of this data fragment
- debug << "End of fragment" << endl;
- res = 0;
- return NULL;
- } // if
-
- // Read tuple data
- void *_buf_ptr;
- if (buffer_get_ptr(&_buf_ptr, 1, dataLenBytes) != dataLenBytes) {
- err << "getNextTuple:Read error: " << endl;
- res = -1;
- return NULL;
- }
-
- Uint32 *buf_ptr = (Uint32*)_buf_ptr, *ptr = buf_ptr;
- ptr += m_currentTable->m_nullBitmaskSize;
- Uint32 i;
- for(i= 0; i < m_currentTable->m_fixedKeys.size(); i++){
- assert(ptr < buf_ptr + dataLength);
-
- const Uint32 attrId = m_currentTable->m_fixedKeys[i]->attrId;
-
- AttributeData * attr_data = m_tuple.getData(attrId);
- const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
-
- const Uint32 sz = attr_desc->getSizeInWords();
-
- attr_data->null = false;
- attr_data->void_value = ptr;
-
- if(!Twiddle(attr_desc, attr_data))
- {
- res = -1;
- return NULL;
- }
- ptr += sz;
- }
-
- for(i = 0; i < m_currentTable->m_fixedAttribs.size(); i++){
- assert(ptr < buf_ptr + dataLength);
-
- const Uint32 attrId = m_currentTable->m_fixedAttribs[i]->attrId;
-
- AttributeData * attr_data = m_tuple.getData(attrId);
- const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
-
- const Uint32 sz = attr_desc->getSizeInWords();
-
- attr_data->null = false;
- attr_data->void_value = ptr;
-
- if(!Twiddle(attr_desc, attr_data))
- {
- res = -1;
- return NULL;
- }
-
- ptr += sz;
- }
-
- for(i = 0; i < m_currentTable->m_variableAttribs.size(); i++){
- const Uint32 attrId = m_currentTable->m_variableAttribs[i]->attrId;
-
- AttributeData * attr_data = m_tuple.getData(attrId);
- const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
-
- if(attr_desc->m_column->getNullable()){
- const Uint32 ind = attr_desc->m_nullBitIndex;
- if(BitmaskImpl::get(m_currentTable->m_nullBitmaskSize,
- buf_ptr,ind)){
- attr_data->null = true;
- attr_data->void_value = NULL;
- continue;
- }
- }
-
- assert(ptr < buf_ptr + dataLength);
-
- typedef BackupFormat::DataFile::VariableData VarData;
- VarData * data = (VarData *)ptr;
- Uint32 sz = ntohl(data->Sz);
- Uint32 id = ntohl(data->Id);
- assert(id == attrId);
-
- attr_data->null = false;
- attr_data->void_value = &data->Data[0];
-
- /**
- * Compute array size
- */
- const Uint32 arraySize = (4 * sz) / (attr_desc->size / 8);
- assert(arraySize >= attr_desc->arraySize);
- if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize))
- {
- res = -1;
- return NULL;
- }
-
- ptr += (sz + 2);
- }
-
- m_count ++;
- res = 0;
- return &m_tuple;
-} // RestoreDataIterator::getNextTuple
-
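getNextTuple() above starts each row at a null bitmask of m_nullBitmaskSize words; a nullable variable attribute tests its m_nullBitIndex in that mask before looking for data. Assuming the usual word/bit split (BitmaskImpl::get itself is not reproduced in this file), the check amounts to:

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// One bit per nullable column, stored in 32-bit words at the front of the row.
static bool bitIsSet(const std::vector<uint32_t>& mask, uint32_t bitIndex)
{
  return (mask[bitIndex / 32] >> (bitIndex % 32)) & 1u;
}

int main() {
  // 40 nullable columns => (40 + 31) / 32 = 2 bitmask words.
  std::vector<uint32_t> mask((40 + 31) / 32, 0);
  mask[1] |= 1u << (37 % 32);            // mark null bit 37
  assert(bitIsSet(mask, 37) && !bitIsSet(mask, 3));
  std::cout << "bitmask words: " << mask.size() << "\n";
  return 0;
}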
-BackupFile::BackupFile(void (* _free_data_callback)())
- : free_data_callback(_free_data_callback)
-{
- m_file = 0;
- m_path[0] = 0;
- m_fileName[0] = 0;
-
- m_buffer_sz = 64*1024;
- m_buffer = malloc(m_buffer_sz);
- m_buffer_ptr = m_buffer;
- m_buffer_data_left = 0;
-}
-
-BackupFile::~BackupFile(){
- if(m_file != 0)
- fclose(m_file);
- if(m_buffer != 0)
- free(m_buffer);
-}
-
-bool
-BackupFile::openFile(){
- if(m_file != NULL){
- fclose(m_file);
- m_file = 0;
- }
-
- m_file = fopen(m_fileName, "r");
- return m_file != 0;
-}
-
-Uint32 BackupFile::buffer_get_ptr_ahead(void **p_buf_ptr, Uint32 size, Uint32 nmemb)
-{
- Uint32 sz = size*nmemb;
- if (sz > m_buffer_data_left) {
-
- if (free_data_callback)
- (*free_data_callback)();
-
- memcpy(m_buffer, m_buffer_ptr, m_buffer_data_left);
-
- size_t r = fread(((char *)m_buffer) + m_buffer_data_left, 1, m_buffer_sz - m_buffer_data_left, m_file);
- m_buffer_data_left += r;
- m_buffer_ptr = m_buffer;
-
- if (sz > m_buffer_data_left)
- sz = size * (m_buffer_data_left / size);
- }
-
- *p_buf_ptr = m_buffer_ptr;
-
- return sz/size;
-}
-Uint32 BackupFile::buffer_get_ptr(void **p_buf_ptr, Uint32 size, Uint32 nmemb)
-{
- Uint32 r = buffer_get_ptr_ahead(p_buf_ptr, size, nmemb);
-
- m_buffer_ptr = ((char*)m_buffer_ptr)+(r*size);
- m_buffer_data_left -= (r*size);
-
- return r;
-}
-
-Uint32 BackupFile::buffer_read_ahead(void *ptr, Uint32 size, Uint32 nmemb)
-{
- void *buf_ptr;
- Uint32 r = buffer_get_ptr_ahead(&buf_ptr, size, nmemb);
- memcpy(ptr, buf_ptr, r*size);
-
- return r;
-}
-
-Uint32 BackupFile::buffer_read(void *ptr, Uint32 size, Uint32 nmemb)
-{
- void *buf_ptr;
- Uint32 r = buffer_get_ptr(&buf_ptr, size, nmemb);
- memcpy(ptr, buf_ptr, r*size);
-
- return r;
-}
-
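buffer_get_ptr_ahead() above refills the buffer (after letting free_data_callback release anything still pointing into it) and hands back a pointer without consuming, while buffer_get_ptr() advances past what it returned. A small peek/consume reader over an in-memory source, with the refill and callback left out, mirrors that split:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Minimal peek/consume reader echoing the get_ptr_ahead / get_ptr pair;
// the data is already resident here, so no refill logic is needed.
class ByteReader {
public:
  explicit ByteReader(std::vector<unsigned char> data) : m_data(std::move(data)) {}

  // Peek: expose a pointer to at most nmemb whole items without consuming.
  std::size_t peek(const void** p, std::size_t size, std::size_t nmemb) {
    std::size_t avail = (m_data.size() - m_pos) / size;
    if (avail > nmemb) avail = nmemb;
    *p = m_data.data() + m_pos;
    return avail;
  }

  // Consume: same as peek, but advance past the returned items.
  std::size_t read(void* dst, std::size_t size, std::size_t nmemb) {
    const void* p = nullptr;
    std::size_t n = peek(&p, size, nmemb);
    std::memcpy(dst, p, n * size);
    m_pos += n * size;
    return n;
  }

private:
  std::vector<unsigned char> m_data;
  std::size_t m_pos = 0;
};

int main() {
  ByteReader r(std::vector<unsigned char>{1, 2, 3, 4, 5, 6, 7, 8, 9});
  uint32_t w = 0;
  assert(r.read(&w, sizeof(w), 1) == 1);   // consumes 4 bytes
  const void* p = nullptr;
  assert(r.peek(&p, 1, 16) == 5);          // 5 bytes left, not consumed
  assert(r.read(&w, sizeof(w), 2) == 1);   // only one whole word remains
  std::cout << "ok\n";
  return 0;
}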
-void
-BackupFile::setCtlFile(Uint32 nodeId, Uint32 backupId, const char * path){
- m_nodeId = nodeId;
- m_expectedFileHeader.BackupId = backupId;
- m_expectedFileHeader.FileType = BackupFormat::CTL_FILE;
-
- char name[PATH_MAX]; const Uint32 sz = sizeof(name);
- BaseString::snprintf(name, sz, "BACKUP-%d.%d.ctl", backupId, nodeId);
- setName(path, name);
-}
-
-void
-BackupFile::setDataFile(const BackupFile & bf, Uint32 no){
- m_nodeId = bf.m_nodeId;
- m_expectedFileHeader = bf.m_fileHeader;
- m_expectedFileHeader.FileType = BackupFormat::DATA_FILE;
-
- char name[PATH_MAX]; const Uint32 sz = sizeof(name);
- BaseString::snprintf(name, sz, "BACKUP-%d-%d.%d.Data",
- m_expectedFileHeader.BackupId, no, m_nodeId);
- setName(bf.m_path, name);
-}
-
-void
-BackupFile::setLogFile(const BackupFile & bf, Uint32 no){
- m_nodeId = bf.m_nodeId;
- m_expectedFileHeader = bf.m_fileHeader;
- m_expectedFileHeader.FileType = BackupFormat::LOG_FILE;
-
- char name[PATH_MAX]; const Uint32 sz = sizeof(name);
- BaseString::snprintf(name, sz, "BACKUP-%d.%d.log",
- m_expectedFileHeader.BackupId, m_nodeId);
- setName(bf.m_path, name);
-}
-
-void
-BackupFile::setName(const char * p, const char * n){
- const Uint32 sz = sizeof(m_path);
- if(p != 0 && strlen(p) > 0){
- if(p[strlen(p)-1] == '/'){
- BaseString::snprintf(m_path, sz, "%s", p);
- } else {
- BaseString::snprintf(m_path, sz, "%s%s", p, "/");
- }
- } else {
- m_path[0] = 0;
- }
-
- BaseString::snprintf(m_fileName, sizeof(m_fileName), "%s%s", m_path, n);
- debug << "Filename = " << m_fileName << endl;
-}
-
-bool
-BackupFile::readHeader(){
- if(!openFile()){
- return false;
- }
-
- if(buffer_read(&m_fileHeader, sizeof(m_fileHeader), 1) != 1){
- err << "readDataFileHeader: Error reading header" << endl;
- return false;
- }
-
- // Convert from network to host byte order for platform compatibility
- m_fileHeader.NdbVersion = ntohl(m_fileHeader.NdbVersion);
- m_fileHeader.SectionType = ntohl(m_fileHeader.SectionType);
- m_fileHeader.SectionLength = ntohl(m_fileHeader.SectionLength);
- m_fileHeader.FileType = ntohl(m_fileHeader.FileType);
- m_fileHeader.BackupId = ntohl(m_fileHeader.BackupId);
- m_fileHeader.BackupKey_0 = ntohl(m_fileHeader.BackupKey_0);
- m_fileHeader.BackupKey_1 = ntohl(m_fileHeader.BackupKey_1);
-
- debug << "FileHeader: " << m_fileHeader.Magic << " " <<
- m_fileHeader.NdbVersion << " " <<
- m_fileHeader.SectionType << " " <<
- m_fileHeader.SectionLength << " " <<
- m_fileHeader.FileType << " " <<
- m_fileHeader.BackupId << " " <<
- m_fileHeader.BackupKey_0 << " " <<
- m_fileHeader.BackupKey_1 << " " <<
- m_fileHeader.ByteOrder << endl;
-
- debug << "ByteOrder is " << m_fileHeader.ByteOrder << endl;
- debug << "magicByteOrder is " << magicByteOrder << endl;
-
- if (m_fileHeader.FileType != m_expectedFileHeader.FileType){
- abort();
- }
-
- // Check for BackupFormat::FileHeader::ByteOrder if swapping is needed
- if (m_fileHeader.ByteOrder == magicByteOrder) {
- m_hostByteOrder = true;
- } else if (m_fileHeader.ByteOrder == swappedMagicByteOrder){
- m_hostByteOrder = false;
- } else {
- abort();
- }
-
- return true;
-} // BackupFile::readHeader
-
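readHeader() above decides byte order by comparing the stored marker against magicByteOrder (0x12345678) and its swapped form 0x78563412; anything else aborts, and a swapped marker means every multi-byte field must go through the Twiddle* helpers. The same decision, sketched with a throw in place of abort():

#include <cassert>
#include <cstdint>
#include <iostream>
#include <stdexcept>

static const uint32_t magicByteOrder        = 0x12345678;
static const uint32_t swappedMagicByteOrder = 0x78563412;

static uint32_t swap32(uint32_t v)           // same job as Twiddle32
{
  return ((v & 0x000000FF) << 24) | ((v & 0x0000FF00) << 8)
       | ((v & 0x00FF0000) >> 8)  | ((v & 0xFF000000) >> 24);
}

// Returns true if no swapping is needed, false if every multi-byte field must
// be swapped; throws on a corrupt header where readHeader() would abort().
static bool sameByteOrder(uint32_t headerWord)
{
  if (headerWord == magicByteOrder)        return true;
  if (headerWord == swappedMagicByteOrder) return false;
  throw std::runtime_error("unrecognised byte-order marker");
}

int main() {
  assert(sameByteOrder(magicByteOrder));
  assert(!sameByteOrder(swap32(magicByteOrder)));
  assert(swap32(swappedMagicByteOrder) == magicByteOrder);
  std::cout << "byte-order detection ok\n";
  return 0;
}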
-bool
-BackupFile::validateFooter(){
- return true;
-}
-
-bool RestoreDataIterator::readFragmentHeader(int & ret)
-{
- BackupFormat::DataFile::FragmentHeader Header;
-
- debug << "RestoreDataIterator::getNextFragment" << endl;
-
- if (buffer_read(&Header, sizeof(Header), 1) != 1){
- ret = 0;
- return false;
- } // if
-
- Header.SectionType = ntohl(Header.SectionType);
- Header.SectionLength = ntohl(Header.SectionLength);
- Header.TableId = ntohl(Header.TableId);
- Header.FragmentNo = ntohl(Header.FragmentNo);
- Header.ChecksumType = ntohl(Header.ChecksumType);
-
- debug << "FragmentHeader: " << Header.SectionType
- << " " << Header.SectionLength
- << " " << Header.TableId
- << " " << Header.FragmentNo
- << " " << Header.ChecksumType << endl;
-
- m_currentTable = m_metaData.getTable(Header.TableId);
- if(m_currentTable == 0){
- ret = -1;
- return false;
- }
-
- if(!m_tuple.prepareRecord(*m_currentTable))
- {
- ret =-1;
- return false;
- }
-
- info << "_____________________________________________________" << endl
- << "Restoring data in table: " << m_currentTable->getTableName()
- << "(" << Header.TableId << ") fragment "
- << Header.FragmentNo << endl;
-
- m_count = 0;
- ret = 0;
-
- return true;
-} // RestoreDataIterator::readFragmentHeader
-
-
-bool
-RestoreDataIterator::validateFragmentFooter() {
- BackupFormat::DataFile::FragmentFooter footer;
-
- if (buffer_read(&footer, sizeof(footer), 1) != 1){
- err << "getFragmentFooter:Error reading fragment footer" << endl;
- return false;
- }
-
- // TODO: Handle footer, nothing yet
- footer.SectionType = ntohl(footer.SectionType);
- footer.SectionLength = ntohl(footer.SectionLength);
- footer.TableId = ntohl(footer.TableId);
- footer.FragmentNo = ntohl(footer.FragmentNo);
- footer.NoOfRecords = ntohl(footer.NoOfRecords);
- footer.Checksum = ntohl(footer.Checksum);
-
- assert(m_count == footer.NoOfRecords);
-
- return true;
-} // RestoreDataIterator::validateFragmentFooter
-
-AttributeDesc::AttributeDesc(NdbDictionary::Column *c)
- : m_column(c)
-{
- size = 8*NdbColumnImpl::getImpl(* c).m_attrSize;
- arraySize = NdbColumnImpl::getImpl(* c).m_arraySize;
-}
-
-void TableS::createAttr(NdbDictionary::Column *column)
-{
- AttributeDesc * d = new AttributeDesc(column);
- if(d == NULL) {
- ndbout_c("Restore: Failed to allocate memory");
- abort();
- }
- d->attrId = allAttributesDesc.size();
- allAttributesDesc.push_back(d);
-
- if (d->m_column->getAutoIncrement())
- m_auto_val_id= d->attrId;
-
- if(d->m_column->getPrimaryKey() && backupVersion <= MAKE_VERSION(4,1,7))
- {
- m_fixedKeys.push_back(d);
- return;
- }
-
- if(!d->m_column->getNullable())
- {
- m_fixedAttribs.push_back(d);
- return;
- }
-
- /* Nullable attr*/
- d->m_nullBitIndex = m_noOfNullable;
- m_noOfNullable++;
- m_nullBitmaskSize = (m_noOfNullable + 31) / 32;
- m_variableAttribs.push_back(d);
-} // TableS::createAttr
-
-Uint16 Twiddle16(Uint16 in)
-{
- Uint16 retVal = 0;
-
- retVal = ((in & 0xFF00) >> 8) |
- ((in & 0x00FF) << 8);
-
- return(retVal);
-} // Twiddle16
-
-Uint32 Twiddle32(Uint32 in)
-{
- Uint32 retVal = 0;
-
- retVal = ((in & 0x000000FF) << 24) |
- ((in & 0x0000FF00) << 8) |
- ((in & 0x00FF0000) >> 8) |
- ((in & 0xFF000000) >> 24);
-
- return(retVal);
-} // Twiddle32
-
-Uint64 Twiddle64(Uint64 in)
-{
- Uint64 retVal = 0;
-
- retVal =
- ((in & (Uint64)0x00000000000000FFLL) << 56) |
- ((in & (Uint64)0x000000000000FF00LL) << 40) |
- ((in & (Uint64)0x0000000000FF0000LL) << 24) |
- ((in & (Uint64)0x00000000FF000000LL) << 8) |
- ((in & (Uint64)0x000000FF00000000LL) >> 8) |
- ((in & (Uint64)0x0000FF0000000000LL) >> 24) |
- ((in & (Uint64)0x00FF000000000000LL) >> 40) |
- ((in & (Uint64)0xFF00000000000000LL) >> 56);
-
- return(retVal);
-} // Twiddle64
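
The Twiddle helpers above are plain byte-swap routines; together with the magic/swapped-magic check in readHeader they let the restore tool accept backup files written on either endianness. A hedged sketch of the same idea with standalone names (the magic constants here are placeholders, not the real BackupFormat values):

    #include <cstdint>
    #include <cstdio>

    // Placeholder magic word and its byte-swapped form; the real backup
    // format defines its own values.
    static const uint32_t kMagic        = 0x12345678;
    static const uint32_t kSwappedMagic = 0x78563412;

    static uint32_t swap32(uint32_t in)          // same shape as Twiddle32
    {
      return ((in & 0x000000FF) << 24) |
             ((in & 0x0000FF00) <<  8) |
             ((in & 0x00FF0000) >>  8) |
             ((in & 0xFF000000) >> 24);
    }

    // Returns true and sets *host_order when the magic is recognised.
    static bool detect_byte_order(uint32_t file_magic, bool *host_order)
    {
      if (file_magic == kMagic)        { *host_order = true;  return true; }
      if (file_magic == kSwappedMagic) { *host_order = false; return true; }
      return false;                    // unknown header, caller aborts
    }

    int main()
    {
      bool host;
      if (detect_byte_order(swap32(kMagic), &host))
        printf("host order: %d\n", host);   // prints 0: file needs swapping
      return 0;
    }
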
-
-
-RestoreLogIterator::RestoreLogIterator(const RestoreMetaData & md)
- : m_metaData(md)
-{
- debug << "RestoreLog constructor" << endl;
- setLogFile(md, 0);
-
- m_count = 0;
- m_last_gci = 0;
-}
-
-const LogEntry *
-RestoreLogIterator::getNextLogEntry(int & res) {
- // Read record length
- typedef BackupFormat::LogFile::LogEntry LogE;
-
- LogE * logE= 0;
- Uint32 len= ~0;
- const Uint32 stopGCP = m_metaData.getStopGCP();
- do {
- if (buffer_read_ahead(&len, sizeof(Uint32), 1) != 1){
- res= -1;
- return 0;
- }
- len= ntohl(len);
-
- Uint32 data_len = sizeof(Uint32) + len*4;
- if (buffer_get_ptr((void **)(&logE), 1, data_len) != data_len) {
- res= -2;
- return 0;
- }
-
- if(len == 0){
- res= 0;
- return 0;
- }
-
- logE->TableId= ntohl(logE->TableId);
- logE->TriggerEvent= ntohl(logE->TriggerEvent);
-
- const bool hasGcp= (logE->TriggerEvent & 0x10000) != 0;
- logE->TriggerEvent &= 0xFFFF;
-
- if(hasGcp){
- len--;
- m_last_gci = ntohl(logE->Data[len-2]);
- }
- } while(m_last_gci > stopGCP + 1);
-
- m_logEntry.m_table = m_metaData.getTable(logE->TableId);
- switch(logE->TriggerEvent){
- case TriggerEvent::TE_INSERT:
- m_logEntry.m_type = LogEntry::LE_INSERT;
- break;
- case TriggerEvent::TE_UPDATE:
- m_logEntry.m_type = LogEntry::LE_UPDATE;
- break;
- case TriggerEvent::TE_DELETE:
- m_logEntry.m_type = LogEntry::LE_DELETE;
- break;
- default:
- res = -1;
- return NULL;
- }
-
- const TableS * tab = m_logEntry.m_table;
- m_logEntry.clear();
-
- AttributeHeader * ah = (AttributeHeader *)&logE->Data[0];
- AttributeHeader *end = (AttributeHeader *)&logE->Data[len - 2];
- AttributeS * attr;
- while(ah < end){
- attr= m_logEntry.add_attr();
- if(attr == NULL) {
- ndbout_c("Restore: Failed to allocate memory");
- res = -1;
- return 0;
- }
-
- attr->Desc = (* tab)[ah->getAttributeId()];
- assert(attr->Desc != 0);
-
- const Uint32 sz = ah->getDataSize();
- if(sz == 0){
- attr->Data.null = true;
- attr->Data.void_value = NULL;
- } else {
- attr->Data.null = false;
- attr->Data.void_value = ah->getDataPtr();
- }
-
- Twiddle(attr->Desc, &(attr->Data));
-
- ah = ah->getNext();
- }
-
- m_count ++;
- res = 0;
- return &m_logEntry;
-}
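
getNextLogEntry above packs a "GCP present" flag into bit 16 of TriggerEvent and keeps the low 16 bits as the event code. A small sketch of that unpacking step, with hypothetical enum names standing in for the TriggerEvent/LogEntry constants:

    #include <cstdint>
    #include <cstdio>

    enum EventCode { EV_INSERT = 0, EV_UPDATE = 1, EV_DELETE = 2 };  // stand-ins

    struct DecodedEvent
    {
      EventCode code;
      bool      has_gcp;   // bit 16 of the raw word, as in the loop above
    };

    static bool decode_trigger_event(uint32_t raw, DecodedEvent *out)
    {
      out->has_gcp = (raw & 0x10000) != 0;
      uint32_t code = raw & 0xFFFF;
      if (code > EV_DELETE)
        return false;               // unknown event type, caller returns -1
      out->code = (EventCode)code;
      return true;
    }

    int main()
    {
      DecodedEvent ev;
      if (decode_trigger_event(0x10001, &ev))
        printf("code=%d has_gcp=%d\n", ev.code, ev.has_gcp);  // code=1 has_gcp=1
      return 0;
    }
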
-
-NdbOut &
-operator<<(NdbOut& ndbout, const AttributeS& attr){
- const AttributeData & data = attr.Data;
- const AttributeDesc & desc = *(attr.Desc);
-
- if (data.null)
- {
- ndbout << "<NULL>";
- return ndbout;
- }
-
- NdbRecAttr tmprec;
- tmprec.setup(desc.m_column, (char *)data.void_value);
- ndbout << tmprec;
-
- return ndbout;
-}
-
-// Print tuple data
-NdbOut&
-operator<<(NdbOut& ndbout, const TupleS& tuple)
-{
- ndbout << tuple.getTable()->getTableName() << "; ";
- for (int i = 0; i < tuple.getNoOfAttributes(); i++)
- {
- AttributeData * attr_data = tuple.getData(i);
- const AttributeDesc * attr_desc = tuple.getDesc(i);
- const AttributeS attr = {attr_desc, *attr_data};
- debug << i << " " << attr_desc->m_column->getName();
- ndbout << attr;
-
- if (i != (tuple.getNoOfAttributes() - 1))
- ndbout << delimiter << " ";
- } // for
- return ndbout;
-}
-
-// Print tuple data
-NdbOut&
-operator<<(NdbOut& ndbout, const LogEntry& logE)
-{
- switch(logE.m_type)
- {
- case LogEntry::LE_INSERT:
- ndbout << "INSERT " << logE.m_table->getTableName() << " ";
- break;
- case LogEntry::LE_DELETE:
- ndbout << "DELETE " << logE.m_table->getTableName() << " ";
- break;
- case LogEntry::LE_UPDATE:
- ndbout << "UPDATE " << logE.m_table->getTableName() << " ";
- break;
- default:
- ndbout << "Unknown log entry type (not insert, delete or update)" ;
- }
-
- for (Uint32 i= 0; i < logE.size();i++)
- {
- const AttributeS * attr = logE[i];
- ndbout << attr->Desc->m_column->getName() << "=";
- ndbout << (* attr);
- if (i < (logE.size() - 1))
- ndbout << ", ";
- }
- return ndbout;
-}
-
-#include <NDBT.hpp>
-
-NdbOut &
-operator<<(NdbOut& ndbout, const TableS & table){
-
- ndbout << (* (NDBT_Table*)table.m_dictTable) << endl;
- return ndbout;
-}
-
-template class Vector<TableS*>;
-template class Vector<AttributeS*>;
-template class Vector<AttributeDesc*>;
-
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
index 225e8bdccbb..79764ab63cc 100644
--- a/scripts/Makefile.am
+++ b/scripts/Makefile.am
@@ -31,7 +31,6 @@ bin_SCRIPTS = @server_scripts@ \
mysqlhotcopy \
mysqldumpslow \
mysql_explain_log \
- mysql_tableinfo \
mysqld_multi \
mysql_create_system_tables
@@ -55,7 +54,6 @@ EXTRA_SCRIPTS = make_binary_distribution.sh \
mysqldumpslow.sh \
mysql_explain_log.sh \
mysqld_multi.sh \
- mysql_tableinfo.sh \
mysqld_safe.sh \
mysql_create_system_tables.sh
diff --git a/scripts/mysql_fix_privilege_tables.sql b/scripts/mysql_fix_privilege_tables.sql
index b93e0a47b1b..bc9da056758 100644
--- a/scripts/mysql_fix_privilege_tables.sql
+++ b/scripts/mysql_fix_privilege_tables.sql
@@ -1,5 +1,5 @@
-- This script converts any old privilege tables to privilege tables suitable
--- for MySQL 4.0.
+-- for this version of MySQL
-- You can safely ignore all 'Duplicate column' and 'Unknown column' errors"
-- because these just mean that your tables are already up to date.
@@ -123,7 +123,6 @@ UPDATE user SET Show_db_priv= Select_priv, Super_priv=Process_priv, Execute_priv
-- Add fields that can be used to limit number of questions and connections
-- for some users.
-
ALTER TABLE user
ADD max_questions int(11) NOT NULL DEFAULT 0 AFTER x509_subject,
ADD max_updates int(11) unsigned NOT NULL DEFAULT 0 AFTER max_questions,
@@ -138,8 +137,8 @@ ALTER TABLE db
ADD Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
ADD Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
ALTER TABLE host
-ADD Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
-ADD Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
+ADD Create_tmp_table_priv enum('N','Y') DEFAULT 'N' NOT NULL,
+ADD Lock_tables_priv enum('N','Y') DEFAULT 'N' NOT NULL;
alter table user change max_questions max_questions int(11) unsigned DEFAULT 0 NOT NULL;
alter table tables_priv add KEY Grantor (Grantor);
diff --git a/scripts/mysql_tableinfo.sh b/scripts/mysql_tableinfo.sh
deleted file mode 100644
index 2ed7e381fa3..00000000000
--- a/scripts/mysql_tableinfo.sh
+++ /dev/null
@@ -1,494 +0,0 @@
-#!@PERL@ -w
-
-use strict;
-use Getopt::Long;
-use DBI;
-
-=head1 NAME
-
-WARNING: MySQL versions 5.0 and above feature the INFORMATION_SCHEMA
-pseudo-database which contains always up-to-date metadata information
-about all tables. So instead of using this script one can now
-simply query the INFORMATION_SCHEMA.SCHEMATA, INFORMATION_SCHEMA.TABLES,
-INFORMATION_SCHEMA.COLUMNS, INFORMATION_SCHEMA.STATISTICS pseudo-tables.
-Please see the MySQL manual for more information about INFORMATION_SCHEMA.
-This script will be removed from the MySQL distribution in version 5.1.
-
-mysql_tableinfo - creates and populates information tables with
-the output of SHOW DATABASES, SHOW TABLES (or SHOW TABLE STATUS),
-SHOW COLUMNS and SHOW INDEX.
-
-This is version 1.1.
-
-=head1 SYNOPSIS
-
- mysql_tableinfo [OPTIONS] database_to_write [database_like_wild] [table_like_wild]
-
- Do not backquote (``) database_to_write,
- and do not quote ('') database_like_wild or table_like_wild
-
- Examples:
-
- mysql_tableinfo info
-
- mysql_tableinfo info this_db
-
- mysql_tableinfo info %a% b%
-
- mysql_tableinfo info --clear-only
-
- mysql_tableinfo info --col --idx --table-status
-
-=cut
-
-# Documentation continued at end of file
-
-
-sub usage {
- die @_,"\nExecute 'perldoc $0' for documentation\n";
-}
-
-my %opt = (
- 'user' => scalar getpwuid($>),
- 'host' => "localhost",
- 'prefix' => "", #to avoid 'use of uninitialized value...'
-);
-Getopt::Long::Configure(qw(no_ignore_case)); # disambuguate -p and -P
-GetOptions( \%opt,
- "help",
- "user|u=s",
- "password|p=s",
- "host|h=s",
- "port|P=s",
- "socket|S=s",
- "tbl-status",
- "col",
- "idx",
- "clear",
- "clear-only",
- "prefix=s",
- "quiet|q",
-) or usage("Invalid option");
-
-if (!$opt{'quiet'})
- {
- print <<EOF
-WARNING: MySQL versions 5.0 and above feature the INFORMATION_SCHEMA
-pseudo-database which contains always up-to-date metadata information
-about all tables. So instead of using this script one can now
-simply query the INFORMATION_SCHEMA.SCHEMATA, INFORMATION_SCHEMA.TABLES,
-INFORMATION_SCHEMA.COLUMNS, INFORMATION_SCHEMA.STATISTICS pseudo-tables.
-Please see the MySQL manual for more information about INFORMATION_SCHEMA.
-This script will be removed from the MySQL distribution in version 5.1.
-EOF
- }
-
-if ($opt{'help'}) {usage();}
-
-my ($db_to_write,$db_like_wild,$tbl_like_wild);
-if (@ARGV==0)
-{
- usage("Not enough arguments");
-}
-$db_to_write="`$ARGV[0]`"; shift @ARGV;
-$db_like_wild=($ARGV[0])?$ARGV[0]:"%"; shift @ARGV;
-$tbl_like_wild=($ARGV[0])?$ARGV[0]:"%"; shift @ARGV;
-if (@ARGV>0) { usage("Too many arguments"); }
-
-$0 = $1 if $0 =~ m:/([^/]+)$:;
-
-my $info_db="`".$opt{'prefix'}."db`";
-my $info_tbl="`".$opt{'prefix'}."tbl".
- (($opt{'tbl-status'})?"_status":"")."`";
-my $info_col="`".$opt{'prefix'}."col`";
-my $info_idx="`".$opt{'prefix'}."idx`";
-
-
-# --- connect to the database ---
-
-my $dsn = ";host=$opt{'host'}";
-$dsn .= ";port=$opt{'port'}" if $opt{'port'};
-$dsn .= ";mysql_socket=$opt{'socket'}" if $opt{'socket'};
-
-my $dbh = DBI->connect("dbi:mysql:$dsn;mysql_read_default_group=perl",
- $opt{'user'}, $opt{'password'},
-{
- RaiseError => 1,
- PrintError => 0,
- AutoCommit => 1,
-});
-
-$db_like_wild=$dbh->quote($db_like_wild);
-$tbl_like_wild=$dbh->quote($tbl_like_wild);
-
-#Ask
-
-if (!$opt{'quiet'})
-{
- print "\n!! This program is going to do:\n\n";
- print "**DROP** TABLE ...\n" if ($opt{'clear'} or $opt{'clear-only'});
- print "**DELETE** FROM ... WHERE `Database` LIKE $db_like_wild AND `Table` LIKE $tbl_like_wild
-**INSERT** INTO ...
-
-on the following tables :\n";
-
- foreach (($info_db, $info_tbl),
- (($opt{'col'})?$info_col:()),
- (($opt{'idx'})?$info_idx:()))
- {
- print(" $db_to_write.$_\n");
- }
- print "\nContinue (you can skip this confirmation step with --quiet) ? (y|n) [n]";
- if (<STDIN> !~ /^\s*y\s*$/i)
- {
- print "Nothing done!\n";exit;
- }
-}
-
-if ($opt{'clear'} or $opt{'clear-only'})
-{
-#do not drop the $db_to_write database !
- foreach (($info_db, $info_tbl),
- (($opt{'col'})?$info_col:()),
- (($opt{'idx'})?$info_idx:()))
- {
- $dbh->do("DROP TABLE IF EXISTS $db_to_write.$_");
- }
- if ($opt{'clear-only'})
- {
- print "Wrote to database $db_to_write .\n" unless ($opt{'quiet'});
- exit;
- }
-}
-
-
-my %sth;
-my %extra_col_desc;
-my %row;
-my %done_create_table;
-
-#create the $db_to_write database
-$dbh->do("CREATE DATABASE IF NOT EXISTS $db_to_write");
-$dbh->do("USE $db_to_write");
-
-#get databases
-$sth{'db'}=$dbh->prepare("SHOW DATABASES LIKE $db_like_wild");
-$sth{'db'}->execute;
-
-#create $info_db which will receive info about databases.
-#Ensure that the first column to be called "Database" (as SHOW DATABASES LIKE
-#returns a varying
-#column name (of the form "Database (%...)") which is not suitable)
-$extra_col_desc{'db'}=do_create_table("db",$info_db,undef,"`Database`");
-#we'll remember the type of the `Database` column (as returned by
-#SHOW DATABASES), which we will need when creating the next tables.
-
-#clear out-of-date info from this table
-$dbh->do("DELETE FROM $info_db WHERE `Database` LIKE $db_like_wild");
-
-
-while ($row{'db'}=$sth{'db'}->fetchrow_arrayref) #go through all databases
-{
-
-#insert the database name
- $dbh->do("INSERT INTO $info_db VALUES("
- .join(',' , ( map $dbh->quote($_), @{$row{'db'}} ) ).")" );
-
-#for each database, get tables
-
- $sth{'tbl'}=$dbh->prepare("SHOW TABLE"
- .( ($opt{'tbl-status'}) ?
- " STATUS"
- : "S" )
- ." from `$row{'db'}->[0]` LIKE $tbl_like_wild");
- $sth{'tbl'}->execute;
- unless ($done_create_table{$info_tbl})
-
-#tables must be created only once, and out-of-date info must be
-#cleared once
- {
- $done_create_table{$info_tbl}=1;
- $extra_col_desc{'tbl'}=
- do_create_table("tbl",$info_tbl,
-#add an extra column (database name) at the left
-#and ensure that the table name will be called "Table"
-#(this is unncessesary with
-#SHOW TABLE STATUS, but necessary with SHOW TABLES (which returns a column
-#named "Tables_in_..."))
- "`Database` ".$extra_col_desc{'db'},"`Table`");
- $dbh->do("DELETE FROM $info_tbl WHERE `Database` LIKE $db_like_wild AND `Table` LIKE $tbl_like_wild");
- }
-
- while ($row{'tbl'}=$sth{'tbl'}->fetchrow_arrayref)
- {
- $dbh->do("INSERT INTO $info_tbl VALUES("
- .$dbh->quote($row{'db'}->[0]).","
- .join(',' , ( map $dbh->quote($_), @{$row{'tbl'}} ) ).")");
-
-#for each table, get columns...
-
- if ($opt{'col'})
- {
- $sth{'col'}=$dbh->prepare("SHOW COLUMNS FROM `$row{'tbl'}->[0]` FROM `$row{'db'}->[0]`");
- $sth{'col'}->execute;
- unless ($done_create_table{$info_col})
- {
- $done_create_table{$info_col}=1;
- do_create_table("col",$info_col,
- "`Database` ".$extra_col_desc{'db'}.","
- ."`Table` ".$extra_col_desc{'tbl'}.","
- ."`Seq_in_table` BIGINT(3)");
-#We need to add a sequence number (1 for the first column of the table,
-#2 for the second etc) so that users are able to retrieve columns in order
-#if they want. This is not needed for INDEX
-#(where there is already Seq_in_index)
- $dbh->do("DELETE FROM $info_col WHERE `Database`
- LIKE $db_like_wild
- AND `Table` LIKE $tbl_like_wild");
- }
- my $col_number=0;
- while ($row{'col'}=$sth{'col'}->fetchrow_arrayref)
- {
- $dbh->do("INSERT INTO $info_col VALUES("
- .$dbh->quote($row{'db'}->[0]).","
- .$dbh->quote($row{'tbl'}->[0]).","
- .++$col_number.","
- .join(',' , ( map $dbh->quote($_), @{$row{'col'}} ) ).")");
- }
- }
-
-#and get index.
-
- if ($opt{'idx'})
- {
- $sth{'idx'}=$dbh->prepare("SHOW INDEX FROM `$row{'tbl'}->[0]` FROM `$row{'db'}->[0]`");
- $sth{'idx'}->execute;
- unless ($done_create_table{$info_idx})
- {
- $done_create_table{$info_idx}=1;
- do_create_table("idx",$info_idx,
- "`Database` ".$extra_col_desc{'db'});
- $dbh->do("DELETE FROM $info_idx WHERE `Database`
- LIKE $db_like_wild
- AND `Table` LIKE $tbl_like_wild");
- }
- while ($row{'idx'}=$sth{'idx'}->fetchrow_arrayref)
- {
- $dbh->do("INSERT INTO $info_idx VALUES("
- .$dbh->quote($row{'db'}->[0]).","
- .join(',' , ( map $dbh->quote($_), @{$row{'idx'}} ) ).")");
- }
- }
- }
-}
-
-print "Wrote to database $db_to_write .\n" unless ($opt{'quiet'});
-exit;
-
-
-sub do_create_table
-{
- my ($sth_key,$target_tbl,$extra_col_desc,$first_col_name)=@_;
- my $create_table_query=$extra_col_desc;
- my ($i,$first_col_desc,$col_desc);
-
- for ($i=0;$i<$sth{$sth_key}->{NUM_OF_FIELDS};$i++)
- {
- if ($create_table_query) { $create_table_query.=", "; }
- $col_desc=$sth{$sth_key}->{mysql_type_name}->[$i];
- if ($col_desc =~ /char|int/i)
- {
- $col_desc.="($sth{$sth_key}->{PRECISION}->[$i])";
- }
- elsif ($col_desc =~ /decimal|numeric/i) #(never seen that)
- {
- $col_desc.=
- "($sth{$sth_key}->{PRECISION}->[$i],$sth{$sth_key}->{SCALE}->[$i])";
- }
- elsif ($col_desc !~ /date/i) #date and datetime are OK,
- #no precision or scale for them
- {
- warn "unexpected column type '$col_desc'
-(neither 'char','int','decimal|numeric')
-when creating $target_tbl, hope table creation will go OK\n";
- }
- if ($i==0) {$first_col_desc=$col_desc};
- $create_table_query.=
- ( ($i==0 and $first_col_name) ?
- "$first_col_name " :"`$sth{$sth_key}->{NAME}->[$i]` " )
- .$col_desc;
- }
-if ($create_table_query)
-{
- $dbh->do("CREATE TABLE IF NOT EXISTS $target_tbl ($create_table_query)");
-}
-return $first_col_desc;
-}
-
-__END__
-
-
-=head1 DESCRIPTION
-
-mysql_tableinfo asks a MySQL server information about its
-databases, tables, table columns and index, and stores this
-in tables called `db`, `tbl` (or `tbl_status`), `col`, `idx`
-(with an optional prefix specified with --prefix).
-After that, you can query these information tables, for example
-to build your admin scripts with SQL queries, like
-
-SELECT CONCAT("CHECK TABLE ",`database`,".",`table`," EXTENDED;")
-FROM info.tbl WHERE ... ;
-
-as people usually do with some other RDBMS
-(note: to increase the speed of your queries on the info tables,
-you may add some index on them).
-
-The database_like_wild and table_like_wild instructs the program
-to gather information only about databases and tables
-whose names match these patterns. If the info
-tables already exist, their rows matching the patterns are simply
-deleted and replaced by the new ones. That is,
-old rows not matching the patterns are not touched.
-If the database_like_wild and table_like_wild arguments
-are not specified on the command-line they default to "%".
-
-The program :
-
-- does CREATE DATABASE IF NOT EXISTS database_to_write
-where database_to_write is the database name specified on the command-line.
-
-- does CREATE TABLE IF NOT EXISTS database_to_write.`db`
-
-- fills database_to_write.`db` with the output of
-SHOW DATABASES LIKE database_like_wild
-
-- does CREATE TABLE IF NOT EXISTS database_to_write.`tbl`
-(respectively database_to_write.`tbl_status`
-if the --tbl-status option is on)
-
-- for every found database,
-fills database_to_write.`tbl` (respectively database_to_write.`tbl_status`)
-with the output of
-SHOW TABLES FROM found_db LIKE table_like_wild
-(respectively SHOW TABLE STATUS FROM found_db LIKE table_like_wild)
-
-- if the --col option is on,
- * does CREATE TABLE IF NOT EXISTS database_to_write.`col`
- * for every found table,
- fills database_to_write.`col` with the output of
- SHOW COLUMNS FROM found_tbl FROM found_db
-
-- if the --idx option is on,
- * does CREATE TABLE IF NOT EXISTS database_to_write.`idx`
- * for every found table,
- fills database_to_write.`idx` with the output of
- SHOW INDEX FROM found_tbl FROM found_db
-
-Some options may modify this general scheme (see below).
-
-As mentioned, the contents of the info tables are the output of
-SHOW commands. In fact the contents are slightly more complete :
-
-- the `tbl` (or `tbl_status`) info table
- has an extra column which contains the database name,
-
-- the `col` info table
- has an extra column which contains the table name,
- and an extra column which contains, for each described column,
- the number of this column in the table owning it (this extra column
- is called `Seq_in_table`). `Seq_in_table` makes it possible for you
- to retrieve your columns in sorted order, when you are querying
- the `col` table.
-
-- the `index` info table
- has an extra column which contains the database name.
-
-Caution: info tables contain certain columns (e.g.
-Database, Table, Null...) whose names, as they are MySQL reserved words,
-need to be backquoted (`...`) when used in SQL statements.
-
-Caution: as information fetching and info tables filling happen at the
-same time, info tables may contain inaccurate information about
-themselves.
-
-=head1 OPTIONS
-
-=over 4
-
-=item --clear
-
-Does DROP TABLE on the info tables (only those that the program is
-going to fill, for example if you do not use --col it won't drop
-the `col` table) and processes normally. Does not drop database_to_write.
-
-=item --clear-only
-
-Same as --clear but exits after the DROPs.
-
-=item --col
-
-Adds columns information (into table `col`).
-
-=item --idx
-
-Adds index information (into table `idx`).
-
-=item --prefix prefix
-
-The info tables are named from the concatenation of prefix and,
-respectively, db, tbl (or tbl_status), col, idx. Do not quote ('')
-or backquote (``) prefix.
-
-=item -q, --quiet
-
-Does not warn you about what the script is going to do (DROP TABLE etc)
-and does not ask for a confirmation before starting.
-
-=item --tbl-status
-
-Instead of using SHOW TABLES, uses SHOW TABLE STATUS
-(much more complete information, but slower).
-
-=item --help
-
-Display helpscreen and exit
-
-=item -u, --user=#
-
-user for database login if not current user. Give a user
-who has sufficient privileges (CREATE, ...).
-
-=item -p, --password=#
-
-password to use when connecting to server
-
-=item -h, --host=#
-
-host to connect to
-
-=item -P, --port=#
-
-port to use when connecting to server
-
-=item -S, --socket=#
-
-UNIX domain socket to use when connecting to server
-
-=head1 WARRANTY
-
-This software is free and comes without warranty of any kind.
-
-Patches adding bug fixes, documentation and new features are welcome.
-
-=head1 TO DO
-
-Nothing: starting from MySQL 5.0, this program is replaced by the
-INFORMATION_SCHEMA pseudo-database.
-
-=head1 AUTHOR
-
-2002-06-18 Guilhem Bichot (guilhem.bichot@mines-paris.org)
-
-And all the authors of mysqlhotcopy, which served as a model for
-the structure of the program.
diff --git a/server-tools/Makefile.am b/server-tools/Makefile.am
index ed316b9ac38..a249a6f6792 100644
--- a/server-tools/Makefile.am
+++ b/server-tools/Makefile.am
@@ -1 +1,2 @@
-SUBDIRS= instance-manager
+SUBDIRS = . instance-manager
+DIST_SUBDIRS = . instance-manager
diff --git a/sql/Makefile.am b/sql/Makefile.am
index cabb4fee905..2eab9052ba7 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -29,9 +29,9 @@ libexec_PROGRAMS = mysqld
noinst_PROGRAMS = gen_lex_hash
bin_PROGRAMS = mysql_tzinfo_to_sql
gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@
-LDADD = $(top_builddir)/myisam/libmyisam.a \
- $(top_builddir)/myisammrg/libmyisammrg.a \
- $(top_builddir)/heap/libheap.a \
+LDADD = $(top_builddir)/storage/myisam/libmyisam.a \
+ $(top_builddir)/storage/myisammrg/libmyisammrg.a \
+ $(top_builddir)/storage/heap/libheap.a \
$(top_builddir)/vio/libvio.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/dbug/libdbug.a \
@@ -55,7 +55,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
ha_ndbcluster.h opt_range.h protocol.h \
sql_select.h structs.h table.h sql_udf.h hash_filo.h\
lex.h lex_symbol.h sql_acl.h sql_crypt.h \
- log_event.h sql_repl.h slave.h \
+ log_event.h sql_repl.h slave.h rpl_filter.h \
stacktrace.h sql_sort.h sql_cache.h set_var.h \
spatial.h gstream.h client_settings.h tzfile.h \
tztime.h my_decimal.h\
@@ -63,7 +63,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
parse_file.h sql_view.h sql_trigger.h \
examples/ha_example.h examples/ha_archive.h \
examples/ha_tina.h ha_blackhole.h \
- ha_federated.h
+ ha_federated.h ha_partition.h
mysqld_SOURCES = sql_lex.cc sql_handler.cc \
item.cc item_sum.cc item_buff.cc item_func.cc \
item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \
@@ -89,7 +89,8 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \
sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \
sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \
- slave.cc sql_repl.cc sql_union.cc sql_derived.cc \
+ slave.cc sql_repl.cc rpl_filter.cc \
+ sql_union.cc sql_derived.cc \
client.c sql_client.cc mini_client_errors.c pack.c\
stacktrace.c repl_failsafe.h repl_failsafe.cc \
sql_olap.cc sql_view.cc \
@@ -99,6 +100,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
sp_cache.cc parse_file.cc sql_trigger.cc \
examples/ha_example.cc examples/ha_archive.cc \
examples/ha_tina.cc ha_blackhole.cc \
+ ha_partition.cc sql_partition.cc \
ha_federated.cc
gen_lex_hash_SOURCES = gen_lex_hash.cc
diff --git a/sql/examples/ha_tina.cc b/sql/examples/ha_tina.cc
index 9c774c1f75c..50fec4e2883 100644
--- a/sql/examples/ha_tina.cc
+++ b/sql/examples/ha_tina.cc
@@ -96,7 +96,8 @@ static byte* tina_get_key(TINA_SHARE *share,uint *length,
int get_mmap(TINA_SHARE *share, int write)
{
DBUG_ENTER("ha_tina::get_mmap");
- if (share->mapped_file && munmap(share->mapped_file, share->file_stat.st_size))
+ if (share->mapped_file && my_munmap(share->mapped_file,
+ share->file_stat.st_size))
DBUG_RETURN(1);
if (my_fstat(share->data_file, &share->file_stat, MYF(MY_WME)) == -1)
@@ -105,13 +106,13 @@ int get_mmap(TINA_SHARE *share, int write)
if (share->file_stat.st_size)
{
if (write)
- share->mapped_file= (byte *)mmap(NULL, share->file_stat.st_size,
- PROT_READ|PROT_WRITE, MAP_SHARED,
- share->data_file, 0);
+ share->mapped_file= (byte *)my_mmap(NULL, share->file_stat.st_size,
+ PROT_READ|PROT_WRITE, MAP_SHARED,
+ share->data_file, 0);
else
- share->mapped_file= (byte *)mmap(NULL, share->file_stat.st_size,
- PROT_READ, MAP_PRIVATE,
- share->data_file, 0);
+ share->mapped_file= (byte *)my_mmap(NULL, share->file_stat.st_size,
+ PROT_READ, MAP_PRIVATE,
+ share->data_file, 0);
if ((share->mapped_file ==(caddr_t)-1))
{
/*
@@ -219,7 +220,7 @@ static int free_share(TINA_SHARE *share)
if (!--share->use_count){
/* Drop the mapped file */
if (share->mapped_file)
- munmap(share->mapped_file, share->file_stat.st_size);
+ my_munmap(share->mapped_file, share->file_stat.st_size);
result_code= my_close(share->data_file,MYF(0));
hash_delete(&tina_open_tables, (byte*) share);
thr_lock_delete(&share->lock);
@@ -791,7 +792,7 @@ int ha_tina::rnd_end()
if (my_chsize(share->data_file, length, 0, MYF(MY_WME)))
DBUG_RETURN(-1);
- if (munmap(share->mapped_file, length))
+ if (my_munmap(share->mapped_file, length))
DBUG_RETURN(-1);
/* We set it to null so that get_mmap() won't try to unmap it */
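
The ha_tina change above routes the raw mmap/munmap calls through the my_mmap/my_munmap wrappers so the mapping goes via mysys. As an illustration of what get_mmap() decides (not the mysys implementation), a minimal POSIX sketch that maps an open file read-write (shared) or read-only (private):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>
    #include <cstdio>

    // Map an open file descriptor, mirroring the two branches in get_mmap().
    static void *map_file(int fd, size_t length, bool for_write)
    {
      int prot  = for_write ? PROT_READ | PROT_WRITE : PROT_READ;
      int flags = for_write ? MAP_SHARED : MAP_PRIVATE;
      void *p = mmap(NULL, length, prot, flags, fd, 0);
      return (p == MAP_FAILED) ? NULL : p;
    }

    int main()
    {
      int fd = open("/etc/hostname", O_RDONLY);  // any readable file will do
      if (fd < 0) return 1;
      struct stat st;
      if (fstat(fd, &st) == 0 && st.st_size > 0)
      {
        void *p = map_file(fd, (size_t)st.st_size, false);
        if (p)
        {
          fwrite(p, 1, (size_t)st.st_size, stdout);
          munmap(p, (size_t)st.st_size);
        }
      }
      close(fd);
      return 0;
    }
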
diff --git a/sql/field.cc b/sql/field.cc
index a9b22a2fca9..7661041eca2 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1233,6 +1233,7 @@ Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
flags=null_ptr ? 0: NOT_NULL_FLAG;
comment.str= (char*) "";
comment.length=0;
+ fieldnr= 0;
}
uint Field::offset()
@@ -6283,7 +6284,8 @@ my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value)
}
-int Field_varstring::cmp(const char *a_ptr, const char *b_ptr)
+int Field_varstring::cmp_max(const char *a_ptr, const char *b_ptr,
+ uint max_len)
{
uint a_length, b_length;
int diff;
@@ -6298,6 +6300,8 @@ int Field_varstring::cmp(const char *a_ptr, const char *b_ptr)
a_length= uint2korr(a_ptr);
b_length= uint2korr(b_ptr);
}
+ set_if_smaller(a_length, max_len);
+ set_if_smaller(b_length, max_len);
diff= field_charset->coll->strnncollsp(field_charset,
(const uchar*) a_ptr+
length_bytes,
@@ -6928,13 +6932,16 @@ int Field_blob::cmp(const char *a,uint32 a_length, const char *b,
}
-int Field_blob::cmp(const char *a_ptr, const char *b_ptr)
+int Field_blob::cmp_max(const char *a_ptr, const char *b_ptr,
+ uint max_length)
{
char *blob1,*blob2;
memcpy_fixed(&blob1,a_ptr+packlength,sizeof(char*));
memcpy_fixed(&blob2,b_ptr+packlength,sizeof(char*));
- return Field_blob::cmp(blob1,get_length(a_ptr),
- blob2,get_length(b_ptr));
+ uint a_len= get_length(a_ptr), b_len= get_length(b_ptr);
+ set_if_smaller(a_len, max_length);
+ set_if_smaller(b_len, max_length);
+ return Field_blob::cmp(blob1,a_len,blob2,b_len);
}
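
Both cmp_max variants above clamp the stored lengths to max_length before comparing, so a prefix key (an index over only the first N bytes of a VARCHAR/BLOB) compares just the indexed prefix. A hedged, standalone sketch of that clamping pattern, using plain memcmp in place of the collation-aware strnncollsp:

    #include <algorithm>
    #include <cassert>
    #include <cstring>

    // Compare two length-prefixed values, but only up to max_len bytes each,
    // in the spirit of Field_varstring/Field_blob::cmp_max above.
    static int cmp_max(const char *a, size_t a_len,
                       const char *b, size_t b_len,
                       size_t max_len)
    {
      a_len = std::min(a_len, max_len);       // set_if_smaller(a_len, max_len)
      b_len = std::min(b_len, max_len);
      int diff = memcmp(a, b, std::min(a_len, b_len));
      if (diff)
        return diff;
      return (int)a_len - (int)b_len;         // shorter prefix sorts first
    }

    int main()
    {
      // The values differ only after byte 3: equal under a 3-byte prefix key.
      assert(cmp_max("abcX", 4, "abcY", 4, 3) == 0);
      assert(cmp_max("abcX", 4, "abcY", 4, 4) < 0);
      return 0;
    }
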
@@ -7951,6 +7958,35 @@ my_decimal *Field_bit::val_decimal(my_decimal *deciaml_value)
}
+/*
+ Compare two bit fields using pointers within the record.
+ SYNOPSIS
+ cmp_max()
+ a Pointer to field->ptr in first record
+ b Pointer to field->ptr in second record
+ max_len Maximum length used in index
+ DESCRIPTION
+    This method is called from key_rec_cmp, which is used by the merge
+    sorts done for partitioned index reads and, later, by other similar
+    places.
+    The a and b arguments must point to the field within a record
+    (not necessarily table->record[0]).
+*/
+int Field_bit::cmp_max(const char *a, const char *b, uint max_len)
+{
+ my_ptrdiff_t a_diff= a - ptr;
+ my_ptrdiff_t b_diff= b - ptr;
+ if (bit_len)
+ {
+ int flag;
+ uchar bits_a= get_rec_bits(bit_ptr+a_diff, bit_ofs, bit_len);
+ uchar bits_b= get_rec_bits(bit_ptr+b_diff, bit_ofs, bit_len);
+ if ((flag= (int) (bits_a - bits_b)))
+ return flag;
+ }
+ return memcmp(a, b, field_length);
+}
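
The interesting part of Field_bit::cmp_max is that it derives the record offset from the pointers it is handed (a - ptr, b - ptr) and applies the same offset to the out-of-byte bit fragment. A simplified sketch of reading a small bit run out of a record buffer; extract_bits is a stand-in for the server's bit helper, not the real macro:

    #include <cstdint>
    #include <cstdio>

    // Read 'len' bits (len <= 8) starting at bit offset 'ofs' within *p.
    // Illustrative stand-in for the server's bit-extraction helper.
    static uint8_t extract_bits(const uint8_t *p, unsigned ofs, unsigned len)
    {
      uint16_t two = (uint16_t)p[0] | ((uint16_t)p[1] << 8);
      return (uint8_t)((two >> ofs) & ((1u << len) - 1u));
    }

    int main()
    {
      // Two "records"; the bit fragment lives at the same offset in each.
      uint8_t rec_a[2] = { 0xB4, 0x01 };   // bits 3..5 = 0b110
      uint8_t rec_b[2] = { 0x94, 0x01 };   // bits 3..5 = 0b010
      uint8_t bits_a = extract_bits(rec_a, 3, 3);
      uint8_t bits_b = extract_bits(rec_b, 3, 3);
      printf("%d\n", (int)bits_a - (int)bits_b);   // non-zero: records differ
      return 0;
    }
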
+
+
int Field_bit::key_cmp(const byte *str, uint length)
{
if (bit_len)
diff --git a/sql/field.h b/sql/field.h
index 523cf444c30..9b6df35de43 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -87,7 +87,11 @@ public:
utype unireg_check;
uint32 field_length; // Length of field
uint field_index; // field number in fields array
- uint16 flags;
+ uint32 flags;
+  /* fieldnr is the id of the field (first field = 1); the same
+     numbering is used in key_part.
+   */
+ uint16 fieldnr;
uchar null_bit; // Bit used to test null bit
Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,uchar null_bit_arg,
@@ -150,6 +154,8 @@ public:
virtual enum_field_types type() const =0;
virtual enum_field_types real_type() const { return type(); }
inline int cmp(const char *str) { return cmp(ptr,str); }
+ virtual int cmp_max(const char *a, const char *b, uint max_len)
+ { return cmp(a, b); }
virtual int cmp(const char *,const char *)=0;
virtual int cmp_binary(const char *a,const char *b, uint32 max_length=~0L)
{ return memcmp(a,b,pack_length()); }
@@ -1055,7 +1061,11 @@ public:
longlong val_int(void);
String *val_str(String*,String *);
my_decimal *val_decimal(my_decimal *);
- int cmp(const char *,const char*);
+ int cmp_max(const char *, const char *, uint max_length);
+ int cmp(const char *a,const char*b)
+ {
+ return cmp_max(a, b, ~0);
+ }
void sort_string(char *buff,uint length);
void get_key_image(char *buff,uint length, imagetype type);
void set_key_image(char *buff,uint length);
@@ -1111,7 +1121,9 @@ public:
longlong val_int(void);
String *val_str(String*,String *);
my_decimal *val_decimal(my_decimal *);
- int cmp(const char *,const char*);
+ int cmp_max(const char *, const char *, uint max_length);
+ int cmp(const char *a,const char*b)
+ { return cmp_max(a, b, ~0); }
int cmp(const char *a, uint32 a_length, const char *b, uint32 b_length);
int cmp_binary(const char *a,const char *b, uint32 max_length=~0L);
int key_cmp(const byte *,const byte*);
@@ -1135,6 +1147,10 @@ public:
{
memcpy_fixed(str,ptr+packlength,sizeof(char*));
}
+ inline void get_ptr(char **str, uint row_offset)
+ {
+ memcpy_fixed(str,ptr+packlength+row_offset,sizeof(char*));
+ }
inline void set_ptr(char *length,char *data)
{
memcpy(ptr,length,packlength);
@@ -1303,6 +1319,7 @@ public:
my_decimal *val_decimal(my_decimal *);
int cmp(const char *a, const char *b)
{ return cmp_binary(a, b); }
+ int cmp_max(const char *a, const char *b, uint max_length);
int key_cmp(const byte *a, const byte *b)
{ return cmp_binary((char *) a, (char *) b); }
int key_cmp(const byte *str, uint length);
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index 26e743d4a71..3b9cdbe29f7 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -1372,7 +1372,7 @@ int ha_berkeley::delete_row(const byte * record)
}
-int ha_berkeley::index_init(uint keynr)
+int ha_berkeley::index_init(uint keynr, bool sorted)
{
int error;
DBUG_ENTER("ha_berkeley::index_init");
@@ -1650,7 +1650,7 @@ int ha_berkeley::rnd_init(bool scan)
{
DBUG_ENTER("rnd_init");
current_row.flags=DB_DBT_REALLOC;
- DBUG_RETURN(index_init(primary_key));
+ DBUG_RETURN(index_init(primary_key, 0));
}
int ha_berkeley::rnd_end()
@@ -2158,7 +2158,7 @@ ulonglong ha_berkeley::get_auto_increment()
(void) ha_berkeley::extra(HA_EXTRA_KEYREAD);
/* Set 'active_index' */
- ha_berkeley::index_init(table->s->next_number_index);
+ ha_berkeley::index_init(table->s->next_number_index, 0);
if (!table->s->next_number_key_offset)
{ // Autoincrement at key-start
@@ -2497,7 +2497,7 @@ void ha_berkeley::get_status()
if (!(share->status & STATUS_PRIMARY_KEY_INIT))
{
(void) extra(HA_EXTRA_KEYREAD);
- index_init(primary_key);
+ index_init(primary_key, 0);
if (!index_last(table->record[1]))
share->auto_ident=uint5korr(current_ident);
index_end();
diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h
index aa92908ecde..9c0668b967c 100644
--- a/sql/ha_berkeley.h
+++ b/sql/ha_berkeley.h
@@ -93,7 +93,7 @@ class ha_berkeley: public handler
const char **bas_ext() const;
ulong table_flags(void) const { return int_table_flags; }
uint max_supported_keys() const { return MAX_KEY-1; }
- uint extra_rec_buf_length() { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; }
+ uint extra_rec_buf_length() const { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; }
ha_rows estimate_rows_upper_bound();
const key_map *keys_to_use_for_scanning() { return &key_map_full; }
bool has_transactions() { return 1;}
@@ -104,7 +104,7 @@ class ha_berkeley: public handler
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
int delete_row(const byte * buf);
- int index_init(uint index);
+ int index_init(uint index, bool sorted);
int index_end();
int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
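
This commit threads a new "sorted" flag through handler::index_init in every storage engine, so an engine knows whether the caller actually needs index order (and can, for example, skip a merge sort when it does not). A hedged sketch of what an engine-side override of the new signature looks like; the classes here are simplified stand-ins, not the real handler hierarchy:

    // Sketch only: a simplified handler-like interface.
    class handler_like
    {
    public:
      virtual ~handler_like() {}
      // New signature in this commit: the second argument says whether the
      // result must come back in index order.
      virtual int index_init(unsigned keynr, bool sorted) = 0;
    };

    class ha_example : public handler_like
    {
      unsigned active_index;
      bool     m_sorted;
    public:
      ha_example() : active_index(~0u), m_sorted(false) {}
      int index_init(unsigned keynr, bool sorted)
      {
        active_index = keynr;
        m_sorted     = sorted;   // remembered, as ha_ndbcluster does below
        return 0;
      }
    };

    int main()
    {
      ha_example h;
      return h.index_init(0, /* sorted= */ true);
    }
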
diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc
index 1d7b8cda8e2..63454bf88ed 100644
--- a/sql/ha_federated.cc
+++ b/sql/ha_federated.cc
@@ -1544,9 +1544,6 @@ inline uint field_in_record_is_null(TABLE *table,
int ha_federated::write_row(byte *buf)
{
bool has_fields= FALSE;
- uint all_fields_have_same_query_id= 1;
- ulong current_query_id= 1;
- ulong tmp_query_id= 1;
char insert_buffer[FEDERATED_QUERY_BUFFER_SIZE];
char values_buffer[FEDERATED_QUERY_BUFFER_SIZE];
char insert_field_value_buffer[STRING_BUFFER_USUAL_SIZE];
@@ -1575,14 +1572,6 @@ int ha_federated::write_row(byte *buf)
table->timestamp_field->set_time();
/*
- get the current query id - the fields that we add to the insert
- statement to send to the foreign will not be appended unless they match
- this query id
- */
- current_query_id= table->in_use->query_id;
- DBUG_PRINT("info", ("current query id %d", current_query_id));
-
- /*
start both our field and field values strings
*/
insert_string.append(FEDERATED_INSERT);
@@ -1595,21 +1584,8 @@ int ha_federated::write_row(byte *buf)
values_string.append(FEDERATED_OPENPAREN);
/*
- Even if one field is different, all_fields_same_query_id can't remain
- 0 if it remains 0, then that means no fields were specified in the query
- such as in the case of INSERT INTO table VALUES (val1, val2, valN)
-
- */
- for (field= table->field; *field; field++)
- {
- if (field > table->field && tmp_query_id != (*field)->query_id)
- all_fields_have_same_query_id= 0;
-
- tmp_query_id= (*field)->query_id;
- }
- /*
loop through the field pointer array, add any fields to both the values
- list and the fields list that match the current query id
+    list and the fields list that are part of the write set
You might ask "Why an index variable (has_fields) ?" My answer is that
we need to count how many fields we actually need
@@ -1617,8 +1593,7 @@ int ha_federated::write_row(byte *buf)
for (field= table->field; *field; field++)
{
/* if there is a query id and if it's equal to the current query id */
- if (((*field)->query_id && (*field)->query_id == current_query_id)
- || all_fields_have_same_query_id)
+ if (ha_get_bit_in_write_set((*field)->fieldnr))
{
/*
There are some fields. This will be used later to determine
@@ -2082,7 +2057,7 @@ error:
}
/* Initialized at each key walk (called multiple times unlike rnd_init()) */
-int ha_federated::index_init(uint keynr)
+int ha_federated::index_init(uint keynr, bool sorted)
{
int error;
DBUG_ENTER("ha_federated::index_init");
diff --git a/sql/ha_federated.h b/sql/ha_federated.h
index 58b78ab0dde..ecaa59d1268 100644
--- a/sql/ha_federated.h
+++ b/sql/ha_federated.h
@@ -248,7 +248,7 @@ public:
int write_row(byte *buf);
int update_row(const byte *old_data, byte *new_data);
int delete_row(const byte *buf);
- int index_init(uint keynr);
+ int index_init(uint keynr, bool sorted);
int index_read(byte *buf, const byte *key,
uint key_len, enum ha_rkey_function find_flag);
int index_read_idx(byte *buf, uint idx, const byte *key,
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 69451493d4b..3f86399aacd 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -107,28 +107,28 @@ typedef byte mysql_byte;
/* Include necessary InnoDB headers */
extern "C" {
-#include "../innobase/include/univ.i"
-#include "../innobase/include/os0file.h"
-#include "../innobase/include/os0thread.h"
-#include "../innobase/include/srv0start.h"
-#include "../innobase/include/srv0srv.h"
-#include "../innobase/include/trx0roll.h"
-#include "../innobase/include/trx0trx.h"
-#include "../innobase/include/trx0sys.h"
-#include "../innobase/include/mtr0mtr.h"
-#include "../innobase/include/row0ins.h"
-#include "../innobase/include/row0mysql.h"
-#include "../innobase/include/row0sel.h"
-#include "../innobase/include/row0upd.h"
-#include "../innobase/include/log0log.h"
-#include "../innobase/include/lock0lock.h"
-#include "../innobase/include/dict0crea.h"
-#include "../innobase/include/btr0cur.h"
-#include "../innobase/include/btr0btr.h"
-#include "../innobase/include/fsp0fsp.h"
-#include "../innobase/include/sync0sync.h"
-#include "../innobase/include/fil0fil.h"
-#include "../innobase/include/trx0xa.h"
+#include "../storage/innobase/include/univ.i"
+#include "../storage/innobase/include/os0file.h"
+#include "../storage/innobase/include/os0thread.h"
+#include "../storage/innobase/include/srv0start.h"
+#include "../storage/innobase/include/srv0srv.h"
+#include "../storage/innobase/include/trx0roll.h"
+#include "../storage/innobase/include/trx0trx.h"
+#include "../storage/innobase/include/trx0sys.h"
+#include "../storage/innobase/include/mtr0mtr.h"
+#include "../storage/innobase/include/row0ins.h"
+#include "../storage/innobase/include/row0mysql.h"
+#include "../storage/innobase/include/row0sel.h"
+#include "../storage/innobase/include/row0upd.h"
+#include "../storage/innobase/include/log0log.h"
+#include "../storage/innobase/include/lock0lock.h"
+#include "../storage/innobase/include/dict0crea.h"
+#include "../storage/innobase/include/btr0cur.h"
+#include "../storage/innobase/include/btr0btr.h"
+#include "../storage/innobase/include/fsp0fsp.h"
+#include "../storage/innobase/include/sync0sync.h"
+#include "../storage/innobase/include/fil0fil.h"
+#include "../storage/innobase/include/trx0xa.h"
}
#define HA_INNOBASE_ROWS_IN_TABLE 10000 /* to get optimization right */
@@ -2999,7 +2999,8 @@ build_template(
goto include_field;
}
- if (thd->query_id == field->query_id) {
+ if (table->file->ha_get_bit_in_read_set(i+1) ||
+ table->file->ha_get_bit_in_write_set(i+1)) {
/* This field is needed in the query */
goto include_field;
@@ -3619,7 +3620,8 @@ int
ha_innobase::index_init(
/*====================*/
/* out: 0 or error number */
- uint keynr) /* in: key (index) number */
+ uint keynr, /* in: key (index) number */
+ bool sorted) /* in: 1 if result MUST be sorted according to index */
{
int error = 0;
DBUG_ENTER("index_init");
@@ -6670,7 +6672,7 @@ ha_innobase::innobase_read_and_init_auto_inc(
}
(void) extra(HA_EXTRA_KEYREAD);
- index_init(table->s->next_number_index);
+ index_init(table->s->next_number_index, 1);
/* Starting from 5.0.9, we use a consistent read to read the auto-inc
column maximum value. This eliminates the spurious deadlocks caused
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 1584a2182c9..acfe8b950eb 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -124,7 +124,7 @@ class ha_innobase: public handler
int delete_row(const byte * buf);
void unlock_row();
- int index_init(uint index);
+ int index_init(uint index, bool sorted);
int index_end();
int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
@@ -152,6 +152,16 @@ class ha_innobase: public handler
int transactional_table_lock(THD *thd, int lock_type);
int start_stmt(THD *thd);
+ int ha_retrieve_all_cols()
+ {
+ ha_set_all_bits_in_read_set();
+ return extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ }
+ int ha_retrieve_all_pk()
+ {
+ ha_set_primary_key_in_read_set();
+ return extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
+ }
void position(byte *record);
ha_rows records_in_range(uint inx, key_range *min_key, key_range
*max_key);
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index fefa05e92b0..2595a1cca26 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -27,8 +27,8 @@
#ifndef MASTER
#include "../srclib/myisam/myisamdef.h"
#else
-#include "../myisam/myisamdef.h"
-#include "../myisam/rt_index.h"
+#include "../storage/myisam/myisamdef.h"
+#include "../storage/myisam/rt_index.h"
#endif
ulong myisam_recover_options= HA_RECOVER_NONE;
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
index 8c4b4e790b1..ae17b60b7e1 100644
--- a/sql/ha_myisammrg.cc
+++ b/sql/ha_myisammrg.cc
@@ -25,7 +25,7 @@
#ifndef MASTER
#include "../srclib/myisammrg/myrg_def.h"
#else
-#include "../myisammrg/myrg_def.h"
+#include "../storage/myisammrg/myrg_def.h"
#endif
/*****************************************************************************
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 9e6725178d5..59d05368779 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -34,6 +34,7 @@
// options from from mysqld.cc
extern my_bool opt_ndb_optimized_node_selection;
+extern my_bool opt_ndb_linear_hash;
extern const char *opt_ndbcluster_connectstring;
// Default value for parallelism
@@ -41,7 +42,7 @@ static const int parallelism= 0;
// Default value for max number of transactions
// createable against NDB from this handler
-static const int max_transactions= 2;
+static const int max_transactions= 3; // should really be 2, but one transaction too many is allocated when LOCK TABLE is used
static const char *ha_ndb_ext=".ndb";
@@ -100,6 +101,7 @@ static HASH ndbcluster_open_tables;
static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length,
my_bool not_used __attribute__((unused)));
+static void ndb_set_fragmentation(NDBTAB & tab, TABLE *table, uint pk_len);
static NDB_SHARE *get_share(const char *table_name);
static void free_share(NDB_SHARE *share);
@@ -855,21 +857,18 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
/*
Check if any set or get of blob value in current query.
*/
-bool ha_ndbcluster::uses_blob_value(bool all_fields)
+bool ha_ndbcluster::uses_blob_value()
{
if (table->s->blob_fields == 0)
return FALSE;
- if (all_fields)
- return TRUE;
{
uint no_fields= table->s->fields;
int i;
- THD *thd= current_thd;
// They always put blobs at the end..
for (i= no_fields - 1; i >= 0; i--)
{
- Field *field= table->field[i];
- if (thd->query_id == field->query_id)
+ if ((m_write_op && ha_get_bit_in_write_set(i+1)) ||
+ (!m_write_op && ha_get_bit_in_read_set(i+1)))
{
return TRUE;
}
@@ -1142,7 +1141,7 @@ int ha_ndbcluster::get_ndb_lock_type(enum thr_lock_type type)
{
if (type >= TL_WRITE_ALLOW_WRITE)
return NdbOperation::LM_Exclusive;
- else if (uses_blob_value(m_retrieve_all_fields))
+ else if (uses_blob_value())
return NdbOperation::LM_Read;
else
return NdbOperation::LM_CommittedRead;
@@ -1294,17 +1293,14 @@ inline
int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
{
uint i;
- THD *thd= current_thd;
-
DBUG_ENTER("define_read_attrs");
// Define attributes to read
for (i= 0; i < table->s->fields; i++)
{
Field *field= table->field[i];
- if ((thd->query_id == field->query_id) ||
- ((field->flags & PRI_KEY_FLAG)) ||
- m_retrieve_all_fields)
+ if (ha_get_bit_in_read_set(i+1) ||
+ ((field->flags & PRI_KEY_FLAG)))
{
if (get_ndb_value(op, field, i, buf))
ERR_RETURN(op->getNdbError());
@@ -1331,11 +1327,13 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
DBUG_RETURN(0);
}
+
/*
Read one record from NDB using primary key
*/
-int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
+int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf,
+ uint32 part_id)
{
uint no_fields= table->s->fields;
NdbConnection *trans= m_active_trans;
@@ -1345,6 +1343,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
DBUG_ENTER("pk_read");
DBUG_PRINT("enter", ("key_len: %u", key_len));
DBUG_DUMP("key", (char*)key, key_len);
+ m_write_op= FALSE;
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
@@ -1352,6 +1351,8 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
op->readTuple(lm) != 0)
ERR_RETURN(trans->getNdbError());
+ if (m_use_partition_function)
+ op->setPartitionId(part_id);
if (table->s->primary_key == MAX_KEY)
{
// This table has no primary key, use "hidden" primary key
@@ -1389,17 +1390,20 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
Read one complementing record from NDB using primary key from old_data
*/
-int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
+int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data,
+ uint32 old_part_id)
{
uint no_fields= table->s->fields, i;
NdbTransaction *trans= m_active_trans;
NdbOperation *op;
- THD *thd= current_thd;
DBUG_ENTER("complemented_pk_read");
+ m_write_op= FALSE;
- if (m_retrieve_all_fields)
+ if (ha_get_all_bit_in_read_set())
+ {
// We have allready retrieved all fields, nothing to complement
DBUG_RETURN(0);
+ }
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
@@ -1409,12 +1413,16 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
int res;
if ((res= set_primary_key_from_record(op, old_data)))
ERR_RETURN(trans->getNdbError());
+
+ if (m_use_partition_function)
+ op->setPartitionId(old_part_id);
+
// Read all unreferenced non-key field(s)
for (i= 0; i < no_fields; i++)
{
Field *field= table->field[i];
if (!((field->flags & PRI_KEY_FLAG) ||
- (thd->query_id == field->query_id)))
+ (ha_get_bit_in_read_set(i+1))))
{
if (get_ndb_value(op, field, i, new_data))
ERR_RETURN(trans->getNdbError());
@@ -1438,7 +1446,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
{
Field *field= table->field[i];
if (!((field->flags & PRI_KEY_FLAG) ||
- (thd->query_id == field->query_id)))
+ (ha_get_bit_in_read_set(i+1))))
{
m_value[i].ptr= NULL;
}
@@ -1467,6 +1475,17 @@ int ha_ndbcluster::peek_row(const byte *record)
if ((res= set_primary_key_from_record(op, record)))
ERR_RETURN(trans->getNdbError());
+ if (m_use_partition_function)
+ {
+ uint32 part_id;
+ int error;
+ if ((error= m_part_info->get_partition_id(m_part_info, &part_id)))
+ {
+ DBUG_RETURN(error);
+ }
+ op->setPartitionId(part_id);
+ }
+
if (execute_no_commit_ie(this,trans) != 0)
{
table->status= STATUS_NOT_FOUND;
@@ -1805,7 +1824,8 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
const key_range *end_key,
- bool sorted, bool descending, byte* buf)
+ bool sorted, bool descending,
+ byte* buf, part_id_range *part_spec)
{
int res;
bool restart;
@@ -1816,6 +1836,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
DBUG_PRINT("enter", ("index: %u, sorted: %d, descending: %d",
active_index, sorted, descending));
DBUG_PRINT("enter", ("Starting new ordered scan on %s", m_tabname));
+ m_write_op= FALSE;
// Check that sorted seems to be initialised
DBUG_ASSERT(sorted == 0 || sorted == 1);
@@ -1830,11 +1851,17 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
(const NDBTAB *) m_table)) ||
op->readTuples(lm, 0, parallelism, sorted, descending))
ERR_RETURN(trans->getNdbError());
+ if (m_use_partition_function && part_spec != NULL &&
+ part_spec->start_part == part_spec->end_part)
+ op->setPartitionId(part_spec->start_part);
m_active_cursor= op;
} else {
restart= TRUE;
op= (NdbIndexScanOperation*)m_active_cursor;
+ if (m_use_partition_function && part_spec != NULL &&
+ part_spec->start_part == part_spec->end_part)
+ op->setPartitionId(part_spec->start_part);
DBUG_ASSERT(op->getSorted() == sorted);
DBUG_ASSERT(op->getLockMode() ==
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type));
@@ -1875,6 +1902,7 @@ int ha_ndbcluster::full_table_scan(byte *buf)
DBUG_ENTER("full_table_scan");
DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname));
+ m_write_op= FALSE;
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
@@ -1904,6 +1932,7 @@ int ha_ndbcluster::write_row(byte *record)
NdbOperation *op;
int res;
THD *thd= current_thd;
+ m_write_op= TRUE;
DBUG_ENTER("write_row");
@@ -1932,6 +1961,17 @@ int ha_ndbcluster::write_row(byte *record)
if (res != 0)
ERR_RETURN(trans->getNdbError());
+ if (m_use_partition_function)
+ {
+ uint32 part_id;
+ int error;
+ if ((error= m_part_info->get_partition_id(m_part_info, &part_id)))
+ {
+ DBUG_RETURN(error);
+ }
+ op->setPartitionId(part_id);
+ }
+
if (table->s->primary_key == MAX_KEY)
{
// Table has hidden primary key
@@ -2089,25 +2129,35 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
NdbScanOperation* cursor= m_active_cursor;
NdbOperation *op;
uint i;
+ uint32 old_part_id= 0, new_part_id= 0;
+ int error;
DBUG_ENTER("update_row");
+ m_write_op= TRUE;
statistic_increment(thd->status_var.ha_update_count, &LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
{
table->timestamp_field->set_time();
- // Set query_id so that field is really updated
- table->timestamp_field->query_id= thd->query_id;
+ ha_set_bit_in_write_set(table->timestamp_field->fieldnr);
+ }
+
+ if (m_use_partition_function &&
+ (error= get_parts_for_update(old_data, new_data, table->record[0],
+ m_part_info, &old_part_id, &new_part_id)))
+ {
+ DBUG_RETURN(error);
}
/* Check for update of primary key for special handling */
if ((table->s->primary_key != MAX_KEY) &&
- (key_cmp(table->s->primary_key, old_data, new_data)))
+ (key_cmp(table->s->primary_key, old_data, new_data)) ||
+ (old_part_id != new_part_id))
{
int read_res, insert_res, delete_res, undo_res;
DBUG_PRINT("info", ("primary key update, doing pk read+delete+insert"));
// Get all old fields, since we optimize away fields not in query
- read_res= complemented_pk_read(old_data, new_data);
+ read_res= complemented_pk_read(old_data, new_data, old_part_id);
if (read_res)
{
DBUG_PRINT("info", ("pk read failed"));
@@ -2161,8 +2211,10 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
if (!(op= cursor->updateCurrentTuple()))
ERR_RETURN(trans->getNdbError());
m_ops_pending++;
- if (uses_blob_value(FALSE))
+ if (uses_blob_value())
m_blobs_pending= TRUE;
+ if (m_use_partition_function)
+ cursor->setPartitionId(new_part_id);
}
else
{
@@ -2170,6 +2222,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
op->updateTuple() != 0)
ERR_RETURN(trans->getNdbError());
+ if (m_use_partition_function)
+ op->setPartitionId(new_part_id);
if (table->s->primary_key == MAX_KEY)
{
// This table has no primary key, use "hidden" primary key
@@ -2199,7 +2253,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
for (i= 0; i < table->s->fields; i++)
{
Field *field= table->field[i];
- if (((thd->query_id == field->query_id) || m_retrieve_all_fields) &&
+ if (ha_get_bit_in_write_set(i+1) &&
(!(field->flags & PRI_KEY_FLAG)) &&
set_ndb_value(op, field, i))
ERR_RETURN(op->getNdbError());
@@ -2225,11 +2279,21 @@ int ha_ndbcluster::delete_row(const byte *record)
NdbTransaction *trans= m_active_trans;
NdbScanOperation* cursor= m_active_cursor;
NdbOperation *op;
+ uint32 part_id;
+ int error;
DBUG_ENTER("delete_row");
+ m_write_op= TRUE;
statistic_increment(thd->status_var.ha_delete_count,&LOCK_status);
m_rows_changed++;
+ if (m_use_partition_function &&
+ (error= get_part_for_delete(record, table->record[0], m_part_info,
+ &part_id)))
+ {
+ DBUG_RETURN(error);
+ }
+
if (cursor)
{
/*
@@ -2244,6 +2308,9 @@ int ha_ndbcluster::delete_row(const byte *record)
ERR_RETURN(trans->getNdbError());
m_ops_pending++;
+ if (m_use_partition_function)
+ cursor->setPartitionId(part_id);
+
no_uncommitted_rows_update(-1);
if (!m_primary_key_update)
@@ -2257,6 +2324,9 @@ int ha_ndbcluster::delete_row(const byte *record)
op->deleteTuple() != 0)
ERR_RETURN(trans->getNdbError());
+ if (m_use_partition_function)
+ op->setPartitionId(part_id);
+
no_uncommitted_rows_update(-1);
if (table->s->primary_key == MAX_KEY)
@@ -2382,8 +2452,6 @@ void ha_ndbcluster::print_results()
DBUG_ENTER("print_results");
#ifndef DBUG_OFF
- const NDBTAB *tab= (const NDBTAB*) m_table;
-
if (!_db_on_)
DBUG_VOID_RETURN;
@@ -2438,11 +2506,13 @@ print_value:
}
-int ha_ndbcluster::index_init(uint index)
+int ha_ndbcluster::index_init(uint index, bool sorted)
{
DBUG_ENTER("ha_ndbcluster::index_init");
- DBUG_PRINT("enter", ("index: %u", index));
- DBUG_RETURN(handler::index_init(index));
+ DBUG_PRINT("enter", ("index: %u sorted: %d", index, sorted));
+ active_index= index;
+ m_sorted= sorted;
+ DBUG_RETURN(0);
}
@@ -2479,55 +2549,16 @@ int ha_ndbcluster::index_read(byte *buf,
const byte *key, uint key_len,
enum ha_rkey_function find_flag)
{
+ key_range start_key;
+ bool descending= FALSE;
DBUG_ENTER("ha_ndbcluster::index_read");
DBUG_PRINT("enter", ("active_index: %u, key_len: %u, find_flag: %d",
active_index, key_len, find_flag));
- int error;
- ndb_index_type type= get_index_type(active_index);
- const KEY* key_info= table->key_info+active_index;
- switch (type){
- case PRIMARY_KEY_ORDERED_INDEX:
- case PRIMARY_KEY_INDEX:
- if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len)
- {
- if (m_active_cursor && (error= close_scan()))
- DBUG_RETURN(error);
- DBUG_RETURN(pk_read(key, key_len, buf));
- }
- else if (type == PRIMARY_KEY_INDEX)
- {
- DBUG_RETURN(1);
- }
- break;
- case UNIQUE_ORDERED_INDEX:
- case UNIQUE_INDEX:
- if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len &&
- !check_null_in_key(key_info, key, key_len))
- {
- if (m_active_cursor && (error= close_scan()))
- DBUG_RETURN(error);
- DBUG_RETURN(unique_index_read(key, key_len, buf));
- }
- else if (type == UNIQUE_INDEX)
- {
- DBUG_RETURN(1);
- }
- break;
- case ORDERED_INDEX:
- break;
- default:
- case UNDEFINED_INDEX:
- DBUG_ASSERT(FALSE);
- DBUG_RETURN(1);
- break;
- }
-
- key_range start_key;
start_key.key= key;
start_key.length= key_len;
start_key.flag= find_flag;
- bool descending= FALSE;
+ descending= FALSE;
switch (find_flag) {
case HA_READ_KEY_OR_PREV:
case HA_READ_BEFORE_KEY:
@@ -2538,8 +2569,8 @@ int ha_ndbcluster::index_read(byte *buf,
default:
break;
}
- error= ordered_index_scan(&start_key, 0, TRUE, descending, buf);
- DBUG_RETURN(error == HA_ERR_END_OF_FILE ? HA_ERR_KEY_NOT_FOUND : error);
+ DBUG_RETURN(read_range_first_to_buf(&start_key, 0, descending,
+ m_sorted, buf));
}
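
index_read now just packages the key into a key_range and lets read_range_first_to_buf pick the access path; the only local decision left is whether the find flag implies a descending scan. A small sketch of that flag-to-direction mapping, with stand-in enum values in place of the HA_READ_* constants:

    #include <cstdio>

    // Stand-ins for the relevant HA_READ_* find flags.
    enum FindFlag
    {
      READ_KEY_EXACT,
      READ_KEY_OR_NEXT,
      READ_KEY_OR_PREV,
      READ_BEFORE_KEY,
      READ_PREFIX_LAST,
      READ_PREFIX_LAST_OR_PREV
    };

    // Flags that ask for the key or something before it mean the scan has to
    // run backwards through the index, as in the switch above.
    static bool is_descending(FindFlag flag)
    {
      switch (flag)
      {
      case READ_KEY_OR_PREV:
      case READ_BEFORE_KEY:
      case READ_PREFIX_LAST:
      case READ_PREFIX_LAST_OR_PREV:
        return true;
      default:
        return false;
      }
    }

    int main()
    {
      printf("%d %d\n", is_descending(READ_BEFORE_KEY), is_descending(READ_KEY_EXACT));
      return 0;
    }
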
@@ -2550,7 +2581,7 @@ int ha_ndbcluster::index_read_idx(byte *buf, uint index_no,
statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status);
DBUG_ENTER("ha_ndbcluster::index_read_idx");
DBUG_PRINT("enter", ("index_no: %u, key_len: %u", index_no, key_len));
- index_init(index_no);
+ index_init(index_no, 0);
DBUG_RETURN(index_read(buf, key, key_len, find_flag));
}
@@ -2581,7 +2612,7 @@ int ha_ndbcluster::index_first(byte *buf)
// Start the ordered index scan and fetch the first row
// Only HA_READ_ORDER indexes get called by index_first
- DBUG_RETURN(ordered_index_scan(0, 0, TRUE, FALSE, buf));
+ DBUG_RETURN(ordered_index_scan(0, 0, TRUE, FALSE, buf, NULL));
}
@@ -2589,7 +2620,7 @@ int ha_ndbcluster::index_last(byte *buf)
{
DBUG_ENTER("ha_ndbcluster::index_last");
statistic_increment(current_thd->status_var.ha_read_last_count,&LOCK_status);
- DBUG_RETURN(ordered_index_scan(0, 0, TRUE, TRUE, buf));
+ DBUG_RETURN(ordered_index_scan(0, 0, TRUE, TRUE, buf, NULL));
}
int ha_ndbcluster::index_read_last(byte * buf, const byte * key, uint key_len)
@@ -2598,66 +2629,76 @@ int ha_ndbcluster::index_read_last(byte * buf, const byte * key, uint key_len)
DBUG_RETURN(index_read(buf, key, key_len, HA_READ_PREFIX_LAST));
}
-inline
int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
const key_range *end_key,
- bool eq_r, bool sorted,
+ bool desc, bool sorted,
byte* buf)
{
- KEY* key_info;
- int error= 1;
+ part_id_range part_spec;
+ ndb_index_type type= get_index_type(active_index);
+ const KEY* key_info= table->key_info+active_index;
+ int error;
DBUG_ENTER("ha_ndbcluster::read_range_first_to_buf");
- DBUG_PRINT("info", ("eq_r: %d, sorted: %d", eq_r, sorted));
+ DBUG_PRINT("info", ("desc: %d, sorted: %d", desc, sorted));
- switch (get_index_type(active_index)){
+ if (m_use_partition_function)
+ {
+ get_partition_set(table, buf, active_index, start_key, &part_spec);
+ if (part_spec.start_part > part_spec.end_part)
+ {
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ }
+ else if (part_spec.start_part == part_spec.end_part)
+ {
+ /*
+        Only one partition needs to be scanned; if a sorted result was
+        requested we no longer need it, since the output of a single
+        ordered partition index is already sorted.
+ */
+ sorted= FALSE;
+ }
+ }
+ m_write_op= FALSE;
+ switch (type){
case PRIMARY_KEY_ORDERED_INDEX:
case PRIMARY_KEY_INDEX:
- key_info= table->key_info + active_index;
if (start_key &&
start_key->length == key_info->key_length &&
start_key->flag == HA_READ_KEY_EXACT)
{
if (m_active_cursor && (error= close_scan()))
DBUG_RETURN(error);
- error= pk_read(start_key->key, start_key->length, buf);
- DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
+ DBUG_RETURN(pk_read(start_key->key, start_key->length, buf,
+ part_spec.start_part));
}
break;
case UNIQUE_ORDERED_INDEX:
case UNIQUE_INDEX:
- key_info= table->key_info + active_index;
if (start_key && start_key->length == key_info->key_length &&
start_key->flag == HA_READ_KEY_EXACT &&
!check_null_in_key(key_info, start_key->key, start_key->length))
{
if (m_active_cursor && (error= close_scan()))
DBUG_RETURN(error);
- error= unique_index_read(start_key->key, start_key->length, buf);
- DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
+ DBUG_RETURN(unique_index_read(start_key->key, start_key->length, buf));
}
break;
default:
break;
}
-
// Start the ordered index scan and fetch the first row
- error= ordered_index_scan(start_key, end_key, sorted, FALSE, buf);
- DBUG_RETURN(error);
+ DBUG_RETURN(ordered_index_scan(start_key, end_key, sorted, desc, buf,
+ &part_spec));
}
-
int ha_ndbcluster::read_range_first(const key_range *start_key,
const key_range *end_key,
bool eq_r, bool sorted)
{
byte* buf= table->record[0];
DBUG_ENTER("ha_ndbcluster::read_range_first");
-
- DBUG_RETURN(read_range_first_to_buf(start_key,
- end_key,
- eq_r,
- sorted,
- buf));
+ DBUG_RETURN(read_range_first_to_buf(start_key, end_key, FALSE,
+ sorted, buf));
}
int ha_ndbcluster::read_range_next()
@@ -2683,7 +2724,7 @@ int ha_ndbcluster::rnd_init(bool scan)
DBUG_RETURN(-1);
}
}
- index_init(table->s->primary_key);
+ index_init(table->s->primary_key, 0);
DBUG_RETURN(0);
}
@@ -2750,7 +2791,20 @@ int ha_ndbcluster::rnd_pos(byte *buf, byte *pos)
&LOCK_status);
// The primary key for the record is stored in pos
// Perform a pk_read using primary key "index"
- DBUG_RETURN(pk_read(pos, ref_length, buf));
+ {
+ part_id_range part_spec;
+ if (m_use_partition_function)
+ {
+ key_range key_spec;
+ KEY *key_info= table->key_info + active_index;
+ key_spec.key= pos;
+ key_spec.length= ref_length;
+ key_spec.flag= HA_READ_KEY_EXACT;
+ get_full_part_id_from_key(table, buf, key_info, &key_spec, &part_spec);
+ DBUG_ASSERT(part_spec.start_part == part_spec.end_part);
+ }
+ DBUG_RETURN(pk_read(pos, ref_length, buf, part_spec.start_part));
+ }
}
@@ -2873,83 +2927,11 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
{
DBUG_ENTER("extra");
switch (operation) {
- case HA_EXTRA_NORMAL: /* Optimize for space (def) */
- DBUG_PRINT("info", ("HA_EXTRA_NORMAL"));
- break;
- case HA_EXTRA_QUICK: /* Optimize for speed */
- DBUG_PRINT("info", ("HA_EXTRA_QUICK"));
- break;
case HA_EXTRA_RESET: /* Reset database to after open */
DBUG_PRINT("info", ("HA_EXTRA_RESET"));
DBUG_PRINT("info", ("Clearing condition stack"));
cond_clear();
break;
- case HA_EXTRA_CACHE: /* Cash record in HA_rrnd() */
- DBUG_PRINT("info", ("HA_EXTRA_CACHE"));
- break;
- case HA_EXTRA_NO_CACHE: /* End cacheing of records (def) */
- DBUG_PRINT("info", ("HA_EXTRA_NO_CACHE"));
- break;
- case HA_EXTRA_NO_READCHECK: /* No readcheck on update */
- DBUG_PRINT("info", ("HA_EXTRA_NO_READCHECK"));
- break;
- case HA_EXTRA_READCHECK: /* Use readcheck (def) */
- DBUG_PRINT("info", ("HA_EXTRA_READCHECK"));
- break;
- case HA_EXTRA_KEYREAD: /* Read only key to database */
- DBUG_PRINT("info", ("HA_EXTRA_KEYREAD"));
- break;
- case HA_EXTRA_NO_KEYREAD: /* Normal read of records (def) */
- DBUG_PRINT("info", ("HA_EXTRA_NO_KEYREAD"));
- break;
- case HA_EXTRA_NO_USER_CHANGE: /* No user is allowed to write */
- DBUG_PRINT("info", ("HA_EXTRA_NO_USER_CHANGE"));
- break;
- case HA_EXTRA_KEY_CACHE:
- DBUG_PRINT("info", ("HA_EXTRA_KEY_CACHE"));
- break;
- case HA_EXTRA_NO_KEY_CACHE:
- DBUG_PRINT("info", ("HA_EXTRA_NO_KEY_CACHE"));
- break;
- case HA_EXTRA_WAIT_LOCK: /* Wait until file is avalably (def) */
- DBUG_PRINT("info", ("HA_EXTRA_WAIT_LOCK"));
- break;
- case HA_EXTRA_NO_WAIT_LOCK: /* If file is locked, return quickly */
- DBUG_PRINT("info", ("HA_EXTRA_NO_WAIT_LOCK"));
- break;
- case HA_EXTRA_WRITE_CACHE: /* Use write cache in ha_write() */
- DBUG_PRINT("info", ("HA_EXTRA_WRITE_CACHE"));
- break;
- case HA_EXTRA_FLUSH_CACHE: /* flush write_record_cache */
- DBUG_PRINT("info", ("HA_EXTRA_FLUSH_CACHE"));
- break;
- case HA_EXTRA_NO_KEYS: /* Remove all update of keys */
- DBUG_PRINT("info", ("HA_EXTRA_NO_KEYS"));
- break;
- case HA_EXTRA_KEYREAD_CHANGE_POS: /* Keyread, but change pos */
- DBUG_PRINT("info", ("HA_EXTRA_KEYREAD_CHANGE_POS")); /* xxxxchk -r must be used */
- break;
- case HA_EXTRA_REMEMBER_POS: /* Remember pos for next/prev */
- DBUG_PRINT("info", ("HA_EXTRA_REMEMBER_POS"));
- break;
- case HA_EXTRA_RESTORE_POS:
- DBUG_PRINT("info", ("HA_EXTRA_RESTORE_POS"));
- break;
- case HA_EXTRA_REINIT_CACHE: /* init cache from current record */
- DBUG_PRINT("info", ("HA_EXTRA_REINIT_CACHE"));
- break;
- case HA_EXTRA_FORCE_REOPEN: /* Datafile have changed on disk */
- DBUG_PRINT("info", ("HA_EXTRA_FORCE_REOPEN"));
- break;
- case HA_EXTRA_FLUSH: /* Flush tables to disk */
- DBUG_PRINT("info", ("HA_EXTRA_FLUSH"));
- break;
- case HA_EXTRA_NO_ROWS: /* Don't write rows */
- DBUG_PRINT("info", ("HA_EXTRA_NO_ROWS"));
- break;
- case HA_EXTRA_RESET_STATE: /* Reset positions */
- DBUG_PRINT("info", ("HA_EXTRA_RESET_STATE"));
- break;
case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/
DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
if (current_thd->lex->sql_command == SQLCOM_REPLACE)
@@ -2968,34 +2950,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
m_use_write= FALSE;
m_ignore_dup_key= FALSE;
break;
- case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those
- where field->query_id is the same as
- the current query id */
- DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_ALL_COLS"));
- m_retrieve_all_fields= TRUE;
- break;
- case HA_EXTRA_PREPARE_FOR_DELETE:
- DBUG_PRINT("info", ("HA_EXTRA_PREPARE_FOR_DELETE"));
- break;
- case HA_EXTRA_PREPARE_FOR_UPDATE: /* Remove read cache if problems */
- DBUG_PRINT("info", ("HA_EXTRA_PREPARE_FOR_UPDATE"));
- break;
- case HA_EXTRA_PRELOAD_BUFFER_SIZE:
- DBUG_PRINT("info", ("HA_EXTRA_PRELOAD_BUFFER_SIZE"));
- break;
- case HA_EXTRA_RETRIEVE_PRIMARY_KEY:
- DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_PRIMARY_KEY"));
- m_retrieve_primary_key= TRUE;
- break;
- case HA_EXTRA_CHANGE_KEY_TO_UNIQUE:
- DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_UNIQUE"));
- break;
- case HA_EXTRA_CHANGE_KEY_TO_DUP:
- DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_DUP"));
- case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
- DBUG_PRINT("info", ("HA_EXTRA_KEYREAD_PRESERVE_FIELDS"));
+ default:
break;
-
}
DBUG_RETURN(0);
@@ -3274,8 +3230,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
DBUG_ASSERT(m_active_trans);
// Start of transaction
m_rows_changed= 0;
- m_retrieve_all_fields= FALSE;
- m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
{
NDBDICT *dict= ndb->getDictionary();
@@ -3411,8 +3365,6 @@ int ha_ndbcluster::start_stmt(THD *thd)
m_active_trans= trans;
// Start of statement
- m_retrieve_all_fields= FALSE;
- m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
DBUG_RETURN(error);
@@ -3787,56 +3739,6 @@ static int create_ndb_column(NDBCOL &col,
return 0;
}
-/*
- Create a table in NDB Cluster
- */
-
-static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
-{
- if (form->s->max_rows == (ha_rows) 0) /* default setting, don't set fragmentation */
- return;
- /**
- * get the number of fragments right
- */
- uint no_fragments;
- {
-#if MYSQL_VERSION_ID >= 50000
- uint acc_row_size= 25 + /*safety margin*/ 2;
-#else
- uint acc_row_size= pk_length*4;
- /* add acc overhead */
- if (pk_length <= 8) /* main page will set the limit */
- acc_row_size+= 25 + /*safety margin*/ 2;
- else /* overflow page will set the limit */
- acc_row_size+= 4 + /*safety margin*/ 4;
-#endif
- ulonglong acc_fragment_size= 512*1024*1024;
- ulonglong max_rows= form->s->max_rows;
-#if MYSQL_VERSION_ID >= 50100
- no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1;
-#else
- no_fragments= ((max_rows*acc_row_size)/acc_fragment_size+1
- +1/*correct rounding*/)/2;
-#endif
- }
- {
- uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
- NDBTAB::FragmentType ftype;
- if (no_fragments > 2*no_nodes)
- {
- ftype= NDBTAB::FragAllLarge;
- if (no_fragments > 4*no_nodes)
- push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
- "Ndb might have problems storing the max amount of rows specified");
- }
- else if (no_fragments > no_nodes)
- ftype= NDBTAB::FragAllMedium;
- else
- ftype= NDBTAB::FragAllSmall;
- tab.setFragmentType(ftype);
- }
-}
-
int ha_ndbcluster::create(const char *name,
TABLE *form,
HA_CREATE_INFO *info)
@@ -3939,7 +3841,22 @@ int ha_ndbcluster::create(const char *name,
}
}
- ndb_set_fragmentation(tab, form, pk_length);
+ // Check partition info
+ partition_info *part_info= form->s->part_info;
+ if (part_info)
+ {
+ int error;
+ if ((error= set_up_partition_info(part_info, form, (void*)&tab)))
+ {
+ DBUG_RETURN(error);
+ }
+ }
+ else
+ {
+ ndb_set_fragmentation(tab, form, pk_length);
+ }
+
+
if ((my_errno= check_ndb_connection()))
DBUG_RETURN(my_errno);
@@ -4188,11 +4105,12 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
HA_NEED_READ_RANGE_BUFFER |
HA_CAN_BIT_FIELD),
m_share(0),
+ m_part_info(NULL),
+ m_use_partition_function(FALSE),
+ m_sorted(FALSE),
m_use_write(FALSE),
m_ignore_dup_key(FALSE),
m_primary_key_update(FALSE),
- m_retrieve_all_fields(FALSE),
- m_retrieve_primary_key(FALSE),
m_rows_to_insert((ha_rows) 1),
m_rows_inserted((ha_rows) 0),
m_bulk_insert_rows((ha_rows) 1024),
@@ -4304,6 +4222,15 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
if (!res)
info(HA_STATUS_VARIABLE | HA_STATUS_CONST);
+ if (table->s->part_info)
+ {
+ m_part_info= table->s->part_info;
+ if (!(m_part_info->part_type == HASH_PARTITION &&
+ m_part_info->list_of_part_fields &&
+ !is_sub_partitioned(m_part_info)))
+ m_use_partition_function= TRUE;
+ }
+
DBUG_RETURN(res);
}
@@ -5516,6 +5443,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
HANDLER_BUFFER *buffer)
{
DBUG_ENTER("ha_ndbcluster::read_multi_range_first");
+ m_write_op= FALSE;
int res;
KEY* key_info= table->key_info + active_index;
@@ -5523,7 +5451,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
ulong reclength= table->s->reclength;
NdbOperation* op;
- if (uses_blob_value(m_retrieve_all_fields))
+ if (uses_blob_value())
{
/**
* blobs can't be batched currently
@@ -5575,12 +5503,29 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
for (; multi_range_curr<multi_range_end && curr+reclength <= end_of_buffer;
multi_range_curr++)
{
- switch (index_type){
+ part_id_range part_spec;
+ if (m_use_partition_function)
+ {
+ get_partition_set(table, curr, active_index,
+ &multi_range_curr->start_key,
+ &part_spec);
+ if (part_spec.start_part > part_spec.end_part)
+ {
+ /*
+ We can skip this partition since the key won't fit into any
+ partition
+ */
+ curr += reclength;
+ multi_range_curr->range_flag |= SKIP_RANGE;
+ continue;
+ }
+ }
+ switch(index_type){
case PRIMARY_KEY_ORDERED_INDEX:
if (!(multi_range_curr->start_key.length == key_info->key_length &&
- multi_range_curr->start_key.flag == HA_READ_KEY_EXACT))
- goto range;
- /* fall through */
+ multi_range_curr->start_key.flag == HA_READ_KEY_EXACT))
+ goto range;
+ // else fall through
case PRIMARY_KEY_INDEX:
{
multi_range_curr->range_flag |= UNIQUE_RANGE;
@@ -5588,7 +5533,9 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
!op->readTuple(lm) &&
!set_primary_key(op, multi_range_curr->start_key.key) &&
!define_read_attrs(curr, op) &&
- (op->setAbortOption(AO_IgnoreError), TRUE))
+ (op->setAbortOption(AO_IgnoreError), TRUE) &&
+ (!m_use_partition_function ||
+ (op->setPartitionId(part_spec.start_part), true)))
curr += reclength;
else
ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
@@ -5597,11 +5544,11 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
break;
case UNIQUE_ORDERED_INDEX:
if (!(multi_range_curr->start_key.length == key_info->key_length &&
- multi_range_curr->start_key.flag == HA_READ_KEY_EXACT &&
- !check_null_in_key(key_info, multi_range_curr->start_key.key,
- multi_range_curr->start_key.length)))
- goto range;
- /* fall through */
+ multi_range_curr->start_key.flag == HA_READ_KEY_EXACT &&
+ !check_null_in_key(key_info, multi_range_curr->start_key.key,
+ multi_range_curr->start_key.length)))
+ goto range;
+ // else fall through
case UNIQUE_INDEX:
{
multi_range_curr->range_flag |= UNIQUE_RANGE;
@@ -5615,8 +5562,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
break;
}
- case ORDERED_INDEX:
- {
+ case ORDERED_INDEX: {
range:
multi_range_curr->range_flag &= ~(uint)UNIQUE_RANGE;
if (scanOp == 0)
@@ -5691,7 +5637,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
}
#if 0
-#define DBUG_MULTI_RANGE(x) printf("read_multi_range_next: case %d\n", x);
+#define DBUG_MULTI_RANGE(x) DBUG_PRINT("info", ("read_multi_range_next: case %d\n", x));
#else
#define DBUG_MULTI_RANGE(x)
#endif
@@ -5702,6 +5648,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
DBUG_ENTER("ha_ndbcluster::read_multi_range_next");
if (m_disable_multi_read)
{
+ DBUG_MULTI_RANGE(11);
DBUG_RETURN(handler::read_multi_range_next(multi_range_found_p));
}
@@ -5711,10 +5658,16 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
const NdbOperation* op= m_current_multi_operation;
for (;multi_range_curr < m_multi_range_defined; multi_range_curr++)
{
+ DBUG_MULTI_RANGE(12);
+ if (multi_range_curr->range_flag & SKIP_RANGE)
+ continue;
if (multi_range_curr->range_flag & UNIQUE_RANGE)
{
if (op->getNdbError().code == 0)
+ {
+ DBUG_MULTI_RANGE(13);
goto found_next;
+ }
op= m_active_trans->getNextCompletedOperation(op);
m_multi_range_result_ptr += reclength;
@@ -5731,6 +5684,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
}
else
{
+ DBUG_MULTI_RANGE(14);
goto close_scan;
}
}
@@ -5764,6 +5718,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
DBUG_ASSERT(range_no == -1);
if ((res= m_multi_cursor->nextResult(true)))
{
+ DBUG_MULTI_RANGE(15);
goto close_scan;
}
multi_range_curr--; // Will be increased in for-loop
@@ -5791,12 +5746,16 @@ close_scan:
}
else
{
+ DBUG_MULTI_RANGE(9);
DBUG_RETURN(ndb_err(m_active_trans));
}
}
if (multi_range_curr == multi_range_end)
+ {
+ DBUG_MULTI_RANGE(16);
DBUG_RETURN(HA_ERR_END_OF_FILE);
+ }
/**
* Read remaining ranges
@@ -7013,6 +6972,8 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
: NULL;
break;
default:
+ field= NULL; //Keep compiler happy
+ DBUG_ASSERT(0);
break;
}
switch ((negated) ?
@@ -7360,4 +7321,178 @@ ha_ndbcluster::generate_scan_filter(Ndb_cond_stack *ndb_cond_stack,
DBUG_RETURN(0);
}
+
+/*
+ Helper routines for fragmentation and partitioning of NDB Cluster tables
+ */
+static uint get_no_fragments(ulonglong max_rows)
+{
+#if MYSQL_VERSION_ID >= 50000
+ uint acc_row_size= 25 + /*safety margin*/ 2;
+#else
+ uint acc_row_size= pk_length*4;
+ /* add acc overhead */
+ if (pk_length <= 8) /* main page will set the limit */
+ acc_row_size+= 25 + /*safety margin*/ 2;
+ else /* overflow page will set the limit */
+ acc_row_size+= 4 + /*safety margin*/ 4;
+#endif
+ ulonglong acc_fragment_size= 512*1024*1024;
+#if MYSQL_VERSION_ID >= 50100
+ return (max_rows*acc_row_size)/acc_fragment_size+1;
+#else
+ return ((max_rows*acc_row_size)/acc_fragment_size+1
+ +1/*correct rounding*/)/2;
+#endif
+}
+
+
+/*
+ Routine to adjust the default number of partitions so that it is always a
+ multiple of the number of nodes and never more than 4 times the number of
+ nodes.
+*/
+static bool adjusted_frag_count(uint no_fragments, uint no_nodes,
+ uint &reported_frags)
+{
+ uint i= 0;
+ reported_frags= no_nodes;
+ while (reported_frags < no_fragments && ++i < 4 &&
+ (reported_frags + no_nodes) < MAX_PARTITIONS)
+ reported_frags+= no_nodes;
+ return (reported_frags < no_fragments);
+}
+
+int ha_ndbcluster::get_default_no_partitions(ulonglong max_rows)
+{
+ uint reported_frags;
+ uint no_fragments= get_no_fragments(max_rows);
+ uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
+ adjusted_frag_count(no_fragments, no_nodes, reported_frags);
+ return (int)reported_frags;
+}
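To make the fragment arithmetic above concrete, here is a minimal standalone sketch of the same calculation; it is not part of the patch, and the node count, MAX_ROWS value and the MAX_PARTS stand-in for MAX_PARTITIONS are purely illustrative.

// Standalone sketch of the default fragment-count arithmetic above.
// ACC_ROW_SIZE and ACC_FRAGMENT_SIZE mirror the >= 5.0 branch of the code;
// MAX_PARTS is a hypothetical stand-in for MAX_PARTITIONS.
#include <cstdio>

static const unsigned long long ACC_ROW_SIZE= 25 + 2;               // includes safety margin
static const unsigned long long ACC_FRAGMENT_SIZE= 512ULL*1024*1024;
static const unsigned MAX_PARTS= 1024;

static unsigned no_fragments(unsigned long long max_rows)
{
  return (unsigned) ((max_rows * ACC_ROW_SIZE) / ACC_FRAGMENT_SIZE + 1);
}

static unsigned default_partitions(unsigned long long max_rows, unsigned no_nodes)
{
  unsigned fragments= no_fragments(max_rows);
  unsigned reported= no_nodes, i= 0;
  while (reported < fragments && ++i < 4 && (reported + no_nodes) < MAX_PARTS)
    reported+= no_nodes;                 // grow in whole multiples of the node count
  return reported;
}

int main()
{
  // MAX_ROWS = 100 million on a 4-node cluster:
  // 100e6 * 27 / 512MB + 1 = 6 fragments, rounded up to 8 (a multiple of 4).
  printf("%u\n", default_partitions(100000000ULL, 4));   // prints 8
  return 0;
}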
+
+
+/*
+ User defined partitioning set-up. We need to check how many fragments the
+ user wants defined and which node groups to put those into. Later we also
+ want to attach those partitions to a tablespace.
+
+ All the functionality of the partition function, partition limits and so
+ forth are entirely handled by the MySQL Server. There is one exception to
+ this rule for PARTITION BY KEY, where NDB handles the hash function and
+ this type can thus be handled transparently also by NDB API programs.
+ For RANGE, HASH and LIST partitioning, and for subpartitioning, NDB API
+ programs must implement the function that maps a row to a partition.
+*/
+
+uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
+ TABLE *table,
+ void *tab_par)
+{
+ DBUG_ENTER("ha_ndbcluster::set_up_partition_info");
+ ushort node_group[MAX_PARTITIONS];
+ ulong ng_index= 0, i, j;
+ NDBTAB *tab= (NDBTAB*)tab_par;
+ NDBTAB::FragmentType ftype= NDBTAB::UserDefined;
+ partition_element *part_elem;
+
+ if (part_info->part_type == HASH_PARTITION &&
+ part_info->list_of_part_fields == TRUE)
+ {
+ Field **fields= part_info->part_field_array;
+
+ if (part_info->linear_hash_ind)
+ ftype= NDBTAB::DistrKeyLin;
+ else
+ ftype= NDBTAB::DistrKeyHash;
+
+ for (i= 0; i < part_info->part_field_list.elements; i++)
+ {
+ NDBCOL *col= tab->getColumn(fields[i]->fieldnr - 1);
+ DBUG_PRINT("info",("setting dist key on %s", col->getName()));
+ col->setPartitionKey(TRUE);
+ }
+ }
+ List_iterator<partition_element> part_it(part_info->partitions);
+ for (i= 0; i < part_info->no_parts; i++)
+ {
+ part_elem= part_it++;
+ if (!is_sub_partitioned(part_info))
+ {
+ node_group[ng_index++]= part_elem->nodegroup_id;
+ //Here we should insert tablespace id based on tablespace name
+ }
+ else
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ for (j= 0; j < part_info->no_subparts; j++)
+ {
+ part_elem= sub_it++;
+ node_group[ng_index++]= part_elem->nodegroup_id;
+ //Here we should insert tablespace id based on tablespace name
+ }
+ }
+ }
+ {
+ uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
+ if (ng_index > 4 * no_nodes)
+ {
+ DBUG_RETURN(1300);
+ }
+ }
+ tab->setNodeGroupIds(&node_group, ng_index);
+ tab->setFragmentType(ftype);
+ DBUG_RETURN(0);
+}
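For orientation, the node_group array built above is filled in the order NDB numbers the fragments, with subpartitions flattened inside their parent partition. A small standalone sketch of that ordering, assuming a hypothetical 2 x 3 layout:

// Flattened fragment order used when filling node_group[] above:
// fragment index = part_id * no_subparts + sub_id for subpartitioned tables,
// or simply part_id when there are no subpartitions.
#include <cstdio>

int main()
{
  const unsigned no_parts= 2, no_subparts= 3;   // hypothetical layout
  for (unsigned part= 0; part < no_parts; part++)
    for (unsigned sub= 0; sub < no_subparts; sub++)
      printf("p%u/sp%u -> node_group[%u]\n", part, sub,
             part * no_subparts + sub);
  return 0;
}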
+
+
+/*
+ This routine is used to set up fragmentation when the user has only specified
+ ENGINE = NDB and no user defined partitioning whatsoever. Thus all values
+ will be based on default values. We will choose Linear Hash or Hash with
+ perfect spread depending on a session variable defined in MySQL.
+*/
+
+static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
+{
+ NDBTAB::FragmentType ftype;
+ ushort node_group[MAX_PARTITIONS];
+ uint no_nodes= g_ndb_cluster_connection->no_db_nodes(), no_fragments, i;
+ DBUG_ENTER("ndb_set_fragmentation");
+
+ if (form->s->max_rows == (ha_rows) 0)
+ {
+ no_fragments= no_nodes;
+ }
+ else
+ {
+ /*
+ Ensure that we get enough fragments to handle all rows and ensure that
+ the table is fully distributed by keeping the number of fragments a
+ multiple of the number of nodes.
+ */
+ uint fragments= get_no_fragments(form->s->max_rows);
+ if (adjusted_frag_count(fragments, no_nodes, no_fragments))
+ {
+ push_warning(current_thd,
+ MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
+ "Ndb might have problems storing the max amount of rows specified");
+ }
+ }
+ /*
+ Always start with node group 0 and continue with the next node group from
+ there
+ */
+ node_group[0]= 0;
+ for (i= 1; i < no_fragments; i++)
+ node_group[i]= UNDEF_NODEGROUP;
+ if (opt_ndb_linear_hash)
+ ftype= NDBTAB::DistrKeyLin;
+ else
+ ftype= NDBTAB::DistrKeyHash;
+ tab.setFragmentType(ftype);
+ tab.setNodeGroupIds(&node_group, no_fragments);
+ DBUG_VOID_RETURN;
+}
#endif /* HAVE_NDBCLUSTER_DB */
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index b34f8dd063c..f85b0fa8a04 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -420,7 +420,7 @@ class ha_ndbcluster: public handler
int write_row(byte *buf);
int update_row(const byte *old_data, byte *new_data);
int delete_row(const byte *buf);
- int index_init(uint index);
+ int index_init(uint index, bool sorted);
int index_end();
int index_read(byte *buf, const byte *key, uint key_len,
enum ha_rkey_function find_flag);
@@ -462,6 +462,11 @@ class ha_ndbcluster: public handler
const char * table_type() const;
const char ** bas_ext() const;
ulong table_flags(void) const;
+ ulong partition_flags(void) const
+ {
+ return (HA_CAN_PARTITION | HA_CAN_UPDATE_PARTITION_KEY |
+ HA_CAN_PARTITION_UNIQUE);
+ }
ulong index_flags(uint idx, uint part, bool all_parts) const;
uint max_supported_record_length() const;
uint max_supported_keys() const;
@@ -471,6 +476,7 @@ class ha_ndbcluster: public handler
int rename_table(const char *from, const char *to);
int delete_table(const char *name);
int create(const char *name, TABLE *form, HA_CREATE_INFO *info);
+ int get_default_no_partitions(ulonglong max_rows);
THR_LOCK_DATA **store_lock(THD *thd,
THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
@@ -549,15 +555,21 @@ private:
NDB_INDEX_TYPE get_index_type_from_table(uint index_no) const;
int check_index_fields_not_null(uint index_no);
- int pk_read(const byte *key, uint key_len, byte *buf);
- int complemented_pk_read(const byte *old_data, byte *new_data);
- int peek_row(const byte *record);
- int unique_index_read(const byte *key, uint key_len,
- byte *buf);
+ uint set_up_partition_info(partition_info *part_info,
+ TABLE *table,
+ void *tab);
+ int complemented_pk_read(const byte *old_data, byte *new_data,
+ uint32 old_part_id);
+ int pk_read(const byte *key, uint key_len, byte *buf, uint32 part_id);
int ordered_index_scan(const key_range *start_key,
const key_range *end_key,
- bool sorted, bool descending, byte* buf);
+ bool sorted, bool descending, byte* buf,
+ part_id_range *part_spec);
int full_table_scan(byte * buf);
+
+ int peek_row(const byte *record);
+ int unique_index_read(const byte *key, uint key_len,
+ byte *buf);
int fetch_next(NdbScanOperation* op);
int next_result(byte *buf);
int define_read_attrs(byte* buf, NdbOperation* op);
@@ -589,7 +601,7 @@ private:
ulonglong get_auto_increment();
void invalidate_dictionary_cache(bool global);
int ndb_err(NdbTransaction*);
- bool uses_blob_value(bool all_fields);
+ bool uses_blob_value();
char *update_table_comment(const char * comment);
@@ -637,11 +649,15 @@ private:
// NdbRecAttr has no reference to blob
typedef union { const NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
+ partition_info *m_part_info;
+ byte *m_rec0;
+ Field **m_part_field_array;
+ bool m_use_partition_function;
+ bool m_sorted;
bool m_use_write;
bool m_ignore_dup_key;
bool m_primary_key_update;
- bool m_retrieve_all_fields;
- bool m_retrieve_primary_key;
+ bool m_write_op;
ha_rows m_rows_to_insert;
ha_rows m_rows_inserted;
ha_rows m_bulk_insert_rows;
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
new file mode 100644
index 00000000000..1f67637c5e5
--- /dev/null
+++ b/sql/ha_partition.cc
@@ -0,0 +1,3179 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ This handler was developed by Mikael Ronström for version 5.1 of MySQL.
+ It is an abstraction layer on top of other handlers such as MyISAM,
+ InnoDB, Federated, Berkeley DB and so forth. Partitioned tables can also
+ be handled by a storage engine. The current example of this is NDB
+ Cluster that has internally handled partitioning. This have benefits in
+ that many loops needed in the partition handler can be avoided.
+
+ Partitioning has an inherent feature which in some cases is positive and
+ in some cases is negative. It splits the data into chunks. This makes
+ the data more manageable, queries can easily be parallelised across the
+ parts, and the indexes are split such that there are fewer levels in the
+ index trees. The inherent disadvantage is that using a split index
+ requires scanning all index parts, which is fine for large queries but
+ can be a drawback for small queries.
+
+ Partitioning lays the foundation for more manageable databases that are
+ extremely large. It also lays the foundation for more parallelism
+ in the execution of queries. This functionality will grow with later
+ versions of MySQL.
+
+ You can enable it by adding the following option during your build
+ process:
+ ./configure --with-partition
+
+ The partition handler is set up to use table locks. It implements a partition
+ "SHARE" that is inserted into a hash by table name. You can use this to store
+ state information that any partition handler object will be able to see
+ if it is using the same table.
+
+ Please read the object definition in ha_partition.h before reading the rest
+ of this file.
+*/
+
+#ifdef __GNUC__
+#pragma implementation // gcc: Class implementation
+#endif
+
+#include <mysql_priv.h>
+
+#ifdef HAVE_PARTITION_DB
+#include "ha_partition.h"
+
+static const char *ha_par_ext= ".par";
+#ifdef NOT_USED
+static int free_share(PARTITION_SHARE * share);
+static PARTITION_SHARE *get_share(const char *table_name, TABLE * table);
+#endif
+
+/****************************************************************************
+ MODULE create/delete handler object
+****************************************************************************/
+
+static handlerton partition_hton = {
+ "partition",
+ 0, /* slot */
+ 0, /* savepoint size */
+ NULL /*ndbcluster_close_connection*/,
+ NULL, /* savepoint_set */
+ NULL, /* savepoint_rollback */
+ NULL, /* savepoint_release */
+ NULL /*ndbcluster_commit*/,
+ NULL /*ndbcluster_rollback*/,
+ NULL, /* prepare */
+ NULL, /* recover */
+ NULL, /* commit_by_xid */
+ NULL, /* rollback_by_xid */
+ HTON_NO_FLAGS
+};
+
+ha_partition::ha_partition(TABLE *table)
+ :handler(&partition_hton, table), m_part_info(NULL), m_create_handler(FALSE),
+ m_is_sub_partitioned(0)
+{
+ DBUG_ENTER("ha_partition::ha_partition(table)");
+ init_handler_variables();
+ if (table)
+ {
+ if (table->s->part_info)
+ {
+ m_part_info= table->s->part_info;
+ m_is_sub_partitioned= is_sub_partitioned(m_part_info);
+ }
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+ha_partition::ha_partition(partition_info *part_info)
+ :handler(&partition_hton, NULL), m_part_info(part_info), m_create_handler(TRUE),
+ m_is_sub_partitioned(is_sub_partitioned(m_part_info))
+
+{
+ DBUG_ENTER("ha_partition::ha_partition(part_info)");
+ init_handler_variables();
+ DBUG_ASSERT(m_part_info);
+ DBUG_VOID_RETURN;
+}
+
+
+void ha_partition::init_handler_variables()
+{
+ active_index= MAX_KEY;
+ m_file_buffer= NULL;
+ m_name_buffer_ptr= NULL;
+ m_engine_array= NULL;
+ m_file= NULL;
+ m_tot_parts= 0;
+ m_has_transactions= 0;
+ m_pkey_is_clustered= 0;
+ m_lock_type= F_UNLCK;
+ m_part_spec.start_part= NO_CURRENT_PART_ID;
+ m_scan_value= 2;
+ m_ref_length= 0;
+ m_part_spec.end_part= NO_CURRENT_PART_ID;
+ m_index_scan_type= partition_no_index_scan;
+ m_start_key.key= NULL;
+ m_start_key.length= 0;
+ m_myisam= FALSE;
+ m_innodb= FALSE;
+ m_extra_cache= FALSE;
+ m_extra_cache_size= 0;
+ m_table_flags= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
+ m_low_byte_first= 1;
+ m_part_field_array= NULL;
+ m_ordered_rec_buffer= NULL;
+ m_top_entry= NO_CURRENT_PART_ID;
+ m_rec_length= 0;
+ m_last_part= 0;
+ m_rec0= 0;
+ m_curr_key_info= 0;
+
+#ifdef DONT_HAVE_TO_BE_INITALIZED
+ m_start_key.flag= 0;
+ m_ordered= TRUE;
+#endif
+}
+
+
+ha_partition::~ha_partition()
+{
+ DBUG_ENTER("ha_partition::~ha_partition()");
+ if (m_file != NULL)
+ {
+ uint i;
+ for (i= 0; i < m_tot_parts; i++)
+ delete m_file[i];
+ }
+ my_free((char*) m_ordered_rec_buffer, MYF(MY_ALLOW_ZERO_PTR));
+
+ clear_handler_file();
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ The partition handler is only a layer on top of other engines. Thus it
+ can't really perform anything without the underlying handlers. Therefore
+ we add this method as part of the allocation of a handler object.
+
+ 1) Allocation of underlying handlers
+ If we have access to the partition info we will allocate one handler
+ instance for each partition.
+ 2) Allocation without partition info
+ The case where we don't have access to this information is when called
+ in preparation for delete_table and rename_table, and in that case we
+ only need to set HA_FILE_BASED. In that case we will use the .par file
+ that contains information about the partitions and their engines and
+ the names of each partition.
+ 3) Table flags initialisation
+ We also need to set table flags for the partition handler. This is not
+ static since it depends on what storage engines are used as underlying
+ handlers.
+ The table flags are set in this routine to simulate the behaviour of a
+ normal storage engine.
+ The flag HA_FILE_BASED will be set independently of the underlying handlers.
+ 4) Index flags initialisation
+ When knowledge of the indexes exists it is also possible to initialise the
+ index flags. Again the index flags must be initialised by using the
+ underlying handlers since this is storage engine dependent.
+ The flag HA_READ_ORDER will be reset for the time being to indicate that no
+ ordered output is available from partition handler indexes. Later a merge
+ sort will be performed using the underlying handlers.
+ 5) primary_key_is_clustered, has_transactions and low_byte_first are
+ calculated here.
+*/
+
+int ha_partition::ha_initialise()
+{
+ handler **file_array, *file;
+ DBUG_ENTER("ha_partition::set_up_constants");
+
+ if (m_part_info)
+ {
+ m_tot_parts= get_tot_partitions(m_part_info);
+ DBUG_ASSERT(m_tot_parts > 0);
+ if (m_create_handler)
+ {
+ if (new_handlers_from_part_info())
+ DBUG_RETURN(1);
+ }
+ else if (get_from_handler_file(table->s->path))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), 129); //Temporary fix TODO print_error
+ DBUG_RETURN(1);
+ }
+ /*
+ We create all underlying table handlers here. We only do it if we have
+ access to the partition info. We do it in this special method to be
+ able to report allocation errors.
+ */
+ /*
+ Set up table_flags, low_byte_first, primary_key_is_clustered and
+ has_transactions since they are called often in all kinds of places,
+ other parameters are calculated on demand.
+ HA_FILE_BASED is always set for the partition handler since we use a
+ special file for handling the names of partitions and engine types.
+ HA_CAN_GEOMETRY, HA_CAN_FULLTEXT, HA_CAN_SQL_HANDLER and
+ HA_CAN_INSERT_DELAYED are disabled until further investigated.
+ */
+ m_table_flags= m_file[0]->table_flags();
+ m_low_byte_first= m_file[0]->low_byte_first();
+ m_has_transactions= TRUE;
+ m_pkey_is_clustered= TRUE;
+ file_array= m_file;
+ do
+ {
+ file= *file_array;
+ if (m_low_byte_first != file->low_byte_first())
+ {
+ // Cannot have handlers with different endian
+ my_error(ER_MIX_HANDLER_ERROR, MYF(0));
+ DBUG_RETURN(1);
+ }
+ if (!file->has_transactions())
+ m_has_transactions= FALSE;
+ if (!file->primary_key_is_clustered())
+ m_pkey_is_clustered= FALSE;
+ m_table_flags&= file->table_flags();
+ } while (*(++file_array));
+ m_table_flags&= ~(HA_CAN_GEOMETRY & HA_CAN_FULLTEXT &
+ HA_CAN_SQL_HANDLER & HA_CAN_INSERT_DELAYED);
+ /*
+ TODO RONM:
+ Make sure that the tree works without partition defined, compiles
+ and goes through mysql-test-run.
+ */
+ }
+ m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
+ DBUG_RETURN(0);
+}
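The flag handling in ha_initialise above boils down to intersecting the capabilities of the underlying handlers and then forcing on the flags the partition handler always needs. A minimal sketch of that reduction, using illustrative flag values rather than the real HA_* constants:

// Sketch of the table-flag combination in ha_initialise: keep only the
// capabilities every underlying handler supports, clear the disabled ones,
// then force on the flags the partition handler itself requires.
// Flag values below are illustrative only.
#include <cstdio>

enum { F_FILE_BASED= 1, F_REC_NOT_IN_SEQ= 2, F_CAN_GEOMETRY= 4, F_FAST_KEY_READ= 8 };

static unsigned long combine_flags(const unsigned long *per_handler, unsigned n)
{
  unsigned long flags= per_handler[0];
  for (unsigned i= 1; i < n; i++)
    flags&= per_handler[i];                   // intersection of capabilities
  flags&= ~(unsigned long) F_CAN_GEOMETRY;    // disabled until further investigated
  return flags | F_FILE_BASED | F_REC_NOT_IN_SEQ;  // always set for partitioning
}

int main()
{
  unsigned long h[]= { F_CAN_GEOMETRY | F_FAST_KEY_READ, F_FAST_KEY_READ };
  printf("0x%lx\n", combine_flags(h, 2));     // prints 0xb: FAST_KEY_READ plus forced flags
  return 0;
}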
+
+/****************************************************************************
+ MODULE meta data changes
+****************************************************************************/
+/*
+ Used to delete a table. By the time delete_table() has been called all
+ opened references to this table will have been closed (and your globally
+ shared references released). The variable name will just be the name of
+ the table. You will need to remove any files you have created at this
+ point.
+
+ If you do not implement this, the default delete_table() is called from
+ handler.cc and it will delete all files with the file extensions returned
+ by bas_ext().
+
+ Called from handler.cc by delete_table and ha_create_table(). Only used
+ during create if the table_flag HA_DROP_BEFORE_CREATE was specified for
+ the storage engine.
+*/
+
+int ha_partition::delete_table(const char *name)
+{
+ int error;
+ DBUG_ENTER("ha_partition::delete_table");
+ if ((error= del_ren_cre_table(name, NULL, NULL, NULL)))
+ DBUG_RETURN(error);
+ DBUG_RETURN(handler::delete_table(name));
+}
+
+
+/*
+ Renames a table from one name to another from alter table call.
+
+ If you do not implement this, the default rename_table() is called from
+ handler.cc and it will delete all files with the file extensions returned
+ by bas_ext().
+
+ Called from sql_table.cc by mysql_rename_table().
+*/
+
+int ha_partition::rename_table(const char *from, const char *to)
+{
+ int error;
+ DBUG_ENTER("ha_partition::rename_table");
+ if ((error= del_ren_cre_table(from, to, NULL, NULL)))
+ DBUG_RETURN(error);
+ DBUG_RETURN(handler::rename_table(from, to));
+}
+
+
+/*
+ create_handler_files is called to create any handler specific files
+ before opening the file with openfrm to later call ::create on the
+ file object.
+ In the partition handler this is used to store the names of partitions
+ and types of engines in the partitions.
+*/
+
+int ha_partition::create_handler_files(const char *name)
+{
+ DBUG_ENTER("ha_partition::create_handler_files()");
+ if (create_handler_file(name))
+ {
+ my_error(ER_CANT_CREATE_HANDLER_FILE, MYF(0));
+ DBUG_RETURN(1);
+ }
+ DBUG_RETURN(0);
+}
+
+
+/*
+ create() is called to create a table. The variable name will have the name
+ of the table. When create() is called you do not need to worry about
+ opening the table. Also, the FRM file will have already been created so
+ adjusting create_info will not do you any good. You can overwrite the frm
+ file at this point if you wish to change the table definition, but there
+ are no methods currently provided for doing that.
+
+ Called from handler.cc by ha_create_table().
+*/
+
+int ha_partition::create(const char *name, TABLE *table_arg,
+ HA_CREATE_INFO *create_info)
+{
+ char t_name[FN_REFLEN];
+ DBUG_ENTER("ha_partition::create");
+
+ strmov(t_name, name);
+ *fn_ext(t_name)= 0;
+ if (del_ren_cre_table(t_name, NULL, table_arg, create_info))
+ {
+ handler::delete_table(t_name);
+ DBUG_RETURN(1);
+ }
+ DBUG_RETURN(0);
+}
+
+
+void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
+{
+ return;
+}
+
+
+char *ha_partition::update_table_comment(const char *comment)
+{
+ return (char*) comment; // Nothing to change
+}
+
+
+/*
+ This routine is used to calculate the partition name; it is a service
+ routine for the del_ren_cre_table method.
+*/
+
+static void create_partition_name(char *out, const char *in1, const char *in2)
+{
+ strxmov(out, in1, "_", in2, NullS);
+}
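So for a table path "./test/t1" and a partition named "p0" the per-partition name becomes "./test/t1_p0". A standalone sketch of the same concatenation, without the MySQL string helpers:

// Standalone equivalent of create_partition_name(): "<table>_<partition>".
#include <cstdio>
#include <cstring>

static void partition_name(char *out, const char *table, const char *part)
{
  strcpy(out, table);
  strcat(out, "_");
  strcat(out, part);
}

int main()
{
  char buff[512];
  partition_name(buff, "./test/t1", "p0");
  printf("%s\n", buff);   // prints ./test/t1_p0
  return 0;
}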
+
+
+/*
+ Common routine to handle delete_table, rename_table and create.
+ The routine uses the partition handler file to get the
+ names of the partition instances. delete_table and rename_table
+ are called after creating the handler without a table
+ object and thus the file is needed to discover the
+ names of the partitions and the underlying storage engines.
+*/
+
+uint ha_partition::del_ren_cre_table(const char *from,
+ const char *to,
+ TABLE *table_arg,
+ HA_CREATE_INFO *create_info)
+{
+ int save_error= 0, error;
+ char from_buff[FN_REFLEN], to_buff[FN_REFLEN];
+ char *name_buffer_ptr;
+ uint i;
+ handler **file;
+ DBUG_ENTER("del_ren_cre_table()");
+
+ if (get_from_handler_file(from))
+ DBUG_RETURN(TRUE);
+ DBUG_ASSERT(m_file_buffer);
+ name_buffer_ptr= m_name_buffer_ptr;
+ file= m_file;
+ i= 0;
+ do
+ {
+ create_partition_name(from_buff, from, name_buffer_ptr);
+ if (to != NULL)
+ { // Rename branch
+ create_partition_name(to_buff, to, name_buffer_ptr);
+ error= (*file)->rename_table((const char*) from_buff,
+ (const char*) to_buff);
+ }
+ else if (table_arg == NULL) // delete branch
+ error= (*file)->delete_table((const char*) from_buff);
+ else
+ {
+ set_up_table_before_create(table_arg, create_info, i);
+ error= (*file)->create(from_buff, table_arg, create_info);
+ }
+ name_buffer_ptr= strend(name_buffer_ptr) + 1;
+ if (error)
+ save_error= error;
+ i++;
+ } while (*(++file));
+ DBUG_RETURN(save_error);
+}
+
+
+partition_element *ha_partition::find_partition_element(uint part_id)
+{
+ uint i;
+ uint curr_part_id= 0;
+ List_iterator_fast < partition_element > part_it(m_part_info->partitions);
+
+ for (i= 0; i < m_part_info->no_parts; i++)
+ {
+ partition_element *part_elem;
+ part_elem= part_it++;
+ if (m_is_sub_partitioned)
+ {
+ uint j;
+ List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
+ for (j= 0; j < m_part_info->no_subparts; j++)
+ {
+ part_elem= sub_it++;
+ if (part_id == curr_part_id++)
+ return part_elem;
+ }
+ }
+ else if (part_id == curr_part_id++)
+ return part_elem;
+ }
+ DBUG_ASSERT(0);
+ current_thd->fatal_error(); // Abort
+ return NULL;
+}
+
+
+void ha_partition::set_up_table_before_create(TABLE *table,
+ HA_CREATE_INFO *info,
+ uint part_id)
+{
+ /*
+ Set up
+ 1) Comment on partition
+ 2) MAX_ROWS, MIN_ROWS on partition
+ 3) Index file name on partition
+ 4) Data file name on partition
+ */
+ partition_element *part_elem= find_partition_element(part_id);
+ if (!part_elem)
+ return; // Fatal error
+ table->s->max_rows= part_elem->part_max_rows;
+ table->s->min_rows= part_elem->part_min_rows;
+ info->index_file_name= part_elem->index_file_name;
+ info->data_file_name= part_elem->data_file_name;
+}
+
+
+/*
+ Routine used to add two names with '_' in between them. Service routine
+ to create_handler_file.
+ Include the NULL terminator in the count of characters since it is needed
+ as a separator between the partition names.
+*/
+
+static uint name_add(char *dest, const char *first_name, const char *sec_name)
+{
+ return (uint) (strxmov(dest, first_name, "_", sec_name, NullS) -dest) + 1;
+}
+
+
+/*
+ Method used to create handler file with names of partitions, their
+ engine types and the number of partitions.
+*/
+
+bool ha_partition::create_handler_file(const char *name)
+{
+ partition_element *part_elem, *subpart_elem;
+ uint i, j, part_name_len, subpart_name_len;
+ uint tot_partition_words, tot_name_len;
+ uint tot_len_words, tot_len_byte, chksum, tot_name_words;
+ char *name_buffer_ptr;
+ uchar *file_buffer, *engine_array;
+ bool result= TRUE;
+ char file_name[FN_REFLEN];
+ File file;
+ List_iterator_fast < partition_element > part_it(m_part_info->partitions);
+ DBUG_ENTER("create_handler_file");
+
+ DBUG_PRINT("info", ("table name = %s", name));
+ tot_name_len= 0;
+ for (i= 0; i < m_part_info->no_parts; i++)
+ {
+ part_elem= part_it++;
+ part_name_len= strlen(part_elem->partition_name);
+ if (!m_is_sub_partitioned)
+ tot_name_len+= part_name_len + 1;
+ else
+ {
+ List_iterator_fast<partition_element> sub_it(part_elem->subpartitions);
+ for (j= 0; j < m_part_info->no_subparts; j++)
+ {
+ subpart_elem= sub_it++;
+ subpart_name_len= strlen(subpart_elem->partition_name);
+ tot_name_len+= part_name_len + subpart_name_len + 2;
+ }
+ }
+ }
+ /*
+ File format:
+ Length in words 4 byte
+ Checksum 4 byte
+ Total number of partitions 4 byte
+ Array of engine types n * 4 bytes where
+ n = (m_tot_parts + 3)/4
+ Length of name part in bytes 4 bytes
+ Name part m * 4 bytes where
+ m = ((length_name_part + 3)/4)*4
+
+ All padding bytes are zeroed
+ */
+ tot_partition_words= (m_tot_parts + 3) / 4;
+ tot_name_words= (tot_name_len + 3) / 4;
+ tot_len_words= 4 + tot_partition_words + tot_name_words;
+ tot_len_byte= 4 * tot_len_words;
+ if (!(file_buffer= (uchar *) my_malloc(tot_len_byte, MYF(MY_ZEROFILL))))
+ DBUG_RETURN(TRUE);
+ engine_array= (file_buffer + 12);
+ name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4));
+ part_it.rewind();
+ for (i= 0; i < m_part_info->no_parts; i++)
+ {
+ part_elem= part_it++;
+ if (!m_is_sub_partitioned)
+ {
+ name_buffer_ptr= strmov(name_buffer_ptr, part_elem->partition_name)+1;
+ *engine_array= (uchar) part_elem->engine_type;
+ DBUG_PRINT("info", ("engine: %u", *engine_array));
+ engine_array++;
+ }
+ else
+ {
+ List_iterator_fast<partition_element> sub_it(part_elem->subpartitions);
+ for (j= 0; j < m_part_info->no_subparts; j++)
+ {
+ subpart_elem= sub_it++;
+ name_buffer_ptr+= name_add(name_buffer_ptr,
+ part_elem->partition_name,
+ subpart_elem->partition_name);
+ *engine_array= (uchar) part_elem->engine_type;
+ engine_array++;
+ }
+ }
+ }
+ chksum= 0;
+ int4store(file_buffer, tot_len_words);
+ int4store(file_buffer + 8, m_tot_parts);
+ int4store(file_buffer + 12 + (tot_partition_words * 4), tot_name_len);
+ for (i= 0; i < tot_len_words; i++)
+ chksum^= uint4korr(file_buffer + 4 * i);
+ int4store(file_buffer + 4, chksum);
+ /*
+ Remove .frm extension and replace with .par
+ Create, write and close the file
+ to be used at open, delete_table and rename_table
+ */
+ fn_format(file_name, name, "", ".par", MYF(MY_REPLACE_EXT));
+ if ((file= my_create(file_name, CREATE_MODE, O_RDWR | O_TRUNC,
+ MYF(MY_WME))) >= 0)
+ {
+ result= my_write(file, (byte *) file_buffer, tot_len_byte,
+ MYF(MY_WME | MY_NABP));
+ VOID(my_close(file, MYF(0)));
+ }
+ else
+ result= TRUE;
+ my_free((char*) file_buffer, MYF(0));
+ DBUG_RETURN(result);
+}
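As a worked example of the .par layout built above: three partitions named p0, p1 and p2 give tot_name_len = 9 (each name plus its NUL), so the file is 4 + 1 + 3 = 8 words, i.e. 32 bytes. A standalone sketch of that size arithmetic:

// Sketch of the .par size arithmetic from create_handler_file():
// 3 header words (length, checksum, #parts) + engine array + name-length word + names.
#include <cstdio>
#include <cstring>

int main()
{
  const char *names[]= { "p0", "p1", "p2" };
  unsigned tot_parts= 3, tot_name_len= 0;
  for (unsigned i= 0; i < tot_parts; i++)
    tot_name_len+= strlen(names[i]) + 1;              // include NUL separator
  unsigned tot_partition_words= (tot_parts + 3) / 4;  // one engine byte per partition
  unsigned tot_name_words= (tot_name_len + 3) / 4;
  unsigned tot_len_words= 4 + tot_partition_words + tot_name_words;
  printf("%u words = %u bytes\n", tot_len_words, 4 * tot_len_words); // 8 words = 32 bytes
  return 0;
}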
+
+
+void ha_partition::clear_handler_file()
+{
+ my_free((char*) m_file_buffer, MYF(MY_ALLOW_ZERO_PTR));
+ m_file_buffer= NULL;
+ m_name_buffer_ptr= NULL;
+ m_engine_array= NULL;
+}
+
+
+bool ha_partition::create_handlers()
+{
+ uint i;
+ uint alloc_len= (m_tot_parts + 1) * sizeof(handler*);
+ DBUG_ENTER("create_handlers");
+
+ if (!(m_file= (handler **) sql_alloc(alloc_len)))
+ DBUG_RETURN(TRUE);
+ bzero(m_file, alloc_len);
+ for (i= 0; i < m_tot_parts; i++)
+ {
+ if (!(m_file[i]= get_new_handler(table, (enum db_type) m_engine_array[i])))
+ DBUG_RETURN(TRUE);
+ DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i]));
+ }
+ m_file[m_tot_parts]= 0;
+ /* For the moment we only support partitioning over the same storage engine */
+ if (m_engine_array[0] == (uchar) DB_TYPE_MYISAM)
+ {
+ DBUG_PRINT("info", ("MyISAM"));
+ m_myisam= TRUE;
+ }
+ else if (m_engine_array[0] == (uchar) DB_TYPE_INNODB)
+ {
+ DBUG_PRINT("info", ("InnoDB"));
+ m_innodb= TRUE;
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+bool ha_partition::new_handlers_from_part_info()
+{
+ uint i, j;
+ partition_element *part_elem;
+ uint alloc_len= (m_tot_parts + 1) * sizeof(handler*);
+ List_iterator_fast <partition_element> part_it(m_part_info->partitions);
+ DBUG_ENTER("ha_partition::new_handlers_from_part_info");
+
+ if (!(m_file= (handler **) sql_alloc(alloc_len)))
+ goto error;
+ bzero(m_file, alloc_len);
+ DBUG_ASSERT(m_part_info->no_parts > 0);
+
+ i= 0;
+ /*
+ We do not know the size of the underlying storage engine handler object,
+ so invent a number of bytes for the error message if allocation fails
+ */
+ alloc_len= 128;
+ do
+ {
+ part_elem= part_it++;
+ if (!m_is_sub_partitioned)
+ {
+ if (!(m_file[i]= get_new_handler(table, part_elem->engine_type)))
+ goto error;
+ DBUG_PRINT("info", ("engine_type: %u", (uint) part_elem->engine_type));
+ i++;
+ }
+ else
+ {
+ for (j= 0; j < m_part_info->no_subparts; j++)
+ {
+ if (!(m_file[i]= get_new_handler(table, part_elem->engine_type)))
+ goto error;
+ DBUG_PRINT("info", ("engine_type: %u", (uint) part_elem->engine_type));
+ i++;
+ }
+ }
+ } while (i < m_tot_parts);
+ if (part_elem->engine_type == DB_TYPE_MYISAM)
+ {
+ DBUG_PRINT("info", ("MyISAM"));
+ m_myisam= TRUE;
+ }
+ DBUG_RETURN(FALSE);
+error:
+ my_error(ER_OUTOFMEMORY, MYF(0), alloc_len);
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Open handler file to get partition names, engine types and number of
+ partitions.
+*/
+
+bool ha_partition::get_from_handler_file(const char *name)
+{
+ char buff[FN_REFLEN], *address_tot_name_len;
+ File file;
+ char *file_buffer, *name_buffer_ptr;
+ uchar *engine_array;
+ uint i, len_bytes, len_words, tot_partition_words, tot_name_words, chksum;
+ DBUG_ENTER("ha_partition::get_from_handler_file");
+ DBUG_PRINT("enter", ("table name: '%s'", name));
+
+ if (m_file_buffer)
+ DBUG_RETURN(FALSE);
+ fn_format(buff, name, "", ha_par_ext, MYF(0));
+
+ /* Following could be done with my_stat to read in whole file */
+ if ((file= my_open(buff, O_RDONLY | O_SHARE, MYF(0))) < 0)
+ DBUG_RETURN(TRUE);
+ if (my_read(file, (byte *) & buff[0], 8, MYF(MY_NABP)))
+ goto err1;
+ len_words= uint4korr(buff);
+ len_bytes= 4 * len_words;
+ if (!(file_buffer= my_malloc(len_bytes, MYF(0))))
+ goto err1;
+ VOID(my_seek(file, 0, MY_SEEK_SET, MYF(0)));
+ if (my_read(file, (byte *) file_buffer, len_bytes, MYF(MY_NABP)))
+ goto err2;
+
+ chksum= 0;
+ for (i= 0; i < len_words; i++)
+ chksum ^= uint4korr((file_buffer) + 4 * i);
+ if (chksum)
+ goto err2;
+ m_tot_parts= uint4korr((file_buffer) + 8);
+ tot_partition_words= (m_tot_parts + 3) / 4;
+ engine_array= (uchar *) ((file_buffer) + 12);
+ address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words;
+ tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4;
+ if (len_words != (tot_partition_words + tot_name_words + 4))
+ goto err2;
+ name_buffer_ptr= file_buffer + 16 + 4 * tot_partition_words;
+ VOID(my_close(file, MYF(0)));
+ m_file_buffer= file_buffer; // Will be freed in clear_handler_file()
+ m_name_buffer_ptr= name_buffer_ptr;
+ m_engine_array= engine_array;
+ if (!m_file && create_handlers())
+ {
+ clear_handler_file();
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+
+err2:
+ my_free(file_buffer, MYF(0));
+err1:
+ VOID(my_close(file, MYF(0)));
+ DBUG_RETURN(TRUE);
+}
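The checksum test above relies on the writer having stored the XOR of all other words: XOR-ing every word of a valid file, checksum included, therefore yields 0. A minimal standalone illustration with toy values:

// The .par checksum rule: the XOR of all 32-bit words of a valid file is 0,
// because the writer stored the XOR of every other word in word 1.
#include <cstdio>
#include <cstdint>

int main()
{
  uint32_t words[8]= { 8, 0, 3, 42, 9, 1234, 5678, 0 };  // toy file image
  uint32_t chk= 0;
  for (int i= 0; i < 8; i++)
    chk^= words[i];                      // word 1 is still 0 at this point
  words[1]= chk;                         // writer stores the checksum
  uint32_t verify= 0;
  for (int i= 0; i < 8; i++)
    verify^= words[i];
  printf("%u\n", (unsigned) verify);     // prints 0 for a valid file
  return 0;
}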
+
+/****************************************************************************
+ MODULE open/close object
+****************************************************************************/
+/*
+ Used for opening tables. The name will be the name of the file.
+ A table is opened when it needs to be opened. For instance
+ when a request comes in for a select on the table (tables are not
+ open and closed for each request, they are cached).
+
+ Called from handler.cc by handler::ha_open(). The server opens all tables
+ by calling ha_open() which then calls the handler specific open().
+*/
+
+int ha_partition::open(const char *name, int mode, uint test_if_locked)
+{
+ int error;
+ char name_buff[FN_REFLEN];
+ char *name_buffer_ptr= m_name_buffer_ptr;
+ handler **file;
+ uint alloc_len;
+ DBUG_ENTER("ha_partition::open");
+
+ ref_length= 0;
+ m_part_field_array= m_part_info->full_part_field_array;
+ if (get_from_handler_file(name))
+ DBUG_RETURN(1);
+ m_start_key.length= 0;
+ m_rec0= table->record[0];
+ m_rec_length= table->s->reclength;
+ alloc_len= m_tot_parts * (m_rec_length + PARTITION_BYTES_IN_POS);
+ alloc_len+= table->s->max_key_length;
+ if (!m_ordered_rec_buffer)
+ {
+ if (!(m_ordered_rec_buffer= my_malloc(alloc_len, MYF(MY_WME))))
+ {
+ DBUG_RETURN(1);
+ }
+ {
+ /*
+ We set up one record per partition and each record has 2 bytes in
+ front where the partition id is written. This is used by ordered
+ index_read.
+ We also set up a reference to the first record for temporary use in
+ setting up the scan.
+ */
+ char *ptr= m_ordered_rec_buffer;
+ uint i= 0;
+ do
+ {
+ int2store(ptr, i);
+ ptr+= m_rec_length + PARTITION_BYTES_IN_POS;
+ } while (++i < m_tot_parts);
+ m_start_key.key= ptr;
+ }
+ }
+ file= m_file;
+ do
+ {
+ create_partition_name(name_buff, name, name_buffer_ptr);
+ if ((error= (*file)->ha_open((const char*) name_buff, mode,
+ test_if_locked)))
+ goto err_handler;
+ name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
+ set_if_bigger(ref_length, ((*file)->ref_length));
+ } while (*(++file));
+ /*
+ Add 2 bytes for partition id in position ref length.
+ ref_length=max_in_all_partitions(ref_length) + PARTITION_BYTES_IN_POS
+ */
+ ref_length+= PARTITION_BYTES_IN_POS;
+ m_ref_length= ref_length;
+ /*
+ Release buffer read from .par file. It will not be reused again after
+ being opened once.
+ */
+ clear_handler_file();
+ /*
+ Initialise the priority queue, initially set up for reading forward.
+ */
+ if ((error= init_queue(&queue, m_tot_parts, (uint) PARTITION_BYTES_IN_POS,
+ 0, key_rec_cmp, (void*)this)))
+ goto err_handler;
+ /*
+ Some handlers update statistics as part of the open call. This will in
+ some cases corrupt the statistics of the partition handler, and thus,
+ to ensure we have correct statistics, we call info() from open after
+ calling open on all individual handlers.
+ */
+ info(HA_STATUS_VARIABLE | HA_STATUS_CONST);
+ DBUG_RETURN(0);
+
+err_handler:
+ while (file-- != m_file)
+ (*file)->close();
+ DBUG_RETURN(error);
+}
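The ordered-scan buffer set up in open() is one fixed-size slot per partition, each prefixed by the 2-byte partition id. A small sketch of the offset arithmetic, assuming PARTITION_BYTES_IN_POS is 2 and using a hypothetical record length:

// Layout of m_ordered_rec_buffer: slot i starts at
// i * (rec_length + PARTITION_BYTES_IN_POS) and begins with the 2-byte part id.
#include <cstdio>

int main()
{
  const unsigned PARTITION_BYTES_IN_POS= 2;   // as assumed in the handler
  const unsigned rec_length= 100;             // hypothetical table record length
  const unsigned tot_parts= 4;
  for (unsigned i= 0; i < tot_parts; i++)
    printf("partition %u: id at offset %u, record at offset %u\n",
           i,
           i * (rec_length + PARTITION_BYTES_IN_POS),
           i * (rec_length + PARTITION_BYTES_IN_POS) + PARTITION_BYTES_IN_POS);
  return 0;
}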
+
+/*
+ Closes a table. We call the free_share() function to free any resources
+ that we have allocated in the "shared" structure.
+
+ Called from sql_base.cc, sql_select.cc, and table.cc.
+ In sql_select.cc it is only used to close up temporary tables or during
+ the process where a temporary table is converted over to being a
+ myisam table.
+ For sql_base.cc look at close_data_tables().
+*/
+
+int ha_partition::close(void)
+{
+ handler **file;
+ DBUG_ENTER("ha_partition::close");
+ file= m_file;
+ do
+ {
+ (*file)->close();
+ } while (*(++file));
+ DBUG_RETURN(0);
+}
+
+
+/****************************************************************************
+ MODULE start/end statement
+****************************************************************************/
+/*
+ A number of methods to define various constants for the handler. In
+ the case of the partition handler we need to use some max and min
+ of the underlying handlers in most cases.
+*/
+
+/*
+ First you should go read the section "locking functions for mysql" in
+ lock.cc to understand this.
+ This creates a lock on the table. If you are implementing a storage engine
+ that can handle transactions, look at ha_berkeley.cc to see how you will
+ want to go about doing this. Otherwise you should consider calling
+ flock() here.
+ Originally this method was used to set locks on file level to enable
+ several MySQL Servers to work on the same data. For transactional
+ engines it has been "abused" to also mean start and end of statements
+ to enable proper rollback of statements and transactions. When LOCK
+ TABLES has been issued the start_stmt method takes over the role of
+ indicating start of statement but in this case there is no end of
+ statement indicator(?).
+
+ Called from lock.cc by lock_external() and unlock_external(). Also called
+ from sql_table.cc by copy_data_between_tables().
+*/
+
+int ha_partition::external_lock(THD *thd, int lock_type)
+{
+ uint error;
+ handler **file;
+ DBUG_ENTER("ha_partition::external_lock");
+ file= m_file;
+ do
+ {
+ if ((error= (*file)->external_lock(thd, lock_type)))
+ {
+ if (lock_type != F_UNLCK)
+ goto err_handler;
+ }
+ } while (*(++file));
+ m_lock_type= lock_type; // For the future (2009?)
+ DBUG_RETURN(0);
+
+err_handler:
+ while (file-- != m_file)
+ (*file)->external_lock(thd, F_UNLCK);
+ DBUG_RETURN(error);
+}
+
+
+/*
+ The idea with handler::store_lock() is the following:
+
+ The statement decided which locks we should need for the table
+ for updates/deletes/inserts we get WRITE locks, for SELECT... we get
+ read locks.
+
+ Before adding the lock into the table lock handler (see thr_lock.c)
+ mysqld calls store lock with the requested locks. Store lock can now
+ modify a write lock to a read lock (or some other lock), ignore the
+ lock (if we don't want to use MySQL table locks at all) or add locks
+ for many tables (like we do when we are using a MERGE handler).
+
+ Berkeley DB, for example, changes all WRITE locks to TL_WRITE_ALLOW_WRITE
+ (which signals that we are doing WRITES, but we are still allowing other
+ readers and writers).
+
+ When releasing locks, store_lock() is also called. In this case one
+ usually doesn't have to do anything.
+
+ store_lock is called when holding a global mutex to ensure that only
+ one thread at a time changes the locking information of tables.
+
+ In some exceptional cases MySQL may send a request for a TL_IGNORE;
+ This means that we are requesting the same lock as last time and this
+ should also be ignored. (This may happen when someone does a flush
+ table when we have opened a part of the tables, in which case mysqld
+ closes and reopens the tables and tries to get the same locks as last
+ time). In the future we will probably try to remove this.
+
+ Called from lock.cc by get_lock_data().
+*/
+
+THR_LOCK_DATA **ha_partition::store_lock(THD *thd,
+ THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+{
+ handler **file;
+ DBUG_ENTER("ha_partition::store_lock");
+ file= m_file;
+ do
+ {
+ to= (*file)->store_lock(thd, to, lock_type);
+ } while (*(++file));
+ DBUG_RETURN(to);
+}
+
+
+int ha_partition::start_stmt(THD *thd)
+{
+ int error= 0;
+ handler **file;
+ DBUG_ENTER("ha_partition::start_stmt");
+ file= m_file;
+ do
+ {
+ if ((error= (*file)->start_stmt(thd)))
+ break;
+ } while (*(++file));
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Returns the number of store locks needed in call to store lock.
+ We return number of partitions since we call store_lock on each
+ underlying handler. Assists the above functions in allocating
+ sufficient space for lock structures.
+*/
+
+uint ha_partition::lock_count() const
+{
+ DBUG_ENTER("ha_partition::lock_count");
+ DBUG_RETURN(m_tot_parts);
+}
+
+
+/*
+ Record currently processed was not in the result set of the statement
+ and is thus unlocked. Used for UPDATE and DELETE queries.
+*/
+
+void ha_partition::unlock_row()
+{
+ m_file[m_last_part]->unlock_row();
+ return;
+}
+
+
+/****************************************************************************
+ MODULE change record
+****************************************************************************/
+
+/*
+ write_row() inserts a row. buf is a byte array of data, normally record[0].
+
+ You can use the field information to extract the data from the native byte
+ array type.
+
+ Example of this would be:
+ for (Field **field=table->field ; *field ; field++)
+ {
+ ...
+ }
+
+ See ha_tina.cc for an example of extracting all of the data as strings.
+ ha_berkeley.cc has an example of how to store it intact by "packing" it
+ for ha_berkeley's own native storage type.
+
+ See the note for update_row() on auto_increments and timestamps. This
+ case also applies to write_row().
+
+ Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
+ sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
+
+ ADDITIONAL INFO:
+
+ Most handlers set the timestamp when calling write_row if any such fields
+ exist. Since we are calling an underlying handler we assume the
+ underlying handler will take on this responsibility.
+
+ Underlying handlers will also call update_auto_increment to calculate
+ the new auto increment value. We will catch the call to
+ get_auto_increment and ensure this increment value is maintained by
+ only one of the underlying handlers.
+*/
+
+int ha_partition::write_row(byte * buf)
+{
+ uint32 part_id;
+ int error;
+#ifdef NOT_NEEDED
+ byte *rec0= m_rec0;
+#endif
+ DBUG_ENTER("ha_partition::write_row");
+ DBUG_ASSERT(buf == m_rec0);
+
+#ifdef NOT_NEEDED
+ if (likely(buf == rec0))
+#endif
+ error= m_part_info->get_partition_id(m_part_info, &part_id);
+#ifdef NOT_NEEDED
+ else
+ {
+ set_field_ptr(m_part_field_array, buf, rec0);
+ error= m_part_info->get_partition_id(m_part_info, &part_id);
+ set_field_ptr(m_part_field_array, rec0, buf);
+ }
+#endif
+ if (unlikely(error))
+ DBUG_RETURN(error);
+ m_last_part= part_id;
+ DBUG_PRINT("info", ("Insert in partition %d", part_id));
+ DBUG_RETURN(m_file[part_id]->write_row(buf));
+}
+
+
+/*
+ Yes, update_row() does what you expect, it updates a row. old_data will
+ have the previous row record in it, while new_data will have the newest
+ data in it.
+ Keep in mind that the server can do updates based on ordering if an
+ ORDER BY clause was used. Consecutive ordering is not guaranteed.
+
+ Currently new_data will not have an updated auto_increment record, or
+ an updated timestamp field. You can do these, for example, by doing:
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+ table->timestamp_field->set_time();
+ if (table->next_number_field && record == table->record[0])
+ update_auto_increment();
+
+ Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
+ new_data is always record[0]
+ old_data is normally record[1] but may be anything
+
+*/
+
+int ha_partition::update_row(const byte *old_data, byte *new_data)
+{
+ uint32 new_part_id, old_part_id;
+ int error;
+ DBUG_ENTER("ha_partition::update_row");
+
+ if ((error= get_parts_for_update(old_data, new_data, table->record[0],
+ m_part_info, &old_part_id, &new_part_id)))
+ {
+ DBUG_RETURN(error);
+ }
+
+ /*
+ TODO:
+ set_internal_auto_increment=
+ max(set_internal_auto_increment, new_data->auto_increment)
+ */
+ m_last_part= new_part_id;
+ if (new_part_id == old_part_id)
+ {
+ DBUG_PRINT("info", ("Update in partition %d", new_part_id));
+ DBUG_RETURN(m_file[new_part_id]->update_row(old_data, new_data));
+ }
+ else
+ {
+ DBUG_PRINT("info", ("Update from partition %d to partition %d",
+ old_part_id, new_part_id));
+ if ((error= m_file[new_part_id]->write_row(new_data)))
+ DBUG_RETURN(error);
+ if ((error= m_file[old_part_id]->delete_row(old_data)))
+ {
+#ifdef IN_THE_FUTURE
+ (void) m_file[new_part_id]->delete_last_inserted_row(new_data);
+#endif
+ DBUG_RETURN(error);
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+
+/*
+ This will delete a row. buf will contain a copy of the row to be deleted.
+ The server will call this right after the current row has been read
+ (from either a previous rnd_xxx() or index_xxx() call).
+ If you keep a pointer to the last row or can access a primary key it will
+ make doing the deletion quite a bit easier.
+ Keep in mind that the server does not guarantee consecutive deletions.
+ ORDER BY clauses can be used.
+
+ Called in sql_acl.cc and sql_udf.cc to manage internal table information.
+ Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select
+ it is used for removing duplicates while in insert it is used for REPLACE
+ calls.
+
+ buf is either record[0] or record[1]
+
+*/
+
+int ha_partition::delete_row(const byte *buf)
+{
+ uint32 part_id;
+ int error;
+ DBUG_ENTER("ha_partition::delete_row");
+
+ if ((error= get_part_for_delete(buf, m_rec0, m_part_info, &part_id)))
+ {
+ DBUG_RETURN(error);
+ }
+ m_last_part= part_id;
+ DBUG_RETURN(m_file[part_id]->delete_row(buf));
+}
+
+
+/*
+ Used to delete all rows in a table. Both for cases of truncate and
+ for cases where the optimizer realizes that all rows will be
+ removed as a result of a SQL statement.
+
+ Called from item_sum.cc by Item_func_group_concat::clear(),
+ Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
+ Called from sql_delete.cc by mysql_delete().
+ Called from sql_select.cc by JOIN::reinit().
+ Called from sql_union.cc by st_select_lex_unit::exec().
+*/
+
+int ha_partition::delete_all_rows()
+{
+ int error;
+ handler **file;
+ DBUG_ENTER("ha_partition::delete_all_rows");
+ file= m_file;
+ do
+ {
+ if ((error= (*file)->delete_all_rows()))
+ DBUG_RETURN(error);
+ } while (*(++file));
+ DBUG_RETURN(0);
+}
+
+/*
+ rows == 0 means we will probably insert many rows
+*/
+
+void ha_partition::start_bulk_insert(ha_rows rows)
+{
+ handler **file;
+ DBUG_ENTER("ha_partition::start_bulk_insert");
+ if (!rows)
+ {
+ /* Avoid allocating big caches in all underlying handlers */
+ DBUG_VOID_RETURN;
+ }
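+ /*
+ Spread the row estimate evenly over the partitions (rounded up) so that
+ each underlying handler only sizes its bulk insert cache for its own
+ share of the rows.
+ */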
+ rows= rows/m_tot_parts + 1;
+ file= m_file;
+ do
+ {
+ (*file)->start_bulk_insert(rows);
+ } while (*(++file));
+ DBUG_VOID_RETURN;
+}
+
+
+int ha_partition::end_bulk_insert()
+{
+ int error= 0;
+ handler **file;
+ DBUG_ENTER("ha_partition::end_bulk_insert");
+
+ file= m_file;
+ do
+ {
+ int tmp;
+ /* We want to execute end_bulk_insert() on all handlers */
+ if ((tmp= (*file)->end_bulk_insert()))
+ error= tmp;
+ } while (*(++file));
+ DBUG_RETURN(error);
+}
+
+/****************************************************************************
+ MODULE full table scan
+****************************************************************************/
+/*
+ Initialize engine for random reads
+
+ SYNOPSIS
+ ha_partition::rnd_init()
+ scan 0 Initialize for random reads through rnd_pos()
+ 1 Initialize for random scan through rnd_next()
+
+ NOTES
+ rnd_init() is called when the server wants the storage engine to do a
+ table scan or when the server wants to access data through rnd_pos.
+
+ When scan is used we will scan one handler partition at a time.
+ When preparing for rnd_pos we will init all handler partitions.
+ No extra cache handling is needed when scanning is not performed.
+
+ Before initialising we will call rnd_end to ensure that we clean up from
+ any previous incarnation of a table scan.
+ Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+ sql_table.cc, and sql_update.cc.
+*/
+
+int ha_partition::rnd_init(bool scan)
+{
+ int error;
+ handler **file;
+ DBUG_ENTER("ha_partition::rnd_init");
+
+ include_partition_fields_in_used_fields();
+ if (scan)
+ {
+ /*
+ rnd_end() is needed for partitioning to reset internal data if scan
+ is already in use
+ */
+
+ rnd_end();
+ if (partition_scan_set_up(rec_buf(0), FALSE))
+ {
+ /*
+ The set of partitions to scan is empty. We return success and return
+ end of file on first rnd_next.
+ */
+ DBUG_RETURN(0);
+ }
+ /*
+ We will use the partition set in our scan, using the start and stop
+ partition and checking each partition against the bitfields before
+ starting its scan.
+ */
+ late_extra_cache(m_part_spec.start_part);
+ DBUG_PRINT("info", ("rnd_init on partition %d",m_part_spec.start_part));
+ error= m_file[m_part_spec.start_part]->ha_rnd_init(1);
+ m_scan_value= 1; // Scan active
+ if (error)
+ m_scan_value= 2; // No scan active
+ DBUG_RETURN(error);
+ }
+ file= m_file;
+ do
+ {
+ if ((error= (*file)->ha_rnd_init(0)))
+ goto err;
+ } while (*(++file));
+ m_scan_value= 0;
+ DBUG_RETURN(0);
+
+err:
+ while (file--)
+ (*file)->ha_rnd_end();
+ DBUG_RETURN(error);
+}
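+
+/*
+ m_scan_value as used by rnd_init/rnd_next/rnd_end:
+ 1 => a sequential scan is active on one partition at a time
+ 0 => all partitions are initialised for rnd_pos() calls
+ 2 => no scan active (set on errors and by rnd_end)
+*/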
+
+
+int ha_partition::rnd_end()
+{
+ handler **file;
+ DBUG_ENTER("ha_partition::rnd_end");
+ switch (m_scan_value) {
+ case 2: // Error
+ break;
+ case 1: // Table scan
+ if (m_part_spec.start_part != NO_CURRENT_PART_ID)
+ {
+ late_extra_no_cache(m_part_spec.start_part);
+ m_file[m_part_spec.start_part]->ha_rnd_end();
+ }
+ break;
+ case 0:
+ file= m_file;
+ do
+ {
+ (*file)->ha_rnd_end();
+ } while (*(++file));
+ break;
+ }
+ m_part_spec.start_part= NO_CURRENT_PART_ID;
+ m_scan_value= 2;
+ DBUG_RETURN(0);
+}
+
+
+/*
+ read next row during full table scan (scan in random row order)
+
+ SYNOPSIS
+ rnd_next()
+ buf buffer that should be filled with data
+
+ This is called for each row of the table scan. When you run out of records
+ you should return HA_ERR_END_OF_FILE.
+ The Field structure for the table is the key to getting data into buf
+ in a manner that will allow the server to understand it.
+
+ Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+ sql_table.cc, and sql_update.cc.
+*/
+
+int ha_partition::rnd_next(byte *buf)
+{
+ DBUG_ASSERT(m_scan_value);
+ uint part_id= m_part_spec.start_part; // Cache of this variable
+ handler *file= m_file[part_id];
+ int result= HA_ERR_END_OF_FILE;
+ DBUG_ENTER("ha_partition::rnd_next");
+
+ DBUG_ASSERT(m_scan_value == 1);
+
+ if (part_id > m_part_spec.end_part)
+ {
+ /*
+ The original set of partitions to scan was empty and thus we report
+ the result here.
+ */
+ goto end;
+ }
+ while (TRUE)
+ {
+ if ((result= file->rnd_next(buf)))
+ {
+ if (result == HA_ERR_RECORD_DELETED)
+ continue; // Probably MyISAM
+
+ if (result != HA_ERR_END_OF_FILE)
+ break; // Return error
+
+ /* End current partition */
+ late_extra_no_cache(part_id);
+ DBUG_PRINT("info", ("rnd_end on partition %d", part_id));
+ if ((result= file->ha_rnd_end()))
+ break;
+ /* Shift to next partition */
+ if (++part_id > m_part_spec.end_part)
+ {
+ result= HA_ERR_END_OF_FILE;
+ break;
+ }
+ file= m_file[part_id];
+ DBUG_PRINT("info", ("rnd_init on partition %d", part_id));
+ if ((result= file->ha_rnd_init(1)))
+ break;
+ late_extra_cache(part_id);
+ }
+ else
+ {
+ m_part_spec.start_part= part_id;
+ m_last_part= part_id;
+ table->status= 0;
+ DBUG_RETURN(0);
+ }
+ }
+
+end:
+ m_part_spec.start_part= NO_CURRENT_PART_ID;
+ table->status= STATUS_NOT_FOUND;
+ DBUG_RETURN(result);
+}
+
+
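+/*
+ Helpers for packing/unpacking the partition id into the position (ref)
+ buffer. Assuming PARTITION_BYTES_IN_POS is 2 (matching the
+ int2store/uint2korr calls here), the layout used by position() and
+ rnd_pos() below is:
+ ref[0..1] partition id
+ ref[2..ref_length-1] the ref of the underlying handler of that partition
+*/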
+inline void store_part_id_in_pos(byte *pos, uint part_id)
+{
+ int2store(pos, part_id);
+}
+
+inline uint get_part_id_from_pos(const byte *pos)
+{
+ return uint2korr(pos);
+}
+
+/*
+ position() is called after each call to rnd_next() if the data needs
+ to be ordered. You can do something like the following to store
+ the position:
+ ha_store_ptr(ref, ref_length, current_position);
+
+ The server uses ref to store data. ref_length in the above case is
+ the size needed to store current_position. ref is just a byte array
+ that the server will maintain. If you are using offsets to mark rows, then
+ current_position should be the offset. If it is a primary key, as in
+ BDB, then the ref needs to contain that primary key.
+
+ Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
+*/
+
+void ha_partition::position(const byte *record)
+{
+ handler *file= m_file[m_last_part];
+ DBUG_ENTER("ha_partition::position");
+ file->position(record);
+ store_part_id_in_pos(ref, m_last_part);
+ memcpy((ref + PARTITION_BYTES_IN_POS), file->ref,
+ (ref_length - PARTITION_BYTES_IN_POS));
+
+#ifdef SUPPORTING_PARTITION_OVER_DIFFERENT_ENGINES
+#ifdef HAVE_purify
+ bzero(ref + PARTITION_BYTES_IN_POS + ref_length, max_ref_length-ref_length);
+#endif /* HAVE_purify */
+#endif
+ DBUG_VOID_RETURN;
+}
+
+/*
+ This is like rnd_next, but you are given a position to use
+ to determine the row. The position will be of the type that you stored in
+ ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key
+ or position you saved when position() was called.
+ Called from filesort.cc records.cc sql_insert.cc sql_select.cc
+ sql_update.cc.
+*/
+
+int ha_partition::rnd_pos(byte * buf, byte *pos)
+{
+ uint part_id;
+ handler *file;
+ DBUG_ENTER("ha_partition::rnd_pos");
+
+ part_id= get_part_id_from_pos((const byte *) pos);
+ DBUG_ASSERT(part_id < m_tot_parts);
+ file= m_file[part_id];
+ m_last_part= part_id;
+ DBUG_RETURN(file->rnd_pos(buf, (pos + PARTITION_BYTES_IN_POS)));
+}
+
+
+/****************************************************************************
+ MODULE index scan
+****************************************************************************/
+/*
+ Positions an index cursor to the index specified in the handle. Fetches the
+ row if available. If the key value is null, begin at the first key of the
+ index.
+
+ There are loads of optimisations possible here for the partition handler.
+ The same optimisations can also be checked for full table scan although
+ only through conditions and not from index ranges.
+ Phase one optimisations:
+ Check if the fields of the partition function are bound. If so only use
+ the single partition it becomes bound to.
+ Phase two optimisations:
+ If it can be deduced through range or list partitioning that only a
+ subset of the partitions are used, then only use those partitions.
+*/
+
+/*
+ index_init is always called before starting index scans (except when
+ starting through index_read_idx and using read_range variants).
+*/
+
+int ha_partition::index_init(uint inx, bool sorted)
+{
+ int error= 0;
+ handler **file;
+ DBUG_ENTER("ha_partition::index_init");
+
+ active_index= inx;
+ m_part_spec.start_part= NO_CURRENT_PART_ID;
+ m_start_key.length= 0;
+ m_ordered= sorted;
+ m_curr_key_info= table->key_info+inx;
+ include_partition_fields_in_used_fields();
+
+ file= m_file;
+ do
+ {
+ /* TODO RONM: Change to index_init() when code is stable */
+ if ((error= (*file)->ha_index_init(inx, sorted)))
+ {
+ DBUG_ASSERT(0); // Should never happen
+ break;
+ }
+ } while (*(++file));
+ DBUG_RETURN(error);
+}
+
+
+/*
+ index_end is called at the end of an index scan to clean up anything
+ that needs cleaning up.
+*/
+
+int ha_partition::index_end()
+{
+ int error= 0;
+ handler **file;
+ DBUG_ENTER("ha_partition::index_end");
+
+ active_index= MAX_KEY;
+ m_part_spec.start_part= NO_CURRENT_PART_ID;
+ file= m_file;
+ do
+ {
+ int tmp;
+ /* We want to execute index_end() on all handlers */
+ /* TODO RONM: Change to index_end() when code is stable */
+ if ((tmp= (*file)->ha_index_end()))
+ error= tmp;
+ } while (*(++file));
+ DBUG_RETURN(error);
+}
+
+
+/*
+ index_read starts a new index scan using a start key. The MySQL Server
+ will check the end key on its own. Thus to function properly the
+ partitioned handler needs to ensure that it delivers records in the sort
+ order expected by the MySQL Server.
+ index_read can be restarted without calling index_end on the previous
+ index scan and without calling index_init. In this case the index_read
+ is on the same index as the previous index_scan. This is particularly
+ used in conjunction with multi read ranges.
+*/
+
+int ha_partition::index_read(byte * buf, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag)
+{
+ DBUG_ENTER("ha_partition::index_read");
+ end_range= 0;
+ DBUG_RETURN(common_index_read(buf, key, key_len, find_flag));
+}
+
+
+int ha_partition::common_index_read(byte *buf, const byte *key, uint key_len,
+ enum ha_rkey_function find_flag)
+{
+ int error;
+ DBUG_ENTER("ha_partition::common_index_read");
+
+ memcpy((void*)m_start_key.key, key, key_len);
+ m_start_key.length= key_len;
+ m_start_key.flag= find_flag;
+ m_index_scan_type= partition_index_read;
+
+ if ((error= partition_scan_set_up(buf, TRUE)))
+ {
+ DBUG_RETURN(error);
+ }
+
+ if (!m_ordered_scan_ongoing ||
+ (find_flag == HA_READ_KEY_EXACT &&
+ (key_len >= m_curr_key_info->key_length ||
+ key_len == 0)))
+ {
+ /*
+ We use an unordered index scan either when read_range is used and the
+ flag is set to not use an ordered scan, or when an exact key is used, in
+ which case all matching records compare equal on the key and thus the
+ sort order of the resulting records doesn't matter.
+ We also use an unordered index scan when the number of partitions to
+ scan is only one.
+ The unordered index scan will use the partition set created.
+ Need to set unordered scan ongoing since we can come here even when
+ it isn't set.
+ */
+ m_ordered_scan_ongoing= FALSE;
+ error= handle_unordered_scan_next_partition(buf);
+ }
+ else
+ {
+ /*
+ In all other cases we will use the ordered index scan. This will use
+ the partition set created by the get_partition_set method.
+ */
+ error= handle_ordered_index_scan(buf);
+ }
+ DBUG_RETURN(error);
+}
+
+
+/*
+ index_first() asks for the first key in the index.
+ This is similar to index_read except that there is no start key since
+ the scan starts from the leftmost entry and proceeds forward with
+ index_next.
+
+ Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
+ and sql_select.cc.
+*/
+
+int ha_partition::index_first(byte * buf)
+{
+ DBUG_ENTER("ha_partition::index_first");
+ end_range= 0;
+ m_index_scan_type= partition_index_first;
+ DBUG_RETURN(common_first_last(buf));
+}
+
+
+/*
+ index_last() asks for the last key in the index.
+ This is similar to index_read except that there is no start key since
+ the scan starts from the rightmost entry and proceeds backwards with
+ index_prev.
+
+ Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
+ and sql_select.cc.
+*/
+
+int ha_partition::index_last(byte * buf)
+{
+ DBUG_ENTER("ha_partition::index_last");
+ m_index_scan_type= partition_index_last;
+ DBUG_RETURN(common_first_last(buf));
+}
+
+int ha_partition::common_first_last(byte *buf)
+{
+ int error;
+ if ((error= partition_scan_set_up(buf, FALSE)))
+ return error;
+ if (!m_ordered_scan_ongoing)
+ return handle_unordered_scan_next_partition(buf);
+ return handle_ordered_index_scan(buf);
+}
+
+/*
+ Positions an index cursor to the index specified in key. Fetches the
+ row if any. This is only used to read whole keys.
+ TODO: Optimise this code to avoid index_init and index_end
+*/
+
+int ha_partition::index_read_idx(byte * buf, uint index, const byte * key,
+ uint key_len,
+ enum ha_rkey_function find_flag)
+{
+ int res;
+ DBUG_ENTER("ha_partition::index_read_idx");
+ index_init(index, 0);
+ res= index_read(buf, key, key_len, find_flag);
+ index_end();
+ DBUG_RETURN(res);
+}
+
+/*
+ This is used in join_read_last_key to optimise away an ORDER BY.
+ Can only be used on indexes supporting HA_READ_ORDER
+*/
+
+int ha_partition::index_read_last(byte *buf, const byte *key, uint keylen)
+{
+ DBUG_ENTER("ha_partition::index_read_last");
+ m_ordered= TRUE; // Safety measure
+ DBUG_RETURN(index_read(buf, key, keylen, HA_READ_PREFIX_LAST));
+}
+
+
+/*
+ Used to read forward through the index.
+*/
+
+int ha_partition::index_next(byte * buf)
+{
+ DBUG_ENTER("ha_partition::index_next");
+ /*
+ TODO(low priority):
+ If we want the partition handler to work with HANDLER commands, we
+ must be able to do index_last() -> index_prev() -> index_next()
+ */
+ DBUG_ASSERT(m_index_scan_type != partition_index_last);
+ if (!m_ordered_scan_ongoing)
+ {
+ DBUG_RETURN(handle_unordered_next(buf, FALSE));
+ }
+ DBUG_RETURN(handle_ordered_next(buf, FALSE));
+}
+
+
+/*
+ This routine is used to read the next row, but only if the key is the
+ same as supplied in the call.
+*/
+
+int ha_partition::index_next_same(byte *buf, const byte *key, uint keylen)
+{
+ DBUG_ENTER("ha_partition::index_next_same");
+ DBUG_ASSERT(keylen == m_start_key.length);
+ DBUG_ASSERT(m_index_scan_type != partition_index_last);
+ if (!m_ordered_scan_ongoing)
+ DBUG_RETURN(handle_unordered_next(buf, TRUE));
+ DBUG_RETURN(handle_ordered_next(buf, TRUE));
+}
+
+/*
+ Used to read backwards through the index.
+*/
+
+int ha_partition::index_prev(byte * buf)
+{
+ DBUG_ENTER("ha_partition::index_prev");
+ /* TODO: read comment in index_next */
+ DBUG_ASSERT(m_index_scan_type != partition_index_first);
+ DBUG_RETURN(handle_ordered_prev(buf));
+}
+
+
+/*
+ We reimplement read_range_first since we don't want the compare_key
+ check at the end. This is already performed in the partition handler.
+ read_range_next is quite different since we need to scan
+ all underlying handlers.
+*/
+
+int ha_partition::read_range_first(const key_range *start_key,
+ const key_range *end_key,
+ bool eq_range_arg, bool sorted)
+{
+ int error;
+ DBUG_ENTER("ha_partition::read_range_first");
+ m_ordered= sorted;
+ eq_range= eq_range_arg;
+ end_range= 0;
+ if (end_key)
+ {
+ end_range= &save_end_range;
+ save_end_range= *end_key;
+ key_compare_result_on_equal=
+ ((end_key->flag == HA_READ_BEFORE_KEY) ? 1 :
+ (end_key->flag == HA_READ_AFTER_KEY) ? -1 : 0);
+ }
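+ /*
+ key_compare_result_on_equal is the value compare_key() returns when a
+ row compares equal to end_key: HA_READ_BEFORE_KEY makes equal rows fall
+ outside the range, HA_READ_AFTER_KEY makes them fall inside it.
+ */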
+ range_key_part= m_curr_key_info->key_part;
+
+ if (!start_key) // Read first record
+ {
+ m_index_scan_type= partition_index_first;
+ error= common_first_last(m_rec0);
+ }
+ else
+ {
+ error= common_index_read(m_rec0,
+ start_key->key,
+ start_key->length, start_key->flag);
+ }
+ DBUG_RETURN(error);
+}
+
+
+int ha_partition::read_range_next()
+{
+ DBUG_ENTER("ha_partition::read_range_next");
+ if (m_ordered)
+ {
+ DBUG_RETURN(handler::read_range_next());
+ }
+ DBUG_RETURN(handle_unordered_next(m_rec0, eq_range));
+}
+
+
+int ha_partition::partition_scan_set_up(byte * buf, bool idx_read_flag)
+{
+ DBUG_ENTER("ha_partition::partition_scan_set_up");
+
+ if (idx_read_flag)
+ get_partition_set(table,buf,active_index,&m_start_key,&m_part_spec);
+ else
+ get_partition_set(table, buf, MAX_KEY, 0, &m_part_spec);
+ if (m_part_spec.start_part > m_part_spec.end_part)
+ {
+ /*
+ We discovered a partition set but the set was empty so we report
+ key not found.
+ */
+ DBUG_PRINT("info", ("scan with no partition to scan"));
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ }
+ if (m_part_spec.start_part == m_part_spec.end_part)
+ {
+ /*
+ We discovered a single partition to scan, this never needs to be
+ performed using the ordered index scan.
+ */
+ DBUG_PRINT("info", ("index scan using the single partition %d",
+ m_part_spec.start_part));
+ m_ordered_scan_ongoing= FALSE;
+ }
+ else
+ {
+ /*
+ Set m_ordered_scan_ongoing according to how the scan should be done
+ */
+ m_ordered_scan_ongoing= m_ordered;
+ }
+ DBUG_ASSERT(m_part_spec.start_part < m_tot_parts &&
+ m_part_spec.end_part < m_tot_parts);
+ DBUG_RETURN(0);
+}
+
+
+/****************************************************************************
+ Unordered Index Scan Routines
+****************************************************************************/
+/*
+ These routines are used to scan partitions without considering order.
+ This is performed in two situations.
+ 1) In read_multi_range this is the normal case
+ 2) When performing any type of index_read, index_first, index_last where
+ all fields in the partition function are bound. In this case the index
+ scan is performed on only one partition and thus it isn't necessary to
+ perform any sort.
+*/
+
+int ha_partition::handle_unordered_next(byte *buf, bool next_same)
+{
+ handler *file= m_file[m_part_spec.start_part];
+ int error;
+ DBUG_ENTER("ha_partition::handle_unordered_next");
+
+ /*
+ We should consider if this should be split into two functions as
+ next_same is always a local constant
+ */
+ if (next_same)
+ {
+ if (!(error= file->index_next_same(buf, m_start_key.key,
+ m_start_key.length)))
+ {
+ m_last_part= m_part_spec.start_part;
+ DBUG_RETURN(0);
+ }
+ }
+ else if (!(error= file->index_next(buf)))
+ {
+ if (compare_key(end_range) <= 0)
+ {
+ m_last_part= m_part_spec.start_part;
+ DBUG_RETURN(0); // Row was in range
+ }
+ error= HA_ERR_END_OF_FILE;
+ }
+
+ if (error == HA_ERR_END_OF_FILE)
+ {
+ m_part_spec.start_part++; // Start using next part
+ error= handle_unordered_scan_next_partition(buf);
+ }
+ DBUG_RETURN(error);
+}
+
+
+/*
+ This routine is used to start the index scan on the next partition.
+ Both initial start and after completing scan on one partition.
+*/
+
+int ha_partition::handle_unordered_scan_next_partition(byte * buf)
+{
+ uint i;
+ DBUG_ENTER("ha_partition::handle_unordered_scan_next_partition");
+
+ for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
+ {
+ int error;
+ handler *file= m_file[i];
+
+ m_part_spec.start_part= i;
+ switch (m_index_scan_type) {
+ case partition_index_read:
+ DBUG_PRINT("info", ("index_read on partition %d", i));
+ error= file->index_read(buf, m_start_key.key,
+ m_start_key.length,
+ m_start_key.flag);
+ break;
+ case partition_index_first:
+ DBUG_PRINT("info", ("index_first on partition %d", i));
+ error= file->index_first(buf);
+ break;
+ default:
+ DBUG_ASSERT(FALSE);
+ DBUG_RETURN(1);
+ }
+ if (!error)
+ {
+ if (compare_key(end_range) <= 0)
+ {
+ m_last_part= i;
+ DBUG_RETURN(0);
+ }
+ error= HA_ERR_END_OF_FILE;
+ }
+ if ((error != HA_ERR_END_OF_FILE) && (error != HA_ERR_KEY_NOT_FOUND))
+ DBUG_RETURN(error);
+ DBUG_PRINT("info", ("HA_ERR_END_OF_FILE on partition %d", i));
+ }
+ m_part_spec.start_part= NO_CURRENT_PART_ID;
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+}
+
+
+/*
+ This part contains the logic to handle index scans that require ordered
+ output. This includes all except those started by read_range_first with
+ the flag ordered set to FALSE. Thus it covers most direct index_read
+ calls and all index_first and index_last calls.
+
+ We implement ordering by keeping one record plus a key buffer for each
+ partition. Every time a new entry is requested we will fetch a new
+ entry from the partition that is currently not filled with an entry.
+ Then the entry is put into its proper sort position.
+
+ Returning a record is done by getting the top record, copying the
+ record to the request buffer and remembering which partition it came
+ from so that its entry is the one refilled on the next call.
+*/
+
+int ha_partition::handle_ordered_index_scan(byte *buf)
+{
+ uint i, j= 0;
+ bool found= FALSE;
+ bool reverse_order= FALSE;
+ DBUG_ENTER("ha_partition::handle_ordered_index_scan");
+
+ m_top_entry= NO_CURRENT_PART_ID;
+ queue_remove_all(&queue);
+ for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
+ {
+ int error;
+ byte *rec_buf_ptr= rec_buf(i);
+ handler *file= m_file[i];
+
+ switch (m_index_scan_type) {
+ case partition_index_read:
+ error= file->index_read(rec_buf_ptr,
+ m_start_key.key,
+ m_start_key.length,
+ m_start_key.flag);
+ reverse_order= FALSE;
+ break;
+ case partition_index_first:
+ error= file->index_first(rec_buf_ptr);
+ reverse_order= FALSE;
+ break;
+ case partition_index_last:
+ error= file->index_last(rec_buf_ptr);
+ reverse_order= TRUE;
+ break;
+ default:
+ DBUG_ASSERT(FALSE);
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ }
+ if (!error)
+ {
+ found= TRUE;
+ /*
+ Initialise queue without order first, simply insert
+ */
+ queue_element(&queue, j++)= (byte*)queue_buf(i);
+ }
+ else if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
+ {
+ DBUG_RETURN(error);
+ }
+ }
+ if (found)
+ {
+ /*
+ We found at least one partition with data, now sort all entries and
+ after that read the first entry and copy it to the buffer to return in.
+ */
+ queue_set_max_at_top(&queue, reverse_order);
+ queue_set_cmp_arg(&queue, (void*)m_curr_key_info);
+ queue.elements= j;
+ queue_fix(&queue);
+ return_top_record(buf);
+ DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry));
+ DBUG_RETURN(0);
+ }
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+}
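+
+/*
+ Ordering sketch: each queue element set up above points at queue_buf(i),
+ and return_top_record() below treats that buffer as the two byte
+ partition id followed by the fetched record (rec_buf(i) presumably
+ starts at offset PARTITION_BYTES_IN_POS within queue_buf(i)).
+*/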
+
+
+void ha_partition::return_top_record(byte *buf)
+{
+ uint part_id;
+ byte *key_buffer= queue_top(&queue);
+ byte *rec_buffer= key_buffer + PARTITION_BYTES_IN_POS;
+ part_id= uint2korr(key_buffer);
+ memcpy(buf, rec_buffer, m_rec_length);
+ m_last_part= part_id;
+ m_top_entry= part_id;
+}
+
+
+int ha_partition::handle_ordered_next(byte *buf, bool next_same)
+{
+ int error;
+ uint part_id= m_top_entry;
+ handler *file= m_file[part_id];
+ DBUG_ENTER("ha_partition::handle_ordered_next");
+
+ if (!next_same)
+ error= file->index_next(rec_buf(part_id));
+ else
+ error= file->index_next_same(rec_buf(part_id), m_start_key.key,
+ m_start_key.length);
+ if (error)
+ {
+ if (error == HA_ERR_END_OF_FILE)
+ {
+ /* Return next buffered row */
+ queue_remove(&queue, (uint) 0);
+ if (queue.elements)
+ {
+ DBUG_PRINT("info", ("Record returned from partition %u (2)",
+ m_top_entry));
+ return_top_record(buf);
+ error= 0;
+ }
+ }
+ DBUG_RETURN(error);
+ }
+ queue_replaced(&queue);
+ return_top_record(buf);
+ DBUG_PRINT("info", ("Record returned from partition %u", m_top_entry));
+ DBUG_RETURN(0);
+}
+
+
+int ha_partition::handle_ordered_prev(byte *buf)
+{
+ int error;
+ uint part_id= m_top_entry;
+ handler *file= m_file[part_id];
+ DBUG_ENTER("ha_partition::handle_ordered_prev");
+ if ((error= file->index_prev(rec_buf(part_id))))
+ {
+ if (error == HA_ERR_END_OF_FILE)
+ {
+ queue_remove(&queue, (uint) 0);
+ if (queue.elements)
+ {
+ return_top_record(buf);
+ DBUG_PRINT("info", ("Record returned from partition %d (2)",
+ m_top_entry));
+ error= 0;
+ }
+ }
+ DBUG_RETURN(error);
+ }
+ queue_replaced(&queue);
+ return_top_record(buf);
+ DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry));
+ DBUG_RETURN(0);
+}
+
+
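+/*
+ Make sure the fields of the partition function are included in the read
+ set, since get_partition_id()/get_partition_set() need their values even
+ when the statement itself does not reference those fields.
+*/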
+void ha_partition::include_partition_fields_in_used_fields()
+{
+ DBUG_ENTER("ha_partition::include_partition_fields_in_used_fields");
+ Field **ptr= m_part_field_array;
+ do
+ {
+ ha_set_bit_in_read_set((*ptr)->fieldnr);
+ } while (*(++ptr));
+ DBUG_VOID_RETURN;
+}
+
+
+/****************************************************************************
+ MODULE information calls
+****************************************************************************/
+
+/*
+ These are all first approximations of the extra, info, scan_time
+ and read_time calls
+*/
+
+/*
+ ::info() is used to return information to the optimizer.
+ Currently this table handler doesn't implement most of the fields
+ really needed. SHOW also makes use of this data.
+ Another note: if your handler doesn't provide an exact record count,
+ you will probably want to have the following in your code:
+ if (records < 2)
+ records = 2;
+ The reason is that the server will optimize for cases of only a single
+ record. If in a table scan you don't know the number of records
+ it will probably be better to set records to two so you can return
+ as many records as you need.
+
+ Along with records a few more variables you may wish to set are:
+ records
+ deleted
+ data_file_length
+ index_file_length
+ delete_length
+ check_time
+ Take a look at the public variables in handler.h for more information.
+
+ Called in:
+ filesort.cc
+ ha_heap.cc
+ item_sum.cc
+ opt_sum.cc
+ sql_delete.cc
+ sql_delete.cc
+ sql_derived.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_show.cc
+ sql_show.cc
+ sql_show.cc
+ sql_show.cc
+ sql_table.cc
+ sql_union.cc
+ sql_update.cc
+
+ Some flags that are not implemented
+ HA_STATUS_POS:
+ This parameter is never used from the MySQL Server. It is checked in a
+ place in MyISAM so it could potentially be used by MyISAM-specific programs.
+ HA_STATUS_NO_LOCK:
+ This is declared and often used. It's only used by MyISAM.
+ It means that MySQL doesn't need the absolute latest statistics
+ information. This may save the handler from doing internal locks while
+ retrieving statistics data.
+*/
+
+void ha_partition::info(uint flag)
+{
+ handler *file, **file_array;
+ DBUG_ENTER("ha_partition:info");
+
+ if (flag & HA_STATUS_AUTO)
+ {
+ DBUG_PRINT("info", ("HA_STATUS_AUTO"));
+ /*
+ The auto increment value is only maintained by the first handler
+ so we will only call this.
+ */
+ m_file[0]->info(HA_STATUS_AUTO);
+ }
+ if (flag & HA_STATUS_VARIABLE)
+ {
+ DBUG_PRINT("info", ("HA_STATUS_VARIABLE"));
+ /*
+ Calculates statistical variables
+ records: Estimate of number records in table
+ We report sum (always at least 2)
+ deleted: Estimate of number holes in the table due to
+ deletes
+ We report sum
+ data_file_length: Length of data file, in principle bytes in table
+ We report sum
+ index_file_length: Length of index file, in principle bytes in
+ indexes in the table
+ We report sum
+ mean_record_length:Mean record length in the table
+ We calculate this
+ check_time: Time of last check (only applicable to MyISAM)
+ We report last time of all underlying handlers
+ */
+ records= 0;
+ deleted= 0;
+ data_file_length= 0;
+ index_file_length= 0;
+ check_time= 0;
+ file_array= m_file;
+ do
+ {
+ file= *file_array;
+ file->info(HA_STATUS_VARIABLE);
+ records+= file->records;
+ deleted+= file->deleted;
+ data_file_length+= file->data_file_length;
+ index_file_length+= file->index_file_length;
+ if (file->check_time > check_time)
+ check_time= file->check_time;
+ } while (*(++file_array));
+ if (records < 2)
+ records= 2;
+ mean_rec_length= (ulong) (data_file_length / records);
+ }
+ if (flag & HA_STATUS_CONST)
+ {
+ DBUG_PRINT("info", ("HA_STATUS_CONST"));
+ /*
+ Recalculate loads of constant variables. MyISAM also sets things
+ directly on the table share object.
+
+ Check whether this should be fixed since handlers should not
+ change things directly on the table object.
+
+ Monty comment: This should NOT be changed! It's the handlers
+ responsibility to correct table->s->keys_xxxx information if keys
+ have been disabled.
+
+ The most important parameters set here are records per key on
+ all indexes, block_size and primary key ref_length.
+
+ For each index there is an array of rec_per_key.
+ As an example if we have an index with three attributes a,b and c
+ we will have an array of 3 rec_per_key.
+ rec_per_key[0] is an estimate of number of records divided by
+ number of unique values of the field a.
+ rec_per_key[1] is an estimate of the number of records divided
+ by the number of unique combinations of the fields a and b.
+ rec_per_key[2] is an estimate of the number of records divided
+ by the number of unique combinations of the fields a,b and c.
+
+ Many handlers only set the value of rec_per_key when all fields
+ are bound (rec_per_key[2] in the example above).
+
+ If the handler doesn't support statistics, it should set all of the
+ above to 0.
+
+ We will allow the first handler to set the rec_per_key and use
+ this as an estimate on the total table.
+
+ max_data_file_length: Maximum data file length
+ We ignore it, is only used in
+ SHOW TABLE STATUS
+ max_index_file_length: Maximum index file length
+ We ignore it since it is never used
+ block_size: Block size used
+ We set it to the value of the first handler
+ sortkey: Never used at any place so ignored
+ ref_length: We set this to the value calculated
+ and stored in local object
+ raid_type: Set by first handler (MyISAM)
+ raid_chunks: Set by first handler (MyISAM)
+ raid_chunksize: Set by first handler (MyISAM)
+ create_time: Creation time of table
+ Set by first handler
+
+ So we calculate these constants by using the variables on the first
+ handler.
+ */
+
+ file= m_file[0];
+ file->info(HA_STATUS_CONST);
+ create_time= file->create_time;
+ raid_type= file->raid_type;
+ raid_chunks= file->raid_chunks;
+ raid_chunksize= file->raid_chunksize;
+ ref_length= m_ref_length;
+ }
+ if (flag & HA_STATUS_ERRKEY)
+ {
+ handler *file= m_file[m_last_part];
+ DBUG_PRINT("info", ("info: HA_STATUS_ERRKEY"));
+ /*
+ This flag is used to get index number of the unique index that
+ reported duplicate key
+ We will report the errkey on the last handler used and ignore the rest
+ */
+ file->info(HA_STATUS_ERRKEY);
+ if (file->errkey != (uint) -1)
+ errkey= file->errkey;
+ }
+ if (flag & HA_STATUS_TIME)
+ {
+ DBUG_PRINT("info", ("info: HA_STATUS_TIME"));
+ /*
+ This flag is used to set the latest update time of the table.
+ Used by SHOW commands
+ We will report the maximum of these times
+ */
+ update_time= 0;
+ file_array= m_file;
+ do
+ {
+ file= *file_array;
+ file->info(HA_STATUS_TIME);
+ if (file->update_time > update_time)
+ update_time= file->update_time;
+ } while (*(++file_array));
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ extra() is called whenever the server wishes to send a hint to
+ the storage engine. The MyISAM engine implements the most hints.
+
+ We divide the parameters into the following categories:
+ 1) Parameters used by most handlers
+ 2) Parameters used by some non-MyISAM handlers
+ 3) Parameters used only by MyISAM
+ 4) Parameters only used by temporary tables for query processing
+ 5) Parameters only used by MyISAM internally
+ 6) Parameters not used at all
+
+ The partition handler needs to handle categories 1), 2) and 3).
+
+ 1) Parameters used by most handlers
+ -----------------------------------
+ HA_EXTRA_RESET:
+ This option is used by most handlers and it resets the handler state
+ to the same state as after an open call. This includes releasing
+ any READ CACHE or WRITE CACHE or other internal buffer used.
+
+ It is called from the reset method in the handler interface. There are
+ three instances where this is called.
+ 1) After completing an INSERT ... SELECT ... query the handler for the
+ table inserted into is reset
+ 2) It is called from close_thread_table which in turn is called from
+ close_thread_tables except in the case where the tables are locked
+ in which case ha_commit_stmt is called instead.
+ It is only called from here if flush_version hasn't changed and the
+ table is not an old table when calling close_thread_table.
+ close_thread_tables is called from many places as a general clean up
+ function after completing a query.
+ 3) It is called when deleting the QUICK_RANGE_SELECT object if the
+ QUICK_RANGE_SELECT object had its own handler object. It is called
+ immediately before close of this local handler object.
+ HA_EXTRA_KEYREAD:
+ HA_EXTRA_NO_KEYREAD:
+ These parameters are used to provide an optimisation hint to the handler.
+ If HA_EXTRA_KEYREAD is set it is enough to read the index fields, for
+ many handlers this means that the index-only scans can be used and it
+ is not necessary to use the real records to satisfy this part of the
+ query. Index-only scans are a very important optimisation for disk-based
+ indexes. For main-memory indexes most indexes contain a reference to the
+ record and thus KEYREAD only says that it is enough to read key fields.
+ HA_EXTRA_NO_KEYREAD disables this for the handler, also HA_EXTRA_RESET
+ will disable this option.
+ The handler will set HA_KEYREAD_ONLY in its table flags to indicate this
+ feature is supported.
+ HA_EXTRA_FLUSH:
+ Indication to flush tables to disk, called at close_thread_table to
+ ensure disk based tables are flushed at end of query execution.
+
+ 2) Parameters used by some non-MyISAM handlers
+ ----------------------------------------------
+ HA_EXTRA_RETRIEVE_ALL_COLS:
+ Many handlers have implemented optimisations to avoid fetching all
+ fields when retrieving data. In certain situations all fields need
+ to be retrieved even though the query_id is not set on all field
+ objects.
+
+ It is called from copy_data_between_tables where all fields are
+ copied without setting query_id before calling the handlers.
+ It is called from UPDATE statements when the fields of the index
+ used are updated or ORDER BY is used with UPDATE.
+ And finally when calculating checksum of a table using the CHECKSUM
+ command.
+ HA_EXTRA_RETRIEVE_PRIMARY_KEY:
+ In some situations it is mandatory to retrieve primary key fields
+ independent of the query id's. This extra flag specifies that fetch
+ of primary key fields is mandatory.
+ HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
+ This is a strictly InnoDB feature that is more or less undocumented.
+ When it is activated InnoDB copies field by field from its fetch
+ cache instead of all fields in one memcpy. Have no idea what the
+ purpose of this is.
+ Cut from include/my_base.h:
+ When using HA_EXTRA_KEYREAD, overwrite only key member fields and keep
+ other fields intact. When this is off (by default) InnoDB will use memcpy
+ to overwrite entire row.
+ HA_EXTRA_IGNORE_DUP_KEY:
+ HA_EXTRA_NO_IGNORE_DUP_KEY:
+ Informs the handler that we will not stop the transaction if we get
+ duplicate key errors during insert/update.
+ Always called in pair, triggered by INSERT IGNORE and other similar
+ SQL constructs.
+ Not used by MyISAM.
+
+ 3) Parameters used only by MyISAM
+ ---------------------------------
+ HA_EXTRA_NORMAL:
+ Only used in MyISAM to reset quick mode, not implemented by any other
+ handler. Quick mode is also reset in MyISAM by HA_EXTRA_RESET.
+
+ It is called after completing a successful DELETE query if the QUICK
+ option is set.
+
+ HA_EXTRA_QUICK:
+ When the user does DELETE QUICK FROM table where-clause; this extra
+ option is called before the delete query is performed and
+ HA_EXTRA_NORMAL is called after the delete query is completed.
+ Temporary tables used internally in MySQL always set this option
+
+ The meaning of quick mode is that when deleting in a B-tree no merging
+ of leafs is performed. This is a common method and many large DBMS's
+ actually only support this quick mode since it is very difficult to
+ merge leaves in a tree used by many threads concurrently.
+
+ HA_EXTRA_CACHE:
+ This flag is usually set with extra_opt along with a cache size.
+ The size of this buffer is set by the user variable
+ record_buffer_size. The value of this cache size is the amount of
+ data read from disk in each fetch when performing a table scan.
+ This means that before scanning a table it is normal to call
+ extra with HA_EXTRA_CACHE and when the scan is completed to call
+ HA_EXTRA_NO_CACHE to release the cache memory.
+
+ Some special care is taken when using this extra parameter since there
+ could be a write ongoing on the table in the same statement. In that
+ case one has to take special care since there might be a WRITE CACHE as
+ well. HA_EXTRA_CACHE specifies using a READ CACHE and using
+ READ CACHE and WRITE CACHE at the same time is not possible.
+
+ Only MyISAM currently use this option.
+
+ It is set when doing full table scans using rr_sequential and
+ reset when completing such a scan with end_read_record
+ (resetting means calling extra with HA_EXTRA_NO_CACHE).
+
+ It is set in filesort.cc for MyISAM internal tables and it is set in
+ a multi-update where HA_EXTRA_CACHE is called on a temporary result
+ table and after that ha_rnd_init(0) on table to be updated
+ and immediately after that HA_EXTRA_NO_CACHE on table to be updated.
+
+ Apart from that it is always used from init_read_record but not when
+ used from UPDATE statements. It is not used from DELETE statements
+ with ORDER BY and LIMIT but it is used in normal scan loop in DELETE
+ statements. The reason here is that DELETEs in MyISAM don't move
+ existing data rows.
+
+ It is also set in copy_data_between_tables when scanning the old table
+ to copy over to the new table.
+ And it is set in join_init_read_record where quick objects are used
+ to perform a scan on the table. In this case the full table scan can
+ even be performed multiple times as part of the nested loop join.
+
+ For purposes of the partition handler it is obviously necessary to have
+ special treatment of this extra call. If we simply passed this
+ extra call down to each handler we would allocate
+ cache size * number of partitions worth of memory and this is not
+ necessary since we will only scan one partition at a time when doing
+ full table scans.
+
+ Thus we treat it by first checking whether we have MyISAM handlers in
+ the table; if not we simply ignore the call, and if we do we will
+ record the call but will not call any underlying handler yet. Then
+ when performing the sequential scan we will check this recorded value
+ and call extra_opt whenever we start scanning a new partition.
+
+ monty: Needs to be fixed so that it's passed to all handlers when we
+ move to another partition during table scan.
+
+ HA_EXTRA_NO_CACHE:
+ When performing a UNION SELECT HA_EXTRA_NO_CACHE is called from the
+ flush method in the select_union class.
+ It is used to some extent for INSERT DELAYED inserts.
+ See HA_EXTRA_RESET_STATE for use in conjunction with delete_all_rows().
+
+ It should be ok to call HA_EXTRA_NO_CACHE on all underlying handlers
+ if they are MyISAM handlers; for other handlers we can ignore the call.
+ If no cache is in use they will quickly return after finding
+ this out. This also ensures that all caches are disabled and none
+ is left behind by mistake.
+ In the future this call will probably be deleted and we will instead call
+ ::reset();
+
+ HA_EXTRA_WRITE_CACHE:
+ See above, called from various places. It is mostly used when we
+ do INSERT ... SELECT
+ No special handling to save cache space is developed currently.
+
+ HA_EXTRA_PREPARE_FOR_UPDATE:
+ This is called as part of a multi-table update. When the table to be
+ updated is also scanned then this informs MyISAM handler to drop any
+ caches if dynamic records are used (fixed size records do not care
+ about this call). We pass this along to all underlying MyISAM handlers
+ and ignore it for the rest.
+
+ HA_EXTRA_PREPARE_FOR_DELETE:
+ Only used by MyISAM, called in preparation for a DROP TABLE.
+ It's needed mostly on Windows, which cannot handle dropping an open file.
+ On other platforms it has the same effect as HA_EXTRA_FORCE_REOPEN.
+
+ HA_EXTRA_READCHECK:
+ HA_EXTRA_NO_READCHECK:
+ Only one call to HA_EXTRA_NO_READCHECK from ha_open where it says that
+ this is not needed in SQL. The reason for this call is that MyISAM sets
+ the READ_CHECK_USED in the open call so the call is needed for MyISAM
+ to reset this feature.
+ The idea with this parameter was to control whether or not to do a read
+ check before applying an update. Since SQL always performs a read before
+ applying the update, no read check is needed in MyISAM either.
+
+ This is a cut from Docs/myisam.txt
+ Sometimes you might want to force an update without checking whether
+ another user has changed the record since you last read it. This is
+ somewhat dangerous, so it should ideally not be used. That can be
+ accomplished by wrapping the mi_update() call in two calls to mi_extra(),
+ using these functions:
+ HA_EXTRA_NO_READCHECK=5 No readcheck on update
+ HA_EXTRA_READCHECK=6 Use readcheck (def)
+
+ HA_EXTRA_FORCE_REOPEN:
+ Only used by MyISAM, called when altering table, closing tables to
+ enforce a reopen of the table files.
+
+ 4) Parameters only used by temporary tables for query processing
+ ----------------------------------------------------------------
+ HA_EXTRA_RESET_STATE:
+ Same as HA_EXTRA_RESET except that buffers are not released. If there is
+ a READ CACHE it is reinit'ed. A cache is reinit'ed to restart reading
+ or to change type of cache between READ CACHE and WRITE CACHE.
+
+ This extra function is always called immediately before calling
+ delete_all_rows on the handler for temporary tables.
+ There are cases, however, when HA_EXTRA_RESET_STATE isn't called: in
+ a similar case for a temporary table in sql_union.cc, and in two other
+ cases HA_EXTRA_NO_CACHE is called before and HA_EXTRA_WRITE_CACHE is
+ called afterwards.
+ The case with HA_EXTRA_NO_CACHE and HA_EXTRA_WRITE_CACHE means
+ disable caching, delete all rows and enable WRITE CACHE. This is
+ used for temporary tables containing distinct sums and a
+ functional group.
+
+ The only case that delete_all_rows is called on non-temporary tables
+ is in sql_delete.cc when DELETE FROM table; is called by a user.
+ In this case no special extra calls are performed before or after this
+ call.
+
+ The partition handler should not need to bother about this one. It
+ should never be called.
+
+ HA_EXTRA_NO_ROWS:
+ Don't insert rows indication to HEAP and MyISAM, only used by temporary
+ tables used in query processing.
+ Not handled by partition handler.
+
+ 5) Parameters only used by MyISAM internally
+ --------------------------------------------
+ HA_EXTRA_REINIT_CACHE:
+ This call reinitialises the READ CACHE described above if there is one
+ and otherwise the call is ignored.
+
+ We can thus safely call it on all underlying handlers if they are
+ MyISAM handlers. It is however never called so we don't handle it at all.
+ HA_EXTRA_FLUSH_CACHE:
+ Flush WRITE CACHE in MyISAM. It is only called from one place in the code.
+ This is in sql_insert.cc where it is called if the table_flags doesn't
+ contain HA_DUPP_POS. The only handler having the HA_DUPP_POS set is the
+ MyISAM handler and so the only handler not receiving this call is MyISAM.
+ Thus in effect this call is called but never used. Could be removed
+ from sql_insert.cc
+ HA_EXTRA_NO_USER_CHANGE:
+ Only used by MyISAM, never called.
+ Simulates lock_type as locked.
+ HA_EXTRA_WAIT_LOCK:
+ HA_EXTRA_WAIT_NOLOCK:
+ Only used by MyISAM, called from MyISAM handler but never from server
+ code on top of the handler.
+ Sets lock_wait on/off
+ HA_EXTRA_NO_KEYS:
+ Only used by MyISAM, only used internally in the MyISAM handler, never called
+ from server level.
+ HA_EXTRA_KEYREAD_CHANGE_POS:
+ HA_EXTRA_REMEMBER_POS:
+ HA_EXTRA_RESTORE_POS:
+ HA_EXTRA_PRELOAD_BUFFER_SIZE:
+ HA_EXTRA_CHANGE_KEY_TO_DUP:
+ HA_EXTRA_CHANGE_KEY_TO_UNIQUE:
+ Only used by MyISAM, never called.
+
+ 6) Parameters not used at all
+ -----------------------------
+ HA_EXTRA_KEY_CACHE:
+ HA_EXTRA_NO_KEY_CACHE:
+ These parameters are no longer used and could be removed.
+*/
+
+int ha_partition::extra(enum ha_extra_function operation)
+{
+ DBUG_ENTER("ha_partition:extra");
+ DBUG_PRINT("info", ("operation: %d", (int) operation));
+
+ switch (operation) {
+ /* Category 1), used by most handlers */
+ case HA_EXTRA_KEYREAD:
+ case HA_EXTRA_NO_KEYREAD:
+ case HA_EXTRA_FLUSH:
+ DBUG_RETURN(loop_extra(operation));
+
+ /* Category 2), used by non-MyISAM handlers */
+ case HA_EXTRA_IGNORE_DUP_KEY:
+ case HA_EXTRA_NO_IGNORE_DUP_KEY:
+ case HA_EXTRA_RETRIEVE_ALL_COLS:
+ case HA_EXTRA_RETRIEVE_PRIMARY_KEY:
+ case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
+ {
+ if (!m_myisam)
+ DBUG_RETURN(loop_extra(operation));
+ break;
+ }
+
+ /* Category 3), used by MyISAM handlers */
+ case HA_EXTRA_NORMAL:
+ case HA_EXTRA_QUICK:
+ case HA_EXTRA_NO_READCHECK:
+ case HA_EXTRA_PREPARE_FOR_UPDATE:
+ case HA_EXTRA_PREPARE_FOR_DELETE:
+ case HA_EXTRA_FORCE_REOPEN:
+ {
+ if (m_myisam)
+ DBUG_RETURN(loop_extra(operation));
+ break;
+ }
+ case HA_EXTRA_CACHE:
+ {
+ prepare_extra_cache(0);
+ break;
+ }
+ case HA_EXTRA_NO_CACHE:
+ {
+ m_extra_cache= FALSE;
+ m_extra_cache_size= 0;
+ DBUG_RETURN(loop_extra(operation));
+ }
+ default:
+ {
+ /* Temporary crash to discover what is wrong */
+ DBUG_ASSERT(0);
+ break;
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+
+/*
+ This will in the future be called instead of extra(HA_EXTRA_RESET) as this
+ is such a common call
+*/
+
+int ha_partition::reset(void)
+{
+ int result= 0, tmp;
+ handler **file;
+ DBUG_ENTER("ha_partition::reset");
+ file= m_file;
+ do
+ {
+ if ((tmp= (*file)->reset()))
+ result= tmp;
+ } while (*(++file));
+ DBUG_RETURN(result);
+}
+
+
+int ha_partition::extra_opt(enum ha_extra_function operation, ulong cachesize)
+{
+ DBUG_ENTER("ha_partition::extra_opt()");
+ DBUG_ASSERT(HA_EXTRA_CACHE == operation);
+ prepare_extra_cache(cachesize);
+ DBUG_RETURN(0);
+}
+
+
+void ha_partition::prepare_extra_cache(uint cachesize)
+{
+ DBUG_ENTER("ha_partition::prepare_extra_cache()");
+
+ m_extra_cache= TRUE;
+ m_extra_cache_size= cachesize;
+ if (m_part_spec.start_part != NO_CURRENT_PART_ID)
+ {
+ DBUG_ASSERT(m_part_spec.start_part == 0);
+ late_extra_cache(0);
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+int ha_partition::loop_extra(enum ha_extra_function operation)
+{
+ int result= 0, tmp;
+ handler **file;
+ DBUG_ENTER("ha_partition::loop_extra()");
+ for (file= m_file; *file; file++)
+ {
+ if ((tmp= (*file)->extra(operation)))
+ result= tmp;
+ }
+ DBUG_RETURN(result);
+}
+
+
+void ha_partition::late_extra_cache(uint partition_id)
+{
+ handler *file;
+ DBUG_ENTER("ha_partition::late_extra_cache");
+ if (!m_extra_cache)
+ DBUG_VOID_RETURN;
+ file= m_file[partition_id];
+ if (m_extra_cache_size == 0)
+ VOID(file->extra(HA_EXTRA_CACHE));
+ else
+ VOID(file->extra_opt(HA_EXTRA_CACHE, m_extra_cache_size));
+ DBUG_VOID_RETURN;
+}
+
+
+void ha_partition::late_extra_no_cache(uint partition_id)
+{
+ handler *file;
+ DBUG_ENTER("ha_partition::late_extra_no_cache");
+ if (!m_extra_cache)
+ DBUG_VOID_RETURN;
+ file= m_file[partition_id];
+ VOID(file->extra(HA_EXTRA_NO_CACHE));
+ DBUG_VOID_RETURN;
+}
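+
+/*
+ Note on the cache handling above: HA_EXTRA_CACHE is only recorded in
+ m_extra_cache/m_extra_cache_size and is then forwarded to a single
+ partition at a time through late_extra_cache()/late_extra_no_cache()
+ as rnd_init()/rnd_next() move the scan between partitions, instead of
+ letting every partition allocate a read cache up front.
+*/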
+
+
+/****************************************************************************
+ MODULE optimiser support
+****************************************************************************/
+
+const key_map *ha_partition::keys_to_use_for_scanning()
+{
+ DBUG_ENTER("ha_partition::keys_to_use_for_scanning");
+ DBUG_RETURN(m_file[0]->keys_to_use_for_scanning());
+}
+
+double ha_partition::scan_time()
+{
+ double scan_time= 0;
+ handler **file;
+ DBUG_ENTER("ha_partition::scan_time");
+
+ for (file= m_file; *file; file++)
+ scan_time+= (*file)->scan_time();
+ DBUG_RETURN(scan_time);
+}
+
+
+/*
+ This will be optimised later to include whether or not the index can
+ be used with partitioning. To achieve this we need to add another parameter
+ that specifies how many of the index fields are bound in the ranges.
+ Possibly added as a new call to handlers.
+*/
+
+double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
+{
+ DBUG_ENTER("ha_partition::read_time");
+ DBUG_RETURN(m_file[0]->read_time(index, ranges, rows));
+}
+
+/*
+ Given a starting key and an ending key, estimate the number of rows that
+ will exist between the two. end_key may be empty, in which case we
+ determine if start_key matches any rows.
+
+ Called from opt_range.cc by check_quick_keys().
+
+ monty: MUST be called for each range and added.
+ Note that MySQL will assume that if this returns 0 there are no
+ matching rows for the range!
+*/
+
+ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
+ key_range *max_key)
+{
+ ha_rows in_range= 0;
+ handler **file;
+ DBUG_ENTER("ha_partition::records_in_range");
+
+ file= m_file;
+ do
+ {
+ in_range+= (*file)->records_in_range(inx, min_key, max_key);
+ } while (*(++file));
+ DBUG_RETURN(in_range);
+}
+
+
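+/*
+ Upper bound of the number of rows: the sum over all partitions, or
+ HA_POS_ERROR (unknown) as soon as any partition reports HA_POS_ERROR.
+*/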
+ha_rows ha_partition::estimate_rows_upper_bound()
+{
+ ha_rows rows, tot_rows= 0;
+ handler **file;
+ DBUG_ENTER("ha_partition::estimate_rows_upper_bound");
+
+ file= m_file;
+ do
+ {
+ rows= (*file)->estimate_rows_upper_bound();
+ if (rows == HA_POS_ERROR)
+ DBUG_RETURN(HA_POS_ERROR);
+ tot_rows+= rows;
+ } while (*(++file));
+ DBUG_RETURN(tot_rows);
+}
+
+
+uint8 ha_partition::table_cache_type()
+{
+ DBUG_ENTER("ha_partition::table_cache_type");
+ DBUG_RETURN(m_file[0]->table_cache_type());
+}
+
+
+/****************************************************************************
+ MODULE print messages
+****************************************************************************/
+
+const char *ha_partition::index_type(uint inx)
+{
+ DBUG_ENTER("ha_partition::index_type");
+ DBUG_RETURN(m_file[0]->index_type(inx));
+}
+
+
+void ha_partition::print_error(int error, myf errflag)
+{
+ DBUG_ENTER("ha_partition::print_error");
+ /* Should probably look for my own errors first */
+ /* monty: needs to be called for the last used partition ! */
+ m_file[0]->print_error(error, errflag);
+ DBUG_VOID_RETURN;
+}
+
+
+bool ha_partition::get_error_message(int error, String *buf)
+{
+ DBUG_ENTER("ha_partition::get_error_message");
+ /* Should probably look for my own errors first */
+ /* monty: needs to be called for the last used partition ! */
+ DBUG_RETURN(m_file[0]->get_error_message(error, buf));
+}
+
+
+/****************************************************************************
+ MODULE handler characteristics
+****************************************************************************/
+/*
+ If frm_error() is called then we will use this to find out what file
+ extensions exist for the storage engine. This is also used by the default
+ rename_table and delete_table method in handler.cc.
+*/
+
+static const char *ha_partition_ext[]=
+{
+ ha_par_ext, NullS
+};
+
+const char **ha_partition::bas_ext() const
+{ return ha_partition_ext; }
+
+
+uint ha_partition::min_of_the_max_uint(uint (handler::*operator_func)(void) const) const
+{
+ handler **file;
+ uint min_of_the_max= ((*m_file)->*operator_func)();
+
+ for (file= m_file+1; *file; file++)
+ {
+ uint tmp= ((*file)->*operator_func)();
+ set_if_smaller(min_of_the_max, tmp);
+ }
+ return min_of_the_max;
+}
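+
+/*
+ The max_supported_* methods below each plug a different
+ handler::max_supported_* member function into min_of_the_max_uint(),
+ so the partition handler reports the smallest limit supported by any
+ of the underlying handlers.
+*/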
+
+
+uint ha_partition::max_supported_key_parts() const
+{
+ return min_of_the_max_uint(&handler::max_supported_key_parts);
+}
+
+
+uint ha_partition::max_supported_key_length() const
+{
+ return min_of_the_max_uint(&handler::max_supported_key_length);
+}
+
+
+uint ha_partition::max_supported_key_part_length() const
+{
+ return min_of_the_max_uint(&handler::max_supported_key_part_length);
+}
+
+
+uint ha_partition::max_supported_record_length() const
+{
+ return min_of_the_max_uint(&handler::max_supported_record_length);
+}
+
+
+uint ha_partition::max_supported_keys() const
+{
+ return min_of_the_max_uint(&handler::max_supported_keys);
+}
+
+
+uint ha_partition::extra_rec_buf_length() const
+{
+ handler **file;
+ uint max= (*m_file)->extra_rec_buf_length();
+ for (file= m_file, file++; *file; file++)
+ if (max < (*file)->extra_rec_buf_length())
+ max= (*file)->extra_rec_buf_length();
+ return max;
+}
+
+
+uint ha_partition::min_record_length(uint options) const
+{
+ handler **file;
+ uint max= (*m_file)->min_record_length(options);
+ for (file= m_file, file++; *file; file++)
+ if (max < (*file)->min_record_length(options))
+ max= (*file)->min_record_length(options);
+ return max;
+}
+
+
+/****************************************************************************
+ MODULE compare records
+****************************************************************************/
+/*
+ We get two references and need to check if those records are the same.
+ If they belong to different partitions we decide that they are not
+ the same record. Otherwise we use the particular handler to decide if
+ they are the same. Sort in partition id order if not equal.
+*/
+
+int ha_partition::cmp_ref(const byte *ref1, const byte *ref2)
+{
+ uint part_id;
+ my_ptrdiff_t diff1, diff2;
+ handler *file;
+ DBUG_ENTER("ha_partition::cmp_ref");
+ if ((ref1[0] == ref2[0]) && (ref1[1] == ref2[1]))
+ {
+ part_id= get_part_id_from_pos(ref1);
+ file= m_file[part_id];
+ DBUG_ASSERT(part_id < m_tot_parts);
+ DBUG_RETURN(file->cmp_ref((ref1 + PARTITION_BYTES_IN_POS),
+ (ref2 + PARTITION_BYTES_IN_POS)));
+ }
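+ /*
+ The refs belong to different partitions, so order them by partition id.
+ int2store() above stores the id with the low byte first, which is why
+ the high byte (ref[1]) is compared before the low byte (ref[0]).
+ */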
+ diff1= ref2[1] - ref1[1];
+ diff2= ref2[0] - ref1[0];
+ if (diff1 > 0)
+ {
+ DBUG_RETURN(-1);
+ }
+ if (diff1 < 0)
+ {
+ DBUG_RETURN(+1);
+ }
+ if (diff2 > 0)
+ {
+ DBUG_RETURN(-1);
+ }
+ DBUG_RETURN(+1);
+}
+
+
+/****************************************************************************
+ MODULE auto increment
+****************************************************************************/
+
+void ha_partition::restore_auto_increment()
+{
+ DBUG_ENTER("ha_partition::restore_auto_increment");
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ This method is called by update_auto_increment which in turn is called
+ by the individual handlers as part of write_row. We will always let
+ the first handler keep track of the auto increment value for all
+ partitions.
+*/
+
+ulonglong ha_partition::get_auto_increment()
+{
+ DBUG_ENTER("ha_partition::get_auto_increment");
+ DBUG_RETURN(m_file[0]->get_auto_increment());
+}
+
+
+/****************************************************************************
+ MODULE initialise handler for HANDLER call
+****************************************************************************/
+
+void ha_partition::init_table_handle_for_HANDLER()
+{
+ return;
+}
+
+
+/****************************************************************************
+ MODULE Partition Share
+****************************************************************************/
+/*
+ Service routines for ... methods.
+-------------------------------------------------------------------------
+ Variables for partition share methods. A hash is used to track open tables.
+ A mutex protects the hash table, and an init variable records whether the
+ hash table has been initialised.
+ There is also a constant for the partition handler file-name extension.
+*/
+
+#ifdef NOT_USED
+static HASH partition_open_tables;
+static pthread_mutex_t partition_mutex;
+static int partition_init= 0;
+
+
+/*
+ Function we use in the creation of our hash to get the key.
+*/
+static byte *partition_get_key(PARTITION_SHARE *share, uint *length,
+ my_bool not_used __attribute__ ((unused)))
+{
+ *length= share->table_name_length;
+ return (byte *) share->table_name;
+}
+
+/*
+ Example of simple lock controls. The "share" it creates is a structure we
+ will pass to each partition handler. Do you have to have one of these?
+ Well, you have pieces that are used for locking, and they are needed to
+ function.
+*/
+
+
+static PARTITION_SHARE *get_share(const char *table_name, TABLE *table)
+{
+ PARTITION_SHARE *share;
+ uint length;
+ char *tmp_name;
+
+ /*
+ So why does this exist? There is no way currently to init a storage
+ engine.
+ InnoDB and BDB both have modifications to the server to allow them to
+ do this. Since you will not want to do this, this is probably the next
+ best method.
+ */
+ if (!partition_init)
+ {
+ /* Hijack a mutex for init'ing the storage engine */
+ pthread_mutex_lock(&LOCK_mysql_create_db);
+ if (!partition_init)
+ {
+ partition_init++;
+ VOID(pthread_mutex_init(&partition_mutex, MY_MUTEX_INIT_FAST));
+ (void) hash_init(&partition_open_tables, system_charset_info, 32, 0, 0,
+ (hash_get_key) partition_get_key, 0, 0);
+ }
+ pthread_mutex_unlock(&LOCK_mysql_create_db);
+ }
+ pthread_mutex_lock(&partition_mutex);
+ length= (uint) strlen(table_name);
+
+ if (!(share= (PARTITION_SHARE *) hash_search(&partition_open_tables,
+ (byte *) table_name, length)))
+ {
+ if (!(share= (PARTITION_SHARE *)
+ my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
+ &share, sizeof(*share),
+ &tmp_name, length + 1, NullS)))
+ {
+ pthread_mutex_unlock(&partition_mutex);
+ return NULL;
+ }
+
+ share->use_count= 0;
+ share->table_name_length= length;
+ share->table_name= tmp_name;
+ strmov(share->table_name, table_name);
+ if (my_hash_insert(&partition_open_tables, (byte *) share))
+ goto error;
+ thr_lock_init(&share->lock);
+ pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST);
+ }
+ share->use_count++;
+ pthread_mutex_unlock(&partition_mutex);
+
+ return share;
+
+error:
+ pthread_mutex_unlock(&partition_mutex);
+ my_free((gptr) share, MYF(0));
+
+ return NULL;
+}
+
+
+/*
+ Free lock controls. We call this whenever we close a table. If the table
+ had the last reference to the share then we free memory associated with
+ it.
+*/
+
+static int free_share(PARTITION_SHARE *share)
+{
+ pthread_mutex_lock(&partition_mutex);
+ if (!--share->use_count)
+ {
+ hash_delete(&partition_open_tables, (byte *) share);
+ thr_lock_delete(&share->lock);
+ pthread_mutex_destroy(&share->mutex);
+ my_free((gptr) share, MYF(0));
+ }
+ pthread_mutex_unlock(&partition_mutex);
+
+ return 0;
+}
+#endif /* NOT_USED */
+#endif /* HAVE_PARTITION_DB */
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
new file mode 100644
index 00000000000..e78cff4cdbb
--- /dev/null
+++ b/sql/ha_partition.h
@@ -0,0 +1,916 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef __GNUC__
+#pragma interface /* gcc class implementation */
+#endif
+
+/*
+ PARTITION_SHARE is a structure that will be shared among all open handlers.
+ The partition handler implements the minimum of what you will probably need.
+*/
+
+typedef struct st_partition_share
+{
+ char *table_name;
+ uint table_name_length, use_count;
+ pthread_mutex_t mutex;
+ THR_LOCK lock;
+} PARTITION_SHARE;
+
+
+#define PARTITION_BYTES_IN_POS 2
+class ha_partition :public handler
+{
+private:
+ enum partition_index_scan_type
+ {
+ partition_index_read= 0,
+ partition_index_first= 1,
+ partition_index_last= 2,
+ partition_no_index_scan= 3
+ };
+ /* Data for the partition handler */
+ char *m_file_buffer; // Buffer with names
+ char *m_name_buffer_ptr; // Pointer to first partition name
+ uchar *m_engine_array; // Array of types of the handlers
+ handler **m_file; // Array of references to handler inst.
+ partition_info *m_part_info; // local reference to partition
+ byte *m_start_key_ref; // Reference of start key in current
+ // index scan info
+ Field **m_part_field_array; // Part field array locally to save acc
+ byte *m_ordered_rec_buffer; // Row and key buffer for ord. idx scan
+ KEY *m_curr_key_info; // Current index
+ byte *m_rec0; // table->record[0]
+ QUEUE queue; // Prio queue used by sorted read
+ /*
+ Since the partition handler is a handler on top of other handlers, it
+ is necessary to keep information about what the characteristics of the
+ underlying handlers are. It is not possible to keep any handler instances
+ for this since the MySQL server sometimes allocates handler objects
+ without freeing them.
+ */
+ u_long m_table_flags;
+ u_long m_low_byte_first;
+
+ uint m_tot_parts; // Total number of partitions;
+ uint m_last_part; // Last file that we update,write
+ int m_lock_type; // Remembers type of last
+ // external_lock
+ part_id_range m_part_spec; // Which parts to scan
+ uint m_scan_value; // Value passed in rnd_init
+ // call
+ uint m_ref_length; // Length of position in this
+ // handler object
+ key_range m_start_key; // index read key range
+ enum partition_index_scan_type m_index_scan_type;// What type of index
+ // scan
+ uint m_top_entry; // Which partition is to
+ // deliver next result
+ uint m_rec_length; // Local copy of record length
+
+ bool m_ordered; // Ordered/Unordered index scan
+ bool m_has_transactions; // Can we support transactions
+ bool m_pkey_is_clustered; // Is primary key clustered
+ bool m_create_handler; // Handler used to create table
+ bool m_is_sub_partitioned; // Is subpartitioned
+ bool m_ordered_scan_ongoing;
+ bool m_use_bit_array;
+
+ /*
+ We keep track if all underlying handlers are MyISAM since MyISAM has a
+ great number of extra flags not needed by other handlers.
+ */
+ bool m_myisam; // Are all underlying handlers
+ // MyISAM
+ /*
+ We keep track of InnoDB handlers below since it requires proper setting
+ of query_id in fields at index_init and index_read calls.
+ */
+ bool m_innodb; // Are all underlying handlers
+ // InnoDB
+ /*
+ When calling extra(HA_EXTRA_CACHE) we do not pass this to the underlying
+ handlers immediately. Instead we cache it and call the underlying
+ immediately before starting the scan on the partition. This is to
+ prevent allocating a READ CACHE for each partition in parallel when
+ performing a full table scan on MyISAM partitioned table.
+ This state is cleared by extra(HA_EXTRA_NO_CACHE).
+ */
+ bool m_extra_cache;
+ uint m_extra_cache_size;
+
+ void init_handler_variables();
+ /*
+ Variables for lock structures.
+ */
+ THR_LOCK_DATA lock; /* MySQL lock */
+ PARTITION_SHARE *share; /* Shared lock info */
+
+public:
+ /*
+ -------------------------------------------------------------------------
+ MODULE create/delete handler object
+ -------------------------------------------------------------------------
+ Object create/delete methods. The normal constructor is called when a
+ table object exists. There is also a constructor that takes only
+ partition information. This is used from mysql_create_table when the
+ table is to be created and the engine type is deduced to be the
+ partition handler.
+ -------------------------------------------------------------------------
+ */
+ ha_partition(TABLE * table);
+ ha_partition(partition_info * part_info);
+ ~ha_partition();
+ /*
+ A partition handler has no characteristics in itself. It only inherits
+ those from the underlying handlers. Here we set up those constants to
+ enable later calls of the methods to retrieve constants from the
+ underlying handlers. Returns false if not successful.
+ */
+ int ha_initialise();
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE meta data changes
+ -------------------------------------------------------------------------
+ Meta-data routines to CREATE, DROP and RENAME tables, often also used at
+ ALTER TABLE (update_create_info is used from ALTER TABLE and SHOW ..).
+
+ update_table_comment is used in SHOW TABLE commands to provide a
+ chance for the handler to add any interesting comments to the table
+ comment beyond the user's own comment.
+
+ create_handler_files is called before opening a new handler object
+ with openfrm to call create. It is used to create any local handler
+ objects needed when opening the object in openfrm.
+ -------------------------------------------------------------------------
+ */
+ virtual int delete_table(const char *from);
+ virtual int rename_table(const char *from, const char *to);
+ virtual int create(const char *name, TABLE * form,
+ HA_CREATE_INFO * create_info);
+ virtual int create_handler_files(const char *name);
+ virtual void update_create_info(HA_CREATE_INFO * create_info);
+ virtual char *update_table_comment(const char *comment);
+private:
+ /*
+ delete_table, rename_table and create use very similar logic, which
+ is packed into this routine.
+ */
+ uint del_ren_cre_table(const char *from,
+ const char *to= NULL,
+ TABLE * table_arg= NULL,
+ HA_CREATE_INFO * create_info= NULL);
+ /*
+ One method creates the table_name.par file containing the names of the
+ underlying partitions, their engines and the number of partitions,
+ and one method reads it back in.
+ */
+ bool create_handler_file(const char *name);
+ bool get_from_handler_file(const char *name);
+ bool new_handlers_from_part_info();
+ bool create_handlers();
+ void clear_handler_file();
+ void set_up_table_before_create(TABLE * table_arg, HA_CREATE_INFO * info,
+ uint part_id);
+ partition_element *find_partition_element(uint part_id);
+public:
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE open/close object
+ -------------------------------------------------------------------------
+ Open and close handler object to ensure that all underlying files and
+ objects allocated and deallocated for query handling are handled
+ properly.
+ -------------------------------------------------------------------------
+
+ A handler object is opened as part of its initialisation and before
+ being used for normal queries (not necessarily before meta-data changes).
+ If the object was opened it will also be closed before being deleted.
+ */
+ virtual int open(const char *name, int mode, uint test_if_locked);
+ virtual int close(void);
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE start/end statement
+ -------------------------------------------------------------------------
+ This module contains methods that are used to understand start/end of
+ statements, transaction boundaries, and aid for proper concurrency
+ control.
+ The partition handler need not implement abort and commit since this
+ will be handled by any underlying handlers implementing transactions.
+ There is only one call to each handler type involved per transaction
+ and these go directly to the handlers supporting transactions
+ (currently InnoDB, BDB and NDB).
+ -------------------------------------------------------------------------
+ */
+ virtual THR_LOCK_DATA **store_lock(THD * thd, THR_LOCK_DATA ** to,
+ enum thr_lock_type lock_type);
+ virtual int external_lock(THD * thd, int lock_type);
+ /*
+ When a table is locked, a statement is started by calling start_stmt
+ instead of external_lock.
+ */
+ virtual int start_stmt(THD * thd);
+ /*
+ Lock count is the number of locked underlying handlers (I assume)
+ */
+ virtual uint lock_count(void) const;
+ /*
+ Call to unlock rows not to be updated in transaction
+ */
+ virtual void unlock_row();
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE change record
+ -------------------------------------------------------------------------
+ This part of the handler interface is used to change the records
+ after INSERT, DELETE, UPDATE and REPLACE method calls but also for other
+ special meta-data operations such as ALTER TABLE, LOAD DATA, TRUNCATE.
+ -------------------------------------------------------------------------
+
+ These methods are used for insert (write_row), update (update_row)
+ and delete (delete_row). All methods to change data always work on
+ one row at a time. update_row and delete_row also contain the old
+ row.
+ delete_all_rows will delete all rows in the table in one call as a
+ special optimisation for DELETE from table;
+
+ Bulk inserts are supported if all underlying handlers support it.
+ start_bulk_insert and end_bulk_insert are called before and after a
+ number of calls to write_row.
+ Not yet supported, though.
+ */
+ virtual int write_row(byte * buf);
+ virtual int update_row(const byte * old_data, byte * new_data);
+ virtual int delete_row(const byte * buf);
+ virtual int delete_all_rows(void);
+ virtual void start_bulk_insert(ha_rows rows);
+ virtual int end_bulk_insert();
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE full table scan
+ -------------------------------------------------------------------------
+ This module is used for the most basic access method for any table
+ handler. This is to fetch all data through a full table scan. No
+ indexes are needed to implement this part.
+ It contains one method to start the scan (rnd_init) that can also be
+ called multiple times (typical in a nested loop join). Then proceeding
+ to the next record (rnd_next) and closing the scan (rnd_end).
+ To remember a record for later access there is a method (position)
+ and there is a method used to retrieve the record based on the stored
+ position.
+ The position can be a file position, a primary key or a ROWID, depending
+ on the handler below.
+ -------------------------------------------------------------------------
+ */
+ /*
+ Unlike index_init(), rnd_init() can be called twice
+ without rnd_end() in between (this only makes sense if scan=1).
+ In that case the second call should prepare for a new table scan
+ (e.g. if rnd_init allocates the cursor, the second call should
+ position it at the start of the table; there is no need to deallocate
+ and allocate it again).
+ */
+ virtual int rnd_init(bool scan);
+ virtual int rnd_end();
+ virtual int rnd_next(byte * buf);
+ virtual int rnd_pos(byte * buf, byte * pos);
+ virtual void position(const byte * record);
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE index scan
+ -------------------------------------------------------------------------
+ This part of the handler interface is used to perform access through
+ indexes. The interface is defined as a scan interface but the handler
+ can also use key lookup if the index is a unique index or a primary
+ key index.
+ Index scans are mostly useful for SELECT queries but are an important
+ part also of UPDATE, DELETE, REPLACE and CREATE TABLE table AS SELECT
+ and so forth.
+ Naturally an index is needed for an index scan and indexes can be either
+ ordered or hash based. Some ordered indexes can return data in index order,
+ but not necessarily all of them.
+ There are many flags that define the behavior of indexes in the
+ various handlers. These flags are checked in the optimizer module.
+ -------------------------------------------------------------------------
+
+ index_read is called to start a scan of an index. The find_flag defines
+ the semantics of the scan. These flags are defined in
+ include/my_base.h
+ index_read_idx is the same but also initializes the index before doing
+ the same thing as index_read. Thus it is similar to index_init followed
+ by index_read. This is also how we implement it.
+
+ index_read/index_read_idx also return the first row. Thus for
+ key lookups, index_read will be the only call to the handler in
+ the index scan.
+
+ index_init initializes an index before using it and index_end does
+ any end processing needed.
+ */
+ virtual int index_read(byte * buf, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ virtual int index_read_idx(byte * buf, uint idx, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ virtual int index_init(uint idx, bool sorted);
+ virtual int index_end();
+
+ /*
+ These methods are used to jump to next or previous entry in the index
+ scan. There are also methods to jump to first and last entry.
+ */
+ virtual int index_next(byte * buf);
+ virtual int index_prev(byte * buf);
+ virtual int index_first(byte * buf);
+ virtual int index_last(byte * buf);
+ virtual int index_next_same(byte * buf, const byte * key, uint keylen);
+ virtual int index_read_last(byte * buf, const byte * key, uint keylen);
+
+ /*
+ read_first_row is a virtual method but is only implemented by
+ handler.cc; no storage engine has implemented it, so neither
+ will the partition handler.
+
+ virtual int read_first_row(byte *buf, uint primary_key);
+ */
+
+ /*
+ We don't implement multi read range yet; we will do so later.
+ virtual int read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
+ KEY_MULTI_RANGE *ranges, uint range_count,
+ bool sorted, HANDLER_BUFFER *buffer);
+ virtual int read_multi_range_next(KEY_MULTI_RANGE **found_range_p);
+ */
+
+
+ virtual int read_range_first(const key_range * start_key,
+ const key_range * end_key,
+ bool eq_range, bool sorted);
+ virtual int read_range_next();
+
+private:
+ int common_index_read(byte * buf, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ int common_first_last(byte * buf);
+ int partition_scan_set_up(byte * buf, bool idx_read_flag);
+ int handle_unordered_next(byte * buf, bool next_same);
+ int handle_unordered_scan_next_partition(byte * buf);
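+ /*
+ m_ordered_rec_buffer holds one slot per partition laid out as
+ [2-byte partition id][record]. queue_buf() returns the start of a
+ partition's slot and rec_buf() skips past the partition id bytes.
+ */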
+ byte *queue_buf(uint part_id)
+ {
+ return (m_ordered_rec_buffer +
+ (part_id * (m_rec_length + PARTITION_BYTES_IN_POS)));
+ }
+ byte *rec_buf(uint part_id)
+ {
+ return (queue_buf(part_id) +
+ PARTITION_BYTES_IN_POS);
+ }
+ int handle_ordered_index_scan(byte * buf);
+ int handle_ordered_next(byte * buf, bool next_same);
+ int handle_ordered_prev(byte * buf);
+ void return_top_record(byte * buf);
+ void include_partition_fields_in_used_fields();
+public:
+ /*
+ -------------------------------------------------------------------------
+ MODULE information calls
+ -------------------------------------------------------------------------
+ These calls are used to inform the handler of specifics of the ongoing
+ scans and other actions. Most of these are used for optimisation
+ purposes.
+ -------------------------------------------------------------------------
+ */
+ virtual void info(uint);
+ virtual int extra(enum ha_extra_function operation);
+ virtual int extra_opt(enum ha_extra_function operation, ulong cachesize);
+ virtual int reset(void);
+
+private:
+ static const uint NO_CURRENT_PART_ID= 0xFFFFFFFF;
+ int loop_extra(enum ha_extra_function operation);
+ void late_extra_cache(uint partition_id);
+ void late_extra_no_cache(uint partition_id);
+ void prepare_extra_cache(uint cachesize);
+public:
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE optimiser support
+ -------------------------------------------------------------------------
+ -------------------------------------------------------------------------
+ */
+
+ /*
+ NOTE !!!!!!
+ -------------------------------------------------------------------------
+ -------------------------------------------------------------------------
+ One important part of the public handler interface that is not depicted in
+ the methods is the attribute records
+
+ which is defined in the base class. It is read directly and is
+ set by calling info(HA_STATUS_INFO)?
+ -------------------------------------------------------------------------
+ */
+
+ /*
+ keys_to_use_for_scanning can probably be implemented as the
+ intersection of all underlying handlers if mixed handlers are used.
+ This method is used to derive whether an index can be used for
+ index-only scanning when performing an ORDER BY query.
+ Only called from one place in sql_select.cc
+ */
+ virtual const key_map *keys_to_use_for_scanning();
+
+ /*
+ Called in test_quick_select to determine if indexes should be used.
+ */
+ virtual double scan_time();
+
+ /*
+ The next method will never be called if you do not implement indexes.
+ */
+ virtual double read_time(uint index, uint ranges, ha_rows rows);
+ /*
+ For the given range, estimate how many records are in this range.
+ Used by the optimiser to calculate the cost of using a particular index.
+ */
+ virtual ha_rows records_in_range(uint inx, key_range * min_key,
+ key_range * max_key);
+
+ /*
+ The upper bound of the number of records returned in a scan is the sum
+ over all underlying handlers.
+ */
+ virtual ha_rows estimate_rows_upper_bound();
+
+ /*
+ table_cache_type is implemented by the underlying handler but all
+ underlying handlers must have the same implementation for it to work.
+ */
+ virtual uint8 table_cache_type();
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE print messages
+ -------------------------------------------------------------------------
+ This module contains various methods that return text messages for
+ table types, index types and error messages.
+ -------------------------------------------------------------------------
+ */
+ /*
+ The name of the index type that will be used for display.
+ Here we must ensure that all handlers use the same index type
+ for each index created.
+ */
+ virtual const char *index_type(uint inx);
+
+ /* The name of the table type that will be used for display purposes */
+ virtual const char *table_type() const
+ { return "PARTITION"; }
+
+ /*
+ Handler specific error messages
+ */
+ virtual void print_error(int error, myf errflag);
+ virtual bool get_error_message(int error, String * buf);
+ /*
+ -------------------------------------------------------------------------
+ MODULE handler characteristics
+ -------------------------------------------------------------------------
+ This module contains a number of methods defining limitations and
+ characteristics of the handler. The partition handler will calculate
+ these characteristics based on the characteristics of the underlying handlers.
+ -------------------------------------------------------------------------
+
+ This is a list of flags that says what the storage engine
+ implements. The current table flags are documented in handler.h
+ The partition handler will support whatever the underlying handlers
+ support except when specifically mentioned below about exceptions
+ to this rule.
+
+ HA_READ_RND_SAME:
+ Not currently used. (Means that the handler supports the rnd_same() call)
+ (MyISAM, HEAP)
+
+ HA_TABLE_SCAN_ON_INDEX:
+ Used to avoid scanning full tables on an index. If this flag is set then
+ the handler always has a primary key (hidden if not defined) and this
+ index is used for scanning rather than a full table scan in all
+ situations.
+ (InnoDB, BDB, Federated)
+
+ HA_REC_NOT_IN_SEQ:
+ This flag is set for handlers that cannot guarantee that the rows are
+ returned according to incremental positions (0, 1, 2, 3...).
+ This also means that rnd_next() should return HA_ERR_RECORD_DELETED
+ if it finds a deleted row.
+ (MyISAM (not fixed length row), BDB, HEAP, NDB, InnoDB)
+
+ HA_CAN_GEOMETRY:
+ Can the storage engine handle spatial data.
+ Used to check that no spatial attributes are declared unless
+ the storage engine is capable of handling it.
+ (MyISAM)
+
+ HA_FAST_KEY_READ:
+ Setting this flag indicates that the handler is equally fast in
+ finding a row by key as by position.
+ This flag is used in a very special situation in conjunction with
+ filesort's. For further explanation see intro to init_read_record.
+ (BDB, HEAP, InnoDB)
+
+ HA_NULL_IN_KEY:
+ Are NULL values allowed in indexes.
+ If this is not allowed then it is not possible to use an index on a
+ NULLable field.
+ (BDB, HEAP, MyISAM, NDB, InnoDB)
+
+ HA_DUPP_POS:
+ Tells that the position of the conflicting duplicate-key
+ record is stored in table->file->dupp_ref. (insert uses rnd_pos() on
+ this to find the duplicated row)
+ (MyISAM)
+
+ HA_CAN_INDEX_BLOBS:
+ Is the storage engine capable of defining an index of a prefix on
+ a BLOB attribute.
+ (BDB, Federated, MyISAM, InnoDB)
+
+ HA_AUTO_PART_KEY:
+ Auto increment fields can be part of a multi-part key. For second part
+ auto-increment keys, the auto_incrementing is done in handler.cc
+ (BDB, Federated, MyISAM, NDB)
+
+ HA_REQUIRE_PRIMARY_KEY:
+ Can't define a table without primary key (and cannot handle a table
+ with hidden primary key)
+ (No handler has this limitation currently)
+
+ HA_NOT_EXACT_COUNT:
+ Does the counter of records after the info call specify an exact
+ value or not. If it doesn't this flag is set.
+ Only MyISAM and HEAP use an exact count.
+ (MyISAM, HEAP, BDB, InnoDB, NDB, Federated)
+
+ HA_CAN_INSERT_DELAYED:
+ Can the storage engine support delayed inserts.
+ To start with the partition handler will not support delayed inserts.
+ Further investigation needed.
+ (HEAP, MyISAM)
+
+ HA_PRIMARY_KEY_IN_READ_INDEX:
+ This parameter is set when the handler will also return the primary key
+ when doing read-only-key on another index.
+
+ HA_NOT_DELETE_WITH_CACHE:
+ Seems to be an old MyISAM feature that is no longer used. No handler
+ has it defined but it is checked in init_read_record.
+ Further investigation needed.
+ (No handler defines it)
+
+ HA_NO_PREFIX_CHAR_KEYS:
+ Indexes on prefixes of character fields are not allowed.
+ (NDB)
+
+ HA_CAN_FULLTEXT:
+ Does the storage engine support fulltext indexes
+ The partition handler will start by not supporting fulltext indexes.
+ (MyISAM)
+
+ HA_CAN_SQL_HANDLER:
+ Can the HANDLER interface in the MySQL API be used towards this
+ storage engine.
+ (MyISAM, InnoDB)
+
+ HA_NO_AUTO_INCREMENT:
+ Set if the storage engine does not support auto increment fields.
+ (Currently not set by any handler)
+
+ HA_HAS_CHECKSUM:
+ Special MyISAM feature. Has special SQL support in CREATE TABLE.
+ No special handling needed by partition handler.
+ (MyISAM)
+
+ HA_FILE_BASED:
+ Should file names always be in lower case (used by engines
+ that map table names to file names).
+ Since the partition handler has a local file this flag is set.
+ (BDB, Federated, MyISAM)
+
+ HA_CAN_BIT_FIELD:
+ Is the storage engine capable of handling bit fields?
+ (MyISAM, NDB)
+
+ HA_NEED_READ_RANGE_BUFFER:
+ Is Read Multi-Range supported => need multi read range buffer
+ This parameter specifies whether a buffer for read multi range
+ is needed by the handler. Whether the handler supports this
+ feature or not depends on whether the handler implements
+ read_multi_range* calls or not. The only handler currently
+ supporting this feature is NDB so the partition handler need
+ not handle this call. There are methods in handler.cc that will
+ transfer those calls into index_read and other calls in the
+ index scan module.
+ (NDB)
+ */
+ virtual ulong table_flags() const
+ { return m_table_flags; }
+ /*
+ HA_CAN_PARTITION:
+ Used by storage engines that can handle partitioning without this
+ partition handler
+ (Partition, NDB)
+
+ HA_CAN_UPDATE_PARTITION_KEY:
+ Set if the handler can update fields that are part of the partition
+ function.
+
+ HA_CAN_PARTITION_UNIQUE:
+ Set if the handler can handle unique indexes where the fields of the
+ unique key are not part of the fields of the partition function. Thus
+ a unique key can be set on all fields.
+ */
+ virtual ulong partition_flags() const
+ { return HA_CAN_PARTITION; }
+
+ /*
+ This is a bitmap of flags that says how the storage engine
+ implements indexes. The current index flags are documented in
+ handler.h. If you do not implement indexes, just return zero
+ here.
+
+ part is the key part to check. The first key part is 0.
+ If all_parts is set, MySQL wants to know the flags for the combined
+ index up to and including 'part'.
+
+ HA_READ_NEXT:
+ Does the index support read next? This is assumed in the server
+ code and never checked, so all indexes must support this.
+ Note that the handler can be used even if it doesn't have any index.
+ (BDB, HEAP, MyISAM, Federated, NDB, InnoDB)
+
+ HA_READ_PREV:
+ Can the index be used to scan backwards.
+ (BDB, HEAP, MyISAM, NDB, InnoDB)
+
+ HA_READ_ORDER:
+ Can the index deliver its record in index order. Typically true for
+ all ordered indexes and not true for hash indexes.
+ In a first step this is not true for the partition handler until a merge
+ sort has been implemented in the partition handler.
+ Used to set the keymap part_of_sortkey.
+ This keymap is only used to find indexes usable for resolving an ORDER BY
+ in the query. Thus in most cases index_read will work just fine without
+ ordered result production. When this flag is set it is however safe to
+ order all output started by index_read since most engines do this. With
+ read_multi_range calls there is a specific flag for ordered or unordered
+ results, so in those cases ordering of the index output can be avoided.
+ (BDB, InnoDB, HEAP, MyISAM, NDB)
+
+ HA_READ_RANGE:
+ Specify whether index can handle ranges, typically true for all
+ ordered indexes and not true for hash indexes.
+ Used by the optimiser to check if ranges (such as key >= 5) can be optimised
+ by index.
+ (BDB, InnoDB, NDB, MyISAM, HEAP)
+
+ HA_ONLY_WHOLE_INDEX:
+ Can't use part key searches. This is typically true for hash indexes
+ and typically not true for ordered indexes.
+ (Federated, NDB, HEAP)
+
+ HA_KEYREAD_ONLY:
+ Does the storage engine support index-only scans on this index.
+ Enables use of HA_EXTRA_KEYREAD and HA_EXTRA_NO_KEYREAD.
+ Used to set key_map keys_for_keyread and to check in the optimiser for
+ index-only scans. When doing a read under HA_EXTRA_KEYREAD the handler
+ only has to fill in the columns the key covers. If
+ HA_PRIMARY_KEY_IN_READ_INDEX is set then also the PRIMARY KEY columns
+ must be updated in the row.
+ (BDB, InnoDB, MyISAM)
+ */
+ virtual ulong index_flags(uint inx, uint part, bool all_parts) const
+ {
+ return m_file[0]->index_flags(inx, part, all_parts);
+ }
+
+ /*
+ extensions of table handler files
+ */
+ virtual const char **bas_ext() const;
+ /*
+ unireg.cc will call the following to make sure that the storage engine
+ can handle the data it is about to send.
+
+ The maximum supported values are the minimum over all handlers in the table.
+ */
+ uint min_of_the_max_uint(uint (handler::*operator_func)(void) const) const;
+ virtual uint max_supported_record_length() const;
+ virtual uint max_supported_keys() const;
+ virtual uint max_supported_key_parts() const;
+ virtual uint max_supported_key_length() const;
+ virtual uint max_supported_key_part_length() const;
+
+ /*
+ All handlers in a partitioned table must have the same low_byte_first
+ */
+ virtual bool low_byte_first() const
+ { return m_low_byte_first; }
+
+ /*
+ The extra record buffer length is the maximum needed by all handlers.
+ The minimum record length is the maximum of all involved handlers.
+ */
+ virtual uint extra_rec_buf_length() const;
+ virtual uint min_record_length(uint options) const;
+
+ /*
+ Transactions on the table are supported if all handlers below support
+ transactions.
+ */
+ virtual bool has_transactions()
+ { return m_has_transactions; }
+
+ /*
+ 'Primary key is clustered' can only be true if all underlying handlers have
+ this feature.
+ */
+ virtual bool primary_key_is_clustered()
+ { return m_pkey_is_clustered; }
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE compare records
+ -------------------------------------------------------------------------
+ cmp_ref checks if two references are the same. For most handlers this is
+ a simple memcmp of the reference. However some handlers use primary key
+ as reference and this can be the same even if memcmp says they are
+ different. This is due to character sets and end spaces and so forth.
+ For the partition handler the reference consists of two bytes providing the
+ partition identity of the referred record followed by the reference of the
+ underlying handler.
+ Thus cmp_ref for the partition handler always reports records in different
+ partitions as not equal and uses cmp_ref of the underlying handler
+ to check whether the rest of the reference part is also the same.
+ -------------------------------------------------------------------------
+ */
+ virtual int cmp_ref(const byte * ref1, const byte * ref2);
+ /*
+ -------------------------------------------------------------------------
+ MODULE auto increment
+ -------------------------------------------------------------------------
+ This module is used to handle the support of auto increments.
+
+ This variable in the handler is used as part of the handler interface.
+ It is maintained by the parent handler object and should not be
+ touched by child handler objects (see handler.cc for its use).
+
+ auto_increment_column_changed
+ -------------------------------------------------------------------------
+ */
+ virtual void restore_auto_increment();
+ virtual ulonglong get_auto_increment();
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE initialise handler for HANDLER call
+ -------------------------------------------------------------------------
+ This method is a special InnoDB method called before a HANDLER query.
+ -------------------------------------------------------------------------
+ */
+ virtual void init_table_handle_for_HANDLER();
+
+ /*
+ The remainder of this file defines the handler methods not implemented
+ by the partition handler
+ */
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE foreign key support
+ -------------------------------------------------------------------------
+ The following methods are used to implement foreign keys as supported by
+ InnoDB. Implement this ??
+ get_foreign_key_create_info is used by SHOW CREATE TABLE to get a textual
+ description of the CREATE TABLE part that defines FOREIGN KEYs.
+ free_foreign_key_create_info is used to free the memory area that provided
+ this description.
+ -------------------------------------------------------------------------
+
+ virtual char* get_foreign_key_create_info()
+ virtual void free_foreign_key_create_info(char* str)
+
+ virtual int get_foreign_key_list(THD *thd,
+ List<FOREIGN_KEY_INFO> *f_key_list)
+ virtual uint referenced_by_foreign_key()
+ */
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE fulltext index
+ -------------------------------------------------------------------------
+ Fulltext stuff not yet.
+ -------------------------------------------------------------------------
+ virtual int ft_init() { return HA_ERR_WRONG_COMMAND; }
+ virtual FT_INFO *ft_init_ext(uint flags,uint inx,const byte *key,
+ uint keylen)
+ { return NULL; }
+ virtual int ft_read(byte *buf) { return HA_ERR_WRONG_COMMAND; }
+ */
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE restart full table scan at position (MyISAM)
+ -------------------------------------------------------------------------
+ The following method is only used by MyISAM when it is used for
+ temporary tables in a join.
+ virtual int restart_rnd_next(byte *buf, byte *pos);
+ */
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE on-line ALTER TABLE
+ -------------------------------------------------------------------------
+ These methods are in the handler interface but never used (yet)
+ They are to be used by on-line alter table add/drop index:
+ -------------------------------------------------------------------------
+ virtual ulong index_ddl_flags(KEY *wanted_index) const
+ virtual int add_index(TABLE *table_arg,KEY *key_info,uint num_of_keys);
+ virtual int drop_index(TABLE *table_arg,uint *key_num,uint num_of_keys);
+ */
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE tablespace support
+ -------------------------------------------------------------------------
+ Admin of table spaces is not applicable to the partition handler (InnoDB)
+ This means that the following method is not implemented:
+ -------------------------------------------------------------------------
+ virtual int discard_or_import_tablespace(my_bool discard)
+ */
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE admin MyISAM
+ -------------------------------------------------------------------------
+ Admin commands not supported currently (almost purely MyISAM routines)
+ This means that the following methods are not implemented:
+ -------------------------------------------------------------------------
+
+ virtual int check(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int backup(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int restore(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int repair(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int optimize(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int analyze(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int preload_keys(THD *thd, HA_CHECK_OPT *check_opt);
+ virtual bool check_and_repair(THD *thd);
+ virtual int dump(THD* thd, int fd = -1);
+ virtual int net_read_dump(NET* net);
+ virtual uint checksum() const;
+ virtual bool is_crashed() const;
+ virtual bool auto_repair() const;
+
+ -------------------------------------------------------------------------
+ MODULE enable/disable indexes
+ -------------------------------------------------------------------------
+ Enable/Disable Indexes are not supported currently (Heap, MyISAM)
+ This means that the following methods are not implemented:
+ -------------------------------------------------------------------------
+ virtual int disable_indexes(uint mode);
+ virtual int enable_indexes(uint mode);
+ virtual int indexes_are_disabled(void);
+ */
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE append_create_info
+ -------------------------------------------------------------------------
+ append_create_info is only used by MyISAM MERGE tables and the partition
+ handler will not support this handler as underlying handler.
+ Implement this??
+ -------------------------------------------------------------------------
+ virtual void append_create_info(String *packet)
+ */
+};
diff --git a/sql/handler.cc b/sql/handler.cc
index 0d0f9a75e52..c2a138e7013 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -34,6 +34,9 @@
#ifdef HAVE_EXAMPLE_DB
#include "examples/ha_example.h"
#endif
+#ifdef HAVE_PARTITION_DB
+#include "ha_partition.h"
+#endif
#ifdef HAVE_ARCHIVE_DB
#include "examples/ha_archive.h"
#endif
@@ -170,7 +173,13 @@ enum db_type ha_checktype(THD *thd, enum db_type database_type,
{
if (ha_storage_engine_is_enabled(database_type))
return database_type;
-
+#ifdef HAVE_PARTITION_DB
+ /*
+ The partition handler is not in the list of handlers shown since it is
+ an internal handler
+ */
+ if (database_type == DB_TYPE_PARTITION_DB)
+ return database_type;
+#endif
if (no_substitute)
{
if (report_error)
@@ -203,47 +212,66 @@ enum db_type ha_checktype(THD *thd, enum db_type database_type,
handler *get_new_handler(TABLE *table, enum db_type db_type)
{
+ handler *file;
switch (db_type) {
#ifndef NO_HASH
case DB_TYPE_HASH:
- return new ha_hash(table);
+ file= new ha_hash(table);
+ break;
#endif
case DB_TYPE_MRG_ISAM:
- return new ha_myisammrg(table);
+ file= new ha_myisammrg(table);
+ break;
#ifdef HAVE_BERKELEY_DB
case DB_TYPE_BERKELEY_DB:
- return new ha_berkeley(table);
+ file= new ha_berkeley(table);
+ break;
#endif
#ifdef HAVE_INNOBASE_DB
case DB_TYPE_INNODB:
- return new ha_innobase(table);
+ file= new ha_innobase(table);
+ break;
#endif
#ifdef HAVE_EXAMPLE_DB
case DB_TYPE_EXAMPLE_DB:
- return new ha_example(table);
+ file= new ha_example(table);
+ break;
+#endif
+#ifdef HAVE_PARTITION_DB
+ case DB_TYPE_PARTITION_DB:
+ {
+ file= new ha_partition(table);
+ break;
+ }
#endif
#ifdef HAVE_ARCHIVE_DB
case DB_TYPE_ARCHIVE_DB:
- return new ha_archive(table);
+ file= new ha_archive(table);
+ break;
#endif
#ifdef HAVE_BLACKHOLE_DB
case DB_TYPE_BLACKHOLE_DB:
- return new ha_blackhole(table);
+ file= new ha_blackhole(table);
+ break;
#endif
#ifdef HAVE_FEDERATED_DB
case DB_TYPE_FEDERATED_DB:
- return new ha_federated(table);
+ file= new ha_federated(table);
+ break;
#endif
#ifdef HAVE_CSV_DB
case DB_TYPE_CSV_DB:
- return new ha_tina(table);
+ file= new ha_tina(table);
+ break;
#endif
#ifdef HAVE_NDBCLUSTER_DB
case DB_TYPE_NDBCLUSTER:
- return new ha_ndbcluster(table);
+ file= new ha_ndbcluster(table);
+ break;
#endif
case DB_TYPE_HEAP:
- return new ha_heap(table);
+ file= new ha_heap(table);
+ break;
default: // should never happen
{
enum db_type def=(enum db_type) current_thd->variables.table_type;
@@ -253,12 +281,46 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
}
/* Fall back to MyISAM */
case DB_TYPE_MYISAM:
- return new ha_myisam(table);
+ file= new ha_myisam(table);
+ break;
case DB_TYPE_MRG_MYISAM:
- return new ha_myisammrg(table);
+ file= new ha_myisammrg(table);
+ break;
+ }
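+ /*
+ Every newly created handler is initialised here; if initialisation
+ fails the object is deleted and NULL is returned.
+ */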
+ if (file)
+ {
+ if (file->ha_initialise())
+ {
+ delete file;
+ file=0;
+ }
}
+ return file;
}
+
+#ifdef HAVE_PARTITION_DB
+handler *get_ha_partition(partition_info *part_info)
+{
+ ha_partition *partition;
+ DBUG_ENTER("get_ha_partition");
+ if ((partition= new ha_partition(part_info)))
+ {
+ if (partition->ha_initialise())
+ {
+ delete partition;
+ partition= 0;
+ }
+ }
+ else
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), sizeof(ha_partition));
+ }
+ DBUG_RETURN(((handler*) partition));
+}
+#endif
+
+
/*
Register handler error messages for use with my_error().
@@ -1365,6 +1427,111 @@ int handler::ha_open(const char *name, int mode, int test_if_locked)
DBUG_RETURN(error);
}
+int handler::ha_initialise()
+{
+ DBUG_ENTER("ha_initialise");
+ if (table && table->s->fields &&
+ ha_allocate_read_write_set(table->s->fields))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+}
+
+int handler::ha_allocate_read_write_set(ulong no_fields)
+{
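+ /* One bit per field plus bit 0, rounded up to whole 32-bit words (4 bytes each) */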
+ uint bitmap_size= 4*(((no_fields+1)+31)/32);
+ uint32 *read_buf, *write_buf;
+#ifndef DEBUG_OFF
+ my_bool r;
+#endif
+ DBUG_ENTER("ha_allocate_read_write_set");
+ DBUG_PRINT("enter", ("no_fields = %d", no_fields));
+
+ if (table)
+ {
+ if (table->read_set == NULL)
+ {
+ read_set= (MY_BITMAP*)sql_alloc(sizeof(MY_BITMAP));
+ write_set= (MY_BITMAP*)sql_alloc(sizeof(MY_BITMAP));
+ read_buf= (uint32*)sql_alloc(bitmap_size);
+ write_buf= (uint32*)sql_alloc(bitmap_size);
+ if (!read_set || !write_set || !read_buf || !write_buf)
+ {
+ ha_deallocate_read_write_set();
+ DBUG_RETURN(TRUE);
+ }
+#ifndef DEBUG_OFF
+ r =
+#endif
+ bitmap_init(read_set, read_buf, no_fields+1, FALSE);
+ DBUG_ASSERT(!r /*bitmap_init(read_set...)*/);
+#ifndef DEBUG_OFF
+ r =
+#endif
+ bitmap_init(write_set, write_buf, no_fields+1, FALSE);
+ DBUG_ASSERT(!r /*bitmap_init(write_set...)*/);
+ table->read_set= read_set;
+ table->write_set= write_set;
+ ha_clear_all_set();
+ }
+ else
+ {
+ read_set= table->read_set;
+ write_set= table->write_set;
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
+
+void handler::ha_deallocate_read_write_set()
+{
+ DBUG_ENTER("ha_deallocate_read_write_set");
+ read_set=write_set=0;
+ DBUG_VOID_RETURN;
+}
+
+void handler::ha_clear_all_set()
+{
+ DBUG_ENTER("ha_clear_all_set");
+ bitmap_clear_all(read_set);
+ bitmap_clear_all(write_set);
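+ /* Field numbers are 1-based; bit 0 is kept set after clearing */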
+ bitmap_set_bit(read_set, 0);
+ bitmap_set_bit(write_set, 0);
+ DBUG_VOID_RETURN;
+}
+
+int handler::ha_retrieve_all_cols()
+{
+ DBUG_ENTER("handler::ha_retrieve_all_cols");
+ bitmap_set_all(read_set);
+ DBUG_RETURN(0);
+}
+
+int handler::ha_retrieve_all_pk()
+{
+ DBUG_ENTER("ha_retrieve_all_pk");
+ ha_set_primary_key_in_read_set();
+ DBUG_RETURN(0);
+}
+
+void handler::ha_set_primary_key_in_read_set()
+{
+ ulong prim_key= table->s->primary_key;
+ DBUG_ENTER("handler::ha_set_primary_key_in_read_set");
+ DBUG_PRINT("info", ("Primary key = %d", prim_key));
+ if (prim_key != MAX_KEY)
+ {
+ KEY_PART_INFO *key_part= table->key_info[prim_key].key_part;
+ KEY_PART_INFO *key_part_end= key_part +
+ table->key_info[prim_key].key_parts;
+ for (;key_part != key_part_end; ++key_part)
+ ha_set_bit_in_read_set(key_part->fieldnr);
+ }
+ DBUG_VOID_RETURN;
+}
+
+
/*
Read first row (only) from a table
This is never called for InnoDB or BDB tables, as these table types
@@ -1393,7 +1560,7 @@ int handler::read_first_row(byte * buf, uint primary_key)
else
{
/* Find the first row through the primary key */
- (void) ha_index_init(primary_key);
+ (void) ha_index_init(primary_key, 0);
error=index_first(buf);
(void) ha_index_end();
}
@@ -1577,7 +1744,7 @@ ulonglong handler::get_auto_increment()
int error;
(void) extra(HA_EXTRA_KEYREAD);
- index_init(table->s->next_number_index);
+ index_init(table->s->next_number_index, 1);
if (!table->s->next_number_key_offset)
{ // Autoincrement at key-start
error=index_last(table->record[1]);
@@ -2401,7 +2568,7 @@ int handler::compare_key(key_range *range)
int handler::index_read_idx(byte * buf, uint index, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{
- int error= ha_index_init(index);
+ int error= ha_index_init(index, 0);
if (!error)
error= index_read(buf, key, key_len, find_flag);
if (!error)
diff --git a/sql/handler.h b/sql/handler.h
index 02b2353b890..39da4f65272 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -89,6 +89,11 @@
#define HA_NEED_READ_RANGE_BUFFER (1 << 29) /* for read_multi_range */
#define HA_ANY_INDEX_MAY_BE_UNIQUE (1 << 30)
+/* Flags for partition handlers */
+#define HA_CAN_PARTITION (1 << 0) /* Partition support */
+#define HA_CAN_UPDATE_PARTITION_KEY (1 << 1)
+#define HA_CAN_PARTITION_UNIQUE (1 << 2)
+
/* bits in index_flags(index_number) for what you can do with index */
#define HA_READ_NEXT 1 /* TODO really use this flag */
@@ -172,6 +177,7 @@ enum db_type
DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_CSV_DB,
DB_TYPE_FEDERATED_DB,
DB_TYPE_BLACKHOLE_DB,
+ DB_TYPE_PARTITION_DB,
DB_TYPE_DEFAULT // Must be last
};
@@ -369,6 +375,208 @@ typedef struct st_thd_trans
enum enum_tx_isolation { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED,
ISO_REPEATABLE_READ, ISO_SERIALIZABLE};
+
+typedef struct {
+ uint32 start_part;
+ uint32 end_part;
+ bool use_bit_array;
+} part_id_range;
+/**
+ * An enum and a struct to handle partitioning and subpartitioning.
+ */
+enum partition_type {
+ NOT_A_PARTITION= 0,
+ RANGE_PARTITION,
+ HASH_PARTITION,
+ LIST_PARTITION
+};
+
+#define UNDEF_NODEGROUP 65535
+class Item;
+
+class partition_element :public Sql_alloc {
+public:
+ List<partition_element> subpartitions;
+ List<Item> list_expr_list;
+ ulonglong part_max_rows;
+ ulonglong part_min_rows;
+ char *partition_name;
+ char *tablespace_name;
+ Item* range_expr;
+ char* part_comment;
+ char* data_file_name;
+ char* index_file_name;
+ enum db_type engine_type;
+ uint16 nodegroup_id;
+
+ partition_element()
+ : part_max_rows(0), part_min_rows(0), partition_name(NULL),
+ tablespace_name(NULL), range_expr(NULL), part_comment(NULL),
+ data_file_name(NULL), index_file_name(NULL),
+ engine_type(DB_TYPE_UNKNOWN), nodegroup_id(UNDEF_NODEGROUP)
+ {
+ subpartitions.empty();
+ list_expr_list.empty();
+ }
+ ~partition_element() {}
+};
+
+typedef struct {
+ longlong list_value;
+ uint partition_id;
+} LIST_PART_ENTRY;
+enum Item_result;
+
+class partition_info;
+
+typedef bool (*get_part_id_func)(partition_info *part_info,
+ uint32 *part_id);
+typedef uint32 (*get_subpart_id_func)(partition_info *part_info);
+
+class partition_info :public Sql_alloc {
+public:
+ /*
+ * Here comes a set of definitions needed for partitioned table handlers.
+ */
+ List<partition_element> partitions;
+
+ List<char> part_field_list;
+ List<char> subpart_field_list;
+
+ get_part_id_func get_partition_id;
+ get_part_id_func get_part_partition_id;
+ get_subpart_id_func get_subpartition_id;
+
+ Field **part_field_array;
+ Field **subpart_field_array;
+ Field **full_part_field_array;
+
+ Item *part_expr;
+ Item *subpart_expr;
+
+ Item *item_free_list;
+
+ union {
+ longlong *range_int_array;
+ LIST_PART_ENTRY *list_array;
+ };
+ char* part_info_string;
+
+ char *part_func_string;
+ char *subpart_func_string;
+
+ partition_element *curr_part_elem;
+ partition_element *current_partition;
+ /*
+ These key_map's are used for Partitioning to enable quick decisions
+ on whether we can derive more information about which partition to
+ scan just by looking at what index is used.
+ */
+ key_map all_fields_in_PF, all_fields_in_PPF, all_fields_in_SPF;
+ key_map some_fields_in_PF;
+
+ enum db_type default_engine_type;
+ Item_result part_result_type;
+ partition_type part_type;
+ partition_type subpart_type;
+
+ uint part_info_len;
+ uint part_func_len;
+ uint subpart_func_len;
+
+ uint no_full_parts;
+ uint no_parts;
+ uint no_subparts;
+ uint count_curr_parts;
+ uint count_curr_subparts;
+
+ uint part_error_code;
+
+ uint no_list_values;
+
+ uint no_part_fields;
+ uint no_subpart_fields;
+ uint no_full_part_fields;
+
+ uint16 linear_hash_mask;
+
+ bool use_default_partitions;
+ bool use_default_subpartitions;
+ bool defined_max_value;
+ bool list_of_part_fields;
+ bool list_of_subpart_fields;
+ bool linear_hash_ind;
+
+ partition_info()
+ : get_partition_id(NULL), get_part_partition_id(NULL),
+ get_subpartition_id(NULL),
+ part_field_array(NULL), subpart_field_array(NULL),
+ full_part_field_array(NULL),
+ part_expr(NULL), subpart_expr(NULL), item_free_list(NULL),
+ list_array(NULL),
+ part_info_string(NULL),
+ part_func_string(NULL), subpart_func_string(NULL),
+ curr_part_elem(NULL), current_partition(NULL),
+ default_engine_type(DB_TYPE_UNKNOWN),
+ part_result_type(INT_RESULT),
+ part_type(NOT_A_PARTITION), subpart_type(NOT_A_PARTITION),
+ part_info_len(0), part_func_len(0), subpart_func_len(0),
+ no_full_parts(0), no_parts(0), no_subparts(0),
+ count_curr_parts(0), count_curr_subparts(0), part_error_code(0),
+ no_list_values(0), no_part_fields(0), no_subpart_fields(0),
+ no_full_part_fields(0), linear_hash_mask(0),
+ use_default_partitions(TRUE),
+ use_default_subpartitions(TRUE), defined_max_value(FALSE),
+ list_of_part_fields(FALSE), list_of_subpart_fields(FALSE),
+ linear_hash_ind(FALSE)
+ {
+ all_fields_in_PF.clear_all();
+ all_fields_in_PPF.clear_all();
+ all_fields_in_SPF.clear_all();
+ some_fields_in_PF.clear_all();
+ partitions.empty();
+ part_field_list.empty();
+ subpart_field_list.empty();
+ }
+ ~partition_info() {}
+};
+
+
+#ifdef HAVE_PARTITION_DB
+/*
+ Answers the question whether subpartitioning is used for a certain table
+ SYNOPSIS
+ is_sub_partitioned()
+ part_info A reference to the partition_info struct
+ RETURN VALUE
+ Returns true if subpartitioning is used and false otherwise
+ DESCRIPTION
+ A routine to check for subpartitioning for improved readability of code
+*/
+inline
+bool is_sub_partitioned(partition_info *part_info)
+{ return (part_info->subpart_type == NOT_A_PARTITION ? FALSE : TRUE); }
+
+
+/*
+ Returns the total number of partitions on the leaf level.
+ SYNOPSIS
+ get_tot_partitions()
+ part_info A reference to the partition_info struct
+ RETURN VALUE
+ Returns the number of partitions
+ DESCRIPTION
+ A routine to compute the number of partitions for improved readability
+ of code
+*/
+inline
+uint get_tot_partitions(partition_info *part_info)
+{
+ return part_info->no_parts *
+ (is_sub_partitioned(part_info) ? part_info->no_subparts : 1);
+}
+#endif
+
typedef struct st_ha_create_information
{
CHARSET_INFO *table_charset, *default_table_charset;
@@ -417,6 +625,31 @@ typedef struct st_ha_check_opt
} HA_CHECK_OPT;
+#ifdef HAVE_PARTITION_DB
+handler *get_ha_partition(partition_info *part_info);
+int get_parts_for_update(const byte *old_data, byte *new_data,
+ const byte *rec0, partition_info *part_info,
+ uint32 *old_part_id, uint32 *new_part_id);
+int get_part_for_delete(const byte *buf, const byte *rec0,
+ partition_info *part_info, uint32 *part_id);
+bool check_partition_info(partition_info *part_info,enum db_type eng_type,
+ handler *file, ulonglong max_rows);
+bool fix_partition_func(THD *thd, const char *name, TABLE *table);
+char *generate_partition_syntax(partition_info *part_info,
+ uint *buf_length, bool use_sql_alloc);
+bool partition_key_modified(TABLE *table, List<Item> &fields);
+void get_partition_set(const TABLE *table, byte *buf, const uint index,
+ const key_range *key_spec,
+ part_id_range *part_spec);
+void get_full_part_id_from_key(const TABLE *table, byte *buf,
+ KEY *key_info,
+ const key_range *key_spec,
+ part_id_range *part_spec);
+bool mysql_unpack_partition(File file, THD *thd, uint part_info_len,
+ TABLE *table);
+#endif
+
+
/*
This is a buffer area that the handler can use to store rows.
'end_of_used_area' should be kept updated after calls to
@@ -434,10 +667,13 @@ typedef struct st_handler_buffer
class handler :public Sql_alloc
{
+#ifdef HAVE_PARTITION_DB
+ friend class ha_partition;
+#endif
protected:
struct st_table *table; /* The table definition */
- virtual int index_init(uint idx) { active_index=idx; return 0; }
+ virtual int index_init(uint idx, bool sorted) { active_index=idx; return 0; }
virtual int index_end() { active_index=MAX_KEY; return 0; }
/*
rnd_init() can be called two times without rnd_end() in between
@@ -449,6 +685,8 @@ class handler :public Sql_alloc
virtual int rnd_init(bool scan) =0;
virtual int rnd_end() { return 0; }
+private:
+ virtual int reset() { return extra(HA_EXTRA_RESET); }
public:
const handlerton *ht; /* storage engine of this handler */
byte *ref; /* Pointer to current row */
@@ -491,6 +729,8 @@ public:
bool auto_increment_column_changed;
bool implicit_emptied; /* Can be !=0 only if HEAP */
const COND *pushed_cond;
+ MY_BITMAP *read_set;
+ MY_BITMAP *write_set;
handler(const handlerton *ht_arg, TABLE *table_arg) :table(table_arg),
ht(ht_arg),
@@ -503,7 +743,12 @@ public:
raid_type(0), ft_handler(0), inited(NONE), implicit_emptied(0),
pushed_cond(NULL)
{}
- virtual ~handler(void) { /* TODO: DBUG_ASSERT(inited == NONE); */ }
+ virtual ~handler(void)
+ {
+ ha_deallocate_read_write_set();
+ /* TODO: DBUG_ASSERT(inited == NONE); */
+ }
+ virtual int ha_initialise();
int ha_open(const char *name, int mode, int test_if_locked);
bool update_auto_increment();
virtual void print_error(int error, myf errflag);
@@ -516,7 +761,7 @@ public:
{ return rows2double(ranges+rows); }
virtual const key_map *keys_to_use_for_scanning() { return &key_map_empty; }
virtual bool has_transactions(){ return 0;}
- virtual uint extra_rec_buf_length() { return 0; }
+ virtual uint extra_rec_buf_length() const { return 0; }
/*
Return upper bound of current number of records in the table
@@ -535,12 +780,12 @@ public:
virtual const char *index_type(uint key_number) { DBUG_ASSERT(0); return "";}
- int ha_index_init(uint idx)
+ int ha_index_init(uint idx, bool sorted)
{
DBUG_ENTER("ha_index_init");
DBUG_ASSERT(inited==NONE);
inited=INDEX;
- DBUG_RETURN(index_init(idx));
+ DBUG_RETURN(index_init(idx, sorted));
}
int ha_index_end()
{
@@ -563,11 +808,140 @@ public:
inited=NONE;
DBUG_RETURN(rnd_end());
}
+ int ha_reset()
+ {
+ DBUG_ENTER("ha_reset");
+ ha_clear_all_set();
+ DBUG_RETURN(reset());
+ }
+
/* this is necessary in many places, e.g. in HANDLER command */
int ha_index_or_rnd_end()
{
return inited == INDEX ? ha_index_end() : inited == RND ? ha_rnd_end() : 0;
}
+ /*
+ These are a set of routines used to enable handlers to only read/write
+ partial lists of the fields in the table. The bit vector is maintained
+ by the server part and is used by the handler at calls to read/write
+ data in the table.
+ It replaces the use of query ids for this purpose. The benefit is that
+ the handler can also set bits in the read/write set if it has special
+ needs, and it is also easy for other parts of the server to interact
+ with the handler (e.g. the replication part for row-level logging).
+ The routines are all part of the general handler and cannot be
+ overridden by a specific handler. A handler can, however, set/reset
+ bits by calling these routines.
+
+ The methods ha_retrieve_all_cols and ha_retrieve_all_pk are made
+ virtual to handle InnoDB specifics. If InnoDB doesn't need the
+ extra parameters HA_EXTRA_RETRIEVE_ALL_COLS and
+ HA_EXTRA_RETRIEVE_PRIMARY_KEY anymore, then these methods need not
+ be virtual any longer.
+ */
+ virtual int ha_retrieve_all_cols();
+ virtual int ha_retrieve_all_pk();
+ void ha_set_all_bits_in_read_set()
+ {
+ DBUG_ENTER("ha_set_all_bits_in_read_set");
+ bitmap_set_all(read_set);
+ DBUG_VOID_RETURN;
+ }
+ void ha_set_all_bits_in_write_set()
+ {
+ DBUG_ENTER("ha_set_all_bits_in_write_set");
+ bitmap_set_all(write_set);
+ DBUG_VOID_RETURN;
+ }
+ void ha_set_bit_in_read_set(uint fieldnr)
+ {
+ DBUG_ENTER("ha_set_bit_in_read_set");
+ DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
+ bitmap_set_bit(read_set, fieldnr);
+ DBUG_VOID_RETURN;
+ }
+ void ha_clear_bit_in_read_set(uint fieldnr)
+ {
+ DBUG_ENTER("ha_clear_bit_in_read_set");
+ DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
+ bitmap_clear_bit(read_set, fieldnr);
+ DBUG_VOID_RETURN;
+ }
+ void ha_set_bit_in_write_set(uint fieldnr)
+ {
+ DBUG_ENTER("ha_set_bit_in_write_set");
+ DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
+ bitmap_set_bit(write_set, fieldnr);
+ DBUG_VOID_RETURN;
+ }
+ void ha_clear_bit_in_write_set(uint fieldnr)
+ {
+ DBUG_ENTER("ha_clear_bit_in_write_set");
+ DBUG_PRINT("info", ("fieldnr = %d", fieldnr));
+ bitmap_clear_bit(write_set, fieldnr);
+ DBUG_VOID_RETURN;
+ }
+ void ha_set_bit_in_rw_set(uint fieldnr, bool write_op)
+ {
+ DBUG_ENTER("ha_set_bit_in_rw_set");
+ DBUG_PRINT("info", ("Set bit %u in read set", fieldnr));
+ bitmap_set_bit(read_set, fieldnr);
+ if (!write_op) {
+ DBUG_VOID_RETURN;
+ }
+ else
+ {
+ DBUG_PRINT("info", ("Set bit %u in read and write set", fieldnr));
+ bitmap_set_bit(write_set, fieldnr);
+ }
+ DBUG_VOID_RETURN;
+ }
+ bool ha_get_bit_in_read_set(uint fieldnr)
+ {
+ bool bit_set=bitmap_is_set(read_set,fieldnr);
+ DBUG_ENTER("ha_get_bit_in_read_set");
+ DBUG_PRINT("info", ("bit %u = %u", fieldnr, bit_set));
+ DBUG_RETURN(bit_set);
+ }
+ bool ha_get_bit_in_write_set(uint fieldnr)
+ {
+ bool bit_set=bitmap_is_set(write_set,fieldnr);
+ DBUG_ENTER("ha_get_bit_in_write_set");
+ DBUG_PRINT("info", ("bit %u = %u", fieldnr, bit_set));
+ DBUG_RETURN(bit_set);
+ }
+ bool ha_get_all_bit_in_read_set()
+ {
+ bool all_bits_set= bitmap_is_set_all(read_set);
+ DBUG_ENTER("ha_get_all_bit_in_read_set");
+ DBUG_PRINT("info", ("all bits set = %u", all_bits_set));
+ DBUG_RETURN(all_bits_set);
+ }
+ bool ha_get_all_bit_in_read_clear()
+ {
+ bool all_bits_set= bitmap_is_clear_all(read_set);
+ DBUG_ENTER("ha_get_all_bit_in_read_clear");
+ DBUG_PRINT("info", ("all bits clear = %u", all_bits_set));
+ DBUG_RETURN(all_bits_set);
+ }
+ bool ha_get_all_bit_in_write_set()
+ {
+ bool all_bits_set= bitmap_is_set_all(write_set);
+ DBUG_ENTER("ha_get_all_bit_in_write_set");
+ DBUG_PRINT("info", ("all bits set = %u", all_bits_set));
+ DBUG_RETURN(all_bits_set);
+ }
+ bool ha_get_all_bit_in_write_clear()
+ {
+ bool all_bits_set= bitmap_is_clear_all(write_set);
+ DBUG_ENTER("ha_get_all_bit_in_write_clear");
+ DBUG_PRINT("info", ("all bits clear = %u", all_bits_set));
+ DBUG_RETURN(all_bits_set);
+ }
+ void ha_set_primary_key_in_read_set();
+ int ha_allocate_read_write_set(ulong no_fields);
+ void ha_deallocate_read_write_set();
+ void ha_clear_all_set();
uint get_index(void) const { return active_index; }
virtual int open(const char *name, int mode, uint test_if_locked)=0;
virtual int close(void)=0;
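
The read/write set routines added in the hunk above are easiest to see in a short usage sketch. The fragment below is illustrative only and not part of the patch: it assumes the MySQL internal headers, a handler obtained from an open TABLE, and the scan methods ha_rnd_init/rnd_next/ha_rnd_end from the surrounding handler API (not shown in this hunk); only the bitmap calls are taken from the code above.

/* Hedged sketch: mark two columns as needed, scan, then reset the sets. */
int scan_two_columns(handler *file, byte *buf, uint id_nr, uint name_nr)
{
  int error;
  file->ha_clear_all_set();                /* start with empty read/write sets */
  file->ha_set_bit_in_read_set(id_nr);     /* engine must return these columns */
  file->ha_set_bit_in_read_set(name_nr);
  if ((error= file->ha_rnd_init(1)))       /* assumed scan-init wrapper */
    return error;
  while (!(error= file->rnd_next(buf)))
  { /* only the marked fields are guaranteed to be filled in buf */ }
  file->ha_rnd_end();
  file->ha_reset();                        /* ha_reset() calls ha_clear_all_set() */
  return error == HA_ERR_END_OF_FILE ? 0 : error;
}
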
@@ -576,6 +950,85 @@ public:
{ return HA_ERR_WRONG_COMMAND; }
virtual int delete_row(const byte * buf)
{ return HA_ERR_WRONG_COMMAND; }
+ /*
+ SYNOPSIS
+ start_bulk_update()
+ RETURN
+ 0 Bulk update used by handler
+ 1 Bulk update not used, normal operation used
+ */
+ virtual bool start_bulk_update() { return 1; }
+ /*
+ SYNOPSIS
+ start_bulk_delete()
+ RETURN
+ 0 Bulk delete used by handler
+ 1 Bulk delete not used, normal operation used
+ */
+ virtual bool start_bulk_delete() { return 1; }
+ /*
+ SYNOPSIS
+ This method is similar to update_row; however, the handler doesn't need
+ to execute the updates at this point in time. The handler can be certain
+ that another call to bulk_update_row will occur OR a call to
+ exec_bulk_update before the set of updates in this query is concluded.
+
+ bulk_update_row()
+ old_data Old record
+ new_data New record
+ dup_key_found Number of duplicate keys found
+ RETURN
+ 0 Success
+ >0 Error code
+ */
+ virtual int bulk_update_row(const byte *old_data, byte *new_data,
+ uint *dup_key_found)
+ {
+ DBUG_ASSERT(FALSE);
+ return HA_ERR_WRONG_COMMAND;
+ }
+ /*
+ SYNOPSIS
+ After this call all outstanding updates must be performed. The number
+ of duplicate key errors is reported in the dup_key_found parameter.
+ It is allowed to continue the batched update after this call; the
+ handler has to wait until end_bulk_update before changing state.
+
+ exec_bulk_update()
+ dup_key_found Number of duplicate keys found
+ RETURN
+ 0 Success
+ >0 Error code
+ */
+ virtual int exec_bulk_update(uint *dup_key_found)
+ {
+ DBUG_ASSERT(FALSE);
+ return HA_ERR_WRONG_COMMAND;
+ }
+ /*
+ SYNOPSIS
+ Perform any needed clean-up; no outstanding updates remain at this
+ point.
+
+ end_bulk_update()
+ RETURN
+ Nothing
+ */
+ virtual void end_bulk_update() { return; }
+ /*
+ SYNOPSIS
+ Execute all outstanding deletes and close down the bulk delete.
+
+ end_bulk_delete()
+ RETURN
+ 0 Success
+ >0 Error code
+ */
+ virtual int end_bulk_delete()
+ {
+ DBUG_ASSERT(FALSE);
+ return HA_ERR_WRONG_COMMAND;
+ }
virtual int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{ return HA_ERR_WRONG_COMMAND; }
@@ -626,7 +1079,6 @@ public:
{ return 0; }
virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
{ return extra(operation); }
- virtual int reset() { return extra(HA_EXTRA_RESET); }
virtual int external_lock(THD *thd, int lock_type) { return 0; }
virtual void unlock_row() {}
virtual int start_stmt(THD *thd) {return 0;}
@@ -693,6 +1145,10 @@ public:
virtual const char *table_type() const =0;
virtual const char **bas_ext() const =0;
virtual ulong table_flags(void) const =0;
+#ifdef HAVE_PARTITION_DB
+ virtual ulong partition_flags(void) const { return 0;}
+ virtual int get_default_no_partitions(ulonglong max_rows) { return 1;}
+#endif
virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0;
virtual ulong index_ddl_flags(KEY *wanted_index) const
{ return (HA_DDL_SUPPORT); }
@@ -732,6 +1188,7 @@ public:
virtual int delete_table(const char *name);
virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
+ virtual int create_handler_files(const char *name) { return FALSE;}
/* lock_count() can be more than one if the table is a MERGE */
virtual uint lock_count(void) const { return 1; }
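
For the bulk update interface added to handler above, the intended call order is easier to follow as a driver sketch than from the individual method comments. This is a hypothetical fragment, not part of the patch: fetch_next_pair() is a made-up stand-in for the regular read path, update_row() is the pre-existing per-row method, and only the bulk_* calls are taken from this diff.

/* Hedged sketch of the bulk-update protocol: start, feed rows, flush, clean up. */
bool fetch_next_pair(handler *file, byte *old_rec, byte *new_rec); /* assumed helper */

int run_bulk_update(handler *file, byte *old_rec, byte *new_rec)
{
  uint dup_keys= 0;
  int error= 0;
  bool use_bulk= !file->start_bulk_update();    /* 0 => engine batches updates */
  while (!error && fetch_next_pair(file, old_rec, new_rec))
  {
    error= use_bulk ? file->bulk_update_row(old_rec, new_rec, &dup_keys)
                    : file->update_row(old_rec, new_rec);
  }
  if (!error && use_bulk)
    error= file->exec_bulk_update(&dup_keys);   /* perform outstanding updates */
  if (use_bulk)
    file->end_bulk_update();                    /* final clean-up, returns nothing */
  return error;
}
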
diff --git a/sql/item.cc b/sql/item.cc
index 0e3907dd5a6..604f2f6dd2c 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -3103,13 +3103,18 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
set_field(from_field);
}
- else if (thd->set_query_id && field->query_id != thd->query_id)
+ else if (thd->set_query_id)
{
- /* We only come here in unions */
- TABLE *table=field->table;
- field->query_id=thd->query_id;
- table->used_fields++;
- table->used_keys.intersect(field->part_of_key);
+ TABLE *table= field->table;
+ table->file->ha_set_bit_in_rw_set(field->fieldnr,
+ (bool)(thd->set_query_id-1));
+ if (field->query_id != thd->query_id)
+ {
+ /* We only come here in unions */
+ field->query_id=thd->query_id;
+ table->used_fields++;
+ table->used_keys.intersect(field->part_of_key);
+ }
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (any_privileges)
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index ad1c9977e5b..903f4c953a2 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -1492,7 +1492,7 @@ int subselect_uniquesubquery_engine::exec()
}
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
+ table->file->ha_index_init(tab->ref.key, 0);
error= table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT);
@@ -1545,7 +1545,7 @@ int subselect_indexsubquery_engine::exec()
}
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
+ table->file->ha_index_init(tab->ref.key, 1);
error= table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT);
diff --git a/sql/key.cc b/sql/key.cc
index 4bd71d2fa47..f1e073a4775 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -429,3 +429,86 @@ int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length)
}
return 0; // Keys are equal
}
+
+
+/*
+ Compare two records in index order
+ SYNOPSIS
+ key_rec_cmp()
+ key Index information
+ rec0 Pointer to table->record[0]
+ first_rec Pointer to record compare with
+ second_rec Pointer to record compare against first_rec
+ DESCRIPTION
+ This function is set up so that it can be called directly from the
+ priority queue, and it is optimised as far as possible since it will
+ be called O(N * log N) times while performing a merge sort in various
+ places in the code.
+
+ We retrieve the pointer to table->record[0] using the fact that key_parts
+ have an offset making it possible to calculate the start of the record.
+ We need the offset of each compared record relative to table->record[0],
+ since neither of the records being compared is stored there.
+
+ We first check for NULL values; if there are no NULL values we use
+ a compare method that gets two field pointers and a max length
+ and returns the result of the comparison.
+*/
+
+int key_rec_cmp(void *key, byte *first_rec, byte *second_rec)
+{
+ KEY *key_info= (KEY*)key;
+ uint key_parts= key_info->key_parts, i= 0;
+ KEY_PART_INFO *key_part= key_info->key_part;
+ char *rec0= key_part->field->ptr - key_part->offset;
+ my_ptrdiff_t first_diff= first_rec - rec0, sec_diff= second_rec - rec0;
+ int result= 0;
+ DBUG_ENTER("key_rec_cmp");
+
+ do
+ {
+ Field *field= key_part->field;
+ uint length;
+
+ if (key_part->null_bit)
+ {
+ /* The key_part can contain NULL values */
+ bool first_is_null= field->is_null(first_diff);
+ bool sec_is_null= field->is_null(sec_diff);
+ /*
+ NULL is smaller than everything, so if the first is NULL and the other
+ is not we should return -1, and for the opposite case we should
+ return +1. If both are NULL we call it equality, although it is a
+ strange form of equality: we have equally little information about
+ the real value in both cases.
+ */
+ if (!first_is_null)
+ {
+ if (!sec_is_null)
+ ; /* Fall through, no NULL fields */
+ else
+ {
+ DBUG_RETURN(+1);
+ }
+ }
+ else if (!sec_is_null)
+ {
+ DBUG_RETURN(-1);
+ }
+ else
+ goto next_loop; /* Both were NULL */
+ }
+ /*
+ No null values in the fields
+ We use the virtual method cmp_max with a max length parameter.
+ For most field types this translates into a cmp without
+ max length. The exceptions are the BLOB and VARCHAR field types
+ that take the max length into account.
+ */
+ result= field->cmp_max(field->ptr+first_diff, field->ptr+sec_diff,
+ key_part->length);
+next_loop:
+ key_part++;
+ } while (!result && ++i < key_parts);
+ DBUG_RETURN(result);
+}
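
The comparison contract implemented by key_rec_cmp (NULL sorts before every value, two NULLs compare equal, the first non-zero key part decides) can be shown with a small standalone illustration. This is not MySQL code; it merely models the same three-way, NULLs-first semantics, with std::optional standing in for nullable key parts.

#include <optional>
#include <vector>

/* Three-way compare of two keys; mirrors the NULL handling in key_rec_cmp. */
int rec_cmp(const std::vector<std::optional<int> > &a,
            const std::vector<std::optional<int> > &b)
{
  for (size_t i= 0; i < a.size() && i < b.size(); i++)
  {
    if (!a[i] && !b[i])
      continue;                       /* both NULL: treated as equal */
    if (!a[i])
      return -1;                      /* NULL is smaller than everything */
    if (!b[i])
      return +1;
    if (*a[i] != *b[i])
      return *a[i] < *b[i] ? -1 : +1; /* first differing key part decides */
  }
  return 0;                           /* all compared key parts equal */
}
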
diff --git a/sql/lex.h b/sql/lex.h
index 122e7040c80..c0e91527f45 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -274,11 +274,14 @@ static SYMBOL symbols[] = {
{ "LEAVE", SYM(LEAVE_SYM)},
{ "LEAVES", SYM(LEAVES)},
{ "LEFT", SYM(LEFT)},
+ { "LESS", SYM(LESS_SYM)},
{ "LEVEL", SYM(LEVEL_SYM)},
{ "LIKE", SYM(LIKE)},
{ "LIMIT", SYM(LIMIT)},
+ { "LINEAR", SYM(LINEAR_SYM)},
{ "LINES", SYM(LINES)},
{ "LINESTRING", SYM(LINESTRING)},
+ { "LIST", SYM(LIST_SYM)},
{ "LOAD", SYM(LOAD)},
{ "LOCAL", SYM(LOCAL_SYM)},
{ "LOCALTIME", SYM(NOW_SYM)},
@@ -312,6 +315,7 @@ static SYMBOL symbols[] = {
{ "MAX_ROWS", SYM(MAX_ROWS)},
{ "MAX_UPDATES_PER_HOUR", SYM(MAX_UPDATES_PER_HOUR)},
{ "MAX_USER_CONNECTIONS", SYM(MAX_USER_CONNECTIONS_SYM)},
+ { "MAXVALUE", SYM(MAX_VALUE_SYM)},
{ "MEDIUM", SYM(MEDIUM_SYM)},
{ "MEDIUMBLOB", SYM(MEDIUMBLOB)},
{ "MEDIUMINT", SYM(MEDIUMINT)},
@@ -343,6 +347,7 @@ static SYMBOL symbols[] = {
{ "NEW", SYM(NEW_SYM)},
{ "NEXT", SYM(NEXT_SYM)},
{ "NO", SYM(NO_SYM)},
+ { "NODEGROUP", SYM(NODEGROUP_SYM)},
{ "NONE", SYM(NONE_SYM)},
{ "NOT", SYM(NOT_SYM)},
{ "NO_WRITE_TO_BINLOG", SYM(NO_WRITE_TO_BINLOG)},
@@ -365,6 +370,10 @@ static SYMBOL symbols[] = {
{ "OUTFILE", SYM(OUTFILE)},
{ "PACK_KEYS", SYM(PACK_KEYS_SYM)},
{ "PARTIAL", SYM(PARTIAL)},
+#ifdef HAVE_PARTITION_DB
+ { "PARTITION", SYM(PARTITION_SYM)},
+#endif
+ { "PARTITIONS", SYM(PARTITIONS_SYM)},
{ "PASSWORD", SYM(PASSWORD)},
{ "PHASE", SYM(PHASE_SYM)},
{ "POINT", SYM(POINT_SYM)},
@@ -385,6 +394,7 @@ static SYMBOL symbols[] = {
{ "RAID_CHUNKS", SYM(RAID_CHUNKS)},
{ "RAID_CHUNKSIZE", SYM(RAID_CHUNKSIZE)},
{ "RAID_TYPE", SYM(RAID_TYPE)},
+ { "RANGE", SYM(RANGE_SYM)},
{ "READ", SYM(READ_SYM)},
{ "READS", SYM(READS_SYM)},
{ "REAL", SYM(REAL)},
@@ -476,6 +486,8 @@ static SYMBOL symbols[] = {
{ "STRING", SYM(STRING_SYM)},
{ "STRIPED", SYM(RAID_STRIPED_SYM)},
{ "SUBJECT", SYM(SUBJECT_SYM)},
+ { "SUBPARTITION", SYM(SUBPARTITION_SYM)},
+ { "SUBPARTITIONS", SYM(SUBPARTITIONS_SYM)},
{ "SUPER", SYM(SUPER_SYM)},
{ "SUSPEND", SYM(SUSPEND_SYM)},
{ "TABLE", SYM(TABLE_SYM)},
@@ -485,6 +497,7 @@ static SYMBOL symbols[] = {
{ "TEMPTABLE", SYM(TEMPTABLE_SYM)},
{ "TERMINATED", SYM(TERMINATED)},
{ "TEXT", SYM(TEXT_SYM)},
+ { "THAN", SYM(THAN_SYM)},
{ "THEN", SYM(THEN_SYM)},
{ "TIME", SYM(TIME_SYM)},
{ "TIMESTAMP", SYM(TIMESTAMP)},
diff --git a/sql/lock.cc b/sql/lock.cc
index aa162a23b40..dff863ccf56 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -72,7 +72,7 @@ TODO:
#ifndef MASTER
#include "../srclib/myisammrg/myrg_def.h"
#else
-#include "../myisammrg/myrg_def.h"
+#include "../storage/myisammrg/myrg_def.h"
#endif
static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table,uint count,
@@ -210,7 +210,6 @@ static int lock_external(THD *thd, TABLE **tables, uint count)
((*tables)->reginfo.lock_type >= TL_READ &&
(*tables)->reginfo.lock_type <= TL_READ_NO_INSERT))
lock_type=F_RDLCK;
-
if ((error=(*tables)->file->external_lock(thd,lock_type)))
{
print_lock_error(error, (*tables)->file->table_type());
diff --git a/sql/log.cc b/sql/log.cc
index 7b67a11ab53..1ef522588ff 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -24,6 +24,7 @@
#include "mysql_priv.h"
#include "sql_repl.h"
+#include "rpl_filter.h"
#include <my_dir.h>
#include <stdarg.h>
@@ -1577,10 +1578,11 @@ bool MYSQL_LOG::write(Log_event *event_info)
binlog_[wild_]{do|ignore}_table?" (WL#1049)"
*/
if ((thd && !(thd->options & OPTION_BIN_LOG)) ||
- (!db_ok(local_db, binlog_do_db, binlog_ignore_db)))
+ (!binlog_filter->db_ok(local_db)))
{
VOID(pthread_mutex_unlock(&LOCK_log));
- DBUG_PRINT("error",("!db_ok('%s')", local_db));
+ DBUG_PRINT("info",("db_ok('%s')==%d", local_db,
+ binlog_filter->db_ok(local_db)));
DBUG_RETURN(0);
}
#endif /* HAVE_REPLICATION */
diff --git a/sql/log_event.cc b/sql/log_event.cc
index bdf17ba20e3..29f1160466e 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -23,6 +23,7 @@
#include "mysql_priv.h"
#include "slave.h"
+#include "rpl_filter.h"
#include <my_dir.h>
#endif /* MYSQL_CLIENT */
@@ -1524,7 +1525,7 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, const char *query
*/
thd->catalog= catalog_len ? (char *) catalog : (char *)"";
thd->db_length= db_len;
- thd->db= (char*) rewrite_db(db, &thd->db_length);
+ thd->db= (char *) rpl_filter->get_rewrite_db(db, &thd->db_length);
thd->variables.auto_increment_increment= auto_increment_increment;
thd->variables.auto_increment_offset= auto_increment_offset;
@@ -1543,7 +1544,7 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, const char *query
clear_all_errors(thd, rli);
- if (db_ok(thd->db, replicate_do_db, replicate_ignore_db))
+ if (rpl_filter->db_ok(thd->db))
{
thd->set_time((time_t)when);
thd->query_length= q_len_arg;
@@ -2664,7 +2665,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
bool use_rli_only_for_errors)
{
thd->db_length= db_len;
- thd->db= (char*) rewrite_db(db, &thd->db_length);
+ thd->db= (char *) rpl_filter->get_rewrite_db(db, &thd->db_length);
DBUG_ASSERT(thd->query == 0);
thd->query_length= 0; // Should not be needed
thd->query_error= 0;
@@ -2693,7 +2694,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
al. Another way is do the filtering in the I/O thread (more efficient: no
disk writes at all).
*/
- if (db_ok(thd->db, replicate_do_db, replicate_ignore_db))
+ if (rpl_filter->db_ok(thd->db))
{
thd->set_time((time_t)when);
VOID(pthread_mutex_lock(&LOCK_thread_count));
@@ -2715,7 +2716,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
tables.updating= 1;
// the table will be opened in mysql_load
- if (table_rules_on && !tables_ok(thd, &tables))
+ if (rpl_filter->is_on() && !rpl_filter->tables_ok(thd->db, &tables))
{
// TODO: this is a bug - this needs to be moved to the I/O thread
if (net)
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 7bd8d76f25d..1cc1aa836a8 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -619,6 +619,18 @@ bool check_table_access(THD *thd, ulong want_access, TABLE_LIST *tables,
bool no_errors);
bool check_global_access(THD *thd, ulong want_access);
+/*
+ General routine to change field->ptr of a NULL-terminated array of Field
+ objects. Useful when one needs to call val_int, val_str or similar and the
+ field data is not in table->record[0] but in some other structure.
+ set_key_field_ptr changes all fields of an index using a key_info object.
+ All methods presume that there is at least one field to change.
+*/
+
+void set_field_ptr(Field **ptr, const byte *new_buf, const byte *old_buf);
+void set_key_field_ptr(KEY *key_info, const byte *new_buf,
+ const byte *old_buf);
+
bool mysql_backup_table(THD* thd, TABLE_LIST* table_list);
bool mysql_restore_table(THD* thd, TABLE_LIST* table_list);
@@ -777,6 +789,9 @@ Field *
find_field_in_real_table(THD *thd, TABLE *table, const char *name,
uint length, bool check_grants, bool allow_rowid,
uint *cached_field_index_ptr);
+Field *
+find_field_in_table_sef(TABLE *table, const char *name);
+
#ifdef HAVE_OPENSSL
#include <openssl/des.h>
struct st_des_keyblock
@@ -908,10 +923,10 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
List<Item> *sum_func_list, uint wild_num);
bool setup_fields(THD *thd, Item** ref_pointer_array,
- List<Item> &item, bool set_query_id,
+ List<Item> &item, ulong set_query_id,
List<Item> *sum_func_list, bool allow_sum_func);
inline bool setup_fields_with_no_wrap(THD *thd, Item **ref_pointer_array,
- List<Item> &item, bool set_query_id,
+ List<Item> &item, ulong set_query_id,
List<Item> *sum_func_list,
bool allow_sum_func)
{
@@ -1023,6 +1038,7 @@ bool key_cmp_if_same(TABLE *form,const byte *key,uint index,uint key_length);
void key_unpack(String *to,TABLE *form,uint index);
bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields);
int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length);
+int key_rec_cmp(void *key_info, byte *a, byte *b);
bool init_errmessage(void);
void sql_perror(const char *message);
@@ -1180,7 +1196,6 @@ extern KNOWN_DATE_TIME_FORMAT known_date_time_formats[];
extern String null_string;
extern HASH open_cache;
extern TABLE *unused_tables;
-extern I_List<i_string> binlog_do_db, binlog_ignore_db;
extern const char* any_db;
extern struct my_option my_long_options[];
@@ -1196,6 +1211,7 @@ extern SHOW_COMP_OPTION have_query_cache;
extern SHOW_COMP_OPTION have_geometry, have_rtree_keys;
extern SHOW_COMP_OPTION have_crypt;
extern SHOW_COMP_OPTION have_compress;
+extern SHOW_COMP_OPTION have_partition_db;
#ifndef __WIN__
extern pthread_t signal_thread;
@@ -1246,7 +1262,7 @@ bool mysql_create_frm(THD *thd, my_string file_name,
uint key_count,KEY *key_info,handler *db_type);
int rea_create_table(THD *thd, my_string file_name,HA_CREATE_INFO *create_info,
List<create_field> &create_field,
- uint key_count,KEY *key_info);
+ uint key_count,KEY *key_info, handler *file);
int format_number(uint inputflag,uint max_length,my_string pos,uint length,
my_string *errpos);
int openfrm(THD *thd, const char *name,const char *alias,uint filestat,
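
A short usage sketch of the helpers declared above may help: the idea is to retarget field->ptr at another record buffer, evaluate the fields, and retarget them back. Only the set_field_ptr declaration is from the patch; the TABLE members used (field, record[0]) are the usual server structures, and peek_record is a made-up name for illustration.

/* Hedged sketch: evaluate fields against a buffer other than record[0]. */
void peek_record(TABLE *table, const byte *other_buf)
{
  /* Point all fields at other_buf instead of table->record[0] */
  set_field_ptr(table->field, other_buf, table->record[0]);
  /* ... call val_int()/val_str() on table->field[i] here ... */
  /* Restore the pointers back to table->record[0] */
  set_field_ptr(table->field, table->record[0], other_buf);
}
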
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 798bd25fa7c..e4a44b41317 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -19,6 +19,7 @@
#include <my_dir.h>
#include "slave.h"
#include "sql_repl.h"
+#include "rpl_filter.h"
#include "repl_failsafe.h"
#include "stacktrace.h"
#include "mysqld_suffix.h"
@@ -324,6 +325,7 @@ my_bool opt_ndb_shm, opt_ndb_optimized_node_selection;
ulong opt_ndb_cache_check_time;
const char *opt_ndb_mgmd;
ulong opt_ndb_nodeid;
+bool opt_ndb_linear_hash;
#endif
my_bool opt_readonly, use_temp_pool, relay_log_purge;
my_bool opt_sync_frm, opt_allow_suspicious_udfs;
@@ -411,12 +413,10 @@ Le_creator le_creator;
FILE *bootstrap_file;
int bootstrap_error;
-I_List<i_string_pair> replicate_rewrite_db;
-I_List<i_string> replicate_do_db, replicate_ignore_db;
-// allow the user to tell us which db to replicate and which to ignore
-I_List<i_string> binlog_do_db, binlog_ignore_db;
I_List<THD> threads;
I_List<NAMED_LIST> key_caches;
+Rpl_filter* rpl_filter;
+Rpl_filter* binlog_filter;
struct system_variables global_system_variables;
struct system_variables max_system_variables;
@@ -431,6 +431,7 @@ CHARSET_INFO *national_charset_info, *table_alias_charset;
SHOW_COMP_OPTION have_berkeley_db, have_innodb, have_isam, have_ndbcluster,
have_example_db, have_archive_db, have_csv_db;
SHOW_COMP_OPTION have_federated_db;
+SHOW_COMP_OPTION have_partition_db;
SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache;
SHOW_COMP_OPTION have_geometry, have_rtree_keys;
SHOW_COMP_OPTION have_crypt, have_compress;
@@ -1039,12 +1040,9 @@ void clean_up(bool print_message)
free_max_user_conn();
#ifdef HAVE_REPLICATION
end_slave_list();
- free_list(&replicate_do_db);
- free_list(&replicate_ignore_db);
- free_list(&binlog_do_db);
- free_list(&binlog_ignore_db);
- free_list(&replicate_rewrite_db);
#endif
+ delete binlog_filter;
+ delete rpl_filter;
#ifdef HAVE_OPENSSL
if (ssl_acceptor_fd)
my_free((gptr) ssl_acceptor_fd, MYF(MY_ALLOW_ZERO_PTR));
@@ -3010,6 +3008,15 @@ int main(int argc, char **argv)
#endif
{
DEBUGGER_OFF;
+
+ rpl_filter= new Rpl_filter;
+ binlog_filter= new Rpl_filter;
+ if (!rpl_filter || !binlog_filter)
+ {
+ sql_perror("Could not allocate replication and binlog filters");
+ exit(1);
+ }
+
MY_INIT(argv[0]); // init my_sys library & pthreads
#ifdef _CUSTOMSTARTUPCONFIG_
@@ -3391,7 +3398,6 @@ default_service_handling(char **argv,
int main(int argc, char **argv)
{
-
/*
When several instances are running on the same machine, we
need to have an unique named hEventShudown through the
@@ -4248,6 +4254,7 @@ enum options_mysqld
OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ,
OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, OPT_NDB_CACHE_CHECK_TIME,
OPT_NDB_MGMD, OPT_NDB_NODEID,
+ OPT_NDB_LINEAR_HASH,
OPT_SKIP_SAFEMALLOC,
OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_COMPLETION_TYPE,
OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
@@ -4814,6 +4821,16 @@ Disable with --skip-ndbcluster (will save memory).",
(gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz,
(gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz,
0, GET_ULONG, REQUIRED_ARG, 32, 1, 256, 0, 0, 0},
+ {"ndb-use-linear-hash", OPT_NDB_LINEAR_HASH,
+ "Flag to indicate whether to use linear hash for default in new tables",
+ (gptr*) &opt_ndb_linear_hash,
+ (gptr*) &opt_ndb_linear_hash,
+ 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
+ {"ndb_use_linear_hash", OPT_NDB_LINEAR_HASH,
+ "Flag to indicate whether to use linear hash for default in new tables",
+ (gptr*) &opt_ndb_linear_hash,
+ (gptr*) &opt_ndb_linear_hash,
+ 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
{"ndb-force-send", OPT_NDB_FORCE_SEND,
"Force send of buffers to ndb immediately without waiting for "
"other threads.",
@@ -6020,13 +6037,6 @@ static void mysql_init_variables(void)
exit(1);
multi_keycache_init(); /* set key_cache_hash.default_value = dflt_key_cache */
- /* Initialize structures that is used when processing options */
- replicate_rewrite_db.empty();
- replicate_do_db.empty();
- replicate_ignore_db.empty();
- binlog_do_db.empty();
- binlog_ignore_db.empty();
-
/* Set directory paths */
strmake(language, LANGUAGE, sizeof(language)-1);
strmake(mysql_real_data_home, get_relative_path(DATADIR),
@@ -6082,6 +6092,11 @@ static void mysql_init_variables(void)
#else
have_example_db= SHOW_OPTION_NO;
#endif
+#ifdef HAVE_PARTITION_DB
+ have_partition_db= SHOW_OPTION_YES;
+#else
+ have_partition_db= SHOW_OPTION_NO;
+#endif
#ifdef HAVE_ARCHIVE_DB
have_archive_db= SHOW_OPTION_YES;
#else
@@ -6272,14 +6287,12 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
}
case (int)OPT_REPLICATE_IGNORE_DB:
{
- i_string *db = new i_string(argument);
- replicate_ignore_db.push_back(db);
+ rpl_filter->add_ignore_db(argument);
break;
}
case (int)OPT_REPLICATE_DO_DB:
{
- i_string *db = new i_string(argument);
- replicate_do_db.push_back(db);
+ rpl_filter->add_do_db(argument);
break;
}
case (int)OPT_REPLICATE_REWRITE_DB:
@@ -6312,71 +6325,54 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
exit(1);
}
- i_string_pair *db_pair = new i_string_pair(key, val);
- replicate_rewrite_db.push_back(db_pair);
+ rpl_filter->add_db_rewrite(key, val);
break;
}
case (int)OPT_BINLOG_IGNORE_DB:
{
- i_string *db = new i_string(argument);
- binlog_ignore_db.push_back(db);
+ binlog_filter->add_ignore_db(argument);
break;
}
case (int)OPT_BINLOG_DO_DB:
{
- i_string *db = new i_string(argument);
- binlog_do_db.push_back(db);
+ binlog_filter->add_do_db(argument);
break;
}
case (int)OPT_REPLICATE_DO_TABLE:
{
- if (!do_table_inited)
- init_table_rule_hash(&replicate_do_table, &do_table_inited);
- if (add_table_rule(&replicate_do_table, argument))
+ if (rpl_filter->add_do_table(argument))
{
fprintf(stderr, "Could not add do table rule '%s'!\n", argument);
exit(1);
}
- table_rules_on = 1;
break;
}
case (int)OPT_REPLICATE_WILD_DO_TABLE:
{
- if (!wild_do_table_inited)
- init_table_rule_array(&replicate_wild_do_table,
- &wild_do_table_inited);
- if (add_wild_table_rule(&replicate_wild_do_table, argument))
+ if (rpl_filter->add_wild_do_table(argument))
{
fprintf(stderr, "Could not add do table rule '%s'!\n", argument);
exit(1);
}
- table_rules_on = 1;
break;
}
case (int)OPT_REPLICATE_WILD_IGNORE_TABLE:
{
- if (!wild_ignore_table_inited)
- init_table_rule_array(&replicate_wild_ignore_table,
- &wild_ignore_table_inited);
- if (add_wild_table_rule(&replicate_wild_ignore_table, argument))
+ if (rpl_filter->add_wild_ignore_table(argument))
{
fprintf(stderr, "Could not add ignore table rule '%s'!\n", argument);
exit(1);
}
- table_rules_on = 1;
break;
}
case (int)OPT_REPLICATE_IGNORE_TABLE:
{
- if (!ignore_table_inited)
- init_table_rule_hash(&replicate_ignore_table, &ignore_table_inited);
- if (add_table_rule(&replicate_ignore_table, argument))
+ if (rpl_filter->add_ignore_table(argument))
{
fprintf(stderr, "Could not add ignore table rule '%s'!\n", argument);
exit(1);
}
- table_rules_on = 1;
break;
}
#endif /* HAVE_REPLICATION */
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index c1ebfe105b6..25fcdfc51fd 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -751,7 +751,7 @@ int QUICK_RANGE_SELECT::init()
DBUG_ENTER("QUICK_RANGE_SELECT::init");
if (file->inited == handler::NONE)
- DBUG_RETURN(error= file->ha_index_init(index));
+ DBUG_RETURN(error= file->ha_index_init(index, 1));
error= 0;
DBUG_RETURN(0);
}
@@ -778,9 +778,10 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
{
DBUG_PRINT("info", ("Freeing separate handler %p (free=%d)", file,
free_file));
- file->reset();
+ file->ha_reset();
file->external_lock(current_thd, F_UNLCK);
file->close();
+ delete file;
}
}
delete_dynamic(&ranges); /* ranges are allocated in alloc */
@@ -916,7 +917,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
{
DBUG_PRINT("info", ("Reusing handler %p", file));
if (file->extra(HA_EXTRA_KEYREAD) ||
- file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) ||
+ file->ha_retrieve_all_pk() ||
init() || reset())
{
DBUG_RETURN(1);
@@ -944,7 +945,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
goto failure;
if (file->extra(HA_EXTRA_KEYREAD) ||
- file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) ||
+ file->ha_retrieve_all_pk() ||
init() || reset())
{
file->external_lock(thd, F_UNLCK);
@@ -956,6 +957,8 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
DBUG_RETURN(0);
failure:
+ if (file)
+ delete file;
file= save_file;
DBUG_RETURN(1);
}
@@ -1562,9 +1565,10 @@ static int fill_used_fields_bitmap(PARAM *param)
{
TABLE *table= param->table;
param->fields_bitmap_size= (table->s->fields/8 + 1);
- uchar *tmp;
+ uint32 *tmp;
uint pk;
- if (!(tmp= (uchar*)alloc_root(param->mem_root,param->fields_bitmap_size)) ||
+ if (!(tmp= (uint32*)alloc_root(param->mem_root,
+ bytes_word_aligned(param->fields_bitmap_size))) ||
bitmap_init(&param->needed_fields, tmp, param->fields_bitmap_size*8,
FALSE))
return 1;
@@ -2307,7 +2311,7 @@ static
ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
{
ROR_SCAN_INFO *ror_scan;
- uchar *bitmap_buf;
+ uint32 *bitmap_buf;
uint keynr;
DBUG_ENTER("make_ror_scan");
@@ -2322,8 +2326,8 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
ror_scan->sel_arg= sel_arg;
ror_scan->records= param->table->quick_rows[keynr];
- if (!(bitmap_buf= (uchar*)alloc_root(param->mem_root,
- param->fields_bitmap_size)))
+ if (!(bitmap_buf= (uint32*)alloc_root(param->mem_root,
+ bytes_word_aligned(param->fields_bitmap_size))))
DBUG_RETURN(NULL);
if (bitmap_init(&ror_scan->covered_fields, bitmap_buf,
@@ -2437,12 +2441,13 @@ static
ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param)
{
ROR_INTERSECT_INFO *info;
- uchar* buf;
+ uint32* buf;
if (!(info= (ROR_INTERSECT_INFO*)alloc_root(param->mem_root,
sizeof(ROR_INTERSECT_INFO))))
return NULL;
info->param= param;
- if (!(buf= (uchar*)alloc_root(param->mem_root, param->fields_bitmap_size)))
+ if (!(buf= (uint32*)alloc_root(param->mem_root,
+ bytes_word_aligned(param->fields_bitmap_size))))
return NULL;
if (bitmap_init(&info->covered_fields, buf, param->fields_bitmap_size*8,
FALSE))
@@ -2459,7 +2464,7 @@ void ror_intersect_cpy(ROR_INTERSECT_INFO *dst, const ROR_INTERSECT_INFO *src)
{
dst->param= src->param;
memcpy(dst->covered_fields.bitmap, src->covered_fields.bitmap,
- src->covered_fields.bitmap_size);
+ no_bytes_in_map(&src->covered_fields));
dst->out_rows= src->out_rows;
dst->is_covering= src->is_covering;
dst->index_records= src->index_records;
@@ -3001,9 +3006,9 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
/*I=set of all covering indexes */
ror_scan_mark= tree->ror_scans;
- uchar buf[MAX_KEY/8+1];
+ uint32 int_buf[MAX_KEY/32+1];
MY_BITMAP covered_fields;
- if (bitmap_init(&covered_fields, buf, nbits, FALSE))
+ if (bitmap_init(&covered_fields, int_buf, nbits, FALSE))
DBUG_RETURN(0);
bitmap_clear_all(&covered_fields);
@@ -5776,7 +5781,7 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
(This also creates a deficiency - it is possible that we will retrieve
parts of key that are not used by current query at all.)
*/
- if (head->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY))
+ if (head->file->ha_retrieve_all_pk())
DBUG_RETURN(1);
cur_quick_it.rewind();
@@ -6045,7 +6050,7 @@ int QUICK_RANGE_SELECT::reset()
range= NULL;
cur_range= (QUICK_RANGE**) ranges.buffer;
- if (file->inited == handler::NONE && (error= file->ha_index_init(index)))
+ if (file->inited == handler::NONE && (error= file->ha_index_init(index,1)))
DBUG_RETURN(error);
/* Do not allocate the buffers twice. */
@@ -6304,7 +6309,7 @@ int QUICK_RANGE_SELECT_GEOM::get_next()
(byte*) range->min_key,
range->min_length,
(ha_rkey_function)(range->flag ^ GEOM_FLAG));
- if (result != HA_ERR_KEY_NOT_FOUND)
+ if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
range=0; // Not found, to next range
}
@@ -6447,7 +6452,7 @@ int QUICK_SELECT_DESC::get_next()
}
if (result)
{
- if (result != HA_ERR_KEY_NOT_FOUND)
+ if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
range=0; // Not found, to next range
continue;
@@ -8079,7 +8084,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::reset(void)
DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::reset");
file->extra(HA_EXTRA_KEYREAD); /* We need only the key attributes */
- result= file->ha_index_init(index);
+ result= file->ha_index_init(index, 1);
result= file->index_last(record);
if (result == HA_ERR_END_OF_FILE)
DBUG_RETURN(0);
@@ -8155,7 +8160,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
DBUG_ASSERT(is_last_prefix <= 0);
if (result == HA_ERR_KEY_NOT_FOUND)
continue;
- else if (result)
+ if (result)
break;
if (have_min)
@@ -8185,10 +8190,11 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
HA_READ_KEY_EXACT);
result= have_min ? min_res : have_max ? max_res : result;
- }
- while (result == HA_ERR_KEY_NOT_FOUND && is_last_prefix != 0);
+ } while ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
+ is_last_prefix != 0);
if (result == 0)
+ {
/*
Partially mimic the behavior of end_select_send. Copy the
field data from Item_field::field into Item_field::result_field
@@ -8196,6 +8202,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
other fields in non-ANSI SQL mode).
*/
copy_fields(&join->tmp_table_param);
+ }
else if (result == HA_ERR_KEY_NOT_FOUND)
result= HA_ERR_END_OF_FILE;
@@ -8222,6 +8229,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
RETURN
0 on success
HA_ERR_KEY_NOT_FOUND if no MIN key was found that fulfills all conditions.
+ HA_ERR_END_OF_FILE - "" -
other if some error occurred
*/
@@ -8275,7 +8283,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min()
if (key_cmp(index_info->key_part, group_prefix, real_prefix_len))
key_restore(record, tmp_record, index_info, 0);
}
- else if (result == HA_ERR_KEY_NOT_FOUND)
+ else if (result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE)
result= 0; /* There is a result in any case. */
}
}
@@ -8300,6 +8308,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min()
RETURN
0 on success
HA_ERR_KEY_NOT_FOUND if no MAX key was found that fulfills all conditions.
+ HA_ERR_END_OF_FILE - "" -
other if some error occurred
*/
@@ -8400,6 +8409,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix()
0 on success
HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of
the ranges
+ HA_ERR_END_OF_FILE - "" -
other if some error
*/
@@ -8444,11 +8454,12 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
result= file->index_read(record, group_prefix, search_prefix_len,
find_flag);
- if ((result == HA_ERR_KEY_NOT_FOUND) &&
- (cur_range->flag & (EQ_RANGE | NULL_RANGE)))
- continue; /* Check the next range. */
- else if (result)
+ if (result)
{
+ if ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
+ (cur_range->flag & (EQ_RANGE | NULL_RANGE)))
+ continue; /* Check the next range. */
+
/*
In all other cases (HA_ERR_*, HA_READ_KEY_EXACT with NO_MIN_RANGE,
HA_READ_AFTER_KEY, HA_READ_KEY_OR_NEXT) if the lookup failed for this
@@ -8475,7 +8486,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
/* Check if record belongs to the current group. */
if (key_cmp(index_info->key_part, group_prefix, real_prefix_len))
{
- result = HA_ERR_KEY_NOT_FOUND;
+ result= HA_ERR_KEY_NOT_FOUND;
continue;
}
@@ -8493,7 +8504,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
if (!((cur_range->flag & NEAR_MAX) && (cmp_res == -1) ||
(cmp_res <= 0)))
{
- result = HA_ERR_KEY_NOT_FOUND;
+ result= HA_ERR_KEY_NOT_FOUND;
continue;
}
}
@@ -8532,6 +8543,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
0 on success
HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of
the ranges
+ HA_ERR_END_OF_FILE - "" -
other if some error
*/
@@ -8577,10 +8589,12 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range()
result= file->index_read(record, group_prefix, search_prefix_len,
find_flag);
- if ((result == HA_ERR_KEY_NOT_FOUND) && (cur_range->flag & EQ_RANGE))
- continue; /* Check the next range. */
if (result)
{
+ if ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
+ (cur_range->flag & EQ_RANGE))
+ continue; /* Check the next range. */
+
/*
If no key was found with this upper bound, there are certainly no keys
in the ranges to the left.
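
The opt_range.cc hunks above all follow the same pattern: bitmap buffers become uint32 arrays sized through bytes_word_aligned() before being handed to bitmap_init(). A condensed sketch of that pattern follows; it assumes bytes_word_aligned() is the alignment helper introduced with the my_bitmap changes in this change-set, and that MEM_ROOT/alloc_root are the usual server allocators.

/* Hedged sketch: allocate a word-aligned field bitmap as the patch now does. */
static int init_fields_bitmap(MEM_ROOT *mem_root, MY_BITMAP *map, uint n_fields)
{
  uint bytes= n_fields / 8 + 1;                 /* same sizing as fields_bitmap_size */
  uint32 *buf= (uint32*) alloc_root(mem_root, bytes_word_aligned(bytes));
  if (!buf || bitmap_init(map, buf, bytes * 8, FALSE))
    return 1;                                   /* allocation or init failure */
  bitmap_clear_all(map);
  return 0;
}
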
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index 33c8eadc065..9802bbddde6 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -181,7 +181,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
const_result= 0;
break;
}
- error= table->file->ha_index_init((uint) ref.key);
+ error= table->file->ha_index_init((uint) ref.key, 1);
if (!ref.key_length)
error= table->file->index_first(table->record[0]);
@@ -253,7 +253,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
const_result= 0;
break;
}
- error= table->file->ha_index_init((uint) ref.key);
+ error= table->file->ha_index_init((uint) ref.key, 1);
if (!ref.key_length)
error= table->file->index_last(table->record[0]);
diff --git a/sql/records.cc b/sql/records.cc
index 9b05dc3e291..b3610cf1bbf 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -31,6 +31,74 @@ static int rr_cmp(uchar *a,uchar *b);
/* init struct for read with info->read_record */
+/*
+ init_read_record is used to scan by using a number of different methods.
+ Which method to use is set-up in this call so that later calls to
+ the info->read_record will call the appropriate method using a function
+ pointer.
+
+ There are five methods that relate completely to the sort function
+ filesort. The result of a filesort is retrieved using read_record
+ calls. The other two methods are used for normal table access.
+
+ The filesort will produce references to the sorted records; these
+ references can be stored in memory or in a temporary file.
+
+ The temporary file is normally used when the references don't fit into
+ a properly sized memory buffer. For most small queries the references
+ are stored in the memory buffer.
+
+ The temporary file is also used when performing an update where a key is
+ modified.
+
+ Methods used when ref's are in memory (using rr_from_pointers):
+ rr_unpack_from_buffer:
+ ----------------------
+ This method is used when table->sort.addon_field is allocated.
+ This is allocated for most SELECT queries not involving any BLOB's.
+ In this case the records are fetched from a memory buffer.
+ rr_from_pointers:
+ -----------------
+ Used when the above is not true, i.e. for UPDATE, DELETE and so forth
+ and for SELECTs involving BLOBs. It is also used when the addon_field
+ buffer is not allocated because its size would be bigger than the
+ session variable max_length_for_sort_data.
+ In this case the record data is fetched from the handler using the
+ saved reference using the rnd_pos handler call.
+
+ Methods used when ref's are in a temporary file (using rr_from_tempfile)
+ rr_unpack_from_tempfile:
+ ------------------------
+ Same as rr_unpack_from_buffer except that the references are fetched
+ from a temporary file. Should obviously not really happen other than
+ in strange configurations.
+
+ rr_from_tempfile:
+ -----------------
+ Same as rr_from_pointers except that the references are fetched from
+ a temporary file instead of from the memory buffer.
+
+ rr_from_cache:
+ --------------
+ This is a special variant of rr_from_tempfile that can be used for
+ handlers that do not use the HA_FAST_KEY_READ table flag. Instead
+ of reading the references one by one from the temporary file it reads
+ a set of them, sorts them and reads all of them into a buffer which
+ is then used for a number of subsequent calls to rr_from_cache.
+ It is only used for SELECT queries, subject to a number of additional
+ conditions on table size.
+
+ All other accesses use either index access methods (rr_quick) or a full
+ table scan (rr_sequential).
+ rr_quick:
+ ---------
+ rr_quick uses one of the QUICK_SELECT classes in opt_range.cc to
+ perform an index scan. There is a lot of functionality hidden
+ in these quick classes; they handle all kinds of index scans.
+ rr_sequential:
+ --------------
+ This is the most basic access method of a table using rnd_init,
+ rnd_next and rnd_end. No indexes are used.
+*/
void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
SQL_SELECT *select,
int use_record_cache, bool print_error)
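
The long comment above describes a dispatch-by-function-pointer design: init_read_record() selects one access method once, and the scan loop only ever calls through the stored read_record pointer. Below is a minimal standalone model of that design; it is not MySQL code, and the struct and function names are invented for illustration.

/* Toy model of the read_record dispatch described above. */
struct Reader
{
  int (*read_record)(Reader *info);   /* chosen once at init time */
  int rows_left;                      /* stand-in for real cursor state */
};

static int from_index(Reader *info) { return info->rows_left-- > 0 ? 0 : -1; }
static int from_table(Reader *info) { return info->rows_left-- > 0 ? 0 : -1; }

void init_reader(Reader *info, bool use_index, int rows)
{
  info->read_record= use_index ? from_index : from_table;
  info->rows_left= rows;
}

int scan(Reader *info)
{
  int rows= 0;
  while (info->read_record(info) == 0)
    rows++;                           /* process one row per call */
  return rows;
}
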
diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc
index 0b6e44c0272..855520fb2e4 100644
--- a/sql/repl_failsafe.cc
+++ b/sql/repl_failsafe.cc
@@ -20,6 +20,7 @@
#include "repl_failsafe.h"
#include "sql_repl.h"
#include "slave.h"
+#include "rpl_filter.h"
#include "log_event.h"
#include <mysql.h>
@@ -735,14 +736,14 @@ static int fetch_db_tables(THD *thd, MYSQL *mysql, const char *db,
TABLE_LIST table;
const char* table_name= row[0];
int error;
- if (table_rules_on)
+ if (rpl_filter->is_on())
{
bzero((char*) &table, sizeof(table)); //just for safe
table.db= (char*) db;
table.table_name= (char*) table_name;
table.updating= 1;
- if (!tables_ok(thd, &table))
+ if (!rpl_filter->tables_ok(thd->db, &table))
continue;
}
/* download master's table and overwrite slave's table */
@@ -860,8 +861,8 @@ bool load_master_data(THD* thd)
data from master
*/
- if (!db_ok(db, replicate_do_db, replicate_ignore_db) ||
- !db_ok_with_wild_table(db) ||
+ if (!rpl_filter->db_ok(db) ||
+ !rpl_filter->db_ok_with_wild_table(db) ||
!strcmp(db,"mysql"))
{
*cur_table_res = 0;
diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc
new file mode 100644
index 00000000000..f9f8a3e98a7
--- /dev/null
+++ b/sql/rpl_filter.cc
@@ -0,0 +1,539 @@
+/* Copyright (C) 2000-2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "mysql_priv.h"
+#include "rpl_filter.h"
+
+#define TABLE_RULE_HASH_SIZE 16
+#define TABLE_RULE_ARR_SIZE 16
+
+Rpl_filter::Rpl_filter() :
+ table_rules_on(0), do_table_inited(0), ignore_table_inited(0),
+ wild_do_table_inited(0), wild_ignore_table_inited(0)
+{
+ do_db.empty();
+ ignore_db.empty();
+ rewrite_db.empty();
+}
+
+
+Rpl_filter::~Rpl_filter()
+{
+ if (do_table_inited)
+ hash_free(&do_table);
+ if (ignore_table_inited)
+ hash_free(&ignore_table);
+ if (wild_do_table_inited)
+ free_string_array(&wild_do_table);
+ if (wild_ignore_table_inited)
+ free_string_array(&wild_ignore_table);
+ free_list(&do_db);
+ free_list(&ignore_db);
+ free_list(&rewrite_db);
+}
+
+
+/*
+ Returns true if table should be logged/replicated
+
+ SYNOPSIS
+ tables_ok()
+ db db to use if db in TABLE_LIST is undefined for a table
+ tables list of tables to check
+
+ NOTES
+ Changing table order in the list can lead to different results.
+
+ Note also the order of precedence of the do/ignore rules (see code).
+ For that reason, users should not set conflicting rules because they
+ may get unpredictable results (the precedence order is explained in
+ the manual).
+
+ If no table in the list is marked "updating", then we always
+ return 0, because there is no reason to execute this statement on
+ the slave if it updates nothing. (Currently, this can only happen if
+ the statement is a multi-delete (SQLCOM_DELETE_MULTI) and "tables" are
+ the tables in the FROM):
+
+ In the case of SQLCOM_DELETE_MULTI, there will be a second call to
+ tables_ok(), with tables having "updating==TRUE" (those after the
+ DELETE), so this second call will make the decision (because
+ all_tables_not_ok() = !tables_ok(1st_list) &&
+ !tables_ok(2nd_list)).
+
+ TODO
+ "Include all tables like "abc.%" except "%.EFG"". (Can't be done now.)
+ If we supported Perl regexps, we could do it with pattern: /^abc\.(?!EFG)/
+ (I could not find an equivalent in the regex library MySQL uses).
+
+ RETURN VALUES
+ 0 should not be logged/replicated
+ 1 should be logged/replicated
+*/
+
+bool
+Rpl_filter::tables_ok(const char* db, TABLE_LIST* tables)
+{
+ bool some_tables_updating= 0;
+ DBUG_ENTER("Rpl_filter::tables_ok");
+
+ for (; tables; tables= tables->next_global)
+ {
+ char hash_key[2*NAME_LEN+2];
+ char *end;
+ uint len;
+
+ if (!tables->updating)
+ continue;
+ some_tables_updating= 1;
+ end= strmov(hash_key, tables->db ? tables->db : db);
+ *end++= '.';
+ len= (uint) (strmov(end, tables->table_name) - hash_key);
+ if (do_table_inited) // if there are any do's
+ {
+ if (hash_search(&do_table, (byte*) hash_key, len))
+ DBUG_RETURN(1);
+ }
+ if (ignore_table_inited) // if there are any ignores
+ {
+ if (hash_search(&ignore_table, (byte*) hash_key, len))
+ DBUG_RETURN(0);
+ }
+ if (wild_do_table_inited &&
+ find_wild(&wild_do_table, hash_key, len))
+ DBUG_RETURN(1);
+ if (wild_ignore_table_inited &&
+ find_wild(&wild_ignore_table, hash_key, len))
+ DBUG_RETURN(0);
+ }
+
+ /*
+ If no table was to be updated, ignore statement (no reason we play it on
+ slave, slave is supposed to replicate _changes_ only).
+ If no explicit rule found and there was a do list, do not replicate.
+ If there was no do list, go ahead
+ */
+ DBUG_RETURN(some_tables_updating &&
+ !do_table_inited && !wild_do_table_inited);
+}
+
+
+/*
+ Checks whether a db matches some do_db and ignore_db rules
+
+ SYNOPSIS
+ db_ok()
+ db name of the db to check
+
+ RETURN VALUES
+ 0 should not be logged/replicated
+ 1 should be logged/replicated
+*/
+
+bool
+Rpl_filter::db_ok(const char* db)
+{
+ DBUG_ENTER("Rpl_filter::db_ok");
+
+ if (do_db.is_empty() && ignore_db.is_empty())
+ DBUG_RETURN(1); // Ok to replicate if the user puts no constraints
+
+ /*
+ If the user has specified restrictions on which databases to replicate
+ and db was not selected, do not replicate.
+ */
+ if (!db)
+ DBUG_RETURN(0);
+
+ if (!do_db.is_empty()) // if the do's are not empty
+ {
+ I_List_iterator<i_string> it(do_db);
+ i_string* tmp;
+
+ while ((tmp=it++))
+ {
+ if (!strcmp(tmp->ptr, db))
+ DBUG_RETURN(1); // match
+ }
+ DBUG_RETURN(0);
+ }
+ else // there are some elements in the ignore list, otherwise we could not get here
+ {
+ I_List_iterator<i_string> it(ignore_db);
+ i_string* tmp;
+
+ while ((tmp=it++))
+ {
+ if (!strcmp(tmp->ptr, db))
+ DBUG_RETURN(0); // match
+ }
+ DBUG_RETURN(1);
+ }
+}
+
+
+/*
+ Checks whether a db matches wild_do_table and wild_ignore_table
+ rules (for replication)
+
+ SYNOPSIS
+ db_ok_with_wild_table()
+ db name of the db to check.
+ Is tested with check_db_name() before calling this function.
+
+ NOTES
+ Here is the reason for this function.
+ We advise users who want to exclude a database 'db1' safely to do it
+ with replicate_wild_ignore_table='db1.%' instead of binlog_ignore_db or
+ replicate_ignore_db, because the latter two only check the selected db,
+ which won't work in that case:
+ USE db2;
+ UPDATE db1.t SET ... #this will be replicated and should not
+ whereas replicate_wild_ignore_table will work in all cases.
+ With replicate_wild_ignore_table, we only check tables. When
+ one does 'DROP DATABASE db1', tables are not involved and the
+ statement will be replicated, while users could expect it would not (as it
+ roughly means 'DROP db1.first_table, DROP db1.second_table...').
+ In other words, we want to interpret 'db1.%' as "everything touching db1".
+ That is why we want to match 'db1' against 'db1.%' wild table rules.
+
+ RETURN VALUES
+ 0 should not be logged/replicated
+ 1 should be logged/replicated
+*/
+
+bool
+Rpl_filter::db_ok_with_wild_table(const char *db)
+{
+ DBUG_ENTER("Rpl_filter::db_ok_with_wild_table");
+
+ char hash_key[NAME_LEN+2];
+ char *end;
+ int len;
+ end= strmov(hash_key, db);
+ *end++= '.';
+ len= end - hash_key ;
+ if (wild_do_table_inited && find_wild(&wild_do_table, hash_key, len))
+ {
+ DBUG_PRINT("return",("1"));
+ DBUG_RETURN(1);
+ }
+ if (wild_ignore_table_inited && find_wild(&wild_ignore_table, hash_key, len))
+ {
+ DBUG_PRINT("return",("0"));
+ DBUG_RETURN(0);
+ }
+
+ /*
+ If no explicit rule found and there was a do list, do not replicate.
+ If there was no do list, go ahead
+ */
+ DBUG_PRINT("return",("db=%s,retval=%d", db, !wild_do_table_inited));
+ DBUG_RETURN(!wild_do_table_inited);
+}
+
+
+bool
+Rpl_filter::is_on()
+{
+ return table_rules_on;
+}
+
+
+int
+Rpl_filter::add_do_table(const char* table_spec)
+{
+ DBUG_ENTER("Rpl_filter::add_do_table");
+ if (!do_table_inited)
+ init_table_rule_hash(&do_table, &do_table_inited);
+ table_rules_on= 1;
+ DBUG_RETURN(add_table_rule(&do_table, table_spec));
+}
+
+
+int
+Rpl_filter::add_ignore_table(const char* table_spec)
+{
+ DBUG_ENTER("Rpl_filter::add_ignore_table");
+ if (!ignore_table_inited)
+ init_table_rule_hash(&ignore_table, &ignore_table_inited);
+ table_rules_on= 1;
+ DBUG_RETURN(add_table_rule(&ignore_table, table_spec));
+}
+
+
+int
+Rpl_filter::add_wild_do_table(const char* table_spec)
+{
+ DBUG_ENTER("Rpl_filter::add_wild_do_table");
+ if (!wild_do_table_inited)
+ init_table_rule_array(&wild_do_table, &wild_do_table_inited);
+ table_rules_on= 1;
+ DBUG_RETURN(add_wild_table_rule(&wild_do_table, table_spec));
+}
+
+
+int
+Rpl_filter::add_wild_ignore_table(const char* table_spec)
+{
+ DBUG_ENTER("Rpl_filter::add_wild_ignore_table");
+ if (!wild_ignore_table_inited)
+ init_table_rule_array(&wild_ignore_table, &wild_ignore_table_inited);
+ table_rules_on= 1;
+ DBUG_RETURN(add_wild_table_rule(&wild_ignore_table, table_spec));
+}
+
+
+void
+Rpl_filter::add_db_rewrite(const char* from_db, const char* to_db)
+{
+ i_string_pair *db_pair = new i_string_pair(from_db, to_db);
+ rewrite_db.push_back(db_pair);
+}
+
+
+int
+Rpl_filter::add_table_rule(HASH* h, const char* table_spec)
+{
+ const char* dot = strchr(table_spec, '.');
+ if (!dot) return 1;
+ // len is always > 0 because we know there is a '.'
+ uint len = (uint)strlen(table_spec);
+ TABLE_RULE_ENT* e = (TABLE_RULE_ENT*)my_malloc(sizeof(TABLE_RULE_ENT)
+ + len, MYF(MY_WME));
+ if (!e) return 1;
+ e->db= (char*)e + sizeof(TABLE_RULE_ENT);
+ e->tbl_name= e->db + (dot - table_spec) + 1;
+ e->key_len= len;
+ memcpy(e->db, table_spec, len);
+
+ return my_hash_insert(h, (byte*)e);
+}
+
+
+/*
+ Add table expression with wildcards to dynamic array
+*/
+
+int
+Rpl_filter::add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec)
+{
+ const char* dot = strchr(table_spec, '.');
+ if (!dot) return 1;
+ uint len = (uint)strlen(table_spec);
+ TABLE_RULE_ENT* e = (TABLE_RULE_ENT*)my_malloc(sizeof(TABLE_RULE_ENT)
+ + len, MYF(MY_WME));
+ if (!e) return 1;
+ e->db= (char*)e + sizeof(TABLE_RULE_ENT);
+ e->tbl_name= e->db + (dot - table_spec) + 1;
+ e->key_len= len;
+ memcpy(e->db, table_spec, len);
+ insert_dynamic(a, (gptr)&e);
+ return 0;
+}
+
+
+void
+Rpl_filter::add_do_db(const char* table_spec)
+{
+ DBUG_ENTER("Rpl_filter::add_do_db");
+ i_string *db = new i_string(table_spec);
+ do_db.push_back(db);
+}
+
+
+void
+Rpl_filter::add_ignore_db(const char* table_spec)
+{
+ DBUG_ENTER("Rpl_filter::add_ignore_db");
+ i_string *db = new i_string(table_spec);
+ ignore_db.push_back(db);
+}
+
+
+static byte* get_table_key(const byte* a, uint* len,
+ my_bool __attribute__((unused)))
+{
+ TABLE_RULE_ENT *e= (TABLE_RULE_ENT *) a;
+
+ *len= e->key_len;
+ return (byte*)e->db;
+}
+
+
+static void free_table_ent(void* a)
+{
+ TABLE_RULE_ENT *e= (TABLE_RULE_ENT *) a;
+
+ my_free((gptr) e, MYF(0));
+}
+
+
+void
+Rpl_filter::init_table_rule_hash(HASH* h, bool* h_inited)
+{
+ hash_init(h, system_charset_info,TABLE_RULE_HASH_SIZE,0,0,
+ get_table_key, free_table_ent, 0);
+ *h_inited = 1;
+}
+
+
+void
+Rpl_filter::init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited)
+{
+ my_init_dynamic_array(a, sizeof(TABLE_RULE_ENT*), TABLE_RULE_ARR_SIZE,
+ TABLE_RULE_ARR_SIZE);
+ *a_inited = 1;
+}
+
+
+TABLE_RULE_ENT*
+Rpl_filter::find_wild(DYNAMIC_ARRAY *a, const char* key, int len)
+{
+ uint i;
+ const char* key_end= key + len;
+
+ for (i= 0; i < a->elements; i++)
+ {
+ TABLE_RULE_ENT* e ;
+ get_dynamic(a, (gptr)&e, i);
+ if (!my_wildcmp(system_charset_info, key, key_end,
+ (const char*)e->db,
+ (const char*)(e->db + e->key_len),
+ '\\',wild_one,wild_many))
+ return e;
+ }
+
+ return 0;
+}
+
+
+void
+Rpl_filter::free_string_array(DYNAMIC_ARRAY *a)
+{
+ uint i;
+ for (i= 0; i < a->elements; i++)
+ {
+ char* p;
+ get_dynamic(a, (gptr) &p, i);
+ my_free(p, MYF(MY_WME));
+ }
+ delete_dynamic(a);
+}
+
+
+/*
+ Builds a String from a HASH of TABLE_RULE_ENT. Cannot be used for any other
+ hash, as it assumes that the hash entries are TABLE_RULE_ENT.
+
+ SYNOPSIS
+ table_rule_ent_hash_to_str()
+ s pointer to the String to fill
+ h pointer to the HASH to read
+
+ RETURN VALUES
+ none
+*/
+
+void
+Rpl_filter::table_rule_ent_hash_to_str(String* s, HASH* h)
+{
+ s->length(0);
+ for (uint i= 0; i < h->records; i++)
+ {
+ TABLE_RULE_ENT* e= (TABLE_RULE_ENT*) hash_element(h, i);
+ if (s->length())
+ s->append(',');
+ s->append(e->db,e->key_len);
+ }
+}
+
+
+void
+Rpl_filter::table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a)
+{
+ s->length(0);
+ for (uint i= 0; i < a->elements; i++)
+ {
+ TABLE_RULE_ENT* e;
+ get_dynamic(a, (gptr)&e, i);
+ if (s->length())
+ s->append(',');
+ s->append(e->db,e->key_len);
+ }
+}
+
+
+void
+Rpl_filter::get_do_table(String* str)
+{
+ table_rule_ent_hash_to_str(str, &do_table);
+}
+
+
+void
+Rpl_filter::get_ignore_table(String* str)
+{
+ table_rule_ent_hash_to_str(str, &ignore_table);
+}
+
+
+void
+Rpl_filter::get_wild_do_table(String* str)
+{
+ table_rule_ent_dynamic_array_to_str(str, &wild_do_table);
+}
+
+
+void
+Rpl_filter::get_wild_ignore_table(String* str)
+{
+ table_rule_ent_dynamic_array_to_str(str, &wild_ignore_table);
+}
+
+
+const char*
+Rpl_filter::get_rewrite_db(const char* db, uint32 *new_len)
+{
+ if (rewrite_db.is_empty() || !db)
+ return db;
+ I_List_iterator<i_string_pair> it(rewrite_db);
+ i_string_pair* tmp;
+
+ while ((tmp=it++))
+ {
+ if (!strcmp(tmp->key, db))
+ {
+ *new_len= strlen(tmp->val);
+ return tmp->val;
+ }
+ }
+ return db;
+}
+
+
+I_List<i_string>*
+Rpl_filter::get_do_db()
+{
+ return &do_db;
+}
+
+
+I_List<i_string>*
+Rpl_filter::get_ignore_db()
+{
+ return &ignore_db;
+}
diff --git a/sql/rpl_filter.h b/sql/rpl_filter.h
new file mode 100644
index 00000000000..cfcb3b43607
--- /dev/null
+++ b/sql/rpl_filter.h
@@ -0,0 +1,113 @@
+/* Copyright (C) 2000-2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef RPL_FILTER_H
+#define RPL_FILTER_H
+
+#include "mysql.h"
+#include "my_list.h"
+
+typedef struct st_table_rule_ent
+{
+ char* db;
+ char* tbl_name;
+ uint key_len;
+} TABLE_RULE_ENT;
+
+/*
+ Rpl_filter
+
+ Inclusion and exclusion rules of tables and databases.
+ Also handles rewrites of db.
+ Used for replication and binlogging.
+ */
+class Rpl_filter
+{
+public:
+ Rpl_filter();
+ ~Rpl_filter();
+ Rpl_filter(Rpl_filter const&);
+ Rpl_filter& operator=(Rpl_filter const&);
+
+ /* Checks - returns true if ok to replicate/log */
+
+ bool tables_ok(const char* db, TABLE_LIST* tables);
+ bool db_ok(const char* db);
+ bool db_ok_with_wild_table(const char *db);
+
+ bool is_on();
+
+ /* Setters - add filtering rules */
+
+ int add_do_table(const char* table_spec);
+ int add_ignore_table(const char* table_spec);
+
+ int add_wild_do_table(const char* table_spec);
+ int add_wild_ignore_table(const char* table_spec);
+
+ void add_do_db(const char* db_spec);
+ void add_ignore_db(const char* db_spec);
+
+ void add_db_rewrite(const char* from_db, const char* to_db);
+
+ /* Getters - to get information about current rules */
+
+ void get_do_table(String* str);
+ void get_ignore_table(String* str);
+
+ void get_wild_do_table(String* str);
+ void get_wild_ignore_table(String* str);
+
+ const char* get_rewrite_db(const char* db, uint32 *new_len);
+
+ I_List<i_string>* get_do_db();
+ I_List<i_string>* get_ignore_db();
+
+private:
+ bool table_rules_on;
+
+ void init_table_rule_hash(HASH* h, bool* h_inited);
+ void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited);
+
+ int add_table_rule(HASH* h, const char* table_spec);
+ int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec);
+
+ void free_string_array(DYNAMIC_ARRAY *a);
+
+ void table_rule_ent_hash_to_str(String* s, HASH* h);
+ void table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a);
+ TABLE_RULE_ENT* find_wild(DYNAMIC_ARRAY *a, const char* key, int len);
+
+ HASH do_table;
+ HASH ignore_table;
+ DYNAMIC_ARRAY wild_do_table;
+ DYNAMIC_ARRAY wild_ignore_table;
+
+ bool do_table_inited;
+ bool ignore_table_inited;
+ bool wild_do_table_inited;
+ bool wild_ignore_table_inited;
+
+ I_List<i_string> do_db;
+ I_List<i_string> ignore_db;
+
+ I_List<i_string_pair> rewrite_db;
+};
+
+extern Rpl_filter *rpl_filter;
+extern Rpl_filter *binlog_filter;
+
+#endif // RPL_FILTER_H
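
To make the migration away from the old slave.cc globals concrete, here is a hedged sketch of how the class declared above is meant to be driven; the rule strings and names are illustrative and error handling is omitted:

    // Sketch only, assuming the declarations in rpl_filter.h above.
    Rpl_filter filter;
    filter.add_do_table("db1.t1");            // exact rule, stored in the do_table HASH
    filter.add_wild_ignore_table("db2.%");    // wildcard rule, stored in a DYNAMIC_ARRAY
    filter.add_ignore_db("test");
    filter.add_db_rewrite("from_db", "to_db");

    if (filter.is_on())                       // table rules were added
    {
      bool ok_db= filter.db_ok("db1");        // checked against do_db/ignore_db lists
      /* filter.tables_ok(db, table_list) replaces the old global tables_ok() */
    }

In the server itself the two global instances declared at the end of the header (rpl_filter, presumably for the slave threads, and binlog_filter for binlogging) are used rather than a local object.
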
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 09581aed217..f1ac90ea113 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -789,6 +789,7 @@ struct show_var_st init_vars[]= {
{"have_isam", (char*) &have_isam, SHOW_HAVE},
{"have_ndbcluster", (char*) &have_ndbcluster, SHOW_HAVE},
{"have_openssl", (char*) &have_openssl, SHOW_HAVE},
+ {"have_partition_engine", (char*) &have_partition_db, SHOW_HAVE},
{"have_query_cache", (char*) &have_query_cache, SHOW_HAVE},
{"have_raid", (char*) &have_raid, SHOW_HAVE},
{"have_rtree_keys", (char*) &have_rtree_keys, SHOW_HAVE},
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 4953004cc24..b3784b6421d 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -5380,7 +5380,85 @@ ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE
eng "Can't create federated table. The data source connection string '%-.64s' is not in the correct format"
ER_FOREIGN_DATA_STRING_INVALID
eng "The data source connection string '%-.64s' is not in the correct format"
-ER_CANT_CREATE_FEDERATED_TABLE
- eng "Can't create federated table. Foreign data src error : '%-.64s'"
-ER_TRG_IN_WRONG_SCHEMA
- eng "Trigger in wrong schema"
+ER_CANT_CREATE_FEDERATED_TABLE
+ eng "Can't create federated table. Foreign data src error : '%-.64s'"
+ER_TRG_IN_WRONG_SCHEMA
+ eng "Trigger in wrong schema"
+ER_PARTITION_REQUIRES_VALUES_ERROR
+ eng "%s PARTITIONING requires definition of VALUES %s for each partition"
+ swe "%s PARTITIONering kräver definition av VALUES %s för varje partition"
+ER_PARTITION_WRONG_VALUES_ERROR
+ eng "Only %s PARTITIONING can use VALUES %s in partition definition"
+ swe "Endast %s partitionering kan använda VALUES %s i definition av partitionen"
+ER_PARTITION_MAXVALUE_ERROR
+ eng "MAXVALUE can only be used in last partition definition"
+ swe "MAXVALUE kan bara användas i definitionen av den sista partitionen"
+ER_PARTITION_SUBPARTITION_ERROR
+ eng "Subpartitions can only be hash partitions and by key"
+ swe "Subpartitioner kan bara vara hash och key partitioner"
+ER_PARTITION_WRONG_NO_PART_ERROR
+ eng "Wrong number of partitions defined, mismatch with previous setting"
+ swe "Antal partitioner definierade och antal partitioner är inte lika"
+ER_PARTITION_WRONG_NO_SUBPART_ERROR
+ eng "Wrong number of subpartitions defined, mismatch with previous setting"
+ swe "Antal subpartitioner definierade och antal subpartitioner är inte lika"
+ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR
+ eng "Constant/Random expression in (sub)partitioning function is not allowed"
+ swe "Konstanta uttryck eller slumpmässiga uttryck är inte tillåtna (sub)partitioneringsfunktioner"
+ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR
+ eng "Expression in RANGE/LIST VALUES must be constant"
+ swe "Uttryck i RANGE/LIST VALUES måste vara ett konstant uttryck"
+ER_FIELD_NOT_FOUND_PART_ERROR
+ eng "Field in list of fields for partition function not found in table"
+ swe "Fält i listan av fält för partitionering med key inte funnen i tabellen"
+ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR
+ eng "List of fields is only allowed in KEY partitions"
+ swe "En lista av fält är endast tillåtet för KEY partitioner"
+ER_INCONSISTENT_PARTITION_INFO_ERROR
+ eng "The partition info in the frm file is not consistent with what can be written into the frm file"
+ swe "Partitioneringsinformationen i frm-filen är inte konsistent med vad som kan skrivas i frm-filen"
+ER_PARTITION_FUNC_NOT_ALLOWED_ERROR
+ eng "The %s function returns the wrong type"
+ swe "%s-funktionen returnerar felaktig typ"
+ER_PARTITIONS_MUST_BE_DEFINED_ERROR
+ eng "For %s partitions each partition must be defined"
+ swe "För %s partitionering så måste varje partition definieras"
+ER_RANGE_NOT_INCREASING_ERROR
+ eng "VALUES LESS THAN value must be strictly increasing for each partition"
+ swe "Värden i VALUES LESS THAN måste vara strikt växande för varje partition"
+ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR
+ eng "VALUES %s value must be of same type as partition function"
+ swe "Värden i VALUES %s måste vara av samma typ som partitioneringsfunktionen"
+ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR
+ eng "Multiple definition of same constant in list partitioning"
+ swe "Multipel definition av samma konstant i list partitionering"
+ER_PARTITION_ENTRY_ERROR
+ eng "Partitioning can not be used stand-alone in query"
+ swe "Partitioneringssyntax kan inte användas på egen hand i en SQL-fråga"
+ER_MIX_HANDLER_ERROR
+ eng "The mix of handlers in the partitions is not allowed in this version in MySQL"
+ swe "Denna mix av lagringsmotorer är inte tillåten i denna version av MySQL"
+ER_PARTITION_NOT_DEFINED_ERROR
+ eng "For the partitioned engine it is necessary to define all %s"
+ swe "För partitioneringsmotorn så är det nödvändigt att definiera alla %s"
+ER_TOO_MANY_PARTITIONS_ERROR
+ eng "Too many partitions were defined"
+ swe "För många partitioner definierades"
+ER_SUBPARTITION_ERROR
+ eng "It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning"
+ swe "Det är endast möjligt att blanda RANGE/LIST partitionering med HASH/KEY partitionering för subpartitionering"
+ER_CANT_CREATE_HANDLER_FILE
+ eng "Failed to create specific handler file"
+ swe "Misslyckades med att skapa specifik fil i lagringsmotor"
+ER_BLOB_FIELD_IN_PART_FUNC_ERROR
+ eng "A BLOB field is not allowed in partition function"
+ swe "Ett BLOB-fält är inte tillåtet i partitioneringsfunktioner"
+ER_CHAR_SET_IN_PART_FIELD_ERROR
+ eng "VARCHAR only allowed if binary collation for partition functions"
+ swe "VARCHAR endast tillåten med binär collation för partitioneringsfunktion"
+ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF
+ eng "A %s need to include all fields in the partition function"
+ swe "En %s behöver inkludera alla fält i partitioneringsfunktionen för denna lagringsmotor"
+ER_NO_PARTS_ERROR
+ eng "Number of %s = 0 is not an allowed value"
+ swe "Antal %s = 0 är inte ett tillåten värde"
diff --git a/sql/slave.cc b/sql/slave.cc
index a587ac5a118..445e2120475 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -22,6 +22,7 @@
#include <myisam.h>
#include "slave.h"
#include "sql_repl.h"
+#include "rpl_filter.h"
#include "repl_failsafe.h"
#include <thr_alarm.h>
#include <my_dir.h>
@@ -36,11 +37,7 @@ typedef bool (*CHECK_KILLED_FUNC)(THD*,void*);
volatile bool slave_sql_running = 0, slave_io_running = 0;
char* slave_load_tmpdir = 0;
MASTER_INFO *active_mi;
-HASH replicate_do_table, replicate_ignore_table;
-DYNAMIC_ARRAY replicate_wild_do_table, replicate_wild_ignore_table;
-bool do_table_inited = 0, ignore_table_inited = 0;
-bool wild_do_table_inited = 0, wild_ignore_table_inited = 0;
-bool table_rules_on= 0, replicate_same_server_id;
+bool replicate_same_server_id;
ulonglong relay_log_space_limit = 0;
/*
@@ -194,20 +191,6 @@ err:
}
-static void free_table_ent(TABLE_RULE_ENT* e)
-{
- my_free((gptr) e, MYF(0));
-}
-
-
-static byte* get_table_key(TABLE_RULE_ENT* e, uint* len,
- my_bool not_used __attribute__((unused)))
-{
- *len = e->key_len;
- return (byte*)e->db;
-}
-
-
/*
Open the given relay log
@@ -809,245 +792,6 @@ int start_slave_threads(bool need_slave_mutex, bool wait_for_start,
}
-void init_table_rule_hash(HASH* h, bool* h_inited)
-{
- hash_init(h, system_charset_info,TABLE_RULE_HASH_SIZE,0,0,
- (hash_get_key) get_table_key,
- (hash_free_key) free_table_ent, 0);
- *h_inited = 1;
-}
-
-
-void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited)
-{
- my_init_dynamic_array(a, sizeof(TABLE_RULE_ENT*), TABLE_RULE_ARR_SIZE,
- TABLE_RULE_ARR_SIZE);
- *a_inited = 1;
-}
-
-
-static TABLE_RULE_ENT* find_wild(DYNAMIC_ARRAY *a, const char* key, int len)
-{
- uint i;
- const char* key_end = key + len;
-
- for (i = 0; i < a->elements; i++)
- {
- TABLE_RULE_ENT* e ;
- get_dynamic(a, (gptr)&e, i);
- if (!my_wildcmp(system_charset_info, key, key_end,
- (const char*)e->db,
- (const char*)(e->db + e->key_len),
- '\\',wild_one,wild_many))
- return e;
- }
-
- return 0;
-}
-
-
-/*
- Checks whether tables match some (wild_)do_table and (wild_)ignore_table
- rules (for replication)
-
- SYNOPSIS
- tables_ok()
- thd thread (SQL slave thread normally). Mustn't be null.
- tables list of tables to check
-
- NOTES
- Note that changing the order of the tables in the list can lead to
- different results. Note also the order of precedence of the do/ignore
- rules (see code below). For that reason, users should not set conflicting
- rules because they may get unpredicted results (precedence order is
- explained in the manual).
- If no table of the list is marked "updating" (so far this can only happen
- if the statement is a multi-delete (SQLCOM_DELETE_MULTI) and the "tables"
- is the tables in the FROM): then we always return 0, because there is no
- reason we play this statement on this slave if it updates nothing. In the
- case of SQLCOM_DELETE_MULTI, there will be a second call to tables_ok(),
- with tables having "updating==TRUE" (those after the DELETE), so this
- second call will make the decision (because
- all_tables_not_ok() = !tables_ok(1st_list) && !tables_ok(2nd_list)).
-
- Thought which arose from a question of a big customer "I want to include
- all tables like "abc.%" except the "%.EFG"". This can't be done now. If we
- supported Perl regexps we could do it with this pattern: /^abc\.(?!EFG)/
- (I could not find an equivalent in the regex library MySQL uses).
-
- RETURN VALUES
- 0 should not be logged/replicated
- 1 should be logged/replicated
-*/
-
-bool tables_ok(THD* thd, TABLE_LIST* tables)
-{
- bool some_tables_updating= 0;
- DBUG_ENTER("tables_ok");
-
- /*
- In routine, can't reliably pick and choose substatements, so always
- replicate.
- We can't reliably know if one substatement should be executed or not:
- consider the case of this substatement: a SELECT on a non-replicated
- constant table; if we don't execute it maybe it was going to fill a
- variable which was going to be used by the next substatement to update
- a replicated table? If we execute it maybe the constant non-replicated
- table does not exist (and so we'll fail) while there was no need to
- execute this as this SELECT does not influence replicated tables in the
- rest of the routine? In other words: users are used to replicate-*-table
- specifying how to handle updates to tables, these options don't say
- anything about reads to tables; we can't guess.
- */
- if (thd->spcont)
- DBUG_RETURN(1);
-
- for (; tables; tables= tables->next_global)
- {
- char hash_key[2*NAME_LEN+2];
- char *end;
- uint len;
-
- if (!tables->updating)
- continue;
- some_tables_updating= 1;
- end= strmov(hash_key, tables->db ? tables->db : thd->db);
- *end++= '.';
- len= (uint) (strmov(end, tables->table_name) - hash_key);
- if (do_table_inited) // if there are any do's
- {
- if (hash_search(&replicate_do_table, (byte*) hash_key, len))
- DBUG_RETURN(1);
- }
- if (ignore_table_inited) // if there are any ignores
- {
- if (hash_search(&replicate_ignore_table, (byte*) hash_key, len))
- DBUG_RETURN(0);
- }
- if (wild_do_table_inited && find_wild(&replicate_wild_do_table,
- hash_key, len))
- DBUG_RETURN(1);
- if (wild_ignore_table_inited && find_wild(&replicate_wild_ignore_table,
- hash_key, len))
- DBUG_RETURN(0);
- }
-
- /*
- If no table was to be updated, ignore statement (no reason we play it on
- slave, slave is supposed to replicate _changes_ only).
- If no explicit rule found and there was a do list, do not replicate.
- If there was no do list, go ahead
- */
- DBUG_RETURN(some_tables_updating &&
- !do_table_inited && !wild_do_table_inited);
-}
-
-
-/*
- Checks whether a db matches wild_do_table and wild_ignore_table
- rules (for replication)
-
- SYNOPSIS
- db_ok_with_wild_table()
- db name of the db to check.
- Is tested with check_db_name() before calling this function.
-
- NOTES
- Here is the reason for this function.
- We advise users who want to exclude a database 'db1' safely to do it
- with replicate_wild_ignore_table='db1.%' instead of binlog_ignore_db or
- replicate_ignore_db because the two lasts only check for the selected db,
- which won't work in that case:
- USE db2;
- UPDATE db1.t SET ... #this will be replicated and should not
- whereas replicate_wild_ignore_table will work in all cases.
- With replicate_wild_ignore_table, we only check tables. When
- one does 'DROP DATABASE db1', tables are not involved and the
- statement will be replicated, while users could expect it would not (as it
- rougly means 'DROP db1.first_table, DROP db1.second_table...').
- In other words, we want to interpret 'db1.%' as "everything touching db1".
- That is why we want to match 'db1' against 'db1.%' wild table rules.
-
- RETURN VALUES
- 0 should not be logged/replicated
- 1 should be logged/replicated
- */
-
-int db_ok_with_wild_table(const char *db)
-{
- char hash_key[NAME_LEN+2];
- char *end;
- int len;
- end= strmov(hash_key, db);
- *end++= '.';
- len= end - hash_key ;
- if (wild_do_table_inited && find_wild(&replicate_wild_do_table,
- hash_key, len))
- return 1;
- if (wild_ignore_table_inited && find_wild(&replicate_wild_ignore_table,
- hash_key, len))
- return 0;
-
- /*
- If no explicit rule found and there was a do list, do not replicate.
- If there was no do list, go ahead
- */
- return !wild_do_table_inited;
-}
-
-
-int add_table_rule(HASH* h, const char* table_spec)
-{
- const char* dot = strchr(table_spec, '.');
- if (!dot) return 1;
- // len is always > 0 because we know the there exists a '.'
- uint len = (uint)strlen(table_spec);
- TABLE_RULE_ENT* e = (TABLE_RULE_ENT*)my_malloc(sizeof(TABLE_RULE_ENT)
- + len, MYF(MY_WME));
- if (!e) return 1;
- e->db = (char*)e + sizeof(TABLE_RULE_ENT);
- e->tbl_name = e->db + (dot - table_spec) + 1;
- e->key_len = len;
- memcpy(e->db, table_spec, len);
- (void)my_hash_insert(h, (byte*)e);
- return 0;
-}
-
-
-/*
- Add table expression with wildcards to dynamic array
-*/
-
-int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec)
-{
- const char* dot = strchr(table_spec, '.');
- if (!dot) return 1;
- uint len = (uint)strlen(table_spec);
- TABLE_RULE_ENT* e = (TABLE_RULE_ENT*)my_malloc(sizeof(TABLE_RULE_ENT)
- + len, MYF(MY_WME));
- if (!e) return 1;
- e->db = (char*)e + sizeof(TABLE_RULE_ENT);
- e->tbl_name = e->db + (dot - table_spec) + 1;
- e->key_len = len;
- memcpy(e->db, table_spec, len);
- insert_dynamic(a, (gptr)&e);
- return 0;
-}
-
-
-static void free_string_array(DYNAMIC_ARRAY *a)
-{
- uint i;
- for (i = 0; i < a->elements; i++)
- {
- char* p;
- get_dynamic(a, (gptr) &p, i);
- my_free(p, MYF(MY_WME));
- }
- delete_dynamic(a);
-}
-
-
#ifdef NOT_USED_YET
static int end_slave_on_walk(MASTER_INFO* mi, gptr /*unused*/)
{
@@ -1083,14 +827,6 @@ void end_slave()
*/
terminate_slave_threads(active_mi,SLAVE_FORCE_ALL);
end_master_info(active_mi);
- if (do_table_inited)
- hash_free(&replicate_do_table);
- if (ignore_table_inited)
- hash_free(&replicate_ignore_table);
- if (wild_do_table_inited)
- free_string_array(&replicate_wild_do_table);
- if (wild_ignore_table_inited)
- free_string_array(&replicate_wild_ignore_table);
delete active_mi;
active_mi= 0;
}
@@ -1170,24 +906,6 @@ bool net_request_file(NET* net, const char* fname)
}
-const char *rewrite_db(const char* db, uint32 *new_len)
-{
- if (replicate_rewrite_db.is_empty() || !db)
- return db;
- I_List_iterator<i_string_pair> it(replicate_rewrite_db);
- i_string_pair* tmp;
-
- while ((tmp=it++))
- {
- if (!strcmp(tmp->key, db))
- {
- *new_len= (uint32)strlen(tmp->val);
- return tmp->val;
- }
- }
- return db;
-}
-
/*
From other comments and tests in code, it looks like
sometimes Query_log_event and Load_log_event can have db == 0
@@ -1200,60 +918,6 @@ const char *print_slave_db_safe(const char* db)
return (db ? db : "");
}
-/*
- Checks whether a db matches some do_db and ignore_db rules
- (for logging or replication)
-
- SYNOPSIS
- db_ok()
- db name of the db to check
- do_list either binlog_do_db or replicate_do_db
- ignore_list either binlog_ignore_db or replicate_ignore_db
-
- RETURN VALUES
- 0 should not be logged/replicated
- 1 should be logged/replicated
-*/
-
-int db_ok(const char* db, I_List<i_string> &do_list,
- I_List<i_string> &ignore_list )
-{
- if (do_list.is_empty() && ignore_list.is_empty())
- return 1; // ok to replicate if the user puts no constraints
-
- /*
- If the user has specified restrictions on which databases to replicate
- and db was not selected, do not replicate.
- */
- if (!db)
- return 0;
-
- if (!do_list.is_empty()) // if the do's are not empty
- {
- I_List_iterator<i_string> it(do_list);
- i_string* tmp;
-
- while ((tmp=it++))
- {
- if (!strcmp(tmp->ptr, db))
- return 1; // match
- }
- return 0;
- }
- else // there are some elements in the don't, otherwise we cannot get here
- {
- I_List_iterator<i_string> it(ignore_list);
- i_string* tmp;
-
- while ((tmp=it++))
- {
- if (!strcmp(tmp->ptr, db))
- return 0; // match
- }
- return 1;
- }
-}
-
static int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
const char *default_val)
@@ -2262,48 +1926,6 @@ int register_slave_on_master(MYSQL* mysql)
}
-/*
- Builds a String from a HASH of TABLE_RULE_ENT. Cannot be used for any other
- hash, as it assumes that the hash entries are TABLE_RULE_ENT.
-
- SYNOPSIS
- table_rule_ent_hash_to_str()
- s pointer to the String to fill
- h pointer to the HASH to read
-
- RETURN VALUES
- none
-*/
-
-void table_rule_ent_hash_to_str(String* s, HASH* h)
-{
- s->length(0);
- for (uint i=0 ; i < h->records ; i++)
- {
- TABLE_RULE_ENT* e= (TABLE_RULE_ENT*) hash_element(h, i);
- if (s->length())
- s->append(',');
- s->append(e->db,e->key_len);
- }
-}
-
-/*
- Mostly the same thing as above
-*/
-
-void table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a)
-{
- s->length(0);
- for (uint i=0 ; i < a->elements ; i++)
- {
- TABLE_RULE_ENT* e;
- get_dynamic(a, (gptr)&e, i);
- if (s->length())
- s->append(',');
- s->append(e->db,e->key_len);
- }
-}
-
bool show_master_info(THD* thd, MASTER_INFO* mi)
{
// TODO: fix this for multi-master
@@ -2398,23 +2020,18 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
protocol->store(mi->rli.group_master_log_name, &my_charset_bin);
protocol->store(mi->slave_running ? "Yes":"No", &my_charset_bin);
protocol->store(mi->rli.slave_running ? "Yes":"No", &my_charset_bin);
- protocol->store(&replicate_do_db);
- protocol->store(&replicate_ignore_db);
- /*
- We can't directly use some protocol->store for
- replicate_*_table,
- as Protocol doesn't know the TABLE_RULE_ENT struct.
- We first build Strings and then pass them to protocol->store.
- */
+ protocol->store(rpl_filter->get_do_db());
+ protocol->store(rpl_filter->get_ignore_db());
+
char buf[256];
String tmp(buf, sizeof(buf), &my_charset_bin);
- table_rule_ent_hash_to_str(&tmp, &replicate_do_table);
+ rpl_filter->get_do_table(&tmp);
protocol->store(&tmp);
- table_rule_ent_hash_to_str(&tmp, &replicate_ignore_table);
+ rpl_filter->get_ignore_table(&tmp);
protocol->store(&tmp);
- table_rule_ent_dynamic_array_to_str(&tmp, &replicate_wild_do_table);
+ rpl_filter->get_wild_do_table(&tmp);
protocol->store(&tmp);
- table_rule_ent_dynamic_array_to_str(&tmp, &replicate_wild_ignore_table);
+ rpl_filter->get_wild_ignore_table(&tmp);
protocol->store(&tmp);
protocol->store((uint32) mi->rli.last_slave_errno);
@@ -3886,10 +3503,8 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev)
if (unlikely(!cev->is_valid()))
DBUG_RETURN(1);
- /*
- TODO: fix to honor table rules, not only db rules
- */
- if (!db_ok(cev->db, replicate_do_db, replicate_ignore_db))
+
+ if (!rpl_filter->db_ok(cev->db))
{
skip_load_data_infile(net);
DBUG_RETURN(0);
diff --git a/sql/slave.h b/sql/slave.h
index c41234ab2ed..ead1aa87ce6 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -21,6 +21,8 @@
#include "mysql.h"
#include "my_list.h"
+#include "rpl_filter.h"
+
#define SLAVE_NET_TIMEOUT 3600
#define MAX_SLAVE_ERRMSG 1024
#define MAX_SLAVE_ERROR 2000
@@ -461,15 +463,6 @@ typedef struct st_master_info
int queue_event(MASTER_INFO* mi,const char* buf,ulong event_len);
-typedef struct st_table_rule_ent
-{
- char* db;
- char* tbl_name;
- uint key_len;
-} TABLE_RULE_ENT;
-
-#define TABLE_RULE_HASH_SIZE 16
-#define TABLE_RULE_ARR_SIZE 16
#define MAX_SLAVE_ERRMSG 1024
#define RPL_LOG_NAME (rli->group_master_log_name[0] ? rli->group_master_log_name :\
@@ -523,27 +516,9 @@ int mysql_table_dump(THD* thd, const char* db,
int fetch_master_table(THD* thd, const char* db_name, const char* table_name,
MASTER_INFO* mi, MYSQL* mysql, bool overwrite);
-void table_rule_ent_hash_to_str(String* s, HASH* h);
-void table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a);
bool show_master_info(THD* thd, MASTER_INFO* mi);
bool show_binlog_info(THD* thd);
-/* See if the query uses any tables that should not be replicated */
-bool tables_ok(THD* thd, TABLE_LIST* tables);
-
-/*
- Check to see if the database is ok to operate on with respect to the
- do and ignore lists - used in replication
-*/
-int db_ok(const char* db, I_List<i_string> &do_list,
- I_List<i_string> &ignore_list );
-int db_ok_with_wild_table(const char *db);
-
-int add_table_rule(HASH* h, const char* table_spec);
-int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec);
-void init_table_rule_hash(HASH* h, bool* h_inited);
-void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited);
-const char *rewrite_db(const char* db, uint32 *new_db_len);
const char *print_slave_db_safe(const char *db);
int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code);
void skip_load_data_infile(NET* net);
@@ -577,11 +552,7 @@ extern "C" pthread_handler_decl(handle_slave_sql,arg);
extern bool volatile abort_loop;
extern MASTER_INFO main_mi, *active_mi; /* active_mi for multi-master */
extern LIST master_list;
-extern HASH replicate_do_table, replicate_ignore_table;
-extern DYNAMIC_ARRAY replicate_wild_do_table, replicate_wild_ignore_table;
-extern bool do_table_inited, ignore_table_inited,
- wild_do_table_inited, wild_ignore_table_inited;
-extern bool table_rules_on, replicate_same_server_id;
+extern bool replicate_same_server_id;
extern int disconnect_slave_event_count, abort_slave_event_count ;
@@ -595,8 +566,6 @@ extern my_bool master_ssl;
extern my_string master_ssl_ca, master_ssl_capath, master_ssl_cert,
master_ssl_cipher, master_ssl_key;
-extern I_List<i_string> replicate_do_db, replicate_ignore_db;
-extern I_List<i_string_pair> replicate_rewrite_db;
extern I_List<THD> threads;
#endif
diff --git a/sql/sp.cc b/sql/sp.cc
index a277c6bd253..0c9af895fb6 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -799,7 +799,7 @@ db_show_routine_status(THD *thd, int type, const char *wild)
}
}
- table->file->ha_index_init(0);
+ table->file->ha_index_init(0, 1);
if ((res= table->file->index_first(table->record[0])))
{
res= (res == HA_ERR_END_OF_FILE) ? 0 : SP_INTERNAL_ERROR;
@@ -849,7 +849,7 @@ sp_drop_db_routines(THD *thd, char *db)
goto err;
ret= SP_OK;
- table->file->ha_index_init(0);
+ table->file->ha_index_init(0, 1);
if (! table->file->index_read(table->record[0],
key, keylen, HA_READ_KEY_EXACT))
{
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 04666469e9c..fdb7f7f069c 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -27,9 +27,6 @@
#include "mysql_priv.h"
#include "hash_filo.h"
-#ifdef HAVE_REPLICATION
-#include "sql_repl.h" //for tables_ok()
-#endif
#include <m_ctype.h>
#include <stdarg.h>
#include "sp_head.h"
@@ -37,6 +34,8 @@
#ifndef NO_EMBEDDED_ACCESS_CHECKS
+#define FIRST_NON_YN_FIELD 26
+
class acl_entry :public hash_filo_element
{
public:
@@ -1506,7 +1505,7 @@ static bool update_user_table(THD *thd, const char *host, const char *user,
GRANT and REVOKE are applied the slave in/exclusion rules as they are
some kind of updates to the mysql.% tables.
*/
- if (thd->slave_thread && table_rules_on)
+ if (thd->slave_thread && rpl_filter->is_on())
{
/*
The tables must be marked "updating" so that tables_ok() takes them into
@@ -1514,7 +1513,7 @@ static bool update_user_table(THD *thd, const char *host, const char *user,
*/
tables.updating= 1;
/* Thanks to bzero, tables.next==0 */
- if (!tables_ok(thd, &tables))
+ if (!(thd->spcont || rpl_filter->tables_ok(0, &tables)))
DBUG_RETURN(0);
}
#endif
@@ -1526,7 +1525,7 @@ static bool update_user_table(THD *thd, const char *host, const char *user,
key_copy((byte *) user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ table->file->ha_retrieve_all_cols();
if (table->file->index_read_idx(table->record[0], 0,
(byte *) user_key, table->key_info->key_length,
HA_READ_KEY_EXACT))
@@ -1619,7 +1618,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ table->file->ha_retrieve_all_cols();
if (table->file->index_read_idx(table->record[0], 0,
user_key, table->key_info->key_length,
HA_READ_KEY_EXACT))
@@ -1752,7 +1751,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
We should NEVER delete from the user table, as a uses can still
use mysqld even if he doesn't have any privileges in the user table!
*/
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ table->file->ha_retrieve_all_cols();
if (cmp_record(table,record[1]) &&
(error=table->file->update_row(table->record[1],table->record[0])))
{ // This should never happen
@@ -1834,7 +1833,7 @@ static int replace_db_table(TABLE *table, const char *db,
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ table->file->ha_retrieve_all_cols();
if (table->file->index_read_idx(table->record[0],0,
user_key, table->key_info->key_length,
HA_READ_KEY_EXACT))
@@ -1870,7 +1869,7 @@ static int replace_db_table(TABLE *table, const char *db,
/* update old existing row */
if (rights)
{
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ table->file->ha_retrieve_all_cols();
if ((error=table->file->update_row(table->record[1],table->record[0])))
goto table_error; /* purecov: deadcode */
}
@@ -2049,7 +2048,7 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs)
key_copy(key, col_privs->record[0], col_privs->key_info, key_prefix_len);
col_privs->field[4]->store("",0, &my_charset_latin1);
- col_privs->file->ha_index_init(0);
+ col_privs->file->ha_index_init(0, 1);
if (col_privs->file->index_read(col_privs->record[0],
(byte*) key,
key_prefix_len, HA_READ_KEY_EXACT))
@@ -2194,7 +2193,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
List_iterator <LEX_COLUMN> iter(columns);
class LEX_COLUMN *column;
- table->file->ha_index_init(0);
+ table->file->ha_index_init(0, 1);
while ((column= iter++))
{
ulong privileges= column->rights;
@@ -2209,7 +2208,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ table->file->ha_retrieve_all_cols();
if (table->file->index_read(table->record[0], user_key,
table->key_info->key_length,
HA_READ_KEY_EXACT))
@@ -2287,7 +2286,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
key_copy(user_key, table->record[0], table->key_info,
key_prefix_length);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ table->file->ha_retrieve_all_cols();
if (table->file->index_read(table->record[0], user_key,
key_prefix_length,
HA_READ_KEY_EXACT))
@@ -2385,7 +2384,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ table->file->ha_retrieve_all_cols();
if (table->file->index_read_idx(table->record[0], 0,
user_key, table->key_info->key_length,
HA_READ_KEY_EXACT))
@@ -2701,14 +2700,15 @@ bool mysql_table_grant(THD *thd, TABLE_LIST *table_list,
GRANT and REVOKE are applied the slave in/exclusion rules as they are
some kind of updates to the mysql.% tables.
*/
- if (thd->slave_thread && table_rules_on)
+ if (thd->slave_thread && rpl_filter->is_on())
{
/*
The tables must be marked "updating" so that tables_ok() takes them into
account in tests.
*/
tables[0].updating= tables[1].updating= tables[2].updating= 1;
- if (!tables_ok(thd, tables))
+ if (!(thd->spcont || rpl_filter->tables_ok(0, tables)))
DBUG_RETURN(FALSE);
}
#endif
@@ -2908,14 +2908,14 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc,
GRANT and REVOKE are applied the slave in/exclusion rules as they are
some kind of updates to the mysql.% tables.
*/
- if (thd->slave_thread && table_rules_on)
+ if (thd->slave_thread && rpl_filter->is_on())
{
/*
The tables must be marked "updating" so that tables_ok() takes them into
account in tests.
*/
tables[0].updating= tables[1].updating= 1;
- if (!tables_ok(thd, tables))
+ if (!(thd->spcont || rpl_filter->tables_ok(0, tables)))
DBUG_RETURN(FALSE);
}
#endif
@@ -3039,14 +3039,14 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
GRANT and REVOKE are applied the slave in/exclusion rules as they are
some kind of updates to the mysql.% tables.
*/
- if (thd->slave_thread && table_rules_on)
+ if (thd->slave_thread && rpl_filter->is_on())
{
/*
The tables must be marked "updating" so that tables_ok() takes them into
account in tests.
*/
tables[0].updating= tables[1].updating= 1;
- if (!tables_ok(thd, tables))
+ if (!(thd->spcont || rpl_filter->tables_ok(0, tables)))
DBUG_RETURN(FALSE);
}
#endif
@@ -3168,8 +3168,8 @@ my_bool grant_init(THD *org_thd)
t_table = tables[0].table; c_table = tables[1].table;
p_table= tables[2].table;
- t_table->file->ha_index_init(0);
- p_table->file->ha_index_init(0);
+ t_table->file->ha_index_init(0, 1);
+ p_table->file->ha_index_init(0, 1);
if (!t_table->file->index_first(t_table->record[0]))
{
/* Will be restored by org_thd->store_globals() */
@@ -4303,7 +4303,7 @@ int open_grant_tables(THD *thd, TABLE_LIST *tables)
GRANT and REVOKE are applied the slave in/exclusion rules as they are
some kind of updates to the mysql.% tables.
*/
- if (thd->slave_thread && table_rules_on)
+ if (thd->slave_thread && rpl_filter->is_on())
{
/*
The tables must be marked "updating" so that tables_ok() takes them into
@@ -4311,7 +4311,7 @@ int open_grant_tables(THD *thd, TABLE_LIST *tables)
*/
tables[0].updating=tables[1].updating=tables[2].updating=
tables[3].updating=tables[4].updating=1;
- if (!tables_ok(thd, tables))
+ if (!(thd->spcont || rpl_filter->tables_ok(0, tables)))
DBUG_RETURN(1);
tables[0].updating=tables[1].updating=tables[2].updating=
tables[3].updating=tables[4].updating=0;;
@@ -4473,7 +4473,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
user_key, key_prefix_length,
HA_READ_KEY_EXACT)))
{
- if (error != HA_ERR_KEY_NOT_FOUND)
+ if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
{
table->file->print_error(error, MYF(0));
result= -1;
diff --git a/sql/sql_acl.h b/sql/sql_acl.h
index eba000a627a..5e62e7ce6e3 100644
--- a/sql/sql_acl.h
+++ b/sql/sql_acl.h
@@ -14,6 +14,8 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include "slave.h" // for tables_ok(), rpl_filter
+
#define SELECT_ACL (1L << 0)
#define INSERT_ACL (1L << 1)
#define UPDATE_ACL (1L << 2)
@@ -50,7 +52,6 @@
*/
#define EXTRA_ACL (1L << 29)
#define NO_ACCESS (1L << 30)
-
#define DB_ACLS \
(UPDATE_ACL | SELECT_ACL | INSERT_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \
GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_TMP_ACL | \
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 576f5a503f0..eed7d749b2c 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -565,7 +565,7 @@ bool close_thread_table(THD *thd, TABLE **table_ptr)
else
{
// Free memory and reset for next loop
- table->file->reset();
+ table->file->ha_reset();
}
table->in_use=0;
if (unused_tables)
@@ -2561,6 +2561,42 @@ find_field_in_table(THD *thd, TABLE_LIST *table_list,
/*
+ Find field in table, no side effects, only purpose is to check for field
+ in table object and get reference to the field if found.
+
+ SYNOPSIS
+ find_field_in_table_sef()
+
+ table table where to find
+ name Name of field searched for
+
+ RETURN
+ 0 field is not found
+ # pointer to field
+*/
+
+Field *find_field_in_table_sef(TABLE *table, const char *name)
+{
+ Field **field_ptr;
+ if (table->s->name_hash.records)
+ field_ptr= (Field**)hash_search(&table->s->name_hash,(byte*) name,
+ strlen(name));
+ else
+ {
+ if (!(field_ptr= table->field))
+ return (Field *)0;
+ for (; *field_ptr; ++field_ptr)
+ if (!my_strcasecmp(system_charset_info, (*field_ptr)->field_name, name))
+ break;
+ }
+ if (field_ptr)
+ return *field_ptr;
+ else
+ return (Field *)0;
+}
+
+
+/*
Find field in table
SYNOPSIS
@@ -2619,15 +2655,20 @@ Field *find_field_in_real_table(THD *thd, TABLE *table,
if (thd->set_query_id)
{
+ table->file->ha_set_bit_in_rw_set(field->fieldnr,
+ (bool)(thd->set_query_id-1));
if (field->query_id != thd->query_id)
{
+ if (table->get_fields_in_item_tree)
+ field->flags|= GET_FIXED_FIELDS_FLAG;
field->query_id=thd->query_id;
table->used_fields++;
table->used_keys.intersect(field->part_of_key);
}
else
thd->dupp_field=field;
- }
+ } else if (table->get_fields_in_item_tree)
+ field->flags|= GET_FIXED_FIELDS_FLAG;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (check_grants && check_grant_column(thd, &table->grant,
table->s->db,
@@ -3143,7 +3184,7 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
****************************************************************************/
bool setup_fields(THD *thd, Item **ref_pointer_array,
- List<Item> &fields, bool set_query_id,
+ List<Item> &fields, ulong set_query_id,
List<Item> *sum_func_list, bool allow_sum_func)
{
reg2 Item *item;
@@ -3576,7 +3617,10 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
fields marked in setup_tables during fix_fields of view columns
*/
if (table)
+ {
table->used_fields= table->s->fields;
+ table->file->ha_set_all_bits_in_read_set();
+ }
}
}
if (found)
@@ -3742,12 +3786,14 @@ int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves,
goto err;
/* Mark field used for table cache */
t2_field->query_id= thd->query_id;
+ t2->file->ha_set_bit_in_read_set(t2_field->fieldnr);
t2->used_keys.intersect(t2_field->part_of_key);
}
if ((t1_field= iterator->field()))
{
/* Mark field used for table cache */
t1_field->query_id= thd->query_id;
+ t1->file->ha_set_bit_in_read_set(t1_field->fieldnr);
t1->used_keys.intersect(t1_field->part_of_key);
}
Item_func_eq *tmp= new Item_func_eq(iterator->create_item(thd),
diff --git a/sql/sql_bitmap.h b/sql/sql_bitmap.h
index 0f5b6dcd35e..35c501ede56 100644
--- a/sql/sql_bitmap.h
+++ b/sql/sql_bitmap.h
@@ -25,7 +25,7 @@
template <uint default_width> class Bitmap
{
MY_BITMAP map;
- uchar buffer[(default_width+7)/8];
+ uint32 buffer[(default_width+31)/32];
public:
Bitmap() { init(); }
Bitmap(const Bitmap& from) { *this=from; }
@@ -48,14 +48,14 @@ public:
void intersect(ulonglong map2buff)
{
MY_BITMAP map2;
- bitmap_init(&map2, (uchar *)&map2buff, sizeof(ulonglong)*8, 0);
+ bitmap_init(&map2, (uint32 *)&map2buff, sizeof(ulonglong)*8, 0);
bitmap_intersect(&map, &map2);
}
/* Use highest bit for all bits above sizeof(ulonglong)*8. */
void intersect_extended(ulonglong map2buff)
{
intersect(map2buff);
- if (map.bitmap_size > sizeof(ulonglong))
+ if (map.n_bits > sizeof(ulonglong) * 8)
bitmap_set_above(&map, sizeof(ulonglong),
test(map2buff & (LL(1) << (sizeof(ulonglong) * 8 - 1))));
}
@@ -70,7 +70,7 @@ public:
char *print(char *buf) const
{
char *s=buf;
- const uchar *e=buffer, *b=e+sizeof(buffer)-1;
+ const uchar *e=(uchar *)buffer, *b=e+sizeof(buffer)-1;
while (!*b && b>e)
b--;
if ((*s=_dig_vec_upper[*b >> 4]) != '0')
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 3d2d776f74e..e70ab38ee43 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -303,7 +303,7 @@ TODO list:
#ifndef MASTER
#include "../srclib/myisammrg/myrg_def.h"
#else
-#include "../myisammrg/myrg_def.h"
+#include "../storage/myisammrg/myrg_def.h"
#endif
#ifdef EMBEDDED_LIBRARY
diff --git a/sql/sql_class.h b/sql/sql_class.h
index d6847f5fb35..283ae96eebc 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -463,19 +463,20 @@ public:
class i_string: public ilink
{
public:
- char* ptr;
+ const char* ptr;
i_string():ptr(0) { }
- i_string(char* s) : ptr(s) {}
+ i_string(const char* s) : ptr(s) {}
};
/* needed for linked list of two strings for replicate-rewrite-db */
class i_string_pair: public ilink
{
public:
- char* key;
- char* val;
+ const char* key;
+ const char* val;
i_string_pair():key(0),val(0) { }
- i_string_pair(char* key_arg, char* val_arg) : key(key_arg),val(val_arg) {}
+ i_string_pair(const char* key_arg, const char* val_arg) :
+ key(key_arg),val(val_arg) {}
};
@@ -774,8 +775,15 @@ public:
/*
- if set_query_id=1, we set field->query_id for all fields. In that case
field list can not contain duplicates.
+ 0: Means query_id is not set and no indication of the fields used is
+ given to the handler
+ 1: Means query_id is set for the fields in the list and the bit in the
+ read set is set to inform the handler that the field is to be read
+ 2: Means query_id is set for the fields in the list and the bit in the
+ update set is set to inform the handler that it needs to update this
+ field in write_row and update_row
*/
- bool set_query_id;
+ ulong set_query_id;
/*
This variable is used in post-parse stage to declare that sum-functions,
or functions which have sense only if GROUP BY is present, are allowed.
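
Later hunks in this patch show the caller side of the new three-valued flag; condensed, and with the 0 case list name invented for illustration, the usage looks roughly like this:

    /* Hedged illustration of the ulong set_query_id argument described above. */
    setup_fields(thd, 0, fields, 1, 0, 0);        // 1: mark as read, set read-set bits
    setup_fields(thd, 0, update_fields, 2, 0, 0); // 2: mark as written, set write-set bits
    setup_fields(thd, 0, other_fields, 0, 0, 0);  // 0: no query_id marking, no handler hints
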
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index d83937098e2..2967e2a8a20 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -32,7 +32,8 @@
bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
SQL_LIST *order, ha_rows limit, ulong options)
{
- int error;
+ bool will_batch;
+ int error, loc_error;
TABLE *table;
SQL_SELECT *select=0;
READ_RECORD info;
@@ -169,6 +170,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
deleted=0L;
init_ftfuncs(thd, select_lex, 1);
thd->proc_info="updating";
+ will_batch= !table->file->start_bulk_delete();
while (!(error=info.read_record(&info)) && !thd->killed &&
!thd->net.report_error)
{
@@ -184,7 +186,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
break;
}
- if (!(error=table->file->delete_row(table->record[0])))
+ if (!(error= table->file->delete_row(table->record[0])))
{
deleted++;
if (table->triggers &&
@@ -220,7 +222,13 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
}
if (thd->killed && !error)
error= 1; // Aborted
- thd->proc_info="end";
+ if (will_batch && (loc_error= table->file->end_bulk_delete()))
+ {
+ if (error != 1)
+ table->file->print_error(loc_error,MYF(0));
+ error=1;
+ }
+ thd->proc_info= "end";
end_read_record(&info);
free_io_cache(table); // Will not do any harm
if (options & OPTION_QUICK)
@@ -615,7 +623,8 @@ void multi_delete::send_error(uint errcode,const char *err)
int multi_delete::do_deletes()
{
- int local_error= 0, counter= 0;
+ int local_error= 0, counter= 0, error;
+ bool will_batch;
DBUG_ENTER("do_deletes");
DBUG_ASSERT(do_delete);
@@ -643,6 +652,7 @@ int multi_delete::do_deletes()
been deleted by foreign key handling
*/
info.ignore_not_found_rows= 1;
+ will_batch= !table->file->start_bulk_delete();
while (!(local_error=info.read_record(&info)) && !thd->killed)
{
if (table->triggers &&
@@ -666,6 +676,14 @@ int multi_delete::do_deletes()
break;
}
}
+ if (will_batch && (error= table->file->end_bulk_delete()))
+ {
+ if (!local_error)
+ {
+ local_error= error;
+ table->file->print_error(local_error,MYF(0));
+ }
+ }
end_read_record(&info);
if (thd->killed && !local_error)
local_error= 1;
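
The bulk-delete calls added above follow a simple protocol: judging by the will_batch assignment, start_bulk_delete() returns non-zero when the handler declines batching, and end_bulk_delete() is only called (and its error reported) when batching was actually in effect. A condensed, hedged sketch of the same pattern:

    int error= 0, loc_error;
    bool will_batch= !table->file->start_bulk_delete();  // handler may decline batching

    while (!(error= info.read_record(&info)))            // same row loop as mysql_delete()
    {
      if ((error= table->file->delete_row(table->record[0])))
        break;
    }

    if (will_batch && (loc_error= table->file->end_bulk_delete()))
    {
      if (error != 1)
        table->file->print_error(loc_error, MYF(0));     // report the flush failure
      error= 1;
    }
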
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index e109600bcd0..84087db9719 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -461,7 +461,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
if (keyname)
{
table->file->ha_index_or_rnd_end();
- table->file->ha_index_init(keyno);
+ table->file->ha_index_init(keyno, 1);
error= table->file->index_first(table->record[0]);
}
else
@@ -483,7 +483,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
case RLAST:
DBUG_ASSERT(keyname != 0);
table->file->ha_index_or_rnd_end();
- table->file->ha_index_init(keyno);
+ table->file->ha_index_init(keyno, 1);
error= table->file->index_last(table->record[0]);
mode=RPREV;
break;
@@ -522,7 +522,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
if (!(key= (byte*) thd->calloc(ALIGN_SIZE(key_len))))
goto err;
table->file->ha_index_or_rnd_end();
- table->file->ha_index_init(keyno);
+ table->file->ha_index_init(keyno, 1);
key_copy(key, table->record[0], table->key_info + keyno, key_len);
error= table->file->index_read(table->record[0],
key,key_len,ha_rkey_mode);
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index 6780beec258..11045529a51 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -286,8 +286,8 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
rtopic_id= find_fields[help_relation_help_topic_id].field;
rkey_id= find_fields[help_relation_help_keyword_id].field;
- topics->file->ha_index_init(iindex_topic);
- relations->file->ha_index_init(iindex_relations);
+ topics->file->ha_index_init(iindex_topic,1);
+ relations->file->ha_index_init(iindex_relations,1);
rkey_id->store((longlong) key_id);
rkey_id->get_key_image(buff, rkey_id->pack_length(), Field::itRAW);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 125390e4411..fa5930e5205 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -103,6 +103,11 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
#endif
clear_timestamp_auto_bits(table->timestamp_field_type,
TIMESTAMP_AUTO_SET_ON_INSERT);
+ /*
+ No fields are provided so all fields must be provided in the values.
+ Thus we set all bits in the write set.
+ */
+ table->file->ha_set_all_bits_in_write_set();
}
else
{ // Part field list
@@ -123,7 +128,11 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
/* fields only from first table */
table_list->next_local= 0;
context->resolve_in_table_list_only(table_list);
- res= setup_fields(thd, 0, fields, 1, 0, 0);
+ /*
+ Indicate that the fields in the list are to be updated by setting the
+ set_query_id parameter to 2. This sets the write_set bit for each field.
+ */
+ res= setup_fields(thd, 0, fields, 2, 0, 0);
table_list->next_local= save_next;
thd->lex->select_lex.no_wrap_view_item= FALSE;
context->table_list= save_context;
@@ -214,9 +223,10 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
/*
Check the fields we are going to modify. This will set the query_id
- of all used fields to the threads query_id.
+ of all used fields to the thread's query_id. It will also add all
+ fields to the write set of this table.
*/
- if (setup_fields(thd, 0, update_fields, 1, 0, 0))
+ if (setup_fields(thd, 0, update_fields, 2, 0, 0))
return -1;
if (table->timestamp_field)
@@ -226,7 +236,10 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
clear_timestamp_auto_bits(table->timestamp_field_type,
TIMESTAMP_AUTO_SET_ON_UPDATE);
else
+ {
table->timestamp_field->query_id= timestamp_query_id;
+ table->file->ha_set_bit_in_write_set(table->timestamp_field->fieldnr);
+ }
}
return 0;
@@ -637,7 +650,7 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view)
Field_translator *trans;
Field **field_ptr= table->field;
uint used_fields_buff_size= (table->s->fields + 7) / 8;
- uchar *used_fields_buff= (uchar*)thd->alloc(used_fields_buff_size);
+ uint32 *used_fields_buff= (uint32*)thd->alloc(used_fields_buff_size);
MY_BITMAP used_fields;
DBUG_ENTER("check_key_in_view");
@@ -849,7 +862,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
select_lex->first_execution= 0;
}
if (duplic == DUP_UPDATE || duplic == DUP_REPLACE)
- table->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
+ table->file->ha_retrieve_all_pk();
DBUG_RETURN(FALSE);
}
@@ -2174,7 +2187,7 @@ select_insert::~select_insert()
if (table)
{
table->next_number_field=0;
- table->file->reset();
+ table->file->ha_reset();
}
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
thd->abort_on_warning= 0;
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 630a7e950f7..73aaecd39aa 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -155,6 +155,7 @@ void lex_start(THD *thd, uchar *buf,uint length)
lex->yylineno = 1;
lex->in_comment=0;
lex->length=0;
+ lex->part_info= 0;
lex->select_lex.in_sum_expr=0;
lex->select_lex.expr_list.empty();
lex->select_lex.ftfunc_list_alloc.empty();
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 4bba0c432c7..ec982703116 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -25,6 +25,7 @@ class sp_head;
class sp_name;
class sp_instr;
class sp_pcontext;
+class partition_info;
/*
The following hack is needed because mysql_yacc.cc does not define
@@ -722,6 +723,8 @@ typedef struct st_lex
TABLE_LIST **query_tables_last;
/* store original leaf_tables for INSERT SELECT and PS/SP */
TABLE_LIST *leaf_tables_insert;
+ /* Partition info structure filled in by PARTITION BY parse part */
+ partition_info *part_info;
List<key_part_spec> col_list;
List<key_part_spec> ref_list;
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 1ec209aba85..0090f956521 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -171,7 +171,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
The main thing to fix to remove this restriction is to ensure that the
table is marked to be 'used for insert' in which case we should never
- mark this table as as 'const table' (ie, one that has only one row).
+ mark this table as 'const table' (ie, one that has only one row).
*/
if (unique_table(table_list, table_list->next_global))
{
@@ -187,6 +187,10 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
Field **field;
for (field=table->field; *field ; field++)
fields_vars.push_back(new Item_field(*field));
+ /*
+ Since all fields are set we set all bits in the write set
+ */
+ table->file->ha_set_all_bits_in_write_set();
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
/*
Let us also prepare SET clause, altough it is probably empty
@@ -199,8 +203,15 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
else
{ // Part field list
/* TODO: use this conds for 'WITH CHECK OPTIONS' */
- if (setup_fields(thd, 0, fields_vars, 1, 0, 0) ||
- setup_fields(thd, 0, set_fields, 1, 0, 0) ||
+ /*
+ Indicate that both the variables in the field list and the fields in
+ update_list are to be included in the write set of the table. We do,
+ however, set all bits in the write set anyway since it is not allowed
+ to specify NULLs in LOAD DATA
+ */
+ table->file->ha_set_all_bits_in_write_set();
+ if (setup_fields(thd, 0, fields_vars, 2, 0, 0) ||
+ setup_fields(thd, 0, set_fields, 2, 0, 0) ||
check_that_all_fields_are_given_values(thd, table, table_list))
DBUG_RETURN(TRUE);
/*
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 23403e6e00a..3a7defebddd 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -16,6 +16,7 @@
#include "mysql_priv.h"
#include "sql_repl.h"
+#include "rpl_filter.h"
#include "repl_failsafe.h"
#include <m_ctype.h>
#include <myisam.h>
@@ -179,10 +180,13 @@ static bool begin_trans(THD *thd)
#ifdef HAVE_REPLICATION
inline bool all_tables_not_ok(THD *thd, TABLE_LIST *tables)
{
- return (table_rules_on && tables && !tables_ok(thd,tables) &&
+ return (rpl_filter->is_on() && tables &&
+ !(thd->spcont || rpl_filter->tables_ok(thd->db, tables)) &&
((thd->lex->sql_command != SQLCOM_DELETE_MULTI) ||
- !tables_ok(thd,
- (TABLE_LIST *)thd->lex->auxilliary_table_list.first)));
+ !(thd->spcont ||
+ rpl_filter->tables_ok(thd->db,
+ (TABLE_LIST *)
+ thd->lex->auxilliary_table_list.first))));
}
#endif
@@ -3527,9 +3531,9 @@ end_with_restore_list:
above was not called. So we have to check rules again here.
*/
#ifdef HAVE_REPLICATION
- if (thd->slave_thread &&
- (!db_ok(lex->name, replicate_do_db, replicate_ignore_db) ||
- !db_ok_with_wild_table(lex->name)))
+ if (thd->slave_thread &&
+ (!rpl_filter->db_ok(lex->name) ||
+ !rpl_filter->db_ok_with_wild_table(lex->name)))
{
my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
break;
@@ -3562,8 +3566,8 @@ end_with_restore_list:
*/
#ifdef HAVE_REPLICATION
if (thd->slave_thread &&
- (!db_ok(lex->name, replicate_do_db, replicate_ignore_db) ||
- !db_ok_with_wild_table(lex->name)))
+ (!rpl_filter->db_ok(lex->name) ||
+ !rpl_filter->db_ok_with_wild_table(lex->name)))
{
my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
break;
@@ -3602,8 +3606,8 @@ end_with_restore_list:
*/
#ifdef HAVE_REPLICATION
if (thd->slave_thread &&
- (!db_ok(db, replicate_do_db, replicate_ignore_db) ||
- !db_ok_with_wild_table(db)))
+ (!rpl_filter->db_ok(lex->name) ||
+ !rpl_filter->db_ok_with_wild_table(lex->name)))
{
my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
break;
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
new file mode 100644
index 00000000000..ffdf53ed287
--- /dev/null
+++ b/sql/sql_partition.cc
@@ -0,0 +1,3117 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ This file was introduced as a container for general functionality related
+ to partitioning introduced in MySQL version 5.1. It contains functionality
+ used by all handlers that support partitioning, which in the first version
+ is the partitioning handler itself and the NDB handler.
+
+ The first version was written by Mikael Ronström.
+
+ This version supports RANGE partitioning, LIST partitioning, HASH
+ partitioning and composite partitioning (hereafter called subpartitioning)
+ where each RANGE/LIST partitioning is HASH partitioned. The hash function
+ can either be supplied by the user or given as a list of fields only
+ (also called KEY partitioning), in which case the MySQL server uses an
+ internal hash function.
+ There are quite a few defaults that can be used as well.
+*/
+
+/* Some general useful functions */
+
+#include "mysql_priv.h"
+#include <errno.h>
+#include <m_ctype.h>
+#include "md5.h"
+
+
+#ifdef HAVE_PARTITION_DB
+/*
+ Partition-related function declarations and some static constants.
+*/
+static char *hash_str= "HASH";
+static char *range_str= "RANGE";
+static char *list_str= "LIST";
+static char *part_str= "PARTITION";
+static char *sub_str= "SUB";
+static char *by_str= "BY";
+static char *key_str= "KEY";
+static char *space_str= " ";
+static char *equal_str= "=";
+static char *end_paren_str= ")";
+static char *begin_paren_str= "(";
+static char *comma_str= ",";
+static char buff[22];
+
+bool get_partition_id_list(partition_info *part_info,
+ uint32 *part_id);
+bool get_partition_id_range(partition_info *part_info,
+ uint32 *part_id);
+bool get_partition_id_hash_nosub(partition_info *part_info,
+ uint32 *part_id);
+bool get_partition_id_key_nosub(partition_info *part_info,
+ uint32 *part_id);
+bool get_partition_id_linear_hash_nosub(partition_info *part_info,
+ uint32 *part_id);
+bool get_partition_id_linear_key_nosub(partition_info *part_info,
+ uint32 *part_id);
+bool get_partition_id_range_sub_hash(partition_info *part_info,
+ uint32 *part_id);
+bool get_partition_id_range_sub_key(partition_info *part_info,
+ uint32 *part_id);
+bool get_partition_id_range_sub_linear_hash(partition_info *part_info,
+ uint32 *part_id);
+bool get_partition_id_range_sub_linear_key(partition_info *part_info,
+ uint32 *part_id);
+bool get_partition_id_list_sub_hash(partition_info *part_info,
+ uint32 *part_id);
+bool get_partition_id_list_sub_key(partition_info *part_info,
+ uint32 *part_id);
+bool get_partition_id_list_sub_linear_hash(partition_info *part_info,
+ uint32 *part_id);
+bool get_partition_id_list_sub_linear_key(partition_info *part_info,
+ uint32 *part_id);
+uint32 get_partition_id_hash_sub(partition_info *part_info);
+uint32 get_partition_id_key_sub(partition_info *part_info);
+uint32 get_partition_id_linear_hash_sub(partition_info *part_info);
+uint32 get_partition_id_linear_key_sub(partition_info *part_info);
+
+/*
+ A useful routine used by update_row for partition handlers to calculate
+ the partition ids of the old and the new record.
+ SYNOPSIS
+ get_parts_for_update()
+ old_data Buffer of old record
+ new_data Buffer of new record
+ rec0 Reference to table->record[0]
+ part_info Reference to partition information
+ part_field_array A NULL-terminated array of fields for partition
+ function
+ old_part_id The returned partition id of old record
+ new_part_id The returned partition id of new record
+ RETURN VALUE
+ 0 Success
+ > 0 Error code
+ DESCRIPTION
+ Depending on whether the old record buffer is record[0] or not, we may
+ need to re-point the fields first. Then we call the function pointer
+ get_partition_id to calculate the partition ids.
+*/
+
+int get_parts_for_update(const byte *old_data, byte *new_data,
+ const byte *rec0, partition_info *part_info,
+ uint32 *old_part_id, uint32 *new_part_id)
+{
+ Field **part_field_array= part_info->full_part_field_array;
+ int error;
+ DBUG_ENTER("get_parts_for_update");
+ DBUG_ASSERT(new_data == rec0);
+
+ set_field_ptr(part_field_array, old_data, rec0);
+ error= part_info->get_partition_id(part_info, old_part_id);
+ set_field_ptr(part_field_array, rec0, old_data);
+ if (unlikely(error)) // Should never happen
+ {
+ DBUG_ASSERT(0);
+ DBUG_RETURN(error);
+ }
+#ifdef NOT_NEEDED
+ if (new_data == rec0)
+#endif
+ {
+ if (unlikely(error= part_info->get_partition_id(part_info,new_part_id)))
+ {
+ DBUG_RETURN(error);
+ }
+ }
+#ifdef NOT_NEEDED
+ else
+ {
+ /*
+ This branch should never execute but it is written anyways for
+ future use. It will be tested by ensuring that the above
+ condition is false in one test situation before pushing the code.
+ */
+ set_field_ptr(part_field_array, new_data, rec0);
+ error= part_info->get_partition_id(part_info, new_part_id);
+ set_field_ptr(part_field_array, rec0, new_data);
+ if (unlikely(error))
+ {
+ DBUG_RETURN(error);
+ }
+ }
+#endif
+ DBUG_RETURN(0);
+}
+
+
+/*
+ A support routine used by delete_row in partition handlers to calculate
+ the partition id of the record to delete.
+ SYNOPSIS
+ get_part_for_delete()
+ buf Buffer of old record
+ rec0 Reference to table->record[0]
+ part_info Reference to partition information
+ part_id The returned partition id to delete from
+ RETURN VALUE
+ 0 Success
+ > 0 Error code
+ DESCRIPTION
+ If buf is not record[0] we must first set the partition fields to point
+ into buf, call the function pointer get_partition_id to calculate the
+ partition id, and then restore the field pointers.
+*/
+
+int get_part_for_delete(const byte *buf, const byte *rec0,
+ partition_info *part_info, uint32 *part_id)
+{
+ int error;
+ DBUG_ENTER("get_part_for_delete");
+
+ if (likely(buf == rec0))
+ {
+ if (unlikely((error= part_info->get_partition_id(part_info, part_id))))
+ {
+ DBUG_RETURN(error);
+ }
+ DBUG_PRINT("info", ("Delete from partition %d", *part_id));
+ }
+ else
+ {
+ Field **part_field_array= part_info->full_part_field_array;
+ set_field_ptr(part_field_array, buf, rec0);
+ error= part_info->get_partition_id(part_info, part_id);
+ set_field_ptr(part_field_array, rec0, buf);
+ if (unlikely(error))
+ {
+ DBUG_RETURN(error);
+ }
+ DBUG_PRINT("info", ("Delete from partition %d (path2)", *part_id));
+ }
+ DBUG_RETURN(0);
+}
+
+
+/*
+ This routine allocates an array for all range constants to achieve a fast
+ check of which partition a certain value belongs to. At the same time it
+ checks that the range constants are defined in increasing order and that
+ the expressions are constant integer expressions.
+ SYNOPSIS
+ check_range_constants()
+ part_info Reference to partition information
+ RETURN VALUE
+ TRUE An error occurred during creation of range constants
+ FALSE Successful creation of range constant mapping
+ DESCRIPTION
+ This routine is called from check_partition_info to get a quick error
+ before we come too far into the CREATE TABLE process. It is also called
+ from fix_partition_func every time we open the .frm file. It is only
+ called for RANGE PARTITIONed tables.
+*/
+
+static bool check_range_constants(partition_info *part_info)
+{
+ partition_element* part_def;
+ longlong current_largest_int= LONGLONG_MIN, part_range_value_int;
+ uint no_parts= part_info->no_parts, i;
+ List_iterator<partition_element> it(part_info->partitions);
+ bool result= TRUE;
+ DBUG_ENTER("check_range_constants");
+ DBUG_PRINT("enter", ("INT_RESULT with %d parts", no_parts));
+
+ part_info->part_result_type= INT_RESULT;
+ part_info->range_int_array=
+ (longlong*)sql_alloc(no_parts * sizeof(longlong));
+ if (unlikely(part_info->range_int_array == NULL))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), no_parts*sizeof(longlong));
+ goto end;
+ }
+ i= 0;
+ do
+ {
+ part_def= it++;
+ if ((i != (no_parts - 1)) || !part_info->defined_max_value)
+ {
+ if (likely(part_def->range_expr->result_type() == INT_RESULT))
+ part_range_value_int= part_def->range_expr->val_int();
+ else
+ {
+ my_error(ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR, MYF(0),
+ "LESS THAN");
+ goto end;
+ }
+ }
+ else
+ part_range_value_int= LONGLONG_MAX;
+ if (likely(current_largest_int < part_range_value_int))
+ {
+ current_largest_int= part_range_value_int;
+ part_info->range_int_array[i]= part_range_value_int;
+ }
+ else
+ {
+ my_error(ER_RANGE_NOT_INCREASING_ERROR, MYF(0));
+ goto end;
+ }
+ } while (++i < no_parts);
+ result= FALSE;
+end:
+ DBUG_RETURN(result);
+}
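+
+/*
+ A small worked example of the mapping built above (the partition bounds
+ are assumed, purely for illustration): for
+ PARTITION BY RANGE (a)
+ (PARTITION p0 VALUES LESS THAN (10),
+ PARTITION p1 VALUES LESS THAN (100),
+ PARTITION p2 VALUES LESS THAN MAXVALUE)
+ range_int_array becomes {10, 100, LONGLONG_MAX}, and the routine verifies
+ that each constant is strictly greater than the previous one so that a
+ value can later be mapped quickly to its partition by searching the array.
+*/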
+
+
+/*
+ A support routine for check_list_constants used by qsort to sort the
+ constant list expressions.
+ SYNOPSIS
+ list_part_cmp()
+ a First list constant to compare with
+ b Second list constant to compare with
+ RETURN VALUE
+ +1 a > b
+ 0 a == b
+ -1 a < b
+*/
+
+static int list_part_cmp(const void* a, const void* b)
+{
+ longlong a1, b1;
+ a1= ((LIST_PART_ENTRY*)a)->list_value;
+ b1= ((LIST_PART_ENTRY*)b)->list_value;
+ if (a1 < b1)
+ return -1;
+ else if (a1 > b1)
+ return +1;
+ else
+ return 0;
+}
+
+
+/*
+ This routine allocates an array for all list constants to achieve a fast
+ check of which partition a certain value belongs to. At the same time it
+ checks that there are no duplicates among the list constants and that
+ the list expressions are constant integer expressions.
+ SYNOPSIS
+ check_list_constants()
+ part_info Reference to partition information
+ RETURN VALUE
+ TRUE An error occurred during creation of list constants
+ FALSE Successful creation of list constant mapping
+ DESCRIPTION
+ This routine is called from check_partition_info to get a quick error
+ before we come too far into the CREATE TABLE process. It is also called
+ from fix_partition_func every time we open the .frm file. It is only
+ called for LIST PARTITIONed tables.
+*/
+
+static bool check_list_constants(partition_info *part_info)
+{
+ uint i, no_list_values= 0, no_parts, list_index= 0;
+ Item *list_expr;
+ bool not_first, result= TRUE;
+ longlong curr_value, prev_value;
+ partition_element* part_def;
+ List_iterator<partition_element> list_func_it(part_info->partitions);
+ DBUG_ENTER("check_list_constants");
+
+ part_info->part_result_type= INT_RESULT;
+
+ /*
+ In the first step we calculate the number of list values that have
+ been defined.
+
+ We use this number to allocate a properly sized array of structs
+ holding the partition id and the value to use for that partition.
+ In the second traversal we check that all Item trees are of the
+ same type (INT_RESULT) and assign their values to the struct array.
+
+ Finally we sort the array of structs in order of values to enable
+ a quick binary search for the proper value to discover the
+ partition id.
+ After sorting the array we check that there are no duplicates in the
+ list.
+ */
+
+ no_parts= part_info->no_parts;
+ i= 0;
+ do
+ {
+ part_def= list_func_it++;
+ List_iterator<Item> list_val_it1(part_def->list_expr_list);
+ while (list_val_it1++)
+ no_list_values++;
+ } while (++i < no_parts);
+ list_func_it.rewind();
+ part_info->no_list_values= no_list_values;
+ part_info->list_array=
+ (LIST_PART_ENTRY*)sql_alloc(no_list_values*sizeof(LIST_PART_ENTRY));
+ if (unlikely(part_info->list_array == NULL))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), no_list_values*sizeof(LIST_PART_ENTRY));
+ goto end;
+ }
+
+ i= 0;
+ do
+ {
+ part_def= list_func_it++;
+ List_iterator<Item> list_val_it2(part_def->list_expr_list);
+ while ((list_expr= list_val_it2++))
+ {
+ if (likely(list_expr->result_type() == INT_RESULT))
+ {
+ part_info->list_array[list_index].list_value= list_expr->val_int();
+ part_info->list_array[list_index++].partition_id= i;
+ }
+ else
+ {
+ my_error(ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR, MYF(0), "IN");
+ goto end;
+ }
+ }
+ } while (++i < no_parts);
+
+ qsort((void*)part_info->list_array, no_list_values,
+ sizeof(LIST_PART_ENTRY), &list_part_cmp);
+
+ not_first= FALSE;
+ i= prev_value= 0; //prev_value initialised to quiet compiler
+ do
+ {
+ curr_value= part_info->list_array[i].list_value;
+ if (likely(!not_first || prev_value != curr_value))
+ {
+ prev_value= curr_value;
+ not_first= TRUE;
+ }
+ else
+ {
+ my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0));
+ goto end;
+ }
+ } while (++i < no_list_values);
+ result= FALSE;
+end:
+ DBUG_RETURN(result);
+}
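+
+/*
+ A small worked example of the mapping built above (the list values are
+ assumed, purely for illustration): for
+ PARTITION BY LIST (a)
+ (PARTITION p0 VALUES IN (4, 7),
+ PARTITION p1 VALUES IN (2, 9))
+ list_array is first filled with (4,0), (7,0), (2,1), (9,1) as
+ (list_value, partition_id) pairs, then sorted on list_value into
+ (2,1), (4,0), (7,0), (9,1), after which the duplicate check runs over
+ the sorted values.
+*/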
+
+
+/*
+ Create a memory area where default partition names are stored and fill it
+ up with the names.
+ SYNOPSIS
+ create_default_partition_names()
+ no_parts Number of partitions
+ subpart TRUE if the names are for subpartitions
+ RETURN VALUE
+ != NULL A pointer to the memory area holding the default partition names
+ NULL Memory allocation failure
+ DESCRIPTION
+ A support routine for the partition code where default names are
+ generated.
+ The external routine needing this code is check_partition_info.
+*/
+
+#define MAX_PART_NAME_SIZE 8
+
+static char *create_default_partition_names(uint no_parts, bool subpart)
+{
+ char *ptr= sql_calloc(no_parts*MAX_PART_NAME_SIZE);
+ char *move_ptr= ptr;
+ uint i= 0;
+ DBUG_ENTER("create_default_partition_names");
+ if (likely(ptr != 0))
+ {
+ do
+ {
+ if (subpart)
+ my_sprintf(move_ptr, (move_ptr,"sp%u", i));
+ else
+ my_sprintf(move_ptr, (move_ptr,"p%u", i));
+ move_ptr+=MAX_PART_NAME_SIZE;
+ } while (++i < no_parts);
+ }
+ else
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), no_parts*MAX_PART_NAME_SIZE);
+ }
+ DBUG_RETURN(ptr);
+}
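+
+/*
+ A sketch of the buffer layout produced above: with no_parts = 3 and
+ subpart = FALSE the returned area holds three slots of
+ MAX_PART_NAME_SIZE bytes containing "p0", "p1" and "p2"; with
+ subpart = TRUE the slots instead contain "sp0", "sp1" and "sp2".
+*/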
+
+
+/*
+ Set up all the default partitions not set up by the user in the SQL
+ statement. Also perform a number of checks that the user hasn't tried
+ to use default values where no defaults exist.
+ SYNOPSIS
+ set_up_default_partitions()
+ part_info The reference to all partition information
+ file A reference to a handler of the table
+ max_rows Maximum number of rows stored in the table
+ RETURN VALUE
+ TRUE Error, attempted default values not possible
+ FALSE Ok, default partitions set-up
+ DESCRIPTION
+ The routine uses the underlying handler of the partitioning to define
+ the default number of partitions. For some handlers this requires
+ knowledge of the maximum number of rows to be stored in the table.
+ This routine only accepts HASH and KEY partitioning and thus there is
+ no subpartitioning if this routine is successful.
+ The external routine needing this code is check_partition_info
+*/
+
+static bool set_up_default_partitions(partition_info *part_info,
+ handler *file, ulonglong max_rows)
+{
+ uint no_parts, i;
+ char *default_name;
+ bool result= TRUE;
+ DBUG_ENTER("set_up_default_partitions");
+
+ if (part_info->part_type != HASH_PARTITION)
+ {
+ char *error_string;
+ if (part_info->part_type == RANGE_PARTITION)
+ error_string= range_str;
+ else
+ error_string= list_str;
+ my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_string);
+ goto end;
+ }
+ if (part_info->no_parts == 0)
+ part_info->no_parts= file->get_default_no_partitions(max_rows);
+ no_parts= part_info->no_parts;
+ if (unlikely(no_parts > MAX_PARTITIONS))
+ {
+ my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
+ goto end;
+ }
+ if (unlikely((!(default_name= create_default_partition_names(no_parts,
+ FALSE)))))
+ goto end;
+ i= 0;
+ do
+ {
+ partition_element *part_elem= new partition_element();
+ if (likely(part_elem != 0))
+ {
+ part_elem->engine_type= DB_TYPE_UNKNOWN;
+ part_elem->partition_name= default_name;
+ default_name+=MAX_PART_NAME_SIZE;
+ part_info->partitions.push_back(part_elem);
+ }
+ else
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
+ goto end;
+ }
+ } while (++i < no_parts);
+ result= FALSE;
+end:
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Set up all the default subpartitions not set up by the user in the SQL
+ statement. Also perform a number of checks that the default partitioning
+ becomes an allowed partitioning scheme.
+ SYNOPSIS
+ set_up_default_subpartitions()
+ part_info The reference to all partition information
+ file A reference to a handler of the table
+ max_rows Maximum number of rows stored in the table
+ RETURN VALUE
+ TRUE Error, attempted default values not possible
+ FALSE Ok, default subpartitions set up
+ DESCRIPTION
+ The routine uses the underlying handler of the partitioning to define
+ the default number of subpartitions. For some handlers this requires
+ knowledge of the maximum number of rows to be stored in the table.
+ This routine is only called for RANGE or LIST partitioning; those
+ partitions must be defined explicitly, so only the subpartitions get
+ default values. The external routine needing this code is
+ check_partition_info.
+*/
+
+static bool set_up_default_subpartitions(partition_info *part_info,
+ handler *file, ulonglong max_rows)
+{
+ uint i, j= 0, no_parts, no_subparts;
+ char *default_name;
+ bool result= TRUE;
+ partition_element *part_elem;
+ List_iterator<partition_element> part_it(part_info->partitions);
+ DBUG_ENTER("set_up_default_subpartitions");
+
+ if (part_info->no_subparts == 0)
+ part_info->no_subparts= file->get_default_no_partitions(max_rows);
+ no_parts= part_info->no_parts;
+ no_subparts= part_info->no_subparts;
+ if (unlikely((no_parts * no_subparts) > MAX_PARTITIONS))
+ {
+ my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
+ goto end;
+ }
+ if (unlikely((!(default_name=
+ create_default_partition_names(no_subparts, TRUE)))))
+ goto end;
+ i= 0;
+ do
+ {
+ /* Each partition gets its own set of no_subparts default subpartitions */
+ char *name_ptr= default_name;
+ part_elem= part_it++;
+ j= 0;
+ do
+ {
+ partition_element *subpart_elem= new partition_element();
+ if (likely(subpart_elem != 0))
+ {
+ subpart_elem->engine_type= DB_TYPE_UNKNOWN;
+ subpart_elem->partition_name= name_ptr;
+ name_ptr+= MAX_PART_NAME_SIZE;
+ part_elem->subpartitions.push_back(subpart_elem);
+ }
+ else
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
+ goto end;
+ }
+ } while (++j < no_subparts);
+ } while (++i < no_parts);
+ result= FALSE;
+end:
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Set up defaults for partitions or subpartitions. Defaults cannot be
+ generated for both at the same time, since default partitions are only
+ possible for HASH/KEY partitioning, which cannot be subpartitioned.
+ SYNOPSIS
+ set_up_defaults_for_partitioning()
+ part_info The reference to all partition information
+ file A reference to a handler of the table
+ max_rows Maximum number of rows stored in the table
+ RETURN VALUE
+ TRUE Error, attempted default values not possible
+ FALSE Ok, default partitions set-up
+ DESCRIPTION
+ Support routine for check_partition_info
+*/
+
+static bool set_up_defaults_for_partitioning(partition_info *part_info,
+ handler *file,
+ ulonglong max_rows)
+{
+ DBUG_ENTER("set_up_defaults_for_partitioning");
+
+ if (part_info->use_default_partitions)
+ DBUG_RETURN(set_up_default_partitions(part_info, file, max_rows));
+ if (is_sub_partitioned(part_info) && part_info->use_default_subpartitions)
+ DBUG_RETURN(set_up_default_subpartitions(part_info, file, max_rows));
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Check that all partitions use the same storage engine.
+ This is currently a limitation in this version.
+ SYNOPSIS
+ check_engine_mix()
+ engine_array An array of engine identifiers
+ no_parts Total number of partitions
+ RETURN VALUE
+ TRUE Error, mixed engines
+ FALSE Ok, no mixed engines
+*/
+
+static bool check_engine_mix(u_char *engine_array, uint no_parts)
+{
+ /*
+ Current check verifies only that all handlers are the same.
+ Later this check will be more sophisticated.
+ */
+ uint i= 0;
+ bool result= FALSE;
+ DBUG_ENTER("check_engine_mix");
+
+ do
+ {
+ if (engine_array[i] != engine_array[0])
+ {
+ result= TRUE;
+ break;
+ }
+ } while (++i < no_parts);
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Check that the requested partitioning can be set up in this version.
+ One could say this routine is an extension of the parser.
+ If defaults were used we will generate default data structures for all
+ partitions.
+ SYNOPSIS
+ check_partition_info()
+ part_info The reference to all partition information
+ eng_type Default storage engine if no engine is specified per
+ partition.
+ file A reference to a handler of the table
+ max_rows Maximum number of rows stored in the table
+ RETURN VALUE
+ TRUE Error, something went wrong
+ FALSE Ok, full partition data structures are now generated
+ DESCRIPTION
+ This code is used early in the CREATE TABLE and ALTER TABLE process.
+*/
+
+bool check_partition_info(partition_info *part_info,enum db_type eng_type,
+ handler *file, ulonglong max_rows)
+{
+ u_char *engine_array= NULL;
+ uint part_count= 0, i, no_parts, tot_partitions;
+ bool result= TRUE;
+ List_iterator<partition_element> part_it(part_info->partitions);
+ DBUG_ENTER("check_partition_info");
+
+ if (unlikely(is_sub_partitioned(part_info) &&
+ (!(part_info->part_type == RANGE_PARTITION ||
+ part_info->part_type == LIST_PARTITION))))
+ {
+ /* Only RANGE and LIST partitioning can be subpartitioned */
+ my_error(ER_SUBPARTITION_ERROR, MYF(0));
+ goto end;
+ }
+ if (unlikely(set_up_defaults_for_partitioning(part_info, file, max_rows)))
+ goto end;
+ tot_partitions= get_tot_partitions(part_info);
+ if (unlikely(tot_partitions > MAX_PARTITIONS))
+ {
+ my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
+ goto end;
+ }
+ engine_array= (u_char*)my_malloc(tot_partitions, MYF(MY_WME));
+ if (unlikely(!engine_array))
+ goto end;
+ i= 0;
+ no_parts= part_info->no_parts;
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (!is_sub_partitioned(part_info))
+ {
+ if (part_elem->engine_type == DB_TYPE_UNKNOWN)
+ part_elem->engine_type= eng_type;
+ DBUG_PRINT("info", ("engine = %u",(uint)part_elem->engine_type));
+ engine_array[part_count++]= (u_char)part_elem->engine_type;
+ }
+ else
+ {
+ uint j= 0, no_subparts= part_info->no_subparts;
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ do
+ {
+ part_elem= sub_it++;
+ if (part_elem->engine_type == DB_TYPE_UNKNOWN)
+ part_elem->engine_type= eng_type;
+ DBUG_PRINT("info", ("engine = %u",(uint)part_elem->engine_type));
+ engine_array[part_count++]= (u_char)part_elem->engine_type;
+ } while (++j < no_subparts);
+ }
+ } while (++i < part_info->no_parts);
+ if (unlikely(check_engine_mix(engine_array, part_count)))
+ {
+ my_error(ER_MIX_HANDLER_ERROR, MYF(0));
+ goto end;
+ }
+
+ /*
+ We need to check all constant expressions that they are of the correct
+ type and that they are increasing for ranges and not overlapping for
+ list constants.
+ */
+
+ if (unlikely((part_info->part_type == RANGE_PARTITION &&
+ check_range_constants(part_info)) ||
+ (part_info->part_type == LIST_PARTITION &&
+ check_list_constants(part_info))))
+ goto end;
+ result= FALSE;
+end:
+ my_free((char*)engine_array,MYF(MY_ALLOW_ZERO_PTR));
+ DBUG_RETURN(result);
+}
+
+
+/*
+ A great number of functions below are part of the fix_partition_func
+ method. It is used to set up the partition structures for execution from
+ openfrm. It is called at the end of openfrm when the table struct has
+ been set up apart from the partition information.
+ It involves:
+ 1) Setting arrays of fields for the partition functions.
+ 2) Setting up binary search array for LIST partitioning
+ 3) Setting up array for binary search for RANGE partitioning
+ 4) Setting up key_map's to assist in quick evaluation of whether one
+ can deduce anything from a given index of what partition to use
+ 5) Checking whether a set of partitions can be derived from a range on
+ a field in the partition function.
+ As part of doing this a great number of error checks are also performed.
+ This is actually the place where most of the checks of the partition
+ information are done when creating a table.
+ Things that are checked include
+ 1) No NULLable fields in partition function
+ 2) All fields of the partition function are part of primary keys and
+ unique indexes (unless the handler supports this)
+ 3) No fields in the partition function that are BLOB's or VARCHAR with a
+ collation other than the binary collation.
+
+
+
+ Create an array of partition fields (NULL terminated). Before this method
+ is called fix_fields or find_field_in_table_sef has been called to set
+ GET_FIXED_FIELDS_FLAG on all fields that are part of the partition
+ function.
+ SYNOPSIS
+ set_up_field_array()
+ table TABLE object for which partition fields are set-up
+ sub_part Is the table subpartitioned as well
+ RETURN VALUE
+ TRUE Error, some field didn't meet requirements
+ FALSE Ok, partition field array set-up
+ DESCRIPTION
+ This method is used to set-up both partition and subpartitioning
+ field array and used for all types of partitioning.
+ It is part of the logic around fix_partition_func.
+*/
+static bool set_up_field_array(TABLE *table,
+ bool sub_part)
+{
+ Field **ptr, *field, **field_array;
+ uint no_fields= 0, size_field_array, i= 0;
+ partition_info *part_info= table->s->part_info;
+ int result= FALSE;
+ DBUG_ENTER("set_up_field_array");
+
+ ptr= table->field;
+ while ((field= *(ptr++)))
+ {
+ if (field->flags & GET_FIXED_FIELDS_FLAG)
+ no_fields++;
+ }
+ size_field_array= (no_fields+1)*sizeof(Field*);
+ field_array= (Field**)sql_alloc(size_field_array);
+ if (unlikely(!field_array))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), size_field_array);
+ result= TRUE;
+ }
+ ptr= table->field;
+ while ((field= *(ptr++)))
+ {
+ if (field->flags & GET_FIXED_FIELDS_FLAG)
+ {
+ field->flags&= ~GET_FIXED_FIELDS_FLAG;
+ field->flags|= FIELD_IN_PART_FUNC_FLAG;
+ if (likely(!result))
+ {
+ field_array[i++]= field;
+
+ /*
+ We check that the fields are proper. It is required for each
+ field in a partition function to:
+ 1) Not be a BLOB of any type
+ A BLOB takes too long time to evaluate so we don't want it for
+ performance reasons.
+ 2) Not be a VARCHAR other than VARCHAR with a binary collation
+ A VARCHAR with character sets can have several values being
+ equal with different number of spaces or NULL's. This is not a
+ good ground for a safe and exact partition function. Thus it is
+ not allowed in partition functions.
+ */
+
+ if (unlikely(field->flags & BLOB_FLAG))
+ {
+ my_error(ER_BLOB_FIELD_IN_PART_FUNC_ERROR, MYF(0));
+ result= TRUE;
+ }
+ else if (unlikely(!(field->flags & BINARY_FLAG) &&
+ field->real_type() == MYSQL_TYPE_VARCHAR))
+ {
+ my_error(ER_CHAR_SET_IN_PART_FIELD_ERROR, MYF(0));
+ result= TRUE;
+ }
+ }
+ }
+ }
+ if (likely(field_array != 0))
+ field_array[no_fields]= 0;
+ if (!sub_part)
+ {
+ part_info->part_field_array= field_array;
+ part_info->no_part_fields= no_fields;
+ }
+ else
+ {
+ part_info->subpart_field_array= field_array;
+ part_info->no_subpart_fields= no_fields;
+ }
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Create a field array including all fields of both the partitioning and the
+ subpartitioning functions.
+ SYNOPSIS
+ create_full_part_field_array()
+ table TABLE object for which partition fields are set-up
+ part_info Reference to partitioning data structure
+ RETURN VALUE
+ TRUE Memory allocation of field array failed
+ FALSE Ok
+ DESCRIPTION
+ If there is no subpartitioning then the same array is used as for the
+ partitioning. Otherwise a new array is built up using the flag
+ FIELD_IN_PART_FUNC_FLAG in the field object.
+ This function is called from fix_partition_func
+*/
+
+static bool create_full_part_field_array(TABLE *table,
+ partition_info *part_info)
+{
+ bool result= FALSE;
+ DBUG_ENTER("create_full_part_field_array");
+
+ if (!is_sub_partitioned(part_info))
+ {
+ part_info->full_part_field_array= part_info->part_field_array;
+ part_info->no_full_part_fields= part_info->no_part_fields;
+ }
+ else
+ {
+ Field **ptr, *field, **field_array;
+ uint no_part_fields=0, size_field_array;
+ ptr= table->field;
+ while ((field= *(ptr++)))
+ {
+ if (field->flags & FIELD_IN_PART_FUNC_FLAG)
+ no_part_fields++;
+ }
+ size_field_array= (no_part_fields+1)*sizeof(Field*);
+ field_array= (Field**)sql_alloc(size_field_array);
+ if (unlikely(!field_array))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), size_field_array);
+ result= TRUE;
+ goto end;
+ }
+ no_part_fields= 0;
+ ptr= table->field;
+ while ((field= *(ptr++)))
+ {
+ if (field->flags & FIELD_IN_PART_FUNC_FLAG)
+ field_array[no_part_fields++]= field;
+ }
+ field_array[no_part_fields]=0;
+ part_info->full_part_field_array= field_array;
+ part_info->no_full_part_fields= no_part_fields;
+ }
+end:
+ DBUG_RETURN(result);
+}
+
+
+/*
+ These support routines are used to set/reset an indicator on all fields
+ in a certain key. They are used in conjunction with another support
+ routine that traverses all fields in the PF to find out if all or some
+ fields in the PF are part of the key. This is used to check that primary
+ keys and unique keys involve all fields in the PF (unless the handler
+ supports this) and to derive the key_map's used to quickly decide
+ whether an index can be used to derive which partitions need to be
+ scanned.
+
+
+
+ Clear flag GET_FIXED_FIELDS_FLAG in all fields of a key previously set by
+ set_indicator_in_key_fields (always used in pairs).
+ SYNOPSIS
+ clear_indicator_in_key_fields()
+ key_info Reference to find the key fields
+*/
+
+static void clear_indicator_in_key_fields(KEY *key_info)
+{
+ KEY_PART_INFO *key_part;
+ uint key_parts= key_info->key_parts, i;
+ for (i= 0, key_part=key_info->key_part; i < key_parts; i++, key_part++)
+ key_part->field->flags&= (~GET_FIXED_FIELDS_FLAG);
+}
+
+
+/*
+ Set flag GET_FIXED_FIELDS_FLAG in all fields of a key.
+ SYNOPSIS
+ set_indicator_in_key_fields
+ key_info Reference to find the key fields
+*/
+
+static void set_indicator_in_key_fields(KEY *key_info)
+{
+ KEY_PART_INFO *key_part;
+ uint key_parts= key_info->key_parts, i;
+ for (i= 0, key_part=key_info->key_part; i < key_parts; i++, key_part++)
+ key_part->field->flags|= GET_FIXED_FIELDS_FLAG;
+}
+
+
+/*
+ Check if all or some fields in the partition field array are part of a
+ key previously used to tag key fields.
+ SYNOPSIS
+ check_fields_in_PF()
+ ptr Partition field array
+ all_fields Is all fields of partition field array used in key
+ some_fields Is some fields of partition field array used in key
+ RETURN VALUE
+ all_fields, some_fields
+*/
+
+static void check_fields_in_PF(Field **ptr, bool *all_fields,
+ bool *some_fields)
+{
+ DBUG_ENTER("check_fields_in_PF");
+ *all_fields= TRUE;
+ *some_fields= FALSE;
+ do
+ {
+ /* Check if the field of the PF is part of the current key investigated */
+ if ((*ptr)->flags & GET_FIXED_FIELDS_FLAG)
+ *some_fields= TRUE;
+ else
+ *all_fields= FALSE;
+ } while (*(++ptr));
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Clear flag GET_FIXED_FIELDS_FLAG in all fields of the table.
+ This routine is used for error handling purposes.
+ SYNOPSIS
+ clear_field_flag()
+ table TABLE object for which partition fields are set-up
+*/
+
+static void clear_field_flag(TABLE *table)
+{
+ Field **ptr;
+ DBUG_ENTER("clear_field_flag");
+
+ for (ptr= table->field; *ptr; ptr++)
+ (*ptr)->flags&= (~GET_FIXED_FIELDS_FLAG);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ This routine sets up the partition field array for KEY partitioning. It
+ also verifies that all fields in the list of fields are actually part of
+ the table.
+ SYNOPSIS
+ handle_list_of_fields()
+ it A list of field names for the partition function
+ table TABLE object for which partition fields are set-up
+ part_info Reference to partitioning data structure
+ sub_part Is the table subpartitioned as well
+ RETURN VALUE
+ TRUE Fields in list of fields not part of table
+ FALSE All fields ok and array created
+ DESCRIPTION
+ find_field_in_table_sef finds the field given its name. All fields get
+ GET_FIXED_FIELDS_FLAG set.
+*/
+
+static bool handle_list_of_fields(List_iterator<char> it,
+ TABLE *table,
+ partition_info *part_info,
+ bool sub_part)
+{
+ Field *field;
+ bool result;
+ char *field_name;
+ DBUG_ENTER("handle_list_of_fields");
+
+ while ((field_name= it++))
+ {
+ field= find_field_in_table_sef(table, field_name);
+ if (likely(field != 0))
+ field->flags|= GET_FIXED_FIELDS_FLAG;
+ else
+ {
+ my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0));
+ clear_field_flag(table);
+ result= TRUE;
+ goto end;
+ }
+ }
+ result= set_up_field_array(table, sub_part);
+end:
+ DBUG_RETURN(result);
+}
+
+
+/*
+ This function is used to build an array of partition fields for the
+ partitioning function and subpartitioning function. The partitioning
+ function is an item tree that must reference at least one field in the
+ table. The parser first checks that the function doesn't contain
+ non-cacheable parts (like a random function); here we additionally
+ check that the function isn't a constant function.
+ SYNOPSIS
+ fix_fields_part_func()
+ thd The thread object
+ tables A list of one table, the partitioned table
+ func_expr The item tree reference of the partition function
+ part_info Reference to partitioning data structure
+ sub_part Is the table subpartitioned as well
+ RETURN VALUE
+ TRUE An error occurred, something was wrong with the
+ partition function.
+ FALSE Ok, a partition field array was created
+ DESCRIPTION
+ The function uses a new feature in fix_fields where the flag
+ GET_FIXED_FIELDS_FLAG is set for all fields in the item tree.
+ This field must always be reset before returning from the function
+ since it is used for other purposes as well.
+*/
+
+static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables,
+ Item* func_expr, partition_info *part_info,
+ bool sub_part)
+{
+ /*
+ Calculate the number of fields in the partition function.
+ Use it allocate memory for array of Field pointers.
+ Initialise array of field pointers. Use information set when
+ calling fix_fields and reset it immediately after.
+ The get_fields_in_item_tree activates setting of bit in flags
+ on the field object.
+ */
+
+ bool result= TRUE;
+ TABLE *table= tables->table;
+ TABLE_LIST *save_list;
+ int error;
+ Name_resolution_context *context= &thd->lex->current_select->context;
+ DBUG_ENTER("fix_fields_part_func");
+
+ table->map= 1; //To ensure correct calculation of const item
+ table->get_fields_in_item_tree= TRUE;
+ save_list= context->table_list;
+ context->table_list= tables;
+ thd->where= "partition function";
+ error= func_expr->fix_fields(thd, (Item**)0);
+ context->table_list= save_list;
+ if (unlikely(error))
+ {
+ DBUG_PRINT("info", ("Field in partition function not part of table"));
+ clear_field_flag(table);
+ goto end;
+ }
+ if (unlikely(func_expr->const_item()))
+ {
+ my_error(ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR, MYF(0));
+ clear_field_flag(table);
+ goto end;
+ }
+ result= set_up_field_array(table, sub_part);
+end:
+ table->get_fields_in_item_tree= FALSE;
+ table->map= 0; //Restore old value
+ DBUG_RETURN(result);
+}
+
+
+/*
+ This function verifies that, if there is a primary key, it contains
+ all the fields of the partition function.
+ This is a temporary limitation that will hopefully be removed after a
+ while.
+ SYNOPSIS
+ check_primary_key()
+ table TABLE object for which partition fields are set-up
+ RETURN VALUES
+ TRUE Not all fields of the partitioning function were part
+ of the primary key
+ FALSE Ok, all fields of the partitioning function were part
+ of the primary key
+*/
+
+static bool check_primary_key(TABLE *table)
+{
+ uint primary_key= table->s->primary_key;
+ bool all_fields, some_fields, result= FALSE;
+ DBUG_ENTER("check_primary_key");
+
+ if (primary_key < MAX_KEY)
+ {
+ set_indicator_in_key_fields(table->key_info+primary_key);
+ check_fields_in_PF(table->s->part_info->full_part_field_array,
+ &all_fields, &some_fields);
+ clear_indicator_in_key_fields(table->key_info+primary_key);
+ if (unlikely(!all_fields))
+ {
+ my_error(ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF,MYF(0),"PRIMARY KEY");
+ result= TRUE;
+ }
+ }
+ DBUG_RETURN(result);
+}
+
+
+/*
+ This function verifies that every unique index contains all the fields
+ of the partition function.
+ This is a temporary limitation that will hopefully be removed after a
+ while.
+ SYNOPSIS
+ check_unique_keys()
+ table TABLE object for which partition fields are set-up
+ RETURN VALUES
+ TRUE Not all fields of the partitioning function were part
+ of all unique keys
+ FALSE Ok, all fields of the partitioning function were part
+ of all unique keys
+*/
+
+static bool check_unique_keys(TABLE *table)
+{
+ bool all_fields, some_fields, result= FALSE;
+ uint keys= table->s->keys, i;
+ DBUG_ENTER("check_unique_keys");
+ for (i= 0; i < keys; i++)
+ {
+ if (table->key_info[i].flags & HA_NOSAME) //Unique index
+ {
+ set_indicator_in_key_fields(table->key_info+i);
+ check_fields_in_PF(table->s->part_info->full_part_field_array,
+ &all_fields, &some_fields);
+ clear_indicator_in_key_fields(table->key_info+i);
+ if (unlikely(!all_fields))
+ {
+ my_error(ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF,MYF(0),"UNIQUE INDEX");
+ result= TRUE;
+ break;
+ }
+ }
+ }
+ DBUG_RETURN(result);
+}
+
+
+/*
+ An important optimisation is whether a range on a field can select a subset
+ of the partitions.
+ A prerequisite for this to happen is that the PF is a growing function OR
+ a shrinking function.
+ This can never happen for a multi-dimensional PF. Thus this can only happen
+ with PF with at most one field involved in the PF.
+ The idea is that if the function is a growing function and you know that
+ the field of the PF is 4 <= A <= 6 then we can convert this to a range
+ in the PF instead by setting the range to PF(4) <= PF(A) <= PF(6). In the
+ case of RANGE PARTITIONING and LIST PARTITIONING this can be used to
+ calculate a set of partitions rather than scanning all of them.
+ Thus the following prerequisites must hold for a subset of partitions
+ to be derivable:
+ 1) Only possible for RANGE and LIST partitioning (not for subpartitioning)
+ 2) Only possible if PF only contains 1 field
+ 3) Possible if PF is a growing function of the field
+ 4) Possible if PF is a shrinking function of the field
+ OBSERVATION:
+ 1) IF f1(A) is a growing function AND f2(A) is a growing function THEN
+ f1(A) + f2(A) is a growing function
+ f1(A) * f2(A) is a growing function if f1(A) >= 0 and f2(A) >= 0
+ 2) IF f1(A) is a growing function and f2(A) is a shrinking function THEN
+ f1(A) / f2(A) is a growing function if f1(A) >= 0 and f2(A) > 0
+ 3) IF A is a growing function then a function f(A) that removes the
+ least significant portion of A is a growing function
+ E.g. DATE(datetime) is a growing function
+ MONTH(datetime) is not a growing/shrinking function
+ 4) IF f1(A) is a growing function and f2(A) is a growing function THEN
+ f1(f2(A)) and f2(f1(A)) are also growing functions
+ 5) IF f1(A) is a shrinking function and f2(A) is a growing function THEN
+ f1(f2(A)) is a shrinking function and f2(f1(A)) is a shrinking function
+ 6) f1(A) = A is a growing function
+ 7) f1(A) = A*a + b (where a and b are constants) is a growing function
+
+ By analysing the item tree of the PF we can apply these deductions and
+ derive whether the PF is a growing function, a shrinking function or
+ neither (a small worked example follows the function below).
+
+ If the PF is range capable then a flag is set on the table object
+ indicating that ranges on the field of the PF can also be used to
+ deduce a set of partitions when the fields of the PF are not all
+ fully bound.
+ SYNOPSIS
+ check_range_capable_PF()
+ table TABLE object for which partition fields are set-up
+ DESCRIPTION
+ Support for this is not implemented yet.
+*/
+
+void check_range_capable_PF(TABLE *table)
+{
+ DBUG_ENTER("check_range_capable_PF");
+ DBUG_VOID_RETURN;
+}
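+
+/*
+ A minimal worked example of the observations above, assuming a PF of the
+ form f(A) = A * 2 + 3 (growing by rule 7): knowing that 4 <= A <= 6
+ gives 11 <= f(A) <= 15, so with RANGE or LIST partitioning only the
+ partitions covering the interval [11, 15] would need to be scanned.
+*/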
+
+
+/*
+ Set up partition key maps
+ SYNOPSIS
+ set_up_partition_key_maps()
+ table TABLE object for which partition fields are set-up
+ part_info Reference to partitioning data structure
+ RETURN VALUES
+ None
+ DESCRIPTION
+ This function sets up a number of key maps to be able to quickly check
+ if an index can ever be used to deduce the partition fields, or even
+ just a part of the fields of the partition function.
+ We set up the following key_map's (PF = Partition Function):
+ 1) all_fields_in_PF: all fields of the PF are part of the key
+ 2) all_fields_in_PPF: all fields of the partitioning function are part
+ of the key
+ 3) all_fields_in_SPF: all fields of the subpartitioning function are
+ part of the key
+ 4) some_fields_in_PF: at least one field of the PF is part of the key
+*/
+
+static void set_up_partition_key_maps(TABLE *table,
+ partition_info *part_info)
+{
+ uint keys= table->s->keys, i;
+ bool all_fields, some_fields;
+ DBUG_ENTER("set_up_partition_key_maps");
+
+ part_info->all_fields_in_PF.clear_all();
+ part_info->all_fields_in_PPF.clear_all();
+ part_info->all_fields_in_SPF.clear_all();
+ part_info->some_fields_in_PF.clear_all();
+ for (i= 0; i < keys; i++)
+ {
+ set_indicator_in_key_fields(table->key_info+i);
+ check_fields_in_PF(part_info->full_part_field_array,
+ &all_fields, &some_fields);
+ if (all_fields)
+ part_info->all_fields_in_PF.set_bit(i);
+ if (some_fields)
+ part_info->some_fields_in_PF.set_bit(i);
+ if (is_sub_partitioned(part_info))
+ {
+ check_fields_in_PF(part_info->part_field_array,
+ &all_fields, &some_fields);
+ if (all_fields)
+ part_info->all_fields_in_PPF.set_bit(i);
+ check_fields_in_PF(part_info->subpart_field_array,
+ &all_fields, &some_fields);
+ if (all_fields)
+ part_info->all_fields_in_SPF.set_bit(i);
+ }
+ clear_indicator_in_key_fields(table->key_info+i);
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Set up all function pointers for calculation of partition id,
+ subpartition id and the upper part in subpartitioning. This is to speed
+ up execution of get_partition_id, which is executed once for every
+ record to be written or deleted and twice for updates.
+ SYNOPSIS
+ set_up_partition_func_pointers()
+ part_info Reference to partitioning data structure
+*/
+
+static void set_up_partition_func_pointers(partition_info *part_info)
+{
+ if (is_sub_partitioned(part_info))
+ {
+ if (part_info->part_type == RANGE_PARTITION)
+ {
+ part_info->get_part_partition_id= get_partition_id_range;
+ if (part_info->list_of_subpart_fields)
+ {
+ if (part_info->linear_hash_ind)
+ {
+ part_info->get_partition_id= get_partition_id_range_sub_linear_key;
+ part_info->get_subpartition_id= get_partition_id_linear_key_sub;
+ }
+ else
+ {
+ part_info->get_partition_id= get_partition_id_range_sub_key;
+ part_info->get_subpartition_id= get_partition_id_key_sub;
+ }
+ }
+ else
+ {
+ if (part_info->linear_hash_ind)
+ {
+ part_info->get_partition_id= get_partition_id_range_sub_linear_hash;
+ part_info->get_subpartition_id= get_partition_id_linear_hash_sub;
+ }
+ else
+ {
+ part_info->get_partition_id= get_partition_id_range_sub_hash;
+ part_info->get_subpartition_id= get_partition_id_hash_sub;
+ }
+ }
+ }
+ else //LIST Partitioning
+ {
+ part_info->get_part_partition_id= get_partition_id_list;
+ if (part_info->list_of_subpart_fields)
+ {
+ if (part_info->linear_hash_ind)
+ {
+ part_info->get_partition_id= get_partition_id_list_sub_linear_key;
+ part_info->get_subpartition_id= get_partition_id_linear_key_sub;
+ }
+ else
+ {
+ part_info->get_partition_id= get_partition_id_list_sub_key;
+ part_info->get_subpartition_id= get_partition_id_key_sub;
+ }
+ }
+ else
+ {
+ if (part_info->linear_hash_ind)
+ {
+ part_info->get_partition_id= get_partition_id_list_sub_linear_hash;
+ part_info->get_subpartition_id= get_partition_id_linear_hash_sub;
+ }
+ else
+ {
+ part_info->get_partition_id= get_partition_id_list_sub_hash;
+ part_info->get_subpartition_id= get_partition_id_hash_sub;
+ }
+ }
+ }
+ }
+ else //No subpartitioning
+ {
+ part_info->get_part_partition_id= NULL;
+ part_info->get_subpartition_id= NULL;
+ if (part_info->part_type == RANGE_PARTITION)
+ part_info->get_partition_id= get_partition_id_range;
+ else if (part_info->part_type == LIST_PARTITION)
+ part_info->get_partition_id= get_partition_id_list;
+ else //HASH partitioning
+ {
+ if (part_info->list_of_part_fields)
+ {
+ if (part_info->linear_hash_ind)
+ part_info->get_partition_id= get_partition_id_linear_key_nosub;
+ else
+ part_info->get_partition_id= get_partition_id_key_nosub;
+ }
+ else
+ {
+ if (part_info->linear_hash_ind)
+ part_info->get_partition_id= get_partition_id_linear_hash_nosub;
+ else
+ part_info->get_partition_id= get_partition_id_hash_nosub;
+ }
+ }
+ }
+}
+
+
+/*
+ For linear hashing we need a mask of the form 2**n - 1 where
+ 2**n >= no_parts. Thus if no_parts is 6 then the mask is 2**3 - 1 = 8 - 1 = 7.
+ SYNOPSIS
+ set_linear_hash_mask()
+ part_info Reference to partitioning data structure
+ no_parts Number of parts in linear hash partitioning
+*/
+
+static void set_linear_hash_mask(partition_info *part_info, uint no_parts)
+{
+ uint mask;
+ for (mask= 1; mask < no_parts; mask<<=1)
+ ;
+ part_info->linear_hash_mask= mask - 1;
+}
+
+
+/*
+ This function calculates the partition id given the result of the hash
+ function, using the linear hashing parameters mask and number of
+ partitions.
+ SYNOPSIS
+ get_part_id_from_linear_hash()
+ hash_value Hash value calculated by HASH function or KEY function
+ mask Mask calculated previously by set_linear_hash_mask
+ no_parts Number of partitions in HASH partitioned part
+ RETURN VALUE
+ part_id The calculated partition identity (starting at 0)
+ DESCRIPTION
+ The partition is calculated according to the theory of linear hashing.
+ See e.g. Linear hashing: a new tool for file and table addressing,
+ Reprinted from VLDB-80 in Readings in Database Systems, 2nd ed., M. Stonebraker
+ (ed.), Morgan Kaufmann 1994.
+*/
+
+static uint32 get_part_id_from_linear_hash(longlong hash_value, uint mask,
+ uint no_parts)
+{
+ uint32 part_id= (uint32)(hash_value & mask);
+ if (part_id >= no_parts)
+ {
+ uint new_mask= ((mask + 1) >> 1) - 1;
+ part_id= hash_value & new_mask;
+ }
+ return part_id;
+}
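+
+/*
+ A worked example of the function above with mask = 7 and no_parts = 6
+ (the hash values are assumed): hash_value = 13 gives 13 & 7 = 5, which
+ is < 6 and is returned directly; hash_value = 14 gives 14 & 7 = 6, which
+ is >= 6, so the mask is halved to ((7 + 1) >> 1) - 1 = 3 and the result
+ is 14 & 3 = 2.
+*/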
+
+/*
+ This function is called as part of opening the table by reading the .frm
+ file. It is a part of CREATE TABLE to do this, so it is quite permissible
+ that errors due to erroneous syntax are not found until we come here.
+ A user having used a non-existing field in the table is one such example
+ of an error that is not discovered until here.
+ SYNOPSIS
+ fix_partition_func()
+ thd The thread object
+ name The name of the partitioned table
+ table TABLE object for which partition fields are set-up
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ The name parameter contains the full table name and is used to get the
+ database name of the table which is used to set-up a correct
+ TABLE_LIST object for use in fix_fields.
+*/
+
+bool fix_partition_func(THD *thd, const char* name, TABLE *table)
+{
+ bool result= TRUE;
+ uint dir_length, home_dir_length;
+ TABLE_LIST tables;
+ TABLE_SHARE *share= table->s;
+ char db_name_string[FN_REFLEN];
+ char* db_name;
+ partition_info *part_info= share->part_info;
+ ulong save_set_query_id= thd->set_query_id;
+ DBUG_ENTER("fix_partition_func");
+
+ thd->set_query_id= 0;
+ /*
+ Set-up the TABLE_LIST object to be a list with a single table
+ Set the object to zero to create NULL pointers and set alias
+ and real name to table name and get database name from file name.
+ */
+
+ bzero((void*)&tables, sizeof(TABLE_LIST));
+ tables.alias= tables.table_name= (char*)share->table_name;
+ tables.table= table;
+ strmov(db_name_string, name);
+ dir_length= dirname_length(db_name_string);
+ db_name_string[dir_length - 1]= 0;
+ home_dir_length= dirname_length(db_name_string);
+ db_name= &db_name_string[home_dir_length];
+ tables.db= db_name;
+
+ part_info->no_full_parts= part_info->no_parts;
+ if (is_sub_partitioned(part_info))
+ {
+ DBUG_ASSERT(part_info->subpart_type == HASH_PARTITION);
+ part_info->no_full_parts= part_info->no_parts*part_info->no_subparts;
+ /*
+ Subpartition is defined. We need to verify that subpartitioning
+ function is correct.
+ */
+ if (part_info->linear_hash_ind)
+ set_linear_hash_mask(part_info, part_info->no_subparts);
+ if (part_info->list_of_subpart_fields)
+ {
+ List_iterator<char> it(part_info->subpart_field_list);
+ if (unlikely(handle_list_of_fields(it, table, part_info, TRUE)))
+ goto end;
+ }
+ else
+ {
+ if (unlikely(fix_fields_part_func(thd, &tables,
+ part_info->subpart_expr, part_info, TRUE)))
+ goto end;
+ if (unlikely(part_info->subpart_expr->result_type() != INT_RESULT))
+ {
+ my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0),
+ "SUBPARTITION");
+ goto end;
+ }
+ }
+ }
+ DBUG_ASSERT(part_info->part_type != NOT_A_PARTITION);
+ /*
+ Partition is defined. We need to verify that partitioning
+ function is correct.
+ */
+ if (part_info->part_type == HASH_PARTITION)
+ {
+ if (part_info->linear_hash_ind)
+ set_linear_hash_mask(part_info, part_info->no_parts);
+ if (part_info->list_of_part_fields)
+ {
+ List_iterator<char> it(part_info->part_field_list);
+ if (unlikely(handle_list_of_fields(it, table, part_info, FALSE)))
+ goto end;
+ }
+ else
+ {
+ if (unlikely(fix_fields_part_func(thd, &tables, part_info->part_expr,
+ part_info, FALSE)))
+ goto end;
+ if (unlikely(part_info->part_expr->result_type() != INT_RESULT))
+ {
+ my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0), part_str);
+ goto end;
+ }
+ part_info->part_result_type= INT_RESULT;
+ }
+ }
+ else
+ {
+ char *error_str;
+ if (part_info->part_type == RANGE_PARTITION)
+ {
+ error_str= range_str;
+ if (unlikely(check_range_constants(part_info)))
+ goto end;
+ }
+ else if (part_info->part_type == LIST_PARTITION)
+ {
+ error_str= list_str;
+ if (unlikely(check_list_constants(part_info)))
+ goto end;
+ }
+ else
+ {
+ DBUG_ASSERT(0);
+ my_error(ER_INCONSISTENT_PARTITION_INFO_ERROR, MYF(0));
+ goto end;
+ }
+ if (unlikely(part_info->no_parts < 1))
+ {
+ my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_str);
+ goto end;
+ }
+ if (unlikely(fix_fields_part_func(thd, &tables, part_info->part_expr,
+ part_info, FALSE)))
+ goto end;
+ if (unlikely(part_info->part_expr->result_type() != INT_RESULT))
+ {
+ my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0), part_str);
+ goto end;
+ }
+ }
+ if (unlikely(create_full_part_field_array(table, part_info)))
+ goto end;
+ if (unlikely(check_primary_key(table)))
+ goto end;
+ if (unlikely(!(table->file->partition_flags() & HA_CAN_PARTITION_UNIQUE) &&
+ check_unique_keys(table)))
+ goto end;
+ check_range_capable_PF(table);
+ set_up_partition_key_maps(table, part_info);
+ set_up_partition_func_pointers(part_info);
+ result= FALSE;
+end:
+ thd->set_query_id= save_set_query_id;
+ DBUG_RETURN(result);
+}
+
+
+/*
+ The code below contains support routines for the reverse parsing of the
+ partitioning syntax. This feature is very useful for generating the syntax
+ of all default values, avoiding all default checking when opening the frm
+ file. It is also used when altering the partitioning by use of various
+ ALTER TABLE commands. Finally it is used for SHOW CREATE TABLE.
+*/
+
+static int add_write(File fptr, const char *buf, uint len)
+{
+ uint len_written= my_write(fptr, buf, len, MYF(0));
+ if (likely(len == len_written))
+ return 0;
+ else
+ return 1;
+}
+
+static int add_string(File fptr, const char *string)
+{
+ return add_write(fptr, string, strlen(string));
+}
+
+static int add_string_len(File fptr, const char *string, uint len)
+{
+ return add_write(fptr, string, len);
+}
+
+static int add_space(File fptr)
+{
+ return add_string(fptr, space_str);
+}
+
+static int add_comma(File fptr)
+{
+ return add_string(fptr, comma_str);
+}
+
+static int add_equal(File fptr)
+{
+ return add_string(fptr, equal_str);
+}
+
+static int add_end_parenthesis(File fptr)
+{
+ return add_string(fptr, end_paren_str);
+}
+
+static int add_begin_parenthesis(File fptr)
+{
+ return add_string(fptr, begin_paren_str);
+}
+
+static int add_part_key_word(File fptr, const char *key_string)
+{
+ int err= add_string(fptr, key_string);
+ err+= add_space(fptr);
+ return err + add_begin_parenthesis(fptr);
+}
+
+static int add_hash(File fptr)
+{
+ return add_part_key_word(fptr, hash_str);
+}
+
+static int add_partition(File fptr)
+{
+ strxmov(buff, part_str, space_str, NullS);
+ return add_string(fptr, buff);
+}
+
+static int add_subpartition(File fptr)
+{
+ int err= add_string(fptr, sub_str);
+ return err + add_partition(fptr);
+}
+
+static int add_partition_by(File fptr)
+{
+ strxmov(buff, part_str, space_str, by_str, space_str, NullS);
+ return add_string(fptr, buff);
+}
+
+static int add_subpartition_by(File fptr)
+{
+ int err= add_string(fptr, sub_str);
+ return err + add_partition_by(fptr);
+}
+
+static int add_key_partition(File fptr, List<char> field_list)
+{
+ uint i, no_fields;
+ int err;
+ List_iterator<char> part_it(field_list);
+ err= add_part_key_word(fptr, key_str);
+ no_fields= field_list.elements;
+ i= 0;
+ do
+ {
+ const char *field_str= part_it++;
+ err+= add_string(fptr, field_str);
+ if (i != (no_fields-1))
+ err+= add_comma(fptr);
+ } while (++i < no_fields);
+ return err;
+}
+
+static int add_int(File fptr, longlong number)
+{
+ llstr(number, buff);
+ return add_string(fptr, buff);
+}
+
+static int add_keyword_string(File fptr, const char *keyword,
+ const char *keystr)
+{
+ int err= add_string(fptr, keyword);
+ err+= add_space(fptr);
+ err+= add_equal(fptr);
+ err+= add_space(fptr);
+ err+= add_string(fptr, keystr);
+ return err + add_space(fptr);
+}
+
+static int add_keyword_int(File fptr, const char *keyword, longlong num)
+{
+ int err= add_string(fptr, keyword);
+ err+= add_space(fptr);
+ err+= add_equal(fptr);
+ err+= add_space(fptr);
+ err+= add_int(fptr, num);
+ return err + add_space(fptr);
+}
+
+static int add_engine(File fptr, enum db_type engine_type)
+{
+ const char *engine_str= ha_get_storage_engine(engine_type);
+ int err= add_string(fptr, "ENGINE = ");
+ return err + add_string(fptr, engine_str);
+}
+
+static int add_partition_options(File fptr, partition_element *p_elem)
+{
+ int err= 0;
+ if (p_elem->tablespace_name)
+ err+= add_keyword_string(fptr,"TABLESPACE",p_elem->tablespace_name);
+ if (p_elem->nodegroup_id != UNDEF_NODEGROUP)
+ err+= add_keyword_int(fptr,"NODEGROUP",(longlong)p_elem->nodegroup_id);
+ if (p_elem->part_max_rows)
+ err+= add_keyword_int(fptr,"MAX_ROWS",(longlong)p_elem->part_max_rows);
+ if (p_elem->part_min_rows)
+ err+= add_keyword_int(fptr,"MIN_ROWS",(longlong)p_elem->part_min_rows);
+ if (p_elem->data_file_name)
+ err+= add_keyword_string(fptr,"DATA DIRECTORY",p_elem->data_file_name);
+ if (p_elem->index_file_name)
+ err+= add_keyword_string(fptr,"INDEX DIRECTORY",p_elem->index_file_name);
+ if (p_elem->part_comment)
+ err+= add_keyword_string(fptr, "COMMENT",p_elem->part_comment);
+ return err + add_engine(fptr,p_elem->engine_type);
+}
+
+static int add_partition_values(File fptr, partition_info *part_info,
+ partition_element *p_elem)
+{
+ int err= 0;
+ if (part_info->part_type == RANGE_PARTITION)
+ {
+ err+= add_string(fptr, "VALUES LESS THAN ");
+ if (p_elem->range_expr)
+ {
+ err+= add_begin_parenthesis(fptr);
+ err+= add_int(fptr,p_elem->range_expr->val_int());
+ err+= add_end_parenthesis(fptr);
+ }
+ else
+ err+= add_string(fptr, "MAXVALUE");
+ }
+ else if (part_info->part_type == LIST_PARTITION)
+ {
+ uint i;
+ List_iterator<Item> list_expr_it(p_elem->list_expr_list);
+ err+= add_string(fptr, "VALUES IN ");
+ uint no_items= p_elem->list_expr_list.elements;
+ err+= add_begin_parenthesis(fptr);
+ i= 0;
+ do
+ {
+ Item *list_expr= list_expr_it++;
+ err+= add_int(fptr, list_expr->val_int());
+ if (i != (no_items-1))
+ err+= add_comma(fptr);
+ } while (++i < no_items);
+ err+= add_end_parenthesis(fptr);
+ }
+ return err + add_space(fptr);
+}
+
+/*
+ Generate the partition syntax from the partition data structure.
+ Useful for support of generating defaults, SHOW CREATE TABLE
+ and easy partition management.
+ SYNOPSIS
+ generate_partition_syntax()
+ part_info The partitioning data structure
+ buf_length A pointer to the returned buffer length
+ use_sql_alloc Allocate buffer from sql_alloc if true
+ otherwise use my_malloc
+ RETURN VALUES
+ NULL error
+ buf, buf_length Buffer and its length
+ DESCRIPTION
+ Here we will generate the full syntax for the given command where all
+ defaults have been expanded. By doing so it is also possible to
+ make lots of correctness checks while at it.
+ This code will also be reused for SHOW CREATE TABLE and for all types
+ of ALTER TABLE commands focusing on changing the PARTITION structure
+ in any fashion.
+
+ The implementation writes the syntax to a temporary file (essentially
+ an abstraction of a dynamic array) and if all writes goes well it
+ allocates a buffer and writes the syntax into this one and returns it.
+
+ As a security precaution the file is deleted before writing into it. This
+ means that no other processes on the machine can open and read the file
+ while this processing is ongoing.
+
+ The code is optimised for minimal code size since it is not used in any
+ common queries.
+*/
+
+char *generate_partition_syntax(partition_info *part_info,
+ uint *buf_length,
+ bool use_sql_alloc)
+{
+ uint i,j, no_parts, no_subparts;
+ partition_element *part_elem;
+ ulonglong buffer_length;
+ char path[FN_REFLEN];
+ int err= 0;
+ DBUG_ENTER("generate_partition_syntax");
+ File fptr;
+ char *buf= NULL; //Return buffer
+ const char *file_name;
+ sprintf(path, "%s_%lx_%lx", "part_syntax", current_pid,
+ current_thd->thread_id);
+ fn_format(path,path,mysql_tmpdir,".psy", MY_REPLACE_EXT);
+ file_name= &path[0];
+ DBUG_PRINT("info", ("File name = %s", file_name));
+ if (unlikely(((fptr= my_open(file_name,O_CREAT|O_RDWR, MYF(MY_WME))) == -1)))
+ DBUG_RETURN(NULL);
+#if defined(MSDOS) || defined(__WIN__) || defined(__EMX__) || defined(OS2)
+#else
+ my_delete(file_name, MYF(0));
+#endif
+ err+= add_space(fptr);
+ err+= add_partition_by(fptr);
+ switch (part_info->part_type)
+ {
+ case RANGE_PARTITION:
+ err+= add_part_key_word(fptr, range_str);
+ break;
+ case LIST_PARTITION:
+ err+= add_part_key_word(fptr, list_str);
+ break;
+ case HASH_PARTITION:
+ if (part_info->linear_hash_ind)
+ err+= add_string(fptr, "LINEAR ");
+ if (part_info->list_of_part_fields)
+ err+= add_key_partition(fptr, part_info->part_field_list);
+ else
+ err+= add_hash(fptr);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ /* We really shouldn't get here, no use in continuing from here */
+ current_thd->fatal_error();
+ DBUG_RETURN(NULL);
+ }
+ if (part_info->part_expr)
+ err+= add_string_len(fptr, part_info->part_func_string,
+ part_info->part_func_len);
+ err+= add_end_parenthesis(fptr);
+ err+= add_space(fptr);
+ if (is_sub_partitioned(part_info))
+ {
+ err+= add_subpartition_by(fptr);
+ /* Must be hash partitioning for subpartitioning */
+ if (part_info->list_of_subpart_fields)
+ err+= add_key_partition(fptr, part_info->subpart_field_list);
+ else
+ err+= add_hash(fptr);
+ if (part_info->subpart_expr)
+ err+= add_string_len(fptr, part_info->subpart_func_string,
+ part_info->subpart_func_len);
+ err+= add_end_parenthesis(fptr);
+ err+= add_space(fptr);
+ }
+ err+= add_begin_parenthesis(fptr);
+ List_iterator<partition_element> part_it(part_info->partitions);
+ no_parts= part_info->no_parts;
+ no_subparts= part_info->no_subparts;
+ i= 0;
+ do
+ {
+ part_elem= part_it++;
+ err+= add_partition(fptr);
+ err+= add_string(fptr, part_elem->partition_name);
+ err+= add_space(fptr);
+ err+= add_partition_values(fptr, part_info, part_elem);
+ if (!is_sub_partitioned(part_info))
+ err+= add_partition_options(fptr, part_elem);
+ if (is_sub_partitioned(part_info))
+ {
+ err+= add_space(fptr);
+ err+= add_begin_parenthesis(fptr);
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ j= 0;
+ do
+ {
+ part_elem= sub_it++;
+ err+= add_subpartition(fptr);
+ err+= add_string(fptr, part_elem->partition_name);
+ err+= add_space(fptr);
+ err+= add_partition_options(fptr, part_elem);
+ if (j != (no_subparts-1))
+ {
+ err+= add_comma(fptr);
+ err+= add_space(fptr);
+ }
+ else
+ err+= add_end_parenthesis(fptr);
+ } while (++j < no_subparts);
+ }
+ if (i != (no_parts-1))
+ {
+ err+= add_comma(fptr);
+ err+= add_space(fptr);
+ }
+ else
+ err+= add_end_parenthesis(fptr);
+ } while (++i < no_parts);
+ if (err)
+ goto close_file;
+ buffer_length= my_seek(fptr, 0L,MY_SEEK_END,MYF(0));
+ if (unlikely(buffer_length == MY_FILEPOS_ERROR))
+ goto close_file;
+ if (unlikely(my_seek(fptr, 0L, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR))
+ goto close_file;
+ *buf_length= (uint)buffer_length;
+ if (use_sql_alloc)
+ buf= sql_alloc(*buf_length+1);
+ else
+ buf= my_malloc(*buf_length+1, MYF(MY_WME));
+ if (!buf)
+ goto close_file;
+
+ if (unlikely(my_read(fptr, buf, *buf_length, MYF(MY_FNABP))))
+ {
+ if (!use_sql_alloc)
+ my_free(buf, MYF(0));
+ else
+ buf= NULL;
+ }
+ else
+ buf[*buf_length]= 0;
+
+close_file:
+ /*
+ Delete the file to ensure it doesn't get synched to disk unnecessarily.
+ We only used the file system as a dynamic array implementation, so we
+ are not really interested in having the file present on disk.
+ On Unix the file is deleted immediately after opening, which also
+ ensures that no other process can read the information written into it.
+ On Windows this is not possible, so the deletion has to be done here,
+ after closing the file.
+ */
+ my_close(fptr, MYF(0));
+#if defined(MSDOS) || defined(__WIN__) || defined(__EMX__) || defined(OS2)
+ my_delete(file_name, MYF(0));
+#endif
+ DBUG_RETURN(buf);
+}
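+
+/*
+ A sketch of the kind of string the routine above produces (column name
+ and engine are assumed for illustration):
+ " PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10)
+ ENGINE = MyISAM, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = MyISAM)"
+ i.e. a complete PARTITION BY clause with all defaults expanded, suitable
+ for use in SHOW CREATE TABLE output.
+*/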
+
+
+/*
+ Check if partition key fields are modified and if it can be handled by the
+ underlying storage engine.
+ SYNOPSIS
+ partition_key_modified
+ table TABLE object for which partition fields are set-up
+ fields A list of the fields to be modified
+ RETURN VALUES
+ TRUE Need special handling of UPDATE
+ FALSE Normal UPDATE handling is ok
+*/
+
+bool partition_key_modified(TABLE *table, List<Item> &fields)
+{
+ List_iterator_fast<Item> f(fields);
+ partition_info *part_info= table->s->part_info;
+ Item_field *item_field;
+ DBUG_ENTER("partition_key_modified");
+ if (!part_info)
+ DBUG_RETURN(FALSE);
+ if (table->file->partition_flags() & HA_CAN_UPDATE_PARTITION_KEY)
+ DBUG_RETURN(FALSE);
+ f.rewind();
+ while ((item_field=(Item_field*) f++))
+ if (item_field->field->flags & FIELD_IN_PART_FUNC_FLAG)
+ DBUG_RETURN(TRUE);
+ DBUG_RETURN(FALSE);
+}
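+
+/*
+ As an example of the check above (table and column assumed): for a table
+ partitioned on column a, an UPDATE that modifies a makes the function
+ return TRUE unless the handler has set HA_CAN_UPDATE_PARTITION_KEY,
+ whereas an UPDATE touching only columns outside the partition function
+ returns FALSE.
+*/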
+
+
+/*
+ The next set of functions are used to calculate the partition identity.
+ A handler sets up a variable that corresponds to one of these functions
+ to be able to quickly call it whenever the partition id needs to be calculated
+ based on the record in table->record[0] (or set up to fake that).
+ There are 4 functions for hash partitioning and 2 for RANGE/LIST partitions.
+ In addition there are 4 variants for RANGE subpartitioning and 4 variants
+ for LIST subpartitioning thus in total there are 14 variants of this
+ function.
+
+ We have a set of support functions for these 14 variants. There are 4
+ variants of hash functions and there is a function for each. The KEY
+ partitioning uses the function calculate_key_value to calculate the hash
+ value based on an array of fields. The linear hash variants uses the
+ method get_part_id_from_linear_hash to get the partition id using the
+ hash value and some parameters calculated from the number of partitions.
+*/
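get_part_id_from_linear_hash itself is not part of this hunk. For orientation, the conventional linear-hashing mapping it refers to looks roughly like the sketch below, where mask is assumed to be 2^k - 1 for the smallest power of two not less than the number of partitions (types as in my_global.h; treat this as an illustration, not the server's exact code):

  static uint32 linear_hash_sketch(ulonglong hash_value, uint32 mask,
                                   uint32 no_parts)
  {
    uint32 part_id= (uint32)(hash_value & mask);
    if (part_id >= no_parts)
    {
      /* Fold values that land beyond the existing partitions back into
         the lower half, as classic linear hashing does. */
      part_id&= (mask >> 1);
    }
    return part_id;
  }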
+
+/*
+ Calculate hash value for KEY partitioning using an array of fields.
+ SYNOPSIS
+ calculate_key_value()
+ field_array An array of the fields in KEY partitioning
+ RETURN VALUE
+ hash_value calculated
+ DESCRIPTION
+ Uses the hash function on the character set of the field. Integer and
+ floating point fields use the binary character set by default.
+*/
+
+static uint32 calculate_key_value(Field **field_array)
+{
+ uint32 hashnr= 0;
+ ulong nr2= 4;
+ do
+ {
+ Field *field= *field_array;
+ if (field->is_null())
+ {
+ hashnr^= (hashnr << 1) | 1;
+ }
+ else
+ {
+ uint len= field->pack_length();
+ ulong nr1= 1;
+ CHARSET_INFO *cs= field->charset();
+ cs->coll->hash_sort(cs, (uchar*)field->ptr, len, &nr1, &nr2);
+ hashnr^= (uint32)nr1;
+ }
+ } while (*(++field_array));
+ return hashnr;
+}
+
+
+/*
+ A simple support function to calculate part_id given local part and
+ sub part.
+ SYNOPSIS
+ get_part_id_for_sub()
+ loc_part_id Local partition id
+ sub_part_id Subpartition id
+ no_subparts Number of subparts
+*/
+
+inline
+static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id,
+ uint no_subparts)
+{
+ return (uint32)((loc_part_id * no_subparts) + sub_part_id);
+}
+
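A quick worked example of this numbering, with invented figures: with 3 subpartitions per partition the global ids of one partition's subpartitions are consecutive, so partition 2 / subpartition 1 maps to 2*3 + 1 = 7:

  /* no_subparts == 3 (example value)                        */
  /* (loc_part_id, sub_part_id) -> part_id                   */
  /* (0,0)->0  (0,1)->1  (0,2)->2                            */
  /* (1,0)->3  (1,1)->4  (1,2)->5                            */
  DBUG_ASSERT(get_part_id_for_sub(2, 1, 3) == 7);  /* 2*3 + 1 */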
+
+/*
+ Calculate part_id for (SUB)PARTITION BY HASH
+ SYNOPSIS
+ get_part_id_hash()
+ no_parts Number of hash partitions
+ part_expr Item tree of hash function
+ RETURN VALUE
+ Calculated partition id
+*/
+
+inline
+static uint32 get_part_id_hash(uint no_parts,
+ Item *part_expr)
+{
+ DBUG_ENTER("get_part_id_hash");
+ DBUG_RETURN((uint32)(part_expr->val_int() % no_parts));
+}
+
+
+/*
+ Calculate part_id for (SUB)PARTITION BY LINEAR HASH
+ SYNOPSIS
+ get_part_id_linear_hash()
+ part_info A reference to the partition_info struct where all the
+ desired information is given
+ no_parts Number of hash partitions
+ part_expr Item tree of hash function
+ RETURN VALUE
+ Calculated partition id
+*/
+
+inline
+static uint32 get_part_id_linear_hash(partition_info *part_info,
+ uint no_parts,
+ Item *part_expr)
+{
+ DBUG_ENTER("get_part_id_linear_hash");
+ DBUG_RETURN(get_part_id_from_linear_hash(part_expr->val_int(),
+ part_info->linear_hash_mask,
+ no_parts));
+}
+
+
+/*
+ Calculate part_id for (SUB)PARTITION BY KEY
+ SYNOPSIS
+ get_part_id_key()
+ field_array Array of fields for PARTITION KEY
+ no_parts Number of KEY partitions
+ RETURN VALUE
+ Calculated partition id
+*/
+
+inline
+static uint32 get_part_id_key(Field **field_array,
+ uint no_parts)
+{
+ DBUG_ENTER("get_part_id_key");
+ DBUG_RETURN(calculate_key_value(field_array) % no_parts);
+}
+
+
+/*
+ Calculate part_id for (SUB)PARTITION BY LINEAR KEY
+ SYNOPSIS
+ get_part_id_linear_key()
+ part_info A reference to the partition_info struct where all the
+ desired information is given
+ field_array Array of fields for PARTITION KEY
+ no_parts Number of KEY partitions
+ RETURN VALUE
+ Calculated partition id
+*/
+
+inline
+static uint32 get_part_id_linear_key(partition_info *part_info,
+ Field **field_array,
+ uint no_parts)
+{
+ DBUG_ENTER("get_part_id_linear_key");
+ DBUG_RETURN(get_part_id_from_linear_hash(calculate_key_value(field_array),
+ part_info->linear_hash_mask,
+ no_parts));
+}
+
+/*
+ This function is used to calculate the partition id when all partition
+ fields have been prepared to point to a record where the partition field
+ values are bound.
+ SYNOPSIS
+ get_partition_id()
+ part_info A reference to the partition_info struct where all the
+ desired information is given
+ part_id The partition id is returned through this pointer
+ RETURN VALUE
+ part_id
+ return TRUE means that the fields of the partition function didn't fit
+ into any partition and thus the values of the PF-fields are not allowed.
+ DESCRIPTION
+ A routine used from write_row, update_row and delete_row from any
+ handler supporting partitioning. It is also a support routine for
+ get_partition_set used to find the set of partitions needed to scan
+ for a certain index scan or full table scan.
+
+ There are actually 14 different variants of this function, which are
+ called through a function pointer.
+
+ get_partition_id_list
+ get_partition_id_range
+ get_partition_id_hash_nosub
+ get_partition_id_key_nosub
+ get_partition_id_linear_hash_nosub
+ get_partition_id_linear_key_nosub
+ get_partition_id_range_sub_hash
+ get_partition_id_range_sub_key
+ get_partition_id_range_sub_linear_hash
+ get_partition_id_range_sub_linear_key
+ get_partition_id_list_sub_hash
+ get_partition_id_list_sub_key
+ get_partition_id_list_sub_linear_hash
+ get_partition_id_list_sub_linear_key
+*/
+
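A hedged sketch of the caller side of this function pointer; the handler code that does the actual dispatch is not part of this hunk, so the surrounding names here are only for illustration:

  uint32 part_id;
  if (part_info->get_partition_id(part_info, &part_id))
  {
    /* The partition function value matched no partition: the row must be
       rejected (the concrete error code used by the handler is not shown). */
    return 1;
  }
  /* otherwise route the row to the underlying storage chosen by part_id */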
+/*
+ This function is used to calculate the main partition to use in the case
+ of subpartitioning, when we don't know enough to derive the complete
+ partition identity.
+ SYNOPSIS
+ get_part_partition_id()
+ part_info A reference to the partition_info struct where all the
+ desired information is given
+ part_id The partition id is returned through this pointer
+ RETURN VALUE
+ part_id
+ return TRUE means that the fields of the partition function didn't fit
+ into any partition and thus the values of the PF-fields are not allowed.
+ DESCRIPTION
+
+ There are actually 6 different variants of this function, which are
+ called through a function pointer.
+
+ get_partition_id_list
+ get_partition_id_range
+ get_partition_id_hash_nosub
+ get_partition_id_key_nosub
+ get_partition_id_linear_hash_nosub
+ get_partition_id_linear_key_nosub
+*/
+
+
+bool get_partition_id_list(partition_info *part_info,
+ uint32 *part_id)
+{
+ DBUG_ENTER("get_partition_id_list");
+ LIST_PART_ENTRY *list_array= part_info->list_array;
+ uint list_index;
+ longlong list_value;
+ uint min_list_index= 0, max_list_index= part_info->no_list_values - 1;
+ longlong part_func_value= part_info->part_expr->val_int();
+ while (max_list_index >= min_list_index)
+ {
+ list_index= (max_list_index + min_list_index) >> 1;
+ list_value= list_array[list_index].list_value;
+ if (list_value < part_func_value)
+ min_list_index= list_index + 1;
+ else if (list_value > part_func_value)
+ max_list_index= list_index - 1;
+ else {
+ *part_id= (uint32)list_array[list_index].partition_id;
+ DBUG_RETURN(FALSE);
+ }
+ }
+ *part_id= 0;
+ DBUG_RETURN(TRUE);
+}
+
+
+bool get_partition_id_range(partition_info *part_info,
+ uint32 *part_id)
+{
+ DBUG_ENTER("get_partition_id_range");
+ longlong *range_array= part_info->range_int_array;
+ uint max_partition= part_info->no_parts - 1;
+ uint min_part_id= 0, max_part_id= max_partition, loc_part_id;
+ longlong part_func_value= part_info->part_expr->val_int();
+ while (max_part_id > min_part_id)
+ {
+ loc_part_id= (max_part_id + min_part_id + 1) >> 1;
+ if (range_array[loc_part_id] <= part_func_value)
+ min_part_id= loc_part_id + 1;
+ else
+ max_part_id= loc_part_id - 1;
+ }
+ loc_part_id= max_part_id;
+ if (part_func_value >= range_array[loc_part_id])
+ if (loc_part_id != max_partition)
+ loc_part_id++;
+ *part_id= (uint32)loc_part_id;
+ if (loc_part_id == max_partition)
+ if (range_array[loc_part_id] != LONGLONG_MAX)
+ if (part_func_value >= range_array[loc_part_id])
+ DBUG_RETURN(TRUE);
+ DBUG_RETURN(FALSE);
+}
+
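A worked example of the boundary handling above, with invented bounds: for PARTITION BY RANGE with VALUES LESS THAN bounds 10, 20 and MAXVALUE, range_int_array becomes {10, 20, LONGLONG_MAX}:

  /* part_func_value:  5 -> part 0   (        5 < 10)                  */
  /*                  15 -> part 1   (10 <= 15 < 20)                   */
  /*                  20 -> part 2   (20 <= 20 < MAXVALUE)             */
  /* With only {10, 20} (no MAXVALUE partition) a value of 20 or more  */
  /* makes the function return TRUE: it fits no partition.             */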
+bool get_partition_id_hash_nosub(partition_info *part_info,
+ uint32 *part_id)
+{
+ *part_id= get_part_id_hash(part_info->no_parts, part_info->part_expr);
+ return FALSE;
+}
+
+
+bool get_partition_id_linear_hash_nosub(partition_info *part_info,
+ uint32 *part_id)
+{
+ *part_id= get_part_id_linear_hash(part_info, part_info->no_parts,
+ part_info->part_expr);
+ return FALSE;
+}
+
+
+bool get_partition_id_key_nosub(partition_info *part_info,
+ uint32 *part_id)
+{
+ *part_id= get_part_id_key(part_info->part_field_array, part_info->no_parts);
+ return FALSE;
+}
+
+
+bool get_partition_id_linear_key_nosub(partition_info *part_info,
+ uint32 *part_id)
+{
+ *part_id= get_part_id_linear_key(part_info,
+ part_info->part_field_array,
+ part_info->no_parts);
+ return FALSE;
+}
+
+
+bool get_partition_id_range_sub_hash(partition_info *part_info,
+ uint32 *part_id)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ DBUG_ENTER("get_partition_id_range_sub_hash");
+ if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(FALSE);
+}
+
+
+bool get_partition_id_range_sub_linear_hash(partition_info *part_info,
+ uint32 *part_id)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ DBUG_ENTER("get_partition_id_range_sub_linear_hash");
+ if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
+ part_info->subpart_expr);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(FALSE);
+}
+
+
+bool get_partition_id_range_sub_key(partition_info *part_info,
+ uint32 *part_id)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ DBUG_ENTER("get_partition_id_range_sub_key");
+ if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_key(part_info->subpart_field_array, no_subparts);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(FALSE);
+}
+
+
+bool get_partition_id_range_sub_linear_key(partition_info *part_info,
+ uint32 *part_id)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ DBUG_ENTER("get_partition_id_range_sub_linear_key");
+ if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_linear_key(part_info,
+ part_info->subpart_field_array,
+ no_subparts);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(FALSE);
+}
+
+
+bool get_partition_id_list_sub_hash(partition_info *part_info,
+ uint32 *part_id)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ DBUG_ENTER("get_partition_id_list_sub_hash");
+ if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(FALSE);
+}
+
+
+bool get_partition_id_list_sub_linear_hash(partition_info *part_info,
+ uint32 *part_id)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ DBUG_ENTER("get_partition_id_list_sub_linear_hash");
+ if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
+ part_info->subpart_expr);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(FALSE);
+}
+
+
+bool get_partition_id_list_sub_key(partition_info *part_info,
+ uint32 *part_id)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ DBUG_ENTER("get_partition_id_list_sub_key");
+ if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_key(part_info->subpart_field_array, no_subparts);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(FALSE);
+}
+
+
+bool get_partition_id_list_sub_linear_key(partition_info *part_info,
+ uint32 *part_id)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ DBUG_ENTER("get_partition_id_list_sub_linear_key");
+ if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_linear_key(part_info,
+ part_info->subpart_field_array,
+ no_subparts);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ This function is used to calculate the subpartition id
+ SYNOPSIS
+ get_subpartition_id()
+ part_info A reference to the partition_info struct where all the
+ desired information is given
+ RETURN VALUE
+ part_id
+ The subpartition identity
+ DESCRIPTION
+ A routine used in some SELECTs when only partial knowledge of the
+ partitions is available.
+
+ There are actually 4 different variants of this function, which are
+ called through a function pointer.
+
+ get_partition_id_hash_sub
+ get_partition_id_key_sub
+ get_partition_id_linear_hash_sub
+ get_partition_id_linear_key_sub
+*/
+
+uint32 get_partition_id_hash_sub(partition_info *part_info)
+{
+ return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr);
+}
+
+
+uint32 get_partition_id_linear_hash_sub(partition_info *part_info)
+{
+ return get_part_id_linear_hash(part_info, part_info->no_subparts,
+ part_info->subpart_expr);
+}
+
+
+uint32 get_partition_id_key_sub(partition_info *part_info)
+{
+ return get_part_id_key(part_info->subpart_field_array,
+ part_info->no_subparts);
+}
+
+
+uint32 get_partition_id_linear_key_sub(partition_info *part_info)
+{
+ return get_part_id_linear_key(part_info,
+ part_info->subpart_field_array,
+ part_info->no_subparts);
+}
+
+
+/*
+ Set an indicator on all partition fields that are set by the key
+ SYNOPSIS
+ set_PF_fields_in_key()
+ key_info Information about the index
+ key_length Length of key
+ RETURN VALUE
+ TRUE Found partition field set by key
+ FALSE No partition field set by key
+*/
+
+static bool set_PF_fields_in_key(KEY *key_info, uint key_length)
+{
+ KEY_PART_INFO *key_part;
+ bool found_part_field= FALSE;
+ DBUG_ENTER("set_PF_fields_in_key");
+
+ for (key_part= key_info->key_part; (int)key_length > 0; key_part++)
+ {
+ if (key_part->null_bit)
+ key_length--;
+ if (key_part->type == HA_KEYTYPE_BIT)
+ {
+ if (((Field_bit*)key_part->field)->bit_len)
+ key_length--;
+ }
+ if (key_part->key_part_flag & (HA_BLOB_PART + HA_VAR_LENGTH_PART))
+ {
+ key_length-= HA_KEY_BLOB_LENGTH;
+ }
+ if (key_length < key_part->length)
+ break;
+ key_length-= key_part->length;
+ if (key_part->field->flags & FIELD_IN_PART_FUNC_FLAG)
+ {
+ found_part_field= TRUE;
+ key_part->field->flags|= GET_FIXED_FIELDS_FLAG;
+ }
+ }
+ DBUG_RETURN(found_part_field);
+}
+
+
+/*
+ We have found that at least one partition field was set by a key, now
+ check if a partition function has all its fields bound or not.
+ SYNOPSIS
+ check_part_func_bound()
+ ptr Array of fields NULL terminated (partition fields)
+ RETURN VALUE
+ TRUE All fields in partition function are set
+ FALSE Not all fields in partition function are set
+*/
+
+static bool check_part_func_bound(Field **ptr)
+{
+ bool result= TRUE;
+ DBUG_ENTER("check_part_func_bound");
+
+ for (; *ptr; ptr++)
+ {
+ if (!((*ptr)->flags & GET_FIXED_FIELDS_FLAG))
+ {
+ result= FALSE;
+ break;
+ }
+ }
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Get the id of the subpartitioning part by using the key buffer of the
+ index scan.
+ SYNOPSIS
+ get_sub_part_id_from_key()
+ table The table object
+ buf A buffer that can be used to evaluate the partition function
+ key_info The index object
+ key_spec A key_range containing key and key length
+ RETURN VALUES
+ part_id Subpartition id to use
+ DESCRIPTION
+ Use key buffer to set-up record in buf, move field pointers and
+ get the partition identity and restore field pointers afterwards.
+*/
+
+static uint32 get_sub_part_id_from_key(const TABLE *table,byte *buf,
+ KEY *key_info,
+ const key_range *key_spec)
+{
+ byte *rec0= table->record[0];
+ partition_info *part_info= table->s->part_info;
+ uint32 part_id;
+ DBUG_ENTER("get_sub_part_id_from_key");
+
+ key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length);
+ if (likely(rec0 == buf))
+ part_id= part_info->get_subpartition_id(part_info);
+ else
+ {
+ Field **part_field_array= part_info->subpart_field_array;
+ set_field_ptr(part_field_array, buf, rec0);
+ part_id= part_info->get_subpartition_id(part_info);
+ set_field_ptr(part_field_array, rec0, buf);
+ }
+ DBUG_RETURN(part_id);
+}
+
+/*
+ Get the id of the partitioning part by using the key buffer of the
+ index scan.
+ SYNOPSIS
+ get_part_id_from_key()
+ table The table object
+ buf A buffer that can be used to evaluate the partition function
+ key_info The index object
+ key_spec A key_range containing key and key length
+ part_id Partition to use
+ RETURN VALUES
+ TRUE Partition to use not found
+ FALSE Ok, part_id indicates partition to use
+ DESCRIPTION
+ Use key buffer to set-up record in buf, move field pointers and
+ get the partition identity and restore field pointers afterwards.
+*/
+bool get_part_id_from_key(const TABLE *table, byte *buf, KEY *key_info,
+ const key_range *key_spec, uint32 *part_id)
+{
+ bool result;
+ byte *rec0= table->record[0];
+ partition_info *part_info= table->s->part_info;
+ DBUG_ENTER("get_part_id_from_key");
+
+ key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length);
+ if (likely(rec0 == buf))
+ result= part_info->get_part_partition_id(part_info, part_id);
+ else
+ {
+ Field **part_field_array= part_info->part_field_array;
+ set_field_ptr(part_field_array, buf, rec0);
+ result= part_info->get_part_partition_id(part_info, part_id);
+ set_field_ptr(part_field_array, rec0, buf);
+ }
+ DBUG_RETURN(result);
+}
+
+/*
+ Get the partitioning id of the full PF by using the key buffer of the
+ index scan.
+ SYNOPSIS
+ get_full_part_id_from_key()
+ table The table object
+ buf A buffer that is used to evaluate the partition function
+ key_info The index object
+ key_spec A key_range containing key and key length
+ part_spec A partition id containing start part and end part
+ RETURN VALUES
+ part_spec
+ No partitions to scan is indicated by start_part > end_part when returning
+ DESCRIPTION
+ Use key buffer to set-up record in buf, move field pointers if needed and
+ get the partition identity and restore field pointers afterwards.
+*/
+
+void get_full_part_id_from_key(const TABLE *table, byte *buf,
+ KEY *key_info,
+ const key_range *key_spec,
+ part_id_range *part_spec)
+{
+ bool result;
+ partition_info *part_info= table->s->part_info;
+ byte *rec0= table->record[0];
+ DBUG_ENTER("get_full_part_id_from_key");
+
+ key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length);
+ if (likely(rec0 == buf))
+ result= part_info->get_partition_id(part_info, &part_spec->start_part);
+ else
+ {
+ Field **part_field_array= part_info->full_part_field_array;
+ set_field_ptr(part_field_array, buf, rec0);
+ result= part_info->get_partition_id(part_info, &part_spec->start_part);
+ set_field_ptr(part_field_array, rec0, buf);
+ }
+ part_spec->end_part= part_spec->start_part;
+ if (unlikely(result))
+ part_spec->start_part++;
+ DBUG_VOID_RETURN;
+}
+
+/*
+ Get the set of partitions to use in query.
+ SYNOPSIS
+ get_partition_set()
+ table The table object
+ buf A buffer that can be used to evaluate the partition function
+ index The index of the key used, if MAX_KEY no index used
+ key_spec A key_range containing key and key length
+ part_spec Contains start part, end part and indicator if bitmap is
+ used for which partitions to scan
+ DESCRIPTION
+ This function is called to discover which partitions to use in an index
+ scan or a full table scan.
+ It returns a range of partitions to scan. If there are holes in this
+ range, i.e. partitions within it that do not need to be scanned, a bit
+ array is used to signal which partitions to use and which to skip.
+ If start_part > end_part at return it means no partition needs to be
+ scanned. If start_part == end_part it always means a single partition
+ needs to be scanned.
+ RETURN VALUE
+ part_spec
+*/
+void get_partition_set(const TABLE *table, byte *buf, const uint index,
+ const key_range *key_spec, part_id_range *part_spec)
+{
+ partition_info *part_info= table->s->part_info;
+ uint no_parts= part_info->no_full_parts, i, part_id;
+ uint sub_part= no_parts, part_part= no_parts;
+ KEY *key_info= NULL;
+ bool found_part_field= FALSE;
+ DBUG_ENTER("get_partition_set");
+
+ part_spec->use_bit_array= FALSE;
+ part_spec->start_part= 0;
+ part_spec->end_part= no_parts - 1;
+ if ((index < MAX_KEY) &&
+ key_spec->flag == (uint)HA_READ_KEY_EXACT &&
+ part_info->some_fields_in_PF.is_set(index))
+ {
+ key_info= table->key_info+index;
+ /*
+ The index can potentially provide at least one PF-field (field in the
+ partition function). Thus it is interesting to continue our probe.
+ */
+ if (key_spec->length == key_info->key_length)
+ {
+ /*
+ The entire key is set so we can check whether we can immediately
+ derive either the complete PF, or else the top PF or the
+ subpartitioning PF. This can be established by checking
+ precalculated bits on each index.
+ */
+ if (part_info->all_fields_in_PF.is_set(index))
+ {
+ /*
+ We can derive the exact partition to use, no more than this one
+ is needed.
+ */
+ get_full_part_id_from_key(table,buf,key_info,key_spec,part_spec);
+ DBUG_VOID_RETURN;
+ }
+ else if (is_sub_partitioned(part_info))
+ {
+ if (part_info->all_fields_in_SPF.is_set(index))
+ sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
+ else if (part_info->all_fields_in_PPF.is_set(index))
+ {
+ if (get_part_id_from_key(table,buf,key_info,key_spec,&part_part))
+ {
+ /*
+ The value of the RANGE or LIST partitioning was outside of
+ allowed values. Thus it is certain that the result of this
+ scan will be empty.
+ */
+ part_spec->start_part= no_parts;
+ DBUG_VOID_RETURN;
+ }
+ }
+ }
+ }
+ else
+ {
+ /*
+ Set an indicator on all partition fields that are bound.
+ If at least one PF-field was bound it pays off to check whether
+ the PF or PPF or SPF has been bound.
+ (PF = Partition Function, SPF = Subpartition Function and
+ PPF = Partition Function part of subpartitioning)
+ */
+ if ((found_part_field= set_PF_fields_in_key(key_info,
+ key_spec->length)))
+ {
+ if (check_part_func_bound(part_info->full_part_field_array))
+ {
+ /*
+ We were able to bind all fields in the partition function even
+ by using only a part of the key. Calculate the partition to use.
+ */
+ get_full_part_id_from_key(table,buf,key_info,key_spec,part_spec);
+ clear_indicator_in_key_fields(key_info);
+ DBUG_VOID_RETURN;
+ }
+ else if (check_part_func_bound(part_info->part_field_array))
+ sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
+ else if (check_part_func_bound(part_info->subpart_field_array))
+ {
+ if (get_part_id_from_key(table,buf,key_info,key_spec,&part_part))
+ {
+ part_spec->start_part= no_parts;
+ clear_indicator_in_key_fields(key_info);
+ DBUG_VOID_RETURN;
+ }
+ }
+ }
+ }
+ }
+ {
+ /*
+ The next step is to analyse the table condition to see whether any
+ information about which partitions to scan can be derived from there.
+ Currently not implemented.
+ */
+ }
+ /*
+ If we come here we have found a range of sorts: either we have
+ discovered nothing, or we have discovered a range of partitions with
+ possible holes in it. We need a bit vector to continue the work here.
+ */
+ if (!(part_part == no_parts && sub_part == no_parts))
+ {
+ /*
+ We can only arrive here if we are using subpartitioning.
+ */
+ if (part_part != no_parts)
+ {
+ /*
+ We know the top partition and need to scan all underlying
+ subpartitions. This is a range without holes.
+ */
+ DBUG_ASSERT(sub_part == no_parts);
+ part_spec->start_part= part_part * part_info->no_subparts;
+ part_spec->end_part= part_spec->start_part+part_info->no_subparts - 1;
+ }
+ else
+ {
+ DBUG_ASSERT(sub_part != no_parts);
+ part_spec->use_bit_array= TRUE;
+ part_spec->start_part= sub_part;
+ part_spec->end_part=sub_part+
+ (part_info->no_subparts*(part_info->no_parts-1));
+ for (i= 0, part_id= sub_part; i < part_info->no_parts;
+ i++, part_id+= part_info->no_subparts)
+ ; //Set bit part_id in bit array
+ }
+ }
+ if (found_part_field)
+ clear_indicator_in_key_fields(key_info);
+ DBUG_VOID_RETURN;
+}
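A hedged sketch of how a scanning caller is expected to consume part_spec afterwards; the real callers live in the partition handler, which is not part of this hunk, and the bit array is only hinted at above, so active_index, key and the bit test are assumptions:

  part_id_range part_spec;
  get_partition_set(table, table->record[0], active_index, &key, &part_spec);
  if (part_spec.start_part > part_spec.end_part)
    return HA_ERR_END_OF_FILE;                 /* nothing to scan at all */
  for (uint32 id= part_spec.start_part; id <= part_spec.end_part; id++)
  {
    if (part_spec.use_bit_array /* && bit for id not set */)
      continue;                                /* hole in the range */
    /* scan partition id */
  }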
+
+
+/*
+ If the table is partitioned we will read the partition info into the
+ .frm file here.
+ -------------------------------
+ | Fileinfo 64 bytes |
+ -------------------------------
+ | Formnames 7 bytes |
+ -------------------------------
+ | Not used 4021 bytes |
+ -------------------------------
+ | Keyinfo + record |
+ -------------------------------
+ | Padded to next multiple |
+ | of IO_SIZE |
+ -------------------------------
+ | Forminfo 288 bytes |
+ -------------------------------
+ | Screen buffer, to make |
+ | field names readable |
+ -------------------------------
+ | Packed field info |
+ | 17 + 1 + strlen(field_name) |
+ | + 1 end of file character |
+ -------------------------------
+ | Partition info |
+ -------------------------------
+ We store the length of the partition info in Fileinfo[55-58].
+
+ Read the partition syntax from the frm file and parse it to get the
+ data structures of the partitioning.
+ SYNOPSIS
+ mysql_unpack_partition()
+ file File reference of frm file
+ thd Thread object
+ part_info_len Length of partition syntax
+ table Table object of partitioned table
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Read the partition syntax from the current position in the frm file.
+ Initiate a LEX object, save the list of item tree objects to free after
+ the query is done. Set up the partition info object so that the parser
+ knows it is called internally. Call the parser to create the data
+ structures (the best possible recreation of the item trees and so forth,
+ since there is no serialisation of these objects other than in parseable
+ text format).
+ We need to save the text of the partition functions since it is not
+ possible to retrace it given an item tree.
+*/
+
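The 4-byte length stored in Fileinfo[55-58] would be picked up by the .frm reader before this function runs; a hedged sketch of that step (uint4korr is the usual little-endian reader, the exact call site is not in this hunk):

  /* head points at the 64-byte Fileinfo block of the .frm file */
  uint part_info_len= uint4korr(head + 55);
  if (part_info_len)
  {
    /* seek to the stored partition syntax at the end of the file, then */
    if (mysql_unpack_partition(file, thd, part_info_len, table))
      goto err;                                /* error handling assumed */
  }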
+bool mysql_unpack_partition(File file, THD *thd, uint part_info_len,
+ TABLE* table)
+{
+ Item *thd_free_list= thd->free_list;
+ bool result= TRUE;
+ uchar* part_buf= NULL;
+ partition_info *part_info;
+ LEX *old_lex= thd->lex, lex;
+ DBUG_ENTER("mysql_unpack_partition");
+ if (read_string(file, (gptr*)&part_buf, part_info_len))
+ DBUG_RETURN(result);
+ thd->lex= &lex;
+ lex_start(thd, part_buf, part_info_len);
+ /*
+ We need to use the current SELECT_LEX since we need to keep the
+ Name_resolution_context object which is referenced from the
+ Item_field objects.
+ This is not a nice solution since if the parser uses current_select
+ for anything else it will corrupt the current LEX object.
+ */
+ thd->lex->current_select= old_lex->current_select;
+ /*
+ All Items created are put into a free list on the THD object. This list
+ is used to free all Item objects after completing a query. We don't
+ want that to happen with the Item tree created as part of the partition
+ info. This should be attached to the table object and remain so until
+ the table object is released.
+ Thus we move away the current list temporarily and start a new list that
+ we then save in the partition info structure.
+ */
+ thd->free_list= NULL;
+ lex.part_info= (partition_info*)1; // Indicate to yyparse that this is an internal partition parse
+ if (yyparse((void*)thd) || thd->is_fatal_error)
+ {
+ free_items(thd->free_list);
+ goto end;
+ }
+ part_info= lex.part_info;
+ table->s->part_info= part_info;
+ part_info->item_free_list= thd->free_list;
+
+ {
+ /*
+ This code part allocates memory for the serialised item information for
+ the partition functions. In most cases this is not needed, but if the
+ table is used for SHOW CREATE TABLE or for an ALTER TABLE that modifies
+ the partition information it is needed, and the info is lost if we don't
+ save it here. So unfortunately we have to do it even when it is usually
+ not needed. This is a consequence of item trees not being serialisable.
+ */
+ uint part_func_len= part_info->part_func_len;
+ uint subpart_func_len= part_info->subpart_func_len;
+ char *part_func_string, *subpart_func_string= NULL;
+ if (!((part_func_string= sql_alloc(part_func_len))) ||
+ (subpart_func_len &&
+ !((subpart_func_string= sql_alloc(subpart_func_len)))))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), part_func_len);
+ free_items(thd->free_list);
+ part_info->item_free_list= 0;
+ goto end;
+ }
+ memcpy(part_func_string, part_info->part_func_string, part_func_len);
+ if (subpart_func_len)
+ memcpy(subpart_func_string, part_info->subpart_func_string,
+ subpart_func_len);
+ part_info->part_func_string= part_func_string;
+ part_info->subpart_func_string= subpart_func_string;
+ }
+
+ result= FALSE;
+end:
+ thd->free_list= thd_free_list;
+ x_free((gptr)part_buf);
+ thd->lex= old_lex;
+ DBUG_RETURN(result);
+}
+#endif
+
+/*
+ Prepare for calling val_int on partition function by setting fields to
+ point to the record where the values of the PF-fields are stored.
+ SYNOPSIS
+ set_field_ptr()
+ ptr Array of fields to change ptr
+ new_buf New record pointer
+ old_buf Old record pointer
+ DESCRIPTION
+ Set ptr in the field objects of the field array to refer to the new_buf
+ record instead of the previous old_buf. Used before calling val_int, and
+ afterwards to restore the pointers to table->record[0].
+ This routine is placed outside of the partition code since it can be
+ useful also for other parts of the server.
+*/
+
+void set_field_ptr(Field **ptr, const byte *new_buf,
+ const byte *old_buf)
+{
+ my_ptrdiff_t diff= (new_buf - old_buf);
+ DBUG_ENTER("set_field_ptr");
+
+ do
+ {
+ (*ptr)->move_field(diff);
+ } while (*(++ptr));
+ DBUG_VOID_RETURN;
+}
+
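The intended move/restore usage pattern, repeated here only as a compact illustration (the same pattern already appears in get_part_id_from_key above; other_buf and part_id are placeholder names):

  Field **fields= part_info->full_part_field_array;
  uint32 part_id;
  set_field_ptr(fields, other_buf, table->record[0]);  /* point at other_buf */
  part_info->get_partition_id(part_info, &part_id);    /* evaluate there     */
  set_field_ptr(fields, table->record[0], other_buf);  /* restore pointers   */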
+
+/*
+ Prepare for calling val_int on partition function by setting fields to
+ point to the record where the values of the PF-fields are stored.
+ This variant works on a key_part reference.
+ It is not required that all fields are NOT NULL fields.
+ SYNOPSIS
+ set_key_field_ptr()
+ key_part key part with a set of fields to change ptr
+ new_buf New record pointer
+ old_buf Old record pointer
+ DESCRIPTION
+ Set ptr in the field objects of the key parts to refer to the new_buf
+ record instead of the previous old_buf. Used before calling val_int, and
+ afterwards to restore the pointers to table->record[0].
+ This routine is placed outside of the partition code since it can be
+ useful also for other parts of the server.
+*/
+
+void set_key_field_ptr(KEY *key_info, const byte *new_buf,
+ const byte *old_buf)
+{
+ KEY_PART_INFO *key_part= key_info->key_part;
+ uint key_parts= key_info->key_parts, i= 0;
+ my_ptrdiff_t diff= (new_buf - old_buf);
+ DBUG_ENTER("set_key_field_ptr");
+
+ do
+ {
+ key_part->field->move_field(diff);
+ key_part++;
+ } while (++i < key_parts);
+ DBUG_VOID_RETURN;
+}
+
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index cec432a86be..87366fe157d 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1778,10 +1778,10 @@ bool mysql_stmt_prepare(THD *thd, char *packet, uint packet_length,
/* Reset warnings from previous command */
mysql_reset_errors(thd, 0);
lex= thd->lex;
- lex->safe_to_cache_query= 0;
error= yyparse((void *)thd) || thd->is_fatal_error ||
thd->net.report_error || init_param_array(stmt);
+ lex->safe_to_cache_query= 0;
/*
While doing context analysis of the query (in check_prepared_statement)
we allocate a lot of additional memory: for open tables, JOINs, derived
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 32a8378d41d..d376423e990 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -19,6 +19,7 @@
#include "sql_repl.h"
#include "log_event.h"
+#include "rpl_filter.h"
#include <my_dir.h>
int max_binlog_dump_events = 0; // unlimited
@@ -1455,8 +1456,8 @@ bool show_binlog_info(THD* thd)
int dir_len = dirname_length(li.log_file_name);
protocol->store(li.log_file_name + dir_len, &my_charset_bin);
protocol->store((ulonglong) li.pos);
- protocol->store(&binlog_do_db);
- protocol->store(&binlog_ignore_db);
+ protocol->store(binlog_filter->get_do_db());
+ protocol->store(binlog_filter->get_ignore_db());
if (protocol->write())
DBUG_RETURN(TRUE);
}
diff --git a/sql/sql_repl.h b/sql/sql_repl.h
index 9eb6456ee20..ba64e626adc 100644
--- a/sql/sql_repl.h
+++ b/sql/sql_repl.h
@@ -31,7 +31,6 @@ typedef struct st_slave_info
extern my_bool opt_show_slave_auth_info;
extern char *master_host, *master_info_file;
extern bool server_id_supplied;
-extern I_List<i_string> binlog_do_db, binlog_ignore_db;
extern int max_binlog_dump_events;
extern my_bool opt_sporadic_binlog_dump_fail;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index f757ccaef8e..e994b5ff26b 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -941,23 +941,19 @@ JOIN::optimize()
}
/*
- Need to tell Innobase that to play it safe, it should fetch all
- columns of the tables: this is because MySQL may build row
- pointers for the rows, and for all columns of the primary key the
- field->query_id has not necessarily been set to thd->query_id by
- MySQL.
+ Need to tell handlers that, to play it safe, they should fetch all
+ columns of the primary key of the tables: this is because MySQL may
+ build row pointers for the rows, and for all columns of the primary key
+ the read set has not necessarily been set by the server code.
*/
-
-#ifdef HAVE_INNOBASE_DB
if (need_tmp || select_distinct || group_list || order)
{
for (uint i_h = const_tables; i_h < tables; i_h++)
{
TABLE* table_h = join_tab[i_h].table;
- table_h->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
+ table_h->file->ha_retrieve_all_pk();
}
}
-#endif
DBUG_EXECUTE("info",TEST_join(this););
@@ -1285,6 +1281,9 @@ JOIN::exec()
/* Copy data to the temporary table */
thd->proc_info= "Copying to tmp table";
DBUG_PRINT("info", ("%s", thd->proc_info));
+ if (!curr_join->sort_and_group &&
+ curr_join->const_tables != curr_join->tables)
+ curr_join->join_tab[curr_join->const_tables].sorted= 0;
if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0)))
{
error= tmp_error;
@@ -1431,6 +1430,9 @@ JOIN::exec()
1, TRUE))
DBUG_VOID_RETURN;
curr_join->group_list= 0;
+ if (!curr_join->sort_and_group &&
+ curr_join->const_tables != curr_join->tables)
+ curr_join->join_tab[curr_join->const_tables].sorted= 0;
if (setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) ||
(tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table,
0)))
@@ -1616,6 +1618,16 @@ JOIN::exec()
(select_options & OPTION_FOUND_ROWS ?
HA_POS_ERROR : unit->select_limit_cnt)))
DBUG_VOID_RETURN;
+ if (curr_join->const_tables != curr_join->tables &&
+ !curr_join->join_tab[curr_join->const_tables].table->sort.io_cache)
+ {
+ /*
+ If no IO cache exists for the first table then we are using an
+ INDEX SCAN and no filesort. Thus we should not remove the sorted
+ attribute on the INDEX SCAN.
+ */
+ skip_sort_order= 1;
+ }
}
}
/* XXX: When can we have here thd->net.report_error not zero? */
@@ -5708,6 +5720,7 @@ make_join_readinfo(JOIN *join, uint options)
uint i;
bool statistics= test(!(join->select_options & SELECT_DESCRIBE));
+ bool sorted= 1;
DBUG_ENTER("make_join_readinfo");
for (i=join->const_tables ; i < join->tables ; i++)
@@ -5717,6 +5730,8 @@ make_join_readinfo(JOIN *join, uint options)
tab->read_record.table= table;
tab->read_record.file=table->file;
tab->next_select=sub_select; /* normal select */
+ tab->sorted= sorted;
+ sorted= 0; // only first must be sorted
switch (tab->type) {
case JT_SYSTEM: // Only happens with left join
table->status=STATUS_NO_RECORD;
@@ -8093,7 +8108,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
uint hidden_null_count, hidden_null_pack_length, hidden_field_count;
uint blob_count,group_null_items, string_count;
uint temp_pool_slot=MY_BIT_NONE;
- ulong reclength, string_total_length;
+ ulong reclength, string_total_length, fieldnr= 0;
bool using_unique_constraint= 0;
bool use_packed_rows= 0;
bool not_all_columns= !(select_options & TMP_TABLE_ALL_COLUMNS);
@@ -8116,7 +8131,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
statistic_increment(thd->status_var.created_tmp_tables, &LOCK_status);
if (use_temp_pool)
- temp_pool_slot = bitmap_set_next(&temp_pool);
+ temp_pool_slot = bitmap_lock_set_next(&temp_pool);
if (temp_pool_slot != MY_BIT_NONE) // we got a slot
sprintf(path, "%s_%lx_%i", tmp_file_prefix,
@@ -8168,12 +8183,12 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
param->group_length : 0,
NullS))
{
- bitmap_clear_bit(&temp_pool, temp_pool_slot);
+ bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
DBUG_RETURN(NULL); /* purecov: inspected */
}
if (!(param->copy_field=copy=new Copy_field[field_count]))
{
- bitmap_clear_bit(&temp_pool, temp_pool_slot);
+ bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
my_free((gptr) table,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NULL); /* purecov: inspected */
}
@@ -8204,6 +8219,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
table->s->tmp_table= TMP_TABLE;
table->s->db_low_byte_first=1; // True for HEAP and MyISAM
table->s->table_charset= param->table_charset;
+ table->s->primary_key= MAX_KEY; //Indicate no primary key
table->s->keys_for_keyread.init();
table->s->keys_in_use.init();
/* For easier error reporting */
@@ -8279,6 +8295,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
(*argp)->maybe_null=1;
}
new_field->query_id= thd->query_id;
+ new_field->fieldnr= ++fieldnr;
}
}
}
@@ -8326,6 +8343,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
new_field->flags|= GROUP_FLAG;
}
new_field->query_id= thd->query_id;
+ new_field->fieldnr= ++fieldnr;
new_field->field_index= (uint) (reg_field - table->field);
*(reg_field++) =new_field;
}
@@ -8335,6 +8353,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
DBUG_ASSERT(field_count >= (uint) (reg_field - table->field));
field_count= (uint) (reg_field - table->field);
*blob_field= 0; // End marker
+ table->s->fields= field_count;
/* If result table is small; use a heap */
if (blob_count || using_unique_constraint ||
@@ -8351,7 +8370,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
{
table->file=get_new_handler(table,table->s->db_type= DB_TYPE_HEAP);
}
-
+ if (table->s->fields)
+ {
+ table->file->ha_set_all_bits_in_read_set();
+ table->file->ha_set_all_bits_in_write_set();
+ }
if (!using_unique_constraint)
reclength+= group_null_items; // null flag is stored separately
@@ -8377,7 +8400,6 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
string_total_length / string_count >= AVG_STRING_LENGTH_TO_PACK_ROWS))
use_packed_rows= 1;
- table->s->fields= field_count;
table->s->reclength= reclength;
{
uint alloc_length=ALIGN_SIZE(reclength+MI_UNIQUE_HASH_LENGTH+1);
@@ -8622,7 +8644,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
err:
free_tmp_table(thd,table); /* purecov: inspected */
- bitmap_clear_bit(&temp_pool, temp_pool_slot);
+ bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
DBUG_RETURN(NULL); /* purecov: inspected */
}
@@ -8896,7 +8918,7 @@ free_tmp_table(THD *thd, TABLE *entry)
my_free((gptr) entry->record[0],MYF(0));
free_io_cache(entry);
- bitmap_clear_bit(&temp_pool, entry->temp_pool_slot);
+ bitmap_lock_clear_bit(&temp_pool, entry->temp_pool_slot);
my_free((gptr) entry,MYF(0));
thd->proc_info=save_proc_info;
@@ -8957,7 +8979,12 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
new_table.file->extra(HA_EXTRA_WRITE_CACHE);
#endif
- /* copy all old rows */
+ /*
+ Copy all old rows from the heap table to the MyISAM table.
+ This is the only code that uses record[1] to read/write, but this
+ is safe as this is a temporary MyISAM table without timestamp/autoincrement
+ or partitioning.
+ */
while (!table->file->rnd_next(new_table.record[1]))
{
if ((write_err=new_table.file->write_row(new_table.record[1])))
@@ -8992,8 +9019,8 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
(void) new_table.file->close();
err1:
new_table.file->delete_table(new_table.s->table_name);
- delete new_table.file;
err2:
+ delete new_table.file;
thd->proc_info=save_proc_info;
DBUG_RETURN(1);
}
@@ -9088,7 +9115,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
empty_record(table);
if (table->group && join->tmp_table_param.sum_func_count &&
table->s->keys && !table->file->inited)
- table->file->ha_index_init(0);
+ table->file->ha_index_init(0, 0);
}
/* Set up select_end */
join->join_tab[join->tables-1].next_select= setup_end_select_func(join);
@@ -9702,7 +9729,13 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos)
table->file->extra(HA_EXTRA_KEYREAD);
tab->index= tab->ref.key;
}
- if ((error=join_read_const(tab)))
+ error=join_read_const(tab);
+ if (table->key_read)
+ {
+ table->key_read=0;
+ table->file->extra(HA_EXTRA_NO_KEYREAD);
+ }
+ if (error)
{
tab->info="unique row not found";
/* Mark for EXPLAIN that the row was not found */
@@ -9710,11 +9743,6 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos)
if (!table->maybe_null || error > 0)
DBUG_RETURN(error);
}
- if (table->key_read)
- {
- table->key_read=0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
}
if (*tab->on_expr_ref && !table->null_row)
{
@@ -9786,7 +9814,7 @@ join_read_const(JOIN_TAB *tab)
table->status= STATUS_NOT_FOUND;
mark_as_null_row(tab->table);
empty_record(table);
- if (error != HA_ERR_KEY_NOT_FOUND)
+ if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
return -1;
}
@@ -9809,7 +9837,9 @@ join_read_key(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
+ {
+ table->file->ha_index_init(tab->ref.key, tab->sorted);
+ }
if (cmp_buffer_with_ref(tab) ||
(table->status & (STATUS_GARBAGE | STATUS_NO_PARENT | STATUS_NULL_ROW)))
{
@@ -9821,7 +9851,7 @@ join_read_key(JOIN_TAB *tab)
error=table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT);
- if (error && error != HA_ERR_KEY_NOT_FOUND)
+ if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
}
table->null_row=0;
@@ -9836,14 +9866,16 @@ join_read_always_key(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
+ {
+ table->file->ha_index_init(tab->ref.key, tab->sorted);
+ }
if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
return -1;
if ((error=table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT)))
{
- if (error != HA_ERR_KEY_NOT_FOUND)
+ if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
return -1; /* purecov: inspected */
}
@@ -9863,14 +9895,14 @@ join_read_last_key(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
+ table->file->ha_index_init(tab->ref.key, tab->sorted);
if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
return -1;
if ((error=table->file->index_read_last(table->record[0],
tab->ref.key_buff,
tab->ref.key_length)))
{
- if (error != HA_ERR_KEY_NOT_FOUND)
+ if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
return -1; /* purecov: inspected */
}
@@ -9973,7 +10005,7 @@ join_read_first(JOIN_TAB *tab)
tab->read_record.index=tab->index;
tab->read_record.record=table->record[0];
if (!table->file->inited)
- table->file->ha_index_init(tab->index);
+ table->file->ha_index_init(tab->index, tab->sorted);
if ((error=tab->table->file->index_first(tab->table->record[0])))
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
@@ -10012,7 +10044,7 @@ join_read_last(JOIN_TAB *tab)
tab->read_record.index=tab->index;
tab->read_record.record=table->record[0];
if (!table->file->inited)
- table->file->ha_index_init(tab->index);
+ table->file->ha_index_init(tab->index, 1);
if ((error= tab->table->file->index_last(tab->table->record[0])))
return report_error(table, error);
return 0;
@@ -10036,7 +10068,7 @@ join_ft_read_first(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
+ table->file->ha_index_init(tab->ref.key, 1);
#if NOT_USED_YET
if (cp_buffer_from_ref(tab->join->thd, &tab->ref)) // as ft-key doesn't use store_key's
return -1; // see also FT_SELECT::init()
@@ -10422,7 +10454,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
error, 0))
DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error
/* Change method to update rows */
- table->file->ha_index_init(0);
+ table->file->ha_index_init(0, 0);
join->join_tab[join->tables-1].next_select=end_unique_update;
}
join->send_records++;
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 7f6d661a4de..2b2557de180 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -133,6 +133,7 @@ typedef struct st_join_table {
uint used_fields,used_fieldlength,used_blobs;
enum join_type type;
bool cached_eq_ref_table,eq_ref_table,not_used_in_distinct;
+ bool sorted;
TABLE_REF ref;
JOIN_CACHE cache;
JOIN *join;
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 6989eacf334..d56c14e2836 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -964,11 +964,16 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
packet->append("\n)", 2);
if (!(thd->variables.sql_mode & MODE_NO_TABLE_OPTIONS) && !foreign_db_mode)
{
- if (thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))
- packet->append(" TYPE=", 6);
- else
- packet->append(" ENGINE=", 8);
- packet->append(file->table_type());
+#ifdef HAVE_PARTITION_DB
+ if (!table->s->part_info)
+#endif
+ {
+ if (thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))
+ packet->append(" TYPE=", 6);
+ else
+ packet->append(" ENGINE=", 8);
+ packet->append(file->table_type());
+ }
if (share->table_charset &&
!(thd->variables.sql_mode & MODE_MYSQL323) &&
@@ -1035,6 +1040,23 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
append_directory(thd, packet, "DATA", create_info.data_file_name);
append_directory(thd, packet, "INDEX", create_info.index_file_name);
}
+#ifdef HAVE_PARTITION_DB
+ {
+ /*
+ Partition syntax for CREATE TABLE is at the end of the syntax.
+ */
+ uint part_syntax_len;
+ char *part_syntax;
+ if (table->s->part_info &&
+ ((part_syntax= generate_partition_syntax(table->s->part_info,
+ &part_syntax_len,
+ FALSE))))
+ {
+ packet->append(part_syntax, part_syntax_len);
+ my_free(part_syntax, MYF(0));
+ }
+ }
+#endif
DBUG_RETURN(0);
}
@@ -2744,7 +2766,7 @@ int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond)
{
DBUG_RETURN(1);
}
- proc_table->file->ha_index_init(0);
+ proc_table->file->ha_index_init(0, 1);
if ((res= proc_table->file->index_first(proc_table->record[0])))
{
res= (res == HA_ERR_END_OF_FILE) ? 0 : 1;
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index e3f85f05c17..7d0691455a0 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -30,6 +30,7 @@
#include <io.h>
#endif
+
const char *primary_key_name="PRIMARY";
static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end);
@@ -1507,7 +1508,66 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
if (create_info->row_type == ROW_TYPE_DYNAMIC)
db_options|=HA_OPTION_PACK_RECORD;
alias= table_case_name(create_info, table_name);
- file=get_new_handler((TABLE*) 0, create_info->db_type);
+ if (!(file=get_new_handler((TABLE*) 0, create_info->db_type)))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), 128);//128 bytes invented
+ DBUG_RETURN(TRUE);
+ }
+#ifdef HAVE_PARTITION_DB
+ partition_info *part_info= thd->lex->part_info;
+ if (part_info)
+ {
+ /*
+ The table has been specified as a partitioned table.
+ If this is part of an ALTER TABLE the handler will be the partition
+ handler, but we need to specify the default handler to use for the
+ partitions also in the call to check_partition_info. We transport
+ this information in the default_db_type variable; it is either
+ DB_TYPE_DEFAULT or the engine set in the ALTER TABLE command.
+ */
+ enum db_type part_engine_type= create_info->db_type;
+ char *part_syntax_buf;
+ uint syntax_len;
+ if (part_engine_type == DB_TYPE_PARTITION_DB)
+ {
+ /*
+ This only happens at ALTER TABLE.
+ default_engine_type was assigned from the engine set in the ALTER
+ TABLE command.
+ */
+ part_engine_type= ha_checktype(thd,
+ part_info->default_engine_type, 0, 0);
+ }
+ if (check_partition_info(part_info, part_engine_type,
+ file, create_info->max_rows))
+ DBUG_RETURN(TRUE);
+ /*
+ We reverse the partitioning parser, i.e. generate a standard-format
+ partition syntax string to be stored in the frm file.
+ */
+ if (!(part_syntax_buf= generate_partition_syntax(part_info,
+ &syntax_len,
+ TRUE)))
+ DBUG_RETURN(TRUE);
+ part_info->part_info_string= part_syntax_buf;
+ part_info->part_info_len= syntax_len;
+ if ((!(file->partition_flags() & HA_CAN_PARTITION)) ||
+ create_info->db_type == DB_TYPE_PARTITION_DB)
+ {
+ /*
+ The handler assigned to the table cannot handle partitioning.
+ Assign the partition handler as the handler of the table.
+ */
+ DBUG_PRINT("info", ("db_type= %d, part_flag= %d", create_info->db_type,file->partition_flags()));
+ delete file;
+ create_info->db_type= DB_TYPE_PARTITION_DB;
+ if (!(file= get_ha_partition(part_info)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+#endif
#ifdef NOT_USED
/*
@@ -1521,7 +1581,7 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
(file->table_flags() & HA_NO_TEMP_TABLES))
{
my_error(ER_ILLEGAL_HA, MYF(0), table_name);
- DBUG_RETURN(TRUE);
+ goto err;
}
#endif
@@ -1544,7 +1604,7 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
&keys, internal_tmp_table, &db_options, file,
&key_info_buffer, &key_count,
select_field_count))
- DBUG_RETURN(TRUE);
+ goto err;
/* Check if table exists */
if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
@@ -1566,13 +1626,13 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
{
create_info->table_existed= 1; // Mark that table existed
- DBUG_RETURN(FALSE);
+ goto no_err;
}
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias);
- DBUG_RETURN(TRUE);
+ goto err;
}
if (wait_if_global_read_lock(thd, 0, 1))
- DBUG_RETURN(error);
+ goto err;
VOID(pthread_mutex_lock(&LOCK_open));
if (!internal_tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE))
{
@@ -1625,7 +1685,7 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
create_info->table_options=db_options;
if (rea_create_table(thd, path, create_info, fields, key_count,
- key_info_buffer))
+ key_info_buffer, file))
{
/* my_error(ER_CANT_CREATE_TABLE,MYF(0),table_name,my_errno); */
goto end;
@@ -1651,8 +1711,16 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
end:
VOID(pthread_mutex_unlock(&LOCK_open));
start_waiting_global_read_lock(thd);
+ delete file;
thd->proc_info="After create";
DBUG_RETURN(error);
+
+err:
+ delete file;
+ DBUG_RETURN(TRUE);
+no_err:
+ delete file;
+ DBUG_RETURN(FALSE);
}
/*
@@ -3131,6 +3199,59 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
old_db_type= table->s->db_type;
if (create_info->db_type == DB_TYPE_DEFAULT)
create_info->db_type= old_db_type;
+#ifdef HAVE_PARTITION_DB
+ /*
+ When thd->lex->part_info has a reference to a partition_info the
+ ALTER TABLE contained a definition of a partitioning.
+
+ Case I:
+ If there was a partition before and there is a new one defined.
+ We use the new partitioning. The new partitioning is already
+ defined in the correct variable so no work is needed to
+ accomplish this.
+
+ Case IIa:
+ There was a partitioning before and there is no new one defined.
+ Also the user has not specified an explicit engine to use.
+
+ We use the old partitioning also for the new table. We do this
+ by assigning the partition_info from the table loaded in
+ open_ltable to the partition_info struct used by mysql_create_table
+ later in this method.
+
+ Case IIb:
+ There was a partitioning before and there is no new one defined.
+ The user has specified an explicit engine to use.
+
+ Since the user has specified an explicit engine to use we override
+ the old partitioning info and create a new table using the specified
+ engine. This is the reason for the extra check of whether the old and
+ new engines are equal.
+
+ Case III:
+ There was no partitioning before altering the table, there is
+ partitioning defined in the altered table. Use the new partitioning.
+ No work needed since the partitioning info is already in the
+ correct variable.
+
+ Case IV:
+ There was no partitioning before and no partitioning defined. Obviously
+ no work needed.
+ */
+ if (table->s->part_info)
+ if (!thd->lex->part_info &&
+ create_info->db_type == old_db_type)
+ thd->lex->part_info= table->s->part_info;
+ if (thd->lex->part_info)
+ {
+ /*
+ Need to cater for engine types that can handle partitioning without
+ using the partition handler.
+ */
+ thd->lex->part_info->default_engine_type= create_info->db_type;
+ create_info->db_type= DB_TYPE_PARTITION_DB;
+ }
+#endif
if (check_engine(thd, new_name, &create_info->db_type))
DBUG_RETURN(TRUE);
new_db_type= create_info->db_type;
@@ -3916,7 +4037,8 @@ copy_data_between_tables(TABLE *from,TABLE *to,
this function does not set field->query_id in the columns to the
current query id
*/
- from->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ to->file->ha_set_all_bits_in_write_set();
+ from->file->ha_retrieve_all_cols();
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
if (ignore ||
handle_duplicates == DUP_REPLACE)
@@ -4079,10 +4201,11 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
/* calculating table's checksum */
ha_checksum crc= 0;
- /* InnoDB must be told explicitly to retrieve all columns, because
- this function does not set field->query_id in the columns to the
- current query id */
- t->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ /*
+ Set all bits in read set and inform InnoDB that we are reading all
+ fields
+ */
+ t->file->ha_retrieve_all_cols();
if (t->file->ha_rnd_init(1))
protocol->store_null();
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index e0c3034a58a..453b9324e88 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -528,7 +528,7 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name)
if (!(table = open_ltable(thd,&tables,TL_WRITE)))
goto err;
table->field[0]->store(udf_name->str, udf_name->length, system_charset_info);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ table->file->ha_retrieve_all_cols();
if (!table->file->index_read_idx(table->record[0], 0,
(byte*) table->field[0]->ptr,
table->key_info[0].key_length,
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 8a5b4ad8eae..95d0f500df8 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -119,10 +119,10 @@ int mysql_update(THD *thd,
{
bool using_limit= limit != HA_POS_ERROR;
bool safe_update= thd->options & OPTION_SAFE_UPDATES;
- bool used_key_is_modified, transactional_table;
+ bool used_key_is_modified, transactional_table, will_batch;
int res;
- int error=0;
- uint used_index;
+ int error=0, loc_error;
+ uint used_index, dup_key_found;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
uint want_privilege;
#endif
@@ -148,7 +148,7 @@ int mysql_update(THD *thd,
/* pass counter value */
thd->lex->table_count= table_count;
/* convert to multiupdate */
- return 2;
+ DBUG_RETURN(2);
}
if (lock_tables(thd, table_list, table_count) ||
@@ -187,7 +187,11 @@ int mysql_update(THD *thd,
#ifndef NO_EMBEDDED_ACCESS_CHECKS
table_list->grant.want_privilege= table->grant.want_privilege= want_privilege;
#endif
- if (setup_fields_with_no_wrap(thd, 0, fields, 1, 0, 0))
+ /*
+ Indicate that the set of fields is to be updated by passing 2 for
+ set_query_id.
+ */
+ if (setup_fields_with_no_wrap(thd, 0, fields, 2, 0, 0))
DBUG_RETURN(1); /* purecov: inspected */
if (table_list->view && check_fields(thd, fields))
{
@@ -204,7 +208,10 @@ int mysql_update(THD *thd,
if (table->timestamp_field->query_id == thd->query_id)
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
else
+ {
table->timestamp_field->query_id=timestamp_query_id;
+ table->file->ha_set_bit_in_write_set(table->timestamp_field->fieldnr);
+ }
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -258,13 +265,18 @@ int mysql_update(THD *thd,
else
used_key_is_modified=0;
+#ifdef HAVE_PARTITION_DB
+ if (used_key_is_modified || order ||
+ partition_key_modified(table, fields))
+#else
if (used_key_is_modified || order)
+#endif
{
/*
We can't update table directly; We must first search after all
matching rows before updating the table!
*/
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ table->file->ha_retrieve_all_cols();
if (used_index < MAX_KEY && old_used_keys.is_set(used_index))
{
table->key_read=1;
@@ -390,7 +402,7 @@ int mysql_update(THD *thd,
(thd->variables.sql_mode &
(MODE_STRICT_TRANS_TABLES |
MODE_STRICT_ALL_TABLES)));
-
+ will_batch= !table->file->start_bulk_update();
while (!(error=info.read_record(&info)) && !thd->killed)
{
if (!(select && select->skip_record()))
@@ -417,8 +429,47 @@ int mysql_update(THD *thd,
break;
}
}
- if (!(error=table->file->update_row((byte*) table->record[1],
- (byte*) table->record[0])))
+ if (will_batch)
+ {
+ /*
+ Typically a batched handler can execute the batched jobs when:
+ 1) it is specifically told to do so
+ 2) it is no longer a good idea to keep batching
+ 3) the batch must be sent for other reasons
+ (one such reason is when READs must be performed)
+
+ 1) is covered by the exec_bulk_update calls.
+ 2) and 3) are handled by the bulk_update_row method.
+
+ bulk_update_row can execute the batched updates either including
+ or excluding the row passed in the call. This is up to the handler
+ implementation and can vary from call to call.
+
+ dup_key_found reports the number of duplicate keys found among the
+ updates that were actually executed. It is only reported if the
+ extra call with HA_EXTRA_IGNORE_DUP_KEY has been issued; if it
+ hasn't, the handler returns an error code and the caller can
+ ignore this number. Thus any handler that implements batching
+ for UPDATE IGNORE must also handle this extra call properly.
+
+ If a duplicate key is found on the record included in this call,
+ it should be included in the dup_key_found count and error should
+ be set to 0 (only if these errors are ignored).
+ */
+ error= table->file->bulk_update_row(table->record[1],
+ table->record[0],
+ &dup_key_found);
+ limit+= dup_key_found;
+ updated-= dup_key_found;
+ }
+ else
+ {
+ /* Non-batched update */
+ error= table->file->update_row((byte*) table->record[1],
+ (byte*) table->record[0]);
+ }
+ if (!error)
{
updated++;
thd->no_trans_update= !transactional_table;
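
The comment above spells out the batched-update contract between the SQL layer and the handler. The standalone sketch below shows one way a caller could drive that contract; only the method names come from this patch, while HandlerSketch and its trivial method bodies are assumptions made for illustration.

  // Sketch only: start_bulk_update() enables batching, bulk_update_row()
  // queues or executes rows and reports ignored duplicates through
  // dup_key_found, exec_bulk_update() flushes what is still queued and
  // end_bulk_update() closes the batch.
  struct HandlerSketch
  {
    bool start_bulk_update() { return false; }           // false: batching supported
    int bulk_update_row(const char *old_row, const char *new_row,
                        unsigned *dup_key_found)
    {
      (void) old_row; (void) new_row;
      *dup_key_found= 0;                                 // queued, no duplicates yet
      return 0;
    }
    int exec_bulk_update(unsigned *dup_key_found)
    {
      *dup_key_found= 0;                                 // flush queue, report duplicates
      return 0;
    }
    void end_bulk_update() {}
    int update_row(const char *old_row, const char *new_row)
    {
      (void) old_row; (void) new_row;                    // non-batched fallback
      return 0;
    }
  };

  int update_rows_sketch(HandlerSketch &h, const char **old_rows,
                         const char **new_rows, int n_rows)
  {
    unsigned dup_key_found= 0;
    int error= 0, updated= 0;
    bool will_batch= !h.start_bulk_update();
    for (int i= 0; i < n_rows && !error; i++)
    {
      if (will_batch)
      {
        error= h.bulk_update_row(old_rows[i], new_rows[i], &dup_key_found);
        updated-= (int) dup_key_found;                   // ignored duplicates do not count
      }
      else
        error= h.update_row(old_rows[i], new_rows[i]);
      if (!error)
        updated++;
    }
    if (will_batch)
    {
      if (!error && (error= h.exec_bulk_update(&dup_key_found)) == 0)
        updated-= (int) dup_key_found;                   // duplicates found while flushing
      h.end_bulk_update();
    }
    return error ? -1 : updated;
  }
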
@@ -442,20 +493,74 @@ int mysql_update(THD *thd,
if (!--limit && using_limit)
{
- error= -1; // Simulate end of file
- break;
+ /*
+ We have reached end-of-file in the most common situations: when no
+ batching has occurred, when batching was supposed to occur but no
+ updates were made, and when the batch execution completed without
+ error and without finding any duplicate keys.
+ If the batched updates were executed with errors we need to check
+ them, and if there was no error but duplicate keys were found we
+ need to continue, since those are not counted against limit.
+ */
+ if (will_batch &&
+ ((error= table->file->exec_bulk_update(&dup_key_found)) ||
+ !dup_key_found))
+ {
+ if (error)
+ {
+ /*
+ The handler should not report duplicate key errors when they are
+ ignored. This is a requirement on batching handlers.
+ */
+ table->file->print_error(error,MYF(0));
+ error= 1;
+ break;
+ }
+ /*
+ Either an error was found and we are ignoring errors, or duplicate
+ keys were found. In both cases we need to correct the counters
+ and continue the loop.
+ */
+ limit= dup_key_found; // limit is 0 when we get here, so assigning dup_key_found effectively adds it back
+ updated-= dup_key_found;
+ }
+ else
+ {
+ error= -1; // Simulate end of file
+ break;
+ }
}
}
else
table->file->unlock_row();
thd->row_count++;
}
+ dup_key_found= 0;
if (thd->killed && !error)
error= 1; // Aborted
+ else if (will_batch &&
+ (loc_error= table->file->exec_bulk_update(&dup_key_found)))
+ /*
+ An error occurred when the batched update was executed and it
+ returned an error indication. It cannot be an allowed duplicate
+ key error, since we require the batching handler to treat such
+ errors as normal behaviour.
+
+ Otherwise we simply subtract the number of duplicate key records
+ found in the batched update.
+ */
+ {
+ thd->fatal_error();
+ table->file->print_error(loc_error,MYF(0));
+ error= 1;
+ }
+ else
+ updated-= dup_key_found;
+ if (will_batch)
+ table->file->end_bulk_update();
end_read_record(&info);
free_io_cache(table); // If ORDER BY
delete select;
- thd->proc_info="end";
+ thd->proc_info= "end";
VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
/*
@@ -646,7 +751,7 @@ bool mysql_multi_update_prepare(THD *thd)
&lex->select_lex.leaf_tables, FALSE))
DBUG_RETURN(TRUE);
- if (setup_fields_with_no_wrap(thd, 0, *fields, 1, 0, 0))
+ if (setup_fields_with_no_wrap(thd, 0, *fields, 2, 0, 0))
DBUG_RETURN(TRUE);
for (tl= table_list; tl ; tl= tl->next_local)
@@ -762,7 +867,7 @@ bool mysql_multi_update_prepare(THD *thd)
if (setup_tables(thd, &lex->select_lex.context,
table_list, &lex->select_lex.where,
&lex->select_lex.leaf_tables, FALSE) ||
- setup_fields_with_no_wrap(thd, 0, *fields, 1, 0, 0))
+ setup_fields_with_no_wrap(thd, 0, *fields, 2, 0, 0))
DBUG_RETURN(TRUE);
}
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index b680787b9a3..d4cd2bd6600 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -356,13 +356,16 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token LEAVES
%token LEAVE_SYM
%token LEFT
+%token LESS_SYM
%token LEVEL_SYM
%token LEX_HOSTNAME
%token LIKE
%token LIMIT
+%token LINEAR_SYM
%token LINEFROMTEXT
%token LINES
%token LINESTRING
+%token LIST_SYM
%token LOAD
%token LOCAL_SYM
%token LOCATE
@@ -402,6 +405,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token MAX_SYM
%token MAX_UPDATES_PER_HOUR
%token MAX_USER_CONNECTIONS_SYM
+%token MAX_VALUE_SYM
%token MEDIUMBLOB
%token MEDIUMINT
%token MEDIUMTEXT
@@ -436,6 +440,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token NE
%token NEW_SYM
%token NEXT_SYM
+%token NODEGROUP_SYM
%token NONE_SYM
%token NOT2_SYM
%token NOT_SYM
@@ -464,6 +469,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token OUT_SYM
%token PACK_KEYS_SYM
%token PARTIAL
+%token PARTITION_SYM
+%token PARTITIONS_SYM
%token PASSWORD
%token PARAM_MARKER
%token PHASE_SYM
@@ -490,6 +497,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token RAID_STRIPED_SYM
%token RAID_TYPE
%token RAND
+%token RANGE_SYM
%token READS_SYM
%token READ_SYM
%token REAL
@@ -575,6 +583,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token STRING_SYM
%token SUBDATE_SYM
%token SUBJECT_SYM
+%token SUBPARTITION_SYM
+%token SUBPARTITIONS_SYM
%token SUBSTRING
%token SUBSTRING_INDEX
%token SUM_SYM
@@ -595,6 +605,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token TINYBLOB
%token TINYINT
%token TINYTEXT
+%token THAN_SYM
%token TO_SYM
%token TRAILING
%token TRANSACTION_SYM
@@ -619,11 +630,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token UNIX_TIMESTAMP
%token UNKNOWN_SYM
%token UNLOCK_SYM
-%token UNLOCK_SYM
%token UNSIGNED
%token UNTIL_SYM
-%token UNTIL_SYM
-%token UPDATE_SYM
%token UPDATE_SYM
%token USAGE
%token USER
@@ -724,6 +732,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
sp_opt_default
simple_ident_nospvar simple_ident_q
field_or_var limit_option
+ part_bit_expr part_func_expr
%type <item_num>
NUM_literal
@@ -822,6 +831,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
statement sp_suid opt_view_list view_list or_replace algorithm
sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic xa
load_data opt_field_or_var_spec fields_or_vars opt_load_data_set_spec
+ partition_entry
END_OF_INPUT
%type <NONE> call sp_proc_stmts sp_proc_stmts1 sp_proc_stmt
@@ -887,6 +897,7 @@ statement:
| lock
| optimize
| keycache
+ | partition_entry
| preload
| prepare
| purge
@@ -2538,7 +2549,9 @@ trg_event:
create2:
'(' create2a {}
- | opt_create_table_options create3 {}
+ | opt_create_table_options
+ opt_partitioning {}
+ create3 {}
| LIKE table_ident
{
LEX *lex=Lex;
@@ -2554,8 +2567,12 @@ create2:
;
create2a:
- field_list ')' opt_create_table_options create3 {}
- | create_select ')' { Select->set_braces(1);} union_opt {}
+ field_list ')' opt_create_table_options
+ opt_partitioning {}
+ create3 {}
+ | opt_partitioning {}
+ create_select ')'
+ { Select->set_braces(1);} union_opt {}
;
create3:
@@ -2566,6 +2583,411 @@ create3:
{ Select->set_braces(1);} union_opt {}
;
+/*
+ This part of the parser handles the partition information.
+
+ Its first version was written by Mikael Ronström, with lots of answers
+ to questions provided by Antony Curtis.
+
+ The partition grammar can be entered from three places:
+ 1) CREATE TABLE ... PARTITION ..
+ 2) ALTER TABLE table_name PARTITION ...
+ 3) PARTITION ...
+
+ The first entry is used when a new table is created from a MySQL client.
+ The second entry is used when a table is altered with the ALTER TABLE
+ command from a MySQL client.
+ The third entry is used when opening a .frm file that contains partition
+ info. PARTITION must not be an allowed entry point for SQL client
+ queries; this is arranged by setting some state variables before
+ arriving here.
+
+ To be able to handle errors, we only set the error code in this code
+ and handle the error condition in the function calling the parser.
+ This is necessary to ensure we can also handle errors when calling
+ the parser from the openfrm function.
+*/
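
The introductory comment above notes that the bare PARTITION entry point is reserved for openfrm(); the partition_entry rule further down enforces this by requiring lex->part_info to have been pre-set by the caller. A small sketch of that guard follows, using hypothetical stand-in types rather than the real LEX and partition_info classes.

  // Sketch only: mirror of the check performed in the partition_entry rule.
  struct partition_info_sketch {};
  struct lex_sketch { partition_info_sketch *part_info; };

  enum parse_result_sketch { ENTRY_OK, ENTRY_ERROR };

  parse_result_sketch partition_entry_sketch(lex_sketch *lex)
  {
    if (lex->part_info)                            // pre-set by openfrm before parsing
    {
      lex->part_info= new partition_info_sketch(); // fresh object for the grammar to fill
      return ENTRY_OK;                             // continue with the "partition" rule
    }
    return ENTRY_ERROR;                            // maps to ER_PARTITION_ENTRY_ERROR
  }
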
+opt_partitioning:
+ /* empty */ {}
+ | partitioning
+ ;
+
+partitioning:
+ PARTITION_SYM
+ { Lex->part_info= new partition_info(); }
+ partition
+ ;
+
+partition_entry:
+ PARTITION_SYM
+ {
+ LEX *lex= Lex;
+ if (lex->part_info)
+ {
+ /*
+ We enter here when opening the frm file to translate
+ partition info string into part_info data structure.
+ */
+ lex->part_info= new partition_info();
+ }
+ else
+ {
+ yyerror(ER(ER_PARTITION_ENTRY_ERROR));
+ YYABORT;
+ }
+ }
+ partition {};
+
+partition:
+ BY part_type_def opt_no_parts {} opt_sub_part {} part_defs;
+
+part_type_def:
+ opt_linear KEY_SYM '(' part_field_list ')'
+ {
+ LEX *lex= Lex;
+ lex->part_info->list_of_part_fields= TRUE;
+ lex->part_info->part_type= HASH_PARTITION;
+ }
+ | opt_linear HASH_SYM
+ { Lex->part_info->part_type= HASH_PARTITION; }
+ part_func {}
+ | RANGE_SYM
+ { Lex->part_info->part_type= RANGE_PARTITION; }
+ part_func {}
+ | LIST_SYM
+ { Lex->part_info->part_type= LIST_PARTITION; }
+ part_func {};
+
+opt_linear:
+ /* empty */ {}
+ | LINEAR_SYM
+ { Lex->part_info->linear_hash_ind= TRUE;};
+
+part_field_list:
+ part_field_item {}
+ | part_field_list ',' part_field_item {};
+
+part_field_item:
+ ident
+ {
+ Lex->part_info->part_field_list.push_back($1.str);
+ };
+
+part_func:
+ '(' remember_name part_func_expr remember_end ')'
+ {
+ LEX *lex= Lex;
+ uint expr_len= (uint)($4 - $2) - 1;
+ lex->part_info->list_of_part_fields= FALSE;
+ lex->part_info->part_expr= $3;
+ lex->part_info->part_func_string= $2+1;
+ lex->part_info->part_func_len= expr_len;
+ };
+
+sub_part_func:
+ '(' remember_name part_func_expr remember_end ')'
+ {
+ LEX *lex= Lex;
+ uint expr_len= (uint)($4 - $2) - 1;
+ lex->part_info->list_of_subpart_fields= FALSE;
+ lex->part_info->subpart_expr= $3;
+ lex->part_info->subpart_func_string= $2+1;
+ lex->part_info->subpart_func_len= expr_len;
+ };
+
+
+opt_no_parts:
+ /* empty */ {}
+ | PARTITIONS_SYM ulong_num
+ {
+ uint no_parts= $2;
+ if (no_parts == 0)
+ {
+ my_error(ER_NO_PARTS_ERROR, MYF(0), "partitions");
+ YYABORT;
+ }
+ Lex->part_info->no_parts= no_parts;
+ };
+
+opt_sub_part:
+ /* empty */ {}
+ | SUBPARTITION_SYM BY opt_linear HASH_SYM sub_part_func
+ { Lex->part_info->subpart_type= HASH_PARTITION; }
+ opt_no_subparts {}
+ | SUBPARTITION_SYM BY opt_linear KEY_SYM
+ '(' sub_part_field_list ')'
+ {
+ LEX *lex= Lex;
+ lex->part_info->subpart_type= HASH_PARTITION;
+ lex->part_info->list_of_subpart_fields= TRUE;
+ }
+ opt_no_subparts {};
+
+sub_part_field_list:
+ sub_part_field_item {}
+ | sub_part_field_list ',' sub_part_field_item {};
+
+sub_part_field_item:
+ ident
+ { Lex->part_info->subpart_field_list.push_back($1.str); };
+
+part_func_expr:
+ bit_expr
+ {
+ LEX *lex= Lex;
+ bool not_corr_func;
+ not_corr_func= !lex->safe_to_cache_query;
+ lex->safe_to_cache_query= 1;
+ if (not_corr_func)
+ {
+ yyerror(ER(ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR));
+ YYABORT;
+ }
+ $$=$1;
+ }
+
+opt_no_subparts:
+ /* empty */ {}
+ | SUBPARTITIONS_SYM ulong_num
+ {
+ uint no_parts= $2;
+ if (no_parts == 0)
+ {
+ my_error(ER_NO_PARTS_ERROR, MYF(0), "subpartitions");
+ YYABORT;
+ }
+ Lex->part_info->no_subparts= no_parts;
+ };
+
+part_defs:
+ /* empty */
+ {}
+ | '(' part_def_list ')'
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ if (part_info->no_parts != 0)
+ {
+ if (part_info->no_parts !=
+ part_info->count_curr_parts)
+ {
+ yyerror(ER(ER_PARTITION_WRONG_NO_PART_ERROR));
+ YYABORT;
+ }
+ }
+ else if (part_info->count_curr_parts > 0)
+ {
+ part_info->no_parts= part_info->count_curr_parts;
+ }
+ part_info->count_curr_subparts= 0;
+ part_info->count_curr_parts= 0;
+ };
+
+part_def_list:
+ part_definition {}
+ | part_def_list ',' part_definition {};
+
+part_definition:
+ PARTITION_SYM
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ partition_element *p_elem= new partition_element();
+ if (!p_elem)
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
+ YYABORT;
+ }
+ part_info->curr_part_elem= p_elem;
+ part_info->current_partition= p_elem;
+ part_info->use_default_partitions= FALSE;
+ part_info->partitions.push_back(p_elem);
+ p_elem->engine_type= DB_TYPE_UNKNOWN;
+ part_info->count_curr_parts++;
+ }
+ part_name {}
+ opt_part_values {}
+ opt_part_options {}
+ opt_sub_partition {};
+
+part_name:
+ ident_or_text
+ { Lex->part_info->curr_part_elem->partition_name= $1.str; };
+
+opt_part_values:
+ /* empty */
+ {
+ LEX *lex= Lex;
+ if (lex->part_info->part_type == RANGE_PARTITION)
+ {
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
+ "RANGE", "LESS THAN");
+ YYABORT;
+ }
+ if (lex->part_info->part_type == LIST_PARTITION)
+ {
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
+ "LIST", "IN");
+ YYABORT;
+ }
+ }
+ | VALUES LESS_SYM THAN_SYM part_func_max
+ {
+ if (Lex->part_info->part_type != RANGE_PARTITION)
+ {
+ my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
+ "RANGE", "LESS THAN");
+ YYABORT;
+ }
+ }
+ | VALUES IN_SYM '(' part_list_func ')'
+ {
+ if (Lex->part_info->part_type != LIST_PARTITION)
+ {
+ my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
+ "LIST", "IN");
+ YYABORT;
+ }
+ };
+
+part_func_max:
+ MAX_VALUE_SYM
+ {
+ LEX *lex= Lex;
+ if (lex->part_info->defined_max_value)
+ {
+ yyerror(ER(ER_PARTITION_MAXVALUE_ERROR));
+ YYABORT;
+ }
+ lex->part_info->defined_max_value= TRUE;
+ }
+ | part_range_func
+ {
+ if (Lex->part_info->defined_max_value)
+ {
+ yyerror(ER(ER_PARTITION_MAXVALUE_ERROR));
+ YYABORT;
+ }
+ };
+
+part_range_func:
+ '(' part_bit_expr ')'
+ {
+ Lex->part_info->curr_part_elem->range_expr= $2;
+ };
+
+part_list_func:
+ part_list_item {}
+ | part_list_func ',' part_list_item {};
+
+part_list_item:
+ part_bit_expr
+ {
+ Lex->part_info->curr_part_elem->list_expr_list.push_back($1);
+ };
+
+part_bit_expr:
+ bit_expr
+ {
+ Item *part_expr= $1;
+ bool not_corr_func;
+ LEX *lex= Lex;
+ Name_resolution_context *context= &lex->current_select->context;
+ TABLE_LIST *save_list= context->table_list;
+
+ context->table_list= 0;
+ part_expr->fix_fields(YYTHD, (Item**)0);
+ context->table_list= save_list;
+ not_corr_func= !part_expr->const_item() ||
+ !lex->safe_to_cache_query;
+ lex->safe_to_cache_query= 1;
+ if (not_corr_func)
+ {
+ yyerror(ER(ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR));
+ YYABORT;
+ }
+ $$= part_expr;
+ }
+
+opt_sub_partition:
+ /* empty */ {}
+ | '(' sub_part_list ')'
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ if (part_info->no_subparts != 0)
+ {
+ if (part_info->no_subparts !=
+ part_info->count_curr_subparts)
+ {
+ yyerror(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR));
+ YYABORT;
+ }
+ }
+ else if (part_info->count_curr_subparts > 0)
+ {
+ part_info->no_subparts= part_info->count_curr_subparts;
+ }
+ part_info->count_curr_subparts= 0;
+ };
+
+sub_part_list:
+ sub_part_definition {}
+ | sub_part_list ',' sub_part_definition {};
+
+sub_part_definition:
+ SUBPARTITION_SYM
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ partition_element *p_elem= new partition_element();
+ if (!p_elem)
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
+ YYABORT;
+ }
+ part_info->curr_part_elem= p_elem;
+ part_info->current_partition->subpartitions.push_back(p_elem);
+ part_info->use_default_subpartitions= FALSE;
+ part_info->count_curr_subparts++;
+ p_elem->engine_type= DB_TYPE_UNKNOWN;
+ }
+ sub_name opt_part_options {};
+
+sub_name:
+ ident_or_text
+ { Lex->part_info->curr_part_elem->partition_name= $1.str; };
+
+opt_part_options:
+ /* empty */ {}
+ | opt_part_option_list {};
+
+opt_part_option_list:
+ opt_part_option_list opt_part_option {}
+ | opt_part_option {};
+
+opt_part_option:
+ TABLESPACE opt_equal ident_or_text
+ { Lex->part_info->curr_part_elem->tablespace_name= $3.str; }
+ | opt_storage ENGINE_SYM opt_equal storage_engines
+ { Lex->part_info->curr_part_elem->engine_type= $4; }
+ | NODEGROUP_SYM opt_equal ulong_num
+ { Lex->part_info->curr_part_elem->nodegroup_id= $3; }
+ | MAX_ROWS opt_equal ulonglong_num
+ { Lex->part_info->curr_part_elem->part_max_rows= $3; }
+ | MIN_ROWS opt_equal ulonglong_num
+ { Lex->part_info->curr_part_elem->part_min_rows= $3; }
+ | DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys
+ { Lex->part_info->curr_part_elem->data_file_name= $4.str; }
+ | INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys
+ { Lex->part_info->curr_part_elem->index_file_name= $4.str; }
+ | COMMENT_SYM opt_equal TEXT_STRING_sys
+ { Lex->part_info->curr_part_elem->part_comment= $3.str; };
+
+/*
+ End of partition parser part
+*/
+
create_select:
SELECT_SYM
{
@@ -3338,7 +3760,7 @@ alter:
lex->alter_info.reset();
lex->alter_info.flags= 0;
}
- alter_list
+ alter_commands
{}
| ALTER DATABASE ident_or_empty
{
@@ -3404,11 +3826,18 @@ ident_or_empty:
/* empty */ { $$= 0; }
| ident { $$= $1.str; };
-alter_list:
+alter_commands:
| DISCARD TABLESPACE { Lex->alter_info.tablespace_op= DISCARD_TABLESPACE; }
| IMPORT TABLESPACE { Lex->alter_info.tablespace_op= IMPORT_TABLESPACE; }
- | alter_list_item
- | alter_list ',' alter_list_item;
+ | alter_list
+ opt_partitioning
+ | partitioning
+ ;
+
+alter_list:
+ alter_list_item
+ | alter_list ',' alter_list_item
+ ;
add_column:
ADD opt_column
@@ -4037,7 +4466,7 @@ select_options:
/* empty*/
| select_option_list
{
- if (test_all_bits(Select->options, SELECT_ALL | SELECT_DISTINCT))
+ if (Select->options & SELECT_DISTINCT && Select->options & SELECT_ALL)
{
my_error(ER_WRONG_USAGE, MYF(0), "ALL", "DISTINCT");
YYABORT;
@@ -7380,6 +7809,7 @@ keyword:
| LANGUAGE_SYM {}
| NO_SYM {}
| OPEN_SYM {}
+ | PARTITION_SYM {}
| PREPARE_SYM {}
| REPAIR {}
| RESET_SYM {}
@@ -7480,8 +7910,10 @@ keyword_sp:
| RELAY_THREAD {}
| LAST_SYM {}
| LEAVES {}
+ | LESS_SYM {}
| LEVEL_SYM {}
| LINESTRING {}
+ | LIST_SYM {}
| LOCAL_SYM {}
| LOCKS_SYM {}
| LOGS_SYM {}
@@ -7505,6 +7937,7 @@ keyword_sp:
| MAX_QUERIES_PER_HOUR {}
| MAX_UPDATES_PER_HOUR {}
| MAX_USER_CONNECTIONS_SYM {}
+ | MAX_VALUE_SYM {}
| MEDIUM_SYM {}
| MERGE_SYM {}
| MICROSECOND_SYM {}
@@ -7525,6 +7958,7 @@ keyword_sp:
| NDBCLUSTER_SYM {}
| NEXT_SYM {}
| NEW_SYM {}
+ | NODEGROUP_SYM {}
| NONE_SYM {}
| NVARCHAR_SYM {}
| OFFSET_SYM {}
@@ -7533,6 +7967,7 @@ keyword_sp:
| ONE_SYM {}
| PACK_KEYS_SYM {}
| PARTIAL {}
+ | PARTITIONS_SYM {}
| PASSWORD {}
| PHASE_SYM {}
| POINT_SYM {}
@@ -7583,6 +8018,8 @@ keyword_sp:
| STRING_SYM {}
| SUBDATE_SYM {}
| SUBJECT_SYM {}
+ | SUBPARTITION_SYM {}
+ | SUBPARTITIONS_SYM {}
| SUPER_SYM {}
| SUSPEND_SYM {}
| TABLES {}
@@ -7590,6 +8027,7 @@ keyword_sp:
| TEMPORARY {}
| TEMPTABLE_SYM {}
| TEXT_SYM {}
+ | THAN_SYM {}
| TRANSACTION_SYM {}
| TRIGGERS_SYM {}
| TIMESTAMP {}
diff --git a/sql/table.cc b/sql/table.cc
index 220aba27d5b..8852e1fa9dd 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -70,7 +70,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
int j,error, errarg= 0;
uint rec_buff_length,n_length,int_length,records,key_parts,keys,
interval_count,interval_parts,read_length,db_create_options;
- uint key_info_length, com_length;
+ uint key_info_length, com_length, part_info_len, extra_rec_buf_length;
ulong pos;
char index_file[FN_REFLEN], *names, *keynames, *comment_pos;
uchar head[288],*disk_buff,new_field_pack_flag;
@@ -153,6 +153,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
goto err; /* purecov: inspected */
*fn_ext(index_file)='\0'; // Remove .frm extension
+ part_info_len= uint4korr(head+55);
share->frm_version= head[2];
/*
Check if .frm file created by MySQL 5.0. In this case we want to
@@ -300,10 +301,6 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
}
#endif
- /* Allocate handler */
- if (!(outparam->file= get_new_handler(outparam, share->db_type)))
- goto err;
-
error=4;
outparam->reginfo.lock_type= TL_UNLOCK;
outparam->current_lock=F_UNLCK;
@@ -314,8 +311,9 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
if (prgflag & (READ_ALL+EXTRA_RECORD))
records++;
/* QQ: TODO, remove the +1 from below */
+ extra_rec_buf_length= uint2korr(head+59);
rec_buff_length= ALIGN_SIZE(share->reclength + 1 +
- outparam->file->extra_rec_buf_length());
+ extra_rec_buf_length);
share->rec_buff_length= rec_buff_length;
if (!(record= (char *) alloc_root(&outparam->mem_root,
rec_buff_length * records)))
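
The two new header reads above correspond to values written by mysql_create_frm() later in this patch: a 4-byte partition-info length at offset 55 of the .frm header (int4store/uint4korr) and a 2-byte extra record buffer length at offset 59 (int2store/uint2korr). Below is a standalone sketch of that pack/unpack, assuming the usual little-endian byte order of those macros; the helper functions are illustrative, not the server's implementation.

  #include <cstdint>
  #include <cstring>

  static void store4(unsigned char *p, uint32_t v)
  {
    p[0]= (unsigned char) v;         p[1]= (unsigned char)(v >> 8);
    p[2]= (unsigned char)(v >> 16);  p[3]= (unsigned char)(v >> 24);
  }
  static uint32_t korr4(const unsigned char *p)
  {
    return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
           ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
  }
  static void store2(unsigned char *p, uint16_t v)
  { p[0]= (unsigned char) v; p[1]= (unsigned char)(v >> 8); }
  static uint16_t korr2(const unsigned char *p)
  { return (uint16_t)(p[0] | (p[1] << 8)); }

  int main()
  {
    unsigned char fileinfo[64];
    std::memset(fileinfo, 0, sizeof(fileinfo));
    store4(fileinfo + 55, 1234);  // part_info_len, written by mysql_create_frm
    store2(fileinfo + 59, 8);     // extra_rec_buf_length, written next to it
    uint32_t part_info_len= korr4(fileinfo + 55);        // read back in openfrm
    uint16_t extra_rec_buf_length= korr2(fileinfo + 59); // used to size the record buffer
    return !(part_info_len == 1234 && extra_rec_buf_length == 8);
  }

Storing extra_rec_buf_length in the header lets openfrm size the record buffer before any handler object exists, which is consistent with the get_new_handler() call being moved below the partition unpacking in this file.
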
@@ -435,9 +433,22 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
if (keynames)
fix_type_pointers(&int_array, &share->keynames, 1, &keynames);
+ if (part_info_len > 0)
+ {
+#ifdef HAVE_PARTITION_DB
+ if (mysql_unpack_partition(file, thd, part_info_len, outparam))
+ goto err;
+#else
+ goto err;
+#endif
+ }
VOID(my_close(file,MYF(MY_WME)));
file= -1;
+ /* Allocate handler */
+ if (!(outparam->file= get_new_handler(outparam, share->db_type)))
+ goto err;
+
record= (char*) outparam->record[0]-1; /* Fieldstart = 1 */
if (null_field_first)
{
@@ -594,6 +605,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
goto err; /* purecov: inspected */
}
+ reg_field->fieldnr= i+1; //Set field number
reg_field->field_index= i;
reg_field->comment=comment;
if (field_type == FIELD_TYPE_BIT && !f_bit_as_char(pack_flag))
@@ -855,7 +867,16 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
(*save++)= i;
}
}
+ if (outparam->file->ha_allocate_read_write_set(share->fields))
+ goto err;
+ /* Fix the partition functions and ensure they are not constant functions */
+ if (part_info_len > 0)
+#ifdef HAVE_PARTITION_DB
+ if (fix_partition_func(thd,name,outparam))
+#endif
+ goto err;
+
/* The table struct is now initialized; Open the table */
error=2;
if (db_stat)
@@ -913,6 +934,13 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
if (! error_reported)
frm_error(error,outparam,name,ME_ERROR+ME_WAITTANG, errarg);
delete outparam->file;
+#ifdef HAVE_PARTITION_DB
+ if (outparam->s->part_info)
+ {
+ free_items(outparam->s->part_info->item_free_list);
+ outparam->s->part_info->item_free_list= 0;
+ }
+#endif
outparam->file=0; // For easier errorchecking
outparam->db_stat=0;
hash_free(&share->name_hash);
@@ -939,6 +967,13 @@ int closefrm(register TABLE *table)
table->field= 0;
}
delete table->file;
+#ifdef HAVE_PARTITION_DB
+ if (table->s->part_info)
+ {
+ free_items(table->s->part_info->item_free_list);
+ table->s->part_info->item_free_list= 0;
+ }
+#endif
table->file= 0; /* For easier errorchecking */
hash_free(&table->s->name_hash);
free_root(&table->mem_root, MYF(0));
diff --git a/sql/table.h b/sql/table.h
index 13d44766804..8c0040b0fa4 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -21,6 +21,7 @@ class Item; /* Needed by ORDER */
class GRANT_TABLE;
class st_select_lex_unit;
class st_select_lex;
+class partition_info;
class COND_EQUAL;
/* Order clause list element */
@@ -96,6 +97,9 @@ class Table_triggers_list;
typedef struct st_table_share
{
+#ifdef HAVE_PARTITION_DB
+ partition_info *part_info; /* Partition related information */
+#endif
/* hash of field names (contains pointers to elements of field array) */
HASH name_hash; /* hash of field names */
MEM_ROOT mem_root;
@@ -203,6 +207,8 @@ struct st_table {
ORDER *group;
const char *alias; /* alias or table name */
uchar *null_flags;
+ MY_BITMAP *read_set;
+ MY_BITMAP *write_set;
query_id_t query_id;
ha_rows quick_rows[MAX_KEY];
@@ -256,6 +262,7 @@ struct st_table {
my_bool auto_increment_field_not_null;
my_bool insert_or_update; /* Can be used by the handler */
my_bool alias_name_used; /* true if table_name is alias */
+ my_bool get_fields_in_item_tree; /* Signal to fix_field */
REGINFO reginfo; /* field connections */
MEM_ROOT mem_root;
diff --git a/sql/tztime.cc b/sql/tztime.cc
index f5111459da2..bb516731440 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1623,7 +1623,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
mysql.time_zone* tables are MyISAM and these operations always succeed
for MyISAM.
*/
- (void)table->file->ha_index_init(0);
+ (void)table->file->ha_index_init(0, 1);
tz_leapcnt= 0;
res= table->file->index_first(table->record[0]);
@@ -1800,7 +1800,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
mysql.time_zone* tables are MyISAM and these operations always succeed
for MyISAM.
*/
- (void)table->file->ha_index_init(0);
+ (void)table->file->ha_index_init(0, 1);
if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
0, HA_READ_KEY_EXACT))
@@ -1827,7 +1827,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
table= tz_tables->table;
tz_tables= tz_tables->next_local;
table->field[0]->store((longlong)tzid);
- (void)table->file->ha_index_init(0);
+ (void)table->file->ha_index_init(0, 1);
if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
0, HA_READ_KEY_EXACT))
@@ -1854,7 +1854,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
table= tz_tables->table;
tz_tables= tz_tables->next_local;
table->field[0]->store((longlong)tzid);
- (void)table->file->ha_index_init(0);
+ (void)table->file->ha_index_init(0, 1);
// FIXME Is there any better approach than explicitly specifying 4 ???
res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
@@ -1926,7 +1926,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
*/
table= tz_tables->table;
table->field[0]->store((longlong)tzid);
- (void)table->file->ha_index_init(0);
+ (void)table->file->ha_index_init(0, 1);
// FIXME Is there any better approach than explicitly specifying 4 ???
res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 7f170b3ef87..cdbae4f1eb9 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -46,7 +46,8 @@ static bool pack_fields(File file, List<create_field> &create_fields,
static bool make_empty_rec(THD *thd, int file, enum db_type table_type,
uint table_options,
List<create_field> &create_fields,
- uint reclength, ulong data_offset);
+ uint reclength, ulong data_offset,
+ handler *handler);
/*
Create a frm (table definition) file
@@ -79,13 +80,18 @@ bool mysql_create_frm(THD *thd, my_string file_name,
uchar fileinfo[64],forminfo[288],*keybuff;
TYPELIB formnames;
uchar *screen_buff;
+#ifdef HAVE_PARTITION_DB
+ partition_info *part_info= thd->lex->part_info;
+#endif
DBUG_ENTER("mysql_create_frm");
+#ifdef HAVE_PARTITION_DB
+ thd->lex->part_info= NULL;
+#endif
formnames.type_names=0;
if (!(screen_buff=pack_screens(create_fields,&info_length,&screens,0)))
DBUG_RETURN(1);
- if (db_file == NULL)
- db_file= get_new_handler((TABLE*) 0, create_info->db_type);
+ DBUG_ASSERT(db_file != NULL);
/* If fixed row records, we need one bit to check for deleted rows */
if (!(create_info->table_options & HA_OPTION_PACK_RECORD))
@@ -136,6 +142,13 @@ bool mysql_create_frm(THD *thd, my_string file_name,
60);
forminfo[46]=(uchar) strlen((char*)forminfo+47); // Length of comment
+#ifdef HAVE_PARTITION_DB
+ if (part_info)
+ {
+ int4store(fileinfo+55,part_info->part_info_len);
+ }
+#endif
+ int2store(fileinfo+59,db_file->extra_rec_buf_length());
if (my_pwrite(file,(byte*) fileinfo,64,0L,MYF_RW) ||
my_pwrite(file,(byte*) keybuff,key_info_length,
(ulong) uint2korr(fileinfo+6),MYF_RW))
@@ -144,7 +157,7 @@ bool mysql_create_frm(THD *thd, my_string file_name,
(ulong) uint2korr(fileinfo+6)+ (ulong) key_buff_length,
MY_SEEK_SET,MYF(0)));
if (make_empty_rec(thd,file,create_info->db_type,create_info->table_options,
- create_fields,reclength, data_offset))
+ create_fields,reclength, data_offset, db_file))
goto err;
VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0)));
@@ -153,6 +166,14 @@ bool mysql_create_frm(THD *thd, my_string file_name,
pack_fields(file, create_fields, data_offset))
goto err;
+#ifdef HAVE_PARTITION_DB
+ if (part_info)
+ {
+ if (my_write(file, (byte*) part_info->part_info_string,
+ part_info->part_info_len, MYF_RW))
+ goto err;
+ }
+#endif
#ifdef HAVE_CRYPTED_FRM
if (create_info->password)
{
@@ -211,15 +232,14 @@ err3:
Create a frm (table definition) file and the tables
SYNOPSIS
- mysql_create_frm()
+ rea_create_table()
thd Thread handler
file_name Name of file (including database and .frm)
create_info create info parameters
create_fields Fields to create
keys number of keys to create
key_info Keys to create
- db_file Handler to use. May be zero, in which case we use
- create_info->db_type
+ file Handler to use.
RETURN
0 ok
1 error
@@ -228,19 +248,21 @@ err3:
int rea_create_table(THD *thd, my_string file_name,
HA_CREATE_INFO *create_info,
List<create_field> &create_fields,
- uint keys, KEY *key_info)
+ uint keys, KEY *key_info, handler *file)
{
DBUG_ENTER("rea_create_table");
if (mysql_create_frm(thd, file_name, create_info,
- create_fields, keys, key_info, NULL))
+ create_fields, keys, key_info, file))
DBUG_RETURN(1);
+ if (file->create_handler_files(file_name))
+ goto err_handler;
if (!create_info->frm_only && ha_create_table(file_name,create_info,0))
- {
- my_delete(file_name,MYF(0));
- DBUG_RETURN(1);
- }
+ goto err_handler;
DBUG_RETURN(0);
+err_handler:
+ my_delete(file_name, MYF(0));
+ DBUG_RETURN(1);
} /* rea_create_table */
@@ -664,7 +686,8 @@ static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
uint table_options,
List<create_field> &create_fields,
uint reclength,
- ulong data_offset)
+ ulong data_offset,
+ handler *handler)
{
int error;
Field::utype type;
@@ -672,19 +695,15 @@ static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
uchar *buff,*null_pos;
TABLE table;
create_field *field;
- handler *handler;
enum_check_fields old_count_cuted_fields= thd->count_cuted_fields;
DBUG_ENTER("make_empty_rec");
/* We need a table to generate columns for default values */
bzero((char*) &table,sizeof(table));
table.s= &table.share_not_to_be_used;
- handler= get_new_handler((TABLE*) 0, table_type);
- if (!handler ||
- !(buff=(uchar*) my_malloc((uint) reclength,MYF(MY_WME | MY_ZEROFILL))))
+ if (!(buff=(uchar*) my_malloc((uint) reclength,MYF(MY_WME | MY_ZEROFILL))))
{
- delete handler;
DBUG_RETURN(1);
}
@@ -741,6 +760,7 @@ static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
{
my_error(ER_INVALID_DEFAULT, MYF(0), regfield->field_name);
error= 1;
+ delete regfield; //To avoid memory leak
goto err;
}
}
@@ -770,7 +790,6 @@ static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
err:
my_free((gptr) buff,MYF(MY_FAE));
- delete handler;
thd->count_cuted_fields= old_count_cuted_fields;
DBUG_RETURN(error);
} /* make_empty_rec */
diff --git a/sql/unireg.h b/sql/unireg.h
index 8d88683241b..aafb96ef7c3 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -80,6 +80,7 @@
#define PSEUDO_TABLE_BITS (PARAM_TABLE_BIT | OUTER_REF_TABLE_BIT | \
RAND_TABLE_BIT)
#define MAX_FIELDS 4096 /* Limit in the .frm file */
+#define MAX_PARTITIONS 1024
#define MAX_SORT_MEMORY (2048*1024-MALLOC_OVERHEAD)
#define MIN_SORT_MEMORY (32*1024-MALLOC_OVERHEAD)
diff --git a/storage/Makefile.am b/storage/Makefile.am
new file mode 100644
index 00000000000..d3df68449ca
--- /dev/null
+++ b/storage/Makefile.am
@@ -0,0 +1,27 @@
+# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+# Process this file with automake to create Makefile.in
+
+AUTOMAKE_OPTIONS = foreign
+
+# These are built from source in the Docs directory
+EXTRA_DIST =
+SUBDIRS =
+DIST_SUBDIRS = . bdb heap innobase myisam myisammrg ndb
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
diff --git a/bdb/LICENSE b/storage/bdb/LICENSE
index 1cd727bfd98..1cd727bfd98 100644
--- a/bdb/LICENSE
+++ b/storage/bdb/LICENSE
diff --git a/bdb/Makefile.in b/storage/bdb/Makefile.in
index c83d40ac8b2..c83d40ac8b2 100644
--- a/bdb/Makefile.in
+++ b/storage/bdb/Makefile.in
diff --git a/bdb/btree/bt_compare.c b/storage/bdb/btree/bt_compare.c
index cbe2a1a7170..cbe2a1a7170 100644
--- a/bdb/btree/bt_compare.c
+++ b/storage/bdb/btree/bt_compare.c
diff --git a/bdb/btree/bt_conv.c b/storage/bdb/btree/bt_conv.c
index 4264b62ffdd..4264b62ffdd 100644
--- a/bdb/btree/bt_conv.c
+++ b/storage/bdb/btree/bt_conv.c
diff --git a/bdb/btree/bt_curadj.c b/storage/bdb/btree/bt_curadj.c
index 50d3d422e49..50d3d422e49 100644
--- a/bdb/btree/bt_curadj.c
+++ b/storage/bdb/btree/bt_curadj.c
diff --git a/bdb/btree/bt_cursor.c b/storage/bdb/btree/bt_cursor.c
index 14d90e8873d..14d90e8873d 100644
--- a/bdb/btree/bt_cursor.c
+++ b/storage/bdb/btree/bt_cursor.c
diff --git a/bdb/btree/bt_delete.c b/storage/bdb/btree/bt_delete.c
index 8c76ead2922..8c76ead2922 100644
--- a/bdb/btree/bt_delete.c
+++ b/storage/bdb/btree/bt_delete.c
diff --git a/bdb/btree/bt_method.c b/storage/bdb/btree/bt_method.c
index aa27ed6bab9..aa27ed6bab9 100644
--- a/bdb/btree/bt_method.c
+++ b/storage/bdb/btree/bt_method.c
diff --git a/bdb/btree/bt_open.c b/storage/bdb/btree/bt_open.c
index 24da41e9893..24da41e9893 100644
--- a/bdb/btree/bt_open.c
+++ b/storage/bdb/btree/bt_open.c
diff --git a/bdb/btree/bt_put.c b/storage/bdb/btree/bt_put.c
index 39bd2024e76..39bd2024e76 100644
--- a/bdb/btree/bt_put.c
+++ b/storage/bdb/btree/bt_put.c
diff --git a/bdb/btree/bt_rec.c b/storage/bdb/btree/bt_rec.c
index b6443547aa5..b6443547aa5 100644
--- a/bdb/btree/bt_rec.c
+++ b/storage/bdb/btree/bt_rec.c
diff --git a/bdb/btree/bt_reclaim.c b/storage/bdb/btree/bt_reclaim.c
index ae4554ea7d6..ae4554ea7d6 100644
--- a/bdb/btree/bt_reclaim.c
+++ b/storage/bdb/btree/bt_reclaim.c
diff --git a/bdb/btree/bt_recno.c b/storage/bdb/btree/bt_recno.c
index fab684f3a5f..fab684f3a5f 100644
--- a/bdb/btree/bt_recno.c
+++ b/storage/bdb/btree/bt_recno.c
diff --git a/bdb/btree/bt_rsearch.c b/storage/bdb/btree/bt_rsearch.c
index a75181b44e2..a75181b44e2 100644
--- a/bdb/btree/bt_rsearch.c
+++ b/storage/bdb/btree/bt_rsearch.c
diff --git a/bdb/btree/bt_search.c b/storage/bdb/btree/bt_search.c
index 92b2106311d..92b2106311d 100644
--- a/bdb/btree/bt_search.c
+++ b/storage/bdb/btree/bt_search.c
diff --git a/bdb/btree/bt_split.c b/storage/bdb/btree/bt_split.c
index f3302a6905f..f3302a6905f 100644
--- a/bdb/btree/bt_split.c
+++ b/storage/bdb/btree/bt_split.c
diff --git a/bdb/btree/bt_stat.c b/storage/bdb/btree/bt_stat.c
index 4428de98294..4428de98294 100644
--- a/bdb/btree/bt_stat.c
+++ b/storage/bdb/btree/bt_stat.c
diff --git a/bdb/btree/bt_upgrade.c b/storage/bdb/btree/bt_upgrade.c
index 9f92648d739..9f92648d739 100644
--- a/bdb/btree/bt_upgrade.c
+++ b/storage/bdb/btree/bt_upgrade.c
diff --git a/bdb/btree/bt_verify.c b/storage/bdb/btree/bt_verify.c
index 0cf8a47e476..0cf8a47e476 100644
--- a/bdb/btree/bt_verify.c
+++ b/storage/bdb/btree/bt_verify.c
diff --git a/bdb/btree/btree.src b/storage/bdb/btree/btree.src
index 73f4abac874..73f4abac874 100644
--- a/bdb/btree/btree.src
+++ b/storage/bdb/btree/btree.src
diff --git a/bdb/build_unix/.IGNORE_ME b/storage/bdb/build_unix/.IGNORE_ME
index 558fd496f0c..558fd496f0c 100644
--- a/bdb/build_unix/.IGNORE_ME
+++ b/storage/bdb/build_unix/.IGNORE_ME
diff --git a/bdb/build_vxworks/BerkeleyDB.wsp b/storage/bdb/build_vxworks/BerkeleyDB.wsp
index ce2e71b0eb3..ce2e71b0eb3 100644
--- a/bdb/build_vxworks/BerkeleyDB.wsp
+++ b/storage/bdb/build_vxworks/BerkeleyDB.wsp
diff --git a/bdb/build_vxworks/dbdemo/README b/storage/bdb/build_vxworks/dbdemo/README
index 1a2c7c7d073..1a2c7c7d073 100644
--- a/bdb/build_vxworks/dbdemo/README
+++ b/storage/bdb/build_vxworks/dbdemo/README
diff --git a/bdb/build_win32/Berkeley_DB.dsw b/storage/bdb/build_win32/Berkeley_DB.dsw
index 899e31ad58d..899e31ad58d 100644
--- a/bdb/build_win32/Berkeley_DB.dsw
+++ b/storage/bdb/build_win32/Berkeley_DB.dsw
diff --git a/bdb/build_win32/app_dsp.src b/storage/bdb/build_win32/app_dsp.src
index ff98d39ec79..ff98d39ec79 100644
--- a/bdb/build_win32/app_dsp.src
+++ b/storage/bdb/build_win32/app_dsp.src
diff --git a/bdb/build_win32/build_all.dsp b/storage/bdb/build_win32/build_all.dsp
index 7ae1f9bb031..7ae1f9bb031 100644
--- a/bdb/build_win32/build_all.dsp
+++ b/storage/bdb/build_win32/build_all.dsp
diff --git a/bdb/build_win32/db_java_xa.dsp b/storage/bdb/build_win32/db_java_xa.dsp
index 9c700ffeed4..9c700ffeed4 100644
--- a/bdb/build_win32/db_java_xa.dsp
+++ b/storage/bdb/build_win32/db_java_xa.dsp
diff --git a/bdb/build_win32/db_java_xaj.mak b/storage/bdb/build_win32/db_java_xaj.mak
index c2dbc920d17..c2dbc920d17 100644
--- a/bdb/build_win32/db_java_xaj.mak
+++ b/storage/bdb/build_win32/db_java_xaj.mak
diff --git a/bdb/build_win32/db_lib.dsp b/storage/bdb/build_win32/db_lib.dsp
index a7fb4157909..a7fb4157909 100644
--- a/bdb/build_win32/db_lib.dsp
+++ b/storage/bdb/build_win32/db_lib.dsp
diff --git a/bdb/build_win32/db_test.src b/storage/bdb/build_win32/db_test.src
index 73479d3856a..73479d3856a 100644
--- a/bdb/build_win32/db_test.src
+++ b/storage/bdb/build_win32/db_test.src
diff --git a/bdb/build_win32/dbkill.cpp b/storage/bdb/build_win32/dbkill.cpp
index 23dc87b0e85..23dc87b0e85 100644
--- a/bdb/build_win32/dbkill.cpp
+++ b/storage/bdb/build_win32/dbkill.cpp
diff --git a/bdb/build_win32/dllmain.c b/storage/bdb/build_win32/dllmain.c
index 70c2e849d66..70c2e849d66 100644
--- a/bdb/build_win32/dllmain.c
+++ b/storage/bdb/build_win32/dllmain.c
diff --git a/bdb/build_win32/dynamic_dsp.src b/storage/bdb/build_win32/dynamic_dsp.src
index a92906a51f4..a92906a51f4 100644
--- a/bdb/build_win32/dynamic_dsp.src
+++ b/storage/bdb/build_win32/dynamic_dsp.src
diff --git a/bdb/build_win32/java_dsp.src b/storage/bdb/build_win32/java_dsp.src
index 15941bcab67..15941bcab67 100644
--- a/bdb/build_win32/java_dsp.src
+++ b/storage/bdb/build_win32/java_dsp.src
diff --git a/bdb/build_win32/libdb_tcl.def b/storage/bdb/build_win32/libdb_tcl.def
index b6323c66bc6..b6323c66bc6 100644
--- a/bdb/build_win32/libdb_tcl.def
+++ b/storage/bdb/build_win32/libdb_tcl.def
diff --git a/bdb/build_win32/libdbrc.src b/storage/bdb/build_win32/libdbrc.src
index 3e5d8deec6f..3e5d8deec6f 100644
--- a/bdb/build_win32/libdbrc.src
+++ b/storage/bdb/build_win32/libdbrc.src
diff --git a/bdb/build_win32/srcfile_dsp.src b/storage/bdb/build_win32/srcfile_dsp.src
index 572350e6356..572350e6356 100644
--- a/bdb/build_win32/srcfile_dsp.src
+++ b/storage/bdb/build_win32/srcfile_dsp.src
diff --git a/bdb/build_win32/static_dsp.src b/storage/bdb/build_win32/static_dsp.src
index 0c66c851025..0c66c851025 100644
--- a/bdb/build_win32/static_dsp.src
+++ b/storage/bdb/build_win32/static_dsp.src
diff --git a/bdb/build_win32/tcl_dsp.src b/storage/bdb/build_win32/tcl_dsp.src
index 4de41e6934e..4de41e6934e 100644
--- a/bdb/build_win32/tcl_dsp.src
+++ b/storage/bdb/build_win32/tcl_dsp.src
diff --git a/bdb/clib/getcwd.c b/storage/bdb/clib/getcwd.c
index bae50dfe90c..bae50dfe90c 100644
--- a/bdb/clib/getcwd.c
+++ b/storage/bdb/clib/getcwd.c
diff --git a/bdb/clib/getopt.c b/storage/bdb/clib/getopt.c
index 3f6659ea6e6..3f6659ea6e6 100644
--- a/bdb/clib/getopt.c
+++ b/storage/bdb/clib/getopt.c
diff --git a/bdb/clib/memcmp.c b/storage/bdb/clib/memcmp.c
index 979badaef30..979badaef30 100644
--- a/bdb/clib/memcmp.c
+++ b/storage/bdb/clib/memcmp.c
diff --git a/bdb/clib/memmove.c b/storage/bdb/clib/memmove.c
index 632d50788da..632d50788da 100644
--- a/bdb/clib/memmove.c
+++ b/storage/bdb/clib/memmove.c
diff --git a/bdb/clib/raise.c b/storage/bdb/clib/raise.c
index fcf3bbcbd7f..fcf3bbcbd7f 100644
--- a/bdb/clib/raise.c
+++ b/storage/bdb/clib/raise.c
diff --git a/bdb/clib/snprintf.c b/storage/bdb/clib/snprintf.c
index fa1a63425e8..fa1a63425e8 100644
--- a/bdb/clib/snprintf.c
+++ b/storage/bdb/clib/snprintf.c
diff --git a/bdb/clib/strcasecmp.c b/storage/bdb/clib/strcasecmp.c
index d5ce6d76d5f..d5ce6d76d5f 100644
--- a/bdb/clib/strcasecmp.c
+++ b/storage/bdb/clib/strcasecmp.c
diff --git a/bdb/clib/strdup.c b/storage/bdb/clib/strdup.c
index e68623f1407..e68623f1407 100644
--- a/bdb/clib/strdup.c
+++ b/storage/bdb/clib/strdup.c
diff --git a/bdb/clib/strerror.c b/storage/bdb/clib/strerror.c
index 06c28946b88..06c28946b88 100644
--- a/bdb/clib/strerror.c
+++ b/storage/bdb/clib/strerror.c
diff --git a/bdb/clib/vsnprintf.c b/storage/bdb/clib/vsnprintf.c
index 4ffea8cb0ad..4ffea8cb0ad 100644
--- a/bdb/clib/vsnprintf.c
+++ b/storage/bdb/clib/vsnprintf.c
diff --git a/bdb/common/db_byteorder.c b/storage/bdb/common/db_byteorder.c
index d42d8e6a958..d42d8e6a958 100644
--- a/bdb/common/db_byteorder.c
+++ b/storage/bdb/common/db_byteorder.c
diff --git a/bdb/common/db_err.c b/storage/bdb/common/db_err.c
index 7c9ee3c4fde..7c9ee3c4fde 100644
--- a/bdb/common/db_err.c
+++ b/storage/bdb/common/db_err.c
diff --git a/bdb/common/db_getlong.c b/storage/bdb/common/db_getlong.c
index 6ba8ebfcdaa..6ba8ebfcdaa 100644
--- a/bdb/common/db_getlong.c
+++ b/storage/bdb/common/db_getlong.c
diff --git a/bdb/common/db_idspace.c b/storage/bdb/common/db_idspace.c
index 588ffd9fca9..588ffd9fca9 100644
--- a/bdb/common/db_idspace.c
+++ b/storage/bdb/common/db_idspace.c
diff --git a/bdb/common/db_log2.c b/storage/bdb/common/db_log2.c
index cdd87dda11d..cdd87dda11d 100644
--- a/bdb/common/db_log2.c
+++ b/storage/bdb/common/db_log2.c
diff --git a/bdb/common/util_arg.c b/storage/bdb/common/util_arg.c
index e034e3bd194..e034e3bd194 100644
--- a/bdb/common/util_arg.c
+++ b/storage/bdb/common/util_arg.c
diff --git a/bdb/common/util_cache.c b/storage/bdb/common/util_cache.c
index 5ca88665cc7..5ca88665cc7 100644
--- a/bdb/common/util_cache.c
+++ b/storage/bdb/common/util_cache.c
diff --git a/bdb/common/util_log.c b/storage/bdb/common/util_log.c
index ae215fca64a..ae215fca64a 100644
--- a/bdb/common/util_log.c
+++ b/storage/bdb/common/util_log.c
diff --git a/bdb/common/util_sig.c b/storage/bdb/common/util_sig.c
index 9714427ad33..9714427ad33 100644
--- a/bdb/common/util_sig.c
+++ b/storage/bdb/common/util_sig.c
diff --git a/bdb/cxx/cxx_db.cpp b/storage/bdb/cxx/cxx_db.cpp
index 7e50a9b3f27..7e50a9b3f27 100644
--- a/bdb/cxx/cxx_db.cpp
+++ b/storage/bdb/cxx/cxx_db.cpp
diff --git a/bdb/cxx/cxx_dbc.cpp b/storage/bdb/cxx/cxx_dbc.cpp
index 4d5844f922f..4d5844f922f 100644
--- a/bdb/cxx/cxx_dbc.cpp
+++ b/storage/bdb/cxx/cxx_dbc.cpp
diff --git a/bdb/cxx/cxx_dbt.cpp b/storage/bdb/cxx/cxx_dbt.cpp
index 7a4224503ee..7a4224503ee 100644
--- a/bdb/cxx/cxx_dbt.cpp
+++ b/storage/bdb/cxx/cxx_dbt.cpp
diff --git a/bdb/cxx/cxx_env.cpp b/storage/bdb/cxx/cxx_env.cpp
index c78c6e9fa47..c78c6e9fa47 100644
--- a/bdb/cxx/cxx_env.cpp
+++ b/storage/bdb/cxx/cxx_env.cpp
diff --git a/bdb/cxx/cxx_except.cpp b/storage/bdb/cxx/cxx_except.cpp
index 40fdeae69d6..40fdeae69d6 100644
--- a/bdb/cxx/cxx_except.cpp
+++ b/storage/bdb/cxx/cxx_except.cpp
diff --git a/bdb/cxx/cxx_lock.cpp b/storage/bdb/cxx/cxx_lock.cpp
index 446eba49e27..446eba49e27 100644
--- a/bdb/cxx/cxx_lock.cpp
+++ b/storage/bdb/cxx/cxx_lock.cpp
diff --git a/bdb/cxx/cxx_logc.cpp b/storage/bdb/cxx/cxx_logc.cpp
index d1fe83dd58b..d1fe83dd58b 100644
--- a/bdb/cxx/cxx_logc.cpp
+++ b/storage/bdb/cxx/cxx_logc.cpp
diff --git a/bdb/cxx/cxx_mpool.cpp b/storage/bdb/cxx/cxx_mpool.cpp
index 3eb78d03ff4..3eb78d03ff4 100644
--- a/bdb/cxx/cxx_mpool.cpp
+++ b/storage/bdb/cxx/cxx_mpool.cpp
diff --git a/bdb/cxx/cxx_txn.cpp b/storage/bdb/cxx/cxx_txn.cpp
index b04077c0f5b..b04077c0f5b 100644
--- a/bdb/cxx/cxx_txn.cpp
+++ b/storage/bdb/cxx/cxx_txn.cpp
diff --git a/bdb/db/crdel.src b/storage/bdb/db/crdel.src
index d89fa7a0382..d89fa7a0382 100644
--- a/bdb/db/crdel.src
+++ b/storage/bdb/db/crdel.src
diff --git a/bdb/db/crdel_rec.c b/storage/bdb/db/crdel_rec.c
index 542a0c358dd..542a0c358dd 100644
--- a/bdb/db/crdel_rec.c
+++ b/storage/bdb/db/crdel_rec.c
diff --git a/bdb/db/db.c b/storage/bdb/db/db.c
index 986167d5ade..986167d5ade 100644
--- a/bdb/db/db.c
+++ b/storage/bdb/db/db.c
diff --git a/bdb/db/db.src b/storage/bdb/db/db.src
index 414321fcbbd..414321fcbbd 100644
--- a/bdb/db/db.src
+++ b/storage/bdb/db/db.src
diff --git a/bdb/db/db_am.c b/storage/bdb/db/db_am.c
index cf6ef18549b..cf6ef18549b 100644
--- a/bdb/db/db_am.c
+++ b/storage/bdb/db/db_am.c
diff --git a/bdb/db/db_cam.c b/storage/bdb/db/db_cam.c
index 4de3467d4aa..4de3467d4aa 100644
--- a/bdb/db/db_cam.c
+++ b/storage/bdb/db/db_cam.c
diff --git a/bdb/db/db_conv.c b/storage/bdb/db/db_conv.c
index f731c82d85e..f731c82d85e 100644
--- a/bdb/db/db_conv.c
+++ b/storage/bdb/db/db_conv.c
diff --git a/bdb/db/db_dispatch.c b/storage/bdb/db/db_dispatch.c
index 2cf29ec2f33..2cf29ec2f33 100644
--- a/bdb/db/db_dispatch.c
+++ b/storage/bdb/db/db_dispatch.c
diff --git a/bdb/db/db_dup.c b/storage/bdb/db/db_dup.c
index 2d33d79153f..2d33d79153f 100644
--- a/bdb/db/db_dup.c
+++ b/storage/bdb/db/db_dup.c
diff --git a/bdb/db/db_iface.c b/storage/bdb/db/db_iface.c
index b518c3b14b2..b518c3b14b2 100644
--- a/bdb/db/db_iface.c
+++ b/storage/bdb/db/db_iface.c
diff --git a/bdb/db/db_join.c b/storage/bdb/db/db_join.c
index 6281b1a8383..6281b1a8383 100644
--- a/bdb/db/db_join.c
+++ b/storage/bdb/db/db_join.c
diff --git a/bdb/db/db_meta.c b/storage/bdb/db/db_meta.c
index 015ef5c8fc7..015ef5c8fc7 100644
--- a/bdb/db/db_meta.c
+++ b/storage/bdb/db/db_meta.c
diff --git a/bdb/db/db_method.c b/storage/bdb/db/db_method.c
index 14712180df0..14712180df0 100644
--- a/bdb/db/db_method.c
+++ b/storage/bdb/db/db_method.c
diff --git a/bdb/db/db_open.c b/storage/bdb/db/db_open.c
index 8352525361f..8352525361f 100644
--- a/bdb/db/db_open.c
+++ b/storage/bdb/db/db_open.c
diff --git a/bdb/db/db_overflow.c b/storage/bdb/db/db_overflow.c
index 27dcb41a2ff..27dcb41a2ff 100644
--- a/bdb/db/db_overflow.c
+++ b/storage/bdb/db/db_overflow.c
diff --git a/bdb/db/db_pr.c b/storage/bdb/db/db_pr.c
index 235e7187f7c..235e7187f7c 100644
--- a/bdb/db/db_pr.c
+++ b/storage/bdb/db/db_pr.c
diff --git a/bdb/db/db_rec.c b/storage/bdb/db/db_rec.c
index 303ab2fe1d4..303ab2fe1d4 100644
--- a/bdb/db/db_rec.c
+++ b/storage/bdb/db/db_rec.c
diff --git a/bdb/db/db_reclaim.c b/storage/bdb/db/db_reclaim.c
index 9aa39bcfa9b..9aa39bcfa9b 100644
--- a/bdb/db/db_reclaim.c
+++ b/storage/bdb/db/db_reclaim.c
diff --git a/bdb/db/db_remove.c b/storage/bdb/db/db_remove.c
index ef11c342555..ef11c342555 100644
--- a/bdb/db/db_remove.c
+++ b/storage/bdb/db/db_remove.c
diff --git a/bdb/db/db_rename.c b/storage/bdb/db/db_rename.c
index 87f88232cda..87f88232cda 100644
--- a/bdb/db/db_rename.c
+++ b/storage/bdb/db/db_rename.c
diff --git a/bdb/db/db_ret.c b/storage/bdb/db/db_ret.c
index b1af7b4ffeb..b1af7b4ffeb 100644
--- a/bdb/db/db_ret.c
+++ b/storage/bdb/db/db_ret.c
diff --git a/bdb/db/db_truncate.c b/storage/bdb/db/db_truncate.c
index 49546ae51b9..49546ae51b9 100644
--- a/bdb/db/db_truncate.c
+++ b/storage/bdb/db/db_truncate.c
diff --git a/bdb/db/db_upg.c b/storage/bdb/db/db_upg.c
index c0eb72f3713..c0eb72f3713 100644
--- a/bdb/db/db_upg.c
+++ b/storage/bdb/db/db_upg.c
diff --git a/bdb/db/db_upg_opd.c b/storage/bdb/db/db_upg_opd.c
index f410b797bff..f410b797bff 100644
--- a/bdb/db/db_upg_opd.c
+++ b/storage/bdb/db/db_upg_opd.c
diff --git a/bdb/db/db_vrfy.c b/storage/bdb/db/db_vrfy.c
index 1bbecdbd87a..1bbecdbd87a 100644
--- a/bdb/db/db_vrfy.c
+++ b/storage/bdb/db/db_vrfy.c
diff --git a/bdb/db/db_vrfyutil.c b/storage/bdb/db/db_vrfyutil.c
index 44344ceed11..44344ceed11 100644
--- a/bdb/db/db_vrfyutil.c
+++ b/storage/bdb/db/db_vrfyutil.c
diff --git a/bdb/db185/db185.c b/storage/bdb/db185/db185.c
index 99d37bcf341..99d37bcf341 100644
--- a/bdb/db185/db185.c
+++ b/storage/bdb/db185/db185.c
diff --git a/bdb/db185/db185_int.in b/storage/bdb/db185/db185_int.in
index a4a3ce19c17..a4a3ce19c17 100644
--- a/bdb/db185/db185_int.in
+++ b/storage/bdb/db185/db185_int.in
diff --git a/bdb/db_archive/db_archive.c b/storage/bdb/db_archive/db_archive.c
index dc8718e4c03..dc8718e4c03 100644
--- a/bdb/db_archive/db_archive.c
+++ b/storage/bdb/db_archive/db_archive.c
diff --git a/bdb/db_checkpoint/db_checkpoint.c b/storage/bdb/db_checkpoint/db_checkpoint.c
index a59572c5f76..a59572c5f76 100644
--- a/bdb/db_checkpoint/db_checkpoint.c
+++ b/storage/bdb/db_checkpoint/db_checkpoint.c
diff --git a/bdb/db_deadlock/db_deadlock.c b/storage/bdb/db_deadlock/db_deadlock.c
index 523918b9ea4..523918b9ea4 100644
--- a/bdb/db_deadlock/db_deadlock.c
+++ b/storage/bdb/db_deadlock/db_deadlock.c
diff --git a/bdb/db_dump/db_dump.c b/storage/bdb/db_dump/db_dump.c
index 143884a3fa8..143884a3fa8 100644
--- a/bdb/db_dump/db_dump.c
+++ b/storage/bdb/db_dump/db_dump.c
diff --git a/bdb/db_dump185/db_dump185.c b/storage/bdb/db_dump185/db_dump185.c
index 97164f34a9a..97164f34a9a 100644
--- a/bdb/db_dump185/db_dump185.c
+++ b/storage/bdb/db_dump185/db_dump185.c
diff --git a/bdb/db_load/db_load.c b/storage/bdb/db_load/db_load.c
index d27fca04ec0..d27fca04ec0 100644
--- a/bdb/db_load/db_load.c
+++ b/storage/bdb/db_load/db_load.c
diff --git a/bdb/db_printlog/README b/storage/bdb/db_printlog/README
index d59f4c77f55..d59f4c77f55 100644
--- a/bdb/db_printlog/README
+++ b/storage/bdb/db_printlog/README
diff --git a/bdb/db_printlog/commit.awk b/storage/bdb/db_printlog/commit.awk
index 66391d3fb63..66391d3fb63 100644
--- a/bdb/db_printlog/commit.awk
+++ b/storage/bdb/db_printlog/commit.awk
diff --git a/bdb/db_printlog/count.awk b/storage/bdb/db_printlog/count.awk
index 1d5a291950f..1d5a291950f 100644
--- a/bdb/db_printlog/count.awk
+++ b/storage/bdb/db_printlog/count.awk
diff --git a/bdb/db_printlog/db_printlog.c b/storage/bdb/db_printlog/db_printlog.c
index af6d00d593a..af6d00d593a 100644
--- a/bdb/db_printlog/db_printlog.c
+++ b/storage/bdb/db_printlog/db_printlog.c
diff --git a/bdb/db_printlog/dbname.awk b/storage/bdb/db_printlog/dbname.awk
index 47955994579..47955994579 100644
--- a/bdb/db_printlog/dbname.awk
+++ b/storage/bdb/db_printlog/dbname.awk
diff --git a/bdb/db_printlog/fileid.awk b/storage/bdb/db_printlog/fileid.awk
index 020644039ab..020644039ab 100644
--- a/bdb/db_printlog/fileid.awk
+++ b/storage/bdb/db_printlog/fileid.awk
diff --git a/bdb/db_printlog/logstat.awk b/storage/bdb/db_printlog/logstat.awk
index 1009343eba4..1009343eba4 100644
--- a/bdb/db_printlog/logstat.awk
+++ b/storage/bdb/db_printlog/logstat.awk
diff --git a/bdb/db_printlog/pgno.awk b/storage/bdb/db_printlog/pgno.awk
index 289fa853bc4..289fa853bc4 100644
--- a/bdb/db_printlog/pgno.awk
+++ b/storage/bdb/db_printlog/pgno.awk
diff --git a/bdb/db_printlog/range.awk b/storage/bdb/db_printlog/range.awk
index 7abb410b40f..7abb410b40f 100644
--- a/bdb/db_printlog/range.awk
+++ b/storage/bdb/db_printlog/range.awk
diff --git a/bdb/db_printlog/rectype.awk b/storage/bdb/db_printlog/rectype.awk
index 7f7b2f5ee15..7f7b2f5ee15 100644
--- a/bdb/db_printlog/rectype.awk
+++ b/storage/bdb/db_printlog/rectype.awk
diff --git a/bdb/db_printlog/status.awk b/storage/bdb/db_printlog/status.awk
index 13df0b6194a..13df0b6194a 100644
--- a/bdb/db_printlog/status.awk
+++ b/storage/bdb/db_printlog/status.awk
diff --git a/bdb/db_printlog/txn.awk b/storage/bdb/db_printlog/txn.awk
index be8c44e1092..be8c44e1092 100644
--- a/bdb/db_printlog/txn.awk
+++ b/storage/bdb/db_printlog/txn.awk
diff --git a/bdb/db_recover/db_recover.c b/storage/bdb/db_recover/db_recover.c
index b6414267f93..b6414267f93 100644
--- a/bdb/db_recover/db_recover.c
+++ b/storage/bdb/db_recover/db_recover.c
diff --git a/bdb/db_stat/db_stat.c b/storage/bdb/db_stat/db_stat.c
index a2b01b71e0a..a2b01b71e0a 100644
--- a/bdb/db_stat/db_stat.c
+++ b/storage/bdb/db_stat/db_stat.c
diff --git a/bdb/db_upgrade/db_upgrade.c b/storage/bdb/db_upgrade/db_upgrade.c
index f46b5eabc4e..f46b5eabc4e 100644
--- a/bdb/db_upgrade/db_upgrade.c
+++ b/storage/bdb/db_upgrade/db_upgrade.c
diff --git a/bdb/db_verify/db_verify.c b/storage/bdb/db_verify/db_verify.c
index 8d63a20e7bc..8d63a20e7bc 100644
--- a/bdb/db_verify/db_verify.c
+++ b/storage/bdb/db_verify/db_verify.c
diff --git a/bdb/dbinc/btree.h b/storage/bdb/dbinc/btree.h
index 54da9c5b208..54da9c5b208 100644
--- a/bdb/dbinc/btree.h
+++ b/storage/bdb/dbinc/btree.h
diff --git a/bdb/dbinc/crypto.h b/storage/bdb/dbinc/crypto.h
index 92fad098a4a..92fad098a4a 100644
--- a/bdb/dbinc/crypto.h
+++ b/storage/bdb/dbinc/crypto.h
diff --git a/bdb/dbinc/cxx_common.h b/storage/bdb/dbinc/cxx_common.h
index e5cb3a9aef4..e5cb3a9aef4 100644
--- a/bdb/dbinc/cxx_common.h
+++ b/storage/bdb/dbinc/cxx_common.h
diff --git a/bdb/dbinc/cxx_except.h b/storage/bdb/dbinc/cxx_except.h
index f9bf4f859f8..f9bf4f859f8 100644
--- a/bdb/dbinc/cxx_except.h
+++ b/storage/bdb/dbinc/cxx_except.h
diff --git a/bdb/dbinc/cxx_int.h b/storage/bdb/dbinc/cxx_int.h
index 9af3979d9f1..9af3979d9f1 100644
--- a/bdb/dbinc/cxx_int.h
+++ b/storage/bdb/dbinc/cxx_int.h
diff --git a/bdb/dbinc/db.in b/storage/bdb/dbinc/db.in
index 208de3bd622..208de3bd622 100644
--- a/bdb/dbinc/db.in
+++ b/storage/bdb/dbinc/db.in
diff --git a/bdb/dbinc/db_185.in b/storage/bdb/dbinc/db_185.in
index 86e2290c304..86e2290c304 100644
--- a/bdb/dbinc/db_185.in
+++ b/storage/bdb/dbinc/db_185.in
diff --git a/bdb/dbinc/db_am.h b/storage/bdb/dbinc/db_am.h
index c5aa424255d..c5aa424255d 100644
--- a/bdb/dbinc/db_am.h
+++ b/storage/bdb/dbinc/db_am.h
diff --git a/bdb/dbinc/db_cxx.in b/storage/bdb/dbinc/db_cxx.in
index 6752b36ec42..6752b36ec42 100644
--- a/bdb/dbinc/db_cxx.in
+++ b/storage/bdb/dbinc/db_cxx.in
diff --git a/bdb/dbinc/db_dispatch.h b/storage/bdb/dbinc/db_dispatch.h
index 283eb1e95de..283eb1e95de 100644
--- a/bdb/dbinc/db_dispatch.h
+++ b/storage/bdb/dbinc/db_dispatch.h
diff --git a/bdb/dbinc/db_int.in b/storage/bdb/dbinc/db_int.in
index 2f46293a65d..2f46293a65d 100644
--- a/bdb/dbinc/db_int.in
+++ b/storage/bdb/dbinc/db_int.in
diff --git a/bdb/dbinc/db_join.h b/storage/bdb/dbinc/db_join.h
index 487ce3eebbb..487ce3eebbb 100644
--- a/bdb/dbinc/db_join.h
+++ b/storage/bdb/dbinc/db_join.h
diff --git a/bdb/dbinc/db_page.h b/storage/bdb/dbinc/db_page.h
index 97497556fd9..97497556fd9 100644
--- a/bdb/dbinc/db_page.h
+++ b/storage/bdb/dbinc/db_page.h
diff --git a/bdb/dbinc/db_server_int.h b/storage/bdb/dbinc/db_server_int.h
index efec539b2f8..efec539b2f8 100644
--- a/bdb/dbinc/db_server_int.h
+++ b/storage/bdb/dbinc/db_server_int.h
diff --git a/bdb/dbinc/db_shash.h b/storage/bdb/dbinc/db_shash.h
index 2c54d6145c5..2c54d6145c5 100644
--- a/bdb/dbinc/db_shash.h
+++ b/storage/bdb/dbinc/db_shash.h
diff --git a/bdb/dbinc/db_swap.h b/storage/bdb/dbinc/db_swap.h
index d5aad65385e..d5aad65385e 100644
--- a/bdb/dbinc/db_swap.h
+++ b/storage/bdb/dbinc/db_swap.h
diff --git a/bdb/dbinc/db_upgrade.h b/storage/bdb/dbinc/db_upgrade.h
index 3ccba810889..3ccba810889 100644
--- a/bdb/dbinc/db_upgrade.h
+++ b/storage/bdb/dbinc/db_upgrade.h
diff --git a/bdb/dbinc/db_verify.h b/storage/bdb/dbinc/db_verify.h
index 949c9a2a6a1..949c9a2a6a1 100644
--- a/bdb/dbinc/db_verify.h
+++ b/storage/bdb/dbinc/db_verify.h
diff --git a/bdb/dbinc/debug.h b/storage/bdb/dbinc/debug.h
index 21f80387ccc..21f80387ccc 100644
--- a/bdb/dbinc/debug.h
+++ b/storage/bdb/dbinc/debug.h
diff --git a/bdb/dbinc/fop.h b/storage/bdb/dbinc/fop.h
index c438ef7ef40..c438ef7ef40 100644
--- a/bdb/dbinc/fop.h
+++ b/storage/bdb/dbinc/fop.h
diff --git a/bdb/dbinc/globals.h b/storage/bdb/dbinc/globals.h
index 3441ade2ea9..3441ade2ea9 100644
--- a/bdb/dbinc/globals.h
+++ b/storage/bdb/dbinc/globals.h
diff --git a/bdb/dbinc/hash.h b/storage/bdb/dbinc/hash.h
index 98289735fc4..98289735fc4 100644
--- a/bdb/dbinc/hash.h
+++ b/storage/bdb/dbinc/hash.h
diff --git a/bdb/dbinc/hmac.h b/storage/bdb/dbinc/hmac.h
index 16f61fb58ad..16f61fb58ad 100644
--- a/bdb/dbinc/hmac.h
+++ b/storage/bdb/dbinc/hmac.h
diff --git a/bdb/dbinc/lock.h b/storage/bdb/dbinc/lock.h
index 7ddc9ce9988..7ddc9ce9988 100644
--- a/bdb/dbinc/lock.h
+++ b/storage/bdb/dbinc/lock.h
diff --git a/bdb/dbinc/log.h b/storage/bdb/dbinc/log.h
index 434994528ea..434994528ea 100644
--- a/bdb/dbinc/log.h
+++ b/storage/bdb/dbinc/log.h
diff --git a/bdb/dbinc/mp.h b/storage/bdb/dbinc/mp.h
index 5c805b92364..5c805b92364 100644
--- a/bdb/dbinc/mp.h
+++ b/storage/bdb/dbinc/mp.h
diff --git a/bdb/dbinc/mutex.h b/storage/bdb/dbinc/mutex.h
index 41bb1b4bb59..41bb1b4bb59 100644
--- a/bdb/dbinc/mutex.h
+++ b/storage/bdb/dbinc/mutex.h
diff --git a/bdb/dbinc/os.h b/storage/bdb/dbinc/os.h
index 01ca0ac470d..01ca0ac470d 100644
--- a/bdb/dbinc/os.h
+++ b/storage/bdb/dbinc/os.h
diff --git a/bdb/dbinc/qam.h b/storage/bdb/dbinc/qam.h
index 0306ed07d2a..0306ed07d2a 100644
--- a/bdb/dbinc/qam.h
+++ b/storage/bdb/dbinc/qam.h
diff --git a/bdb/dbinc/queue.h b/storage/bdb/dbinc/queue.h
index 8d4a771add6..8d4a771add6 100644
--- a/bdb/dbinc/queue.h
+++ b/storage/bdb/dbinc/queue.h
diff --git a/bdb/dbinc/region.h b/storage/bdb/dbinc/region.h
index 9ee6c81062f..9ee6c81062f 100644
--- a/bdb/dbinc/region.h
+++ b/storage/bdb/dbinc/region.h
diff --git a/bdb/dbinc/rep.h b/storage/bdb/dbinc/rep.h
index 1e315494c87..1e315494c87 100644
--- a/bdb/dbinc/rep.h
+++ b/storage/bdb/dbinc/rep.h
diff --git a/bdb/dbinc/shqueue.h b/storage/bdb/dbinc/shqueue.h
index 47fdf12ac92..47fdf12ac92 100644
--- a/bdb/dbinc/shqueue.h
+++ b/storage/bdb/dbinc/shqueue.h
diff --git a/bdb/dbinc/tcl_db.h b/storage/bdb/dbinc/tcl_db.h
index 8c04d545295..8c04d545295 100644
--- a/bdb/dbinc/tcl_db.h
+++ b/storage/bdb/dbinc/tcl_db.h
diff --git a/bdb/dbinc/txn.h b/storage/bdb/dbinc/txn.h
index 31b00a6ba74..31b00a6ba74 100644
--- a/bdb/dbinc/txn.h
+++ b/storage/bdb/dbinc/txn.h
diff --git a/bdb/dbinc/xa.h b/storage/bdb/dbinc/xa.h
index 64bdac8c914..64bdac8c914 100644
--- a/bdb/dbinc/xa.h
+++ b/storage/bdb/dbinc/xa.h
diff --git a/bdb/dbm/dbm.c b/storage/bdb/dbm/dbm.c
index 3aa6fff6982..3aa6fff6982 100644
--- a/bdb/dbm/dbm.c
+++ b/storage/bdb/dbm/dbm.c
diff --git a/bdb/dbreg/dbreg.c b/storage/bdb/dbreg/dbreg.c
index 289fe67ed50..289fe67ed50 100644
--- a/bdb/dbreg/dbreg.c
+++ b/storage/bdb/dbreg/dbreg.c
diff --git a/bdb/dbreg/dbreg.src b/storage/bdb/dbreg/dbreg.src
index 18429471e82..18429471e82 100644
--- a/bdb/dbreg/dbreg.src
+++ b/storage/bdb/dbreg/dbreg.src
diff --git a/bdb/dbreg/dbreg_rec.c b/storage/bdb/dbreg/dbreg_rec.c
index ba3ba0e06d9..ba3ba0e06d9 100644
--- a/bdb/dbreg/dbreg_rec.c
+++ b/storage/bdb/dbreg/dbreg_rec.c
diff --git a/bdb/dbreg/dbreg_util.c b/storage/bdb/dbreg/dbreg_util.c
index 0db5c640adb..0db5c640adb 100644
--- a/bdb/dbreg/dbreg_util.c
+++ b/storage/bdb/dbreg/dbreg_util.c
diff --git a/bdb/dist/Makefile.in b/storage/bdb/dist/Makefile.in
index a7cc0e11f34..a7cc0e11f34 100644
--- a/bdb/dist/Makefile.in
+++ b/storage/bdb/dist/Makefile.in
diff --git a/bdb/dist/RELEASE b/storage/bdb/dist/RELEASE
index 61151b8589c..61151b8589c 100644
--- a/bdb/dist/RELEASE
+++ b/storage/bdb/dist/RELEASE
diff --git a/bdb/dist/aclocal/config.ac b/storage/bdb/dist/aclocal/config.ac
index cd288425946..cd288425946 100644
--- a/bdb/dist/aclocal/config.ac
+++ b/storage/bdb/dist/aclocal/config.ac
diff --git a/bdb/dist/aclocal/cxx.ac b/storage/bdb/dist/aclocal/cxx.ac
index 49103cc661a..49103cc661a 100644
--- a/bdb/dist/aclocal/cxx.ac
+++ b/storage/bdb/dist/aclocal/cxx.ac
diff --git a/bdb/dist/aclocal/gcc.ac b/storage/bdb/dist/aclocal/gcc.ac
index 0949d982f17..0949d982f17 100644
--- a/bdb/dist/aclocal/gcc.ac
+++ b/storage/bdb/dist/aclocal/gcc.ac
diff --git a/bdb/dist/aclocal/libtool.ac b/storage/bdb/dist/aclocal/libtool.ac
index e99faf15e4e..e99faf15e4e 100644
--- a/bdb/dist/aclocal/libtool.ac
+++ b/storage/bdb/dist/aclocal/libtool.ac
diff --git a/bdb/dist/aclocal/mutex.ac b/storage/bdb/dist/aclocal/mutex.ac
index f3f5529c74f..f3f5529c74f 100644
--- a/bdb/dist/aclocal/mutex.ac
+++ b/storage/bdb/dist/aclocal/mutex.ac
diff --git a/bdb/dist/aclocal/options.ac b/storage/bdb/dist/aclocal/options.ac
index ba45c34dfe9..ba45c34dfe9 100644
--- a/bdb/dist/aclocal/options.ac
+++ b/storage/bdb/dist/aclocal/options.ac
diff --git a/bdb/dist/aclocal/programs.ac b/storage/bdb/dist/aclocal/programs.ac
index 7bfa1fa2646..7bfa1fa2646 100644
--- a/bdb/dist/aclocal/programs.ac
+++ b/storage/bdb/dist/aclocal/programs.ac
diff --git a/bdb/dist/aclocal/sosuffix.ac b/storage/bdb/dist/aclocal/sosuffix.ac
index 1197128293b..1197128293b 100644
--- a/bdb/dist/aclocal/sosuffix.ac
+++ b/storage/bdb/dist/aclocal/sosuffix.ac
diff --git a/bdb/dist/aclocal/tcl.ac b/storage/bdb/dist/aclocal/tcl.ac
index 80ed19c5a97..80ed19c5a97 100644
--- a/bdb/dist/aclocal/tcl.ac
+++ b/storage/bdb/dist/aclocal/tcl.ac
diff --git a/bdb/dist/aclocal/types.ac b/storage/bdb/dist/aclocal/types.ac
index db8aaac6884..db8aaac6884 100644
--- a/bdb/dist/aclocal/types.ac
+++ b/storage/bdb/dist/aclocal/types.ac
diff --git a/bdb/dist/aclocal_java/ac_check_class.ac b/storage/bdb/dist/aclocal_java/ac_check_class.ac
index 915198af567..915198af567 100644
--- a/bdb/dist/aclocal_java/ac_check_class.ac
+++ b/storage/bdb/dist/aclocal_java/ac_check_class.ac
diff --git a/bdb/dist/aclocal_java/ac_check_classpath.ac b/storage/bdb/dist/aclocal_java/ac_check_classpath.ac
index 4a78d0f8785..4a78d0f8785 100644
--- a/bdb/dist/aclocal_java/ac_check_classpath.ac
+++ b/storage/bdb/dist/aclocal_java/ac_check_classpath.ac
diff --git a/bdb/dist/aclocal_java/ac_check_junit.ac b/storage/bdb/dist/aclocal_java/ac_check_junit.ac
index 3b81d1dc3fc..3b81d1dc3fc 100644
--- a/bdb/dist/aclocal_java/ac_check_junit.ac
+++ b/storage/bdb/dist/aclocal_java/ac_check_junit.ac
diff --git a/bdb/dist/aclocal_java/ac_check_rqrd_class.ac b/storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac
index ab62e33c887..ab62e33c887 100644
--- a/bdb/dist/aclocal_java/ac_check_rqrd_class.ac
+++ b/storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac
diff --git a/bdb/dist/aclocal_java/ac_java_options.ac b/storage/bdb/dist/aclocal_java/ac_java_options.ac
index 567afca7fa5..567afca7fa5 100644
--- a/bdb/dist/aclocal_java/ac_java_options.ac
+++ b/storage/bdb/dist/aclocal_java/ac_java_options.ac
diff --git a/bdb/dist/aclocal_java/ac_jni_include_dirs.ac b/storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac
index 65cfbbfd13e..65cfbbfd13e 100644
--- a/bdb/dist/aclocal_java/ac_jni_include_dirs.ac
+++ b/storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac
diff --git a/bdb/dist/aclocal_java/ac_prog_jar.ac b/storage/bdb/dist/aclocal_java/ac_prog_jar.ac
index 9dfa1be6dad..9dfa1be6dad 100644
--- a/bdb/dist/aclocal_java/ac_prog_jar.ac
+++ b/storage/bdb/dist/aclocal_java/ac_prog_jar.ac
diff --git a/bdb/dist/aclocal_java/ac_prog_java.ac b/storage/bdb/dist/aclocal_java/ac_prog_java.ac
index 8cb24445132..8cb24445132 100644
--- a/bdb/dist/aclocal_java/ac_prog_java.ac
+++ b/storage/bdb/dist/aclocal_java/ac_prog_java.ac
diff --git a/bdb/dist/aclocal_java/ac_prog_java_works.ac b/storage/bdb/dist/aclocal_java/ac_prog_java_works.ac
index 36acd2676fa..36acd2676fa 100644
--- a/bdb/dist/aclocal_java/ac_prog_java_works.ac
+++ b/storage/bdb/dist/aclocal_java/ac_prog_java_works.ac
diff --git a/bdb/dist/aclocal_java/ac_prog_javac.ac b/storage/bdb/dist/aclocal_java/ac_prog_javac.ac
index 5ded7d1b7e6..5ded7d1b7e6 100644
--- a/bdb/dist/aclocal_java/ac_prog_javac.ac
+++ b/storage/bdb/dist/aclocal_java/ac_prog_javac.ac
diff --git a/bdb/dist/aclocal_java/ac_prog_javac_works.ac b/storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac
index 139a99f989b..139a99f989b 100644
--- a/bdb/dist/aclocal_java/ac_prog_javac_works.ac
+++ b/storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac
diff --git a/bdb/dist/aclocal_java/ac_prog_javadoc.ac b/storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac
index 5154d3f1f3b..5154d3f1f3b 100644
--- a/bdb/dist/aclocal_java/ac_prog_javadoc.ac
+++ b/storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac
diff --git a/bdb/dist/aclocal_java/ac_prog_javah.ac b/storage/bdb/dist/aclocal_java/ac_prog_javah.ac
index 1b16d9e24e5..1b16d9e24e5 100644
--- a/bdb/dist/aclocal_java/ac_prog_javah.ac
+++ b/storage/bdb/dist/aclocal_java/ac_prog_javah.ac
diff --git a/bdb/dist/aclocal_java/ac_try_compile_java.ac b/storage/bdb/dist/aclocal_java/ac_try_compile_java.ac
index 775569ba054..775569ba054 100644
--- a/bdb/dist/aclocal_java/ac_try_compile_java.ac
+++ b/storage/bdb/dist/aclocal_java/ac_try_compile_java.ac
diff --git a/bdb/dist/aclocal_java/ac_try_run_javac.ac b/storage/bdb/dist/aclocal_java/ac_try_run_javac.ac
index cf91306aff6..cf91306aff6 100644
--- a/bdb/dist/aclocal_java/ac_try_run_javac.ac
+++ b/storage/bdb/dist/aclocal_java/ac_try_run_javac.ac
diff --git a/bdb/dist/buildrel b/storage/bdb/dist/buildrel
index b796169c719..b796169c719 100644
--- a/bdb/dist/buildrel
+++ b/storage/bdb/dist/buildrel
diff --git a/bdb/dist/config.guess b/storage/bdb/dist/config.guess
index fd30ab0314c..fd30ab0314c 100755
--- a/bdb/dist/config.guess
+++ b/storage/bdb/dist/config.guess
diff --git a/bdb/dist/config.sub b/storage/bdb/dist/config.sub
index 9ff085efaf7..9ff085efaf7 100755
--- a/bdb/dist/config.sub
+++ b/storage/bdb/dist/config.sub
diff --git a/bdb/dist/configure.ac b/storage/bdb/dist/configure.ac
index 0bf53972f54..0bf53972f54 100644
--- a/bdb/dist/configure.ac
+++ b/storage/bdb/dist/configure.ac
diff --git a/bdb/dist/db.ecd.in b/storage/bdb/dist/db.ecd.in
index 92a6a090716..92a6a090716 100644
--- a/bdb/dist/db.ecd.in
+++ b/storage/bdb/dist/db.ecd.in
diff --git a/bdb/dist/db.spec.in b/storage/bdb/dist/db.spec.in
index ef253bcfcf4..ef253bcfcf4 100644
--- a/bdb/dist/db.spec.in
+++ b/storage/bdb/dist/db.spec.in
diff --git a/bdb/dist/gen_inc.awk b/storage/bdb/dist/gen_inc.awk
index 2f5b491cda1..2f5b491cda1 100644
--- a/bdb/dist/gen_inc.awk
+++ b/storage/bdb/dist/gen_inc.awk
diff --git a/bdb/dist/gen_rec.awk b/storage/bdb/dist/gen_rec.awk
index 75f2e86ca9e..75f2e86ca9e 100644
--- a/bdb/dist/gen_rec.awk
+++ b/storage/bdb/dist/gen_rec.awk
diff --git a/bdb/dist/gen_rpc.awk b/storage/bdb/dist/gen_rpc.awk
index 03975d7321b..03975d7321b 100644
--- a/bdb/dist/gen_rpc.awk
+++ b/storage/bdb/dist/gen_rpc.awk
diff --git a/bdb/dist/install-sh b/storage/bdb/dist/install-sh
index b41a2459161..b41a2459161 100755
--- a/bdb/dist/install-sh
+++ b/storage/bdb/dist/install-sh
diff --git a/bdb/dist/ltmain.sh b/storage/bdb/dist/ltmain.sh
index f07d424527d..f07d424527d 100644
--- a/bdb/dist/ltmain.sh
+++ b/storage/bdb/dist/ltmain.sh
diff --git a/bdb/dist/pubdef.in b/storage/bdb/dist/pubdef.in
index f42363022cd..f42363022cd 100644
--- a/bdb/dist/pubdef.in
+++ b/storage/bdb/dist/pubdef.in
diff --git a/bdb/dist/s_all b/storage/bdb/dist/s_all
index 132017def3c..132017def3c 100644
--- a/bdb/dist/s_all
+++ b/storage/bdb/dist/s_all
diff --git a/bdb/dist/s_config b/storage/bdb/dist/s_config
index 3e033da81ab..3e033da81ab 100755
--- a/bdb/dist/s_config
+++ b/storage/bdb/dist/s_config
diff --git a/bdb/dist/s_crypto b/storage/bdb/dist/s_crypto
index f7947cb3e10..f7947cb3e10 100644
--- a/bdb/dist/s_crypto
+++ b/storage/bdb/dist/s_crypto
diff --git a/bdb/dist/s_dir b/storage/bdb/dist/s_dir
index 58513a8321d..58513a8321d 100644
--- a/bdb/dist/s_dir
+++ b/storage/bdb/dist/s_dir
diff --git a/bdb/dist/s_include b/storage/bdb/dist/s_include
index 878b4a38af1..878b4a38af1 100755
--- a/bdb/dist/s_include
+++ b/storage/bdb/dist/s_include
diff --git a/bdb/dist/s_java b/storage/bdb/dist/s_java
index f7c96e823a1..f7c96e823a1 100755
--- a/bdb/dist/s_java
+++ b/storage/bdb/dist/s_java
diff --git a/bdb/dist/s_javah b/storage/bdb/dist/s_javah
index 67c41d09c4d..67c41d09c4d 100755
--- a/bdb/dist/s_javah
+++ b/storage/bdb/dist/s_javah
diff --git a/bdb/dist/s_perm b/storage/bdb/dist/s_perm
index c35278b8c83..c35278b8c83 100755
--- a/bdb/dist/s_perm
+++ b/storage/bdb/dist/s_perm
diff --git a/bdb/dist/s_readme b/storage/bdb/dist/s_readme
index 1da9f9681c0..1da9f9681c0 100755
--- a/bdb/dist/s_readme
+++ b/storage/bdb/dist/s_readme
diff --git a/bdb/dist/s_recover b/storage/bdb/dist/s_recover
index fc2e160c083..fc2e160c083 100755
--- a/bdb/dist/s_recover
+++ b/storage/bdb/dist/s_recover
diff --git a/bdb/dist/s_rpc b/storage/bdb/dist/s_rpc
index cdafa669d85..cdafa669d85 100644
--- a/bdb/dist/s_rpc
+++ b/storage/bdb/dist/s_rpc
diff --git a/bdb/dist/s_symlink b/storage/bdb/dist/s_symlink
index 8da49ca0c75..8da49ca0c75 100755
--- a/bdb/dist/s_symlink
+++ b/storage/bdb/dist/s_symlink
diff --git a/bdb/dist/s_tags b/storage/bdb/dist/s_tags
index 18b6025aa86..18b6025aa86 100755
--- a/bdb/dist/s_tags
+++ b/storage/bdb/dist/s_tags
diff --git a/bdb/dist/s_test b/storage/bdb/dist/s_test
index 16f3b9712d0..16f3b9712d0 100755
--- a/bdb/dist/s_test
+++ b/storage/bdb/dist/s_test
diff --git a/bdb/dist/s_vxworks b/storage/bdb/dist/s_vxworks
index 05c2599d02c..05c2599d02c 100644
--- a/bdb/dist/s_vxworks
+++ b/storage/bdb/dist/s_vxworks
diff --git a/bdb/dist/s_win32 b/storage/bdb/dist/s_win32
index 207978b82bb..207978b82bb 100755
--- a/bdb/dist/s_win32
+++ b/storage/bdb/dist/s_win32
diff --git a/bdb/dist/s_win32_dsp b/storage/bdb/dist/s_win32_dsp
index af5551ec248..af5551ec248 100644
--- a/bdb/dist/s_win32_dsp
+++ b/storage/bdb/dist/s_win32_dsp
diff --git a/bdb/dist/srcfiles.in b/storage/bdb/dist/srcfiles.in
index 54aeea0c1bc..54aeea0c1bc 100644
--- a/bdb/dist/srcfiles.in
+++ b/storage/bdb/dist/srcfiles.in
diff --git a/bdb/dist/template/rec_ctemp b/storage/bdb/dist/template/rec_ctemp
index 2951189c5bd..2951189c5bd 100644
--- a/bdb/dist/template/rec_ctemp
+++ b/storage/bdb/dist/template/rec_ctemp
diff --git a/bdb/dist/vx_2.0/BerkeleyDB.wpj b/storage/bdb/dist/vx_2.0/BerkeleyDB.wpj
index 78684d90067..78684d90067 100644
--- a/bdb/dist/vx_2.0/BerkeleyDB.wpj
+++ b/storage/bdb/dist/vx_2.0/BerkeleyDB.wpj
diff --git a/bdb/dist/vx_2.0/wpj.in b/storage/bdb/dist/vx_2.0/wpj.in
index 2b942bb562c..2b942bb562c 100644
--- a/bdb/dist/vx_2.0/wpj.in
+++ b/storage/bdb/dist/vx_2.0/wpj.in
diff --git a/bdb/dist/vx_3.1/Makefile.custom b/storage/bdb/dist/vx_3.1/Makefile.custom
index ca781f7b251..ca781f7b251 100644
--- a/bdb/dist/vx_3.1/Makefile.custom
+++ b/storage/bdb/dist/vx_3.1/Makefile.custom
diff --git a/bdb/dist/vx_3.1/cdf.1 b/storage/bdb/dist/vx_3.1/cdf.1
index 17db06f7e61..17db06f7e61 100644
--- a/bdb/dist/vx_3.1/cdf.1
+++ b/storage/bdb/dist/vx_3.1/cdf.1
diff --git a/bdb/dist/vx_3.1/cdf.2 b/storage/bdb/dist/vx_3.1/cdf.2
index 76f123af9fb..76f123af9fb 100644
--- a/bdb/dist/vx_3.1/cdf.2
+++ b/storage/bdb/dist/vx_3.1/cdf.2
diff --git a/bdb/dist/vx_3.1/cdf.3 b/storage/bdb/dist/vx_3.1/cdf.3
index a3146ced95a..a3146ced95a 100644
--- a/bdb/dist/vx_3.1/cdf.3
+++ b/storage/bdb/dist/vx_3.1/cdf.3
diff --git a/bdb/dist/vx_3.1/component.cdf b/storage/bdb/dist/vx_3.1/component.cdf
index 91edaa87853..91edaa87853 100644
--- a/bdb/dist/vx_3.1/component.cdf
+++ b/storage/bdb/dist/vx_3.1/component.cdf
diff --git a/bdb/dist/vx_3.1/component.wpj b/storage/bdb/dist/vx_3.1/component.wpj
index 01c51c1b97f..01c51c1b97f 100644
--- a/bdb/dist/vx_3.1/component.wpj
+++ b/storage/bdb/dist/vx_3.1/component.wpj
diff --git a/bdb/dist/vx_3.1/wpj.1 b/storage/bdb/dist/vx_3.1/wpj.1
index 414b4e8fa35..414b4e8fa35 100644
--- a/bdb/dist/vx_3.1/wpj.1
+++ b/storage/bdb/dist/vx_3.1/wpj.1
diff --git a/bdb/dist/vx_3.1/wpj.2 b/storage/bdb/dist/vx_3.1/wpj.2
index 0294f763ef7..0294f763ef7 100644
--- a/bdb/dist/vx_3.1/wpj.2
+++ b/storage/bdb/dist/vx_3.1/wpj.2
diff --git a/bdb/dist/vx_3.1/wpj.3 b/storage/bdb/dist/vx_3.1/wpj.3
index f06e6253923..f06e6253923 100644
--- a/bdb/dist/vx_3.1/wpj.3
+++ b/storage/bdb/dist/vx_3.1/wpj.3
diff --git a/bdb/dist/vx_3.1/wpj.4 b/storage/bdb/dist/vx_3.1/wpj.4
index 84de6ebf359..84de6ebf359 100644
--- a/bdb/dist/vx_3.1/wpj.4
+++ b/storage/bdb/dist/vx_3.1/wpj.4
diff --git a/bdb/dist/vx_3.1/wpj.5 b/storage/bdb/dist/vx_3.1/wpj.5
index f4056e7e22a..f4056e7e22a 100644
--- a/bdb/dist/vx_3.1/wpj.5
+++ b/storage/bdb/dist/vx_3.1/wpj.5
diff --git a/bdb/dist/vx_buildcd b/storage/bdb/dist/vx_buildcd
index a94d78db974..a94d78db974 100755
--- a/bdb/dist/vx_buildcd
+++ b/storage/bdb/dist/vx_buildcd
diff --git a/bdb/dist/vx_config.in b/storage/bdb/dist/vx_config.in
index 43fc8eb71f3..43fc8eb71f3 100644
--- a/bdb/dist/vx_config.in
+++ b/storage/bdb/dist/vx_config.in
diff --git a/bdb/dist/vx_setup/CONFIG.in b/storage/bdb/dist/vx_setup/CONFIG.in
index 6ccceee7034..6ccceee7034 100644
--- a/bdb/dist/vx_setup/CONFIG.in
+++ b/storage/bdb/dist/vx_setup/CONFIG.in
diff --git a/bdb/dist/vx_setup/LICENSE.TXT b/storage/bdb/dist/vx_setup/LICENSE.TXT
index 7814c679cd7..7814c679cd7 100644
--- a/bdb/dist/vx_setup/LICENSE.TXT
+++ b/storage/bdb/dist/vx_setup/LICENSE.TXT
diff --git a/bdb/dist/vx_setup/MESSAGES.TCL b/storage/bdb/dist/vx_setup/MESSAGES.TCL
index 718a67fbc50..718a67fbc50 100644
--- a/bdb/dist/vx_setup/MESSAGES.TCL
+++ b/storage/bdb/dist/vx_setup/MESSAGES.TCL
diff --git a/bdb/dist/vx_setup/README.in b/storage/bdb/dist/vx_setup/README.in
index f96948c37ba..f96948c37ba 100644
--- a/bdb/dist/vx_setup/README.in
+++ b/storage/bdb/dist/vx_setup/README.in
diff --git a/bdb/dist/vx_setup/SETUP.BMP b/storage/bdb/dist/vx_setup/SETUP.BMP
index 2918480b8c2..2918480b8c2 100644
--- a/bdb/dist/vx_setup/SETUP.BMP
+++ b/storage/bdb/dist/vx_setup/SETUP.BMP
Binary files differ
diff --git a/bdb/dist/vx_setup/vx_allfile.in b/storage/bdb/dist/vx_setup/vx_allfile.in
index 61a1b8ee805..61a1b8ee805 100644
--- a/bdb/dist/vx_setup/vx_allfile.in
+++ b/storage/bdb/dist/vx_setup/vx_allfile.in
diff --git a/bdb/dist/vx_setup/vx_demofile.in b/storage/bdb/dist/vx_setup/vx_demofile.in
index 42a698ea367..42a698ea367 100644
--- a/bdb/dist/vx_setup/vx_demofile.in
+++ b/storage/bdb/dist/vx_setup/vx_demofile.in
diff --git a/bdb/dist/vx_setup/vx_setup.in b/storage/bdb/dist/vx_setup/vx_setup.in
index 7bc3f510cfa..7bc3f510cfa 100644
--- a/bdb/dist/vx_setup/vx_setup.in
+++ b/storage/bdb/dist/vx_setup/vx_setup.in
diff --git a/bdb/dist/win_config.in b/storage/bdb/dist/win_config.in
index 09acab28806..09acab28806 100644
--- a/bdb/dist/win_config.in
+++ b/storage/bdb/dist/win_config.in
diff --git a/bdb/dist/win_exports.in b/storage/bdb/dist/win_exports.in
index 52df529d028..52df529d028 100644
--- a/bdb/dist/win_exports.in
+++ b/storage/bdb/dist/win_exports.in
diff --git a/bdb/env/db_salloc.c b/storage/bdb/env/db_salloc.c
index 1ef768d4114..1ef768d4114 100644
--- a/bdb/env/db_salloc.c
+++ b/storage/bdb/env/db_salloc.c
diff --git a/bdb/env/db_shash.c b/storage/bdb/env/db_shash.c
index 743a126307d..743a126307d 100644
--- a/bdb/env/db_shash.c
+++ b/storage/bdb/env/db_shash.c
diff --git a/bdb/env/env_file.c b/storage/bdb/env/env_file.c
index f221fd8d701..f221fd8d701 100644
--- a/bdb/env/env_file.c
+++ b/storage/bdb/env/env_file.c
diff --git a/bdb/env/env_method.c b/storage/bdb/env/env_method.c
index b51237ec44a..b51237ec44a 100644
--- a/bdb/env/env_method.c
+++ b/storage/bdb/env/env_method.c
diff --git a/bdb/env/env_method.c.b b/storage/bdb/env/env_method.c.b
index b6802b8a77c..b6802b8a77c 100644
--- a/bdb/env/env_method.c.b
+++ b/storage/bdb/env/env_method.c.b
diff --git a/bdb/env/env_open.c b/storage/bdb/env/env_open.c
index ae8399f61cd..ae8399f61cd 100644
--- a/bdb/env/env_open.c
+++ b/storage/bdb/env/env_open.c
diff --git a/bdb/env/env_recover.c b/storage/bdb/env/env_recover.c
index fbe3b345b0d..fbe3b345b0d 100644
--- a/bdb/env/env_recover.c
+++ b/storage/bdb/env/env_recover.c
diff --git a/bdb/env/env_region.c b/storage/bdb/env/env_region.c
index a919cf328b4..a919cf328b4 100644
--- a/bdb/env/env_region.c
+++ b/storage/bdb/env/env_region.c
diff --git a/bdb/fileops/fileops.src b/storage/bdb/fileops/fileops.src
index 1fd39dc3c45..1fd39dc3c45 100644
--- a/bdb/fileops/fileops.src
+++ b/storage/bdb/fileops/fileops.src
diff --git a/bdb/fileops/fop_basic.c b/storage/bdb/fileops/fop_basic.c
index 08160ab2e1a..08160ab2e1a 100644
--- a/bdb/fileops/fop_basic.c
+++ b/storage/bdb/fileops/fop_basic.c
diff --git a/bdb/fileops/fop_rec.c b/storage/bdb/fileops/fop_rec.c
index 67720e01d13..67720e01d13 100644
--- a/bdb/fileops/fop_rec.c
+++ b/storage/bdb/fileops/fop_rec.c
diff --git a/bdb/fileops/fop_util.c b/storage/bdb/fileops/fop_util.c
index ea6d86ab08d..ea6d86ab08d 100644
--- a/bdb/fileops/fop_util.c
+++ b/storage/bdb/fileops/fop_util.c
diff --git a/bdb/hash/hash.c b/storage/bdb/hash/hash.c
index 2f972a3238d..2f972a3238d 100644
--- a/bdb/hash/hash.c
+++ b/storage/bdb/hash/hash.c
diff --git a/bdb/hash/hash.src b/storage/bdb/hash/hash.src
index b4b633c56e6..b4b633c56e6 100644
--- a/bdb/hash/hash.src
+++ b/storage/bdb/hash/hash.src
diff --git a/bdb/hash/hash_conv.c b/storage/bdb/hash/hash_conv.c
index a93e56a2ee4..a93e56a2ee4 100644
--- a/bdb/hash/hash_conv.c
+++ b/storage/bdb/hash/hash_conv.c
diff --git a/bdb/hash/hash_dup.c b/storage/bdb/hash/hash_dup.c
index ec70e519d54..ec70e519d54 100644
--- a/bdb/hash/hash_dup.c
+++ b/storage/bdb/hash/hash_dup.c
diff --git a/bdb/hash/hash_func.c b/storage/bdb/hash/hash_func.c
index c6cc2ad4460..c6cc2ad4460 100644
--- a/bdb/hash/hash_func.c
+++ b/storage/bdb/hash/hash_func.c
diff --git a/bdb/hash/hash_meta.c b/storage/bdb/hash/hash_meta.c
index 9f224454869..9f224454869 100644
--- a/bdb/hash/hash_meta.c
+++ b/storage/bdb/hash/hash_meta.c
diff --git a/bdb/hash/hash_method.c b/storage/bdb/hash/hash_method.c
index 9a6bf59536a..9a6bf59536a 100644
--- a/bdb/hash/hash_method.c
+++ b/storage/bdb/hash/hash_method.c
diff --git a/bdb/hash/hash_open.c b/storage/bdb/hash/hash_open.c
index f976f5b6816..f976f5b6816 100644
--- a/bdb/hash/hash_open.c
+++ b/storage/bdb/hash/hash_open.c
diff --git a/bdb/hash/hash_page.c b/storage/bdb/hash/hash_page.c
index 6788129773f..6788129773f 100644
--- a/bdb/hash/hash_page.c
+++ b/storage/bdb/hash/hash_page.c
diff --git a/bdb/hash/hash_rec.c b/storage/bdb/hash/hash_rec.c
index 24d3473c508..24d3473c508 100644
--- a/bdb/hash/hash_rec.c
+++ b/storage/bdb/hash/hash_rec.c
diff --git a/bdb/hash/hash_reclaim.c b/storage/bdb/hash/hash_reclaim.c
index ac90ffff08a..ac90ffff08a 100644
--- a/bdb/hash/hash_reclaim.c
+++ b/storage/bdb/hash/hash_reclaim.c
diff --git a/bdb/hash/hash_stat.c b/storage/bdb/hash/hash_stat.c
index f9ee1d099cb..f9ee1d099cb 100644
--- a/bdb/hash/hash_stat.c
+++ b/storage/bdb/hash/hash_stat.c
diff --git a/bdb/hash/hash_upgrade.c b/storage/bdb/hash/hash_upgrade.c
index 2dd21d7b644..2dd21d7b644 100644
--- a/bdb/hash/hash_upgrade.c
+++ b/storage/bdb/hash/hash_upgrade.c
diff --git a/bdb/hash/hash_verify.c b/storage/bdb/hash/hash_verify.c
index e6f5a2b0d65..e6f5a2b0d65 100644
--- a/bdb/hash/hash_verify.c
+++ b/storage/bdb/hash/hash_verify.c
diff --git a/bdb/hmac/hmac.c b/storage/bdb/hmac/hmac.c
index d39a154ec63..d39a154ec63 100644
--- a/bdb/hmac/hmac.c
+++ b/storage/bdb/hmac/hmac.c
diff --git a/bdb/hmac/sha1.c b/storage/bdb/hmac/sha1.c
index 2f2c806a21f..2f2c806a21f 100644
--- a/bdb/hmac/sha1.c
+++ b/storage/bdb/hmac/sha1.c
diff --git a/bdb/hsearch/hsearch.c b/storage/bdb/hsearch/hsearch.c
index 9760aeeb9e8..9760aeeb9e8 100644
--- a/bdb/hsearch/hsearch.c
+++ b/storage/bdb/hsearch/hsearch.c
diff --git a/bdb/libdb_java/checkapi.prl b/storage/bdb/libdb_java/checkapi.prl
index a27b8ffd107..a27b8ffd107 100644
--- a/bdb/libdb_java/checkapi.prl
+++ b/storage/bdb/libdb_java/checkapi.prl
diff --git a/bdb/libdb_java/com_sleepycat_db_Db.h b/storage/bdb/libdb_java/com_sleepycat_db_Db.h
index 0787ae87aed..0787ae87aed 100644
--- a/bdb/libdb_java/com_sleepycat_db_Db.h
+++ b/storage/bdb/libdb_java/com_sleepycat_db_Db.h
diff --git a/bdb/libdb_java/com_sleepycat_db_DbEnv.h b/storage/bdb/libdb_java/com_sleepycat_db_DbEnv.h
index f239dfc7593..f239dfc7593 100644
--- a/bdb/libdb_java/com_sleepycat_db_DbEnv.h
+++ b/storage/bdb/libdb_java/com_sleepycat_db_DbEnv.h
diff --git a/bdb/libdb_java/com_sleepycat_db_DbLock.h b/storage/bdb/libdb_java/com_sleepycat_db_DbLock.h
index 9f3d77d44bc..9f3d77d44bc 100644
--- a/bdb/libdb_java/com_sleepycat_db_DbLock.h
+++ b/storage/bdb/libdb_java/com_sleepycat_db_DbLock.h
diff --git a/bdb/libdb_java/com_sleepycat_db_DbLogc.h b/storage/bdb/libdb_java/com_sleepycat_db_DbLogc.h
index 8d029c761ba..8d029c761ba 100644
--- a/bdb/libdb_java/com_sleepycat_db_DbLogc.h
+++ b/storage/bdb/libdb_java/com_sleepycat_db_DbLogc.h
diff --git a/bdb/libdb_java/com_sleepycat_db_DbLsn.h b/storage/bdb/libdb_java/com_sleepycat_db_DbLsn.h
index 080fa0a8758..080fa0a8758 100644
--- a/bdb/libdb_java/com_sleepycat_db_DbLsn.h
+++ b/storage/bdb/libdb_java/com_sleepycat_db_DbLsn.h
diff --git a/bdb/libdb_java/com_sleepycat_db_DbTxn.h b/storage/bdb/libdb_java/com_sleepycat_db_DbTxn.h
index 59641c041a4..59641c041a4 100644
--- a/bdb/libdb_java/com_sleepycat_db_DbTxn.h
+++ b/storage/bdb/libdb_java/com_sleepycat_db_DbTxn.h
diff --git a/bdb/libdb_java/com_sleepycat_db_DbUtil.h b/storage/bdb/libdb_java/com_sleepycat_db_DbUtil.h
index 7f8495590c0..7f8495590c0 100644
--- a/bdb/libdb_java/com_sleepycat_db_DbUtil.h
+++ b/storage/bdb/libdb_java/com_sleepycat_db_DbUtil.h
diff --git a/bdb/libdb_java/com_sleepycat_db_Dbc.h b/storage/bdb/libdb_java/com_sleepycat_db_Dbc.h
index 447ab234844..447ab234844 100644
--- a/bdb/libdb_java/com_sleepycat_db_Dbc.h
+++ b/storage/bdb/libdb_java/com_sleepycat_db_Dbc.h
diff --git a/bdb/libdb_java/com_sleepycat_db_Dbt.h b/storage/bdb/libdb_java/com_sleepycat_db_Dbt.h
index c09bd8e6131..c09bd8e6131 100644
--- a/bdb/libdb_java/com_sleepycat_db_Dbt.h
+++ b/storage/bdb/libdb_java/com_sleepycat_db_Dbt.h
diff --git a/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h b/storage/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h
index 00e9e2e6893..00e9e2e6893 100644
--- a/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h
+++ b/storage/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h
diff --git a/bdb/libdb_java/java_Db.c b/storage/bdb/libdb_java/java_Db.c
index 465c40f7d5a..465c40f7d5a 100644
--- a/bdb/libdb_java/java_Db.c
+++ b/storage/bdb/libdb_java/java_Db.c
diff --git a/bdb/libdb_java/java_DbEnv.c b/storage/bdb/libdb_java/java_DbEnv.c
index 651c38a0e3d..651c38a0e3d 100644
--- a/bdb/libdb_java/java_DbEnv.c
+++ b/storage/bdb/libdb_java/java_DbEnv.c
diff --git a/bdb/libdb_java/java_DbLock.c b/storage/bdb/libdb_java/java_DbLock.c
index 00a9836bfa0..00a9836bfa0 100644
--- a/bdb/libdb_java/java_DbLock.c
+++ b/storage/bdb/libdb_java/java_DbLock.c
diff --git a/bdb/libdb_java/java_DbLogc.c b/storage/bdb/libdb_java/java_DbLogc.c
index 69294d9baac..69294d9baac 100644
--- a/bdb/libdb_java/java_DbLogc.c
+++ b/storage/bdb/libdb_java/java_DbLogc.c
diff --git a/bdb/libdb_java/java_DbLsn.c b/storage/bdb/libdb_java/java_DbLsn.c
index d53082826f4..d53082826f4 100644
--- a/bdb/libdb_java/java_DbLsn.c
+++ b/storage/bdb/libdb_java/java_DbLsn.c
diff --git a/bdb/libdb_java/java_DbTxn.c b/storage/bdb/libdb_java/java_DbTxn.c
index 51195501b77..51195501b77 100644
--- a/bdb/libdb_java/java_DbTxn.c
+++ b/storage/bdb/libdb_java/java_DbTxn.c
diff --git a/bdb/libdb_java/java_DbUtil.c b/storage/bdb/libdb_java/java_DbUtil.c
index edcbc6d9f15..edcbc6d9f15 100644
--- a/bdb/libdb_java/java_DbUtil.c
+++ b/storage/bdb/libdb_java/java_DbUtil.c
diff --git a/bdb/libdb_java/java_DbXAResource.c b/storage/bdb/libdb_java/java_DbXAResource.c
index 609529bfe83..609529bfe83 100644
--- a/bdb/libdb_java/java_DbXAResource.c
+++ b/storage/bdb/libdb_java/java_DbXAResource.c
diff --git a/bdb/libdb_java/java_Dbc.c b/storage/bdb/libdb_java/java_Dbc.c
index 63ab368fc03..63ab368fc03 100644
--- a/bdb/libdb_java/java_Dbc.c
+++ b/storage/bdb/libdb_java/java_Dbc.c
diff --git a/bdb/libdb_java/java_Dbt.c b/storage/bdb/libdb_java/java_Dbt.c
index d21109f3408..d21109f3408 100644
--- a/bdb/libdb_java/java_Dbt.c
+++ b/storage/bdb/libdb_java/java_Dbt.c
diff --git a/bdb/libdb_java/java_info.c b/storage/bdb/libdb_java/java_info.c
index 22fcbd23d46..22fcbd23d46 100644
--- a/bdb/libdb_java/java_info.c
+++ b/storage/bdb/libdb_java/java_info.c
diff --git a/bdb/libdb_java/java_info.h b/storage/bdb/libdb_java/java_info.h
index bda83db420e..bda83db420e 100644
--- a/bdb/libdb_java/java_info.h
+++ b/storage/bdb/libdb_java/java_info.h
diff --git a/bdb/libdb_java/java_locked.c b/storage/bdb/libdb_java/java_locked.c
index 9534a387b40..9534a387b40 100644
--- a/bdb/libdb_java/java_locked.c
+++ b/storage/bdb/libdb_java/java_locked.c
diff --git a/bdb/libdb_java/java_locked.h b/storage/bdb/libdb_java/java_locked.h
index a79d929abee..a79d929abee 100644
--- a/bdb/libdb_java/java_locked.h
+++ b/storage/bdb/libdb_java/java_locked.h
diff --git a/bdb/libdb_java/java_util.c b/storage/bdb/libdb_java/java_util.c
index 5a538ee0785..5a538ee0785 100644
--- a/bdb/libdb_java/java_util.c
+++ b/storage/bdb/libdb_java/java_util.c
diff --git a/bdb/libdb_java/java_util.h b/storage/bdb/libdb_java/java_util.h
index 08187f6b51f..08187f6b51f 100644
--- a/bdb/libdb_java/java_util.h
+++ b/storage/bdb/libdb_java/java_util.h
diff --git a/bdb/lock/Design b/storage/bdb/lock/Design
index f0bb5c6e99c..f0bb5c6e99c 100644
--- a/bdb/lock/Design
+++ b/storage/bdb/lock/Design
diff --git a/bdb/lock/lock.c b/storage/bdb/lock/lock.c
index 8eda155b822..8eda155b822 100644
--- a/bdb/lock/lock.c
+++ b/storage/bdb/lock/lock.c
diff --git a/bdb/lock/lock_deadlock.c b/storage/bdb/lock/lock_deadlock.c
index d1461b89a4f..d1461b89a4f 100644
--- a/bdb/lock/lock_deadlock.c
+++ b/storage/bdb/lock/lock_deadlock.c
diff --git a/bdb/lock/lock_method.c b/storage/bdb/lock/lock_method.c
index 72703e253bc..72703e253bc 100644
--- a/bdb/lock/lock_method.c
+++ b/storage/bdb/lock/lock_method.c
diff --git a/bdb/lock/lock_region.c b/storage/bdb/lock/lock_region.c
index 6df6937e873..6df6937e873 100644
--- a/bdb/lock/lock_region.c
+++ b/storage/bdb/lock/lock_region.c
diff --git a/bdb/lock/lock_stat.c b/storage/bdb/lock/lock_stat.c
index 0bef3e18021..0bef3e18021 100644
--- a/bdb/lock/lock_stat.c
+++ b/storage/bdb/lock/lock_stat.c
diff --git a/bdb/lock/lock_util.c b/storage/bdb/lock/lock_util.c
index 260f021b1ee..260f021b1ee 100644
--- a/bdb/lock/lock_util.c
+++ b/storage/bdb/lock/lock_util.c
diff --git a/bdb/log/log.c b/storage/bdb/log/log.c
index f57caeccb95..f57caeccb95 100644
--- a/bdb/log/log.c
+++ b/storage/bdb/log/log.c
diff --git a/bdb/log/log_archive.c b/storage/bdb/log/log_archive.c
index 19e1af5a93e..19e1af5a93e 100644
--- a/bdb/log/log_archive.c
+++ b/storage/bdb/log/log_archive.c
diff --git a/bdb/log/log_compare.c b/storage/bdb/log/log_compare.c
index 115f9c21b76..115f9c21b76 100644
--- a/bdb/log/log_compare.c
+++ b/storage/bdb/log/log_compare.c
diff --git a/bdb/log/log_get.c b/storage/bdb/log/log_get.c
index c8b028da0fb..c8b028da0fb 100644
--- a/bdb/log/log_get.c
+++ b/storage/bdb/log/log_get.c
diff --git a/bdb/log/log_method.c b/storage/bdb/log/log_method.c
index 42adaf11c6c..42adaf11c6c 100644
--- a/bdb/log/log_method.c
+++ b/storage/bdb/log/log_method.c
diff --git a/bdb/log/log_put.c b/storage/bdb/log/log_put.c
index 64276fa8315..64276fa8315 100644
--- a/bdb/log/log_put.c
+++ b/storage/bdb/log/log_put.c
diff --git a/bdb/mp/mp_alloc.c b/storage/bdb/mp/mp_alloc.c
index 96dd612d7ba..96dd612d7ba 100644
--- a/bdb/mp/mp_alloc.c
+++ b/storage/bdb/mp/mp_alloc.c
diff --git a/bdb/mp/mp_bh.c b/storage/bdb/mp/mp_bh.c
index 85d15218abf..85d15218abf 100644
--- a/bdb/mp/mp_bh.c
+++ b/storage/bdb/mp/mp_bh.c
diff --git a/bdb/mp/mp_fget.c b/storage/bdb/mp/mp_fget.c
index be0785a2184..be0785a2184 100644
--- a/bdb/mp/mp_fget.c
+++ b/storage/bdb/mp/mp_fget.c
diff --git a/bdb/mp/mp_fopen.c b/storage/bdb/mp/mp_fopen.c
index 8fdefb0f5e9..8fdefb0f5e9 100644
--- a/bdb/mp/mp_fopen.c
+++ b/storage/bdb/mp/mp_fopen.c
diff --git a/bdb/mp/mp_fput.c b/storage/bdb/mp/mp_fput.c
index 271e44a4ef8..271e44a4ef8 100644
--- a/bdb/mp/mp_fput.c
+++ b/storage/bdb/mp/mp_fput.c
diff --git a/bdb/mp/mp_fset.c b/storage/bdb/mp/mp_fset.c
index 65cd6286ac9..65cd6286ac9 100644
--- a/bdb/mp/mp_fset.c
+++ b/storage/bdb/mp/mp_fset.c
diff --git a/bdb/mp/mp_method.c b/storage/bdb/mp/mp_method.c
index 38f0a645f16..38f0a645f16 100644
--- a/bdb/mp/mp_method.c
+++ b/storage/bdb/mp/mp_method.c
diff --git a/bdb/mp/mp_region.c b/storage/bdb/mp/mp_region.c
index 06eca2f8646..06eca2f8646 100644
--- a/bdb/mp/mp_region.c
+++ b/storage/bdb/mp/mp_region.c
diff --git a/bdb/mp/mp_register.c b/storage/bdb/mp/mp_register.c
index 46eefad986f..46eefad986f 100644
--- a/bdb/mp/mp_register.c
+++ b/storage/bdb/mp/mp_register.c
diff --git a/bdb/mp/mp_stat.c b/storage/bdb/mp/mp_stat.c
index 12e72b91d70..12e72b91d70 100644
--- a/bdb/mp/mp_stat.c
+++ b/storage/bdb/mp/mp_stat.c
diff --git a/bdb/mp/mp_sync.c b/storage/bdb/mp/mp_sync.c
index 03b42208b39..03b42208b39 100644
--- a/bdb/mp/mp_sync.c
+++ b/storage/bdb/mp/mp_sync.c
diff --git a/bdb/mp/mp_trickle.c b/storage/bdb/mp/mp_trickle.c
index 71077ab60cc..71077ab60cc 100644
--- a/bdb/mp/mp_trickle.c
+++ b/storage/bdb/mp/mp_trickle.c
diff --git a/bdb/mutex/README b/storage/bdb/mutex/README
index 323c34f1e74..323c34f1e74 100644
--- a/bdb/mutex/README
+++ b/storage/bdb/mutex/README
diff --git a/bdb/mutex/mut_fcntl.c b/storage/bdb/mutex/mut_fcntl.c
index 2fdf9eff7ef..2fdf9eff7ef 100644
--- a/bdb/mutex/mut_fcntl.c
+++ b/storage/bdb/mutex/mut_fcntl.c
diff --git a/bdb/mutex/mut_pthread.c b/storage/bdb/mutex/mut_pthread.c
index 4a55ce0ca03..4a55ce0ca03 100644
--- a/bdb/mutex/mut_pthread.c
+++ b/storage/bdb/mutex/mut_pthread.c
diff --git a/bdb/mutex/mut_tas.c b/storage/bdb/mutex/mut_tas.c
index c24e09473ca..c24e09473ca 100644
--- a/bdb/mutex/mut_tas.c
+++ b/storage/bdb/mutex/mut_tas.c
diff --git a/bdb/mutex/mut_win32.c b/storage/bdb/mutex/mut_win32.c
index 49eb20a6ecf..49eb20a6ecf 100644
--- a/bdb/mutex/mut_win32.c
+++ b/storage/bdb/mutex/mut_win32.c
diff --git a/bdb/mutex/mutex.c b/storage/bdb/mutex/mutex.c
index 5418764a889..5418764a889 100644
--- a/bdb/mutex/mutex.c
+++ b/storage/bdb/mutex/mutex.c
diff --git a/bdb/mutex/tm.c b/storage/bdb/mutex/tm.c
index 4af1b1907a8..4af1b1907a8 100644
--- a/bdb/mutex/tm.c
+++ b/storage/bdb/mutex/tm.c
diff --git a/bdb/mutex/uts4_cc.s b/storage/bdb/mutex/uts4_cc.s
index 9ebc45aad54..9ebc45aad54 100644
--- a/bdb/mutex/uts4_cc.s
+++ b/storage/bdb/mutex/uts4_cc.s
diff --git a/bdb/os/os_abs.c b/storage/bdb/os/os_abs.c
index cd7d0a5d2be..cd7d0a5d2be 100644
--- a/bdb/os/os_abs.c
+++ b/storage/bdb/os/os_abs.c
diff --git a/bdb/os/os_alloc.c b/storage/bdb/os/os_alloc.c
index 5b38cc7d6f1..5b38cc7d6f1 100644
--- a/bdb/os/os_alloc.c
+++ b/storage/bdb/os/os_alloc.c
diff --git a/bdb/os/os_clock.c b/storage/bdb/os/os_clock.c
index 8da02cf6f9c..8da02cf6f9c 100644
--- a/bdb/os/os_clock.c
+++ b/storage/bdb/os/os_clock.c
diff --git a/bdb/os/os_config.c b/storage/bdb/os/os_config.c
index b64952a8302..b64952a8302 100644
--- a/bdb/os/os_config.c
+++ b/storage/bdb/os/os_config.c
diff --git a/bdb/os/os_dir.c b/storage/bdb/os/os_dir.c
index 3f59a23d963..3f59a23d963 100644
--- a/bdb/os/os_dir.c
+++ b/storage/bdb/os/os_dir.c
diff --git a/bdb/os/os_errno.c b/storage/bdb/os/os_errno.c
index 4b40f88d177..4b40f88d177 100644
--- a/bdb/os/os_errno.c
+++ b/storage/bdb/os/os_errno.c
diff --git a/bdb/os/os_fid.c b/storage/bdb/os/os_fid.c
index 125e6f0712c..125e6f0712c 100644
--- a/bdb/os/os_fid.c
+++ b/storage/bdb/os/os_fid.c
diff --git a/bdb/os/os_fsync.c b/storage/bdb/os/os_fsync.c
index 46ab4885a16..46ab4885a16 100644
--- a/bdb/os/os_fsync.c
+++ b/storage/bdb/os/os_fsync.c
diff --git a/bdb/os/os_handle.c b/storage/bdb/os/os_handle.c
index 5f617085e5d..5f617085e5d 100644
--- a/bdb/os/os_handle.c
+++ b/storage/bdb/os/os_handle.c
diff --git a/bdb/os/os_id.c b/storage/bdb/os/os_id.c
index c242bb12e23..c242bb12e23 100644
--- a/bdb/os/os_id.c
+++ b/storage/bdb/os/os_id.c
diff --git a/bdb/os/os_map.c b/storage/bdb/os/os_map.c
index 6d385b6a84d..6d385b6a84d 100644
--- a/bdb/os/os_map.c
+++ b/storage/bdb/os/os_map.c
diff --git a/bdb/os/os_method.c b/storage/bdb/os/os_method.c
index 04367654efa..04367654efa 100644
--- a/bdb/os/os_method.c
+++ b/storage/bdb/os/os_method.c
diff --git a/bdb/os/os_oflags.c b/storage/bdb/os/os_oflags.c
index f75178de75e..f75178de75e 100644
--- a/bdb/os/os_oflags.c
+++ b/storage/bdb/os/os_oflags.c
diff --git a/bdb/os/os_open.c b/storage/bdb/os/os_open.c
index 0a4dbadc6e8..0a4dbadc6e8 100644
--- a/bdb/os/os_open.c
+++ b/storage/bdb/os/os_open.c
diff --git a/bdb/os/os_region.c b/storage/bdb/os/os_region.c
index 6529f708b2c..6529f708b2c 100644
--- a/bdb/os/os_region.c
+++ b/storage/bdb/os/os_region.c
diff --git a/bdb/os/os_rename.c b/storage/bdb/os/os_rename.c
index 2569a9c3186..2569a9c3186 100644
--- a/bdb/os/os_rename.c
+++ b/storage/bdb/os/os_rename.c
diff --git a/bdb/os/os_root.c b/storage/bdb/os/os_root.c
index cd5bfc352e9..cd5bfc352e9 100644
--- a/bdb/os/os_root.c
+++ b/storage/bdb/os/os_root.c
diff --git a/bdb/os/os_rpath.c b/storage/bdb/os/os_rpath.c
index b9ccba01bd5..b9ccba01bd5 100644
--- a/bdb/os/os_rpath.c
+++ b/storage/bdb/os/os_rpath.c
diff --git a/bdb/os/os_rw.c b/storage/bdb/os/os_rw.c
index 9a79342c7b8..9a79342c7b8 100644
--- a/bdb/os/os_rw.c
+++ b/storage/bdb/os/os_rw.c
diff --git a/bdb/os/os_seek.c b/storage/bdb/os/os_seek.c
index 5b2aa45d5dd..5b2aa45d5dd 100644
--- a/bdb/os/os_seek.c
+++ b/storage/bdb/os/os_seek.c
diff --git a/bdb/os/os_sleep.c b/storage/bdb/os/os_sleep.c
index 42d496dbae7..42d496dbae7 100644
--- a/bdb/os/os_sleep.c
+++ b/storage/bdb/os/os_sleep.c
diff --git a/bdb/os/os_spin.c b/storage/bdb/os/os_spin.c
index fb36977cb44..fb36977cb44 100644
--- a/bdb/os/os_spin.c
+++ b/storage/bdb/os/os_spin.c
diff --git a/bdb/os/os_stat.c b/storage/bdb/os/os_stat.c
index c3510e36f5d..c3510e36f5d 100644
--- a/bdb/os/os_stat.c
+++ b/storage/bdb/os/os_stat.c
diff --git a/bdb/os/os_tmpdir.c b/storage/bdb/os/os_tmpdir.c
index 94645af5e71..94645af5e71 100644
--- a/bdb/os/os_tmpdir.c
+++ b/storage/bdb/os/os_tmpdir.c
diff --git a/bdb/os/os_unlink.c b/storage/bdb/os/os_unlink.c
index 28b03afd1aa..28b03afd1aa 100644
--- a/bdb/os/os_unlink.c
+++ b/storage/bdb/os/os_unlink.c
diff --git a/bdb/os_vxworks/os_vx_abs.c b/storage/bdb/os_vxworks/os_vx_abs.c
index 93e9be7269b..93e9be7269b 100644
--- a/bdb/os_vxworks/os_vx_abs.c
+++ b/storage/bdb/os_vxworks/os_vx_abs.c
diff --git a/bdb/os_vxworks/os_vx_config.c b/storage/bdb/os_vxworks/os_vx_config.c
index 810983b38ff..810983b38ff 100644
--- a/bdb/os_vxworks/os_vx_config.c
+++ b/storage/bdb/os_vxworks/os_vx_config.c
diff --git a/bdb/os_vxworks/os_vx_map.c b/storage/bdb/os_vxworks/os_vx_map.c
index 8ad4f0765ce..8ad4f0765ce 100644
--- a/bdb/os_vxworks/os_vx_map.c
+++ b/storage/bdb/os_vxworks/os_vx_map.c
diff --git a/bdb/os_win32/os_abs.c b/storage/bdb/os_win32/os_abs.c
index c8bead83ec3..c8bead83ec3 100644
--- a/bdb/os_win32/os_abs.c
+++ b/storage/bdb/os_win32/os_abs.c
diff --git a/bdb/os_win32/os_clock.c b/storage/bdb/os_win32/os_clock.c
index 1bf154f9da9..1bf154f9da9 100644
--- a/bdb/os_win32/os_clock.c
+++ b/storage/bdb/os_win32/os_clock.c
diff --git a/bdb/os_win32/os_config.c b/storage/bdb/os_win32/os_config.c
index a2c220daf1a..a2c220daf1a 100644
--- a/bdb/os_win32/os_config.c
+++ b/storage/bdb/os_win32/os_config.c
diff --git a/bdb/os_win32/os_dir.c b/storage/bdb/os_win32/os_dir.c
index 3f47c4960b0..3f47c4960b0 100644
--- a/bdb/os_win32/os_dir.c
+++ b/storage/bdb/os_win32/os_dir.c
diff --git a/bdb/os_win32/os_errno.c b/storage/bdb/os_win32/os_errno.c
index d6fac82e6f3..d6fac82e6f3 100644
--- a/bdb/os_win32/os_errno.c
+++ b/storage/bdb/os_win32/os_errno.c
diff --git a/bdb/os_win32/os_fid.c b/storage/bdb/os_win32/os_fid.c
index 1190ad26e81..1190ad26e81 100644
--- a/bdb/os_win32/os_fid.c
+++ b/storage/bdb/os_win32/os_fid.c
diff --git a/bdb/os_win32/os_fsync.c b/storage/bdb/os_win32/os_fsync.c
index 6fd3e1dcdf4..6fd3e1dcdf4 100644
--- a/bdb/os_win32/os_fsync.c
+++ b/storage/bdb/os_win32/os_fsync.c
diff --git a/bdb/os_win32/os_handle.c b/storage/bdb/os_win32/os_handle.c
index 7db9c3da977..7db9c3da977 100644
--- a/bdb/os_win32/os_handle.c
+++ b/storage/bdb/os_win32/os_handle.c
diff --git a/bdb/os_win32/os_map.c b/storage/bdb/os_win32/os_map.c
index 1f16c9fead4..1f16c9fead4 100644
--- a/bdb/os_win32/os_map.c
+++ b/storage/bdb/os_win32/os_map.c
diff --git a/bdb/os_win32/os_open.c b/storage/bdb/os_win32/os_open.c
index c8bae54d585..c8bae54d585 100644
--- a/bdb/os_win32/os_open.c
+++ b/storage/bdb/os_win32/os_open.c
diff --git a/bdb/os_win32/os_rename.c b/storage/bdb/os_win32/os_rename.c
index 67c3846649b..67c3846649b 100644
--- a/bdb/os_win32/os_rename.c
+++ b/storage/bdb/os_win32/os_rename.c
diff --git a/bdb/os_win32/os_rw.c b/storage/bdb/os_win32/os_rw.c
index 63d1f715c53..63d1f715c53 100644
--- a/bdb/os_win32/os_rw.c
+++ b/storage/bdb/os_win32/os_rw.c
diff --git a/bdb/os_win32/os_seek.c b/storage/bdb/os_win32/os_seek.c
index 40140f51534..40140f51534 100644
--- a/bdb/os_win32/os_seek.c
+++ b/storage/bdb/os_win32/os_seek.c
diff --git a/bdb/os_win32/os_sleep.c b/storage/bdb/os_win32/os_sleep.c
index 12b4a7dbc2d..12b4a7dbc2d 100644
--- a/bdb/os_win32/os_sleep.c
+++ b/storage/bdb/os_win32/os_sleep.c
diff --git a/bdb/os_win32/os_spin.c b/storage/bdb/os_win32/os_spin.c
index eb50b3b53ff..eb50b3b53ff 100644
--- a/bdb/os_win32/os_spin.c
+++ b/storage/bdb/os_win32/os_spin.c
diff --git a/bdb/os_win32/os_stat.c b/storage/bdb/os_win32/os_stat.c
index c1cba698bea..c1cba698bea 100644
--- a/bdb/os_win32/os_stat.c
+++ b/storage/bdb/os_win32/os_stat.c
diff --git a/bdb/os_win32/os_type.c b/storage/bdb/os_win32/os_type.c
index 583da0aaf1e..583da0aaf1e 100644
--- a/bdb/os_win32/os_type.c
+++ b/storage/bdb/os_win32/os_type.c
diff --git a/bdb/perl/BerkeleyDB/BerkeleyDB.pm b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pm
index c56390ba71f..c56390ba71f 100644
--- a/bdb/perl/BerkeleyDB/BerkeleyDB.pm
+++ b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pm
diff --git a/bdb/perl/BerkeleyDB/BerkeleyDB.pod b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod
index 60f30e2abfb..60f30e2abfb 100644
--- a/bdb/perl/BerkeleyDB/BerkeleyDB.pod
+++ b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod
diff --git a/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P
index 4a848f5388d..4a848f5388d 100644
--- a/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P
+++ b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P
diff --git a/bdb/perl/BerkeleyDB/BerkeleyDB.xs b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.xs
index 531b38a655f..531b38a655f 100644
--- a/bdb/perl/BerkeleyDB/BerkeleyDB.xs
+++ b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.xs
diff --git a/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm b/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm
index ba9a9c0085d..ba9a9c0085d 100644
--- a/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm
+++ b/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm
diff --git a/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm b/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm
index 8e7bc7e78c7..8e7bc7e78c7 100644
--- a/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm
+++ b/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm
diff --git a/bdb/perl/BerkeleyDB/Changes b/storage/bdb/perl/BerkeleyDB/Changes
index cbeb1a34d73..cbeb1a34d73 100644
--- a/bdb/perl/BerkeleyDB/Changes
+++ b/storage/bdb/perl/BerkeleyDB/Changes
diff --git a/bdb/perl/BerkeleyDB/MANIFEST b/storage/bdb/perl/BerkeleyDB/MANIFEST
index 7da51ef7d7c..7da51ef7d7c 100644
--- a/bdb/perl/BerkeleyDB/MANIFEST
+++ b/storage/bdb/perl/BerkeleyDB/MANIFEST
diff --git a/bdb/perl/BerkeleyDB/Makefile.PL b/storage/bdb/perl/BerkeleyDB/Makefile.PL
index 86da9a845af..86da9a845af 100644
--- a/bdb/perl/BerkeleyDB/Makefile.PL
+++ b/storage/bdb/perl/BerkeleyDB/Makefile.PL
diff --git a/bdb/perl/BerkeleyDB/README b/storage/bdb/perl/BerkeleyDB/README
index a600e313193..a600e313193 100644
--- a/bdb/perl/BerkeleyDB/README
+++ b/storage/bdb/perl/BerkeleyDB/README
diff --git a/bdb/perl/BerkeleyDB/Todo b/storage/bdb/perl/BerkeleyDB/Todo
index 12d53bcf91c..12d53bcf91c 100644
--- a/bdb/perl/BerkeleyDB/Todo
+++ b/storage/bdb/perl/BerkeleyDB/Todo
diff --git a/bdb/perl/BerkeleyDB/config.in b/storage/bdb/perl/BerkeleyDB/config.in
index fd1bb1caede..fd1bb1caede 100644
--- a/bdb/perl/BerkeleyDB/config.in
+++ b/storage/bdb/perl/BerkeleyDB/config.in
diff --git a/bdb/perl/BerkeleyDB/constants.h b/storage/bdb/perl/BerkeleyDB/constants.h
index d86cef15513..d86cef15513 100644
--- a/bdb/perl/BerkeleyDB/constants.h
+++ b/storage/bdb/perl/BerkeleyDB/constants.h
diff --git a/bdb/perl/BerkeleyDB/constants.xs b/storage/bdb/perl/BerkeleyDB/constants.xs
index 1b2c8b2c3c8..1b2c8b2c3c8 100644
--- a/bdb/perl/BerkeleyDB/constants.xs
+++ b/storage/bdb/perl/BerkeleyDB/constants.xs
diff --git a/bdb/perl/BerkeleyDB/dbinfo b/storage/bdb/perl/BerkeleyDB/dbinfo
index af2c45facf5..af2c45facf5 100755
--- a/bdb/perl/BerkeleyDB/dbinfo
+++ b/storage/bdb/perl/BerkeleyDB/dbinfo
diff --git a/bdb/perl/BerkeleyDB/hints/dec_osf.pl b/storage/bdb/perl/BerkeleyDB/hints/dec_osf.pl
index 6d7faeed2e2..6d7faeed2e2 100644
--- a/bdb/perl/BerkeleyDB/hints/dec_osf.pl
+++ b/storage/bdb/perl/BerkeleyDB/hints/dec_osf.pl
diff --git a/bdb/perl/BerkeleyDB/hints/irix_6_5.pl b/storage/bdb/perl/BerkeleyDB/hints/irix_6_5.pl
index b531673e6e0..b531673e6e0 100644
--- a/bdb/perl/BerkeleyDB/hints/irix_6_5.pl
+++ b/storage/bdb/perl/BerkeleyDB/hints/irix_6_5.pl
diff --git a/bdb/perl/BerkeleyDB/hints/solaris.pl b/storage/bdb/perl/BerkeleyDB/hints/solaris.pl
index ddd941d634a..ddd941d634a 100644
--- a/bdb/perl/BerkeleyDB/hints/solaris.pl
+++ b/storage/bdb/perl/BerkeleyDB/hints/solaris.pl
diff --git a/bdb/perl/BerkeleyDB/mkconsts b/storage/bdb/perl/BerkeleyDB/mkconsts
index 7e0964333cc..7e0964333cc 100644
--- a/bdb/perl/BerkeleyDB/mkconsts
+++ b/storage/bdb/perl/BerkeleyDB/mkconsts
diff --git a/bdb/perl/BerkeleyDB/mkpod b/storage/bdb/perl/BerkeleyDB/mkpod
index 44bbf3fbf4f..44bbf3fbf4f 100755
--- a/bdb/perl/BerkeleyDB/mkpod
+++ b/storage/bdb/perl/BerkeleyDB/mkpod
diff --git a/bdb/perl/BerkeleyDB/patches/5.004 b/storage/bdb/perl/BerkeleyDB/patches/5.004
index 143ec95afbc..143ec95afbc 100644
--- a/bdb/perl/BerkeleyDB/patches/5.004
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.004
diff --git a/bdb/perl/BerkeleyDB/patches/5.004_01 b/storage/bdb/perl/BerkeleyDB/patches/5.004_01
index 1b05eb4e02b..1b05eb4e02b 100644
--- a/bdb/perl/BerkeleyDB/patches/5.004_01
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.004_01
diff --git a/bdb/perl/BerkeleyDB/patches/5.004_02 b/storage/bdb/perl/BerkeleyDB/patches/5.004_02
index 238f8737941..238f8737941 100644
--- a/bdb/perl/BerkeleyDB/patches/5.004_02
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.004_02
diff --git a/bdb/perl/BerkeleyDB/patches/5.004_03 b/storage/bdb/perl/BerkeleyDB/patches/5.004_03
index 06331eac922..06331eac922 100644
--- a/bdb/perl/BerkeleyDB/patches/5.004_03
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.004_03
diff --git a/bdb/perl/BerkeleyDB/patches/5.004_04 b/storage/bdb/perl/BerkeleyDB/patches/5.004_04
index a227dc700d9..a227dc700d9 100644
--- a/bdb/perl/BerkeleyDB/patches/5.004_04
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.004_04
diff --git a/bdb/perl/BerkeleyDB/patches/5.004_05 b/storage/bdb/perl/BerkeleyDB/patches/5.004_05
index 51c8bf35009..51c8bf35009 100644
--- a/bdb/perl/BerkeleyDB/patches/5.004_05
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.004_05
diff --git a/bdb/perl/BerkeleyDB/patches/5.005 b/storage/bdb/perl/BerkeleyDB/patches/5.005
index effee3e8275..effee3e8275 100644
--- a/bdb/perl/BerkeleyDB/patches/5.005
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.005
diff --git a/bdb/perl/BerkeleyDB/patches/5.005_01 b/storage/bdb/perl/BerkeleyDB/patches/5.005_01
index 2a05dd545f6..2a05dd545f6 100644
--- a/bdb/perl/BerkeleyDB/patches/5.005_01
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.005_01
diff --git a/bdb/perl/BerkeleyDB/patches/5.005_02 b/storage/bdb/perl/BerkeleyDB/patches/5.005_02
index 5dd57ddc03f..5dd57ddc03f 100644
--- a/bdb/perl/BerkeleyDB/patches/5.005_02
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.005_02
diff --git a/bdb/perl/BerkeleyDB/patches/5.005_03 b/storage/bdb/perl/BerkeleyDB/patches/5.005_03
index 115f9f5b909..115f9f5b909 100644
--- a/bdb/perl/BerkeleyDB/patches/5.005_03
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.005_03
diff --git a/bdb/perl/BerkeleyDB/patches/5.6.0 b/storage/bdb/perl/BerkeleyDB/patches/5.6.0
index 1f9b3b620de..1f9b3b620de 100644
--- a/bdb/perl/BerkeleyDB/patches/5.6.0
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.6.0
diff --git a/bdb/perl/BerkeleyDB/ppport.h b/storage/bdb/perl/BerkeleyDB/ppport.h
index 0887c2159a9..0887c2159a9 100644
--- a/bdb/perl/BerkeleyDB/ppport.h
+++ b/storage/bdb/perl/BerkeleyDB/ppport.h
diff --git a/bdb/perl/BerkeleyDB/scan b/storage/bdb/perl/BerkeleyDB/scan
index eb064950b2e..eb064950b2e 100644
--- a/bdb/perl/BerkeleyDB/scan
+++ b/storage/bdb/perl/BerkeleyDB/scan
diff --git a/bdb/perl/BerkeleyDB/t/btree.t b/storage/bdb/perl/BerkeleyDB/t/btree.t
index fd6ed8f1268..fd6ed8f1268 100644
--- a/bdb/perl/BerkeleyDB/t/btree.t
+++ b/storage/bdb/perl/BerkeleyDB/t/btree.t
diff --git a/bdb/perl/BerkeleyDB/t/destroy.t b/storage/bdb/perl/BerkeleyDB/t/destroy.t
index 7457d36c583..7457d36c583 100644
--- a/bdb/perl/BerkeleyDB/t/destroy.t
+++ b/storage/bdb/perl/BerkeleyDB/t/destroy.t
diff --git a/bdb/perl/BerkeleyDB/t/env.t b/storage/bdb/perl/BerkeleyDB/t/env.t
index 3905abfae43..3905abfae43 100644
--- a/bdb/perl/BerkeleyDB/t/env.t
+++ b/storage/bdb/perl/BerkeleyDB/t/env.t
diff --git a/bdb/perl/BerkeleyDB/t/examples.t b/storage/bdb/perl/BerkeleyDB/t/examples.t
index 69b7f8ff8c5..69b7f8ff8c5 100644
--- a/bdb/perl/BerkeleyDB/t/examples.t
+++ b/storage/bdb/perl/BerkeleyDB/t/examples.t
diff --git a/bdb/perl/BerkeleyDB/t/examples.t.T b/storage/bdb/perl/BerkeleyDB/t/examples.t.T
index fe9bdf76b06..fe9bdf76b06 100644
--- a/bdb/perl/BerkeleyDB/t/examples.t.T
+++ b/storage/bdb/perl/BerkeleyDB/t/examples.t.T
diff --git a/bdb/perl/BerkeleyDB/t/examples3.t b/storage/bdb/perl/BerkeleyDB/t/examples3.t
index 22e94b770e1..22e94b770e1 100644
--- a/bdb/perl/BerkeleyDB/t/examples3.t
+++ b/storage/bdb/perl/BerkeleyDB/t/examples3.t
diff --git a/bdb/perl/BerkeleyDB/t/examples3.t.T b/storage/bdb/perl/BerkeleyDB/t/examples3.t.T
index 5eeaa14d00c..5eeaa14d00c 100644
--- a/bdb/perl/BerkeleyDB/t/examples3.t.T
+++ b/storage/bdb/perl/BerkeleyDB/t/examples3.t.T
diff --git a/bdb/perl/BerkeleyDB/t/filter.t b/storage/bdb/perl/BerkeleyDB/t/filter.t
index 47a7c107acf..47a7c107acf 100644
--- a/bdb/perl/BerkeleyDB/t/filter.t
+++ b/storage/bdb/perl/BerkeleyDB/t/filter.t
diff --git a/bdb/perl/BerkeleyDB/t/hash.t b/storage/bdb/perl/BerkeleyDB/t/hash.t
index 0e683851c3d..0e683851c3d 100644
--- a/bdb/perl/BerkeleyDB/t/hash.t
+++ b/storage/bdb/perl/BerkeleyDB/t/hash.t
diff --git a/bdb/perl/BerkeleyDB/t/join.t b/storage/bdb/perl/BerkeleyDB/t/join.t
index ed9b6a269cb..ed9b6a269cb 100644
--- a/bdb/perl/BerkeleyDB/t/join.t
+++ b/storage/bdb/perl/BerkeleyDB/t/join.t
diff --git a/bdb/perl/BerkeleyDB/t/mldbm.t b/storage/bdb/perl/BerkeleyDB/t/mldbm.t
index d35f7e15895..d35f7e15895 100644
--- a/bdb/perl/BerkeleyDB/t/mldbm.t
+++ b/storage/bdb/perl/BerkeleyDB/t/mldbm.t
diff --git a/bdb/perl/BerkeleyDB/t/queue.t b/storage/bdb/perl/BerkeleyDB/t/queue.t
index 86add129ca4..86add129ca4 100644
--- a/bdb/perl/BerkeleyDB/t/queue.t
+++ b/storage/bdb/perl/BerkeleyDB/t/queue.t
diff --git a/bdb/perl/BerkeleyDB/t/recno.t b/storage/bdb/perl/BerkeleyDB/t/recno.t
index 64b1803f736..64b1803f736 100644
--- a/bdb/perl/BerkeleyDB/t/recno.t
+++ b/storage/bdb/perl/BerkeleyDB/t/recno.t
diff --git a/bdb/perl/BerkeleyDB/t/strict.t b/storage/bdb/perl/BerkeleyDB/t/strict.t
index ab41d44cb41..ab41d44cb41 100644
--- a/bdb/perl/BerkeleyDB/t/strict.t
+++ b/storage/bdb/perl/BerkeleyDB/t/strict.t
diff --git a/bdb/perl/BerkeleyDB/t/subdb.t b/storage/bdb/perl/BerkeleyDB/t/subdb.t
index 23016d6463f..23016d6463f 100644
--- a/bdb/perl/BerkeleyDB/t/subdb.t
+++ b/storage/bdb/perl/BerkeleyDB/t/subdb.t
diff --git a/bdb/perl/BerkeleyDB/t/txn.t b/storage/bdb/perl/BerkeleyDB/t/txn.t
index ba6b636cdc8..ba6b636cdc8 100644
--- a/bdb/perl/BerkeleyDB/t/txn.t
+++ b/storage/bdb/perl/BerkeleyDB/t/txn.t
diff --git a/bdb/perl/BerkeleyDB/t/unknown.t b/storage/bdb/perl/BerkeleyDB/t/unknown.t
index f2630b585c0..f2630b585c0 100644
--- a/bdb/perl/BerkeleyDB/t/unknown.t
+++ b/storage/bdb/perl/BerkeleyDB/t/unknown.t
diff --git a/bdb/perl/BerkeleyDB/t/util.pm b/storage/bdb/perl/BerkeleyDB/t/util.pm
index 1a1449751eb..1a1449751eb 100644
--- a/bdb/perl/BerkeleyDB/t/util.pm
+++ b/storage/bdb/perl/BerkeleyDB/t/util.pm
diff --git a/bdb/perl/BerkeleyDB/typemap b/storage/bdb/perl/BerkeleyDB/typemap
index 81ead2c36d9..81ead2c36d9 100644
--- a/bdb/perl/BerkeleyDB/typemap
+++ b/storage/bdb/perl/BerkeleyDB/typemap
diff --git a/bdb/perl/DB_File/Changes b/storage/bdb/perl/DB_File/Changes
index 7883cbdfef0..7883cbdfef0 100644
--- a/bdb/perl/DB_File/Changes
+++ b/storage/bdb/perl/DB_File/Changes
diff --git a/bdb/perl/DB_File/DB_File.pm b/storage/bdb/perl/DB_File/DB_File.pm
index 49004ffa148..49004ffa148 100644
--- a/bdb/perl/DB_File/DB_File.pm
+++ b/storage/bdb/perl/DB_File/DB_File.pm
diff --git a/bdb/perl/DB_File/DB_File.xs b/storage/bdb/perl/DB_File/DB_File.xs
index fba8dede791..fba8dede791 100644
--- a/bdb/perl/DB_File/DB_File.xs
+++ b/storage/bdb/perl/DB_File/DB_File.xs
diff --git a/bdb/perl/DB_File/DB_File_BS b/storage/bdb/perl/DB_File/DB_File_BS
index 9282c498811..9282c498811 100644
--- a/bdb/perl/DB_File/DB_File_BS
+++ b/storage/bdb/perl/DB_File/DB_File_BS
diff --git a/bdb/perl/DB_File/MANIFEST b/storage/bdb/perl/DB_File/MANIFEST
index b3e1a7bd85b..b3e1a7bd85b 100644
--- a/bdb/perl/DB_File/MANIFEST
+++ b/storage/bdb/perl/DB_File/MANIFEST
diff --git a/bdb/perl/DB_File/Makefile.PL b/storage/bdb/perl/DB_File/Makefile.PL
index 4c1565d8d01..4c1565d8d01 100644
--- a/bdb/perl/DB_File/Makefile.PL
+++ b/storage/bdb/perl/DB_File/Makefile.PL
diff --git a/bdb/perl/DB_File/README b/storage/bdb/perl/DB_File/README
index b09aa9d8aee..b09aa9d8aee 100644
--- a/bdb/perl/DB_File/README
+++ b/storage/bdb/perl/DB_File/README
diff --git a/bdb/perl/DB_File/config.in b/storage/bdb/perl/DB_File/config.in
index 292b09a5fb3..292b09a5fb3 100644
--- a/bdb/perl/DB_File/config.in
+++ b/storage/bdb/perl/DB_File/config.in
diff --git a/bdb/perl/DB_File/dbinfo b/storage/bdb/perl/DB_File/dbinfo
index af2c45facf5..af2c45facf5 100644
--- a/bdb/perl/DB_File/dbinfo
+++ b/storage/bdb/perl/DB_File/dbinfo
diff --git a/bdb/perl/DB_File/fallback.h b/storage/bdb/perl/DB_File/fallback.h
index 0213308a0ee..0213308a0ee 100644
--- a/bdb/perl/DB_File/fallback.h
+++ b/storage/bdb/perl/DB_File/fallback.h
diff --git a/bdb/perl/DB_File/fallback.xs b/storage/bdb/perl/DB_File/fallback.xs
index 8650cdf7646..8650cdf7646 100644
--- a/bdb/perl/DB_File/fallback.xs
+++ b/storage/bdb/perl/DB_File/fallback.xs
diff --git a/bdb/perl/DB_File/hints/dynixptx.pl b/storage/bdb/perl/DB_File/hints/dynixptx.pl
index bb5ffa56e6b..bb5ffa56e6b 100644
--- a/bdb/perl/DB_File/hints/dynixptx.pl
+++ b/storage/bdb/perl/DB_File/hints/dynixptx.pl
diff --git a/bdb/perl/DB_File/hints/sco.pl b/storage/bdb/perl/DB_File/hints/sco.pl
index ff604409496..ff604409496 100644
--- a/bdb/perl/DB_File/hints/sco.pl
+++ b/storage/bdb/perl/DB_File/hints/sco.pl
diff --git a/bdb/perl/DB_File/patches/5.004 b/storage/bdb/perl/DB_File/patches/5.004
index 143ec95afbc..143ec95afbc 100644
--- a/bdb/perl/DB_File/patches/5.004
+++ b/storage/bdb/perl/DB_File/patches/5.004
diff --git a/bdb/perl/DB_File/patches/5.004_01 b/storage/bdb/perl/DB_File/patches/5.004_01
index 1b05eb4e02b..1b05eb4e02b 100644
--- a/bdb/perl/DB_File/patches/5.004_01
+++ b/storage/bdb/perl/DB_File/patches/5.004_01
diff --git a/bdb/perl/DB_File/patches/5.004_02 b/storage/bdb/perl/DB_File/patches/5.004_02
index 238f8737941..238f8737941 100644
--- a/bdb/perl/DB_File/patches/5.004_02
+++ b/storage/bdb/perl/DB_File/patches/5.004_02
diff --git a/bdb/perl/DB_File/patches/5.004_03 b/storage/bdb/perl/DB_File/patches/5.004_03
index 06331eac922..06331eac922 100644
--- a/bdb/perl/DB_File/patches/5.004_03
+++ b/storage/bdb/perl/DB_File/patches/5.004_03
diff --git a/bdb/perl/DB_File/patches/5.004_04 b/storage/bdb/perl/DB_File/patches/5.004_04
index a227dc700d9..a227dc700d9 100644
--- a/bdb/perl/DB_File/patches/5.004_04
+++ b/storage/bdb/perl/DB_File/patches/5.004_04
diff --git a/bdb/perl/DB_File/patches/5.004_05 b/storage/bdb/perl/DB_File/patches/5.004_05
index 51c8bf35009..51c8bf35009 100644
--- a/bdb/perl/DB_File/patches/5.004_05
+++ b/storage/bdb/perl/DB_File/patches/5.004_05
diff --git a/bdb/perl/DB_File/patches/5.005 b/storage/bdb/perl/DB_File/patches/5.005
index effee3e8275..effee3e8275 100644
--- a/bdb/perl/DB_File/patches/5.005
+++ b/storage/bdb/perl/DB_File/patches/5.005
diff --git a/bdb/perl/DB_File/patches/5.005_01 b/storage/bdb/perl/DB_File/patches/5.005_01
index 2a05dd545f6..2a05dd545f6 100644
--- a/bdb/perl/DB_File/patches/5.005_01
+++ b/storage/bdb/perl/DB_File/patches/5.005_01
diff --git a/bdb/perl/DB_File/patches/5.005_02 b/storage/bdb/perl/DB_File/patches/5.005_02
index 5dd57ddc03f..5dd57ddc03f 100644
--- a/bdb/perl/DB_File/patches/5.005_02
+++ b/storage/bdb/perl/DB_File/patches/5.005_02
diff --git a/bdb/perl/DB_File/patches/5.005_03 b/storage/bdb/perl/DB_File/patches/5.005_03
index 115f9f5b909..115f9f5b909 100644
--- a/bdb/perl/DB_File/patches/5.005_03
+++ b/storage/bdb/perl/DB_File/patches/5.005_03
diff --git a/bdb/perl/DB_File/patches/5.6.0 b/storage/bdb/perl/DB_File/patches/5.6.0
index 1f9b3b620de..1f9b3b620de 100644
--- a/bdb/perl/DB_File/patches/5.6.0
+++ b/storage/bdb/perl/DB_File/patches/5.6.0
diff --git a/bdb/perl/DB_File/ppport.h b/storage/bdb/perl/DB_File/ppport.h
index 0887c2159a9..0887c2159a9 100644
--- a/bdb/perl/DB_File/ppport.h
+++ b/storage/bdb/perl/DB_File/ppport.h
diff --git a/bdb/perl/DB_File/t/db-btree.t b/storage/bdb/perl/DB_File/t/db-btree.t
index a990a5c4ba5..a990a5c4ba5 100644
--- a/bdb/perl/DB_File/t/db-btree.t
+++ b/storage/bdb/perl/DB_File/t/db-btree.t
diff --git a/bdb/perl/DB_File/t/db-hash.t b/storage/bdb/perl/DB_File/t/db-hash.t
index 10623cc82a7..10623cc82a7 100644
--- a/bdb/perl/DB_File/t/db-hash.t
+++ b/storage/bdb/perl/DB_File/t/db-hash.t
diff --git a/bdb/perl/DB_File/t/db-recno.t b/storage/bdb/perl/DB_File/t/db-recno.t
index 5390b549376..5390b549376 100644
--- a/bdb/perl/DB_File/t/db-recno.t
+++ b/storage/bdb/perl/DB_File/t/db-recno.t
diff --git a/bdb/perl/DB_File/typemap b/storage/bdb/perl/DB_File/typemap
index 8ad7b1282dc..8ad7b1282dc 100644
--- a/bdb/perl/DB_File/typemap
+++ b/storage/bdb/perl/DB_File/typemap
diff --git a/bdb/perl/DB_File/version.c b/storage/bdb/perl/DB_File/version.c
index 03b17c18e60..03b17c18e60 100644
--- a/bdb/perl/DB_File/version.c
+++ b/storage/bdb/perl/DB_File/version.c
diff --git a/bdb/qam/qam.c b/storage/bdb/qam/qam.c
index b10f8743439..b10f8743439 100644
--- a/bdb/qam/qam.c
+++ b/storage/bdb/qam/qam.c
diff --git a/bdb/qam/qam.src b/storage/bdb/qam/qam.src
index f8bf4da4dd0..f8bf4da4dd0 100644
--- a/bdb/qam/qam.src
+++ b/storage/bdb/qam/qam.src
diff --git a/bdb/qam/qam_conv.c b/storage/bdb/qam/qam_conv.c
index d89fe06b0cf..d89fe06b0cf 100644
--- a/bdb/qam/qam_conv.c
+++ b/storage/bdb/qam/qam_conv.c
diff --git a/bdb/qam/qam_files.c b/storage/bdb/qam/qam_files.c
index f15a88d546d..f15a88d546d 100644
--- a/bdb/qam/qam_files.c
+++ b/storage/bdb/qam/qam_files.c
diff --git a/bdb/qam/qam_method.c b/storage/bdb/qam/qam_method.c
index 5415fc5d00c..5415fc5d00c 100644
--- a/bdb/qam/qam_method.c
+++ b/storage/bdb/qam/qam_method.c
diff --git a/bdb/qam/qam_open.c b/storage/bdb/qam/qam_open.c
index efe4dfc540e..efe4dfc540e 100644
--- a/bdb/qam/qam_open.c
+++ b/storage/bdb/qam/qam_open.c
diff --git a/bdb/qam/qam_rec.c b/storage/bdb/qam/qam_rec.c
index 2c0f1227752..2c0f1227752 100644
--- a/bdb/qam/qam_rec.c
+++ b/storage/bdb/qam/qam_rec.c
diff --git a/bdb/qam/qam_stat.c b/storage/bdb/qam/qam_stat.c
index 57c67da4292..57c67da4292 100644
--- a/bdb/qam/qam_stat.c
+++ b/storage/bdb/qam/qam_stat.c
diff --git a/bdb/qam/qam_upgrade.c b/storage/bdb/qam/qam_upgrade.c
index 6bd79fc948a..6bd79fc948a 100644
--- a/bdb/qam/qam_upgrade.c
+++ b/storage/bdb/qam/qam_upgrade.c
diff --git a/bdb/qam/qam_verify.c b/storage/bdb/qam/qam_verify.c
index 5b020c2c335..5b020c2c335 100644
--- a/bdb/qam/qam_verify.c
+++ b/storage/bdb/qam/qam_verify.c
diff --git a/bdb/rep/rep_method.c b/storage/bdb/rep/rep_method.c
index 6773a537f4f..6773a537f4f 100644
--- a/bdb/rep/rep_method.c
+++ b/storage/bdb/rep/rep_method.c
diff --git a/bdb/rep/rep_record.c b/storage/bdb/rep/rep_record.c
index d3619f509b4..d3619f509b4 100644
--- a/bdb/rep/rep_record.c
+++ b/storage/bdb/rep/rep_record.c
diff --git a/bdb/rep/rep_region.c b/storage/bdb/rep/rep_region.c
index 1ac3fb8a20c..1ac3fb8a20c 100644
--- a/bdb/rep/rep_region.c
+++ b/storage/bdb/rep/rep_region.c
diff --git a/bdb/rep/rep_util.c b/storage/bdb/rep/rep_util.c
index 9c99d33ed4a..9c99d33ed4a 100644
--- a/bdb/rep/rep_util.c
+++ b/storage/bdb/rep/rep_util.c
diff --git a/bdb/rpc_client/client.c b/storage/bdb/rpc_client/client.c
index b6367e21449..b6367e21449 100644
--- a/bdb/rpc_client/client.c
+++ b/storage/bdb/rpc_client/client.c
diff --git a/bdb/rpc_client/gen_client_ret.c b/storage/bdb/rpc_client/gen_client_ret.c
index f35589738cd..f35589738cd 100644
--- a/bdb/rpc_client/gen_client_ret.c
+++ b/storage/bdb/rpc_client/gen_client_ret.c
diff --git a/bdb/rpc_server/c/db_server_proc.c.in b/storage/bdb/rpc_server/c/db_server_proc.c.in
index d5d1f49508a..d5d1f49508a 100644
--- a/bdb/rpc_server/c/db_server_proc.c.in
+++ b/storage/bdb/rpc_server/c/db_server_proc.c.in
diff --git a/bdb/rpc_server/c/db_server_util.c b/storage/bdb/rpc_server/c/db_server_util.c
index 2ea270c2d19..2ea270c2d19 100644
--- a/bdb/rpc_server/c/db_server_util.c
+++ b/storage/bdb/rpc_server/c/db_server_util.c
diff --git a/bdb/rpc_server/clsrv.html b/storage/bdb/rpc_server/clsrv.html
index 599ad56f557..599ad56f557 100644
--- a/bdb/rpc_server/clsrv.html
+++ b/storage/bdb/rpc_server/clsrv.html
diff --git a/bdb/rpc_server/cxx/db_server_cxxproc.cpp b/storage/bdb/rpc_server/cxx/db_server_cxxproc.cpp
index 25278273555..25278273555 100644
--- a/bdb/rpc_server/cxx/db_server_cxxproc.cpp
+++ b/storage/bdb/rpc_server/cxx/db_server_cxxproc.cpp
diff --git a/bdb/rpc_server/cxx/db_server_cxxutil.cpp b/storage/bdb/rpc_server/cxx/db_server_cxxutil.cpp
index 60865264c00..60865264c00 100644
--- a/bdb/rpc_server/cxx/db_server_cxxutil.cpp
+++ b/storage/bdb/rpc_server/cxx/db_server_cxxutil.cpp
diff --git a/bdb/rpc_server/java/DbDispatcher.java b/storage/bdb/rpc_server/java/DbDispatcher.java
index 5c5e63fc2ad..5c5e63fc2ad 100644
--- a/bdb/rpc_server/java/DbDispatcher.java
+++ b/storage/bdb/rpc_server/java/DbDispatcher.java
diff --git a/bdb/rpc_server/java/DbServer.java b/storage/bdb/rpc_server/java/DbServer.java
index 9b20becbcdc..9b20becbcdc 100644
--- a/bdb/rpc_server/java/DbServer.java
+++ b/storage/bdb/rpc_server/java/DbServer.java
diff --git a/bdb/rpc_server/java/FreeList.java b/storage/bdb/rpc_server/java/FreeList.java
index e831c466137..e831c466137 100644
--- a/bdb/rpc_server/java/FreeList.java
+++ b/storage/bdb/rpc_server/java/FreeList.java
diff --git a/bdb/rpc_server/java/LocalIterator.java b/storage/bdb/rpc_server/java/LocalIterator.java
index eecb0b5e78d..eecb0b5e78d 100644
--- a/bdb/rpc_server/java/LocalIterator.java
+++ b/storage/bdb/rpc_server/java/LocalIterator.java
diff --git a/bdb/rpc_server/java/README b/storage/bdb/rpc_server/java/README
index c2d8f3abd57..c2d8f3abd57 100644
--- a/bdb/rpc_server/java/README
+++ b/storage/bdb/rpc_server/java/README
diff --git a/bdb/rpc_server/java/RpcDb.java b/storage/bdb/rpc_server/java/RpcDb.java
index 59da9be67dc..59da9be67dc 100644
--- a/bdb/rpc_server/java/RpcDb.java
+++ b/storage/bdb/rpc_server/java/RpcDb.java
diff --git a/bdb/rpc_server/java/RpcDbEnv.java b/storage/bdb/rpc_server/java/RpcDbEnv.java
index 9d9f1ba4324..9d9f1ba4324 100644
--- a/bdb/rpc_server/java/RpcDbEnv.java
+++ b/storage/bdb/rpc_server/java/RpcDbEnv.java
diff --git a/bdb/rpc_server/java/RpcDbTxn.java b/storage/bdb/rpc_server/java/RpcDbTxn.java
index a3207b5e35d..a3207b5e35d 100644
--- a/bdb/rpc_server/java/RpcDbTxn.java
+++ b/storage/bdb/rpc_server/java/RpcDbTxn.java
diff --git a/bdb/rpc_server/java/RpcDbc.java b/storage/bdb/rpc_server/java/RpcDbc.java
index a37b4ee4896..a37b4ee4896 100644
--- a/bdb/rpc_server/java/RpcDbc.java
+++ b/storage/bdb/rpc_server/java/RpcDbc.java
diff --git a/bdb/rpc_server/java/Timer.java b/storage/bdb/rpc_server/java/Timer.java
index e16f3084f95..e16f3084f95 100644
--- a/bdb/rpc_server/java/Timer.java
+++ b/storage/bdb/rpc_server/java/Timer.java
diff --git a/bdb/rpc_server/java/gen/DbServerStub.java b/storage/bdb/rpc_server/java/gen/DbServerStub.java
index 90fc13a6d9c..90fc13a6d9c 100644
--- a/bdb/rpc_server/java/gen/DbServerStub.java
+++ b/storage/bdb/rpc_server/java/gen/DbServerStub.java
diff --git a/bdb/rpc_server/java/gen/__db_associate_msg.java b/storage/bdb/rpc_server/java/gen/__db_associate_msg.java
index 8977303b99a..8977303b99a 100644
--- a/bdb/rpc_server/java/gen/__db_associate_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_associate_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_associate_reply.java b/storage/bdb/rpc_server/java/gen/__db_associate_reply.java
index 476d0868b33..476d0868b33 100644
--- a/bdb/rpc_server/java/gen/__db_associate_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_associate_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java b/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java
index 007ce16a974..007ce16a974 100644
--- a/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java b/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java
index 855573271b3..855573271b3 100644
--- a/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java b/storage/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java
index c86ec382456..c86ec382456 100644
--- a/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java b/storage/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java
index 4d944b6bf33..4d944b6bf33 100644
--- a/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_close_msg.java b/storage/bdb/rpc_server/java/gen/__db_close_msg.java
index ce8d213701b..ce8d213701b 100644
--- a/bdb/rpc_server/java/gen/__db_close_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_close_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_close_reply.java b/storage/bdb/rpc_server/java/gen/__db_close_reply.java
index a9380e9c053..a9380e9c053 100644
--- a/bdb/rpc_server/java/gen/__db_close_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_close_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_create_msg.java b/storage/bdb/rpc_server/java/gen/__db_create_msg.java
index d21ca50f807..d21ca50f807 100644
--- a/bdb/rpc_server/java/gen/__db_create_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_create_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_create_reply.java b/storage/bdb/rpc_server/java/gen/__db_create_reply.java
index e3dcbbab14e..e3dcbbab14e 100644
--- a/bdb/rpc_server/java/gen/__db_create_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_create_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_cursor_msg.java b/storage/bdb/rpc_server/java/gen/__db_cursor_msg.java
index 60e09db6ebb..60e09db6ebb 100644
--- a/bdb/rpc_server/java/gen/__db_cursor_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_cursor_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_cursor_reply.java b/storage/bdb/rpc_server/java/gen/__db_cursor_reply.java
index bafd2817c67..bafd2817c67 100644
--- a/bdb/rpc_server/java/gen/__db_cursor_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_cursor_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_del_msg.java b/storage/bdb/rpc_server/java/gen/__db_del_msg.java
index fdf47907dd6..fdf47907dd6 100644
--- a/bdb/rpc_server/java/gen/__db_del_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_del_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_del_reply.java b/storage/bdb/rpc_server/java/gen/__db_del_reply.java
index 8a55445944f..8a55445944f 100644
--- a/bdb/rpc_server/java/gen/__db_del_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_del_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_encrypt_msg.java b/storage/bdb/rpc_server/java/gen/__db_encrypt_msg.java
index 46d9f8ee7e8..46d9f8ee7e8 100644
--- a/bdb/rpc_server/java/gen/__db_encrypt_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_encrypt_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_encrypt_reply.java b/storage/bdb/rpc_server/java/gen/__db_encrypt_reply.java
index a97cc98c90b..a97cc98c90b 100644
--- a/bdb/rpc_server/java/gen/__db_encrypt_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_encrypt_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_extentsize_msg.java b/storage/bdb/rpc_server/java/gen/__db_extentsize_msg.java
index 41a51cff9c4..41a51cff9c4 100644
--- a/bdb/rpc_server/java/gen/__db_extentsize_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_extentsize_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_extentsize_reply.java b/storage/bdb/rpc_server/java/gen/__db_extentsize_reply.java
index 409625486c7..409625486c7 100644
--- a/bdb/rpc_server/java/gen/__db_extentsize_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_extentsize_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_flags_msg.java b/storage/bdb/rpc_server/java/gen/__db_flags_msg.java
index d8752e2e4dd..d8752e2e4dd 100644
--- a/bdb/rpc_server/java/gen/__db_flags_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_flags_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_flags_reply.java b/storage/bdb/rpc_server/java/gen/__db_flags_reply.java
index c4ec253db83..c4ec253db83 100644
--- a/bdb/rpc_server/java/gen/__db_flags_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_flags_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_get_msg.java b/storage/bdb/rpc_server/java/gen/__db_get_msg.java
index 3dfe8e9d86e..3dfe8e9d86e 100644
--- a/bdb/rpc_server/java/gen/__db_get_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_get_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_get_reply.java b/storage/bdb/rpc_server/java/gen/__db_get_reply.java
index 64ce525728a..64ce525728a 100644
--- a/bdb/rpc_server/java/gen/__db_get_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_get_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java b/storage/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java
index 8d2ed1b1c0b..8d2ed1b1c0b 100644
--- a/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java b/storage/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java
index 1885ec50240..1885ec50240 100644
--- a/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_h_nelem_msg.java b/storage/bdb/rpc_server/java/gen/__db_h_nelem_msg.java
index 7d084351755..7d084351755 100644
--- a/bdb/rpc_server/java/gen/__db_h_nelem_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_h_nelem_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_h_nelem_reply.java b/storage/bdb/rpc_server/java/gen/__db_h_nelem_reply.java
index 20c5c774e69..20c5c774e69 100644
--- a/bdb/rpc_server/java/gen/__db_h_nelem_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_h_nelem_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_join_msg.java b/storage/bdb/rpc_server/java/gen/__db_join_msg.java
index 88c72dbd6ba..88c72dbd6ba 100644
--- a/bdb/rpc_server/java/gen/__db_join_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_join_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_join_reply.java b/storage/bdb/rpc_server/java/gen/__db_join_reply.java
index 80980e23d6c..80980e23d6c 100644
--- a/bdb/rpc_server/java/gen/__db_join_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_join_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_key_range_msg.java b/storage/bdb/rpc_server/java/gen/__db_key_range_msg.java
index 233077e0964..233077e0964 100644
--- a/bdb/rpc_server/java/gen/__db_key_range_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_key_range_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_key_range_reply.java b/storage/bdb/rpc_server/java/gen/__db_key_range_reply.java
index 09244c13d1d..09244c13d1d 100644
--- a/bdb/rpc_server/java/gen/__db_key_range_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_key_range_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_lorder_msg.java b/storage/bdb/rpc_server/java/gen/__db_lorder_msg.java
index 3399ad8daf0..3399ad8daf0 100644
--- a/bdb/rpc_server/java/gen/__db_lorder_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_lorder_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_lorder_reply.java b/storage/bdb/rpc_server/java/gen/__db_lorder_reply.java
index cdcda4d4f43..cdcda4d4f43 100644
--- a/bdb/rpc_server/java/gen/__db_lorder_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_lorder_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_open_msg.java b/storage/bdb/rpc_server/java/gen/__db_open_msg.java
index 14dbd9e3b0c..14dbd9e3b0c 100644
--- a/bdb/rpc_server/java/gen/__db_open_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_open_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_open_reply.java b/storage/bdb/rpc_server/java/gen/__db_open_reply.java
index d90c3754c2f..d90c3754c2f 100644
--- a/bdb/rpc_server/java/gen/__db_open_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_open_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_pagesize_msg.java b/storage/bdb/rpc_server/java/gen/__db_pagesize_msg.java
index a452ea4e381..a452ea4e381 100644
--- a/bdb/rpc_server/java/gen/__db_pagesize_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_pagesize_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_pagesize_reply.java b/storage/bdb/rpc_server/java/gen/__db_pagesize_reply.java
index 830b2078b34..830b2078b34 100644
--- a/bdb/rpc_server/java/gen/__db_pagesize_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_pagesize_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_pget_msg.java b/storage/bdb/rpc_server/java/gen/__db_pget_msg.java
index 11d27ca9e46..11d27ca9e46 100644
--- a/bdb/rpc_server/java/gen/__db_pget_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_pget_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_pget_reply.java b/storage/bdb/rpc_server/java/gen/__db_pget_reply.java
index 86c9c2111b9..86c9c2111b9 100644
--- a/bdb/rpc_server/java/gen/__db_pget_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_pget_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_put_msg.java b/storage/bdb/rpc_server/java/gen/__db_put_msg.java
index b6159cff3a8..b6159cff3a8 100644
--- a/bdb/rpc_server/java/gen/__db_put_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_put_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_put_reply.java b/storage/bdb/rpc_server/java/gen/__db_put_reply.java
index fc89ae1c3bd..fc89ae1c3bd 100644
--- a/bdb/rpc_server/java/gen/__db_put_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_put_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_re_delim_msg.java b/storage/bdb/rpc_server/java/gen/__db_re_delim_msg.java
index c386bddd256..c386bddd256 100644
--- a/bdb/rpc_server/java/gen/__db_re_delim_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_re_delim_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_re_delim_reply.java b/storage/bdb/rpc_server/java/gen/__db_re_delim_reply.java
index aa8a797f53d..aa8a797f53d 100644
--- a/bdb/rpc_server/java/gen/__db_re_delim_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_re_delim_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_re_len_msg.java b/storage/bdb/rpc_server/java/gen/__db_re_len_msg.java
index 664de5c899c..664de5c899c 100644
--- a/bdb/rpc_server/java/gen/__db_re_len_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_re_len_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_re_len_reply.java b/storage/bdb/rpc_server/java/gen/__db_re_len_reply.java
index dda27c8c123..dda27c8c123 100644
--- a/bdb/rpc_server/java/gen/__db_re_len_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_re_len_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_re_pad_msg.java b/storage/bdb/rpc_server/java/gen/__db_re_pad_msg.java
index 2c1290b6e74..2c1290b6e74 100644
--- a/bdb/rpc_server/java/gen/__db_re_pad_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_re_pad_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_re_pad_reply.java b/storage/bdb/rpc_server/java/gen/__db_re_pad_reply.java
index f0aaa9a3a70..f0aaa9a3a70 100644
--- a/bdb/rpc_server/java/gen/__db_re_pad_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_re_pad_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_remove_msg.java b/storage/bdb/rpc_server/java/gen/__db_remove_msg.java
index dfa9066a7ec..dfa9066a7ec 100644
--- a/bdb/rpc_server/java/gen/__db_remove_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_remove_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_remove_reply.java b/storage/bdb/rpc_server/java/gen/__db_remove_reply.java
index a2b86c04985..a2b86c04985 100644
--- a/bdb/rpc_server/java/gen/__db_remove_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_remove_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_rename_msg.java b/storage/bdb/rpc_server/java/gen/__db_rename_msg.java
index 12b434e3375..12b434e3375 100644
--- a/bdb/rpc_server/java/gen/__db_rename_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_rename_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_rename_reply.java b/storage/bdb/rpc_server/java/gen/__db_rename_reply.java
index 4e4a22be570..4e4a22be570 100644
--- a/bdb/rpc_server/java/gen/__db_rename_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_rename_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_stat_msg.java b/storage/bdb/rpc_server/java/gen/__db_stat_msg.java
index af536b5f707..af536b5f707 100644
--- a/bdb/rpc_server/java/gen/__db_stat_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_stat_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_stat_reply.java b/storage/bdb/rpc_server/java/gen/__db_stat_reply.java
index 8df1460149a..8df1460149a 100644
--- a/bdb/rpc_server/java/gen/__db_stat_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_stat_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_sync_msg.java b/storage/bdb/rpc_server/java/gen/__db_sync_msg.java
index c6594670fc6..c6594670fc6 100644
--- a/bdb/rpc_server/java/gen/__db_sync_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_sync_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_sync_reply.java b/storage/bdb/rpc_server/java/gen/__db_sync_reply.java
index d0a8bc8b196..d0a8bc8b196 100644
--- a/bdb/rpc_server/java/gen/__db_sync_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_sync_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_truncate_msg.java b/storage/bdb/rpc_server/java/gen/__db_truncate_msg.java
index 38810d65660..38810d65660 100644
--- a/bdb/rpc_server/java/gen/__db_truncate_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__db_truncate_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_truncate_reply.java b/storage/bdb/rpc_server/java/gen/__db_truncate_reply.java
index c4f68869007..c4f68869007 100644
--- a/bdb/rpc_server/java/gen/__db_truncate_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__db_truncate_reply.java
diff --git a/bdb/rpc_server/java/gen/__dbc_close_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_close_msg.java
index eb1ca7f7e17..eb1ca7f7e17 100644
--- a/bdb/rpc_server/java/gen/__dbc_close_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__dbc_close_msg.java
diff --git a/bdb/rpc_server/java/gen/__dbc_close_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_close_reply.java
index 47459aace36..47459aace36 100644
--- a/bdb/rpc_server/java/gen/__dbc_close_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__dbc_close_reply.java
diff --git a/bdb/rpc_server/java/gen/__dbc_count_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_count_msg.java
index 5f554e18a1b..5f554e18a1b 100644
--- a/bdb/rpc_server/java/gen/__dbc_count_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__dbc_count_msg.java
diff --git a/bdb/rpc_server/java/gen/__dbc_count_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_count_reply.java
index 4daecdd2296..4daecdd2296 100644
--- a/bdb/rpc_server/java/gen/__dbc_count_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__dbc_count_reply.java
diff --git a/bdb/rpc_server/java/gen/__dbc_del_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_del_msg.java
index bc4bd05f573..bc4bd05f573 100644
--- a/bdb/rpc_server/java/gen/__dbc_del_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__dbc_del_msg.java
diff --git a/bdb/rpc_server/java/gen/__dbc_del_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_del_reply.java
index e55ac9ffaf6..e55ac9ffaf6 100644
--- a/bdb/rpc_server/java/gen/__dbc_del_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__dbc_del_reply.java
diff --git a/bdb/rpc_server/java/gen/__dbc_dup_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_dup_msg.java
index 9a3894e6158..9a3894e6158 100644
--- a/bdb/rpc_server/java/gen/__dbc_dup_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__dbc_dup_msg.java
diff --git a/bdb/rpc_server/java/gen/__dbc_dup_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_dup_reply.java
index 6b942f1a61a..6b942f1a61a 100644
--- a/bdb/rpc_server/java/gen/__dbc_dup_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__dbc_dup_reply.java
diff --git a/bdb/rpc_server/java/gen/__dbc_get_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_get_msg.java
index 672ace43fdd..672ace43fdd 100644
--- a/bdb/rpc_server/java/gen/__dbc_get_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__dbc_get_msg.java
diff --git a/bdb/rpc_server/java/gen/__dbc_get_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_get_reply.java
index 8671fec6335..8671fec6335 100644
--- a/bdb/rpc_server/java/gen/__dbc_get_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__dbc_get_reply.java
diff --git a/bdb/rpc_server/java/gen/__dbc_pget_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_pget_msg.java
index 8ca3c6171a1..8ca3c6171a1 100644
--- a/bdb/rpc_server/java/gen/__dbc_pget_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__dbc_pget_msg.java
diff --git a/bdb/rpc_server/java/gen/__dbc_pget_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_pget_reply.java
index 16cc795878d..16cc795878d 100644
--- a/bdb/rpc_server/java/gen/__dbc_pget_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__dbc_pget_reply.java
diff --git a/bdb/rpc_server/java/gen/__dbc_put_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_put_msg.java
index 98d12423dc5..98d12423dc5 100644
--- a/bdb/rpc_server/java/gen/__dbc_put_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__dbc_put_msg.java
diff --git a/bdb/rpc_server/java/gen/__dbc_put_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_put_reply.java
index 385f9f783fb..385f9f783fb 100644
--- a/bdb/rpc_server/java/gen/__dbc_put_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__dbc_put_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_cachesize_msg.java b/storage/bdb/rpc_server/java/gen/__env_cachesize_msg.java
index d1fce1ffa35..d1fce1ffa35 100644
--- a/bdb/rpc_server/java/gen/__env_cachesize_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__env_cachesize_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_cachesize_reply.java b/storage/bdb/rpc_server/java/gen/__env_cachesize_reply.java
index 193f8355d71..193f8355d71 100644
--- a/bdb/rpc_server/java/gen/__env_cachesize_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__env_cachesize_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_close_msg.java b/storage/bdb/rpc_server/java/gen/__env_close_msg.java
index 5e657bacfa5..5e657bacfa5 100644
--- a/bdb/rpc_server/java/gen/__env_close_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__env_close_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_close_reply.java b/storage/bdb/rpc_server/java/gen/__env_close_reply.java
index 11e61f7c8c3..11e61f7c8c3 100644
--- a/bdb/rpc_server/java/gen/__env_close_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__env_close_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_create_msg.java b/storage/bdb/rpc_server/java/gen/__env_create_msg.java
index dbe546ae23a..dbe546ae23a 100644
--- a/bdb/rpc_server/java/gen/__env_create_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__env_create_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_create_reply.java b/storage/bdb/rpc_server/java/gen/__env_create_reply.java
index 5427fc4bc1e..5427fc4bc1e 100644
--- a/bdb/rpc_server/java/gen/__env_create_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__env_create_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_dbremove_msg.java b/storage/bdb/rpc_server/java/gen/__env_dbremove_msg.java
index 9730a92c590..9730a92c590 100644
--- a/bdb/rpc_server/java/gen/__env_dbremove_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__env_dbremove_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_dbremove_reply.java b/storage/bdb/rpc_server/java/gen/__env_dbremove_reply.java
index 75cc5a940cc..75cc5a940cc 100644
--- a/bdb/rpc_server/java/gen/__env_dbremove_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__env_dbremove_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_dbrename_msg.java b/storage/bdb/rpc_server/java/gen/__env_dbrename_msg.java
index 0bbda262b64..0bbda262b64 100644
--- a/bdb/rpc_server/java/gen/__env_dbrename_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__env_dbrename_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_dbrename_reply.java b/storage/bdb/rpc_server/java/gen/__env_dbrename_reply.java
index 0cc8882305d..0cc8882305d 100644
--- a/bdb/rpc_server/java/gen/__env_dbrename_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__env_dbrename_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_encrypt_msg.java b/storage/bdb/rpc_server/java/gen/__env_encrypt_msg.java
index 84e9a36d372..84e9a36d372 100644
--- a/bdb/rpc_server/java/gen/__env_encrypt_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__env_encrypt_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_encrypt_reply.java b/storage/bdb/rpc_server/java/gen/__env_encrypt_reply.java
index e202a3089d0..e202a3089d0 100644
--- a/bdb/rpc_server/java/gen/__env_encrypt_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__env_encrypt_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_flags_msg.java b/storage/bdb/rpc_server/java/gen/__env_flags_msg.java
index 25cd5f85f6d..25cd5f85f6d 100644
--- a/bdb/rpc_server/java/gen/__env_flags_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__env_flags_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_flags_reply.java b/storage/bdb/rpc_server/java/gen/__env_flags_reply.java
index d348a9224ea..d348a9224ea 100644
--- a/bdb/rpc_server/java/gen/__env_flags_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__env_flags_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_open_msg.java b/storage/bdb/rpc_server/java/gen/__env_open_msg.java
index e4649b41f9e..e4649b41f9e 100644
--- a/bdb/rpc_server/java/gen/__env_open_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__env_open_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_open_reply.java b/storage/bdb/rpc_server/java/gen/__env_open_reply.java
index 1994afb4cf2..1994afb4cf2 100644
--- a/bdb/rpc_server/java/gen/__env_open_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__env_open_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_remove_msg.java b/storage/bdb/rpc_server/java/gen/__env_remove_msg.java
index b32d758f0f5..b32d758f0f5 100644
--- a/bdb/rpc_server/java/gen/__env_remove_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__env_remove_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_remove_reply.java b/storage/bdb/rpc_server/java/gen/__env_remove_reply.java
index 19e4d52f662..19e4d52f662 100644
--- a/bdb/rpc_server/java/gen/__env_remove_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__env_remove_reply.java
diff --git a/bdb/rpc_server/java/gen/__txn_abort_msg.java b/storage/bdb/rpc_server/java/gen/__txn_abort_msg.java
index ff44c534e46..ff44c534e46 100644
--- a/bdb/rpc_server/java/gen/__txn_abort_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__txn_abort_msg.java
diff --git a/bdb/rpc_server/java/gen/__txn_abort_reply.java b/storage/bdb/rpc_server/java/gen/__txn_abort_reply.java
index 58f275c1a8f..58f275c1a8f 100644
--- a/bdb/rpc_server/java/gen/__txn_abort_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__txn_abort_reply.java
diff --git a/bdb/rpc_server/java/gen/__txn_begin_msg.java b/storage/bdb/rpc_server/java/gen/__txn_begin_msg.java
index 877031e8d3a..877031e8d3a 100644
--- a/bdb/rpc_server/java/gen/__txn_begin_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__txn_begin_msg.java
diff --git a/bdb/rpc_server/java/gen/__txn_begin_reply.java b/storage/bdb/rpc_server/java/gen/__txn_begin_reply.java
index 65a0c4016c2..65a0c4016c2 100644
--- a/bdb/rpc_server/java/gen/__txn_begin_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__txn_begin_reply.java
diff --git a/bdb/rpc_server/java/gen/__txn_commit_msg.java b/storage/bdb/rpc_server/java/gen/__txn_commit_msg.java
index 4b988d0c282..4b988d0c282 100644
--- a/bdb/rpc_server/java/gen/__txn_commit_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__txn_commit_msg.java
diff --git a/bdb/rpc_server/java/gen/__txn_commit_reply.java b/storage/bdb/rpc_server/java/gen/__txn_commit_reply.java
index b26937b82dd..b26937b82dd 100644
--- a/bdb/rpc_server/java/gen/__txn_commit_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__txn_commit_reply.java
diff --git a/bdb/rpc_server/java/gen/__txn_discard_msg.java b/storage/bdb/rpc_server/java/gen/__txn_discard_msg.java
index 87f5d4f77a7..87f5d4f77a7 100644
--- a/bdb/rpc_server/java/gen/__txn_discard_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__txn_discard_msg.java
diff --git a/bdb/rpc_server/java/gen/__txn_discard_reply.java b/storage/bdb/rpc_server/java/gen/__txn_discard_reply.java
index 9792211afcc..9792211afcc 100644
--- a/bdb/rpc_server/java/gen/__txn_discard_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__txn_discard_reply.java
diff --git a/bdb/rpc_server/java/gen/__txn_prepare_msg.java b/storage/bdb/rpc_server/java/gen/__txn_prepare_msg.java
index 6e09f2c7771..6e09f2c7771 100644
--- a/bdb/rpc_server/java/gen/__txn_prepare_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__txn_prepare_msg.java
diff --git a/bdb/rpc_server/java/gen/__txn_prepare_reply.java b/storage/bdb/rpc_server/java/gen/__txn_prepare_reply.java
index d7590117952..d7590117952 100644
--- a/bdb/rpc_server/java/gen/__txn_prepare_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__txn_prepare_reply.java
diff --git a/bdb/rpc_server/java/gen/__txn_recover_msg.java b/storage/bdb/rpc_server/java/gen/__txn_recover_msg.java
index 65153334403..65153334403 100644
--- a/bdb/rpc_server/java/gen/__txn_recover_msg.java
+++ b/storage/bdb/rpc_server/java/gen/__txn_recover_msg.java
diff --git a/bdb/rpc_server/java/gen/__txn_recover_reply.java b/storage/bdb/rpc_server/java/gen/__txn_recover_reply.java
index 0161ec949da..0161ec949da 100644
--- a/bdb/rpc_server/java/gen/__txn_recover_reply.java
+++ b/storage/bdb/rpc_server/java/gen/__txn_recover_reply.java
diff --git a/bdb/rpc_server/java/gen/db_server.java b/storage/bdb/rpc_server/java/gen/db_server.java
index a14a77028a2..a14a77028a2 100644
--- a/bdb/rpc_server/java/gen/db_server.java
+++ b/storage/bdb/rpc_server/java/gen/db_server.java
diff --git a/bdb/rpc_server/java/jrpcgen.jar b/storage/bdb/rpc_server/java/jrpcgen.jar
index 338825b848d..338825b848d 100644
--- a/bdb/rpc_server/java/jrpcgen.jar
+++ b/storage/bdb/rpc_server/java/jrpcgen.jar
Binary files differ
diff --git a/bdb/rpc_server/java/oncrpc.jar b/storage/bdb/rpc_server/java/oncrpc.jar
index e0f5cfa6966..e0f5cfa6966 100644
--- a/bdb/rpc_server/java/oncrpc.jar
+++ b/storage/bdb/rpc_server/java/oncrpc.jar
Binary files differ
diff --git a/bdb/rpc_server/java/s_jrpcgen b/storage/bdb/rpc_server/java/s_jrpcgen
index fed8cbf56bb..fed8cbf56bb 100644
--- a/bdb/rpc_server/java/s_jrpcgen
+++ b/storage/bdb/rpc_server/java/s_jrpcgen
diff --git a/bdb/rpc_server/rpc.src b/storage/bdb/rpc_server/rpc.src
index 7afee49b066..7afee49b066 100644
--- a/bdb/rpc_server/rpc.src
+++ b/storage/bdb/rpc_server/rpc.src
diff --git a/bdb/tcl/docs/db.html b/storage/bdb/tcl/docs/db.html
index 4f04c2c4f96..4f04c2c4f96 100644
--- a/bdb/tcl/docs/db.html
+++ b/storage/bdb/tcl/docs/db.html
diff --git a/bdb/tcl/docs/env.html b/storage/bdb/tcl/docs/env.html
index 79c349841ac..79c349841ac 100644
--- a/bdb/tcl/docs/env.html
+++ b/storage/bdb/tcl/docs/env.html
diff --git a/bdb/tcl/docs/historic.html b/storage/bdb/tcl/docs/historic.html
index 85f474fbc0f..85f474fbc0f 100644
--- a/bdb/tcl/docs/historic.html
+++ b/storage/bdb/tcl/docs/historic.html
diff --git a/bdb/tcl/docs/index.html b/storage/bdb/tcl/docs/index.html
index 845b6ca81e2..845b6ca81e2 100644
--- a/bdb/tcl/docs/index.html
+++ b/storage/bdb/tcl/docs/index.html
diff --git a/bdb/tcl/docs/library.html b/storage/bdb/tcl/docs/library.html
index bfb1588c3f2..bfb1588c3f2 100644
--- a/bdb/tcl/docs/library.html
+++ b/storage/bdb/tcl/docs/library.html
diff --git a/bdb/tcl/docs/lock.html b/storage/bdb/tcl/docs/lock.html
index d65142b798b..d65142b798b 100644
--- a/bdb/tcl/docs/lock.html
+++ b/storage/bdb/tcl/docs/lock.html
diff --git a/bdb/tcl/docs/log.html b/storage/bdb/tcl/docs/log.html
index 49f2f0ad2e0..49f2f0ad2e0 100644
--- a/bdb/tcl/docs/log.html
+++ b/storage/bdb/tcl/docs/log.html
diff --git a/bdb/tcl/docs/mpool.html b/storage/bdb/tcl/docs/mpool.html
index 7f2359b36e9..7f2359b36e9 100644
--- a/bdb/tcl/docs/mpool.html
+++ b/storage/bdb/tcl/docs/mpool.html
diff --git a/bdb/tcl/docs/rep.html b/storage/bdb/tcl/docs/rep.html
index 079fe443a63..079fe443a63 100644
--- a/bdb/tcl/docs/rep.html
+++ b/storage/bdb/tcl/docs/rep.html
diff --git a/bdb/tcl/docs/test.html b/storage/bdb/tcl/docs/test.html
index 603ae56a51e..603ae56a51e 100644
--- a/bdb/tcl/docs/test.html
+++ b/storage/bdb/tcl/docs/test.html
diff --git a/bdb/tcl/docs/txn.html b/storage/bdb/tcl/docs/txn.html
index 07c88c0fe1d..07c88c0fe1d 100644
--- a/bdb/tcl/docs/txn.html
+++ b/storage/bdb/tcl/docs/txn.html
diff --git a/bdb/tcl/tcl_compat.c b/storage/bdb/tcl/tcl_compat.c
index e77bc32aedf..e77bc32aedf 100644
--- a/bdb/tcl/tcl_compat.c
+++ b/storage/bdb/tcl/tcl_compat.c
diff --git a/bdb/tcl/tcl_db.c b/storage/bdb/tcl/tcl_db.c
index 7df2e48311c..7df2e48311c 100644
--- a/bdb/tcl/tcl_db.c
+++ b/storage/bdb/tcl/tcl_db.c
diff --git a/bdb/tcl/tcl_db_pkg.c b/storage/bdb/tcl/tcl_db_pkg.c
index ce37598dc1a..ce37598dc1a 100644
--- a/bdb/tcl/tcl_db_pkg.c
+++ b/storage/bdb/tcl/tcl_db_pkg.c
diff --git a/bdb/tcl/tcl_dbcursor.c b/storage/bdb/tcl/tcl_dbcursor.c
index fb426e53f48..fb426e53f48 100644
--- a/bdb/tcl/tcl_dbcursor.c
+++ b/storage/bdb/tcl/tcl_dbcursor.c
diff --git a/bdb/tcl/tcl_env.c b/storage/bdb/tcl/tcl_env.c
index cdf4890e9fc..cdf4890e9fc 100644
--- a/bdb/tcl/tcl_env.c
+++ b/storage/bdb/tcl/tcl_env.c
diff --git a/bdb/tcl/tcl_internal.c b/storage/bdb/tcl/tcl_internal.c
index 2d6ad4df444..2d6ad4df444 100644
--- a/bdb/tcl/tcl_internal.c
+++ b/storage/bdb/tcl/tcl_internal.c
diff --git a/bdb/tcl/tcl_lock.c b/storage/bdb/tcl/tcl_lock.c
index 6cb96dbb0da..6cb96dbb0da 100644
--- a/bdb/tcl/tcl_lock.c
+++ b/storage/bdb/tcl/tcl_lock.c
diff --git a/bdb/tcl/tcl_log.c b/storage/bdb/tcl/tcl_log.c
index be6eebfb013..be6eebfb013 100644
--- a/bdb/tcl/tcl_log.c
+++ b/storage/bdb/tcl/tcl_log.c
diff --git a/bdb/tcl/tcl_mp.c b/storage/bdb/tcl/tcl_mp.c
index 0c4411cb58a..0c4411cb58a 100644
--- a/bdb/tcl/tcl_mp.c
+++ b/storage/bdb/tcl/tcl_mp.c
diff --git a/bdb/tcl/tcl_rep.c b/storage/bdb/tcl/tcl_rep.c
index c72c9971338..c72c9971338 100644
--- a/bdb/tcl/tcl_rep.c
+++ b/storage/bdb/tcl/tcl_rep.c
diff --git a/bdb/tcl/tcl_txn.c b/storage/bdb/tcl/tcl_txn.c
index b5fab637943..b5fab637943 100644
--- a/bdb/tcl/tcl_txn.c
+++ b/storage/bdb/tcl/tcl_txn.c
diff --git a/bdb/tcl/tcl_util.c b/storage/bdb/tcl/tcl_util.c
index 3c0665f9e38..3c0665f9e38 100644
--- a/bdb/tcl/tcl_util.c
+++ b/storage/bdb/tcl/tcl_util.c
diff --git a/bdb/test/archive.tcl b/storage/bdb/test/archive.tcl
index 9b5e764b2b4..9b5e764b2b4 100644
--- a/bdb/test/archive.tcl
+++ b/storage/bdb/test/archive.tcl
diff --git a/bdb/test/bigfile001.tcl b/storage/bdb/test/bigfile001.tcl
index 78dcd940f5e..78dcd940f5e 100644
--- a/bdb/test/bigfile001.tcl
+++ b/storage/bdb/test/bigfile001.tcl
diff --git a/bdb/test/bigfile002.tcl b/storage/bdb/test/bigfile002.tcl
index f3e6defeaba..f3e6defeaba 100644
--- a/bdb/test/bigfile002.tcl
+++ b/storage/bdb/test/bigfile002.tcl
diff --git a/bdb/test/byteorder.tcl b/storage/bdb/test/byteorder.tcl
index 823ca46270d..823ca46270d 100644
--- a/bdb/test/byteorder.tcl
+++ b/storage/bdb/test/byteorder.tcl
diff --git a/bdb/test/conscript.tcl b/storage/bdb/test/conscript.tcl
index fd12c6e51a0..fd12c6e51a0 100644
--- a/bdb/test/conscript.tcl
+++ b/storage/bdb/test/conscript.tcl
diff --git a/bdb/test/dbm.tcl b/storage/bdb/test/dbm.tcl
index a392c7a9f3a..a392c7a9f3a 100644
--- a/bdb/test/dbm.tcl
+++ b/storage/bdb/test/dbm.tcl
diff --git a/bdb/test/dbscript.tcl b/storage/bdb/test/dbscript.tcl
index 5decc493e9e..5decc493e9e 100644
--- a/bdb/test/dbscript.tcl
+++ b/storage/bdb/test/dbscript.tcl
diff --git a/bdb/test/ddoyscript.tcl b/storage/bdb/test/ddoyscript.tcl
index 5478a1a98e0..5478a1a98e0 100644
--- a/bdb/test/ddoyscript.tcl
+++ b/storage/bdb/test/ddoyscript.tcl
diff --git a/bdb/test/ddscript.tcl b/storage/bdb/test/ddscript.tcl
index 621906233a9..621906233a9 100644
--- a/bdb/test/ddscript.tcl
+++ b/storage/bdb/test/ddscript.tcl
diff --git a/bdb/test/dead001.tcl b/storage/bdb/test/dead001.tcl
index e9853a87e53..e9853a87e53 100644
--- a/bdb/test/dead001.tcl
+++ b/storage/bdb/test/dead001.tcl
diff --git a/bdb/test/dead002.tcl b/storage/bdb/test/dead002.tcl
index bc19e7127e5..bc19e7127e5 100644
--- a/bdb/test/dead002.tcl
+++ b/storage/bdb/test/dead002.tcl
diff --git a/bdb/test/dead003.tcl b/storage/bdb/test/dead003.tcl
index 48088e1427c..48088e1427c 100644
--- a/bdb/test/dead003.tcl
+++ b/storage/bdb/test/dead003.tcl
diff --git a/bdb/test/dead004.tcl b/storage/bdb/test/dead004.tcl
index f5306a0d892..f5306a0d892 100644
--- a/bdb/test/dead004.tcl
+++ b/storage/bdb/test/dead004.tcl
diff --git a/bdb/test/dead005.tcl b/storage/bdb/test/dead005.tcl
index 71be8b1713f..71be8b1713f 100644
--- a/bdb/test/dead005.tcl
+++ b/storage/bdb/test/dead005.tcl
diff --git a/bdb/test/dead006.tcl b/storage/bdb/test/dead006.tcl
index b70e011fb74..b70e011fb74 100644
--- a/bdb/test/dead006.tcl
+++ b/storage/bdb/test/dead006.tcl
diff --git a/bdb/test/dead007.tcl b/storage/bdb/test/dead007.tcl
index 2b6a78cb4b9..2b6a78cb4b9 100644
--- a/bdb/test/dead007.tcl
+++ b/storage/bdb/test/dead007.tcl
diff --git a/bdb/test/env001.tcl b/storage/bdb/test/env001.tcl
index 781029f6a5c..781029f6a5c 100644
--- a/bdb/test/env001.tcl
+++ b/storage/bdb/test/env001.tcl
diff --git a/bdb/test/env002.tcl b/storage/bdb/test/env002.tcl
index 89c44f63a12..89c44f63a12 100644
--- a/bdb/test/env002.tcl
+++ b/storage/bdb/test/env002.tcl
diff --git a/bdb/test/env003.tcl b/storage/bdb/test/env003.tcl
index c16b54dd5e0..c16b54dd5e0 100644
--- a/bdb/test/env003.tcl
+++ b/storage/bdb/test/env003.tcl
diff --git a/bdb/test/env004.tcl b/storage/bdb/test/env004.tcl
index e93a0d95308..e93a0d95308 100644
--- a/bdb/test/env004.tcl
+++ b/storage/bdb/test/env004.tcl
diff --git a/bdb/test/env005.tcl b/storage/bdb/test/env005.tcl
index 03bb1b40b34..03bb1b40b34 100644
--- a/bdb/test/env005.tcl
+++ b/storage/bdb/test/env005.tcl
diff --git a/bdb/test/env006.tcl b/storage/bdb/test/env006.tcl
index 48fc6982772..48fc6982772 100644
--- a/bdb/test/env006.tcl
+++ b/storage/bdb/test/env006.tcl
diff --git a/bdb/test/env007.tcl b/storage/bdb/test/env007.tcl
index 5748d2dbc89..5748d2dbc89 100644
--- a/bdb/test/env007.tcl
+++ b/storage/bdb/test/env007.tcl
diff --git a/bdb/test/env008.tcl b/storage/bdb/test/env008.tcl
index dccdb41f612..dccdb41f612 100644
--- a/bdb/test/env008.tcl
+++ b/storage/bdb/test/env008.tcl
diff --git a/bdb/test/env009.tcl b/storage/bdb/test/env009.tcl
index 264d5e2dfec..264d5e2dfec 100644
--- a/bdb/test/env009.tcl
+++ b/storage/bdb/test/env009.tcl
diff --git a/bdb/test/env010.tcl b/storage/bdb/test/env010.tcl
index 4444e34e439..4444e34e439 100644
--- a/bdb/test/env010.tcl
+++ b/storage/bdb/test/env010.tcl
diff --git a/bdb/test/env011.tcl b/storage/bdb/test/env011.tcl
index 4061bb3fe51..4061bb3fe51 100644
--- a/bdb/test/env011.tcl
+++ b/storage/bdb/test/env011.tcl
diff --git a/bdb/test/hsearch.tcl b/storage/bdb/test/hsearch.tcl
index afeed93f74e..afeed93f74e 100644
--- a/bdb/test/hsearch.tcl
+++ b/storage/bdb/test/hsearch.tcl
diff --git a/bdb/test/join.tcl b/storage/bdb/test/join.tcl
index 87b0d1fae58..87b0d1fae58 100644
--- a/bdb/test/join.tcl
+++ b/storage/bdb/test/join.tcl
diff --git a/bdb/test/lock001.tcl b/storage/bdb/test/lock001.tcl
index 1afcc471fc1..1afcc471fc1 100644
--- a/bdb/test/lock001.tcl
+++ b/storage/bdb/test/lock001.tcl
diff --git a/bdb/test/lock002.tcl b/storage/bdb/test/lock002.tcl
index a1ad8760c9d..a1ad8760c9d 100644
--- a/bdb/test/lock002.tcl
+++ b/storage/bdb/test/lock002.tcl
diff --git a/bdb/test/lock003.tcl b/storage/bdb/test/lock003.tcl
index 91a8a2e90f6..91a8a2e90f6 100644
--- a/bdb/test/lock003.tcl
+++ b/storage/bdb/test/lock003.tcl
diff --git a/bdb/test/lock004.tcl b/storage/bdb/test/lock004.tcl
index 7fd51ee42f2..7fd51ee42f2 100644
--- a/bdb/test/lock004.tcl
+++ b/storage/bdb/test/lock004.tcl
diff --git a/bdb/test/lock005.tcl b/storage/bdb/test/lock005.tcl
index 5afe7344d36..5afe7344d36 100644
--- a/bdb/test/lock005.tcl
+++ b/storage/bdb/test/lock005.tcl
diff --git a/bdb/test/lockscript.tcl b/storage/bdb/test/lockscript.tcl
index 812339a4a70..812339a4a70 100644
--- a/bdb/test/lockscript.tcl
+++ b/storage/bdb/test/lockscript.tcl
diff --git a/bdb/test/log001.tcl b/storage/bdb/test/log001.tcl
index 87df780cb5a..87df780cb5a 100644
--- a/bdb/test/log001.tcl
+++ b/storage/bdb/test/log001.tcl
diff --git a/bdb/test/log002.tcl b/storage/bdb/test/log002.tcl
index 6e91f55398f..6e91f55398f 100644
--- a/bdb/test/log002.tcl
+++ b/storage/bdb/test/log002.tcl
diff --git a/bdb/test/log003.tcl b/storage/bdb/test/log003.tcl
index 11297b59d50..11297b59d50 100644
--- a/bdb/test/log003.tcl
+++ b/storage/bdb/test/log003.tcl
diff --git a/bdb/test/log004.tcl b/storage/bdb/test/log004.tcl
index 66968a8c1b4..66968a8c1b4 100644
--- a/bdb/test/log004.tcl
+++ b/storage/bdb/test/log004.tcl
diff --git a/bdb/test/log005.tcl b/storage/bdb/test/log005.tcl
index ab2ad703c55..ab2ad703c55 100644
--- a/bdb/test/log005.tcl
+++ b/storage/bdb/test/log005.tcl
diff --git a/bdb/test/logtrack.tcl b/storage/bdb/test/logtrack.tcl
index ad6b480b4e3..ad6b480b4e3 100644
--- a/bdb/test/logtrack.tcl
+++ b/storage/bdb/test/logtrack.tcl
diff --git a/bdb/test/mdbscript.tcl b/storage/bdb/test/mdbscript.tcl
index 9f3c971ee3c..9f3c971ee3c 100644
--- a/bdb/test/mdbscript.tcl
+++ b/storage/bdb/test/mdbscript.tcl
diff --git a/bdb/test/memp001.tcl b/storage/bdb/test/memp001.tcl
index c4bbf99b9b2..c4bbf99b9b2 100644
--- a/bdb/test/memp001.tcl
+++ b/storage/bdb/test/memp001.tcl
diff --git a/bdb/test/memp002.tcl b/storage/bdb/test/memp002.tcl
index d55f2987f06..d55f2987f06 100644
--- a/bdb/test/memp002.tcl
+++ b/storage/bdb/test/memp002.tcl
diff --git a/bdb/test/memp003.tcl b/storage/bdb/test/memp003.tcl
index 31eb55b757c..31eb55b757c 100644
--- a/bdb/test/memp003.tcl
+++ b/storage/bdb/test/memp003.tcl
diff --git a/bdb/test/mpoolscript.tcl b/storage/bdb/test/mpoolscript.tcl
index c13f70eb945..c13f70eb945 100644
--- a/bdb/test/mpoolscript.tcl
+++ b/storage/bdb/test/mpoolscript.tcl
diff --git a/bdb/test/mutex001.tcl b/storage/bdb/test/mutex001.tcl
index 93f858993a5..93f858993a5 100644
--- a/bdb/test/mutex001.tcl
+++ b/storage/bdb/test/mutex001.tcl
diff --git a/bdb/test/mutex002.tcl b/storage/bdb/test/mutex002.tcl
index 193e600fe8b..193e600fe8b 100644
--- a/bdb/test/mutex002.tcl
+++ b/storage/bdb/test/mutex002.tcl
diff --git a/bdb/test/mutex003.tcl b/storage/bdb/test/mutex003.tcl
index da35ac0d115..da35ac0d115 100644
--- a/bdb/test/mutex003.tcl
+++ b/storage/bdb/test/mutex003.tcl
diff --git a/bdb/test/mutexscript.tcl b/storage/bdb/test/mutexscript.tcl
index bc410f2716d..bc410f2716d 100644
--- a/bdb/test/mutexscript.tcl
+++ b/storage/bdb/test/mutexscript.tcl
diff --git a/bdb/test/ndbm.tcl b/storage/bdb/test/ndbm.tcl
index 0bf8e0cc87c..0bf8e0cc87c 100644
--- a/bdb/test/ndbm.tcl
+++ b/storage/bdb/test/ndbm.tcl
diff --git a/bdb/test/parallel.tcl b/storage/bdb/test/parallel.tcl
index 4e101c088cb..4e101c088cb 100644
--- a/bdb/test/parallel.tcl
+++ b/storage/bdb/test/parallel.tcl
diff --git a/bdb/test/recd001.tcl b/storage/bdb/test/recd001.tcl
index bc7ac6d896a..bc7ac6d896a 100644
--- a/bdb/test/recd001.tcl
+++ b/storage/bdb/test/recd001.tcl
diff --git a/bdb/test/recd002.tcl b/storage/bdb/test/recd002.tcl
index ed579291283..ed579291283 100644
--- a/bdb/test/recd002.tcl
+++ b/storage/bdb/test/recd002.tcl
diff --git a/bdb/test/recd003.tcl b/storage/bdb/test/recd003.tcl
index 0fd054832ce..0fd054832ce 100644
--- a/bdb/test/recd003.tcl
+++ b/storage/bdb/test/recd003.tcl
diff --git a/bdb/test/recd004.tcl b/storage/bdb/test/recd004.tcl
index 74504ac3cd7..74504ac3cd7 100644
--- a/bdb/test/recd004.tcl
+++ b/storage/bdb/test/recd004.tcl
diff --git a/bdb/test/recd005.tcl b/storage/bdb/test/recd005.tcl
index 7668c9e3be3..7668c9e3be3 100644
--- a/bdb/test/recd005.tcl
+++ b/storage/bdb/test/recd005.tcl
diff --git a/bdb/test/recd006.tcl b/storage/bdb/test/recd006.tcl
index fc35e755b08..fc35e755b08 100644
--- a/bdb/test/recd006.tcl
+++ b/storage/bdb/test/recd006.tcl
diff --git a/bdb/test/recd007.tcl b/storage/bdb/test/recd007.tcl
index aeac3bea2c1..aeac3bea2c1 100644
--- a/bdb/test/recd007.tcl
+++ b/storage/bdb/test/recd007.tcl
diff --git a/bdb/test/recd008.tcl b/storage/bdb/test/recd008.tcl
index 548813a403b..548813a403b 100644
--- a/bdb/test/recd008.tcl
+++ b/storage/bdb/test/recd008.tcl
diff --git a/bdb/test/recd009.tcl b/storage/bdb/test/recd009.tcl
index 5538d2d7652..5538d2d7652 100644
--- a/bdb/test/recd009.tcl
+++ b/storage/bdb/test/recd009.tcl
diff --git a/bdb/test/recd010.tcl b/storage/bdb/test/recd010.tcl
index 2549e03a2c0..2549e03a2c0 100644
--- a/bdb/test/recd010.tcl
+++ b/storage/bdb/test/recd010.tcl
diff --git a/bdb/test/recd011.tcl b/storage/bdb/test/recd011.tcl
index 74108a30650..74108a30650 100644
--- a/bdb/test/recd011.tcl
+++ b/storage/bdb/test/recd011.tcl
diff --git a/bdb/test/recd012.tcl b/storage/bdb/test/recd012.tcl
index 8231e648588..8231e648588 100644
--- a/bdb/test/recd012.tcl
+++ b/storage/bdb/test/recd012.tcl
diff --git a/bdb/test/recd013.tcl b/storage/bdb/test/recd013.tcl
index e08654f34e0..e08654f34e0 100644
--- a/bdb/test/recd013.tcl
+++ b/storage/bdb/test/recd013.tcl
diff --git a/bdb/test/recd014.tcl b/storage/bdb/test/recd014.tcl
index 6796341dca2..6796341dca2 100644
--- a/bdb/test/recd014.tcl
+++ b/storage/bdb/test/recd014.tcl
diff --git a/bdb/test/recd015.tcl b/storage/bdb/test/recd015.tcl
index 8c3ad612419..8c3ad612419 100644
--- a/bdb/test/recd015.tcl
+++ b/storage/bdb/test/recd015.tcl
diff --git a/bdb/test/recd016.tcl b/storage/bdb/test/recd016.tcl
index 504aca09617..504aca09617 100644
--- a/bdb/test/recd016.tcl
+++ b/storage/bdb/test/recd016.tcl
diff --git a/bdb/test/recd017.tcl b/storage/bdb/test/recd017.tcl
index 9f8208c1b3e..9f8208c1b3e 100644
--- a/bdb/test/recd017.tcl
+++ b/storage/bdb/test/recd017.tcl
diff --git a/bdb/test/recd018.tcl b/storage/bdb/test/recd018.tcl
index fb5a589d851..fb5a589d851 100644
--- a/bdb/test/recd018.tcl
+++ b/storage/bdb/test/recd018.tcl
diff --git a/bdb/test/recd019.tcl b/storage/bdb/test/recd019.tcl
index dd67b7dcb2a..dd67b7dcb2a 100644
--- a/bdb/test/recd019.tcl
+++ b/storage/bdb/test/recd019.tcl
diff --git a/bdb/test/recd020.tcl b/storage/bdb/test/recd020.tcl
index 93a89f32578..93a89f32578 100644
--- a/bdb/test/recd020.tcl
+++ b/storage/bdb/test/recd020.tcl
diff --git a/bdb/test/recd15scr.tcl b/storage/bdb/test/recd15scr.tcl
index e1238907a71..e1238907a71 100644
--- a/bdb/test/recd15scr.tcl
+++ b/storage/bdb/test/recd15scr.tcl
diff --git a/bdb/test/recdscript.tcl b/storage/bdb/test/recdscript.tcl
index a2afde46e4d..a2afde46e4d 100644
--- a/bdb/test/recdscript.tcl
+++ b/storage/bdb/test/recdscript.tcl
diff --git a/bdb/test/rep001.tcl b/storage/bdb/test/rep001.tcl
index 97a640029f5..97a640029f5 100644
--- a/bdb/test/rep001.tcl
+++ b/storage/bdb/test/rep001.tcl
diff --git a/bdb/test/rep002.tcl b/storage/bdb/test/rep002.tcl
index 68666b0d0f0..68666b0d0f0 100644
--- a/bdb/test/rep002.tcl
+++ b/storage/bdb/test/rep002.tcl
diff --git a/bdb/test/rep003.tcl b/storage/bdb/test/rep003.tcl
index 7bb7e00ddbf..7bb7e00ddbf 100644
--- a/bdb/test/rep003.tcl
+++ b/storage/bdb/test/rep003.tcl
diff --git a/bdb/test/rep004.tcl b/storage/bdb/test/rep004.tcl
index e1d4d3b65c7..e1d4d3b65c7 100644
--- a/bdb/test/rep004.tcl
+++ b/storage/bdb/test/rep004.tcl
diff --git a/bdb/test/rep005.tcl b/storage/bdb/test/rep005.tcl
index e0515f1cd62..e0515f1cd62 100644
--- a/bdb/test/rep005.tcl
+++ b/storage/bdb/test/rep005.tcl
diff --git a/bdb/test/reputils.tcl b/storage/bdb/test/reputils.tcl
index 340e359f26d..340e359f26d 100644
--- a/bdb/test/reputils.tcl
+++ b/storage/bdb/test/reputils.tcl
diff --git a/bdb/test/rpc001.tcl b/storage/bdb/test/rpc001.tcl
index 1b65639014f..1b65639014f 100644
--- a/bdb/test/rpc001.tcl
+++ b/storage/bdb/test/rpc001.tcl
diff --git a/bdb/test/rpc002.tcl b/storage/bdb/test/rpc002.tcl
index 4b69265bf3a..4b69265bf3a 100644
--- a/bdb/test/rpc002.tcl
+++ b/storage/bdb/test/rpc002.tcl
diff --git a/bdb/test/rpc003.tcl b/storage/bdb/test/rpc003.tcl
index 76f0dca6c07..76f0dca6c07 100644
--- a/bdb/test/rpc003.tcl
+++ b/storage/bdb/test/rpc003.tcl
diff --git a/bdb/test/rpc004.tcl b/storage/bdb/test/rpc004.tcl
index ca1462f3a89..ca1462f3a89 100644
--- a/bdb/test/rpc004.tcl
+++ b/storage/bdb/test/rpc004.tcl
diff --git a/bdb/test/rpc005.tcl b/storage/bdb/test/rpc005.tcl
index f46e7355e5a..f46e7355e5a 100644
--- a/bdb/test/rpc005.tcl
+++ b/storage/bdb/test/rpc005.tcl
diff --git a/bdb/test/rsrc001.tcl b/storage/bdb/test/rsrc001.tcl
index 1d57769fda2..1d57769fda2 100644
--- a/bdb/test/rsrc001.tcl
+++ b/storage/bdb/test/rsrc001.tcl
diff --git a/bdb/test/rsrc002.tcl b/storage/bdb/test/rsrc002.tcl
index 0cb3cf752e6..0cb3cf752e6 100644
--- a/bdb/test/rsrc002.tcl
+++ b/storage/bdb/test/rsrc002.tcl
diff --git a/bdb/test/rsrc003.tcl b/storage/bdb/test/rsrc003.tcl
index f357a1e7f80..f357a1e7f80 100644
--- a/bdb/test/rsrc003.tcl
+++ b/storage/bdb/test/rsrc003.tcl
diff --git a/bdb/test/rsrc004.tcl b/storage/bdb/test/rsrc004.tcl
index f6c2f997eb8..f6c2f997eb8 100644
--- a/bdb/test/rsrc004.tcl
+++ b/storage/bdb/test/rsrc004.tcl
diff --git a/bdb/test/scr001/chk.code b/storage/bdb/test/scr001/chk.code
index eb01d8614b3..eb01d8614b3 100644
--- a/bdb/test/scr001/chk.code
+++ b/storage/bdb/test/scr001/chk.code
diff --git a/bdb/test/scr002/chk.def b/storage/bdb/test/scr002/chk.def
index 7d5e6670f63..7d5e6670f63 100644
--- a/bdb/test/scr002/chk.def
+++ b/storage/bdb/test/scr002/chk.def
diff --git a/bdb/test/scr003/chk.define b/storage/bdb/test/scr003/chk.define
index f73355eddf6..f73355eddf6 100644
--- a/bdb/test/scr003/chk.define
+++ b/storage/bdb/test/scr003/chk.define
diff --git a/bdb/test/scr004/chk.javafiles b/storage/bdb/test/scr004/chk.javafiles
index d30c5e3e779..d30c5e3e779 100644
--- a/bdb/test/scr004/chk.javafiles
+++ b/storage/bdb/test/scr004/chk.javafiles
diff --git a/bdb/test/scr005/chk.nl b/storage/bdb/test/scr005/chk.nl
index 47c7ff74d4b..47c7ff74d4b 100644
--- a/bdb/test/scr005/chk.nl
+++ b/storage/bdb/test/scr005/chk.nl
diff --git a/bdb/test/scr006/chk.offt b/storage/bdb/test/scr006/chk.offt
index 6800268d2a2..6800268d2a2 100644
--- a/bdb/test/scr006/chk.offt
+++ b/storage/bdb/test/scr006/chk.offt
diff --git a/bdb/test/scr007/chk.proto b/storage/bdb/test/scr007/chk.proto
index ae406fa23fe..ae406fa23fe 100644
--- a/bdb/test/scr007/chk.proto
+++ b/storage/bdb/test/scr007/chk.proto
diff --git a/bdb/test/scr008/chk.pubdef b/storage/bdb/test/scr008/chk.pubdef
index 4f59e831b25..4f59e831b25 100644
--- a/bdb/test/scr008/chk.pubdef
+++ b/storage/bdb/test/scr008/chk.pubdef
diff --git a/bdb/test/scr009/chk.srcfiles b/storage/bdb/test/scr009/chk.srcfiles
index 4f09a2890f6..4f09a2890f6 100644
--- a/bdb/test/scr009/chk.srcfiles
+++ b/storage/bdb/test/scr009/chk.srcfiles
diff --git a/bdb/test/scr010/chk.str b/storage/bdb/test/scr010/chk.str
index 2b5698c0ff2..2b5698c0ff2 100644
--- a/bdb/test/scr010/chk.str
+++ b/storage/bdb/test/scr010/chk.str
diff --git a/bdb/test/scr010/spell.ok b/storage/bdb/test/scr010/spell.ok
index 18af8d1306d..18af8d1306d 100644
--- a/bdb/test/scr010/spell.ok
+++ b/storage/bdb/test/scr010/spell.ok
diff --git a/bdb/test/scr011/chk.tags b/storage/bdb/test/scr011/chk.tags
index 14a3c4e011d..14a3c4e011d 100644
--- a/bdb/test/scr011/chk.tags
+++ b/storage/bdb/test/scr011/chk.tags
diff --git a/bdb/test/scr012/chk.vx_code b/storage/bdb/test/scr012/chk.vx_code
index 8d7ca608f93..8d7ca608f93 100644
--- a/bdb/test/scr012/chk.vx_code
+++ b/storage/bdb/test/scr012/chk.vx_code
diff --git a/bdb/test/scr013/chk.stats b/storage/bdb/test/scr013/chk.stats
index 3a404699668..3a404699668 100644
--- a/bdb/test/scr013/chk.stats
+++ b/storage/bdb/test/scr013/chk.stats
diff --git a/bdb/test/scr014/chk.err b/storage/bdb/test/scr014/chk.err
index 72b4a62719f..72b4a62719f 100644
--- a/bdb/test/scr014/chk.err
+++ b/storage/bdb/test/scr014/chk.err
diff --git a/bdb/test/scr015/README b/storage/bdb/test/scr015/README
index 75a356eea06..75a356eea06 100644
--- a/bdb/test/scr015/README
+++ b/storage/bdb/test/scr015/README
diff --git a/bdb/test/scr015/TestConstruct01.cpp b/storage/bdb/test/scr015/TestConstruct01.cpp
index 7ae328d458c..7ae328d458c 100644
--- a/bdb/test/scr015/TestConstruct01.cpp
+++ b/storage/bdb/test/scr015/TestConstruct01.cpp
diff --git a/bdb/test/scr015/TestConstruct01.testerr b/storage/bdb/test/scr015/TestConstruct01.testerr
index 1ba627d103b..1ba627d103b 100644
--- a/bdb/test/scr015/TestConstruct01.testerr
+++ b/storage/bdb/test/scr015/TestConstruct01.testerr
diff --git a/bdb/test/scr015/TestConstruct01.testout b/storage/bdb/test/scr015/TestConstruct01.testout
index 9b840f9fcf4..9b840f9fcf4 100644
--- a/bdb/test/scr015/TestConstruct01.testout
+++ b/storage/bdb/test/scr015/TestConstruct01.testout
diff --git a/bdb/test/scr015/TestExceptInclude.cpp b/storage/bdb/test/scr015/TestExceptInclude.cpp
index 28bc498222f..28bc498222f 100644
--- a/bdb/test/scr015/TestExceptInclude.cpp
+++ b/storage/bdb/test/scr015/TestExceptInclude.cpp
diff --git a/bdb/test/scr015/TestGetSetMethods.cpp b/storage/bdb/test/scr015/TestGetSetMethods.cpp
index 81ef914eac3..81ef914eac3 100644
--- a/bdb/test/scr015/TestGetSetMethods.cpp
+++ b/storage/bdb/test/scr015/TestGetSetMethods.cpp
diff --git a/bdb/test/scr015/TestKeyRange.cpp b/storage/bdb/test/scr015/TestKeyRange.cpp
index 980d2f518e0..980d2f518e0 100644
--- a/bdb/test/scr015/TestKeyRange.cpp
+++ b/storage/bdb/test/scr015/TestKeyRange.cpp
diff --git a/bdb/test/scr015/TestKeyRange.testin b/storage/bdb/test/scr015/TestKeyRange.testin
index a2b6bd74e7b..a2b6bd74e7b 100644
--- a/bdb/test/scr015/TestKeyRange.testin
+++ b/storage/bdb/test/scr015/TestKeyRange.testin
diff --git a/bdb/test/scr015/TestKeyRange.testout b/storage/bdb/test/scr015/TestKeyRange.testout
index 25b2e1a835c..25b2e1a835c 100644
--- a/bdb/test/scr015/TestKeyRange.testout
+++ b/storage/bdb/test/scr015/TestKeyRange.testout
diff --git a/bdb/test/scr015/TestLogc.cpp b/storage/bdb/test/scr015/TestLogc.cpp
index 94fcfa0b3ec..94fcfa0b3ec 100644
--- a/bdb/test/scr015/TestLogc.cpp
+++ b/storage/bdb/test/scr015/TestLogc.cpp
diff --git a/bdb/test/scr015/TestLogc.testout b/storage/bdb/test/scr015/TestLogc.testout
index afac3af7eda..afac3af7eda 100644
--- a/bdb/test/scr015/TestLogc.testout
+++ b/storage/bdb/test/scr015/TestLogc.testout
diff --git a/bdb/test/scr015/TestSimpleAccess.cpp b/storage/bdb/test/scr015/TestSimpleAccess.cpp
index 2450b9b3030..2450b9b3030 100644
--- a/bdb/test/scr015/TestSimpleAccess.cpp
+++ b/storage/bdb/test/scr015/TestSimpleAccess.cpp
diff --git a/bdb/test/scr015/TestSimpleAccess.testout b/storage/bdb/test/scr015/TestSimpleAccess.testout
index dc88d4788e4..dc88d4788e4 100644
--- a/bdb/test/scr015/TestSimpleAccess.testout
+++ b/storage/bdb/test/scr015/TestSimpleAccess.testout
diff --git a/bdb/test/scr015/TestTruncate.cpp b/storage/bdb/test/scr015/TestTruncate.cpp
index d5c0dc6de29..d5c0dc6de29 100644
--- a/bdb/test/scr015/TestTruncate.cpp
+++ b/storage/bdb/test/scr015/TestTruncate.cpp
diff --git a/bdb/test/scr015/TestTruncate.testout b/storage/bdb/test/scr015/TestTruncate.testout
index 0a4bc98165d..0a4bc98165d 100644
--- a/bdb/test/scr015/TestTruncate.testout
+++ b/storage/bdb/test/scr015/TestTruncate.testout
diff --git a/bdb/test/scr015/chk.cxxtests b/storage/bdb/test/scr015/chk.cxxtests
index 5c21e27208c..5c21e27208c 100644
--- a/bdb/test/scr015/chk.cxxtests
+++ b/storage/bdb/test/scr015/chk.cxxtests
diff --git a/bdb/test/scr015/ignore b/storage/bdb/test/scr015/ignore
index 55ce82ae372..55ce82ae372 100644
--- a/bdb/test/scr015/ignore
+++ b/storage/bdb/test/scr015/ignore
diff --git a/bdb/test/scr015/testall b/storage/bdb/test/scr015/testall
index a2d493a8b22..a2d493a8b22 100644
--- a/bdb/test/scr015/testall
+++ b/storage/bdb/test/scr015/testall
diff --git a/bdb/test/scr015/testone b/storage/bdb/test/scr015/testone
index 3bbba3f90f0..3bbba3f90f0 100644
--- a/bdb/test/scr015/testone
+++ b/storage/bdb/test/scr015/testone
diff --git a/bdb/test/scr016/CallbackTest.java b/storage/bdb/test/scr016/CallbackTest.java
index eede964a027..eede964a027 100644
--- a/bdb/test/scr016/CallbackTest.java
+++ b/storage/bdb/test/scr016/CallbackTest.java
diff --git a/bdb/test/scr016/CallbackTest.testout b/storage/bdb/test/scr016/CallbackTest.testout
index 68797d4a2de..68797d4a2de 100644
--- a/bdb/test/scr016/CallbackTest.testout
+++ b/storage/bdb/test/scr016/CallbackTest.testout
diff --git a/bdb/test/scr016/README b/storage/bdb/test/scr016/README
index 226a8aa3b77..226a8aa3b77 100644
--- a/bdb/test/scr016/README
+++ b/storage/bdb/test/scr016/README
diff --git a/bdb/test/scr016/TestAppendRecno.java b/storage/bdb/test/scr016/TestAppendRecno.java
index f4ea70ca084..f4ea70ca084 100644
--- a/bdb/test/scr016/TestAppendRecno.java
+++ b/storage/bdb/test/scr016/TestAppendRecno.java
diff --git a/bdb/test/scr016/TestAppendRecno.testout b/storage/bdb/test/scr016/TestAppendRecno.testout
index 970174e7a96..970174e7a96 100644
--- a/bdb/test/scr016/TestAppendRecno.testout
+++ b/storage/bdb/test/scr016/TestAppendRecno.testout
diff --git a/bdb/test/scr016/TestAssociate.java b/storage/bdb/test/scr016/TestAssociate.java
index 4105b9cb0a1..4105b9cb0a1 100644
--- a/bdb/test/scr016/TestAssociate.java
+++ b/storage/bdb/test/scr016/TestAssociate.java
diff --git a/bdb/test/scr016/TestAssociate.testout b/storage/bdb/test/scr016/TestAssociate.testout
index 34414b660d1..34414b660d1 100644
--- a/bdb/test/scr016/TestAssociate.testout
+++ b/storage/bdb/test/scr016/TestAssociate.testout
diff --git a/bdb/test/scr016/TestClosedDb.java b/storage/bdb/test/scr016/TestClosedDb.java
index 3bd6e5380f8..3bd6e5380f8 100644
--- a/bdb/test/scr016/TestClosedDb.java
+++ b/storage/bdb/test/scr016/TestClosedDb.java
diff --git a/bdb/test/scr016/TestClosedDb.testout b/storage/bdb/test/scr016/TestClosedDb.testout
index ce13883f63a..ce13883f63a 100644
--- a/bdb/test/scr016/TestClosedDb.testout
+++ b/storage/bdb/test/scr016/TestClosedDb.testout
diff --git a/bdb/test/scr016/TestConstruct01.java b/storage/bdb/test/scr016/TestConstruct01.java
index b60073ebc0d..b60073ebc0d 100644
--- a/bdb/test/scr016/TestConstruct01.java
+++ b/storage/bdb/test/scr016/TestConstruct01.java
diff --git a/bdb/test/scr016/TestConstruct01.testerr b/storage/bdb/test/scr016/TestConstruct01.testerr
index e69de29bb2d..e69de29bb2d 100644
--- a/bdb/test/scr016/TestConstruct01.testerr
+++ b/storage/bdb/test/scr016/TestConstruct01.testerr
diff --git a/bdb/test/scr016/TestConstruct01.testout b/storage/bdb/test/scr016/TestConstruct01.testout
index 5d2041cd197..5d2041cd197 100644
--- a/bdb/test/scr016/TestConstruct01.testout
+++ b/storage/bdb/test/scr016/TestConstruct01.testout
diff --git a/bdb/test/scr016/TestConstruct02.java b/storage/bdb/test/scr016/TestConstruct02.java
index 5bbb55ccd56..5bbb55ccd56 100644
--- a/bdb/test/scr016/TestConstruct02.java
+++ b/storage/bdb/test/scr016/TestConstruct02.java
diff --git a/bdb/test/scr016/TestConstruct02.testout b/storage/bdb/test/scr016/TestConstruct02.testout
index 5d2041cd197..5d2041cd197 100644
--- a/bdb/test/scr016/TestConstruct02.testout
+++ b/storage/bdb/test/scr016/TestConstruct02.testout
diff --git a/bdb/test/scr016/TestDbtFlags.java b/storage/bdb/test/scr016/TestDbtFlags.java
index 98527e6b3e7..98527e6b3e7 100644
--- a/bdb/test/scr016/TestDbtFlags.java
+++ b/storage/bdb/test/scr016/TestDbtFlags.java
diff --git a/bdb/test/scr016/TestDbtFlags.testerr b/storage/bdb/test/scr016/TestDbtFlags.testerr
index 7666868ebd4..7666868ebd4 100644
--- a/bdb/test/scr016/TestDbtFlags.testerr
+++ b/storage/bdb/test/scr016/TestDbtFlags.testerr
diff --git a/bdb/test/scr016/TestDbtFlags.testout b/storage/bdb/test/scr016/TestDbtFlags.testout
index b8deb1bcc16..b8deb1bcc16 100644
--- a/bdb/test/scr016/TestDbtFlags.testout
+++ b/storage/bdb/test/scr016/TestDbtFlags.testout
diff --git a/bdb/test/scr016/TestGetSetMethods.java b/storage/bdb/test/scr016/TestGetSetMethods.java
index a1b2722d8fd..a1b2722d8fd 100644
--- a/bdb/test/scr016/TestGetSetMethods.java
+++ b/storage/bdb/test/scr016/TestGetSetMethods.java
diff --git a/bdb/test/scr016/TestKeyRange.java b/storage/bdb/test/scr016/TestKeyRange.java
index 8eda2de426f..8eda2de426f 100644
--- a/bdb/test/scr016/TestKeyRange.java
+++ b/storage/bdb/test/scr016/TestKeyRange.java
diff --git a/bdb/test/scr016/TestKeyRange.testout b/storage/bdb/test/scr016/TestKeyRange.testout
index c265f3289fb..c265f3289fb 100644
--- a/bdb/test/scr016/TestKeyRange.testout
+++ b/storage/bdb/test/scr016/TestKeyRange.testout
diff --git a/bdb/test/scr016/TestLockVec.java b/storage/bdb/test/scr016/TestLockVec.java
index ad48e9f2f9a..ad48e9f2f9a 100644
--- a/bdb/test/scr016/TestLockVec.java
+++ b/storage/bdb/test/scr016/TestLockVec.java
diff --git a/bdb/test/scr016/TestLockVec.testout b/storage/bdb/test/scr016/TestLockVec.testout
index 1cf16c6ac4e..1cf16c6ac4e 100644
--- a/bdb/test/scr016/TestLockVec.testout
+++ b/storage/bdb/test/scr016/TestLockVec.testout
diff --git a/bdb/test/scr016/TestLogc.java b/storage/bdb/test/scr016/TestLogc.java
index ec9c373a93b..ec9c373a93b 100644
--- a/bdb/test/scr016/TestLogc.java
+++ b/storage/bdb/test/scr016/TestLogc.java
diff --git a/bdb/test/scr016/TestLogc.testout b/storage/bdb/test/scr016/TestLogc.testout
index afac3af7eda..afac3af7eda 100644
--- a/bdb/test/scr016/TestLogc.testout
+++ b/storage/bdb/test/scr016/TestLogc.testout
diff --git a/bdb/test/scr016/TestOpenEmpty.java b/storage/bdb/test/scr016/TestOpenEmpty.java
index ae92fd363d9..ae92fd363d9 100644
--- a/bdb/test/scr016/TestOpenEmpty.java
+++ b/storage/bdb/test/scr016/TestOpenEmpty.java
diff --git a/bdb/test/scr016/TestOpenEmpty.testerr b/storage/bdb/test/scr016/TestOpenEmpty.testerr
index dd3e01c7ab7..dd3e01c7ab7 100644
--- a/bdb/test/scr016/TestOpenEmpty.testerr
+++ b/storage/bdb/test/scr016/TestOpenEmpty.testerr
diff --git a/bdb/test/scr016/TestReplication.java b/storage/bdb/test/scr016/TestReplication.java
index 87cb683d60f..87cb683d60f 100644
--- a/bdb/test/scr016/TestReplication.java
+++ b/storage/bdb/test/scr016/TestReplication.java
diff --git a/bdb/test/scr016/TestRpcServer.java b/storage/bdb/test/scr016/TestRpcServer.java
index ef325cef075..ef325cef075 100644
--- a/bdb/test/scr016/TestRpcServer.java
+++ b/storage/bdb/test/scr016/TestRpcServer.java
diff --git a/bdb/test/scr016/TestSameDbt.java b/storage/bdb/test/scr016/TestSameDbt.java
index 9866ed49307..9866ed49307 100644
--- a/bdb/test/scr016/TestSameDbt.java
+++ b/storage/bdb/test/scr016/TestSameDbt.java
diff --git a/bdb/test/scr016/TestSameDbt.testout b/storage/bdb/test/scr016/TestSameDbt.testout
index be4bbbe59e9..be4bbbe59e9 100644
--- a/bdb/test/scr016/TestSameDbt.testout
+++ b/storage/bdb/test/scr016/TestSameDbt.testout
diff --git a/bdb/test/scr016/TestSimpleAccess.java b/storage/bdb/test/scr016/TestSimpleAccess.java
index ba7390cada1..ba7390cada1 100644
--- a/bdb/test/scr016/TestSimpleAccess.java
+++ b/storage/bdb/test/scr016/TestSimpleAccess.java
diff --git a/bdb/test/scr016/TestSimpleAccess.testout b/storage/bdb/test/scr016/TestSimpleAccess.testout
index dc88d4788e4..dc88d4788e4 100644
--- a/bdb/test/scr016/TestSimpleAccess.testout
+++ b/storage/bdb/test/scr016/TestSimpleAccess.testout
diff --git a/bdb/test/scr016/TestStat.java b/storage/bdb/test/scr016/TestStat.java
index 55ba9823115..55ba9823115 100644
--- a/bdb/test/scr016/TestStat.java
+++ b/storage/bdb/test/scr016/TestStat.java
diff --git a/bdb/test/scr016/TestStat.testout b/storage/bdb/test/scr016/TestStat.testout
index caf9db1fb13..caf9db1fb13 100644
--- a/bdb/test/scr016/TestStat.testout
+++ b/storage/bdb/test/scr016/TestStat.testout
diff --git a/bdb/test/scr016/TestTruncate.java b/storage/bdb/test/scr016/TestTruncate.java
index 71377236246..71377236246 100644
--- a/bdb/test/scr016/TestTruncate.java
+++ b/storage/bdb/test/scr016/TestTruncate.java
diff --git a/bdb/test/scr016/TestTruncate.testout b/storage/bdb/test/scr016/TestTruncate.testout
index 23f291df754..23f291df754 100644
--- a/bdb/test/scr016/TestTruncate.testout
+++ b/storage/bdb/test/scr016/TestTruncate.testout
diff --git a/bdb/test/scr016/TestUtil.java b/storage/bdb/test/scr016/TestUtil.java
index 1bddfb0b014..1bddfb0b014 100644
--- a/bdb/test/scr016/TestUtil.java
+++ b/storage/bdb/test/scr016/TestUtil.java
diff --git a/bdb/test/scr016/TestXAServlet.java b/storage/bdb/test/scr016/TestXAServlet.java
index 8b9fe57e261..8b9fe57e261 100644
--- a/bdb/test/scr016/TestXAServlet.java
+++ b/storage/bdb/test/scr016/TestXAServlet.java
diff --git a/bdb/test/scr016/chk.javatests b/storage/bdb/test/scr016/chk.javatests
index 34d7dfe78d7..34d7dfe78d7 100644
--- a/bdb/test/scr016/chk.javatests
+++ b/storage/bdb/test/scr016/chk.javatests
diff --git a/bdb/test/scr016/ignore b/storage/bdb/test/scr016/ignore
index 1dfaf6adea4..1dfaf6adea4 100644
--- a/bdb/test/scr016/ignore
+++ b/storage/bdb/test/scr016/ignore
diff --git a/bdb/test/scr016/testall b/storage/bdb/test/scr016/testall
index a4e1b5a8c70..a4e1b5a8c70 100644
--- a/bdb/test/scr016/testall
+++ b/storage/bdb/test/scr016/testall
diff --git a/bdb/test/scr016/testone b/storage/bdb/test/scr016/testone
index 5f5d2e0017d..5f5d2e0017d 100644
--- a/bdb/test/scr016/testone
+++ b/storage/bdb/test/scr016/testone
diff --git a/bdb/test/scr017/O.BH b/storage/bdb/test/scr017/O.BH
index cd499d38779..cd499d38779 100644
--- a/bdb/test/scr017/O.BH
+++ b/storage/bdb/test/scr017/O.BH
diff --git a/bdb/test/scr017/O.R b/storage/bdb/test/scr017/O.R
index d78a04727d8..d78a04727d8 100644
--- a/bdb/test/scr017/O.R
+++ b/storage/bdb/test/scr017/O.R
diff --git a/bdb/test/scr017/chk.db185 b/storage/bdb/test/scr017/chk.db185
index c2a07c51d26..c2a07c51d26 100644
--- a/bdb/test/scr017/chk.db185
+++ b/storage/bdb/test/scr017/chk.db185
diff --git a/bdb/test/scr017/t.c b/storage/bdb/test/scr017/t.c
index f03b33880d6..f03b33880d6 100644
--- a/bdb/test/scr017/t.c
+++ b/storage/bdb/test/scr017/t.c
diff --git a/bdb/test/scr018/chk.comma b/storage/bdb/test/scr018/chk.comma
index 42df48d1881..42df48d1881 100644
--- a/bdb/test/scr018/chk.comma
+++ b/storage/bdb/test/scr018/chk.comma
diff --git a/bdb/test/scr018/t.c b/storage/bdb/test/scr018/t.c
index 4056a605928..4056a605928 100644
--- a/bdb/test/scr018/t.c
+++ b/storage/bdb/test/scr018/t.c
diff --git a/bdb/test/scr019/chk.include b/storage/bdb/test/scr019/chk.include
index 444217bedb4..444217bedb4 100644
--- a/bdb/test/scr019/chk.include
+++ b/storage/bdb/test/scr019/chk.include
diff --git a/bdb/test/scr020/chk.inc b/storage/bdb/test/scr020/chk.inc
index 189126b10c3..189126b10c3 100644
--- a/bdb/test/scr020/chk.inc
+++ b/storage/bdb/test/scr020/chk.inc
diff --git a/bdb/test/scr021/chk.flags b/storage/bdb/test/scr021/chk.flags
index 1b2bb62cca7..1b2bb62cca7 100644
--- a/bdb/test/scr021/chk.flags
+++ b/storage/bdb/test/scr021/chk.flags
diff --git a/bdb/test/scr022/chk.rr b/storage/bdb/test/scr022/chk.rr
index df230315299..df230315299 100644
--- a/bdb/test/scr022/chk.rr
+++ b/storage/bdb/test/scr022/chk.rr
diff --git a/bdb/test/sdb001.tcl b/storage/bdb/test/sdb001.tcl
index a03160e0ab7..a03160e0ab7 100644
--- a/bdb/test/sdb001.tcl
+++ b/storage/bdb/test/sdb001.tcl
diff --git a/bdb/test/sdb002.tcl b/storage/bdb/test/sdb002.tcl
index 4757e12afc7..4757e12afc7 100644
--- a/bdb/test/sdb002.tcl
+++ b/storage/bdb/test/sdb002.tcl
diff --git a/bdb/test/sdb003.tcl b/storage/bdb/test/sdb003.tcl
index 5d1536d8c84..5d1536d8c84 100644
--- a/bdb/test/sdb003.tcl
+++ b/storage/bdb/test/sdb003.tcl
diff --git a/bdb/test/sdb004.tcl b/storage/bdb/test/sdb004.tcl
index d3d95f1fde0..d3d95f1fde0 100644
--- a/bdb/test/sdb004.tcl
+++ b/storage/bdb/test/sdb004.tcl
diff --git a/bdb/test/sdb005.tcl b/storage/bdb/test/sdb005.tcl
index 98cea5b348b..98cea5b348b 100644
--- a/bdb/test/sdb005.tcl
+++ b/storage/bdb/test/sdb005.tcl
diff --git a/bdb/test/sdb006.tcl b/storage/bdb/test/sdb006.tcl
index fd6066b08d6..fd6066b08d6 100644
--- a/bdb/test/sdb006.tcl
+++ b/storage/bdb/test/sdb006.tcl
diff --git a/bdb/test/sdb007.tcl b/storage/bdb/test/sdb007.tcl
index 0f9488a92a1..0f9488a92a1 100644
--- a/bdb/test/sdb007.tcl
+++ b/storage/bdb/test/sdb007.tcl
diff --git a/bdb/test/sdb008.tcl b/storage/bdb/test/sdb008.tcl
index 1c46aed2087..1c46aed2087 100644
--- a/bdb/test/sdb008.tcl
+++ b/storage/bdb/test/sdb008.tcl
diff --git a/bdb/test/sdb009.tcl b/storage/bdb/test/sdb009.tcl
index 4e4869643ef..4e4869643ef 100644
--- a/bdb/test/sdb009.tcl
+++ b/storage/bdb/test/sdb009.tcl
diff --git a/bdb/test/sdb010.tcl b/storage/bdb/test/sdb010.tcl
index 51f25976c56..51f25976c56 100644
--- a/bdb/test/sdb010.tcl
+++ b/storage/bdb/test/sdb010.tcl
diff --git a/bdb/test/sdb011.tcl b/storage/bdb/test/sdb011.tcl
index 862e32f73ed..862e32f73ed 100644
--- a/bdb/test/sdb011.tcl
+++ b/storage/bdb/test/sdb011.tcl
diff --git a/bdb/test/sdb012.tcl b/storage/bdb/test/sdb012.tcl
index 9c05d977daf..9c05d977daf 100644
--- a/bdb/test/sdb012.tcl
+++ b/storage/bdb/test/sdb012.tcl
diff --git a/bdb/test/sdbscript.tcl b/storage/bdb/test/sdbscript.tcl
index d1978ccb048..d1978ccb048 100644
--- a/bdb/test/sdbscript.tcl
+++ b/storage/bdb/test/sdbscript.tcl
diff --git a/bdb/test/sdbtest001.tcl b/storage/bdb/test/sdbtest001.tcl
index b8b4508c2a4..b8b4508c2a4 100644
--- a/bdb/test/sdbtest001.tcl
+++ b/storage/bdb/test/sdbtest001.tcl
diff --git a/bdb/test/sdbtest002.tcl b/storage/bdb/test/sdbtest002.tcl
index 95717413a7b..95717413a7b 100644
--- a/bdb/test/sdbtest002.tcl
+++ b/storage/bdb/test/sdbtest002.tcl
diff --git a/bdb/test/sdbutils.tcl b/storage/bdb/test/sdbutils.tcl
index 3221a422e18..3221a422e18 100644
--- a/bdb/test/sdbutils.tcl
+++ b/storage/bdb/test/sdbutils.tcl
diff --git a/bdb/test/sec001.tcl b/storage/bdb/test/sec001.tcl
index eb4bcc24dd2..eb4bcc24dd2 100644
--- a/bdb/test/sec001.tcl
+++ b/storage/bdb/test/sec001.tcl
diff --git a/bdb/test/sec002.tcl b/storage/bdb/test/sec002.tcl
index d790162f1d7..d790162f1d7 100644
--- a/bdb/test/sec002.tcl
+++ b/storage/bdb/test/sec002.tcl
diff --git a/bdb/test/shelltest.tcl b/storage/bdb/test/shelltest.tcl
index 6190bac1f8d..6190bac1f8d 100644
--- a/bdb/test/shelltest.tcl
+++ b/storage/bdb/test/shelltest.tcl
diff --git a/bdb/test/si001.tcl b/storage/bdb/test/si001.tcl
index 1a2247c5f8b..1a2247c5f8b 100644
--- a/bdb/test/si001.tcl
+++ b/storage/bdb/test/si001.tcl
diff --git a/bdb/test/si002.tcl b/storage/bdb/test/si002.tcl
index 46ba86e7560..46ba86e7560 100644
--- a/bdb/test/si002.tcl
+++ b/storage/bdb/test/si002.tcl
diff --git a/bdb/test/si003.tcl b/storage/bdb/test/si003.tcl
index 1cc8c884e75..1cc8c884e75 100644
--- a/bdb/test/si003.tcl
+++ b/storage/bdb/test/si003.tcl
diff --git a/bdb/test/si004.tcl b/storage/bdb/test/si004.tcl
index 291100da6b3..291100da6b3 100644
--- a/bdb/test/si004.tcl
+++ b/storage/bdb/test/si004.tcl
diff --git a/bdb/test/si005.tcl b/storage/bdb/test/si005.tcl
index e5ed49175c9..e5ed49175c9 100644
--- a/bdb/test/si005.tcl
+++ b/storage/bdb/test/si005.tcl
diff --git a/bdb/test/si006.tcl b/storage/bdb/test/si006.tcl
index 3a1dbb3c4f8..3a1dbb3c4f8 100644
--- a/bdb/test/si006.tcl
+++ b/storage/bdb/test/si006.tcl
diff --git a/bdb/test/sindex.tcl b/storage/bdb/test/sindex.tcl
index fc2a0fc2f31..fc2a0fc2f31 100644
--- a/bdb/test/sindex.tcl
+++ b/storage/bdb/test/sindex.tcl
diff --git a/bdb/test/sysscript.tcl b/storage/bdb/test/sysscript.tcl
index 810b0df6cef..810b0df6cef 100644
--- a/bdb/test/sysscript.tcl
+++ b/storage/bdb/test/sysscript.tcl
diff --git a/bdb/test/test.tcl b/storage/bdb/test/test.tcl
index 10ee9425b7a..10ee9425b7a 100644
--- a/bdb/test/test.tcl
+++ b/storage/bdb/test/test.tcl
diff --git a/bdb/test/test001.tcl b/storage/bdb/test/test001.tcl
index f0b562bbf24..f0b562bbf24 100644
--- a/bdb/test/test001.tcl
+++ b/storage/bdb/test/test001.tcl
diff --git a/bdb/test/test002.tcl b/storage/bdb/test/test002.tcl
index bc28994d6a7..bc28994d6a7 100644
--- a/bdb/test/test002.tcl
+++ b/storage/bdb/test/test002.tcl
diff --git a/bdb/test/test003.tcl b/storage/bdb/test/test003.tcl
index c7bfe6c15ad..c7bfe6c15ad 100644
--- a/bdb/test/test003.tcl
+++ b/storage/bdb/test/test003.tcl
diff --git a/bdb/test/test004.tcl b/storage/bdb/test/test004.tcl
index 7bea6f88eca..7bea6f88eca 100644
--- a/bdb/test/test004.tcl
+++ b/storage/bdb/test/test004.tcl
diff --git a/bdb/test/test005.tcl b/storage/bdb/test/test005.tcl
index f3e37f2149d..f3e37f2149d 100644
--- a/bdb/test/test005.tcl
+++ b/storage/bdb/test/test005.tcl
diff --git a/bdb/test/test006.tcl b/storage/bdb/test/test006.tcl
index fbaebfe8ac8..fbaebfe8ac8 100644
--- a/bdb/test/test006.tcl
+++ b/storage/bdb/test/test006.tcl
diff --git a/bdb/test/test007.tcl b/storage/bdb/test/test007.tcl
index 1e99d107a2d..1e99d107a2d 100644
--- a/bdb/test/test007.tcl
+++ b/storage/bdb/test/test007.tcl
diff --git a/bdb/test/test008.tcl b/storage/bdb/test/test008.tcl
index 0af97a40110..0af97a40110 100644
--- a/bdb/test/test008.tcl
+++ b/storage/bdb/test/test008.tcl
diff --git a/bdb/test/test009.tcl b/storage/bdb/test/test009.tcl
index 7ef46d8c818..7ef46d8c818 100644
--- a/bdb/test/test009.tcl
+++ b/storage/bdb/test/test009.tcl
diff --git a/bdb/test/test010.tcl b/storage/bdb/test/test010.tcl
index 0b5f5531795..0b5f5531795 100644
--- a/bdb/test/test010.tcl
+++ b/storage/bdb/test/test010.tcl
diff --git a/bdb/test/test011.tcl b/storage/bdb/test/test011.tcl
index 63e2203efe4..63e2203efe4 100644
--- a/bdb/test/test011.tcl
+++ b/storage/bdb/test/test011.tcl
diff --git a/bdb/test/test012.tcl b/storage/bdb/test/test012.tcl
index e7237d27267..e7237d27267 100644
--- a/bdb/test/test012.tcl
+++ b/storage/bdb/test/test012.tcl
diff --git a/bdb/test/test013.tcl b/storage/bdb/test/test013.tcl
index 96d7757b0d8..96d7757b0d8 100644
--- a/bdb/test/test013.tcl
+++ b/storage/bdb/test/test013.tcl
diff --git a/bdb/test/test014.tcl b/storage/bdb/test/test014.tcl
index 00d69d3352e..00d69d3352e 100644
--- a/bdb/test/test014.tcl
+++ b/storage/bdb/test/test014.tcl
diff --git a/bdb/test/test015.tcl b/storage/bdb/test/test015.tcl
index f129605a405..f129605a405 100644
--- a/bdb/test/test015.tcl
+++ b/storage/bdb/test/test015.tcl
diff --git a/bdb/test/test016.tcl b/storage/bdb/test/test016.tcl
index af289f866f4..af289f866f4 100644
--- a/bdb/test/test016.tcl
+++ b/storage/bdb/test/test016.tcl
diff --git a/bdb/test/test017.tcl b/storage/bdb/test/test017.tcl
index 1f99aa328fb..1f99aa328fb 100644
--- a/bdb/test/test017.tcl
+++ b/storage/bdb/test/test017.tcl
diff --git a/bdb/test/test018.tcl b/storage/bdb/test/test018.tcl
index 8fc8a14e95e..8fc8a14e95e 100644
--- a/bdb/test/test018.tcl
+++ b/storage/bdb/test/test018.tcl
diff --git a/bdb/test/test019.tcl b/storage/bdb/test/test019.tcl
index aa3a58a0bcd..aa3a58a0bcd 100644
--- a/bdb/test/test019.tcl
+++ b/storage/bdb/test/test019.tcl
diff --git a/bdb/test/test020.tcl b/storage/bdb/test/test020.tcl
index 9b6d939acad..9b6d939acad 100644
--- a/bdb/test/test020.tcl
+++ b/storage/bdb/test/test020.tcl
diff --git a/bdb/test/test021.tcl b/storage/bdb/test/test021.tcl
index 56936da389a..56936da389a 100644
--- a/bdb/test/test021.tcl
+++ b/storage/bdb/test/test021.tcl
diff --git a/bdb/test/test022.tcl b/storage/bdb/test/test022.tcl
index d25d7ecdffe..d25d7ecdffe 100644
--- a/bdb/test/test022.tcl
+++ b/storage/bdb/test/test022.tcl
diff --git a/bdb/test/test023.tcl b/storage/bdb/test/test023.tcl
index c37539a0f55..c37539a0f55 100644
--- a/bdb/test/test023.tcl
+++ b/storage/bdb/test/test023.tcl
diff --git a/bdb/test/test024.tcl b/storage/bdb/test/test024.tcl
index bbdc8fb2253..bbdc8fb2253 100644
--- a/bdb/test/test024.tcl
+++ b/storage/bdb/test/test024.tcl
diff --git a/bdb/test/test025.tcl b/storage/bdb/test/test025.tcl
index 180a1aa2939..180a1aa2939 100644
--- a/bdb/test/test025.tcl
+++ b/storage/bdb/test/test025.tcl
diff --git a/bdb/test/test026.tcl b/storage/bdb/test/test026.tcl
index ce65e925d35..ce65e925d35 100644
--- a/bdb/test/test026.tcl
+++ b/storage/bdb/test/test026.tcl
diff --git a/bdb/test/test027.tcl b/storage/bdb/test/test027.tcl
index a0f6dfa4dcb..a0f6dfa4dcb 100644
--- a/bdb/test/test027.tcl
+++ b/storage/bdb/test/test027.tcl
diff --git a/bdb/test/test028.tcl b/storage/bdb/test/test028.tcl
index a546744fdac..a546744fdac 100644
--- a/bdb/test/test028.tcl
+++ b/storage/bdb/test/test028.tcl
diff --git a/bdb/test/test029.tcl b/storage/bdb/test/test029.tcl
index 8e4b8aa6e41..8e4b8aa6e41 100644
--- a/bdb/test/test029.tcl
+++ b/storage/bdb/test/test029.tcl
diff --git a/bdb/test/test030.tcl b/storage/bdb/test/test030.tcl
index d91359f07a0..d91359f07a0 100644
--- a/bdb/test/test030.tcl
+++ b/storage/bdb/test/test030.tcl
diff --git a/bdb/test/test031.tcl b/storage/bdb/test/test031.tcl
index 0006deb2d99..0006deb2d99 100644
--- a/bdb/test/test031.tcl
+++ b/storage/bdb/test/test031.tcl
diff --git a/bdb/test/test032.tcl b/storage/bdb/test/test032.tcl
index 2076b744851..2076b744851 100644
--- a/bdb/test/test032.tcl
+++ b/storage/bdb/test/test032.tcl
diff --git a/bdb/test/test033.tcl b/storage/bdb/test/test033.tcl
index a7796ce99d6..a7796ce99d6 100644
--- a/bdb/test/test033.tcl
+++ b/storage/bdb/test/test033.tcl
diff --git a/bdb/test/test034.tcl b/storage/bdb/test/test034.tcl
index 647ad940815..647ad940815 100644
--- a/bdb/test/test034.tcl
+++ b/storage/bdb/test/test034.tcl
diff --git a/bdb/test/test035.tcl b/storage/bdb/test/test035.tcl
index 06796b1e9aa..06796b1e9aa 100644
--- a/bdb/test/test035.tcl
+++ b/storage/bdb/test/test035.tcl
diff --git a/bdb/test/test036.tcl b/storage/bdb/test/test036.tcl
index 4e54f363ff8..4e54f363ff8 100644
--- a/bdb/test/test036.tcl
+++ b/storage/bdb/test/test036.tcl
diff --git a/bdb/test/test037.tcl b/storage/bdb/test/test037.tcl
index 0b2e2989949..0b2e2989949 100644
--- a/bdb/test/test037.tcl
+++ b/storage/bdb/test/test037.tcl
diff --git a/bdb/test/test038.tcl b/storage/bdb/test/test038.tcl
index 3babde8fe0b..3babde8fe0b 100644
--- a/bdb/test/test038.tcl
+++ b/storage/bdb/test/test038.tcl
diff --git a/bdb/test/test039.tcl b/storage/bdb/test/test039.tcl
index 2bbc83ebe05..2bbc83ebe05 100644
--- a/bdb/test/test039.tcl
+++ b/storage/bdb/test/test039.tcl
diff --git a/bdb/test/test040.tcl b/storage/bdb/test/test040.tcl
index 1856f78fc2e..1856f78fc2e 100644
--- a/bdb/test/test040.tcl
+++ b/storage/bdb/test/test040.tcl
diff --git a/bdb/test/test041.tcl b/storage/bdb/test/test041.tcl
index fdcbdbef3d7..fdcbdbef3d7 100644
--- a/bdb/test/test041.tcl
+++ b/storage/bdb/test/test041.tcl
diff --git a/bdb/test/test042.tcl b/storage/bdb/test/test042.tcl
index 9f444b8349c..9f444b8349c 100644
--- a/bdb/test/test042.tcl
+++ b/storage/bdb/test/test042.tcl
diff --git a/bdb/test/test043.tcl b/storage/bdb/test/test043.tcl
index eea7ec86d54..eea7ec86d54 100644
--- a/bdb/test/test043.tcl
+++ b/storage/bdb/test/test043.tcl
diff --git a/bdb/test/test044.tcl b/storage/bdb/test/test044.tcl
index 67cf3ea24b8..67cf3ea24b8 100644
--- a/bdb/test/test044.tcl
+++ b/storage/bdb/test/test044.tcl
diff --git a/bdb/test/test045.tcl b/storage/bdb/test/test045.tcl
index 3825135facd..3825135facd 100644
--- a/bdb/test/test045.tcl
+++ b/storage/bdb/test/test045.tcl
diff --git a/bdb/test/test046.tcl b/storage/bdb/test/test046.tcl
index 4136f30aaa7..4136f30aaa7 100644
--- a/bdb/test/test046.tcl
+++ b/storage/bdb/test/test046.tcl
diff --git a/bdb/test/test047.tcl b/storage/bdb/test/test047.tcl
index 61c1d0864c5..61c1d0864c5 100644
--- a/bdb/test/test047.tcl
+++ b/storage/bdb/test/test047.tcl
diff --git a/bdb/test/test048.tcl b/storage/bdb/test/test048.tcl
index 2131f6f553c..2131f6f553c 100644
--- a/bdb/test/test048.tcl
+++ b/storage/bdb/test/test048.tcl
diff --git a/bdb/test/test049.tcl b/storage/bdb/test/test049.tcl
index 3040727c469..3040727c469 100644
--- a/bdb/test/test049.tcl
+++ b/storage/bdb/test/test049.tcl
diff --git a/bdb/test/test050.tcl b/storage/bdb/test/test050.tcl
index dfaeddd035c..dfaeddd035c 100644
--- a/bdb/test/test050.tcl
+++ b/storage/bdb/test/test050.tcl
diff --git a/bdb/test/test051.tcl b/storage/bdb/test/test051.tcl
index 830b7630788..830b7630788 100644
--- a/bdb/test/test051.tcl
+++ b/storage/bdb/test/test051.tcl
diff --git a/bdb/test/test052.tcl b/storage/bdb/test/test052.tcl
index 1f386449630..1f386449630 100644
--- a/bdb/test/test052.tcl
+++ b/storage/bdb/test/test052.tcl
diff --git a/bdb/test/test053.tcl b/storage/bdb/test/test053.tcl
index 3e217a2b55f..3e217a2b55f 100644
--- a/bdb/test/test053.tcl
+++ b/storage/bdb/test/test053.tcl
diff --git a/bdb/test/test054.tcl b/storage/bdb/test/test054.tcl
index f53f5a658bf..f53f5a658bf 100644
--- a/bdb/test/test054.tcl
+++ b/storage/bdb/test/test054.tcl
diff --git a/bdb/test/test055.tcl b/storage/bdb/test/test055.tcl
index 25134dca4be..25134dca4be 100644
--- a/bdb/test/test055.tcl
+++ b/storage/bdb/test/test055.tcl
diff --git a/bdb/test/test056.tcl b/storage/bdb/test/test056.tcl
index ef310332ed1..ef310332ed1 100644
--- a/bdb/test/test056.tcl
+++ b/storage/bdb/test/test056.tcl
diff --git a/bdb/test/test057.tcl b/storage/bdb/test/test057.tcl
index 04fb09ef260..04fb09ef260 100644
--- a/bdb/test/test057.tcl
+++ b/storage/bdb/test/test057.tcl
diff --git a/bdb/test/test058.tcl b/storage/bdb/test/test058.tcl
index daf164fd6e2..daf164fd6e2 100644
--- a/bdb/test/test058.tcl
+++ b/storage/bdb/test/test058.tcl
diff --git a/bdb/test/test059.tcl b/storage/bdb/test/test059.tcl
index 596ea7a3c94..596ea7a3c94 100644
--- a/bdb/test/test059.tcl
+++ b/storage/bdb/test/test059.tcl
diff --git a/bdb/test/test060.tcl b/storage/bdb/test/test060.tcl
index 4a18c97f42f..4a18c97f42f 100644
--- a/bdb/test/test060.tcl
+++ b/storage/bdb/test/test060.tcl
diff --git a/bdb/test/test061.tcl b/storage/bdb/test/test061.tcl
index 65544e88deb..65544e88deb 100644
--- a/bdb/test/test061.tcl
+++ b/storage/bdb/test/test061.tcl
diff --git a/bdb/test/test062.tcl b/storage/bdb/test/test062.tcl
index 5cacd98a2c0..5cacd98a2c0 100644
--- a/bdb/test/test062.tcl
+++ b/storage/bdb/test/test062.tcl
diff --git a/bdb/test/test063.tcl b/storage/bdb/test/test063.tcl
index 2e8726c8f96..2e8726c8f96 100644
--- a/bdb/test/test063.tcl
+++ b/storage/bdb/test/test063.tcl
diff --git a/bdb/test/test064.tcl b/storage/bdb/test/test064.tcl
index c306b0d9d46..c306b0d9d46 100644
--- a/bdb/test/test064.tcl
+++ b/storage/bdb/test/test064.tcl
diff --git a/bdb/test/test065.tcl b/storage/bdb/test/test065.tcl
index ea29b4d2db7..ea29b4d2db7 100644
--- a/bdb/test/test065.tcl
+++ b/storage/bdb/test/test065.tcl
diff --git a/bdb/test/test066.tcl b/storage/bdb/test/test066.tcl
index 13d0894dcae..13d0894dcae 100644
--- a/bdb/test/test066.tcl
+++ b/storage/bdb/test/test066.tcl
diff --git a/bdb/test/test067.tcl b/storage/bdb/test/test067.tcl
index 5f5a88c4be1..5f5a88c4be1 100644
--- a/bdb/test/test067.tcl
+++ b/storage/bdb/test/test067.tcl
diff --git a/bdb/test/test068.tcl b/storage/bdb/test/test068.tcl
index 31f4272ba55..31f4272ba55 100644
--- a/bdb/test/test068.tcl
+++ b/storage/bdb/test/test068.tcl
diff --git a/bdb/test/test069.tcl b/storage/bdb/test/test069.tcl
index d986c861358..d986c861358 100644
--- a/bdb/test/test069.tcl
+++ b/storage/bdb/test/test069.tcl
diff --git a/bdb/test/test070.tcl b/storage/bdb/test/test070.tcl
index 986fd079589..986fd079589 100644
--- a/bdb/test/test070.tcl
+++ b/storage/bdb/test/test070.tcl
diff --git a/bdb/test/test071.tcl b/storage/bdb/test/test071.tcl
index 3f2604022f1..3f2604022f1 100644
--- a/bdb/test/test071.tcl
+++ b/storage/bdb/test/test071.tcl
diff --git a/bdb/test/test072.tcl b/storage/bdb/test/test072.tcl
index 3c08f93975d..3c08f93975d 100644
--- a/bdb/test/test072.tcl
+++ b/storage/bdb/test/test072.tcl
diff --git a/bdb/test/test073.tcl b/storage/bdb/test/test073.tcl
index 02a0f3b0d19..02a0f3b0d19 100644
--- a/bdb/test/test073.tcl
+++ b/storage/bdb/test/test073.tcl
diff --git a/bdb/test/test074.tcl b/storage/bdb/test/test074.tcl
index 7f620db2d97..7f620db2d97 100644
--- a/bdb/test/test074.tcl
+++ b/storage/bdb/test/test074.tcl
diff --git a/bdb/test/test075.tcl b/storage/bdb/test/test075.tcl
index 540d8f0ed73..540d8f0ed73 100644
--- a/bdb/test/test075.tcl
+++ b/storage/bdb/test/test075.tcl
diff --git a/bdb/test/test076.tcl b/storage/bdb/test/test076.tcl
index 9f7b1ed2972..9f7b1ed2972 100644
--- a/bdb/test/test076.tcl
+++ b/storage/bdb/test/test076.tcl
diff --git a/bdb/test/test077.tcl b/storage/bdb/test/test077.tcl
index 99cf432af20..99cf432af20 100644
--- a/bdb/test/test077.tcl
+++ b/storage/bdb/test/test077.tcl
diff --git a/bdb/test/test078.tcl b/storage/bdb/test/test078.tcl
index 45a1d46466e..45a1d46466e 100644
--- a/bdb/test/test078.tcl
+++ b/storage/bdb/test/test078.tcl
diff --git a/bdb/test/test079.tcl b/storage/bdb/test/test079.tcl
index 70fd4e05090..70fd4e05090 100644
--- a/bdb/test/test079.tcl
+++ b/storage/bdb/test/test079.tcl
diff --git a/bdb/test/test080.tcl b/storage/bdb/test/test080.tcl
index 9f649496f68..9f649496f68 100644
--- a/bdb/test/test080.tcl
+++ b/storage/bdb/test/test080.tcl
diff --git a/bdb/test/test081.tcl b/storage/bdb/test/test081.tcl
index 37c2b44ac33..37c2b44ac33 100644
--- a/bdb/test/test081.tcl
+++ b/storage/bdb/test/test081.tcl
diff --git a/bdb/test/test082.tcl b/storage/bdb/test/test082.tcl
index e8c1fa45a92..e8c1fa45a92 100644
--- a/bdb/test/test082.tcl
+++ b/storage/bdb/test/test082.tcl
diff --git a/bdb/test/test083.tcl b/storage/bdb/test/test083.tcl
index e4168ee1c43..e4168ee1c43 100644
--- a/bdb/test/test083.tcl
+++ b/storage/bdb/test/test083.tcl
diff --git a/bdb/test/test084.tcl b/storage/bdb/test/test084.tcl
index 89bc13978b0..89bc13978b0 100644
--- a/bdb/test/test084.tcl
+++ b/storage/bdb/test/test084.tcl
diff --git a/bdb/test/test085.tcl b/storage/bdb/test/test085.tcl
index b0412d6fe68..b0412d6fe68 100644
--- a/bdb/test/test085.tcl
+++ b/storage/bdb/test/test085.tcl
diff --git a/bdb/test/test086.tcl b/storage/bdb/test/test086.tcl
index e15aa1d8bb9..e15aa1d8bb9 100644
--- a/bdb/test/test086.tcl
+++ b/storage/bdb/test/test086.tcl
diff --git a/bdb/test/test087.tcl b/storage/bdb/test/test087.tcl
index 089664a0002..089664a0002 100644
--- a/bdb/test/test087.tcl
+++ b/storage/bdb/test/test087.tcl
diff --git a/bdb/test/test088.tcl b/storage/bdb/test/test088.tcl
index 7065b4cd642..7065b4cd642 100644
--- a/bdb/test/test088.tcl
+++ b/storage/bdb/test/test088.tcl
diff --git a/bdb/test/test089.tcl b/storage/bdb/test/test089.tcl
index d378152f203..d378152f203 100644
--- a/bdb/test/test089.tcl
+++ b/storage/bdb/test/test089.tcl
diff --git a/bdb/test/test090.tcl b/storage/bdb/test/test090.tcl
index da90688ffc5..da90688ffc5 100644
--- a/bdb/test/test090.tcl
+++ b/storage/bdb/test/test090.tcl
diff --git a/bdb/test/test091.tcl b/storage/bdb/test/test091.tcl
index cfd2a60ebb5..cfd2a60ebb5 100644
--- a/bdb/test/test091.tcl
+++ b/storage/bdb/test/test091.tcl
diff --git a/bdb/test/test092.tcl b/storage/bdb/test/test092.tcl
index 29c1c55a9a9..29c1c55a9a9 100644
--- a/bdb/test/test092.tcl
+++ b/storage/bdb/test/test092.tcl
diff --git a/bdb/test/test093.tcl b/storage/bdb/test/test093.tcl
index e3f8f0103c6..e3f8f0103c6 100644
--- a/bdb/test/test093.tcl
+++ b/storage/bdb/test/test093.tcl
diff --git a/bdb/test/test094.tcl b/storage/bdb/test/test094.tcl
index 781052913f4..781052913f4 100644
--- a/bdb/test/test094.tcl
+++ b/storage/bdb/test/test094.tcl
diff --git a/bdb/test/test095.tcl b/storage/bdb/test/test095.tcl
index 5543f346b7e..5543f346b7e 100644
--- a/bdb/test/test095.tcl
+++ b/storage/bdb/test/test095.tcl
diff --git a/bdb/test/test096.tcl b/storage/bdb/test/test096.tcl
index 042df19eac7..042df19eac7 100644
--- a/bdb/test/test096.tcl
+++ b/storage/bdb/test/test096.tcl
diff --git a/bdb/test/test097.tcl b/storage/bdb/test/test097.tcl
index 6e43b820b2f..6e43b820b2f 100644
--- a/bdb/test/test097.tcl
+++ b/storage/bdb/test/test097.tcl
diff --git a/bdb/test/test098.tcl b/storage/bdb/test/test098.tcl
index 320e0258a84..320e0258a84 100644
--- a/bdb/test/test098.tcl
+++ b/storage/bdb/test/test098.tcl
diff --git a/bdb/test/test099.tcl b/storage/bdb/test/test099.tcl
index db177ce5fff..db177ce5fff 100644
--- a/bdb/test/test099.tcl
+++ b/storage/bdb/test/test099.tcl
diff --git a/bdb/test/test100.tcl b/storage/bdb/test/test100.tcl
index f80b2e526dd..f80b2e526dd 100644
--- a/bdb/test/test100.tcl
+++ b/storage/bdb/test/test100.tcl
diff --git a/bdb/test/test101.tcl b/storage/bdb/test/test101.tcl
index 7e5c8fc30fc..7e5c8fc30fc 100644
--- a/bdb/test/test101.tcl
+++ b/storage/bdb/test/test101.tcl
diff --git a/bdb/test/testparams.tcl b/storage/bdb/test/testparams.tcl
index 6628db532d7..6628db532d7 100644
--- a/bdb/test/testparams.tcl
+++ b/storage/bdb/test/testparams.tcl
diff --git a/bdb/test/testutils.tcl b/storage/bdb/test/testutils.tcl
index d1f89dd1e15..d1f89dd1e15 100644
--- a/bdb/test/testutils.tcl
+++ b/storage/bdb/test/testutils.tcl
diff --git a/bdb/test/txn001.tcl b/storage/bdb/test/txn001.tcl
index 406ef35751c..406ef35751c 100644
--- a/bdb/test/txn001.tcl
+++ b/storage/bdb/test/txn001.tcl
diff --git a/bdb/test/txn002.tcl b/storage/bdb/test/txn002.tcl
index 5107472644d..5107472644d 100644
--- a/bdb/test/txn002.tcl
+++ b/storage/bdb/test/txn002.tcl
diff --git a/bdb/test/txn003.tcl b/storage/bdb/test/txn003.tcl
index 71e450cf9ce..71e450cf9ce 100644
--- a/bdb/test/txn003.tcl
+++ b/storage/bdb/test/txn003.tcl
diff --git a/bdb/test/txn004.tcl b/storage/bdb/test/txn004.tcl
index 75e1b40043f..75e1b40043f 100644
--- a/bdb/test/txn004.tcl
+++ b/storage/bdb/test/txn004.tcl
diff --git a/bdb/test/txn005.tcl b/storage/bdb/test/txn005.tcl
index 604f3ad7de4..604f3ad7de4 100644
--- a/bdb/test/txn005.tcl
+++ b/storage/bdb/test/txn005.tcl
diff --git a/bdb/test/txn006.tcl b/storage/bdb/test/txn006.tcl
index 7bf37d34dfc..7bf37d34dfc 100644
--- a/bdb/test/txn006.tcl
+++ b/storage/bdb/test/txn006.tcl
diff --git a/bdb/test/txn007.tcl b/storage/bdb/test/txn007.tcl
index f67dc209f92..f67dc209f92 100644
--- a/bdb/test/txn007.tcl
+++ b/storage/bdb/test/txn007.tcl
diff --git a/bdb/test/txn008.tcl b/storage/bdb/test/txn008.tcl
index ad57ea0eeaa..ad57ea0eeaa 100644
--- a/bdb/test/txn008.tcl
+++ b/storage/bdb/test/txn008.tcl
diff --git a/bdb/test/txn009.tcl b/storage/bdb/test/txn009.tcl
index 784c0068a41..784c0068a41 100644
--- a/bdb/test/txn009.tcl
+++ b/storage/bdb/test/txn009.tcl
diff --git a/bdb/test/txnscript.tcl b/storage/bdb/test/txnscript.tcl
index 1a4a1b6f2ec..1a4a1b6f2ec 100644
--- a/bdb/test/txnscript.tcl
+++ b/storage/bdb/test/txnscript.tcl
diff --git a/bdb/test/update.tcl b/storage/bdb/test/update.tcl
index 2bedfacc793..2bedfacc793 100644
--- a/bdb/test/update.tcl
+++ b/storage/bdb/test/update.tcl
diff --git a/bdb/test/upgrade.tcl b/storage/bdb/test/upgrade.tcl
index 1c0ffc5461a..1c0ffc5461a 100644
--- a/bdb/test/upgrade.tcl
+++ b/storage/bdb/test/upgrade.tcl
diff --git a/bdb/test/wordlist b/storage/bdb/test/wordlist
index 03ea15f7277..03ea15f7277 100644
--- a/bdb/test/wordlist
+++ b/storage/bdb/test/wordlist
diff --git a/bdb/test/wrap.tcl b/storage/bdb/test/wrap.tcl
index aaceb4f74e6..aaceb4f74e6 100644
--- a/bdb/test/wrap.tcl
+++ b/storage/bdb/test/wrap.tcl
diff --git a/bdb/txn/txn.c b/storage/bdb/txn/txn.c
index 78c54791d06..78c54791d06 100644
--- a/bdb/txn/txn.c
+++ b/storage/bdb/txn/txn.c
diff --git a/bdb/txn/txn.src b/storage/bdb/txn/txn.src
index 3f69b29e3ff..3f69b29e3ff 100644
--- a/bdb/txn/txn.src
+++ b/storage/bdb/txn/txn.src
diff --git a/bdb/txn/txn_method.c b/storage/bdb/txn/txn_method.c
index 60fdf30583e..60fdf30583e 100644
--- a/bdb/txn/txn_method.c
+++ b/storage/bdb/txn/txn_method.c
diff --git a/bdb/txn/txn_rec.c b/storage/bdb/txn/txn_rec.c
index 69af6a1f907..69af6a1f907 100644
--- a/bdb/txn/txn_rec.c
+++ b/storage/bdb/txn/txn_rec.c
diff --git a/bdb/txn/txn_recover.c b/storage/bdb/txn/txn_recover.c
index 732a82e5030..732a82e5030 100644
--- a/bdb/txn/txn_recover.c
+++ b/storage/bdb/txn/txn_recover.c
diff --git a/bdb/txn/txn_region.c b/storage/bdb/txn/txn_region.c
index bf72d4f1d2c..bf72d4f1d2c 100644
--- a/bdb/txn/txn_region.c
+++ b/storage/bdb/txn/txn_region.c
diff --git a/bdb/txn/txn_stat.c b/storage/bdb/txn/txn_stat.c
index f7d84e8f4c6..f7d84e8f4c6 100644
--- a/bdb/txn/txn_stat.c
+++ b/storage/bdb/txn/txn_stat.c
diff --git a/bdb/txn/txn_util.c b/storage/bdb/txn/txn_util.c
index cbfbc419615..cbfbc419615 100644
--- a/bdb/txn/txn_util.c
+++ b/storage/bdb/txn/txn_util.c
diff --git a/bdb/xa/xa.c b/storage/bdb/xa/xa.c
index 6667d14c2bf..6667d14c2bf 100644
--- a/bdb/xa/xa.c
+++ b/storage/bdb/xa/xa.c
diff --git a/bdb/xa/xa_db.c b/storage/bdb/xa/xa_db.c
index b84bb1c9fa9..b84bb1c9fa9 100644
--- a/bdb/xa/xa_db.c
+++ b/storage/bdb/xa/xa_db.c
diff --git a/bdb/xa/xa_map.c b/storage/bdb/xa/xa_map.c
index 42fa4b20ed2..42fa4b20ed2 100644
--- a/bdb/xa/xa_map.c
+++ b/storage/bdb/xa/xa_map.c
diff --git a/heap/.cvsignore b/storage/heap/.cvsignore
index 675df8a3eb6..675df8a3eb6 100644
--- a/heap/.cvsignore
+++ b/storage/heap/.cvsignore
diff --git a/heap/ChangeLog b/storage/heap/ChangeLog
index 9d3ced84cc9..9d3ced84cc9 100644
--- a/heap/ChangeLog
+++ b/storage/heap/ChangeLog
diff --git a/storage/heap/Makefile.am b/storage/heap/Makefile.am
new file mode 100644
index 00000000000..2890dc6ee23
--- /dev/null
+++ b/storage/heap/Makefile.am
@@ -0,0 +1,34 @@
+# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include
+LDADD = libheap.a $(top_srcdir)/mysys/libmysys.a \
+ $(top_srcdir)/dbug/libdbug.a \
+ $(top_srcdir)/strings/libmystrings.a
+pkglib_LIBRARIES = libheap.a
+noinst_PROGRAMS = hp_test1 hp_test2
+hp_test1_LDFLAGS = @NOINST_LDFLAGS@
+hp_test2_LDFLAGS = @NOINST_LDFLAGS@
+noinst_HEADERS = heapdef.h
+libheap_a_SOURCES = hp_open.c hp_extra.c hp_close.c hp_panic.c hp_info.c \
+ hp_rrnd.c hp_scan.c hp_update.c hp_write.c hp_delete.c \
+ hp_rsame.c hp_create.c hp_rename.c hp_rfirst.c \
+ hp_rnext.c hp_rlast.c hp_rprev.c hp_clear.c \
+ hp_rkey.c hp_block.c \
+ hp_hash.c _check.c _rectest.c hp_static.c
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
diff --git a/heap/_check.c b/storage/heap/_check.c
index 4316a9926f7..4316a9926f7 100644
--- a/heap/_check.c
+++ b/storage/heap/_check.c
diff --git a/heap/_rectest.c b/storage/heap/_rectest.c
index eb350263cec..eb350263cec 100644
--- a/heap/_rectest.c
+++ b/storage/heap/_rectest.c
diff --git a/heap/heapdef.h b/storage/heap/heapdef.h
index 68d9405138f..68d9405138f 100644
--- a/heap/heapdef.h
+++ b/storage/heap/heapdef.h
diff --git a/heap/hp_block.c b/storage/heap/hp_block.c
index f26b208b521..f26b208b521 100644
--- a/heap/hp_block.c
+++ b/storage/heap/hp_block.c
diff --git a/heap/hp_clear.c b/storage/heap/hp_clear.c
index 596d71ebe9c..596d71ebe9c 100644
--- a/heap/hp_clear.c
+++ b/storage/heap/hp_clear.c
diff --git a/heap/hp_close.c b/storage/heap/hp_close.c
index 3e0c9003ac8..3e0c9003ac8 100644
--- a/heap/hp_close.c
+++ b/storage/heap/hp_close.c
diff --git a/heap/hp_create.c b/storage/heap/hp_create.c
index 8fcf7dde000..8fcf7dde000 100644
--- a/heap/hp_create.c
+++ b/storage/heap/hp_create.c
diff --git a/heap/hp_delete.c b/storage/heap/hp_delete.c
index 5287533ae0a..5287533ae0a 100644
--- a/heap/hp_delete.c
+++ b/storage/heap/hp_delete.c
diff --git a/heap/hp_extra.c b/storage/heap/hp_extra.c
index dd41d6c5f19..dd41d6c5f19 100644
--- a/heap/hp_extra.c
+++ b/storage/heap/hp_extra.c
diff --git a/heap/hp_hash.c b/storage/heap/hp_hash.c
index d643f776731..d643f776731 100644
--- a/heap/hp_hash.c
+++ b/storage/heap/hp_hash.c
diff --git a/heap/hp_info.c b/storage/heap/hp_info.c
index 2e56d030234..2e56d030234 100644
--- a/heap/hp_info.c
+++ b/storage/heap/hp_info.c
diff --git a/heap/hp_open.c b/storage/heap/hp_open.c
index fd937229b0d..fd937229b0d 100644
--- a/heap/hp_open.c
+++ b/storage/heap/hp_open.c
diff --git a/heap/hp_panic.c b/storage/heap/hp_panic.c
index 2b659cbfbb3..2b659cbfbb3 100644
--- a/heap/hp_panic.c
+++ b/storage/heap/hp_panic.c
diff --git a/heap/hp_rename.c b/storage/heap/hp_rename.c
index 93906a66c37..93906a66c37 100644
--- a/heap/hp_rename.c
+++ b/storage/heap/hp_rename.c
diff --git a/heap/hp_rfirst.c b/storage/heap/hp_rfirst.c
index 85548fea212..85548fea212 100644
--- a/heap/hp_rfirst.c
+++ b/storage/heap/hp_rfirst.c
diff --git a/heap/hp_rkey.c b/storage/heap/hp_rkey.c
index f5f22a877a1..f5f22a877a1 100644
--- a/heap/hp_rkey.c
+++ b/storage/heap/hp_rkey.c
diff --git a/heap/hp_rlast.c b/storage/heap/hp_rlast.c
index b1a49739108..b1a49739108 100644
--- a/heap/hp_rlast.c
+++ b/storage/heap/hp_rlast.c
diff --git a/heap/hp_rnext.c b/storage/heap/hp_rnext.c
index a1bc480333e..a1bc480333e 100644
--- a/heap/hp_rnext.c
+++ b/storage/heap/hp_rnext.c
diff --git a/heap/hp_rprev.c b/storage/heap/hp_rprev.c
index d8f5c01dcea..d8f5c01dcea 100644
--- a/heap/hp_rprev.c
+++ b/storage/heap/hp_rprev.c
diff --git a/heap/hp_rrnd.c b/storage/heap/hp_rrnd.c
index 4daa3a06377..4daa3a06377 100644
--- a/heap/hp_rrnd.c
+++ b/storage/heap/hp_rrnd.c
diff --git a/heap/hp_rsame.c b/storage/heap/hp_rsame.c
index 6a375753b1a..6a375753b1a 100644
--- a/heap/hp_rsame.c
+++ b/storage/heap/hp_rsame.c
diff --git a/heap/hp_scan.c b/storage/heap/hp_scan.c
index 59e544ca590..59e544ca590 100644
--- a/heap/hp_scan.c
+++ b/storage/heap/hp_scan.c
diff --git a/heap/hp_static.c b/storage/heap/hp_static.c
index a458b742b9c..a458b742b9c 100644
--- a/heap/hp_static.c
+++ b/storage/heap/hp_static.c
diff --git a/heap/hp_test1.c b/storage/heap/hp_test1.c
index dd696528eb8..dd696528eb8 100644
--- a/heap/hp_test1.c
+++ b/storage/heap/hp_test1.c
diff --git a/heap/hp_test2.c b/storage/heap/hp_test2.c
index 2de49bcb66b..2de49bcb66b 100644
--- a/heap/hp_test2.c
+++ b/storage/heap/hp_test2.c
diff --git a/heap/hp_update.c b/storage/heap/hp_update.c
index 63ada225f06..63ada225f06 100644
--- a/heap/hp_update.c
+++ b/storage/heap/hp_update.c
diff --git a/heap/hp_write.c b/storage/heap/hp_write.c
index a60d32eecb6..a60d32eecb6 100644
--- a/heap/hp_write.c
+++ b/storage/heap/hp_write.c
diff --git a/heap/make-ccc b/storage/heap/make-ccc
index 192647298ad..192647298ad 100755
--- a/heap/make-ccc
+++ b/storage/heap/make-ccc
diff --git a/innobase/Makefile.am b/storage/innobase/Makefile.am
index 8ff90d16a2c..8ff90d16a2c 100644
--- a/innobase/Makefile.am
+++ b/storage/innobase/Makefile.am
diff --git a/innobase/btr/Makefile.am b/storage/innobase/btr/Makefile.am
index ed61facb695..ed61facb695 100644
--- a/innobase/btr/Makefile.am
+++ b/storage/innobase/btr/Makefile.am
diff --git a/innobase/btr/btr0btr.c b/storage/innobase/btr/btr0btr.c
index c27fb73ff8d..c27fb73ff8d 100644
--- a/innobase/btr/btr0btr.c
+++ b/storage/innobase/btr/btr0btr.c
diff --git a/innobase/btr/btr0cur.c b/storage/innobase/btr/btr0cur.c
index f81cce5b8e9..f81cce5b8e9 100644
--- a/innobase/btr/btr0cur.c
+++ b/storage/innobase/btr/btr0cur.c
diff --git a/innobase/btr/btr0pcur.c b/storage/innobase/btr/btr0pcur.c
index cb398b4afab..cb398b4afab 100644
--- a/innobase/btr/btr0pcur.c
+++ b/storage/innobase/btr/btr0pcur.c
diff --git a/innobase/btr/btr0sea.c b/storage/innobase/btr/btr0sea.c
index f705fee4275..f705fee4275 100644
--- a/innobase/btr/btr0sea.c
+++ b/storage/innobase/btr/btr0sea.c
diff --git a/innobase/btr/makefilewin b/storage/innobase/btr/makefilewin
index a5806b74a51..a5806b74a51 100644
--- a/innobase/btr/makefilewin
+++ b/storage/innobase/btr/makefilewin
diff --git a/innobase/buf/Makefile.am b/storage/innobase/buf/Makefile.am
index 3f56c8b02d7..3f56c8b02d7 100644
--- a/innobase/buf/Makefile.am
+++ b/storage/innobase/buf/Makefile.am
diff --git a/innobase/buf/buf0buf.c b/storage/innobase/buf/buf0buf.c
index fe4498e6f10..fe4498e6f10 100644
--- a/innobase/buf/buf0buf.c
+++ b/storage/innobase/buf/buf0buf.c
diff --git a/innobase/buf/buf0flu.c b/storage/innobase/buf/buf0flu.c
index ffb16790b2d..ffb16790b2d 100644
--- a/innobase/buf/buf0flu.c
+++ b/storage/innobase/buf/buf0flu.c
diff --git a/innobase/buf/buf0lru.c b/storage/innobase/buf/buf0lru.c
index a0157da2d42..a0157da2d42 100644
--- a/innobase/buf/buf0lru.c
+++ b/storage/innobase/buf/buf0lru.c
diff --git a/innobase/buf/buf0rea.c b/storage/innobase/buf/buf0rea.c
index 813ca589907..813ca589907 100644
--- a/innobase/buf/buf0rea.c
+++ b/storage/innobase/buf/buf0rea.c
diff --git a/innobase/buf/makefilewin b/storage/innobase/buf/makefilewin
index ce62cb95958..ce62cb95958 100644
--- a/innobase/buf/makefilewin
+++ b/storage/innobase/buf/makefilewin
diff --git a/innobase/configure.in b/storage/innobase/configure.in
index c56bd8274c4..c56bd8274c4 100644
--- a/innobase/configure.in
+++ b/storage/innobase/configure.in
diff --git a/innobase/data/Makefile.am b/storage/innobase/data/Makefile.am
index eeb6f129de0..eeb6f129de0 100644
--- a/innobase/data/Makefile.am
+++ b/storage/innobase/data/Makefile.am
diff --git a/innobase/data/data0data.c b/storage/innobase/data/data0data.c
index 194213a04e1..194213a04e1 100644
--- a/innobase/data/data0data.c
+++ b/storage/innobase/data/data0data.c
diff --git a/innobase/data/data0type.c b/storage/innobase/data/data0type.c
index d4264ad2926..d4264ad2926 100644
--- a/innobase/data/data0type.c
+++ b/storage/innobase/data/data0type.c
diff --git a/innobase/data/makefilewin b/storage/innobase/data/makefilewin
index 785b75fbb2b..785b75fbb2b 100644
--- a/innobase/data/makefilewin
+++ b/storage/innobase/data/makefilewin
diff --git a/innobase/db/db0err.h b/storage/innobase/db/db0err.h
index 34513545faa..34513545faa 100644
--- a/innobase/db/db0err.h
+++ b/storage/innobase/db/db0err.h
diff --git a/innobase/dict/Makefile.am b/storage/innobase/dict/Makefile.am
index 0034d2f8f1e..0034d2f8f1e 100644
--- a/innobase/dict/Makefile.am
+++ b/storage/innobase/dict/Makefile.am
diff --git a/innobase/dict/dict0boot.c b/storage/innobase/dict/dict0boot.c
index 18a707a1b93..18a707a1b93 100644
--- a/innobase/dict/dict0boot.c
+++ b/storage/innobase/dict/dict0boot.c
diff --git a/innobase/dict/dict0crea.c b/storage/innobase/dict/dict0crea.c
index c7d6ffd2c22..c7d6ffd2c22 100644
--- a/innobase/dict/dict0crea.c
+++ b/storage/innobase/dict/dict0crea.c
diff --git a/innobase/dict/dict0dict.c b/storage/innobase/dict/dict0dict.c
index 9580a80e7e7..9580a80e7e7 100644
--- a/innobase/dict/dict0dict.c
+++ b/storage/innobase/dict/dict0dict.c
diff --git a/innobase/dict/dict0load.c b/storage/innobase/dict/dict0load.c
index 9bafcf33553..9bafcf33553 100644
--- a/innobase/dict/dict0load.c
+++ b/storage/innobase/dict/dict0load.c
diff --git a/innobase/dict/dict0mem.c b/storage/innobase/dict/dict0mem.c
index eec35310039..eec35310039 100644
--- a/innobase/dict/dict0mem.c
+++ b/storage/innobase/dict/dict0mem.c
diff --git a/innobase/dict/makefilewin b/storage/innobase/dict/makefilewin
index e828d06943c..e828d06943c 100644
--- a/innobase/dict/makefilewin
+++ b/storage/innobase/dict/makefilewin
diff --git a/innobase/dyn/Makefile.am b/storage/innobase/dyn/Makefile.am
index ec33a3c18a9..ec33a3c18a9 100644
--- a/innobase/dyn/Makefile.am
+++ b/storage/innobase/dyn/Makefile.am
diff --git a/innobase/dyn/dyn0dyn.c b/storage/innobase/dyn/dyn0dyn.c
index 0afe6eda856..0afe6eda856 100644
--- a/innobase/dyn/dyn0dyn.c
+++ b/storage/innobase/dyn/dyn0dyn.c
diff --git a/innobase/dyn/makefilewin b/storage/innobase/dyn/makefilewin
index 71a58a756c1..71a58a756c1 100644
--- a/innobase/dyn/makefilewin
+++ b/storage/innobase/dyn/makefilewin
diff --git a/innobase/eval/Makefile.am b/storage/innobase/eval/Makefile.am
index aebffb91be3..aebffb91be3 100644
--- a/innobase/eval/Makefile.am
+++ b/storage/innobase/eval/Makefile.am
diff --git a/innobase/eval/eval0eval.c b/storage/innobase/eval/eval0eval.c
index 5b2d1f857b1..5b2d1f857b1 100644
--- a/innobase/eval/eval0eval.c
+++ b/storage/innobase/eval/eval0eval.c
diff --git a/innobase/eval/eval0proc.c b/storage/innobase/eval/eval0proc.c
index 50676e4f3fc..50676e4f3fc 100644
--- a/innobase/eval/eval0proc.c
+++ b/storage/innobase/eval/eval0proc.c
diff --git a/innobase/eval/makefilewin b/storage/innobase/eval/makefilewin
index f587f2a05a6..f587f2a05a6 100644
--- a/innobase/eval/makefilewin
+++ b/storage/innobase/eval/makefilewin
diff --git a/innobase/fil/Makefile.am b/storage/innobase/fil/Makefile.am
index dc0baff7d1a..dc0baff7d1a 100644
--- a/innobase/fil/Makefile.am
+++ b/storage/innobase/fil/Makefile.am
diff --git a/innobase/fil/fil0fil.c b/storage/innobase/fil/fil0fil.c
index 20f522c1a60..20f522c1a60 100644
--- a/innobase/fil/fil0fil.c
+++ b/storage/innobase/fil/fil0fil.c
diff --git a/innobase/fil/makefilewin b/storage/innobase/fil/makefilewin
index 1b2d6ab2dbb..1b2d6ab2dbb 100644
--- a/innobase/fil/makefilewin
+++ b/storage/innobase/fil/makefilewin
diff --git a/innobase/fsp/Makefile.am b/storage/innobase/fsp/Makefile.am
index edf06bda0d6..edf06bda0d6 100644
--- a/innobase/fsp/Makefile.am
+++ b/storage/innobase/fsp/Makefile.am
diff --git a/innobase/fsp/fsp0fsp.c b/storage/innobase/fsp/fsp0fsp.c
index ad4228f6797..ad4228f6797 100644
--- a/innobase/fsp/fsp0fsp.c
+++ b/storage/innobase/fsp/fsp0fsp.c
diff --git a/innobase/fsp/makefilewin b/storage/innobase/fsp/makefilewin
index 503cf27f490..503cf27f490 100644
--- a/innobase/fsp/makefilewin
+++ b/storage/innobase/fsp/makefilewin
diff --git a/innobase/fut/Makefile.am b/storage/innobase/fut/Makefile.am
index 839fdb1580e..839fdb1580e 100644
--- a/innobase/fut/Makefile.am
+++ b/storage/innobase/fut/Makefile.am
diff --git a/innobase/fut/fut0fut.c b/storage/innobase/fut/fut0fut.c
index 7f7a8fa39e7..7f7a8fa39e7 100644
--- a/innobase/fut/fut0fut.c
+++ b/storage/innobase/fut/fut0fut.c
diff --git a/innobase/fut/fut0lst.c b/storage/innobase/fut/fut0lst.c
index 8deaa8adb3f..8deaa8adb3f 100644
--- a/innobase/fut/fut0lst.c
+++ b/storage/innobase/fut/fut0lst.c
diff --git a/innobase/fut/makefilewin b/storage/innobase/fut/makefilewin
index 40f3161015c..40f3161015c 100644
--- a/innobase/fut/makefilewin
+++ b/storage/innobase/fut/makefilewin
diff --git a/innobase/ha/Makefile.am b/storage/innobase/ha/Makefile.am
index 121bafe167d..121bafe167d 100644
--- a/innobase/ha/Makefile.am
+++ b/storage/innobase/ha/Makefile.am
diff --git a/innobase/ha/ha0ha.c b/storage/innobase/ha/ha0ha.c
index ad1391ff83e..ad1391ff83e 100644
--- a/innobase/ha/ha0ha.c
+++ b/storage/innobase/ha/ha0ha.c
diff --git a/innobase/ha/hash0hash.c b/storage/innobase/ha/hash0hash.c
index facdea66198..facdea66198 100644
--- a/innobase/ha/hash0hash.c
+++ b/storage/innobase/ha/hash0hash.c
diff --git a/innobase/ha/makefilewin b/storage/innobase/ha/makefilewin
index c7cd130ceea..c7cd130ceea 100644
--- a/innobase/ha/makefilewin
+++ b/storage/innobase/ha/makefilewin
diff --git a/innobase/ibuf/Makefile.am b/storage/innobase/ibuf/Makefile.am
index fb813d38ee5..fb813d38ee5 100644
--- a/innobase/ibuf/Makefile.am
+++ b/storage/innobase/ibuf/Makefile.am
diff --git a/innobase/ibuf/ibuf0ibuf.c b/storage/innobase/ibuf/ibuf0ibuf.c
index d7fa48b6e66..d7fa48b6e66 100644
--- a/innobase/ibuf/ibuf0ibuf.c
+++ b/storage/innobase/ibuf/ibuf0ibuf.c
diff --git a/innobase/ibuf/makefilewin b/storage/innobase/ibuf/makefilewin
index 86bf9794520..86bf9794520 100644
--- a/innobase/ibuf/makefilewin
+++ b/storage/innobase/ibuf/makefilewin
diff --git a/innobase/include/Makefile.am b/storage/innobase/include/Makefile.am
index eb1e3b72877..eb1e3b72877 100644
--- a/innobase/include/Makefile.am
+++ b/storage/innobase/include/Makefile.am
diff --git a/storage/innobase/include/Makefile.i b/storage/innobase/include/Makefile.i
new file mode 100644
index 00000000000..87952a7abc8
--- /dev/null
+++ b/storage/innobase/include/Makefile.i
@@ -0,0 +1,6 @@
+# Makefile included in Makefile.am in every subdirectory
+
+INCLUDES = -I$(top_srcdir)/include -I$(top_srcdir)/../../include
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
diff --git a/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h
index 1f3a32fa70c..1f3a32fa70c 100644
--- a/innobase/include/btr0btr.h
+++ b/storage/innobase/include/btr0btr.h
diff --git a/innobase/include/btr0btr.ic b/storage/innobase/include/btr0btr.ic
index a0860b1c3a7..a0860b1c3a7 100644
--- a/innobase/include/btr0btr.ic
+++ b/storage/innobase/include/btr0btr.ic
diff --git a/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h
index 352d1739b6a..352d1739b6a 100644
--- a/innobase/include/btr0cur.h
+++ b/storage/innobase/include/btr0cur.h
diff --git a/innobase/include/btr0cur.ic b/storage/innobase/include/btr0cur.ic
index bf8a6efb68d..bf8a6efb68d 100644
--- a/innobase/include/btr0cur.ic
+++ b/storage/innobase/include/btr0cur.ic
diff --git a/innobase/include/btr0pcur.h b/storage/innobase/include/btr0pcur.h
index eb3822aab7a..eb3822aab7a 100644
--- a/innobase/include/btr0pcur.h
+++ b/storage/innobase/include/btr0pcur.h
diff --git a/innobase/include/btr0pcur.ic b/storage/innobase/include/btr0pcur.ic
index 9a7d7867025..9a7d7867025 100644
--- a/innobase/include/btr0pcur.ic
+++ b/storage/innobase/include/btr0pcur.ic
diff --git a/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h
index 78e88a24083..78e88a24083 100644
--- a/innobase/include/btr0sea.h
+++ b/storage/innobase/include/btr0sea.h
diff --git a/innobase/include/btr0sea.ic b/storage/innobase/include/btr0sea.ic
index 8a41042f713..8a41042f713 100644
--- a/innobase/include/btr0sea.ic
+++ b/storage/innobase/include/btr0sea.ic
diff --git a/innobase/include/btr0types.h b/storage/innobase/include/btr0types.h
index 03a61480e2e..03a61480e2e 100644
--- a/innobase/include/btr0types.h
+++ b/storage/innobase/include/btr0types.h
diff --git a/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index ae8d0411c12..ae8d0411c12 100644
--- a/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
diff --git a/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic
index d949254d47d..d949254d47d 100644
--- a/innobase/include/buf0buf.ic
+++ b/storage/innobase/include/buf0buf.ic
diff --git a/innobase/include/buf0flu.h b/storage/innobase/include/buf0flu.h
index 1b40acaa269..1b40acaa269 100644
--- a/innobase/include/buf0flu.h
+++ b/storage/innobase/include/buf0flu.h
diff --git a/innobase/include/buf0flu.ic b/storage/innobase/include/buf0flu.ic
index 9a8a021e029..9a8a021e029 100644
--- a/innobase/include/buf0flu.ic
+++ b/storage/innobase/include/buf0flu.ic
diff --git a/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h
index fb29b44ba98..fb29b44ba98 100644
--- a/innobase/include/buf0lru.h
+++ b/storage/innobase/include/buf0lru.h
diff --git a/innobase/include/buf0lru.ic b/storage/innobase/include/buf0lru.ic
index 7b8ee457b0b..7b8ee457b0b 100644
--- a/innobase/include/buf0lru.ic
+++ b/storage/innobase/include/buf0lru.ic
diff --git a/innobase/include/buf0rea.h b/storage/innobase/include/buf0rea.h
index 380a42f4b80..380a42f4b80 100644
--- a/innobase/include/buf0rea.h
+++ b/storage/innobase/include/buf0rea.h
diff --git a/innobase/include/buf0types.h b/storage/innobase/include/buf0types.h
index 44fdfa80e73..44fdfa80e73 100644
--- a/innobase/include/buf0types.h
+++ b/storage/innobase/include/buf0types.h
diff --git a/innobase/include/data0data.h b/storage/innobase/include/data0data.h
index 2136de0f9b3..2136de0f9b3 100644
--- a/innobase/include/data0data.h
+++ b/storage/innobase/include/data0data.h
diff --git a/innobase/include/data0data.ic b/storage/innobase/include/data0data.ic
index 0769372e16f..0769372e16f 100644
--- a/innobase/include/data0data.ic
+++ b/storage/innobase/include/data0data.ic
diff --git a/innobase/include/data0type.h b/storage/innobase/include/data0type.h
index 7e9692eca5a..7e9692eca5a 100644
--- a/innobase/include/data0type.h
+++ b/storage/innobase/include/data0type.h
diff --git a/innobase/include/data0type.ic b/storage/innobase/include/data0type.ic
index 06d45dd5501..06d45dd5501 100644
--- a/innobase/include/data0type.ic
+++ b/storage/innobase/include/data0type.ic
diff --git a/innobase/include/data0types.h b/storage/innobase/include/data0types.h
index ab314f8f471..ab314f8f471 100644
--- a/innobase/include/data0types.h
+++ b/storage/innobase/include/data0types.h
diff --git a/innobase/include/db0err.h b/storage/innobase/include/db0err.h
index de5ac44e73f..de5ac44e73f 100644
--- a/innobase/include/db0err.h
+++ b/storage/innobase/include/db0err.h
diff --git a/innobase/include/dict0boot.h b/storage/innobase/include/dict0boot.h
index 86702cbca05..86702cbca05 100644
--- a/innobase/include/dict0boot.h
+++ b/storage/innobase/include/dict0boot.h
diff --git a/innobase/include/dict0boot.ic b/storage/innobase/include/dict0boot.ic
index 8a91feed018..8a91feed018 100644
--- a/innobase/include/dict0boot.ic
+++ b/storage/innobase/include/dict0boot.ic
diff --git a/innobase/include/dict0crea.h b/storage/innobase/include/dict0crea.h
index 5dd571be59c..5dd571be59c 100644
--- a/innobase/include/dict0crea.h
+++ b/storage/innobase/include/dict0crea.h
diff --git a/innobase/include/dict0crea.ic b/storage/innobase/include/dict0crea.ic
index b4da2d7e03f..b4da2d7e03f 100644
--- a/innobase/include/dict0crea.ic
+++ b/storage/innobase/include/dict0crea.ic
diff --git a/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index d9cda402bac..d9cda402bac 100644
--- a/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
diff --git a/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic
index 928a693f860..928a693f860 100644
--- a/innobase/include/dict0dict.ic
+++ b/storage/innobase/include/dict0dict.ic
diff --git a/innobase/include/dict0load.h b/storage/innobase/include/dict0load.h
index f13620bc6e8..f13620bc6e8 100644
--- a/innobase/include/dict0load.h
+++ b/storage/innobase/include/dict0load.h
diff --git a/innobase/include/dict0load.ic b/storage/innobase/include/dict0load.ic
index 1a207fbf0fd..1a207fbf0fd 100644
--- a/innobase/include/dict0load.ic
+++ b/storage/innobase/include/dict0load.ic
diff --git a/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index ff6c4ec9b28..ff6c4ec9b28 100644
--- a/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
diff --git a/innobase/include/dict0mem.ic b/storage/innobase/include/dict0mem.ic
index 9bcefc2a51f..9bcefc2a51f 100644
--- a/innobase/include/dict0mem.ic
+++ b/storage/innobase/include/dict0mem.ic
diff --git a/innobase/include/dict0types.h b/storage/innobase/include/dict0types.h
index bd8a1a996d1..bd8a1a996d1 100644
--- a/innobase/include/dict0types.h
+++ b/storage/innobase/include/dict0types.h
diff --git a/innobase/include/dyn0dyn.h b/storage/innobase/include/dyn0dyn.h
index 1df976a5301..1df976a5301 100644
--- a/innobase/include/dyn0dyn.h
+++ b/storage/innobase/include/dyn0dyn.h
diff --git a/innobase/include/dyn0dyn.ic b/storage/innobase/include/dyn0dyn.ic
index c1b8f2cb8ce..c1b8f2cb8ce 100644
--- a/innobase/include/dyn0dyn.ic
+++ b/storage/innobase/include/dyn0dyn.ic
diff --git a/innobase/include/eval0eval.h b/storage/innobase/include/eval0eval.h
index 6561f0c8ae7..6561f0c8ae7 100644
--- a/innobase/include/eval0eval.h
+++ b/storage/innobase/include/eval0eval.h
diff --git a/innobase/include/eval0eval.ic b/storage/innobase/include/eval0eval.ic
index 069cbfe5f37..069cbfe5f37 100644
--- a/innobase/include/eval0eval.ic
+++ b/storage/innobase/include/eval0eval.ic
diff --git a/innobase/include/eval0proc.h b/storage/innobase/include/eval0proc.h
index 5d685ad9076..5d685ad9076 100644
--- a/innobase/include/eval0proc.h
+++ b/storage/innobase/include/eval0proc.h
diff --git a/innobase/include/eval0proc.ic b/storage/innobase/include/eval0proc.ic
index 0d7ecb6d1dc..0d7ecb6d1dc 100644
--- a/innobase/include/eval0proc.ic
+++ b/storage/innobase/include/eval0proc.ic
diff --git a/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h
index aa1ec5c25a5..aa1ec5c25a5 100644
--- a/innobase/include/fil0fil.h
+++ b/storage/innobase/include/fil0fil.h
diff --git a/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h
index 2fcde882df7..2fcde882df7 100644
--- a/innobase/include/fsp0fsp.h
+++ b/storage/innobase/include/fsp0fsp.h
diff --git a/innobase/include/fsp0fsp.ic b/storage/innobase/include/fsp0fsp.ic
index 89cd9263bd6..89cd9263bd6 100644
--- a/innobase/include/fsp0fsp.ic
+++ b/storage/innobase/include/fsp0fsp.ic
diff --git a/innobase/include/fut0fut.h b/storage/innobase/include/fut0fut.h
index b9546b4e1a0..b9546b4e1a0 100644
--- a/innobase/include/fut0fut.h
+++ b/storage/innobase/include/fut0fut.h
diff --git a/innobase/include/fut0fut.ic b/storage/innobase/include/fut0fut.ic
index 6a107786376..6a107786376 100644
--- a/innobase/include/fut0fut.ic
+++ b/storage/innobase/include/fut0fut.ic
diff --git a/innobase/include/fut0lst.h b/storage/innobase/include/fut0lst.h
index 5427e2248da..5427e2248da 100644
--- a/innobase/include/fut0lst.h
+++ b/storage/innobase/include/fut0lst.h
diff --git a/innobase/include/fut0lst.ic b/storage/innobase/include/fut0lst.ic
index c0d61833b48..c0d61833b48 100644
--- a/innobase/include/fut0lst.ic
+++ b/storage/innobase/include/fut0lst.ic
diff --git a/innobase/include/ha0ha.h b/storage/innobase/include/ha0ha.h
index bdaecfcc57a..bdaecfcc57a 100644
--- a/innobase/include/ha0ha.h
+++ b/storage/innobase/include/ha0ha.h
diff --git a/innobase/include/ha0ha.ic b/storage/innobase/include/ha0ha.ic
index 63cd19fafc3..63cd19fafc3 100644
--- a/innobase/include/ha0ha.ic
+++ b/storage/innobase/include/ha0ha.ic
diff --git a/innobase/include/hash0hash.h b/storage/innobase/include/hash0hash.h
index 51315e40875..51315e40875 100644
--- a/innobase/include/hash0hash.h
+++ b/storage/innobase/include/hash0hash.h
diff --git a/innobase/include/hash0hash.ic b/storage/innobase/include/hash0hash.ic
index 0d713140c13..0d713140c13 100644
--- a/innobase/include/hash0hash.ic
+++ b/storage/innobase/include/hash0hash.ic
diff --git a/innobase/include/ibuf0ibuf.h b/storage/innobase/include/ibuf0ibuf.h
index 4f38ab4f1e9..4f38ab4f1e9 100644
--- a/innobase/include/ibuf0ibuf.h
+++ b/storage/innobase/include/ibuf0ibuf.h
diff --git a/innobase/include/ibuf0ibuf.ic b/storage/innobase/include/ibuf0ibuf.ic
index 68f7ce9c1d0..68f7ce9c1d0 100644
--- a/innobase/include/ibuf0ibuf.ic
+++ b/storage/innobase/include/ibuf0ibuf.ic
diff --git a/innobase/include/ibuf0types.h b/storage/innobase/include/ibuf0types.h
index fb202ac44b0..fb202ac44b0 100644
--- a/innobase/include/ibuf0types.h
+++ b/storage/innobase/include/ibuf0types.h
diff --git a/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h
index 20b1f1d7145..20b1f1d7145 100644
--- a/innobase/include/lock0lock.h
+++ b/storage/innobase/include/lock0lock.h
diff --git a/innobase/include/lock0lock.ic b/storage/innobase/include/lock0lock.ic
index c7a71bb45d8..c7a71bb45d8 100644
--- a/innobase/include/lock0lock.ic
+++ b/storage/innobase/include/lock0lock.ic
diff --git a/innobase/include/lock0types.h b/storage/innobase/include/lock0types.h
index 6c3e54ee1fc..6c3e54ee1fc 100644
--- a/innobase/include/lock0types.h
+++ b/storage/innobase/include/lock0types.h
diff --git a/innobase/include/log0log.h b/storage/innobase/include/log0log.h
index 7f3f10438b4..7f3f10438b4 100644
--- a/innobase/include/log0log.h
+++ b/storage/innobase/include/log0log.h
diff --git a/innobase/include/log0log.ic b/storage/innobase/include/log0log.ic
index ca7531783a2..ca7531783a2 100644
--- a/innobase/include/log0log.ic
+++ b/storage/innobase/include/log0log.ic
diff --git a/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h
index 658df4d5586..658df4d5586 100644
--- a/innobase/include/log0recv.h
+++ b/storage/innobase/include/log0recv.h
diff --git a/innobase/include/log0recv.ic b/storage/innobase/include/log0recv.ic
index 489641bade2..489641bade2 100644
--- a/innobase/include/log0recv.ic
+++ b/storage/innobase/include/log0recv.ic
diff --git a/innobase/include/mach0data.h b/storage/innobase/include/mach0data.h
index f9a3ff521d5..f9a3ff521d5 100644
--- a/innobase/include/mach0data.h
+++ b/storage/innobase/include/mach0data.h
diff --git a/innobase/include/mach0data.ic b/storage/innobase/include/mach0data.ic
index 888f3f743e4..888f3f743e4 100644
--- a/innobase/include/mach0data.ic
+++ b/storage/innobase/include/mach0data.ic
diff --git a/innobase/include/makefilewin.i b/storage/innobase/include/makefilewin.i
index f756cf2ea3a..f756cf2ea3a 100644
--- a/innobase/include/makefilewin.i
+++ b/storage/innobase/include/makefilewin.i
diff --git a/innobase/include/mem0dbg.h b/storage/innobase/include/mem0dbg.h
index 96f30842df6..96f30842df6 100644
--- a/innobase/include/mem0dbg.h
+++ b/storage/innobase/include/mem0dbg.h
diff --git a/innobase/include/mem0dbg.ic b/storage/innobase/include/mem0dbg.ic
index 7ce5f6f1ba5..7ce5f6f1ba5 100644
--- a/innobase/include/mem0dbg.ic
+++ b/storage/innobase/include/mem0dbg.ic
diff --git a/innobase/include/mem0mem.h b/storage/innobase/include/mem0mem.h
index 87afdb8f91c..87afdb8f91c 100644
--- a/innobase/include/mem0mem.h
+++ b/storage/innobase/include/mem0mem.h
diff --git a/innobase/include/mem0mem.ic b/storage/innobase/include/mem0mem.ic
index 8c87c884d78..8c87c884d78 100644
--- a/innobase/include/mem0mem.ic
+++ b/storage/innobase/include/mem0mem.ic
diff --git a/innobase/include/mem0pool.h b/storage/innobase/include/mem0pool.h
index 43707bd5f61..43707bd5f61 100644
--- a/innobase/include/mem0pool.h
+++ b/storage/innobase/include/mem0pool.h
diff --git a/innobase/include/mem0pool.ic b/storage/innobase/include/mem0pool.ic
index 4e8c08733ed..4e8c08733ed 100644
--- a/innobase/include/mem0pool.ic
+++ b/storage/innobase/include/mem0pool.ic
diff --git a/innobase/include/mtr0log.h b/storage/innobase/include/mtr0log.h
index 6a3920aa8a1..6a3920aa8a1 100644
--- a/innobase/include/mtr0log.h
+++ b/storage/innobase/include/mtr0log.h
diff --git a/innobase/include/mtr0log.ic b/storage/innobase/include/mtr0log.ic
index 08d9a6448eb..08d9a6448eb 100644
--- a/innobase/include/mtr0log.ic
+++ b/storage/innobase/include/mtr0log.ic
diff --git a/innobase/include/mtr0mtr.h b/storage/innobase/include/mtr0mtr.h
index f44e813cf6b..f44e813cf6b 100644
--- a/innobase/include/mtr0mtr.h
+++ b/storage/innobase/include/mtr0mtr.h
diff --git a/innobase/include/mtr0mtr.ic b/storage/innobase/include/mtr0mtr.ic
index 4fc6dd2f6a9..4fc6dd2f6a9 100644
--- a/innobase/include/mtr0mtr.ic
+++ b/storage/innobase/include/mtr0mtr.ic
diff --git a/innobase/include/mtr0types.h b/storage/innobase/include/mtr0types.h
index e3b6ec9a84f..e3b6ec9a84f 100644
--- a/innobase/include/mtr0types.h
+++ b/storage/innobase/include/mtr0types.h
diff --git a/innobase/include/os0file.h b/storage/innobase/include/os0file.h
index 362e3552411..362e3552411 100644
--- a/innobase/include/os0file.h
+++ b/storage/innobase/include/os0file.h
diff --git a/innobase/include/os0proc.h b/storage/innobase/include/os0proc.h
index b0b72e18675..b0b72e18675 100644
--- a/innobase/include/os0proc.h
+++ b/storage/innobase/include/os0proc.h
diff --git a/innobase/include/os0proc.ic b/storage/innobase/include/os0proc.ic
index 651ba1f17e3..651ba1f17e3 100644
--- a/innobase/include/os0proc.ic
+++ b/storage/innobase/include/os0proc.ic
diff --git a/innobase/include/os0sync.h b/storage/innobase/include/os0sync.h
index d27b1676f1b..d27b1676f1b 100644
--- a/innobase/include/os0sync.h
+++ b/storage/innobase/include/os0sync.h
diff --git a/innobase/include/os0sync.ic b/storage/innobase/include/os0sync.ic
index 1337e97152a..1337e97152a 100644
--- a/innobase/include/os0sync.ic
+++ b/storage/innobase/include/os0sync.ic
diff --git a/innobase/include/os0thread.h b/storage/innobase/include/os0thread.h
index c00d28baf60..c00d28baf60 100644
--- a/innobase/include/os0thread.h
+++ b/storage/innobase/include/os0thread.h
diff --git a/innobase/include/os0thread.ic b/storage/innobase/include/os0thread.ic
index a75aa3abb34..a75aa3abb34 100644
--- a/innobase/include/os0thread.ic
+++ b/storage/innobase/include/os0thread.ic
diff --git a/innobase/include/page0cur.h b/storage/innobase/include/page0cur.h
index b03302b0e77..b03302b0e77 100644
--- a/innobase/include/page0cur.h
+++ b/storage/innobase/include/page0cur.h
diff --git a/innobase/include/page0cur.ic b/storage/innobase/include/page0cur.ic
index f8346819e84..f8346819e84 100644
--- a/innobase/include/page0cur.ic
+++ b/storage/innobase/include/page0cur.ic
diff --git a/innobase/include/page0page.h b/storage/innobase/include/page0page.h
index c4ffa39d3ac..c4ffa39d3ac 100644
--- a/innobase/include/page0page.h
+++ b/storage/innobase/include/page0page.h
diff --git a/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic
index fd5281fdbec..fd5281fdbec 100644
--- a/innobase/include/page0page.ic
+++ b/storage/innobase/include/page0page.ic
diff --git a/innobase/include/page0types.h b/storage/innobase/include/page0types.h
index 525a0366a6f..525a0366a6f 100644
--- a/innobase/include/page0types.h
+++ b/storage/innobase/include/page0types.h
diff --git a/innobase/include/pars0grm.h b/storage/innobase/include/pars0grm.h
index b2790949057..b2790949057 100644
--- a/innobase/include/pars0grm.h
+++ b/storage/innobase/include/pars0grm.h
diff --git a/innobase/include/pars0opt.h b/storage/innobase/include/pars0opt.h
index d091c3ee2d0..d091c3ee2d0 100644
--- a/innobase/include/pars0opt.h
+++ b/storage/innobase/include/pars0opt.h
diff --git a/innobase/include/pars0opt.ic b/storage/innobase/include/pars0opt.ic
index 0bfa8526bee..0bfa8526bee 100644
--- a/innobase/include/pars0opt.ic
+++ b/storage/innobase/include/pars0opt.ic
diff --git a/innobase/include/pars0pars.h b/storage/innobase/include/pars0pars.h
index 62a41a881e8..62a41a881e8 100644
--- a/innobase/include/pars0pars.h
+++ b/storage/innobase/include/pars0pars.h
diff --git a/innobase/include/pars0pars.ic b/storage/innobase/include/pars0pars.ic
index 155b6659ace..155b6659ace 100644
--- a/innobase/include/pars0pars.ic
+++ b/storage/innobase/include/pars0pars.ic
diff --git a/innobase/include/pars0sym.h b/storage/innobase/include/pars0sym.h
index 633a49e3cb5..633a49e3cb5 100644
--- a/innobase/include/pars0sym.h
+++ b/storage/innobase/include/pars0sym.h
diff --git a/innobase/include/pars0sym.ic b/storage/innobase/include/pars0sym.ic
index 9508d423769..9508d423769 100644
--- a/innobase/include/pars0sym.ic
+++ b/storage/innobase/include/pars0sym.ic
diff --git a/innobase/include/pars0types.h b/storage/innobase/include/pars0types.h
index 9fbfd6efaa1..9fbfd6efaa1 100644
--- a/innobase/include/pars0types.h
+++ b/storage/innobase/include/pars0types.h
diff --git a/innobase/include/que0que.h b/storage/innobase/include/que0que.h
index 4113e52d425..4113e52d425 100644
--- a/innobase/include/que0que.h
+++ b/storage/innobase/include/que0que.h
diff --git a/innobase/include/que0que.ic b/storage/innobase/include/que0que.ic
index ae4ed10560f..ae4ed10560f 100644
--- a/innobase/include/que0que.ic
+++ b/storage/innobase/include/que0que.ic
diff --git a/innobase/include/que0types.h b/storage/innobase/include/que0types.h
index e59c2313a5a..e59c2313a5a 100644
--- a/innobase/include/que0types.h
+++ b/storage/innobase/include/que0types.h
diff --git a/innobase/include/read0read.h b/storage/innobase/include/read0read.h
index db6bf888095..db6bf888095 100644
--- a/innobase/include/read0read.h
+++ b/storage/innobase/include/read0read.h
diff --git a/innobase/include/read0read.ic b/storage/innobase/include/read0read.ic
index ec9ef5814bb..ec9ef5814bb 100644
--- a/innobase/include/read0read.ic
+++ b/storage/innobase/include/read0read.ic
diff --git a/innobase/include/read0types.h b/storage/innobase/include/read0types.h
index 5eb3e533f89..5eb3e533f89 100644
--- a/innobase/include/read0types.h
+++ b/storage/innobase/include/read0types.h
diff --git a/innobase/include/rem0cmp.h b/storage/innobase/include/rem0cmp.h
index 1b1ee26b809..1b1ee26b809 100644
--- a/innobase/include/rem0cmp.h
+++ b/storage/innobase/include/rem0cmp.h
diff --git a/innobase/include/rem0cmp.ic b/storage/innobase/include/rem0cmp.ic
index b86534e0a6a..b86534e0a6a 100644
--- a/innobase/include/rem0cmp.ic
+++ b/storage/innobase/include/rem0cmp.ic
diff --git a/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h
index 1d15b8d1c77..1d15b8d1c77 100644
--- a/innobase/include/rem0rec.h
+++ b/storage/innobase/include/rem0rec.h
diff --git a/innobase/include/rem0rec.ic b/storage/innobase/include/rem0rec.ic
index e2dceb6bae5..e2dceb6bae5 100644
--- a/innobase/include/rem0rec.ic
+++ b/storage/innobase/include/rem0rec.ic
diff --git a/innobase/include/rem0types.h b/storage/innobase/include/rem0types.h
index 94c394499c5..94c394499c5 100644
--- a/innobase/include/rem0types.h
+++ b/storage/innobase/include/rem0types.h
diff --git a/innobase/include/row0ins.h b/storage/innobase/include/row0ins.h
index a5b4b74e7fc..a5b4b74e7fc 100644
--- a/innobase/include/row0ins.h
+++ b/storage/innobase/include/row0ins.h
diff --git a/innobase/include/row0ins.ic b/storage/innobase/include/row0ins.ic
index 80a232d41ee..80a232d41ee 100644
--- a/innobase/include/row0ins.ic
+++ b/storage/innobase/include/row0ins.ic
diff --git a/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h
index 4e6ff73b0f8..4e6ff73b0f8 100644
--- a/innobase/include/row0mysql.h
+++ b/storage/innobase/include/row0mysql.h
diff --git a/innobase/include/row0mysql.ic b/storage/innobase/include/row0mysql.ic
index aa8a70d8761..aa8a70d8761 100644
--- a/innobase/include/row0mysql.ic
+++ b/storage/innobase/include/row0mysql.ic
diff --git a/innobase/include/row0purge.h b/storage/innobase/include/row0purge.h
index 4c863441442..4c863441442 100644
--- a/innobase/include/row0purge.h
+++ b/storage/innobase/include/row0purge.h
diff --git a/innobase/include/row0purge.ic b/storage/innobase/include/row0purge.ic
index 50aabf0bc1b..50aabf0bc1b 100644
--- a/innobase/include/row0purge.ic
+++ b/storage/innobase/include/row0purge.ic
diff --git a/innobase/include/row0row.h b/storage/innobase/include/row0row.h
index 782973d8f5d..782973d8f5d 100644
--- a/innobase/include/row0row.h
+++ b/storage/innobase/include/row0row.h
diff --git a/innobase/include/row0row.ic b/storage/innobase/include/row0row.ic
index 85410beacf0..85410beacf0 100644
--- a/innobase/include/row0row.ic
+++ b/storage/innobase/include/row0row.ic
diff --git a/innobase/include/row0sel.h b/storage/innobase/include/row0sel.h
index 8d5187bfc1c..8d5187bfc1c 100644
--- a/innobase/include/row0sel.h
+++ b/storage/innobase/include/row0sel.h
diff --git a/innobase/include/row0sel.ic b/storage/innobase/include/row0sel.ic
index 600c6204571..600c6204571 100644
--- a/innobase/include/row0sel.ic
+++ b/storage/innobase/include/row0sel.ic
diff --git a/innobase/include/row0types.h b/storage/innobase/include/row0types.h
index 79b864f4835..79b864f4835 100644
--- a/innobase/include/row0types.h
+++ b/storage/innobase/include/row0types.h
diff --git a/innobase/include/row0uins.h b/storage/innobase/include/row0uins.h
index fc57881f691..fc57881f691 100644
--- a/innobase/include/row0uins.h
+++ b/storage/innobase/include/row0uins.h
diff --git a/innobase/include/row0uins.ic b/storage/innobase/include/row0uins.ic
index 2b3d5a10f95..2b3d5a10f95 100644
--- a/innobase/include/row0uins.ic
+++ b/storage/innobase/include/row0uins.ic
diff --git a/innobase/include/row0umod.h b/storage/innobase/include/row0umod.h
index 2c8e19a80ae..2c8e19a80ae 100644
--- a/innobase/include/row0umod.h
+++ b/storage/innobase/include/row0umod.h
diff --git a/innobase/include/row0umod.ic b/storage/innobase/include/row0umod.ic
index fcbf4dbc1f3..fcbf4dbc1f3 100644
--- a/innobase/include/row0umod.ic
+++ b/storage/innobase/include/row0umod.ic
diff --git a/innobase/include/row0undo.h b/storage/innobase/include/row0undo.h
index d64a00dcb8f..d64a00dcb8f 100644
--- a/innobase/include/row0undo.h
+++ b/storage/innobase/include/row0undo.h
diff --git a/innobase/include/row0undo.ic b/storage/innobase/include/row0undo.ic
index e7f89c7de67..e7f89c7de67 100644
--- a/innobase/include/row0undo.ic
+++ b/storage/innobase/include/row0undo.ic
diff --git a/innobase/include/row0upd.h b/storage/innobase/include/row0upd.h
index 673e0511153..673e0511153 100644
--- a/innobase/include/row0upd.h
+++ b/storage/innobase/include/row0upd.h
diff --git a/innobase/include/row0upd.ic b/storage/innobase/include/row0upd.ic
index acbb11aa1c7..acbb11aa1c7 100644
--- a/innobase/include/row0upd.ic
+++ b/storage/innobase/include/row0upd.ic
diff --git a/innobase/include/row0vers.h b/storage/innobase/include/row0vers.h
index 079d841f7f3..079d841f7f3 100644
--- a/innobase/include/row0vers.h
+++ b/storage/innobase/include/row0vers.h
diff --git a/innobase/include/row0vers.ic b/storage/innobase/include/row0vers.ic
index ab1e264635b..ab1e264635b 100644
--- a/innobase/include/row0vers.ic
+++ b/storage/innobase/include/row0vers.ic
diff --git a/innobase/include/srv0que.h b/storage/innobase/include/srv0que.h
index 05c339cdd32..05c339cdd32 100644
--- a/innobase/include/srv0que.h
+++ b/storage/innobase/include/srv0que.h
diff --git a/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index 116ae7b6438..116ae7b6438 100644
--- a/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
diff --git a/innobase/include/srv0srv.ic b/storage/innobase/include/srv0srv.ic
index 73e0729660f..73e0729660f 100644
--- a/innobase/include/srv0srv.ic
+++ b/storage/innobase/include/srv0srv.ic
diff --git a/innobase/include/srv0start.h b/storage/innobase/include/srv0start.h
index d24f119c0b0..d24f119c0b0 100644
--- a/innobase/include/srv0start.h
+++ b/storage/innobase/include/srv0start.h
diff --git a/innobase/include/sync0arr.h b/storage/innobase/include/sync0arr.h
index fecd910683e..fecd910683e 100644
--- a/innobase/include/sync0arr.h
+++ b/storage/innobase/include/sync0arr.h
diff --git a/innobase/include/sync0arr.ic b/storage/innobase/include/sync0arr.ic
index dbe35c033e5..dbe35c033e5 100644
--- a/innobase/include/sync0arr.ic
+++ b/storage/innobase/include/sync0arr.ic
diff --git a/innobase/include/sync0rw.h b/storage/innobase/include/sync0rw.h
index 911c8ac3f4a..911c8ac3f4a 100644
--- a/innobase/include/sync0rw.h
+++ b/storage/innobase/include/sync0rw.h
diff --git a/innobase/include/sync0rw.ic b/storage/innobase/include/sync0rw.ic
index b1ae636010a..b1ae636010a 100644
--- a/innobase/include/sync0rw.ic
+++ b/storage/innobase/include/sync0rw.ic
diff --git a/innobase/include/sync0sync.h b/storage/innobase/include/sync0sync.h
index c798c047fa3..c798c047fa3 100644
--- a/innobase/include/sync0sync.h
+++ b/storage/innobase/include/sync0sync.h
diff --git a/innobase/include/sync0sync.ic b/storage/innobase/include/sync0sync.ic
index b3fde61db5e..b3fde61db5e 100644
--- a/innobase/include/sync0sync.ic
+++ b/storage/innobase/include/sync0sync.ic
diff --git a/innobase/include/sync0types.h b/storage/innobase/include/sync0types.h
index 57478426f25..57478426f25 100644
--- a/innobase/include/sync0types.h
+++ b/storage/innobase/include/sync0types.h
diff --git a/innobase/include/thr0loc.h b/storage/innobase/include/thr0loc.h
index 32e2dc3ae93..32e2dc3ae93 100644
--- a/innobase/include/thr0loc.h
+++ b/storage/innobase/include/thr0loc.h
diff --git a/innobase/include/thr0loc.ic b/storage/innobase/include/thr0loc.ic
index b8b8136180c..b8b8136180c 100644
--- a/innobase/include/thr0loc.ic
+++ b/storage/innobase/include/thr0loc.ic
diff --git a/innobase/include/trx0purge.h b/storage/innobase/include/trx0purge.h
index 049c79aec9b..049c79aec9b 100644
--- a/innobase/include/trx0purge.h
+++ b/storage/innobase/include/trx0purge.h
diff --git a/innobase/include/trx0purge.ic b/storage/innobase/include/trx0purge.ic
index 451e8ca31d0..451e8ca31d0 100644
--- a/innobase/include/trx0purge.ic
+++ b/storage/innobase/include/trx0purge.ic
diff --git a/innobase/include/trx0rec.h b/storage/innobase/include/trx0rec.h
index 4387ce1a61e..4387ce1a61e 100644
--- a/innobase/include/trx0rec.h
+++ b/storage/innobase/include/trx0rec.h
diff --git a/innobase/include/trx0rec.ic b/storage/innobase/include/trx0rec.ic
index cd02ed9e04c..cd02ed9e04c 100644
--- a/innobase/include/trx0rec.ic
+++ b/storage/innobase/include/trx0rec.ic
diff --git a/innobase/include/trx0roll.h b/storage/innobase/include/trx0roll.h
index 944142a299d..944142a299d 100644
--- a/innobase/include/trx0roll.h
+++ b/storage/innobase/include/trx0roll.h
diff --git a/innobase/include/trx0roll.ic b/storage/innobase/include/trx0roll.ic
index dfde83ac478..dfde83ac478 100644
--- a/innobase/include/trx0roll.ic
+++ b/storage/innobase/include/trx0roll.ic
diff --git a/innobase/include/trx0rseg.h b/storage/innobase/include/trx0rseg.h
index fd64612ab3f..fd64612ab3f 100644
--- a/innobase/include/trx0rseg.h
+++ b/storage/innobase/include/trx0rseg.h
diff --git a/innobase/include/trx0rseg.ic b/storage/innobase/include/trx0rseg.ic
index c9ac50ebf16..c9ac50ebf16 100644
--- a/innobase/include/trx0rseg.ic
+++ b/storage/innobase/include/trx0rseg.ic
diff --git a/innobase/include/trx0sys.h b/storage/innobase/include/trx0sys.h
index 31e8607f8a0..31e8607f8a0 100644
--- a/innobase/include/trx0sys.h
+++ b/storage/innobase/include/trx0sys.h
diff --git a/innobase/include/trx0sys.ic b/storage/innobase/include/trx0sys.ic
index 24610bef827..24610bef827 100644
--- a/innobase/include/trx0sys.ic
+++ b/storage/innobase/include/trx0sys.ic
diff --git a/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h
index 146730d46f8..146730d46f8 100644
--- a/innobase/include/trx0trx.h
+++ b/storage/innobase/include/trx0trx.h
diff --git a/innobase/include/trx0trx.ic b/storage/innobase/include/trx0trx.ic
index 54cf2ff331f..54cf2ff331f 100644
--- a/innobase/include/trx0trx.ic
+++ b/storage/innobase/include/trx0trx.ic
diff --git a/innobase/include/trx0types.h b/storage/innobase/include/trx0types.h
index 2965eb4451f..2965eb4451f 100644
--- a/innobase/include/trx0types.h
+++ b/storage/innobase/include/trx0types.h
diff --git a/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h
index bd7337e4f90..bd7337e4f90 100644
--- a/innobase/include/trx0undo.h
+++ b/storage/innobase/include/trx0undo.h
diff --git a/innobase/include/trx0undo.ic b/storage/innobase/include/trx0undo.ic
index a04b234b495..a04b234b495 100644
--- a/innobase/include/trx0undo.ic
+++ b/storage/innobase/include/trx0undo.ic
diff --git a/innobase/include/trx0xa.h b/storage/innobase/include/trx0xa.h
index 34b7a2f95a8..34b7a2f95a8 100644
--- a/innobase/include/trx0xa.h
+++ b/storage/innobase/include/trx0xa.h
diff --git a/innobase/include/univ.i b/storage/innobase/include/univ.i
index 132ac9e18c5..132ac9e18c5 100644
--- a/innobase/include/univ.i
+++ b/storage/innobase/include/univ.i
diff --git a/innobase/include/usr0sess.h b/storage/innobase/include/usr0sess.h
index c7bcfb20fed..c7bcfb20fed 100644
--- a/innobase/include/usr0sess.h
+++ b/storage/innobase/include/usr0sess.h
diff --git a/innobase/include/usr0sess.ic b/storage/innobase/include/usr0sess.ic
index c851d5745b9..c851d5745b9 100644
--- a/innobase/include/usr0sess.ic
+++ b/storage/innobase/include/usr0sess.ic
diff --git a/innobase/include/usr0types.h b/storage/innobase/include/usr0types.h
index 29359425169..29359425169 100644
--- a/innobase/include/usr0types.h
+++ b/storage/innobase/include/usr0types.h
diff --git a/innobase/include/ut0byte.h b/storage/innobase/include/ut0byte.h
index 22d488abeaf..22d488abeaf 100644
--- a/innobase/include/ut0byte.h
+++ b/storage/innobase/include/ut0byte.h
diff --git a/innobase/include/ut0byte.ic b/storage/innobase/include/ut0byte.ic
index e141de3aa3f..e141de3aa3f 100644
--- a/innobase/include/ut0byte.ic
+++ b/storage/innobase/include/ut0byte.ic
diff --git a/innobase/include/ut0dbg.h b/storage/innobase/include/ut0dbg.h
index bc3f852626a..bc3f852626a 100644
--- a/innobase/include/ut0dbg.h
+++ b/storage/innobase/include/ut0dbg.h
diff --git a/innobase/include/ut0lst.h b/storage/innobase/include/ut0lst.h
index d290c476963..d290c476963 100644
--- a/innobase/include/ut0lst.h
+++ b/storage/innobase/include/ut0lst.h
diff --git a/innobase/include/ut0mem.h b/storage/innobase/include/ut0mem.h
index 74357f6bf13..74357f6bf13 100644
--- a/innobase/include/ut0mem.h
+++ b/storage/innobase/include/ut0mem.h
diff --git a/innobase/include/ut0mem.ic b/storage/innobase/include/ut0mem.ic
index 76c721112a0..76c721112a0 100644
--- a/innobase/include/ut0mem.ic
+++ b/storage/innobase/include/ut0mem.ic
diff --git a/innobase/include/ut0rnd.h b/storage/innobase/include/ut0rnd.h
index aeec5d2f6eb..aeec5d2f6eb 100644
--- a/innobase/include/ut0rnd.h
+++ b/storage/innobase/include/ut0rnd.h
diff --git a/innobase/include/ut0rnd.ic b/storage/innobase/include/ut0rnd.ic
index d2ab087d491..d2ab087d491 100644
--- a/innobase/include/ut0rnd.ic
+++ b/storage/innobase/include/ut0rnd.ic
diff --git a/innobase/include/ut0sort.h b/storage/innobase/include/ut0sort.h
index d0a3d34e79e..d0a3d34e79e 100644
--- a/innobase/include/ut0sort.h
+++ b/storage/innobase/include/ut0sort.h
diff --git a/innobase/include/ut0ut.h b/storage/innobase/include/ut0ut.h
index 8938957cd12..8938957cd12 100644
--- a/innobase/include/ut0ut.h
+++ b/storage/innobase/include/ut0ut.h
diff --git a/innobase/include/ut0ut.ic b/storage/innobase/include/ut0ut.ic
index 9a0ef1c0d5b..9a0ef1c0d5b 100644
--- a/innobase/include/ut0ut.ic
+++ b/storage/innobase/include/ut0ut.ic
diff --git a/innobase/lock/Makefile.am b/storage/innobase/lock/Makefile.am
index 549eb2604e3..549eb2604e3 100644
--- a/innobase/lock/Makefile.am
+++ b/storage/innobase/lock/Makefile.am
diff --git a/innobase/lock/lock0lock.c b/storage/innobase/lock/lock0lock.c
index 280c4871ee9..280c4871ee9 100644
--- a/innobase/lock/lock0lock.c
+++ b/storage/innobase/lock/lock0lock.c
diff --git a/innobase/lock/makefilewin b/storage/innobase/lock/makefilewin
index 149b0a2fed6..149b0a2fed6 100644
--- a/innobase/lock/makefilewin
+++ b/storage/innobase/lock/makefilewin
diff --git a/innobase/log/Makefile.am b/storage/innobase/log/Makefile.am
index 2dbaf93e6d9..2dbaf93e6d9 100644
--- a/innobase/log/Makefile.am
+++ b/storage/innobase/log/Makefile.am
diff --git a/innobase/log/log0log.c b/storage/innobase/log/log0log.c
index 2f76bf450db..2f76bf450db 100644
--- a/innobase/log/log0log.c
+++ b/storage/innobase/log/log0log.c
diff --git a/innobase/log/log0recv.c b/storage/innobase/log/log0recv.c
index 42e854398ba..42e854398ba 100644
--- a/innobase/log/log0recv.c
+++ b/storage/innobase/log/log0recv.c
diff --git a/innobase/log/makefilewin b/storage/innobase/log/makefilewin
index a690af3bb35..a690af3bb35 100644
--- a/innobase/log/makefilewin
+++ b/storage/innobase/log/makefilewin
diff --git a/innobase/mach/Makefile.am b/storage/innobase/mach/Makefile.am
index ce827c8033f..ce827c8033f 100644
--- a/innobase/mach/Makefile.am
+++ b/storage/innobase/mach/Makefile.am
diff --git a/innobase/mach/mach0data.c b/storage/innobase/mach/mach0data.c
index ff7265b34f4..ff7265b34f4 100644
--- a/innobase/mach/mach0data.c
+++ b/storage/innobase/mach/mach0data.c
diff --git a/innobase/mach/makefilewin b/storage/innobase/mach/makefilewin
index 5306b0fe14c..5306b0fe14c 100644
--- a/innobase/mach/makefilewin
+++ b/storage/innobase/mach/makefilewin
diff --git a/innobase/makefilewin b/storage/innobase/makefilewin
index 1bd8d96e5e7..1bd8d96e5e7 100644
--- a/innobase/makefilewin
+++ b/storage/innobase/makefilewin
diff --git a/innobase/mem/Makefile.am b/storage/innobase/mem/Makefile.am
index 10b7771b580..10b7771b580 100644
--- a/innobase/mem/Makefile.am
+++ b/storage/innobase/mem/Makefile.am
diff --git a/innobase/mem/makefilewin b/storage/innobase/mem/makefilewin
index 8a30f8a6e71..8a30f8a6e71 100644
--- a/innobase/mem/makefilewin
+++ b/storage/innobase/mem/makefilewin
diff --git a/innobase/mem/mem0dbg.c b/storage/innobase/mem/mem0dbg.c
index 7c0be818948..7c0be818948 100644
--- a/innobase/mem/mem0dbg.c
+++ b/storage/innobase/mem/mem0dbg.c
diff --git a/innobase/mem/mem0mem.c b/storage/innobase/mem/mem0mem.c
index 85f0119d02a..85f0119d02a 100644
--- a/innobase/mem/mem0mem.c
+++ b/storage/innobase/mem/mem0mem.c
diff --git a/innobase/mem/mem0pool.c b/storage/innobase/mem/mem0pool.c
index cb891a03092..cb891a03092 100644
--- a/innobase/mem/mem0pool.c
+++ b/storage/innobase/mem/mem0pool.c
diff --git a/innobase/mtr/Makefile.am b/storage/innobase/mtr/Makefile.am
index 1e93a34ce23..1e93a34ce23 100644
--- a/innobase/mtr/Makefile.am
+++ b/storage/innobase/mtr/Makefile.am
diff --git a/innobase/mtr/makefilewin b/storage/innobase/mtr/makefilewin
index 9da0863bd28..9da0863bd28 100644
--- a/innobase/mtr/makefilewin
+++ b/storage/innobase/mtr/makefilewin
diff --git a/innobase/mtr/mtr0log.c b/storage/innobase/mtr/mtr0log.c
index 0308619073a..0308619073a 100644
--- a/innobase/mtr/mtr0log.c
+++ b/storage/innobase/mtr/mtr0log.c
diff --git a/innobase/mtr/mtr0mtr.c b/storage/innobase/mtr/mtr0mtr.c
index da045be1f62..da045be1f62 100644
--- a/innobase/mtr/mtr0mtr.c
+++ b/storage/innobase/mtr/mtr0mtr.c
diff --git a/innobase/my_cnf b/storage/innobase/my_cnf
index 94365237841..94365237841 100644
--- a/innobase/my_cnf
+++ b/storage/innobase/my_cnf
diff --git a/innobase/os/Makefile.am b/storage/innobase/os/Makefile.am
index 3b09a10efb5..3b09a10efb5 100644
--- a/innobase/os/Makefile.am
+++ b/storage/innobase/os/Makefile.am
diff --git a/innobase/os/makefilewin b/storage/innobase/os/makefilewin
index 8bc8d08611b..8bc8d08611b 100644
--- a/innobase/os/makefilewin
+++ b/storage/innobase/os/makefilewin
diff --git a/innobase/os/os0file.c b/storage/innobase/os/os0file.c
index 48dc808e36c..48dc808e36c 100644
--- a/innobase/os/os0file.c
+++ b/storage/innobase/os/os0file.c
diff --git a/innobase/os/os0proc.c b/storage/innobase/os/os0proc.c
index 167aed93de7..167aed93de7 100644
--- a/innobase/os/os0proc.c
+++ b/storage/innobase/os/os0proc.c
diff --git a/innobase/os/os0sync.c b/storage/innobase/os/os0sync.c
index 356d7c8c163..356d7c8c163 100644
--- a/innobase/os/os0sync.c
+++ b/storage/innobase/os/os0sync.c
diff --git a/innobase/os/os0thread.c b/storage/innobase/os/os0thread.c
index 847d0ee1cc7..847d0ee1cc7 100644
--- a/innobase/os/os0thread.c
+++ b/storage/innobase/os/os0thread.c
diff --git a/innobase/page/Makefile.am b/storage/innobase/page/Makefile.am
index 2e260787438..2e260787438 100644
--- a/innobase/page/Makefile.am
+++ b/storage/innobase/page/Makefile.am
diff --git a/innobase/page/makefilewin b/storage/innobase/page/makefilewin
index 4a132cf828c..4a132cf828c 100644
--- a/innobase/page/makefilewin
+++ b/storage/innobase/page/makefilewin
diff --git a/innobase/page/page0cur.c b/storage/innobase/page/page0cur.c
index d0b89e81787..d0b89e81787 100644
--- a/innobase/page/page0cur.c
+++ b/storage/innobase/page/page0cur.c
diff --git a/innobase/page/page0page.c b/storage/innobase/page/page0page.c
index 7e09cdf073e..7e09cdf073e 100644
--- a/innobase/page/page0page.c
+++ b/storage/innobase/page/page0page.c
diff --git a/innobase/pars/Makefile.am b/storage/innobase/pars/Makefile.am
index 2356f330486..2356f330486 100644
--- a/innobase/pars/Makefile.am
+++ b/storage/innobase/pars/Makefile.am
diff --git a/innobase/pars/lexyy.c b/storage/innobase/pars/lexyy.c
index 1145ca295e7..1145ca295e7 100644
--- a/innobase/pars/lexyy.c
+++ b/storage/innobase/pars/lexyy.c
diff --git a/innobase/pars/makefilewin b/storage/innobase/pars/makefilewin
index f183d89cbe2..f183d89cbe2 100644
--- a/innobase/pars/makefilewin
+++ b/storage/innobase/pars/makefilewin
diff --git a/innobase/pars/pars0grm.c b/storage/innobase/pars/pars0grm.c
index 05b75398084..05b75398084 100644
--- a/innobase/pars/pars0grm.c
+++ b/storage/innobase/pars/pars0grm.c
diff --git a/innobase/pars/pars0grm.h b/storage/innobase/pars/pars0grm.h
index b2790949057..b2790949057 100644
--- a/innobase/pars/pars0grm.h
+++ b/storage/innobase/pars/pars0grm.h
diff --git a/innobase/pars/pars0grm.y b/storage/innobase/pars/pars0grm.y
index a142d04301e..a142d04301e 100644
--- a/innobase/pars/pars0grm.y
+++ b/storage/innobase/pars/pars0grm.y
diff --git a/innobase/pars/pars0lex.l b/storage/innobase/pars/pars0lex.l
index e481634f77e..e481634f77e 100644
--- a/innobase/pars/pars0lex.l
+++ b/storage/innobase/pars/pars0lex.l
diff --git a/innobase/pars/pars0opt.c b/storage/innobase/pars/pars0opt.c
index 88022e2efe1..88022e2efe1 100644
--- a/innobase/pars/pars0opt.c
+++ b/storage/innobase/pars/pars0opt.c
diff --git a/innobase/pars/pars0pars.c b/storage/innobase/pars/pars0pars.c
index c62184abd85..c62184abd85 100644
--- a/innobase/pars/pars0pars.c
+++ b/storage/innobase/pars/pars0pars.c
diff --git a/innobase/pars/pars0sym.c b/storage/innobase/pars/pars0sym.c
index 8ade5579e47..8ade5579e47 100644
--- a/innobase/pars/pars0sym.c
+++ b/storage/innobase/pars/pars0sym.c
diff --git a/innobase/que/Makefile.am b/storage/innobase/que/Makefile.am
index d9c046b4f4c..d9c046b4f4c 100644
--- a/innobase/que/Makefile.am
+++ b/storage/innobase/que/Makefile.am
diff --git a/innobase/que/makefilewin b/storage/innobase/que/makefilewin
index 9661c716551..9661c716551 100644
--- a/innobase/que/makefilewin
+++ b/storage/innobase/que/makefilewin
diff --git a/innobase/que/que0que.c b/storage/innobase/que/que0que.c
index a0a6adf9b83..a0a6adf9b83 100644
--- a/innobase/que/que0que.c
+++ b/storage/innobase/que/que0que.c
diff --git a/innobase/read/Makefile.am b/storage/innobase/read/Makefile.am
index 7edf2a5a2e1..7edf2a5a2e1 100644
--- a/innobase/read/Makefile.am
+++ b/storage/innobase/read/Makefile.am
diff --git a/innobase/read/makefilewin b/storage/innobase/read/makefilewin
index 39593993a67..39593993a67 100644
--- a/innobase/read/makefilewin
+++ b/storage/innobase/read/makefilewin
diff --git a/innobase/read/read0read.c b/storage/innobase/read/read0read.c
index 0c4a037508e..0c4a037508e 100644
--- a/innobase/read/read0read.c
+++ b/storage/innobase/read/read0read.c
diff --git a/innobase/rem/Makefile.am b/storage/innobase/rem/Makefile.am
index e2b2fdaf669..e2b2fdaf669 100644
--- a/innobase/rem/Makefile.am
+++ b/storage/innobase/rem/Makefile.am
diff --git a/innobase/rem/makefilewin b/storage/innobase/rem/makefilewin
index 51ca4a92012..51ca4a92012 100644
--- a/innobase/rem/makefilewin
+++ b/storage/innobase/rem/makefilewin
diff --git a/innobase/rem/rem0cmp.c b/storage/innobase/rem/rem0cmp.c
index 7c33476fb9e..7c33476fb9e 100644
--- a/innobase/rem/rem0cmp.c
+++ b/storage/innobase/rem/rem0cmp.c
diff --git a/innobase/rem/rem0rec.c b/storage/innobase/rem/rem0rec.c
index fbc33aea669..fbc33aea669 100644
--- a/innobase/rem/rem0rec.c
+++ b/storage/innobase/rem/rem0rec.c
diff --git a/innobase/row/Makefile.am b/storage/innobase/row/Makefile.am
index bd09f9a237d..bd09f9a237d 100644
--- a/innobase/row/Makefile.am
+++ b/storage/innobase/row/Makefile.am
diff --git a/innobase/row/makefilewin b/storage/innobase/row/makefilewin
index c17240c6119..c17240c6119 100644
--- a/innobase/row/makefilewin
+++ b/storage/innobase/row/makefilewin
diff --git a/innobase/row/row0ins.c b/storage/innobase/row/row0ins.c
index bce775c25d6..bce775c25d6 100644
--- a/innobase/row/row0ins.c
+++ b/storage/innobase/row/row0ins.c
diff --git a/innobase/row/row0mysql.c b/storage/innobase/row/row0mysql.c
index 2ac0824b331..2ac0824b331 100644
--- a/innobase/row/row0mysql.c
+++ b/storage/innobase/row/row0mysql.c
diff --git a/innobase/row/row0purge.c b/storage/innobase/row/row0purge.c
index abcf97110d9..abcf97110d9 100644
--- a/innobase/row/row0purge.c
+++ b/storage/innobase/row/row0purge.c
diff --git a/innobase/row/row0row.c b/storage/innobase/row/row0row.c
index 9a74397dc08..9a74397dc08 100644
--- a/innobase/row/row0row.c
+++ b/storage/innobase/row/row0row.c
diff --git a/innobase/row/row0sel.c b/storage/innobase/row/row0sel.c
index 15439bed7e7..15439bed7e7 100644
--- a/innobase/row/row0sel.c
+++ b/storage/innobase/row/row0sel.c
diff --git a/innobase/row/row0uins.c b/storage/innobase/row/row0uins.c
index 9dc860d70b1..9dc860d70b1 100644
--- a/innobase/row/row0uins.c
+++ b/storage/innobase/row/row0uins.c
diff --git a/innobase/row/row0umod.c b/storage/innobase/row/row0umod.c
index 0225a9faec5..0225a9faec5 100644
--- a/innobase/row/row0umod.c
+++ b/storage/innobase/row/row0umod.c
diff --git a/innobase/row/row0undo.c b/storage/innobase/row/row0undo.c
index 435c0279dbb..435c0279dbb 100644
--- a/innobase/row/row0undo.c
+++ b/storage/innobase/row/row0undo.c
diff --git a/innobase/row/row0upd.c b/storage/innobase/row/row0upd.c
index 514fb6bd577..514fb6bd577 100644
--- a/innobase/row/row0upd.c
+++ b/storage/innobase/row/row0upd.c
diff --git a/innobase/row/row0vers.c b/storage/innobase/row/row0vers.c
index 8e747423047..8e747423047 100644
--- a/innobase/row/row0vers.c
+++ b/storage/innobase/row/row0vers.c
diff --git a/innobase/srv/Makefile.am b/storage/innobase/srv/Makefile.am
index 752683b82b8..752683b82b8 100644
--- a/innobase/srv/Makefile.am
+++ b/storage/innobase/srv/Makefile.am
diff --git a/innobase/srv/makefilewin b/storage/innobase/srv/makefilewin
index 129c65ec220..129c65ec220 100644
--- a/innobase/srv/makefilewin
+++ b/storage/innobase/srv/makefilewin
diff --git a/innobase/srv/srv0que.c b/storage/innobase/srv/srv0que.c
index 9dc9ae453d9..9dc9ae453d9 100644
--- a/innobase/srv/srv0que.c
+++ b/storage/innobase/srv/srv0que.c
diff --git a/innobase/srv/srv0srv.c b/storage/innobase/srv/srv0srv.c
index 837c5be2bb6..837c5be2bb6 100644
--- a/innobase/srv/srv0srv.c
+++ b/storage/innobase/srv/srv0srv.c
diff --git a/innobase/srv/srv0start.c b/storage/innobase/srv/srv0start.c
index 7798e0c8e32..7798e0c8e32 100644
--- a/innobase/srv/srv0start.c
+++ b/storage/innobase/srv/srv0start.c
diff --git a/innobase/sync/Makefile.am b/storage/innobase/sync/Makefile.am
index c95955a733b..c95955a733b 100644
--- a/innobase/sync/Makefile.am
+++ b/storage/innobase/sync/Makefile.am
diff --git a/innobase/sync/makefilewin b/storage/innobase/sync/makefilewin
index 73cff40405a..73cff40405a 100644
--- a/innobase/sync/makefilewin
+++ b/storage/innobase/sync/makefilewin
diff --git a/innobase/sync/sync0arr.c b/storage/innobase/sync/sync0arr.c
index 198ef49ca9f..198ef49ca9f 100644
--- a/innobase/sync/sync0arr.c
+++ b/storage/innobase/sync/sync0arr.c
diff --git a/innobase/sync/sync0rw.c b/storage/innobase/sync/sync0rw.c
index 973b46fdd50..973b46fdd50 100644
--- a/innobase/sync/sync0rw.c
+++ b/storage/innobase/sync/sync0rw.c
diff --git a/innobase/sync/sync0sync.c b/storage/innobase/sync/sync0sync.c
index f0f0e9a3a2e..f0f0e9a3a2e 100644
--- a/innobase/sync/sync0sync.c
+++ b/storage/innobase/sync/sync0sync.c
diff --git a/innobase/thr/Makefile.am b/storage/innobase/thr/Makefile.am
index 62c39492c07..62c39492c07 100644
--- a/innobase/thr/Makefile.am
+++ b/storage/innobase/thr/Makefile.am
diff --git a/innobase/thr/makefilewin b/storage/innobase/thr/makefilewin
index 3f29ea1d3e3..3f29ea1d3e3 100644
--- a/innobase/thr/makefilewin
+++ b/storage/innobase/thr/makefilewin
diff --git a/innobase/thr/thr0loc.c b/storage/innobase/thr/thr0loc.c
index 033bb22807f..033bb22807f 100644
--- a/innobase/thr/thr0loc.c
+++ b/storage/innobase/thr/thr0loc.c
diff --git a/innobase/trx/Makefile.am b/storage/innobase/trx/Makefile.am
index 9e2b3c398e3..9e2b3c398e3 100644
--- a/innobase/trx/Makefile.am
+++ b/storage/innobase/trx/Makefile.am
diff --git a/innobase/trx/makefilewin b/storage/innobase/trx/makefilewin
index 35588779d66..35588779d66 100644
--- a/innobase/trx/makefilewin
+++ b/storage/innobase/trx/makefilewin
diff --git a/innobase/trx/trx0purge.c b/storage/innobase/trx/trx0purge.c
index 3df34111281..3df34111281 100644
--- a/innobase/trx/trx0purge.c
+++ b/storage/innobase/trx/trx0purge.c
diff --git a/innobase/trx/trx0rec.c b/storage/innobase/trx/trx0rec.c
index 3b7171e6038..3b7171e6038 100644
--- a/innobase/trx/trx0rec.c
+++ b/storage/innobase/trx/trx0rec.c
diff --git a/innobase/trx/trx0roll.c b/storage/innobase/trx/trx0roll.c
index fdfb7428129..fdfb7428129 100644
--- a/innobase/trx/trx0roll.c
+++ b/storage/innobase/trx/trx0roll.c
diff --git a/innobase/trx/trx0rseg.c b/storage/innobase/trx/trx0rseg.c
index a01d4bb835d..a01d4bb835d 100644
--- a/innobase/trx/trx0rseg.c
+++ b/storage/innobase/trx/trx0rseg.c
diff --git a/innobase/trx/trx0sys.c b/storage/innobase/trx/trx0sys.c
index 68fe6d5079a..68fe6d5079a 100644
--- a/innobase/trx/trx0sys.c
+++ b/storage/innobase/trx/trx0sys.c
diff --git a/innobase/trx/trx0trx.c b/storage/innobase/trx/trx0trx.c
index 10fbf3468c0..10fbf3468c0 100644
--- a/innobase/trx/trx0trx.c
+++ b/storage/innobase/trx/trx0trx.c
diff --git a/innobase/trx/trx0undo.c b/storage/innobase/trx/trx0undo.c
index 7441dd3f152..7441dd3f152 100644
--- a/innobase/trx/trx0undo.c
+++ b/storage/innobase/trx/trx0undo.c
diff --git a/innobase/usr/Makefile.am b/storage/innobase/usr/Makefile.am
index bdcc832a76e..bdcc832a76e 100644
--- a/innobase/usr/Makefile.am
+++ b/storage/innobase/usr/Makefile.am
diff --git a/innobase/usr/makefilewin b/storage/innobase/usr/makefilewin
index 66a77275e9b..66a77275e9b 100644
--- a/innobase/usr/makefilewin
+++ b/storage/innobase/usr/makefilewin
diff --git a/innobase/usr/usr0sess.c b/storage/innobase/usr/usr0sess.c
index 359c1552421..359c1552421 100644
--- a/innobase/usr/usr0sess.c
+++ b/storage/innobase/usr/usr0sess.c
diff --git a/innobase/ut/Makefile.am b/storage/innobase/ut/Makefile.am
index 2fdbb99e0f3..2fdbb99e0f3 100644
--- a/innobase/ut/Makefile.am
+++ b/storage/innobase/ut/Makefile.am
diff --git a/innobase/ut/makefilewin b/storage/innobase/ut/makefilewin
index 2fda190773b..2fda190773b 100644
--- a/innobase/ut/makefilewin
+++ b/storage/innobase/ut/makefilewin
diff --git a/innobase/ut/ut0byte.c b/storage/innobase/ut/ut0byte.c
index cc83aacc90b..cc83aacc90b 100644
--- a/innobase/ut/ut0byte.c
+++ b/storage/innobase/ut/ut0byte.c
diff --git a/innobase/ut/ut0dbg.c b/storage/innobase/ut/ut0dbg.c
index e810d8dead7..e810d8dead7 100644
--- a/innobase/ut/ut0dbg.c
+++ b/storage/innobase/ut/ut0dbg.c
diff --git a/innobase/ut/ut0mem.c b/storage/innobase/ut/ut0mem.c
index 3e8fd79a739..3e8fd79a739 100644
--- a/innobase/ut/ut0mem.c
+++ b/storage/innobase/ut/ut0mem.c
diff --git a/innobase/ut/ut0rnd.c b/storage/innobase/ut/ut0rnd.c
index 85d2e6094c3..85d2e6094c3 100644
--- a/innobase/ut/ut0rnd.c
+++ b/storage/innobase/ut/ut0rnd.c
diff --git a/innobase/ut/ut0ut.c b/storage/innobase/ut/ut0ut.c
index 1be5939303a..1be5939303a 100644
--- a/innobase/ut/ut0ut.c
+++ b/storage/innobase/ut/ut0ut.c
diff --git a/myisam/.cvsignore b/storage/myisam/.cvsignore
index ef6d92c6e18..ef6d92c6e18 100644
--- a/myisam/.cvsignore
+++ b/storage/myisam/.cvsignore
diff --git a/myisam/ChangeLog b/storage/myisam/ChangeLog
index 504202be43f..504202be43f 100644
--- a/myisam/ChangeLog
+++ b/storage/myisam/ChangeLog
diff --git a/myisam/Makefile.am b/storage/myisam/Makefile.am
index e4327070997..e4327070997 100644
--- a/myisam/Makefile.am
+++ b/storage/myisam/Makefile.am
diff --git a/myisam/NEWS b/storage/myisam/NEWS
index bb1f141610b..bb1f141610b 100644
--- a/myisam/NEWS
+++ b/storage/myisam/NEWS
diff --git a/myisam/TODO b/storage/myisam/TODO
index cad9486e1bb..cad9486e1bb 100644
--- a/myisam/TODO
+++ b/storage/myisam/TODO
diff --git a/storage/myisam/ft_boolean_search.c b/storage/myisam/ft_boolean_search.c
new file mode 100644
index 00000000000..34c3fe138a3
--- /dev/null
+++ b/storage/myisam/ft_boolean_search.c
@@ -0,0 +1,772 @@
+/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* Written by Sergei A. Golubchik, who has a shared copyright to this code */
+
+/* TODO: add caching - pre-read several index entries at once */
+
+/*
+  Added an optimization for full-text queries with plus-words. It is
+  implemented by sharing a maximal document id (max_docid) variable
+  inside each plus subtree. max_docid can be read by any word in the
+  plus subtree, but only a plus-word can update it.
+
+  The idea is that there is no need to search for a docid smaller
+  than the biggest docid already found inside the current plus subtree.
+
+ Examples:
+ +word1 word2
+ share same max_docid
+ max_docid updated by word1
+ +word1 +(word2 word3)
+ share same max_docid
+ max_docid updated by word1
+ +(word1 -word2) +(+word3 word4)
+ share same max_docid
+ max_docid updated by word3
+*/
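A minimal sketch of the idea in this comment, using hypothetical names and a flat posting array instead of the fulltext B-tree (none of this is code from the file itself):

  #include <stddef.h>

  typedef unsigned long docid_t;

  /* Advance through one word's sorted posting list, skipping every
     docid that is smaller than the plus subtree's shared max_docid:
     nothing before that point can still produce a match. */
  size_t ftb_skip_to(const docid_t *postings, size_t n, size_t pos,
                     docid_t shared_max_docid)
  {
    while (pos < n && postings[pos] < shared_max_docid)
      pos++;
    return pos;
  }

In the real code the postings are keys in the fulltext index, so the skip is performed by re-seeking the B-tree with the stored max_docid (the SEARCH_SAME branch in _ft2_search further down) rather than by walking an array.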
+
+#define FT_CORE
+#include "ftdefs.h"
+
+/* search with boolean queries */
+
+static double _wghts[11]=
+{
+ 0.131687242798354,
+ 0.197530864197531,
+ 0.296296296296296,
+ 0.444444444444444,
+ 0.666666666666667,
+ 1.000000000000000,
+ 1.500000000000000,
+ 2.250000000000000,
+ 3.375000000000000,
+ 5.062500000000000,
+ 7.593750000000000};
+static double *wghts=_wghts+5; /* wghts[i] = 1.5**i */
+
+static double _nwghts[11]=
+{
+ -0.065843621399177,
+ -0.098765432098766,
+ -0.148148148148148,
+ -0.222222222222222,
+ -0.333333333333334,
+ -0.500000000000000,
+ -0.750000000000000,
+ -1.125000000000000,
+ -1.687500000000000,
+ -2.531250000000000,
+ -3.796875000000000};
+static double *nwghts=_nwghts+5; /* nwghts[i] = -0.5*1.5**i */
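Both tables are the same geometric series evaluated at i = -5..5: wghts[i] = 1.5^i (1.5^-5 = 0.1316872..., 1.5^0 = 1, 1.5^5 = 7.59375) and nwghts[i] = -0.5 * 1.5^i (-0.5 * 1.5^5 = -3.796875, the last entry). The +5 pointer offset recenters the 11-element arrays so they can be indexed from -5 to 5, which is why the weight lookup in _ftb_parse_query below clamps param.plusminus to that range.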
+
+#define FTB_FLAG_TRUNC 1
+/* At most one of the following flags can be set */
+#define FTB_FLAG_YES 2
+#define FTB_FLAG_NO 4
+#define FTB_FLAG_WONLY 8
+
+typedef struct st_ftb_expr FTB_EXPR;
+struct st_ftb_expr
+{
+ FTB_EXPR *up;
+ uint flags;
+/* ^^^^^^^^^^^^^^^^^^ FTB_{EXPR,WORD} common section */
+ my_off_t docid[2];
+ my_off_t max_docid;
+ float weight;
+ float cur_weight;
+ LIST *phrase; /* phrase words */
+ uint yesses; /* number of "yes" words matched */
+ uint nos; /* number of "no" words matched */
+ uint ythresh; /* number of "yes" words in expr */
+ uint yweaks; /* number of "yes" words for scan only */
+};
+
+typedef struct st_ftb_word
+{
+ FTB_EXPR *up;
+ uint flags;
+/* ^^^^^^^^^^^^^^^^^^ FTB_{EXPR,WORD} common section */
+ my_off_t docid[2]; /* for index search and for scan */
+ my_off_t key_root;
+ my_off_t *max_docid;
+ MI_KEYDEF *keyinfo;
+ float weight;
+ uint ndepth;
+ uint len;
+ uchar off;
+ byte word[1];
+} FTB_WORD;
+
+typedef struct st_ft_info
+{
+ struct _ft_vft *please;
+ MI_INFO *info;
+ CHARSET_INFO *charset;
+ FTB_EXPR *root;
+ FTB_WORD **list;
+ MEM_ROOT mem_root;
+ QUEUE queue;
+ TREE no_dupes;
+ my_off_t lastpos;
+ uint keynr;
+ uchar with_scan;
+ enum { UNINITIALIZED, READY, INDEX_SEARCH, INDEX_DONE } state;
+} FTB;
+
+static int FTB_WORD_cmp(my_off_t *v, FTB_WORD *a, FTB_WORD *b)
+{
+ int i;
+
+ /* if a==curdoc, take it as a < b */
+ if (v && a->docid[0] == *v)
+ return -1;
+
+ /* ORDER BY docid, ndepth DESC */
+ i=CMP_NUM(a->docid[0], b->docid[0]);
+ if (!i)
+ i=CMP_NUM(b->ndepth,a->ndepth);
+ return i;
+}
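A note on how this comparator is used, partly inferred since the QUEUE implementation lives in mysys and is not shown in this hunk: ftb->queue keeps the FTB_WORDs ordered by the next docid each one can still match, with deeper-nested words first on ties (the ORDER BY comment above), and the optional first argument lets the caller pin any word whose current docid equals the document being processed to the front of the queue while that document is handled.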
+
+static int FTB_WORD_cmp_list(CHARSET_INFO *cs, FTB_WORD **a, FTB_WORD **b)
+{
+ /* ORDER BY word DESC, ndepth DESC */
+ int i= mi_compare_text(cs, (uchar*) (*b)->word+1,(*b)->len-1,
+ (uchar*) (*a)->word+1,(*a)->len-1,0,0);
+ if (!i)
+ i=CMP_NUM((*b)->ndepth,(*a)->ndepth);
+ return i;
+}
+
+static void _ftb_parse_query(FTB *ftb, byte **start, byte *end,
+ FTB_EXPR *up, uint depth, byte *up_quot)
+{
+ byte res;
+ FTB_PARAM param;
+ FT_WORD w;
+ FTB_WORD *ftbw;
+ FTB_EXPR *ftbe;
+ FTB_EXPR *tmp_expr;
+ FT_WORD *phrase_word;
+ LIST *phrase_list;
+ uint extra=HA_FT_WLEN+ftb->info->s->rec_reflength; /* just a shortcut */
+
+ if (ftb->state != UNINITIALIZED)
+ return;
+
+ param.prev=' ';
+ param.quot= up_quot;
+ while ((res=ft_get_word(ftb->charset,start,end,&w,&param)))
+ {
+ int r=param.plusminus;
+ float weight= (float) (param.pmsign ? nwghts : wghts)[(r>5)?5:((r<-5)?-5:r)];
+ switch (res) {
+ case 1: /* word found */
+ ftbw=(FTB_WORD *)alloc_root(&ftb->mem_root,
+ sizeof(FTB_WORD) +
+ (param.trunc ? MI_MAX_KEY_BUFF :
+ w.len*ftb->charset->mbmaxlen+extra));
+ ftbw->len=w.len+1;
+ ftbw->flags=0;
+ ftbw->off=0;
+ if (param.yesno>0) ftbw->flags|=FTB_FLAG_YES;
+ if (param.yesno<0) ftbw->flags|=FTB_FLAG_NO;
+ if (param.trunc) ftbw->flags|=FTB_FLAG_TRUNC;
+ ftbw->weight=weight;
+ ftbw->up=up;
+ ftbw->docid[0]=ftbw->docid[1]=HA_OFFSET_ERROR;
+ ftbw->ndepth= (param.yesno<0) + depth;
+ ftbw->key_root=HA_OFFSET_ERROR;
+ memcpy(ftbw->word+1, w.pos, w.len);
+ ftbw->word[0]=w.len;
+ if (param.yesno > 0) up->ythresh++;
+ queue_insert(& ftb->queue, (byte *)ftbw);
+ ftb->with_scan|=(param.trunc & FTB_FLAG_TRUNC);
+ for (tmp_expr= up; tmp_expr->up; tmp_expr= tmp_expr->up)
+ if (! (tmp_expr->flags & FTB_FLAG_YES))
+ break;
+ ftbw->max_docid= &tmp_expr->max_docid;
+ case 4: /* not indexed word (stopword or too short/long) */
+ if (! up_quot) break;
+ phrase_word= (FT_WORD *)alloc_root(&ftb->mem_root, sizeof(FT_WORD));
+ phrase_list= (LIST *)alloc_root(&ftb->mem_root, sizeof(LIST));
+ phrase_word->pos= w.pos;
+ phrase_word->len= w.len;
+ phrase_list->data= (void *)phrase_word;
+ up->phrase= list_add(up->phrase, phrase_list);
+ break;
+ case 2: /* left bracket */
+ ftbe=(FTB_EXPR *)alloc_root(&ftb->mem_root, sizeof(FTB_EXPR));
+ ftbe->flags=0;
+ if (param.yesno>0) ftbe->flags|=FTB_FLAG_YES;
+ if (param.yesno<0) ftbe->flags|=FTB_FLAG_NO;
+ ftbe->weight=weight;
+ ftbe->up=up;
+ ftbe->max_docid= ftbe->ythresh= ftbe->yweaks= 0;
+ ftbe->docid[0]=ftbe->docid[1]=HA_OFFSET_ERROR;
+ ftbe->phrase= NULL;
+ if (param.quot) ftb->with_scan|=2;
+ if (param.yesno > 0) up->ythresh++;
+ _ftb_parse_query(ftb, start, end, ftbe, depth+1, param.quot);
+ param.quot=0;
+ break;
+ case 3: /* right bracket */
+ if (up_quot) up->phrase= list_reverse(up->phrase);
+ return;
+ }
+ }
+ return;
+}
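How the sharing described in the file's header comment comes about: for every indexed word, the small for-loop over tmp_expr climbs from the word's enclosing expression toward the root and stops at the first ancestor that is not itself a '+' sub-expression (or at the root), then points ftbw->max_docid at that ancestor's counter. All words under the same chain of '+' expressions therefore share one counter, and because only FTB_FLAG_YES words ever write to it (see _ft2_search below), the rule that only plus-words update max_docid needs no extra bookkeeping.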
+
+static int _ftb_no_dupes_cmp(void* not_used __attribute__((unused)),
+ const void *a,const void *b)
+{
+ return CMP_NUM((*((my_off_t*)a)), (*((my_off_t*)b)));
+}
+
+/* returns 1 if the search was finished (must-word wasn't found) */
+static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search)
+{
+ int r;
+ int subkeys=1;
+ my_bool can_go_down;
+ MI_INFO *info=ftb->info;
+ uint off, extra=HA_FT_WLEN+info->s->base.rec_reflength;
+ byte *lastkey_buf=ftbw->word+ftbw->off;
+
+ LINT_INIT(off);
+ if (ftbw->flags & FTB_FLAG_TRUNC)
+ lastkey_buf+=ftbw->len;
+
+ if (init_search)
+ {
+ ftbw->key_root=info->s->state.key_root[ftb->keynr];
+ ftbw->keyinfo=info->s->keyinfo+ftb->keynr;
+
+ r=_mi_search(info, ftbw->keyinfo, (uchar*) ftbw->word, ftbw->len,
+ SEARCH_FIND | SEARCH_BIGGER, ftbw->key_root);
+ }
+ else
+ {
+ uint sflag= SEARCH_BIGGER;
+ if (ftbw->docid[0] < *ftbw->max_docid)
+ {
+ sflag|= SEARCH_SAME;
+ _mi_dpointer(info, (uchar *)(ftbw->word + ftbw->len + HA_FT_WLEN),
+ *ftbw->max_docid);
+ }
+ r=_mi_search(info, ftbw->keyinfo, (uchar*) lastkey_buf,
+ USE_WHOLE_KEY, sflag, ftbw->key_root);
+ }
+
+ can_go_down=(!ftbw->off && (init_search || (ftbw->flags & FTB_FLAG_TRUNC)));
+ /* Skip rows inserted by concurrent insert */
+ while (!r)
+ {
+ if (can_go_down)
+ {
+ /* going down ? */
+ off=info->lastkey_length-extra;
+ subkeys=ft_sintXkorr(info->lastkey+off);
+ }
+ if (subkeys<0 || info->lastpos < info->state->data_file_length)
+ break;
+ r= _mi_search_next(info, ftbw->keyinfo, info->lastkey,
+ info->lastkey_length,
+ SEARCH_BIGGER, ftbw->key_root);
+ }
+
+ if (!r && !ftbw->off)
+ {
+ r= mi_compare_text(ftb->charset,
+ info->lastkey+1,
+ info->lastkey_length-extra-1,
+ (uchar*) ftbw->word+1,
+ ftbw->len-1,
+ (my_bool) (ftbw->flags & FTB_FLAG_TRUNC),0);
+ }
+
+ if (r) /* not found */
+ {
+ if (!ftbw->off || !(ftbw->flags & FTB_FLAG_TRUNC))
+ {
+ ftbw->docid[0]=HA_OFFSET_ERROR;
+ if ((ftbw->flags & FTB_FLAG_YES) && ftbw->up->up==0)
+ {
+ /*
+ This word MUST BE present in every document returned,
+ so we can stop the search right now
+ */
+ ftb->state=INDEX_DONE;
+ return 1; /* search is done */
+ }
+ else
+ return 0;
+ }
+
+ /* going up to the first-level tree to continue search there */
+ _mi_dpointer(info, (uchar*) (lastkey_buf+HA_FT_WLEN), ftbw->key_root);
+ ftbw->key_root=info->s->state.key_root[ftb->keynr];
+ ftbw->keyinfo=info->s->keyinfo+ftb->keynr;
+ ftbw->off=0;
+ return _ft2_search(ftb, ftbw, 0);
+ }
+
+ /* matching key found */
+ memcpy(lastkey_buf, info->lastkey, info->lastkey_length);
+ if (lastkey_buf == ftbw->word)
+ ftbw->len=info->lastkey_length-extra;
+
+ /* going down ? */
+ if (subkeys<0)
+ {
+ /*
+ yep, going down, to the second-level tree
+ TODO here: subkey-based optimization
+ */
+ ftbw->off=off;
+ ftbw->key_root=info->lastpos;
+ ftbw->keyinfo=& info->s->ft2_keyinfo;
+ r=_mi_search_first(info, ftbw->keyinfo, ftbw->key_root);
+ DBUG_ASSERT(r==0); /* found something */
+ memcpy(lastkey_buf+off, info->lastkey, info->lastkey_length);
+ }
+ ftbw->docid[0]=info->lastpos;
+ if (ftbw->flags & FTB_FLAG_YES)
+ *ftbw->max_docid= info->lastpos;
+ return 0;
+}
+
+static void _ftb_init_index_search(FT_INFO *ftb)
+{
+ int i;
+ FTB_WORD *ftbw;
+
+ if ((ftb->state != READY && ftb->state !=INDEX_DONE) ||
+ ftb->keynr == NO_SUCH_KEY)
+ return;
+ ftb->state=INDEX_SEARCH;
+
+ for (i=ftb->queue.elements; i; i--)
+ {
+ ftbw=(FTB_WORD *)(ftb->queue.root[i]);
+
+ if (ftbw->flags & FTB_FLAG_TRUNC)
+ {
+ /*
+ special treatment for truncation operator
+ 1. there are some (besides this) +words
+ | no need to search in the index, it can never ADD new rows
+ | to the result, and to remove half-matched rows we do scan anyway
+ 2. -trunc*
+ | same as 1.
+ 3. in 1 and 2, +/- need not be on the same expr. level,
+ but can be on any upper level, as in +word +(trunc1* trunc2*)
+ 4. otherwise
+ | We have to index-search for this prefix.
+ | It may cause duplicates, as in the index (sorted by <word,docid>)
+ | <aaaa,row1>
+ | <aabb,row2>
+ | <aacc,row1>
+ | Searching for "aa*" will find row1 twice...
+ */
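+      /*
+        Illustrative examples (a sketch, assuming the standard boolean-mode
+        query syntax):
+          "+word1 +word2*"  -> 1: word2* is not index-searched, the full scan
+                                  resolves the prefix anyway
+          "-trunc*"         -> 2: a negated prefix can never add rows
+          "word trunc*"     -> 4: trunc* is index-searched, duplicates are
+                                  filtered through the no_dupes tree
+      */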
+ FTB_EXPR *ftbe;
+ for (ftbe=(FTB_EXPR*)ftbw;
+ ftbe->up && !(ftbe->up->flags & FTB_FLAG_TRUNC);
+ ftbe->up->flags|= FTB_FLAG_TRUNC, ftbe=ftbe->up)
+ {
+ if (ftbe->flags & FTB_FLAG_NO || /* 2 */
+ ftbe->up->ythresh - ftbe->up->yweaks >1) /* 1 */
+ {
+ FTB_EXPR *top_ftbe=ftbe->up;
+ ftbw->docid[0]=HA_OFFSET_ERROR;
+ for (ftbe=(FTB_EXPR *)ftbw;
+ ftbe != top_ftbe && !(ftbe->flags & FTB_FLAG_NO);
+ ftbe=ftbe->up)
+ ftbe->up->yweaks++;
+ ftbe=0;
+ break;
+ }
+ }
+ if (!ftbe)
+ continue;
+ /* 4 */
+ if (!is_tree_inited(& ftb->no_dupes))
+ init_tree(& ftb->no_dupes,0,0,sizeof(my_off_t),
+ _ftb_no_dupes_cmp,0,0,0);
+ else
+ reset_tree(& ftb->no_dupes);
+ }
+
+ ftbw->off=0; /* in case of reinit */
+ if (_ft2_search(ftb, ftbw, 1))
+ return;
+ }
+ queue_fix(& ftb->queue);
+}
+
+
+FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query,
+ uint query_len, CHARSET_INFO *cs)
+{
+ FTB *ftb;
+ FTB_EXPR *ftbe;
+ uint res;
+
+ if (!(ftb=(FTB *)my_malloc(sizeof(FTB), MYF(MY_WME))))
+ return 0;
+ ftb->please= (struct _ft_vft *) & _ft_vft_boolean;
+ ftb->state=UNINITIALIZED;
+ ftb->info=info;
+ ftb->keynr=keynr;
+ ftb->charset=cs;
+ DBUG_ASSERT(keynr==NO_SUCH_KEY || cs == info->s->keyinfo[keynr].seg->charset);
+ ftb->with_scan=0;
+ ftb->lastpos=HA_OFFSET_ERROR;
+ bzero(& ftb->no_dupes, sizeof(TREE));
+
+ init_alloc_root(&ftb->mem_root, 1024, 1024);
+
+ /*
+    Hack: instead of init_queue() we use reinit_queue(), so that the queue
+    array can be allocated from ftb->mem_root with alloc_root().
+ */
+ res=ftb->queue.max_elements=1+query_len/2;
+ if (!(ftb->queue.root=
+ (byte **)alloc_root(&ftb->mem_root, (res+1)*sizeof(void*))))
+ goto err;
+ reinit_queue(& ftb->queue, res, 0, 0,
+ (int (*)(void*,byte*,byte*))FTB_WORD_cmp, 0);
+ if (!(ftbe=(FTB_EXPR *)alloc_root(&ftb->mem_root, sizeof(FTB_EXPR))))
+ goto err;
+ ftbe->weight=1;
+ ftbe->flags=FTB_FLAG_YES;
+ ftbe->nos=1;
+ ftbe->up=0;
+ ftbe->max_docid= ftbe->ythresh= ftbe->yweaks= 0;
+ ftbe->docid[0]=ftbe->docid[1]=HA_OFFSET_ERROR;
+ ftbe->phrase= NULL;
+ ftb->root=ftbe;
+ _ftb_parse_query(ftb, &query, query+query_len, ftbe, 0, NULL);
+ ftb->list=(FTB_WORD **)alloc_root(&ftb->mem_root,
+ sizeof(FTB_WORD *)*ftb->queue.elements);
+ memcpy(ftb->list, ftb->queue.root+1, sizeof(FTB_WORD *)*ftb->queue.elements);
+ qsort2(ftb->list, ftb->queue.elements, sizeof(FTB_WORD *),
+ (qsort2_cmp)FTB_WORD_cmp_list, ftb->charset);
+ if (ftb->queue.elements<2) ftb->with_scan &= ~FTB_FLAG_TRUNC;
+ ftb->state=READY;
+ return ftb;
+err:
+ free_root(& ftb->mem_root, MYF(0));
+ my_free((gptr)ftb,MYF(0));
+ return 0;
+}
+
+
+/*
+ Checks if given buffer matches phrase list.
+
+ SYNOPSIS
+ _ftb_check_phrase()
+ s0 start of buffer
+ e0 end of buffer
+    phrase the phrase to match, broken into a LIST of FT_WORDs
+ cs charset info
+
+ RETURN VALUE
+    1 if the phrase is found, 0 otherwise.
+*/
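+
+/*
+  Example (a sketch, assuming the standard "..." phrase syntax): for the query
+  '"quick brown"' the phrase list holds ("quick", "brown"); a buffer
+  "the quick brown fox" matches, while "quick red brown" does not, because the
+  phrase words must appear as consecutive words in the buffer.
+*/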
+
+static int _ftb_check_phrase(const byte *s0, const byte *e0,
+ LIST *phrase, CHARSET_INFO *cs)
+{
+ FT_WORD h_word;
+ const byte *h_start= s0;
+  DBUG_ENTER("_ftb_check_phrase");
+ DBUG_ASSERT(phrase);
+
+ while (ft_simple_get_word(cs, (byte **)&h_start, e0, &h_word, FALSE))
+ {
+ FT_WORD *n_word;
+ LIST *phrase_element= phrase;
+ const byte *h_start1= h_start;
+ for (;;)
+ {
+ n_word= (FT_WORD *)phrase_element->data;
+ if (my_strnncoll(cs, h_word.pos, h_word.len, n_word->pos, n_word->len))
+ break;
+ if (! (phrase_element= phrase_element->next))
+ DBUG_RETURN(1);
+ if (! ft_simple_get_word(cs, (byte **)&h_start1, e0, &h_word, FALSE))
+ DBUG_RETURN(0);
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+
+static void _ftb_climb_the_tree(FTB *ftb, FTB_WORD *ftbw, FT_SEG_ITERATOR *ftsi_orig)
+{
+ FT_SEG_ITERATOR ftsi;
+ FTB_EXPR *ftbe;
+ float weight=ftbw->weight;
+ int yn=ftbw->flags, ythresh, mode=(ftsi_orig != 0);
+ my_off_t curdoc=ftbw->docid[mode];
+
+ for (ftbe=ftbw->up; ftbe; ftbe=ftbe->up)
+ {
+ ythresh = ftbe->ythresh - (mode ? 0 : ftbe->yweaks);
+ if (ftbe->docid[mode] != curdoc)
+ {
+ ftbe->cur_weight=0;
+ ftbe->yesses=ftbe->nos=0;
+ ftbe->docid[mode]=curdoc;
+ }
+ if (ftbe->nos)
+ break;
+ if (yn & FTB_FLAG_YES)
+ {
+ weight /= ftbe->ythresh;
+ ftbe->cur_weight += weight;
+ if ((int) ++ftbe->yesses == ythresh)
+ {
+ yn=ftbe->flags;
+ weight=ftbe->cur_weight*ftbe->weight;
+ if (mode && ftbe->phrase)
+ {
+ int not_found=1;
+
+ memcpy(&ftsi, ftsi_orig, sizeof(ftsi));
+ while (_mi_ft_segiterator(&ftsi) && not_found)
+ {
+ if (!ftsi.pos)
+ continue;
+ not_found = ! _ftb_check_phrase(ftsi.pos, ftsi.pos+ftsi.len,
+ ftbe->phrase, ftb->charset);
+ }
+ if (not_found) break;
+        } /* ftbe->phrase */
+ }
+ else
+ break;
+ }
+ else
+ if (yn & FTB_FLAG_NO)
+ {
+ /*
+        NOTE: the special sort function of the queue guarantees that, for
+        every particular subexpression, all (yn & FTB_FLAG_NO) != 0 events
+        happen BEFORE all (yn & FTB_FLAG_YES) != 0 events, so an expression
+        that has already matched can never become unmatched again.
+ */
+ ++ftbe->nos;
+ break;
+ }
+ else
+ {
+ if (ftbe->ythresh)
+ weight/=3;
+ ftbe->cur_weight += weight;
+ if ((int) ftbe->yesses < ythresh)
+ break;
+ if (!(yn & FTB_FLAG_WONLY))
+ yn= ((int) ftbe->yesses++ == ythresh) ? ftbe->flags : FTB_FLAG_WONLY ;
+ weight*= ftbe->weight;
+ }
+ }
+}
+
+
+int ft_boolean_read_next(FT_INFO *ftb, char *record)
+{
+ FTB_EXPR *ftbe;
+ FTB_WORD *ftbw;
+ MI_INFO *info=ftb->info;
+ my_off_t curdoc;
+
+ if (ftb->state != INDEX_SEARCH && ftb->state != INDEX_DONE)
+ return -1;
+
+ /* black magic ON */
+ if ((int) _mi_check_index(info, ftb->keynr) < 0)
+ return my_errno;
+ if (_mi_readinfo(info, F_RDLCK, 1))
+ return my_errno;
+ /* black magic OFF */
+
+ if (!ftb->queue.elements)
+ return my_errno=HA_ERR_END_OF_FILE;
+
+ /* Attention!!! Address of a local variable is used here! See err: label */
+ ftb->queue.first_cmp_arg=(void *)&curdoc;
+
+ while (ftb->state == INDEX_SEARCH &&
+ (curdoc=((FTB_WORD *)queue_top(& ftb->queue))->docid[0]) !=
+ HA_OFFSET_ERROR)
+ {
+ while (curdoc == (ftbw=(FTB_WORD *)queue_top(& ftb->queue))->docid[0])
+ {
+ _ftb_climb_the_tree(ftb, ftbw, 0);
+
+ /* update queue */
+ _ft2_search(ftb, ftbw, 0);
+ queue_replaced(& ftb->queue);
+ }
+
+ ftbe=ftb->root;
+ if (ftbe->docid[0]==curdoc && ftbe->cur_weight>0 &&
+ ftbe->yesses>=(ftbe->ythresh-ftbe->yweaks) && !ftbe->nos)
+ {
+ /* curdoc matched ! */
+ if (is_tree_inited(&ftb->no_dupes) &&
+ tree_insert(&ftb->no_dupes, &curdoc, 0,
+ ftb->no_dupes.custom_arg)->count >1)
+        /* but this document has already been returned once, skip the duplicate */
+ continue;
+
+ info->lastpos=curdoc;
+ /* Clear all states, except that the table was updated */
+ info->update&= (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
+
+ if (!(*info->read_record)(info,curdoc,record))
+ {
+ info->update|= HA_STATE_AKTIV; /* Record is read */
+ if (ftb->with_scan && ft_boolean_find_relevance(ftb,record,0)==0)
+ continue; /* no match */
+ my_errno=0;
+ goto err;
+ }
+ goto err;
+ }
+ }
+ ftb->state=INDEX_DONE;
+ my_errno=HA_ERR_END_OF_FILE;
+err:
+ ftb->queue.first_cmp_arg=(void *)0;
+ return my_errno;
+}
+
+
+float ft_boolean_find_relevance(FT_INFO *ftb, byte *record, uint length)
+{
+ FT_WORD word;
+ FTB_WORD *ftbw;
+ FTB_EXPR *ftbe;
+ FT_SEG_ITERATOR ftsi, ftsi2;
+ const byte *end;
+ my_off_t docid=ftb->info->lastpos;
+
+ if (docid == HA_OFFSET_ERROR)
+ return -2.0;
+ if (!ftb->queue.elements)
+ return 0;
+
+ if (ftb->state != INDEX_SEARCH && docid <= ftb->lastpos)
+ {
+ FTB_EXPR *x;
+ uint i;
+
+ for (i=0; i < ftb->queue.elements; i++)
+ {
+ ftb->list[i]->docid[1]=HA_OFFSET_ERROR;
+ for (x=ftb->list[i]->up; x; x=x->up)
+ x->docid[1]=HA_OFFSET_ERROR;
+ }
+ }
+
+ ftb->lastpos=docid;
+
+ if (ftb->keynr==NO_SUCH_KEY)
+ _mi_ft_segiterator_dummy_init(record, length, &ftsi);
+ else
+ _mi_ft_segiterator_init(ftb->info, ftb->keynr, record, &ftsi);
+ memcpy(&ftsi2, &ftsi, sizeof(ftsi));
+
+ while (_mi_ft_segiterator(&ftsi))
+ {
+ if (!ftsi.pos)
+ continue;
+
+ end=ftsi.pos+ftsi.len;
+ while (ft_simple_get_word(ftb->charset, (byte **) &ftsi.pos,
+ (byte *) end, &word, TRUE))
+ {
+ int a, b, c;
+ for (a=0, b=ftb->queue.elements, c=(a+b)/2; b-a>1; c=(a+b)/2)
+ {
+ ftbw=ftb->list[c];
+ if (mi_compare_text(ftb->charset, (uchar*) word.pos, word.len,
+ (uchar*) ftbw->word+1, ftbw->len-1,
+ (my_bool) (ftbw->flags&FTB_FLAG_TRUNC),0) >0)
+ b=c;
+ else
+ a=c;
+ }
+ for (; c>=0; c--)
+ {
+ ftbw=ftb->list[c];
+ if (mi_compare_text(ftb->charset, (uchar*) word.pos, word.len,
+ (uchar*) ftbw->word+1,ftbw->len-1,
+ (my_bool) (ftbw->flags&FTB_FLAG_TRUNC),0))
+ break;
+ if (ftbw->docid[1] == docid)
+ continue;
+ ftbw->docid[1]=docid;
+ _ftb_climb_the_tree(ftb, ftbw, &ftsi2);
+ }
+ }
+ }
+
+ ftbe=ftb->root;
+ if (ftbe->docid[1]==docid && ftbe->cur_weight>0 &&
+ ftbe->yesses>=ftbe->ythresh && !ftbe->nos)
+ { /* row matched ! */
+ return ftbe->cur_weight;
+ }
+ else
+ { /* match failed ! */
+ return 0.0;
+ }
+}
+
+
+void ft_boolean_close_search(FT_INFO *ftb)
+{
+ if (is_tree_inited(& ftb->no_dupes))
+ {
+ delete_tree(& ftb->no_dupes);
+ }
+ free_root(& ftb->mem_root, MYF(0));
+ my_free((gptr)ftb,MYF(0));
+}
+
+
+float ft_boolean_get_relevance(FT_INFO *ftb)
+{
+ return ftb->root->cur_weight;
+}
+
+
+void ft_boolean_reinit_search(FT_INFO *ftb)
+{
+ _ftb_init_index_search(ftb);
+}
+
diff --git a/myisam/ft_eval.c b/storage/myisam/ft_eval.c
index 34248c69f20..34248c69f20 100644
--- a/myisam/ft_eval.c
+++ b/storage/myisam/ft_eval.c
diff --git a/myisam/ft_eval.h b/storage/myisam/ft_eval.h
index 5501fe9d34b..5501fe9d34b 100644
--- a/myisam/ft_eval.h
+++ b/storage/myisam/ft_eval.h
diff --git a/myisam/ft_nlq_search.c b/storage/myisam/ft_nlq_search.c
index 7a506fd11c6..7a506fd11c6 100644
--- a/myisam/ft_nlq_search.c
+++ b/storage/myisam/ft_nlq_search.c
diff --git a/myisam/ft_parser.c b/storage/myisam/ft_parser.c
index 2fad2363ae2..2fad2363ae2 100644
--- a/myisam/ft_parser.c
+++ b/storage/myisam/ft_parser.c
diff --git a/myisam/ft_static.c b/storage/myisam/ft_static.c
index e221950f445..e221950f445 100644
--- a/myisam/ft_static.c
+++ b/storage/myisam/ft_static.c
diff --git a/myisam/ft_stem.c b/storage/myisam/ft_stem.c
index 846d5d2247f..846d5d2247f 100644
--- a/myisam/ft_stem.c
+++ b/storage/myisam/ft_stem.c
diff --git a/myisam/ft_stopwords.c b/storage/myisam/ft_stopwords.c
index ab51afb0e82..ab51afb0e82 100644
--- a/myisam/ft_stopwords.c
+++ b/storage/myisam/ft_stopwords.c
diff --git a/myisam/ft_test1.c b/storage/myisam/ft_test1.c
index 14be9aa1e8c..14be9aa1e8c 100644
--- a/myisam/ft_test1.c
+++ b/storage/myisam/ft_test1.c
diff --git a/myisam/ft_test1.h b/storage/myisam/ft_test1.h
index e360244057b..e360244057b 100644
--- a/myisam/ft_test1.h
+++ b/storage/myisam/ft_test1.h
diff --git a/myisam/ft_update.c b/storage/myisam/ft_update.c
index b8cd925bf4f..b8cd925bf4f 100644
--- a/myisam/ft_update.c
+++ b/storage/myisam/ft_update.c
diff --git a/myisam/ftbench/Ecompare.pl b/storage/myisam/ftbench/Ecompare.pl
index 265534e704d..265534e704d 100755
--- a/myisam/ftbench/Ecompare.pl
+++ b/storage/myisam/ftbench/Ecompare.pl
diff --git a/myisam/ftbench/Ecreate.pl b/storage/myisam/ftbench/Ecreate.pl
index d90a6f7a0ad..d90a6f7a0ad 100755
--- a/myisam/ftbench/Ecreate.pl
+++ b/storage/myisam/ftbench/Ecreate.pl
diff --git a/myisam/ftbench/Ereport.pl b/storage/myisam/ftbench/Ereport.pl
index 5969304da09..5969304da09 100755
--- a/myisam/ftbench/Ereport.pl
+++ b/storage/myisam/ftbench/Ereport.pl
diff --git a/myisam/ftbench/README b/storage/myisam/ftbench/README
index b1f8b66b15f..b1f8b66b15f 100644
--- a/myisam/ftbench/README
+++ b/storage/myisam/ftbench/README
diff --git a/myisam/ftbench/ft-test-run.sh b/storage/myisam/ftbench/ft-test-run.sh
index ceba818fa5c..ceba818fa5c 100755
--- a/myisam/ftbench/ft-test-run.sh
+++ b/storage/myisam/ftbench/ft-test-run.sh
diff --git a/myisam/ftdefs.h b/storage/myisam/ftdefs.h
index 91c679a1e58..91c679a1e58 100644
--- a/myisam/ftdefs.h
+++ b/storage/myisam/ftdefs.h
diff --git a/myisam/fulltext.h b/storage/myisam/fulltext.h
index d8c74d4e94b..d8c74d4e94b 100644
--- a/myisam/fulltext.h
+++ b/storage/myisam/fulltext.h
diff --git a/myisam/make-ccc b/storage/myisam/make-ccc
index 6d1303729db..6d1303729db 100755
--- a/myisam/make-ccc
+++ b/storage/myisam/make-ccc
diff --git a/myisam/mi_cache.c b/storage/myisam/mi_cache.c
index 8dee068c50e..8dee068c50e 100644
--- a/myisam/mi_cache.c
+++ b/storage/myisam/mi_cache.c
diff --git a/myisam/mi_changed.c b/storage/myisam/mi_changed.c
index c2ab5568eba..c2ab5568eba 100644
--- a/myisam/mi_changed.c
+++ b/storage/myisam/mi_changed.c
diff --git a/myisam/mi_check.c b/storage/myisam/mi_check.c
index 1db829808a9..1db829808a9 100644
--- a/myisam/mi_check.c
+++ b/storage/myisam/mi_check.c
diff --git a/myisam/mi_checksum.c b/storage/myisam/mi_checksum.c
index 33a51068fb0..33a51068fb0 100644
--- a/myisam/mi_checksum.c
+++ b/storage/myisam/mi_checksum.c
diff --git a/myisam/mi_close.c b/storage/myisam/mi_close.c
index 62f5617de1a..62f5617de1a 100644
--- a/myisam/mi_close.c
+++ b/storage/myisam/mi_close.c
diff --git a/myisam/mi_create.c b/storage/myisam/mi_create.c
index 560535f2933..560535f2933 100644
--- a/myisam/mi_create.c
+++ b/storage/myisam/mi_create.c
diff --git a/myisam/mi_dbug.c b/storage/myisam/mi_dbug.c
index e782d21afe7..e782d21afe7 100644
--- a/myisam/mi_dbug.c
+++ b/storage/myisam/mi_dbug.c
diff --git a/myisam/mi_delete.c b/storage/myisam/mi_delete.c
index 60a07254e82..60a07254e82 100644
--- a/myisam/mi_delete.c
+++ b/storage/myisam/mi_delete.c
diff --git a/myisam/mi_delete_all.c b/storage/myisam/mi_delete_all.c
index 3033249886f..3033249886f 100644
--- a/myisam/mi_delete_all.c
+++ b/storage/myisam/mi_delete_all.c
diff --git a/myisam/mi_delete_table.c b/storage/myisam/mi_delete_table.c
index 6843881568d..6843881568d 100644
--- a/myisam/mi_delete_table.c
+++ b/storage/myisam/mi_delete_table.c
diff --git a/myisam/mi_dynrec.c b/storage/myisam/mi_dynrec.c
index 8de500a7351..8de500a7351 100644
--- a/myisam/mi_dynrec.c
+++ b/storage/myisam/mi_dynrec.c
diff --git a/myisam/mi_extra.c b/storage/myisam/mi_extra.c
index bfe1748af01..bfe1748af01 100644
--- a/myisam/mi_extra.c
+++ b/storage/myisam/mi_extra.c
diff --git a/myisam/mi_info.c b/storage/myisam/mi_info.c
index bdece9c2ee3..bdece9c2ee3 100644
--- a/myisam/mi_info.c
+++ b/storage/myisam/mi_info.c
diff --git a/myisam/mi_key.c b/storage/myisam/mi_key.c
index ae50900a190..ae50900a190 100644
--- a/myisam/mi_key.c
+++ b/storage/myisam/mi_key.c
diff --git a/myisam/mi_keycache.c b/storage/myisam/mi_keycache.c
index fb13f3703a2..fb13f3703a2 100644
--- a/myisam/mi_keycache.c
+++ b/storage/myisam/mi_keycache.c
diff --git a/myisam/mi_locking.c b/storage/myisam/mi_locking.c
index 8d48c5242e5..8d48c5242e5 100644
--- a/myisam/mi_locking.c
+++ b/storage/myisam/mi_locking.c
diff --git a/myisam/mi_log.c b/storage/myisam/mi_log.c
index 13842c56828..13842c56828 100644
--- a/myisam/mi_log.c
+++ b/storage/myisam/mi_log.c
diff --git a/myisam/mi_open.c b/storage/myisam/mi_open.c
index 82663e0c318..82663e0c318 100644
--- a/myisam/mi_open.c
+++ b/storage/myisam/mi_open.c
diff --git a/myisam/mi_packrec.c b/storage/myisam/mi_packrec.c
index c251e4dda4a..c251e4dda4a 100644
--- a/myisam/mi_packrec.c
+++ b/storage/myisam/mi_packrec.c
diff --git a/myisam/mi_page.c b/storage/myisam/mi_page.c
index 5240c063fba..5240c063fba 100644
--- a/myisam/mi_page.c
+++ b/storage/myisam/mi_page.c
diff --git a/myisam/mi_panic.c b/storage/myisam/mi_panic.c
index 78698d88c54..78698d88c54 100644
--- a/myisam/mi_panic.c
+++ b/storage/myisam/mi_panic.c
diff --git a/myisam/mi_preload.c b/storage/myisam/mi_preload.c
index d63399b519d..d63399b519d 100644
--- a/myisam/mi_preload.c
+++ b/storage/myisam/mi_preload.c
diff --git a/myisam/mi_range.c b/storage/myisam/mi_range.c
index e78f3b11625..e78f3b11625 100644
--- a/myisam/mi_range.c
+++ b/storage/myisam/mi_range.c
diff --git a/myisam/mi_rename.c b/storage/myisam/mi_rename.c
index 8380ee1bfad..8380ee1bfad 100644
--- a/myisam/mi_rename.c
+++ b/storage/myisam/mi_rename.c
diff --git a/myisam/mi_rfirst.c b/storage/myisam/mi_rfirst.c
index e30f61801a0..e30f61801a0 100644
--- a/myisam/mi_rfirst.c
+++ b/storage/myisam/mi_rfirst.c
diff --git a/myisam/mi_rkey.c b/storage/myisam/mi_rkey.c
index 635a7eb2c48..635a7eb2c48 100644
--- a/myisam/mi_rkey.c
+++ b/storage/myisam/mi_rkey.c
diff --git a/myisam/mi_rlast.c b/storage/myisam/mi_rlast.c
index 61c3ff58fd5..61c3ff58fd5 100644
--- a/myisam/mi_rlast.c
+++ b/storage/myisam/mi_rlast.c
diff --git a/myisam/mi_rnext.c b/storage/myisam/mi_rnext.c
index 69bf5c8deae..69bf5c8deae 100644
--- a/myisam/mi_rnext.c
+++ b/storage/myisam/mi_rnext.c
diff --git a/myisam/mi_rnext_same.c b/storage/myisam/mi_rnext_same.c
index 4d770258a72..4d770258a72 100644
--- a/myisam/mi_rnext_same.c
+++ b/storage/myisam/mi_rnext_same.c
diff --git a/myisam/mi_rprev.c b/storage/myisam/mi_rprev.c
index b787210e037..b787210e037 100644
--- a/myisam/mi_rprev.c
+++ b/storage/myisam/mi_rprev.c
diff --git a/myisam/mi_rrnd.c b/storage/myisam/mi_rrnd.c
index f6a2f021662..f6a2f021662 100644
--- a/myisam/mi_rrnd.c
+++ b/storage/myisam/mi_rrnd.c
diff --git a/myisam/mi_rsame.c b/storage/myisam/mi_rsame.c
index 321097744b9..321097744b9 100644
--- a/myisam/mi_rsame.c
+++ b/storage/myisam/mi_rsame.c
diff --git a/myisam/mi_rsamepos.c b/storage/myisam/mi_rsamepos.c
index 35cdd41e297..35cdd41e297 100644
--- a/myisam/mi_rsamepos.c
+++ b/storage/myisam/mi_rsamepos.c
diff --git a/myisam/mi_scan.c b/storage/myisam/mi_scan.c
index 90bc3430ba7..90bc3430ba7 100644
--- a/myisam/mi_scan.c
+++ b/storage/myisam/mi_scan.c
diff --git a/myisam/mi_search.c b/storage/myisam/mi_search.c
index ed61bbfe41a..ed61bbfe41a 100644
--- a/myisam/mi_search.c
+++ b/storage/myisam/mi_search.c
diff --git a/myisam/mi_static.c b/storage/myisam/mi_static.c
index 4c9d814f7d6..4c9d814f7d6 100644
--- a/myisam/mi_static.c
+++ b/storage/myisam/mi_static.c
diff --git a/myisam/mi_statrec.c b/storage/myisam/mi_statrec.c
index 42352f63c66..42352f63c66 100644
--- a/myisam/mi_statrec.c
+++ b/storage/myisam/mi_statrec.c
diff --git a/myisam/mi_test1.c b/storage/myisam/mi_test1.c
index 5727c699469..5727c699469 100644
--- a/myisam/mi_test1.c
+++ b/storage/myisam/mi_test1.c
diff --git a/myisam/mi_test2.c b/storage/myisam/mi_test2.c
index 95c8ce56a13..95c8ce56a13 100644
--- a/myisam/mi_test2.c
+++ b/storage/myisam/mi_test2.c
diff --git a/myisam/mi_test3.c b/storage/myisam/mi_test3.c
index be4277cc65c..be4277cc65c 100644
--- a/myisam/mi_test3.c
+++ b/storage/myisam/mi_test3.c
diff --git a/myisam/mi_test_all.res b/storage/myisam/mi_test_all.res
index 16b517d3f76..16b517d3f76 100644
--- a/myisam/mi_test_all.res
+++ b/storage/myisam/mi_test_all.res
diff --git a/myisam/mi_test_all.sh b/storage/myisam/mi_test_all.sh
index 07e71d65675..07e71d65675 100755
--- a/myisam/mi_test_all.sh
+++ b/storage/myisam/mi_test_all.sh
diff --git a/myisam/mi_unique.c b/storage/myisam/mi_unique.c
index 34f5f595f30..34f5f595f30 100644
--- a/myisam/mi_unique.c
+++ b/storage/myisam/mi_unique.c
diff --git a/myisam/mi_update.c b/storage/myisam/mi_update.c
index ab23f2e6da9..ab23f2e6da9 100644
--- a/myisam/mi_update.c
+++ b/storage/myisam/mi_update.c
diff --git a/myisam/mi_write.c b/storage/myisam/mi_write.c
index c8f9aa84a41..c8f9aa84a41 100644
--- a/myisam/mi_write.c
+++ b/storage/myisam/mi_write.c
diff --git a/myisam/myisam_ftdump.c b/storage/myisam/myisam_ftdump.c
index 28aac0a8ecf..28aac0a8ecf 100644
--- a/myisam/myisam_ftdump.c
+++ b/storage/myisam/myisam_ftdump.c
diff --git a/myisam/myisamchk.c b/storage/myisam/myisamchk.c
index 4856d93b320..4856d93b320 100644
--- a/myisam/myisamchk.c
+++ b/storage/myisam/myisamchk.c
diff --git a/myisam/myisamdef.h b/storage/myisam/myisamdef.h
index 74463ec065a..74463ec065a 100644
--- a/myisam/myisamdef.h
+++ b/storage/myisam/myisamdef.h
diff --git a/myisam/myisamlog.c b/storage/myisam/myisamlog.c
index de55b86252c..de55b86252c 100644
--- a/myisam/myisamlog.c
+++ b/storage/myisam/myisamlog.c
diff --git a/myisam/myisampack.c b/storage/myisam/myisampack.c
index ba48cbf1b62..ba48cbf1b62 100644
--- a/myisam/myisampack.c
+++ b/storage/myisam/myisampack.c
diff --git a/myisam/rt_index.c b/storage/myisam/rt_index.c
index 97554dca4e6..97554dca4e6 100644
--- a/myisam/rt_index.c
+++ b/storage/myisam/rt_index.c
diff --git a/myisam/rt_index.h b/storage/myisam/rt_index.h
index d3fcd934719..d3fcd934719 100644
--- a/myisam/rt_index.h
+++ b/storage/myisam/rt_index.h
diff --git a/myisam/rt_key.c b/storage/myisam/rt_key.c
index e2a402fbefd..e2a402fbefd 100644
--- a/myisam/rt_key.c
+++ b/storage/myisam/rt_key.c
diff --git a/myisam/rt_key.h b/storage/myisam/rt_key.h
index df4f8aa03a2..df4f8aa03a2 100644
--- a/myisam/rt_key.h
+++ b/storage/myisam/rt_key.h
diff --git a/myisam/rt_mbr.c b/storage/myisam/rt_mbr.c
index c43daec2f7c..c43daec2f7c 100644
--- a/myisam/rt_mbr.c
+++ b/storage/myisam/rt_mbr.c
diff --git a/myisam/rt_mbr.h b/storage/myisam/rt_mbr.h
index 2153faad2b4..2153faad2b4 100644
--- a/myisam/rt_mbr.h
+++ b/storage/myisam/rt_mbr.h
diff --git a/myisam/rt_split.c b/storage/myisam/rt_split.c
index 31a7d09ab4f..31a7d09ab4f 100644
--- a/myisam/rt_split.c
+++ b/storage/myisam/rt_split.c
diff --git a/myisam/rt_test.c b/storage/myisam/rt_test.c
index 4f04aa11fce..4f04aa11fce 100644
--- a/myisam/rt_test.c
+++ b/storage/myisam/rt_test.c
diff --git a/myisam/sort.c b/storage/myisam/sort.c
index f2f8c8ef7ec..f2f8c8ef7ec 100644
--- a/myisam/sort.c
+++ b/storage/myisam/sort.c
diff --git a/myisam/sp_defs.h b/storage/myisam/sp_defs.h
index 4cc2267a1bd..4cc2267a1bd 100644
--- a/myisam/sp_defs.h
+++ b/storage/myisam/sp_defs.h
diff --git a/myisam/sp_key.c b/storage/myisam/sp_key.c
index 1d43f89cba9..1d43f89cba9 100644
--- a/myisam/sp_key.c
+++ b/storage/myisam/sp_key.c
diff --git a/myisam/sp_test.c b/storage/myisam/sp_test.c
index f0b48dbd5d8..f0b48dbd5d8 100644
--- a/myisam/sp_test.c
+++ b/storage/myisam/sp_test.c
diff --git a/myisam/test_pack b/storage/myisam/test_pack
index 0cbeb57ba70..0cbeb57ba70 100755
--- a/myisam/test_pack
+++ b/storage/myisam/test_pack
diff --git a/myisammrg/.cvsignore b/storage/myisammrg/.cvsignore
index e9955884756..e9955884756 100644
--- a/myisammrg/.cvsignore
+++ b/storage/myisammrg/.cvsignore
diff --git a/myisammrg/Makefile.am b/storage/myisammrg/Makefile.am
index 14e3295c1ae..14e3295c1ae 100644
--- a/myisammrg/Makefile.am
+++ b/storage/myisammrg/Makefile.am
diff --git a/myisammrg/make-ccc b/storage/myisammrg/make-ccc
index a7e3dfc3cdb..a7e3dfc3cdb 100755
--- a/myisammrg/make-ccc
+++ b/storage/myisammrg/make-ccc
diff --git a/myisammrg/myrg_close.c b/storage/myisammrg/myrg_close.c
index 897020c6865..897020c6865 100644
--- a/myisammrg/myrg_close.c
+++ b/storage/myisammrg/myrg_close.c
diff --git a/myisammrg/myrg_create.c b/storage/myisammrg/myrg_create.c
index 7ddb7ecb3b9..7ddb7ecb3b9 100644
--- a/myisammrg/myrg_create.c
+++ b/storage/myisammrg/myrg_create.c
diff --git a/myisammrg/myrg_def.h b/storage/myisammrg/myrg_def.h
index 00e7950bccf..00e7950bccf 100644
--- a/myisammrg/myrg_def.h
+++ b/storage/myisammrg/myrg_def.h
diff --git a/myisammrg/myrg_delete.c b/storage/myisammrg/myrg_delete.c
index 8b89ed62ac1..8b89ed62ac1 100644
--- a/myisammrg/myrg_delete.c
+++ b/storage/myisammrg/myrg_delete.c
diff --git a/myisammrg/myrg_extra.c b/storage/myisammrg/myrg_extra.c
index 62cf5f01aba..62cf5f01aba 100644
--- a/myisammrg/myrg_extra.c
+++ b/storage/myisammrg/myrg_extra.c
diff --git a/myisammrg/myrg_info.c b/storage/myisammrg/myrg_info.c
index ba840ac444b..ba840ac444b 100644
--- a/myisammrg/myrg_info.c
+++ b/storage/myisammrg/myrg_info.c
diff --git a/myisammrg/myrg_locking.c b/storage/myisammrg/myrg_locking.c
index e5a8d3f3d9d..e5a8d3f3d9d 100644
--- a/myisammrg/myrg_locking.c
+++ b/storage/myisammrg/myrg_locking.c
diff --git a/myisammrg/myrg_open.c b/storage/myisammrg/myrg_open.c
index f9cdc2bb205..f9cdc2bb205 100644
--- a/myisammrg/myrg_open.c
+++ b/storage/myisammrg/myrg_open.c
diff --git a/myisammrg/myrg_panic.c b/storage/myisammrg/myrg_panic.c
index ab08b8082c3..ab08b8082c3 100644
--- a/myisammrg/myrg_panic.c
+++ b/storage/myisammrg/myrg_panic.c
diff --git a/myisammrg/myrg_queue.c b/storage/myisammrg/myrg_queue.c
index dfb434d6397..dfb434d6397 100644
--- a/myisammrg/myrg_queue.c
+++ b/storage/myisammrg/myrg_queue.c
diff --git a/myisammrg/myrg_range.c b/storage/myisammrg/myrg_range.c
index aafdf70525c..aafdf70525c 100644
--- a/myisammrg/myrg_range.c
+++ b/storage/myisammrg/myrg_range.c
diff --git a/myisammrg/myrg_rfirst.c b/storage/myisammrg/myrg_rfirst.c
index 9ba07686c47..9ba07686c47 100644
--- a/myisammrg/myrg_rfirst.c
+++ b/storage/myisammrg/myrg_rfirst.c
diff --git a/myisammrg/myrg_rkey.c b/storage/myisammrg/myrg_rkey.c
index a85ef6a3b5e..a85ef6a3b5e 100644
--- a/myisammrg/myrg_rkey.c
+++ b/storage/myisammrg/myrg_rkey.c
diff --git a/myisammrg/myrg_rlast.c b/storage/myisammrg/myrg_rlast.c
index 96bb798bd4f..96bb798bd4f 100644
--- a/myisammrg/myrg_rlast.c
+++ b/storage/myisammrg/myrg_rlast.c
diff --git a/myisammrg/myrg_rnext.c b/storage/myisammrg/myrg_rnext.c
index 0929c63fc1d..0929c63fc1d 100644
--- a/myisammrg/myrg_rnext.c
+++ b/storage/myisammrg/myrg_rnext.c
diff --git a/myisammrg/myrg_rnext_same.c b/storage/myisammrg/myrg_rnext_same.c
index 997e4100acd..997e4100acd 100644
--- a/myisammrg/myrg_rnext_same.c
+++ b/storage/myisammrg/myrg_rnext_same.c
diff --git a/myisammrg/myrg_rprev.c b/storage/myisammrg/myrg_rprev.c
index 797993e903d..797993e903d 100644
--- a/myisammrg/myrg_rprev.c
+++ b/storage/myisammrg/myrg_rprev.c
diff --git a/myisammrg/myrg_rrnd.c b/storage/myisammrg/myrg_rrnd.c
index d623ea8ea9c..d623ea8ea9c 100644
--- a/myisammrg/myrg_rrnd.c
+++ b/storage/myisammrg/myrg_rrnd.c
diff --git a/myisammrg/myrg_rsame.c b/storage/myisammrg/myrg_rsame.c
index f6b2164dc21..f6b2164dc21 100644
--- a/myisammrg/myrg_rsame.c
+++ b/storage/myisammrg/myrg_rsame.c
diff --git a/myisammrg/myrg_static.c b/storage/myisammrg/myrg_static.c
index 9e76cbae07b..9e76cbae07b 100644
--- a/myisammrg/myrg_static.c
+++ b/storage/myisammrg/myrg_static.c
diff --git a/myisammrg/myrg_update.c b/storage/myisammrg/myrg_update.c
index 7b9f614b965..7b9f614b965 100644
--- a/myisammrg/myrg_update.c
+++ b/storage/myisammrg/myrg_update.c
diff --git a/myisammrg/myrg_write.c b/storage/myisammrg/myrg_write.c
index 532709e361d..532709e361d 100644
--- a/myisammrg/myrg_write.c
+++ b/storage/myisammrg/myrg_write.c
diff --git a/storage/ndb/Makefile.am b/storage/ndb/Makefile.am
new file mode 100644
index 00000000000..9adb67f89fc
--- /dev/null
+++ b/storage/ndb/Makefile.am
@@ -0,0 +1,30 @@
+SUBDIRS = src tools . include @ndb_opt_subdirs@
+DIST_SUBDIRS = src tools include test docs
+EXTRA_DIST = config ndbapi-examples
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+
+dist-hook:
+ -rm -rf `find $(distdir) -type d -name SCCS`
+ -rm -rf `find $(distdir) -type d -name old_files`
+ -rm -rf `find $(distdir)/ndbapi-examples -name '*.o'`
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" != "." -a "$$subdir" != "include"; then \
+ files="`find $$subdir -name '*\.h'` `find $$subdir -name '*\.hpp'`"; \
+ for f in $$files; do \
+ if test -d "$(distdir)/`dirname $$f`" -a ! -e "$(distdir)/$$f"; then \
+ cp $$f $(distdir)/$$f; \
+ fi; \
+ done; \
+ fi; \
+ done
+
+windoze:
+ for i in `find . -name 'Makefile.am'`; do make -C `dirname $$i` windoze-dsp; done
+
+windoze-dsp:
+
+all-windoze-dsp: windoze
+ find . -name '*.dsp' | grep -v SCCS | xargs unix2dos
+ $(top_srcdir)/storage/ndb/config/make-win-dsw.sh | unix2dos > ndb.dsw
+ tar cvfz ndb-win-dsp.tar.gz ndb.dsw `find . -name '*.dsp' | grep -v SCCS`
diff --git a/ndb/bin/.empty b/storage/ndb/bin/.empty
index e69de29bb2d..e69de29bb2d 100644
--- a/ndb/bin/.empty
+++ b/storage/ndb/bin/.empty
diff --git a/ndb/bin/check-regression.sh b/storage/ndb/bin/check-regression.sh
index 93a31ccb39c..93a31ccb39c 100755
--- a/ndb/bin/check-regression.sh
+++ b/storage/ndb/bin/check-regression.sh
diff --git a/ndb/bin/makeTestPrograms_html.sh b/storage/ndb/bin/makeTestPrograms_html.sh
index ac31c8a6267..ac31c8a6267 100755
--- a/ndb/bin/makeTestPrograms_html.sh
+++ b/storage/ndb/bin/makeTestPrograms_html.sh
diff --git a/storage/ndb/config/common.mk.am b/storage/ndb/config/common.mk.am
new file mode 100644
index 00000000000..1fc254ef57c
--- /dev/null
+++ b/storage/ndb/config/common.mk.am
@@ -0,0 +1,12 @@
+ndbbindir = "$(libexecdir)"
+ndbtoolsdir = "$(bindir)"
+ndbtestdir = "$(bindir)"
+ndblibdir = "$(pkglibdir)"
+ndbincludedir = "$(pkgincludedir)/storage/ndb"
+ndbapiincludedir = "$(pkgincludedir)/storage/ndb/ndbapi"
+mgmapiincludedir = "$(pkgincludedir)/storage/ndb/mgmapi"
+
+INCLUDES = $(INCLUDES_LOC)
+LDADD = $(top_srcdir)/storage/ndb/src/common/portlib/gcc.cpp $(LDADD_LOC)
+DEFS = @DEFS@ @NDB_DEFS@ $(DEFS_LOC) $(NDB_EXTRA_FLAGS)
+NDB_CXXFLAGS=@ndb_cxxflags_fix@ $(NDB_CXXFLAGS_LOC)
diff --git a/ndb/config/make-win-dsw.sh b/storage/ndb/config/make-win-dsw.sh
index b0613620f8a..b0613620f8a 100755
--- a/ndb/config/make-win-dsw.sh
+++ b/storage/ndb/config/make-win-dsw.sh
diff --git a/storage/ndb/config/type_kernel.mk.am b/storage/ndb/config/type_kernel.mk.am
new file mode 100644
index 00000000000..710cdbd80fa
--- /dev/null
+++ b/storage/ndb/config/type_kernel.mk.am
@@ -0,0 +1,18 @@
+
+INCLUDES += \
+ -I$(srcdir) -I$(top_srcdir)/include \
+ -I$(top_srcdir)/storage/ndb/include \
+ -I$(top_srcdir)/storage/ndb/src/kernel/vm \
+ -I$(top_srcdir)/storage/ndb/src/kernel/error \
+ -I$(top_srcdir)/storage/ndb/src/kernel \
+ -I$(top_srcdir)/storage/ndb/include/kernel \
+ -I$(top_srcdir)/storage/ndb/include/transporter \
+ -I$(top_srcdir)/storage/ndb/include/debugger \
+ -I$(top_srcdir)/storage/ndb/include/mgmapi \
+ -I$(top_srcdir)/storage/ndb/include/mgmcommon \
+ -I$(top_srcdir)/storage/ndb/include/ndbapi \
+ -I$(top_srcdir)/storage/ndb/include/util \
+ -I$(top_srcdir)/storage/ndb/include/portlib \
+ -I$(top_srcdir)/storage/ndb/include/logger
+
+#AM_LDFLAGS = @ndb_ldflags@
diff --git a/storage/ndb/config/type_mgmapiclient.mk.am b/storage/ndb/config/type_mgmapiclient.mk.am
new file mode 100644
index 00000000000..d1d002962ae
--- /dev/null
+++ b/storage/ndb/config/type_mgmapiclient.mk.am
@@ -0,0 +1,2 @@
+
+INCLUDES += -I$(top_srcdir)/storage/ndb/include/mgmapi
diff --git a/storage/ndb/config/type_ndbapi.mk.am b/storage/ndb/config/type_ndbapi.mk.am
new file mode 100644
index 00000000000..d4be0c40b36
--- /dev/null
+++ b/storage/ndb/config/type_ndbapi.mk.am
@@ -0,0 +1,12 @@
+
+INCLUDES += \
+ -I$(srcdir) -I$(top_srcdir)/include -I$(top_srcdir)/storage/ndb/include \
+ -I$(top_srcdir)/storage/ndb/include/kernel \
+ -I$(top_srcdir)/storage/ndb/include/transporter \
+ -I$(top_srcdir)/storage/ndb/include/debugger \
+ -I$(top_srcdir)/storage/ndb/include/mgmapi \
+ -I$(top_srcdir)/storage/ndb/include/mgmcommon \
+ -I$(top_srcdir)/storage/ndb/include/ndbapi \
+ -I$(top_srcdir)/storage/ndb/include/util \
+ -I$(top_srcdir)/storage/ndb/include/portlib \
+ -I$(top_srcdir)/storage/ndb/include/logger
diff --git a/storage/ndb/config/type_ndbapiclient.mk.am b/storage/ndb/config/type_ndbapiclient.mk.am
new file mode 100644
index 00000000000..eae7ca73ea2
--- /dev/null
+++ b/storage/ndb/config/type_ndbapiclient.mk.am
@@ -0,0 +1,2 @@
+
+INCLUDES += -I$(top_srcdir)/storage/ndb/include/ndbapi
diff --git a/storage/ndb/config/type_ndbapitest.mk.am b/storage/ndb/config/type_ndbapitest.mk.am
new file mode 100644
index 00000000000..f4a339b27c7
--- /dev/null
+++ b/storage/ndb/config/type_ndbapitest.mk.am
@@ -0,0 +1,14 @@
+
+LDADD += $(top_builddir)/storage/ndb/test/src/libNDBT.a \
+ $(top_builddir)/storage/ndb/src/libndbclient.la \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
+
+INCLUDES += -I$(top_srcdir) -I$(top_srcdir)/include \
+ -I$(top_srcdir)/storage/ndb/include \
+ -I$(top_srcdir)/storage/ndb/include/ndbapi \
+ -I$(top_srcdir)/storage/ndb/include/util \
+ -I$(top_srcdir)/storage/ndb/include/portlib \
+ -I$(top_srcdir)/storage/ndb/test/include \
+ -I$(top_srcdir)/storage/ndb/include/mgmapi
diff --git a/storage/ndb/config/type_ndbapitools.mk.am b/storage/ndb/config/type_ndbapitools.mk.am
new file mode 100644
index 00000000000..e0f2fd1c0f6
--- /dev/null
+++ b/storage/ndb/config/type_ndbapitools.mk.am
@@ -0,0 +1,15 @@
+
+LDADD += \
+ $(top_builddir)/storage/ndb/src/libndbclient.la \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
+
+INCLUDES += -I$(srcdir) -I$(top_srcdir)/include \
+ -I$(top_srcdir)/storage/ndb/include \
+ -I$(top_srcdir)/storage/ndb/include/ndbapi \
+ -I$(top_srcdir)/storage/ndb/include/util \
+ -I$(top_srcdir)/storage/ndb/include/portlib \
+ -I$(top_srcdir)/storage/ndb/test/include \
+ -I$(top_srcdir)/storage/ndb/include/mgmapi \
+ -I$(top_srcdir)/storage/ndb/include/kernel
diff --git a/storage/ndb/config/type_util.mk.am b/storage/ndb/config/type_util.mk.am
new file mode 100644
index 00000000000..5d7ad6e57b9
--- /dev/null
+++ b/storage/ndb/config/type_util.mk.am
@@ -0,0 +1,6 @@
+
+INCLUDES += -I$(srcdir) -I$(top_srcdir)/include \
+ -I$(top_srcdir)/storage/ndb/include \
+ -I$(top_srcdir)/storage/ndb/include/util \
+ -I$(top_srcdir)/storage/ndb/include/portlib \
+ -I$(top_srcdir)/storage/ndb/include/logger
diff --git a/ndb/config/win-includes b/storage/ndb/config/win-includes
index fa5984fd25e..fa5984fd25e 100755
--- a/ndb/config/win-includes
+++ b/storage/ndb/config/win-includes
diff --git a/ndb/config/win-lib.am b/storage/ndb/config/win-lib.am
index 05ac1ec8a40..05ac1ec8a40 100644
--- a/ndb/config/win-lib.am
+++ b/storage/ndb/config/win-lib.am
diff --git a/ndb/config/win-libraries b/storage/ndb/config/win-libraries
index c7a6fb696ee..c7a6fb696ee 100755
--- a/ndb/config/win-libraries
+++ b/storage/ndb/config/win-libraries
diff --git a/ndb/config/win-name b/storage/ndb/config/win-name
index 036f2b9cc2e..036f2b9cc2e 100755
--- a/ndb/config/win-name
+++ b/storage/ndb/config/win-name
diff --git a/ndb/config/win-prg.am b/storage/ndb/config/win-prg.am
index 10a8cfbca02..10a8cfbca02 100644
--- a/ndb/config/win-prg.am
+++ b/storage/ndb/config/win-prg.am
diff --git a/ndb/config/win-sources b/storage/ndb/config/win-sources
index a383bb0e613..a383bb0e613 100755
--- a/ndb/config/win-sources
+++ b/storage/ndb/config/win-sources
diff --git a/ndb/demos/1-node/1-api-3/Ndb.cfg b/storage/ndb/demos/1-node/1-api-3/Ndb.cfg
index 61309af029e..61309af029e 100644
--- a/ndb/demos/1-node/1-api-3/Ndb.cfg
+++ b/storage/ndb/demos/1-node/1-api-3/Ndb.cfg
diff --git a/ndb/demos/1-node/1-db-2/Ndb.cfg b/storage/ndb/demos/1-node/1-db-2/Ndb.cfg
index 9315950b67a..9315950b67a 100644
--- a/ndb/demos/1-node/1-db-2/Ndb.cfg
+++ b/storage/ndb/demos/1-node/1-db-2/Ndb.cfg
diff --git a/ndb/demos/1-node/1-mgm-1/Ndb.cfg b/storage/ndb/demos/1-node/1-mgm-1/Ndb.cfg
index 61d4c0ecc17..61d4c0ecc17 100644
--- a/ndb/demos/1-node/1-mgm-1/Ndb.cfg
+++ b/storage/ndb/demos/1-node/1-mgm-1/Ndb.cfg
diff --git a/ndb/demos/1-node/1-mgm-1/template_config.ini b/storage/ndb/demos/1-node/1-mgm-1/template_config.ini
index 76bb7867e3c..76bb7867e3c 100644
--- a/ndb/demos/1-node/1-mgm-1/template_config.ini
+++ b/storage/ndb/demos/1-node/1-mgm-1/template_config.ini
diff --git a/ndb/demos/2-node/2-api-4/Ndb.cfg b/storage/ndb/demos/2-node/2-api-4/Ndb.cfg
index 1713a9b5893..1713a9b5893 100644
--- a/ndb/demos/2-node/2-api-4/Ndb.cfg
+++ b/storage/ndb/demos/2-node/2-api-4/Ndb.cfg
diff --git a/ndb/demos/2-node/2-api-5/Ndb.cfg b/storage/ndb/demos/2-node/2-api-5/Ndb.cfg
index faa2882eeea..faa2882eeea 100644
--- a/ndb/demos/2-node/2-api-5/Ndb.cfg
+++ b/storage/ndb/demos/2-node/2-api-5/Ndb.cfg
diff --git a/ndb/demos/2-node/2-api-6/Ndb.cfg b/storage/ndb/demos/2-node/2-api-6/Ndb.cfg
index bc2c4809453..bc2c4809453 100644
--- a/ndb/demos/2-node/2-api-6/Ndb.cfg
+++ b/storage/ndb/demos/2-node/2-api-6/Ndb.cfg
diff --git a/ndb/demos/2-node/2-api-7/Ndb.cfg b/storage/ndb/demos/2-node/2-api-7/Ndb.cfg
index 4107fdb6c5e..4107fdb6c5e 100644
--- a/ndb/demos/2-node/2-api-7/Ndb.cfg
+++ b/storage/ndb/demos/2-node/2-api-7/Ndb.cfg
diff --git a/ndb/demos/2-node/2-db-2/Ndb.cfg b/storage/ndb/demos/2-node/2-db-2/Ndb.cfg
index 9315950b67a..9315950b67a 100644
--- a/ndb/demos/2-node/2-db-2/Ndb.cfg
+++ b/storage/ndb/demos/2-node/2-db-2/Ndb.cfg
diff --git a/ndb/demos/2-node/2-db-3/Ndb.cfg b/storage/ndb/demos/2-node/2-db-3/Ndb.cfg
index 61309af029e..61309af029e 100644
--- a/ndb/demos/2-node/2-db-3/Ndb.cfg
+++ b/storage/ndb/demos/2-node/2-db-3/Ndb.cfg
diff --git a/ndb/demos/2-node/2-mgm-1/Ndb.cfg b/storage/ndb/demos/2-node/2-mgm-1/Ndb.cfg
index 61d4c0ecc17..61d4c0ecc17 100644
--- a/ndb/demos/2-node/2-mgm-1/Ndb.cfg
+++ b/storage/ndb/demos/2-node/2-mgm-1/Ndb.cfg
diff --git a/ndb/demos/2-node/2-mgm-1/template_config.ini b/storage/ndb/demos/2-node/2-mgm-1/template_config.ini
index 3edb909609a..3edb909609a 100644
--- a/ndb/demos/2-node/2-mgm-1/template_config.ini
+++ b/storage/ndb/demos/2-node/2-mgm-1/template_config.ini
diff --git a/ndb/demos/config-templates/config_template-1-REP.ini b/storage/ndb/demos/config-templates/config_template-1-REP.ini
index 71be3f2f53f..71be3f2f53f 100644
--- a/ndb/demos/config-templates/config_template-1-REP.ini
+++ b/storage/ndb/demos/config-templates/config_template-1-REP.ini
diff --git a/ndb/demos/config-templates/config_template-4.ini b/storage/ndb/demos/config-templates/config_template-4.ini
index e47c9037344..e47c9037344 100644
--- a/ndb/demos/config-templates/config_template-4.ini
+++ b/storage/ndb/demos/config-templates/config_template-4.ini
diff --git a/ndb/demos/config-templates/config_template-install.ini b/storage/ndb/demos/config-templates/config_template-install.ini
index e31906ba609..e31906ba609 100644
--- a/ndb/demos/config-templates/config_template-install.ini
+++ b/storage/ndb/demos/config-templates/config_template-install.ini
diff --git a/ndb/demos/run_demo1-PS-SS_common.sh b/storage/ndb/demos/run_demo1-PS-SS_common.sh
index 625e9655087..625e9655087 100644
--- a/ndb/demos/run_demo1-PS-SS_common.sh
+++ b/storage/ndb/demos/run_demo1-PS-SS_common.sh
diff --git a/ndb/demos/run_demo1-PS.sh b/storage/ndb/demos/run_demo1-PS.sh
index 82cfdd5e65b..82cfdd5e65b 100755
--- a/ndb/demos/run_demo1-PS.sh
+++ b/storage/ndb/demos/run_demo1-PS.sh
diff --git a/ndb/demos/run_demo1-SS.sh b/storage/ndb/demos/run_demo1-SS.sh
index 5ede57c44c4..5ede57c44c4 100755
--- a/ndb/demos/run_demo1-SS.sh
+++ b/storage/ndb/demos/run_demo1-SS.sh
diff --git a/ndb/demos/run_demo1.sh b/storage/ndb/demos/run_demo1.sh
index df6e3fc799d..df6e3fc799d 100755
--- a/ndb/demos/run_demo1.sh
+++ b/storage/ndb/demos/run_demo1.sh
diff --git a/ndb/demos/run_demo2.sh b/storage/ndb/demos/run_demo2.sh
index 9bae7517d5f..9bae7517d5f 100755
--- a/ndb/demos/run_demo2.sh
+++ b/storage/ndb/demos/run_demo2.sh
diff --git a/storage/ndb/docs/Makefile.am b/storage/ndb/docs/Makefile.am
new file mode 100644
index 00000000000..d10228d419d
--- /dev/null
+++ b/storage/ndb/docs/Makefile.am
@@ -0,0 +1,114 @@
+DOXYDIR = doxygen
+noinst_HEADERS = $(DOXYDIR)/predoxy.pl $(DOXYDIR)/postdoxy.pl $(DOXYDIR)/Doxyfile.ndbapi $(DOXYDIR)/Doxyfile.mgmapi $(DOXYDIR)/header.ndbapi.tex $(DOXYDIR)/header.mgmapi.tex
+
+all: do-check-html ndbapidoc-html mgmapidoc-html
+all-pdf: do-check-pdf ndbapidoc-pdf mgmapidoc-pdf
+
+DOXYTMP = .doxytmp
+DOXYOUT = .doxyout
+
+NDB_RELEASE = @NDB_VERSION_MAJOR@.@NDB_VERSION_MINOR@.@NDB_VERSION_BUILD@-@NDB_VERSION_STATUS@
+
+clean-local:
+ rm -rf ndbapi.pdf ndbapi.html mgmapi.pdf mgmapi.html
+ rm -rf $(DOXYTMP) $(DOXYOUT)
+
+do-check-html:
+ @set -x; \
+ if test @PERL@ = no ; then \
+ echo "Perl needed to make docs"; \
+ exit 1; \
+ fi; \
+ if test @DOXYGEN@ = no ; then \
+ echo "Doxygen needed to make docs"; \
+ exit 1; \
+ fi;
+
+do-check-pdf: do-check-html
+ if test @PDFLATEX@ = no ; then \
+ echo "Pdflatex needed to make docs"; \
+ exit 1; \
+ fi; \
+ if test @MAKEINDEX@ = no ; then \
+ echo "Makeindex needed to make docs"; \
+ exit 1; \
+ fi;
+
+###
+#
+# NDB API Programmer's Guide
+#
+ndbapidoc-html: ndbapi.html
+ndbapidoc-pdf: ndbapi.pdf
+
+ndbapi.html: $(noinst_HEADERS)
+ @set -x; \
+ export NDB_RELEASE=$(NDB_RELEASE); \
+ @RM@ -f ndbapi.pdf ndbapi.html; \
+ @RM@ -rf $(DOXYTMP) $(DOXYOUT); \
+ mkdir -p $(DOXYTMP) $(DOXYOUT); \
+ @CP@ $(top_srcdir)/storage/ndb/include/ndbapi/* $(DOXYTMP); \
+ @CP@ $(top_srcdir)/storage/ndb/ndbapi-examples/*/*.[ch]pp $(DOXYTMP); \
+ @PERL@ $(DOXYDIR)/predoxy.pl; \
+ mv footer.html $(DOXYTMP); \
+ (cd $(DOXYTMP) ; @DOXYGEN@ ../$(DOXYDIR)/Doxyfile.ndbapi); \
+ @PERL@ $(DOXYDIR)/postdoxy.pl $(DOXYOUT)/ndbapi.latex "MySQL Cluster NDB API Programmer Guide"; \
+ (cd $(DOXYOUT) && \
+ find ndbapi.html -print | cpio -pdm ..);
+
+ndbapi.pdf: ndbapi.html
+ (cd $(DOXYOUT)/ndbapi.latex && \
+ @PDFLATEX@ refman.tex && @MAKEINDEX@ refman && @PDFLATEX@ refman.tex && \
+ cp -p refman.pdf ../../ndbapi.pdf);
+
+###
+#
+# MGM API Guide
+#
+mgmapidoc-html: mgmapi.html
+mgmapidoc-pdf: mgmapi.pdf
+
+mgmapi.html: $(noinst_HEADERS)
+ @set -x; \
+ export NDB_RELEASE=$(NDB_RELEASE); \
+ @RM@ -f mgmapi.pdf mgmapi.html; \
+ @RM@ -rf $(DOXYTMP) $(DOXYOUT); \
+ mkdir -p $(DOXYTMP) $(DOXYOUT); \
+ @CP@ $(top_srcdir)/storage/ndb/include/mgmapi/* $(DOXYTMP); \
+ @PERL@ $(DOXYDIR)/predoxy.pl; \
+ mv footer.html $(DOXYTMP); \
+ (cd $(DOXYTMP) ; @DOXYGEN@ ../$(DOXYDIR)/Doxyfile.mgmapi); \
+ @PERL@ $(DOXYDIR)/postdoxy.pl $(DOXYOUT)/mgmapi.latex "MySQL Cluster MGM API Guide"; \
+ (cd $(DOXYOUT) && \
+ find mgmapi.html -print | cpio -pdm ..);
+
+mgmapi.pdf: mgmapi.html
+ (cd $(DOXYOUT)/mgmapi.latex && \
+ @PDFLATEX@ refman.tex && @MAKEINDEX@ refman && @PDFLATEX@ refman.tex && \
+ cp -p refman.pdf ../../mgmapi.pdf);
+
+###
+#
+# Complete Source Browser except for
+# ndbapi odbc test tools win32 lib examples docs CVS config bin
+# include/ndbapi
+# include/newtonapi src/newtonapi
+# include/mgmapi src/mgmapi
+# src/client
+ndbdoc: DUMMY
+ mkdir -p $(OUTDIR)
+ cd $(top_srcdir)/storage/ndb ; $(DOXYGEN) $(DOXYDIR)/Doxyfile.ndb
+
+###
+#
+# odbcdoc - Complete Source Browser for NDB ODBC (src/client/odbc)
+
+odbcdoc: DUMMY
+ mkdir -p $(OUTDIR)
+ cd $(top_srcdir)/storage/ndb ; $(DOXYGEN) $(DOXYDIR)/Doxyfile.odbc
+
+testdoc: DUMMY
+ mkdir -p $(OUTDIR)
+ cd $(top_srcdir)/storage/ndb ; $(DOXYGEN) $(DOXYDIR)/Doxyfile.test
+
+windoze-dsp:
diff --git a/ndb/docs/README b/storage/ndb/docs/README
index 262e9003aca..262e9003aca 100644
--- a/ndb/docs/README
+++ b/storage/ndb/docs/README
diff --git a/ndb/docs/doxygen/Doxyfile.mgmapi b/storage/ndb/docs/doxygen/Doxyfile.mgmapi
index 1e743dcb60e..1e743dcb60e 100644
--- a/ndb/docs/doxygen/Doxyfile.mgmapi
+++ b/storage/ndb/docs/doxygen/Doxyfile.mgmapi
diff --git a/ndb/docs/doxygen/Doxyfile.ndb b/storage/ndb/docs/doxygen/Doxyfile.ndb
index 3986a7cd17f..3986a7cd17f 100644
--- a/ndb/docs/doxygen/Doxyfile.ndb
+++ b/storage/ndb/docs/doxygen/Doxyfile.ndb
diff --git a/ndb/docs/doxygen/Doxyfile.ndbapi b/storage/ndb/docs/doxygen/Doxyfile.ndbapi
index da610148468..da610148468 100644
--- a/ndb/docs/doxygen/Doxyfile.ndbapi
+++ b/storage/ndb/docs/doxygen/Doxyfile.ndbapi
diff --git a/ndb/docs/doxygen/Doxyfile.odbc b/storage/ndb/docs/doxygen/Doxyfile.odbc
index 262513852b7..262513852b7 100644
--- a/ndb/docs/doxygen/Doxyfile.odbc
+++ b/storage/ndb/docs/doxygen/Doxyfile.odbc
diff --git a/ndb/docs/doxygen/Doxyfile.test b/storage/ndb/docs/doxygen/Doxyfile.test
index 801c82cf380..801c82cf380 100644
--- a/ndb/docs/doxygen/Doxyfile.test
+++ b/storage/ndb/docs/doxygen/Doxyfile.test
diff --git a/ndb/docs/doxygen/header.mgmapi.tex b/storage/ndb/docs/doxygen/header.mgmapi.tex
index 1b55ceb15c7..1b55ceb15c7 100644
--- a/ndb/docs/doxygen/header.mgmapi.tex
+++ b/storage/ndb/docs/doxygen/header.mgmapi.tex
diff --git a/ndb/docs/doxygen/header.ndbapi.tex b/storage/ndb/docs/doxygen/header.ndbapi.tex
index c37ce286ed8..c37ce286ed8 100644
--- a/ndb/docs/doxygen/header.ndbapi.tex
+++ b/storage/ndb/docs/doxygen/header.ndbapi.tex
diff --git a/ndb/docs/doxygen/postdoxy.pl b/storage/ndb/docs/doxygen/postdoxy.pl
index ad0edb44a31..ad0edb44a31 100755
--- a/ndb/docs/doxygen/postdoxy.pl
+++ b/storage/ndb/docs/doxygen/postdoxy.pl
diff --git a/ndb/docs/doxygen/predoxy.pl b/storage/ndb/docs/doxygen/predoxy.pl
index 3994054dcf6..3994054dcf6 100755
--- a/ndb/docs/doxygen/predoxy.pl
+++ b/storage/ndb/docs/doxygen/predoxy.pl
diff --git a/ndb/docs/wl2077.txt b/storage/ndb/docs/wl2077.txt
index f5b10bb702e..f5b10bb702e 100644
--- a/ndb/docs/wl2077.txt
+++ b/storage/ndb/docs/wl2077.txt
diff --git a/ndb/home/bin/Linuxmkisofs b/storage/ndb/home/bin/Linuxmkisofs
index a531f4cca7b..a531f4cca7b 100755
--- a/ndb/home/bin/Linuxmkisofs
+++ b/storage/ndb/home/bin/Linuxmkisofs
Binary files differ
diff --git a/ndb/home/bin/Solarismkisofs b/storage/ndb/home/bin/Solarismkisofs
index b239eaed6ad..b239eaed6ad 100755
--- a/ndb/home/bin/Solarismkisofs
+++ b/storage/ndb/home/bin/Solarismkisofs
Binary files differ
diff --git a/ndb/home/bin/cvs2cl.pl b/storage/ndb/home/bin/cvs2cl.pl
index 9e6da5acf5b..9e6da5acf5b 100755
--- a/ndb/home/bin/cvs2cl.pl
+++ b/storage/ndb/home/bin/cvs2cl.pl
diff --git a/ndb/home/bin/fix-cvs-root b/storage/ndb/home/bin/fix-cvs-root
index 2c4f158f825..2c4f158f825 100755
--- a/ndb/home/bin/fix-cvs-root
+++ b/storage/ndb/home/bin/fix-cvs-root
diff --git a/ndb/home/bin/import-from-bk.sh b/storage/ndb/home/bin/import-from-bk.sh
index 4e3957be6d5..4e3957be6d5 100755
--- a/ndb/home/bin/import-from-bk.sh
+++ b/storage/ndb/home/bin/import-from-bk.sh
diff --git a/ndb/home/bin/ndb_deploy b/storage/ndb/home/bin/ndb_deploy
index 773fc9b8fd7..773fc9b8fd7 100755
--- a/ndb/home/bin/ndb_deploy
+++ b/storage/ndb/home/bin/ndb_deploy
diff --git a/ndb/home/bin/ndbdoxy.pl b/storage/ndb/home/bin/ndbdoxy.pl
index 89b7de8440e..89b7de8440e 100755
--- a/ndb/home/bin/ndbdoxy.pl
+++ b/storage/ndb/home/bin/ndbdoxy.pl
diff --git a/ndb/home/bin/ngcalc b/storage/ndb/home/bin/ngcalc
index a289d384db9..a289d384db9 100755
--- a/ndb/home/bin/ngcalc
+++ b/storage/ndb/home/bin/ngcalc
diff --git a/ndb/home/bin/parseConfigFile.awk b/storage/ndb/home/bin/parseConfigFile.awk
index 6903949156c..6903949156c 100644
--- a/ndb/home/bin/parseConfigFile.awk
+++ b/storage/ndb/home/bin/parseConfigFile.awk
diff --git a/ndb/home/bin/setup-test.sh b/storage/ndb/home/bin/setup-test.sh
index 61097c30027..61097c30027 100755
--- a/ndb/home/bin/setup-test.sh
+++ b/storage/ndb/home/bin/setup-test.sh
diff --git a/ndb/home/bin/signallog2html.lib/signallog2list.awk b/storage/ndb/home/bin/signallog2html.lib/signallog2list.awk
index 9839f314556..9839f314556 100644
--- a/ndb/home/bin/signallog2html.lib/signallog2list.awk
+++ b/storage/ndb/home/bin/signallog2html.lib/signallog2list.awk
diff --git a/ndb/home/bin/signallog2html.lib/uniq_blocks.awk b/storage/ndb/home/bin/signallog2html.lib/uniq_blocks.awk
index 43f48d1cde1..43f48d1cde1 100644
--- a/ndb/home/bin/signallog2html.lib/uniq_blocks.awk
+++ b/storage/ndb/home/bin/signallog2html.lib/uniq_blocks.awk
diff --git a/ndb/home/bin/signallog2html.sh b/storage/ndb/home/bin/signallog2html.sh
index 5665275807c..5665275807c 100755
--- a/ndb/home/bin/signallog2html.sh
+++ b/storage/ndb/home/bin/signallog2html.sh
diff --git a/ndb/home/bin/stripcr b/storage/ndb/home/bin/stripcr
index 540418f88cf..540418f88cf 100755
--- a/ndb/home/bin/stripcr
+++ b/storage/ndb/home/bin/stripcr
diff --git a/ndb/home/lib/funcs.sh b/storage/ndb/home/lib/funcs.sh
index b7d8914035e..b7d8914035e 100644
--- a/ndb/home/lib/funcs.sh
+++ b/storage/ndb/home/lib/funcs.sh
diff --git a/storage/ndb/include/Makefile.am b/storage/ndb/include/Makefile.am
new file mode 100644
index 00000000000..f702514d4e2
--- /dev/null
+++ b/storage/ndb/include/Makefile.am
@@ -0,0 +1,51 @@
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+
+ndbinclude_HEADERS = \
+ndb_constants.h \
+ndb_init.h \
+ndb_types.h \
+ndb_version.h
+
+ndbapiinclude_HEADERS = \
+ndbapi/ndbapi_limits.h \
+ndbapi/ndb_opt_defaults.h \
+ndbapi/Ndb.hpp \
+ndbapi/NdbApi.hpp \
+ndbapi/NdbTransaction.hpp \
+ndbapi/NdbDictionary.hpp \
+ndbapi/NdbError.hpp \
+ndbapi/NdbEventOperation.hpp \
+ndbapi/NdbIndexOperation.hpp \
+ndbapi/NdbOperation.hpp \
+ndbapi/ndb_cluster_connection.hpp \
+ndbapi/NdbBlob.hpp \
+ndbapi/NdbPool.hpp \
+ndbapi/NdbRecAttr.hpp \
+ndbapi/NdbReceiver.hpp \
+ndbapi/NdbScanFilter.hpp \
+ndbapi/NdbScanOperation.hpp \
+ndbapi/NdbIndexScanOperation.hpp \
+ndbapi/ndberror.h
+
+mgmapiinclude_HEADERS = \
+mgmapi/mgmapi.h \
+mgmapi/mgmapi_debug.h \
+mgmapi/mgmapi_config_parameters.h \
+mgmapi/mgmapi_config_parameters_debug.h \
+mgmapi/ndb_logevent.h
+
+noinst_HEADERS = \
+ndb_global.h \
+ndb_net.h
+
+EXTRA_DIST = debugger editline kernel logger mgmcommon \
+portlib transporter util
+
+dist-hook:
+ -rm -rf `find $(distdir) -type d -name SCCS`
+
+windoze-dsp:
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
diff --git a/ndb/include/debugger/DebuggerNames.hpp b/storage/ndb/include/debugger/DebuggerNames.hpp
index cf9b1b57226..cf9b1b57226 100644
--- a/ndb/include/debugger/DebuggerNames.hpp
+++ b/storage/ndb/include/debugger/DebuggerNames.hpp
diff --git a/ndb/include/debugger/EventLogger.hpp b/storage/ndb/include/debugger/EventLogger.hpp
index 6308cf25465..6308cf25465 100644
--- a/ndb/include/debugger/EventLogger.hpp
+++ b/storage/ndb/include/debugger/EventLogger.hpp
diff --git a/ndb/include/debugger/GrepError.hpp b/storage/ndb/include/debugger/GrepError.hpp
index beedbd95c80..beedbd95c80 100644
--- a/ndb/include/debugger/GrepError.hpp
+++ b/storage/ndb/include/debugger/GrepError.hpp
diff --git a/ndb/include/debugger/SignalLoggerManager.hpp b/storage/ndb/include/debugger/SignalLoggerManager.hpp
index d212329bf78..d212329bf78 100644
--- a/ndb/include/debugger/SignalLoggerManager.hpp
+++ b/storage/ndb/include/debugger/SignalLoggerManager.hpp
diff --git a/ndb/include/editline/editline.h b/storage/ndb/include/editline/editline.h
index 2757e385968..2757e385968 100644
--- a/ndb/include/editline/editline.h
+++ b/storage/ndb/include/editline/editline.h
diff --git a/ndb/include/kernel/AttributeDescriptor.hpp b/storage/ndb/include/kernel/AttributeDescriptor.hpp
index af28e777213..af28e777213 100644
--- a/ndb/include/kernel/AttributeDescriptor.hpp
+++ b/storage/ndb/include/kernel/AttributeDescriptor.hpp
diff --git a/storage/ndb/include/kernel/AttributeHeader.hpp b/storage/ndb/include/kernel/AttributeHeader.hpp
new file mode 100644
index 00000000000..7d89219b8b2
--- /dev/null
+++ b/storage/ndb/include/kernel/AttributeHeader.hpp
@@ -0,0 +1,215 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef ATTRIBUTE_HEADER
+#define ATTRIBUTE_HEADER
+
+/**
+ * @class AttributeHeader
+ * @brief Header passed in front of every attribute value in AttrInfo signal
+ */
+class AttributeHeader {
+ friend class Dbtup;
+ friend class Backup;
+ friend class NdbOperation;
+ friend class DbUtil;
+ friend class Suma;
+
+public:
+ /**
+ * Pseudo columns
+ */
+ STATIC_CONST( PSEUDO = 0x8000 );
+ STATIC_CONST( FRAGMENT = 0xFFFE ); // Read fragment no
+ STATIC_CONST( ROW_COUNT = 0xFFFD ); // Read row count (committed)
+ STATIC_CONST( COMMIT_COUNT = 0xFFFC ); // Read commit count
+ STATIC_CONST( RANGE_NO = 0xFFFB ); // Read range no (when batched ranges)
+
+ STATIC_CONST( ROW_SIZE = 0xFFFA );
+ STATIC_CONST( FRAGMENT_MEMORY= 0xFFF9 );
+
+ /** Initialize AttributeHeader at location aHeaderPtr */
+ static AttributeHeader& init(void* aHeaderPtr, Uint32 anAttributeId,
+ Uint32 aDataSize);
+
+ /** Returns size of AttributeHeader (usually one or two words) */
+ Uint32 getHeaderSize() const; // In 32-bit words
+
+ /** Store AttributeHeader in location given as argument */
+ void insertHeader(Uint32*);
+
+ /** Get next attribute header (if there is one) */
+ AttributeHeader* getNext() const;
+
+ /** Get location of attribute value */
+ Uint32* getDataPtr() const;
+
+ /** Getters and Setters */
+ Uint32 getAttributeId() const;
+ void setAttributeId(Uint32);
+ Uint32 getDataSize() const; // In 32-bit words
+ void setDataSize(Uint32);
+ bool isNULL() const;
+ void setNULL();
+
+  /** Print */
+ //void print(NdbOut&);
+ void print(FILE*);
+
+ static Uint32 getDataSize(Uint32);
+
+public:
+ AttributeHeader(Uint32 = 0);
+ AttributeHeader(Uint32 anAttributeId, Uint32 aDataSize);
+ ~AttributeHeader();
+
+ Uint32 m_value;
+};
+
+/**
+ * 1111111111222222222233
+ * 01234567890123456789012345678901
+ * ssssssssssssss eiiiiiiiiiiiiiiii
+ *
+ * i = Attribute Id
+ * s = Size of current "chunk" - 14 Bits -> 16384 (words) = 65k
+ * Including optional extra word(s).
+ * e - Element data/Blob, read element of array
+ * If == 0 next data word contains attribute value.
+ * If == 1 next data word contains:
+ * For Array of Fixed size Elements
+ * Start Index (16 bit), Stop Index(16 bit)
+ * For Blob
+ * Start offset (32 bit) (length is defined in previous word)
+ *
+ * An attribute value equal to "null" is represented by setting s == 0.
+ *
+ * Bit 14 is not yet used.
+ */
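+
+/*
+ * Worked example (illustration): AttributeHeader(7, 2) puts attribute id 7
+ * into bits 16-31 and data size 2 (words) into bits 0-13, giving
+ * m_value == 0x00070002; isNULL() is true exactly when the size bits are 0.
+ */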
+
+inline
+AttributeHeader& AttributeHeader::init(void* aHeaderPtr, Uint32 anAttributeId,
+ Uint32 aDataSize)
+{
+ return * new (aHeaderPtr) AttributeHeader(anAttributeId, aDataSize);
+}
+
+inline
+AttributeHeader::AttributeHeader(Uint32 aHeader)
+{
+ m_value = aHeader;
+}
+
+inline
+AttributeHeader::AttributeHeader(Uint32 anAttributeId, Uint32 aDataSize)
+{
+ m_value = 0;
+ this->setAttributeId(anAttributeId);
+ this->setDataSize(aDataSize);
+}
+
+inline
+AttributeHeader::~AttributeHeader()
+{}
+
+inline
+Uint32 AttributeHeader::getHeaderSize() const
+{
+ // Should check 'e' bit here
+ return 1;
+}
+
+inline
+Uint32 AttributeHeader::getAttributeId() const
+{
+ return (m_value & 0xFFFF0000) >> 16;
+}
+
+inline
+void AttributeHeader::setAttributeId(Uint32 anAttributeId)
+{
+ m_value &= 0x0000FFFF; // Clear attribute id
+ m_value |= (anAttributeId << 16);
+}
+
+inline
+Uint32 AttributeHeader::getDataSize() const
+{
+ return (m_value & 0x3FFF);
+}
+
+inline
+void AttributeHeader::setDataSize(Uint32 aDataSize)
+{
+ m_value &= (~0x3FFF);
+ m_value |= aDataSize;
+}
+
+inline
+bool AttributeHeader::isNULL() const
+{
+ return (getDataSize() == 0);
+}
+
+inline
+void AttributeHeader::setNULL()
+{
+ setDataSize(0);
+}
+
+inline
+Uint32* AttributeHeader::getDataPtr() const
+{
+ return (Uint32*)&m_value + getHeaderSize();
+}
+
+inline
+void AttributeHeader::insertHeader(Uint32* target)
+{
+ *target = m_value;
+}
+
+inline
+AttributeHeader*
+AttributeHeader::getNext() const {
+ return (AttributeHeader*)(getDataPtr() + getDataSize());
+}
+
+inline
+void
+//AttributeHeader::print(NdbOut& output) {
+AttributeHeader::print(FILE* output) {
+ fprintf(output, "AttributeId: H\'%.8x (D\'%d), DataSize: H\'%.8x (D\'%d), "
+ "isNULL: %d\n",
+ getAttributeId(), getAttributeId(),
+ getDataSize(), getDataSize(),
+ isNULL());
+}
+
+inline
+Uint32
+AttributeHeader::getDataSize(Uint32 m_value){
+ return (m_value & 0x3FFF);
+}
+
+#endif
+
+
+
+
+
+
+
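
The bit-layout comment and the inline getters above fix how the single header word is packed: the attribute id occupies the upper 16 bits and the data size, counted in 32-bit words, the lower 14 bits, with a size of 0 denoting NULL. The minimal sketch below (not part of the patch; it uses plain uint32_t and a hypothetical packAttributeHeader helper instead of the class) mirrors that packing:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative re-implementation of the packing documented above:
// bits 16..31 hold the attribute id, bits 0..13 the data size in
// 32-bit words, and a size of 0 means NULL (hypothetical helper).
static uint32_t packAttributeHeader(uint32_t attrId, uint32_t sizeInWords)
{
  assert(attrId <= 0xFFFF && sizeInWords <= 0x3FFF);
  return (attrId << 16) | sizeInWords;
}

int main()
{
  uint32_t h    = packAttributeHeader(7, 2);   // attribute 7, two data words
  uint32_t id   = (h & 0xFFFF0000) >> 16;      // same mask as getAttributeId()
  uint32_t size = h & 0x3FFF;                  // same mask as getDataSize()
  std::printf("id=%u size=%u isNULL=%d\n",
              (unsigned)id, (unsigned)size, (int)(size == 0));
  return 0;
}
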
diff --git a/ndb/include/kernel/AttributeList.hpp b/storage/ndb/include/kernel/AttributeList.hpp
index 70b178c6c79..70b178c6c79 100644
--- a/ndb/include/kernel/AttributeList.hpp
+++ b/storage/ndb/include/kernel/AttributeList.hpp
diff --git a/ndb/include/kernel/BlockNumbers.h b/storage/ndb/include/kernel/BlockNumbers.h
index cb3cc697eee..cb3cc697eee 100644
--- a/ndb/include/kernel/BlockNumbers.h
+++ b/storage/ndb/include/kernel/BlockNumbers.h
diff --git a/storage/ndb/include/kernel/GlobalSignalNumbers.h b/storage/ndb/include/kernel/GlobalSignalNumbers.h
new file mode 100644
index 00000000000..cc016b1f3e5
--- /dev/null
+++ b/storage/ndb/include/kernel/GlobalSignalNumbers.h
@@ -0,0 +1,949 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef GLOBAL_SIGNAL_NUMBERS_H
+#define GLOBAL_SIGNAL_NUMBERS_H
+
+#include <kernel_types.h>
+/**
+ * NOTE
+ *
+ * When adding a new signal, remember to update MAX_GSN and SignalNames.cpp
+ */
+const GlobalSignalNumber MAX_GSN = 712;
+
+struct GsnName {
+ GlobalSignalNumber gsn;
+ const char * name;
+};
+
+extern const GsnName SignalNames[];
+extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
+
+/**
+ * These are used by API and kernel
+ */
+#define GSN_API_REGCONF 1
+#define GSN_API_REGREF 2
+#define GSN_API_REGREQ 3
+
+#define GSN_ATTRINFO 4
+#define GSN_TRANSID_AI 5
+#define GSN_KEYINFO 6
+#define GSN_READCONF 7
+
+#define GSN_TCKEY_FAILCONF 8
+#define GSN_TCKEY_FAILREF 9
+#define GSN_TCKEYCONF 10
+#define GSN_TCKEYREF 11
+#define GSN_TCKEYREQ 12
+
+#define GSN_TCROLLBACKCONF 13
+#define GSN_TCROLLBACKREF 14
+#define GSN_TCROLLBACKREQ 15
+#define GSN_TCROLLBACKREP 16
+
+#define GSN_TC_COMMITCONF 17
+#define GSN_TC_COMMITREF 18
+#define GSN_TC_COMMITREQ 19
+#define GSN_TC_HBREP 20
+
+#define GSN_TRANSID_AI_R 21
+#define GSN_KEYINFO20_R 22
+
+#define GSN_GET_TABINFOREF 23
+#define GSN_GET_TABINFOREQ 24
+#define GSN_GET_TABINFO_CONF 190
+
+#define GSN_GET_TABLEID_REQ 683
+#define GSN_GET_TABLEID_REF 684
+#define GSN_GET_TABLEID_CONF 685
+
+#define GSN_DIHNDBTAMPER 25
+#define GSN_NODE_FAILREP 26
+#define GSN_NF_COMPLETEREP 27
+
+#define GSN_SCAN_NEXTREQ 28
+#define GSN_SCAN_TABCONF 29
+/* 30 unused */
+#define GSN_SCAN_TABREF 31
+#define GSN_SCAN_TABREQ 32
+#define GSN_KEYINFO20 33
+
+#define GSN_TCRELEASECONF 34
+#define GSN_TCRELEASEREF 35
+#define GSN_TCRELEASEREQ 36
+
+#define GSN_TCSEIZECONF 37
+#define GSN_TCSEIZEREF 38
+#define GSN_TCSEIZEREQ 39
+
+/* 40 unused */
+/* 41 unused */
+/* 42 unused */
+/* 43 unused */
+/* 44 unused */
+/* 45 unused */
+/* 46 unused */
+/* 47 unused */
+/* 48 unused */
+/* 49 unused */
+/* 50 unused */
+/* 51 unused */
+/* 52 unused */
+/* 53 unused */
+/* 54 unused */
+/* 55 unused */
+/* 56 unused */
+/* 57 unused */
+/* 58 unused */
+/* 59 unused */
+/* 60 unused */
+/* 61 unused */
+/* 62 unused */
+/* 63 unused */
+/* 64 unused */
+/* 65 unused */
+/* 66 unused */
+
+/**
+ * These are used only by kernel
+ */
+
+#define GSN_ACC_ABORTCONF 67
+/* 68 unused */
+/* 69 unused */
+/* 70 unused */
+#define GSN_ACC_ABORTREQ 71
+#define GSN_ACC_CHECK_SCAN 72
+#define GSN_ACC_COMMITCONF 73
+#define GSN_ACC_COMMITREQ 74
+#define GSN_ACC_CONTOPCONF 75
+#define GSN_ACC_CONTOPREQ 76
+#define GSN_ACC_LCPCONF 77
+#define GSN_ACC_LCPREF 78
+#define GSN_ACC_LCPREQ 79
+#define GSN_ACC_LCPSTARTED 80
+#define GSN_ACC_OVER_REC 81
+
+#define GSN_ACC_SAVE_PAGES 83
+#define GSN_ACC_SCAN_INFO 84
+#define GSN_ACC_SCAN_INFO24 85
+#define GSN_ACC_SCANCONF 86
+#define GSN_ACC_SCANREF 87
+#define GSN_ACC_SCANREQ 88
+#define GSN_ACC_SRCONF 89
+#define GSN_ACC_SRREF 90
+#define GSN_ACC_SRREQ 91
+#define GSN_ACC_TO_CONF 92
+#define GSN_ACC_TO_REF 93
+#define GSN_ACC_TO_REQ 94
+#define GSN_ACCFRAGCONF 95
+#define GSN_ACCFRAGREF 96
+#define GSN_ACCFRAGREQ 97
+#define GSN_ACCKEYCONF 98
+#define GSN_ACCKEYREF 99
+#define GSN_ACCKEYREQ 100
+#define GSN_ACCMINUPDATE 101
+#define GSN_ACCSEIZECONF 103
+#define GSN_ACCSEIZEREF 104
+#define GSN_ACCSEIZEREQ 105
+#define GSN_ACCUPDATECONF 106
+#define GSN_ACCUPDATEKEY 107
+#define GSN_ACCUPDATEREF 108
+
+#define GSN_ADD_FRAGCONF 109
+#define GSN_ADD_FRAGREF 110
+#define GSN_ADD_FRAGREQ 111
+
+#define GSN_API_FAILCONF 113
+#define GSN_API_FAILREQ 114
+#define GSN_CNTR_START_REQ 115
+/* 116 not unused */
+#define GSN_CNTR_START_REF 117
+#define GSN_CNTR_START_CONF 118
+#define GSN_CNTR_START_REP 119
+/* 120 unused */
+/* 121 unused */
+/* 122 unused */
+/* 123 unused */
+/* 124 unused */
+#define GSN_CHECK_LCP_STOP 125
+#define GSN_CLOSE_COMCONF 126 /* local */
+#define GSN_CLOSE_COMREQ 127 /* local */
+#define GSN_CM_ACKADD 128 /* distr. */
+/* 129 unused */
+#define GSN_CM_ADD 130 /* distr. */
+/* 131 unused */
+/* 132 not unused */
+/* 133 not unused */
+#define GSN_CM_HEARTBEAT 134 /* distr. */
+/* 135 unused */
+/* 136 unused */
+/* 137 unused */
+#define GSN_CM_NODEINFOCONF 138 /* distr. */
+#define GSN_CM_NODEINFOREF 139 /* distr. */
+#define GSN_CM_NODEINFOREQ 140 /* distr. */
+#define GSN_CM_REGCONF 141 /* distr. */
+#define GSN_CM_REGREF 142 /* distr. */
+#define GSN_CM_REGREQ 143 /* distr. */
+/* 144 unused */
+/* 145 unused */
+/* 146 unused */
+#define GSN_CM_ADD_REP 147 /* local */
+/* 148 unused */
+/* 149 unused */
+/* 150 unused */
+#define GSN_CNTR_WAITREP 151 /* distr. */
+#define GSN_COMMIT 152
+#define GSN_COMMIT_FAILCONF 153
+#define GSN_COMMIT_FAILREQ 154
+#define GSN_COMMITCONF 155
+#define GSN_COMMITREQ 156
+#define GSN_COMMITTED 157
+#define GSN_COMPLETE 159
+#define GSN_COMPLETECONF 160
+#define GSN_COMPLETED 161
+#define GSN_COMPLETEREQ 162
+#define GSN_CONNECT_REP 163
+#define GSN_CONTINUEB 164
+/* 165 not unused */
+#define GSN_COPY_ACTIVECONF 166
+#define GSN_COPY_ACTIVEREF 167
+#define GSN_COPY_ACTIVEREQ 168
+#define GSN_COPY_FRAGCONF 169
+#define GSN_COPY_FRAGREF 170
+#define GSN_COPY_FRAGREQ 171
+#define GSN_COPY_GCICONF 172
+#define GSN_COPY_GCIREQ 173
+#define GSN_COPY_STATECONF 174
+#define GSN_COPY_STATEREQ 175
+#define GSN_COPY_TABCONF 176
+#define GSN_COPY_TABREQ 177
+#define GSN_CREATE_FRAGCONF 178
+#define GSN_CREATE_FRAGREF 179
+#define GSN_CREATE_FRAGREQ 180
+#define GSN_DEBUG_SIG 181
+#define GSN_DI_FCOUNTCONF 182
+#define GSN_DI_FCOUNTREF 183
+#define GSN_DI_FCOUNTREQ 184
+#define GSN_DIADDTABCONF 185
+#define GSN_DIADDTABREF 186
+#define GSN_DIADDTABREQ 187
+/* 188 not unused */
+/* 189 not unused */
+/* 190 not unused */
+#define GSN_DICTSTARTCONF 191
+#define GSN_DICTSTARTREQ 192
+
+#define GSN_LIST_TABLES_REQ 193
+#define GSN_LIST_TABLES_CONF 194
+
+#define GSN_ABORT 195
+#define GSN_ABORTCONF 196
+#define GSN_ABORTED 197
+#define GSN_ABORTREQ 198
+
+/******************************************
+ * DROP TABLE
+ *
+ */
+
+/**
+ * This is drop table's public interface
+ */
+#define GSN_DROP_TABLE_REQ 82
+#define GSN_DROP_TABLE_REF 102
+#define GSN_DROP_TABLE_CONF 112
+
+/**
+ * This is used for implementing drop table
+ */
+#define GSN_PREP_DROP_TAB_REQ 199
+#define GSN_PREP_DROP_TAB_REF 200
+#define GSN_PREP_DROP_TAB_CONF 201
+
+#define GSN_DROP_TAB_REQ 202
+#define GSN_DROP_TAB_REF 203
+#define GSN_DROP_TAB_CONF 204
+
+#define GSN_WAIT_DROP_TAB_REQ 208
+#define GSN_WAIT_DROP_TAB_REF 209
+#define GSN_WAIT_DROP_TAB_CONF 216
+
+/*****************************************/
+
+#define GSN_UPDATE_TOCONF 205
+#define GSN_UPDATE_TOREF 206
+#define GSN_UPDATE_TOREQ 207
+
+#define GSN_DIGETNODESCONF 210
+#define GSN_DIGETNODESREF 211
+#define GSN_DIGETNODESREQ 212
+#define GSN_DIGETPRIMCONF 213
+#define GSN_DIGETPRIMREF 214
+#define GSN_DIGETPRIMREQ 215
+
+#define GSN_DIH_RESTARTCONF 217
+#define GSN_DIH_RESTARTREF 218
+#define GSN_DIH_RESTARTREQ 219
+
+/* 220 not unused */
+/* 221 not unused */
+/* 222 not unused */
+
+#define GSN_EMPTY_LCP_REQ 223
+#define GSN_EMPTY_LCP_CONF 224
+
+#define GSN_SCHEMA_INFO 225
+#define GSN_SCHEMA_INFOCONF 226
+
+#define GSN_MASTER_GCPCONF 227
+#define GSN_MASTER_GCPREF 228
+#define GSN_MASTER_GCPREQ 229
+
+/* 230 not unused */
+/* 231 not unused */
+
+#define GSN_DIRELEASECONF 232
+#define GSN_DIRELEASEREF 233
+#define GSN_DIRELEASEREQ 234
+#define GSN_DISCONNECT_REP 235
+#define GSN_DISEIZECONF 236
+#define GSN_DISEIZEREF 237
+#define GSN_DISEIZEREQ 238
+#define GSN_DIVERIFYCONF 239
+#define GSN_DIVERIFYREF 240
+#define GSN_DIVERIFYREQ 241
+#define GSN_ENABLE_COMORD 242
+#define GSN_END_LCPCONF 243
+#define GSN_END_LCPREQ 244
+#define GSN_END_TOCONF 245
+#define GSN_END_TOREQ 246
+#define GSN_EVENT_REP 247
+#define GSN_EXEC_FRAGCONF 248
+#define GSN_EXEC_FRAGREF 249
+#define GSN_EXEC_FRAGREQ 250
+#define GSN_EXEC_SRCONF 251
+#define GSN_EXEC_SRREQ 252
+#define GSN_EXPANDCHECK2 253
+#define GSN_FAIL_REP 254
+#define GSN_FSCLOSECONF 255
+#define GSN_FSCLOSEREF 256
+#define GSN_FSCLOSEREQ 257
+#define GSN_FSAPPENDCONF 258
+#define GSN_FSOPENCONF 259
+#define GSN_FSOPENREF 260
+#define GSN_FSOPENREQ 261
+#define GSN_FSREADCONF 262
+#define GSN_FSREADREF 263
+#define GSN_FSREADREQ 264
+#define GSN_FSSYNCCONF 265
+#define GSN_FSSYNCREF 266
+#define GSN_FSSYNCREQ 267
+#define GSN_FSAPPENDREQ 268
+#define GSN_FSAPPENDREF 269
+#define GSN_FSWRITECONF 270
+#define GSN_FSWRITEREF 271
+#define GSN_FSWRITEREQ 272
+#define GSN_GCP_ABORT 273
+#define GSN_GCP_ABORTED 274
+#define GSN_GCP_COMMIT 275
+#define GSN_GCP_NODEFINISH 276
+#define GSN_GCP_NOMORETRANS 277
+#define GSN_GCP_PREPARE 278
+#define GSN_GCP_PREPARECONF 279
+#define GSN_GCP_PREPAREREF 280
+#define GSN_GCP_SAVECONF 281
+#define GSN_GCP_SAVEREF 282
+#define GSN_GCP_SAVEREQ 283
+#define GSN_GCP_TCFINISHED 284
+#define GSN_SR_FRAGIDCONF 285
+#define GSN_SR_FRAGIDREF 286
+#define GSN_SR_FRAGIDREQ 287
+#define GSN_GETGCICONF 288
+#define GSN_GETGCIREQ 289
+#define GSN_HOT_SPAREREP 290
+#define GSN_INCL_NODECONF 291
+#define GSN_INCL_NODEREF 292
+#define GSN_INCL_NODEREQ 293
+#define GSN_LCP_FRAGIDCONF 294
+#define GSN_LCP_FRAGIDREF 295
+#define GSN_LCP_FRAGIDREQ 296
+#define GSN_LCP_HOLDOPCONF 297
+#define GSN_LCP_HOLDOPREF 298
+#define GSN_LCP_HOLDOPREQ 299
+#define GSN_SHRINKCHECK2 301
+#define GSN_GET_SCHEMA_INFOREQ 302
+/* 303 not unused */
+/* 304 not unused */
+#define GSN_LQH_RESTART_OP 305
+#define GSN_LQH_TRANSCONF 306
+#define GSN_LQH_TRANSREQ 307
+#define GSN_LQHADDATTCONF 308
+#define GSN_LQHADDATTREF 309
+#define GSN_LQHADDATTREQ 310
+#define GSN_LQHFRAGCONF 311
+#define GSN_LQHFRAGREF 312
+#define GSN_LQHFRAGREQ 313
+#define GSN_LQHKEYCONF 314
+#define GSN_LQHKEYREF 315
+#define GSN_LQHKEYREQ 316
+
+#define GSN_MASTER_LCPCONF 318
+#define GSN_MASTER_LCPREF 319
+#define GSN_MASTER_LCPREQ 320
+
+#define GSN_MEMCHECKCONF 321
+#define GSN_MEMCHECKREQ 322
+#define GSN_NDB_FAILCONF 323
+#define GSN_NDB_STARTCONF 324
+#define GSN_NDB_STARTREF 325
+#define GSN_NDB_STARTREQ 326
+#define GSN_NDB_STTOR 327
+#define GSN_NDB_STTORRY 328
+#define GSN_NDB_TAMPER 329
+#define GSN_NEXT_SCANCONF 330
+#define GSN_NEXT_SCANREF 331
+#define GSN_NEXT_SCANREQ 332
+#define GSN_NEXTOPERATION 333
+
+#define GSN_READ_CONFIG_REQ 334 /* new name for sizealt, local */
+#define GSN_READ_CONFIG_CONF 335 /* new name for sizealt, local */
+
+/* 336 unused */
+/* 337 unused */
+/* 338 unused */
+#define GSN_OPEN_COMCONF 339
+#define GSN_OPEN_COMREF 340
+#define GSN_OPEN_COMREQ 341
+#define GSN_PACKED_SIGNAL 342
+#define GSN_PREP_FAILCONF 343
+#define GSN_PREP_FAILREF 344
+#define GSN_PREP_FAILREQ 345
+#define GSN_PRES_TOCONF 346
+#define GSN_PRES_TOREQ 347
+#define GSN_READ_NODESCONF 348
+#define GSN_READ_NODESREF 349
+#define GSN_READ_NODESREQ 350
+#define GSN_SCAN_FRAGCONF 351
+#define GSN_SCAN_FRAGREF 352
+#define GSN_SCAN_FRAGREQ 353
+#define GSN_SCAN_HBREP 354
+#define GSN_SCAN_PROCCONF 355
+#define GSN_SCAN_PROCREQ 356
+#define GSN_SEND_PACKED 357
+#define GSN_SET_LOGLEVELORD 358
+
+#define GSN_LQH_ALLOCREQ 359
+#define GSN_TUP_ALLOCREQ 360
+#define GSN_TUP_DEALLOCREQ 361
+
+/* 362 not unused */
+
+#define GSN_TUP_WRITELOG_REQ 363
+#define GSN_LQH_WRITELOG_REQ 364
+
+#define GSN_LCP_FRAG_REP 300
+#define GSN_LCP_FRAG_ORD 365
+#define GSN_LCP_COMPLETE_REP 158
+
+#define GSN_START_LCP_REQ 317
+#define GSN_START_LCP_CONF 366
+
+#define GSN_UNBLO_DICTCONF 367
+#define GSN_UNBLO_DICTREQ 368
+#define GSN_START_COPYCONF 369
+#define GSN_START_COPYREF 370
+#define GSN_START_COPYREQ 371
+#define GSN_START_EXEC_SR 372
+#define GSN_START_FRAGCONF 373
+#define GSN_START_FRAGREF 374
+#define GSN_START_FRAGREQ 375
+#define GSN_START_LCP_REF 376
+#define GSN_START_LCP_ROUND 377
+#define GSN_START_MECONF 378
+#define GSN_START_MEREF 379
+#define GSN_START_MEREQ 380
+#define GSN_START_PERMCONF 381
+#define GSN_START_PERMREF 382
+#define GSN_START_PERMREQ 383
+#define GSN_START_RECCONF 384
+#define GSN_START_RECREF 385
+#define GSN_START_RECREQ 386
+#define GSN_START_TOCONF 387
+#define GSN_START_TOREQ 388
+#define GSN_STORED_PROCCONF 389
+#define GSN_STORED_PROCREF 390
+#define GSN_STORED_PROCREQ 391
+#define GSN_STTOR 392
+#define GSN_STTORRY 393
+#define GSN_BACKUP_TRIG_REQ 394
+#define GSN_SYSTEM_ERROR 395
+#define GSN_TAB_COMMITCONF 396
+#define GSN_TAB_COMMITREF 397
+#define GSN_TAB_COMMITREQ 398
+#define GSN_TAKE_OVERTCCONF 399
+#define GSN_TAKE_OVERTCREQ 400
+#define GSN_TC_CLOPSIZECONF 401
+#define GSN_TC_CLOPSIZEREQ 402
+#define GSN_TC_SCHVERCONF 403
+#define GSN_TC_SCHVERREQ 404
+#define GSN_TCGETOPSIZECONF 405
+#define GSN_TCGETOPSIZEREQ 406
+#define GSN_TEST_ORD 407
+#define GSN_TESTSIG 408
+#define GSN_TIME_SIGNAL 409
+/* 410 unused */
+/* 411 unused */
+/* 412 unused */
+#define GSN_TUP_ABORTREQ 414
+#define GSN_TUP_ADD_ATTCONF 415
+#define GSN_TUP_ADD_ATTRREF 416
+#define GSN_TUP_ADD_ATTRREQ 417
+#define GSN_TUP_ATTRINFO 418
+#define GSN_TUP_COMMITREQ 419
+/* 420 unused */
+#define GSN_TUP_LCPCONF 421
+#define GSN_TUP_LCPREF 422
+#define GSN_TUP_LCPREQ 423
+#define GSN_TUP_LCPSTARTED 424
+#define GSN_TUP_PREPLCPCONF 425
+#define GSN_TUP_PREPLCPREF 426
+#define GSN_TUP_PREPLCPREQ 427
+#define GSN_TUP_SRCONF 428
+#define GSN_TUP_SRREF 429
+#define GSN_TUP_SRREQ 430
+#define GSN_TUPFRAGCONF 431
+#define GSN_TUPFRAGREF 432
+#define GSN_TUPFRAGREQ 433
+#define GSN_TUPKEYCONF 434
+#define GSN_TUPKEYREF 435
+#define GSN_TUPKEYREQ 436
+#define GSN_TUPRELEASECONF 437
+#define GSN_TUPRELEASEREF 438
+#define GSN_TUPRELEASEREQ 439
+#define GSN_TUPSEIZECONF 440
+#define GSN_TUPSEIZEREF 441
+#define GSN_TUPSEIZEREQ 442
+
+#define GSN_ABORT_ALL_REQ 445
+#define GSN_ABORT_ALL_REF 446
+#define GSN_ABORT_ALL_CONF 447
+
+#define GSN_STATISTICS_REQ 448
+#define GSN_STOP_ORD 449
+#define GSN_TAMPER_ORD 450
+#define GSN_SET_VAR_REQ 451
+#define GSN_SET_VAR_CONF 452
+#define GSN_SET_VAR_REF 453
+#define GSN_STATISTICS_CONF 454
+
+#define GSN_START_ORD 455
+/* 456 unused */
+/* 457 unused */
+
+#define GSN_EVENT_SUBSCRIBE_REQ 458
+#define GSN_EVENT_SUBSCRIBE_CONF 459
+#define GSN_EVENT_SUBSCRIBE_REF 460
+#define GSN_ACC_COM_BLOCK 461
+#define GSN_ACC_COM_UNBLOCK 462
+#define GSN_TUP_COM_BLOCK 463
+#define GSN_TUP_COM_UNBLOCK 464
+
+#define GSN_DUMP_STATE_ORD 465
+
+#define GSN_START_INFOREQ 466
+#define GSN_START_INFOREF 467
+#define GSN_START_INFOCONF 468
+
+#define GSN_TC_COMMIT_ACK 469
+#define GSN_REMOVE_MARKER_ORD 470
+
+#define GSN_CHECKNODEGROUPSREQ 471
+#define GSN_CHECKNODEGROUPSCONF 472
+
+/* 473 unused */
+#define GSN_ARBIT_PREPREQ 474
+#define GSN_ARBIT_PREPCONF 475
+#define GSN_ARBIT_PREPREF 476
+#define GSN_ARBIT_STARTREQ 477
+#define GSN_ARBIT_STARTCONF 478
+#define GSN_ARBIT_STARTREF 479
+#define GSN_ARBIT_CHOOSEREQ 480
+#define GSN_ARBIT_CHOOSECONF 481
+#define GSN_ARBIT_CHOOSEREF 482
+#define GSN_ARBIT_STOPORD 483
+#define GSN_ARBIT_STOPREP 484
+
+#define GSN_BLOCK_COMMIT_ORD 485
+#define GSN_UNBLOCK_COMMIT_ORD 486
+
+#define GSN_NODE_STATE_REP 487
+#define GSN_CHANGE_NODE_STATE_REQ 488
+#define GSN_CHANGE_NODE_STATE_CONF 489
+
+#define GSN_DIH_SWITCH_REPLICA_REQ 490
+#define GSN_DIH_SWITCH_REPLICA_CONF 491
+#define GSN_DIH_SWITCH_REPLICA_REF 492
+
+#define GSN_STOP_PERM_REQ 493
+#define GSN_STOP_PERM_REF 494
+#define GSN_STOP_PERM_CONF 495
+
+#define GSN_STOP_ME_REQ 496
+#define GSN_STOP_ME_REF 497
+#define GSN_STOP_ME_CONF 498
+
+#define GSN_WAIT_GCP_REQ 499
+#define GSN_WAIT_GCP_REF 500
+#define GSN_WAIT_GCP_CONF 501
+
+/* 502 not used */
+
+/**
+ * Trigger and index signals
+ */
+
+/**
+ * These are used by API and kernel
+ */
+#define GSN_TRIG_ATTRINFO 503
+#define GSN_CREATE_TRIG_REQ 504
+#define GSN_CREATE_TRIG_CONF 505
+#define GSN_CREATE_TRIG_REF 506
+#define GSN_ALTER_TRIG_REQ 507
+#define GSN_ALTER_TRIG_CONF 508
+#define GSN_ALTER_TRIG_REF 509
+#define GSN_CREATE_INDX_REQ 510
+#define GSN_CREATE_INDX_CONF 511
+#define GSN_CREATE_INDX_REF 512
+#define GSN_DROP_TRIG_REQ 513
+#define GSN_DROP_TRIG_CONF 514
+#define GSN_DROP_TRIG_REF 515
+#define GSN_DROP_INDX_REQ 516
+#define GSN_DROP_INDX_CONF 517
+#define GSN_DROP_INDX_REF 518
+#define GSN_TCINDXREQ 519
+#define GSN_TCINDXCONF 520
+#define GSN_TCINDXREF 521
+#define GSN_INDXKEYINFO 522
+#define GSN_INDXATTRINFO 523
+#define GSN_TCINDXNEXTREQ 524
+#define GSN_TCINDXNEXTCONF 525
+#define GSN_TCINDXNEXREF 526
+#define GSN_FIRE_TRIG_ORD 527
+
+/**
+ * These are used only by kernel
+ */
+#define GSN_BUILDINDXREQ 528
+#define GSN_BUILDINDXCONF 529
+#define GSN_BUILDINDXREF 530
+
+/**
+ * Backup interface
+ */
+#define GSN_BACKUP_REQ 531
+#define GSN_BACKUP_DATA 532
+#define GSN_BACKUP_REF 533
+#define GSN_BACKUP_CONF 534
+
+#define GSN_ABORT_BACKUP_ORD 535
+
+#define GSN_BACKUP_ABORT_REP 536
+#define GSN_BACKUP_COMPLETE_REP 537
+#define GSN_BACKUP_NF_COMPLETE_REP 538
+
+/**
+ * Internal backup signals
+ */
+#define GSN_DEFINE_BACKUP_REQ 539
+#define GSN_DEFINE_BACKUP_REF 540
+#define GSN_DEFINE_BACKUP_CONF 541
+
+#define GSN_START_BACKUP_REQ 542
+#define GSN_START_BACKUP_REF 543
+#define GSN_START_BACKUP_CONF 544
+
+#define GSN_BACKUP_FRAGMENT_REQ 545
+#define GSN_BACKUP_FRAGMENT_REF 546
+#define GSN_BACKUP_FRAGMENT_CONF 547
+
+#define GSN_STOP_BACKUP_REQ 548
+#define GSN_STOP_BACKUP_REF 549
+#define GSN_STOP_BACKUP_CONF 550
+
+/**
+ * Used for master take-over / API status request
+ */
+#define GSN_BACKUP_STATUS_REQ 551
+#define GSN_BACKUP_STATUS_REF 116
+#define GSN_BACKUP_STATUS_CONF 165
+
+/**
+ * Db sequence signals
+ */
+#define GSN_UTIL_SEQUENCE_REQ 552
+#define GSN_UTIL_SEQUENCE_REF 553
+#define GSN_UTIL_SEQUENCE_CONF 554
+
+#define GSN_FSREMOVEREQ 555
+#define GSN_FSREMOVEREF 556
+#define GSN_FSREMOVECONF 557
+
+#define GSN_UTIL_PREPARE_REQ 558
+#define GSN_UTIL_PREPARE_CONF 559
+#define GSN_UTIL_PREPARE_REF 560
+
+#define GSN_UTIL_EXECUTE_REQ 561
+#define GSN_UTIL_EXECUTE_CONF 562
+#define GSN_UTIL_EXECUTE_REF 563
+
+#define GSN_UTIL_RELEASE_REQ 564
+#define GSN_UTIL_RELEASE_CONF 565
+#define GSN_UTIL_RELEASE_REF 566
+
+/**
+ * When dropping a long signal due to lack of memory resources
+ */
+#define GSN_SIGNAL_DROPPED_REP 567
+#define GSN_CONTINUE_FRAGMENTED 568
+
+/**
+ * Suma participant interface
+ */
+#define GSN_SUB_REMOVE_REQ 569
+#define GSN_SUB_REMOVE_REF 570
+#define GSN_SUB_REMOVE_CONF 571
+#define GSN_SUB_STOP_REQ 572
+#define GSN_SUB_STOP_REF 573
+#define GSN_SUB_STOP_CONF 574
+/* 575 unused */
+#define GSN_SUB_CREATE_REQ 576
+#define GSN_SUB_CREATE_REF 577
+#define GSN_SUB_CREATE_CONF 578
+#define GSN_SUB_START_REQ 579
+#define GSN_SUB_START_REF 580
+#define GSN_SUB_START_CONF 581
+#define GSN_SUB_SYNC_REQ 582
+#define GSN_SUB_SYNC_REF 583
+#define GSN_SUB_SYNC_CONF 584
+#define GSN_SUB_META_DATA 585
+#define GSN_SUB_TABLE_DATA 586
+
+#define GSN_CREATE_TABLE_REQ 587
+#define GSN_CREATE_TABLE_REF 588
+#define GSN_CREATE_TABLE_CONF 589
+
+#define GSN_ALTER_TABLE_REQ 624
+#define GSN_ALTER_TABLE_REF 625
+#define GSN_ALTER_TABLE_CONF 626
+
+#define GSN_SUB_SYNC_CONTINUE_REQ 590
+#define GSN_SUB_SYNC_CONTINUE_REF 591
+#define GSN_SUB_SYNC_CONTINUE_CONF 592
+#define GSN_SUB_GCP_COMPLETE_REP 593
+
+#define GSN_CREATE_FRAGMENTATION_REQ 594
+#define GSN_CREATE_FRAGMENTATION_REF 595
+#define GSN_CREATE_FRAGMENTATION_CONF 596
+
+#define GSN_CREATE_TAB_REQ 597
+#define GSN_CREATE_TAB_REF 598
+#define GSN_CREATE_TAB_CONF 599
+
+#define GSN_ALTER_TAB_REQ 600
+#define GSN_ALTER_TAB_REF 601
+#define GSN_ALTER_TAB_CONF 602
+
+#define GSN_ALTER_INDX_REQ 603
+#define GSN_ALTER_INDX_REF 604
+#define GSN_ALTER_INDX_CONF 605
+
+/**
+ * Grep signals
+ */
+#define GSN_GREP_SUB_CREATE_REQ 606
+#define GSN_GREP_SUB_CREATE_REF 607
+#define GSN_GREP_SUB_CREATE_CONF 608
+#define GSN_GREP_CREATE_REQ 609
+#define GSN_GREP_CREATE_REF 610
+#define GSN_GREP_CREATE_CONF 611
+
+#define GSN_GREP_SUB_START_REQ 612
+#define GSN_GREP_SUB_START_REF 613
+#define GSN_GREP_SUB_START_CONF 614
+#define GSN_GREP_START_REQ 615
+#define GSN_GREP_START_REF 616
+#define GSN_GREP_START_CONF 617
+
+#define GSN_GREP_SUB_SYNC_REQ 618
+#define GSN_GREP_SUB_SYNC_REF 619
+#define GSN_GREP_SUB_SYNC_CONF 620
+#define GSN_GREP_SYNC_REQ 621
+#define GSN_GREP_SYNC_REF 622
+#define GSN_GREP_SYNC_CONF 623
+
+/**
+ * REP signals
+ */
+#define GSN_REP_WAITGCP_REQ 627
+#define GSN_REP_WAITGCP_REF 628
+#define GSN_REP_WAITGCP_CONF 629
+#define GSN_GREP_WAITGCP_REQ 630
+#define GSN_GREP_WAITGCP_REF 631
+#define GSN_GREP_WAITGCP_CONF 632
+#define GSN_REP_GET_GCI_REQ 633
+#define GSN_REP_GET_GCI_REF 634
+#define GSN_REP_GET_GCI_CONF 635
+#define GSN_REP_GET_GCIBUFFER_REQ 636
+#define GSN_REP_GET_GCIBUFFER_REF 637
+#define GSN_REP_GET_GCIBUFFER_CONF 638
+#define GSN_REP_INSERT_GCIBUFFER_REQ 639
+#define GSN_REP_INSERT_GCIBUFFER_REF 640
+#define GSN_REP_INSERT_GCIBUFFER_CONF 641
+#define GSN_REP_CLEAR_PS_GCIBUFFER_REQ 642
+#define GSN_REP_CLEAR_PS_GCIBUFFER_REF 643
+#define GSN_REP_CLEAR_PS_GCIBUFFER_CONF 644
+#define GSN_REP_CLEAR_SS_GCIBUFFER_REQ 645
+#define GSN_REP_CLEAR_SS_GCIBUFFER_REF 646
+#define GSN_REP_CLEAR_SS_GCIBUFFER_CONF 647
+#define GSN_REP_DATA_PAGE 648
+#define GSN_REP_GCIBUFFER_ACC_REP 649
+
+#define GSN_GREP_SUB_REMOVE_REQ 650
+#define GSN_GREP_SUB_REMOVE_REF 651
+#define GSN_GREP_SUB_REMOVE_CONF 652
+#define GSN_GREP_REMOVE_REQ 653
+#define GSN_GREP_REMOVE_REF 654
+#define GSN_GREP_REMOVE_CONF 655
+
+/* Start Global Replication */
+#define GSN_GREP_REQ 656
+
+/**
+ * Management server
+ */
+#define GSN_MGM_LOCK_CONFIG_REQ 657
+#define GSN_MGM_LOCK_CONFIG_REP 658
+#define GSN_MGM_UNLOCK_CONFIG_REQ 659
+#define GSN_MGM_UNLOCK_CONFIG_REP 660
+
+#define GSN_UTIL_CREATE_LOCK_REQ 132
+#define GSN_UTIL_CREATE_LOCK_REF 133
+#define GSN_UTIL_CREATE_LOCK_CONF 188
+
+#define GSN_UTIL_DESTROY_LOCK_REQ 189
+#define GSN_UTIL_DESTROY_LOCK_REF 220
+#define GSN_UTIL_DESTROY_LOCK_CONF 221
+
+#define GSN_UTIL_LOCK_REQ 222
+#define GSN_UTIL_LOCK_REF 230
+#define GSN_UTIL_LOCK_CONF 231
+
+#define GSN_UTIL_UNLOCK_REQ 303
+#define GSN_UTIL_UNLOCK_REF 304
+#define GSN_UTIL_UNLOCK_CONF 362
+
+/* SUMA */
+#define GSN_CREATE_SUBID_REQ 661
+#define GSN_CREATE_SUBID_REF 662
+#define GSN_CREATE_SUBID_CONF 663
+
+/* GREP */
+#define GSN_GREP_CREATE_SUBID_REQ 664
+#define GSN_GREP_CREATE_SUBID_REF 665
+#define GSN_GREP_CREATE_SUBID_CONF 666
+#define GSN_REP_DROP_TABLE_REQ 667
+#define GSN_REP_DROP_TABLE_REF 668
+#define GSN_REP_DROP_TABLE_CONF 669
+
+/*
+ * TUX
+ */
+#define GSN_TUXFRAGREQ 670
+#define GSN_TUXFRAGCONF 671
+#define GSN_TUXFRAGREF 672
+#define GSN_TUX_ADD_ATTRREQ 673
+#define GSN_TUX_ADD_ATTRCONF 674
+#define GSN_TUX_ADD_ATTRREF 675
+
+/*
+ * REP
+ */
+#define GSN_REP_DISCONNECT_REP 676
+
+#define GSN_TUX_MAINT_REQ 677
+#define GSN_TUX_MAINT_CONF 678
+#define GSN_TUX_MAINT_REF 679
+
+/* not used 680 */
+/* not used 681 */
+
+/**
+ * from mgmtsrvr to NDBCNTR
+ */
+#define GSN_RESUME_REQ 682
+#define GSN_STOP_REQ 443
+#define GSN_STOP_REF 444
+#define GSN_API_VERSION_REQ 697
+#define GSN_API_VERSION_CONF 698
+
+/* not used 686 */
+/* not used 687 */
+/* not used 689 */
+/* not used 690 */
+
+/**
+ * SUMA restart protocol
+ */
+#define GSN_SUMA_START_ME 691
+#define GSN_SUMA_HANDOVER_REQ 692
+#define GSN_SUMA_HANDOVER_CONF 693
+
+/* not used 694 */
+/* not used 695 */
+/* not used 696 */
+
+/**
+ * GREP restart protocol
+ */
+#define GSN_GREP_START_ME 706
+#define GSN_GREP_ADD_SUB_REQ 707
+#define GSN_GREP_ADD_SUB_REF 708
+#define GSN_GREP_ADD_SUB_CONF 709
+
+
+/*
+ * EVENT Signals
+ */
+#define GSN_SUB_GCP_COMPLETE_ACC 699
+
+#define GSN_CREATE_EVNT_REQ 700
+#define GSN_CREATE_EVNT_CONF 701
+#define GSN_CREATE_EVNT_REF 702
+
+#define GSN_DROP_EVNT_REQ 703
+#define GSN_DROP_EVNT_CONF 704
+#define GSN_DROP_EVNT_REF 705
+
+#define GSN_TUX_BOUND_INFO 710
+
+#define GSN_ACC_LOCKREQ 711
+#define GSN_READ_PSEUDO_REQ 712
+
+#endif
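
The header above only declares the GSN-to-name table; the entries themselves live in SignalNames.cpp and, as the NOTE says, must be kept in sync with MAX_GSN. The sketch below illustrates the kind of lookup those declarations support, using a tiny hand-written stand-in table (three entries copied from the defines above) rather than the real one:

#include <cstdio>

typedef unsigned short GlobalSignalNumber;   // stand-in for kernel_types.h

struct GsnName {
  GlobalSignalNumber gsn;
  const char * name;
};

// Tiny hand-written stand-in table; the real one is defined in
// SignalNames.cpp and must stay in sync with MAX_GSN.
static const GsnName SignalNames[] = {
  { 12,  "TCKEYREQ"  },
  { 316, "LQHKEYREQ" },
  { 436, "TUPKEYREQ" }
};
static const unsigned NO_OF_SIGNAL_NAMES =
  sizeof(SignalNames) / sizeof(SignalNames[0]);

// Linear lookup of a signal name by its GSN number.
static const char * gsnToName(GlobalSignalNumber gsn)
{
  for (unsigned i = 0; i < NO_OF_SIGNAL_NAMES; i++)
    if (SignalNames[i].gsn == gsn)
      return SignalNames[i].name;
  return "UNKNOWN";
}

int main()
{
  std::printf("GSN 316 = %s\n", gsnToName(316));   // prints LQHKEYREQ
  return 0;
}
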
diff --git a/ndb/include/kernel/GrepEvent.hpp b/storage/ndb/include/kernel/GrepEvent.hpp
index 2073a7072c9..2073a7072c9 100644
--- a/ndb/include/kernel/GrepEvent.hpp
+++ b/storage/ndb/include/kernel/GrepEvent.hpp
diff --git a/ndb/include/kernel/Interpreter.hpp b/storage/ndb/include/kernel/Interpreter.hpp
index 69c952ea7c3..69c952ea7c3 100644
--- a/ndb/include/kernel/Interpreter.hpp
+++ b/storage/ndb/include/kernel/Interpreter.hpp
diff --git a/ndb/include/kernel/LogLevel.hpp b/storage/ndb/include/kernel/LogLevel.hpp
index 60dcd36ab56..60dcd36ab56 100644
--- a/ndb/include/kernel/LogLevel.hpp
+++ b/storage/ndb/include/kernel/LogLevel.hpp
diff --git a/ndb/include/kernel/NodeBitmask.hpp b/storage/ndb/include/kernel/NodeBitmask.hpp
index 423c01cd841..423c01cd841 100644
--- a/ndb/include/kernel/NodeBitmask.hpp
+++ b/storage/ndb/include/kernel/NodeBitmask.hpp
diff --git a/ndb/include/kernel/NodeInfo.hpp b/storage/ndb/include/kernel/NodeInfo.hpp
index 5377f001949..5377f001949 100644
--- a/ndb/include/kernel/NodeInfo.hpp
+++ b/storage/ndb/include/kernel/NodeInfo.hpp
diff --git a/ndb/include/kernel/NodeState.hpp b/storage/ndb/include/kernel/NodeState.hpp
index 16784ecde79..16784ecde79 100644
--- a/ndb/include/kernel/NodeState.hpp
+++ b/storage/ndb/include/kernel/NodeState.hpp
diff --git a/ndb/include/kernel/RefConvert.hpp b/storage/ndb/include/kernel/RefConvert.hpp
index 7604b1cf224..7604b1cf224 100644
--- a/ndb/include/kernel/RefConvert.hpp
+++ b/storage/ndb/include/kernel/RefConvert.hpp
diff --git a/ndb/include/kernel/kernel_config_parameters.h b/storage/ndb/include/kernel/kernel_config_parameters.h
index bb7c6ebd42c..bb7c6ebd42c 100644
--- a/ndb/include/kernel/kernel_config_parameters.h
+++ b/storage/ndb/include/kernel/kernel_config_parameters.h
diff --git a/ndb/include/kernel/kernel_types.h b/storage/ndb/include/kernel/kernel_types.h
index b176d20798c..b176d20798c 100644
--- a/ndb/include/kernel/kernel_types.h
+++ b/storage/ndb/include/kernel/kernel_types.h
diff --git a/storage/ndb/include/kernel/ndb_limits.h b/storage/ndb/include/kernel/ndb_limits.h
new file mode 100644
index 00000000000..9baec7d69dc
--- /dev/null
+++ b/storage/ndb/include/kernel/ndb_limits.h
@@ -0,0 +1,133 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef NDB_LIMITS_H
+#define NDB_LIMITS_H
+
+#include <mysql.h>
+
+#define RNIL 0xffffff00
+
+/**
+ * Note that actual value = MAX_NODES - 1,
+ * since NodeId = 0 can not be used
+ */
+#define MAX_NDB_NODES 49
+#define MAX_NODES 64
+
+/**
+ * MAX_API_NODES = MAX_NODES - No of NDB Nodes in use
+ */
+
+/**
+ * The maximum number of replicas in the system
+ */
+#define MAX_REPLICAS 4
+
+/**
+ * The maximum number of local checkpoints stored at a time
+ */
+#define MAX_LCP_STORED 3
+
+/**
+ * The maximum number of log execution rounds at system restart
+ */
+#define MAX_LOG_EXEC 4
+
+/**
+ * The maximum number of tuples per page
+ **/
+#define MAX_TUPLES_PER_PAGE 8191
+#define MAX_TUPLES_BITS 13 /* 13 bits = 8191 tuples per page */
+#define MAX_TABLES 20320 /* SchemaFile.hpp */
+#define MAX_TAB_NAME_SIZE 128
+#define MAX_ATTR_NAME_SIZE NAME_LEN /* From mysql_com.h */
+#define MAX_ATTR_DEFAULT_VALUE_SIZE 128
+#define MAX_ATTRIBUTES_IN_TABLE 128
+#define MAX_ATTRIBUTES_IN_INDEX 32
+#define MAX_TUPLE_SIZE_IN_WORDS 2013
+#define MAX_KEY_SIZE_IN_WORDS 1023
+#define MAX_FRM_DATA_SIZE 6000
+#define MAX_NULL_BITS 4096
+#define MAX_FRAGMENT_DATA_BYTES (4+(2 * 8 * MAX_REPLICAS * MAX_NDB_NODES))
+#define MAX_NDB_PARTITIONS 1024
+
+#define MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE/24) + 1)
+/*
+ * Max Number of Records to fetch per SCAN_NEXTREQ in a scan in LQH. The
+ * API can order a multiple of this number of records at a time since
+ * fragments can be scanned in parallel.
+ */
+#define MAX_PARALLEL_OP_PER_SCAN 992
+/*
+* The default batch size. Configurable parameter.
+*/
+#define DEF_BATCH_SIZE 64
+/*
+* The number of records sent from LQH in each batch is calculated as
+* SCAN_BATCH_SIZE divided by the expected size of the signals per row.
+* This gives the batch size used for the scan. The NDB API will
+* receive one batch from each node at a time, so some care must also
+* be taken to ensure that the NDB API is not overloaded with signals.
+* This parameter is configurable; this is the default value.
+*/
+#define SCAN_BATCH_SIZE 32768
+/*
+* To protect the NDB API from overload we also define a maximum total
+* batch size from all nodes. This parameter should most likely be
+* configurable, or dependent on sendBufferSize.
+* This parameter is configurable; this is the default value.
+*/
+#define MAX_SCAN_BATCH_SIZE 262144
+/*
+ * Maximum number of Parallel Scan queries on one hash index fragment
+ */
+#define MAX_PARALLEL_SCANS_PER_FRAG 12
+/*
+ * Maximum parallel ordered index scans per primary table fragment.
+ * Implementation limit is (256 minus 12).
+ */
+#define MAX_PARALLEL_INDEX_SCANS_PER_FRAG 32
+
+/**
+ * Computed defines
+ */
+#define MAXNROFATTRIBUTESINWORDS (MAX_ATTRIBUTES_IN_TABLE / 32)
+
+/*
+ * Ordered index constants. Make configurable per index later.
+ */
+#define MAX_TTREE_NODE_SIZE 64 /* total words in node */
+#define MAX_TTREE_PREF_SIZE 4 /* words in min prefix */
+#define MAX_TTREE_NODE_SLACK 2 /* diff between max and min occupancy */
+
+/*
+ * Blobs.
+ */
+#define NDB_BLOB_HEAD_SIZE 2 /* sizeof(NdbBlob::Head) >> 2 */
+
+/*
+ * Character sets.
+ */
+#define MAX_XFRM_MULTIPLY 8 /* max expansion when normalizing */
+
+/*
+ * Long signals
+ */
+#define NDB_SECTION_SEGMENT_SZ 60
+
+#endif
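
The batch-size comments above describe an arithmetic relationship rather than showing it: a per-fragment byte budget (SCAN_BATCH_SIZE) divided by the expected signal bytes per row, capped by MAX_PARALLEL_OP_PER_SCAN, with MAX_SCAN_BATCH_SIZE bounding the total from all nodes. A rough stand-alone sketch of that calculation follows, assuming a hypothetical recordsPerBatch helper and a 100-byte-per-row estimate; the kernel's actual formula is not reproduced here:

#include <algorithm>
#include <cstdio>

// Defaults copied from ndb_limits.h above.
static const unsigned MAX_PARALLEL_OP_PER_SCAN = 992;
static const unsigned SCAN_BATCH_SIZE          = 32768;   // bytes per fragment batch
static const unsigned MAX_SCAN_BATCH_SIZE      = 262144;  // bytes from all nodes

// Hypothetical helper: per-fragment record count derived the way the
// comment describes - the byte budget divided by the expected bytes of
// signal data per row, clamped to the hard per-scan operation limit.
static unsigned recordsPerBatch(unsigned expectedBytesPerRow)
{
  unsigned records = SCAN_BATCH_SIZE / expectedBytesPerRow;
  return std::min(records, MAX_PARALLEL_OP_PER_SCAN);
}

int main()
{
  unsigned perFrag = recordsPerBatch(100);   // assume ~100 bytes of signals per row
  unsigned batches = MAX_SCAN_BATCH_SIZE / SCAN_BATCH_SIZE;   // full batches in flight
  std::printf("records per fragment batch: %u, concurrent full batches: %u\n",
              perFrag, batches);
  return 0;
}
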
diff --git a/ndb/include/kernel/signaldata/AbortAll.hpp b/storage/ndb/include/kernel/signaldata/AbortAll.hpp
index a3d7f483953..a3d7f483953 100644
--- a/ndb/include/kernel/signaldata/AbortAll.hpp
+++ b/storage/ndb/include/kernel/signaldata/AbortAll.hpp
diff --git a/ndb/include/kernel/signaldata/AccFrag.hpp b/storage/ndb/include/kernel/signaldata/AccFrag.hpp
index e28ab0d1ee6..e28ab0d1ee6 100644
--- a/ndb/include/kernel/signaldata/AccFrag.hpp
+++ b/storage/ndb/include/kernel/signaldata/AccFrag.hpp
diff --git a/ndb/include/kernel/signaldata/AccLock.hpp b/storage/ndb/include/kernel/signaldata/AccLock.hpp
index 1a41b4c9334..1a41b4c9334 100644
--- a/ndb/include/kernel/signaldata/AccLock.hpp
+++ b/storage/ndb/include/kernel/signaldata/AccLock.hpp
diff --git a/ndb/include/kernel/signaldata/AccScan.hpp b/storage/ndb/include/kernel/signaldata/AccScan.hpp
index d94d4da8cca..d94d4da8cca 100644
--- a/ndb/include/kernel/signaldata/AccScan.hpp
+++ b/storage/ndb/include/kernel/signaldata/AccScan.hpp
diff --git a/ndb/include/kernel/signaldata/AccSizeAltReq.hpp b/storage/ndb/include/kernel/signaldata/AccSizeAltReq.hpp
index ac348444826..ac348444826 100644
--- a/ndb/include/kernel/signaldata/AccSizeAltReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/AccSizeAltReq.hpp
diff --git a/ndb/include/kernel/signaldata/AlterIndx.hpp b/storage/ndb/include/kernel/signaldata/AlterIndx.hpp
index f5ad835b6f3..f5ad835b6f3 100644
--- a/ndb/include/kernel/signaldata/AlterIndx.hpp
+++ b/storage/ndb/include/kernel/signaldata/AlterIndx.hpp
diff --git a/ndb/include/kernel/signaldata/AlterTab.hpp b/storage/ndb/include/kernel/signaldata/AlterTab.hpp
index 02d4eb95d2e..02d4eb95d2e 100644
--- a/ndb/include/kernel/signaldata/AlterTab.hpp
+++ b/storage/ndb/include/kernel/signaldata/AlterTab.hpp
diff --git a/ndb/include/kernel/signaldata/AlterTable.hpp b/storage/ndb/include/kernel/signaldata/AlterTable.hpp
index 30f8727551d..30f8727551d 100644
--- a/ndb/include/kernel/signaldata/AlterTable.hpp
+++ b/storage/ndb/include/kernel/signaldata/AlterTable.hpp
diff --git a/ndb/include/kernel/signaldata/AlterTrig.hpp b/storage/ndb/include/kernel/signaldata/AlterTrig.hpp
index a97c1fd0196..a97c1fd0196 100644
--- a/ndb/include/kernel/signaldata/AlterTrig.hpp
+++ b/storage/ndb/include/kernel/signaldata/AlterTrig.hpp
diff --git a/ndb/include/kernel/signaldata/ApiRegSignalData.hpp b/storage/ndb/include/kernel/signaldata/ApiRegSignalData.hpp
index 84dca8fb260..84dca8fb260 100644
--- a/ndb/include/kernel/signaldata/ApiRegSignalData.hpp
+++ b/storage/ndb/include/kernel/signaldata/ApiRegSignalData.hpp
diff --git a/ndb/include/kernel/signaldata/ApiVersion.hpp b/storage/ndb/include/kernel/signaldata/ApiVersion.hpp
index 28281e7d186..28281e7d186 100644
--- a/ndb/include/kernel/signaldata/ApiVersion.hpp
+++ b/storage/ndb/include/kernel/signaldata/ApiVersion.hpp
diff --git a/ndb/include/kernel/signaldata/ArbitSignalData.hpp b/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp
index 34b73644a13..34b73644a13 100644
--- a/ndb/include/kernel/signaldata/ArbitSignalData.hpp
+++ b/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp
diff --git a/ndb/include/kernel/signaldata/AttrInfo.hpp b/storage/ndb/include/kernel/signaldata/AttrInfo.hpp
index c87470db8b0..c87470db8b0 100644
--- a/ndb/include/kernel/signaldata/AttrInfo.hpp
+++ b/storage/ndb/include/kernel/signaldata/AttrInfo.hpp
diff --git a/ndb/include/kernel/signaldata/BackupContinueB.hpp b/storage/ndb/include/kernel/signaldata/BackupContinueB.hpp
index d3d3f79f310..d3d3f79f310 100644
--- a/ndb/include/kernel/signaldata/BackupContinueB.hpp
+++ b/storage/ndb/include/kernel/signaldata/BackupContinueB.hpp
diff --git a/ndb/include/kernel/signaldata/BackupImpl.hpp b/storage/ndb/include/kernel/signaldata/BackupImpl.hpp
index 2032e2347b5..2032e2347b5 100644
--- a/ndb/include/kernel/signaldata/BackupImpl.hpp
+++ b/storage/ndb/include/kernel/signaldata/BackupImpl.hpp
diff --git a/ndb/include/kernel/signaldata/BackupSignalData.hpp b/storage/ndb/include/kernel/signaldata/BackupSignalData.hpp
index b38dd8d14b2..b38dd8d14b2 100644
--- a/ndb/include/kernel/signaldata/BackupSignalData.hpp
+++ b/storage/ndb/include/kernel/signaldata/BackupSignalData.hpp
diff --git a/ndb/include/kernel/signaldata/BlockCommitOrd.hpp b/storage/ndb/include/kernel/signaldata/BlockCommitOrd.hpp
index 3b33dceb758..3b33dceb758 100644
--- a/ndb/include/kernel/signaldata/BlockCommitOrd.hpp
+++ b/storage/ndb/include/kernel/signaldata/BlockCommitOrd.hpp
diff --git a/ndb/include/kernel/signaldata/BuildIndx.hpp b/storage/ndb/include/kernel/signaldata/BuildIndx.hpp
index a6ea84c5ea0..a6ea84c5ea0 100644
--- a/ndb/include/kernel/signaldata/BuildIndx.hpp
+++ b/storage/ndb/include/kernel/signaldata/BuildIndx.hpp
diff --git a/ndb/include/kernel/signaldata/CheckNodeGroups.hpp b/storage/ndb/include/kernel/signaldata/CheckNodeGroups.hpp
index b3e79949c68..b3e79949c68 100644
--- a/ndb/include/kernel/signaldata/CheckNodeGroups.hpp
+++ b/storage/ndb/include/kernel/signaldata/CheckNodeGroups.hpp
diff --git a/ndb/include/kernel/signaldata/CloseComReqConf.hpp b/storage/ndb/include/kernel/signaldata/CloseComReqConf.hpp
index 3d3dc54ba64..3d3dc54ba64 100644
--- a/ndb/include/kernel/signaldata/CloseComReqConf.hpp
+++ b/storage/ndb/include/kernel/signaldata/CloseComReqConf.hpp
diff --git a/ndb/include/kernel/signaldata/CmInit.hpp b/storage/ndb/include/kernel/signaldata/CmInit.hpp
index b59547b767b..b59547b767b 100644
--- a/ndb/include/kernel/signaldata/CmInit.hpp
+++ b/storage/ndb/include/kernel/signaldata/CmInit.hpp
diff --git a/ndb/include/kernel/signaldata/CmRegSignalData.hpp b/storage/ndb/include/kernel/signaldata/CmRegSignalData.hpp
index f33c991249f..f33c991249f 100644
--- a/ndb/include/kernel/signaldata/CmRegSignalData.hpp
+++ b/storage/ndb/include/kernel/signaldata/CmRegSignalData.hpp
diff --git a/ndb/include/kernel/signaldata/CmvmiCfgConf.hpp b/storage/ndb/include/kernel/signaldata/CmvmiCfgConf.hpp
index 12b785723d9..12b785723d9 100644
--- a/ndb/include/kernel/signaldata/CmvmiCfgConf.hpp
+++ b/storage/ndb/include/kernel/signaldata/CmvmiCfgConf.hpp
diff --git a/ndb/include/kernel/signaldata/CntrMasterConf.hpp b/storage/ndb/include/kernel/signaldata/CntrMasterConf.hpp
index e6bf363ea68..e6bf363ea68 100644
--- a/ndb/include/kernel/signaldata/CntrMasterConf.hpp
+++ b/storage/ndb/include/kernel/signaldata/CntrMasterConf.hpp
diff --git a/ndb/include/kernel/signaldata/CntrMasterReq.hpp b/storage/ndb/include/kernel/signaldata/CntrMasterReq.hpp
index caf9efb1243..caf9efb1243 100644
--- a/ndb/include/kernel/signaldata/CntrMasterReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/CntrMasterReq.hpp
diff --git a/ndb/include/kernel/signaldata/CntrStart.hpp b/storage/ndb/include/kernel/signaldata/CntrStart.hpp
index abdd1003c0f..abdd1003c0f 100644
--- a/ndb/include/kernel/signaldata/CntrStart.hpp
+++ b/storage/ndb/include/kernel/signaldata/CntrStart.hpp
diff --git a/ndb/include/kernel/signaldata/ConfigParamId.hpp b/storage/ndb/include/kernel/signaldata/ConfigParamId.hpp
index 9d9e04957ab..9d9e04957ab 100644
--- a/ndb/include/kernel/signaldata/ConfigParamId.hpp
+++ b/storage/ndb/include/kernel/signaldata/ConfigParamId.hpp
diff --git a/ndb/include/kernel/signaldata/ContinueFragmented.hpp b/storage/ndb/include/kernel/signaldata/ContinueFragmented.hpp
index 3d12b9e51eb..3d12b9e51eb 100644
--- a/ndb/include/kernel/signaldata/ContinueFragmented.hpp
+++ b/storage/ndb/include/kernel/signaldata/ContinueFragmented.hpp
diff --git a/ndb/include/kernel/signaldata/CopyActive.hpp b/storage/ndb/include/kernel/signaldata/CopyActive.hpp
index 19b05bda072..19b05bda072 100644
--- a/ndb/include/kernel/signaldata/CopyActive.hpp
+++ b/storage/ndb/include/kernel/signaldata/CopyActive.hpp
diff --git a/ndb/include/kernel/signaldata/CopyFrag.hpp b/storage/ndb/include/kernel/signaldata/CopyFrag.hpp
index 67b935dda64..67b935dda64 100644
--- a/ndb/include/kernel/signaldata/CopyFrag.hpp
+++ b/storage/ndb/include/kernel/signaldata/CopyFrag.hpp
diff --git a/ndb/include/kernel/signaldata/CopyGCIReq.hpp b/storage/ndb/include/kernel/signaldata/CopyGCIReq.hpp
index 4b401654de3..4b401654de3 100644
--- a/ndb/include/kernel/signaldata/CopyGCIReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/CopyGCIReq.hpp
diff --git a/ndb/include/kernel/signaldata/CreateEvnt.hpp b/storage/ndb/include/kernel/signaldata/CreateEvnt.hpp
index 8712ce8890c..8712ce8890c 100644
--- a/ndb/include/kernel/signaldata/CreateEvnt.hpp
+++ b/storage/ndb/include/kernel/signaldata/CreateEvnt.hpp
diff --git a/ndb/include/kernel/signaldata/CreateFrag.hpp b/storage/ndb/include/kernel/signaldata/CreateFrag.hpp
index a7b3f836353..a7b3f836353 100644
--- a/ndb/include/kernel/signaldata/CreateFrag.hpp
+++ b/storage/ndb/include/kernel/signaldata/CreateFrag.hpp
diff --git a/storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp b/storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp
new file mode 100644
index 00000000000..04638b81b99
--- /dev/null
+++ b/storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp
@@ -0,0 +1,101 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef CREATE_FRAGMENTATION_REQ_HPP
+#define CREATE_FRAGMENTATION_REQ_HPP
+
+#include "SignalData.hpp"
+
+class CreateFragmentationReq {
+ /**
+ * Sender(s)
+ */
+ friend class Dbdict;
+
+ /**
+ * Receiver(s)
+ */
+ friend class Dbdih;
+
+ friend bool printCREATE_FRAGMENTATION_REQ(FILE *,
+ const Uint32 *, Uint32, Uint16);
+public:
+ STATIC_CONST( SignalLength = 5 );
+
+private:
+ Uint32 senderRef;
+ Uint32 senderData;
+ Uint32 fragmentationType;
+ Uint32 noOfFragments;
+ Uint32 primaryTableId; // use same fragmentation as this table if not RNIL
+};
+
+class CreateFragmentationRef {
+ /**
+ * Sender(s)
+ */
+ friend class Dbdih;
+
+ /**
+ * Receiver(s)
+ */
+ friend class Dbdict;
+
+ friend bool printCREATE_FRAGMENTATION_REF(FILE *,
+ const Uint32 *, Uint32, Uint16);
+public:
+ STATIC_CONST( SignalLength = 3 );
+
+ enum ErrorCode {
+ OK = 0
+ ,InvalidFragmentationType = 1301
+ ,InvalidNodeId = 1302
+ ,InvalidNodeType = 1303
+ ,InvalidPrimaryTable = 1304
+ ,InvalidNodeGroup = 1305
+ };
+
+private:
+ Uint32 senderRef;
+ Uint32 senderData;
+ Uint32 errorCode;
+};
+
+class CreateFragmentationConf {
+ /**
+ * Sender(s)
+ */
+ friend class Dbdih;
+
+ /**
+ * Receiver(s)
+ */
+ friend class Dbdict;
+
+ friend bool printCREATE_FRAGMENTATION_CONF(FILE *,
+ const Uint32 *, Uint32, Uint16);
+public:
+ STATIC_CONST( SignalLength = 4 );
+ SECTION( FRAGMENTS = 0 );
+
+private:
+ Uint32 senderRef;
+ Uint32 senderData;
+ Uint32 noOfReplicas;
+ Uint32 noOfFragments;
+};
+
+#endif
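
CreateFragmentationReq/Ref/Conf above follow the usual REQ/REF/CONF signal pattern: fixed-length word payloads whose layout is given by the private members, with the REF carrying one of the listed error codes. The small sketch below decodes a REF payload on the receiving side; it is illustrative only (plain C++, hypothetical refErrorText helper, not kernel code):

#include <cstdio>

typedef unsigned int Uint32;   // stand-in for the NDB typedef

// Word layout of CreateFragmentationRef as declared above:
// [0] senderRef, [1] senderData, [2] errorCode (SignalLength == 3).
static const char * refErrorText(Uint32 code)
{
  switch (code) {
  case 0:    return "OK";
  case 1301: return "InvalidFragmentationType";
  case 1302: return "InvalidNodeId";
  case 1303: return "InvalidNodeType";
  case 1304: return "InvalidPrimaryTable";
  case 1305: return "InvalidNodeGroup";
  default:   return "unknown error code";
  }
}

int main()
{
  // A hypothetical received REF payload: three 32-bit words.
  Uint32 theData[3] = { 0x00fa0001, 42, 1301 };
  std::printf("senderData=%u: %s\n",
              (unsigned)theData[1], refErrorText(theData[2]));
  return 0;
}
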
diff --git a/ndb/include/kernel/signaldata/CreateIndx.hpp b/storage/ndb/include/kernel/signaldata/CreateIndx.hpp
index a9dc653f349..a9dc653f349 100644
--- a/ndb/include/kernel/signaldata/CreateIndx.hpp
+++ b/storage/ndb/include/kernel/signaldata/CreateIndx.hpp
diff --git a/ndb/include/kernel/signaldata/CreateTab.hpp b/storage/ndb/include/kernel/signaldata/CreateTab.hpp
index b2ef52a6bf7..b2ef52a6bf7 100644
--- a/ndb/include/kernel/signaldata/CreateTab.hpp
+++ b/storage/ndb/include/kernel/signaldata/CreateTab.hpp
diff --git a/ndb/include/kernel/signaldata/CreateTable.hpp b/storage/ndb/include/kernel/signaldata/CreateTable.hpp
index 481b323fdb0..481b323fdb0 100644
--- a/ndb/include/kernel/signaldata/CreateTable.hpp
+++ b/storage/ndb/include/kernel/signaldata/CreateTable.hpp
diff --git a/ndb/include/kernel/signaldata/CreateTrig.hpp b/storage/ndb/include/kernel/signaldata/CreateTrig.hpp
index 62627256dcf..62627256dcf 100644
--- a/ndb/include/kernel/signaldata/CreateTrig.hpp
+++ b/storage/ndb/include/kernel/signaldata/CreateTrig.hpp
diff --git a/ndb/include/kernel/signaldata/DiAddTab.hpp b/storage/ndb/include/kernel/signaldata/DiAddTab.hpp
index 6b17515eb6f..6b17515eb6f 100644
--- a/ndb/include/kernel/signaldata/DiAddTab.hpp
+++ b/storage/ndb/include/kernel/signaldata/DiAddTab.hpp
diff --git a/ndb/include/kernel/signaldata/DiGetNodes.hpp b/storage/ndb/include/kernel/signaldata/DiGetNodes.hpp
index 05ab6bfebb3..05ab6bfebb3 100644
--- a/ndb/include/kernel/signaldata/DiGetNodes.hpp
+++ b/storage/ndb/include/kernel/signaldata/DiGetNodes.hpp
diff --git a/ndb/include/kernel/signaldata/DictSchemaInfo.hpp b/storage/ndb/include/kernel/signaldata/DictSchemaInfo.hpp
index c15dcf2fd7a..c15dcf2fd7a 100644
--- a/ndb/include/kernel/signaldata/DictSchemaInfo.hpp
+++ b/storage/ndb/include/kernel/signaldata/DictSchemaInfo.hpp
diff --git a/ndb/include/kernel/signaldata/DictSizeAltReq.hpp b/storage/ndb/include/kernel/signaldata/DictSizeAltReq.hpp
index b40f0c8c1af..b40f0c8c1af 100644
--- a/ndb/include/kernel/signaldata/DictSizeAltReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/DictSizeAltReq.hpp
diff --git a/ndb/include/kernel/signaldata/DictStart.hpp b/storage/ndb/include/kernel/signaldata/DictStart.hpp
index 59310601f48..59310601f48 100644
--- a/ndb/include/kernel/signaldata/DictStart.hpp
+++ b/storage/ndb/include/kernel/signaldata/DictStart.hpp
diff --git a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
new file mode 100644
index 00000000000..274261583a4
--- /dev/null
+++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
@@ -0,0 +1,520 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DICT_TAB_INFO_HPP
+#define DICT_TAB_INFO_HPP
+
+#include "SignalData.hpp"
+#include <AttributeDescriptor.hpp>
+#include <SimpleProperties.hpp>
+#include <ndb_limits.h>
+#include <trigger_definitions.h>
+#include <NdbSqlUtil.hpp>
+
+#ifndef my_decimal_h
+
+// sql/my_decimal.h requires many more sql/*.h headers that are new to ndb;
+// for now, copy the bit we need. TODO: proper fix
+
+#define DECIMAL_MAX_LENGTH ((8 * 9) - 8)
+
+#ifndef NOT_FIXED_DEC
+#define NOT_FIXED_DEC 31
+#endif
+
+C_MODE_START
+extern int decimal_bin_size(int, int);
+C_MODE_END
+
+inline int my_decimal_get_binary_size(uint precision, uint scale)
+{
+ return decimal_bin_size((int)precision, (int)scale);
+}
+
+#endif
+
+#define DTIMAP(x, y, z) \
+ { DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 }
+
+#define DTIMAP2(x, y, z, u, v) \
+ { DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 }
+
+#define DTIMAPS(x, y, z, u, v) \
+ { DictTabInfo::y, offsetof(x, z), SimpleProperties::StringValue, u, v, 0 }
+
+#define DTIMAPB(x, y, z, u, v, l) \
+ { DictTabInfo::y, offsetof(x, z), SimpleProperties::BinaryValue, u, v, \
+ offsetof(x, l) }
+
+#define DTIBREAK(x) \
+ { DictTabInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 }
+
+class DictTabInfo {
+ /**
+ * Sender(s) / Receiver(s)
+ */
+ // Blocks
+ friend class Backup;
+ friend class Dbdict;
+ friend class Ndbcntr;
+ friend class Trix;
+ friend class DbUtil;
+ // API
+ friend class NdbSchemaOp;
+
+ /**
+ * For printing
+ */
+ friend bool printDICTTABINFO(FILE * output,
+ const Uint32 * theData,
+ Uint32 len,
+ Uint16 receiverBlockNo);
+
+public:
+ enum RequestType {
+ CreateTableFromAPI = 1,
+ AddTableFromDict = 2, // Between DICT's
+ CopyTable = 3, // Between DICT's
+ ReadTableFromDiskSR = 4, // Local in DICT
+ GetTabInfoConf = 5,
+ AlterTableFromAPI = 6
+ };
+
+ enum KeyValues {
+ TableName = 1, // String, Mandatory
+ TableId = 2, //Mandatory between DICT's otherwise not allowed
+ TableVersion = 3, //Mandatory between DICT's otherwise not allowed
+ TableLoggedFlag = 4, //Default Logged
+ NoOfKeyAttr = 5, //Default 1
+ NoOfAttributes = 6, //Mandatory
+ NoOfNullable = 7, //Default 0
+ NoOfVariable = 8, //Default 0
+ TableKValue = 9, //Default 6
+ MinLoadFactor = 10, //Default 70
+ MaxLoadFactor = 11, //Default 80
+ KeyLength = 12, //Default 1 (No of words in primary key)
+ FragmentTypeVal = 13, //Default AllNodesSmallTable
+ TableTypeVal = 18, //Default TableType::UserTable
+ PrimaryTable = 19, //Mandatory for index otherwise RNIL
+ PrimaryTableId = 20, //ditto
+ IndexState = 21,
+ InsertTriggerId = 22,
+ UpdateTriggerId = 23,
+ DeleteTriggerId = 24,
+ CustomTriggerId = 25,
+ FrmLen = 26,
+ FrmData = 27,
+ FragmentCount = 128, // No of fragments in table (not fragment replicas)
+ FragmentDataLen = 129,
+ FragmentData = 130, // CREATE_FRAGMENTATION reply
+ TableEnd = 999,
+
+ AttributeName = 1000, // String, Mandatory
+ AttributeId = 1001, //Mandatory between DICT's otherwise not allowed
+ AttributeType = 1002, //for osu 4.1->5.0.x
+ AttributeSize = 1003, //Default DictTabInfo::a32Bit
+ AttributeArraySize = 1005, //Default 1
+ AttributeKeyFlag = 1006, //Default noKey
+ AttributeStorage = 1007, //Default MainMemory
+ AttributeNullableFlag = 1008, //Default NotNullable
+ AttributeDKey = 1010, //Default NotDKey
+ AttributeExtType = 1013, //Default ExtUnsigned
+ AttributeExtPrecision = 1014, //Default 0
+ AttributeExtScale = 1015, //Default 0
+ AttributeExtLength = 1016, //Default 0
+ AttributeAutoIncrement = 1017, //Default false
+ AttributeDefaultValue = 1018, //Default value (printable string)
+ AttributeEnd = 1999 //
+ };
+ // ----------------------------------------------------------------------
+ // Part of the protocol is that we only transfer parameters which do not
+ // have a default value. Thus the default values are part of the protocol.
+ // ----------------------------------------------------------------------
+
+
+
+ // FragmentType constants
+ enum FragmentType {
+ AllNodesSmallTable = 0,
+ AllNodesMediumTable = 1,
+ AllNodesLargeTable = 2,
+ SingleFragment = 3,
+ DistrKeyHash = 4,
+ DistrKeyLin = 5,
+ UserDefined = 6,
+ DistrKeyUniqueHashIndex = 7,
+ DistrKeyOrderedIndex = 8
+ };
+
+ // TableType constants + objects
+ enum TableType {
+ UndefTableType = 0,
+ SystemTable = 1,
+ UserTable = 2,
+ UniqueHashIndex = 3,
+ HashIndex = 4,
+ UniqueOrderedIndex = 5,
+ OrderedIndex = 6,
+ // constant 10 hardcoded in Dbdict.cpp
+ HashIndexTrigger = 10 + TriggerType::SECONDARY_INDEX,
+ SubscriptionTrigger = 10 + TriggerType::SUBSCRIPTION,
+ ReadOnlyConstraint = 10 + TriggerType::READ_ONLY_CONSTRAINT,
+ IndexTrigger = 10 + TriggerType::ORDERED_INDEX
+ };
+ static inline bool
+ isTable(int tableType) {
+ return
+ tableType == SystemTable ||
+ tableType == UserTable;
+ }
+ static inline bool
+ isIndex(int tableType) {
+ return
+ tableType == UniqueHashIndex ||
+ tableType == HashIndex ||
+ tableType == UniqueOrderedIndex ||
+ tableType == OrderedIndex;
+ }
+ static inline bool
+ isUniqueIndex(int tableType) {
+ return
+ tableType == UniqueHashIndex ||
+ tableType == UniqueOrderedIndex;
+ }
+ static inline bool
+ isNonUniqueIndex(int tableType) {
+ return
+ tableType == HashIndex ||
+ tableType == OrderedIndex;
+ }
+ static inline bool
+ isHashIndex(int tableType) {
+ return
+ tableType == UniqueHashIndex ||
+ tableType == HashIndex;
+ }
+ static inline bool
+ isOrderedIndex(int tableType) {
+ return
+ tableType == UniqueOrderedIndex ||
+ tableType == OrderedIndex;
+ }
+
+ // Object state for translating from/to API
+ enum ObjectState {
+ StateUndefined = 0,
+ StateOffline = 1,
+ StateBuilding = 2,
+ StateDropping = 3,
+ StateOnline = 4,
+ StateBroken = 9
+ };
+
+ // Object store for translating from/to API
+ enum ObjectStore {
+ StoreUndefined = 0,
+ StoreTemporary = 1,
+ StorePermanent = 2
+ };
+
+ // AttributeSize constants
+ STATIC_CONST( aBit = 0 );
+ STATIC_CONST( an8Bit = 3 );
+ STATIC_CONST( a16Bit = 4 );
+ STATIC_CONST( a32Bit = 5 );
+ STATIC_CONST( a64Bit = 6 );
+ STATIC_CONST( a128Bit = 7 );
+
+ // Table data interpretation
+ struct Table {
+ char TableName[MAX_TAB_NAME_SIZE];
+ Uint32 TableId;
+ char PrimaryTable[MAX_TAB_NAME_SIZE]; // Only used when "index"
+ Uint32 PrimaryTableId;
+ Uint32 TableLoggedFlag;
+ Uint32 NoOfKeyAttr;
+ Uint32 NoOfAttributes;
+ Uint32 NoOfNullable;
+ Uint32 NoOfVariable;
+ Uint32 TableKValue;
+ Uint32 MinLoadFactor;
+ Uint32 MaxLoadFactor;
+ Uint32 KeyLength;
+ Uint32 FragmentType;
+ Uint32 TableStorage;
+ Uint32 TableType;
+ Uint32 TableVersion;
+ Uint32 IndexState;
+ Uint32 InsertTriggerId;
+ Uint32 UpdateTriggerId;
+ Uint32 DeleteTriggerId;
+ Uint32 CustomTriggerId;
+ Uint32 FrmLen;
+ char FrmData[MAX_FRM_DATA_SIZE];
+ Uint32 FragmentCount;
+ Uint32 FragmentDataLen;
+ Uint16 FragmentData[(MAX_FRAGMENT_DATA_BYTES+1)/2];
+
+ void init();
+ };
+
+ static const
+ SimpleProperties::SP2StructMapping TableMapping[];
+
+ static const Uint32 TableMappingSize;
+
+ // AttributeExtType values
+ enum ExtType {
+ ExtUndefined = NdbSqlUtil::Type::Undefined,
+ ExtTinyint = NdbSqlUtil::Type::Tinyint,
+ ExtTinyunsigned = NdbSqlUtil::Type::Tinyunsigned,
+ ExtSmallint = NdbSqlUtil::Type::Smallint,
+ ExtSmallunsigned = NdbSqlUtil::Type::Smallunsigned,
+ ExtMediumint = NdbSqlUtil::Type::Mediumint,
+ ExtMediumunsigned = NdbSqlUtil::Type::Mediumunsigned,
+ ExtInt = NdbSqlUtil::Type::Int,
+ ExtUnsigned = NdbSqlUtil::Type::Unsigned,
+ ExtBigint = NdbSqlUtil::Type::Bigint,
+ ExtBigunsigned = NdbSqlUtil::Type::Bigunsigned,
+ ExtFloat = NdbSqlUtil::Type::Float,
+ ExtDouble = NdbSqlUtil::Type::Double,
+ ExtOlddecimal = NdbSqlUtil::Type::Olddecimal,
+ ExtOlddecimalunsigned = NdbSqlUtil::Type::Olddecimalunsigned,
+ ExtDecimal = NdbSqlUtil::Type::Decimal,
+ ExtDecimalunsigned = NdbSqlUtil::Type::Decimalunsigned,
+ ExtChar = NdbSqlUtil::Type::Char,
+ ExtVarchar = NdbSqlUtil::Type::Varchar,
+ ExtBinary = NdbSqlUtil::Type::Binary,
+ ExtVarbinary = NdbSqlUtil::Type::Varbinary,
+ ExtDatetime = NdbSqlUtil::Type::Datetime,
+ ExtDate = NdbSqlUtil::Type::Date,
+ ExtBlob = NdbSqlUtil::Type::Blob,
+ ExtText = NdbSqlUtil::Type::Text,
+ ExtBit = NdbSqlUtil::Type::Bit,
+ ExtLongvarchar = NdbSqlUtil::Type::Longvarchar,
+ ExtLongvarbinary = NdbSqlUtil::Type::Longvarbinary,
+ ExtTime = NdbSqlUtil::Type::Time,
+ ExtYear = NdbSqlUtil::Type::Year,
+ ExtTimestamp = NdbSqlUtil::Type::Timestamp
+ };
+
+ // Attribute data interpretation
+ struct Attribute {
+ char AttributeName[MAX_TAB_NAME_SIZE];
+ Uint32 AttributeId;
+ Uint32 AttributeType; // for osu 4.1->5.0.x
+ Uint32 AttributeSize;
+ Uint32 AttributeArraySize;
+ Uint32 AttributeKeyFlag;
+ Uint32 AttributeNullableFlag;
+ Uint32 AttributeDKey;
+ Uint32 AttributeExtType;
+ Uint32 AttributeExtPrecision;
+ Uint32 AttributeExtScale;
+ Uint32 AttributeExtLength;
+ Uint32 AttributeAutoIncrement;
+ char AttributeDefaultValue[MAX_ATTR_DEFAULT_VALUE_SIZE];
+
+ void init();
+
+ inline
+ Uint32 sizeInWords()
+ {
+ return ((1 << AttributeSize) * AttributeArraySize + 31) >> 5;
+ }
+
+ // compute old-style attribute size and array size
+ inline bool
+ translateExtType() {
+ switch (AttributeExtType) {
+ case DictTabInfo::ExtUndefined:
+ return false;
+ case DictTabInfo::ExtTinyint:
+ case DictTabInfo::ExtTinyunsigned:
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize = AttributeExtLength;
+ break;
+ case DictTabInfo::ExtSmallint:
+ case DictTabInfo::ExtSmallunsigned:
+ AttributeSize = DictTabInfo::a16Bit;
+ AttributeArraySize = AttributeExtLength;
+ break;
+ case DictTabInfo::ExtMediumint:
+ case DictTabInfo::ExtMediumunsigned:
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize = 3 * AttributeExtLength;
+ break;
+ case DictTabInfo::ExtInt:
+ case DictTabInfo::ExtUnsigned:
+ AttributeSize = DictTabInfo::a32Bit;
+ AttributeArraySize = AttributeExtLength;
+ break;
+ case DictTabInfo::ExtBigint:
+ case DictTabInfo::ExtBigunsigned:
+ AttributeSize = DictTabInfo::a64Bit;
+ AttributeArraySize = AttributeExtLength;
+ break;
+ case DictTabInfo::ExtFloat:
+ AttributeSize = DictTabInfo::a32Bit;
+ AttributeArraySize = AttributeExtLength;
+ break;
+ case DictTabInfo::ExtDouble:
+ AttributeSize = DictTabInfo::a64Bit;
+ AttributeArraySize = AttributeExtLength;
+ break;
+ case DictTabInfo::ExtOlddecimal:
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize =
+ (1 + AttributeExtPrecision + (int(AttributeExtScale) > 0)) *
+ AttributeExtLength;
+ break;
+ case DictTabInfo::ExtOlddecimalunsigned:
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize =
+ (0 + AttributeExtPrecision + (int(AttributeExtScale) > 0)) *
+ AttributeExtLength;
+ break;
+ case DictTabInfo::ExtDecimal:
+ case DictTabInfo::ExtDecimalunsigned:
+ {
+ // copy from Field_new_decimal ctor
+ uint precision = AttributeExtPrecision;
+ uint scale = AttributeExtScale;
+ if (precision > DECIMAL_MAX_LENGTH || scale >= NOT_FIXED_DEC)
+ precision = DECIMAL_MAX_LENGTH;
+ uint bin_size = my_decimal_get_binary_size(precision, scale);
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize = bin_size * AttributeExtLength;
+ }
+ break;
+ case DictTabInfo::ExtChar:
+ case DictTabInfo::ExtBinary:
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize = AttributeExtLength;
+ break;
+ case DictTabInfo::ExtVarchar:
+ case DictTabInfo::ExtVarbinary:
+ if (AttributeExtLength > 0xff)
+ return false;
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize = AttributeExtLength + 1;
+ break;
+ case DictTabInfo::ExtDatetime:
+ // to fix
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize = 8 * AttributeExtLength;
+ break;
+ case DictTabInfo::ExtDate:
+ // to fix
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize = 3 * AttributeExtLength;
+ break;
+ case DictTabInfo::ExtBlob:
+ case DictTabInfo::ExtText:
+ AttributeSize = DictTabInfo::an8Bit;
+ // head + inline part (length in precision lower half)
+ AttributeArraySize = (NDB_BLOB_HEAD_SIZE << 2) + (AttributeExtPrecision & 0xFFFF);
+ break;
+ case DictTabInfo::ExtBit:
+ AttributeSize = DictTabInfo::aBit;
+ AttributeArraySize = AttributeExtLength;
+ break;
+ case DictTabInfo::ExtLongvarchar:
+ case DictTabInfo::ExtLongvarbinary:
+ if (AttributeExtLength > 0xffff)
+ return false;
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize = AttributeExtLength + 2;
+ break;
+ case DictTabInfo::ExtTime:
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize = 3 * AttributeExtLength;
+ break;
+ case DictTabInfo::ExtYear:
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize = 1 * AttributeExtLength;
+ break;
+ case DictTabInfo::ExtTimestamp:
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize = 4 * AttributeExtLength;
+ break;
+ default:
+ return false;
+ };
+ return true;
+ }
+
+ inline void print(FILE *out) {
+ fprintf(out, "AttributeId = %d\n", AttributeId);
+ fprintf(out, "AttributeType = %d\n", AttributeType);
+ fprintf(out, "AttributeSize = %d\n", AttributeSize);
+ fprintf(out, "AttributeArraySize = %d\n", AttributeArraySize);
+ fprintf(out, "AttributeKeyFlag = %d\n", AttributeKeyFlag);
+ fprintf(out, "AttributeStorage = %d\n", AttributeStorage);
+ fprintf(out, "AttributeNullableFlag = %d\n", AttributeNullableFlag);
+ fprintf(out, "AttributeDKey = %d\n", AttributeDKey);
+ fprintf(out, "AttributeGroup = %d\n", AttributeGroup);
+ fprintf(out, "AttributeAutoIncrement = %d\n", AttributeAutoIncrement);
+ fprintf(out, "AttributeExtType = %d\n", AttributeExtType);
+ fprintf(out, "AttributeExtPrecision = %d\n", AttributeExtPrecision);
+ fprintf(out, "AttributeExtScale = %d\n", AttributeExtScale);
+ fprintf(out, "AttributeExtLength = %d\n", AttributeExtLength);
+ fprintf(out, "AttributeDefaultValue = \"%s\"\n",
+ AttributeDefaultValue ? AttributeDefaultValue : "");
+ }
+ };
+
+ static const
+ SimpleProperties::SP2StructMapping AttributeMapping[];
+
+ static const Uint32 AttributeMappingSize;
+
+ // Signal constants
+ STATIC_CONST( DataLength = 20 );
+ STATIC_CONST( HeaderLength = 5 );
+
+private:
+ Uint32 senderRef;
+ Uint32 senderData;
+ Uint32 requestType;
+ Uint32 totalLen;
+ Uint32 offset;
+
+ /**
+ * Length of this data = signal->length() - HeaderLength
+ * Sender block ref = signal->senderBlockRef()
+ */
+
+ Uint32 tabInfoData[DataLength];
+
+public:
+ enum Depricated
+ {
+ AttributeDGroup = 1009, //Default NotDGroup
+ AttributeStoredInd = 1011, //Default NotStored
+ SecondTableId = 17, //Mandatory between DICT's otherwise not allowed
+ FragmentKeyTypeVal = 16 //Default PrimaryKey
+ };
+
+ enum Unimplemented
+ {
+ TableStorageVal = 14, //Default StorageType::MainMemory
+ ScanOptimised = 15, //Default updateOptimised
+ AttributeGroup = 1012 //Default 0
+ };
+};
+
+#endif
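For reference, a minimal standalone sketch of the size arithmetic above. It is illustrative only and not part of the commit; it assumes the usual log2-of-bits encoding of the AttributeSize codes (aBit = 0, an8Bit = 3, a16Bit = 4, a32Bit = 5, a64Bit = 6), which the (1 << AttributeSize) term in sizeInWords() implies but whose constants are defined elsewhere in this header.

#include <cstdio>

// Mirror of Attribute::sizeInWords(): bits per element times element count,
// rounded up to whole 32-bit words.
static unsigned sizeInWords(unsigned attributeSizeCode, unsigned arraySize)
{
  return ((1u << attributeSizeCode) * arraySize + 31) >> 5;
}

int main()
{
  // Varchar(20) as mapped by translateExtType(): an8Bit elements (code 3),
  // array size = 20 + 1 length byte = 21  ->  6 words (24 bytes).
  std::printf("Varchar(20): %u words\n", sizeInWords(3, 21));
  // Bigint: one a64Bit element (code 6)  ->  2 words.
  std::printf("Bigint:      %u words\n", sizeInWords(6, 1));
  return 0;
}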
diff --git a/ndb/include/kernel/signaldata/DihAddFrag.hpp b/storage/ndb/include/kernel/signaldata/DihAddFrag.hpp
index 6e5a24ee413..6e5a24ee413 100644
--- a/ndb/include/kernel/signaldata/DihAddFrag.hpp
+++ b/storage/ndb/include/kernel/signaldata/DihAddFrag.hpp
diff --git a/ndb/include/kernel/signaldata/DihContinueB.hpp b/storage/ndb/include/kernel/signaldata/DihContinueB.hpp
index 77ecf360601..77ecf360601 100644
--- a/ndb/include/kernel/signaldata/DihContinueB.hpp
+++ b/storage/ndb/include/kernel/signaldata/DihContinueB.hpp
diff --git a/ndb/include/kernel/signaldata/DihSizeAltReq.hpp b/storage/ndb/include/kernel/signaldata/DihSizeAltReq.hpp
index 73279447859..73279447859 100644
--- a/ndb/include/kernel/signaldata/DihSizeAltReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/DihSizeAltReq.hpp
diff --git a/ndb/include/kernel/signaldata/DihStartTab.hpp b/storage/ndb/include/kernel/signaldata/DihStartTab.hpp
index 75443e6070e..75443e6070e 100644
--- a/ndb/include/kernel/signaldata/DihStartTab.hpp
+++ b/storage/ndb/include/kernel/signaldata/DihStartTab.hpp
diff --git a/ndb/include/kernel/signaldata/DihSwitchReplica.hpp b/storage/ndb/include/kernel/signaldata/DihSwitchReplica.hpp
index d4212f510f3..d4212f510f3 100644
--- a/ndb/include/kernel/signaldata/DihSwitchReplica.hpp
+++ b/storage/ndb/include/kernel/signaldata/DihSwitchReplica.hpp
diff --git a/ndb/include/kernel/signaldata/DisconnectRep.hpp b/storage/ndb/include/kernel/signaldata/DisconnectRep.hpp
index d7fcdc4fb35..d7fcdc4fb35 100644
--- a/ndb/include/kernel/signaldata/DisconnectRep.hpp
+++ b/storage/ndb/include/kernel/signaldata/DisconnectRep.hpp
diff --git a/ndb/include/kernel/signaldata/DropIndx.hpp b/storage/ndb/include/kernel/signaldata/DropIndx.hpp
index fd2ea7f0b7b..fd2ea7f0b7b 100644
--- a/ndb/include/kernel/signaldata/DropIndx.hpp
+++ b/storage/ndb/include/kernel/signaldata/DropIndx.hpp
diff --git a/ndb/include/kernel/signaldata/DropTab.hpp b/storage/ndb/include/kernel/signaldata/DropTab.hpp
index dd3946d8cc0..dd3946d8cc0 100644
--- a/ndb/include/kernel/signaldata/DropTab.hpp
+++ b/storage/ndb/include/kernel/signaldata/DropTab.hpp
diff --git a/ndb/include/kernel/signaldata/DropTabFile.hpp b/storage/ndb/include/kernel/signaldata/DropTabFile.hpp
index 9ae4dae41c1..9ae4dae41c1 100644
--- a/ndb/include/kernel/signaldata/DropTabFile.hpp
+++ b/storage/ndb/include/kernel/signaldata/DropTabFile.hpp
diff --git a/ndb/include/kernel/signaldata/DropTable.hpp b/storage/ndb/include/kernel/signaldata/DropTable.hpp
index 7a5b96e4cd1..7a5b96e4cd1 100644
--- a/ndb/include/kernel/signaldata/DropTable.hpp
+++ b/storage/ndb/include/kernel/signaldata/DropTable.hpp
diff --git a/ndb/include/kernel/signaldata/DropTrig.hpp b/storage/ndb/include/kernel/signaldata/DropTrig.hpp
index 7c5049f3de8..7c5049f3de8 100644
--- a/ndb/include/kernel/signaldata/DropTrig.hpp
+++ b/storage/ndb/include/kernel/signaldata/DropTrig.hpp
diff --git a/ndb/include/kernel/signaldata/DumpStateOrd.hpp b/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp
index 7368a0ec40d..7368a0ec40d 100644
--- a/ndb/include/kernel/signaldata/DumpStateOrd.hpp
+++ b/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp
diff --git a/ndb/include/kernel/signaldata/EmptyLcp.hpp b/storage/ndb/include/kernel/signaldata/EmptyLcp.hpp
index 32ea6c13231..32ea6c13231 100644
--- a/ndb/include/kernel/signaldata/EmptyLcp.hpp
+++ b/storage/ndb/include/kernel/signaldata/EmptyLcp.hpp
diff --git a/ndb/include/kernel/signaldata/EndTo.hpp b/storage/ndb/include/kernel/signaldata/EndTo.hpp
index 944cca3ca98..944cca3ca98 100644
--- a/ndb/include/kernel/signaldata/EndTo.hpp
+++ b/storage/ndb/include/kernel/signaldata/EndTo.hpp
diff --git a/ndb/include/kernel/signaldata/EventReport.hpp b/storage/ndb/include/kernel/signaldata/EventReport.hpp
index 9822a0539cf..9822a0539cf 100644
--- a/ndb/include/kernel/signaldata/EventReport.hpp
+++ b/storage/ndb/include/kernel/signaldata/EventReport.hpp
diff --git a/ndb/include/kernel/signaldata/EventSubscribeReq.hpp b/storage/ndb/include/kernel/signaldata/EventSubscribeReq.hpp
index 84a1717b1de..84a1717b1de 100644
--- a/ndb/include/kernel/signaldata/EventSubscribeReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/EventSubscribeReq.hpp
diff --git a/ndb/include/kernel/signaldata/ExecFragReq.hpp b/storage/ndb/include/kernel/signaldata/ExecFragReq.hpp
index e40213d6e29..e40213d6e29 100644
--- a/ndb/include/kernel/signaldata/ExecFragReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/ExecFragReq.hpp
diff --git a/ndb/include/kernel/signaldata/FailRep.hpp b/storage/ndb/include/kernel/signaldata/FailRep.hpp
index 44577f07fdc..44577f07fdc 100644
--- a/ndb/include/kernel/signaldata/FailRep.hpp
+++ b/storage/ndb/include/kernel/signaldata/FailRep.hpp
diff --git a/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp b/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp
new file mode 100644
index 00000000000..674ce1d1d0b
--- /dev/null
+++ b/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp
@@ -0,0 +1,201 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef FIRE_TRIG_ORD_HPP
+#define FIRE_TRIG_ORD_HPP
+
+#include "SignalData.hpp"
+#include <NodeBitmask.hpp>
+#include <trigger_definitions.h>
+#include <string.h>
+
+/**
+ * FireTrigOrd
+ *
+ * This signal is sent by TUP to signal
+ * that a trigger has fired
+ */
+class FireTrigOrd {
+ /**
+ * Sender(s)
+ */
+ // API
+
+ /**
+   * Sender(s) / Receiver(s)
+ */
+ friend class Dbtup;
+
+ /**
+   * Receiver(s)
+ */
+ friend class Dbtc;
+ friend class Backup;
+ friend class SumaParticipant;
+
+ /**
+ * For printing
+ */
+ friend bool printFIRE_TRIG_ORD(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo);
+
+public:
+ STATIC_CONST( SignalLength = 8 );
+ STATIC_CONST( SignalWithGCILength = 9 );
+ STATIC_CONST( SignalWithHashValueLength = 10 );
+
+private:
+ Uint32 m_connectionPtr;
+ Uint32 m_userRef;
+ Uint32 m_triggerId;
+ TriggerEvent::Value m_triggerEvent;
+ Uint32 m_noPrimKeyWords;
+ Uint32 m_noBeforeValueWords;
+ Uint32 m_noAfterValueWords;
+ Uint32 fragId;
+ Uint32 m_gci;
+ Uint32 m_hashValue;
+ // Public methods
+public:
+ Uint32 getConnectionPtr() const;
+ void setConnectionPtr(Uint32);
+ Uint32 getUserRef() const;
+ void setUserRef(Uint32);
+ Uint32 getTriggerId() const;
+ void setTriggerId(Uint32 anIndxId);
+ TriggerEvent::Value getTriggerEvent() const;
+ void setTriggerEvent(TriggerEvent::Value);
+ Uint32 getNoOfPrimaryKeyWords() const;
+ void setNoOfPrimaryKeyWords(Uint32);
+ Uint32 getNoOfBeforeValueWords() const;
+ void setNoOfBeforeValueWords(Uint32);
+ Uint32 getNoOfAfterValueWords() const;
+ void setNoOfAfterValueWords(Uint32);
+ Uint32 getGCI() const;
+ void setGCI(Uint32);
+ Uint32 getHashValue() const;
+ void setHashValue(Uint32);
+};
+
+inline
+Uint32 FireTrigOrd::getConnectionPtr() const
+{
+ return m_connectionPtr;
+}
+
+inline
+void FireTrigOrd::setConnectionPtr(Uint32 aConnectionPtr)
+{
+ m_connectionPtr = aConnectionPtr;
+}
+
+inline
+Uint32 FireTrigOrd::getUserRef() const
+{
+ return m_userRef;
+}
+
+inline
+void FireTrigOrd::setUserRef(Uint32 aUserRef)
+{
+ m_userRef = aUserRef;
+}
+
+inline
+Uint32 FireTrigOrd::getTriggerId() const
+{
+ return m_triggerId;
+}
+
+inline
+void FireTrigOrd::setTriggerId(Uint32 aTriggerId)
+{
+ m_triggerId = aTriggerId;
+}
+
+inline
+TriggerEvent::Value FireTrigOrd::getTriggerEvent() const
+{
+ return m_triggerEvent;
+}
+
+inline
+void FireTrigOrd::setTriggerEvent(TriggerEvent::Value aTriggerEvent)
+{
+ m_triggerEvent = aTriggerEvent;
+}
+
+inline
+Uint32 FireTrigOrd::getNoOfPrimaryKeyWords() const
+{
+ return m_noPrimKeyWords;
+}
+
+inline
+void FireTrigOrd::setNoOfPrimaryKeyWords(Uint32 noPrim)
+{
+ m_noPrimKeyWords = noPrim;
+}
+
+inline
+Uint32 FireTrigOrd::getNoOfBeforeValueWords() const
+{
+ return m_noBeforeValueWords;
+}
+
+inline
+void FireTrigOrd::setNoOfBeforeValueWords(Uint32 noBefore)
+{
+ m_noBeforeValueWords = noBefore;
+}
+
+inline
+Uint32 FireTrigOrd::getNoOfAfterValueWords() const
+{
+ return m_noAfterValueWords;
+}
+
+inline
+void FireTrigOrd::setNoOfAfterValueWords(Uint32 noAfter)
+{
+ m_noAfterValueWords = noAfter;
+}
+
+inline
+Uint32 FireTrigOrd::getGCI() const
+{
+ return m_gci;
+}
+
+inline
+void FireTrigOrd::setGCI(Uint32 aGCI)
+{
+ m_gci = aGCI;
+}
+
+inline
+Uint32 FireTrigOrd::getHashValue() const
+{
+ return m_hashValue;
+}
+
+inline
+void FireTrigOrd::setHashValue(Uint32 flag)
+{
+ m_hashValue = flag;
+}
+
+
+#endif
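The accessors above each map to one word of the signal, and the three length constants tell a receiver which of the optional trailing words are present. The sketch below is illustrative only, not part of the commit; it assumes the word layout follows the member order of FireTrigOrd shown above.

#include <cstdio>

typedef unsigned Uint32;   // 32-bit on the platforms NDB targets

static const Uint32 SignalLength              = 8;   // mandatory words
static const Uint32 SignalWithGCILength       = 9;   // + m_gci
static const Uint32 SignalWithHashValueLength = 10;  // + m_gci + m_hashValue

// Decode a FIRE_TRIG_ORD-shaped word array; 'len' is the received signal
// length in words, which decides whether the GCI / hash value are present.
static void handleFireTrigOrd(const Uint32* data, Uint32 len)
{
  if (len < SignalLength)
    return;                               // malformed, ignore
  const Uint32 triggerId      = data[2];  // m_triggerId
  const Uint32 noPrimKeyWords = data[4];  // m_noPrimKeyWords
  const Uint32 noAfterWords   = data[6];  // m_noAfterValueWords
  std::printf("trigger %u fired: %u key words, %u after-value words\n",
              triggerId, noPrimKeyWords, noAfterWords);
  if (len >= SignalWithGCILength)
    std::printf("  gci  = %u\n", data[8]);
  if (len >= SignalWithHashValueLength)
    std::printf("  hash = 0x%x\n", data[9]);
}

int main()
{
  const Uint32 sig[10] = { 1, 2, 7, 0, 2, 0, 3, 5, 4711, 0x1234 };
  handleFireTrigOrd(sig, SignalWithHashValueLength);
  return 0;
}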
diff --git a/ndb/include/kernel/signaldata/FsAppendReq.hpp b/storage/ndb/include/kernel/signaldata/FsAppendReq.hpp
index e2fd61f8a11..e2fd61f8a11 100644
--- a/ndb/include/kernel/signaldata/FsAppendReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/FsAppendReq.hpp
diff --git a/ndb/include/kernel/signaldata/FsCloseReq.hpp b/storage/ndb/include/kernel/signaldata/FsCloseReq.hpp
index 10d094fb30b..10d094fb30b 100644
--- a/ndb/include/kernel/signaldata/FsCloseReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/FsCloseReq.hpp
diff --git a/ndb/include/kernel/signaldata/FsConf.hpp b/storage/ndb/include/kernel/signaldata/FsConf.hpp
index f66d9feea49..f66d9feea49 100644
--- a/ndb/include/kernel/signaldata/FsConf.hpp
+++ b/storage/ndb/include/kernel/signaldata/FsConf.hpp
diff --git a/ndb/include/kernel/signaldata/FsOpenReq.hpp b/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp
index 906bb947128..906bb947128 100644
--- a/ndb/include/kernel/signaldata/FsOpenReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp
diff --git a/ndb/include/kernel/signaldata/FsReadWriteReq.hpp b/storage/ndb/include/kernel/signaldata/FsReadWriteReq.hpp
index 6e4fa4d260e..6e4fa4d260e 100644
--- a/ndb/include/kernel/signaldata/FsReadWriteReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/FsReadWriteReq.hpp
diff --git a/ndb/include/kernel/signaldata/FsRef.hpp b/storage/ndb/include/kernel/signaldata/FsRef.hpp
index 650f6520fb5..650f6520fb5 100644
--- a/ndb/include/kernel/signaldata/FsRef.hpp
+++ b/storage/ndb/include/kernel/signaldata/FsRef.hpp
diff --git a/ndb/include/kernel/signaldata/FsRemoveReq.hpp b/storage/ndb/include/kernel/signaldata/FsRemoveReq.hpp
index efb566d883a..efb566d883a 100644
--- a/ndb/include/kernel/signaldata/FsRemoveReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/FsRemoveReq.hpp
diff --git a/ndb/include/kernel/signaldata/GCPSave.hpp b/storage/ndb/include/kernel/signaldata/GCPSave.hpp
index 2b4a25e6bb2..2b4a25e6bb2 100644
--- a/ndb/include/kernel/signaldata/GCPSave.hpp
+++ b/storage/ndb/include/kernel/signaldata/GCPSave.hpp
diff --git a/ndb/include/kernel/signaldata/GetTabInfo.hpp b/storage/ndb/include/kernel/signaldata/GetTabInfo.hpp
index 6b223cab119..6b223cab119 100644
--- a/ndb/include/kernel/signaldata/GetTabInfo.hpp
+++ b/storage/ndb/include/kernel/signaldata/GetTabInfo.hpp
diff --git a/ndb/include/kernel/signaldata/GetTableId.hpp b/storage/ndb/include/kernel/signaldata/GetTableId.hpp
index fb91c2e10d7..fb91c2e10d7 100644
--- a/ndb/include/kernel/signaldata/GetTableId.hpp
+++ b/storage/ndb/include/kernel/signaldata/GetTableId.hpp
diff --git a/ndb/include/kernel/signaldata/GrepImpl.hpp b/storage/ndb/include/kernel/signaldata/GrepImpl.hpp
index 95b93df0a58..95b93df0a58 100644
--- a/ndb/include/kernel/signaldata/GrepImpl.hpp
+++ b/storage/ndb/include/kernel/signaldata/GrepImpl.hpp
diff --git a/ndb/include/kernel/signaldata/HotSpareRep.hpp b/storage/ndb/include/kernel/signaldata/HotSpareRep.hpp
index fb9d338be1b..fb9d338be1b 100644
--- a/ndb/include/kernel/signaldata/HotSpareRep.hpp
+++ b/storage/ndb/include/kernel/signaldata/HotSpareRep.hpp
diff --git a/ndb/include/kernel/signaldata/IndxAttrInfo.hpp b/storage/ndb/include/kernel/signaldata/IndxAttrInfo.hpp
index ec5790d84f3..ec5790d84f3 100755
--- a/ndb/include/kernel/signaldata/IndxAttrInfo.hpp
+++ b/storage/ndb/include/kernel/signaldata/IndxAttrInfo.hpp
diff --git a/ndb/include/kernel/signaldata/IndxKeyInfo.hpp b/storage/ndb/include/kernel/signaldata/IndxKeyInfo.hpp
index 7cd7795ec71..7cd7795ec71 100755
--- a/ndb/include/kernel/signaldata/IndxKeyInfo.hpp
+++ b/storage/ndb/include/kernel/signaldata/IndxKeyInfo.hpp
diff --git a/ndb/include/kernel/signaldata/InvalidateNodeLCPConf.hpp b/storage/ndb/include/kernel/signaldata/InvalidateNodeLCPConf.hpp
index 2497af354ce..2497af354ce 100644
--- a/ndb/include/kernel/signaldata/InvalidateNodeLCPConf.hpp
+++ b/storage/ndb/include/kernel/signaldata/InvalidateNodeLCPConf.hpp
diff --git a/ndb/include/kernel/signaldata/InvalidateNodeLCPReq.hpp b/storage/ndb/include/kernel/signaldata/InvalidateNodeLCPReq.hpp
index e55a58710b4..e55a58710b4 100644
--- a/ndb/include/kernel/signaldata/InvalidateNodeLCPReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/InvalidateNodeLCPReq.hpp
diff --git a/ndb/include/kernel/signaldata/KeyInfo.hpp b/storage/ndb/include/kernel/signaldata/KeyInfo.hpp
index 686f3ae053d..686f3ae053d 100644
--- a/ndb/include/kernel/signaldata/KeyInfo.hpp
+++ b/storage/ndb/include/kernel/signaldata/KeyInfo.hpp
diff --git a/ndb/include/kernel/signaldata/LCP.hpp b/storage/ndb/include/kernel/signaldata/LCP.hpp
index 7d3fb71ae7e..7d3fb71ae7e 100644
--- a/ndb/include/kernel/signaldata/LCP.hpp
+++ b/storage/ndb/include/kernel/signaldata/LCP.hpp
diff --git a/ndb/include/kernel/signaldata/ListTables.hpp b/storage/ndb/include/kernel/signaldata/ListTables.hpp
index 7fbfab1294c..7fbfab1294c 100644
--- a/ndb/include/kernel/signaldata/ListTables.hpp
+++ b/storage/ndb/include/kernel/signaldata/ListTables.hpp
diff --git a/ndb/include/kernel/signaldata/LqhFrag.hpp b/storage/ndb/include/kernel/signaldata/LqhFrag.hpp
index 13dfafcc653..13dfafcc653 100644
--- a/ndb/include/kernel/signaldata/LqhFrag.hpp
+++ b/storage/ndb/include/kernel/signaldata/LqhFrag.hpp
diff --git a/ndb/include/kernel/signaldata/LqhKey.hpp b/storage/ndb/include/kernel/signaldata/LqhKey.hpp
index e937180e3f7..e937180e3f7 100644
--- a/ndb/include/kernel/signaldata/LqhKey.hpp
+++ b/storage/ndb/include/kernel/signaldata/LqhKey.hpp
diff --git a/ndb/include/kernel/signaldata/LqhSizeAltReq.hpp b/storage/ndb/include/kernel/signaldata/LqhSizeAltReq.hpp
index e47ce39897a..e47ce39897a 100644
--- a/ndb/include/kernel/signaldata/LqhSizeAltReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/LqhSizeAltReq.hpp
diff --git a/ndb/include/kernel/signaldata/LqhTransConf.hpp b/storage/ndb/include/kernel/signaldata/LqhTransConf.hpp
index f62dfd07f51..f62dfd07f51 100644
--- a/ndb/include/kernel/signaldata/LqhTransConf.hpp
+++ b/storage/ndb/include/kernel/signaldata/LqhTransConf.hpp
diff --git a/ndb/include/kernel/signaldata/ManagementServer.hpp b/storage/ndb/include/kernel/signaldata/ManagementServer.hpp
index ce14e30c81d..ce14e30c81d 100644
--- a/ndb/include/kernel/signaldata/ManagementServer.hpp
+++ b/storage/ndb/include/kernel/signaldata/ManagementServer.hpp
diff --git a/ndb/include/kernel/signaldata/MasterGCP.hpp b/storage/ndb/include/kernel/signaldata/MasterGCP.hpp
index ebe6857a107..ebe6857a107 100644
--- a/ndb/include/kernel/signaldata/MasterGCP.hpp
+++ b/storage/ndb/include/kernel/signaldata/MasterGCP.hpp
diff --git a/ndb/include/kernel/signaldata/MasterLCP.hpp b/storage/ndb/include/kernel/signaldata/MasterLCP.hpp
index bf84ac73309..bf84ac73309 100644
--- a/ndb/include/kernel/signaldata/MasterLCP.hpp
+++ b/storage/ndb/include/kernel/signaldata/MasterLCP.hpp
diff --git a/ndb/include/kernel/signaldata/NFCompleteRep.hpp b/storage/ndb/include/kernel/signaldata/NFCompleteRep.hpp
index c8bde705a86..c8bde705a86 100644
--- a/ndb/include/kernel/signaldata/NFCompleteRep.hpp
+++ b/storage/ndb/include/kernel/signaldata/NFCompleteRep.hpp
diff --git a/ndb/include/kernel/signaldata/NdbSttor.hpp b/storage/ndb/include/kernel/signaldata/NdbSttor.hpp
index edd93ef96a8..edd93ef96a8 100644
--- a/ndb/include/kernel/signaldata/NdbSttor.hpp
+++ b/storage/ndb/include/kernel/signaldata/NdbSttor.hpp
diff --git a/ndb/include/kernel/signaldata/NdbfsContinueB.hpp b/storage/ndb/include/kernel/signaldata/NdbfsContinueB.hpp
index 6154e5c19b1..6154e5c19b1 100644
--- a/ndb/include/kernel/signaldata/NdbfsContinueB.hpp
+++ b/storage/ndb/include/kernel/signaldata/NdbfsContinueB.hpp
diff --git a/ndb/include/kernel/signaldata/NextScan.hpp b/storage/ndb/include/kernel/signaldata/NextScan.hpp
index a502a89108c..a502a89108c 100644
--- a/ndb/include/kernel/signaldata/NextScan.hpp
+++ b/storage/ndb/include/kernel/signaldata/NextScan.hpp
diff --git a/ndb/include/kernel/signaldata/NodeFailRep.hpp b/storage/ndb/include/kernel/signaldata/NodeFailRep.hpp
index 060acd6a3e2..060acd6a3e2 100644
--- a/ndb/include/kernel/signaldata/NodeFailRep.hpp
+++ b/storage/ndb/include/kernel/signaldata/NodeFailRep.hpp
diff --git a/ndb/include/kernel/signaldata/NodeStateSignalData.hpp b/storage/ndb/include/kernel/signaldata/NodeStateSignalData.hpp
index 391d8f89566..391d8f89566 100644
--- a/ndb/include/kernel/signaldata/NodeStateSignalData.hpp
+++ b/storage/ndb/include/kernel/signaldata/NodeStateSignalData.hpp
diff --git a/ndb/include/kernel/signaldata/PackedSignal.hpp b/storage/ndb/include/kernel/signaldata/PackedSignal.hpp
index ea0ff6db526..ea0ff6db526 100644
--- a/ndb/include/kernel/signaldata/PackedSignal.hpp
+++ b/storage/ndb/include/kernel/signaldata/PackedSignal.hpp
diff --git a/ndb/include/kernel/signaldata/PrepDropTab.hpp b/storage/ndb/include/kernel/signaldata/PrepDropTab.hpp
index c54b2474aa3..c54b2474aa3 100644
--- a/ndb/include/kernel/signaldata/PrepDropTab.hpp
+++ b/storage/ndb/include/kernel/signaldata/PrepDropTab.hpp
diff --git a/ndb/include/kernel/signaldata/PrepFailReqRef.hpp b/storage/ndb/include/kernel/signaldata/PrepFailReqRef.hpp
index 90b568237b8..90b568237b8 100644
--- a/ndb/include/kernel/signaldata/PrepFailReqRef.hpp
+++ b/storage/ndb/include/kernel/signaldata/PrepFailReqRef.hpp
diff --git a/ndb/include/kernel/signaldata/ReadConfig.hpp b/storage/ndb/include/kernel/signaldata/ReadConfig.hpp
index 0835b252a32..0835b252a32 100644
--- a/ndb/include/kernel/signaldata/ReadConfig.hpp
+++ b/storage/ndb/include/kernel/signaldata/ReadConfig.hpp
diff --git a/ndb/include/kernel/signaldata/ReadNodesConf.hpp b/storage/ndb/include/kernel/signaldata/ReadNodesConf.hpp
index 0507007f71a..0507007f71a 100644
--- a/ndb/include/kernel/signaldata/ReadNodesConf.hpp
+++ b/storage/ndb/include/kernel/signaldata/ReadNodesConf.hpp
diff --git a/ndb/include/kernel/signaldata/RelTabMem.hpp b/storage/ndb/include/kernel/signaldata/RelTabMem.hpp
index 9cf1787bba4..9cf1787bba4 100644
--- a/ndb/include/kernel/signaldata/RelTabMem.hpp
+++ b/storage/ndb/include/kernel/signaldata/RelTabMem.hpp
diff --git a/ndb/include/kernel/signaldata/RepImpl.hpp b/storage/ndb/include/kernel/signaldata/RepImpl.hpp
index 0de1389a4a9..0de1389a4a9 100644
--- a/ndb/include/kernel/signaldata/RepImpl.hpp
+++ b/storage/ndb/include/kernel/signaldata/RepImpl.hpp
diff --git a/ndb/include/kernel/signaldata/ResumeReq.hpp b/storage/ndb/include/kernel/signaldata/ResumeReq.hpp
index a4880474ca8..a4880474ca8 100644
--- a/ndb/include/kernel/signaldata/ResumeReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/ResumeReq.hpp
diff --git a/ndb/include/kernel/signaldata/ScanFrag.hpp b/storage/ndb/include/kernel/signaldata/ScanFrag.hpp
index f21a3eef7ac..f21a3eef7ac 100644
--- a/ndb/include/kernel/signaldata/ScanFrag.hpp
+++ b/storage/ndb/include/kernel/signaldata/ScanFrag.hpp
diff --git a/ndb/include/kernel/signaldata/ScanTab.hpp b/storage/ndb/include/kernel/signaldata/ScanTab.hpp
index 8cb282270ff..8cb282270ff 100644
--- a/ndb/include/kernel/signaldata/ScanTab.hpp
+++ b/storage/ndb/include/kernel/signaldata/ScanTab.hpp
diff --git a/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp b/storage/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp
index 2923029f8f6..2923029f8f6 100644
--- a/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp
+++ b/storage/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp
diff --git a/ndb/include/kernel/signaldata/SetVarReq.hpp b/storage/ndb/include/kernel/signaldata/SetVarReq.hpp
index 8cb3e78be8b..8cb3e78be8b 100644
--- a/ndb/include/kernel/signaldata/SetVarReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/SetVarReq.hpp
diff --git a/ndb/include/kernel/signaldata/SignalData.hpp b/storage/ndb/include/kernel/signaldata/SignalData.hpp
index f825b0feb7b..f825b0feb7b 100644
--- a/ndb/include/kernel/signaldata/SignalData.hpp
+++ b/storage/ndb/include/kernel/signaldata/SignalData.hpp
diff --git a/ndb/include/kernel/signaldata/SignalDataPrint.hpp b/storage/ndb/include/kernel/signaldata/SignalDataPrint.hpp
index 17ab07acd4e..17ab07acd4e 100644
--- a/ndb/include/kernel/signaldata/SignalDataPrint.hpp
+++ b/storage/ndb/include/kernel/signaldata/SignalDataPrint.hpp
diff --git a/ndb/include/kernel/signaldata/SignalDroppedRep.hpp b/storage/ndb/include/kernel/signaldata/SignalDroppedRep.hpp
index 20863524358..20863524358 100644
--- a/ndb/include/kernel/signaldata/SignalDroppedRep.hpp
+++ b/storage/ndb/include/kernel/signaldata/SignalDroppedRep.hpp
diff --git a/ndb/include/kernel/signaldata/SrFragidConf.hpp b/storage/ndb/include/kernel/signaldata/SrFragidConf.hpp
index 9a6088ad57f..9a6088ad57f 100644
--- a/ndb/include/kernel/signaldata/SrFragidConf.hpp
+++ b/storage/ndb/include/kernel/signaldata/SrFragidConf.hpp
diff --git a/ndb/include/kernel/signaldata/StartFragReq.hpp b/storage/ndb/include/kernel/signaldata/StartFragReq.hpp
index ec05c1ee366..ec05c1ee366 100644
--- a/ndb/include/kernel/signaldata/StartFragReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/StartFragReq.hpp
diff --git a/ndb/include/kernel/signaldata/StartInfo.hpp b/storage/ndb/include/kernel/signaldata/StartInfo.hpp
index d0850b13ef4..d0850b13ef4 100644
--- a/ndb/include/kernel/signaldata/StartInfo.hpp
+++ b/storage/ndb/include/kernel/signaldata/StartInfo.hpp
diff --git a/ndb/include/kernel/signaldata/StartMe.hpp b/storage/ndb/include/kernel/signaldata/StartMe.hpp
index 6593a9e9741..6593a9e9741 100644
--- a/ndb/include/kernel/signaldata/StartMe.hpp
+++ b/storage/ndb/include/kernel/signaldata/StartMe.hpp
diff --git a/ndb/include/kernel/signaldata/StartOrd.hpp b/storage/ndb/include/kernel/signaldata/StartOrd.hpp
index 43a48f70ba9..43a48f70ba9 100644
--- a/ndb/include/kernel/signaldata/StartOrd.hpp
+++ b/storage/ndb/include/kernel/signaldata/StartOrd.hpp
diff --git a/ndb/include/kernel/signaldata/StartPerm.hpp b/storage/ndb/include/kernel/signaldata/StartPerm.hpp
index 38be72835a3..38be72835a3 100644
--- a/ndb/include/kernel/signaldata/StartPerm.hpp
+++ b/storage/ndb/include/kernel/signaldata/StartPerm.hpp
diff --git a/ndb/include/kernel/signaldata/StartRec.hpp b/storage/ndb/include/kernel/signaldata/StartRec.hpp
index f8a4e01a094..f8a4e01a094 100644
--- a/ndb/include/kernel/signaldata/StartRec.hpp
+++ b/storage/ndb/include/kernel/signaldata/StartRec.hpp
diff --git a/ndb/include/kernel/signaldata/StartTo.hpp b/storage/ndb/include/kernel/signaldata/StartTo.hpp
index 5aecef6275d..5aecef6275d 100644
--- a/ndb/include/kernel/signaldata/StartTo.hpp
+++ b/storage/ndb/include/kernel/signaldata/StartTo.hpp
diff --git a/ndb/include/kernel/signaldata/StopMe.hpp b/storage/ndb/include/kernel/signaldata/StopMe.hpp
index 51d944a3b96..51d944a3b96 100644
--- a/ndb/include/kernel/signaldata/StopMe.hpp
+++ b/storage/ndb/include/kernel/signaldata/StopMe.hpp
diff --git a/ndb/include/kernel/signaldata/StopPerm.hpp b/storage/ndb/include/kernel/signaldata/StopPerm.hpp
index 95fb82c8cde..95fb82c8cde 100644
--- a/ndb/include/kernel/signaldata/StopPerm.hpp
+++ b/storage/ndb/include/kernel/signaldata/StopPerm.hpp
diff --git a/ndb/include/kernel/signaldata/StopReq.hpp b/storage/ndb/include/kernel/signaldata/StopReq.hpp
index ea453ae115d..ea453ae115d 100644
--- a/ndb/include/kernel/signaldata/StopReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/StopReq.hpp
diff --git a/ndb/include/kernel/signaldata/SumaImpl.hpp b/storage/ndb/include/kernel/signaldata/SumaImpl.hpp
index 75fb65e1ad2..75fb65e1ad2 100644
--- a/ndb/include/kernel/signaldata/SumaImpl.hpp
+++ b/storage/ndb/include/kernel/signaldata/SumaImpl.hpp
diff --git a/ndb/include/kernel/signaldata/SystemError.hpp b/storage/ndb/include/kernel/signaldata/SystemError.hpp
index 7b4d47c5c2e..7b4d47c5c2e 100644
--- a/ndb/include/kernel/signaldata/SystemError.hpp
+++ b/storage/ndb/include/kernel/signaldata/SystemError.hpp
diff --git a/ndb/include/kernel/signaldata/TamperOrd.hpp b/storage/ndb/include/kernel/signaldata/TamperOrd.hpp
index eb6cd47b093..eb6cd47b093 100644
--- a/ndb/include/kernel/signaldata/TamperOrd.hpp
+++ b/storage/ndb/include/kernel/signaldata/TamperOrd.hpp
diff --git a/ndb/include/kernel/signaldata/TcCommit.hpp b/storage/ndb/include/kernel/signaldata/TcCommit.hpp
index dcbca0cb6f2..dcbca0cb6f2 100644
--- a/ndb/include/kernel/signaldata/TcCommit.hpp
+++ b/storage/ndb/include/kernel/signaldata/TcCommit.hpp
diff --git a/ndb/include/kernel/signaldata/TcContinueB.hpp b/storage/ndb/include/kernel/signaldata/TcContinueB.hpp
index 85213791b2a..85213791b2a 100644
--- a/ndb/include/kernel/signaldata/TcContinueB.hpp
+++ b/storage/ndb/include/kernel/signaldata/TcContinueB.hpp
diff --git a/ndb/include/kernel/signaldata/TcHbRep.hpp b/storage/ndb/include/kernel/signaldata/TcHbRep.hpp
index 7e701b510f9..7e701b510f9 100644
--- a/ndb/include/kernel/signaldata/TcHbRep.hpp
+++ b/storage/ndb/include/kernel/signaldata/TcHbRep.hpp
diff --git a/ndb/include/kernel/signaldata/TcIndx.hpp b/storage/ndb/include/kernel/signaldata/TcIndx.hpp
index c5e7d2489ba..c5e7d2489ba 100644
--- a/ndb/include/kernel/signaldata/TcIndx.hpp
+++ b/storage/ndb/include/kernel/signaldata/TcIndx.hpp
diff --git a/ndb/include/kernel/signaldata/TcKeyConf.hpp b/storage/ndb/include/kernel/signaldata/TcKeyConf.hpp
index c23e94951dc..c23e94951dc 100644
--- a/ndb/include/kernel/signaldata/TcKeyConf.hpp
+++ b/storage/ndb/include/kernel/signaldata/TcKeyConf.hpp
diff --git a/ndb/include/kernel/signaldata/TcKeyFailConf.hpp b/storage/ndb/include/kernel/signaldata/TcKeyFailConf.hpp
index 7c0a766df40..7c0a766df40 100644
--- a/ndb/include/kernel/signaldata/TcKeyFailConf.hpp
+++ b/storage/ndb/include/kernel/signaldata/TcKeyFailConf.hpp
diff --git a/ndb/include/kernel/signaldata/TcKeyRef.hpp b/storage/ndb/include/kernel/signaldata/TcKeyRef.hpp
index c773920713a..c773920713a 100644
--- a/ndb/include/kernel/signaldata/TcKeyRef.hpp
+++ b/storage/ndb/include/kernel/signaldata/TcKeyRef.hpp
diff --git a/ndb/include/kernel/signaldata/TcKeyReq.hpp b/storage/ndb/include/kernel/signaldata/TcKeyReq.hpp
index d7c11ca773c..d7c11ca773c 100644
--- a/ndb/include/kernel/signaldata/TcKeyReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/TcKeyReq.hpp
diff --git a/ndb/include/kernel/signaldata/TcRollbackRep.hpp b/storage/ndb/include/kernel/signaldata/TcRollbackRep.hpp
index febbd4f86b1..febbd4f86b1 100644
--- a/ndb/include/kernel/signaldata/TcRollbackRep.hpp
+++ b/storage/ndb/include/kernel/signaldata/TcRollbackRep.hpp
diff --git a/ndb/include/kernel/signaldata/TcSizeAltReq.hpp b/storage/ndb/include/kernel/signaldata/TcSizeAltReq.hpp
index 34eacfe5a93..34eacfe5a93 100644
--- a/ndb/include/kernel/signaldata/TcSizeAltReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/TcSizeAltReq.hpp
diff --git a/ndb/include/kernel/signaldata/TestOrd.hpp b/storage/ndb/include/kernel/signaldata/TestOrd.hpp
index 1600df08884..1600df08884 100644
--- a/ndb/include/kernel/signaldata/TestOrd.hpp
+++ b/storage/ndb/include/kernel/signaldata/TestOrd.hpp
diff --git a/ndb/include/kernel/signaldata/TransIdAI.hpp b/storage/ndb/include/kernel/signaldata/TransIdAI.hpp
index 5beaf6eba4b..5beaf6eba4b 100755
--- a/ndb/include/kernel/signaldata/TransIdAI.hpp
+++ b/storage/ndb/include/kernel/signaldata/TransIdAI.hpp
diff --git a/ndb/include/kernel/signaldata/TrigAttrInfo.hpp b/storage/ndb/include/kernel/signaldata/TrigAttrInfo.hpp
index e2c029b9033..e2c029b9033 100644
--- a/ndb/include/kernel/signaldata/TrigAttrInfo.hpp
+++ b/storage/ndb/include/kernel/signaldata/TrigAttrInfo.hpp
diff --git a/ndb/include/kernel/signaldata/TupCommit.hpp b/storage/ndb/include/kernel/signaldata/TupCommit.hpp
index 7c5a7931e6c..7c5a7931e6c 100644
--- a/ndb/include/kernel/signaldata/TupCommit.hpp
+++ b/storage/ndb/include/kernel/signaldata/TupCommit.hpp
diff --git a/ndb/include/kernel/signaldata/TupFrag.hpp b/storage/ndb/include/kernel/signaldata/TupFrag.hpp
index 8acb3d28bd6..8acb3d28bd6 100644
--- a/ndb/include/kernel/signaldata/TupFrag.hpp
+++ b/storage/ndb/include/kernel/signaldata/TupFrag.hpp
diff --git a/ndb/include/kernel/signaldata/TupKey.hpp b/storage/ndb/include/kernel/signaldata/TupKey.hpp
index ffd57d81e64..ffd57d81e64 100644
--- a/ndb/include/kernel/signaldata/TupKey.hpp
+++ b/storage/ndb/include/kernel/signaldata/TupKey.hpp
diff --git a/ndb/include/kernel/signaldata/TupSizeAltReq.hpp b/storage/ndb/include/kernel/signaldata/TupSizeAltReq.hpp
index 215493bc188..215493bc188 100644
--- a/ndb/include/kernel/signaldata/TupSizeAltReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/TupSizeAltReq.hpp
diff --git a/ndb/include/kernel/signaldata/TuxBound.hpp b/storage/ndb/include/kernel/signaldata/TuxBound.hpp
index 7e12897407b..7e12897407b 100644
--- a/ndb/include/kernel/signaldata/TuxBound.hpp
+++ b/storage/ndb/include/kernel/signaldata/TuxBound.hpp
diff --git a/ndb/include/kernel/signaldata/TuxContinueB.hpp b/storage/ndb/include/kernel/signaldata/TuxContinueB.hpp
index 385d85715e2..385d85715e2 100644
--- a/ndb/include/kernel/signaldata/TuxContinueB.hpp
+++ b/storage/ndb/include/kernel/signaldata/TuxContinueB.hpp
diff --git a/ndb/include/kernel/signaldata/TuxMaint.hpp b/storage/ndb/include/kernel/signaldata/TuxMaint.hpp
index 4518f0531ea..4518f0531ea 100644
--- a/ndb/include/kernel/signaldata/TuxMaint.hpp
+++ b/storage/ndb/include/kernel/signaldata/TuxMaint.hpp
diff --git a/ndb/include/kernel/signaldata/TuxSizeAltReq.hpp b/storage/ndb/include/kernel/signaldata/TuxSizeAltReq.hpp
index 5d5a0e102ba..5d5a0e102ba 100644
--- a/ndb/include/kernel/signaldata/TuxSizeAltReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/TuxSizeAltReq.hpp
diff --git a/ndb/include/kernel/signaldata/UpdateTo.hpp b/storage/ndb/include/kernel/signaldata/UpdateTo.hpp
index 0fa5f31b6b4..0fa5f31b6b4 100644
--- a/ndb/include/kernel/signaldata/UpdateTo.hpp
+++ b/storage/ndb/include/kernel/signaldata/UpdateTo.hpp
diff --git a/ndb/include/kernel/signaldata/UpgradeStartup.hpp b/storage/ndb/include/kernel/signaldata/UpgradeStartup.hpp
index 93fef323789..93fef323789 100644
--- a/ndb/include/kernel/signaldata/UpgradeStartup.hpp
+++ b/storage/ndb/include/kernel/signaldata/UpgradeStartup.hpp
diff --git a/ndb/include/kernel/signaldata/UtilDelete.hpp b/storage/ndb/include/kernel/signaldata/UtilDelete.hpp
index 67c13b8c2d5..67c13b8c2d5 100644
--- a/ndb/include/kernel/signaldata/UtilDelete.hpp
+++ b/storage/ndb/include/kernel/signaldata/UtilDelete.hpp
diff --git a/ndb/include/kernel/signaldata/UtilExecute.hpp b/storage/ndb/include/kernel/signaldata/UtilExecute.hpp
index 551fb172cac..551fb172cac 100644
--- a/ndb/include/kernel/signaldata/UtilExecute.hpp
+++ b/storage/ndb/include/kernel/signaldata/UtilExecute.hpp
diff --git a/ndb/include/kernel/signaldata/UtilLock.hpp b/storage/ndb/include/kernel/signaldata/UtilLock.hpp
index 318024fd706..318024fd706 100644
--- a/ndb/include/kernel/signaldata/UtilLock.hpp
+++ b/storage/ndb/include/kernel/signaldata/UtilLock.hpp
diff --git a/ndb/include/kernel/signaldata/UtilPrepare.hpp b/storage/ndb/include/kernel/signaldata/UtilPrepare.hpp
index 8508487ce15..8508487ce15 100644
--- a/ndb/include/kernel/signaldata/UtilPrepare.hpp
+++ b/storage/ndb/include/kernel/signaldata/UtilPrepare.hpp
diff --git a/ndb/include/kernel/signaldata/UtilRelease.hpp b/storage/ndb/include/kernel/signaldata/UtilRelease.hpp
index d2864f02f47..d2864f02f47 100644
--- a/ndb/include/kernel/signaldata/UtilRelease.hpp
+++ b/storage/ndb/include/kernel/signaldata/UtilRelease.hpp
diff --git a/ndb/include/kernel/signaldata/UtilSequence.hpp b/storage/ndb/include/kernel/signaldata/UtilSequence.hpp
index 50e5d673e99..50e5d673e99 100644
--- a/ndb/include/kernel/signaldata/UtilSequence.hpp
+++ b/storage/ndb/include/kernel/signaldata/UtilSequence.hpp
diff --git a/ndb/include/kernel/signaldata/WaitGCP.hpp b/storage/ndb/include/kernel/signaldata/WaitGCP.hpp
index ebed28714d2..ebed28714d2 100644
--- a/ndb/include/kernel/signaldata/WaitGCP.hpp
+++ b/storage/ndb/include/kernel/signaldata/WaitGCP.hpp
diff --git a/ndb/include/kernel/trigger_definitions.h b/storage/ndb/include/kernel/trigger_definitions.h
index 11410654a15..11410654a15 100644
--- a/ndb/include/kernel/trigger_definitions.h
+++ b/storage/ndb/include/kernel/trigger_definitions.h
diff --git a/ndb/include/logger/ConsoleLogHandler.hpp b/storage/ndb/include/logger/ConsoleLogHandler.hpp
index ae77b13d3b7..ae77b13d3b7 100644
--- a/ndb/include/logger/ConsoleLogHandler.hpp
+++ b/storage/ndb/include/logger/ConsoleLogHandler.hpp
diff --git a/ndb/include/logger/FileLogHandler.hpp b/storage/ndb/include/logger/FileLogHandler.hpp
index 8fb25e72be7..8fb25e72be7 100644
--- a/ndb/include/logger/FileLogHandler.hpp
+++ b/storage/ndb/include/logger/FileLogHandler.hpp
diff --git a/ndb/include/logger/LogHandler.hpp b/storage/ndb/include/logger/LogHandler.hpp
index 7df6ad864e5..7df6ad864e5 100644
--- a/ndb/include/logger/LogHandler.hpp
+++ b/storage/ndb/include/logger/LogHandler.hpp
diff --git a/ndb/include/logger/Logger.hpp b/storage/ndb/include/logger/Logger.hpp
index ee762098fb6..ee762098fb6 100644
--- a/ndb/include/logger/Logger.hpp
+++ b/storage/ndb/include/logger/Logger.hpp
diff --git a/ndb/include/logger/SysLogHandler.hpp b/storage/ndb/include/logger/SysLogHandler.hpp
index 0dfc1cb2d43..0dfc1cb2d43 100644
--- a/ndb/include/logger/SysLogHandler.hpp
+++ b/storage/ndb/include/logger/SysLogHandler.hpp
diff --git a/ndb/include/mgmapi/mgmapi.h b/storage/ndb/include/mgmapi/mgmapi.h
index 018e554de7c..018e554de7c 100644
--- a/ndb/include/mgmapi/mgmapi.h
+++ b/storage/ndb/include/mgmapi/mgmapi.h
diff --git a/ndb/include/mgmapi/mgmapi_config_parameters.h b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h
index 33134899d1e..33134899d1e 100644
--- a/ndb/include/mgmapi/mgmapi_config_parameters.h
+++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h
diff --git a/ndb/include/mgmapi/mgmapi_config_parameters_debug.h b/storage/ndb/include/mgmapi/mgmapi_config_parameters_debug.h
index 0241dca90ef..0241dca90ef 100644
--- a/ndb/include/mgmapi/mgmapi_config_parameters_debug.h
+++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters_debug.h
diff --git a/ndb/include/mgmapi/mgmapi_debug.h b/storage/ndb/include/mgmapi/mgmapi_debug.h
index e86d9d4b768..e86d9d4b768 100644
--- a/ndb/include/mgmapi/mgmapi_debug.h
+++ b/storage/ndb/include/mgmapi/mgmapi_debug.h
diff --git a/ndb/include/mgmapi/ndb_logevent.h b/storage/ndb/include/mgmapi/ndb_logevent.h
index d5744b0fffe..d5744b0fffe 100644
--- a/ndb/include/mgmapi/ndb_logevent.h
+++ b/storage/ndb/include/mgmapi/ndb_logevent.h
diff --git a/ndb/include/mgmcommon/ConfigRetriever.hpp b/storage/ndb/include/mgmcommon/ConfigRetriever.hpp
index c0b877af07d..c0b877af07d 100644
--- a/ndb/include/mgmcommon/ConfigRetriever.hpp
+++ b/storage/ndb/include/mgmcommon/ConfigRetriever.hpp
diff --git a/ndb/include/mgmcommon/IPCConfig.hpp b/storage/ndb/include/mgmcommon/IPCConfig.hpp
index 1e23cdf9807..1e23cdf9807 100644
--- a/ndb/include/mgmcommon/IPCConfig.hpp
+++ b/storage/ndb/include/mgmcommon/IPCConfig.hpp
diff --git a/ndb/include/mgmcommon/MgmtErrorReporter.hpp b/storage/ndb/include/mgmcommon/MgmtErrorReporter.hpp
index 0d980aa7245..0d980aa7245 100644
--- a/ndb/include/mgmcommon/MgmtErrorReporter.hpp
+++ b/storage/ndb/include/mgmcommon/MgmtErrorReporter.hpp
diff --git a/ndb/include/ndb_constants.h b/storage/ndb/include/ndb_constants.h
index c292880749b..c292880749b 100644
--- a/ndb/include/ndb_constants.h
+++ b/storage/ndb/include/ndb_constants.h
diff --git a/ndb/include/ndb_global.h.in b/storage/ndb/include/ndb_global.h.in
index 43f90e1f8b5..43f90e1f8b5 100644
--- a/ndb/include/ndb_global.h.in
+++ b/storage/ndb/include/ndb_global.h.in
diff --git a/ndb/include/ndb_init.h b/storage/ndb/include/ndb_init.h
index 0ff53e6a2af..0ff53e6a2af 100644
--- a/ndb/include/ndb_init.h
+++ b/storage/ndb/include/ndb_init.h
diff --git a/ndb/include/ndb_net.h b/storage/ndb/include/ndb_net.h
index 279beb471a7..279beb471a7 100644
--- a/ndb/include/ndb_net.h
+++ b/storage/ndb/include/ndb_net.h
diff --git a/ndb/include/ndb_types.h.in b/storage/ndb/include/ndb_types.h.in
index 2a5d576ffea..2a5d576ffea 100644
--- a/ndb/include/ndb_types.h.in
+++ b/storage/ndb/include/ndb_types.h.in
diff --git a/ndb/include/ndb_version.h.in b/storage/ndb/include/ndb_version.h.in
index 826f5124407..826f5124407 100644
--- a/ndb/include/ndb_version.h.in
+++ b/storage/ndb/include/ndb_version.h.in
diff --git a/ndb/include/ndbapi/Ndb.hpp b/storage/ndb/include/ndbapi/Ndb.hpp
index db2212075e8..db2212075e8 100644
--- a/ndb/include/ndbapi/Ndb.hpp
+++ b/storage/ndb/include/ndbapi/Ndb.hpp
diff --git a/ndb/include/ndbapi/NdbApi.hpp b/storage/ndb/include/ndbapi/NdbApi.hpp
index aed4d5efbd7..aed4d5efbd7 100644
--- a/ndb/include/ndbapi/NdbApi.hpp
+++ b/storage/ndb/include/ndbapi/NdbApi.hpp
diff --git a/ndb/include/ndbapi/NdbBlob.hpp b/storage/ndb/include/ndbapi/NdbBlob.hpp
index 271287b765c..271287b765c 100644
--- a/ndb/include/ndbapi/NdbBlob.hpp
+++ b/storage/ndb/include/ndbapi/NdbBlob.hpp
diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp
new file mode 100644
index 00000000000..8ae40a738ad
--- /dev/null
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp
@@ -0,0 +1,1348 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef NdbDictionary_H
+#define NdbDictionary_H
+
+#include <ndb_types.h>
+
+class Ndb;
+struct charset_info_st;
+typedef struct charset_info_st CHARSET_INFO;
+
+/**
+ * @class NdbDictionary
+ * @brief Data dictionary class
+ *
+ * The preferred and supported way to create and drop tables and indexes
+ * in ndb is through the
+ * MySQL Server (see the MySQL Reference Manual, section MySQL Cluster).
+ *
+ * Tables and indexes that are created directly through the
+ * NdbDictionary class
+ * cannot be viewed from the MySQL Server.
+ * Dropping indexes directly via the NdbApi will cause inconsistencies
+ * if they were originally created from a MySQL Cluster.
+ *
+ * This class supports schema data enquiries such as:
+ * -# Enquiries about tables
+ * (Dictionary::getTable, Table::getNoOfColumns,
+ * Table::getPrimaryKey, and Table::getNoOfPrimaryKeys)
+ * -# Enquiries about indexes
+ * (Dictionary::getIndex, Index::getNoOfColumns,
+ * and Index::getColumn)
+ *
+ * This class supports schema data definition such as:
+ * -# Creating tables (Dictionary::createTable) and table columns
+ * -# Dropping tables (Dictionary::dropTable)
+ * -# Creating secondary indexes (Dictionary::createIndex)
+ * -# Dropping secondary indexes (Dictionary::dropIndex)
+ *
+ * NdbDictionary has several helper (inner) classes to support this:
+ * -# NdbDictionary::Dictionary for handling dictionary objects
+ * -# NdbDictionary::Table for creating tables
+ * -# NdbDictionary::Column for creating table columns
+ * -# NdbDictionary::Index for creating secondary indexes
+ *
+ * See @ref ndbapi_simple_index.cpp for details of usage.
+ */
+class NdbDictionary {
+public:
+ /**
+ * @class Object
+ * @brief Meta information about a database object (a table, index, etc)
+ */
+ class Object {
+ public:
+ /**
+ * Status of object
+ */
+ enum Status {
+      New,                    ///< The object only exists in memory and
+                              ///< has not been created in the NDB Kernel
+      Changed,                ///< The object has been modified in memory
+                              ///< and has to be committed in the NDB Kernel for
+                              ///< changes to take effect
+      Retrieved,              ///< The object exists and has been read
+                              ///< into main memory from the NDB Kernel
+ Invalid ///< The object has been invalidated
+ ///< and should not be used
+
+ };
+
+ /**
+ * Get status of object
+ */
+ virtual Status getObjectStatus() const = 0;
+
+ /**
+ * Get version of object
+ */
+ virtual int getObjectVersion() const = 0;
+
+ /**
+ * Object type
+ */
+ enum Type {
+ TypeUndefined = 0, ///< Undefined
+ SystemTable = 1, ///< System table
+ UserTable = 2, ///< User table (may be temporary)
+ UniqueHashIndex = 3, ///< Unique un-ordered hash index
+ OrderedIndex = 6, ///< Non-unique ordered index
+ HashIndexTrigger = 7, ///< Index maintenance, internal
+ IndexTrigger = 8, ///< Index maintenance, internal
+ SubscriptionTrigger = 9,///< Backup or replication, internal
+ ReadOnlyConstraint = 10 ///< Trigger, internal
+ };
+
+ /**
+ * Object state
+ */
+ enum State {
+ StateUndefined = 0, ///< Undefined
+ StateOffline = 1, ///< Offline, not usable
+ StateBuilding = 2, ///< Building, not yet usable
+ StateDropping = 3, ///< Offlining or dropping, not usable
+ StateOnline = 4, ///< Online, usable
+ StateBroken = 9 ///< Broken, should be dropped and re-created
+ };
+
+ /**
+ * Object store
+ */
+ enum Store {
+ StoreUndefined = 0, ///< Undefined
+ StoreTemporary = 1, ///< Object or data deleted on system restart
+      StorePermanent = 2      ///< Permanent, logged to disk
+ };
+
+ /**
+ * Type of fragmentation.
+ *
+ * This parameter specifies how data in the table or index will
+ * be distributed among the db nodes in the cluster.<br>
+   * The bigger the table, the more fragments should be used.
+   * Note that all replicas count as the same "fragment".<br>
+ * For a table, default is FragAllMedium. For a unique hash index,
+ * default is taken from underlying table and cannot currently
+ * be changed.
+ */
+ enum FragmentType {
+ FragUndefined = 0, ///< Fragmentation type undefined or default
+ FragSingle = 1, ///< Only one fragment
+ FragAllSmall = 2, ///< One fragment per node, default
+ FragAllMedium = 3, ///< two fragments per node
+ FragAllLarge = 4, ///< Four fragments per node.
+ DistrKeyHash = 5,
+ DistrKeyLin = 6,
+ UserDefined = 7
+ };
+ };
+
+ class Table; // forward declaration
+
+ /**
+ * @class Column
+ * @brief Represents a column in an NDB Cluster table
+ *
+ * Each column has a type. The type of a column is determined by a number
+ * of type specifiers.
+ * The type specifiers are:
+ * - Builtin type
+ * - Array length or max length
+ * - Precision and scale (not used yet)
+ * - Character set for string types
+ * - Inline and part sizes for blobs
+ *
+ * Types in general correspond to MySQL types and their variants.
+ * Data formats are same as in MySQL. NDB API provides no support for
+ * constructing such formats. NDB kernel checks them however.
+ */
+ class Column {
+ public:
+ /**
+ * The builtin column types
+ */
+ enum Type {
+ Undefined = NDB_TYPE_UNDEFINED, ///< Undefined
+ Tinyint = NDB_TYPE_TINYINT, ///< 8 bit. 1 byte signed integer, can be used in array
+ Tinyunsigned = NDB_TYPE_TINYUNSIGNED, ///< 8 bit. 1 byte unsigned integer, can be used in array
+ Smallint = NDB_TYPE_SMALLINT, ///< 16 bit. 2 byte signed integer, can be used in array
+ Smallunsigned = NDB_TYPE_SMALLUNSIGNED, ///< 16 bit. 2 byte unsigned integer, can be used in array
+ Mediumint = NDB_TYPE_MEDIUMINT, ///< 24 bit. 3 byte signed integer, can be used in array
+ Mediumunsigned = NDB_TYPE_MEDIUMUNSIGNED,///< 24 bit. 3 byte unsigned integer, can be used in array
+ Int = NDB_TYPE_INT, ///< 32 bit. 4 byte signed integer, can be used in array
+ Unsigned = NDB_TYPE_UNSIGNED, ///< 32 bit. 4 byte unsigned integer, can be used in array
+ Bigint = NDB_TYPE_BIGINT, ///< 64 bit. 8 byte signed integer, can be used in array
+      Bigunsigned = NDB_TYPE_BIGUNSIGNED,   ///< 64 bit. 8 byte unsigned integer, can be used in array
+ Float = NDB_TYPE_FLOAT, ///< 32-bit float. 4 bytes float, can be used in array
+ Double = NDB_TYPE_DOUBLE, ///< 64-bit float. 8 byte float, can be used in array
+ Olddecimal = NDB_TYPE_OLDDECIMAL, ///< MySQL < 5.0 signed decimal, Precision, Scale
+ Olddecimalunsigned = NDB_TYPE_OLDDECIMALUNSIGNED,
+ Decimal = NDB_TYPE_DECIMAL, ///< MySQL >= 5.0 signed decimal, Precision, Scale
+ Decimalunsigned = NDB_TYPE_DECIMALUNSIGNED,
+ Char = NDB_TYPE_CHAR, ///< Len. A fixed array of 1-byte chars
+ Varchar = NDB_TYPE_VARCHAR, ///< Length bytes: 1, Max: 255
+ Binary = NDB_TYPE_BINARY, ///< Len
+ Varbinary = NDB_TYPE_VARBINARY, ///< Length bytes: 1, Max: 255
+ Datetime = NDB_TYPE_DATETIME, ///< Precision down to 1 sec (sizeof(Datetime) == 8 bytes )
+ Date = NDB_TYPE_DATE, ///< Precision down to 1 day(sizeof(Date) == 4 bytes )
+ Blob = NDB_TYPE_BLOB, ///< Binary large object (see NdbBlob)
+ Text = NDB_TYPE_TEXT, ///< Text blob
+ Bit = NDB_TYPE_BIT, ///< Bit, length specifies no of bits
+ Longvarchar = NDB_TYPE_LONGVARCHAR, ///< Length bytes: 2, little-endian
+ Longvarbinary = NDB_TYPE_LONGVARBINARY, ///< Length bytes: 2, little-endian
+ Time = NDB_TYPE_TIME, ///< Time without date
+ Year = NDB_TYPE_YEAR, ///< Year 1901-2155 (1 byte)
+ Timestamp = NDB_TYPE_TIMESTAMP ///< Unix time
+ };
+
+ /**
+ * @name General
+ * @{
+ */
+
+ /**
+ * Get name of column
+ * @return Name of the column
+ */
+ const char* getName() const;
+
+ /**
+ * Get if the column is nullable or not
+ */
+ bool getNullable() const;
+
+ /**
+ * Check if column is part of primary key
+ */
+ bool getPrimaryKey() const;
+
+ /**
+     * Get the column number (horizontal position within the table)
+ */
+ int getColumnNo() const;
+
+ /**
+ * Check if column is equal to some other column
+ * @param column Column to compare with
+     * @return true if the columns are equal, otherwise false.
+ */
+ bool equal(const Column& column) const;
+
+
+ /** @} *******************************************************************/
+ /**
+ * @name Get Type Specifiers
+ * @{
+ */
+
+ /**
+ * Get type of column
+ */
+ Type getType() const;
+
+ /**
+ * Get precision of column.
+ * @note Only applicable for decimal types
+ */
+ int getPrecision() const;
+
+ /**
+ * Get scale of column.
+ * @note Only applicable for decimal types
+ */
+ int getScale() const;
+
+ /**
+ * Get length for column
+ * Array length for column or max length for variable length arrays.
+ */
+ int getLength() const;
+
+ /**
+ * For Char or Varchar or Text, get MySQL CHARSET_INFO. This
+ * specifies both character set and collation. See get_charset()
+ * etc in MySQL. (The cs is not "const" in MySQL).
+ */
+ CHARSET_INFO* getCharset() const;
+
+
+ /**
+ * For blob, get "inline size" i.e. number of initial bytes
+ * to store in table's blob attribute. This part is normally in
+ * main memory and can be indexed and interpreted.
+ */
+ int getInlineSize() const;
+
+ /**
+ * For blob, get "part size" i.e. number of bytes to store in
+ * each tuple of the "blob table". Can be set to zero to omit parts
+ * and to allow only inline bytes ("tinyblob").
+ */
+ int getPartSize() const;
+
+ /**
+ * For blob, set or get "stripe size" i.e. number of consecutive
+ * <em>parts</em> to store in each node group.
+ */
+ int getStripeSize() const;
+
+ /**
+ * Get size of element
+ */
+ int getSize() const;
+
+ /**
+ * Check if column is part of partition key
+ *
+ * A <em>partition key</em> is a set of attributes which are used
+ * to distribute the tuples onto the NDB nodes.
+ * The partition key uses the NDB Cluster hashing function.
+ *
+ * An example where this is useful is TPC-C where it might be
+ * good to use the warehouse id and district id as the partition key.
+ * This would place all data for a specific district and warehouse
+ * in the same database node.
+ *
+ * Locally in the fragments the full primary key
+ * will still be used with the hashing algorithm.
+ *
+     * @return true if the column is part of
+ * the partition key.
+ */
+ bool getPartitionKey() const;
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ inline bool getDistributionKey() const { return getPartitionKey(); };
+#endif
+
+ /** @} *******************************************************************/
+
+
+ /**
+ * @name Column creation
+ * @{
+ *
+     * These operations should normally not be performed in an NdbApi program
+     * as the results will not be visible in the MySQL Server.
+ *
+ */
+
+ /**
+ * Constructor
+ * @param name Name of column
+ */
+ Column(const char * name = "");
+ /**
+ * Copy constructor
+ * @param column Column to be copied
+ */
+ Column(const Column& column);
+ ~Column();
+
+ /**
+ * Set name of column
+ * @param name Name of the column
+ */
+ void setName(const char * name);
+
+ /**
+ * Set whether column is nullable or not
+ */
+ void setNullable(bool);
+
+ /**
+ * Set that column is part of primary key
+ */
+ void setPrimaryKey(bool);
+
+ /**
+ * Set type of column
+ * @param type Type of column
+ *
+ * @note setType resets <em>all</em> column attributes
+ * to (type dependent) defaults and should be the first
+ * method to call. Default type is Unsigned.
+ */
+ void setType(Type type);
+
+ /**
+ * Set precision of column.
+ * @note Only applicable for decimal types
+ */
+ void setPrecision(int);
+
+ /**
+ * Set scale of column.
+ * @note Only applicable for decimal types
+ */
+ void setScale(int);
+
+ /**
+ * Set length for column
+ * Array length for column or max length for variable length arrays.
+ */
+ void setLength(int length);
+
+ /**
+     * For Char or Varchar or Text, set MySQL CHARSET_INFO. This
+ * specifies both character set and collation. See get_charset()
+ * etc in MySQL. (The cs is not "const" in MySQL).
+ */
+ void setCharset(CHARSET_INFO* cs);
+
+ /**
+ * For blob, get "inline size" i.e. number of initial bytes
+ * to store in table's blob attribute. This part is normally in
+ * main memory and can be indexed and interpreted.
+ */
+ void setInlineSize(int size);
+
+ /**
+ * For blob, get "part size" i.e. number of bytes to store in
+ * each tuple of the "blob table". Can be set to zero to omit parts
+ * and to allow only inline bytes ("tinyblob").
+ */
+ void setPartSize(int size);
+
+ /**
+ * For blob, get "stripe size" i.e. number of consecutive
+ * <em>parts</em> to store in each node group.
+ */
+ void setStripeSize(int size);
+
+ /**
+ * Set partition key
+ * @see getPartitionKey
+ *
+ * @param enable If set to true, then the column will be part of
+ * the partition key.
+ */
+ void setPartitionKey(bool enable);
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ inline void setDistributionKey(bool enable)
+ { setPartitionKey(enable); };
+#endif
+
+ /** @} *******************************************************************/
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ const Table * getBlobTable() const;
+
+ void setAutoIncrement(bool);
+ bool getAutoIncrement() const;
+ void setAutoIncrementInitialValue(Uint64 val);
+ void setDefaultValue(const char*);
+ const char* getDefaultValue() const;
+
+ static const Column * FRAGMENT;
+ static const Column * FRAGMENT_MEMORY;
+ static const Column * ROW_COUNT;
+ static const Column * COMMIT_COUNT;
+ static const Column * ROW_SIZE;
+ static const Column * RANGE_NO;
+
+ int getSizeInBytes() const;
+#endif
+
+ private:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ friend class NdbRecAttr;
+ friend class NdbColumnImpl;
+#endif
+ class NdbColumnImpl & m_impl;
+ Column(NdbColumnImpl&);
+ Column& operator=(const Column&);
+ };
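+
+ /*
+  * A minimal usage sketch of the column setters above (not exhaustive;
+  * the type enumerators Unsigned and Text are assumed from the Type enum
+  * declared earlier in this class, and the column names are hypothetical):
+  *
+  *   NdbDictionary::Column id("ID");
+  *   id.setType(NdbDictionary::Column::Unsigned);  // call setType first
+  *   id.setPrimaryKey(true);
+  *   id.setPartitionKey(true);      // distribute rows on ID only
+  *
+  *   NdbDictionary::Column note("NOTE");
+  *   note.setType(NdbDictionary::Column::Text);
+  *   note.setInlineSize(256);       // first 256 bytes stored in the row
+  *   note.setPartSize(2000);        // remaining bytes in 2000-byte parts
+  */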
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ /**
+ * Backward-compatible alias for Column
+ */
+ typedef Column Attribute;
+#endif
+
+ /**
+ * @brief Represents a table in NDB Cluster
+ *
+ * <em>TableSize</em><br>
+ * When calculating the data storage one should add the size of all
+ * attributes (each attribute consumes at least 4 bytes) and also an overhead
+ * of 12 bytes. Variable size attributes (not supported yet) will have a
+ * size of 12 bytes plus the actual data storage parts, where there is an
+ * additional overhead based on the size of the variable part.<br>
+ * An example table with 5 attributes:
+ * one 64 bit attribute, one 32 bit attribute,
+ * two 16 bit attributes and one array of 64 8 bit values.
+ * This table will consume
+ * 12 (overhead) + 8 + 4 + 2*4 (4 is minimum) + 64 = 96 bytes per record.
+ * Additionally, an overhead of about 2% should be allocated for page
+ * headers and waste. Thus, 1 million records should consume 96 MBytes,
+ * plus the 2 MByte overhead, rounded up to about 100 000 kBytes.<br>
+ *
+ */
+ class Table : public Object {
+ public:
+ /**
+ * @name General
+ * @{
+ */
+
+ /**
+ * Get table name
+ */
+ const char * getName() const;
+
+ /**
+ * Get table id
+ */
+ int getTableId() const;
+
+ /**
+ * Get column definition via name.
+ * @return null if no column with that name exists
+ */
+ const Column* getColumn(const char * name) const;
+
+ /**
+ * Get column definition via index in table.
+ * @return null if no column exists at that index
+ */
+ Column* getColumn(const int attributeId);
+
+ /**
+ * Get column definition via name.
+ * @return null if no column with that name exists
+ */
+ Column* getColumn(const char * name);
+
+ /**
+ * Get column definition via index in table.
+ * @return null if no column exists at that index
+ */
+ const Column* getColumn(const int attributeId) const;
+
+ /** @} *******************************************************************/
+ /**
+ * @name Storage
+ * @{
+ */
+
+ /**
+ * If set to false, then the table is a temporary
+ * table and is not logged to disk.
+ *
+ * In case of a system restart the table will still
+ * be defined and exist but will be empty.
+ * Thus no checkpointing and no logging is performed on the table.
+ *
+ * The default value is true and indicates a normal table
+ * with full checkpointing and logging activated.
+ */
+ bool getLogging() const;
+
+ /**
+ * Get fragmentation type
+ */
+ FragmentType getFragmentType() const;
+
+ /**
+ * Get KValue (Hash parameter.)
+ * Only allowed value is 6.
+ * Later implementations might add flexibility in this parameter.
+ */
+ int getKValue() const;
+
+ /**
+ * Get MinLoadFactor (Hash parameter.)
+ * This value specifies the load factor when starting to shrink
+ * the hash table.
+ * It must be smaller than MaxLoadFactor.
+ * Both these factors are given in percentage.
+ */
+ int getMinLoadFactor() const;
+
+ /**
+ * Get MaxLoadFactor (Hash parameter.)
+ * This value specifies the load factor when starting to split
+ * the containers in the local hash tables.
+ * The maximum value is 100, which optimizes memory usage.
+ * A lower value stores less information in each container and thus
+ * finds the key faster, but consumes more memory.
+ */
+ int getMaxLoadFactor() const;
+
+ /** @} *******************************************************************/
+ /**
+ * @name Other
+ * @{
+ */
+
+ /**
+ * Get number of columns in the table
+ */
+ int getNoOfColumns() const;
+
+ /**
+ * Get number of primary keys in the table
+ */
+ int getNoOfPrimaryKeys() const;
+
+ /**
+ * Get name of primary key
+ */
+ const char* getPrimaryKey(int no) const;
+
+ /**
+ * Check if table is equal to some other table
+ */
+ bool equal(const Table&) const;
+
+ /**
+ * Get frm file stored with this table
+ */
+ const void* getFrmData() const;
+ Uint32 getFrmLength() const;
+
+ /**
+ * Get Node Group and Tablespace ids for fragments in table
+ */
+ const void *getNodeGroupIds() const;
+ Uint32 getNodeGroupIdsLength() const;
+
+ /** @} *******************************************************************/
+
+ /**
+ * @name Table creation
+ * @{
+ *
+ * These methods should normally not be used in an application as
+ * the result is not accessible from the MySQL Server
+ *
+ */
+
+ /**
+ * Constructor
+ * @param name Name of table
+ */
+ Table(const char * name = "");
+
+ /**
+ * Copy constructor
+ * @param table Table to be copied
+ */
+ Table(const Table& table);
+ virtual ~Table();
+
+ /**
+ * Assignment operator, deep copy
+ * @param table Table to be copied
+ */
+ Table& operator=(const Table& table);
+
+ /**
+ * Set name of table
+ * @param name Name of table
+ */
+ void setName(const char * name);
+
+ /**
+ * Add a column definition to a table
+ * @note creates a copy
+ */
+ void addColumn(const Column &);
+
+ /**
+ * @see NdbDictionary::Table::getLogging.
+ */
+ void setLogging(bool);
+
+ /**
+ * Set fragmentation type
+ */
+ void setFragmentType(FragmentType);
+
+ /**
+ * Set KValue (Hash parameter.)
+ * Only allowed value is 6.
+ * Later implementations might add flexibility in this parameter.
+ */
+ void setKValue(int kValue);
+
+ /**
+ * Set MinLoadFactor (Hash parameter.)
+ * This value specifies the load factor when starting to shrink
+ * the hash table.
+ * It must be smaller than MaxLoadFactor.
+ * Both these factors are given in percentage.
+ */
+ void setMinLoadFactor(int);
+
+ /**
+ * Set MaxLoadFactor (Hash parameter.)
+ * This value specifies the load factor when starting to split
+ * the containers in the local hash tables.
+ * The maximum value is 100, which optimizes memory usage.
+ * A lower value stores less information in each container and thus
+ * finds the key faster, but consumes more memory.
+ */
+ void setMaxLoadFactor(int);
+
+ /**
+ * Get table object type
+ */
+ Object::Type getObjectType() const;
+
+ /**
+ * Get object status
+ */
+ virtual Object::Status getObjectStatus() const;
+
+ /**
+ * Get object version
+ */
+ virtual int getObjectVersion() const;
+
+ /**
+ * Set frm file to store with this table
+ */
+ void setFrm(const void* data, Uint32 len);
+
+ /**
+ * Set node group for fragments
+ */
+ void setNodeGroupIds(const void *data, Uint32 len);
+
+ /**
+ * Set table object type
+ */
+ void setObjectType(Object::Type type);
+
+ /** @} *******************************************************************/
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ void setStoredTable(bool x) { setLogging(x); }
+ bool getStoredTable() const { return getLogging(); }
+
+ int getRowSizeInBytes() const ;
+ int createTableInDb(Ndb*, bool existingEqualIsOk = true) const ;
+
+ int getReplicaCount() const ;
+#endif
+
+ private:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ friend class NdbTableImpl;
+#endif
+ class NdbTableImpl & m_impl;
+ Table(NdbTableImpl&);
+ };
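+
+ /*
+  * A minimal sketch of building a Table from Column objects with the
+  * creation methods above (table creation is normally left to the MySQL
+  * Server; names and types are hypothetical):
+  *
+  *   NdbDictionary::Table tab("t1");
+  *   NdbDictionary::Column id("ID");
+  *   id.setType(NdbDictionary::Column::Unsigned);
+  *   id.setPrimaryKey(true);
+  *   tab.addColumn(id);        // tab keeps its own copy of id
+  *   tab.setLogging(true);     // normal table: checkpointed and logged
+  */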
+
+ /**
+ * @class Index
+ * @brief Represents an index in an NDB Cluster
+ */
+ class Index : public Object {
+ public:
+
+ /**
+ * @name Getting Index properties
+ * @{
+ */
+
+ /**
+ * Get the name of an index
+ */
+ const char * getName() const;
+
+ /**
+ * Get the name of the table being indexed
+ */
+ const char * getTable() const;
+
+ /**
+ * Get the number of columns in the index
+ */
+ unsigned getNoOfColumns() const;
+
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ /**
+ * Get the number of columns in the index
+ * Deprecated, use getNoOfColumns instead.
+ */
+ int getNoOfIndexColumns() const;
+#endif
+
+ /**
+ * Get a specific column in the index
+ */
+ const Column * getColumn(unsigned no) const ;
+
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ /**
+ * Get a specific column name in the index
+ * Deprecated, use getColumn instead.
+ */
+ const char * getIndexColumn(int no) const ;
+#endif
+
+ /**
+ * Represents type of index
+ */
+ enum Type {
+ Undefined = 0, ///< Undefined object type (initial value)
+ UniqueHashIndex = 3, ///< Unique un-ordered hash index
+ ///< (only one currently supported)
+ OrderedIndex = 6 ///< Non-unique ordered index
+ };
+
+ /**
+ * Get index type of the index
+ */
+ Type getType() const;
+
+ /**
+ * Check if index is set to be stored on disk
+ *
+ * @return true if logging is enabled
+ *
+ * @note Non-logged indexes are rebuilt at system restart.
+ * @note Ordered indexes do not currently support logging.
+ */
+ bool getLogging() const;
+
+ /**
+ * Get object status
+ */
+ virtual Object::Status getObjectStatus() const;
+
+ /**
+ * Get object version
+ */
+ virtual int getObjectVersion() const;
+
+ /** @} *******************************************************************/
+
+ /**
+ * @name Index creation
+ * @{
+ *
+ * These methods should normally not be used in an application as
+ * the result will not be visible from the MySQL Server
+ *
+ */
+
+ /**
+ * Constructor
+ * @param name Name of index
+ */
+ Index(const char * name = "");
+ virtual ~Index();
+
+ /**
+ * Set the name of an index
+ */
+ void setName(const char * name);
+
+ /**
+ * Define the name of the table to be indexed
+ */
+ void setTable(const char * name);
+
+ /**
+ * Add a column to the index definition
+ * Note that the order of columns will be in
+ * the order they are added (only matters for ordered indexes).
+ */
+ void addColumn(const Column & c);
+
+ /**
+ * Add a column name to the index definition
+ * Note that the order of columns will be in
+ * the order they are added (only matters for ordered indexes).
+ */
+ void addColumnName(const char * name);
+
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ /**
+ * Add a column name to the index definition
+ * Note that the order of columns will be in
+ * the order they are added (only matters for ordered indexes).
+ * Deprecated, use addColumnName instead.
+ */
+ void addIndexColumn(const char * name);
+#endif
+
+ /**
+ * Add several column names to the index definition
+ * Note that the order of columns will be in
+ * the order they are added (only matters for ordered indexes).
+ */
+ void addColumnNames(unsigned noOfNames, const char ** names);
+
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ /**
+ * Add several column names to the index definition
+ * Note that the order of columns will be in
+ * the order they are added (only matters for ordered indexes).
+ * Deprecated, use addColumnNames instead.
+ */
+ void addIndexColumns(int noOfNames, const char ** names);
+#endif
+
+ /**
+ * Set index type of the index
+ */
+ void setType(Type type);
+
+ /**
+ * Enable/Disable index storage on disk
+ *
+ * @param enable If set to true, logging is enabled
+ *
+ * @see NdbDictionary::Index::getLogging
+ */
+ void setLogging(bool enable);
+
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ void setStoredIndex(bool x) { setLogging(x); }
+ bool getStoredIndex() const { return getLogging(); }
+#endif
+
+ /** @} *******************************************************************/
+
+ private:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ friend class NdbIndexImpl;
+#endif
+ class NdbIndexImpl & m_impl;
+ Index(NdbIndexImpl&);
+ };
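+
+ /*
+  * A minimal sketch of defining an ordered index with the methods above
+  * (table and column names are hypothetical):
+  *
+  *   NdbDictionary::Index idx("t1_name_idx");
+  *   idx.setTable("t1");
+  *   idx.setType(NdbDictionary::Index::OrderedIndex);
+  *   idx.setLogging(false);    // ordered indexes do not support logging
+  *   idx.addColumnName("NAME");
+  */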
+
+ /**
+ * @brief Represents an Event in NDB Cluster
+ *
+ */
+ class Event : public Object {
+ public:
+ /**
+ * Specifies the type of database operations an Event listens to
+ */
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ /** TableEvent must match 1 << TriggerEvent */
+#endif
+ enum TableEvent {
+ TE_INSERT=1, ///< Insert event on table
+ TE_DELETE=2, ///< Delete event on table
+ TE_UPDATE=4, ///< Update event on table
+ TE_ALL=7 ///< Any/all event on table (not relevant when
+ ///< events are received)
+ };
+ /**
+ * Specifies the durability of an event
+ * (future versions may supply other types)
+ */
+ enum EventDurability {
+ ED_UNDEFINED
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ = 0
+#endif
+#if 0 // not supported
+ ,ED_SESSION = 1,
+ // Only this API can use it
+ // and it's deleted after api has disconnected or ndb has restarted
+
+ ED_TEMPORARY = 2
+ // All APIs can use it,
+ // but it's removed when ndb is restarted
+#endif
+ ,ED_PERMANENT ///< All APIs can use it.
+ ///< It's still defined after a cluster system restart
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ = 3
+#endif
+ };
+
+ /**
+ * Constructor
+ * @param name Name of event
+ */
+ Event(const char *name);
+ /**
+ * Constructor
+ * @param name Name of event
+ * @param table Reference retrieved from NdbDictionary
+ */
+ Event(const char *name, const NdbDictionary::Table& table);
+ virtual ~Event();
+ /**
+ * Set unique identifier for the event
+ */
+ void setName(const char *name);
+ /**
+ * Get unique identifier for the event
+ */
+ const char *getName() const;
+ /**
+ * Define table on which events should be detected
+ *
+ * @note calling this method defaults to detection
+ * of events on all columns. Subsequent calls to
+ * addEventColumn() will override this.
+ *
+ * @param table reference retrieved from NdbDictionary
+ */
+ void setTable(const NdbDictionary::Table& table);
+ /**
+ * Set table for which events should be detected
+ *
+ * @note preferred way is using setTable(const NdbDictionary::Table&)
+ * or constructor with table object parameter
+ */
+ void setTable(const char *tableName);
+ /**
+ * Get table name for events
+ *
+ * @return table name
+ */
+ const char* getTableName() const;
+ /**
+ * Add type of event that should be detected
+ */
+ void addTableEvent(const TableEvent te);
+ /**
+ * Set durability of the event
+ */
+ void setDurability(EventDurability);
+ /**
+ * Get durability of the event
+ */
+ EventDurability getDurability() const;
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ void addColumn(const Column &c);
+#endif
+ /**
+ * Add a column on which events should be detected
+ *
+ * @param attrId Column id
+ *
+ * @note errors will not be detected until createEvent() is called
+ */
+ void addEventColumn(unsigned attrId);
+ /**
+ * Add a column on which events should be detected
+ *
+ * @param columnName Column name
+ *
+ * @note errors will not be detected until createEvent() is called
+ */
+ void addEventColumn(const char * columnName);
+ /**
+ * Add several columns on which events should be detected
+ *
+ * @param n Number of columns
+ * @param columnNames Column names
+ *
+ * @note errors will not be detected until
+ * NdbDictionary::Dictionary::createEvent() is called
+ */
+ void addEventColumns(int n, const char ** columnNames);
+
+ /**
+ * Get number of columns defined in an Event
+ *
+ * @return Number of columns, -1 on error
+ */
+ int getNoOfEventColumns() const;
+
+ /**
+ * Get object status
+ */
+ virtual Object::Status getObjectStatus() const;
+
+ /**
+ * Get object version
+ */
+ virtual int getObjectVersion() const;
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ void print();
+#endif
+
+ private:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ friend class NdbEventImpl;
+ friend class NdbEventOperationImpl;
+#endif
+ class NdbEventImpl & m_impl;
+ Event(NdbEventImpl&);
+ };
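+
+ /*
+  * A minimal sketch of defining an Event with the methods above. The
+  * reference tab is assumed to be a Table obtained via
+  * NdbDictionary::Dictionary::getTable(); names are hypothetical:
+  *
+  *   NdbDictionary::Event ev("t1_inserts");
+  *   ev.setTable(tab);                                   // all columns by default
+  *   ev.addTableEvent(NdbDictionary::Event::TE_INSERT);  // only inserts
+  *   ev.addEventColumn("NAME");                          // restrict to column NAME
+  *   ev.setDurability(NdbDictionary::Event::ED_PERMANENT);
+  */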
+
+ /**
+ * @class Dictionary
+ * @brief Dictionary for defining and retrieving meta data
+ */
+ class Dictionary {
+ public:
+ /**
+ * @class List
+ * @brief Structure for retrieving lists of object names
+ */
+ struct List {
+ /**
+ * @struct Element
+ * @brief Object to be stored in an NdbDictionary::Dictionary::List
+ */
+ struct Element {
+ unsigned id; ///< Id of object
+ Object::Type type; ///< Type of object
+ Object::State state; ///< State of object
+ Object::Store store; ///< How object is stored
+ char * database; ///< In what database the object resides
+ char * schema; ///< What schema the object is defined in
+ char * name; ///< Name of object
+ Element() :
+ id(0),
+ type(Object::TypeUndefined),
+ state(Object::StateUndefined),
+ store(Object::StoreUndefined),
+ database(0),
+ schema(0),
+ name(0) {
+ }
+ };
+ unsigned count; ///< Number of elements in list
+ Element * elements; ///< Pointer to array of elements
+ List() : count(0), elements(0) {}
+ ~List() {
+ if (elements != 0) {
+ for (unsigned i = 0; i < count; i++) {
+ delete[] elements[i].database;
+ delete[] elements[i].schema;
+ delete[] elements[i].name;
+ elements[i].name = 0;
+ }
+ delete[] elements;
+ count = 0;
+ elements = 0;
+ }
+ }
+ };
+
+ /**
+ * @name General
+ * @{
+ */
+
+ /**
+ * Fetch list of all objects, optionally restricted to given type.
+ *
+ * @param list List of objects returned in the dictionary
+ * @param type Restrict returned list to only contain objects of
+ * this type
+ *
+ * @return -1 if error.
+ *
+ */
+ int listObjects(List & list, Object::Type type = Object::TypeUndefined);
+ int listObjects(List & list,
+ Object::Type type = Object::TypeUndefined) const;
+
+ /**
+ * Get the latest error
+ *
+ * @return Error object.
+ */
+ const struct NdbError & getNdbError() const;
+
+ /** @} *******************************************************************/
+
+ /**
+ * @name Retrieving references to Tables and Indexes
+ * @{
+ */
+
+ /**
+ * Get table with given name, NULL if undefined
+ * @param name Name of table to get
+ * @return table if successful otherwise NULL.
+ */
+ const Table * getTable(const char * name) const;
+
+ /**
+ * Get index with given name, NULL if undefined
+ * @param indexName Name of index to get.
+ * @param tableName Name of table that index belongs to.
+ * @return index if successful, otherwise 0.
+ */
+ const Index * getIndex(const char * indexName,
+ const char * tableName) const;
+
+ /**
+ * Fetch list of indexes of given table.
+ * @param list Reference to list where to store the listed indexes
+ * @param tableName Name of table that index belongs to.
+ * @return 0 if successful, otherwise -1
+ */
+ int listIndexes(List & list, const char * tableName);
+ int listIndexes(List & list, const char * tableName) const;
+
+ /** @} *******************************************************************/
+ /**
+ * @name Events
+ * @{
+ */
+
+ /**
+ * Create an event given a defined Event instance
+ * @param event Event to create
+ * @return 0 if successful otherwise -1.
+ */
+ int createEvent(const Event &event);
+
+ /**
+ * Drop event with given name
+ * @param eventName Name of event to drop.
+ * @return 0 if successful otherwise -1.
+ */
+ int dropEvent(const char * eventName);
+
+ /**
+ * Get event with given name.
+ * @param eventName Name of event to get.
+ * @return an Event if successful, otherwise NULL.
+ */
+ const Event * getEvent(const char * eventName);
+
+ /** @} *******************************************************************/
+
+ /**
+ * @name Table creation
+ * @{
+ *
+ * These methods should normally not be used in an application as
+ * the result will not be visible from the MySQL Server
+ */
+
+ /**
+ * Create a table given a defined Table instance
+ * @param table Table to create
+ * @return 0 if successful otherwise -1.
+ */
+ int createTable(const Table &table);
+
+ /**
+ * Drop table given retrieved Table instance
+ * @param table Table to drop
+ * @return 0 if successful otherwise -1.
+ */
+ int dropTable(Table & table);
+
+ /**
+ * Drop table given table name
+ * @param name Name of table to drop
+ * @return 0 if successful otherwise -1.
+ */
+ int dropTable(const char * name);
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ /**
+ * Alter defined table given defined Table instance
+ * @param table Table to alter
+ * @return -2 (incompatible version) <br>
+ * -1 general error <br>
+ * 0 success
+ */
+ int alterTable(const Table &table);
+
+ /**
+ * Invalidate cached table object
+ * @param name Name of table to invalidate
+ */
+ void invalidateTable(const char * name);
+#endif
+
+ /**
+ * Remove table from local cache
+ */
+ void removeCachedTable(const char * table);
+ /**
+ * Remove index from local cache
+ */
+ void removeCachedIndex(const char * index, const char * table);
+
+
+ /** @} *******************************************************************/
+ /**
+ * @name Index creation
+ * @{
+ *
+ * These methods should normally not be used in an application as
+ * the result will not be visible from the MySQL Server
+ *
+ */
+
+ /**
+ * Create index given defined Index instance
+ * @param index Index to create
+ * @return 0 if successful otherwise -1.
+ */
+ int createIndex(const Index &index);
+
+ /**
+ * Drop index with given name
+ * @param indexName Name of index to drop.
+ * @param tableName Name of table that index belongs to.
+ * @return 0 if successful otherwise -1.
+ */
+ int dropIndex(const char * indexName,
+ const char * tableName);
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ /**
+ * Invalidate cached index object
+ */
+ void invalidateIndex(const char * indexName,
+ const char * tableName);
+#endif
+
+ /** @} *******************************************************************/
+
+ protected:
+ Dictionary(Ndb & ndb);
+ ~Dictionary();
+
+ private:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ friend class NdbDictionaryImpl;
+ friend class UtilTransactions;
+ friend class NdbBlob;
+#endif
+ class NdbDictionaryImpl & m_impl;
+ Dictionary(NdbDictionaryImpl&);
+ const Table * getIndexTable(const char * indexName,
+ const char * tableName) const;
+ public:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ const Table * getTable(const char * name, void **data) const;
+ void set_local_table_data_size(unsigned sz);
+#endif
+ };
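+
+ /*
+  * A minimal sketch of typical Dictionary usage, assuming myDict was
+  * obtained from a connected Ndb object (e.g. via a getDictionary()
+  * accessor on Ndb, declared elsewhere):
+  *
+  *   const NdbDictionary::Table * tab = myDict->getTable("t1");
+  *   if (tab == 0) {
+  *     // inspect myDict->getNdbError() for the reason
+  *   }
+  *
+  *   NdbDictionary::Dictionary::List list;
+  *   if (myDict->listObjects(list) != -1) {
+  *     for (unsigned i = 0; i < list.count; i++) {
+  *       // list.elements[i].name, list.elements[i].type, ...
+  *     }
+  *   }
+  */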
+};
+
+class NdbOut& operator <<(class NdbOut& out, const NdbDictionary::Column& col);
+
+#endif
diff --git a/ndb/include/ndbapi/NdbError.hpp b/storage/ndb/include/ndbapi/NdbError.hpp
index f67b3c4ccaa..f67b3c4ccaa 100644
--- a/ndb/include/ndbapi/NdbError.hpp
+++ b/storage/ndb/include/ndbapi/NdbError.hpp
diff --git a/ndb/include/ndbapi/NdbEventOperation.hpp b/storage/ndb/include/ndbapi/NdbEventOperation.hpp
index 55ee96b3144..55ee96b3144 100644
--- a/ndb/include/ndbapi/NdbEventOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbEventOperation.hpp
diff --git a/ndb/include/ndbapi/NdbIndexOperation.hpp b/storage/ndb/include/ndbapi/NdbIndexOperation.hpp
index 3de6835238e..3de6835238e 100644
--- a/ndb/include/ndbapi/NdbIndexOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbIndexOperation.hpp
diff --git a/ndb/include/ndbapi/NdbIndexScanOperation.hpp b/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp
index 0a31f228921..0a31f228921 100644
--- a/ndb/include/ndbapi/NdbIndexScanOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp
diff --git a/ndb/include/ndbapi/NdbOperation.hpp b/storage/ndb/include/ndbapi/NdbOperation.hpp
index fca610772cc..fca610772cc 100644
--- a/ndb/include/ndbapi/NdbOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbOperation.hpp
diff --git a/ndb/include/ndbapi/NdbPool.hpp b/storage/ndb/include/ndbapi/NdbPool.hpp
index 64cba5a008c..64cba5a008c 100644
--- a/ndb/include/ndbapi/NdbPool.hpp
+++ b/storage/ndb/include/ndbapi/NdbPool.hpp
diff --git a/ndb/include/ndbapi/NdbRecAttr.hpp b/storage/ndb/include/ndbapi/NdbRecAttr.hpp
index 50de4f3277e..50de4f3277e 100644
--- a/ndb/include/ndbapi/NdbRecAttr.hpp
+++ b/storage/ndb/include/ndbapi/NdbRecAttr.hpp
diff --git a/ndb/include/ndbapi/NdbReceiver.hpp b/storage/ndb/include/ndbapi/NdbReceiver.hpp
index ff6debc7fd3..ff6debc7fd3 100644
--- a/ndb/include/ndbapi/NdbReceiver.hpp
+++ b/storage/ndb/include/ndbapi/NdbReceiver.hpp
diff --git a/ndb/include/ndbapi/NdbScanFilter.hpp b/storage/ndb/include/ndbapi/NdbScanFilter.hpp
index b5457bab99b..b5457bab99b 100644
--- a/ndb/include/ndbapi/NdbScanFilter.hpp
+++ b/storage/ndb/include/ndbapi/NdbScanFilter.hpp
diff --git a/ndb/include/ndbapi/NdbScanOperation.hpp b/storage/ndb/include/ndbapi/NdbScanOperation.hpp
index bf8f362cefc..bf8f362cefc 100644
--- a/ndb/include/ndbapi/NdbScanOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbScanOperation.hpp
diff --git a/ndb/include/ndbapi/NdbTransaction.hpp b/storage/ndb/include/ndbapi/NdbTransaction.hpp
index 2e102b104d8..2e102b104d8 100644
--- a/ndb/include/ndbapi/NdbTransaction.hpp
+++ b/storage/ndb/include/ndbapi/NdbTransaction.hpp
diff --git a/ndb/include/ndbapi/ndb_cluster_connection.hpp b/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp
index 97db76563aa..97db76563aa 100644
--- a/ndb/include/ndbapi/ndb_cluster_connection.hpp
+++ b/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp
diff --git a/ndb/include/ndbapi/ndb_opt_defaults.h b/storage/ndb/include/ndbapi/ndb_opt_defaults.h
index d03a9dcc36f..d03a9dcc36f 100644
--- a/ndb/include/ndbapi/ndb_opt_defaults.h
+++ b/storage/ndb/include/ndbapi/ndb_opt_defaults.h
diff --git a/ndb/include/ndbapi/ndbapi_limits.h b/storage/ndb/include/ndbapi/ndbapi_limits.h
index 5c4db71b747..5c4db71b747 100644
--- a/ndb/include/ndbapi/ndbapi_limits.h
+++ b/storage/ndb/include/ndbapi/ndbapi_limits.h
diff --git a/ndb/include/ndbapi/ndberror.h b/storage/ndb/include/ndbapi/ndberror.h
index 2225f68f08d..2225f68f08d 100644
--- a/ndb/include/ndbapi/ndberror.h
+++ b/storage/ndb/include/ndbapi/ndberror.h
diff --git a/ndb/include/newtonapi/dba.h b/storage/ndb/include/newtonapi/dba.h
index 4cfc0ec8eb9..4cfc0ec8eb9 100644
--- a/ndb/include/newtonapi/dba.h
+++ b/storage/ndb/include/newtonapi/dba.h
diff --git a/ndb/include/newtonapi/defs/pcn_types.h b/storage/ndb/include/newtonapi/defs/pcn_types.h
index eae6c67899d..eae6c67899d 100644
--- a/ndb/include/newtonapi/defs/pcn_types.h
+++ b/storage/ndb/include/newtonapi/defs/pcn_types.h
diff --git a/ndb/include/portlib/NdbCondition.h b/storage/ndb/include/portlib/NdbCondition.h
index 3d959a0db41..3d959a0db41 100644
--- a/ndb/include/portlib/NdbCondition.h
+++ b/storage/ndb/include/portlib/NdbCondition.h
diff --git a/ndb/include/portlib/NdbConfig.h b/storage/ndb/include/portlib/NdbConfig.h
index 1bca825ab8d..1bca825ab8d 100644
--- a/ndb/include/portlib/NdbConfig.h
+++ b/storage/ndb/include/portlib/NdbConfig.h
diff --git a/ndb/include/portlib/NdbDaemon.h b/storage/ndb/include/portlib/NdbDaemon.h
index 74ea3f06419..74ea3f06419 100644
--- a/ndb/include/portlib/NdbDaemon.h
+++ b/storage/ndb/include/portlib/NdbDaemon.h
diff --git a/ndb/include/portlib/NdbEnv.h b/storage/ndb/include/portlib/NdbEnv.h
index 1611bf3152e..1611bf3152e 100644
--- a/ndb/include/portlib/NdbEnv.h
+++ b/storage/ndb/include/portlib/NdbEnv.h
diff --git a/ndb/include/portlib/NdbHost.h b/storage/ndb/include/portlib/NdbHost.h
index 90e7b781137..90e7b781137 100644
--- a/ndb/include/portlib/NdbHost.h
+++ b/storage/ndb/include/portlib/NdbHost.h
diff --git a/ndb/include/portlib/NdbMain.h b/storage/ndb/include/portlib/NdbMain.h
index 7cc7a877750..7cc7a877750 100644
--- a/ndb/include/portlib/NdbMain.h
+++ b/storage/ndb/include/portlib/NdbMain.h
diff --git a/ndb/include/portlib/NdbMem.h b/storage/ndb/include/portlib/NdbMem.h
index 0f2de80200e..0f2de80200e 100644
--- a/ndb/include/portlib/NdbMem.h
+++ b/storage/ndb/include/portlib/NdbMem.h
diff --git a/ndb/include/portlib/NdbMutex.h b/storage/ndb/include/portlib/NdbMutex.h
index b0b985ecef5..b0b985ecef5 100644
--- a/ndb/include/portlib/NdbMutex.h
+++ b/storage/ndb/include/portlib/NdbMutex.h
diff --git a/ndb/include/portlib/NdbSleep.h b/storage/ndb/include/portlib/NdbSleep.h
index 3b26710154f..3b26710154f 100644
--- a/ndb/include/portlib/NdbSleep.h
+++ b/storage/ndb/include/portlib/NdbSleep.h
diff --git a/ndb/include/portlib/NdbTCP.h b/storage/ndb/include/portlib/NdbTCP.h
index 308a3833ffd..308a3833ffd 100644
--- a/ndb/include/portlib/NdbTCP.h
+++ b/storage/ndb/include/portlib/NdbTCP.h
diff --git a/ndb/include/portlib/NdbThread.h b/storage/ndb/include/portlib/NdbThread.h
index e86deee4354..e86deee4354 100644
--- a/ndb/include/portlib/NdbThread.h
+++ b/storage/ndb/include/portlib/NdbThread.h
diff --git a/ndb/include/portlib/NdbTick.h b/storage/ndb/include/portlib/NdbTick.h
index 9bd8eca22bd..9bd8eca22bd 100644
--- a/ndb/include/portlib/NdbTick.h
+++ b/storage/ndb/include/portlib/NdbTick.h
diff --git a/ndb/include/portlib/PortDefs.h b/storage/ndb/include/portlib/PortDefs.h
index a115c60cfe1..a115c60cfe1 100644
--- a/ndb/include/portlib/PortDefs.h
+++ b/storage/ndb/include/portlib/PortDefs.h
diff --git a/ndb/include/portlib/prefetch.h b/storage/ndb/include/portlib/prefetch.h
index 729c80bd93e..729c80bd93e 100644
--- a/ndb/include/portlib/prefetch.h
+++ b/storage/ndb/include/portlib/prefetch.h
diff --git a/ndb/include/transporter/TransporterCallback.hpp b/storage/ndb/include/transporter/TransporterCallback.hpp
index 9f910f31728..9f910f31728 100644
--- a/ndb/include/transporter/TransporterCallback.hpp
+++ b/storage/ndb/include/transporter/TransporterCallback.hpp
diff --git a/ndb/include/transporter/TransporterDefinitions.hpp b/storage/ndb/include/transporter/TransporterDefinitions.hpp
index e9c5ffa2c80..e9c5ffa2c80 100644
--- a/ndb/include/transporter/TransporterDefinitions.hpp
+++ b/storage/ndb/include/transporter/TransporterDefinitions.hpp
diff --git a/ndb/include/transporter/TransporterRegistry.hpp b/storage/ndb/include/transporter/TransporterRegistry.hpp
index 0bb9733e8c4..0bb9733e8c4 100644
--- a/ndb/include/transporter/TransporterRegistry.hpp
+++ b/storage/ndb/include/transporter/TransporterRegistry.hpp
diff --git a/ndb/include/util/Base64.hpp b/storage/ndb/include/util/Base64.hpp
index f4b11ad9214..f4b11ad9214 100644
--- a/ndb/include/util/Base64.hpp
+++ b/storage/ndb/include/util/Base64.hpp
diff --git a/ndb/include/util/BaseString.hpp b/storage/ndb/include/util/BaseString.hpp
index 02a6a3b3e66..02a6a3b3e66 100644
--- a/ndb/include/util/BaseString.hpp
+++ b/storage/ndb/include/util/BaseString.hpp
diff --git a/ndb/include/util/Bitmask.hpp b/storage/ndb/include/util/Bitmask.hpp
index ade57a5ee57..ade57a5ee57 100644
--- a/ndb/include/util/Bitmask.hpp
+++ b/storage/ndb/include/util/Bitmask.hpp
diff --git a/ndb/include/util/ConfigValues.hpp b/storage/ndb/include/util/ConfigValues.hpp
index 457488e3c42..457488e3c42 100644
--- a/ndb/include/util/ConfigValues.hpp
+++ b/storage/ndb/include/util/ConfigValues.hpp
diff --git a/ndb/include/util/File.hpp b/storage/ndb/include/util/File.hpp
index 3ed0ad7a6f9..3ed0ad7a6f9 100644
--- a/ndb/include/util/File.hpp
+++ b/storage/ndb/include/util/File.hpp
diff --git a/ndb/include/util/InputStream.hpp b/storage/ndb/include/util/InputStream.hpp
index b2a56b1e433..b2a56b1e433 100644
--- a/ndb/include/util/InputStream.hpp
+++ b/storage/ndb/include/util/InputStream.hpp
diff --git a/ndb/include/util/NdbAutoPtr.hpp b/storage/ndb/include/util/NdbAutoPtr.hpp
index ff747e3de68..ff747e3de68 100644
--- a/ndb/include/util/NdbAutoPtr.hpp
+++ b/storage/ndb/include/util/NdbAutoPtr.hpp
diff --git a/ndb/include/util/NdbOut.hpp b/storage/ndb/include/util/NdbOut.hpp
index d85d5cc6305..d85d5cc6305 100644
--- a/ndb/include/util/NdbOut.hpp
+++ b/storage/ndb/include/util/NdbOut.hpp
diff --git a/ndb/include/util/NdbSqlUtil.hpp b/storage/ndb/include/util/NdbSqlUtil.hpp
index 3e98dcd1805..3e98dcd1805 100644
--- a/ndb/include/util/NdbSqlUtil.hpp
+++ b/storage/ndb/include/util/NdbSqlUtil.hpp
diff --git a/ndb/include/util/OutputStream.hpp b/storage/ndb/include/util/OutputStream.hpp
index c7e009d4537..c7e009d4537 100644
--- a/ndb/include/util/OutputStream.hpp
+++ b/storage/ndb/include/util/OutputStream.hpp
diff --git a/ndb/include/util/Parser.hpp b/storage/ndb/include/util/Parser.hpp
index c117498e1ba..c117498e1ba 100644
--- a/ndb/include/util/Parser.hpp
+++ b/storage/ndb/include/util/Parser.hpp
diff --git a/ndb/include/util/Properties.hpp b/storage/ndb/include/util/Properties.hpp
index e6668744211..e6668744211 100644
--- a/ndb/include/util/Properties.hpp
+++ b/storage/ndb/include/util/Properties.hpp
diff --git a/ndb/include/util/SimpleProperties.hpp b/storage/ndb/include/util/SimpleProperties.hpp
index 438426fb62b..438426fb62b 100644
--- a/ndb/include/util/SimpleProperties.hpp
+++ b/storage/ndb/include/util/SimpleProperties.hpp
diff --git a/ndb/include/util/SocketAuthenticator.hpp b/storage/ndb/include/util/SocketAuthenticator.hpp
index 1b82567feaa..1b82567feaa 100644
--- a/ndb/include/util/SocketAuthenticator.hpp
+++ b/storage/ndb/include/util/SocketAuthenticator.hpp
diff --git a/ndb/include/util/SocketClient.hpp b/storage/ndb/include/util/SocketClient.hpp
index bf1ad7d45d6..bf1ad7d45d6 100644
--- a/ndb/include/util/SocketClient.hpp
+++ b/storage/ndb/include/util/SocketClient.hpp
diff --git a/ndb/include/util/SocketServer.hpp b/storage/ndb/include/util/SocketServer.hpp
index 4c37e63adf0..4c37e63adf0 100644
--- a/ndb/include/util/SocketServer.hpp
+++ b/storage/ndb/include/util/SocketServer.hpp
diff --git a/ndb/include/util/UtilBuffer.hpp b/storage/ndb/include/util/UtilBuffer.hpp
index f43fc960a16..f43fc960a16 100644
--- a/ndb/include/util/UtilBuffer.hpp
+++ b/storage/ndb/include/util/UtilBuffer.hpp
diff --git a/ndb/include/util/Vector.hpp b/storage/ndb/include/util/Vector.hpp
index 480dddf8243..480dddf8243 100644
--- a/ndb/include/util/Vector.hpp
+++ b/storage/ndb/include/util/Vector.hpp
diff --git a/ndb/include/util/basestring_vsnprintf.h b/storage/ndb/include/util/basestring_vsnprintf.h
index 7c804f22841..7c804f22841 100644
--- a/ndb/include/util/basestring_vsnprintf.h
+++ b/storage/ndb/include/util/basestring_vsnprintf.h
diff --git a/ndb/include/util/md5_hash.hpp b/storage/ndb/include/util/md5_hash.hpp
index b79dce3b5a9..b79dce3b5a9 100644
--- a/ndb/include/util/md5_hash.hpp
+++ b/storage/ndb/include/util/md5_hash.hpp
diff --git a/ndb/include/util/ndb_opts.h b/storage/ndb/include/util/ndb_opts.h
index f60ac4e6a63..f60ac4e6a63 100644
--- a/ndb/include/util/ndb_opts.h
+++ b/storage/ndb/include/util/ndb_opts.h
diff --git a/ndb/include/util/random.h b/storage/ndb/include/util/random.h
index 1b83e5fec93..1b83e5fec93 100644
--- a/ndb/include/util/random.h
+++ b/storage/ndb/include/util/random.h
diff --git a/ndb/include/util/socket_io.h b/storage/ndb/include/util/socket_io.h
index a0e6c4e369d..a0e6c4e369d 100644
--- a/ndb/include/util/socket_io.h
+++ b/storage/ndb/include/util/socket_io.h
diff --git a/ndb/include/util/uucode.h b/storage/ndb/include/util/uucode.h
index f5569d033a5..f5569d033a5 100644
--- a/ndb/include/util/uucode.h
+++ b/storage/ndb/include/util/uucode.h
diff --git a/ndb/include/util/version.h b/storage/ndb/include/util/version.h
index 62dc07d905a..62dc07d905a 100644
--- a/ndb/include/util/version.h
+++ b/storage/ndb/include/util/version.h
diff --git a/ndb/lib/.empty b/storage/ndb/lib/.empty
index e69de29bb2d..e69de29bb2d 100644
--- a/ndb/lib/.empty
+++ b/storage/ndb/lib/.empty
diff --git a/ndb/ndbapi-examples/Makefile b/storage/ndb/ndbapi-examples/Makefile
index 965dc3ec29f..965dc3ec29f 100644
--- a/ndb/ndbapi-examples/Makefile
+++ b/storage/ndb/ndbapi-examples/Makefile
diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile b/storage/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile
new file mode 100644
index 00000000000..f96989c885c
--- /dev/null
+++ b/storage/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile
@@ -0,0 +1,23 @@
+TARGET = mgmapi_logevent
+SRCS = $(TARGET).cpp
+OBJS = $(TARGET).o
+CXX = g++
+CFLAGS = -c -Wall -fno-rtti -fno-exceptions
+CXXFLAGS =
+DEBUG =
+LFLAGS = -Wall
+TOP_SRCDIR = ../../../..
+INCLUDE_DIR = $(TOP_SRCDIR)/storage/ndb/include
+LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \
+ -L$(TOP_SRCDIR)/libmysql_r/.libs \
+ -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/mgmapi -I$(INCLUDE_DIR)/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp b/storage/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp
index 5ec1fba6314..5ec1fba6314 100644
--- a/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp
+++ b/storage/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp
diff --git a/storage/ndb/ndbapi-examples/ndbapi_async_example/Makefile b/storage/ndb/ndbapi-examples/ndbapi_async_example/Makefile
new file mode 100644
index 00000000000..c5de3b06fc7
--- /dev/null
+++ b/storage/ndb/ndbapi-examples/ndbapi_async_example/Makefile
@@ -0,0 +1,23 @@
+TARGET = ndbapi_async
+SRCS = $(TARGET).cpp
+OBJS = $(TARGET).o
+CXX = g++
+CFLAGS = -g -c -Wall -fno-rtti -fno-exceptions
+CXXFLAGS = -g
+DEBUG =
+LFLAGS = -Wall
+TOP_SRCDIR = ../../../..
+INCLUDE_DIR = $(TOP_SRCDIR)
+LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \
+ -L$(TOP_SRCDIR)/libmysql_r/.libs \
+ -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/extra -I$(INCLUDE_DIR)/storage/ndb/include -I$(INCLUDE_DIR)/storage/ndb/include/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp b/storage/ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp
index aa745f4d28d..aa745f4d28d 100644
--- a/ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp
diff --git a/ndb/ndbapi-examples/ndbapi_async_example/readme.txt b/storage/ndb/ndbapi-examples/ndbapi_async_example/readme.txt
index 47cb4bf9ffa..47cb4bf9ffa 100644
--- a/ndb/ndbapi-examples/ndbapi_async_example/readme.txt
+++ b/storage/ndb/ndbapi-examples/ndbapi_async_example/readme.txt
diff --git a/storage/ndb/ndbapi-examples/ndbapi_async_example1/Makefile b/storage/ndb/ndbapi-examples/ndbapi_async_example1/Makefile
new file mode 100644
index 00000000000..cc6bcebb71b
--- /dev/null
+++ b/storage/ndb/ndbapi-examples/ndbapi_async_example1/Makefile
@@ -0,0 +1,22 @@
+TARGET = ndbapi_async1
+SRCS = ndbapi_async1.cpp
+OBJS = ndbapi_async1.o
+CXX = g++
+CFLAGS = -c -Wall -fno-rtti -fno-exceptions
+DEBUG =
+LFLAGS = -Wall
+TOP_SRCDIR = ../../../..
+INCLUDE_DIR = $(TOP_SRCDIR)
+LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \
+ -L$(TOP_SRCDIR)/libmysql_r/.libs \
+ -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/storage/ndb/include -I$(INCLUDE_DIR)/storage/ndb/include/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp b/storage/ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp
index e8bc19e267b..e8bc19e267b 100644
--- a/ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp
diff --git a/storage/ndb/ndbapi-examples/ndbapi_event_example/Makefile b/storage/ndb/ndbapi-examples/ndbapi_event_example/Makefile
new file mode 100644
index 00000000000..d8f7a03aac6
--- /dev/null
+++ b/storage/ndb/ndbapi-examples/ndbapi_event_example/Makefile
@@ -0,0 +1,23 @@
+TARGET = ndbapi_event
+SRCS = ndbapi_event.cpp
+OBJS = ndbapi_event.o
+CXX = g++
+CFLAGS = -c -Wall -fno-rtti -fno-exceptions
+CXXFLAGS =
+DEBUG =
+LFLAGS = -Wall
+TOP_SRCDIR = ../../../..
+INCLUDE_DIR = $(TOP_SRCDIR)/storage/ndb/include
+LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \
+ -L$(TOP_SRCDIR)/libmysql_r/.libs \
+ -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp b/storage/ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp
index 286f6fafbab..286f6fafbab 100644
--- a/ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp
diff --git a/storage/ndb/ndbapi-examples/ndbapi_retries_example/Makefile b/storage/ndb/ndbapi-examples/ndbapi_retries_example/Makefile
new file mode 100644
index 00000000000..3dee4f77e35
--- /dev/null
+++ b/storage/ndb/ndbapi-examples/ndbapi_retries_example/Makefile
@@ -0,0 +1,22 @@
+TARGET = ndbapi_retries
+SRCS = ndbapi_retries.cpp
+OBJS = ndbapi_retries.o
+CXX = g++
+CFLAGS = -c -Wall -fno-rtti -fno-exceptions
+DEBUG =
+LFLAGS = -Wall
+TOP_SRCDIR = ../../../..
+INCLUDE_DIR = ../../include
+LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \
+ -L$(TOP_SRCDIR)/libmysql_r/.libs \
+ -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp b/storage/ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp
index 8c29fe31446..8c29fe31446 100644
--- a/ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp
diff --git a/storage/ndb/ndbapi-examples/ndbapi_scan_example/Makefile b/storage/ndb/ndbapi-examples/ndbapi_scan_example/Makefile
new file mode 100644
index 00000000000..e3a7d9c97b0
--- /dev/null
+++ b/storage/ndb/ndbapi-examples/ndbapi_scan_example/Makefile
@@ -0,0 +1,23 @@
+TARGET = ndbapi_scan
+SRCS = $(TARGET).cpp
+OBJS = $(TARGET).o
+CXX = g++
+CFLAGS = -g -c -Wall -fno-rtti -fno-exceptions
+CXXFLAGS = -g
+DEBUG =
+LFLAGS = -Wall
+TOP_SRCDIR = ../../../..
+INCLUDE_DIR = $(TOP_SRCDIR)
+LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \
+ -L$(TOP_SRCDIR)/libmysql_r/.libs \
+ -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/extra -I$(INCLUDE_DIR)/storage/ndb/include -I$(INCLUDE_DIR)/storage/ndb/include/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp b/storage/ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp
index 69ffd99b8ca..69ffd99b8ca 100644
--- a/ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp
diff --git a/ndb/ndbapi-examples/ndbapi_scan_example/readme.txt b/storage/ndb/ndbapi-examples/ndbapi_scan_example/readme.txt
index 47cb4bf9ffa..47cb4bf9ffa 100644
--- a/ndb/ndbapi-examples/ndbapi_scan_example/readme.txt
+++ b/storage/ndb/ndbapi-examples/ndbapi_scan_example/readme.txt
diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_example/Makefile b/storage/ndb/ndbapi-examples/ndbapi_simple_example/Makefile
new file mode 100644
index 00000000000..b792c4c4a47
--- /dev/null
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple_example/Makefile
@@ -0,0 +1,23 @@
+TARGET = ndbapi_simple
+SRCS = $(TARGET).cpp
+OBJS = $(TARGET).o
+CXX = g++
+CFLAGS = -c -Wall -fno-rtti -fno-exceptions
+CXXFLAGS =
+DEBUG =
+LFLAGS = -Wall
+TOP_SRCDIR = ../../../..
+INCLUDE_DIR = $(TOP_SRCDIR)/storage/ndb/include
+LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \
+ -L$(TOP_SRCDIR)/libmysql_r/.libs \
+ -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp
index 152d4fa44af..152d4fa44af 100644
--- a/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp
diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile b/storage/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile
new file mode 100644
index 00000000000..3b3ac7f484a
--- /dev/null
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile
@@ -0,0 +1,23 @@
+TARGET = ndbapi_simple_index
+SRCS = $(TARGET).cpp
+OBJS = $(TARGET).o
+CXX = g++
+CFLAGS = -c -Wall -fno-rtti -fno-exceptions
+CXXFLAGS =
+DEBUG =
+LFLAGS = -Wall
+TOP_SRCDIR = ../../../..
+INCLUDE_DIR = $(TOP_SRCDIR)
+LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \
+ -L$(TOP_SRCDIR)/libmysql_r/.libs \
+ -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/storage/ndb/include -I$(INCLUDE_DIR)/storage/ndb/include/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp
index 5afaf6078d1..5afaf6078d1 100644
--- a/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp
diff --git a/storage/ndb/src/Makefile.am b/storage/ndb/src/Makefile.am
new file mode 100644
index 00000000000..23ba5f6f9e4
--- /dev/null
+++ b/storage/ndb/src/Makefile.am
@@ -0,0 +1,33 @@
+SUBDIRS = common mgmapi ndbapi . kernel mgmclient mgmsrv cw
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+
+ndblib_LTLIBRARIES = libndbclient.la
+
+libndbclient_la_SOURCES =
+
+libndbclient_la_LIBADD = \
+ ndbapi/libndbapi.la \
+ common/transporter/libtransporter.la \
+ common/debugger/libtrace.la \
+ common/debugger/signaldata/libsignaldataprint.la \
+ mgmapi/libmgmapi.la \
+ common/mgmcommon/libmgmsrvcommon.la \
+ common/logger/liblogger.la \
+ common/portlib/libportlib.la \
+ common/util/libgeneral.la
+
+windoze-dsp: libndbclient.dsp
+
+libndbclient.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(ndblib_LTLIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ dummy.cpp
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(libndbclient_la_LIBADD)
+ @touch dummy.cpp
diff --git a/ndb/src/common/Makefile.am b/storage/ndb/src/common/Makefile.am
index 0059f3fb210..0059f3fb210 100644
--- a/ndb/src/common/Makefile.am
+++ b/storage/ndb/src/common/Makefile.am
diff --git a/ndb/src/common/debugger/BlockNames.cpp b/storage/ndb/src/common/debugger/BlockNames.cpp
index 44650b84c5c..44650b84c5c 100644
--- a/ndb/src/common/debugger/BlockNames.cpp
+++ b/storage/ndb/src/common/debugger/BlockNames.cpp
diff --git a/ndb/src/common/debugger/DebuggerNames.cpp b/storage/ndb/src/common/debugger/DebuggerNames.cpp
index 8571b8ece86..8571b8ece86 100644
--- a/ndb/src/common/debugger/DebuggerNames.cpp
+++ b/storage/ndb/src/common/debugger/DebuggerNames.cpp
diff --git a/ndb/src/common/debugger/EventLogger.cpp b/storage/ndb/src/common/debugger/EventLogger.cpp
index 5a534b36b59..5a534b36b59 100644
--- a/ndb/src/common/debugger/EventLogger.cpp
+++ b/storage/ndb/src/common/debugger/EventLogger.cpp
diff --git a/ndb/src/common/debugger/GrepError.cpp b/storage/ndb/src/common/debugger/GrepError.cpp
index 20aeaa6dd77..20aeaa6dd77 100644
--- a/ndb/src/common/debugger/GrepError.cpp
+++ b/storage/ndb/src/common/debugger/GrepError.cpp
diff --git a/storage/ndb/src/common/debugger/Makefile.am b/storage/ndb/src/common/debugger/Makefile.am
new file mode 100644
index 00000000000..71b8ed55561
--- /dev/null
+++ b/storage/ndb/src/common/debugger/Makefile.am
@@ -0,0 +1,25 @@
+SUBDIRS = signaldata
+
+noinst_LTLIBRARIES = libtrace.la
+
+libtrace_la_SOURCES = SignalLoggerManager.cpp DebuggerNames.cpp BlockNames.cpp EventLogger.cpp GrepError.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libtrace.dsp
+
+libtrace.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libtrace_la_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/common/debugger/SignalLoggerManager.cpp b/storage/ndb/src/common/debugger/SignalLoggerManager.cpp
index d8710d2058f..d8710d2058f 100644
--- a/ndb/src/common/debugger/SignalLoggerManager.cpp
+++ b/storage/ndb/src/common/debugger/SignalLoggerManager.cpp
diff --git a/ndb/src/common/debugger/signaldata/AccLock.cpp b/storage/ndb/src/common/debugger/signaldata/AccLock.cpp
index affed431957..affed431957 100644
--- a/ndb/src/common/debugger/signaldata/AccLock.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/AccLock.cpp
diff --git a/ndb/src/common/debugger/signaldata/AlterIndx.cpp b/storage/ndb/src/common/debugger/signaldata/AlterIndx.cpp
index e1865136fc3..e1865136fc3 100644
--- a/ndb/src/common/debugger/signaldata/AlterIndx.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/AlterIndx.cpp
diff --git a/ndb/src/common/debugger/signaldata/AlterTab.cpp b/storage/ndb/src/common/debugger/signaldata/AlterTab.cpp
index f9521984095..f9521984095 100644
--- a/ndb/src/common/debugger/signaldata/AlterTab.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/AlterTab.cpp
diff --git a/ndb/src/common/debugger/signaldata/AlterTable.cpp b/storage/ndb/src/common/debugger/signaldata/AlterTable.cpp
index 59909c8e490..59909c8e490 100644
--- a/ndb/src/common/debugger/signaldata/AlterTable.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/AlterTable.cpp
diff --git a/ndb/src/common/debugger/signaldata/AlterTrig.cpp b/storage/ndb/src/common/debugger/signaldata/AlterTrig.cpp
index d488fd6e348..d488fd6e348 100644
--- a/ndb/src/common/debugger/signaldata/AlterTrig.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/AlterTrig.cpp
diff --git a/ndb/src/common/debugger/signaldata/BackupImpl.cpp b/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp
index e9b0188d93b..e9b0188d93b 100644
--- a/ndb/src/common/debugger/signaldata/BackupImpl.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp
diff --git a/ndb/src/common/debugger/signaldata/BackupSignalData.cpp b/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp
index 4b0a0e07b66..4b0a0e07b66 100644
--- a/ndb/src/common/debugger/signaldata/BackupSignalData.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp
diff --git a/ndb/src/common/debugger/signaldata/CloseComReqConf.cpp b/storage/ndb/src/common/debugger/signaldata/CloseComReqConf.cpp
index 84410a2b2db..84410a2b2db 100644
--- a/ndb/src/common/debugger/signaldata/CloseComReqConf.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/CloseComReqConf.cpp
diff --git a/ndb/src/common/debugger/signaldata/CntrStart.cpp b/storage/ndb/src/common/debugger/signaldata/CntrStart.cpp
index 154013f40b0..154013f40b0 100644
--- a/ndb/src/common/debugger/signaldata/CntrStart.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/CntrStart.cpp
diff --git a/ndb/src/common/debugger/signaldata/ContinueB.cpp b/storage/ndb/src/common/debugger/signaldata/ContinueB.cpp
index c295041bc01..c295041bc01 100644
--- a/ndb/src/common/debugger/signaldata/ContinueB.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/ContinueB.cpp
diff --git a/ndb/src/common/debugger/signaldata/CopyGCI.cpp b/storage/ndb/src/common/debugger/signaldata/CopyGCI.cpp
index 173b3f6708f..173b3f6708f 100644
--- a/ndb/src/common/debugger/signaldata/CopyGCI.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/CopyGCI.cpp
diff --git a/ndb/src/common/debugger/signaldata/CreateEvnt.cpp b/storage/ndb/src/common/debugger/signaldata/CreateEvnt.cpp
index 7b497d6a974..7b497d6a974 100644
--- a/ndb/src/common/debugger/signaldata/CreateEvnt.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/CreateEvnt.cpp
diff --git a/storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp b/storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp
new file mode 100644
index 00000000000..991a0cce131
--- /dev/null
+++ b/storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp
@@ -0,0 +1,55 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <signaldata/CreateFragmentation.hpp>
+
+bool
+printCREATE_FRAGMENTATION_REQ(FILE * output, const Uint32 * theData,
+ Uint32 len, Uint16 receiverBlockNo) {
+ const CreateFragmentationReq * const sig = (CreateFragmentationReq *)theData;
+ fprintf(output, " senderRef: %x\n", sig->senderRef);
+ fprintf(output, " senderData: %x\n", sig->senderData);
+ fprintf(output, " fragmentationType: %x\n", sig->fragmentationType);
+ fprintf(output, " noOfFragments: %x\n", sig->noOfFragments);
+ if (sig->primaryTableId == RNIL)
+ fprintf(output, " primaryTableId: none\n");
+ else
+ fprintf(output, " primaryTableId: %x\n", sig->primaryTableId);
+ return true;
+}
+
+bool
+printCREATE_FRAGMENTATION_REF(FILE * output, const Uint32 * theData,
+ Uint32 len, Uint16 receiverBlockNo) {
+ const CreateFragmentationRef * const sig = (CreateFragmentationRef *)theData;
+ fprintf(output, " senderRef: %x\n", sig->senderRef);
+ fprintf(output, " senderData: %x\n", sig->senderData);
+ fprintf(output, " errorCode: %x\n", sig->errorCode);
+ return true;
+}
+
+bool
+printCREATE_FRAGMENTATION_CONF(FILE * output, const Uint32 * theData,
+ Uint32 len, Uint16 receiverBlockNo) {
+ const CreateFragmentationConf * const sig =
+ (CreateFragmentationConf *)theData;
+ fprintf(output, " senderRef: %x\n", sig->senderRef);
+ fprintf(output, " senderData: %x\n", sig->senderData);
+ fprintf(output, " noOfReplicas: %x\n", sig->noOfReplicas);
+ fprintf(output, " noOfFragments: %x\n", sig->noOfFragments);
+ return true;
+}
+
diff --git a/ndb/src/common/debugger/signaldata/CreateIndx.cpp b/storage/ndb/src/common/debugger/signaldata/CreateIndx.cpp
index 8fcbb9279ed..8fcbb9279ed 100644
--- a/ndb/src/common/debugger/signaldata/CreateIndx.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/CreateIndx.cpp
diff --git a/ndb/src/common/debugger/signaldata/CreateTrig.cpp b/storage/ndb/src/common/debugger/signaldata/CreateTrig.cpp
index db5344cfbe7..db5344cfbe7 100644
--- a/ndb/src/common/debugger/signaldata/CreateTrig.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/CreateTrig.cpp
diff --git a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
index 43c129347c0..43c129347c0 100644
--- a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
diff --git a/ndb/src/common/debugger/signaldata/DihContinueB.cpp b/storage/ndb/src/common/debugger/signaldata/DihContinueB.cpp
index 9fece17315c..9fece17315c 100644
--- a/ndb/src/common/debugger/signaldata/DihContinueB.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/DihContinueB.cpp
diff --git a/ndb/src/common/debugger/signaldata/DihSwitchReplicaReq.cpp b/storage/ndb/src/common/debugger/signaldata/DihSwitchReplicaReq.cpp
index 2e4318f4033..2e4318f4033 100644
--- a/ndb/src/common/debugger/signaldata/DihSwitchReplicaReq.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/DihSwitchReplicaReq.cpp
diff --git a/ndb/src/common/debugger/signaldata/DisconnectRep.cpp b/storage/ndb/src/common/debugger/signaldata/DisconnectRep.cpp
index 3a73747a978..3a73747a978 100644
--- a/ndb/src/common/debugger/signaldata/DisconnectRep.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/DisconnectRep.cpp
diff --git a/ndb/src/common/debugger/signaldata/DropIndx.cpp b/storage/ndb/src/common/debugger/signaldata/DropIndx.cpp
index 0d59a981a18..0d59a981a18 100644
--- a/ndb/src/common/debugger/signaldata/DropIndx.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/DropIndx.cpp
diff --git a/ndb/src/common/debugger/signaldata/DropTab.cpp b/storage/ndb/src/common/debugger/signaldata/DropTab.cpp
index 83c95b0e344..83c95b0e344 100644
--- a/ndb/src/common/debugger/signaldata/DropTab.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/DropTab.cpp
diff --git a/ndb/src/common/debugger/signaldata/DropTrig.cpp b/storage/ndb/src/common/debugger/signaldata/DropTrig.cpp
index 54e8734439f..54e8734439f 100644
--- a/ndb/src/common/debugger/signaldata/DropTrig.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/DropTrig.cpp
diff --git a/ndb/src/common/debugger/signaldata/FailRep.cpp b/storage/ndb/src/common/debugger/signaldata/FailRep.cpp
index d70912fe8c7..d70912fe8c7 100644
--- a/ndb/src/common/debugger/signaldata/FailRep.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/FailRep.cpp
diff --git a/ndb/src/common/debugger/signaldata/FireTrigOrd.cpp b/storage/ndb/src/common/debugger/signaldata/FireTrigOrd.cpp
index d86aa2e06de..d86aa2e06de 100644
--- a/ndb/src/common/debugger/signaldata/FireTrigOrd.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/FireTrigOrd.cpp
diff --git a/ndb/src/common/debugger/signaldata/FsAppendReq.cpp b/storage/ndb/src/common/debugger/signaldata/FsAppendReq.cpp
index 6e443ffe5fc..6e443ffe5fc 100644
--- a/ndb/src/common/debugger/signaldata/FsAppendReq.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/FsAppendReq.cpp
diff --git a/ndb/src/common/debugger/signaldata/FsCloseReq.cpp b/storage/ndb/src/common/debugger/signaldata/FsCloseReq.cpp
index df9f3cc9fbc..df9f3cc9fbc 100644
--- a/ndb/src/common/debugger/signaldata/FsCloseReq.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/FsCloseReq.cpp
diff --git a/ndb/src/common/debugger/signaldata/FsConf.cpp b/storage/ndb/src/common/debugger/signaldata/FsConf.cpp
index f0ab57aadcf..f0ab57aadcf 100644
--- a/ndb/src/common/debugger/signaldata/FsConf.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/FsConf.cpp
diff --git a/ndb/src/common/debugger/signaldata/FsOpenReq.cpp b/storage/ndb/src/common/debugger/signaldata/FsOpenReq.cpp
index 31d351a8a84..31d351a8a84 100644
--- a/ndb/src/common/debugger/signaldata/FsOpenReq.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/FsOpenReq.cpp
diff --git a/ndb/src/common/debugger/signaldata/FsReadWriteReq.cpp b/storage/ndb/src/common/debugger/signaldata/FsReadWriteReq.cpp
index a9f240d3cb4..a9f240d3cb4 100644
--- a/ndb/src/common/debugger/signaldata/FsReadWriteReq.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/FsReadWriteReq.cpp
diff --git a/ndb/src/common/debugger/signaldata/FsRef.cpp b/storage/ndb/src/common/debugger/signaldata/FsRef.cpp
index ccf3d6da9c8..ccf3d6da9c8 100644
--- a/ndb/src/common/debugger/signaldata/FsRef.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/FsRef.cpp
diff --git a/ndb/src/common/debugger/signaldata/GCPSave.cpp b/storage/ndb/src/common/debugger/signaldata/GCPSave.cpp
index 7566f004bfd..7566f004bfd 100644
--- a/ndb/src/common/debugger/signaldata/GCPSave.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/GCPSave.cpp
diff --git a/ndb/src/common/debugger/signaldata/IndxAttrInfo.cpp b/storage/ndb/src/common/debugger/signaldata/IndxAttrInfo.cpp
index 2ef5feaada7..2ef5feaada7 100755
--- a/ndb/src/common/debugger/signaldata/IndxAttrInfo.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/IndxAttrInfo.cpp
diff --git a/ndb/src/common/debugger/signaldata/IndxKeyInfo.cpp b/storage/ndb/src/common/debugger/signaldata/IndxKeyInfo.cpp
index 6fe5567188d..6fe5567188d 100755
--- a/ndb/src/common/debugger/signaldata/IndxKeyInfo.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/IndxKeyInfo.cpp
diff --git a/ndb/src/common/debugger/signaldata/LCP.cpp b/storage/ndb/src/common/debugger/signaldata/LCP.cpp
index 6b4bb13e2cd..6b4bb13e2cd 100644
--- a/ndb/src/common/debugger/signaldata/LCP.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/LCP.cpp
diff --git a/ndb/src/common/debugger/signaldata/LqhFrag.cpp b/storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp
index 6d727959a67..6d727959a67 100644
--- a/ndb/src/common/debugger/signaldata/LqhFrag.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp
diff --git a/ndb/src/common/debugger/signaldata/LqhKey.cpp b/storage/ndb/src/common/debugger/signaldata/LqhKey.cpp
index 2796437fd8b..2796437fd8b 100644
--- a/ndb/src/common/debugger/signaldata/LqhKey.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/LqhKey.cpp
diff --git a/ndb/src/common/debugger/signaldata/LqhTrans.cpp b/storage/ndb/src/common/debugger/signaldata/LqhTrans.cpp
index 8282530cae6..8282530cae6 100644
--- a/ndb/src/common/debugger/signaldata/LqhTrans.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/LqhTrans.cpp
diff --git a/storage/ndb/src/common/debugger/signaldata/Makefile.am b/storage/ndb/src/common/debugger/signaldata/Makefile.am
new file mode 100644
index 00000000000..af9d343de44
--- /dev/null
+++ b/storage/ndb/src/common/debugger/signaldata/Makefile.am
@@ -0,0 +1,47 @@
+
+noinst_LTLIBRARIES = libsignaldataprint.la
+
+libsignaldataprint_la_SOURCES = \
+	TcKeyReq.cpp TcKeyConf.cpp TcKeyRef.cpp \
+	TcRollbackRep.cpp \
+	TupKey.cpp TupCommit.cpp LqhKey.cpp \
+	FsOpenReq.cpp FsCloseReq.cpp FsRef.cpp FsConf.cpp FsReadWriteReq.cpp\
+	SignalDataPrint.cpp SignalNames.cpp \
+	ContinueB.cpp DihContinueB.cpp NdbfsContinueB.cpp \
+	CloseComReqConf.cpp PackedSignal.cpp PrepFailReqRef.cpp \
+	GCPSave.cpp DictTabInfo.cpp \
+	AlterTable.cpp AlterTab.cpp \
+	CreateTrig.cpp AlterTrig.cpp DropTrig.cpp \
+	FireTrigOrd.cpp TrigAttrInfo.cpp \
+	CreateIndx.cpp AlterIndx.cpp DropIndx.cpp TcIndx.cpp \
+	IndxKeyInfo.cpp IndxAttrInfo.cpp \
+	FsAppendReq.cpp ScanTab.cpp \
+	BackupImpl.cpp BackupSignalData.cpp \
+	UtilSequence.cpp UtilPrepare.cpp UtilDelete.cpp UtilExecute.cpp \
+	LqhFrag.cpp DropTab.cpp PrepDropTab.cpp LCP.cpp MasterLCP.cpp \
+	CopyGCI.cpp SystemError.cpp StartRec.cpp NFCompleteRep.cpp \
+	FailRep.cpp DisconnectRep.cpp SignalDroppedRep.cpp \
+	SumaImpl.cpp NdbSttor.cpp CreateFragmentation.cpp \
+	UtilLock.cpp TuxMaint.cpp AccLock.cpp \
+	LqhTrans.cpp ReadNodesConf.cpp CntrStart.cpp \
+	ScanFrag.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libsignaldataprint.dsp
+
+libsignaldataprint.dsp: Makefile \
+	$(top_srcdir)/storage/ndb/config/win-lib.am \
+	$(top_srcdir)/storage/ndb/config/win-name \
+	$(top_srcdir)/storage/ndb/config/win-includes \
+	$(top_srcdir)/storage/ndb/config/win-sources \
+	$(top_srcdir)/storage/ndb/config/win-libraries
+	cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+	@$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
+	@$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+	@$(top_srcdir)/storage/ndb/config/win-sources $@ $(libsignaldataprint_la_SOURCES)
+	@$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/common/debugger/signaldata/MasterLCP.cpp b/storage/ndb/src/common/debugger/signaldata/MasterLCP.cpp
index 078b92f6f2e..078b92f6f2e 100644
--- a/ndb/src/common/debugger/signaldata/MasterLCP.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/MasterLCP.cpp
diff --git a/ndb/src/common/debugger/signaldata/NFCompleteRep.cpp b/storage/ndb/src/common/debugger/signaldata/NFCompleteRep.cpp
index f2d6f2f104a..f2d6f2f104a 100644
--- a/ndb/src/common/debugger/signaldata/NFCompleteRep.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/NFCompleteRep.cpp
diff --git a/ndb/src/common/debugger/signaldata/NdbSttor.cpp b/storage/ndb/src/common/debugger/signaldata/NdbSttor.cpp
index 9fd081313be..9fd081313be 100644
--- a/ndb/src/common/debugger/signaldata/NdbSttor.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/NdbSttor.cpp
diff --git a/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp b/storage/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp
index 9f55efae017..9f55efae017 100644
--- a/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp
diff --git a/ndb/src/common/debugger/signaldata/PackedSignal.cpp b/storage/ndb/src/common/debugger/signaldata/PackedSignal.cpp
index f0f7aee74e4..f0f7aee74e4 100644
--- a/ndb/src/common/debugger/signaldata/PackedSignal.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/PackedSignal.cpp
diff --git a/ndb/src/common/debugger/signaldata/PrepDropTab.cpp b/storage/ndb/src/common/debugger/signaldata/PrepDropTab.cpp
index 59001bcd6f6..59001bcd6f6 100644
--- a/ndb/src/common/debugger/signaldata/PrepDropTab.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/PrepDropTab.cpp
diff --git a/ndb/src/common/debugger/signaldata/PrepFailReqRef.cpp b/storage/ndb/src/common/debugger/signaldata/PrepFailReqRef.cpp
index 2e900de8f70..2e900de8f70 100644
--- a/ndb/src/common/debugger/signaldata/PrepFailReqRef.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/PrepFailReqRef.cpp
diff --git a/ndb/src/common/debugger/signaldata/ReadNodesConf.cpp b/storage/ndb/src/common/debugger/signaldata/ReadNodesConf.cpp
index 103f4a884f1..103f4a884f1 100644
--- a/ndb/src/common/debugger/signaldata/ReadNodesConf.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/ReadNodesConf.cpp
diff --git a/ndb/src/common/debugger/signaldata/ScanFrag.cpp b/storage/ndb/src/common/debugger/signaldata/ScanFrag.cpp
index 4d19a325637..4d19a325637 100644
--- a/ndb/src/common/debugger/signaldata/ScanFrag.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/ScanFrag.cpp
diff --git a/ndb/src/common/debugger/signaldata/ScanTab.cpp b/storage/ndb/src/common/debugger/signaldata/ScanTab.cpp
index d78beb4740a..d78beb4740a 100644
--- a/ndb/src/common/debugger/signaldata/ScanTab.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/ScanTab.cpp
diff --git a/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp b/storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
index ab23c04bffa..ab23c04bffa 100644
--- a/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
diff --git a/ndb/src/common/debugger/signaldata/SignalDroppedRep.cpp b/storage/ndb/src/common/debugger/signaldata/SignalDroppedRep.cpp
index be31b4edb22..be31b4edb22 100644
--- a/ndb/src/common/debugger/signaldata/SignalDroppedRep.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/SignalDroppedRep.cpp
diff --git a/ndb/src/common/debugger/signaldata/SignalNames.cpp b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
index 984d28819c0..984d28819c0 100644
--- a/ndb/src/common/debugger/signaldata/SignalNames.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
diff --git a/ndb/src/common/debugger/signaldata/StartRec.cpp b/storage/ndb/src/common/debugger/signaldata/StartRec.cpp
index 482e3cb0728..482e3cb0728 100644
--- a/ndb/src/common/debugger/signaldata/StartRec.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/StartRec.cpp
diff --git a/ndb/src/common/debugger/signaldata/SumaImpl.cpp b/storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp
index e50a3040fe3..e50a3040fe3 100644
--- a/ndb/src/common/debugger/signaldata/SumaImpl.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp
diff --git a/ndb/src/common/debugger/signaldata/SystemError.cpp b/storage/ndb/src/common/debugger/signaldata/SystemError.cpp
index 549c34710a0..549c34710a0 100644
--- a/ndb/src/common/debugger/signaldata/SystemError.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/SystemError.cpp
diff --git a/ndb/src/common/debugger/signaldata/TcIndx.cpp b/storage/ndb/src/common/debugger/signaldata/TcIndx.cpp
index b0578f5b646..b0578f5b646 100644
--- a/ndb/src/common/debugger/signaldata/TcIndx.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/TcIndx.cpp
diff --git a/ndb/src/common/debugger/signaldata/TcKeyConf.cpp b/storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp
index 652c2b8a557..652c2b8a557 100644
--- a/ndb/src/common/debugger/signaldata/TcKeyConf.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp
diff --git a/ndb/src/common/debugger/signaldata/TcKeyRef.cpp b/storage/ndb/src/common/debugger/signaldata/TcKeyRef.cpp
index 0dba9909caf..0dba9909caf 100644
--- a/ndb/src/common/debugger/signaldata/TcKeyRef.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/TcKeyRef.cpp
diff --git a/ndb/src/common/debugger/signaldata/TcKeyReq.cpp b/storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp
index 3918bd5db26..3918bd5db26 100644
--- a/ndb/src/common/debugger/signaldata/TcKeyReq.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp
diff --git a/ndb/src/common/debugger/signaldata/TcRollbackRep.cpp b/storage/ndb/src/common/debugger/signaldata/TcRollbackRep.cpp
index 961f0c3619d..961f0c3619d 100644
--- a/ndb/src/common/debugger/signaldata/TcRollbackRep.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/TcRollbackRep.cpp
diff --git a/ndb/src/common/debugger/signaldata/TrigAttrInfo.cpp b/storage/ndb/src/common/debugger/signaldata/TrigAttrInfo.cpp
index 7a8d176ec61..7a8d176ec61 100644
--- a/ndb/src/common/debugger/signaldata/TrigAttrInfo.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/TrigAttrInfo.cpp
diff --git a/ndb/src/common/debugger/signaldata/TupCommit.cpp b/storage/ndb/src/common/debugger/signaldata/TupCommit.cpp
index d0391b2a8e6..d0391b2a8e6 100644
--- a/ndb/src/common/debugger/signaldata/TupCommit.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/TupCommit.cpp
diff --git a/ndb/src/common/debugger/signaldata/TupKey.cpp b/storage/ndb/src/common/debugger/signaldata/TupKey.cpp
index 134b5fde8bc..134b5fde8bc 100644
--- a/ndb/src/common/debugger/signaldata/TupKey.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/TupKey.cpp
diff --git a/ndb/src/common/debugger/signaldata/TuxMaint.cpp b/storage/ndb/src/common/debugger/signaldata/TuxMaint.cpp
index ba6a299b77d..ba6a299b77d 100644
--- a/ndb/src/common/debugger/signaldata/TuxMaint.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/TuxMaint.cpp
diff --git a/ndb/src/common/debugger/signaldata/UtilDelete.cpp b/storage/ndb/src/common/debugger/signaldata/UtilDelete.cpp
index b6ba53559ac..b6ba53559ac 100644
--- a/ndb/src/common/debugger/signaldata/UtilDelete.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/UtilDelete.cpp
diff --git a/ndb/src/common/debugger/signaldata/UtilExecute.cpp b/storage/ndb/src/common/debugger/signaldata/UtilExecute.cpp
index 2c88fa174d4..2c88fa174d4 100644
--- a/ndb/src/common/debugger/signaldata/UtilExecute.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/UtilExecute.cpp
diff --git a/ndb/src/common/debugger/signaldata/UtilLock.cpp b/storage/ndb/src/common/debugger/signaldata/UtilLock.cpp
index 34e37c3e2d8..34e37c3e2d8 100644
--- a/ndb/src/common/debugger/signaldata/UtilLock.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/UtilLock.cpp
diff --git a/ndb/src/common/debugger/signaldata/UtilPrepare.cpp b/storage/ndb/src/common/debugger/signaldata/UtilPrepare.cpp
index adc2e299380..adc2e299380 100644
--- a/ndb/src/common/debugger/signaldata/UtilPrepare.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/UtilPrepare.cpp
diff --git a/ndb/src/common/debugger/signaldata/UtilSequence.cpp b/storage/ndb/src/common/debugger/signaldata/UtilSequence.cpp
index e91999d9abf..e91999d9abf 100644
--- a/ndb/src/common/debugger/signaldata/UtilSequence.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/UtilSequence.cpp
diff --git a/ndb/src/common/debugger/signaldata/print.awk b/storage/ndb/src/common/debugger/signaldata/print.awk
index 9730fb4a236..9730fb4a236 100644
--- a/ndb/src/common/debugger/signaldata/print.awk
+++ b/storage/ndb/src/common/debugger/signaldata/print.awk
diff --git a/ndb/src/common/logger/ConsoleLogHandler.cpp b/storage/ndb/src/common/logger/ConsoleLogHandler.cpp
index 94367d2fc45..94367d2fc45 100644
--- a/ndb/src/common/logger/ConsoleLogHandler.cpp
+++ b/storage/ndb/src/common/logger/ConsoleLogHandler.cpp
diff --git a/ndb/src/common/logger/FileLogHandler.cpp b/storage/ndb/src/common/logger/FileLogHandler.cpp
index 8678b999b6f..8678b999b6f 100644
--- a/ndb/src/common/logger/FileLogHandler.cpp
+++ b/storage/ndb/src/common/logger/FileLogHandler.cpp
diff --git a/ndb/src/common/logger/LogHandler.cpp b/storage/ndb/src/common/logger/LogHandler.cpp
index a9d4512112f..a9d4512112f 100644
--- a/ndb/src/common/logger/LogHandler.cpp
+++ b/storage/ndb/src/common/logger/LogHandler.cpp
diff --git a/ndb/src/common/logger/LogHandlerList.cpp b/storage/ndb/src/common/logger/LogHandlerList.cpp
index 62495d7566b..62495d7566b 100644
--- a/ndb/src/common/logger/LogHandlerList.cpp
+++ b/storage/ndb/src/common/logger/LogHandlerList.cpp
diff --git a/ndb/src/common/logger/LogHandlerList.hpp b/storage/ndb/src/common/logger/LogHandlerList.hpp
index 21344023560..21344023560 100644
--- a/ndb/src/common/logger/LogHandlerList.hpp
+++ b/storage/ndb/src/common/logger/LogHandlerList.hpp
diff --git a/ndb/src/common/logger/Logger.cpp b/storage/ndb/src/common/logger/Logger.cpp
index 4a48236053d..4a48236053d 100644
--- a/ndb/src/common/logger/Logger.cpp
+++ b/storage/ndb/src/common/logger/Logger.cpp
diff --git a/storage/ndb/src/common/logger/Makefile.am b/storage/ndb/src/common/logger/Makefile.am
new file mode 100644
index 00000000000..8a26d4258f1
--- /dev/null
+++ b/storage/ndb/src/common/logger/Makefile.am
@@ -0,0 +1,25 @@
+
+noinst_LTLIBRARIES = liblogger.la
+
+SOURCE_WIN = Logger.cpp LogHandlerList.cpp LogHandler.cpp \
+	ConsoleLogHandler.cpp FileLogHandler.cpp
+liblogger_la_SOURCES = $(SOURCE_WIN) SysLogHandler.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+windoze-dsp: liblogger.dsp
+
+liblogger.dsp: Makefile \
+	$(top_srcdir)/storage/ndb/config/win-lib.am \
+	$(top_srcdir)/storage/ndb/config/win-name \
+	$(top_srcdir)/storage/ndb/config/win-includes \
+	$(top_srcdir)/storage/ndb/config/win-sources \
+	$(top_srcdir)/storage/ndb/config/win-libraries
+	cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+	@$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
+	@$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+	@$(top_srcdir)/storage/ndb/config/win-sources $@ $(SOURCE_WIN)
+	@$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/common/logger/SysLogHandler.cpp b/storage/ndb/src/common/logger/SysLogHandler.cpp
index 5b1b8d85ca7..5b1b8d85ca7 100644
--- a/ndb/src/common/logger/SysLogHandler.cpp
+++ b/storage/ndb/src/common/logger/SysLogHandler.cpp
diff --git a/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp b/storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp
index 7de9ee46479..7de9ee46479 100644
--- a/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp
+++ b/storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp
diff --git a/ndb/src/common/logger/listtest/LogHandlerListUnitTest.hpp b/storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.hpp
index e98a2722b8d..e98a2722b8d 100644
--- a/ndb/src/common/logger/listtest/LogHandlerListUnitTest.hpp
+++ b/storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.hpp
diff --git a/ndb/src/common/logger/listtest/Makefile b/storage/ndb/src/common/logger/listtest/Makefile
index 4688a5e5a2f..4688a5e5a2f 100644
--- a/ndb/src/common/logger/listtest/Makefile
+++ b/storage/ndb/src/common/logger/listtest/Makefile
diff --git a/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp b/storage/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp
index 990d2e0eada..990d2e0eada 100644
--- a/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp
+++ b/storage/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp
diff --git a/ndb/src/common/logger/loggertest/LoggerUnitTest.hpp b/storage/ndb/src/common/logger/loggertest/LoggerUnitTest.hpp
index 79f560750d5..79f560750d5 100644
--- a/ndb/src/common/logger/loggertest/LoggerUnitTest.hpp
+++ b/storage/ndb/src/common/logger/loggertest/LoggerUnitTest.hpp
diff --git a/ndb/src/common/logger/loggertest/Makefile b/storage/ndb/src/common/logger/loggertest/Makefile
index 0aef0ca2bce..0aef0ca2bce 100644
--- a/ndb/src/common/logger/loggertest/Makefile
+++ b/storage/ndb/src/common/logger/loggertest/Makefile
diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp
index b3d0221fedb..b3d0221fedb 100644
--- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp
+++ b/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp
diff --git a/ndb/src/common/mgmcommon/IPCConfig.cpp b/storage/ndb/src/common/mgmcommon/IPCConfig.cpp
index f935f8ffab4..f935f8ffab4 100644
--- a/ndb/src/common/mgmcommon/IPCConfig.cpp
+++ b/storage/ndb/src/common/mgmcommon/IPCConfig.cpp
diff --git a/storage/ndb/src/common/mgmcommon/Makefile.am b/storage/ndb/src/common/mgmcommon/Makefile.am
new file mode 100644
index 00000000000..0540ef012b8
--- /dev/null
+++ b/storage/ndb/src/common/mgmcommon/Makefile.am
@@ -0,0 +1,28 @@
+noinst_LTLIBRARIES = libmgmsrvcommon.la
+
+libmgmsrvcommon_la_SOURCES = \
+	ConfigRetriever.cpp \
+	IPCConfig.cpp
+
+INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/mgmapi -I$(top_srcdir)/storage/ndb/src/mgmsrv
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am
+include $(top_srcdir)/storage/ndb/config/type_mgmapiclient.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libmgmsrvcommon.dsp
+
+libmgmsrvcommon.dsp: Makefile \
+	$(top_srcdir)/storage/ndb/config/win-lib.am \
+	$(top_srcdir)/storage/ndb/config/win-name \
+	$(top_srcdir)/storage/ndb/config/win-includes \
+	$(top_srcdir)/storage/ndb/config/win-sources \
+	$(top_srcdir)/storage/ndb/config/win-libraries
+	cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+	@$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
+	@$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+	@$(top_srcdir)/storage/ndb/config/win-sources $@ $(libmgmsrvcommon_la_SOURCES)
+	@$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/common/mgmcommon/printConfig/Makefile b/storage/ndb/src/common/mgmcommon/printConfig/Makefile
index 77e8943e2c6..77e8943e2c6 100644
--- a/ndb/src/common/mgmcommon/printConfig/Makefile
+++ b/storage/ndb/src/common/mgmcommon/printConfig/Makefile
diff --git a/ndb/src/common/mgmcommon/printConfig/printConfig.cpp b/storage/ndb/src/common/mgmcommon/printConfig/printConfig.cpp
index 7cedbb451e2..7cedbb451e2 100644
--- a/ndb/src/common/mgmcommon/printConfig/printConfig.cpp
+++ b/storage/ndb/src/common/mgmcommon/printConfig/printConfig.cpp
diff --git a/storage/ndb/src/common/portlib/Makefile.am b/storage/ndb/src/common/portlib/Makefile.am
new file mode 100644
index 00000000000..cdefa46af50
--- /dev/null
+++ b/storage/ndb/src/common/portlib/Makefile.am
@@ -0,0 +1,43 @@
+noinst_HEADERS = gcc.cpp
+
+noinst_LTLIBRARIES = libportlib.la
+
+libportlib_la_SOURCES = \
+	NdbCondition.c NdbMutex.c NdbSleep.c NdbTick.c \
+	NdbEnv.c NdbThread.c NdbHost.c NdbTCP.cpp \
+	NdbDaemon.c NdbMem.c \
+	NdbConfig.c
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_util.mk.am
+
+EXTRA_PROGRAMS = memtest PortLibTest munmaptest
+
+PortLibTest_SOURCES = NdbPortLibTest.cpp
+munmaptest_SOURCES = munmaptest.cpp
+
+# Don't update the files from bitkeeper
+WIN_src = win32/NdbCondition.c \
+	win32/NdbDaemon.c \
+	win32/NdbEnv.c \
+	win32/NdbHost.c \
+	win32/NdbMem.c \
+	win32/NdbMutex.c \
+	win32/NdbSleep.c \
+	win32/NdbTCP.c \
+	win32/NdbThread.c \
+	win32/NdbTick.c
+
+windoze-dsp: libportlib.dsp
+
+libportlib.dsp: Makefile \
+	$(top_srcdir)/storage/ndb/config/win-lib.am \
+	$(top_srcdir)/storage/ndb/config/win-name \
+	$(top_srcdir)/storage/ndb/config/win-includes \
+	$(top_srcdir)/storage/ndb/config/win-sources \
+	$(top_srcdir)/storage/ndb/config/win-libraries
+	cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+	@$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
+	@$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+	@$(top_srcdir)/storage/ndb/config/win-sources $@ $(WIN_src)
+	@$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/common/portlib/NdbCondition.c b/storage/ndb/src/common/portlib/NdbCondition.c
index df312c7cc24..df312c7cc24 100644
--- a/ndb/src/common/portlib/NdbCondition.c
+++ b/storage/ndb/src/common/portlib/NdbCondition.c
diff --git a/ndb/src/common/portlib/NdbConfig.c b/storage/ndb/src/common/portlib/NdbConfig.c
index b275143646f..b275143646f 100644
--- a/ndb/src/common/portlib/NdbConfig.c
+++ b/storage/ndb/src/common/portlib/NdbConfig.c
diff --git a/ndb/src/common/portlib/NdbDaemon.c b/storage/ndb/src/common/portlib/NdbDaemon.c
index 3f1c1998501..3f1c1998501 100644
--- a/ndb/src/common/portlib/NdbDaemon.c
+++ b/storage/ndb/src/common/portlib/NdbDaemon.c
diff --git a/ndb/src/common/portlib/NdbEnv.c b/storage/ndb/src/common/portlib/NdbEnv.c
index d294e0b52ca..d294e0b52ca 100644
--- a/ndb/src/common/portlib/NdbEnv.c
+++ b/storage/ndb/src/common/portlib/NdbEnv.c
diff --git a/ndb/src/common/portlib/NdbHost.c b/storage/ndb/src/common/portlib/NdbHost.c
index 4749bb39ea7..4749bb39ea7 100644
--- a/ndb/src/common/portlib/NdbHost.c
+++ b/storage/ndb/src/common/portlib/NdbHost.c
diff --git a/ndb/src/common/portlib/NdbMem.c b/storage/ndb/src/common/portlib/NdbMem.c
index f964f4d9937..f964f4d9937 100644
--- a/ndb/src/common/portlib/NdbMem.c
+++ b/storage/ndb/src/common/portlib/NdbMem.c
diff --git a/ndb/src/common/portlib/NdbMutex.c b/storage/ndb/src/common/portlib/NdbMutex.c
index 4a170d87e5c..4a170d87e5c 100644
--- a/ndb/src/common/portlib/NdbMutex.c
+++ b/storage/ndb/src/common/portlib/NdbMutex.c
diff --git a/ndb/src/common/portlib/NdbPortLibTest.cpp b/storage/ndb/src/common/portlib/NdbPortLibTest.cpp
index d7892411851..d7892411851 100644
--- a/ndb/src/common/portlib/NdbPortLibTest.cpp
+++ b/storage/ndb/src/common/portlib/NdbPortLibTest.cpp
diff --git a/ndb/src/common/portlib/NdbSleep.c b/storage/ndb/src/common/portlib/NdbSleep.c
index 44bafe98a37..44bafe98a37 100644
--- a/ndb/src/common/portlib/NdbSleep.c
+++ b/storage/ndb/src/common/portlib/NdbSleep.c
diff --git a/ndb/src/common/portlib/NdbTCP.cpp b/storage/ndb/src/common/portlib/NdbTCP.cpp
index c7b9d33c5f6..c7b9d33c5f6 100644
--- a/ndb/src/common/portlib/NdbTCP.cpp
+++ b/storage/ndb/src/common/portlib/NdbTCP.cpp
diff --git a/ndb/src/common/portlib/NdbThread.c b/storage/ndb/src/common/portlib/NdbThread.c
index 55ebc4c8111..55ebc4c8111 100644
--- a/ndb/src/common/portlib/NdbThread.c
+++ b/storage/ndb/src/common/portlib/NdbThread.c
diff --git a/ndb/src/common/portlib/NdbTick.c b/storage/ndb/src/common/portlib/NdbTick.c
index d8f0b6ec27a..d8f0b6ec27a 100644
--- a/ndb/src/common/portlib/NdbTick.c
+++ b/storage/ndb/src/common/portlib/NdbTick.c
diff --git a/ndb/src/common/portlib/gcc.cpp b/storage/ndb/src/common/portlib/gcc.cpp
index 4e49d787d3c..4e49d787d3c 100644
--- a/ndb/src/common/portlib/gcc.cpp
+++ b/storage/ndb/src/common/portlib/gcc.cpp
diff --git a/ndb/src/common/portlib/memtest.c b/storage/ndb/src/common/portlib/memtest.c
index 673f23fa803..673f23fa803 100644
--- a/ndb/src/common/portlib/memtest.c
+++ b/storage/ndb/src/common/portlib/memtest.c
diff --git a/ndb/src/common/portlib/mmslist.cpp b/storage/ndb/src/common/portlib/mmslist.cpp
index 05538785293..05538785293 100644
--- a/ndb/src/common/portlib/mmslist.cpp
+++ b/storage/ndb/src/common/portlib/mmslist.cpp
diff --git a/ndb/src/common/portlib/mmstest.cpp b/storage/ndb/src/common/portlib/mmstest.cpp
index 9cc7d810985..9cc7d810985 100644
--- a/ndb/src/common/portlib/mmstest.cpp
+++ b/storage/ndb/src/common/portlib/mmstest.cpp
diff --git a/ndb/src/common/portlib/munmaptest.cpp b/storage/ndb/src/common/portlib/munmaptest.cpp
index b1d84131810..b1d84131810 100644
--- a/ndb/src/common/portlib/munmaptest.cpp
+++ b/storage/ndb/src/common/portlib/munmaptest.cpp
diff --git a/ndb/src/common/portlib/old_dirs/memtest/Makefile b/storage/ndb/src/common/portlib/old_dirs/memtest/Makefile
index 716cdbdea82..716cdbdea82 100644
--- a/ndb/src/common/portlib/old_dirs/memtest/Makefile
+++ b/storage/ndb/src/common/portlib/old_dirs/memtest/Makefile
diff --git a/ndb/src/common/portlib/old_dirs/memtest/munmaptest/Makefile b/storage/ndb/src/common/portlib/old_dirs/memtest/munmaptest/Makefile
index ea8c5238d1c..ea8c5238d1c 100644
--- a/ndb/src/common/portlib/old_dirs/memtest/munmaptest/Makefile
+++ b/storage/ndb/src/common/portlib/old_dirs/memtest/munmaptest/Makefile
diff --git a/ndb/src/common/portlib/old_dirs/ose/Makefile b/storage/ndb/src/common/portlib/old_dirs/ose/Makefile
index 4ef93b7824a..4ef93b7824a 100644
--- a/ndb/src/common/portlib/old_dirs/ose/Makefile
+++ b/storage/ndb/src/common/portlib/old_dirs/ose/Makefile
diff --git a/ndb/src/common/portlib/old_dirs/ose/NdbCondition.c b/storage/ndb/src/common/portlib/old_dirs/ose/NdbCondition.c
index 73a2dbc5d66..73a2dbc5d66 100644
--- a/ndb/src/common/portlib/old_dirs/ose/NdbCondition.c
+++ b/storage/ndb/src/common/portlib/old_dirs/ose/NdbCondition.c
diff --git a/ndb/src/common/portlib/old_dirs/ose/NdbConditionOSE.h b/storage/ndb/src/common/portlib/old_dirs/ose/NdbConditionOSE.h
index bd0306261cc..bd0306261cc 100644
--- a/ndb/src/common/portlib/old_dirs/ose/NdbConditionOSE.h
+++ b/storage/ndb/src/common/portlib/old_dirs/ose/NdbConditionOSE.h
diff --git a/ndb/src/common/portlib/old_dirs/ose/NdbEnv.c b/storage/ndb/src/common/portlib/old_dirs/ose/NdbEnv.c
index e2ac4d879d2..e2ac4d879d2 100644
--- a/ndb/src/common/portlib/old_dirs/ose/NdbEnv.c
+++ b/storage/ndb/src/common/portlib/old_dirs/ose/NdbEnv.c
diff --git a/ndb/src/common/portlib/old_dirs/ose/NdbHost.c b/storage/ndb/src/common/portlib/old_dirs/ose/NdbHost.c
index f5e1e511c16..f5e1e511c16 100644
--- a/ndb/src/common/portlib/old_dirs/ose/NdbHost.c
+++ b/storage/ndb/src/common/portlib/old_dirs/ose/NdbHost.c
diff --git a/ndb/src/common/portlib/old_dirs/ose/NdbMem.c b/storage/ndb/src/common/portlib/old_dirs/ose/NdbMem.c
index 0e38024bbb4..0e38024bbb4 100644
--- a/ndb/src/common/portlib/old_dirs/ose/NdbMem.c
+++ b/storage/ndb/src/common/portlib/old_dirs/ose/NdbMem.c
diff --git a/ndb/src/common/portlib/old_dirs/ose/NdbMem_SoftOse.cpp b/storage/ndb/src/common/portlib/old_dirs/ose/NdbMem_SoftOse.cpp
index cad22c0474b..cad22c0474b 100644
--- a/ndb/src/common/portlib/old_dirs/ose/NdbMem_SoftOse.cpp
+++ b/storage/ndb/src/common/portlib/old_dirs/ose/NdbMem_SoftOse.cpp
diff --git a/ndb/src/common/portlib/old_dirs/ose/NdbMutex.c b/storage/ndb/src/common/portlib/old_dirs/ose/NdbMutex.c
index 253c0e412ff..253c0e412ff 100644
--- a/ndb/src/common/portlib/old_dirs/ose/NdbMutex.c
+++ b/storage/ndb/src/common/portlib/old_dirs/ose/NdbMutex.c
diff --git a/ndb/src/common/portlib/old_dirs/ose/NdbOut.cpp b/storage/ndb/src/common/portlib/old_dirs/ose/NdbOut.cpp
index eb81bc9d971..eb81bc9d971 100644
--- a/ndb/src/common/portlib/old_dirs/ose/NdbOut.cpp
+++ b/storage/ndb/src/common/portlib/old_dirs/ose/NdbOut.cpp
diff --git a/ndb/src/common/portlib/old_dirs/ose/NdbSleep.c b/storage/ndb/src/common/portlib/old_dirs/ose/NdbSleep.c
index 70fd83117ef..70fd83117ef 100644
--- a/ndb/src/common/portlib/old_dirs/ose/NdbSleep.c
+++ b/storage/ndb/src/common/portlib/old_dirs/ose/NdbSleep.c
diff --git a/ndb/src/common/portlib/old_dirs/ose/NdbTCP.c b/storage/ndb/src/common/portlib/old_dirs/ose/NdbTCP.c
index 9994697b3f8..9994697b3f8 100644
--- a/ndb/src/common/portlib/old_dirs/ose/NdbTCP.c
+++ b/storage/ndb/src/common/portlib/old_dirs/ose/NdbTCP.c
diff --git a/ndb/src/common/portlib/old_dirs/ose/NdbThread.c b/storage/ndb/src/common/portlib/old_dirs/ose/NdbThread.c
index e46903a5cce..e46903a5cce 100644
--- a/ndb/src/common/portlib/old_dirs/ose/NdbThread.c
+++ b/storage/ndb/src/common/portlib/old_dirs/ose/NdbThread.c
diff --git a/ndb/src/common/portlib/old_dirs/ose/NdbTick.c b/storage/ndb/src/common/portlib/old_dirs/ose/NdbTick.c
index c3deae2bec3..c3deae2bec3 100644
--- a/ndb/src/common/portlib/old_dirs/ose/NdbTick.c
+++ b/storage/ndb/src/common/portlib/old_dirs/ose/NdbTick.c
diff --git a/ndb/src/common/portlib/old_dirs/test/Makefile b/storage/ndb/src/common/portlib/old_dirs/test/Makefile
index 4edc98ede75..4edc98ede75 100644
--- a/ndb/src/common/portlib/old_dirs/test/Makefile
+++ b/storage/ndb/src/common/portlib/old_dirs/test/Makefile
diff --git a/ndb/src/common/portlib/old_dirs/win32/Makefile b/storage/ndb/src/common/portlib/old_dirs/win32/Makefile
index bb29ac5547e..bb29ac5547e 100644
--- a/ndb/src/common/portlib/old_dirs/win32/Makefile
+++ b/storage/ndb/src/common/portlib/old_dirs/win32/Makefile
diff --git a/ndb/src/common/portlib/old_dirs/win32/NdbCondition.c b/storage/ndb/src/common/portlib/old_dirs/win32/NdbCondition.c
index 77869b673de..77869b673de 100644
--- a/ndb/src/common/portlib/old_dirs/win32/NdbCondition.c
+++ b/storage/ndb/src/common/portlib/old_dirs/win32/NdbCondition.c
diff --git a/ndb/src/common/portlib/old_dirs/win32/NdbDaemon.c b/storage/ndb/src/common/portlib/old_dirs/win32/NdbDaemon.c
index 972fb1b88d8..972fb1b88d8 100644
--- a/ndb/src/common/portlib/old_dirs/win32/NdbDaemon.c
+++ b/storage/ndb/src/common/portlib/old_dirs/win32/NdbDaemon.c
diff --git a/ndb/src/common/portlib/old_dirs/win32/NdbEnv.c b/storage/ndb/src/common/portlib/old_dirs/win32/NdbEnv.c
index 0df703a5e97..0df703a5e97 100644
--- a/ndb/src/common/portlib/old_dirs/win32/NdbEnv.c
+++ b/storage/ndb/src/common/portlib/old_dirs/win32/NdbEnv.c
diff --git a/ndb/src/common/portlib/old_dirs/win32/NdbHost.c b/storage/ndb/src/common/portlib/old_dirs/win32/NdbHost.c
index f91dd1a531c..f91dd1a531c 100644
--- a/ndb/src/common/portlib/old_dirs/win32/NdbHost.c
+++ b/storage/ndb/src/common/portlib/old_dirs/win32/NdbHost.c
diff --git a/ndb/src/common/portlib/old_dirs/win32/NdbMem.c b/storage/ndb/src/common/portlib/old_dirs/win32/NdbMem.c
index ab7123b0a29..ab7123b0a29 100644
--- a/ndb/src/common/portlib/old_dirs/win32/NdbMem.c
+++ b/storage/ndb/src/common/portlib/old_dirs/win32/NdbMem.c
diff --git a/ndb/src/common/portlib/old_dirs/win32/NdbMutex.c b/storage/ndb/src/common/portlib/old_dirs/win32/NdbMutex.c
index e797024d5bb..e797024d5bb 100644
--- a/ndb/src/common/portlib/old_dirs/win32/NdbMutex.c
+++ b/storage/ndb/src/common/portlib/old_dirs/win32/NdbMutex.c
diff --git a/ndb/src/common/portlib/old_dirs/win32/NdbSleep.c b/storage/ndb/src/common/portlib/old_dirs/win32/NdbSleep.c
index ac0f44dd07f..ac0f44dd07f 100644
--- a/ndb/src/common/portlib/old_dirs/win32/NdbSleep.c
+++ b/storage/ndb/src/common/portlib/old_dirs/win32/NdbSleep.c
diff --git a/ndb/src/common/portlib/old_dirs/win32/NdbTCP.c b/storage/ndb/src/common/portlib/old_dirs/win32/NdbTCP.c
index 483a53bd606..483a53bd606 100644
--- a/ndb/src/common/portlib/old_dirs/win32/NdbTCP.c
+++ b/storage/ndb/src/common/portlib/old_dirs/win32/NdbTCP.c
diff --git a/ndb/src/common/portlib/old_dirs/win32/NdbThread.c b/storage/ndb/src/common/portlib/old_dirs/win32/NdbThread.c
index 1f052f034e8..1f052f034e8 100644
--- a/ndb/src/common/portlib/old_dirs/win32/NdbThread.c
+++ b/storage/ndb/src/common/portlib/old_dirs/win32/NdbThread.c
diff --git a/ndb/src/common/portlib/old_dirs/win32/NdbTick.c b/storage/ndb/src/common/portlib/old_dirs/win32/NdbTick.c
index e3a67d8437d..e3a67d8437d 100644
--- a/ndb/src/common/portlib/old_dirs/win32/NdbTick.c
+++ b/storage/ndb/src/common/portlib/old_dirs/win32/NdbTick.c
diff --git a/ndb/src/common/portlib/win32/NdbCondition.c b/storage/ndb/src/common/portlib/win32/NdbCondition.c
index 4046db1d60a..4046db1d60a 100644
--- a/ndb/src/common/portlib/win32/NdbCondition.c
+++ b/storage/ndb/src/common/portlib/win32/NdbCondition.c
diff --git a/ndb/src/common/portlib/win32/NdbDaemon.c b/storage/ndb/src/common/portlib/win32/NdbDaemon.c
index b96d4c20260..b96d4c20260 100644
--- a/ndb/src/common/portlib/win32/NdbDaemon.c
+++ b/storage/ndb/src/common/portlib/win32/NdbDaemon.c
diff --git a/ndb/src/common/portlib/win32/NdbEnv.c b/storage/ndb/src/common/portlib/win32/NdbEnv.c
index f42e685fe15..f42e685fe15 100644
--- a/ndb/src/common/portlib/win32/NdbEnv.c
+++ b/storage/ndb/src/common/portlib/win32/NdbEnv.c
diff --git a/ndb/src/common/portlib/win32/NdbHost.c b/storage/ndb/src/common/portlib/win32/NdbHost.c
index 7df96c45991..7df96c45991 100644
--- a/ndb/src/common/portlib/win32/NdbHost.c
+++ b/storage/ndb/src/common/portlib/win32/NdbHost.c
diff --git a/ndb/src/common/portlib/win32/NdbMem.c b/storage/ndb/src/common/portlib/win32/NdbMem.c
index 313ca9dff66..313ca9dff66 100644
--- a/ndb/src/common/portlib/win32/NdbMem.c
+++ b/storage/ndb/src/common/portlib/win32/NdbMem.c
diff --git a/ndb/src/common/portlib/win32/NdbMutex.c b/storage/ndb/src/common/portlib/win32/NdbMutex.c
index e6d1f081e9a..e6d1f081e9a 100644
--- a/ndb/src/common/portlib/win32/NdbMutex.c
+++ b/storage/ndb/src/common/portlib/win32/NdbMutex.c
diff --git a/ndb/src/common/portlib/win32/NdbSleep.c b/storage/ndb/src/common/portlib/win32/NdbSleep.c
index 8f5bdc49acd..8f5bdc49acd 100644
--- a/ndb/src/common/portlib/win32/NdbSleep.c
+++ b/storage/ndb/src/common/portlib/win32/NdbSleep.c
diff --git a/ndb/src/common/portlib/win32/NdbTCP.c b/storage/ndb/src/common/portlib/win32/NdbTCP.c
index b936cd2db6c..b936cd2db6c 100644
--- a/ndb/src/common/portlib/win32/NdbTCP.c
+++ b/storage/ndb/src/common/portlib/win32/NdbTCP.c
diff --git a/ndb/src/common/portlib/win32/NdbThread.c b/storage/ndb/src/common/portlib/win32/NdbThread.c
index 98db0d5c287..98db0d5c287 100644
--- a/ndb/src/common/portlib/win32/NdbThread.c
+++ b/storage/ndb/src/common/portlib/win32/NdbThread.c
diff --git a/ndb/src/common/portlib/win32/NdbTick.c b/storage/ndb/src/common/portlib/win32/NdbTick.c
index 4430cbf419b..4430cbf419b 100644
--- a/ndb/src/common/portlib/win32/NdbTick.c
+++ b/storage/ndb/src/common/portlib/win32/NdbTick.c
diff --git a/storage/ndb/src/common/transporter/Makefile.am b/storage/ndb/src/common/transporter/Makefile.am
new file mode 100644
index 00000000000..bf7b64af186
--- /dev/null
+++ b/storage/ndb/src/common/transporter/Makefile.am
@@ -0,0 +1,36 @@
+
+noinst_LTLIBRARIES = libtransporter.la
+
+libtransporter_la_SOURCES = \
+	Transporter.cpp \
+	SendBuffer.cpp \
+	TCP_Transporter.cpp \
+	TransporterRegistry.cpp \
+	Packer.cpp
+
+EXTRA_libtransporter_la_SOURCES = SHM_Transporter.cpp SHM_Transporter.unix.cpp SCI_Transporter.cpp
+
+libtransporter_la_LIBADD = @ndb_transporter_opt_objs@
+libtransporter_la_DEPENDENCIES = @ndb_transporter_opt_objs@
+
+INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/include/mgmapi -I$(top_srcdir)/storage/ndb/src/mgmapi -I$(top_srcdir)/storage/ndb/include/debugger -I$(top_srcdir)/storage/ndb/include/kernel -I$(top_srcdir)/storage/ndb/include/transporter @NDB_SCI_INCLUDES@
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_util.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libtransporter.dsp
+
+libtransporter.dsp: Makefile \
+	$(top_srcdir)/storage/ndb/config/win-lib.am \
+	$(top_srcdir)/storage/ndb/config/win-name \
+	$(top_srcdir)/storage/ndb/config/win-includes \
+	$(top_srcdir)/storage/ndb/config/win-sources \
+	$(top_srcdir)/storage/ndb/config/win-libraries
+	cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+	@$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
+	@$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+	@$(top_srcdir)/storage/ndb/config/win-sources $@ $(libtransporter_la_SOURCES)
+	@$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/common/transporter/OSE_Receiver.cpp b/storage/ndb/src/common/transporter/OSE_Receiver.cpp
index 63a33fc8f24..63a33fc8f24 100644
--- a/ndb/src/common/transporter/OSE_Receiver.cpp
+++ b/storage/ndb/src/common/transporter/OSE_Receiver.cpp
diff --git a/ndb/src/common/transporter/OSE_Receiver.hpp b/storage/ndb/src/common/transporter/OSE_Receiver.hpp
index 1812ab51065..1812ab51065 100644
--- a/ndb/src/common/transporter/OSE_Receiver.hpp
+++ b/storage/ndb/src/common/transporter/OSE_Receiver.hpp
diff --git a/ndb/src/common/transporter/OSE_Signals.hpp b/storage/ndb/src/common/transporter/OSE_Signals.hpp
index 3f6cc07b473..3f6cc07b473 100644
--- a/ndb/src/common/transporter/OSE_Signals.hpp
+++ b/storage/ndb/src/common/transporter/OSE_Signals.hpp
diff --git a/ndb/src/common/transporter/OSE_Transporter.cpp b/storage/ndb/src/common/transporter/OSE_Transporter.cpp
index ad67791fc0c..ad67791fc0c 100644
--- a/ndb/src/common/transporter/OSE_Transporter.cpp
+++ b/storage/ndb/src/common/transporter/OSE_Transporter.cpp
diff --git a/ndb/src/common/transporter/OSE_Transporter.hpp b/storage/ndb/src/common/transporter/OSE_Transporter.hpp
index 898352366ba..898352366ba 100644
--- a/ndb/src/common/transporter/OSE_Transporter.hpp
+++ b/storage/ndb/src/common/transporter/OSE_Transporter.hpp
diff --git a/ndb/src/common/transporter/Packer.cpp b/storage/ndb/src/common/transporter/Packer.cpp
index bcfac8417bb..bcfac8417bb 100644
--- a/ndb/src/common/transporter/Packer.cpp
+++ b/storage/ndb/src/common/transporter/Packer.cpp
diff --git a/ndb/src/common/transporter/Packer.hpp b/storage/ndb/src/common/transporter/Packer.hpp
index 5c191203201..5c191203201 100644
--- a/ndb/src/common/transporter/Packer.hpp
+++ b/storage/ndb/src/common/transporter/Packer.hpp
diff --git a/ndb/src/common/transporter/SCI_Transporter.cpp b/storage/ndb/src/common/transporter/SCI_Transporter.cpp
index 1fe276249e5..1fe276249e5 100644
--- a/ndb/src/common/transporter/SCI_Transporter.cpp
+++ b/storage/ndb/src/common/transporter/SCI_Transporter.cpp
diff --git a/ndb/src/common/transporter/SCI_Transporter.hpp b/storage/ndb/src/common/transporter/SCI_Transporter.hpp
index cb42e437118..cb42e437118 100644
--- a/ndb/src/common/transporter/SCI_Transporter.hpp
+++ b/storage/ndb/src/common/transporter/SCI_Transporter.hpp
diff --git a/ndb/src/common/transporter/SHM_Buffer.hpp b/storage/ndb/src/common/transporter/SHM_Buffer.hpp
index 27321a3191f..27321a3191f 100644
--- a/ndb/src/common/transporter/SHM_Buffer.hpp
+++ b/storage/ndb/src/common/transporter/SHM_Buffer.hpp
diff --git a/ndb/src/common/transporter/SHM_Transporter.cpp b/storage/ndb/src/common/transporter/SHM_Transporter.cpp
index a225988d37f..a225988d37f 100644
--- a/ndb/src/common/transporter/SHM_Transporter.cpp
+++ b/storage/ndb/src/common/transporter/SHM_Transporter.cpp
diff --git a/ndb/src/common/transporter/SHM_Transporter.hpp b/storage/ndb/src/common/transporter/SHM_Transporter.hpp
index e7a76225471..e7a76225471 100644
--- a/ndb/src/common/transporter/SHM_Transporter.hpp
+++ b/storage/ndb/src/common/transporter/SHM_Transporter.hpp
diff --git a/ndb/src/common/transporter/SHM_Transporter.unix.cpp b/storage/ndb/src/common/transporter/SHM_Transporter.unix.cpp
index 28882324fc0..28882324fc0 100644
--- a/ndb/src/common/transporter/SHM_Transporter.unix.cpp
+++ b/storage/ndb/src/common/transporter/SHM_Transporter.unix.cpp
diff --git a/ndb/src/common/transporter/SHM_Transporter.win32.cpp b/storage/ndb/src/common/transporter/SHM_Transporter.win32.cpp
index c289a85da0e..c289a85da0e 100644
--- a/ndb/src/common/transporter/SHM_Transporter.win32.cpp
+++ b/storage/ndb/src/common/transporter/SHM_Transporter.win32.cpp
diff --git a/ndb/src/common/transporter/SendBuffer.cpp b/storage/ndb/src/common/transporter/SendBuffer.cpp
index 8f69eb4bd40..8f69eb4bd40 100644
--- a/ndb/src/common/transporter/SendBuffer.cpp
+++ b/storage/ndb/src/common/transporter/SendBuffer.cpp
diff --git a/ndb/src/common/transporter/SendBuffer.hpp b/storage/ndb/src/common/transporter/SendBuffer.hpp
index 7ebeb6d890e..7ebeb6d890e 100644
--- a/ndb/src/common/transporter/SendBuffer.hpp
+++ b/storage/ndb/src/common/transporter/SendBuffer.hpp
diff --git a/ndb/src/common/transporter/TCP_Transporter.cpp b/storage/ndb/src/common/transporter/TCP_Transporter.cpp
index 5db12d3985c..5db12d3985c 100644
--- a/ndb/src/common/transporter/TCP_Transporter.cpp
+++ b/storage/ndb/src/common/transporter/TCP_Transporter.cpp
diff --git a/ndb/src/common/transporter/TCP_Transporter.hpp b/storage/ndb/src/common/transporter/TCP_Transporter.hpp
index df4149531b4..df4149531b4 100644
--- a/ndb/src/common/transporter/TCP_Transporter.hpp
+++ b/storage/ndb/src/common/transporter/TCP_Transporter.hpp
diff --git a/ndb/src/common/transporter/Transporter.cpp b/storage/ndb/src/common/transporter/Transporter.cpp
index 377fabe27ab..377fabe27ab 100644
--- a/ndb/src/common/transporter/Transporter.cpp
+++ b/storage/ndb/src/common/transporter/Transporter.cpp
diff --git a/ndb/src/common/transporter/Transporter.hpp b/storage/ndb/src/common/transporter/Transporter.hpp
index c9f4e9bda42..c9f4e9bda42 100644
--- a/ndb/src/common/transporter/Transporter.hpp
+++ b/storage/ndb/src/common/transporter/Transporter.hpp
diff --git a/ndb/src/common/transporter/TransporterInternalDefinitions.hpp b/storage/ndb/src/common/transporter/TransporterInternalDefinitions.hpp
index 624b495422f..624b495422f 100644
--- a/ndb/src/common/transporter/TransporterInternalDefinitions.hpp
+++ b/storage/ndb/src/common/transporter/TransporterInternalDefinitions.hpp
diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/storage/ndb/src/common/transporter/TransporterRegistry.cpp
index 60649665d4a..60649665d4a 100644
--- a/ndb/src/common/transporter/TransporterRegistry.cpp
+++ b/storage/ndb/src/common/transporter/TransporterRegistry.cpp
diff --git a/ndb/src/common/transporter/basictest/Makefile b/storage/ndb/src/common/transporter/basictest/Makefile
index d86af360408..d86af360408 100644
--- a/ndb/src/common/transporter/basictest/Makefile
+++ b/storage/ndb/src/common/transporter/basictest/Makefile
diff --git a/ndb/src/common/transporter/basictest/basicTransporterTest.cpp b/storage/ndb/src/common/transporter/basictest/basicTransporterTest.cpp
index c0a437c4907..c0a437c4907 100644
--- a/ndb/src/common/transporter/basictest/basicTransporterTest.cpp
+++ b/storage/ndb/src/common/transporter/basictest/basicTransporterTest.cpp
diff --git a/ndb/src/common/transporter/buddy.cpp b/storage/ndb/src/common/transporter/buddy.cpp
index dc25e2dc66c..dc25e2dc66c 100644
--- a/ndb/src/common/transporter/buddy.cpp
+++ b/storage/ndb/src/common/transporter/buddy.cpp
diff --git a/ndb/src/common/transporter/buddy.hpp b/storage/ndb/src/common/transporter/buddy.hpp
index f720e9e61a1..f720e9e61a1 100644
--- a/ndb/src/common/transporter/buddy.hpp
+++ b/storage/ndb/src/common/transporter/buddy.hpp
diff --git a/ndb/src/common/transporter/failoverSCI/Makefile b/storage/ndb/src/common/transporter/failoverSCI/Makefile
index 1e3d5f4a4b7..1e3d5f4a4b7 100644
--- a/ndb/src/common/transporter/failoverSCI/Makefile
+++ b/storage/ndb/src/common/transporter/failoverSCI/Makefile
diff --git a/ndb/src/common/transporter/failoverSCI/failoverSCI.cpp b/storage/ndb/src/common/transporter/failoverSCI/failoverSCI.cpp
index 803029ee565..803029ee565 100644
--- a/ndb/src/common/transporter/failoverSCI/failoverSCI.cpp
+++ b/storage/ndb/src/common/transporter/failoverSCI/failoverSCI.cpp
diff --git a/ndb/src/common/transporter/perftest/Makefile b/storage/ndb/src/common/transporter/perftest/Makefile
index 01869e1acf9..01869e1acf9 100644
--- a/ndb/src/common/transporter/perftest/Makefile
+++ b/storage/ndb/src/common/transporter/perftest/Makefile
diff --git a/ndb/src/common/transporter/perftest/perfTransporterTest.cpp b/storage/ndb/src/common/transporter/perftest/perfTransporterTest.cpp
index 71df9f12a4c..71df9f12a4c 100644
--- a/ndb/src/common/transporter/perftest/perfTransporterTest.cpp
+++ b/storage/ndb/src/common/transporter/perftest/perfTransporterTest.cpp
diff --git a/ndb/src/common/transporter/priotest/Makefile b/storage/ndb/src/common/transporter/priotest/Makefile
index 483fc0f1f07..483fc0f1f07 100644
--- a/ndb/src/common/transporter/priotest/Makefile
+++ b/storage/ndb/src/common/transporter/priotest/Makefile
diff --git a/ndb/src/common/transporter/priotest/prioOSE/Makefile b/storage/ndb/src/common/transporter/priotest/prioOSE/Makefile
index 4df66fa35e0..4df66fa35e0 100644
--- a/ndb/src/common/transporter/priotest/prioOSE/Makefile
+++ b/storage/ndb/src/common/transporter/priotest/prioOSE/Makefile
diff --git a/ndb/src/common/transporter/priotest/prioSCI/Makefile b/storage/ndb/src/common/transporter/priotest/prioSCI/Makefile
index 7d403539bf3..7d403539bf3 100644
--- a/ndb/src/common/transporter/priotest/prioSCI/Makefile
+++ b/storage/ndb/src/common/transporter/priotest/prioSCI/Makefile
diff --git a/ndb/src/common/transporter/priotest/prioSCI/prioSCI.cpp b/storage/ndb/src/common/transporter/priotest/prioSCI/prioSCI.cpp
index 6218b764e09..6218b764e09 100644
--- a/ndb/src/common/transporter/priotest/prioSCI/prioSCI.cpp
+++ b/storage/ndb/src/common/transporter/priotest/prioSCI/prioSCI.cpp
diff --git a/ndb/src/common/transporter/priotest/prioSHM/Makefile b/storage/ndb/src/common/transporter/priotest/prioSHM/Makefile
index a827c6e3f1e..a827c6e3f1e 100644
--- a/ndb/src/common/transporter/priotest/prioSHM/Makefile
+++ b/storage/ndb/src/common/transporter/priotest/prioSHM/Makefile
diff --git a/ndb/src/common/transporter/priotest/prioSHM/prioSHM.cpp b/storage/ndb/src/common/transporter/priotest/prioSHM/prioSHM.cpp
index 4c1701a91e4..4c1701a91e4 100644
--- a/ndb/src/common/transporter/priotest/prioSHM/prioSHM.cpp
+++ b/storage/ndb/src/common/transporter/priotest/prioSHM/prioSHM.cpp
diff --git a/ndb/src/common/transporter/priotest/prioTCP/Makefile b/storage/ndb/src/common/transporter/priotest/prioTCP/Makefile
index 92abf3e7424..92abf3e7424 100644
--- a/ndb/src/common/transporter/priotest/prioTCP/Makefile
+++ b/storage/ndb/src/common/transporter/priotest/prioTCP/Makefile
diff --git a/ndb/src/common/transporter/priotest/prioTCP/prioTCP.cpp b/storage/ndb/src/common/transporter/priotest/prioTCP/prioTCP.cpp
index f993dd05ac8..f993dd05ac8 100644
--- a/ndb/src/common/transporter/priotest/prioTCP/prioTCP.cpp
+++ b/storage/ndb/src/common/transporter/priotest/prioTCP/prioTCP.cpp
diff --git a/ndb/src/common/transporter/priotest/prioTransporterTest.cpp b/storage/ndb/src/common/transporter/priotest/prioTransporterTest.cpp
index 6c5623a49a6..6c5623a49a6 100644
--- a/ndb/src/common/transporter/priotest/prioTransporterTest.cpp
+++ b/storage/ndb/src/common/transporter/priotest/prioTransporterTest.cpp
diff --git a/ndb/src/common/transporter/priotest/prioTransporterTest.hpp b/storage/ndb/src/common/transporter/priotest/prioTransporterTest.hpp
index 787a9f46433..787a9f46433 100644
--- a/ndb/src/common/transporter/priotest/prioTransporterTest.hpp
+++ b/storage/ndb/src/common/transporter/priotest/prioTransporterTest.hpp
diff --git a/ndb/src/common/util/Base64.cpp b/storage/ndb/src/common/util/Base64.cpp
index 3db911f481f..3db911f481f 100644
--- a/ndb/src/common/util/Base64.cpp
+++ b/storage/ndb/src/common/util/Base64.cpp
diff --git a/ndb/src/common/util/BaseString.cpp b/storage/ndb/src/common/util/BaseString.cpp
index dbff44c377d..dbff44c377d 100644
--- a/ndb/src/common/util/BaseString.cpp
+++ b/storage/ndb/src/common/util/BaseString.cpp
diff --git a/ndb/src/common/util/Bitmask.cpp b/storage/ndb/src/common/util/Bitmask.cpp
index 0aa39a37204..0aa39a37204 100644
--- a/ndb/src/common/util/Bitmask.cpp
+++ b/storage/ndb/src/common/util/Bitmask.cpp
diff --git a/ndb/src/common/util/ConfigValues.cpp b/storage/ndb/src/common/util/ConfigValues.cpp
index 5c4b17c73ca..5c4b17c73ca 100644
--- a/ndb/src/common/util/ConfigValues.cpp
+++ b/storage/ndb/src/common/util/ConfigValues.cpp
diff --git a/ndb/src/common/util/File.cpp b/storage/ndb/src/common/util/File.cpp
index e514ad8e122..e514ad8e122 100644
--- a/ndb/src/common/util/File.cpp
+++ b/storage/ndb/src/common/util/File.cpp
diff --git a/ndb/src/common/util/InputStream.cpp b/storage/ndb/src/common/util/InputStream.cpp
index 410e9a70e9c..410e9a70e9c 100644
--- a/ndb/src/common/util/InputStream.cpp
+++ b/storage/ndb/src/common/util/InputStream.cpp
diff --git a/storage/ndb/src/common/util/Makefile.am b/storage/ndb/src/common/util/Makefile.am
new file mode 100644
index 00000000000..7ba3ef941c5
--- /dev/null
+++ b/storage/ndb/src/common/util/Makefile.am
@@ -0,0 +1,49 @@
+
+noinst_LTLIBRARIES = libgeneral.la
+
+libgeneral_la_SOURCES = \
+	File.cpp md5_hash.cpp Properties.cpp socket_io.cpp \
+	SimpleProperties.cpp Parser.cpp InputStream.cpp \
+	SocketServer.cpp SocketClient.cpp SocketAuthenticator.cpp\
+	OutputStream.cpp NdbOut.cpp BaseString.cpp Base64.cpp \
+	NdbSqlUtil.cpp new.cpp \
+	uucode.c random.c version.c \
+	strdup.c \
+	ConfigValues.cpp ndb_init.c basestring_vsnprintf.c \
+	Bitmask.cpp
+
+EXTRA_PROGRAMS = testBitmask
+testBitmask_SOURCES = testBitmask.cpp
+testBitmask_LDFLAGS = @ndb_bin_am_ldflags@ \
+	$(top_builddir)/storage/ndb/src/libndbclient.la \
+	$(top_builddir)/dbug/libdbug.a \
+	$(top_builddir)/mysys/libmysys.a \
+	$(top_builddir)/strings/libmystrings.a
+
+testBitmask.cpp : Bitmask.cpp
+	rm -f testBitmask.cpp
+	@LN_CP_F@ Bitmask.cpp testBitmask.cpp
+
+testBitmask.o: $(testBitmask_SOURCES)
+	$(CXXCOMPILE) -c $(INCLUDES) -D__TEST_BITMASK__ $<
+
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_util.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libgeneral.dsp
+
+libgeneral.dsp: Makefile \
+	$(top_srcdir)/storage/ndb/config/win-lib.am \
+	$(top_srcdir)/storage/ndb/config/win-name \
+	$(top_srcdir)/storage/ndb/config/win-includes \
+	$(top_srcdir)/storage/ndb/config/win-sources \
+	$(top_srcdir)/storage/ndb/config/win-libraries
+	cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+	@$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
+	@$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+	@$(top_srcdir)/storage/ndb/config/win-sources $@ $(libgeneral_la_SOURCES)
+	@$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/common/util/NdbErrHnd.cpp b/storage/ndb/src/common/util/NdbErrHnd.cpp
index 38a67f29853..38a67f29853 100644
--- a/ndb/src/common/util/NdbErrHnd.cpp
+++ b/storage/ndb/src/common/util/NdbErrHnd.cpp
diff --git a/ndb/src/common/util/NdbOut.cpp b/storage/ndb/src/common/util/NdbOut.cpp
index e20119a7987..e20119a7987 100644
--- a/ndb/src/common/util/NdbOut.cpp
+++ b/storage/ndb/src/common/util/NdbOut.cpp
diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/storage/ndb/src/common/util/NdbSqlUtil.cpp
index 09e150dbacf..09e150dbacf 100644
--- a/ndb/src/common/util/NdbSqlUtil.cpp
+++ b/storage/ndb/src/common/util/NdbSqlUtil.cpp
diff --git a/ndb/src/common/util/OutputStream.cpp b/storage/ndb/src/common/util/OutputStream.cpp
index a41eef649dd..a41eef649dd 100644
--- a/ndb/src/common/util/OutputStream.cpp
+++ b/storage/ndb/src/common/util/OutputStream.cpp
diff --git a/ndb/src/common/util/Parser.cpp b/storage/ndb/src/common/util/Parser.cpp
index d692aa18392..d692aa18392 100644
--- a/ndb/src/common/util/Parser.cpp
+++ b/storage/ndb/src/common/util/Parser.cpp
diff --git a/ndb/src/common/util/Properties.cpp b/storage/ndb/src/common/util/Properties.cpp
index 0edcda0e726..0edcda0e726 100644
--- a/ndb/src/common/util/Properties.cpp
+++ b/storage/ndb/src/common/util/Properties.cpp
diff --git a/ndb/src/common/util/SimpleProperties.cpp b/storage/ndb/src/common/util/SimpleProperties.cpp
index c25aaea491a..c25aaea491a 100644
--- a/ndb/src/common/util/SimpleProperties.cpp
+++ b/storage/ndb/src/common/util/SimpleProperties.cpp
diff --git a/ndb/src/common/util/SocketAuthenticator.cpp b/storage/ndb/src/common/util/SocketAuthenticator.cpp
index aed4db39231..aed4db39231 100644
--- a/ndb/src/common/util/SocketAuthenticator.cpp
+++ b/storage/ndb/src/common/util/SocketAuthenticator.cpp
diff --git a/ndb/src/common/util/SocketClient.cpp b/storage/ndb/src/common/util/SocketClient.cpp
index 821624eb5c4..821624eb5c4 100644
--- a/ndb/src/common/util/SocketClient.cpp
+++ b/storage/ndb/src/common/util/SocketClient.cpp
diff --git a/ndb/src/common/util/SocketServer.cpp b/storage/ndb/src/common/util/SocketServer.cpp
index 15dca2d96b1..15dca2d96b1 100644
--- a/ndb/src/common/util/SocketServer.cpp
+++ b/storage/ndb/src/common/util/SocketServer.cpp
diff --git a/ndb/src/common/util/basestring_vsnprintf.c b/storage/ndb/src/common/util/basestring_vsnprintf.c
index f5d01fb1532..f5d01fb1532 100644
--- a/ndb/src/common/util/basestring_vsnprintf.c
+++ b/storage/ndb/src/common/util/basestring_vsnprintf.c
diff --git a/ndb/src/common/util/filetest/FileUnitTest.cpp b/storage/ndb/src/common/util/filetest/FileUnitTest.cpp
index b6e7b7e8ec0..b6e7b7e8ec0 100644
--- a/ndb/src/common/util/filetest/FileUnitTest.cpp
+++ b/storage/ndb/src/common/util/filetest/FileUnitTest.cpp
diff --git a/ndb/src/common/util/filetest/FileUnitTest.hpp b/storage/ndb/src/common/util/filetest/FileUnitTest.hpp
index a589615e9b2..a589615e9b2 100644
--- a/ndb/src/common/util/filetest/FileUnitTest.hpp
+++ b/storage/ndb/src/common/util/filetest/FileUnitTest.hpp
diff --git a/ndb/src/common/util/filetest/Makefile b/storage/ndb/src/common/util/filetest/Makefile
index fe1842921f9..fe1842921f9 100644
--- a/ndb/src/common/util/filetest/Makefile
+++ b/storage/ndb/src/common/util/filetest/Makefile
diff --git a/ndb/src/common/util/getarg.cat3 b/storage/ndb/src/common/util/getarg.cat3
index 31685510537..31685510537 100644
--- a/ndb/src/common/util/getarg.cat3
+++ b/storage/ndb/src/common/util/getarg.cat3
diff --git a/ndb/src/common/util/md5_hash.cpp b/storage/ndb/src/common/util/md5_hash.cpp
index d4eedbc40fb..d4eedbc40fb 100644
--- a/ndb/src/common/util/md5_hash.cpp
+++ b/storage/ndb/src/common/util/md5_hash.cpp
diff --git a/ndb/src/common/util/ndb_init.c b/storage/ndb/src/common/util/ndb_init.c
index f3aa734d7f9..f3aa734d7f9 100644
--- a/ndb/src/common/util/ndb_init.c
+++ b/storage/ndb/src/common/util/ndb_init.c
diff --git a/ndb/src/common/util/new.cpp b/storage/ndb/src/common/util/new.cpp
index 643800f1582..643800f1582 100644
--- a/ndb/src/common/util/new.cpp
+++ b/storage/ndb/src/common/util/new.cpp
diff --git a/ndb/src/common/util/random.c b/storage/ndb/src/common/util/random.c
index 21235763793..21235763793 100644
--- a/ndb/src/common/util/random.c
+++ b/storage/ndb/src/common/util/random.c
diff --git a/ndb/src/common/util/socket_io.cpp b/storage/ndb/src/common/util/socket_io.cpp
index 83a546de773..83a546de773 100644
--- a/ndb/src/common/util/socket_io.cpp
+++ b/storage/ndb/src/common/util/socket_io.cpp
diff --git a/ndb/src/common/util/strdup.c b/storage/ndb/src/common/util/strdup.c
index d8f4d99bd28..d8f4d99bd28 100644
--- a/ndb/src/common/util/strdup.c
+++ b/storage/ndb/src/common/util/strdup.c
diff --git a/ndb/src/common/util/testConfigValues/Makefile b/storage/ndb/src/common/util/testConfigValues/Makefile
index 5b7400f5ee3..5b7400f5ee3 100644
--- a/ndb/src/common/util/testConfigValues/Makefile
+++ b/storage/ndb/src/common/util/testConfigValues/Makefile
diff --git a/ndb/src/common/util/testConfigValues/testConfigValues.cpp b/storage/ndb/src/common/util/testConfigValues/testConfigValues.cpp
index 362deb1ddad..362deb1ddad 100644
--- a/ndb/src/common/util/testConfigValues/testConfigValues.cpp
+++ b/storage/ndb/src/common/util/testConfigValues/testConfigValues.cpp
diff --git a/ndb/src/common/util/testProperties/Makefile b/storage/ndb/src/common/util/testProperties/Makefile
index 343c07a49e7..343c07a49e7 100644
--- a/ndb/src/common/util/testProperties/Makefile
+++ b/storage/ndb/src/common/util/testProperties/Makefile
diff --git a/ndb/src/common/util/testProperties/testProperties.cpp b/storage/ndb/src/common/util/testProperties/testProperties.cpp
index e445f7ca3e4..e445f7ca3e4 100644
--- a/ndb/src/common/util/testProperties/testProperties.cpp
+++ b/storage/ndb/src/common/util/testProperties/testProperties.cpp
diff --git a/ndb/src/common/util/testSimpleProperties/Makefile b/storage/ndb/src/common/util/testSimpleProperties/Makefile
index 89d33fa8dd8..89d33fa8dd8 100644
--- a/ndb/src/common/util/testSimpleProperties/Makefile
+++ b/storage/ndb/src/common/util/testSimpleProperties/Makefile
diff --git a/ndb/src/common/util/testSimpleProperties/sp_test.cpp b/storage/ndb/src/common/util/testSimpleProperties/sp_test.cpp
index d4052b64132..d4052b64132 100644
--- a/ndb/src/common/util/testSimpleProperties/sp_test.cpp
+++ b/storage/ndb/src/common/util/testSimpleProperties/sp_test.cpp
diff --git a/ndb/src/common/util/uucode.c b/storage/ndb/src/common/util/uucode.c
index da34d565153..da34d565153 100644
--- a/ndb/src/common/util/uucode.c
+++ b/storage/ndb/src/common/util/uucode.c
diff --git a/storage/ndb/src/common/util/version.c b/storage/ndb/src/common/util/version.c
new file mode 100644
index 00000000000..e87a342d7b1
--- /dev/null
+++ b/storage/ndb/src/common/util/version.c
@@ -0,0 +1,244 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+#include <ndb_version.h>
+#include <version.h>
+#include <basestring_vsnprintf.h>
+#include <NdbEnv.h>
+#include <NdbOut.hpp>
+
+Uint32 getMajor(Uint32 version) {
+ return (version >> 16) & 0xFF;
+}
+
+Uint32 getMinor(Uint32 version) {
+ return (version >> 8) & 0xFF;
+}
+
+Uint32 getBuild(Uint32 version) {
+ return (version >> 0) & 0xFF;
+}
+
+Uint32 makeVersion(Uint32 major, Uint32 minor, Uint32 build) {
+ return MAKE_VERSION(major, minor, build);
+}
+
+char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ];
+const char * getVersionString(Uint32 version, const char * status,
+ char *buf, unsigned sz)
+{
+ if (status && status[0] != 0)
+ basestring_snprintf(buf, sz,
+ "Version %d.%d.%d (%s)",
+ getMajor(version),
+ getMinor(version),
+ getBuild(version),
+ status);
+ else
+ basestring_snprintf(buf, sz,
+ "Version %d.%d.%d",
+ getMajor(version),
+ getMinor(version),
+ getBuild(version));
+ return buf;
+}
+
+typedef enum {
+ UG_Null,
+ UG_Range,
+ UG_Exact
+} UG_MatchType;
+
+struct NdbUpGradeCompatible {
+ Uint32 ownVersion;
+ Uint32 otherVersion;
+ UG_MatchType matchType;
+};
+
+/*#define TEST_VERSION*/
+
+#define HAVE_NDB_SETVERSION
+#ifdef HAVE_NDB_SETVERSION
+Uint32 ndbOwnVersionTesting = 0;
+void
+ndbSetOwnVersion() {
+ char buf[256];
+ if (NdbEnv_GetEnv("NDB_SETVERSION", buf, sizeof(buf))) {
+ Uint32 _v1,_v2,_v3;
+ if (sscanf(buf, "%u.%u.%u", &_v1, &_v2, &_v3) == 3) {
+ ndbOwnVersionTesting = MAKE_VERSION(_v1,_v2,_v3);
+ ndbout_c("Testing: Version set to 0x%x", ndbOwnVersionTesting);
+ }
+ }
+}
+#else
+void ndbSetOwnVersion() {}
+#endif
+
+#ifndef TEST_VERSION
+struct NdbUpGradeCompatible ndbCompatibleTable_full[] = {
+ { MAKE_VERSION(5,1,NDB_VERSION_BUILD), MAKE_VERSION(5,1,0), UG_Range},
+ { MAKE_VERSION(5,0,8), MAKE_VERSION(5,0,3), UG_Range},
+ { MAKE_VERSION(5,0,3), MAKE_VERSION(5,0,2), UG_Exact },
+ { MAKE_VERSION(4,1,12), MAKE_VERSION(4,1,10), UG_Range },
+ { MAKE_VERSION(4,1,10), MAKE_VERSION(4,1,9), UG_Exact },
+ { MAKE_VERSION(4,1,9), MAKE_VERSION(4,1,8), UG_Exact },
+ { MAKE_VERSION(3,5,2), MAKE_VERSION(3,5,1), UG_Exact },
+ { 0, 0, UG_Null }
+};
+
+struct NdbUpGradeCompatible ndbCompatibleTable_upgrade[] = {
+ { MAKE_VERSION(5,0,2), MAKE_VERSION(4,1,8), UG_Exact },
+ { MAKE_VERSION(3,5,4), MAKE_VERSION(3,5,3), UG_Exact },
+ { 0, 0, UG_Null }
+};
+
+#else /* testing purposes */
+
+struct NdbUpGradeCompatible ndbCompatibleTable_full[] = {
+ { MAKE_VERSION(4,1,5), MAKE_VERSION(4,1,0), UG_Range },
+ { MAKE_VERSION(3,6,9), MAKE_VERSION(3,6,1), UG_Range },
+ { MAKE_VERSION(3,6,2), MAKE_VERSION(3,6,1), UG_Range },
+ { MAKE_VERSION(3,5,7), MAKE_VERSION(3,5,0), UG_Range },
+ { MAKE_VERSION(3,5,1), MAKE_VERSION(3,5,0), UG_Range },
+ { NDB_VERSION_D , MAKE_VERSION(NDB_VERSION_MAJOR,NDB_VERSION_MINOR,2), UG_Range },
+ { 0, 0, UG_Null }
+};
+
+struct NdbUpGradeCompatible ndbCompatibleTable_upgrade[] = {
+ { MAKE_VERSION(4,1,5), MAKE_VERSION(3,6,9), UG_Exact },
+ { MAKE_VERSION(3,6,2), MAKE_VERSION(3,5,7), UG_Exact },
+ { MAKE_VERSION(3,5,1), NDB_VERSION_D , UG_Exact },
+ { 0, 0, UG_Null }
+};
+
+
+#endif
+
+void ndbPrintVersion()
+{
+ printf("Version: %u.%u.%u\n",
+ getMajor(ndbGetOwnVersion()),
+ getMinor(ndbGetOwnVersion()),
+ getBuild(ndbGetOwnVersion()));
+}
+
+Uint32
+ndbGetOwnVersion()
+{
+#ifdef HAVE_NDB_SETVERSION
+ if (ndbOwnVersionTesting == 0)
+ return NDB_VERSION_D;
+ else
+ return ndbOwnVersionTesting;
+#else
+ return NDB_VERSION_D;
+#endif
+}
+
+int
+ndbSearchUpgradeCompatibleTable(Uint32 ownVersion, Uint32 otherVersion,
+ struct NdbUpGradeCompatible table[])
+{
+ int i;
+ for (i = 0; table[i].ownVersion != 0 && table[i].otherVersion != 0; i++) {
+ if (table[i].ownVersion == ownVersion ||
+ table[i].ownVersion == (Uint32) ~0) {
+ switch (table[i].matchType) {
+ case UG_Range:
+ if (otherVersion >= table[i].otherVersion){
+ return 1;
+ }
+ break;
+ case UG_Exact:
+ if (otherVersion == table[i].otherVersion){
+ return 1;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+int
+ndbCompatible(Uint32 ownVersion, Uint32 otherVersion, struct NdbUpGradeCompatible table[])
+{
+ if (otherVersion >= ownVersion) {
+ return 1;
+ }
+ return ndbSearchUpgradeCompatibleTable(ownVersion, otherVersion, table);
+}
+
+int
+ndbCompatible_full(Uint32 ownVersion, Uint32 otherVersion)
+{
+ return ndbCompatible(ownVersion, otherVersion, ndbCompatibleTable_full);
+}
+
+int
+ndbCompatible_upgrade(Uint32 ownVersion, Uint32 otherVersion)
+{
+ if (ndbCompatible_full(ownVersion, otherVersion))
+ return 1;
+ return ndbCompatible(ownVersion, otherVersion, ndbCompatibleTable_upgrade);
+}
+
+int
+ndbCompatible_mgmt_ndb(Uint32 ownVersion, Uint32 otherVersion)
+{
+ return ndbCompatible_upgrade(ownVersion, otherVersion);
+}
+
+int
+ndbCompatible_mgmt_api(Uint32 ownVersion, Uint32 otherVersion)
+{
+ return ndbCompatible_upgrade(ownVersion, otherVersion);
+}
+
+int
+ndbCompatible_ndb_mgmt(Uint32 ownVersion, Uint32 otherVersion)
+{
+ return ndbCompatible_full(ownVersion, otherVersion);
+}
+
+int
+ndbCompatible_api_mgmt(Uint32 ownVersion, Uint32 otherVersion)
+{
+ return ndbCompatible_full(ownVersion, otherVersion);
+}
+
+int
+ndbCompatible_api_ndb(Uint32 ownVersion, Uint32 otherVersion)
+{
+ return ndbCompatible_full(ownVersion, otherVersion);
+}
+
+int
+ndbCompatible_ndb_api(Uint32 ownVersion, Uint32 otherVersion)
+{
+ return ndbCompatible_upgrade(ownVersion, otherVersion);
+}
+
+int
+ndbCompatible_ndb_ndb(Uint32 ownVersion, Uint32 otherVersion)
+{
+ return ndbCompatible_upgrade(ownVersion, otherVersion);
+}
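For reference, the version word handled throughout version.c above packs major, minor and build into a single Uint32 (major in bits 16-23, minor in bits 8-15, build in bits 0-7, matching getMajor/getMinor/getBuild). The sketch below is a standalone illustration of that layout and of how the compatibility helpers are typically consulted; MAKE_VERSION is redefined locally only so the snippet compiles outside the NDB headers, and the printed example versions are hypothetical.

/* Standalone sketch of the version word layout used by version.c above.
 * MAKE_VERSION is redefined locally so this compiles without ndb_version.h;
 * the bit positions follow getMajor/getMinor/getBuild in the new file. */
#include <stdio.h>

typedef unsigned int Uint32;

#define MAKE_VERSION(A, B, C) \
  ((((Uint32)(A)) << 16) | (((Uint32)(B)) << 8) | ((Uint32)(C)))

static Uint32 getMajor(Uint32 v) { return (v >> 16) & 0xFF; }
static Uint32 getMinor(Uint32 v) { return (v >> 8) & 0xFF; }
static Uint32 getBuild(Uint32 v) { return v & 0xFF; }

int main(void)
{
  Uint32 own   = MAKE_VERSION(5, 0, 8);   /* e.g. a 5.0.8 ndbd      */
  Uint32 other = MAKE_VERSION(5, 0, 3);   /* e.g. a 5.0.3 API node  */

  printf("own   = 0x%06x (%u.%u.%u)\n", own,
         getMajor(own), getMinor(own), getBuild(own));
  printf("other = 0x%06x (%u.%u.%u)\n", other,
         getMajor(other), getMinor(other), getBuild(other));

  /* ndbCompatible_ndb_api(own, other) would accept this pair: the
   * (5,0,8)/(5,0,3) entry in ndbCompatibleTable_full is a UG_Range
   * match, so any other version >= 5.0.3 is treated as compatible. */
  return 0;
}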
diff --git a/ndb/src/cw/Makefile.am b/storage/ndb/src/cw/Makefile.am
index 7348fc9eab6..7348fc9eab6 100644
--- a/ndb/src/cw/Makefile.am
+++ b/storage/ndb/src/cw/Makefile.am
diff --git a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.cpp b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.cpp
index 59ee3e90451..59ee3e90451 100644
--- a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.cpp
+++ b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.cpp
diff --git a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsp b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsp
index 91007b0a47e..91007b0a47e 100644
--- a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsp
+++ b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsp
diff --git a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsw b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsw
index 1f163a31662..1f163a31662 100644
--- a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsw
+++ b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsw
diff --git a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.h b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.h
index cf7670948a7..cf7670948a7 100644
--- a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.h
+++ b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.h
diff --git a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.ico b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.ico
index 386883523bc..386883523bc 100644
--- a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.ico
+++ b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.rc b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.rc
index 41d75b2b282..41d75b2b282 100644
--- a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.rc
+++ b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.rc
diff --git a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.sln b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.sln
index 86b574d851d..86b574d851d 100644
--- a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.sln
+++ b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.sln
diff --git a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.suo b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.suo
index e7d178f04c3..e7d178f04c3 100644
--- a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.suo
+++ b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.suo
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj
index 56f9f3a8511..56f9f3a8511 100644
--- a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj
+++ b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj
diff --git a/ndb/src/cw/cpcc-win32/C++/Closed.ICO b/storage/ndb/src/cw/cpcc-win32/C++/Closed.ICO
index 044042b42fb..044042b42fb 100644
--- a/ndb/src/cw/cpcc-win32/C++/Closed.ICO
+++ b/storage/ndb/src/cw/cpcc-win32/C++/Closed.ICO
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/C++/NdbControls.cpp b/storage/ndb/src/cw/cpcc-win32/C++/NdbControls.cpp
index 6bbc9a9859b..6bbc9a9859b 100644
--- a/ndb/src/cw/cpcc-win32/C++/NdbControls.cpp
+++ b/storage/ndb/src/cw/cpcc-win32/C++/NdbControls.cpp
diff --git a/ndb/src/cw/cpcc-win32/C++/Open.ICO b/storage/ndb/src/cw/cpcc-win32/C++/Open.ICO
index ab7b05d9df7..ab7b05d9df7 100644
--- a/ndb/src/cw/cpcc-win32/C++/Open.ICO
+++ b/storage/ndb/src/cw/cpcc-win32/C++/Open.ICO
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/C++/StdAfx.cpp b/storage/ndb/src/cw/cpcc-win32/C++/StdAfx.cpp
index 8fcdb4ce158..8fcdb4ce158 100644
--- a/ndb/src/cw/cpcc-win32/C++/StdAfx.cpp
+++ b/storage/ndb/src/cw/cpcc-win32/C++/StdAfx.cpp
diff --git a/ndb/src/cw/cpcc-win32/C++/StdAfx.h b/storage/ndb/src/cw/cpcc-win32/C++/StdAfx.h
index 370d04fb466..370d04fb466 100644
--- a/ndb/src/cw/cpcc-win32/C++/StdAfx.h
+++ b/storage/ndb/src/cw/cpcc-win32/C++/StdAfx.h
diff --git a/ndb/src/cw/cpcc-win32/C++/TreeView.cpp b/storage/ndb/src/cw/cpcc-win32/C++/TreeView.cpp
index db5c62f14bb..db5c62f14bb 100644
--- a/ndb/src/cw/cpcc-win32/C++/TreeView.cpp
+++ b/storage/ndb/src/cw/cpcc-win32/C++/TreeView.cpp
diff --git a/ndb/src/cw/cpcc-win32/C++/TreeView.h b/storage/ndb/src/cw/cpcc-win32/C++/TreeView.h
index 595f9bd6cdc..595f9bd6cdc 100644
--- a/ndb/src/cw/cpcc-win32/C++/TreeView.h
+++ b/storage/ndb/src/cw/cpcc-win32/C++/TreeView.h
diff --git a/ndb/src/cw/cpcc-win32/C++/bmp00001.bmp b/storage/ndb/src/cw/cpcc-win32/C++/bmp00001.bmp
index e50af403eda..e50af403eda 100644
--- a/ndb/src/cw/cpcc-win32/C++/bmp00001.bmp
+++ b/storage/ndb/src/cw/cpcc-win32/C++/bmp00001.bmp
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/C++/resource.h b/storage/ndb/src/cw/cpcc-win32/C++/resource.h
index 0bec552edf6..0bec552edf6 100644
--- a/ndb/src/cw/cpcc-win32/C++/resource.h
+++ b/storage/ndb/src/cw/cpcc-win32/C++/resource.h
diff --git a/ndb/src/cw/cpcc-win32/C++/small.ico b/storage/ndb/src/cw/cpcc-win32/C++/small.ico
index 8f94d9aa828..8f94d9aa828 100644
--- a/ndb/src/cw/cpcc-win32/C++/small.ico
+++ b/storage/ndb/src/cw/cpcc-win32/C++/small.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/C++/toolbar.bmp b/storage/ndb/src/cw/cpcc-win32/C++/toolbar.bmp
index a1059352c66..a1059352c66 100644
--- a/ndb/src/cw/cpcc-win32/C++/toolbar.bmp
+++ b/storage/ndb/src/cw/cpcc-win32/C++/toolbar.bmp
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/csharp/App.ico b/storage/ndb/src/cw/cpcc-win32/csharp/App.ico
index 3a5525fd794..3a5525fd794 100644
--- a/ndb/src/cw/cpcc-win32/csharp/App.ico
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/App.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/csharp/CPC_Form.cs b/storage/ndb/src/cw/cpcc-win32/csharp/CPC_Form.cs
index ea1798c8c67..ea1798c8c67 100644
--- a/ndb/src/cw/cpcc-win32/csharp/CPC_Form.cs
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/CPC_Form.cs
diff --git a/ndb/src/cw/cpcc-win32/csharp/Computer.cs b/storage/ndb/src/cw/cpcc-win32/csharp/Computer.cs
index 9763fac5622..9763fac5622 100644
--- a/ndb/src/cw/cpcc-win32/csharp/Computer.cs
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/Computer.cs
diff --git a/ndb/src/cw/cpcc-win32/csharp/ComputerAddDialog.cs b/storage/ndb/src/cw/cpcc-win32/csharp/ComputerAddDialog.cs
index c01e41f3e60..c01e41f3e60 100644
--- a/ndb/src/cw/cpcc-win32/csharp/ComputerAddDialog.cs
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/ComputerAddDialog.cs
diff --git a/ndb/src/cw/cpcc-win32/csharp/ComputerRemoveDialog.cs b/storage/ndb/src/cw/cpcc-win32/csharp/ComputerRemoveDialog.cs
index 5b4d1b56df7..5b4d1b56df7 100644
--- a/ndb/src/cw/cpcc-win32/csharp/ComputerRemoveDialog.cs
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/ComputerRemoveDialog.cs
diff --git a/ndb/src/cw/cpcc-win32/csharp/DATABASE.ICO b/storage/ndb/src/cw/cpcc-win32/csharp/DATABASE.ICO
index 9689aa88361..9689aa88361 100644
--- a/ndb/src/cw/cpcc-win32/csharp/DATABASE.ICO
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/DATABASE.ICO
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/csharp/Database.cs b/storage/ndb/src/cw/cpcc-win32/csharp/Database.cs
index 39b8c160159..39b8c160159 100644
--- a/ndb/src/cw/cpcc-win32/csharp/Database.cs
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/Database.cs
diff --git a/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj b/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj
index 6384eff8329..6384eff8329 100644
--- a/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj
diff --git a/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj.user b/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj.user
index 68937906d93..68937906d93 100644
--- a/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj.user
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj.user
diff --git a/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.ncb b/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.ncb
index ed3460476b0..ed3460476b0 100644
--- a/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.ncb
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.ncb
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.sln b/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.sln
index ef18b5e94ce..ef18b5e94ce 100644
--- a/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.sln
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.sln
diff --git a/ndb/src/cw/cpcc-win32/csharp/PanelWizard.cs b/storage/ndb/src/cw/cpcc-win32/csharp/PanelWizard.cs
index f492aa64c60..f492aa64c60 100644
--- a/ndb/src/cw/cpcc-win32/csharp/PanelWizard.cs
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/PanelWizard.cs
diff --git a/ndb/src/cw/cpcc-win32/csharp/Process.cs b/storage/ndb/src/cw/cpcc-win32/csharp/Process.cs
index c1ee1b2fe9e..c1ee1b2fe9e 100644
--- a/ndb/src/cw/cpcc-win32/csharp/Process.cs
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/Process.cs
diff --git a/ndb/src/cw/cpcc-win32/csharp/ProcessDefineDialog.cs b/storage/ndb/src/cw/cpcc-win32/csharp/ProcessDefineDialog.cs
index 581b8383e7c..581b8383e7c 100644
--- a/ndb/src/cw/cpcc-win32/csharp/ProcessDefineDialog.cs
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/ProcessDefineDialog.cs
diff --git a/ndb/src/cw/cpcc-win32/csharp/fileaccess/FileMgmt.cs b/storage/ndb/src/cw/cpcc-win32/csharp/fileaccess/FileMgmt.cs
index b3a2361bcb0..b3a2361bcb0 100644
--- a/ndb/src/cw/cpcc-win32/csharp/fileaccess/FileMgmt.cs
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/fileaccess/FileMgmt.cs
diff --git a/ndb/src/cw/cpcc-win32/csharp/simpleparser/SimpleCPCParser.cs b/storage/ndb/src/cw/cpcc-win32/csharp/simpleparser/SimpleCPCParser.cs
index b8ff2844af9..b8ff2844af9 100644
--- a/ndb/src/cw/cpcc-win32/csharp/simpleparser/SimpleCPCParser.cs
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/simpleparser/SimpleCPCParser.cs
diff --git a/ndb/src/cw/cpcc-win32/csharp/socketcomm/SocketComm.cs b/storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/SocketComm.cs
index 2cef5d34f17..2cef5d34f17 100644
--- a/ndb/src/cw/cpcc-win32/csharp/socketcomm/SocketComm.cs
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/SocketComm.cs
diff --git a/ndb/src/cw/cpcc-win32/csharp/socketcomm/myTcpClient.cs b/storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/myTcpClient.cs
index 9c0d82a0b27..9c0d82a0b27 100644
--- a/ndb/src/cw/cpcc-win32/csharp/socketcomm/myTcpClient.cs
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/myTcpClient.cs
diff --git a/ndb/src/cw/cpcc-win32/csharp/startDatabaseDlg.cs b/storage/ndb/src/cw/cpcc-win32/csharp/startDatabaseDlg.cs
index cecfcaeb0f3..cecfcaeb0f3 100644
--- a/ndb/src/cw/cpcc-win32/csharp/startDatabaseDlg.cs
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/startDatabaseDlg.cs
diff --git a/ndb/src/cw/cpcc-win32/csharp/telnetclient/telnetClient.cs b/storage/ndb/src/cw/cpcc-win32/csharp/telnetclient/telnetClient.cs
index a7966947e1f..a7966947e1f 100644
--- a/ndb/src/cw/cpcc-win32/csharp/telnetclient/telnetClient.cs
+++ b/storage/ndb/src/cw/cpcc-win32/csharp/telnetclient/telnetClient.cs
diff --git a/ndb/src/cw/cpcc-win32/vb6/Computer.cls b/storage/ndb/src/cw/cpcc-win32/vb6/Computer.cls
index 5b42dfeadb6..5b42dfeadb6 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Computer.cls
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Computer.cls
diff --git a/ndb/src/cw/cpcc-win32/vb6/Database.cls b/storage/ndb/src/cw/cpcc-win32/vb6/Database.cls
index dfb1195d910..dfb1195d910 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Database.cls
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Database.cls
diff --git a/ndb/src/cw/cpcc-win32/vb6/Icon 110.ico b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 110.ico
index 34b85992394..34b85992394 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Icon 110.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 110.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/Icon 231.ico b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 231.ico
index fe30ff5d1e6..fe30ff5d1e6 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Icon 231.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 231.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/Icon 237.ico b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 237.ico
index af0a1294f9e..af0a1294f9e 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Icon 237.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 237.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/Icon 241.ico b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 241.ico
index e8caf6e9a73..e8caf6e9a73 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Icon 241.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 241.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/Icon 242.ico b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 242.ico
index 2deff5472bc..2deff5472bc 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Icon 242.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 242.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/Icon 270.ico b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 270.ico
index 9cab239de23..9cab239de23 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Icon 270.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 270.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/Icon 271.ico b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 271.ico
index f05c95f74fe..f05c95f74fe 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Icon 271.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 271.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/Icon 273.ico b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 273.ico
index 800606eda0c..800606eda0c 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Icon 273.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 273.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/Icon 31.ico b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 31.ico
index a2404977771..a2404977771 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Icon 31.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 31.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/Icon 337.ico b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 337.ico
index 9dadb12cfbe..9dadb12cfbe 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Icon 337.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 337.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/Icon 338.ico b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 338.ico
index a13c80c81b4..a13c80c81b4 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Icon 338.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 338.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/Icon 339.ico b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 339.ico
index 5eb4c06815d..5eb4c06815d 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Icon 339.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Icon 339.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/MSSCCPRJ.SCC b/storage/ndb/src/cw/cpcc-win32/vb6/MSSCCPRJ.SCC
index 3100640f8bd..3100640f8bd 100644
--- a/ndb/src/cw/cpcc-win32/vb6/MSSCCPRJ.SCC
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/MSSCCPRJ.SCC
diff --git a/ndb/src/cw/cpcc-win32/vb6/Module1.bas b/storage/ndb/src/cw/cpcc-win32/vb6/Module1.bas
index ae8ed444a41..ae8ed444a41 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Module1.bas
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Module1.bas
diff --git a/ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbp b/storage/ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbp
index dc8f3780a74..dc8f3780a74 100644
--- a/ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbp
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbp
diff --git a/ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbw b/storage/ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbw
index 825abbc923a..825abbc923a 100644
--- a/ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbw
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbw
diff --git a/ndb/src/cw/cpcc-win32/vb6/Process.cls b/storage/ndb/src/cw/cpcc-win32/vb6/Process.cls
index fcb4c2cbb2c..fcb4c2cbb2c 100644
--- a/ndb/src/cw/cpcc-win32/vb6/Process.cls
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/Process.cls
diff --git a/ndb/src/cw/cpcc-win32/vb6/closed folder.ico b/storage/ndb/src/cw/cpcc-win32/vb6/closed folder.ico
index fe82350d376..fe82350d376 100644
--- a/ndb/src/cw/cpcc-win32/vb6/closed folder.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/closed folder.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/computer.ico b/storage/ndb/src/cw/cpcc-win32/vb6/computer.ico
index d73302d1cd5..d73302d1cd5 100644
--- a/ndb/src/cw/cpcc-win32/vb6/computer.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/computer.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/frmAbout.frm b/storage/ndb/src/cw/cpcc-win32/vb6/frmAbout.frm
index b842d20de21..b842d20de21 100644
--- a/ndb/src/cw/cpcc-win32/vb6/frmAbout.frm
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/frmAbout.frm
diff --git a/ndb/src/cw/cpcc-win32/vb6/frmLogin.frm b/storage/ndb/src/cw/cpcc-win32/vb6/frmLogin.frm
index d4d663c93c2..d4d663c93c2 100644
--- a/ndb/src/cw/cpcc-win32/vb6/frmLogin.frm
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/frmLogin.frm
diff --git a/ndb/src/cw/cpcc-win32/vb6/frmMain.frm b/storage/ndb/src/cw/cpcc-win32/vb6/frmMain.frm
index a4bf5b58941..a4bf5b58941 100644
--- a/ndb/src/cw/cpcc-win32/vb6/frmMain.frm
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/frmMain.frm
diff --git a/ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frm b/storage/ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frm
index eae5802493c..eae5802493c 100644
--- a/ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frm
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frm
diff --git a/ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frx b/storage/ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frx
index 593f4708db8..593f4708db8 100644
--- a/ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frx
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frx
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase.frx b/storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase.frx
index b20c2b651ae..b20c2b651ae 100644
--- a/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase.frx
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase.frx
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase1.frm b/storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase1.frm
index 3fa1fd4c4e8..3fa1fd4c4e8 100644
--- a/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase1.frm
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase1.frm
diff --git a/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.frm b/storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.frm
index 49806a695ea..49806a695ea 100644
--- a/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.frm
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.frm
diff --git a/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.log b/storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.log
index 808b21866e5..808b21866e5 100644
--- a/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.log
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.log
diff --git a/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase3.frm b/storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase3.frm
index ba050a58a09..ba050a58a09 100644
--- a/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase3.frm
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase3.frm
diff --git a/ndb/src/cw/cpcc-win32/vb6/frmOptions.frm b/storage/ndb/src/cw/cpcc-win32/vb6/frmOptions.frm
index e526a35b3ec..e526a35b3ec 100644
--- a/ndb/src/cw/cpcc-win32/vb6/frmOptions.frm
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/frmOptions.frm
diff --git a/ndb/src/cw/cpcc-win32/vb6/frmSplash.frx b/storage/ndb/src/cw/cpcc-win32/vb6/frmSplash.frx
index fee0c5c59de..fee0c5c59de 100644
--- a/ndb/src/cw/cpcc-win32/vb6/frmSplash.frx
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/frmSplash.frx
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/networking.ico b/storage/ndb/src/cw/cpcc-win32/vb6/networking.ico
index 6bbf8022fc6..6bbf8022fc6 100644
--- a/ndb/src/cw/cpcc-win32/vb6/networking.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/networking.ico
Binary files differ
diff --git a/ndb/src/cw/cpcc-win32/vb6/open folder.ico b/storage/ndb/src/cw/cpcc-win32/vb6/open folder.ico
index 7bb32cc83d3..7bb32cc83d3 100644
--- a/ndb/src/cw/cpcc-win32/vb6/open folder.ico
+++ b/storage/ndb/src/cw/cpcc-win32/vb6/open folder.ico
Binary files differ
diff --git a/ndb/src/cw/cpcd/APIService.cpp b/storage/ndb/src/cw/cpcd/APIService.cpp
index e7a2092c15d..e7a2092c15d 100644
--- a/ndb/src/cw/cpcd/APIService.cpp
+++ b/storage/ndb/src/cw/cpcd/APIService.cpp
diff --git a/ndb/src/cw/cpcd/APIService.hpp b/storage/ndb/src/cw/cpcd/APIService.hpp
index 3586d64187e..3586d64187e 100644
--- a/ndb/src/cw/cpcd/APIService.hpp
+++ b/storage/ndb/src/cw/cpcd/APIService.hpp
diff --git a/ndb/src/cw/cpcd/CPCD.cpp b/storage/ndb/src/cw/cpcd/CPCD.cpp
index 69a7b840528..69a7b840528 100644
--- a/ndb/src/cw/cpcd/CPCD.cpp
+++ b/storage/ndb/src/cw/cpcd/CPCD.cpp
diff --git a/ndb/src/cw/cpcd/CPCD.hpp b/storage/ndb/src/cw/cpcd/CPCD.hpp
index aecc43150c4..aecc43150c4 100644
--- a/ndb/src/cw/cpcd/CPCD.hpp
+++ b/storage/ndb/src/cw/cpcd/CPCD.hpp
diff --git a/storage/ndb/src/cw/cpcd/Makefile.am b/storage/ndb/src/cw/cpcd/Makefile.am
new file mode 100644
index 00000000000..e0b22be6b8d
--- /dev/null
+++ b/storage/ndb/src/cw/cpcd/Makefile.am
@@ -0,0 +1,20 @@
+
+ndbbin_PROGRAMS = ndb_cpcd
+
+ndb_cpcd_SOURCES = main.cpp CPCD.cpp Process.cpp APIService.cpp Monitor.cpp common.cpp
+
+LDADD_LOC = \
+ $(top_builddir)/storage/ndb/src/libndbclient.la \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_util.mk.am
+
+ndb_cpcd_LDFLAGS = @ndb_bin_am_ldflags@
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp:
diff --git a/ndb/src/cw/cpcd/Monitor.cpp b/storage/ndb/src/cw/cpcd/Monitor.cpp
index 141de926d4d..141de926d4d 100644
--- a/ndb/src/cw/cpcd/Monitor.cpp
+++ b/storage/ndb/src/cw/cpcd/Monitor.cpp
diff --git a/ndb/src/cw/cpcd/Process.cpp b/storage/ndb/src/cw/cpcd/Process.cpp
index 431c96e3320..431c96e3320 100644
--- a/ndb/src/cw/cpcd/Process.cpp
+++ b/storage/ndb/src/cw/cpcd/Process.cpp
diff --git a/ndb/src/cw/cpcd/common.cpp b/storage/ndb/src/cw/cpcd/common.cpp
index 53c0e4d5a64..53c0e4d5a64 100644
--- a/ndb/src/cw/cpcd/common.cpp
+++ b/storage/ndb/src/cw/cpcd/common.cpp
diff --git a/ndb/src/cw/cpcd/common.hpp b/storage/ndb/src/cw/cpcd/common.hpp
index 4f5f702762f..4f5f702762f 100644
--- a/ndb/src/cw/cpcd/common.hpp
+++ b/storage/ndb/src/cw/cpcd/common.hpp
diff --git a/ndb/src/cw/cpcd/main.cpp b/storage/ndb/src/cw/cpcd/main.cpp
index c320f07ef04..c320f07ef04 100644
--- a/ndb/src/cw/cpcd/main.cpp
+++ b/storage/ndb/src/cw/cpcd/main.cpp
diff --git a/ndb/src/cw/test/socketclient/Makefile b/storage/ndb/src/cw/test/socketclient/Makefile
index 04f11f031e5..04f11f031e5 100644
--- a/ndb/src/cw/test/socketclient/Makefile
+++ b/storage/ndb/src/cw/test/socketclient/Makefile
diff --git a/ndb/src/cw/test/socketclient/socketClientTest.cpp b/storage/ndb/src/cw/test/socketclient/socketClientTest.cpp
index 423c196aa43..423c196aa43 100644
--- a/ndb/src/cw/test/socketclient/socketClientTest.cpp
+++ b/storage/ndb/src/cw/test/socketclient/socketClientTest.cpp
diff --git a/ndb/src/cw/util/ClientInterface.cpp b/storage/ndb/src/cw/util/ClientInterface.cpp
index 627b622f1dd..627b622f1dd 100644
--- a/ndb/src/cw/util/ClientInterface.cpp
+++ b/storage/ndb/src/cw/util/ClientInterface.cpp
diff --git a/ndb/src/cw/util/ClientInterface.hpp b/storage/ndb/src/cw/util/ClientInterface.hpp
index 66ecfe05197..66ecfe05197 100644
--- a/ndb/src/cw/util/ClientInterface.hpp
+++ b/storage/ndb/src/cw/util/ClientInterface.hpp
diff --git a/ndb/src/cw/util/Makefile b/storage/ndb/src/cw/util/Makefile
index f5ab16721be..f5ab16721be 100644
--- a/ndb/src/cw/util/Makefile
+++ b/storage/ndb/src/cw/util/Makefile
diff --git a/ndb/src/cw/util/SocketRegistry.cpp b/storage/ndb/src/cw/util/SocketRegistry.cpp
index 1dbb402f7c9..1dbb402f7c9 100644
--- a/ndb/src/cw/util/SocketRegistry.cpp
+++ b/storage/ndb/src/cw/util/SocketRegistry.cpp
diff --git a/ndb/src/cw/util/SocketRegistry.hpp b/storage/ndb/src/cw/util/SocketRegistry.hpp
index 2b079156967..2b079156967 100644
--- a/ndb/src/cw/util/SocketRegistry.hpp
+++ b/storage/ndb/src/cw/util/SocketRegistry.hpp
diff --git a/ndb/src/cw/util/SocketService.cpp b/storage/ndb/src/cw/util/SocketService.cpp
index b993ec8c2c1..b993ec8c2c1 100644
--- a/ndb/src/cw/util/SocketService.cpp
+++ b/storage/ndb/src/cw/util/SocketService.cpp
diff --git a/ndb/src/cw/util/SocketService.hpp b/storage/ndb/src/cw/util/SocketService.hpp
index 7a0c3a2fd91..7a0c3a2fd91 100644
--- a/ndb/src/cw/util/SocketService.hpp
+++ b/storage/ndb/src/cw/util/SocketService.hpp
diff --git a/ndb/src/external/WIN32.x86/sci/lib/SISCI_LIBRARY_WIN32.TXT b/storage/ndb/src/external/WIN32.x86/sci/lib/SISCI_LIBRARY_WIN32.TXT
index 97fe959bb2c..97fe959bb2c 100644
--- a/ndb/src/external/WIN32.x86/sci/lib/SISCI_LIBRARY_WIN32.TXT
+++ b/storage/ndb/src/external/WIN32.x86/sci/lib/SISCI_LIBRARY_WIN32.TXT
diff --git a/ndb/src/external/WIN32.x86/sci/lib/scilib.lib b/storage/ndb/src/external/WIN32.x86/sci/lib/scilib.lib
index 572169a2016..572169a2016 100644
--- a/ndb/src/external/WIN32.x86/sci/lib/scilib.lib
+++ b/storage/ndb/src/external/WIN32.x86/sci/lib/scilib.lib
Binary files differ
diff --git a/ndb/src/external/WIN32.x86/sci/lib/scilib_md.lib b/storage/ndb/src/external/WIN32.x86/sci/lib/scilib_md.lib
index f18cba61336..f18cba61336 100644
--- a/ndb/src/external/WIN32.x86/sci/lib/scilib_md.lib
+++ b/storage/ndb/src/external/WIN32.x86/sci/lib/scilib_md.lib
Binary files differ
diff --git a/ndb/src/external/WIN32.x86/sci/lib/scilib_mt.lib b/storage/ndb/src/external/WIN32.x86/sci/lib/scilib_mt.lib
index 3e9982468ea..3e9982468ea 100644
--- a/ndb/src/external/WIN32.x86/sci/lib/scilib_mt.lib
+++ b/storage/ndb/src/external/WIN32.x86/sci/lib/scilib_mt.lib
Binary files differ
diff --git a/ndb/src/external/WIN32.x86/sci/lib/sisci_api.lib b/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api.lib
index 3fbff6ec809..3fbff6ec809 100644
--- a/ndb/src/external/WIN32.x86/sci/lib/sisci_api.lib
+++ b/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api.lib
Binary files differ
diff --git a/ndb/src/external/WIN32.x86/sci/lib/sisci_api_md.lib b/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_md.lib
index 1d8d42d1d35..1d8d42d1d35 100644
--- a/ndb/src/external/WIN32.x86/sci/lib/sisci_api_md.lib
+++ b/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_md.lib
Binary files differ
diff --git a/ndb/src/external/WIN32.x86/sci/lib/sisci_api_mt.lib b/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_mt.lib
index 017fad7ba31..017fad7ba31 100644
--- a/ndb/src/external/WIN32.x86/sci/lib/sisci_api_mt.lib
+++ b/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_mt.lib
Binary files differ
diff --git a/storage/ndb/src/kernel/Makefile.am b/storage/ndb/src/kernel/Makefile.am
new file mode 100644
index 00000000000..6e3664f0997
--- /dev/null
+++ b/storage/ndb/src/kernel/Makefile.am
@@ -0,0 +1,75 @@
+SUBDIRS = error blocks vm
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+
+ndbbin_PROGRAMS = ndbd
+
+ndbd_SOURCES = main.cpp SimBlockList.cpp
+
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+INCLUDES += \
+ -Iblocks/cmvmi \
+ -Iblocks/dbacc \
+ -Iblocks/dbdict \
+ -Iblocks/dbdih \
+ -Iblocks/dblqh \
+ -Iblocks/dbtc \
+ -Iblocks/dbtup \
+ -Iblocks/ndbfs \
+ -Iblocks/ndbcntr \
+ -Iblocks/qmgr \
+ -Iblocks/trix \
+ -Iblocks/backup \
+ -Iblocks/dbutil \
+ -Iblocks/suma \
+ -Iblocks/grep \
+ -Iblocks/dbtux
+
+LDADD += \
+ blocks/cmvmi/libcmvmi.a \
+ blocks/dbacc/libdbacc.a \
+ blocks/dbdict/libdbdict.a \
+ blocks/dbdih/libdbdih.a \
+ blocks/dblqh/libdblqh.a \
+ blocks/dbtc/libdbtc.a \
+ blocks/dbtup/libdbtup.a \
+ blocks/ndbfs/libndbfs.a \
+ blocks/ndbcntr/libndbcntr.a \
+ blocks/qmgr/libqmgr.a \
+ blocks/trix/libtrix.a \
+ blocks/backup/libbackup.a \
+ blocks/dbutil/libdbutil.a \
+ blocks/suma/libsuma.a \
+ blocks/grep/libgrep.a \
+ blocks/dbtux/libdbtux.a \
+ vm/libkernel.a \
+ error/liberror.a \
+ $(top_builddir)/storage/ndb/src/common/transporter/libtransporter.la \
+ $(top_builddir)/storage/ndb/src/common/debugger/libtrace.la \
+ $(top_builddir)/storage/ndb/src/common/debugger/signaldata/libsignaldataprint.la \
+ $(top_builddir)/storage/ndb/src/common/logger/liblogger.la \
+ $(top_builddir)/storage/ndb/src/common/mgmcommon/libmgmsrvcommon.la \
+ $(top_builddir)/storage/ndb/src/mgmapi/libmgmapi.la \
+ $(top_builddir)/storage/ndb/src/common/portlib/libportlib.la \
+ $(top_builddir)/storage/ndb/src/common/util/libgeneral.la \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: ndbd.dsp
+
+ndbd.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(ndbbin_PROGRAMS)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(ndbd_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
diff --git a/ndb/src/kernel/SimBlockList.cpp b/storage/ndb/src/kernel/SimBlockList.cpp
index bf3958cf137..bf3958cf137 100644
--- a/ndb/src/kernel/SimBlockList.cpp
+++ b/storage/ndb/src/kernel/SimBlockList.cpp
diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
index fedddb58c0d..fedddb58c0d 100644
--- a/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
diff --git a/ndb/src/kernel/blocks/Makefile.am b/storage/ndb/src/kernel/blocks/Makefile.am
index 7ee90e6239f..7ee90e6239f 100644
--- a/ndb/src/kernel/blocks/Makefile.am
+++ b/storage/ndb/src/kernel/blocks/Makefile.am
diff --git a/ndb/src/kernel/blocks/NodeRestart.new.txt b/storage/ndb/src/kernel/blocks/NodeRestart.new.txt
index 00ab8f0c208..00ab8f0c208 100644
--- a/ndb/src/kernel/blocks/NodeRestart.new.txt
+++ b/storage/ndb/src/kernel/blocks/NodeRestart.new.txt
diff --git a/ndb/src/kernel/blocks/NodeRestart.txt b/storage/ndb/src/kernel/blocks/NodeRestart.txt
index e9f277bb39e..e9f277bb39e 100644
--- a/ndb/src/kernel/blocks/NodeRestart.txt
+++ b/storage/ndb/src/kernel/blocks/NodeRestart.txt
diff --git a/ndb/src/kernel/blocks/Start.txt b/storage/ndb/src/kernel/blocks/Start.txt
index 3e805ebab55..3e805ebab55 100644
--- a/ndb/src/kernel/blocks/Start.txt
+++ b/storage/ndb/src/kernel/blocks/Start.txt
diff --git a/ndb/src/kernel/blocks/SystemRestart.new.txt b/storage/ndb/src/kernel/blocks/SystemRestart.new.txt
index 3738de28df8..3738de28df8 100644
--- a/ndb/src/kernel/blocks/SystemRestart.new.txt
+++ b/storage/ndb/src/kernel/blocks/SystemRestart.new.txt
diff --git a/ndb/src/kernel/blocks/SystemRestart.txt b/storage/ndb/src/kernel/blocks/SystemRestart.txt
index 235dfb968fa..235dfb968fa 100644
--- a/ndb/src/kernel/blocks/SystemRestart.txt
+++ b/storage/ndb/src/kernel/blocks/SystemRestart.txt
diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp
index 327fcc33aff..327fcc33aff 100644
--- a/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp
diff --git a/ndb/src/kernel/blocks/backup/Backup.hpp b/storage/ndb/src/kernel/blocks/backup/Backup.hpp
index 7bcea5655b4..7bcea5655b4 100644
--- a/ndb/src/kernel/blocks/backup/Backup.hpp
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.hpp
diff --git a/ndb/src/kernel/blocks/backup/Backup.txt b/storage/ndb/src/kernel/blocks/backup/Backup.txt
index 73942c6ebdc..73942c6ebdc 100644
--- a/ndb/src/kernel/blocks/backup/Backup.txt
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.txt
diff --git a/ndb/src/kernel/blocks/backup/BackupFormat.hpp b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp
index 65dd2ad9053..65dd2ad9053 100644
--- a/ndb/src/kernel/blocks/backup/BackupFormat.hpp
+++ b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp
diff --git a/ndb/src/kernel/blocks/backup/BackupInit.cpp b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp
index eae72f43db5..eae72f43db5 100644
--- a/ndb/src/kernel/blocks/backup/BackupInit.cpp
+++ b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp
diff --git a/ndb/src/kernel/blocks/backup/FsBuffer.hpp b/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp
index 2f3c7daae43..2f3c7daae43 100644
--- a/ndb/src/kernel/blocks/backup/FsBuffer.hpp
+++ b/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp
diff --git a/storage/ndb/src/kernel/blocks/backup/Makefile.am b/storage/ndb/src/kernel/blocks/backup/Makefile.am
new file mode 100644
index 00000000000..8d1df032514
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/backup/Makefile.am
@@ -0,0 +1,24 @@
+
+noinst_LIBRARIES = libbackup.a
+
+libbackup_a_SOURCES = Backup.cpp BackupInit.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libbackup.dsp
+
+libbackup.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libbackup_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/backup/read.cpp b/storage/ndb/src/kernel/blocks/backup/read.cpp
index 89cc08ee9de..89cc08ee9de 100644
--- a/ndb/src/kernel/blocks/backup/read.cpp
+++ b/storage/ndb/src/kernel/blocks/backup/read.cpp
diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index c313abc28eb..c313abc28eb 100644
--- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
index 1c91f564749..1c91f564749 100644
--- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Makefile.am b/storage/ndb/src/kernel/blocks/cmvmi/Makefile.am
new file mode 100644
index 00000000000..c9a105dc613
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Makefile.am
@@ -0,0 +1,24 @@
+
+noinst_LIBRARIES = libcmvmi.a
+
+libcmvmi_a_SOURCES = Cmvmi.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libcmvmi.dsp
+
+libcmvmi.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libcmvmi_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
new file mode 100644
index 00000000000..aa1056e5570
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
@@ -0,0 +1,1470 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBACC_H
+#define DBACC_H
+
+
+
+#include <pc.hpp>
+#include <SimulatedBlock.hpp>
+
+// primary key is stored in TUP
+#include <Dbtup.hpp>
+
+#ifdef DBACC_C
+// Debug Macros
+#define dbgWord32(ptr, ind, val)
+
+/*
+#define dbgWord32(ptr, ind, val) \
+if(debug_jan){ \
+tmp_val = val; \
+switch(ind){ \
+case 1: strcpy(tmp_string, "ZPOS_PAGE_TYPE "); \
+break; \
+case 2: strcpy(tmp_string, "ZPOS_NO_ELEM_IN_PAGE"); \
+break; \
+case 3: strcpy(tmp_string, "ZPOS_CHECKSUM "); \
+break; \
+case 4: strcpy(tmp_string, "ZPOS_OVERFLOWREC "); \
+break; \
+case 5: strcpy(tmp_string, "ZPOS_FREE_AREA_IN_PAGE"); \
+break; \
+case 6: strcpy(tmp_string, "ZPOS_LAST_INDEX "); \
+break; \
+case 7: strcpy(tmp_string, "ZPOS_INSERT_INDEX "); \
+break; \
+case 8: strcpy(tmp_string, "ZPOS_ARRAY_POS "); \
+break; \
+case 9: strcpy(tmp_string, "ZPOS_NEXT_FREE_INDEX"); \
+break; \
+case 10: strcpy(tmp_string, "ZPOS_NEXT_PAGE "); \
+break; \
+case 11: strcpy(tmp_string, "ZPOS_PREV_PAGE "); \
+break; \
+default: sprintf(tmp_string, "%-20d", ind);\
+} \
+ndbout << "Ptr: " << ptr.p->word32 << " \tIndex: " << tmp_string << " \tValue: " << tmp_val << " \tLINE: " << __LINE__ << endl; \
+}\
+*/
+
+#define dbgUndoword(ptr, ind, val)
+
+// Constants
+/** ------------------------------------------------------------------------
+ * THESE ARE CONSTANTS THAT ARE USED FOR DEFINING THE SIZE OF BUFFERS, THE
+ * SIZE OF PAGE HEADERS, THE NUMBER OF BUFFERS IN A PAGE AND A NUMBER OF
+ * OTHER CONSTANTS WHICH ARE CHANGED WHEN THE BUFFER SIZE IS CHANGED.
+ * ----------------------------------------------------------------------- */
+#define ZHEAD_SIZE 32
+#define ZCON_HEAD_SIZE 2
+#define ZBUF_SIZE 28
+#define ZEMPTYLIST 72
+#define ZUP_LIMIT 14
+#define ZDOWN_LIMIT 12
+#define ZSHIFT_PLUS 5
+#define ZSHIFT_MINUS 2
+#define ZFREE_LIMIT 65
+#define ZNO_CONTAINERS 64
+#define ZELEM_HEAD_SIZE 1
+/* ------------------------------------------------------------------------- */
+/* THESE CONSTANTS DEFINE THE USE OF THE PAGE HEADER IN THE INDEX PAGES. */
+/* ------------------------------------------------------------------------- */
+#define ZPOS_PAGE_ID 0
+#define ZPOS_PAGE_TYPE 1
+#define ZPOS_PAGE_TYPE_BIT 14
+#define ZPOS_EMPTY_LIST 1
+#define ZPOS_ALLOC_CONTAINERS 2
+#define ZPOS_CHECKSUM 3
+#define ZPOS_OVERFLOWREC 4
+#define ZPOS_NO_ELEM_IN_PAGE 2
+#define ZPOS_FREE_AREA_IN_PAGE 5
+#define ZPOS_LAST_INDEX 6
+#define ZPOS_INSERT_INDEX 7
+#define ZPOS_ARRAY_POS 8
+#define ZPOS_NEXT_FREE_INDEX 9
+#define ZPOS_NEXT_PAGE 10
+#define ZPOS_PREV_PAGE 11
+#define ZNORMAL_PAGE_TYPE 0
+#define ZOVERFLOW_PAGE_TYPE 1
+#define ZDEFAULT_LIST 3
+#define ZWORDS_IN_PAGE 2048
+/* --------------------------------------------------------------------------------- */
+/* CONSTANTS FOR THE ZERO PAGES */
+/* --------------------------------------------------------------------------------- */
+#define ZPAGEZERO_PREV_UNDOP 8
+#define ZPAGEZERO_NO_OVER_PAGE 9
+#define ZPAGEZERO_TABID 10
+#define ZPAGEZERO_FRAGID0 11
+#define ZPAGEZERO_FRAGID1 12
+#define ZPAGEZERO_HASH_CHECK 13
+#define ZPAGEZERO_DIRSIZE 14
+#define ZPAGEZERO_EXPCOUNTER 15
+#define ZPAGEZERO_NEXT_UNDO_FILE 16
+#define ZPAGEZERO_SLACK 17
+#define ZPAGEZERO_NO_PAGES 18
+#define ZPAGEZERO_HASHCHECKBIT 19
+#define ZPAGEZERO_K 20
+#define ZPAGEZERO_LHFRAGBITS 21
+#define ZPAGEZERO_LHDIRBITS 22
+#define ZPAGEZERO_LOCALKEYLEN 23
+#define ZPAGEZERO_MAXP 24
+#define ZPAGEZERO_MAXLOADFACTOR 25
+#define ZPAGEZERO_MINLOADFACTOR 26
+#define ZPAGEZERO_MYFID 27
+#define ZPAGEZERO_LAST_OVER_INDEX 28
+#define ZPAGEZERO_P 29
+#define ZPAGEZERO_NO_OF_ELEMENTS 30
+#define ZPAGEZERO_ELEMENT_LENGTH 31
+#define ZPAGEZERO_KEY_LENGTH 32
+#define ZPAGEZERO_NODETYPE 33
+#define ZPAGEZERO_SLACK_CHECK 34
+/* --------------------------------------------------------------------------------- */
+/* CONSTANTS IN ALPHABETICAL ORDER */
+/* --------------------------------------------------------------------------------- */
+#define ZADDFRAG 0
+#define ZCOPY_NEXT 1
+#define ZCOPY_NEXT_COMMIT 2
+#define ZCOPY_COMMIT 3
+#define ZCOPY_REPEAT 4
+#define ZCOPY_ABORT 5
+#define ZCOPY_CLOSE 6
+#define ZDIRARRAY 68
+#define ZDIRRANGESIZE 65
+//#define ZEMPTY_FRAGMENT 0
+#define ZFRAGMENTSIZE 64
+#define ZFIRSTTIME 1
+#define ZFS_CONNECTSIZE 300
+#define ZFS_OPSIZE 100
+#define ZKEYINKEYREQ 4
+#define ZLCP_CONNECTSIZE 30
+#define ZLEFT 1
+#define ZLOCALLOGFILE 2
+#define ZLOCKED 0
+#define ZMAXSCANSIGNALLEN 20
+#define ZMAINKEYLEN 8
+#define ZMAX_UNDO_VERSION 4
+#define ZNO_OF_DISK_VERSION 3
+#define ZNO_OF_OP_PER_SIGNAL 20
+//#define ZNOT_EMPTY_FRAGMENT 1
+#define ZNR_OF_UNDO_PAGE_GROUP 16
+#define ZOP_HEAD_INFO_LN 3
+#define ZOPRECSIZE 740
+#define ZOVERFLOWRECSIZE 5
+#define ZPAGE8_BASE_ADD 1
+#define ZPAGESIZE 128
+#define ZPARALLEL_QUEUE 1
+#define ZPDIRECTORY 1
+#define ZSCAN_MAX_LOCK 4
+#define ZSERIAL_QUEUE 2
+#define ZSPH1 1
+#define ZSPH2 2
+#define ZSPH3 3
+#define ZSPH6 6
+#define ZREADLOCK 0
+#define ZRIGHT 2
+#define ZROOTFRAGMENTSIZE 32
+#define ZSCAN_LOCK_ALL 3
+#define ZSCAN_OP 5
+#define ZSCAN_REC_SIZE 256
+#define ZSR_VERSION_REC_SIZE 16
+#define ZSTAND_BY 2
+#define ZTABLESIZE 16
+#define ZTABMAXINDEX 3
+#define ZUNDEFINED_OP 6
+#define ZUNDOHEADSIZE 7
+#define ZUNLOCKED 1
+#define ZUNDOPAGE_BASE_ADD 2
+#define ZUNDOPAGEINDEXBITS 13
+#define ZUNDOPAGEINDEX_MASK 0x1fff
+#define ZWRITEPAGESIZE 8
+#define ZWRITE_UNDOPAGESIZE 2
+#define ZMIN_UNDO_PAGES_AT_COMMIT 4
+#define ZMIN_UNDO_PAGES_AT_OPERATION 10
+#define ZMIN_UNDO_PAGES_AT_EXPAND 16
+
+/* --------------------------------------------------------------------------------- */
+/* CONTINUEB CODES */
+/* --------------------------------------------------------------------------------- */
+#define ZLOAD_BAL_LCP_TIMER 0
+#define ZINITIALISE_RECORDS 1
+#define ZSR_READ_PAGES_ALLOC 2
+#define ZSTART_UNDO 3
+#define ZSEND_SCAN_HBREP 4
+#define ZREL_ROOT_FRAG 5
+#define ZREL_FRAG 6
+#define ZREL_DIR 7
+#define ZREPORT_MEMORY_USAGE 8
+#define ZLCP_OP_WRITE_RT_BREAK 9
+
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES */
+/* ------------------------------------------------------------------------- */
+#define ZLIMIT_OF_ERROR 600 // Limit check for error codes
+#define ZCHECKROOT_ERROR 601 // Delete fragment error code
+#define ZCONNECT_SIZE_ERROR 602 // ACC_SEIZEREF
+#define ZDIR_RANGE_ERROR 603 // Add fragment error code
+#define ZFULL_FRAGRECORD_ERROR 604 // Add fragment error code
+#define ZFULL_ROOTFRAGRECORD_ERROR 605 // Add fragment error code
+#define ZROOTFRAG_STATE_ERROR 606 // Add fragment
+#define ZOVERTAB_REC_ERROR 607 // Add fragment
+
+#define ZSCAN_REFACC_CONNECT_ERROR 608 // ACC_SCANREF
+#define ZFOUR_ACTIVE_SCAN_ERROR 609 // ACC_SCANREF
+#define ZNULL_SCAN_REC_ERROR 610 // ACC_SCANREF
+
+#define ZDIRSIZE_ERROR 623
+#define ZOVER_REC_ERROR 624 // Insufficient Space
+#define ZPAGESIZE_ERROR 625
+#define ZTUPLE_DELETED_ERROR 626
+#define ZREAD_ERROR 626
+#define ZWRITE_ERROR 630
+#define ZTO_OP_STATE_ERROR 631
+#define ZTOO_EARLY_ACCESS_ERROR 632
+#define ZTEMPORARY_ACC_UNDO_FAILURE 677
+#endif
+
+class ElementHeader {
+ /**
+ *
+ * l = Locked -- If true contains operation else scan bits + hash value
+ * s = Scan bits
+ * h = Hash value
+ * o = Operation ptr I
+ *
+ * 1111111111222222222233
+ * 01234567890123456789012345678901
+ * lssssssssssss hhhhhhhhhhhhhhhh
+ * ooooooooooooooooooooooooooooooo
+ */
+public:
+ STATIC_CONST( HASH_VALUE_PART_MASK = 0xFFFF );
+
+ static bool getLocked(Uint32 data);
+ static bool getUnlocked(Uint32 data);
+ static Uint32 getScanBits(Uint32 data);
+ static Uint32 getHashValuePart(Uint32 data);
+ static Uint32 getOpPtrI(Uint32 data);
+
+ static Uint32 setLocked(Uint32 opPtrI);
+ static Uint32 setUnlocked(Uint32 hashValuePart, Uint32 scanBits);
+ static Uint32 setScanBit(Uint32 header, Uint32 scanBit);
+ static Uint32 clearScanBit(Uint32 header, Uint32 scanBit);
+};
+
+inline
+bool
+ElementHeader::getLocked(Uint32 data){
+ return (data & 1) == 0;
+}
+
+inline
+bool
+ElementHeader::getUnlocked(Uint32 data){
+ return (data & 1) == 1;
+}
+
+inline
+Uint32
+ElementHeader::getScanBits(Uint32 data){
+ assert(getUnlocked(data));
+ return (data >> 1) & ((1 << MAX_PARALLEL_SCANS_PER_FRAG) - 1);
+}
+
+inline
+Uint32
+ElementHeader::getHashValuePart(Uint32 data){
+ assert(getUnlocked(data));
+ return data >> 16;
+}
+
+inline
+Uint32
+ElementHeader::getOpPtrI(Uint32 data){
+ assert(getLocked(data));
+ return data >> 1;
+}
+
+inline
+Uint32
+ElementHeader::setLocked(Uint32 opPtrI){
+ return (opPtrI << 1) + 0;
+}
+inline
+Uint32
+ElementHeader::setUnlocked(Uint32 hashValue, Uint32 scanBits){
+ return (hashValue << 16) + (scanBits << 1) + 1;
+}
+
+inline
+Uint32
+ElementHeader::setScanBit(Uint32 header, Uint32 scanBit){
+ assert(getUnlocked(header));
+ return header | (scanBit << 1);
+}
+
+inline
+Uint32
+ElementHeader::clearScanBit(Uint32 header, Uint32 scanBit){
+ assert(getUnlocked(header));
+ return header & (~(scanBit << 1));
+}
+
+
+class Dbacc: public SimulatedBlock {
+public:
+// State values
+enum State {
+ FREEFRAG = 0,
+ ACTIVEFRAG = 1,
+ SEND_QUE_OP = 2,
+ WAIT_ACC_LCPREQ = 3,
+ LCP_SEND_PAGES = 4,
+ LCP_SEND_OVER_PAGES = 5,
+ LCP_SEND_ZERO_PAGE = 6,
+ SR_READ_PAGES = 7,
+ SR_READ_OVER_PAGES = 8,
+ WAIT_ZERO_PAGE_STORED = 9,
+ WAIT_NOTHING = 10,
+ WAIT_OPEN_UNDO_LCP = 11,
+ WAIT_OPEN_UNDO_LCP_NEXT = 12,
+ WAIT_OPEN_DATA_FILE_FOR_READ = 13,
+ WAIT_OPEN_DATA_FILE_FOR_WRITE = 14,
+ OPEN_UNDO_FILE_SR = 15,
+ READ_UNDO_PAGE = 16,
+ READ_UNDO_PAGE_AND_CLOSE = 17,
+ WAIT_READ_DATA = 18,
+ WAIT_READ_PAGE_ZERO = 19,
+ WAIT_WRITE_DATA = 20,
+ WAIT_WRITE_UNDO = 21,
+ WAIT_WRITE_UNDO_EXIT = 22,
+ WAIT_CLOSE_UNDO = 23,
+ LCP_CLOSE_DATA = 24,
+ SR_CLOSE_DATA = 25,
+ WAIT_ONE_CONF = 26,
+ WAIT_TWO_CONF = 27,
+ LCP_FREE = 28,
+ LCP_ACTIVE = 29,
+ FREE_OP = 30,
+ WAIT_EXE_OP = 32,
+ WAIT_IN_QUEUE = 34,
+ EXE_OP = 35,
+ SCAN_ACTIVE = 36,
+ SCAN_WAIT_IN_QUEUE = 37,
+ IDLE = 39,
+ ACTIVE = 40,
+ WAIT_COMMIT_ABORT = 41,
+ ABORT = 42,
+ ABORTADDFRAG = 43,
+ REFUSEADDFRAG = 44,
+ DELETEFRAG = 45,
+ DELETETABLE = 46,
+ UNDEFINEDROOT = 47,
+ ADDFIRSTFRAG = 48,
+ ADDSECONDFRAG = 49,
+ DELETEFIRSTFRAG = 50,
+ DELETESECONDFRAG = 51,
+ ACTIVEROOT = 52,
+ LCP_CREATION = 53
+};
+
+// Records
+
+/* --------------------------------------------------------------------------------- */
+/* UNDO HEADER RECORD */
+/* --------------------------------------------------------------------------------- */
+
+ struct UndoHeader {
+ enum UndoHeaderType{
+ ZPAGE_INFO = 0,
+ ZOVER_PAGE_INFO = 1,
+ ZOP_INFO = 2,
+ ZNO_UNDORECORD_TYPES = 3
+ };
+ UintR tableId;
+ UintR rootFragId;
+ UintR localFragId;
+ UintR variousInfo;
+ UintR logicalPageId;
+ UintR prevUndoAddressForThisFrag;
+ UintR prevUndoAddress;
+ };
+
+/* --------------------------------------------------------------------------------- */
+/* DIRECTORY RANGE */
+/* --------------------------------------------------------------------------------- */
+ struct DirRange {
+ Uint32 dirArray[256];
+ }; /* p2c: size = 1024 bytes */
+
+ typedef Ptr<DirRange> DirRangePtr;
+
+/* --------------------------------------------------------------------------------- */
+/* DIRECTORYARRAY */
+/* --------------------------------------------------------------------------------- */
+struct Directoryarray {
+ Uint32 pagep[256];
+}; /* p2c: size = 1024 bytes */
+
+ typedef Ptr<Directoryarray> DirectoryarrayPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* FRAGMENTREC. ALL INFORMATION ABOUT THE FRAGMENT AND ITS HASH TABLE IS SAVED IN   */
+/* THE FRAGMENT RECORD. A POINTER TO THE FRAGMENT RECORD IS SAVED IN ROOTFRAGMENTREC */
+/* --------------------------------------------------------------------------------- */
+struct Fragmentrec {
+//-----------------------------------------------------------------------------
+// References to long key pages with free area. Some type of buddy structure
+// where references at higher indexes have more free space.
+//-----------------------------------------------------------------------------
+ Uint32 longKeyPageArray[4];
+
+//-----------------------------------------------------------------------------
+// These variables keep track of allocated pages, the number of them and the
+// start file page of them. Used during local checkpoints.
+//-----------------------------------------------------------------------------
+ Uint32 datapages[8];
+ Uint32 activeDataPage;
+ Uint32 activeDataFilePage;
+
+//-----------------------------------------------------------------------------
+// Temporary variables used during shrink and expand process.
+//-----------------------------------------------------------------------------
+ Uint32 expReceivePageptr;
+ Uint32 expReceiveIndex;
+ Uint32 expReceiveForward;
+ Uint32 expSenderDirIndex;
+ Uint32 expSenderDirptr;
+ Uint32 expSenderIndex;
+ Uint32 expSenderPageptr;
+
+//-----------------------------------------------------------------------------
+// List of lock owners and list of lock waiters to support LCP handling
+//-----------------------------------------------------------------------------
+ Uint32 lockOwnersList;
+ Uint32 firstWaitInQueOp;
+ Uint32 lastWaitInQueOp;
+ Uint32 sentWaitInQueOp;
+
+//-----------------------------------------------------------------------------
+// References to Directory Ranges (which in turn references directories, which
+// in its turn references the pages) for the bucket pages and the overflow
+// bucket pages.
+//-----------------------------------------------------------------------------
+ Uint32 directory;
+ Uint32 dirsize;
+ Uint32 overflowdir;
+ Uint32 lastOverIndex;
+
+//-----------------------------------------------------------------------------
+// These variables are used to support LCP and Restore from disk.
+// lcpDirIndex: used during LCP as the frag page id currently stored.
+// lcpMaxDirIndex: The dirsize at start of LCP.
+// lcpMaxOverDirIndex: The lastOverIndex at start of LCP
+// During a LCP one writes the minimum of the number of pages in the directory
+// and the number of pages at the start of the LCP.
+// noStoredPages: Number of bucket pages written in LCP used at restore
+// noOfStoredOverPages: Number of overflow pages written in LCP used at restore
+// This variable is also used during LCP to calculate this number.
+//-----------------------------------------------------------------------------
+ Uint32 lcpDirIndex;
+ Uint32 lcpMaxDirIndex;
+ Uint32 lcpMaxOverDirIndex;
+ Uint32 noStoredPages;
+ Uint32 noOfStoredOverPages;
+
+//-----------------------------------------------------------------------------
+// We have a list of overflow pages with free areas. We have a special record,
+// the overflow record representing these pages. The reason is that the
+// same record is also used to represent pages in the directory array that have
+// been released since they were empty (there were however higher indexes with
+// data in them). These are put in the firstFreeDirIndexRec-list.
+// An overflow record representing a page can only be in one of these lists.
+//-----------------------------------------------------------------------------
+ Uint32 firstOverflowRec;
+ Uint32 lastOverflowRec;
+ Uint32 firstFreeDirindexRec;
+
+//-----------------------------------------------------------------------------
+// localCheckpId is used during execution of UNDO log to ensure that we only
+// apply UNDO log records from the restored LCP of the fragment.
+// lcpLqhPtr keeps track of LQH record for this fragment to checkpoint
+//-----------------------------------------------------------------------------
+ Uint32 localCheckpId;
+ Uint32 lcpLqhPtr;
+
+//-----------------------------------------------------------------------------
+// Counter keeping track of how many times we have expanded. We need to ensure
+// that we do not shrink so many times that this variable becomes negative.
+//-----------------------------------------------------------------------------
+ Uint32 expandCounter;
+//-----------------------------------------------------------------------------
+// Reference to record for open file at LCP and restore
+//-----------------------------------------------------------------------------
+ Uint32 fsConnPtr;
+
+//-----------------------------------------------------------------------------
+// These variables are important for the linear hashing algorithm.
+// localkeylen is the size of the local key (1 and 2 is currently supported)
+// maxloadfactor is the factor specifying when to expand
+// minloadfactor is the factor specifying when to shrink (hysteresis model)
+// maxp and p
+// maxp and p are the variables most central to linear hashing. p + maxp + 1 is
+// the current number of buckets. maxp is the largest value of the form 2**n - 1
+// which is smaller than the number of buckets. These values are used to find the
+// correct bucket with the aid of the hash value.
+//
+// slack keeps track of whether we have inserted more or fewer elements than the
+// current size is suitable for. Slack, together with the boundaries set by
+// maxloadfactor and minloadfactor, decides when to expand/shrink.
+// slackCheck: when slack goes over this value it is time to expand.
+// slackCheck = (maxp + p + 1)*(maxloadfactor - minloadfactor) or
+// bucketSize * hysteresis
+//-----------------------------------------------------------------------------
+ Uint32 localkeylen;
+ Uint32 maxp;
+ Uint32 maxloadfactor;
+ Uint32 minloadfactor;
+ Uint32 p;
+ Uint32 slack;
+ Uint32 slackCheck;
+
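+//-----------------------------------------------------------------------------
+// Sketch of the standard linear hashing address computation that maxp and p
+// support (illustrative only; the exact masking used in the code may differ):
+//   Uint32 bucket = hashValue & maxp;            // maxp == 2**n - 1
+//   if (bucket < p)
+//     bucket = hashValue & ((maxp << 1) | 1);    // bucket has already been split
+// which addresses the p + maxp + 1 buckets described above.
+//-----------------------------------------------------------------------------
+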
+//-----------------------------------------------------------------------------
+// myfid is the fragment id of the fragment
+// myroot is the reference to the root fragment record
+// nextfreefrag is the next free fragment if linked into a free list
+//-----------------------------------------------------------------------------
+ Uint32 myfid;
+ Uint32 myroot;
+ Uint32 myTableId;
+ Uint32 nextfreefrag;
+
+//-----------------------------------------------------------------------------
+// This variable is used during restore to keep track of page id of read pages.
+// During read of bucket pages this is used to calculate the page id and also
+// to verify that the page id of the read page is correct. During read of over-
+// flow pages it is only used to keep track of the number of pages read.
+//-----------------------------------------------------------------------------
+ Uint32 nextAllocPage;
+
+//-----------------------------------------------------------------------------
+// Keeps track of undo position for fragment during LCP and restore.
+//-----------------------------------------------------------------------------
+ Uint32 prevUndoposition;
+
+//-----------------------------------------------------------------------------
+// Page reference during LCP and restore of page zero where fragment data is
+// saved
+//-----------------------------------------------------------------------------
+ Uint32 zeroPagePtr;
+
+//-----------------------------------------------------------------------------
+// Number of pages read from file during restore
+//-----------------------------------------------------------------------------
+ Uint32 noOfExpectedPages;
+
+//-----------------------------------------------------------------------------
+// Fragment State, mostly applicable during LCP and restore
+//-----------------------------------------------------------------------------
+ State fragState;
+
+//-----------------------------------------------------------------------------
+// Keep track of number of outstanding writes of UNDO log records to ensure that
+// we have saved all UNDO info before concluding local checkpoint.
+//-----------------------------------------------------------------------------
+ Uint32 nrWaitWriteUndoExit;
+
+//-----------------------------------------------------------------------------
+// lastUndoIsStored is used to handle parallel writes of UNDO log and pages to
+// know when LCP is completed
+//-----------------------------------------------------------------------------
+ Uint8 lastUndoIsStored;
+
+//-----------------------------------------------------------------------------
+// Set to ZTRUE when local checkpoint freeze occurs and set to ZFALSE when
+// local checkpoint concludes.
+//-----------------------------------------------------------------------------
+ Uint8 createLcp;
+
+//-----------------------------------------------------------------------------
+// Flag indicating whether we are in the load phase of restore still.
+//-----------------------------------------------------------------------------
+ Uint8 loadingFlag;
+
+//-----------------------------------------------------------------------------
+// elementLength: Length of element in bucket and overflow pages
+// keyLength: Length of key
+//-----------------------------------------------------------------------------
+ Uint8 elementLength;
+ Uint16 keyLength;
+
+//-----------------------------------------------------------------------------
+// This flag is used to avoid sending a big number of expand or shrink signals
+// when simultaneously committing many inserts or deletes.
+//-----------------------------------------------------------------------------
+ Uint8 expandFlag;
+
+//-----------------------------------------------------------------------------
+// hashcheckbit is the bit to check to decide whether an element goes to the split bucket
+// k (== 6) is the number of buckets per page
+// lhfragbits is the number of bits used to calculate the fragment id
+// lhdirbits is the number of bits used to calculate the page id
+//-----------------------------------------------------------------------------
+ Uint8 hashcheckbit;
+ Uint8 k;
+ Uint8 lhfragbits;
+ Uint8 lhdirbits;
+
+//-----------------------------------------------------------------------------
+// nodetype can only be STORED in this release. Is currently only set, never read
+// stopQueOp is an indicator that locked operations will not start until the LCP
+// has released the lock on the fragment
+//-----------------------------------------------------------------------------
+ Uint8 nodetype;
+ Uint8 stopQueOp;
+
+//-----------------------------------------------------------------------------
+// flag to avoid accessing table record if no char attributes
+//-----------------------------------------------------------------------------
+ Uint8 hasCharAttr;
+};
+
+ typedef Ptr<Fragmentrec> FragmentrecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* FS_CONNECTREC */
+/* --------------------------------------------------------------------------------- */
+struct FsConnectrec {
+ Uint32 fsNext;
+ Uint32 fsPrev;
+ Uint32 fragrecPtr;
+ Uint32 fsPtr;
+ State fsState;
+ Uint8 activeFragId;
+ Uint8 fsPart;
+}; /* p2c: size = 24 bytes */
+
+ typedef Ptr<FsConnectrec> FsConnectrecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* FS_OPREC */
+/* --------------------------------------------------------------------------------- */
+struct FsOprec {
+ Uint32 fsOpnext;
+ Uint32 fsOpfragrecPtr;
+ Uint32 fsConptr;
+ State fsOpstate;
+ Uint16 fsOpMemPage;
+}; /* p2c: size = 20 bytes */
+
+ typedef Ptr<FsOprec> FsOprecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* LCP_CONNECTREC */
+/* --------------------------------------------------------------------------------- */
+struct LcpConnectrec {
+ Uint32 nextLcpConn;
+ Uint32 lcpUserptr;
+ Uint32 rootrecptr;
+ State syncUndopageState;
+ State lcpstate;
+ Uint32 lcpUserblockref;
+ Uint16 localCheckPid;
+ Uint8 noOfLcpConf;
+};
+ typedef Ptr<LcpConnectrec> LcpConnectrecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* OPERATIONREC */
+/* --------------------------------------------------------------------------------- */
+struct Operationrec {
+ Uint32 keydata[8];
+ Uint32 localdata[2];
+ Uint32 elementIsforward;
+ Uint32 elementPage;
+ Uint32 elementPointer;
+ Uint32 fid;
+ Uint32 fragptr;
+ Uint32 hashvaluePart;
+ Uint32 hashValue;
+ Uint32 insertDeleteLen;
+ Uint32 keyinfoPage;
+ Uint32 nextLockOwnerOp;
+ Uint32 nextOp;
+ Uint32 nextParallelQue;
+ Uint32 nextQueOp;
+ Uint32 nextSerialQue;
+ Uint32 prevOp;
+ Uint32 prevLockOwnerOp;
+ Uint32 prevParallelQue;
+ Uint32 prevQueOp;
+ Uint32 prevSerialQue;
+ Uint32 scanRecPtr;
+ Uint32 transId1;
+ Uint32 transId2;
+ Uint32 longPagePtr;
+ Uint32 longKeyPageIndex;
+ State opState;
+ Uint32 userptr;
+ State transactionstate;
+ Uint16 elementContainer;
+ Uint16 tupkeylen;
+ Uint32 xfrmtupkeylen;
+ Uint32 userblockref;
+ Uint32 scanBits;
+ Uint8 elementIsDisappeared;
+ Uint8 insertIsDone;
+ Uint8 lockMode;
+ Uint8 lockOwner;
+ Uint8 nodeType;
+ Uint8 operation;
+ Uint8 opSimple;
+ Uint8 dirtyRead;
+ Uint8 commitDeleteCheckFlag;
+ Uint8 isAccLockReq;
+ Uint8 isUndoLogReq;
+}; /* p2c: size = 168 bytes */
+
+ typedef Ptr<Operationrec> OperationrecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* OVERFLOW_RECORD */
+/* --------------------------------------------------------------------------------- */
+struct OverflowRecord {
+ Uint32 dirindex;
+ Uint32 nextOverRec;
+ Uint32 nextOverList;
+ Uint32 prevOverRec;
+ Uint32 prevOverList;
+ Uint32 overpage;
+ Uint32 nextfreeoverrec;
+};
+
+ typedef Ptr<OverflowRecord> OverflowRecordPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* PAGE8 */
+/* --------------------------------------------------------------------------------- */
+struct Page8 {
+ Uint32 word32[2048];
+}; /* p2c: size = 8192 bytes */
+
+ typedef Ptr<Page8> Page8Ptr;
+
+/* --------------------------------------------------------------------------------- */
+/* ROOTFRAGMENTREC */
+/* DURING THE EXPAND FRAGMENT PROCESS, EACH FRAGMENT WILL BE EXPANDED INTO TWO      */
+/* NEW FRAGMENTS. TO MAKE THIS PROCESS EASIER, THE NEXT FRAGMENT IDENTITIES ARE     */
+/* CALCULATED DURING THE ADD FRAGMENT PROCESS, AND TWO FRAGMENTS ARE ADDED IN       */
+/* (NDBACC). THEREBY THE EXPANSION OF A FRAGMENT CAN BE PERFORMED QUICKLY AND       */
+/* EASILY. THE NEW FRAGMENT ID IS SENT TO THE TUP MANAGER FOR ALL OPERATIONS.       */
+/* --------------------------------------------------------------------------------- */
+struct Rootfragmentrec {
+ Uint32 scan[MAX_PARALLEL_SCANS_PER_FRAG];
+ Uint32 fragmentptr[2];
+ Uint32 fragmentid[2];
+ Uint32 lcpPtr;
+ Uint32 mytabptr;
+ Uint32 nextroot;
+ Uint32 roothashcheck;
+ Uint32 noOfElements;
+ Uint32 m_commit_count;
+ State rootState;
+}; /* p2c: size = 72 bytes */
+
+ typedef Ptr<Rootfragmentrec> RootfragmentrecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* SCAN_REC */
+/* --------------------------------------------------------------------------------- */
+struct ScanRec {
+ enum ScanState {
+ WAIT_NEXT,
+ SCAN_DISCONNECT
+ };
+ enum ScanBucketState {
+ FIRST_LAP,
+ SECOND_LAP,
+ SCAN_COMPLETED
+ };
+ Uint32 activeLocalFrag;
+ Uint32 rootPtr;
+ Uint32 nextBucketIndex;
+ Uint32 scanNextfreerec;
+ Uint32 scanFirstActiveOp;
+ Uint32 scanFirstLockedOp;
+ Uint32 scanLastLockedOp;
+ Uint32 scanFirstQueuedOp;
+ Uint32 scanLastQueuedOp;
+ Uint32 scanUserptr;
+ Uint32 scanTrid1;
+ Uint32 scanTrid2;
+ Uint32 startNoOfBuckets;
+ Uint32 minBucketIndexToRescan;
+ Uint32 maxBucketIndexToRescan;
+ Uint32 scanOpsAllocated;
+ ScanBucketState scanBucketState;
+ ScanState scanState;
+ Uint16 scanLockHeld;
+ Uint32 scanUserblockref;
+ Uint32 scanMask;
+ Uint8 scanLockMode;
+ Uint8 scanKeyinfoFlag;
+ Uint8 scanTimer;
+ Uint8 scanContinuebCounter;
+ Uint8 scanReadCommittedFlag;
+};
+
+ typedef Ptr<ScanRec> ScanRecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* SR_VERSION_REC */
+/* --------------------------------------------------------------------------------- */
+struct SrVersionRec {
+ Uint32 nextFreeSr;
+ Uint32 checkPointId;
+ Uint32 prevAddress;
+ Uint32 srUnused; /* p2c: Not used */
+}; /* p2c: size = 16 bytes */
+
+ typedef Ptr<SrVersionRec> SrVersionRecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* TABREC */
+/* --------------------------------------------------------------------------------- */
+struct Tabrec {
+ Uint32 fragholder[MAX_FRAG_PER_NODE];
+ Uint32 fragptrholder[MAX_FRAG_PER_NODE];
+ Uint32 tabUserPtr;
+ BlockReference tabUserRef;
+
+ Uint8 noOfKeyAttr;
+ Uint8 hasCharAttr;
+ struct KeyAttr {
+ Uint32 attributeDescriptor;
+ CHARSET_INFO* charsetInfo;
+ } keyAttr[MAX_ATTRIBUTES_IN_INDEX];
+};
+ typedef Ptr<Tabrec> TabrecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* UNDOPAGE */
+/* --------------------------------------------------------------------------------- */
+struct Undopage {
+ Uint32 undoword[8192];
+}; /* p2c: size = 32768 bytes */
+
+ typedef Ptr<Undopage> UndopagePtr;
+
+public:
+ Dbacc(const class Configuration &);
+ virtual ~Dbacc();
+
+ // pointer to TUP instance in this thread
+ Dbtup* c_tup;
+
+private:
+ BLOCK_DEFINES(Dbacc);
+
+ // Transit signals
+ void execDEBUG_SIG(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+ void execACC_CHECK_SCAN(Signal* signal);
+ void execEXPANDCHECK2(Signal* signal);
+ void execSHRINKCHECK2(Signal* signal);
+ void execACC_OVER_REC(Signal* signal);
+ void execACC_SAVE_PAGES(Signal* signal);
+ void execNEXTOPERATION(Signal* signal);
+ void execREAD_PSEUDO_REQ(Signal* signal);
+
+ // Received signals
+ void execSTTOR(Signal* signal);
+ void execSR_FRAGIDREQ(Signal* signal);
+ void execLCP_FRAGIDREQ(Signal* signal);
+ void execLCP_HOLDOPREQ(Signal* signal);
+ void execEND_LCPREQ(Signal* signal);
+ void execACC_LCPREQ(Signal* signal);
+ void execSTART_RECREQ(Signal* signal);
+ void execACC_CONTOPREQ(Signal* signal);
+ void execACCKEYREQ(Signal* signal);
+ void execACCSEIZEREQ(Signal* signal);
+ void execACCFRAGREQ(Signal* signal);
+ void execTC_SCHVERREQ(Signal* signal);
+ void execACC_SRREQ(Signal* signal);
+ void execNEXT_SCANREQ(Signal* signal);
+ void execACC_ABORTREQ(Signal* signal);
+ void execACC_SCANREQ(Signal* signal);
+ void execACCMINUPDATE(Signal* signal);
+ void execACC_COMMITREQ(Signal* signal);
+ void execACC_TO_REQ(Signal* signal);
+ void execACC_LOCKREQ(Signal* signal);
+ void execFSOPENCONF(Signal* signal);
+ void execFSOPENREF(Signal* signal);
+ void execFSCLOSECONF(Signal* signal);
+ void execFSCLOSEREF(Signal* signal);
+ void execFSWRITECONF(Signal* signal);
+ void execFSWRITEREF(Signal* signal);
+ void execFSREADCONF(Signal* signal);
+ void execFSREADREF(Signal* signal);
+ void execNDB_STTOR(Signal* signal);
+ void execDROP_TAB_REQ(Signal* signal);
+ void execFSREMOVECONF(Signal* signal);
+ void execFSREMOVEREF(Signal* signal);
+ void execREAD_CONFIG_REQ(Signal* signal);
+ void execSET_VAR_REQ(Signal* signal);
+ void execDUMP_STATE_ORD(Signal* signal);
+
+ // Statement blocks
+ void ACCKEY_error(Uint32 fromWhere);
+
+ void commitDeleteCheck();
+
+ void initRootFragPageZero(RootfragmentrecPtr, Page8Ptr);
+ void initRootFragSr(RootfragmentrecPtr, Page8Ptr);
+ void initFragAdd(Signal*, Uint32 rootFragIndex, Uint32 rootIndex, FragmentrecPtr);
+ void initFragPageZero(FragmentrecPtr, Page8Ptr);
+ void initFragSr(FragmentrecPtr, Page8Ptr);
+ void initFragGeneral(FragmentrecPtr);
+ void verifyFragCorrect(FragmentrecPtr regFragPtr);
+ void sendFSREMOVEREQ(Signal* signal, Uint32 tableId);
+ void releaseFragResources(Signal* signal, Uint32 fragIndex);
+ void releaseRootFragRecord(Signal* signal, RootfragmentrecPtr rootPtr);
+ void releaseRootFragResources(Signal* signal, Uint32 tableId);
+ void releaseDirResources(Signal* signal,
+ Uint32 fragIndex,
+ Uint32 dirIndex,
+ Uint32 startIndex);
+ void releaseDirectoryResources(Signal* signal,
+ Uint32 fragIndex,
+ Uint32 dirIndex,
+ Uint32 startIndex,
+ Uint32 directoryIndex);
+ void releaseOverflowResources(Signal* signal, FragmentrecPtr regFragPtr);
+ void releaseDirIndexResources(Signal* signal, FragmentrecPtr regFragPtr);
+ void releaseFragRecord(Signal* signal, FragmentrecPtr regFragPtr);
+ Uint32 remainingUndoPages();
+ void updateLastUndoPageIdWritten(Signal* signal, Uint32 aNewValue);
+ void updateUndoPositionPage(Signal* signal, Uint32 aNewValue);
+ void srCheckPage(Signal* signal);
+ void srCheckContainer(Signal* signal);
+ void initScanFragmentPart(Signal* signal);
+ Uint32 checkScanExpand(Signal* signal);
+ Uint32 checkScanShrink(Signal* signal);
+ void initialiseDirRec(Signal* signal);
+ void initialiseDirRangeRec(Signal* signal);
+ void initialiseFragRec(Signal* signal);
+ void initialiseFsConnectionRec(Signal* signal);
+ void initialiseFsOpRec(Signal* signal);
+ void initialiseLcpConnectionRec(Signal* signal);
+ void initialiseOperationRec(Signal* signal);
+ void initialiseOverflowRec(Signal* signal);
+ void initialisePageRec(Signal* signal);
+ void initialiseLcpPages(Signal* signal);
+ void initialiseRootfragRec(Signal* signal);
+ void initialiseScanRec(Signal* signal);
+ void initialiseSrVerRec(Signal* signal);
+ void initialiseTableRec(Signal* signal);
+ bool addfragtotab(Signal* signal, Uint32 rootIndex, Uint32 fragId);
+ void initOpRec(Signal* signal);
+ void sendAcckeyconf(Signal* signal);
+ Uint32 placeReadInLockQueue(Signal* signal);
+ void placeSerialQueueRead(Signal* signal);
+ void checkOnlyReadEntry(Signal* signal);
+ Uint32 getNoParallelTransaction(const Operationrec*);
+ void moveLastParallelQueue(Signal* signal);
+ void moveLastParallelQueueWrite(Signal* signal);
+ Uint32 placeWriteInLockQueue(Signal* signal);
+ void placeSerialQueueWrite(Signal* signal);
+ void expandcontainer(Signal* signal);
+ void shrinkcontainer(Signal* signal);
+ void nextcontainerinfoExp(Signal* signal);
+ void lcpCopyPage(Signal* signal);
+ void lcpUpdatePage(Signal* signal);
+ void checkUndoPages(Signal* signal);
+ void undoWritingProcess(Signal* signal);
+ void writeUndoDataInfo(Signal* signal);
+ void writeUndoHeader(Signal* signal,
+ Uint32 logicalPageId,
+ UndoHeader::UndoHeaderType pageType);
+ void writeUndoOpInfo(Signal* signal);
+ void checksumControl(Signal* signal, Uint32 checkPage);
+ void startActiveUndo(Signal* signal);
+ void releaseAndCommitActiveOps(Signal* signal);
+ void releaseAndCommitQueuedOps(Signal* signal);
+ void releaseAndAbortLockedOps(Signal* signal);
+ void containerinfo(Signal* signal);
+ bool getScanElement(Signal* signal);
+ void initScanOpRec(Signal* signal);
+ void nextcontainerinfo(Signal* signal);
+ void putActiveScanOp(Signal* signal);
+ void putOpScanLockQue();
+ void putReadyScanQueue(Signal* signal, Uint32 scanRecIndex);
+ void releaseScanBucket(Signal* signal);
+ void releaseScanContainer(Signal* signal);
+ void releaseScanRec(Signal* signal);
+ bool searchScanContainer(Signal* signal);
+ void sendNextScanConf(Signal* signal);
+ void setlock(Signal* signal);
+ void takeOutActiveScanOp(Signal* signal);
+ void takeOutScanLockQueue(Uint32 scanRecIndex);
+ void takeOutReadyScanQueue(Signal* signal);
+ void insertElement(Signal* signal);
+ void insertContainer(Signal* signal);
+ void addnewcontainer(Signal* signal);
+ void getfreelist(Signal* signal);
+ void increaselistcont(Signal* signal);
+ void seizeLeftlist(Signal* signal);
+ void seizeRightlist(Signal* signal);
+ Uint32 readTablePk(Uint32 localkey1);
+ void getElement(Signal* signal);
+ void getdirindex(Signal* signal);
+ void commitdelete(Signal* signal, bool systemRestart);
+ void deleteElement(Signal* signal);
+ void getLastAndRemove(Signal* signal);
+ void releaseLeftlist(Signal* signal);
+ void releaseRightlist(Signal* signal);
+ void checkoverfreelist(Signal* signal);
+ void abortOperation(Signal* signal);
+ void accAbortReqLab(Signal* signal, bool sendConf);
+ void commitOperation(Signal* signal);
+ void copyOpInfo(Signal* signal);
+ Uint32 executeNextOperation(Signal* signal);
+ void releaselock(Signal* signal);
+ void takeOutFragWaitQue(Signal* signal);
+ void check_lock_upgrade(Signal* signal, OperationrecPtr lock_owner,
+ OperationrecPtr release_op);
+ void allocOverflowPage(Signal* signal);
+ bool getrootfragmentrec(Signal* signal, RootfragmentrecPtr&, Uint32 fragId);
+ void insertLockOwnersList(Signal* signal, const OperationrecPtr&);
+ void takeOutLockOwnersList(Signal* signal, const OperationrecPtr&);
+ void initFsOpRec(Signal* signal);
+ void initLcpConnRec(Signal* signal);
+ void initOverpage(Signal* signal);
+ void initPage(Signal* signal);
+ void initRootfragrec(Signal* signal);
+ void putOpInFragWaitQue(Signal* signal);
+ void putOverflowRecInFrag(Signal* signal);
+ void putRecInFreeOverdir(Signal* signal);
+ void releaseDirectory(Signal* signal);
+ void releaseDirrange(Signal* signal);
+ void releaseFsConnRec(Signal* signal);
+ void releaseFsOpRec(Signal* signal);
+ void releaseLcpConnectRec(Signal* signal);
+ void releaseOpRec(Signal* signal);
+ void releaseOverflowRec(Signal* signal);
+ void releaseOverpage(Signal* signal);
+ void releasePage(Signal* signal);
+ void releaseLcpPage(Signal* signal);
+ void releaseSrRec(Signal* signal);
+ void releaseLogicalPage(Fragmentrec * fragP, Uint32 logicalPageId);
+ void seizeDirectory(Signal* signal);
+ void seizeDirrange(Signal* signal);
+ void seizeFragrec(Signal* signal);
+ void seizeFsConnectRec(Signal* signal);
+ void seizeFsOpRec(Signal* signal);
+ void seizeLcpConnectRec(Signal* signal);
+ void seizeOpRec(Signal* signal);
+ void seizeOverRec(Signal* signal);
+ void seizePage(Signal* signal);
+ void seizeLcpPage(Page8Ptr&);
+ void seizeRootfragrec(Signal* signal);
+ void seizeScanRec(Signal* signal);
+ void seizeSrVerRec(Signal* signal);
+ void sendSystemerror(Signal* signal);
+ void takeRecOutOfFreeOverdir(Signal* signal);
+ void takeRecOutOfFreeOverpage(Signal* signal);
+ void sendScanHbRep(Signal* signal, Uint32);
+
+ void addFragRefuse(Signal* signal, Uint32 errorCode);
+ void ndbsttorryLab(Signal* signal);
+ void srCloseDataFileLab(Signal* signal);
+ void acckeyref1Lab(Signal* signal, Uint32 result_code);
+ void insertelementLab(Signal* signal);
+ void startUndoLab(Signal* signal);
+ void checkNextFragmentLab(Signal* signal);
+ void endofexpLab(Signal* signal);
+ void endofshrinkbucketLab(Signal* signal);
+ void srStartUndoLab(Signal* signal);
+ void senddatapagesLab(Signal* signal);
+ void undoNext2Lab(Signal* signal);
+ void sttorrysignalLab(Signal* signal);
+ void sendholdconfsignalLab(Signal* signal);
+ void accIsLockedLab(Signal* signal);
+ void insertExistElemLab(Signal* signal);
+ void refaccConnectLab(Signal* signal);
+ void srReadOverPagesLab(Signal* signal);
+ void releaseScanLab(Signal* signal);
+ void lcpOpenUndofileConfLab(Signal* signal);
+ void srFsOpenConfLab(Signal* signal);
+ void checkSyncUndoPagesLab(Signal* signal);
+ void sendaccSrconfLab(Signal* signal);
+ void checkSendLcpConfLab(Signal* signal);
+ void endsaveoverpageLab(Signal* signal);
+ void lcpCloseDataFileLab(Signal* signal);
+ void srOpenDataFileLoopLab(Signal* signal);
+ void srReadPagesLab(Signal* signal);
+ void srDoUndoLab(Signal* signal);
+ void ndbrestart1Lab(Signal* signal);
+ void initialiseRecordsLab(Signal* signal, Uint32 ref, Uint32 data);
+ void srReadPagesAllocLab(Signal* signal);
+ void checkNextBucketLab(Signal* signal);
+ void endsavepageLab(Signal* signal);
+ void saveZeroPageLab(Signal* signal);
+ void srAllocPage0011Lab(Signal* signal);
+ void sendLcpFragidconfLab(Signal* signal);
+ void savepagesLab(Signal* signal);
+ void saveOverPagesLab(Signal* signal);
+ void srReadPageZeroLab(Signal* signal);
+ void storeDataPageInDirectoryLab(Signal* signal);
+ void lcpFsOpenConfLab(Signal* signal);
+
+ void zpagesize_error(const char* where);
+
+ void reportMemoryUsage(Signal* signal, int gth);
+ void lcp_write_op_to_undolog(Signal* signal);
+ void reenable_expand_after_redo_log_exection_complete(Signal*);
+
+ // charsets
+ void xfrmKeyData(Signal* signal);
+
+ // Initialisation
+ void initData();
+ void initRecords();
+
+ // Variables
+/* --------------------------------------------------------------------------------- */
+/* DIRECTORY RANGE */
+/* --------------------------------------------------------------------------------- */
+ DirRange *dirRange;
+ DirRangePtr expDirRangePtr;
+ DirRangePtr gnsDirRangePtr;
+ DirRangePtr newDirRangePtr;
+ DirRangePtr rdDirRangePtr;
+ DirRangePtr nciOverflowrangeptr;
+ Uint32 cdirrangesize;
+ Uint32 cfirstfreeDirrange;
+/* --------------------------------------------------------------------------------- */
+/* DIRECTORYARRAY */
+/* --------------------------------------------------------------------------------- */
+ Directoryarray *directoryarray;
+ DirectoryarrayPtr expDirptr;
+ DirectoryarrayPtr rdDirptr;
+ DirectoryarrayPtr sdDirptr;
+ DirectoryarrayPtr nciOverflowDirptr;
+ Uint32 cdirarraysize;
+ Uint32 cdirmemory;
+ Uint32 cfirstfreedir;
+/* --------------------------------------------------------------------------------- */
+/* FRAGMENTREC. ALL INFORMATION ABOUT THE FRAGMENT AND ITS HASH TABLE IS SAVED IN   */
+/* THE FRAGMENT RECORD. A POINTER TO THE FRAGMENT RECORD IS SAVED IN ROOTFRAGMENTREC */
+/* --------------------------------------------------------------------------------- */
+ Fragmentrec *fragmentrec;
+ FragmentrecPtr fragrecptr;
+ Uint32 cfirstfreefrag;
+ Uint32 cfragmentsize;
+/* --------------------------------------------------------------------------------- */
+/* FS_CONNECTREC */
+/* --------------------------------------------------------------------------------- */
+ FsConnectrec *fsConnectrec;
+ FsConnectrecPtr fsConnectptr;
+ Uint32 cfsConnectsize;
+ Uint32 cfsFirstfreeconnect;
+/* --------------------------------------------------------------------------------- */
+/* FS_OPREC */
+/* --------------------------------------------------------------------------------- */
+ FsOprec *fsOprec;
+ FsOprecPtr fsOpptr;
+ Uint32 cfsOpsize;
+ Uint32 cfsFirstfreeop;
+/* --------------------------------------------------------------------------------- */
+/* LCP_CONNECTREC */
+/* --------------------------------------------------------------------------------- */
+ LcpConnectrec *lcpConnectrec;
+ LcpConnectrecPtr lcpConnectptr;
+ Uint32 clcpConnectsize;
+ Uint32 cfirstfreelcpConnect;
+/* --------------------------------------------------------------------------------- */
+/* OPERATIONREC */
+/* --------------------------------------------------------------------------------- */
+ Operationrec *operationrec;
+ OperationrecPtr operationRecPtr;
+ OperationrecPtr idrOperationRecPtr;
+ OperationrecPtr copyInOperPtr;
+ OperationrecPtr copyOperPtr;
+ OperationrecPtr mlpqOperPtr;
+ OperationrecPtr queOperPtr;
+ OperationrecPtr readWriteOpPtr;
+ Uint32 cfreeopRec;
+ Uint32 coprecsize;
+/* --------------------------------------------------------------------------------- */
+/* OVERFLOW_RECORD */
+/* --------------------------------------------------------------------------------- */
+ OverflowRecord *overflowRecord;
+ OverflowRecordPtr iopOverflowRecPtr;
+ OverflowRecordPtr tfoOverflowRecPtr;
+ OverflowRecordPtr porOverflowRecPtr;
+ OverflowRecordPtr priOverflowRecPtr;
+ OverflowRecordPtr rorOverflowRecPtr;
+ OverflowRecordPtr sorOverflowRecPtr;
+ OverflowRecordPtr troOverflowRecPtr;
+ Uint32 cfirstfreeoverrec;
+ Uint32 coverflowrecsize;
+
+/* --------------------------------------------------------------------------------- */
+/* PAGE8 */
+/* --------------------------------------------------------------------------------- */
+ Page8 *page8;
+ /* 8 KB PAGE */
+ Page8Ptr ancPageptr;
+ Page8Ptr colPageptr;
+ Page8Ptr ccoPageptr;
+ Page8Ptr datapageptr;
+ Page8Ptr delPageptr;
+ Page8Ptr excPageptr;
+ Page8Ptr expPageptr;
+ Page8Ptr gdiPageptr;
+ Page8Ptr gePageptr;
+ Page8Ptr gflPageptr;
+ Page8Ptr idrPageptr;
+ Page8Ptr ilcPageptr;
+ Page8Ptr inpPageptr;
+ Page8Ptr iopPageptr;
+ Page8Ptr lastPageptr;
+ Page8Ptr lastPrevpageptr;
+ Page8Ptr lcnPageptr;
+ Page8Ptr lcnCopyPageptr;
+ Page8Ptr lupPageptr;
+ Page8Ptr priPageptr;
+ Page8Ptr pwiPageptr;
+ Page8Ptr ciPageidptr;
+ Page8Ptr gsePageidptr;
+ Page8Ptr isoPageptr;
+ Page8Ptr nciPageidptr;
+ Page8Ptr rsbPageidptr;
+ Page8Ptr rscPageidptr;
+ Page8Ptr slPageidptr;
+ Page8Ptr sscPageidptr;
+ Page8Ptr rlPageptr;
+ Page8Ptr rlpPageptr;
+ Page8Ptr ropPageptr;
+ Page8Ptr rpPageptr;
+ Page8Ptr slPageptr;
+ Page8Ptr spPageptr;
+ Uint32 cfirstfreepage;
+ Uint32 cfreepage;
+ Uint32 cpagesize;
+ Uint32 cfirstfreeLcpPage;
+ Uint32 cnoOfAllocatedPages;
+ Uint32 cnoLcpPages;
+/* --------------------------------------------------------------------------------- */
+/* ROOTFRAGMENTREC */
+/* DURING THE EXPAND FRAGMENT PROCESS, EACH FRAGMENT WILL BE EXPANDED INTO TWO      */
+/* NEW FRAGMENTS. TO MAKE THIS PROCESS EASIER, THE NEXT FRAGMENT IDENTITIES ARE     */
+/* CALCULATED DURING THE ADD FRAGMENT PROCESS, AND TWO FRAGMENTS ARE ADDED IN       */
+/* (NDBACC). THEREBY THE EXPANSION OF A FRAGMENT CAN BE PERFORMED QUICKLY AND       */
+/* EASILY. THE NEW FRAGMENT ID IS SENT TO THE TUP MANAGER FOR ALL OPERATIONS.       */
+/* --------------------------------------------------------------------------------- */
+ Rootfragmentrec *rootfragmentrec;
+ RootfragmentrecPtr rootfragrecptr;
+ Uint32 crootfragmentsize;
+ Uint32 cfirstfreerootfrag;
+/* --------------------------------------------------------------------------------- */
+/* SCAN_REC */
+/* --------------------------------------------------------------------------------- */
+ ScanRec *scanRec;
+ ScanRecPtr scanPtr;
+ Uint32 cscanRecSize;
+ Uint32 cfirstFreeScanRec;
+/* --------------------------------------------------------------------------------- */
+/* SR_VERSION_REC */
+/* --------------------------------------------------------------------------------- */
+ SrVersionRec *srVersionRec;
+ SrVersionRecPtr srVersionPtr;
+ Uint32 csrVersionRecSize;
+ Uint32 cfirstFreeSrVersionRec;
+/* --------------------------------------------------------------------------------- */
+/* TABREC */
+/* --------------------------------------------------------------------------------- */
+ Tabrec *tabrec;
+ TabrecPtr tabptr;
+ Uint32 ctablesize;
+/* --------------------------------------------------------------------------------- */
+/* UNDOPAGE */
+/* --------------------------------------------------------------------------------- */
+ Undopage *undopage;
+ /* 32 KB PAGE */
+ UndopagePtr undopageptr;
+ Uint32 tpwiElementptr;
+ Uint32 tpriElementptr;
+ Uint32 tgseElementptr;
+ Uint32 tgseContainerptr;
+ Uint32 trlHead;
+ Uint32 trlRelCon;
+ Uint32 trlNextused;
+ Uint32 trlPrevused;
+ Uint32 tlcnChecksum;
+ Uint32 tlupElemIndex;
+ Uint32 tlupIndex;
+ Uint32 tlupForward;
+ Uint32 tancNext;
+ Uint32 tancBufType;
+ Uint32 tancContainerptr;
+ Uint32 tancPageindex;
+ Uint32 tancPageid;
+ Uint32 tidrResult;
+ Uint32 tidrElemhead;
+ Uint32 tidrForward;
+ Uint32 tidrPageindex;
+ Uint32 tidrContainerptr;
+ Uint32 tidrContainerhead;
+ Uint32 tlastForward;
+ Uint32 tlastPageindex;
+ Uint32 tlastContainerlen;
+ Uint32 tlastElementptr;
+ Uint32 tlastContainerptr;
+ Uint32 tlastContainerhead;
+ Uint32 trlPageindex;
+ Uint32 tdelContainerptr;
+ Uint32 tdelElementptr;
+ Uint32 tdelForward;
+ Uint32 tiopPageId;
+ Uint32 tipPageId;
+ Uint32 tgeLocked;
+ Uint32 tgeResult;
+ Uint32 tgeContainerptr;
+ Uint32 tgeElementptr;
+ Uint32 tgeForward;
+ Uint32 tundoElemIndex;
+ Uint32 texpReceivedBucket;
+ Uint32 texpDirInd;
+ Uint32 texpDirRangeIndex;
+ Uint32 texpDirPageIndex;
+ Uint32 tdata0;
+ Uint32 tcheckpointid;
+ Uint32 tciContainerptr;
+ Uint32 tnciContainerptr;
+ Uint32 tisoContainerptr;
+ Uint32 trscContainerptr;
+ Uint32 tsscContainerptr;
+ Uint32 tciContainerlen;
+ Uint32 trscContainerlen;
+ Uint32 tsscContainerlen;
+ Uint32 tciContainerhead;
+ Uint32 tnciContainerhead;
+ Uint32 tslElementptr;
+ Uint32 tisoElementptr;
+ Uint32 tsscElementptr;
+ Uint32 tfid;
+ Uint32 tscanFlag;
+ Uint32 theadundoindex;
+ Uint32 tgflBufType;
+ Uint32 tgseIsforward;
+ Uint32 tsscIsforward;
+ Uint32 trscIsforward;
+ Uint32 tciIsforward;
+ Uint32 tnciIsforward;
+ Uint32 tisoIsforward;
+ Uint32 tgseIsLocked;
+ Uint32 tsscIsLocked;
+ Uint32 tkeylen;
+ Uint32 tmp;
+ Uint32 tmpP;
+ Uint32 tmpP2;
+ Uint32 tmp1;
+ Uint32 tmp2;
+ Uint32 tgflPageindex;
+ Uint32 tmpindex;
+ Uint32 tslNextfree;
+ Uint32 tslPageindex;
+ Uint32 tgsePageindex;
+ Uint32 tnciNextSamePage;
+ Uint32 tslPrevfree;
+ Uint32 tciPageindex;
+ Uint32 trsbPageindex;
+ Uint32 tnciPageindex;
+ Uint32 tlastPrevconptr;
+ Uint32 tresult;
+ Uint32 tslUpdateHeader;
+ Uint32 tuserptr;
+ BlockReference tuserblockref;
+ Uint32 tundoindex;
+ Uint32 tlqhPointer;
+ Uint32 tholdSentOp;
+ Uint32 tholdMore;
+ Uint32 tlcpLqhCheckV;
+ Uint32 tgdiPageindex;
+ Uint32 tiopIndex;
+ Uint32 tnciTmp;
+ Uint32 tullIndex;
+ Uint32 turlIndex;
+ Uint32 tlfrTmp1;
+ Uint32 tlfrTmp2;
+ Uint32 tscanTrid1;
+ Uint32 tscanTrid2;
+
+ Uint16 clastUndoPageIdWritten;
+ Uint32 cactiveCheckpId;
+ Uint32 cactiveRootfrag;
+ Uint32 cactiveSrFsPtr;
+ Uint32 cactiveUndoFilePage;
+ Uint32 cactiveOpenUndoFsPtr;
+ Uint32 cactiveSrUndoPage;
+ Uint32 cprevUndoaddress;
+ Uint32 creadyUndoaddress;
+ Uint32 ctest;
+ Uint32 cundoLogActive;
+ Uint32 clqhPtr;
+ BlockReference clqhBlockRef;
+ Uint32 cminusOne;
+ NodeId cmynodeid;
+ Uint32 cactiveUndoFileVersion;
+ BlockReference cownBlockref;
+ BlockReference cndbcntrRef;
+ Uint16 csignalkey;
+ Uint32 cundopagesize;
+ Uint32 cundoposition;
+ Uint32 cundoElemIndex;
+ Uint32 cundoinfolength;
+ Uint32 czero;
+ Uint32 csrVersList[16];
+ Uint32 clblPageCounter;
+ Uint32 clblPageOver;
+ Uint32 clblPagesPerTick;
+ Uint32 clblPagesPerTickAfterSr;
+ Uint32 csystemRestart;
+ Uint32 cexcForward;
+ Uint32 cexcPageindex;
+ Uint32 cexcContainerptr;
+ Uint32 cexcContainerhead;
+ Uint32 cexcContainerlen;
+ Uint32 cexcElementptr;
+ Uint32 cexcPrevconptr;
+ Uint32 cexcMovedLen;
+ Uint32 cexcPrevpageptr;
+ Uint32 cexcPrevpageindex;
+ Uint32 cexcPrevforward;
+ Uint32 clocalkey[32];
+ union {
+ Uint32 ckeys[2048];
+ Uint64 ckeys_align;
+ };
+
+ Uint32 c_errorInsert3000_TableId;
+ Uint32 cSrUndoRecords[UndoHeader::ZNO_UNDORECORD_TYPES];
+};
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
new file mode 100644
index 00000000000..ccc65ccf9fa
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
@@ -0,0 +1,343 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+
+#define DBACC_C
+#include "Dbacc.hpp"
+
+#define DEBUG(x) { ndbout << "ACC::" << x << endl; }
+
+void Dbacc::initData()
+{
+ cdirarraysize = ZDIRARRAY;
+ coprecsize = ZOPRECSIZE;
+ cpagesize = ZPAGESIZE;
+ clcpConnectsize = ZLCP_CONNECTSIZE;
+ ctablesize = ZTABLESIZE;
+ cfragmentsize = ZFRAGMENTSIZE;
+ crootfragmentsize = ZROOTFRAGMENTSIZE;
+ cdirrangesize = ZDIRRANGESIZE;
+ coverflowrecsize = ZOVERFLOWRECSIZE;
+ cfsConnectsize = ZFS_CONNECTSIZE;
+ cfsOpsize = ZFS_OPSIZE;
+ cscanRecSize = ZSCAN_REC_SIZE;
+ csrVersionRecSize = ZSR_VERSION_REC_SIZE;
+
+
+ dirRange = 0;
+ directoryarray = 0;
+ fragmentrec = 0;
+ fsConnectrec = 0;
+ fsOprec = 0;
+ lcpConnectrec = 0;
+ operationrec = 0;
+ overflowRecord = 0;
+ page8 = 0;
+ rootfragmentrec = 0;
+ scanRec = 0;
+ srVersionRec = 0;
+ tabrec = 0;
+ undopage = 0;
+
+ // Records with constant sizes
+}//Dbacc::initData()
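+
+/* Note: initData() only records the configured pool sizes and clears the
+ * record pointers; the memory itself is allocated in initRecords() below. */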
+
+void Dbacc::initRecords()
+{
+ // Records with dynamic sizes
+ dirRange = (DirRange*)allocRecord("DirRange",
+ sizeof(DirRange),
+ cdirrangesize);
+
+ directoryarray = (Directoryarray*)allocRecord("Directoryarray",
+ sizeof(Directoryarray),
+ cdirarraysize);
+
+ fragmentrec = (Fragmentrec*)allocRecord("Fragmentrec",
+ sizeof(Fragmentrec),
+ cfragmentsize);
+
+ fsConnectrec = (FsConnectrec*)allocRecord("FsConnectrec",
+ sizeof(FsConnectrec),
+ cfsConnectsize);
+
+ fsOprec = (FsOprec*)allocRecord("FsOprec",
+ sizeof(FsOprec),
+ cfsOpsize);
+
+ lcpConnectrec = (LcpConnectrec*)allocRecord("LcpConnectrec",
+ sizeof(LcpConnectrec),
+ clcpConnectsize);
+
+ operationrec = (Operationrec*)allocRecord("Operationrec",
+ sizeof(Operationrec),
+ coprecsize);
+
+ overflowRecord = (OverflowRecord*)allocRecord("OverflowRecord",
+ sizeof(OverflowRecord),
+ coverflowrecsize);
+
+ page8 = (Page8*)allocRecord("Page8",
+ sizeof(Page8),
+ cpagesize,
+ false);
+
+ rootfragmentrec = (Rootfragmentrec*)allocRecord("Rootfragmentrec",
+ sizeof(Rootfragmentrec),
+ crootfragmentsize);
+
+ scanRec = (ScanRec*)allocRecord("ScanRec",
+ sizeof(ScanRec),
+ cscanRecSize);
+
+ srVersionRec = (SrVersionRec*)allocRecord("SrVersionRec",
+ sizeof(SrVersionRec),
+ csrVersionRecSize);
+
+ tabrec = (Tabrec*)allocRecord("Tabrec",
+ sizeof(Tabrec),
+ ctablesize);
+
+ undopage = (Undopage*)allocRecord("Undopage",
+ sizeof(Undopage),
+ cundopagesize,
+ false);
+
+ // Initialize BAT for interface to file system
+
+ NewVARIABLE* bat = allocateBat(3);
+ bat[1].WA = &page8->word32[0];
+ bat[1].nrr = cpagesize;
+ bat[1].ClusterSize = sizeof(Page8);
+ bat[1].bits.q = 11;
+ bat[1].bits.v = 5;
+ bat[2].WA = &undopage->undoword[0];
+ bat[2].nrr = cundopagesize;
+ bat[2].ClusterSize = sizeof(Undopage);
+ bat[2].bits.q = 13;
+ bat[2].bits.v = 5;
+}//Dbacc::initRecords()
+
+Dbacc::Dbacc(const class Configuration & conf):
+ SimulatedBlock(DBACC, conf),
+ c_tup(0)
+{
+ Uint32 log_page_size= 0;
+ BLOCK_CONSTRUCTOR(Dbacc);
+
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_UNDO_INDEX_BUFFER,
+ &log_page_size);
+
+ /**
+ * Always set page size in half MBytes
+ */
+ cundopagesize= (log_page_size / sizeof(Undopage));
+ Uint32 mega_byte_part= cundopagesize & 15;
+ if (mega_byte_part != 0) {
+ jam();
+ cundopagesize+= (16 - mega_byte_part);
+ }
+
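+ /* For illustration: sizeof(Undopage) is 32 KB (8192 words of 4 bytes), so 16
+ pages make up half a megabyte, and rounding cundopagesize up to the next
+ multiple of 16 above rounds the undo index buffer up to the next half MByte. */
+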
+ // Transit signals
+ addRecSignal(GSN_DUMP_STATE_ORD, &Dbacc::execDUMP_STATE_ORD);
+ addRecSignal(GSN_DEBUG_SIG, &Dbacc::execDEBUG_SIG);
+ addRecSignal(GSN_CONTINUEB, &Dbacc::execCONTINUEB);
+ addRecSignal(GSN_ACC_CHECK_SCAN, &Dbacc::execACC_CHECK_SCAN);
+ addRecSignal(GSN_EXPANDCHECK2, &Dbacc::execEXPANDCHECK2);
+ addRecSignal(GSN_SHRINKCHECK2, &Dbacc::execSHRINKCHECK2);
+ addRecSignal(GSN_ACC_OVER_REC, &Dbacc::execACC_OVER_REC);
+ addRecSignal(GSN_ACC_SAVE_PAGES, &Dbacc::execACC_SAVE_PAGES);
+ addRecSignal(GSN_NEXTOPERATION, &Dbacc::execNEXTOPERATION);
+ addRecSignal(GSN_READ_PSEUDO_REQ, &Dbacc::execREAD_PSEUDO_REQ);
+
+ // Received signals
+ addRecSignal(GSN_STTOR, &Dbacc::execSTTOR);
+ addRecSignal(GSN_SR_FRAGIDREQ, &Dbacc::execSR_FRAGIDREQ);
+ addRecSignal(GSN_LCP_FRAGIDREQ, &Dbacc::execLCP_FRAGIDREQ);
+ addRecSignal(GSN_LCP_HOLDOPREQ, &Dbacc::execLCP_HOLDOPREQ);
+ addRecSignal(GSN_END_LCPREQ, &Dbacc::execEND_LCPREQ);
+ addRecSignal(GSN_ACC_LCPREQ, &Dbacc::execACC_LCPREQ);
+ addRecSignal(GSN_START_RECREQ, &Dbacc::execSTART_RECREQ);
+ addRecSignal(GSN_ACC_CONTOPREQ, &Dbacc::execACC_CONTOPREQ);
+ addRecSignal(GSN_ACCKEYREQ, &Dbacc::execACCKEYREQ);
+ addRecSignal(GSN_ACCSEIZEREQ, &Dbacc::execACCSEIZEREQ);
+ addRecSignal(GSN_ACCFRAGREQ, &Dbacc::execACCFRAGREQ);
+ addRecSignal(GSN_TC_SCHVERREQ, &Dbacc::execTC_SCHVERREQ);
+ addRecSignal(GSN_ACC_SRREQ, &Dbacc::execACC_SRREQ);
+ addRecSignal(GSN_NEXT_SCANREQ, &Dbacc::execNEXT_SCANREQ);
+ addRecSignal(GSN_ACC_ABORTREQ, &Dbacc::execACC_ABORTREQ);
+ addRecSignal(GSN_ACC_SCANREQ, &Dbacc::execACC_SCANREQ);
+ addRecSignal(GSN_ACCMINUPDATE, &Dbacc::execACCMINUPDATE);
+ addRecSignal(GSN_ACC_COMMITREQ, &Dbacc::execACC_COMMITREQ);
+ addRecSignal(GSN_ACC_TO_REQ, &Dbacc::execACC_TO_REQ);
+ addRecSignal(GSN_ACC_LOCKREQ, &Dbacc::execACC_LOCKREQ);
+ addRecSignal(GSN_FSOPENCONF, &Dbacc::execFSOPENCONF);
+ addRecSignal(GSN_FSOPENREF, &Dbacc::execFSOPENREF);
+ addRecSignal(GSN_FSCLOSECONF, &Dbacc::execFSCLOSECONF);
+ addRecSignal(GSN_FSCLOSEREF, &Dbacc::execFSCLOSEREF);
+ addRecSignal(GSN_FSWRITECONF, &Dbacc::execFSWRITECONF);
+ addRecSignal(GSN_FSWRITEREF, &Dbacc::execFSWRITEREF);
+ addRecSignal(GSN_FSREADCONF, &Dbacc::execFSREADCONF);
+ addRecSignal(GSN_FSREADREF, &Dbacc::execFSREADREF);
+ addRecSignal(GSN_NDB_STTOR, &Dbacc::execNDB_STTOR);
+ addRecSignal(GSN_DROP_TAB_REQ, &Dbacc::execDROP_TAB_REQ);
+ addRecSignal(GSN_FSREMOVECONF, &Dbacc::execFSREMOVECONF);
+ addRecSignal(GSN_FSREMOVEREF, &Dbacc::execFSREMOVEREF);
+ addRecSignal(GSN_READ_CONFIG_REQ, &Dbacc::execREAD_CONFIG_REQ, true);
+ addRecSignal(GSN_SET_VAR_REQ, &Dbacc::execSET_VAR_REQ);
+
+ initData();
+
+#ifdef VM_TRACE
+ {
+ void* tmp[] = { &expDirRangePtr,
+ &gnsDirRangePtr,
+ &newDirRangePtr,
+ &rdDirRangePtr,
+ &nciOverflowrangeptr,
+ &expDirptr,
+ &rdDirptr,
+ &sdDirptr,
+ &nciOverflowDirptr,
+ &fragrecptr,
+ &fsConnectptr,
+ &fsOpptr,
+ &lcpConnectptr,
+ &operationRecPtr,
+ &idrOperationRecPtr,
+ &copyInOperPtr,
+ &copyOperPtr,
+ &mlpqOperPtr,
+ &queOperPtr,
+ &readWriteOpPtr,
+ &iopOverflowRecPtr,
+ &tfoOverflowRecPtr,
+ &porOverflowRecPtr,
+ &priOverflowRecPtr,
+ &rorOverflowRecPtr,
+ &sorOverflowRecPtr,
+ &troOverflowRecPtr,
+ &ancPageptr,
+ &colPageptr,
+ &ccoPageptr,
+ &datapageptr,
+ &delPageptr,
+ &excPageptr,
+ &expPageptr,
+ &gdiPageptr,
+ &gePageptr,
+ &gflPageptr,
+ &idrPageptr,
+ &ilcPageptr,
+ &inpPageptr,
+ &iopPageptr,
+ &lastPageptr,
+ &lastPrevpageptr,
+ &lcnPageptr,
+ &lcnCopyPageptr,
+ &lupPageptr,
+ &priPageptr,
+ &pwiPageptr,
+ &ciPageidptr,
+ &gsePageidptr,
+ &isoPageptr,
+ &nciPageidptr,
+ &rsbPageidptr,
+ &rscPageidptr,
+ &slPageidptr,
+ &sscPageidptr,
+ &rlPageptr,
+ &rlpPageptr,
+ &ropPageptr,
+ &rpPageptr,
+ &slPageptr,
+ &spPageptr,
+ &rootfragrecptr,
+ &scanPtr,
+ &srVersionPtr,
+ &tabptr,
+ &undopageptr
+ };
+ init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0]));
+ }
+#endif
+}//Dbacc::Dbacc()
+
+Dbacc::~Dbacc()
+{
+ deallocRecord((void **)&dirRange, "DirRange",
+ sizeof(DirRange),
+ cdirrangesize);
+
+ deallocRecord((void **)&directoryarray, "Directoryarray",
+ sizeof(Directoryarray),
+ cdirarraysize);
+
+ deallocRecord((void **)&fragmentrec, "Fragmentrec",
+ sizeof(Fragmentrec),
+ cfragmentsize);
+
+ deallocRecord((void **)&fsConnectrec, "FsConnectrec",
+ sizeof(FsConnectrec),
+ cfsConnectsize);
+
+ deallocRecord((void **)&fsOprec, "FsOprec",
+ sizeof(FsOprec),
+ cfsOpsize);
+
+ deallocRecord((void **)&lcpConnectrec, "LcpConnectrec",
+ sizeof(LcpConnectrec),
+ clcpConnectsize);
+
+ deallocRecord((void **)&operationrec, "Operationrec",
+ sizeof(Operationrec),
+ coprecsize);
+
+ deallocRecord((void **)&overflowRecord, "OverflowRecord",
+ sizeof(OverflowRecord),
+ coverflowrecsize);
+
+ deallocRecord((void **)&page8, "Page8",
+ sizeof(Page8),
+ cpagesize);
+
+ deallocRecord((void **)&rootfragmentrec, "Rootfragmentrec",
+ sizeof(Rootfragmentrec),
+ crootfragmentsize);
+
+ deallocRecord((void **)&scanRec, "ScanRec",
+ sizeof(ScanRec),
+ cscanRecSize);
+
+ deallocRecord((void **)&srVersionRec, "SrVersionRec",
+ sizeof(SrVersionRec),
+ csrVersionRecSize);
+
+ deallocRecord((void **)&tabrec, "Tabrec",
+ sizeof(Tabrec),
+ ctablesize);
+
+ deallocRecord((void **)&undopage, "Undopage",
+ sizeof(Undopage),
+ cundopagesize);
+
+}//Dbacc::~Dbacc()
+
+BLOCK_FUNCTIONS(Dbacc)
diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
new file mode 100644
index 00000000000..a8bb0ab894c
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -0,0 +1,11817 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBACC_C
+#include "Dbacc.hpp"
+#include <my_sys.h>
+
+#include <AttributeHeader.hpp>
+#include <signaldata/AccFrag.hpp>
+#include <signaldata/AccScan.hpp>
+#include <signaldata/AccLock.hpp>
+#include <signaldata/EventReport.hpp>
+#include <signaldata/FsConf.hpp>
+#include <signaldata/FsRef.hpp>
+#include <signaldata/FsRemoveReq.hpp>
+#include <signaldata/DropTab.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+#include <SectionReader.hpp>
+
+// TO_DO_RONM is a label for comments on what needs to be improved in future versions
+// when more time is given.
+
+#ifdef VM_TRACE
+#define DEBUG(x) ndbout << "DBACC: "<< x << endl;
+#else
+#define DEBUG(x)
+#endif
+
+
+Uint32
+Dbacc::remainingUndoPages(){
+ Uint32 HeadPage = cundoposition >> ZUNDOPAGEINDEXBITS;
+ Uint32 TailPage = clastUndoPageIdWritten;
+
+ // Head must be larger or same as tail
+ ndbrequire(HeadPage>=TailPage);
+
+ Uint32 UsedPages = HeadPage - TailPage;
+ Int32 Remaining = cundopagesize - UsedPages;
+
+ // There can not be more than cundopagesize remaining
+ if (Remaining <= 0){
+ // No more undolog, crash node
+ progError(__LINE__,
+ ERR_NO_MORE_UNDOLOG,
+ "There are more than 1Mbyte undolog writes outstanding");
+ }
+ return Remaining;
+}
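+
+/* Worked example (numbers assumed for illustration): with cundopagesize == 16,
+ * a head page of 10 and a tail page of 4 give UsedPages == 6, so 10 undo pages
+ * remain; once the head runs a full cundopagesize ahead of the tail, the node
+ * is stopped above with ERR_NO_MORE_UNDOLOG.
+ */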
+
+void
+Dbacc::updateLastUndoPageIdWritten(Signal* signal, Uint32 aNewValue){
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_COMMIT) {
+ clastUndoPageIdWritten = aNewValue;
+ if (remainingUndoPages() >= ZMIN_UNDO_PAGES_AT_COMMIT) {
+ jam();
+ EXECUTE_DIRECT(DBLQH, GSN_ACC_COM_UNBLOCK, signal, 1);
+ jamEntry();
+ }//if
+ } else {
+ clastUndoPageIdWritten = aNewValue;
+ }//if
+}//Dbacc::updateLastUndoPageIdWritten()
+
+void
+Dbacc::updateUndoPositionPage(Signal* signal, Uint32 aNewValue){
+ if (remainingUndoPages() >= ZMIN_UNDO_PAGES_AT_COMMIT) {
+ cundoposition = aNewValue;
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_COMMIT) {
+ jam();
+ EXECUTE_DIRECT(DBLQH, GSN_ACC_COM_BLOCK, signal, 1);
+ jamEntry();
+ }//if
+ } else {
+ cundoposition = aNewValue;
+ }//if
+}//Dbacc::updateUndoPositionPage()
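+
+/* Taken together, the two update routines above form a block/unblock handshake
+ * with DBLQH: when the remaining undo pages drop below ZMIN_UNDO_PAGES_AT_COMMIT,
+ * ACC_COM_BLOCK is executed directly, and once enough undo pages have been
+ * written out again, ACC_COM_UNBLOCK is executed to lift the block.
+ */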
+
+// Signal entries and statement blocks
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* COMMON SIGNAL RECEPTION MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+
+/* --------------------------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* CONTINUEB CONTINUE SIGNAL */
+/* ******************------------------------------+ */
+/* SENDER: ACC, LEVEL B */
+void Dbacc::execCONTINUEB(Signal* signal)
+{
+ Uint32 tcase;
+
+ jamEntry();
+ tcase = signal->theData[0];
+ tdata0 = signal->theData[1];
+ tresult = 0;
+ switch (tcase) {
+ case ZLOAD_BAL_LCP_TIMER:
+ if (clblPageOver == 0) {
+ jam();
+ clblPageCounter = clblPagesPerTick;
+ } else {
+ if (clblPageOver > clblPagesPerTick) {
+ jam();
+ clblPageOver = clblPageOver - clblPagesPerTick;
+ } else {
+ jam();
+ clblPageOver = 0;
+ clblPageCounter = clblPagesPerTick - clblPageOver;
+ }//if
+ }//if
+ signal->theData[0] = ZLOAD_BAL_LCP_TIMER;
+ sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 100, 1);
+ return;
+ break;
+ case ZINITIALISE_RECORDS:
+ jam();
+ initialiseRecordsLab(signal, signal->theData[3], signal->theData[4]);
+ return;
+ break;
+ case ZSR_READ_PAGES_ALLOC:
+ jam();
+ fragrecptr.i = tdata0;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ srReadPagesAllocLab(signal);
+ return;
+ break;
+ case ZSTART_UNDO:
+ jam();
+ startUndoLab(signal);
+ return;
+ break;
+ case ZSEND_SCAN_HBREP:
+ jam();
+ sendScanHbRep(signal, tdata0);
+ break;
+ case ZREL_ROOT_FRAG:
+ {
+ jam();
+ Uint32 tableId = signal->theData[1];
+ releaseRootFragResources(signal, tableId);
+ break;
+ }
+ case ZREL_FRAG:
+ {
+ jam();
+ Uint32 fragIndex = signal->theData[1];
+ releaseFragResources(signal, fragIndex);
+ break;
+ }
+ case ZREL_DIR:
+ {
+ jam();
+ Uint32 fragIndex = signal->theData[1];
+ Uint32 dirIndex = signal->theData[2];
+ Uint32 startIndex = signal->theData[3];
+ releaseDirResources(signal, fragIndex, dirIndex, startIndex);
+ break;
+ }
+ case ZREPORT_MEMORY_USAGE:{
+ jam();
+ static int c_currentMemUsed = 0;
+ int now = (cnoOfAllocatedPages * 100)/cpagesize;
+ const int thresholds[] = { 99, 90, 80, 0};
+
+ Uint32 i = 0;
+ const Uint32 sz = sizeof(thresholds)/sizeof(thresholds[0]);
+ for(i = 0; i<sz; i++){
+ if(now >= thresholds[i]){
+ now = thresholds[i];
+ break;
+ }
+ }
+
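+ // For illustration (numbers assumed): with cpagesize == 1000 and 853 pages
+ // allocated, now starts at 85 and is clamped down to the 80 threshold; the
+ // report below is only sent when this clamped value changes.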
+ if(now != c_currentMemUsed){
+ reportMemoryUsage(signal, now > c_currentMemUsed ? 1 : -1);
+ }
+
+ c_currentMemUsed = now;
+
+ signal->theData[0] = ZREPORT_MEMORY_USAGE;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 2000, 1);
+ return;
+ }
+
+ case ZLCP_OP_WRITE_RT_BREAK:
+ {
+ operationRecPtr.i= signal->theData[1];
+ fragrecptr.i= signal->theData[2];
+ lcpConnectptr.i= signal->theData[3];
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ lcp_write_op_to_undolog(signal);
+ return;
+ }
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbacc::execCONTINUEB()
+
+/* ******************--------------------------------------------------------------- */
+/* FSCLOSECONF CLOSE FILE CONF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSCLOSECONF(Signal* signal)
+{
+ jamEntry();
+ fsConnectptr.i = signal->theData[0];
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ tresult = 0;
+ switch (fsConnectptr.p->fsState) {
+ case WAIT_CLOSE_UNDO:
+ jam();
+ releaseFsConnRec(signal);
+ break;
+ case LCP_CLOSE_DATA:
+ jam();
+ checkSyncUndoPagesLab(signal);
+ return;
+ break;
+ case SR_CLOSE_DATA:
+ jam();
+ sendaccSrconfLab(signal);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbacc::execFSCLOSECONF()
+
+/* ******************--------------------------------------------------------------- */
+/* FSCLOSEREF OPENFILE CONF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSCLOSEREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dbacc::execFSCLOSEREF()
+
+/* ******************--------------------------------------------------------------- */
+/* FSOPENCONF OPENFILE CONF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSOPENCONF(Signal* signal)
+{
+ jamEntry();
+ fsConnectptr.i = signal->theData[0];
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ tuserptr = signal->theData[1];
+ tresult = 0; /* RESULT CHECK VALUE */
+ switch (fsConnectptr.p->fsState) {
+ case WAIT_OPEN_UNDO_LCP:
+ jam();
+ lcpOpenUndofileConfLab(signal);
+ return;
+ break;
+ case WAIT_OPEN_UNDO_LCP_NEXT:
+ jam();
+ fsConnectptr.p->fsPtr = tuserptr;
+ return;
+ break;
+ case OPEN_UNDO_FILE_SR:
+ jam();
+ fsConnectptr.p->fsPtr = tuserptr;
+ srStartUndoLab(signal);
+ return;
+ break;
+ case WAIT_OPEN_DATA_FILE_FOR_WRITE:
+ jam();
+ lcpFsOpenConfLab(signal);
+ return;
+ break;
+ case WAIT_OPEN_DATA_FILE_FOR_READ:
+ jam();
+ fsConnectptr.p->fsPtr = tuserptr;
+ srFsOpenConfLab(signal);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbacc::execFSOPENCONF()
+
+/* ******************--------------------------------------------------------------- */
+/* FSOPENREF OPENFILE REF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSOPENREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dbacc::execFSOPENREF()
+
+/* ******************--------------------------------------------------------------- */
+/* FSREADCONF READ FILE CONF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSREADCONF(Signal* signal)
+{
+ jamEntry();
+ fsConnectptr.i = signal->theData[0];
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ tresult = 0; /* RESULT CHECK VALUE */
+ switch (fsConnectptr.p->fsState) {
+ case WAIT_READ_PAGE_ZERO:
+ jam();
+ fragrecptr.i = fsConnectptr.p->fragrecPtr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ srReadPageZeroLab(signal);
+ return;
+ break;
+ case WAIT_READ_DATA:
+ jam();
+ fragrecptr.i = fsConnectptr.p->fragrecPtr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ storeDataPageInDirectoryLab(signal);
+ return;
+ break;
+ case READ_UNDO_PAGE:
+ jam();
+ srDoUndoLab(signal);
+ return;
+ break;
+ case READ_UNDO_PAGE_AND_CLOSE:
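+    // All undo pages needed from this file have been read; request NDBFS to
+    // close the file and continue executing the undo log from the page just
+    // read.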
+ jam();
+ fsConnectptr.p->fsState = WAIT_CLOSE_UNDO;
+ /* ************************ */
+ /* FSCLOSEREQ */
+ /* ************************ */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = 0;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+ /* FLAG = DO NOT DELETE FILE */
+ srDoUndoLab(signal);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbacc::execFSREADCONF()
+
+/* ******************--------------------------------------------------------------- */
+/* FSREADREF READ FILE REF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSREADREF(Signal* signal)
+{
+ jamEntry();
+ progError(0, __LINE__, "Read of file refused");
+ return;
+}//Dbacc::execFSREADREF()
+
+/* ******************--------------------------------------------------------------- */
+/* FSWRITECONF WRITE FILE CONF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSWRITECONF(Signal* signal)
+{
+ jamEntry();
+ fsOpptr.i = signal->theData[0];
+ ptrCheckGuard(fsOpptr, cfsOpsize, fsOprec);
+ /* FS_OPERATION PTR */
+ tresult = 0; /* RESULT CHECK VALUE */
+ fsConnectptr.i = fsOpptr.p->fsConptr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ fragrecptr.i = fsOpptr.p->fsOpfragrecPtr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ switch (fsOpptr.p->fsOpstate) {
+ case WAIT_WRITE_UNDO:
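+    // An undo page write completed. If no exit-writes remain outstanding the
+    // LCP can be confirmed at once; otherwise remember that the last undo
+    // page has reached disk so the final exit-write can confirm the LCP.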
+ jam();
+ updateLastUndoPageIdWritten(signal, fsOpptr.p->fsOpMemPage);
+ releaseFsOpRec(signal);
+ if (fragrecptr.p->nrWaitWriteUndoExit == 0) {
+ jam();
+ checkSendLcpConfLab(signal);
+ return;
+ } else {
+ jam();
+ fragrecptr.p->lastUndoIsStored = ZTRUE;
+ }//if
+ return;
+ break;
+ case WAIT_WRITE_UNDO_EXIT:
+ jam();
+ updateLastUndoPageIdWritten(signal, fsOpptr.p->fsOpMemPage);
+ releaseFsOpRec(signal);
+ if (fragrecptr.p->nrWaitWriteUndoExit > 0) {
+ jam();
+ fragrecptr.p->nrWaitWriteUndoExit--;
+ }//if
+ if (fsConnectptr.p->fsState == WAIT_CLOSE_UNDO) {
+ jam();
+ /* ************************ */
+ /* FSCLOSEREQ */
+ /* ************************ */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = ZFALSE;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+ }//if
+ if (fragrecptr.p->nrWaitWriteUndoExit == 0) {
+ if (fragrecptr.p->lastUndoIsStored == ZTRUE) {
+ jam();
+ fragrecptr.p->lastUndoIsStored = ZFALSE;
+ checkSendLcpConfLab(signal);
+ return;
+ }//if
+ }//if
+ return;
+ break;
+ case WAIT_WRITE_DATA:
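+    // A batch of data pages reached disk; advance the LCP page-save state
+    // machine for the fragment (normal pages, overflow pages, zero page,
+    // then close of the data file).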
+ jam();
+ releaseFsOpRec(signal);
+ fragrecptr.p->activeDataFilePage += ZWRITEPAGESIZE;
+ fragrecptr.p->activeDataPage = 0;
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ switch (fragrecptr.p->fragState) {
+ case LCP_SEND_PAGES:
+ jam();
+ savepagesLab(signal);
+ return;
+ break;
+ case LCP_SEND_OVER_PAGES:
+ jam();
+ saveOverPagesLab(signal);
+ return;
+ break;
+ case LCP_SEND_ZERO_PAGE:
+ jam();
+ saveZeroPageLab(signal);
+ return;
+ break;
+ case WAIT_ZERO_PAGE_STORED:
+ jam();
+ lcpCloseDataFileLab(signal);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbacc::execFSWRITECONF()
+
+/* ******************--------------------------------------------------------------- */
+/* FSWRITEREF WRITE FILE REF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSWRITEREF(Signal* signal)
+{
+ jamEntry();
+ progError(0, __LINE__, "Write to file refused");
+ return;
+}//Dbacc::execFSWRITEREF()
+
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* */
+/* END OF COMMON SIGNAL RECEPTION MODULE */
+/* */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* */
+/* SYSTEM RESTART MODULE */
+/* */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+void Dbacc::execNDB_STTOR(Signal* signal)
+{
+ Uint32 tstartphase;
+ Uint32 tStartType;
+
+ jamEntry();
+ cndbcntrRef = signal->theData[0];
+ cmynodeid = signal->theData[1];
+ tstartphase = signal->theData[2];
+ tStartType = signal->theData[3];
+ switch (tstartphase) {
+ case ZSPH1:
+ jam();
+ ndbsttorryLab(signal);
+ return;
+ break;
+ case ZSPH2:
+ cnoLcpPages = 2 * (ZWRITEPAGESIZE + 1);
+ initialiseLcpPages(signal);
+ ndbsttorryLab(signal);
+ return;
+ break;
+ case ZSPH3:
+ if ((tStartType == NodeState::ST_NODE_RESTART) ||
+ (tStartType == NodeState::ST_INITIAL_NODE_RESTART)) {
+ jam();
+ //---------------------------------------------
+ // csystemRestart is used to check what is needed
+ // during log execution. When starting a node it
+      // is not a log execution but rather a normal
+ // execution. Thus we reset the variable here to
+ // avoid unnecessary system crashes.
+ //---------------------------------------------
+ csystemRestart = ZFALSE;
+ }//if
+
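+    // Start the periodic ZLOAD_BAL_LCP_TIMER tick (CONTINUEB every 100 ms),
+    // used to pace checkpoint page writes (clblPagesPerTick/clblPageCounter).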
+ signal->theData[0] = ZLOAD_BAL_LCP_TIMER;
+ sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 100, 1);
+ break;
+ case ZSPH6:
+ jam();
+ clblPagesPerTick = clblPagesPerTickAfterSr;
+ csystemRestart = ZFALSE;
+
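+    // The system restart is complete: switch to the normal page-write rate
+    // and start the periodic memory usage reporting loop (every 2000 ms).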
+ signal->theData[0] = ZREPORT_MEMORY_USAGE;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 2000, 1);
+ break;
+ default:
+ jam();
+ /*empty*/;
+ break;
+ }//switch
+ ndbsttorryLab(signal);
+ return;
+}//Dbacc::execNDB_STTOR()
+
+/* ******************--------------------------------------------------------------- */
+/* STTOR START / RESTART */
+/* ******************------------------------------+ */
+/* SENDER: ANY, LEVEL B */
+void Dbacc::execSTTOR(Signal* signal)
+{
+ jamEntry();
+ Uint32 tstartphase = signal->theData[1];
+ switch (tstartphase) {
+ case 1:
+ jam();
+ c_tup = (Dbtup*)globalData.getBlock(DBTUP);
+ ndbrequire(c_tup != 0);
+ break;
+ }
+ tuserblockref = signal->theData[3];
+ csignalkey = signal->theData[6];
+ sttorrysignalLab(signal);
+ return;
+}//Dbacc::execSTTOR()
+
+/* --------------------------------------------------------------------------------- */
+/* ZSPH1 */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::ndbrestart1Lab(Signal* signal)
+{
+ cmynodeid = globalData.ownId;
+ cownBlockref = numberToRef(DBACC, cmynodeid);
+ czero = 0;
+ cminusOne = czero - 1;
+ ctest = 0;
+ cundoLogActive = ZFALSE;
+ csystemRestart = ZTRUE;
+ clblPageOver = 0;
+ clblPageCounter = 0;
+ cactiveUndoFilePage = 0;
+ cprevUndoaddress = cminusOne;
+ cundoposition = 0;
+ clastUndoPageIdWritten = 0;
+ cactiveUndoFileVersion = RNIL;
+ cactiveOpenUndoFsPtr = RNIL;
+ for (Uint32 tmp = 0; tmp < ZMAX_UNDO_VERSION; tmp++) {
+ csrVersList[tmp] = RNIL;
+ }//for
+ return;
+}//Dbacc::ndbrestart1Lab()
+
+void Dbacc::initialiseRecordsLab(Signal* signal, Uint32 ref, Uint32 data)
+{
+ switch (tdata0) {
+ case 0:
+ jam();
+ initialiseTableRec(signal);
+ break;
+ case 1:
+ jam();
+ initialiseFsConnectionRec(signal);
+ break;
+ case 2:
+ jam();
+ initialiseFsOpRec(signal);
+ break;
+ case 3:
+ jam();
+ initialiseLcpConnectionRec(signal);
+ break;
+ case 4:
+ jam();
+ initialiseDirRec(signal);
+ break;
+ case 5:
+ jam();
+ initialiseDirRangeRec(signal);
+ break;
+ case 6:
+ jam();
+ initialiseFragRec(signal);
+ break;
+ case 7:
+ jam();
+ initialiseOverflowRec(signal);
+ break;
+ case 8:
+ jam();
+ initialiseOperationRec(signal);
+ break;
+ case 9:
+ jam();
+ initialisePageRec(signal);
+ break;
+ case 10:
+ jam();
+ initialiseRootfragRec(signal);
+ break;
+ case 11:
+ jam();
+ initialiseScanRec(signal);
+ break;
+ case 12:
+ jam();
+ initialiseSrVerRec(signal);
+
+ {
+ ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = data;
+ sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
+ ReadConfigConf::SignalLength, JBB);
+ }
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+
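+  // Initialise one record pool per invocation and reschedule via CONTINUEB
+  // for the next pool, so a single signal never blocks the job buffer with
+  // one long initialisation loop.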
+ signal->theData[0] = ZINITIALISE_RECORDS;
+ signal->theData[1] = tdata0 + 1;
+ signal->theData[2] = 0;
+ signal->theData[3] = ref;
+ signal->theData[4] = data;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
+ return;
+}//Dbacc::initialiseRecordsLab()
+
+/* *********************************<< */
+/* NDB_STTORRY */
+/* *********************************<< */
+void Dbacc::ndbsttorryLab(Signal* signal)
+{
+ signal->theData[0] = cownBlockref;
+ sendSignal(cndbcntrRef, GSN_NDB_STTORRY, signal, 1, JBB);
+ return;
+}//Dbacc::ndbsttorryLab()
+
+/* *********************************<< */
+/* READ_CONFIG_REQ READ CONFIGURATION */
+/* *********************************<< */
+void Dbacc::execREAD_CONFIG_REQ(Signal* signal)
+{
+ const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
+ Uint32 ref = req->senderRef;
+ Uint32 senderData = req->senderData;
+ ndbrequire(req->noOfParameters == 0);
+
+ jamEntry();
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_DIR_RANGE, &cdirrangesize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_DIR_ARRAY, &cdirarraysize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_FRAGMENT, &cfragmentsize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_OP_RECS, &coprecsize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_OVERFLOW_RECS,
+ &coverflowrecsize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_PAGE8, &cpagesize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_ROOT_FRAG,
+ &crootfragmentsize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_TABLE, &ctablesize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_SCAN, &cscanRecSize));
+ initRecords();
+ ndbrestart1Lab(signal);
+
+ clblPagesPerTick = 50;
+ //ndb_mgm_get_int_parameter(p, CFG_DB_, &clblPagesPerTick);
+
+ clblPagesPerTickAfterSr = 50;
+ //ndb_mgm_get_int_parameter(p, CFG_DB_, &clblPagesPerTickAfterSr);
+
+ tdata0 = 0;
+ initialiseRecordsLab(signal, ref, senderData);
+ return;
+}//Dbacc::execREAD_CONFIG_REQ()
+
+/* *********************************<< */
+/* STTORRY */
+/* *********************************<< */
+void Dbacc::sttorrysignalLab(Signal* signal)
+{
+ signal->theData[0] = csignalkey;
+ signal->theData[1] = 3;
+ /* BLOCK CATEGORY */
+ signal->theData[2] = 2;
+ /* SIGNAL VERSION NUMBER */
+ signal->theData[3] = ZSPH1;
+ signal->theData[4] = 255;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
+ /* END OF START PHASES */
+ return;
+}//Dbacc::sttorrysignalLab()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_DIR_REC */
+/* INITIALISES THE DIRECTORY RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseDirRec(Signal* signal)
+{
+ DirectoryarrayPtr idrDirptr;
+ ndbrequire(cdirarraysize > 0);
+ for (idrDirptr.i = 0; idrDirptr.i < cdirarraysize; idrDirptr.i++) {
+ refresh_watch_dog();
+ ptrAss(idrDirptr, directoryarray);
+ for (Uint32 i = 0; i <= 255; i++) {
+ idrDirptr.p->pagep[i] = RNIL;
+ }//for
+ }//for
+ cdirmemory = 0;
+ cfirstfreedir = RNIL;
+}//Dbacc::initialiseDirRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_DIR_RANGE_REC */
+/* INITIALISES THE DIR_RANGE RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseDirRangeRec(Signal* signal)
+{
+ DirRangePtr idrDirRangePtr;
+
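+  // Thread all dir-range records into a single free list through
+  // dirArray[0]; the last record is terminated with RNIL below and
+  // cfirstfreeDirrange points to the head of the list.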
+ ndbrequire(cdirrangesize > 0);
+ for (idrDirRangePtr.i = 0; idrDirRangePtr.i < cdirrangesize; idrDirRangePtr.i++) {
+ refresh_watch_dog();
+ ptrAss(idrDirRangePtr, dirRange);
+ idrDirRangePtr.p->dirArray[0] = idrDirRangePtr.i + 1;
+ for (Uint32 i = 1; i < 256; i++) {
+ idrDirRangePtr.p->dirArray[i] = RNIL;
+ }//for
+ }//for
+ idrDirRangePtr.i = cdirrangesize - 1;
+ ptrAss(idrDirRangePtr, dirRange);
+ idrDirRangePtr.p->dirArray[0] = RNIL;
+ cfirstfreeDirrange = 0;
+}//Dbacc::initialiseDirRangeRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_FRAG_REC */
+/* INITIALISES THE FRAGMENT RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseFragRec(Signal* signal)
+{
+ FragmentrecPtr regFragPtr;
+ ndbrequire(cfragmentsize > 0);
+ for (regFragPtr.i = 0; regFragPtr.i < cfragmentsize; regFragPtr.i++) {
+ jam();
+ refresh_watch_dog();
+ ptrAss(regFragPtr, fragmentrec);
+ initFragGeneral(regFragPtr);
+ regFragPtr.p->nextfreefrag = regFragPtr.i + 1;
+ }//for
+ regFragPtr.i = cfragmentsize - 1;
+ ptrAss(regFragPtr, fragmentrec);
+ regFragPtr.p->nextfreefrag = RNIL;
+ cfirstfreefrag = 0;
+}//Dbacc::initialiseFragRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_FS_CONNECTION_REC */
+/* INITIALISES THE FS_CONNECTION RECORDS */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseFsConnectionRec(Signal* signal)
+{
+ ndbrequire(cfsConnectsize > 0);
+ for (fsConnectptr.i = 0; fsConnectptr.i < cfsConnectsize; fsConnectptr.i++) {
+ ptrAss(fsConnectptr, fsConnectrec);
+ fsConnectptr.p->fsNext = fsConnectptr.i + 1;
+ fsConnectptr.p->fsPrev = RNIL;
+ fsConnectptr.p->fragrecPtr = RNIL;
+ fsConnectptr.p->fsState = WAIT_NOTHING;
+ }//for
+ fsConnectptr.i = cfsConnectsize - 1;
+ ptrAss(fsConnectptr, fsConnectrec);
+  fsConnectptr.p->fsNext = RNIL; /* INITIALISES THE LAST CONNECT RECORD */
+ cfsFirstfreeconnect = 0; /* INITIATES THE FIRST FREE CONNECT RECORD */
+}//Dbacc::initialiseFsConnectionRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_FS_OP_REC */
+/* INITIALISES THE FS_OP RECORDS */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseFsOpRec(Signal* signal)
+{
+ ndbrequire(cfsOpsize > 0);
+ for (fsOpptr.i = 0; fsOpptr.i < cfsOpsize; fsOpptr.i++) {
+ ptrAss(fsOpptr, fsOprec);
+ fsOpptr.p->fsOpnext = fsOpptr.i + 1;
+ fsOpptr.p->fsOpfragrecPtr = RNIL;
+ fsOpptr.p->fsConptr = RNIL;
+ fsOpptr.p->fsOpstate = WAIT_NOTHING;
+ }//for
+ fsOpptr.i = cfsOpsize - 1;
+ ptrAss(fsOpptr, fsOprec);
+ fsOpptr.p->fsOpnext = RNIL;
+ cfsFirstfreeop = 0;
+}//Dbacc::initialiseFsOpRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_LCP_CONNECTION_REC */
+/* INITIALISES THE LCP_CONNECTION RECORDS */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseLcpConnectionRec(Signal* signal)
+{
+ ndbrequire(clcpConnectsize > 0);
+ for (lcpConnectptr.i = 0; lcpConnectptr.i < clcpConnectsize; lcpConnectptr.i++) {
+ ptrAss(lcpConnectptr, lcpConnectrec);
+ lcpConnectptr.p->nextLcpConn = lcpConnectptr.i + 1;
+ lcpConnectptr.p->lcpUserptr = RNIL;
+ lcpConnectptr.p->rootrecptr = RNIL;
+ lcpConnectptr.p->lcpstate = LCP_FREE;
+ }//for
+ lcpConnectptr.i = clcpConnectsize - 1;
+ ptrAss(lcpConnectptr, lcpConnectrec);
+ lcpConnectptr.p->nextLcpConn = RNIL;
+ cfirstfreelcpConnect = 0;
+}//Dbacc::initialiseLcpConnectionRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_OPERATION_REC */
+/* INITIALISES THE OPERATION RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseOperationRec(Signal* signal)
+{
+ ndbrequire(coprecsize > 0);
+ for (operationRecPtr.i = 0; operationRecPtr.i < coprecsize; operationRecPtr.i++) {
+ refresh_watch_dog();
+ ptrAss(operationRecPtr, operationrec);
+ operationRecPtr.p->transactionstate = IDLE;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+ operationRecPtr.p->opState = FREE_OP;
+ operationRecPtr.p->nextOp = operationRecPtr.i + 1;
+ }//for
+ operationRecPtr.i = coprecsize - 1;
+ ptrAss(operationRecPtr, operationrec);
+ operationRecPtr.p->nextOp = RNIL;
+ cfreeopRec = 0;
+}//Dbacc::initialiseOperationRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_OVERFLOW_REC */
+/* INITIALISES THE OVERFLOW RECORDS */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseOverflowRec(Signal* signal)
+{
+ OverflowRecordPtr iorOverflowRecPtr;
+
+ ndbrequire(coverflowrecsize > 0);
+ for (iorOverflowRecPtr.i = 0; iorOverflowRecPtr.i < coverflowrecsize; iorOverflowRecPtr.i++) {
+ refresh_watch_dog();
+ ptrAss(iorOverflowRecPtr, overflowRecord);
+ iorOverflowRecPtr.p->nextfreeoverrec = iorOverflowRecPtr.i + 1;
+ }//for
+ iorOverflowRecPtr.i = coverflowrecsize - 1;
+ ptrAss(iorOverflowRecPtr, overflowRecord);
+ iorOverflowRecPtr.p->nextfreeoverrec = RNIL;
+ cfirstfreeoverrec = 0;
+}//Dbacc::initialiseOverflowRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_PAGE_REC */
+/* INITIALISES THE PAGE RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialisePageRec(Signal* signal)
+{
+ ndbrequire(cpagesize > 0);
+ cfreepage = 0;
+ cfirstfreepage = RNIL;
+ cnoOfAllocatedPages = 0;
+}//Dbacc::initialisePageRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_LCP_PAGES */
+/* INITIALISES THE LCP PAGE RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseLcpPages(Signal* signal)
+{
+ Uint32 tilpIndex;
+
+ ndbrequire(cnoLcpPages >= (2 * (ZWRITEPAGESIZE + 1)));
+ /* --------------------------------------------------------------------------------- */
+ /* AN ABSOLUTE MINIMUM IS THAT WE HAVE 16 LCP PAGES TO HANDLE TWO CONCURRENT */
+ /* LCP'S ON LOCAL FRAGMENTS. */
+ /* --------------------------------------------------------------------------------- */
+ ndbrequire(cpagesize >= (cnoLcpPages + 8));
+ /* --------------------------------------------------------------------------------- */
+ /* THE NUMBER OF PAGES MUST BE AT LEAST 8 PLUS THE NUMBER OF PAGES REQUIRED BY */
+ /* THE LOCAL CHECKPOINT PROCESS. THIS NUMBER IS 8 TIMES THE PARALLELISM OF */
+ /* LOCAL CHECKPOINTS. */
+ /* --------------------------------------------------------------------------------- */
+ /* --------------------------------------------------------------------------------- */
+ /* WE SET UP A LINKED LIST OF PAGES FOR EXCLUSIVE USE BY LOCAL CHECKPOINTS. */
+ /* --------------------------------------------------------------------------------- */
+ cfirstfreeLcpPage = RNIL;
+ for (tilpIndex = 0; tilpIndex < cnoLcpPages; tilpIndex++) {
+ jam();
+ seizePage(signal);
+ rlpPageptr = spPageptr;
+ releaseLcpPage(signal);
+ }//for
+}//Dbacc::initialiseLcpPages()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_ROOTFRAG_REC */
+/* INITIALISES THE ROOTFRAG RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseRootfragRec(Signal* signal)
+{
+ ndbrequire(crootfragmentsize > 0);
+ for (rootfragrecptr.i = 0; rootfragrecptr.i < crootfragmentsize; rootfragrecptr.i++) {
+ refresh_watch_dog();
+ ptrAss(rootfragrecptr, rootfragmentrec);
+ rootfragrecptr.p->nextroot = rootfragrecptr.i + 1;
+ rootfragrecptr.p->fragmentptr[0] = RNIL;
+ rootfragrecptr.p->fragmentptr[1] = RNIL;
+ }//for
+ rootfragrecptr.i = crootfragmentsize - 1;
+ ptrAss(rootfragrecptr, rootfragmentrec);
+ rootfragrecptr.p->nextroot = RNIL;
+ cfirstfreerootfrag = 0;
+}//Dbacc::initialiseRootfragRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_SCAN_REC */
+/* INITIALISES THE QUE_SCAN RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseScanRec(Signal* signal)
+{
+ ndbrequire(cscanRecSize > 0);
+ for (scanPtr.i = 0; scanPtr.i < cscanRecSize; scanPtr.i++) {
+ ptrAss(scanPtr, scanRec);
+ scanPtr.p->scanNextfreerec = scanPtr.i + 1;
+ scanPtr.p->scanState = ScanRec::SCAN_DISCONNECT;
+ scanPtr.p->scanTimer = 0;
+ scanPtr.p->scanContinuebCounter = 0;
+ }//for
+ scanPtr.i = cscanRecSize - 1;
+ ptrAss(scanPtr, scanRec);
+ scanPtr.p->scanNextfreerec = RNIL;
+ cfirstFreeScanRec = 0;
+}//Dbacc::initialiseScanRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_SR_VER_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseSrVerRec(Signal* signal)
+{
+ ndbrequire(csrVersionRecSize > 0);
+ for (srVersionPtr.i = 0; srVersionPtr.i < csrVersionRecSize; srVersionPtr.i++) {
+ ptrAss(srVersionPtr, srVersionRec);
+ srVersionPtr.p->nextFreeSr = srVersionPtr.i + 1;
+ }//for
+ srVersionPtr.i = csrVersionRecSize - 1;
+ ptrAss(srVersionPtr, srVersionRec);
+ srVersionPtr.p->nextFreeSr = RNIL;
+ cfirstFreeSrVersionRec = 0;
+}//Dbacc::initialiseSrVerRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_TABLE_REC */
+/* INITIALISES THE TABLE RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseTableRec(Signal* signal)
+{
+ ndbrequire(ctablesize > 0);
+ for (tabptr.i = 0; tabptr.i < ctablesize; tabptr.i++) {
+ refresh_watch_dog();
+ ptrAss(tabptr, tabrec);
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+ tabptr.p->fragholder[i] = RNIL;
+ tabptr.p->fragptrholder[i] = RNIL;
+ }//for
+ tabptr.p->noOfKeyAttr = 0;
+ tabptr.p->hasCharAttr = 0;
+ for (Uint32 k = 0; k < MAX_ATTRIBUTES_IN_INDEX; k++) {
+ tabptr.p->keyAttr[k].attributeDescriptor = 0;
+ tabptr.p->keyAttr[k].charsetInfo = 0;
+ }
+ }//for
+}//Dbacc::initialiseTableRec()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF SYSTEM RESTART MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* ADD/DELETE FRAGMENT MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+
+void Dbacc::initRootfragrec(Signal* signal)
+{
+ const AccFragReq * const req = (AccFragReq*)&signal->theData[0];
+ rootfragrecptr.p->mytabptr = req->tableId;
+ rootfragrecptr.p->roothashcheck = req->kValue + req->lhFragBits;
+ rootfragrecptr.p->noOfElements = 0;
+ rootfragrecptr.p->m_commit_count = 0;
+ for (Uint32 i = 0; i < MAX_PARALLEL_SCANS_PER_FRAG; i++) {
+ rootfragrecptr.p->scan[i] = RNIL;
+ }//for
+}//Dbacc::initRootfragrec()
+
+void Dbacc::execACCFRAGREQ(Signal* signal)
+{
+ const AccFragReq * const req = (AccFragReq*)&signal->theData[0];
+ jamEntry();
+ if (ERROR_INSERTED(3001)) {
+ jam();
+ addFragRefuse(signal, 1);
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
+ tabptr.i = req->tableId;
+#ifndef VM_TRACE
+ // config mismatch - do not crash if release compiled
+ if (tabptr.i >= ctablesize) {
+ jam();
+ addFragRefuse(signal, 640);
+ return;
+ }
+#endif
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+ ndbrequire((req->reqInfo & 0xF) == ZADDFRAG);
+ ndbrequire(!getrootfragmentrec(signal, rootfragrecptr, req->fragId));
+ if (cfirstfreerootfrag == RNIL) {
+ jam();
+ addFragRefuse(signal, ZFULL_ROOTFRAGRECORD_ERROR);
+ return;
+ }//if
+ seizeRootfragrec(signal);
+ if (!addfragtotab(signal, rootfragrecptr.i, req->fragId)) {
+ jam();
+ releaseRootFragRecord(signal, rootfragrecptr);
+ addFragRefuse(signal, ZFULL_ROOTFRAGRECORD_ERROR);
+ return;
+ }//if
+ initRootfragrec(signal);
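+  // A root fragment is backed by two local fragments; give each of them its
+  // own directory with an initial page as well as an overflow directory.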
+ for (Uint32 i = 0; i < 2; i++) {
+ jam();
+ if (cfirstfreefrag == RNIL) {
+ jam();
+ addFragRefuse(signal, ZFULL_FRAGRECORD_ERROR);
+ return;
+ }//if
+ seizeFragrec(signal);
+ initFragGeneral(fragrecptr);
+ initFragAdd(signal, i, rootfragrecptr.i, fragrecptr);
+ rootfragrecptr.p->fragmentptr[i] = fragrecptr.i;
+ rootfragrecptr.p->fragmentid[i] = fragrecptr.p->myfid;
+ if (cfirstfreeDirrange == RNIL) {
+ jam();
+ addFragRefuse(signal, ZDIR_RANGE_ERROR);
+ return;
+ } else {
+ jam();
+ seizeDirrange(signal);
+ }//if
+ fragrecptr.p->directory = newDirRangePtr.i;
+ seizeDirectory(signal);
+ if (tresult < ZLIMIT_OF_ERROR) {
+ jam();
+ newDirRangePtr.p->dirArray[0] = sdDirptr.i;
+ } else {
+ jam();
+ addFragRefuse(signal, tresult);
+ return;
+ }//if
+ seizePage(signal);
+ if (tresult > ZLIMIT_OF_ERROR) {
+ jam();
+ addFragRefuse(signal, tresult);
+ return;
+ }//if
+ sdDirptr.p->pagep[0] = spPageptr.i;
+ tipPageId = 0;
+ inpPageptr = spPageptr;
+ initPage(signal);
+ if (cfirstfreeDirrange == RNIL) {
+ jam();
+ addFragRefuse(signal, ZDIR_RANGE_ERROR);
+ return;
+ } else {
+ jam();
+ seizeDirrange(signal);
+ }//if
+ fragrecptr.p->overflowdir = newDirRangePtr.i;
+ seizeDirectory(signal);
+ if (tresult < ZLIMIT_OF_ERROR) {
+ jam();
+ newDirRangePtr.p->dirArray[0] = sdDirptr.i;
+ } else {
+ jam();
+ addFragRefuse(signal, tresult);
+ return;
+ }//if
+ }//for
+ Uint32 userPtr = req->userPtr;
+ BlockReference retRef = req->userRef;
+ rootfragrecptr.p->rootState = ACTIVEROOT;
+ AccFragConf * const conf = (AccFragConf*)&signal->theData[0];
+
+ conf->userPtr = userPtr;
+ conf->rootFragPtr = rootfragrecptr.i;
+ conf->fragId[0] = rootfragrecptr.p->fragmentid[0];
+ conf->fragId[1] = rootfragrecptr.p->fragmentid[1];
+ conf->fragPtr[0] = rootfragrecptr.p->fragmentptr[0];
+ conf->fragPtr[1] = rootfragrecptr.p->fragmentptr[1];
+ conf->rootHashCheck = rootfragrecptr.p->roothashcheck;
+ sendSignal(retRef, GSN_ACCFRAGCONF, signal, AccFragConf::SignalLength, JBB);
+}//Dbacc::execACCFRAGREQ()
+
+void Dbacc::addFragRefuse(Signal* signal, Uint32 errorCode)
+{
+ const AccFragReq * const req = (AccFragReq*)&signal->theData[0];
+ AccFragRef * const ref = (AccFragRef*)&signal->theData[0];
+ Uint32 userPtr = req->userPtr;
+ BlockReference retRef = req->userRef;
+
+ ref->userPtr = userPtr;
+ ref->errorCode = errorCode;
+ sendSignal(retRef, GSN_ACCFRAGREF, signal, AccFragRef::SignalLength, JBB);
+ return;
+}//Dbacc::addFragRefuse()
+
+void
+Dbacc::execTC_SCHVERREQ(Signal* signal)
+{
+ jamEntry();
+ if (! assembleFragments(signal)) {
+ jam();
+ return;
+ }
+ tabptr.i = signal->theData[0];
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+ Uint32 noOfKeyAttr = signal->theData[6];
+ ndbrequire(noOfKeyAttr <= MAX_ATTRIBUTES_IN_INDEX);
+ Uint32 hasCharAttr = 0;
+
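+  // The (attributeDescriptor, charset number) pairs for the key attributes
+  // arrive in the first long-signal section.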
+ SegmentedSectionPtr s0Ptr;
+ signal->getSection(s0Ptr, 0);
+ SectionReader r0(s0Ptr, getSectionSegmentPool());
+ Uint32 i = 0;
+ while (i < noOfKeyAttr) {
+ jam();
+ Uint32 attributeDescriptor = ~0;
+ Uint32 csNumber = ~0;
+ if (! r0.getWord(&attributeDescriptor) ||
+ ! r0.getWord(&csNumber)) {
+ jam();
+ break;
+ }
+ CHARSET_INFO* cs = 0;
+ if (csNumber != 0) {
+ cs = all_charsets[csNumber];
+ ndbrequire(cs != 0);
+ hasCharAttr = 1;
+ }
+ tabptr.p->keyAttr[i].attributeDescriptor = attributeDescriptor;
+ tabptr.p->keyAttr[i].charsetInfo = cs;
+ i++;
+ }
+ ndbrequire(i == noOfKeyAttr);
+ releaseSections(signal);
+
+ tabptr.p->noOfKeyAttr = noOfKeyAttr;
+ tabptr.p->hasCharAttr = hasCharAttr;
+
+ // copy char attr flag to each fragment
+ for (Uint32 i1 = 0; i1 < MAX_FRAG_PER_NODE; i1++) {
+ jam();
+ if (tabptr.p->fragptrholder[i1] != RNIL) {
+ rootfragrecptr.i = tabptr.p->fragptrholder[i1];
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ for (Uint32 i2 = 0; i2 < 2; i2++) {
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[i2];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ fragrecptr.p->hasCharAttr = hasCharAttr;
+ }
+ }
+ }
+
+ // no reply to DICT
+}
+
+void
+Dbacc::execDROP_TAB_REQ(Signal* signal){
+ jamEntry();
+ DropTabReq* req = (DropTabReq*)signal->getDataPtr();
+
+ TabrecPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctablesize, tabrec);
+
+ tabPtr.p->tabUserRef = req->senderRef;
+ tabPtr.p->tabUserPtr = req->senderData;
+
+ signal->theData[0] = ZREL_ROOT_FRAG;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
+}
+
+void Dbacc::releaseRootFragResources(Signal* signal, Uint32 tableId)
+{
+ RootfragmentrecPtr rootPtr;
+ TabrecPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctablesize, tabrec);
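+  // Resources are released a piece at a time; each step re-sends CONTINUEB
+  // (ZREL_ROOT_FRAG -> ZREL_FRAG -> ZREL_DIR) so that dropping a large
+  // table does not block the job buffer.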
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+ jam();
+ if (tabPtr.p->fragholder[i] != RNIL) {
+ jam();
+ Uint32 fragIndex;
+ rootPtr.i = tabPtr.p->fragptrholder[i];
+ ptrCheckGuard(rootPtr, crootfragmentsize, rootfragmentrec);
+ if (rootPtr.p->fragmentptr[0] != RNIL) {
+ jam();
+ fragIndex = rootPtr.p->fragmentptr[0];
+ rootPtr.p->fragmentptr[0] = RNIL;
+ } else if (rootPtr.p->fragmentptr[1] != RNIL) {
+ jam();
+ fragIndex = rootPtr.p->fragmentptr[1];
+ rootPtr.p->fragmentptr[1] = RNIL;
+ } else {
+ jam();
+ releaseRootFragRecord(signal, rootPtr);
+ tabPtr.p->fragholder[i] = RNIL;
+ tabPtr.p->fragptrholder[i] = RNIL;
+ continue;
+ }//if
+ releaseFragResources(signal, fragIndex);
+ return;
+ }//if
+ }//for
+
+ /**
+ * Finished...
+ */
+ sendFSREMOVEREQ(signal, tableId);
+}//Dbacc::releaseRootFragResources()
+
+void Dbacc::releaseRootFragRecord(Signal* signal, RootfragmentrecPtr rootPtr)
+{
+ rootPtr.p->nextroot = cfirstfreerootfrag;
+ cfirstfreerootfrag = rootPtr.i;
+}//Dbacc::releaseRootFragRecord()
+
+void Dbacc::releaseFragResources(Signal* signal, Uint32 fragIndex)
+{
+ FragmentrecPtr regFragPtr;
+ regFragPtr.i = fragIndex;
+ ptrCheckGuard(regFragPtr, cfragmentsize, fragmentrec);
+ verifyFragCorrect(regFragPtr);
+ if (regFragPtr.p->directory != RNIL) {
+ jam();
+ releaseDirResources(signal, regFragPtr.i, regFragPtr.p->directory, 0);
+ regFragPtr.p->directory = RNIL;
+ } else if (regFragPtr.p->overflowdir != RNIL) {
+ jam();
+ releaseDirResources(signal, regFragPtr.i, regFragPtr.p->overflowdir, 0);
+ regFragPtr.p->overflowdir = RNIL;
+ } else if (regFragPtr.p->firstOverflowRec != RNIL) {
+ jam();
+ releaseOverflowResources(signal, regFragPtr);
+ } else if (regFragPtr.p->firstFreeDirindexRec != RNIL) {
+ jam();
+ releaseDirIndexResources(signal, regFragPtr);
+ } else {
+ RootfragmentrecPtr rootPtr;
+ jam();
+ rootPtr.i = regFragPtr.p->myroot;
+ ptrCheckGuard(rootPtr, crootfragmentsize, rootfragmentrec);
+ releaseFragRecord(signal, regFragPtr);
+ signal->theData[0] = ZREL_ROOT_FRAG;
+ signal->theData[1] = rootPtr.p->mytabptr;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+}//Dbacc::releaseFragResources()
+
+void Dbacc::verifyFragCorrect(FragmentrecPtr regFragPtr)
+{
+ for (Uint32 i = 0; i < ZWRITEPAGESIZE; i++) {
+ jam();
+ ndbrequire(regFragPtr.p->datapages[i] == RNIL);
+ }//for
+ ndbrequire(regFragPtr.p->lockOwnersList == RNIL);
+ ndbrequire(regFragPtr.p->firstWaitInQueOp == RNIL);
+ ndbrequire(regFragPtr.p->lastWaitInQueOp == RNIL);
+ ndbrequire(regFragPtr.p->sentWaitInQueOp == RNIL);
+ //ndbrequire(regFragPtr.p->fsConnPtr == RNIL);
+ ndbrequire(regFragPtr.p->zeroPagePtr == RNIL);
+ ndbrequire(regFragPtr.p->nrWaitWriteUndoExit == 0);
+ ndbrequire(regFragPtr.p->sentWaitInQueOp == RNIL);
+}//Dbacc::verifyFragCorrect()
+
+void Dbacc::releaseDirResources(Signal* signal,
+ Uint32 fragIndex,
+ Uint32 dirIndex,
+ Uint32 startIndex)
+{
+ DirRangePtr regDirRangePtr;
+ regDirRangePtr.i = dirIndex;
+ ptrCheckGuard(regDirRangePtr, cdirrangesize, dirRange);
+ for (Uint32 i = startIndex; i < 256; i++) {
+ jam();
+ if (regDirRangePtr.p->dirArray[i] != RNIL) {
+ jam();
+ Uint32 directoryIndex = regDirRangePtr.p->dirArray[i];
+ regDirRangePtr.p->dirArray[i] = RNIL;
+ releaseDirectoryResources(signal, fragIndex, dirIndex, (i + 1), directoryIndex);
+ return;
+ }//if
+ }//for
+ rdDirRangePtr = regDirRangePtr;
+ releaseDirrange(signal);
+ signal->theData[0] = ZREL_FRAG;
+ signal->theData[1] = fragIndex;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
+}//Dbacc::releaseDirResources()
+
+void Dbacc::releaseDirectoryResources(Signal* signal,
+ Uint32 fragIndex,
+ Uint32 dirIndex,
+ Uint32 startIndex,
+ Uint32 directoryIndex)
+{
+ DirectoryarrayPtr regDirPtr;
+ regDirPtr.i = directoryIndex;
+ ptrCheckGuard(regDirPtr, cdirarraysize, directoryarray);
+ for (Uint32 i = 0; i < 256; i++) {
+ jam();
+ if (regDirPtr.p->pagep[i] != RNIL) {
+ jam();
+ rpPageptr.i = regDirPtr.p->pagep[i];
+ ptrCheckGuard(rpPageptr, cpagesize, page8);
+ releasePage(signal);
+ regDirPtr.p->pagep[i] = RNIL;
+ }//if
+ }//for
+ rdDirptr = regDirPtr;
+ releaseDirectory(signal);
+ signal->theData[0] = ZREL_DIR;
+ signal->theData[1] = fragIndex;
+ signal->theData[2] = dirIndex;
+ signal->theData[3] = startIndex;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 4, JBB);
+}//Dbacc::releaseDirectoryResources()
+
+void Dbacc::releaseOverflowResources(Signal* signal, FragmentrecPtr regFragPtr)
+{
+ Uint32 loopCount = 0;
+ OverflowRecordPtr regOverflowRecPtr;
+ while ((regFragPtr.p->firstOverflowRec != RNIL) &&
+ (loopCount < 1)) {
+ jam();
+ regOverflowRecPtr.i = regFragPtr.p->firstOverflowRec;
+ ptrCheckGuard(regOverflowRecPtr, coverflowrecsize, overflowRecord);
+ regFragPtr.p->firstOverflowRec = regOverflowRecPtr.p->nextOverRec;
+ rorOverflowRecPtr = regOverflowRecPtr;
+ releaseOverflowRec(signal);
+ loopCount++;
+ }//while
+ signal->theData[0] = ZREL_FRAG;
+ signal->theData[1] = regFragPtr.i;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
+}//Dbacc::releaseOverflowResources()
+
+void Dbacc::releaseDirIndexResources(Signal* signal, FragmentrecPtr regFragPtr)
+{
+ Uint32 loopCount = 0;
+ OverflowRecordPtr regOverflowRecPtr;
+ while ((regFragPtr.p->firstFreeDirindexRec != RNIL) &&
+ (loopCount < 1)) {
+ jam();
+ regOverflowRecPtr.i = regFragPtr.p->firstFreeDirindexRec;
+ ptrCheckGuard(regOverflowRecPtr, coverflowrecsize, overflowRecord);
+ regFragPtr.p->firstFreeDirindexRec = regOverflowRecPtr.p->nextOverList;
+ rorOverflowRecPtr = regOverflowRecPtr;
+ releaseOverflowRec(signal);
+ loopCount++;
+ }//while
+ signal->theData[0] = ZREL_FRAG;
+ signal->theData[1] = regFragPtr.i;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
+}//Dbacc::releaseDirIndexResources()
+
+void Dbacc::releaseFragRecord(Signal* signal, FragmentrecPtr regFragPtr)
+{
+ regFragPtr.p->nextfreefrag = cfirstfreefrag;
+ cfirstfreefrag = regFragPtr.i;
+ initFragGeneral(regFragPtr);
+}//Dbacc::releaseFragRecord()
+
+void Dbacc::sendFSREMOVEREQ(Signal* signal, Uint32 tableId)
+{
+ FsRemoveReq * const fsReq = (FsRemoveReq *)signal->getDataPtrSend();
+ fsReq->userReference = cownBlockref;
+ fsReq->userPointer = tableId;
+ fsReq->fileNumber[0] = tableId;
+ fsReq->fileNumber[1] = (Uint32)-1; // Remove all fragments
+ fsReq->fileNumber[2] = (Uint32)-1; // Remove all data files within fragment
+ fsReq->fileNumber[3] = 255 | // No P-value used here
+ (3 << 8) | // Data-files in D3
+ (0 << 16) | // Data-files
+ (1 << 24); // Version 1 of fileNumber
+ fsReq->directory = 1;
+ fsReq->ownDirectory = 1;
+ sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, FsRemoveReq::SignalLength, JBA);
+}//Dbacc::sendFSREMOVEREQ()
+
+void Dbacc::execFSREMOVECONF(Signal* signal)
+{
+ FsConf * const fsConf = (FsConf *)signal->getDataPtrSend();
+ TabrecPtr tabPtr;
+ tabPtr.i = fsConf->userPointer;
+ ptrCheckGuard(tabPtr, ctablesize, tabrec);
+
+ DropTabConf * const dropConf = (DropTabConf *)signal->getDataPtrSend();
+ dropConf->senderRef = reference();
+ dropConf->senderData = tabPtr.p->tabUserPtr;
+ dropConf->tableId = tabPtr.i;
+ sendSignal(tabPtr.p->tabUserRef, GSN_DROP_TAB_CONF,
+ signal, DropTabConf::SignalLength, JBB);
+
+ tabPtr.p->tabUserPtr = RNIL;
+ tabPtr.p->tabUserRef = 0;
+}//Dbacc::execFSREMOVECONF()
+
+void Dbacc::execFSREMOVEREF(Signal* signal)
+{
+ ndbrequire(false);
+}//Dbacc::execFSREMOVEREF()
+
+/* -------------------------------------------------------------------------- */
+/* ADDFRAGTOTAB */
+/* DESCRIPTION: PUTS A FRAGMENT ID AND A POINTER TO ITS RECORD INTO */
+/* TABLE ARRAY OF THE TABLE RECORD. */
+/* -------------------------------------------------------------------------- */
+bool Dbacc::addfragtotab(Signal* signal, Uint32 rootIndex, Uint32 fid)
+{
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+ jam();
+ if (tabptr.p->fragholder[i] == RNIL) {
+ jam();
+ tabptr.p->fragholder[i] = fid;
+ tabptr.p->fragptrholder[i] = rootIndex;
+ return true;
+ }//if
+ }//for
+ return false;
+}//Dbacc::addfragtotab()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF ADD/DELETE FRAGMENT MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* CONNECTION MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACCSEIZEREQ SEIZE REQ */
+/* SENDER: LQH, LEVEL B */
+/* ENTER ACCSEIZEREQ WITH */
+/* TUSERPTR, CONNECTION PTR OF LQH */
+/* TUSERBLOCKREF BLOCK REFERENCE OF LQH */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACCSEIZEREQ SEIZE REQ */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execACCSEIZEREQ(Signal* signal)
+{
+ jamEntry();
+ tuserptr = signal->theData[0];
+  /* CONNECTION PTR OF LQH */
+ tuserblockref = signal->theData[1];
+ /* BLOCK REFERENCE OF LQH */
+ tresult = 0;
+ if (cfreeopRec == RNIL) {
+ jam();
+ refaccConnectLab(signal);
+ return;
+ }//if
+ seizeOpRec(signal);
+ ptrGuard(operationRecPtr);
+ operationRecPtr.p->userptr = tuserptr;
+ operationRecPtr.p->userblockref = tuserblockref;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+ operationRecPtr.p->transactionstate = IDLE;
+ /* ******************************< */
+ /* ACCSEIZECONF */
+ /* ******************************< */
+ signal->theData[0] = tuserptr;
+ signal->theData[1] = operationRecPtr.i;
+ sendSignal(tuserblockref, GSN_ACCSEIZECONF, signal, 2, JBB);
+ return;
+}//Dbacc::execACCSEIZEREQ()
+
+void Dbacc::refaccConnectLab(Signal* signal)
+{
+ tresult = ZCONNECT_SIZE_ERROR;
+ /* ******************************< */
+ /* ACCSEIZEREF */
+ /* ******************************< */
+ signal->theData[0] = tuserptr;
+ signal->theData[1] = tresult;
+ sendSignal(tuserblockref, GSN_ACCSEIZEREF, signal, 2, JBB);
+ return;
+}//Dbacc::refaccConnectLab()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF CONNECTION MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* EXECUTE OPERATION MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* INIT_OP_REC */
+/* INFORMATION WHICH IS RECEIVED BY ACCKEYREQ WILL BE SAVED */
+/* IN THE OPERATION RECORD. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initOpRec(Signal* signal)
+{
+ register Uint32 Treqinfo;
+
+ Treqinfo = signal->theData[2];
+
+ operationRecPtr.p->hashValue = signal->theData[3];
+ operationRecPtr.p->tupkeylen = signal->theData[4];
+ operationRecPtr.p->xfrmtupkeylen = signal->theData[4];
+ operationRecPtr.p->transId1 = signal->theData[5];
+ operationRecPtr.p->transId2 = signal->theData[6];
+ operationRecPtr.p->transactionstate = ACTIVE;
+ operationRecPtr.p->commitDeleteCheckFlag = ZFALSE;
+ operationRecPtr.p->operation = Treqinfo & 0x7;
+ /* --------------------------------------------------------------------------------- */
+ // opSimple is not used in this version. Is needed for deadlock handling later on.
+ /* --------------------------------------------------------------------------------- */
+ // operationRecPtr.p->opSimple = (Treqinfo >> 3) & 0x1;
+
+ operationRecPtr.p->lockMode = (Treqinfo >> 4) & 0x3;
+
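+  // A dirty read is a read (lock mode 0) with the dirty bit (bit 6) set;
+  // such an operation never takes a lock and completes without a COMMIT.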
+ Uint32 readFlag = (((Treqinfo >> 4) & 0x3) == 0); // Only 1 if Read
+ Uint32 dirtyFlag = (((Treqinfo >> 6) & 0x1) == 1); // Only 1 if Dirty
+ Uint32 dirtyReadFlag = readFlag & dirtyFlag;
+ operationRecPtr.p->dirtyRead = dirtyReadFlag;
+
+ operationRecPtr.p->nodeType = (Treqinfo >> 7) & 0x3;
+ operationRecPtr.p->fid = fragrecptr.p->myfid;
+ operationRecPtr.p->fragptr = fragrecptr.i;
+ operationRecPtr.p->nextParallelQue = RNIL;
+ operationRecPtr.p->prevParallelQue = RNIL;
+ operationRecPtr.p->prevQueOp = RNIL;
+ operationRecPtr.p->nextQueOp = RNIL;
+ operationRecPtr.p->nextSerialQue = RNIL;
+ operationRecPtr.p->prevSerialQue = RNIL;
+ operationRecPtr.p->elementPage = RNIL;
+ operationRecPtr.p->keyinfoPage = RNIL;
+ operationRecPtr.p->lockOwner = ZFALSE;
+ operationRecPtr.p->insertIsDone = ZFALSE;
+ operationRecPtr.p->elementIsDisappeared = ZFALSE;
+ operationRecPtr.p->insertDeleteLen = fragrecptr.p->elementLength;
+ operationRecPtr.p->longPagePtr = RNIL;
+ operationRecPtr.p->longKeyPageIndex = RNIL;
+ operationRecPtr.p->scanRecPtr = RNIL;
+
+ // bit to mark lock operation
+ operationRecPtr.p->isAccLockReq = (Treqinfo >> 31) & 0x1;
+
+ // undo log is not run via ACCKEYREQ
+ operationRecPtr.p->isUndoLogReq = 0;
+}//Dbacc::initOpRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEND_ACCKEYCONF */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::sendAcckeyconf(Signal* signal)
+{
+ signal->theData[0] = operationRecPtr.p->userptr;
+ signal->theData[1] = operationRecPtr.p->operation;
+ signal->theData[2] = operationRecPtr.p->fid;
+ signal->theData[3] = operationRecPtr.p->localdata[0];
+ signal->theData[4] = operationRecPtr.p->localdata[1];
+ signal->theData[5] = fragrecptr.p->localkeylen;
+}//Dbacc::sendAcckeyconf()
+
+
+void Dbacc::ACCKEY_error(Uint32 fromWhere)
+{
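+  // fromWhere identifies which consistency check in the ACCKEYREQ handling
+  // failed; every value is treated as a fatal error.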
+ switch(fromWhere) {
+ case 0:
+ ndbrequire(false);
+ case 1:
+ ndbrequire(false);
+ case 2:
+ ndbrequire(false);
+ case 3:
+ ndbrequire(false);
+ case 4:
+ ndbrequire(false);
+ case 5:
+ ndbrequire(false);
+ case 6:
+ ndbrequire(false);
+ case 7:
+ ndbrequire(false);
+ case 8:
+ ndbrequire(false);
+ case 9:
+ ndbrequire(false);
+ default:
+ ndbrequire(false);
+ }//switch
+}//Dbacc::ACCKEY_error()
+
+/* ******************--------------------------------------------------------------- */
+/* ACCKEYREQ REQUEST FOR INSERT, DELETE, */
+/* READ AND UPDATE A TUPLE. */
+/* SENDER: LQH, LEVEL B */
+/* SIGNAL DATA: OPERATION_REC_PTR, CONNECTION PTR */
+/* TABPTR, TABLE ID = TABLE RECORD POINTER */
+/* TREQINFO, */
+/* THASHVALUE, HASH VALUE OF THE TUP */
+/* TKEYLEN, LENGTH OF THE PRIMARY KEYS */
+/* TKEY1, PRIMARY KEY 1 */
+/* TKEY2, PRIMARY KEY 2 */
+/* TKEY3, PRIMARY KEY 3 */
+/* TKEY4, PRIMARY KEY 4 */
+/* ******************--------------------------------------------------------------- */
+void Dbacc::execACCKEYREQ(Signal* signal)
+{
+ jamEntry();
+ operationRecPtr.i = signal->theData[0]; /* CONNECTION PTR */
+ fragrecptr.i = signal->theData[1]; /* FRAGMENT RECORD POINTER */
+  // both record indexes must be within range before they are dereferenced
+  if (!((operationRecPtr.i < coprecsize) &&
+        (fragrecptr.i < cfragmentsize))) {
+ ACCKEY_error(0);
+ return;
+ }//if
+ ptrAss(operationRecPtr, operationrec);
+ ptrAss(fragrecptr, fragmentrec);
+ ndbrequire(operationRecPtr.p->transactionstate == IDLE);
+
+ initOpRec(signal);
+ // normalize key if any char attr
+ if (! operationRecPtr.p->isAccLockReq && fragrecptr.p->hasCharAttr)
+ xfrmKeyData(signal);
+
+ /*---------------------------------------------------------------*/
+ /* */
+ /* WE WILL USE THE HASH VALUE TO LOOK UP THE PROPER MEMORY */
+ /* PAGE AND MEMORY PAGE INDEX TO START THE SEARCH WITHIN. */
+ /* WE REMEMBER THESE ADDRESS IF WE LATER NEED TO INSERT */
+ /* THE ITEM AFTER NOT FINDING THE ITEM. */
+ /*---------------------------------------------------------------*/
+ getElement(signal);
+
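+  // tgeResult is ZTRUE when the element was found, ZFALSE when it was not
+  // found and an error code otherwise; tgeLocked tells whether the found
+  // element is currently locked.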
+ if (tgeResult == ZTRUE) {
+ switch (operationRecPtr.p->operation) {
+ case ZREAD:
+ case ZUPDATE:
+ case ZDELETE:
+ case ZWRITE:
+ case ZSCAN_OP:
+ if (!tgeLocked){
+ if(operationRecPtr.p->operation == ZWRITE)
+ {
+ jam();
+ operationRecPtr.p->operation = ZUPDATE;
+ }
+ sendAcckeyconf(signal);
+ if (operationRecPtr.p->dirtyRead == ZFALSE) {
+ /*---------------------------------------------------------------*/
+ // It is not a dirty read. We proceed by locking and continue with
+ // the operation.
+ /*---------------------------------------------------------------*/
+ Uint32 eh = gePageptr.p->word32[tgeElementptr];
+ operationRecPtr.p->scanBits = ElementHeader::getScanBits(eh);
+ operationRecPtr.p->hashvaluePart = ElementHeader::getHashValuePart(eh);
+ operationRecPtr.p->elementPage = gePageptr.i;
+ operationRecPtr.p->elementContainer = tgeContainerptr;
+ operationRecPtr.p->elementPointer = tgeElementptr;
+ operationRecPtr.p->elementIsforward = tgeForward;
+
+ eh = ElementHeader::setLocked(operationRecPtr.i);
+ dbgWord32(gePageptr, tgeElementptr, eh);
+ gePageptr.p->word32[tgeElementptr] = eh;
+
+ insertLockOwnersList(signal , operationRecPtr);
+ return;
+ } else {
+ jam();
+ /*---------------------------------------------------------------*/
+ // It is a dirty read. We do not lock anything. Set state to
+ // IDLE since no COMMIT call will come.
+ /*---------------------------------------------------------------*/
+ operationRecPtr.p->transactionstate = IDLE;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+ return;
+ }//if
+ } else {
+ jam();
+ accIsLockedLab(signal);
+ return;
+ }//if
+ break;
+ case ZINSERT:
+ jam();
+ insertExistElemLab(signal);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ } else if (tgeResult == ZFALSE) {
+ switch (operationRecPtr.p->operation) {
+ case ZINSERT:
+ case ZWRITE:
+ jam();
+ // If a write operation makes an insert we switch operation to ZINSERT so
+ // that the commit-method knows an insert has been made and updates noOfElements.
+ operationRecPtr.p->operation = ZINSERT;
+ operationRecPtr.p->insertIsDone = ZTRUE;
+ insertelementLab(signal);
+ return;
+ break;
+ case ZREAD:
+ case ZUPDATE:
+ case ZDELETE:
+ case ZSCAN_OP:
+ jam();
+ acckeyref1Lab(signal, ZREAD_ERROR);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ } else {
+ jam();
+ acckeyref1Lab(signal, tgeResult);
+ return;
+ }//if
+ return;
+}//Dbacc::execACCKEYREQ()
+
+void
+Dbacc::xfrmKeyData(Signal* signal)
+{
+ tabptr.i = fragrecptr.p->myTableId;
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+
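+  // Transform each character key attribute with its collation (strnxfrm) so
+  // that keys that are equal under the collation also compare equal as
+  // binary strings; non-character attributes are copied unchanged.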
+ Uint32 dst[1024 * MAX_XFRM_MULTIPLY];
+ Uint32 dstSize = (sizeof(dst) >> 2);
+ Uint32* src = &signal->theData[7];
+ const Uint32 noOfKeyAttr = tabptr.p->noOfKeyAttr;
+ Uint32 dstPos = 0;
+ Uint32 srcPos = 0;
+ Uint32 i = 0;
+
+ while (i < noOfKeyAttr) {
+ const Tabrec::KeyAttr& keyAttr = tabptr.p->keyAttr[i];
+
+ Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(keyAttr.attributeDescriptor);
+ Uint32 srcWords = (srcBytes + 3) / 4;
+ Uint32 dstWords = ~0;
+ uchar* dstPtr = (uchar*)&dst[dstPos];
+ const uchar* srcPtr = (const uchar*)&src[srcPos];
+ CHARSET_INFO* cs = keyAttr.charsetInfo;
+
+ if (cs == 0) {
+ jam();
+ memcpy(dstPtr, srcPtr, srcWords << 2);
+ dstWords = srcWords;
+ } else {
+ jam();
+ Uint32 typeId = AttributeDescriptor::getType(keyAttr.attributeDescriptor);
+ Uint32 lb, len;
+ bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
+ ndbrequire(ok);
+ Uint32 xmul = cs->strxfrm_multiply;
+ if (xmul == 0)
+ xmul = 1;
+ // see comment in DbtcMain.cpp
+ Uint32 dstLen = xmul * (srcBytes - lb);
+ ndbrequire(dstLen <= ((dstSize - dstPos) << 2));
+ int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
+ ndbrequire(n != -1);
+ while ((n & 3) != 0)
+ dstPtr[n++] = 0;
+ dstWords = (n >> 2);
+ }
+ dstPos += dstWords;
+ srcPos += srcWords;
+ i++;
+ }
+ memcpy(src, dst, dstPos << 2);
+ operationRecPtr.p->xfrmtupkeylen = dstPos;
+}
+
+void Dbacc::accIsLockedLab(Signal* signal)
+{
+ ndbrequire(csystemRestart == ZFALSE);
+ queOperPtr.i = ElementHeader::getOpPtrI(gePageptr.p->word32[tgeElementptr]);
+ ptrCheckGuard(queOperPtr, coprecsize, operationrec);
+ if (operationRecPtr.p->dirtyRead == ZFALSE) {
+ Uint32 return_result;
+ if (operationRecPtr.p->lockMode == ZREADLOCK) {
+ jam();
+ priPageptr = gePageptr;
+ tpriElementptr = tgeElementptr;
+ return_result = placeReadInLockQueue(signal);
+ } else {
+ jam();
+ pwiPageptr = gePageptr;
+ tpwiElementptr = tgeElementptr;
+ return_result = placeWriteInLockQueue(signal);
+ }//if
+ if (return_result == ZPARALLEL_QUEUE) {
+ jam();
+ sendAcckeyconf(signal);
+ return;
+ } else if (return_result == ZSERIAL_QUEUE) {
+ jam();
+ signal->theData[0] = RNIL;
+ return;
+ } else if (return_result == ZWRITE_ERROR) {
+ jam();
+ acckeyref1Lab(signal, return_result);
+ return;
+ }//if
+ ndbrequire(false);
+ } else {
+ if (queOperPtr.p->elementIsDisappeared == ZFALSE) {
+ jam();
+ /*---------------------------------------------------------------*/
+ // It is a dirty read. We do not lock anything. Set state to
+ // IDLE since no COMMIT call will arrive.
+ /*---------------------------------------------------------------*/
+ sendAcckeyconf(signal);
+ operationRecPtr.p->transactionstate = IDLE;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+ return;
+ } else {
+ jam();
+ /*---------------------------------------------------------------*/
+ // The tuple does not exist in the committed world currently.
+ // Report read error.
+ /*---------------------------------------------------------------*/
+ acckeyref1Lab(signal, ZREAD_ERROR);
+ return;
+ }//if
+ }//if
+}//Dbacc::accIsLockedLab()
+
+/* --------------------------------------------------------------------------------- */
+/* I N S E R T E X I S T E L E M E N T */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::insertExistElemLab(Signal* signal)
+{
+ if (!tgeLocked){
+ jam();
+ acckeyref1Lab(signal, ZWRITE_ERROR);/* THE ELEMENT ALREADY EXIST */
+ return;
+ }//if
+ accIsLockedLab(signal);
+}//Dbacc::insertExistElemLab()
+
+/* --------------------------------------------------------------------------------- */
+/* INSERTELEMENT */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::insertelementLab(Signal* signal)
+{
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_OPERATION) {
+ jam();
+ acckeyref1Lab(signal, ZTEMPORARY_ACC_UNDO_FAILURE);
+ return;
+ }//if
+ }//if
+ if (fragrecptr.p->firstOverflowRec == RNIL) {
+ jam();
+ allocOverflowPage(signal);
+ if (tresult > ZLIMIT_OF_ERROR) {
+ jam();
+ acckeyref1Lab(signal, tresult);
+ return;
+ }//if
+ }//if
+ if (fragrecptr.p->keyLength != operationRecPtr.p->tupkeylen) {
+ // historical
+ ndbrequire(fragrecptr.p->keyLength == 0);
+ }//if
+
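+  // Ask LQH directly for a tuple slot; a non-zero theData[0] on return is
+  // an error code, otherwise theData[1] and theData[2] are combined into
+  // the local key of the new tuple.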
+ signal->theData[0] = operationRecPtr.p->userptr;
+ Uint32 blockNo = refToBlock(operationRecPtr.p->userblockref);
+ EXECUTE_DIRECT(blockNo, GSN_LQH_ALLOCREQ, signal, 1);
+ jamEntry();
+ if (signal->theData[0] != 0) {
+ jam();
+ Uint32 result_code = signal->theData[0];
+ acckeyref1Lab(signal, result_code);
+ return;
+ }//if
+ Uint32 localKey = (signal->theData[1] << MAX_TUPLES_BITS) + signal->theData[2];
+
+ insertLockOwnersList(signal, operationRecPtr);
+
+ const Uint32 tmp = fragrecptr.p->k + fragrecptr.p->lhfragbits;
+ operationRecPtr.p->hashvaluePart =
+ (operationRecPtr.p->hashValue >> tmp) & 0xFFFF;
+ operationRecPtr.p->scanBits = 0; /* NOT ANY ACTIVE SCAN */
+ tidrElemhead = ElementHeader::setLocked(operationRecPtr.i);
+ idrPageptr = gdiPageptr;
+ tidrPageindex = tgdiPageindex;
+ tidrForward = ZTRUE;
+ idrOperationRecPtr = operationRecPtr;
+ clocalkey[0] = localKey;
+ operationRecPtr.p->localdata[0] = localKey;
+ /* --------------------------------------------------------------------------------- */
+  /* THE LOCAL KEY RECEIVED FROM LQH IS STORED IN THE NEW ELEMENT AND IN THE OPERATION RECORD. */
+ /* --------------------------------------------------------------------------------- */
+ insertElement(signal);
+ sendAcckeyconf(signal);
+ return;
+}//Dbacc::insertelementLab()
+
+/* --------------------------------------------------------------------------------- */
+/* PLACE_READ_IN_LOCK_QUEUE */
+/* INPUT: OPERATION_REC_PTR OUR OPERATION POINTER */
+/* QUE_OPER_PTR LOCK QUEUE OWNER OPERATION POINTER */
+/* PRI_PAGEPTR PAGE POINTER OF ELEMENT */
+/* TPRI_ELEMENTPTR ELEMENT POINTER OF ELEMENT */
+/* OUTPUT TRESULT = */
+/* ZPARALLEL_QUEUE OPERATION PLACED IN PARALLEL QUEUE */
+/* OPERATION CAN PROCEED NOW. */
+/* ZSERIAL_QUEUE OPERATION PLACED IN SERIAL QUEUE */
+/* ERROR CODE OPERATION NEEDS ABORTING */
+/* THE ELEMENT WAS LOCKED AND WE WANT TO READ THE TUPLE. WE WILL CHECK THE LOCK */
+/* QUEUES TO PERFORM THE PROPER ACTION. */
+/* */
+/* IN SOME PLACES IN THE CODE BELOW THAT HANDLES WHAT TO DO WHEN THE TUPLE IS LOCKED */
+/* WE DO ASSUME THAT NEXT_PARALLEL_QUEUE AND NEXT_SERIAL_QUEUE ON OPERATION_REC_PTR */
+/* HAVE BEEN INITIALISED TO RNIL. THUS WE DO NOT PERFORM THIS ONCE MORE EVEN IF IT */
+/* COULD BE NICE FOR READABILITY. */
+/* --------------------------------------------------------------------------------- */
+Uint32 Dbacc::placeReadInLockQueue(Signal* signal)
+{
+ if (getNoParallelTransaction(queOperPtr.p) == 1) {
+ if ((queOperPtr.p->transId1 == operationRecPtr.p->transId1) &&
+ (queOperPtr.p->transId2 == operationRecPtr.p->transId2)) {
+ /* --------------------------------------------------------------------------------- */
+ /* WE ARE PERFORMING A READ OPERATION AND THIS TRANSACTION ALREADY OWNS THE LOCK */
+ /* ALONE. PUT THE OPERATION LAST IN THE PARALLEL QUEUE. */
+ /* --------------------------------------------------------------------------------- */
+ jam();
+ mlpqOperPtr = queOperPtr;
+ moveLastParallelQueue(signal);
+ operationRecPtr.p->localdata[0] = queOperPtr.p->localdata[0];
+ operationRecPtr.p->localdata[1] = queOperPtr.p->localdata[1];
+ operationRecPtr.p->prevParallelQue = mlpqOperPtr.i;
+ mlpqOperPtr.p->nextParallelQue = operationRecPtr.i;
+ switch (queOperPtr.p->lockMode) {
+ case ZREADLOCK:
+ jam();
+ /*empty*/;
+ break;
+ default:
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* IF THE TRANSACTION PREVIOUSLY SET A WRITE LOCK WE MUST ENSURE THAT ALL */
+ /* OPERATIONS IN THE PARALLEL QUEUE HAVE WRITE LOCK MODE TO AVOID STRANGE BUGS.*/
+ /* --------------------------------------------------------------------------------- */
+ operationRecPtr.p->lockMode = queOperPtr.p->lockMode;
+ break;
+ }//switch
+ return ZPARALLEL_QUEUE;
+ }//if
+ }//if
+ if (queOperPtr.p->nextSerialQue == RNIL) {
+ /* --------------------------------------------------------------------------------- */
+ /* WE ARE PERFORMING A READ OPERATION AND THERE IS NO SERIAL QUEUE. IF THERE IS NO */
+ /* WRITE OPERATION THAT OWNS THE LOCK OR ANY WRITE OPERATION IN THE PARALLEL QUEUE */
+ /* IT IS ENOUGH TO CHECK THE LOCK MODE OF THE LEADER IN THE PARALLEL QUEUE. IF IT IS */
+ /* A READ LOCK THEN WE PLACE OURSELVES IN THE PARALLEL QUEUE OTHERWISE WE GO ON TO */
+ /* PLACE OURSELVES IN THE SERIAL QUEUE. */
+ /* --------------------------------------------------------------------------------- */
+ switch (queOperPtr.p->lockMode) {
+ case ZREADLOCK:
+ jam();
+ mlpqOperPtr = queOperPtr;
+ moveLastParallelQueue(signal);
+ operationRecPtr.p->prevParallelQue = mlpqOperPtr.i;
+ mlpqOperPtr.p->nextParallelQue = operationRecPtr.i;
+ operationRecPtr.p->localdata[0] = queOperPtr.p->localdata[0];
+ operationRecPtr.p->localdata[1] = queOperPtr.p->localdata[1];
+ return ZPARALLEL_QUEUE;
+ default:
+ jam();
+ queOperPtr.p->nextSerialQue = operationRecPtr.i;
+ operationRecPtr.p->prevSerialQue = queOperPtr.i;
+ putOpInFragWaitQue(signal);
+ break;
+ }//switch
+ } else {
+ jam();
+ placeSerialQueueRead(signal);
+ }//if
+ return ZSERIAL_QUEUE;
+}//Dbacc::placeReadInLockQueue()
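+
+/* --------------------------------------------------------------------------------- */
+/* ILLUSTRATIVE NOTE (ADDED COMMENT, NOT PART OF THE ORIGINAL BLOCK DESCRIPTION): */
+/* THE LOCK QUEUE STRUCTURE MANIPULATED ABOVE AND BELOW CAN BE PICTURED AS FOLLOWS. */
+/* THE LOCK OWNER HEADS A PARALLEL QUEUE (OPERATIONS THAT MAY HOLD THE LOCK AT THE */
+/* SAME TIME, LINKED THROUGH NEXT/PREV_PARALLEL_QUE) AND A SERIAL QUEUE (OPERATIONS */
+/* THAT MUST WAIT, LINKED THROUGH NEXT/PREV_SERIAL_QUE). EACH ENTRY IN THE SERIAL */
+/* QUEUE MAY IN TURN HEAD ITS OWN PARALLEL QUEUE OF OPERATIONS THAT CAN BE GRANTED */
+/* THE LOCK TOGETHER ONCE THAT ENTRY REACHES THE FRONT OF THE SERIAL QUEUE. */
+/* --------------------------------------------------------------------------------- */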
+
+/* --------------------------------------------------------------------------------- */
+/* WE WILL CHECK IF THIS TRANSACTION IS ALREADY PLACED AT SOME SPOT IN ONE OF THE */
+/* PARALLEL QUEUES OF THE SERIAL QUEUE, WITHOUT ANY NEIGHBOURS FROM OTHER */
+/* TRANSACTIONS. IF SO WE WILL INSERT IT INTO THAT PARALLEL QUEUE. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::placeSerialQueueRead(Signal* signal)
+{
+ readWriteOpPtr.i = queOperPtr.p->nextSerialQue;
+ ptrCheckGuard(readWriteOpPtr, coprecsize, operationrec);
+ PSQR_LOOP:
+ jam();
+ if (readWriteOpPtr.p->nextSerialQue == RNIL) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THERE WAS NO PREVIOUS OPERATION IN THIS TRANSACTION TOGETHER WITH WHICH WE */
+ /* COULD PUT IT IN THE PARALLEL QUEUE. */
+ /* --------------------------------------------------------------------------------- */
+ checkOnlyReadEntry(signal);
+ return;
+ }//if
+ if (getNoParallelTransaction(readWriteOpPtr.p) == 1) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THERE WAS ONLY ONE TRANSACTION INVOLVED IN THE PARALLEL QUEUE. IF THIS IS OUR */
+ /* TRANSACTION WE CAN STILL GET HOLD OF THE LOCK. */
+ /* --------------------------------------------------------------------------------- */
+ if ((readWriteOpPtr.p->transId1 == operationRecPtr.p->transId1) &&
+ (readWriteOpPtr.p->transId2 == operationRecPtr.p->transId2)) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE ARE PERFORMING A READ IN THE SAME TRANSACTION WHERE WE HAVE PREVIOUSLY */
+ /* EXECUTED AN OPERATION. INSERT-DELETE, READ-UPDATE, READ-READ, */
+ /* UPDATE-UPDATE, UPDATE-DELETE, READ-DELETE, INSERT-READ, INSERT-UPDATE ARE ALLOWED */
+ /* COMBINATIONS. A NEW INSERT AFTER A DELETE IS NOT ALLOWED AND SUCH AN INSERT WILL */
+ /* GO TO THE SERIAL LOCK QUEUE, WHICH IT WILL NOT LEAVE UNTIL A TIME-OUT OCCURS AND */
+ /* THE TRANSACTION IS ABORTED. READS AND UPDATES AFTER DELETES ARE ALSO NOT ALLOWED. */
+ /* --------------------------------------------------------------------------------- */
+ mlpqOperPtr = readWriteOpPtr;
+ moveLastParallelQueue(signal);
+ readWriteOpPtr = mlpqOperPtr;
+ operationRecPtr.p->prevParallelQue = readWriteOpPtr.i;
+ readWriteOpPtr.p->nextParallelQue = operationRecPtr.i;
+ operationRecPtr.p->localdata[0] = readWriteOpPtr.p->localdata[0];
+ operationRecPtr.p->localdata[1] = readWriteOpPtr.p->localdata[1];
+ switch (readWriteOpPtr.p->lockMode) {
+ case ZREADLOCK:
+ jam();
+ /*empty*/;
+ break;
+ default:
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* IF THE TRANSACTION PREVIOUSLY SET A WRITE LOCK WE MUST ENSURE THAT ALL */
+ /* OPERATIONS IN THE PARALLEL QUEUE HAVE WRITE LOCK MODE TO AVOID STRANGE BUGS.*/
+ /* --------------------------------------------------------------------------------- */
+ operationRecPtr.p->lockMode = readWriteOpPtr.p->lockMode;
+ break;
+ }//switch
+ putOpInFragWaitQue(signal);
+ return;
+ }//if
+ }//if
+ readWriteOpPtr.i = readWriteOpPtr.p->nextSerialQue;
+ ptrCheckGuard(readWriteOpPtr, coprecsize, operationrec);
+ goto PSQR_LOOP;
+}//Dbacc::placeSerialQueueRead()
+
+/* --------------------------------------------------------------------------------- */
+/* WE WILL CHECK IF THE LAST ENTRY IN THE SERIAL QUEUE CONTAINS ONLY READ */
+/* OPERATIONS. IF SO WE WILL INSERT IT IN THAT PARALLEL QUEUE. OTHERWISE WE */
+/* WILL PLACE IT AT THE END OF THE SERIAL QUEUE. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::checkOnlyReadEntry(Signal* signal)
+{
+ switch (readWriteOpPtr.p->lockMode) {
+ case ZREADLOCK:
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* SINCE THIS LAST QUEUE ONLY CONTAINS READ LOCKS WE CAN JOIN THE PARALLEL QUEUE AT */
+ /* THE END. */
+ /* --------------------------------------------------------------------------------- */
+ mlpqOperPtr = readWriteOpPtr;
+ moveLastParallelQueue(signal);
+ readWriteOpPtr = mlpqOperPtr;
+ operationRecPtr.p->prevParallelQue = readWriteOpPtr.i;
+ readWriteOpPtr.p->nextParallelQue = operationRecPtr.i;
+ operationRecPtr.p->localdata[0] = readWriteOpPtr.p->localdata[0];
+ operationRecPtr.p->localdata[1] = readWriteOpPtr.p->localdata[1];
+ break;
+ default:
+ jam(); /* PUT THE OPERATION RECORD IN THE SERIAL QUEUE */
+ readWriteOpPtr.p->nextSerialQue = operationRecPtr.i;
+ operationRecPtr.p->prevSerialQue = readWriteOpPtr.i;
+ break;
+ }//switch
+ putOpInFragWaitQue(signal);
+}//Dbacc::checkOnlyReadEntry()
+
+/* --------------------------------------------------------------------------------- */
+/* GET_NO_PARALLEL_TRANSACTION */
+/* --------------------------------------------------------------------------------- */
+Uint32
+Dbacc::getNoParallelTransaction(const Operationrec * op)
+{
+ OperationrecPtr tmp;
+
+ tmp.i= op->nextParallelQue;
+ Uint32 transId[2] = { op->transId1, op->transId2 };
+ while (tmp.i != RNIL)
+ {
+ jam();
+ ptrCheckGuard(tmp, coprecsize, operationrec);
+ if (tmp.p->transId1 == transId[0] && tmp.p->transId2 == transId[1])
+ tmp.i = tmp.p->nextParallelQue;
+ else
+ return 2;
+ }
+ return 1;
+}//Dbacc::getNoParallelTransaction()
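+
+/* --------------------------------------------------------------------------------- */
+/* ILLUSTRATIVE NOTE (ADDED COMMENT): DESPITE ITS NAME, GET_NO_PARALLEL_TRANSACTION */
+/* DOES NOT COUNT TRANSACTIONS. IT RETURNS 1 WHEN EVERY OPERATION IN THE PARALLEL */
+/* QUEUE STARTING AT OP BELONGS TO OP'S TRANSACTION, AND 2 AS SOON AS AN OPERATION */
+/* FROM ANOTHER TRANSACTION IS FOUND. */
+/* --------------------------------------------------------------------------------- */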
+
+void Dbacc::moveLastParallelQueue(Signal* signal)
+{
+ while (mlpqOperPtr.p->nextParallelQue != RNIL) {
+ jam();
+ mlpqOperPtr.i = mlpqOperPtr.p->nextParallelQue;
+ ptrCheckGuard(mlpqOperPtr, coprecsize, operationrec);
+ }//while
+}//Dbacc::moveLastParallelQueue()
+
+void Dbacc::moveLastParallelQueueWrite(Signal* signal)
+{
+ /* --------------------------------------------------------------------------------- */
+ /* ENSURE THAT ALL OPERATIONS HAVE LOCK MODE SET TO WRITE SINCE WE INSERT A */
+ /* WRITE LOCK INTO THE PARALLEL QUEUE. */
+ /* --------------------------------------------------------------------------------- */
+ while (mlpqOperPtr.p->nextParallelQue != RNIL) {
+ jam();
+ mlpqOperPtr.p->lockMode = operationRecPtr.p->lockMode;
+ mlpqOperPtr.i = mlpqOperPtr.p->nextParallelQue;
+ ptrCheckGuard(mlpqOperPtr, coprecsize, operationrec);
+ }//while
+ mlpqOperPtr.p->lockMode = operationRecPtr.p->lockMode;
+}//Dbacc::moveLastParallelQueueWrite()
+
+/* --------------------------------------------------------------------------------- */
+/* PLACE_WRITE_IN_LOCK_QUEUE */
+/* INPUT: OPERATION_REC_PTR OUR OPERATION POINTER */
+/* QUE_OPER_PTR LOCK QUEUE OWNER OPERATION POINTER */
+/* PWI_PAGEPTR PAGE POINTER OF ELEMENT */
+/* TPWI_ELEMENTPTR ELEMENT POINTER OF ELEMENT */
+/* OUTPUT TRESULT = */
+/* ZPARALLEL_QUEUE OPERATION PLACED IN PARALLEL QUEUE */
+/* OPERATION CAN PROCEED NOW. */
+/* ZSERIAL_QUEUE OPERATION PLACED IN SERIAL QUEUE */
+/* ERROR CODE OPERATION NEEDS ABORTING */
+/* --------------------------------------------------------------------------------- */
+Uint32 Dbacc::placeWriteInLockQueue(Signal* signal)
+{
+ if (!((getNoParallelTransaction(queOperPtr.p) == 1) &&
+ (queOperPtr.p->transId1 == operationRecPtr.p->transId1) &&
+ (queOperPtr.p->transId2 == operationRecPtr.p->transId2))) {
+ jam();
+ placeSerialQueueWrite(signal);
+ return ZSERIAL_QUEUE;
+ }//if
+
+ /*
+ WE ARE PERFORMING A READ EXCLUSIVE, INSERT, UPDATE OR DELETE IN THE SAME
+ TRANSACTION WHERE WE HAVE PREVIOUSLY EXECUTED AN OPERATION.
+ Read-All, Update-All, Insert-All and Delete-Insert are allowed
+ combinations.
+ Delete-Read, Delete-Update and Delete-Delete are not allowed
+ combinations and will result in a tuple-not-found error.
+ */
+ mlpqOperPtr = queOperPtr;
+ moveLastParallelQueueWrite(signal);
+
+ if (operationRecPtr.p->operation == ZINSERT &&
+ mlpqOperPtr.p->operation != ZDELETE){
+ jam();
+ return ZWRITE_ERROR;
+ }//if
+
+ if(operationRecPtr.p->operation == ZWRITE)
+ {
+ operationRecPtr.p->operation =
+ (mlpqOperPtr.p->operation == ZDELETE) ? ZINSERT : ZUPDATE;
+ }
+
+ operationRecPtr.p->localdata[0] = queOperPtr.p->localdata[0];
+ operationRecPtr.p->localdata[1] = queOperPtr.p->localdata[1];
+ operationRecPtr.p->prevParallelQue = mlpqOperPtr.i;
+ mlpqOperPtr.p->nextParallelQue = operationRecPtr.i;
+ return ZPARALLEL_QUEUE;
+}//Dbacc::placeWriteInLockQueue()
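+
+/* --------------------------------------------------------------------------------- */
+/* ILLUSTRATIVE NOTE (ADDED COMMENT): THE DECISION MADE ABOVE CAN BE SUMMARISED AS: */
+/* - WHEN THE LOCK IS HELD BY, OR SHARED WITH, ANOTHER TRANSACTION THE OPERATION */
+/* IS PLACED IN THE SERIAL QUEUE. */
+/* - ZINSERT IS ONLY ACCEPTED IN THE PARALLEL QUEUE WHEN THE LAST QUEUED OPERATION */
+/* WAS A ZDELETE, OTHERWISE ZWRITE_ERROR IS RETURNED. */
+/* - A GENERIC ZWRITE IS RESOLVED TO ZINSERT AFTER A ZDELETE AND TO ZUPDATE */
+/* OTHERWISE, BEFORE THE OPERATION IS APPENDED TO THE PARALLEL QUEUE. */
+/* --------------------------------------------------------------------------------- */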
+
+/* --------------------------------------------------------------------------------- */
+/* WE HAVE TO PLACE IT SOMEWHERE IN THE SERIAL QUEUE INSTEAD. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::placeSerialQueueWrite(Signal* signal)
+{
+ readWriteOpPtr = queOperPtr;
+ PSQW_LOOP:
+ if (readWriteOpPtr.p->nextSerialQue == RNIL) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE COULD NOT PUT IN ANY PARALLEL QUEUE. WE MUST PUT IT LAST IN THE SERIAL QUEUE. */
+ /* --------------------------------------------------------------------------------- */
+ readWriteOpPtr.p->nextSerialQue = operationRecPtr.i;
+ operationRecPtr.p->prevSerialQue = readWriteOpPtr.i;
+ putOpInFragWaitQue(signal);
+ return;
+ }//if
+ readWriteOpPtr.i = readWriteOpPtr.p->nextSerialQue;
+ ptrCheckGuard(readWriteOpPtr, coprecsize, operationrec);
+ if (getNoParallelTransaction(readWriteOpPtr.p) == 1) {
+ /* --------------------------------------------------------------------------------- */
+ /* THERE WAS ONLY ONE TRANSACTION INVOLVED IN THE PARALLEL QUEUE. IF THIS IS OUR */
+ /* TRANSACTION WE CAN STILL GET HOLD OF THE LOCK. */
+ /* --------------------------------------------------------------------------------- */
+ if ((readWriteOpPtr.p->transId1 == operationRecPtr.p->transId1) &&
+ (readWriteOpPtr.p->transId2 == operationRecPtr.p->transId2)) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE ARE PERFORMING AN UPDATE OR DELETE IN THE SAME TRANSACTION WHERE WE HAVE */
+ /* PREVIOUSLY EXECUTED AN OPERATION. INSERT-DELETE, READ-UPDATE, READ-READ, */
+ /* UPDATE-UPDATE, UPDATE-DELETE, READ-DELETE, INSERT-READ, INSERT-UPDATE ARE ALLOWED */
+ /* COMBINATIONS. A NEW INSERT AFTER A DELETE IS NOT ALLOWED AND SUCH AN INSERT WILL */
+ /* GO TO THE SERIAL LOCK QUEUE, WHICH IT WILL NOT LEAVE UNTIL A TIME-OUT OCCURS AND */
+ /* THE TRANSACTION IS ABORTED. READS AND UPDATES AFTER DELETES ARE ALSO NOT ALLOWED. */
+ /* --------------------------------------------------------------------------------- */
+ mlpqOperPtr = readWriteOpPtr;
+ moveLastParallelQueueWrite(signal);
+ readWriteOpPtr = mlpqOperPtr;
+ operationRecPtr.p->prevParallelQue = readWriteOpPtr.i;
+ readWriteOpPtr.p->nextParallelQue = operationRecPtr.i;
+ operationRecPtr.p->localdata[0] = readWriteOpPtr.p->localdata[0];
+ operationRecPtr.p->localdata[1] = readWriteOpPtr.p->localdata[1];
+ putOpInFragWaitQue(signal);
+ return;
+ }//if
+ }//if
+ goto PSQW_LOOP;
+}//Dbacc::placeSerialQueueWrite()
+
+/* ------------------------------------------------------------------------- */
+/* ACC KEYREQ END */
+/* ------------------------------------------------------------------------- */
+void Dbacc::acckeyref1Lab(Signal* signal, Uint32 result_code)
+{
+ if (operationRecPtr.p->keyinfoPage != RNIL) {
+ jam();
+ rpPageptr.i = operationRecPtr.p->keyinfoPage;
+ ptrCheckGuard(rpPageptr, cpagesize, page8);
+ releasePage(signal);
+ operationRecPtr.p->keyinfoPage = RNIL;
+ }//if
+ operationRecPtr.p->transactionstate = WAIT_COMMIT_ABORT;
+ /* ************************<< */
+ /* ACCKEYREF */
+ /* ************************<< */
+ signal->theData[0] = cminusOne;
+ signal->theData[1] = result_code;
+ return;
+}//Dbacc::acckeyref1Lab()
+
+/* ******************--------------------------------------------------------------- */
+/* ACCMINUPDATE UPDATE LOCAL KEY REQ */
+/* DESCRIPTION: UPDATES THE LOCAL KEY OF AN ELEMENT IN THE HASH TABLE. */
+/* THIS SIGNAL IS EXPECTED AFTER ANY INSERT REQUEST. */
+/* ENTER ACCMINUPDATE WITH SENDER: LQH, LEVEL B */
+/* OPERATION_REC_PTR, OPERATION RECORD PTR */
+/* CLOCALKEY(0), LOCAL KEY 1 */
+/* CLOCALKEY(1) LOCAL KEY 2 */
+/* ******************--------------------------------------------------------------- */
+void Dbacc::execACCMINUPDATE(Signal* signal)
+{
+ Page8Ptr ulkPageidptr;
+ Uint32 tulkLocalPtr;
+ Uint32 tlocalkey1, tlocalkey2;
+ Uint32 TlogStart;
+
+ jamEntry();
+ operationRecPtr.i = signal->theData[0];
+ tlocalkey1 = signal->theData[1];
+ tlocalkey2 = signal->theData[2];
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ if (operationRecPtr.p->transactionstate == ACTIVE) {
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ulkPageidptr.i = operationRecPtr.p->elementPage;
+ tulkLocalPtr = operationRecPtr.p->elementPointer + operationRecPtr.p->elementIsforward;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ ptrCheckGuard(ulkPageidptr, cpagesize, page8);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ //----------------------------------------------------------
+ // To avoid undo logging the element header we take care to only
+ // undo log the local key part.
+ //----------------------------------------------------------
+ if (operationRecPtr.p->elementIsforward == 1) {
+ jam();
+ TlogStart = tulkLocalPtr;
+ } else {
+ jam();
+ TlogStart = tulkLocalPtr - fragrecptr.p->localkeylen + 1;
+ }//if
+ datapageptr.p = ulkPageidptr.p;
+ cundoinfolength = fragrecptr.p->localkeylen;
+ cundoElemIndex = TlogStart;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(ulkPageidptr, tulkLocalPtr, tlocalkey1);
+ arrGuard(tulkLocalPtr, 2048);
+ ulkPageidptr.p->word32[tulkLocalPtr] = tlocalkey1;
+ operationRecPtr.p->localdata[0] = tlocalkey1;
+ if (fragrecptr.p->localkeylen == 1) {
+ return;
+ } else if (fragrecptr.p->localkeylen == 2) {
+ jam();
+ tulkLocalPtr = tulkLocalPtr + operationRecPtr.p->elementIsforward;
+ operationRecPtr.p->localdata[1] = tlocalkey2;
+ dbgWord32(ulkPageidptr, tulkLocalPtr, tlocalkey2);
+ arrGuard(tulkLocalPtr, 2048);
+ ulkPageidptr.p->word32[tulkLocalPtr] = tlocalkey2;
+ return;
+ } else {
+ jam();
+ }//if
+ }//if
+ ndbrequire(false);
+}//Dbacc::execACCMINUPDATE()
+
+/* ******************--------------------------------------------------------------- */
+/* ACC_COMMITREQ COMMIT TRANSACTION */
+/* SENDER: LQH, LEVEL B */
+/* INPUT: OPERATION_REC_PTR , */
+/* ******************--------------------------------------------------------------- */
+void Dbacc::execACC_COMMITREQ(Signal* signal)
+{
+ Uint8 Toperation;
+ jamEntry();
+ operationRecPtr.i = signal->theData[0];
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ ndbrequire(operationRecPtr.p->transactionstate == ACTIVE);
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ commitOperation(signal);
+ Toperation = operationRecPtr.p->operation;
+ operationRecPtr.p->transactionstate = IDLE;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+ if(Toperation != ZREAD){
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ rootfragrecptr.p->m_commit_count++;
+ if (Toperation != ZINSERT) {
+ if (Toperation != ZDELETE) {
+ return;
+ } else {
+ jam();
+ rootfragrecptr.p->noOfElements--;
+ fragrecptr.p->slack += operationRecPtr.p->insertDeleteLen;
+ if (fragrecptr.p->slack > fragrecptr.p->slackCheck) {
+ /* TIME FOR JOIN BUCKETS PROCESS */
+ if (fragrecptr.p->expandCounter > 0) {
+ if (fragrecptr.p->expandFlag < 2) {
+ jam();
+ signal->theData[0] = fragrecptr.i;
+ signal->theData[1] = fragrecptr.p->p;
+ signal->theData[2] = fragrecptr.p->maxp;
+ signal->theData[3] = fragrecptr.p->expandFlag;
+ fragrecptr.p->expandFlag = 2;
+ sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 4, JBB);
+ }//if
+ }//if
+ }//if
+ }//if
+ } else {
+ jam(); /* EXPAND PROCESS HANDLING */
+ rootfragrecptr.p->noOfElements++;
+ fragrecptr.p->slack -= operationRecPtr.p->insertDeleteLen;
+ if (fragrecptr.p->slack >= (1u << 31)) {
+ /* I.E. SLACK IS NEGATIVE WHEN READ AS A SIGNED VALUE */
+ if (fragrecptr.p->expandFlag == 0) {
+ jam();
+ fragrecptr.p->expandFlag = 2;
+ signal->theData[0] = fragrecptr.i;
+ signal->theData[1] = fragrecptr.p->p;
+ signal->theData[2] = fragrecptr.p->maxp;
+ sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB);
+ }//if
+ }//if
+ }//if
+ }
+ return;
+}//Dbacc::execACC_COMMITREQ()
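+
+/* --------------------------------------------------------------------------------- */
+/* ILLUSTRATIVE SKETCH (ADDED, NOT CALLED BY THE BLOCK): THE TEST */
+/* SLACK >= (1u << 31) ABOVE SIMPLY CHECKS THE SIGN BIT, I.E. WHETHER THE UNSIGNED */
+/* SLACK WOULD BE NEGATIVE IF READ AS A SIGNED 32-BIT VALUE. A MINIMAL EQUIVALENT, */
+/* ASSUMING THE 32-BIT Uint32 TYPE USED THROUGHOUT THIS FILE: */
+/* --------------------------------------------------------------------------------- */
+static inline bool
+sketch_slack_is_negative(Uint32 slack)
+{
+ /* Bit 31 set <=> the same bit pattern is negative as a signed 32-bit integer. */
+ return (slack & 0x80000000) != 0;
+}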
+
+/* ******************--------------------------------------------------------------- */
+/* ACC ABORT REQ ABORT ALL OPERATIONS OF THE TRANSACTION */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+/* ******************--------------------------------------------------------------- */
+/* ACC ABORT REQ ABORT TRANSACTION */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execACC_ABORTREQ(Signal* signal)
+{
+ jamEntry();
+ accAbortReqLab(signal, true);
+}//Dbacc::execACC_ABORTREQ()
+
+void Dbacc::accAbortReqLab(Signal* signal, bool sendConf)
+{
+ operationRecPtr.i = signal->theData[0];
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ tresult = 0; /* ZFALSE */
+ if ((operationRecPtr.p->transactionstate == ACTIVE) ||
+ (operationRecPtr.p->transactionstate == WAIT_COMMIT_ABORT)) {
+ jam();
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ operationRecPtr.p->transactionstate = ABORT;
+ abortOperation(signal);
+ } else {
+ ndbrequire(operationRecPtr.p->transactionstate == IDLE);
+ jam();
+ }//if
+ operationRecPtr.p->transactionstate = IDLE;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+ if (! sendConf)
+ return;
+ signal->theData[0] = operationRecPtr.p->userptr;
+ sendSignal(operationRecPtr.p->userblockref, GSN_ACC_ABORTCONF, signal, 1, JBB);
+ return;
+}//Dbacc::accAbortReqLab()
+
+/*
+ * Lock or unlock tuple.
+ */
+void Dbacc::execACC_LOCKREQ(Signal* signal)
+{
+ jamEntry();
+ AccLockReq* sig = (AccLockReq*)signal->getDataPtrSend();
+ AccLockReq reqCopy = *sig;
+ AccLockReq* const req = &reqCopy;
+ Uint32 lockOp = (req->requestInfo & 0xFF);
+ if (lockOp == AccLockReq::LockShared ||
+ lockOp == AccLockReq::LockExclusive) {
+ jam();
+ // find table
+ tabptr.i = req->tableId;
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+ // find fragment (TUX will know it)
+ if (req->fragPtrI == RNIL) {
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+ jam();
+ if (tabptr.p->fragptrholder[i] != RNIL) {
+ rootfragrecptr.i = tabptr.p->fragptrholder[i];
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (rootfragrecptr.p->fragmentid[0] == req->fragId) {
+ jam();
+ req->fragPtrI = rootfragrecptr.p->fragmentptr[0];
+ break;
+ }
+ if (rootfragrecptr.p->fragmentid[1] == req->fragId) {
+ jam();
+ req->fragPtrI = rootfragrecptr.p->fragmentptr[1];
+ break;
+ }
+ }
+ }
+ }
+ fragrecptr.i = req->fragPtrI;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ ndbrequire(req->fragId == fragrecptr.p->myfid);
+ // caller must be explicit here
+ ndbrequire(req->accOpPtr == RNIL);
+ // seize operation to hold the lock
+ if (cfreeopRec != RNIL) {
+ jam();
+ seizeOpRec(signal);
+ // init as in ACCSEIZEREQ
+ operationRecPtr.p->userptr = req->userPtr;
+ operationRecPtr.p->userblockref = req->userRef;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+ operationRecPtr.p->transactionstate = IDLE;
+ // do read with lock via ACCKEYREQ
+ Uint32 lockMode = (lockOp == AccLockReq::LockShared) ? 0 : 1;
+ Uint32 opCode = ZSCAN_OP;
+ signal->theData[0] = operationRecPtr.i;
+ signal->theData[1] = fragrecptr.i;
+ signal->theData[2] = opCode | (lockMode << 4) | (1u << 31);
+ signal->theData[3] = req->hashValue;
+ signal->theData[4] = 1; // fake primKeyLen
+ signal->theData[5] = req->transId1;
+ signal->theData[6] = req->transId2;
+ // enter local key in place of PK
+ signal->theData[7] = req->tupAddr;
+ EXECUTE_DIRECT(DBACC, GSN_ACCKEYREQ, signal, 8);
+ // translate the result
+ if (signal->theData[0] < RNIL) {
+ jam();
+ req->returnCode = AccLockReq::Success;
+ req->accOpPtr = operationRecPtr.i;
+ } else if (signal->theData[0] == RNIL) {
+ jam();
+ req->returnCode = AccLockReq::IsBlocked;
+ req->accOpPtr = operationRecPtr.i;
+ } else {
+ ndbrequire(signal->theData[0] == (UintR)-1);
+ releaseOpRec(signal);
+ req->returnCode = AccLockReq::Refused;
+ req->accOpPtr = RNIL;
+ }
+ } else {
+ jam();
+ req->returnCode = AccLockReq::NoFreeOp;
+ }
+ *sig = *req;
+ return;
+ }
+ if (lockOp == AccLockReq::Unlock) {
+ jam();
+ // do unlock via ACC_COMMITREQ (immediate)
+ signal->theData[0] = req->accOpPtr;
+ EXECUTE_DIRECT(DBACC, GSN_ACC_COMMITREQ, signal, 1);
+ releaseOpRec(signal);
+ req->returnCode = AccLockReq::Success;
+ *sig = *req;
+ return;
+ }
+ if (lockOp == AccLockReq::Abort) {
+ jam();
+ // do abort via ACC_ABORTREQ (immediate)
+ signal->theData[0] = req->accOpPtr;
+ accAbortReqLab(signal, false);
+ releaseOpRec(signal);
+ req->returnCode = AccLockReq::Success;
+ *sig = *req;
+ return;
+ }
+ if (lockOp == AccLockReq::AbortWithConf) {
+ jam();
+ // do abort via ACC_ABORTREQ (with conf signal)
+ signal->theData[0] = req->accOpPtr;
+ accAbortReqLab(signal, true);
+ releaseOpRec(signal);
+ req->returnCode = AccLockReq::Success;
+ *sig = *req;
+ return;
+ }
+ ndbrequire(false);
+}
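+
+/* --------------------------------------------------------------------------------- */
+/* ILLUSTRATIVE NOTE (ADDED COMMENT): THE RESULT TRANSLATION ABOVE MAPS THE DIRECT */
+/* ACCKEYREQ RESULT IN THEDATA[0] AS FOLLOWS: A VALUE BELOW RNIL MEANS THE LOCK WAS */
+/* GRANTED (SUCCESS), RNIL MEANS THE OPERATION WAS QUEUED BEHIND THE LOCK */
+/* (IS_BLOCKED), AND (UintR)-1 (CMINUSONE, AS WRITTEN BY ACCKEYREF1LAB) MEANS THE */
+/* REQUEST WAS REFUSED. */
+/* --------------------------------------------------------------------------------- */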
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF EXECUTE OPERATION MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* MODULE: INSERT */
+/* THE FOLLOWING SUBROUTINES ARE ONLY USED BY INSERT_ELEMENT. THIS */
+/* ROUTINE IS THE SOLE INTERFACE TO INSERT ELEMENTS INTO THE INDEX. */
+/* CURRENT USERS ARE INSERT REQUESTS, EXPAND CONTAINER AND SHRINK */
+/* CONTAINER. */
+/* */
+/* THE FOLLOWING SUBROUTINES ARE INCLUDED IN THIS MODULE: */
+/* INSERT_ELEMENT */
+/* INSERT_CONTAINER */
+/* ADDNEWCONTAINER */
+/* GETFREELIST */
+/* INCREASELISTCONT */
+/* SEIZE_LEFTLIST */
+/* SEIZE_RIGHTLIST */
+/* */
+/* THESE ROUTINES ARE ONLY USED BY THIS MODULE AND BY NO ONE ELSE. */
+/* ALSO THE ROUTINES MAKE NO USE OF ROUTINES IN OTHER MODULES. */
+/* TAKE_REC_OUT_OF_FREE_OVERPAGE AND RELEASE_OVERFLOW_REC ARE */
+/* EXCEPTIONS TO THIS RULE. */
+/* */
+/* THE ONLY SHORT-LIVED VARIABLES USED IN OTHER PARTS OF THE BLOCK ARE */
+/* THOSE DEFINED AS INPUT AND OUTPUT IN INSERT_ELEMENT */
+/* SHORT-LIVED VARIABLES INCLUDE TEMPORARY VARIABLES, COMMON VARIABLES */
+/* AND POINTER VARIABLES. */
+/* THE ONLY EXCEPTION TO THIS RULE IS FRAGRECPTR WHICH POINTS TO THE */
+/* FRAGMENT RECORD. THIS IS MORE LESS STATIC ALWAYS DURING A SIGNAL */
+/* EXECUTION. */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* INSERT_ELEMENT */
+/* INPUT: */
+/* IDR_PAGEPTR (POINTER TO THE ACTIVE PAGE REC) */
+/* TIDR_PAGEINDEX (INDEX OF THE CONTAINER) */
+/* TIDR_FORWARD (DIRECTION FORWARD OR BACKWARD) */
+/* TIDR_ELEMHEAD (HEADER OF ELEMENT TO BE INSERTED */
+/* CIDR_KEYS(ARRAY OF TUPLE KEYS) */
+/* CLOCALKEY(ARRAY OF LOCAL KEYS). */
+/* FRAGRECPTR */
+/* IDR_OPERATION_REC_PTR */
+/* TIDR_KEY_LEN */
+/* */
+/* OUTPUT: */
+/* TIDR_PAGEINDEX (PAGE INDEX OF INSERTED ELEMENT) */
+/* IDR_PAGEPTR (PAGE POINTER OF INSERTED ELEMENT) */
+/* TIDR_FORWARD (CONTAINER DIRECTION OF INSERTED ELEMENT) */
+/* NONE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::insertElement(Signal* signal)
+{
+ DirRangePtr inrOverflowrangeptr;
+ DirectoryarrayPtr inrOverflowDirptr;
+ OverflowRecordPtr inrOverflowRecPtr;
+ Page8Ptr inrNewPageptr;
+ Uint32 tinrNextSamePage;
+ Uint32 tinrTmp;
+
+ do {
+ insertContainer(signal);
+ if (tidrResult != ZFALSE) {
+ jam();
+ return;
+ /* INSERTION IS DONE, OR */
+ /* AN ERROR IS DETECTED */
+ }//if
+ if (((tidrContainerhead >> 7) & 0x3) != 0) {
+ tinrNextSamePage = (tidrContainerhead >> 9) & 0x1; /* CHECK BIT FOR CHECKING WHERE */
+ /* THE NEXT CONTAINER IS IN THE SAME PAGE */
+ tidrPageindex = tidrContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */
+ if (((tidrContainerhead >> 7) & 3) == ZLEFT) {
+ jam();
+ tidrForward = ZTRUE;
+ } else if (((tidrContainerhead >> 7) & 3) == ZRIGHT) {
+ jam();
+ tidrForward = cminusOne;
+ } else {
+ ndbrequire(false);
+ return;
+ }//if
+ if (tinrNextSamePage == ZFALSE) {
+ jam(); /* NEXT CONTAINER IS IN AN OVERFLOW PAGE */
+ tinrTmp = idrPageptr.p->word32[tidrContainerptr + 1];
+ inrOverflowrangeptr.i = fragrecptr.p->overflowdir;
+ ptrCheckGuard(inrOverflowrangeptr, cdirrangesize, dirRange);
+ arrGuard((tinrTmp >> 8), 256);
+ inrOverflowDirptr.i = inrOverflowrangeptr.p->dirArray[tinrTmp >> 8];
+ ptrCheckGuard(inrOverflowDirptr, cdirarraysize, directoryarray);
+ idrPageptr.i = inrOverflowDirptr.p->pagep[tinrTmp & 0xff];
+ ptrCheckGuard(idrPageptr, cpagesize, page8);
+ }//if
+ ndbrequire(tidrPageindex < ZEMPTYLIST);
+ } else {
+ break;
+ }//if
+ } while (1);
+ gflPageptr.p = idrPageptr.p;
+ getfreelist(signal);
+ if (tgflPageindex == ZEMPTYLIST) {
+ jam();
+ /* NO FREE BUFFER IS FOUND */
+ if (fragrecptr.p->firstOverflowRec == RNIL) {
+ jam();
+ allocOverflowPage(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ }//if
+ inrOverflowRecPtr.i = fragrecptr.p->firstOverflowRec;
+ ptrCheckGuard(inrOverflowRecPtr, coverflowrecsize, overflowRecord);
+ inrNewPageptr.i = inrOverflowRecPtr.p->overpage;
+ ptrCheckGuard(inrNewPageptr, cpagesize, page8);
+ gflPageptr.p = inrNewPageptr.p;
+ getfreelist(signal);
+ ndbrequire(tgflPageindex != ZEMPTYLIST);
+ tancNext = 0;
+ } else {
+ jam();
+ inrNewPageptr = idrPageptr;
+ tancNext = 1;
+ }//if
+ tslUpdateHeader = ZTRUE;
+ tslPageindex = tgflPageindex;
+ slPageptr.p = inrNewPageptr.p;
+ if (tgflBufType == ZLEFT) {
+ seizeLeftlist(signal);
+ tidrForward = ZTRUE;
+ } else {
+ seizeRightlist(signal);
+ tidrForward = cminusOne;
+ }//if
+ tancPageindex = tgflPageindex;
+ tancPageid = inrNewPageptr.p->word32[ZPOS_PAGE_ID];
+ tancBufType = tgflBufType;
+ tancContainerptr = tidrContainerptr;
+ ancPageptr.p = idrPageptr.p;
+ addnewcontainer(signal);
+
+ idrPageptr = inrNewPageptr;
+ tidrPageindex = tgflPageindex;
+ insertContainer(signal);
+ ndbrequire(tidrResult == ZTRUE);
+}//Dbacc::insertElement()
+
+/* --------------------------------------------------------------------------------- */
+/* INSERT_CONTAINER */
+/* INPUT: */
+/* IDR_PAGEPTR (POINTER TO THE ACTIVE PAGE REC) */
+/* TIDR_PAGEINDEX (INDEX OF THE CONTAINER) */
+/* TIDR_FORWARD (DIRECTION FORWARD OR BACKWARD) */
+/* TIDR_ELEMHEAD (HEADER OF ELEMENT TO BE INSERTED */
+/* CKEYS(ARRAY OF TUPLE KEYS) */
+/* CLOCALKEY(ARRAY OF LOCAL KEYS). */
+/* TIDR_KEY_LEN */
+/* FRAGRECPTR */
+/* IDR_OPERATION_REC_PTR */
+/* OUTPUT: */
+/* TIDR_RESULT (ZTRUE FOR SUCCESS AND ZFALSE OTHERWISE) */
+/* TIDR_CONTAINERHEAD (HEADER OF CONTAINER) */
+/* TIDR_CONTAINERPTR (POINTER TO CONTAINER HEADER) */
+/* */
+/* DESCRIPTION: */
+/* THE FREE AREA OF THE CONTAINER WILL BE CALCULATED. IF IT IS */
+/* LARGER THAN OR EQUAL TO THE ELEMENT LENGTH, THE ELEMENT WILL BE */
+/* INSERTED INTO THE CONTAINER AND THE CONTAINER HEAD WILL BE UPDATED. */
+/* THIS ROUTINE ALWAYS DEALS WITH ONLY ONE CONTAINER AND NEVER */
+/* TOUCHES ANYTHING OUTSIDE OF THIS CONTAINER. */
+/* */
+/* SHORT FORM: IDR */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::insertContainer(Signal* signal)
+{
+ Uint32 tidrContainerlen;
+ Uint32 tidrConfreelen;
+ Uint32 tidrNextSide;
+ Uint32 tidrNextConLen;
+ Uint32 tidrIndex;
+ Uint32 tidrInputIndex;
+ Uint32 tidrContLen;
+ Uint32 guard26;
+
+ tidrResult = ZFALSE;
+ tidrContainerptr = (tidrPageindex << ZSHIFT_PLUS) - (tidrPageindex << ZSHIFT_MINUS);
+ tidrContainerptr = tidrContainerptr + ZHEAD_SIZE;
+ /* --------------------------------------------------------------------------------- */
+ /* CALCULATE THE POINTER TO THE ELEMENT TO BE INSERTED AND THE POINTER TO THE */
+ /* CONTAINER HEADER OF THE OTHER SIDE OF THE BUFFER. */
+ /* --------------------------------------------------------------------------------- */
+ if (tidrForward == ZTRUE) {
+ jam();
+ tidrNextSide = tidrContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
+ arrGuard(tidrNextSide + 1, 2048);
+ tidrContainerhead = idrPageptr.p->word32[tidrContainerptr];
+ tidrContainerlen = tidrContainerhead >> 26;
+ tidrIndex = tidrContainerptr + tidrContainerlen;
+ } else {
+ jam();
+ tidrNextSide = tidrContainerptr;
+ tidrContainerptr = tidrContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
+ arrGuard(tidrContainerptr + 1, 2048);
+ tidrContainerhead = idrPageptr.p->word32[tidrContainerptr];
+ tidrContainerlen = tidrContainerhead >> 26;
+ tidrIndex = (tidrContainerptr - tidrContainerlen) + (ZCON_HEAD_SIZE - 1);
+ }//if
+ if (tidrContainerlen > (ZBUF_SIZE - 3)) {
+ return;
+ }//if
+ tidrConfreelen = ZBUF_SIZE - tidrContainerlen;
+ /* --------------------------------------------------------------------------------- */
+ /* WE CALCULATE THE TOTAL LENGTH THE CONTAINER CAN EXPAND TO */
+ /* THIS INCLUDES THE OTHER SIDE OF THE BUFFER IF POSSIBLE TO EXPAND THERE. */
+ /* --------------------------------------------------------------------------------- */
+ if (((tidrContainerhead >> 10) & 1) == 0) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE HAVE NOT EXPANDED TO THE ENTIRE BUFFER YET. WE CAN THUS READ THE OTHER */
+ /* SIDE'S CONTAINER HEADER TO READ HIS LENGTH. */
+ /* --------------------------------------------------------------------------------- */
+ tidrNextConLen = idrPageptr.p->word32[tidrNextSide] >> 26;
+ tidrConfreelen = tidrConfreelen - tidrNextConLen;
+ if (tidrConfreelen > ZBUF_SIZE) {
+ ndbrequire(false);
+ /* --------------------------------------------------------------------------------- */
+ /* THE BUFFERS ARE PLACED ON TOP OF EACH OTHER. THIS SHOULD NEVER OCCUR. */
+ /* --------------------------------------------------------------------------------- */
+ return;
+ }//if
+ } else {
+ jam();
+ tidrNextConLen = 1; /* INDICATE OTHER SIDE IS NOT PART OF FREE LIST */
+ }//if
+ if (tidrConfreelen < fragrecptr.p->elementLength) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE CONTAINER COULD NOT BE EXPANDED TO FIT THE NEW ELEMENT. WE HAVE TO */
+ /* RETURN AND FIND A NEW CONTAINER TO INSERT IT INTO. */
+ /* --------------------------------------------------------------------------------- */
+ return;
+ }//if
+ tidrContainerlen = tidrContainerlen + fragrecptr.p->elementLength;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = idrPageptr.p;
+ cundoElemIndex = tidrContainerptr;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ if (tidrNextConLen == 0) {
+ /* EACH SIDE OF THE BUFFER WHICH BELONGS TO A FREE */
+ /* LIST HAS ZERO LENGTH. */
+ if (tidrContainerlen > ZUP_LIMIT) {
+ dbgWord32(idrPageptr, tidrContainerptr, idrPageptr.p->word32[tidrContainerptr] | (1 << 10));
+ idrPageptr.p->word32[tidrContainerptr] = idrPageptr.p->word32[tidrContainerptr] | (1 << 10);
+ tslUpdateHeader = ZFALSE;
+ tslPageindex = tidrPageindex;
+ slPageptr.p = idrPageptr.p;
+ if (tidrForward == ZTRUE) {
+ jam();
+ seizeRightlist(signal); /* REMOVE THE RIGHT SIDE OF THE BUFFER FROM THE LIST */
+ } else {
+ jam();
+ /* OF THE FREE CONTAINERS */
+ seizeLeftlist(signal); /* REMOVE THE LEFT SIDE OF THE BUFFER FROM THE LIST */
+ }//if
+ }//if
+ }//if
+ /* OF THE FREE CONTAINERS */
+ /* --------------------------------------------------------------------------------- */
+ /* WE HAVE NOW FOUND A FREE SPOT IN THE CURRENT CONTAINER. WE INSERT THE */
+ /* ELEMENT HERE. THE ELEMENT CONTAINS A HEADER, A LOCAL KEY AND A TUPLE KEY. */
+ /* BEFORE INSERTING THE ELEMENT WE WILL UPDATE THE OPERATION RECORD WITH THE */
+ /* DATA CONCERNING WHERE WE INSERTED THE ELEMENT. THIS MAKES IT EASY TO FIND */
+ /* THIS INFORMATION WHEN WE RETURN TO UPDATE THE LOCAL KEY OR RETURN TO COMMIT */
+ /* OR ABORT THE INSERT. IF NO OPERATION RECORD EXIST IT MEANS THAT WE ARE */
+ /* PERFORMING THIS AS A PART OF THE EXPAND OR SHRINK PROCESS. */
+ /* --------------------------------------------------------------------------------- */
+ if (idrOperationRecPtr.i != RNIL) {
+ jam();
+ idrOperationRecPtr.p->elementIsforward = tidrForward;
+ idrOperationRecPtr.p->elementPage = idrPageptr.i;
+ idrOperationRecPtr.p->elementContainer = tidrContainerptr;
+ idrOperationRecPtr.p->elementPointer = tidrIndex;
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* WE CHOOSE TO UNDO LOG INSERTS BY WRITING THE BEFORE VALUE TO THE UNDO LOG. */
+ /* WE COULD ALSO HAVE DONE THIS BY WRITING THIS BEFORE VALUE WHEN DELETING */
+ /* ELEMENTS. WE CHOOSE TO PUT IT HERE SINCE WE THEREBY ENSURE THAT WE ALWAYS */
+ /* UNDO LOG ALL WRITES TO PAGE MEMORY. IT SHOULD BE EASIER TO MAINTAIN SUCH A */
+ /* STRUCTURE. IT IS RATHER DIFFICULT TO MAINTAIN A LOGICAL STRUCTURE WHERE */
+ /* DELETES ARE INSERTS AND INSERTS ARE PURELY DELETES. */
+ /* --------------------------------------------------------------------------------- */
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (tidrForward == ZTRUE) {
+ cundoElemIndex = tidrIndex;
+ } else {
+ cundoElemIndex = (tidrIndex + 1) - fragrecptr.p->elementLength;
+ }//if
+ cundoinfolength = fragrecptr.p->elementLength;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(idrPageptr, tidrIndex, tidrElemhead);
+ idrPageptr.p->word32[tidrIndex] = tidrElemhead; /* INSERTS THE HEAD OF THE ELEMENT */
+ tidrIndex += tidrForward;
+ guard26 = fragrecptr.p->localkeylen - 1;
+ arrGuard(guard26, 2);
+ for (tidrInputIndex = 0; tidrInputIndex <= guard26; tidrInputIndex++) {
+ dbgWord32(idrPageptr, tidrIndex, clocalkey[tidrInputIndex]);
+ arrGuard(tidrIndex, 2048);
+ idrPageptr.p->word32[tidrIndex] = clocalkey[tidrInputIndex]; /* INSERTS LOCALKEY */
+ tidrIndex += tidrForward;
+ }//for
+ tidrContLen = idrPageptr.p->word32[tidrContainerptr] << 6;
+ tidrContLen = tidrContLen >> 6;
+ dbgWord32(idrPageptr, tidrContainerptr, (tidrContainerlen << 26) | tidrContLen);
+ idrPageptr.p->word32[tidrContainerptr] = (tidrContainerlen << 26) | tidrContLen;
+ tidrResult = ZTRUE;
+}//Dbacc::insertContainer()
+
+/* --------------------------------------------------------------------------------- */
+/* ADDNEWCONTAINER */
+/* INPUT: */
+/* TANC_CONTAINERPTR */
+/* ANC_PAGEPTR */
+/* TANC_NEXT */
+/* TANC_PAGEINDEX */
+/* TANC_BUF_TYPE */
+/* TANC_PAGEID */
+/* OUTPUT: */
+/* NONE */
+/* */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::addnewcontainer(Signal* signal)
+{
+ Uint32 tancTmp1;
+
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = tancContainerptr;
+ datapageptr.p = ancPageptr.p;
+ cundoinfolength = 2;
+ undoWritingProcess(signal); /* WHEN UNDO PROCESS HAS STARTED, */
+ }//if
+ /* THE OLD DATA IS STORED ON AN UNDO PAGE */
+ /* --------------------------------------------------------------------------------- */
+ /* KEEP LENGTH INFORMATION IN BIT 26-31. */
+ /* SET BIT 9 INDICATING IF NEXT BUFFER IN THE SAME PAGE USING TANC_NEXT. */
+ /* SET TYPE OF NEXT CONTAINER IN BIT 7-8. */
+ /* SET PAGE INDEX OF NEXT CONTAINER IN BIT 0-6. */
+ /* KEEP INDICATOR OF OWNING OTHER SIDE OF BUFFER IN BIT 10. */
+ /* --------------------------------------------------------------------------------- */
+ tancTmp1 = ancPageptr.p->word32[tancContainerptr] >> 10;
+ tancTmp1 = tancTmp1 << 1;
+ tancTmp1 = tancTmp1 | tancNext;
+ tancTmp1 = tancTmp1 << 2;
+ tancTmp1 = tancTmp1 | tancBufType; /* TYPE OF THE NEXT CONTAINER */
+ tancTmp1 = tancTmp1 << 7;
+ tancTmp1 = tancTmp1 | tancPageindex;
+ dbgWord32(ancPageptr, tancContainerptr, tancTmp1);
+ ancPageptr.p->word32[tancContainerptr] = tancTmp1; /* HEAD OF THE CONTAINER IS UPDATED */
+ dbgWord32(ancPageptr, tancContainerptr + 1, tancPageid);
+ ancPageptr.p->word32[tancContainerptr + 1] = tancPageid;
+}//Dbacc::addnewcontainer()
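+
+/* --------------------------------------------------------------------------------- */
+/* ILLUSTRATIVE SKETCH (ADDED, NOT CALLED BY THE BLOCK): HYPOTHETICAL HELPERS THAT */
+/* DECODE THE CONTAINER HEADER WORD AS IT IS PACKED ABOVE AND READ ELSEWHERE IN */
+/* THIS FILE: LENGTH IN BITS 26-31, "OWNS OTHER SIDE" IN BIT 10, NEXT-IN-SAME-PAGE */
+/* IN BIT 9, TYPE OF THE NEXT CONTAINER IN BITS 7-8 AND ITS PAGE INDEX IN BITS 0-6. */
+/* --------------------------------------------------------------------------------- */
+static inline Uint32 sketch_conthead_length(Uint32 head) { return head >> 26; }
+static inline Uint32 sketch_conthead_next_type(Uint32 head) { return (head >> 7) & 0x3; }
+static inline Uint32 sketch_conthead_next_index(Uint32 head) { return head & 0x7f; }
+static inline Uint32 sketch_conthead_next_same_page(Uint32 head) { return (head >> 9) & 0x1; }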
+
+/* --------------------------------------------------------------------------------- */
+/* GETFREELIST */
+/* INPUT: */
+/* GFL_PAGEPTR (POINTER TO A PAGE RECORD). */
+/* OUTPUT: */
+/* TGFL_PAGEINDEX(POINTER TO A FREE BUFFER IN THE FREEPAGE), AND */
+/* TGFL_BUF_TYPE( TYPE OF THE FREE BUFFER). */
+/* DESCRIPTION: SEARCHES THE FREE LISTS FOR A FREE BUFFER IN THE PAGE HEAD */
+/* (WORD32(1)), AND RETURNS THE ADDRESS OF A FREE BUFFER OR NIL. */
+/* THE FREE BUFFER CAN BE A RIGHT CONTAINER OR A LEFT ONE */
+/* THE KIND OF THE CONTAINER IS NOTED BY TGFL_BUF_TYPE. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::getfreelist(Signal* signal)
+{
+ Uint32 tgflTmp;
+
+ tgflTmp = gflPageptr.p->word32[ZPOS_EMPTY_LIST];
+ tgflPageindex = (tgflTmp >> 7) & 0x7f; /* LEFT FREE LIST */
+ tgflBufType = ZLEFT;
+ if (tgflPageindex == ZEMPTYLIST) {
+ jam();
+ tgflPageindex = tgflTmp & 0x7f; /* RIGHT FREE LIST */
+ tgflBufType = ZRIGHT;
+ }//if
+ ndbrequire(tgflPageindex <= ZEMPTYLIST);
+}//Dbacc::getfreelist()
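+
+/* --------------------------------------------------------------------------------- */
+/* ILLUSTRATIVE SKETCH (ADDED, NOT CALLED BY THE BLOCK): THE ZPOS_EMPTY_LIST WORD */
+/* READ ABOVE KEEPS THE HEAD OF THE RIGHT FREE LIST IN BITS 0-6 AND THE HEAD OF THE */
+/* LEFT FREE LIST IN BITS 7-13; ZEMPTYLIST IN A FIELD MEANS THAT LIST IS EMPTY. */
+/* BITS 16-22 AND 23-29 HOLD THE LISTS OF USED RIGHT/LEFT CONTAINERS, SEE */
+/* SEIZE_LEFTLIST AND SEIZE_RIGHTLIST BELOW. HYPOTHETICAL DECODE HELPERS: */
+/* --------------------------------------------------------------------------------- */
+static inline Uint32 sketch_right_free_head(Uint32 emptyListWord) { return emptyListWord & 0x7f; }
+static inline Uint32 sketch_left_free_head(Uint32 emptyListWord) { return (emptyListWord >> 7) & 0x7f; }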
+
+/* --------------------------------------------------------------------------------- */
+/* INCREASELISTCONT */
+/* INPUT: */
+/* ILC_PAGEPTR PAGE POINTER TO INCREASE NUMBER OF CONTAINERS IN */
+/* WHEN A CONTAINER OF AN OVERFLOW PAGE (FREEPAGEPTR) IS ALLOCATED, THE NUMBER OF */
+/* ALLOCATED CONTAINERS HAS TO BE INCREASED BY ONE. */
+/* IF THE NUMBER OF ALLOCATED CONTAINERS IS ABOVE THE FREE LIMIT WE WILL */
+/* REMOVE THE PAGE FROM THE FREE LIST. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::increaselistcont(Signal* signal)
+{
+ OverflowRecordPtr ilcOverflowRecPtr;
+
+ dbgWord32(ilcPageptr, ZPOS_ALLOC_CONTAINERS, ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] + 1);
+ ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] + 1;
+ if (ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] > ZFREE_LIMIT) {
+ if (ilcPageptr.p->word32[ZPOS_OVERFLOWREC] != RNIL) {
+ jam();
+ ilcOverflowRecPtr.i = ilcPageptr.p->word32[ZPOS_OVERFLOWREC];
+ dbgWord32(ilcPageptr, ZPOS_OVERFLOWREC, RNIL);
+ ilcPageptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
+ ptrCheckGuard(ilcOverflowRecPtr, coverflowrecsize, overflowRecord);
+ tfoOverflowRecPtr = ilcOverflowRecPtr;
+ takeRecOutOfFreeOverpage(signal);
+ rorOverflowRecPtr = ilcOverflowRecPtr;
+ releaseOverflowRec(signal);
+ }//if
+ }//if
+}//Dbacc::increaselistcont()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_LEFTLIST */
+/* INPUT: */
+/* TSL_PAGEINDEX PAGE INDEX OF CONTAINER TO SEIZE */
+/* SL_PAGEPTR PAGE POINTER OF CONTAINER TO SEIZE */
+/* TSL_UPDATE_HEADER SHOULD WE UPDATE THE CONTAINER HEADER */
+/* */
+/* OUTPUT: */
+/* NONE */
+/* DESCRIPTION: THE BUFFER NOTED BY TSL_PAGEINDEX WILL BE REMOVED FROM THE */
+/* LIST OF LEFT FREE CONTAINERS IN THE HEADER OF THE PAGE */
+/* (SL_PAGEPTR). THE PREVIOUS AND NEXT BUFFERS OF THE REMOVED */
+/* BUFFER WILL BE UPDATED. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeLeftlist(Signal* signal)
+{
+ Uint32 tsllTmp1;
+ Uint32 tsllNewHead;
+ Uint32 tsllHeadIndex;
+ Uint32 tsllTmp;
+
+ tsllHeadIndex = ((tslPageindex << ZSHIFT_PLUS) - (tslPageindex << ZSHIFT_MINUS)) + ZHEAD_SIZE;
+ arrGuard(tsllHeadIndex + 1, 2048);
+ tslNextfree = slPageptr.p->word32[tsllHeadIndex];
+ tslPrevfree = slPageptr.p->word32[tsllHeadIndex + 1];
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = slPageptr.p;
+ cundoElemIndex = tsllHeadIndex;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = ZPOS_EMPTY_LIST;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ if (tslPrevfree == ZEMPTYLIST) {
+ jam();
+ /* UPDATE FREE LIST OF LEFT CONTAINER IN PAGE HEAD */
+ tsllTmp1 = slPageptr.p->word32[ZPOS_EMPTY_LIST];
+ tsllTmp = tsllTmp1 & 0x7f;
+ tsllTmp1 = (tsllTmp1 >> 14) << 14;
+ tsllTmp1 = (tsllTmp1 | (tslNextfree << 7)) | tsllTmp;
+ dbgWord32(slPageptr, ZPOS_EMPTY_LIST, tsllTmp1);
+ slPageptr.p->word32[ZPOS_EMPTY_LIST] = tsllTmp1;
+ } else {
+ ndbrequire(tslPrevfree < ZEMPTYLIST);
+ jam();
+ tsllTmp = ((tslPrevfree << ZSHIFT_PLUS) - (tslPrevfree << ZSHIFT_MINUS)) + ZHEAD_SIZE;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = tsllTmp;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(slPageptr, tsllTmp, tslNextfree);
+ slPageptr.p->word32[tsllTmp] = tslNextfree;
+ }//if
+ if (tslNextfree < ZEMPTYLIST) {
+ jam();
+ tsllTmp = (((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ZHEAD_SIZE) + 1;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = tsllTmp;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(slPageptr, tsllTmp, tslPrevfree);
+ slPageptr.p->word32[tsllTmp] = tslPrevfree;
+ } else {
+ ndbrequire(tslNextfree == ZEMPTYLIST);
+ jam();
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* IF WE ARE UPDATING THE HEADER WE ARE CREATING A NEW CONTAINER IN THE PAGE. */
+ /* TO BE ABLE TO FIND ALL LOCKED ELEMENTS WE KEEP ALL CONTAINERS IN LINKED */
+ /* LISTS IN THE PAGE. */
+ /* */
+ /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 16-22 THAT REFERS TO THE */
+ /* FIRST CONTAINER IN A LIST OF USED RIGHT CONTAINERS IN THE PAGE. */
+ /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 23-29 THAT REFERS TO THE */
+ /* FIRST CONTAINER IN A LIST OF USED LEFT CONTAINERS IN THE PAGE. */
+ /* EACH CONTAINER IN THE LIST CONTAINS A NEXT POINTER IN BIT 11-17 AND IT */
+ /* CONTAINS A PREVIOUS POINTER IN BIT 18-24. */
+ /* WE ALSO SET BIT 25 TO INDICATE THAT IT IS A CONTAINER HEADER. */
+ /* --------------------------------------------------------------------------------- */
+ if (tslUpdateHeader == ZTRUE) {
+ jam();
+ tslNextfree = (slPageptr.p->word32[ZPOS_EMPTY_LIST] >> 23) & 0x7f;
+ tsllNewHead = ZCON_HEAD_SIZE;
+ tsllNewHead = ((tsllNewHead << 8) + ZEMPTYLIST) + (1 << 7);
+ tsllNewHead = (tsllNewHead << 7) + tslNextfree;
+ tsllNewHead = tsllNewHead << 11;
+ dbgWord32(slPageptr, tsllHeadIndex, tsllNewHead);
+ slPageptr.p->word32[tsllHeadIndex] = tsllNewHead;
+ tsllTmp = slPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xc07fffff;
+ tsllTmp = tsllTmp | (tslPageindex << 23);
+ dbgWord32(slPageptr, ZPOS_EMPTY_LIST, tsllTmp);
+ slPageptr.p->word32[ZPOS_EMPTY_LIST] = tsllTmp;
+ if (tslNextfree < ZEMPTYLIST) {
+ jam();
+ tsllTmp = ((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ZHEAD_SIZE;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = tsllTmp;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ tsllTmp1 = slPageptr.p->word32[tsllTmp] & 0xfe03ffff;
+ tsllTmp1 = tsllTmp1 | (tslPageindex << 18);
+ dbgWord32(slPageptr, tsllTmp, tsllTmp1);
+ slPageptr.p->word32[tsllTmp] = tsllTmp1;
+ } else {
+ ndbrequire(tslNextfree == ZEMPTYLIST);
+ jam();
+ }//if
+ }//if
+ ilcPageptr.p = slPageptr.p;
+ increaselistcont(signal);
+}//Dbacc::seizeLeftlist()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_RIGHTLIST */
+/* DESCRIPTION: THE BUFFER NOTED BY TSL_PAGEINDEX WILL BE REMOVED FROM THE */
+/* LIST OF RIGHT FREE CONTAINERS IN THE HEADER OF THE PAGE */
+/* (SL_PAGEPTR). THE PREVIOUS AND NEXT BUFFERS OF THE REMOVED */
+/* BUFFER WILL BE UPDATED. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeRightlist(Signal* signal)
+{
+ Uint32 tsrlTmp1;
+ Uint32 tsrlNewHead;
+ Uint32 tsrlHeadIndex;
+ Uint32 tsrlTmp;
+
+ tsrlHeadIndex = ((tslPageindex << ZSHIFT_PLUS) - (tslPageindex << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ arrGuard(tsrlHeadIndex + 1, 2048);
+ tslNextfree = slPageptr.p->word32[tsrlHeadIndex];
+ tslPrevfree = slPageptr.p->word32[tsrlHeadIndex + 1];
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = slPageptr.p;
+ cundoElemIndex = tsrlHeadIndex;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = ZPOS_EMPTY_LIST;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ if (tslPrevfree == ZEMPTYLIST) {
+ jam();
+ tsrlTmp = slPageptr.p->word32[ZPOS_EMPTY_LIST];
+ dbgWord32(slPageptr, ZPOS_EMPTY_LIST, ((tsrlTmp >> 7) << 7) | tslNextfree);
+ slPageptr.p->word32[ZPOS_EMPTY_LIST] = ((tsrlTmp >> 7) << 7) | tslNextfree;
+ } else {
+ ndbrequire(tslPrevfree < ZEMPTYLIST);
+ jam();
+ tsrlTmp = ((tslPrevfree << ZSHIFT_PLUS) - (tslPrevfree << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = tsrlTmp;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(slPageptr, tsrlTmp, tslNextfree);
+ slPageptr.p->word32[tsrlTmp] = tslNextfree;
+ }//if
+ if (tslNextfree < ZEMPTYLIST) {
+ jam();
+ tsrlTmp = ((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - (ZCON_HEAD_SIZE - 1));
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = tsrlTmp;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(slPageptr, tsrlTmp, tslPrevfree);
+ slPageptr.p->word32[tsrlTmp] = tslPrevfree;
+ } else {
+ ndbrequire(tslNextfree == ZEMPTYLIST);
+ jam();
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* IF WE ARE UPDATING THE HEADER WE ARE CREATING A NEW CONTAINER IN THE PAGE. */
+ /* TO BE ABLE TO FIND ALL LOCKED ELEMENTS WE KEEP ALL CONTAINERS IN LINKED */
+ /* LISTS IN THE PAGE. */
+ /* */
+ /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 16-22 THAT REFERS TO THE */
+ /* FIRST CONTAINER IN A LIST OF USED RIGHT CONTAINERS IN THE PAGE. */
+ /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 23-29 THAT REFERS TO THE */
+ /* FIRST CONTAINER IN A LIST OF USED LEFT CONTAINERS IN THE PAGE. */
+ /* EACH CONTAINER IN THE LIST CONTAINS A NEXT POINTER IN BIT 11-17 AND IT */
+ /* CONTAINS A PREVIOUS POINTER IN BIT 18-24. */
+ /* --------------------------------------------------------------------------------- */
+ if (tslUpdateHeader == ZTRUE) {
+ jam();
+ tslNextfree = (slPageptr.p->word32[ZPOS_EMPTY_LIST] >> 16) & 0x7f;
+ tsrlNewHead = ZCON_HEAD_SIZE;
+ tsrlNewHead = ((tsrlNewHead << 8) + ZEMPTYLIST) + (1 << 7);
+ tsrlNewHead = (tsrlNewHead << 7) + tslNextfree;
+ tsrlNewHead = tsrlNewHead << 11;
+ dbgWord32(slPageptr, tsrlHeadIndex, tsrlNewHead);
+ slPageptr.p->word32[tsrlHeadIndex] = tsrlNewHead;
+ tsrlTmp = slPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xff80ffff;
+ dbgWord32(slPageptr, ZPOS_EMPTY_LIST, tsrlTmp | (tslPageindex << 16));
+ slPageptr.p->word32[ZPOS_EMPTY_LIST] = tsrlTmp | (tslPageindex << 16);
+ if (tslNextfree < ZEMPTYLIST) {
+ jam();
+ tsrlTmp = ((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = tsrlTmp;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ tsrlTmp1 = slPageptr.p->word32[tsrlTmp] & 0xfe03ffff;
+ dbgWord32(slPageptr, tsrlTmp, tsrlTmp1 | (tslPageindex << 18));
+ slPageptr.p->word32[tsrlTmp] = tsrlTmp1 | (tslPageindex << 18);
+ } else {
+ ndbrequire(tslNextfree == ZEMPTYLIST);
+ jam();
+ }//if
+ }//if
+ ilcPageptr.p = slPageptr.p;
+ increaselistcont(signal);
+}//Dbacc::seizeRightlist()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF INSERT_ELEMENT MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* MODULE: GET_ELEMENT */
+/* THE FOLLOWING SUBROUTINES ARE ONLY USED BY GET_ELEMENT AND */
+/* GETDIRINDEX. THIS ROUTINE IS THE SOLE INTERFACE TO GET ELEMENTS */
+/* FROM THE INDEX. CURRENT USERS ARE ALL REQUESTS AND EXECUTE UNDO LOG */
+/* */
+/* THE FOLLOWING SUBROUTINES ARE INCLUDED IN THIS MODULE: */
+/* GET_ELEMENT */
+/* GET_DIRINDEX */
+/* SEARCH_LONG_KEY */
+/* */
+/* THESE ROUTINES ARE ONLY USED BY THIS MODULE AND BY NO ONE ELSE. */
+/* ALSO THE ROUTINES MAKE NO USE OF ROUTINES IN OTHER MODULES. */
+/* THE ONLY SHORT-LIVED VARIABLES USED IN OTHER PARTS OF THE BLOCK ARE */
+/* THOSE DEFINED AS INPUT AND OUTPUT IN GET_ELEMENT AND GETDIRINDEX */
+/* SHORT-LIVED VARIABLES INCLUDE TEMPORARY VARIABLES, COMMON VARIABLES */
+/* AND POINTER VARIABLES. */
+/* THE ONLY EXCEPTION TO THIS RULE IS FRAGRECPTR WHICH POINTS TO THE */
+/* FRAGMENT RECORD. THIS IS MORE LESS STATIC ALWAYS DURING A SIGNAL */
+/* EXECUTION. */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* GETDIRINDEX */
+/* SUPPORT ROUTINE FOR INSERT ELEMENT, GET ELEMENT AND COMMITDELETE */
+/* INPUT:FRAGRECPTR ( POINTER TO THE ACTIVE FRAGMENT REC) */
+/* OPERATION_REC_PTR (POINTER TO THE OPERATION REC). */
+/* */
+/* OUTPUT:GDI_PAGEPTR ( POINTER TO THE PAGE OF THE ELEMENT) */
+/* TGDI_PAGEINDEX ( INDEX OF THE ELEMENT IN THE PAGE). */
+/* */
+/* DESCRIPTION: CHECK THE HASH VALUE OF THE OPERATION REC AND CALCULATE */
+/* THE ADDRESS OF THE ELEMENT IN THE HASH TABLE (GDI_PAGEPTR, */
+/* TGDI_PAGEINDEX) ACCORDING TO LH3. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::getdirindex(Signal* signal)
+{
+ DirRangePtr gdiDirRangePtr;
+ DirectoryarrayPtr gdiDirptr;
+ Uint32 tgdiTmp;
+ Uint32 tgdiAddress;
+
+ tgdiTmp = fragrecptr.p->k + fragrecptr.p->lhfragbits; /* OBS K = 6 */
+ tgdiPageindex = operationRecPtr.p->hashValue & ((1 << fragrecptr.p->k) - 1);
+ tgdiTmp = operationRecPtr.p->hashValue >> tgdiTmp;
+ tgdiTmp = (tgdiTmp << fragrecptr.p->k) | tgdiPageindex;
+ tgdiAddress = tgdiTmp & fragrecptr.p->maxp;
+ gdiDirRangePtr.i = fragrecptr.p->directory;
+ ptrCheckGuard(gdiDirRangePtr, cdirrangesize, dirRange);
+ if (tgdiAddress < fragrecptr.p->p) {
+ jam();
+ tgdiAddress = tgdiTmp & ((fragrecptr.p->maxp << 1) | 1);
+ }//if
+ tgdiTmp = tgdiAddress >> fragrecptr.p->k;
+ arrGuard((tgdiTmp >> 8), 256);
+ gdiDirptr.i = gdiDirRangePtr.p->dirArray[tgdiTmp >> 8];
+ ptrCheckGuard(gdiDirptr, cdirarraysize, directoryarray);
+ gdiPageptr.i = gdiDirptr.p->pagep[tgdiTmp & 0xff]; /* DIRECTORY INDEX OF SEND BUCKET PAGE */
+ ptrCheckGuard(gdiPageptr, cpagesize, page8);
+}//Dbacc::getdirindex()
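+
+/* --------------------------------------------------------------------------------- */
+/* ILLUSTRATIVE SKETCH (ADDED, NOT CALLED BY THE BLOCK): THE ADDRESS COMPUTATION */
+/* ABOVE FOLLOWS THE CLASSIC LINEAR HASHING RULE: MASK THE HASH WITH MAXP; IF THE */
+/* RESULTING BUCKET IS BELOW THE SPLIT POINTER P IT HAS ALREADY BEEN SPLIT, SO ONE */
+/* MORE BIT OF THE HASH VALUE IS USED. A HYPOTHETICAL STANDALONE VERSION: */
+/* --------------------------------------------------------------------------------- */
+static inline Uint32
+sketch_lh_bucket(Uint32 hashBits, Uint32 maxp, Uint32 p)
+{
+ Uint32 address = hashBits & maxp; /* maxp is of the form 2^n - 1 */
+ if (address < p) {
+ address = hashBits & ((maxp << 1) | 1); /* use one more hash bit */
+ }
+ return address;
+}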
+
+Uint32
+Dbacc::readTablePk(Uint32 localkey1)
+{
+ Uint32 tableId = fragrecptr.p->myTableId;
+ Uint32 fragId = fragrecptr.p->myfid;
+ Uint32 fragPageId = localkey1 >> MAX_TUPLES_BITS;
+ Uint32 pageIndex = localkey1 & ((1 << MAX_TUPLES_BITS ) - 1);
+#ifdef VM_TRACE
+ memset(ckeys, 0x1f, (fragrecptr.p->keyLength * MAX_XFRM_MULTIPLY) << 2);
+#endif
+ int ret = c_tup->accReadPk(tableId, fragId, fragPageId, pageIndex, ckeys, true);
+ ndbrequire(ret > 0);
+ return ret;
+}
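+
+/* --------------------------------------------------------------------------------- */
+/* ILLUSTRATIVE SKETCH (ADDED, NOT CALLED BY THE BLOCK): A ONE-WORD LOCAL KEY, AS */
+/* BUILT IN INSERTELEMENT_LAB AND DECODED IN READ_TABLE_PK ABOVE, PACKS THE TUP */
+/* FRAGMENT PAGE ID IN THE UPPER BITS AND THE PAGE INDEX IN THE LOWER */
+/* MAX_TUPLES_BITS BITS. HYPOTHETICAL HELPERS, ASSUMING MAX_TUPLES_BITS */
+/* (NDB_LIMITS.H) IS VISIBLE HERE AS IT IS IN THE FUNCTIONS ABOVE: */
+/* --------------------------------------------------------------------------------- */
+static inline Uint32
+sketch_make_local_key(Uint32 fragPageId, Uint32 pageIndex)
+{
+ return (fragPageId << MAX_TUPLES_BITS) + pageIndex;
+}
+static inline Uint32
+sketch_local_key_frag_page_id(Uint32 localkey)
+{
+ return localkey >> MAX_TUPLES_BITS;
+}
+static inline Uint32
+sketch_local_key_page_index(Uint32 localkey)
+{
+ return localkey & ((1 << MAX_TUPLES_BITS) - 1);
+}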
+
+/* --------------------------------------------------------------------------------- */
+/* GET_ELEMENT */
+/* INPUT: */
+/* OPERATION_REC_PTR */
+/* FRAGRECPTR */
+/* OUTPUT: */
+/* TGE_RESULT RESULT SUCCESS = ZTRUE OTHERWISE ZFALSE */
+/* TGE_LOCKED LOCK INFORMATION IF SUCCESSFUL RESULT */
+/* GE_PAGEPTR PAGE POINTER OF FOUND ELEMENT */
+/* TGE_CONTAINERPTR CONTAINER INDEX OF FOUND ELEMENT */
+/* TGE_ELEMENTPTR ELEMENT INDEX OF FOUND ELEMENT */
+/* TGE_FORWARD DIRECTION OF CONTAINER WHERE ELEMENT FOUND */
+/* */
+/* DESCRIPTION: THE SUBROUTINE GOES THROUGH ALL CONTAINERS OF THE ACTIVE */
+/* BUCKET AND SEARCHES FOR THE ELEMENT. THE PRIMARY KEYS SAVED */
+/* IN THE OPERATION REC ARE THE CHECK ITEMS IN THE SEARCH. */
+/* --------------------------------------------------------------------------------- */
+
+#if __ia64 == 1
+#if __INTEL_COMPILER == 810
+int ndb_acc_ia64_icc810_dummy_var = 0;
+void ndb_acc_ia64_icc810_dummy_func()
+{
+ ndb_acc_ia64_icc810_dummy_var++;
+}
+#endif
+#endif
+
+void Dbacc::getElement(Signal* signal)
+{
+ DirRangePtr geOverflowrangeptr;
+ DirectoryarrayPtr geOverflowDirptr;
+ OperationrecPtr geTmpOperationRecPtr;
+ Uint32 tgeElementHeader;
+ Uint32 tgeElemStep;
+ Uint32 tgeContainerhead;
+ Uint32 tgePageindex;
+ Uint32 tgeActivePageDir;
+ Uint32 tgeNextptrtype;
+ register Uint32 tgeKeyptr;
+ register Uint32 tgeRemLen;
+ register Uint32 TelemLen = fragrecptr.p->elementLength;
+ register Uint32* Tkeydata = (Uint32*)&signal->theData[7];
+
+ getdirindex(signal);
+ tgePageindex = tgdiPageindex;
+ gePageptr = gdiPageptr;
+ tgeResult = ZFALSE;
+ /*
+ * The value searched for is
+ * - table key for ACCKEYREQ, stored in TUP
+ * - local key (1 word) for ACC_LOCKREQ and UNDO, stored in ACC
+ */
+ const bool searchLocalKey =
+ operationRecPtr.p->isAccLockReq || operationRecPtr.p->isUndoLogReq;
+
+ ndbrequire(TelemLen == ZELEM_HEAD_SIZE + fragrecptr.p->localkeylen);
+ tgeNextptrtype = ZLEFT;
+ tgeLocked = 0;
+
+ const Uint32 tmp = fragrecptr.p->k + fragrecptr.p->lhfragbits;
+ const Uint32 opHashValuePart = (operationRecPtr.p->hashValue >> tmp) &0xFFFF;
+ do {
+ tgeContainerptr = (tgePageindex << ZSHIFT_PLUS) - (tgePageindex << ZSHIFT_MINUS);
+ if (tgeNextptrtype == ZLEFT) {
+ jam();
+ tgeContainerptr = tgeContainerptr + ZHEAD_SIZE;
+ tgeElementptr = tgeContainerptr + ZCON_HEAD_SIZE;
+ tgeKeyptr = (tgeElementptr + ZELEM_HEAD_SIZE) + fragrecptr.p->localkeylen;
+ tgeElemStep = TelemLen;
+ tgeForward = 1;
+ if (tgeContainerptr >= 2048) { ACCKEY_error(4); return;}
+ tgeRemLen = gePageptr.p->word32[tgeContainerptr] >> 26;
+ if ((tgeContainerptr + tgeRemLen - 1) >= 2048) { ACCKEY_error(5); return;}
+ } else if (tgeNextptrtype == ZRIGHT) {
+ jam();
+ tgeContainerptr = tgeContainerptr + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ tgeElementptr = tgeContainerptr - 1;
+ tgeKeyptr = (tgeElementptr - ZELEM_HEAD_SIZE) - fragrecptr.p->localkeylen;
+ tgeElemStep = 0 - TelemLen;
+ tgeForward = (Uint32)-1;
+ if (tgeContainerptr >= 2048) { ACCKEY_error(4); return;}
+ tgeRemLen = gePageptr.p->word32[tgeContainerptr] >> 26;
+ if ((tgeContainerptr - tgeRemLen) >= 2048) { ACCKEY_error(5); return;}
+ } else {
+ ACCKEY_error(6); return;
+ }//if
+ if (tgeRemLen >= ZCON_HEAD_SIZE + TelemLen) {
+ if (tgeRemLen > ZBUF_SIZE) {
+ ACCKEY_error(7); return;
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ // There is at least one element in this container. Check if it is the element
+ // searched for.
+ /* --------------------------------------------------------------------------------- */
+ do {
+ tgeElementHeader = gePageptr.p->word32[tgeElementptr];
+ tgeRemLen = tgeRemLen - TelemLen;
+ Uint32 hashValuePart;
+ if (ElementHeader::getLocked(tgeElementHeader)) {
+ jam();
+ geTmpOperationRecPtr.i = ElementHeader::getOpPtrI(tgeElementHeader);
+ ptrCheckGuard(geTmpOperationRecPtr, coprecsize, operationrec);
+ hashValuePart = geTmpOperationRecPtr.p->hashvaluePart;
+ } else {
+ jam();
+ hashValuePart = ElementHeader::getHashValuePart(tgeElementHeader);
+ }
+ if (hashValuePart == opHashValuePart) {
+ jam();
+ Uint32 localkey1 = gePageptr.p->word32[tgeElementptr + tgeForward];
+ Uint32 localkey2 = 0;
+ bool found;
+ if (! searchLocalKey) {
+ Uint32 len = readTablePk(localkey1);
+ found = (len == operationRecPtr.p->xfrmtupkeylen) &&
+ (memcmp(Tkeydata, ckeys, len << 2) == 0);
+ } else {
+ jam();
+ found = (localkey1 == Tkeydata[0]);
+ }
+ if (found) {
+ jam();
+ tgeLocked = ElementHeader::getLocked(tgeElementHeader);
+ tgeResult = ZTRUE;
+ operationRecPtr.p->localdata[0] = localkey1;
+ operationRecPtr.p->localdata[1] = localkey2;
+ return;
+ }
+ }
+ if (tgeRemLen <= ZCON_HEAD_SIZE) {
+ break;
+ }
+ tgeElementptr = tgeElementptr + tgeElemStep;
+ } while (true);
+ }//if
+ if (tgeRemLen != ZCON_HEAD_SIZE) {
+ ACCKEY_error(8); return;
+ }//if
+ tgeContainerhead = gePageptr.p->word32[tgeContainerptr];
+ tgeNextptrtype = (tgeContainerhead >> 7) & 0x3;
+ if (tgeNextptrtype == 0) {
+ jam();
+ return; /* NO MORE CONTAINER */
+ }//if
+ tgePageindex = tgeContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */
+ if (tgePageindex > ZEMPTYLIST) {
+ ACCKEY_error(9); return;
+ }//if
+ if (((tgeContainerhead >> 9) & 1) == ZFALSE) {
+ jam();
+ tgeActivePageDir = gePageptr.p->word32[tgeContainerptr + 1]; /* NEXT PAGE ID */
+ geOverflowrangeptr.i = fragrecptr.p->overflowdir;
+ ptrCheckGuard(geOverflowrangeptr, cdirrangesize, dirRange);
+ arrGuard((tgeActivePageDir >> 8), 256);
+ geOverflowDirptr.i = geOverflowrangeptr.p->dirArray[tgeActivePageDir >> 8];
+ ptrCheckGuard(geOverflowDirptr, cdirarraysize, directoryarray);
+ gePageptr.i = geOverflowDirptr.p->pagep[tgeActivePageDir & 0xff];
+ ptrCheckGuard(gePageptr, cpagesize, page8);
+ }//if
+ } while (1);
+ return;
+}//Dbacc::getElement()
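+
+/* --------------------------------------------------------------------------------- */
+/* EDITOR'S SKETCH (illustrative only, not part of the DBACC block): how the          */
+/* container header word is decoded by getElement() and getLastAndRemove() above.     */
+/* The field names are hypothetical; the bit positions are simply the shifts and      */
+/* masks used in the surrounding code, so treat the field meanings as an assumption.  */
+/* --------------------------------------------------------------------------------- */
+struct SketchContainerHead {
+  Uint32 nextPageIndex;   /* bits 0..6  : page index of the next container            */
+  Uint32 nextPtrType;     /* bits 7..8  : 0 = no next container, else ZLEFT or ZRIGHT */
+  Uint32 nextOnSamePage;  /* bit  9     : 0 => next container is on another page      */
+  Uint32 ownsBothEnds;    /* bit 10     : container owns both ends of the buffer      */
+  Uint32 length;          /* bits 26..31: container length in words                   */
+};
+
+static SketchContainerHead
+sketch_decode_container_head(Uint32 head)
+{
+  SketchContainerHead h;
+  h.nextPageIndex  = head & 0x7f;
+  h.nextPtrType    = (head >> 7) & 0x3;
+  h.nextOnSamePage = (head >> 9) & 0x1;
+  h.ownsBothEnds   = (head >> 10) & 0x1;
+  h.length         = head >> 26;
+  return h;
+}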
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF GET_ELEMENT MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* MODULE: DELETE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* COMMITDELETE */
+/* INPUT: OPERATION_REC_PTR, PTR TO AN OPERATION RECORD. */
+/* FRAGRECPTR, PTR TO A FRAGMENT RECORD */
+/* */
+/* OUTPUT: */
+/* NONE */
+/* DESCRIPTION: DELETE OPERATIONS WILL BE COMPLETED AT THE COMMIT OF THE TRANS-      */
+/* ACTION. THIS SUBROUTINE SEARCHES FOR THE ELEMENT AND DELETES IT. IT DOES SO BY    */
+/* REPLACING IT WITH THE LAST ELEMENT IN THE BUCKET. IF THE DELETED ELEMENT */
+/* IS ALSO THE LAST ELEMENT THEN IT IS ONLY NECESSARY TO REMOVE THE ELEMENT. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::commitdelete(Signal* signal, bool systemRestart)
+{
+ if (!systemRestart) {
+ jam();
+ signal->theData[0] = fragrecptr.p->myfid;
+ signal->theData[1] = fragrecptr.p->myTableId;
+ signal->theData[2] = operationRecPtr.p->localdata[0];
+ Uint32 localKey = operationRecPtr.p->localdata[0];
+ Uint32 pageId = localKey >> MAX_TUPLES_BITS;
+ Uint32 pageIndex = localKey & ((1 << MAX_TUPLES_BITS) - 1);
+ signal->theData[2] = pageId;
+ signal->theData[3] = pageIndex;
+ EXECUTE_DIRECT(DBTUP, GSN_TUP_DEALLOCREQ, signal, 4);
+ jamEntry();
+ }//if
+ getdirindex(signal);
+ tlastPageindex = tgdiPageindex;
+ lastPageptr.i = gdiPageptr.i;
+ lastPageptr.p = gdiPageptr.p;
+ tlastForward = ZTRUE;
+ tlastContainerptr = (tlastPageindex << ZSHIFT_PLUS) - (tlastPageindex << ZSHIFT_MINUS);
+ tlastContainerptr = tlastContainerptr + ZHEAD_SIZE;
+ arrGuard(tlastContainerptr, 2048);
+ tlastContainerhead = lastPageptr.p->word32[tlastContainerptr];
+ tlastContainerlen = tlastContainerhead >> 26;
+ lastPrevpageptr.i = RNIL;
+ ptrNull(lastPrevpageptr);
+ tlastPrevconptr = 0;
+ getLastAndRemove(signal);
+
+ delPageptr.i = operationRecPtr.p->elementPage;
+ ptrCheckGuard(delPageptr, cpagesize, page8);
+ tdelElementptr = operationRecPtr.p->elementPointer;
+ /* --------------------------------------------------------------------------------- */
+ // Here we have to take extreme care since we do not want locks to end up after the
+ // log execution. Thus it is necessary to put back the element in unlocked shape.
+ // We thus update the element header to ensure we log an unlocked element. We do not
+ // need to restore it later since it is deleted immediately anyway.
+ /* --------------------------------------------------------------------------------- */
+ const Uint32 hv = operationRecPtr.p->hashvaluePart;
+ const Uint32 eh = ElementHeader::setUnlocked(hv, 0);
+ delPageptr.p->word32[tdelElementptr] = eh;
+ if (operationRecPtr.p->elementPage == lastPageptr.i) {
+ if (operationRecPtr.p->elementPointer == tlastElementptr) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE LAST ELEMENT WAS THE ELEMENT TO BE DELETED. WE NEED NOT COPY IT. */
+ /* --------------------------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* THE DELETED ELEMENT IS NOT THE LAST. WE READ THE LAST ELEMENT AND OVERWRITE THE */
+ /* DELETED ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ tdelContainerptr = operationRecPtr.p->elementContainer;
+ tdelForward = operationRecPtr.p->elementIsforward;
+ deleteElement(signal);
+}//Dbacc::commitdelete()
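+
+/* --------------------------------------------------------------------------------- */
+/* EDITOR'S SKETCH (illustrative only, not part of the DBACC block): the delete-by-   */
+/* replacement idea used by commitdelete()/deleteElement(), shown on a plain array of */
+/* fixed-size elements. The bucket stays densely packed by copying the last element   */
+/* over the deleted one and then shrinking the bucket by one element.                 */
+/* --------------------------------------------------------------------------------- */
+static void
+sketch_delete_by_replace_with_last(Uint32* elements, Uint32 elementLength,
+                                   Uint32& numElements, Uint32 deleteIndex)
+{
+  const Uint32 last = numElements - 1;
+  if (deleteIndex != last) {
+    /* Overwrite the deleted element with the last element, word by word. */
+    for (Uint32 i = 0; i < elementLength; i++) {
+      elements[deleteIndex * elementLength + i] = elements[last * elementLength + i];
+    }
+  }
+  numElements = last;   /* the slot of the former last element is now free */
+}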
+
+/* --------------------------------------------------------------------------------- */
+/* DELETE_ELEMENT */
+/* INPUT: FRAGRECPTR, POINTER TO A FRAGMENT RECORD */
+/* LAST_PAGEPTR, POINTER TO THE PAGE OF THE LAST ELEMENT */
+/* DEL_PAGEPTR, POINTER TO THE PAGE OF THE DELETED ELEMENT */
+/* TLAST_ELEMENTPTR, ELEMENT POINTER OF THE LAST ELEMENT */
+/* TDEL_ELEMENTPTR, ELEMENT POINTER OF THE DELETED ELEMENT */
+/* TLAST_FORWARD, DIRECTION OF LAST ELEMENT */
+/* TDEL_FORWARD, DIRECTION OF DELETED ELEMENT */
+/* TDEL_CONTAINERPTR, CONTAINER POINTER OF DELETED ELEMENT */
+/* DESCRIPTION: COPY LAST ELEMENT TO DELETED ELEMENT AND UPDATE UNDO LOG AND */
+/* UPDATE ANY ACTIVE OPERATION ON THE MOVED ELEMENT. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::deleteElement(Signal* signal)
+{
+ OperationrecPtr deOperationRecPtr;
+ Uint32 tdeIndex;
+ Uint32 tlastMoveElemptr;
+ Uint32 tdelMoveElemptr;
+ Uint32 guard31;
+
+ if (tlastElementptr >= 2048)
+ goto deleteElement_index_error1;
+ {
+ const Uint32 tdeElemhead = lastPageptr.p->word32[tlastElementptr];
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ datapageptr.p = delPageptr.p;
+ cundoinfolength = fragrecptr.p->elementLength;
+ if (tdelForward == ZTRUE) {
+ jam();
+ cundoElemIndex = tdelElementptr;
+ } else {
+ jam();
+ cundoElemIndex = (tdelElementptr + 1) - fragrecptr.p->elementLength;
+ }//if
+ undoWritingProcess(signal);
+ }//if
+ tlastMoveElemptr = tlastElementptr;
+ tdelMoveElemptr = tdelElementptr;
+ guard31 = fragrecptr.p->elementLength - 1;
+ for (tdeIndex = 0; tdeIndex <= guard31; tdeIndex++) {
+ dbgWord32(delPageptr, tdelMoveElemptr, lastPageptr.p->word32[tlastMoveElemptr]);
+ if ((tlastMoveElemptr >= 2048) ||
+ (tdelMoveElemptr >= 2048))
+ goto deleteElement_index_error2;
+ delPageptr.p->word32[tdelMoveElemptr] = lastPageptr.p->word32[tlastMoveElemptr];
+ tdelMoveElemptr = tdelMoveElemptr + tdelForward;
+ tlastMoveElemptr = tlastMoveElemptr + tlastForward;
+ }//for
+ if (ElementHeader::getLocked(tdeElemhead)) {
+ /* --------------------------------------------------------------------------------- */
+ /* THE LAST ELEMENT IS LOCKED AND IS THUS REFERENCED BY AN OPERATION RECORD. WE NEED */
+ /* TO UPDATE THE OPERATION RECORD WITH THE NEW REFERENCE TO THE ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ deOperationRecPtr.i = ElementHeader::getOpPtrI(tdeElemhead);
+ ptrCheckGuard(deOperationRecPtr, coprecsize, operationrec);
+ if (cundoLogActive == ZFALSE) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE DO NOT BOTHER WITH THIS INFORMATION DURING EXECUTION OF THE UNDO LOG. */
+ /* --------------------------------------------------------------------------------- */
+ deOperationRecPtr.p->elementPage = delPageptr.i;
+ deOperationRecPtr.p->elementContainer = tdelContainerptr;
+ deOperationRecPtr.p->elementPointer = tdelElementptr;
+ deOperationRecPtr.p->elementIsforward = tdelForward;
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ // We need to take extreme care to not install locked records after system restart.
+ // An undo of the delete will reinstall the moved record. We have to ensure that the
+      // lock is removed to ensure that no such thing happens.
+ /* --------------------------------------------------------------------------------- */
+ Uint32 eh = ElementHeader::setUnlocked(deOperationRecPtr.p->hashvaluePart,
+ 0);
+ lastPageptr.p->word32[tlastElementptr] = eh;
+ }//if
+ return;
+ }
+
+ deleteElement_index_error1:
+ arrGuard(tlastElementptr, 2048);
+ return;
+
+ deleteElement_index_error2:
+ arrGuard(tdelMoveElemptr + guard31, 2048);
+ arrGuard(tlastMoveElemptr, 2048);
+ return;
+
+}//Dbacc::deleteElement()
+
+/* --------------------------------------------------------------------------------- */
+/* GET_LAST_AND_REMOVE */
+/* INPUT: */
+/* LAST_PAGEPTR PAGE POINTER OF FIRST CONTAINER IN SEARCH OF LAST*/
+/* TLAST_CONTAINERPTR CONTAINER INDEX OF THE SAME */
+/* TLAST_CONTAINERHEAD CONTAINER HEADER OF THE SAME */
+/* TLAST_PAGEINDEX PAGE INDEX OF THE SAME */
+/* TLAST_FORWARD CONTAINER DIRECTION OF THE SAME */
+/* TLAST_CONTAINERLEN CONTAINER LENGTH OF THE SAME */
+/* LAST_PREVPAGEPTR PAGE POINTER OF PREVIOUS CONTAINER OF THE SAME */
+/* TLAST_PREVCONPTR CONTAINER INDEX OF PREVIOUS CONTAINER OF THE SAME*/
+/* */
+/* OUTPUT: */
+/* ALL VARIABLES FROM INPUT BUT NOW CONTAINING INFO ABOUT LAST */
+/* CONTAINER. */
+/* TLAST_ELEMENTPTR LAST ELEMENT POINTER IN LAST CONTAINER */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::getLastAndRemove(Signal* signal)
+{
+ DirRangePtr glrOverflowrangeptr;
+ DirectoryarrayPtr glrOverflowDirptr;
+ Uint32 tglrHead;
+ Uint32 tglrTmp;
+
+ GLR_LOOP_10:
+ if (((tlastContainerhead >> 7) & 0x3) != 0) {
+ jam();
+ lastPrevpageptr.i = lastPageptr.i;
+ lastPrevpageptr.p = lastPageptr.p;
+ tlastPrevconptr = tlastContainerptr;
+ tlastPageindex = tlastContainerhead & 0x7f;
+ if (((tlastContainerhead >> 9) & 0x1) == ZFALSE) {
+ jam();
+ arrGuard(tlastContainerptr + 1, 2048);
+ tglrTmp = lastPageptr.p->word32[tlastContainerptr + 1];
+ glrOverflowrangeptr.i = fragrecptr.p->overflowdir;
+ ptrCheckGuard(glrOverflowrangeptr, cdirrangesize, dirRange);
+ arrGuard((tglrTmp >> 8), 256);
+ glrOverflowDirptr.i = glrOverflowrangeptr.p->dirArray[tglrTmp >> 8];
+ ptrCheckGuard(glrOverflowDirptr, cdirarraysize, directoryarray);
+ lastPageptr.i = glrOverflowDirptr.p->pagep[tglrTmp & 0xff];
+ ptrCheckGuard(lastPageptr, cpagesize, page8);
+ }//if
+ tlastContainerptr = (tlastPageindex << ZSHIFT_PLUS) - (tlastPageindex << ZSHIFT_MINUS);
+ if (((tlastContainerhead >> 7) & 3) == ZLEFT) {
+ jam();
+ tlastForward = ZTRUE;
+ tlastContainerptr = tlastContainerptr + ZHEAD_SIZE;
+ } else if (((tlastContainerhead >> 7) & 3) == ZRIGHT) {
+ jam();
+ tlastForward = cminusOne;
+ tlastContainerptr = ((tlastContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE;
+ } else {
+ ndbrequire(false);
+ return;
+ }//if
+ arrGuard(tlastContainerptr, 2048);
+ tlastContainerhead = lastPageptr.p->word32[tlastContainerptr];
+ tlastContainerlen = tlastContainerhead >> 26;
+ ndbrequire(tlastContainerlen >= ((Uint32)ZCON_HEAD_SIZE + fragrecptr.p->elementLength));
+ goto GLR_LOOP_10;
+ }//if
+ tlastContainerlen = tlastContainerlen - fragrecptr.p->elementLength;
+ if (tlastForward == ZTRUE) {
+ jam();
+ tlastElementptr = tlastContainerptr + tlastContainerlen;
+ } else {
+ jam();
+ tlastElementptr = (tlastContainerptr + (ZCON_HEAD_SIZE - 1)) - tlastContainerlen;
+ }//if
+ rlPageptr.i = lastPageptr.i;
+ rlPageptr.p = lastPageptr.p;
+ trlPageindex = tlastPageindex;
+ if (((tlastContainerhead >> 10) & 1) == 1) {
+ /* --------------------------------------------------------------------------------- */
+    /* WE HAVE OWNERSHIP OF BOTH ENDS OF THE CONTAINER.                               */
+ /* --------------------------------------------------------------------------------- */
+ if (tlastContainerlen < ZDOWN_LIMIT) {
+ /* --------------------------------------------------------------------------------- */
+      /* WE HAVE DECREASED THE SIZE BELOW THE DOWN LIMIT, SO WE MUST GIVE UP THE OTHER   */
+ /* SIDE OF THE BUFFER. */
+ /* --------------------------------------------------------------------------------- */
+ tlastContainerhead = tlastContainerhead ^ (1 << 10);
+ trlRelCon = ZFALSE;
+ if (tlastForward == ZTRUE) {
+ jam();
+ turlIndex = tlastContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
+ releaseRightlist(signal);
+ } else {
+ jam();
+ tullIndex = tlastContainerptr - (ZBUF_SIZE - ZCON_HEAD_SIZE);
+ releaseLeftlist(signal);
+ }//if
+ }//if
+ }//if
+ if (tlastContainerlen <= 2) {
+ ndbrequire(tlastContainerlen == 2);
+ if (lastPrevpageptr.i != RNIL) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+      /* THE LAST CONTAINER IS EMPTY AND IS NOT THE FIRST CONTAINER (WHICH IS NEVER REMOVED). */
+ /* DELETE THE LAST CONTAINER AND UPDATE THE PREVIOUS CONTAINER. ALSO PUT THIS */
+ /* CONTAINER IN FREE CONTAINER LIST OF THE PAGE. */
+ /* --------------------------------------------------------------------------------- */
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = lastPrevpageptr.p;
+ cundoElemIndex = tlastPrevconptr;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ ndbrequire(tlastPrevconptr < 2048);
+ tglrTmp = lastPrevpageptr.p->word32[tlastPrevconptr] >> 9;
+ dbgWord32(lastPrevpageptr, tlastPrevconptr, tglrTmp << 9);
+ lastPrevpageptr.p->word32[tlastPrevconptr] = tglrTmp << 9;
+ trlRelCon = ZTRUE;
+ if (tlastForward == ZTRUE) {
+ jam();
+ tullIndex = tlastContainerptr;
+ releaseLeftlist(signal);
+ } else {
+ jam();
+ turlIndex = tlastContainerptr;
+ releaseRightlist(signal);
+ }//if
+ return;
+ }//if
+ }//if
+ tglrHead = tlastContainerhead << 6;
+ tglrHead = tglrHead >> 6;
+ tglrHead = tglrHead | (tlastContainerlen << 26);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = lastPageptr.p;
+ cundoElemIndex = tlastContainerptr;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(lastPageptr, tlastContainerptr, tglrHead);
+ arrGuard(tlastContainerptr, 2048);
+ lastPageptr.p->word32[tlastContainerptr] = tglrHead;
+}//Dbacc::getLastAndRemove()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_LEFTLIST */
+/* INPUT: */
+/* RL_PAGEPTR PAGE POINTER OF CONTAINER TO BE RELEASED */
+/* TRL_PAGEINDEX PAGE INDEX OF CONTAINER TO BE RELEASED */
+/*               TULL_INDEX             INDEX OF CONTAINER TO BE RELEASED            */
+/* TRL_REL_CON TRUE IF CONTAINER RELEASED OTHERWISE ONLY */
+/* A PART IS RELEASED. */
+/* */
+/* OUTPUT: */
+/* NONE */
+/* */
+/*         THE FREE LIST OF THE LEFT FREE BUFFER IN THE PAGE WILL BE UPDATED.        */
+/*         TULL_INDEX IS THE INDEX OF THE FIRST WORD IN THE LEFT SIDE OF THE BUFFER  */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseLeftlist(Signal* signal)
+{
+ Uint32 tullTmp;
+ Uint32 tullTmp1;
+
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = rlPageptr.p;
+ cundoElemIndex = tullIndex;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = ZPOS_EMPTY_LIST;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* IF A CONTAINER IS RELEASED AND NOT ONLY A PART THEN WE HAVE TO REMOVE IT */
+ /* FROM THE LIST OF USED CONTAINERS IN THE PAGE. THIS IN ORDER TO ENSURE THAT */
+ /* WE CAN FIND ALL LOCKED ELEMENTS DURING LOCAL CHECKPOINT. */
+ /* --------------------------------------------------------------------------------- */
+ if (trlRelCon == ZTRUE) {
+ arrGuard(tullIndex, 2048);
+ trlHead = rlPageptr.p->word32[tullIndex];
+ trlNextused = (trlHead >> 11) & 0x7f;
+ trlPrevused = (trlHead >> 18) & 0x7f;
+ if (trlNextused < ZEMPTYLIST) {
+ jam();
+ tullTmp1 = (trlNextused << ZSHIFT_PLUS) - (trlNextused << ZSHIFT_MINUS);
+ tullTmp1 = tullTmp1 + ZHEAD_SIZE;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = tullTmp1;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ tullTmp = rlPageptr.p->word32[tullTmp1] & 0xfe03ffff;
+ dbgWord32(rlPageptr, tullTmp1, tullTmp | (trlPrevused << 18));
+ rlPageptr.p->word32[tullTmp1] = tullTmp | (trlPrevused << 18);
+ } else {
+ ndbrequire(trlNextused == ZEMPTYLIST);
+ jam();
+ }//if
+ if (trlPrevused < ZEMPTYLIST) {
+ jam();
+ tullTmp1 = (trlPrevused << ZSHIFT_PLUS) - (trlPrevused << ZSHIFT_MINUS);
+ tullTmp1 = tullTmp1 + ZHEAD_SIZE;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = tullTmp1;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ tullTmp = rlPageptr.p->word32[tullTmp1] & 0xfffc07ff;
+ dbgWord32(rlPageptr, tullTmp1, tullTmp | (trlNextused << 11));
+ rlPageptr.p->word32[tullTmp1] = tullTmp | (trlNextused << 11);
+ } else {
+ ndbrequire(trlPrevused == ZEMPTYLIST);
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE ARE FIRST IN THE LIST AND THUS WE NEED TO UPDATE THE FIRST POINTER. */
+ /* --------------------------------------------------------------------------------- */
+ tullTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xc07fffff;
+ dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, tullTmp | (trlNextused << 23));
+ rlPageptr.p->word32[ZPOS_EMPTY_LIST] = tullTmp | (trlNextused << 23);
+ }//if
+ }//if
+ dbgWord32(rlPageptr, tullIndex + 1, ZEMPTYLIST);
+ arrGuard(tullIndex + 1, 2048);
+ rlPageptr.p->word32[tullIndex + 1] = ZEMPTYLIST;
+ tullTmp1 = (rlPageptr.p->word32[ZPOS_EMPTY_LIST] >> 7) & 0x7f;
+ dbgWord32(rlPageptr, tullIndex, tullTmp1);
+ arrGuard(tullIndex, 2048);
+ rlPageptr.p->word32[tullIndex] = tullTmp1;
+ if (tullTmp1 < ZEMPTYLIST) {
+ jam();
+ tullTmp1 = (tullTmp1 << ZSHIFT_PLUS) - (tullTmp1 << ZSHIFT_MINUS);
+ tullTmp1 = (tullTmp1 + ZHEAD_SIZE) + 1;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = tullTmp1;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(rlPageptr, tullTmp1, trlPageindex);
+ rlPageptr.p->word32[tullTmp1] = trlPageindex; /* UPDATES PREV POINTER IN THE NEXT FREE */
+ } else {
+ ndbrequire(tullTmp1 == ZEMPTYLIST);
+ }//if
+ tullTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST];
+ tullTmp = (((tullTmp >> 14) << 14) | (trlPageindex << 7)) | (tullTmp & 0x7f);
+ dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, tullTmp);
+ rlPageptr.p->word32[ZPOS_EMPTY_LIST] = tullTmp;
+ dbgWord32(rlPageptr, ZPOS_ALLOC_CONTAINERS, rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1);
+ rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1;
+ ndbrequire(rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] <= ZNIL);
+ if (((rlPageptr.p->word32[ZPOS_EMPTY_LIST] >> ZPOS_PAGE_TYPE_BIT) & 3) == 1) {
+ jam();
+ colPageptr.i = rlPageptr.i;
+ colPageptr.p = rlPageptr.p;
+ ptrCheck(colPageptr, cpagesize, page8);
+ checkoverfreelist(signal);
+ }//if
+}//Dbacc::releaseLeftlist()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_RIGHTLIST */
+/* INPUT: */
+/* RL_PAGEPTR PAGE POINTER OF CONTAINER TO BE RELEASED */
+/* TRL_PAGEINDEX PAGE INDEX OF CONTAINER TO BE RELEASED */
+/* TURL_INDEX INDEX OF CONTAINER TO BE RELEASED */
+/* TRL_REL_CON TRUE IF CONTAINER RELEASED OTHERWISE ONLY */
+/* A PART IS RELEASED. */
+/* */
+/* OUTPUT: */
+/* NONE */
+/* */
+/*       THE FREE LIST OF THE RIGHT FREE BUFFER IN THE PAGE WILL BE UPDATED.         */
+/*       TURL_INDEX IS THE INDEX OF THE FIRST WORD IN THE RIGHT SIDE OF              */
+/*       THE BUFFER, WHICH IS THE LAST WORD IN THE BUFFER.                           */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseRightlist(Signal* signal)
+{
+ Uint32 turlTmp1;
+ Uint32 turlTmp;
+
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = rlPageptr.p;
+ cundoElemIndex = turlIndex;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = ZPOS_EMPTY_LIST;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* IF A CONTAINER IS RELEASED AND NOT ONLY A PART THEN WE HAVE TO REMOVE IT */
+ /* FROM THE LIST OF USED CONTAINERS IN THE PAGE. THIS IN ORDER TO ENSURE THAT */
+ /* WE CAN FIND ALL LOCKED ELEMENTS DURING LOCAL CHECKPOINT. */
+ /* --------------------------------------------------------------------------------- */
+ if (trlRelCon == ZTRUE) {
+ jam();
+ arrGuard(turlIndex, 2048);
+ trlHead = rlPageptr.p->word32[turlIndex];
+ trlNextused = (trlHead >> 11) & 0x7f;
+ trlPrevused = (trlHead >> 18) & 0x7f;
+ if (trlNextused < ZEMPTYLIST) {
+ jam();
+ turlTmp1 = (trlNextused << ZSHIFT_PLUS) - (trlNextused << ZSHIFT_MINUS);
+ turlTmp1 = turlTmp1 + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = turlTmp1;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ turlTmp = rlPageptr.p->word32[turlTmp1] & 0xfe03ffff;
+ dbgWord32(rlPageptr, turlTmp1, turlTmp | (trlPrevused << 18));
+ rlPageptr.p->word32[turlTmp1] = turlTmp | (trlPrevused << 18);
+ } else {
+ ndbrequire(trlNextused == ZEMPTYLIST);
+ jam();
+ }//if
+ if (trlPrevused < ZEMPTYLIST) {
+ jam();
+ turlTmp1 = (trlPrevused << ZSHIFT_PLUS) - (trlPrevused << ZSHIFT_MINUS);
+ turlTmp1 = turlTmp1 + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = turlTmp1;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ turlTmp = rlPageptr.p->word32[turlTmp1] & 0xfffc07ff;
+ dbgWord32(rlPageptr, turlTmp1, turlTmp | (trlNextused << 11));
+ rlPageptr.p->word32[turlTmp1] = turlTmp | (trlNextused << 11);
+ } else {
+ ndbrequire(trlPrevused == ZEMPTYLIST);
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE ARE FIRST IN THE LIST AND THUS WE NEED TO UPDATE THE FIRST POINTER */
+ /* OF THE RIGHT CONTAINER LIST. */
+ /* --------------------------------------------------------------------------------- */
+ turlTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xff80ffff;
+ dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, turlTmp | (trlNextused << 16));
+ rlPageptr.p->word32[ZPOS_EMPTY_LIST] = turlTmp | (trlNextused << 16);
+ }//if
+ }//if
+ dbgWord32(rlPageptr, turlIndex + 1, ZEMPTYLIST);
+ arrGuard(turlIndex + 1, 2048);
+ rlPageptr.p->word32[turlIndex + 1] = ZEMPTYLIST;
+ turlTmp1 = rlPageptr.p->word32[ZPOS_EMPTY_LIST] & 0x7f;
+ dbgWord32(rlPageptr, turlIndex, turlTmp1);
+ arrGuard(turlIndex, 2048);
+ rlPageptr.p->word32[turlIndex] = turlTmp1;
+ if (turlTmp1 < ZEMPTYLIST) {
+ jam();
+ turlTmp = (turlTmp1 << ZSHIFT_PLUS) - (turlTmp1 << ZSHIFT_MINUS);
+ turlTmp = turlTmp + ((ZHEAD_SIZE + ZBUF_SIZE) - (ZCON_HEAD_SIZE - 1));
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = turlTmp;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(rlPageptr, turlTmp, trlPageindex);
+ rlPageptr.p->word32[turlTmp] = trlPageindex; /* UPDATES PREV POINTER IN THE NEXT FREE */
+ } else {
+ ndbrequire(turlTmp1 == ZEMPTYLIST);
+ }//if
+ turlTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST];
+ dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, ((turlTmp >> 7) << 7) | trlPageindex);
+ rlPageptr.p->word32[ZPOS_EMPTY_LIST] = ((turlTmp >> 7) << 7) | trlPageindex;
+ dbgWord32(rlPageptr, ZPOS_ALLOC_CONTAINERS, rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1);
+ rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1;
+ ndbrequire(rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] <= ZNIL);
+ if (((rlPageptr.p->word32[ZPOS_EMPTY_LIST] >> ZPOS_PAGE_TYPE_BIT) & 3) == 1) {
+ jam();
+ colPageptr.i = rlPageptr.i;
+ colPageptr.p = rlPageptr.p;
+ checkoverfreelist(signal);
+ }//if
+}//Dbacc::releaseRightlist()
+
+/* --------------------------------------------------------------------------------- */
+/* CHECKOVERFREELIST */
+/* INPUT: COL_PAGEPTR, POINTER OF AN OVERFLOW PAGE RECORD. */
+/* DESCRIPTION: CHECKS IF THE PAGE HAS TO BE PUT IN THE FREE LIST OF OVERFLOW        */
+/*              PAGES. WHEN IT HAS TO, AN OVERFLOW REC PTR WILL BE ALLOCATED         */
+/*              TO KEEP INFORMATION ABOUT THE PAGE.                                  */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::checkoverfreelist(Signal* signal)
+{
+ Uint32 tcolTmp;
+
+ if (fragrecptr.p->loadingFlag == ZFALSE) {
+ tcolTmp = colPageptr.p->word32[ZPOS_ALLOC_CONTAINERS];
+ if (tcolTmp <= ZFREE_LIMIT) {
+ if (tcolTmp == 0) {
+ jam();
+ ropPageptr = colPageptr;
+ releaseOverpage(signal);
+ } else {
+ jam();
+ if (colPageptr.p->word32[ZPOS_OVERFLOWREC] == RNIL) {
+ ndbrequire(cfirstfreeoverrec != RNIL);
+ jam();
+ seizeOverRec(signal);
+ sorOverflowRecPtr.p->dirindex = colPageptr.p->word32[ZPOS_PAGE_ID];
+ sorOverflowRecPtr.p->overpage = colPageptr.i;
+ dbgWord32(colPageptr, ZPOS_OVERFLOWREC, sorOverflowRecPtr.i);
+ colPageptr.p->word32[ZPOS_OVERFLOWREC] = sorOverflowRecPtr.i;
+ porOverflowRecPtr = sorOverflowRecPtr;
+ putOverflowRecInFrag(signal);
+ }//if
+ }//if
+ }//if
+ }//if
+}//Dbacc::checkoverfreelist()
+
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* */
+/* END OF DELETE MODULE */
+/* */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* */
+/* COMMIT AND ABORT MODULE */
+/* */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ABORT_OPERATION */
+/*DESCRIPTION: AN OPERATION RECORD CAN BE IN A LOCK QUEUE OF AN ELEMENT OR  */
+/*OWN THE LOCK. THIS SUBROUTINE CHECKS THE LOCK STATE OF THE OPERATION. THE */
+/*OPERATION RECORD WILL BE REMOVED FROM THE QUEUE IF IT BELONGS TO ONE;     */
+/*OTHERWISE THE ELEMENT HEAD WILL BE UPDATED.                               */
+/* ------------------------------------------------------------------------- */
+void Dbacc::abortOperation(Signal* signal)
+{
+ OperationrecPtr aboOperRecPtr;
+ OperationrecPtr TaboOperRecPtr;
+ Page8Ptr aboPageidptr;
+ Uint32 taboElementptr;
+ Uint32 tmp2Olq;
+
+ if (operationRecPtr.p->lockOwner == ZTRUE) {
+ takeOutLockOwnersList(signal, operationRecPtr);
+ if (operationRecPtr.p->insertIsDone == ZTRUE) {
+ jam();
+ operationRecPtr.p->elementIsDisappeared = ZTRUE;
+ }//if
+ if ((operationRecPtr.p->nextParallelQue != RNIL) ||
+ (operationRecPtr.p->nextSerialQue != RNIL)) {
+ jam();
+ releaselock(signal);
+ } else {
+ /* --------------------------------------------------------------------------------- */
+      /* WE ARE OWNER OF THE LOCK AND NO OTHER OPERATIONS ARE QUEUED. IF INSERT OR STANDBY, */
+      /* WE DELETE THE ELEMENT; OTHERWISE WE REMOVE THE LOCK FROM THE ELEMENT.        */
+ /* --------------------------------------------------------------------------------- */
+ if (operationRecPtr.p->elementIsDisappeared == ZFALSE) {
+ jam();
+ taboElementptr = operationRecPtr.p->elementPointer;
+ aboPageidptr.i = operationRecPtr.p->elementPage;
+ tmp2Olq = ElementHeader::setUnlocked(operationRecPtr.p->hashvaluePart,
+ operationRecPtr.p->scanBits);
+ ptrCheckGuard(aboPageidptr, cpagesize, page8);
+ dbgWord32(aboPageidptr, taboElementptr, tmp2Olq);
+ arrGuard(taboElementptr, 2048);
+ aboPageidptr.p->word32[taboElementptr] = tmp2Olq;
+ return;
+ } else {
+ jam();
+ commitdelete(signal, false);
+ }//if
+ }//if
+ } else {
+ /* --------------------------------------------------------------- */
+ // We are not the lock owner.
+ /* --------------------------------------------------------------- */
+ jam();
+ takeOutFragWaitQue(signal);
+ if (operationRecPtr.p->prevParallelQue != RNIL) {
+ jam();
+ /* ---------------------------------------------------------------------------------- */
+ /* SINCE WE ARE NOT QUEUE LEADER WE NEED NOT CONSIDER IF THE ELEMENT IS TO BE DELETED.*/
+ /* We will simply remove it from the parallel list without any other rearrangements. */
+ /* ---------------------------------------------------------------------------------- */
+ aboOperRecPtr.i = operationRecPtr.p->prevParallelQue;
+ ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
+ aboOperRecPtr.p->nextParallelQue = operationRecPtr.p->nextParallelQue;
+ if (operationRecPtr.p->nextParallelQue != RNIL) {
+ jam();
+ aboOperRecPtr.i = operationRecPtr.p->nextParallelQue;
+ ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
+ aboOperRecPtr.p->prevParallelQue = operationRecPtr.p->prevParallelQue;
+ }//if
+ } else if (operationRecPtr.p->prevSerialQue != RNIL) {
+ /* ------------------------------------------------------------------------- */
+ // We are not in the parallel queue owning the lock. Thus we are in another parallel
+      // queue further down in the serial queue. We are however first since prevParallelQue
+ // == RNIL.
+ /* ------------------------------------------------------------------------- */
+ if (operationRecPtr.p->nextParallelQue != RNIL) {
+ jam();
+ /* ------------------------------------------------------------------------- */
+ // We have an operation in the queue after us. We simply rearrange this parallel queue.
+ // The new leader of this parallel queue will be operation in the serial queue.
+ /* ------------------------------------------------------------------------- */
+ aboOperRecPtr.i = operationRecPtr.p->nextParallelQue;
+ ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
+ aboOperRecPtr.p->nextSerialQue = operationRecPtr.p->nextSerialQue;
+ aboOperRecPtr.p->prevSerialQue = operationRecPtr.p->prevSerialQue;
+ aboOperRecPtr.p->prevParallelQue = RNIL; // Queue Leader
+ if (operationRecPtr.p->nextSerialQue != RNIL) {
+ jam();
+ TaboOperRecPtr.i = operationRecPtr.p->nextSerialQue;
+ ptrCheckGuard(TaboOperRecPtr, coprecsize, operationrec);
+ TaboOperRecPtr.p->prevSerialQue = aboOperRecPtr.i;
+ }//if
+ TaboOperRecPtr.i = operationRecPtr.p->prevSerialQue;
+ ptrCheckGuard(TaboOperRecPtr, coprecsize, operationrec);
+ TaboOperRecPtr.p->nextSerialQue = aboOperRecPtr.i;
+ } else {
+ jam();
+ /* ------------------------------------------------------------------------- */
+ // We are the only operation in this parallel queue. We will thus shrink the serial
+ // queue.
+ /* ------------------------------------------------------------------------- */
+ aboOperRecPtr.i = operationRecPtr.p->prevSerialQue;
+ ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
+ aboOperRecPtr.p->nextSerialQue = operationRecPtr.p->nextSerialQue;
+ if (operationRecPtr.p->nextSerialQue != RNIL) {
+ jam();
+ aboOperRecPtr.i = operationRecPtr.p->nextSerialQue;
+ ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
+ aboOperRecPtr.p->prevSerialQue = operationRecPtr.p->prevSerialQue;
+ }//if
+ }//if
+ }//if
+ }//if
+ /* ------------------------------------------------------------------------- */
+ // If prevParallelQue = RNIL and prevSerialQue = RNIL and we are not owner of the
+ // lock then we cannot be in any lock queue at all.
+ /* ------------------------------------------------------------------------- */
+}//Dbacc::abortOperation()
+
+void Dbacc::commitDeleteCheck()
+{
+ OperationrecPtr opPtr;
+ OperationrecPtr lastOpPtr;
+ OperationrecPtr deleteOpPtr;
+ bool elementDeleted = false;
+ bool deleteCheckOngoing = true;
+ Uint32 hashValue = 0;
+ lastOpPtr = operationRecPtr;
+ opPtr.i = operationRecPtr.p->nextParallelQue;
+ while (opPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(opPtr, coprecsize, operationrec);
+ lastOpPtr = opPtr;
+ opPtr.i = opPtr.p->nextParallelQue;
+ }//while
+ deleteOpPtr = lastOpPtr;
+ do {
+ if (deleteOpPtr.p->operation == ZDELETE) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* IF THE CURRENT OPERATION TO BE COMMITTED IS A DELETE OPERATION DUE TO A */
+ /* SCAN-TAKEOVER THE ACTUAL DELETE WILL BE PERFORMED BY THE PREVIOUS OPERATION (SCAN)*/
+      /* IN THE PARALLEL QUEUE WHICH OWNS THE LOCK. THE PROBLEM IS THAT THE SCAN OPERATION */
+ /* DOES NOT HAVE A HASH VALUE ASSIGNED TO IT SO WE COPY IT FROM THIS OPERATION. */
+ /* */
+ /* WE ASSUME THAT THIS SOLUTION WILL WORK BECAUSE THE ONLY WAY A SCAN CAN PERFORM */
+ /* A DELETE IS BY BEING FOLLOWED BY A NORMAL DELETE-OPERATION THAT HAS A HASH VALUE. */
+ /* --------------------------------------------------------------------------------- */
+ hashValue = deleteOpPtr.p->hashValue;
+ elementDeleted = true;
+ deleteCheckOngoing = false;
+ } else if ((deleteOpPtr.p->operation == ZREAD) ||
+ (deleteOpPtr.p->operation == ZSCAN_OP)) {
+ /* --------------------------------------------------------------------------------- */
+ /* We are trying to find out whether the commit will in the end delete the tuple. */
+      /* Normally the delete will be the last operation in the list of operations on this element. */
+ /* It is however possible to issue reads and scans in the same savepoint as the */
+ /* delete operation was issued and these can end up after the delete in the list of */
+ /* operations in the parallel queue. Thus if we discover a read or a scan we have to */
+ /* continue scanning the list looking for a delete operation. */
+ /* --------------------------------------------------------------------------------- */
+ deleteOpPtr.i = deleteOpPtr.p->prevParallelQue;
+ if (deleteOpPtr.i == RNIL) {
+ jam();
+ deleteCheckOngoing = false;
+ } else {
+ jam();
+ ptrCheckGuard(deleteOpPtr, coprecsize, operationrec);
+ }//if
+ } else {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* Finding an UPDATE or INSERT before finding a DELETE means we cannot be deleting */
+ /* as the end result of this transaction. */
+ /* --------------------------------------------------------------------------------- */
+ deleteCheckOngoing = false;
+ }//if
+ } while (deleteCheckOngoing);
+ opPtr = lastOpPtr;
+ do {
+ jam();
+ opPtr.p->commitDeleteCheckFlag = ZTRUE;
+ if (elementDeleted) {
+ jam();
+ opPtr.p->elementIsDisappeared = ZTRUE;
+ opPtr.p->hashValue = hashValue;
+ }//if
+ opPtr.i = opPtr.p->prevParallelQue;
+ if (opPtr.i == RNIL) {
+ jam();
+ break;
+ }//if
+ ptrCheckGuard(opPtr, coprecsize, operationrec);
+ } while (true);
+}//Dbacc::commitDeleteCheck()
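+
+/* --------------------------------------------------------------------------------- */
+/* EDITOR'S SKETCH (illustrative only, not part of the DBACC block): the backward     */
+/* scan performed by commitDeleteCheck() above, reduced to plain operation codes.     */
+/* Reads and scans queued after the delete are skipped; an update or insert found     */
+/* before a delete means the transaction does not end by deleting the element.        */
+/* --------------------------------------------------------------------------------- */
+enum SketchOpCode { SK_READ, SK_SCAN, SK_UPDATE, SK_INSERT, SK_DELETE };
+
+static bool
+sketch_ends_in_delete(const SketchOpCode* parallelQueueOps, Uint32 queueLength)
+{
+  for (Uint32 i = queueLength; i > 0; i--) {
+    const SketchOpCode op = parallelQueueOps[i - 1];
+    if (op == SK_DELETE)
+      return true;                  /* last "real" operation is a delete              */
+    if (op != SK_READ && op != SK_SCAN)
+      return false;                 /* an update or insert cancels any earlier delete */
+  }
+  return false;                     /* only reads and scans: nothing is deleted       */
+}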
+
+/* ------------------------------------------------------------------------- */
+/* COMMIT_OPERATION */
+/* INPUT: OPERATION_REC_PTR, POINTER TO AN OPERATION RECORD */
+/* DESCRIPTION: THE OPERATION RECORD WILL BE TAKEN OUT OF ANY LOCK QUEUE.   */
+/* IF IT OWNS THE ELEMENT LOCK, THE HEAD OF THE ELEMENT WILL BE UPDATED.    */
+/* ------------------------------------------------------------------------- */
+void Dbacc::commitOperation(Signal* signal)
+{
+ OperationrecPtr tolqTmpPtr;
+ Page8Ptr coPageidptr;
+ Uint32 tcoElementptr;
+ Uint32 tmp2Olq;
+
+ if ((operationRecPtr.p->commitDeleteCheckFlag == ZFALSE) &&
+ (operationRecPtr.p->operation != ZSCAN_OP) &&
+ (operationRecPtr.p->operation != ZREAD)) {
+ jam();
+ /* This method is used to check whether the end result of the transaction
+       will be to delete the tuple. In this case all operations will be marked
+ with elementIsDisappeared = true to ensure that the last operation
+ committed will remove the tuple. We only run this once per transaction
+ (commitDeleteCheckFlag = true if performed earlier) and we don't
+ execute this code when committing a scan operation since committing
+ a scan operation only means that the scan is continuing and the scan
+ lock is released.
+ */
+ commitDeleteCheck();
+ }//if
+ if (operationRecPtr.p->lockOwner == ZTRUE) {
+ takeOutLockOwnersList(signal, operationRecPtr);
+ if ((operationRecPtr.p->nextParallelQue == RNIL) &&
+ (operationRecPtr.p->nextSerialQue == RNIL) &&
+ (operationRecPtr.p->elementIsDisappeared == ZFALSE)) {
+ /*
+ This is the normal path through the commit for operations owning the
+ lock without any queues and not a delete operation.
+ */
+ coPageidptr.i = operationRecPtr.p->elementPage;
+ tcoElementptr = operationRecPtr.p->elementPointer;
+ tmp2Olq = ElementHeader::setUnlocked(operationRecPtr.p->hashvaluePart,
+ operationRecPtr.p->scanBits);
+ ptrCheckGuard(coPageidptr, cpagesize, page8);
+ dbgWord32(coPageidptr, tcoElementptr, tmp2Olq);
+ arrGuard(tcoElementptr, 2048);
+ coPageidptr.p->word32[tcoElementptr] = tmp2Olq;
+ return;
+ } else if ((operationRecPtr.p->nextParallelQue != RNIL) ||
+ (operationRecPtr.p->nextSerialQue != RNIL)) {
+ jam();
+ /*
+ The case when there is a queue lined up.
+ Release the lock and pass it to the next operation lined up.
+ */
+ releaselock(signal);
+ return;
+ } else {
+ jam();
+ /*
+ No queue and elementIsDisappeared is true. We perform the actual delete
+ operation.
+ */
+ commitdelete(signal, false);
+ return;
+ }//if
+ } else {
+ /*
+ THE OPERATION DOES NOT OWN THE LOCK. IT MUST BE IN A LOCK QUEUE OF THE
+ ELEMENT.
+ */
+ ndbrequire(operationRecPtr.p->prevParallelQue != RNIL);
+ jam();
+ tolqTmpPtr.i = operationRecPtr.p->prevParallelQue;
+ ptrCheckGuard(tolqTmpPtr, coprecsize, operationrec);
+ tolqTmpPtr.p->nextParallelQue = operationRecPtr.p->nextParallelQue;
+ if (operationRecPtr.p->nextParallelQue != RNIL) {
+ jam();
+ tolqTmpPtr.i = operationRecPtr.p->nextParallelQue;
+ ptrCheckGuard(tolqTmpPtr, coprecsize, operationrec);
+ tolqTmpPtr.p->prevParallelQue = operationRecPtr.p->prevParallelQue;
+ }//if
+
+ /**
+ * Check possible lock upgrade
+ * 1) Find lock owner
+ * 2) Count transactions in parallel que
+ * 3) If count == 1 and TRANSID(next serial) == TRANSID(lock owner)
+ * upgrade next serial
+ */
+ if(operationRecPtr.p->lockMode)
+ {
+ jam();
+ /**
+ * Committing a non shared operation can't lead to lock upgrade
+ */
+ return;
+ }
+
+ OperationrecPtr lock_owner;
+ lock_owner.i = operationRecPtr.p->prevParallelQue;
+ ptrCheckGuard(lock_owner, coprecsize, operationrec);
+ Uint32 transid[2] = { lock_owner.p->transId1,
+ lock_owner.p->transId2 };
+
+
+ while(lock_owner.p->prevParallelQue != RNIL)
+ {
+ lock_owner.i = lock_owner.p->prevParallelQue;
+ ptrCheckGuard(lock_owner, coprecsize, operationrec);
+
+ if(lock_owner.p->transId1 != transid[0] ||
+ lock_owner.p->transId2 != transid[1])
+ {
+ jam();
+ /**
+ * If more than 1 trans in lock queue -> no lock upgrade
+ */
+ return;
+ }
+ }
+
+ check_lock_upgrade(signal, lock_owner, operationRecPtr);
+ }
+}//Dbacc::commitOperation()
+
+void
+Dbacc::check_lock_upgrade(Signal* signal,
+ OperationrecPtr lock_owner,
+ OperationrecPtr release_op)
+{
+ if((lock_owner.p->transId1 == release_op.p->transId1 &&
+ lock_owner.p->transId2 == release_op.p->transId2) ||
+ release_op.p->lockMode ||
+ lock_owner.p->nextSerialQue == RNIL)
+ {
+ jam();
+ /**
+ * No lock upgrade if same trans or lock owner has no serial queue
+ * or releasing non shared op
+ */
+ return;
+ }
+
+ OperationrecPtr next;
+ next.i = lock_owner.p->nextSerialQue;
+ ptrCheckGuard(next, coprecsize, operationrec);
+
+ if(lock_owner.p->transId1 != next.p->transId1 ||
+ lock_owner.p->transId2 != next.p->transId2)
+ {
+ jam();
+ /**
+     * No lock upgrade if not the same trans in serial queue
+ */
+ return;
+ }
+
+ if (getNoParallelTransaction(lock_owner.p) > 1)
+ {
+ jam();
+ /**
+     * No lock upgrade if more than 1 transaction in parallel queue
+ */
+ return;
+ }
+
+ if (getNoParallelTransaction(next.p) > 1)
+ {
+ jam();
+ /**
+     * No lock upgrade if more than 1 transaction in next's parallel queue
+ */
+ return;
+ }
+
+ OperationrecPtr tmp;
+ tmp.i = lock_owner.p->nextSerialQue = next.p->nextSerialQue;
+ if(tmp.i != RNIL)
+ {
+ ptrCheckGuard(tmp, coprecsize, operationrec);
+ ndbassert(tmp.p->prevSerialQue == next.i);
+ tmp.p->prevSerialQue = lock_owner.i;
+ }
+ next.p->nextSerialQue = next.p->prevSerialQue = RNIL;
+
+  // Find end of parallel queue
+ tmp = lock_owner;
+ Uint32 lockMode = next.p->lockMode > lock_owner.p->lockMode ?
+ next.p->lockMode : lock_owner.p->lockMode;
+ while(tmp.p->nextParallelQue != RNIL)
+ {
+ jam();
+ tmp.i = tmp.p->nextParallelQue;
+ tmp.p->lockMode = lockMode;
+ ptrCheckGuard(tmp, coprecsize, operationrec);
+ }
+ tmp.p->lockMode = lockMode;
+
+ next.p->prevParallelQue = tmp.i;
+ tmp.p->nextParallelQue = next.i;
+
+ OperationrecPtr save = operationRecPtr;
+
+ Uint32 localdata[2];
+ localdata[0] = lock_owner.p->localdata[0];
+ localdata[1] = lock_owner.p->localdata[1];
+ do {
+ next.p->localdata[0] = localdata[0];
+ next.p->localdata[1] = localdata[1];
+ next.p->lockMode = lockMode;
+
+ operationRecPtr = next;
+ executeNextOperation(signal);
+ if (next.p->nextParallelQue != RNIL)
+ {
+ jam();
+ next.i = next.p->nextParallelQue;
+ ptrCheckGuard(next, coprecsize, operationrec);
+ } else {
+ jam();
+ break;
+ }//if
+ } while (1);
+
+ operationRecPtr = save;
+
+}
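+
+/* --------------------------------------------------------------------------------- */
+/* EDITOR'S SKETCH (illustrative only, not part of the DBACC block): the precondition */
+/* checked by check_lock_upgrade() above, collapsed into one predicate. The struct    */
+/* and its fields are hypothetical stand-ins for the operation record; lockMode == 0  */
+/* is read as "shared lock", which is an assumption based on the surrounding code.    */
+/* --------------------------------------------------------------------------------- */
+struct SketchLockOp {
+  Uint32 transId1, transId2;
+  Uint32 lockMode;            /* 0 appears to mean a shared lock                       */
+  bool   hasSerialQueue;      /* nextSerialQue != RNIL                                 */
+  Uint32 parallelTransCount;  /* what getNoParallelTransaction() returns               */
+};
+
+static bool
+sketch_lock_upgrade_possible(const SketchLockOp& owner,
+                             const SketchLockOp& releaseOp,
+                             const SketchLockOp& firstSerial)
+{
+  const bool sameTransAsOwner = owner.transId1 == releaseOp.transId1 &&
+                                owner.transId2 == releaseOp.transId2;
+  return !sameTransAsOwner &&                      /* releasing op is another trans     */
+         releaseOp.lockMode == 0 &&                /* only a shared release can help    */
+         owner.hasSerialQueue &&                   /* someone is actually waiting       */
+         owner.transId1 == firstSerial.transId1 &&
+         owner.transId2 == firstSerial.transId2 && /* waiter is the owner's own trans   */
+         owner.parallelTransCount == 1 &&
+         firstSerial.parallelTransCount == 1;
+}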
+
+/* ------------------------------------------------------------------------- */
+/* RELEASELOCK */
+/*      RESETS THE LOCK OF AN ELEMENT.                                      */
+/*      INFORMATION ABOUT THE ELEMENT IS SAVED IN THE OPERATION RECORD.     */
+/*      THIS INFORMATION IS USED TO UPDATE THE HEADER OF THE ELEMENT.       */
+/* ------------------------------------------------------------------------- */
+void Dbacc::releaselock(Signal* signal)
+{
+ OperationrecPtr rloOperPtr;
+ OperationrecPtr trlOperPtr;
+ OperationrecPtr trlTmpOperPtr;
+ Uint32 TelementIsDisappeared;
+
+ trlOperPtr.i = RNIL;
+ if (operationRecPtr.p->nextParallelQue != RNIL) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* NEXT OPERATION TAKES OVER THE LOCK. We will simply move the info from the leader */
+ // to the new queue leader.
+ /* --------------------------------------------------------------------------------- */
+ trlOperPtr.i = operationRecPtr.p->nextParallelQue;
+ ptrCheckGuard(trlOperPtr, coprecsize, operationrec);
+ copyInOperPtr = trlOperPtr;
+ copyOperPtr = operationRecPtr;
+ copyOpInfo(signal);
+ trlOperPtr.p->prevParallelQue = RNIL;
+ if (operationRecPtr.p->nextSerialQue != RNIL) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THERE IS A SERIAL QUEUE. MOVE IT FROM RELEASED OP REC TO THE NEW LOCK OWNER. */
+ /* --------------------------------------------------------------------------------- */
+ trlOperPtr.p->nextSerialQue = operationRecPtr.p->nextSerialQue;
+ trlTmpOperPtr.i = trlOperPtr.p->nextSerialQue;
+ ptrCheckGuard(trlTmpOperPtr, coprecsize, operationrec);
+ trlTmpOperPtr.p->prevSerialQue = trlOperPtr.i;
+ }//if
+
+ check_lock_upgrade(signal, copyInOperPtr, operationRecPtr);
+ /* --------------------------------------------------------------------------------- */
+ /* SINCE THERE ARE STILL ITEMS IN THE PARALLEL QUEUE WE NEED NOT WORRY ABOUT */
+ /* STARTING QUEUED OPERATIONS. THUS WE CAN END HERE. */
+ /* --------------------------------------------------------------------------------- */
+ } else {
+ ndbrequire(operationRecPtr.p->nextSerialQue != RNIL);
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE PARALLEL QUEUE IS EMPTY AND THE SERIAL QUEUE IS NOT EMPTY. WE NEED TO */
+ /* REARRANGE LISTS AND START A NUMBER OF OPERATIONS. */
+ /* --------------------------------------------------------------------------------- */
+ trlOperPtr.i = operationRecPtr.p->nextSerialQue;
+ ptrCheckGuard(trlOperPtr, coprecsize, operationrec);
+ copyOperPtr = operationRecPtr;
+ copyInOperPtr = trlOperPtr;
+ copyOpInfo(signal);
+ trlOperPtr.p->prevSerialQue = RNIL;
+ ndbrequire(trlOperPtr.p->prevParallelQue == RNIL);
+ /* --------------------------------------------------------------------------------- */
+ /* WE HAVE MOVED TO THE NEXT PARALLEL QUEUE. WE MUST START ALL OF THOSE */
+ /* OPERATIONS WHICH UP TILL NOW HAVE BEEN QUEUED WAITING FOR THE LOCK. */
+ /* --------------------------------------------------------------------------------- */
+ rloOperPtr = operationRecPtr;
+ trlTmpOperPtr = trlOperPtr;
+ TelementIsDisappeared = trlOperPtr.p->elementIsDisappeared;
+ Uint32 ThashValue = trlOperPtr.p->hashValue;
+ do {
+ /* --------------------------------------------------------------------------------- */
+      // Ensure that all operations in the queue are assigned the elementIsDisappeared flag
+ // to ensure that the element is removed after a previous delete. An insert does
+ // however revert this decision since the element is put back again. Local checkpoints
+ // complicate life here since they do not execute the next operation but simply change
+      // the state on the operation. We need to set up the variable elementIsDisappeared
+ // properly even when local checkpoints and inserts/writes after deletes occur.
+ /* --------------------------------------------------------------------------------- */
+ trlTmpOperPtr.p->elementIsDisappeared = TelementIsDisappeared;
+ if (TelementIsDisappeared == ZTRUE) {
+ /* --------------------------------------------------------------------------------- */
+ // If the elementIsDisappeared is set then we know that the hashValue is also set
+      // since it always originates from a committing abort or an aborting insert. Scans
+ // do not initialise the hashValue and must have this value initialised if they are
+ // to successfully commit the delete.
+ /* --------------------------------------------------------------------------------- */
+ jam();
+ trlTmpOperPtr.p->hashValue = ThashValue;
+ }//if
+ trlTmpOperPtr.p->localdata[0] = trlOperPtr.p->localdata[0];
+ trlTmpOperPtr.p->localdata[1] = trlOperPtr.p->localdata[1];
+ /* --------------------------------------------------------------------------------- */
+ // Restart the queued operation.
+ /* --------------------------------------------------------------------------------- */
+ operationRecPtr = trlTmpOperPtr;
+ TelementIsDisappeared = executeNextOperation(signal);
+ ThashValue = operationRecPtr.p->hashValue;
+ if (trlTmpOperPtr.p->nextParallelQue != RNIL) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // We will continue with the next operation in the parallel queue and start this as
+ // well.
+ /* --------------------------------------------------------------------------------- */
+ trlTmpOperPtr.i = trlTmpOperPtr.p->nextParallelQue;
+ ptrCheckGuard(trlTmpOperPtr, coprecsize, operationrec);
+ } else {
+ jam();
+ break;
+ }//if
+ } while (1);
+ operationRecPtr = rloOperPtr;
+ }//if
+
+ // Insert the next op into the lock owner list
+ insertLockOwnersList(signal, trlOperPtr);
+ return;
+}//Dbacc::releaselock()
+
+/* --------------------------------------------------------------------------------- */
+/* COPY_OP_INFO */
+/* INPUT: COPY_IN_OPER_PTR AND COPY_OPER_PTR. */
+/* DESCRIPTION: INFORMATION ABOUT THE ELEMENT WILL BE MOVED FROM THE OPERATION       */
+/*              REC TO THE QUEUED OP REC. THE QUEUED OP REC TAKES OVER THE LOCK.     */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::copyOpInfo(Signal* signal)
+{
+ Page8Ptr coiPageidptr;
+
+ copyInOperPtr.p->elementPage = copyOperPtr.p->elementPage;
+ copyInOperPtr.p->elementIsforward = copyOperPtr.p->elementIsforward;
+ copyInOperPtr.p->elementContainer = copyOperPtr.p->elementContainer;
+ copyInOperPtr.p->elementPointer = copyOperPtr.p->elementPointer;
+ copyInOperPtr.p->scanBits = copyOperPtr.p->scanBits;
+ copyInOperPtr.p->hashvaluePart = copyOperPtr.p->hashvaluePart;
+ copyInOperPtr.p->elementIsDisappeared = copyOperPtr.p->elementIsDisappeared;
+ if (copyInOperPtr.p->elementIsDisappeared == ZTRUE) {
+ /* --------------------------------------------------------------------------------- */
+ // If the elementIsDisappeared is set then we know that the hashValue is also set
+    // since it always originates from a committing abort or an aborting insert. Scans
+ // do not initialise the hashValue and must have this value initialised if they are
+ // to successfully commit the delete.
+ /* --------------------------------------------------------------------------------- */
+ jam();
+ copyInOperPtr.p->hashValue = copyOperPtr.p->hashValue;
+ }//if
+ coiPageidptr.i = copyOperPtr.p->elementPage;
+ ptrCheckGuard(coiPageidptr, cpagesize, page8);
+ const Uint32 tmp = ElementHeader::setLocked(copyInOperPtr.i);
+ dbgWord32(coiPageidptr, copyOperPtr.p->elementPointer, tmp);
+ arrGuard(copyOperPtr.p->elementPointer, 2048);
+ coiPageidptr.p->word32[copyOperPtr.p->elementPointer] = tmp;
+ copyInOperPtr.p->localdata[0] = copyOperPtr.p->localdata[0];
+ copyInOperPtr.p->localdata[1] = copyOperPtr.p->localdata[1];
+}//Dbacc::copyOpInfo()
+
+/* ******************--------------------------------------------------------------- */
+/* EXECUTE NEXT OPERATION */
+/* NEXT OPERATION IN A LOCK QUEUE WILL BE EXECUTED. */
+/* --------------------------------------------------------------------------------- */
+Uint32 Dbacc::executeNextOperation(Signal* signal)
+{
+ ndbrequire(operationRecPtr.p->transactionstate == ACTIVE);
+ if (fragrecptr.p->stopQueOp == ZTRUE) {
+ Uint32 TelemDisappeared;
+ jam();
+ TelemDisappeared = operationRecPtr.p->elementIsDisappeared;
+ if ((operationRecPtr.p->elementIsDisappeared == ZTRUE) &&
+ (operationRecPtr.p->prevParallelQue == RNIL) &&
+ ((operationRecPtr.p->operation == ZINSERT) ||
+ (operationRecPtr.p->operation == ZWRITE))) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // In this case we do not wish to change the elementIsDisappeared since that would
+ // create an error the next time this method is called for this operation after local
+ // checkpoint starts up operations again. We must however ensure that operations
+ // that follow in the queue do not get the value ZTRUE when actually an INSERT/WRITE
+ // precedes them (only if the INSERT/WRITE is the first operation).
+ /* --------------------------------------------------------------------------------- */
+ TelemDisappeared = ZFALSE;
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* A LOCAL CHECKPOINT HAS STOPPED OPERATIONS. WE MUST NOT START THE OPERATION */
+ /* AT THIS TIME. WE SET THE STATE TO INDICATE THAT WE ARE READY TO START AS */
+ /* SOON AS WE ARE ALLOWED. */
+ /* --------------------------------------------------------------------------------- */
+ operationRecPtr.p->opState = WAIT_EXE_OP;
+ return TelemDisappeared;
+ }//if
+ takeOutFragWaitQue(signal);
+ if (operationRecPtr.p->elementIsDisappeared == ZTRUE) {
+ /* --------------------------------------------------------------------------------- */
+ /* PREVIOUS OPERATION WAS DELETE OPERATION AND THE ELEMENT IS ALREADY DELETED. */
+ /* --------------------------------------------------------------------------------- */
+ if (((operationRecPtr.p->operation != ZINSERT) &&
+ (operationRecPtr.p->operation != ZWRITE)) ||
+ (operationRecPtr.p->prevParallelQue != RNIL)) {
+ if (operationRecPtr.p->operation != ZSCAN_OP ||
+ operationRecPtr.p->isAccLockReq) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+        // Updates and reads with a previous delete simply abort with a read error indicating
+ // that tuple did not exist. Also inserts and writes not being the first operation.
+ /* --------------------------------------------------------------------------------- */
+ operationRecPtr.p->transactionstate = WAIT_COMMIT_ABORT;
+ signal->theData[0] = operationRecPtr.p->userptr;
+ signal->theData[1] = ZREAD_ERROR;
+ sendSignal(operationRecPtr.p->userblockref, GSN_ACCKEYREF, signal, 2, JBB);
+ return operationRecPtr.p->elementIsDisappeared;
+ } else {
+ /* --------------------------------------------------------------------------------- */
+ /* ABORT OF OPERATION NEEDED BUT THE OPERATION IS A SCAN => SPECIAL TREATMENT. */
+ /* IF THE SCAN WAITS IN QUEUE THEN WE MUST REMOVE THE OPERATION FROM THE SCAN */
+ /* LOCK QUEUE AND IF NO MORE OPERATIONS ARE QUEUED THEN WE SHOULD RESTART THE */
+ /* SCAN PROCESS. OTHERWISE WE SIMPLY RELEASE THE OPERATION AND DECREASE THE */
+ /* NUMBER OF LOCKS HELD. */
+ /* --------------------------------------------------------------------------------- */
+ takeOutScanLockQueue(operationRecPtr.p->scanRecPtr);
+ putReadyScanQueue(signal, operationRecPtr.p->scanRecPtr);
+ return operationRecPtr.p->elementIsDisappeared;
+ }//if
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ // Insert and writes can continue but need to be converted to inserts.
+ /* --------------------------------------------------------------------------------- */
+ jam();
+ operationRecPtr.p->elementIsDisappeared = ZFALSE;
+ operationRecPtr.p->operation = ZINSERT;
+ operationRecPtr.p->insertIsDone = ZTRUE;
+ } else if (operationRecPtr.p->operation == ZINSERT) {
+ bool abortFlag = true;
+ if (operationRecPtr.p->prevParallelQue != RNIL) {
+ OperationrecPtr prevOpPtr;
+ jam();
+ prevOpPtr.i = operationRecPtr.p->prevParallelQue;
+ ptrCheckGuard(prevOpPtr, coprecsize, operationrec);
+ if (prevOpPtr.p->operation == ZDELETE) {
+ jam();
+ abortFlag = false;
+ }//if
+ }//if
+ if (abortFlag) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* ELEMENT STILL REMAINS AND WE ARE TRYING TO INSERT IT AGAIN. THIS IS CLEARLY */
+ /* NOT A GOOD IDEA. */
+ /* --------------------------------------------------------------------------------- */
+ operationRecPtr.p->transactionstate = WAIT_COMMIT_ABORT;
+ signal->theData[0] = operationRecPtr.p->userptr;
+ signal->theData[1] = ZWRITE_ERROR;
+ sendSignal(operationRecPtr.p->userblockref, GSN_ACCKEYREF, signal, 2, JBB);
+ return operationRecPtr.p->elementIsDisappeared;
+ }//if
+ }//if
+ if (operationRecPtr.p->operation == ZSCAN_OP &&
+ ! operationRecPtr.p->isAccLockReq) {
+ jam();
+ takeOutScanLockQueue(operationRecPtr.p->scanRecPtr);
+ putReadyScanQueue(signal, operationRecPtr.p->scanRecPtr);
+ } else {
+ jam();
+ sendAcckeyconf(signal);
+ sendSignal(operationRecPtr.p->userblockref, GSN_ACCKEYCONF, signal, 6, JBB);
+ }//if
+ return operationRecPtr.p->elementIsDisappeared;
+}//Dbacc::executeNextOperation()
+
+/* --------------------------------------------------------------------------------- */
+/* TAKE_OUT_FRAG_WAIT_QUE */
+/* DESCRIPTION: AN OPERATION WHICH OWNS A LOCK ON AN ELEMENT IS KEPT IN A LIST */
+/* ON THE FRAGMENT. THIS LIST IS USED TO STOP AND RESTART THE QUEUED */
+/* OPERATIONS DURING THE CREATE CHECKPOINT PROCESS. THIS SUBROUTINE */
+/* TAKES AN OPERATION RECORD OUT OF THE LIST. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::takeOutFragWaitQue(Signal* signal)
+{
+ OperationrecPtr tofwqOperRecPtr;
+
+ if (operationRecPtr.p->opState == WAIT_IN_QUEUE) {
+ if (fragrecptr.p->sentWaitInQueOp == operationRecPtr.i) {
+ jam();
+ fragrecptr.p->sentWaitInQueOp = operationRecPtr.p->nextQueOp;
+ }//if
+ if (operationRecPtr.p->prevQueOp != RNIL) {
+ jam();
+ tofwqOperRecPtr.i = operationRecPtr.p->prevQueOp;
+ ptrCheckGuard(tofwqOperRecPtr, coprecsize, operationrec);
+ tofwqOperRecPtr.p->nextQueOp = operationRecPtr.p->nextQueOp;
+ } else {
+ jam();
+ fragrecptr.p->firstWaitInQueOp = operationRecPtr.p->nextQueOp;
+ }//if
+ if (operationRecPtr.p->nextQueOp != RNIL) {
+ jam();
+ tofwqOperRecPtr.i = operationRecPtr.p->nextQueOp;
+ ptrCheckGuard(tofwqOperRecPtr, coprecsize, operationrec);
+ tofwqOperRecPtr.p->prevQueOp = operationRecPtr.p->prevQueOp;
+ } else {
+ jam();
+ fragrecptr.p->lastWaitInQueOp = operationRecPtr.p->prevQueOp;
+ }//if
+ operationRecPtr.p->opState = FREE_OP;
+ return;
+ } else {
+ ndbrequire(operationRecPtr.p->opState == FREE_OP);
+ }//if
+}//Dbacc::takeOutFragWaitQue()
+
+/**
+ * takeOutLockOwnersList
+ *
+ * Description: Take out an operation from the doubly linked
+ * lock owners list on the fragment.
+ *
+ */
+void Dbacc::takeOutLockOwnersList(Signal* signal,
+ const OperationrecPtr& outOperPtr)
+{
+ const Uint32 Tprev = outOperPtr.p->prevLockOwnerOp;
+ const Uint32 Tnext = outOperPtr.p->nextLockOwnerOp;
+
+#ifdef VM_TRACE
+ // Check that operation is already in the list
+ OperationrecPtr tmpOperPtr;
+ bool inList = false;
+ tmpOperPtr.i = fragrecptr.p->lockOwnersList;
+ while (tmpOperPtr.i != RNIL){
+ ptrCheckGuard(tmpOperPtr, coprecsize, operationrec);
+ if (tmpOperPtr.i == outOperPtr.i)
+ inList = true;
+ tmpOperPtr.i = tmpOperPtr.p->nextLockOwnerOp;
+ }
+ ndbrequire(inList == true);
+#endif
+
+ ndbrequire(outOperPtr.p->lockOwner == ZTRUE);
+ outOperPtr.p->lockOwner = ZFALSE;
+
+ // Fast path through the code for the common case.
+ if ((Tprev == RNIL) && (Tnext == RNIL)) {
+ ndbrequire(fragrecptr.p->lockOwnersList == outOperPtr.i);
+ fragrecptr.p->lockOwnersList = RNIL;
+ return;
+ }
+
+ // Check previous operation
+ if (Tprev != RNIL) {
+ jam();
+ arrGuard(Tprev, coprecsize);
+ operationrec[Tprev].nextLockOwnerOp = Tnext;
+ } else {
+ fragrecptr.p->lockOwnersList = Tnext;
+ }//if
+
+ // Check next operation
+ if (Tnext == RNIL) {
+ return;
+ } else {
+ jam();
+ arrGuard(Tnext, coprecsize);
+ operationrec[Tnext].prevLockOwnerOp = Tprev;
+ }//if
+
+ return;
+}//Dbacc::takeOutLockOwnersList()
+
+/**
+ * insertLockOwnersList
+ *
+ * Description: Insert an operation first in the doubly linked lock owners
+ * list on the fragment.
+ *
+ */
+void Dbacc::insertLockOwnersList(Signal* signal,
+ const OperationrecPtr& insOperPtr)
+{
+ OperationrecPtr tmpOperPtr;
+
+#ifdef VM_TRACE
+ // Check that operation is not already in list
+ tmpOperPtr.i = fragrecptr.p->lockOwnersList;
+ while(tmpOperPtr.i != RNIL){
+ ptrCheckGuard(tmpOperPtr, coprecsize, operationrec);
+ ndbrequire(tmpOperPtr.i != insOperPtr.i);
+ tmpOperPtr.i = tmpOperPtr.p->nextLockOwnerOp;
+ }
+#endif
+
+ ndbrequire(insOperPtr.p->lockOwner == ZFALSE);
+
+ insOperPtr.p->lockOwner = ZTRUE;
+ insOperPtr.p->prevLockOwnerOp = RNIL;
+ tmpOperPtr.i = fragrecptr.p->lockOwnersList;
+ fragrecptr.p->lockOwnersList = insOperPtr.i;
+ insOperPtr.p->nextLockOwnerOp = tmpOperPtr.i;
+ if (tmpOperPtr.i == RNIL) {
+ return;
+ } else {
+ jam();
+ ptrCheckGuard(tmpOperPtr, coprecsize, operationrec);
+ tmpOperPtr.p->prevLockOwnerOp = insOperPtr.i;
+ }//if
+}//Dbacc::insertLockOwnersList()
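+
+// Illustrative sketch (editorial addition, not code from this patch): the head-insert
+// and unlink pattern that insertLockOwnersList() and takeOutLockOwnersList() implement
+// over index-based, RNIL-terminated lists. All names below are hypothetical, and the
+// RNIL_SKETCH value is an assumption made only for this sketch.
+#include <cstdint>
+#include <vector>
+
+constexpr uint32_t RNIL_SKETCH = 0xffffffffu;    // "no record" marker (assumed value)
+
+struct OpSketch { uint32_t prev = RNIL_SKETCH; uint32_t next = RNIL_SKETCH; };
+
+// Insert record i first in the list whose head index is 'head'.
+inline void sketchInsertFirst(std::vector<OpSketch>& ops, uint32_t& head, uint32_t i)
+{
+  ops[i].prev = RNIL_SKETCH;
+  ops[i].next = head;
+  if (head != RNIL_SKETCH) ops[head].prev = i;
+  head = i;
+}
+
+// Unlink record i, patching the list head or the neighbours as needed.
+inline void sketchTakeOut(std::vector<OpSketch>& ops, uint32_t& head, uint32_t i)
+{
+  const uint32_t prev = ops[i].prev;
+  const uint32_t next = ops[i].next;
+  if (prev != RNIL_SKETCH) ops[prev].next = next; else head = next;
+  if (next != RNIL_SKETCH) ops[next].prev = prev;
+}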
+
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF COMMIT AND ABORT MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* ALLOC_OVERFLOW_PAGE */
+/* DESCRIPTION: */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::allocOverflowPage(Signal* signal)
+{
+ DirRangePtr aopDirRangePtr;
+ DirectoryarrayPtr aopOverflowDirptr;
+ OverflowRecordPtr aopOverflowRecPtr;
+ Uint32 taopTmp1;
+ Uint32 taopTmp2;
+ Uint32 taopTmp3;
+
+ tresult = 0;
+ if ((cfirstfreepage == RNIL) &&
+ (cfreepage >= cpagesize)) {
+ jam();
+ zpagesize_error("Dbacc::allocOverflowPage");
+ tresult = ZPAGESIZE_ERROR;
+ return;
+ }//if
+ if (fragrecptr.p->firstFreeDirindexRec != RNIL) {
+ jam();
+ /* FRAGRECPTR:FIRST_FREE_DIRINDEX_REC POINTS */
+ /* TO THE FIRST ELEMENT IN A FREE LIST OF THE */
+ /* DIRECTORY INDEXES WHICH HAVE NULL AS PAGE */
+ aopOverflowRecPtr.i = fragrecptr.p->firstFreeDirindexRec;
+ ptrCheckGuard(aopOverflowRecPtr, coverflowrecsize, overflowRecord);
+ troOverflowRecPtr.p = aopOverflowRecPtr.p;
+ takeRecOutOfFreeOverdir(signal);
+ } else if (cfirstfreeoverrec == RNIL) {
+ jam();
+ tresult = ZOVER_REC_ERROR;
+ return;
+ } else if ((cfirstfreedir == RNIL) &&
+ (cdirarraysize <= cdirmemory)) {
+ jam();
+ tresult = ZDIRSIZE_ERROR;
+ return;
+ } else {
+ jam();
+ seizeOverRec(signal);
+ aopOverflowRecPtr = sorOverflowRecPtr;
+ aopOverflowRecPtr.p->dirindex = fragrecptr.p->lastOverIndex;
+ }//if
+ aopOverflowRecPtr.p->nextOverRec = RNIL;
+ aopOverflowRecPtr.p->prevOverRec = RNIL;
+ fragrecptr.p->firstOverflowRec = aopOverflowRecPtr.i;
+ fragrecptr.p->lastOverflowRec = aopOverflowRecPtr.i;
+ taopTmp1 = aopOverflowRecPtr.p->dirindex;
+ aopDirRangePtr.i = fragrecptr.p->overflowdir;
+ taopTmp2 = taopTmp1 >> 8;
+ taopTmp3 = taopTmp1 & 0xff;
+ ptrCheckGuard(aopDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(taopTmp2, 256);
+ if (aopDirRangePtr.p->dirArray[taopTmp2] == RNIL) {
+ jam();
+ seizeDirectory(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ aopDirRangePtr.p->dirArray[taopTmp2] = sdDirptr.i;
+ }//if
+ aopOverflowDirptr.i = aopDirRangePtr.p->dirArray[taopTmp2];
+ seizePage(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ ptrCheckGuard(aopOverflowDirptr, cdirarraysize, directoryarray);
+ aopOverflowDirptr.p->pagep[taopTmp3] = spPageptr.i;
+ tiopPageId = aopOverflowRecPtr.p->dirindex;
+ iopOverflowRecPtr = aopOverflowRecPtr;
+ iopPageptr = spPageptr;
+ initOverpage(signal);
+ aopOverflowRecPtr.p->overpage = spPageptr.i;
+ if (fragrecptr.p->lastOverIndex <= aopOverflowRecPtr.p->dirindex) {
+ jam();
+ ndbrequire(fragrecptr.p->lastOverIndex == aopOverflowRecPtr.p->dirindex);
+ fragrecptr.p->lastOverIndex++;
+ }//if
+}//Dbacc::allocOverflowPage()
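+
+// Illustrative sketch (editorial addition, not code from this patch): the two-level
+// directory lookup used above, where "dirindex >> 8" selects one of 256 directory
+// arrays in a DirRange and "dirindex & 0xff" selects the page slot within it (the
+// arrGuard(..., 256) calls above imply the 256-slot sizing). Names are hypothetical.
+#include <array>
+#include <cstdint>
+
+struct DirSketch      { std::array<uint32_t, 256> pagep; };      // page i-values
+struct DirRangeSketch { std::array<DirSketch*, 256> dirArray; }; // nullptr = unmapped
+
+// Resolve a directory index to a page i-value, or 0xffffffff when unmapped.
+inline uint32_t sketchLookupPage(const DirRangeSketch& range, uint32_t dirindex)
+{
+  const uint32_t hi = dirindex >> 8;     // which directory array
+  const uint32_t lo = dirindex & 0xff;   // which page slot within it
+  const DirSketch* dir = range.dirArray[hi];
+  return dir ? dir->pagep[lo] : 0xffffffffu;
+}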
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* EXPAND/SHRINK MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/*EXPANDCHECK EXPAND BUCKET ORD */
+/* SENDER: ACC, LEVEL B */
+/* INPUT: FRAGRECPTR, POINTS TO A FRAGMENT RECORD. */
+/* DESCRIPTION: A BUCKET OF A FRAGMENT PAGE WILL BE EXPANDED INTO TWO BUCKETS */
+/* ACCORDING TO LH3. */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* EXPANDCHECK EXPAND BUCKET ORD */
+/* ******************------------------------------+ */
+/* SENDER: ACC, LEVEL B */
+/* A BUCKET OF THE FRAGMENT WILL */
+/* BE EXPANDED ACCORDING TO LH3, */
+/* AND COMMIT TRANSACTION PROCESS */
+/* WILL BE CONTINUED */
+Uint32 Dbacc::checkScanExpand(Signal* signal)
+{
+ Uint32 Ti;
+ Uint32 TreturnCode = 0;
+ Uint32 TPageIndex;
+ Uint32 TDirInd;
+ Uint32 TSplit;
+ Uint32 TreleaseInd = 0;
+ Uint32 TreleaseScanBucket;
+ Uint32 TreleaseScanIndicator[4];
+ DirectoryarrayPtr TDirptr;
+ DirRangePtr TDirRangePtr;
+ Page8Ptr TPageptr;
+ ScanRecPtr TscanPtr;
+ RootfragmentrecPtr Trootfragrecptr;
+
+ Trootfragrecptr.i = fragrecptr.p->myroot;
+ TSplit = fragrecptr.p->p;
+ ptrCheckGuard(Trootfragrecptr, crootfragmentsize, rootfragmentrec);
+ for (Ti = 0; Ti < 4; Ti++) {
+ TreleaseScanIndicator[Ti] = 0;
+ if (Trootfragrecptr.p->scan[Ti] != RNIL) {
+ //-------------------------------------------------------------
+ // A scan is ongoing on this particular local fragment. We have
+ // to check its current state.
+ //-------------------------------------------------------------
+ TscanPtr.i = Trootfragrecptr.p->scan[Ti];
+ ptrCheckGuard(TscanPtr, cscanRecSize, scanRec);
+ if (TscanPtr.p->activeLocalFrag == fragrecptr.i) {
+ if (TscanPtr.p->scanBucketState == ScanRec::FIRST_LAP) {
+ if (TSplit == TscanPtr.p->nextBucketIndex) {
+ jam();
+ //-------------------------------------------------------------
+ // We are currently scanning this bucket. We cannot split it
+ // simultaneously with the scan. We have to pass this offer for
+ // splitting the bucket.
+ //-------------------------------------------------------------
+ TreturnCode = 1;
+ return TreturnCode;
+ } else if (TSplit > TscanPtr.p->nextBucketIndex) {
+ jam();
+ //-------------------------------------------------------------
+ // This bucket has not yet been scanned. We must reset the scanned
+ // bit indicator for this scan on this bucket.
+ //-------------------------------------------------------------
+ TreleaseScanIndicator[Ti] = 1;
+ TreleaseInd = 1;
+ } else {
+ jam();
+ }//if
+ } else if (TscanPtr.p->scanBucketState == ScanRec::SECOND_LAP) {
+ jam();
+ //-------------------------------------------------------------
+ // We are performing a second lap to handle buckets that were
+ // merged during the first lap of scanning. During this second
+ // lap we do not allow any splits or merges.
+ //-------------------------------------------------------------
+ TreturnCode = 1;
+ return TreturnCode;
+ } else {
+ ndbrequire(TscanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED);
+ jam();
+ //-------------------------------------------------------------
+ // The scan is completed and we can thus go ahead and perform
+ // the split.
+ //-------------------------------------------------------------
+ }//if
+ }//if
+ }//if
+ }//for
+ if (TreleaseInd == 1) {
+ TreleaseScanBucket = TSplit;
+ TDirRangePtr.i = fragrecptr.p->directory;
+ TPageIndex = TreleaseScanBucket & ((1 << fragrecptr.p->k) - 1); /* PAGE INDEX OBS K = 6 */
+ TDirInd = TreleaseScanBucket >> fragrecptr.p->k; /* DIRECTORY INDEX OBS K = 6 */
+ ptrCheckGuard(TDirRangePtr, cdirrangesize, dirRange);
+ arrGuard((TDirInd >> 8), 256);
+ TDirptr.i = TDirRangePtr.p->dirArray[TDirInd >> 8];
+ ptrCheckGuard(TDirptr, cdirarraysize, directoryarray);
+ TPageptr.i = TDirptr.p->pagep[TDirInd & 0xff];
+ ptrCheckGuard(TPageptr, cpagesize, page8);
+ for (Ti = 0; Ti < 4; Ti++) {
+ if (TreleaseScanIndicator[Ti] == 1) {
+ jam();
+ scanPtr.i = Trootfragrecptr.p->scan[Ti];
+ ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
+ rsbPageidptr = TPageptr;
+ trsbPageindex = TPageIndex;
+ releaseScanBucket(signal);
+ }//if
+ }//for
+ }//if
+ return TreturnCode;
+}//Dbacc::checkScanExpand()
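+
+// Illustrative sketch (editorial addition, not code from this patch): the per-scan
+// decision that checkScanExpand() makes above, written as a small pure function.
+// The lap names mirror ScanRec; everything else here is hypothetical.
+#include <cstdint>
+
+enum class SketchScanLap { FIRST_LAP, SECOND_LAP, SCAN_COMPLETED };
+
+struct SketchScanCheck {
+  bool blockSplit;       // the split must be skipped for now
+  bool resetScannedBit;  // the split bucket's scanned indicator must be cleared
+};
+
+inline SketchScanCheck sketchCheckSplitAgainstScan(SketchScanLap lap,
+                                                   uint32_t nextBucketIndex,
+                                                   uint32_t splitBucket)
+{
+  if (lap == SketchScanLap::SECOND_LAP)     return {true, false};  // no splits/merges
+  if (lap == SketchScanLap::SCAN_COMPLETED) return {false, false}; // scan done, split ok
+  if (splitBucket == nextBucketIndex)       return {true, false};  // bucket being scanned
+  if (splitBucket > nextBucketIndex)        return {false, true};  // not yet scanned
+  return {false, false};                                           // already scanned
+}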
+
+void Dbacc::execEXPANDCHECK2(Signal* signal)
+{
+ jamEntry();
+
+ if(refToBlock(signal->getSendersBlockRef()) == DBLQH){
+ jam();
+ reenable_expand_after_redo_log_exection_complete(signal);
+ return;
+ }
+
+ DirectoryarrayPtr newDirptr;
+
+ fragrecptr.i = signal->theData[0];
+ tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
+ Uint32 tmp = 1;
+ tmp = tmp << 31;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ fragrecptr.p->expandFlag = 0;
+ if (fragrecptr.p->slack < tmp) {
+ jam();
+ /* IT MEANS THAT IF SLACK > ZERO */
+ /*--------------------------------------------------------------*/
+ /* THE SLACK HAS IMPROVED AND IS NOW ACCEPTABLE AND WE */
+ /* CAN FORGET ABOUT THE EXPAND PROCESS. */
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ if (fragrecptr.p->firstOverflowRec == RNIL) {
+ jam();
+ allocOverflowPage(signal);
+ if (tresult > ZLIMIT_OF_ERROR) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* WE COULD NOT ALLOCATE ANY OVERFLOW PAGE. THUS WE HAVE TO STOP*/
+ /* THE EXPAND SINCE WE CANNOT GUARANTEE ITS COMPLETION. */
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ }//if
+ if (cfirstfreepage == RNIL) {
+ if (cfreepage >= cpagesize) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* WE HAVE TO STOP THE EXPAND PROCESS SINCE THERE ARE NO FREE */
+ /* PAGES. THIS MEANS THAT WE COULD BE FORCED TO CRASH SINCE WE */
+ /* CANNOT COMPLETE THE EXPAND. TO AVOID THE CRASH WE EXIT HERE. */
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ }//if
+ if (checkScanExpand(signal) == 1) {
+ jam();
+ /*--------------------------------------------------------------*/
+ // A scan state was inconsistent with performing an expand
+ // operation.
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_EXPAND) {
+ jam();
+ /*--------------------------------------------------------------*/
+ // We did not have enough undo log buffers to start up an
+ // expand operation
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ }//if
+
+ /*--------------------------------------------------------------------------*/
+ /* WE START BY FINDING THE PAGE, THE PAGE INDEX AND THE PAGE DIRECTORY*/
+ /* OF THE NEW BUCKET WHICH SHALL RECEIVE THE ELEMENT WHICH HAVE A 1 IN*/
+ /* THE NEXT HASH BIT. THIS BIT IS USED IN THE SPLIT MECHANISM TO */
+ /* DECIDE WHICH ELEMENT GOES WHERE. */
+ /*--------------------------------------------------------------------------*/
+ expDirRangePtr.i = fragrecptr.p->directory;
+ texpReceivedBucket = (fragrecptr.p->maxp + fragrecptr.p->p) + 1; /* RECEIVED BUCKET */
+ texpDirInd = texpReceivedBucket >> fragrecptr.p->k;
+ newDirptr.i = RNIL;
+ ptrNull(newDirptr);
+ texpDirRangeIndex = texpDirInd >> 8;
+ ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(texpDirRangeIndex, 256);
+ expDirptr.i = expDirRangePtr.p->dirArray[texpDirRangeIndex];
+ if (expDirptr.i == RNIL) {
+ jam();
+ seizeDirectory(signal);
+ if (tresult > ZLIMIT_OF_ERROR) {
+ jam();
+ return;
+ } else {
+ jam();
+ newDirptr = sdDirptr;
+ expDirptr = sdDirptr;
+ expDirRangePtr.p->dirArray[texpDirRangeIndex] = sdDirptr.i;
+ }//if
+ } else {
+ ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
+ }//if
+ texpDirPageIndex = texpDirInd & 0xff;
+ expPageptr.i = expDirptr.p->pagep[texpDirPageIndex];
+ if (expPageptr.i == RNIL) {
+ jam();
+ seizePage(signal);
+ if (tresult > ZLIMIT_OF_ERROR) {
+ jam();
+ if (newDirptr.i != RNIL) {
+ jam();
+ rdDirptr.i = newDirptr.i;
+ releaseDirectory(signal);
+ }//if
+ return;
+ }//if
+ expDirptr.p->pagep[texpDirPageIndex] = spPageptr.i;
+ tipPageId = texpDirInd;
+ inpPageptr = spPageptr;
+ initPage(signal);
+ fragrecptr.p->dirsize++;
+ expPageptr = spPageptr;
+ } else {
+ ptrCheckGuard(expPageptr, cpagesize, page8);
+ }//if
+
+ fragrecptr.p->expReceivePageptr = expPageptr.i;
+ fragrecptr.p->expReceiveIndex = texpReceivedBucket & ((1 << fragrecptr.p->k) - 1);
+ /*--------------------------------------------------------------------------*/
+ /* THE NEXT ACTION IS TO FIND THE PAGE, THE PAGE INDEX AND THE PAGE */
+ /* DIRECTORY OF THE BUCKET TO BE SPLIT. */
+ /*--------------------------------------------------------------------------*/
+ expDirRangePtr.i = fragrecptr.p->directory;
+ cexcPageindex = fragrecptr.p->p & ((1 << fragrecptr.p->k) - 1); /* PAGE INDEX OBS K = 6 */
+ texpDirInd = fragrecptr.p->p >> fragrecptr.p->k; /* DIRECTORY INDEX OBS K = 6 */
+ ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
+ arrGuard((texpDirInd >> 8), 256);
+ expDirptr.i = expDirRangePtr.p->dirArray[texpDirInd >> 8];
+ ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
+ excPageptr.i = expDirptr.p->pagep[texpDirInd & 0xff];
+ fragrecptr.p->expSenderIndex = cexcPageindex;
+ fragrecptr.p->expSenderPageptr = excPageptr.i;
+ if (excPageptr.i == RNIL) {
+ jam();
+ endofexpLab(signal); /* EMPTY BUCKET */
+ return;
+ }//if
+ fragrecptr.p->expReceiveForward = ZTRUE;
+ ptrCheckGuard(excPageptr, cpagesize, page8);
+ expandcontainer(signal);
+ endofexpLab(signal);
+ return;
+}//Dbacc::execEXPANDCHECK2()
+
+void Dbacc::endofexpLab(Signal* signal)
+{
+ fragrecptr.p->p++;
+ fragrecptr.p->slack += fragrecptr.p->maxloadfactor;
+ fragrecptr.p->expandCounter++;
+ if (fragrecptr.p->p > fragrecptr.p->maxp) {
+ jam();
+ fragrecptr.p->maxp = (fragrecptr.p->maxp << 1) | 1;
+ fragrecptr.p->lhdirbits++;
+ fragrecptr.p->hashcheckbit++;
+ fragrecptr.p->p = 0;
+ }//if
+ Uint32 noOfBuckets = (fragrecptr.p->maxp + 1) + fragrecptr.p->p;
+ Uint32 Thysteres = fragrecptr.p->maxloadfactor - fragrecptr.p->minloadfactor;
+ fragrecptr.p->slackCheck = noOfBuckets * Thysteres;
+ if (fragrecptr.p->slack > (1u << 31)) {
+ jam();
+ /* IT MEANS THAT IF SLACK < ZERO */
+ /* --------------------------------------------------------------------------------- */
+ /* IT IS STILL NECESSARY TO EXPAND THE FRAGMENT EVEN MORE. START IT FROM HERE */
+ /* WITHOUT WAITING FOR NEXT COMMIT ON THE FRAGMENT. */
+ /* --------------------------------------------------------------------------------- */
+ fragrecptr.p->expandFlag = 2;
+ signal->theData[0] = fragrecptr.i;
+ signal->theData[1] = fragrecptr.p->p;
+ signal->theData[2] = fragrecptr.p->maxp;
+ sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB);
+ }//if
+ return;
+}//Dbacc::endofexpLab()
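+
+// Illustrative sketch (editorial addition, not code from this patch): the LH3
+// split-pointer bookkeeping done in endofexpLab() and the bucket decomposition used
+// by execEXPANDCHECK2() ("PAGE INDEX OBS K = 6"). The hash-to-bucket mapping below is
+// the standard linear-hashing rule and is an assumption here, since getdirindex is
+// not part of this hunk; Lh3Sketch and its members are hypothetical names.
+#include <cstdint>
+
+struct Lh3Sketch {
+  uint32_t maxp;  // current mask, 2^n - 1
+  uint32_t p;     // split pointer (next bucket to split)
+  uint32_t k;     // buckets per page = 2^k
+
+  // Assumed standard LH mapping: use n bits; if the result is below p the bucket
+  // has already been split, so use n + 1 bits instead.
+  uint32_t bucket(uint32_t hash) const {
+    const uint32_t b = hash & maxp;
+    return (b < p) ? (hash & ((maxp << 1) | 1)) : b;
+  }
+
+  // Advance the split pointer the way endofexpLab() does after an expand.
+  void advanceAfterExpand() {
+    if (++p > maxp) { maxp = (maxp << 1) | 1; p = 0; }
+  }
+
+  // Decompose a bucket number into page index and directory index.
+  uint32_t pageIndex(uint32_t b) const { return b & ((1u << k) - 1); }
+  uint32_t dirIndex(uint32_t b)  const { return b >> k; }
+};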
+
+void Dbacc::reenable_expand_after_redo_log_exection_complete(Signal* signal){
+
+ tabptr.i = signal->theData[0];
+ Uint32 fragId = signal->theData[1];
+
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+ ndbrequire(getrootfragmentrec(signal, rootfragrecptr, fragId));
+#if 0
+ ndbout_c("reenable expand check for table %d fragment: %d",
+ tabptr.i, fragId);
+#endif
+
+ for (Uint32 i = 0; i < 2; i++) {
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[i];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ switch(fragrecptr.p->expandFlag){
+ case 0:
+ /**
+ * Hmm... this means that it has already been re-enabled...
+ */
+ ndbassert(false);
+ continue;
+ case 1:
+ /**
+ * Nothing is going on, start the expand check
+ */
+ case 2:
+ /**
+ * A shrink is running, do expand check anyway
+ * (to reset expandFlag)
+ */
+ fragrecptr.p->expandFlag = 2;
+ signal->theData[0] = fragrecptr.i;
+ signal->theData[1] = fragrecptr.p->p;
+ signal->theData[2] = fragrecptr.p->maxp;
+ sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB);
+ break;
+ }
+ }
+}
+
+void Dbacc::execDEBUG_SIG(Signal* signal)
+{
+ jamEntry();
+ expPageptr.i = signal->theData[0];
+
+ progError(__LINE__,
+ ERR_SR_UNDOLOG);
+ return;
+}//Dbacc::execDEBUG_SIG()
+
+/* --------------------------------------------------------------------------------- */
+/* EXPANDCONTAINER */
+/* INPUT: EXC_PAGEPTR (POINTER TO THE ACTIVE PAGE RECORD) */
+/* CEXC_PAGEINDEX (INDEX OF THE BUCKET). */
+/* */
+/* DESCRIPTION: THE HASH VALUE OF ALL ELEMENTS IN THE CONTAINER WILL BE */
+/* CHECKED. SOME OF THESE ELEMENTS HAVE TO MOVE TO THE NEW CONTAINER */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::expandcontainer(Signal* signal)
+{
+ Uint32 texcHashvalue;
+ Uint32 texcTmp;
+ Uint32 texcIndex;
+ Uint32 guard20;
+
+ cexcPrevpageptr = RNIL;
+ cexcPrevconptr = 0;
+ cexcForward = ZTRUE;
+ EXP_CONTAINER_LOOP:
+ cexcContainerptr = (cexcPageindex << ZSHIFT_PLUS) - (cexcPageindex << ZSHIFT_MINUS);
+ if (cexcForward == ZTRUE) {
+ jam();
+ cexcContainerptr = cexcContainerptr + ZHEAD_SIZE;
+ cexcElementptr = cexcContainerptr + ZCON_HEAD_SIZE;
+ } else {
+ jam();
+ cexcContainerptr = ((cexcContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE;
+ cexcElementptr = cexcContainerptr - 1;
+ }//if
+ arrGuard(cexcContainerptr, 2048);
+ cexcContainerhead = excPageptr.p->word32[cexcContainerptr];
+ cexcContainerlen = cexcContainerhead >> 26;
+ cexcMovedLen = ZCON_HEAD_SIZE;
+ if (cexcContainerlen <= ZCON_HEAD_SIZE) {
+ ndbrequire(cexcContainerlen >= ZCON_HEAD_SIZE);
+ jam();
+ goto NEXT_ELEMENT;
+ }//if
+ NEXT_ELEMENT_LOOP:
+ idrOperationRecPtr.i = RNIL;
+ ptrNull(idrOperationRecPtr);
+ /* --------------------------------------------------------------------------------- */
+ /* CEXC_PAGEINDEX PAGE INDEX OF CURRENT CONTAINER BEING EXAMINED. */
+ /* CEXC_CONTAINERPTR INDEX OF CURRENT CONTAINER BEING EXAMINED. */
+ /* CEXC_ELEMENTPTR INDEX OF CURRENT ELEMENT BEING EXAMINED. */
+ /* EXC_PAGEPTR PAGE WHERE CURRENT ELEMENT RESIDES. */
+ /* CEXC_PREVPAGEPTR PAGE OF PREVIOUS CONTAINER. */
+ /* CEXC_PREVCONPTR INDEX OF PREVIOUS CONTAINER */
+ /* CEXC_FORWARD DIRECTION OF CURRENT CONTAINER */
+ /* --------------------------------------------------------------------------------- */
+ arrGuard(cexcElementptr, 2048);
+ tidrElemhead = excPageptr.p->word32[cexcElementptr];
+ if (ElementHeader::getUnlocked(tidrElemhead)){
+ jam();
+ texcHashvalue = ElementHeader::getHashValuePart(tidrElemhead);
+ } else {
+ jam();
+ idrOperationRecPtr.i = ElementHeader::getOpPtrI(tidrElemhead);
+ ptrCheckGuard(idrOperationRecPtr, coprecsize, operationrec);
+ texcHashvalue = idrOperationRecPtr.p->hashvaluePart;
+ if ((fragrecptr.p->createLcp == ZTRUE) &&
+ (((texcHashvalue >> fragrecptr.p->hashcheckbit) & 1) != 0)) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // During local checkpoints we must ensure that we restore the element header in
+ // the unlocked state, with the hash value part in place and the tuple status zeroed.
+ // Otherwise a later insert over the same element will write an UNDO log that will
+ // ensure that the now removed element is restored together with its locked element
+ // header and without the hash value part.
+ /* --------------------------------------------------------------------------------- */
+ const Uint32 hv = idrOperationRecPtr.p->hashvaluePart;
+ const Uint32 eh = ElementHeader::setUnlocked(hv, 0);
+ excPageptr.p->word32[cexcElementptr] = eh;
+ }//if
+ }//if
+ if (((texcHashvalue >> fragrecptr.p->hashcheckbit) & 1) == 0) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THIS ELEMENT IS NOT TO BE MOVED. WE CALCULATE THE WHEREABOUTS OF THE NEXT */
+ /* ELEMENT AND PROCEED WITH THAT OR END THE SEARCH IF THERE ARE NO MORE */
+ /* ELEMENTS IN THIS CONTAINER. */
+ /* --------------------------------------------------------------------------------- */
+ goto NEXT_ELEMENT;
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* THE HASH BIT WAS SET AND WE SHALL MOVE THIS ELEMENT TO THE NEW BUCKET. */
+ /* WE START BY READING THE ELEMENT TO BE ABLE TO INSERT IT INTO THE NEW BUCKET.*/
+ /* THEN WE INSERT THE ELEMENT INTO THE NEW BUCKET. THE NEXT STEP IS TO DELETE */
+ /* THE ELEMENT FROM THIS BUCKET. THIS IS PERFORMED BY REPLACING IT WITH THE */
+ /* LAST ELEMENT IN THE BUCKET. IF THIS ELEMENT IS TO BE MOVED WE MOVE IT AND */
+ /* GET THE LAST ELEMENT AGAIN UNTIL WE EITHER FIND ONE THAT STAYS OR THIS */
+ /* ELEMENT IS THE LAST ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ texcTmp = cexcElementptr + cexcForward;
+ guard20 = fragrecptr.p->localkeylen - 1;
+ for (texcIndex = 0; texcIndex <= guard20; texcIndex++) {
+ arrGuard(texcIndex, 2);
+ arrGuard(texcTmp, 2048);
+ clocalkey[texcIndex] = excPageptr.p->word32[texcTmp];
+ texcTmp = texcTmp + cexcForward;
+ }//for
+ tidrPageindex = fragrecptr.p->expReceiveIndex;
+ idrPageptr.i = fragrecptr.p->expReceivePageptr;
+ ptrCheckGuard(idrPageptr, cpagesize, page8);
+ tidrForward = fragrecptr.p->expReceiveForward;
+ insertElement(signal);
+ fragrecptr.p->expReceiveIndex = tidrPageindex;
+ fragrecptr.p->expReceivePageptr = idrPageptr.i;
+ fragrecptr.p->expReceiveForward = tidrForward;
+ REMOVE_LAST_LOOP:
+ jam();
+ lastPageptr.i = excPageptr.i;
+ lastPageptr.p = excPageptr.p;
+ tlastContainerptr = cexcContainerptr;
+ lastPrevpageptr.i = cexcPrevpageptr;
+ ptrCheck(lastPrevpageptr, cpagesize, page8);
+ tlastPrevconptr = cexcPrevconptr;
+ arrGuard(tlastContainerptr, 2048);
+ tlastContainerhead = lastPageptr.p->word32[tlastContainerptr];
+ tlastContainerlen = tlastContainerhead >> 26;
+ tlastForward = cexcForward;
+ tlastPageindex = cexcPageindex;
+ getLastAndRemove(signal);
+ if (excPageptr.i == lastPageptr.i) {
+ if (cexcElementptr == tlastElementptr) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE CURRENT ELEMENT WAS ALSO THE LAST ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* THE CURRENT ELEMENT WAS NOT THE LAST ELEMENT. IF THE LAST ELEMENT SHOULD */
+ /* STAY WE COPY IT TO THE POSITION OF THE CURRENT ELEMENT, OTHERWISE WE INSERT */
+ /* INTO THE NEW BUCKET, REMOVE IT AND TRY WITH THE NEW LAST ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ idrOperationRecPtr.i = RNIL;
+ ptrNull(idrOperationRecPtr);
+ arrGuard(tlastElementptr, 2048);
+ tidrElemhead = lastPageptr.p->word32[tlastElementptr];
+ if (ElementHeader::getUnlocked(tidrElemhead)) {
+ jam();
+ texcHashvalue = ElementHeader::getHashValuePart(tidrElemhead);
+ } else {
+ jam();
+ idrOperationRecPtr.i = ElementHeader::getOpPtrI(tidrElemhead);
+ ptrCheckGuard(idrOperationRecPtr, coprecsize, operationrec);
+ texcHashvalue = idrOperationRecPtr.p->hashvaluePart;
+ if ((fragrecptr.p->createLcp == ZTRUE) &&
+ (((texcHashvalue >> fragrecptr.p->hashcheckbit) & 1) != 0)) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // During local checkpoints we must ensure that we restore the element header in
+ // the unlocked state, with the hash value part in place and the tuple status zeroed.
+ // Otherwise a later insert over the same element will write an UNDO log that will
+ // ensure that the now removed element is restored together with its locked element
+ // header and without the hash value part.
+ /* --------------------------------------------------------------------------------- */
+ const Uint32 hv = idrOperationRecPtr.p->hashvaluePart;
+ const Uint32 eh = ElementHeader::setUnlocked(hv, 0);
+ lastPageptr.p->word32[tlastElementptr] = eh;
+ }//if
+ }//if
+ if (((texcHashvalue >> fragrecptr.p->hashcheckbit) & 1) == 0) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE LAST ELEMENT IS NOT TO BE MOVED. WE COPY IT TO THE CURRENT ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ delPageptr = excPageptr;
+ tdelContainerptr = cexcContainerptr;
+ tdelForward = cexcForward;
+ tdelElementptr = cexcElementptr;
+ deleteElement(signal);
+ } else {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE LAST ELEMENT IS ALSO TO BE MOVED. */
+ /* --------------------------------------------------------------------------------- */
+ texcTmp = tlastElementptr + tlastForward;
+ for (texcIndex = 0; texcIndex < fragrecptr.p->localkeylen; texcIndex++) {
+ arrGuard(texcIndex, 2);
+ arrGuard(texcTmp, 2048);
+ clocalkey[texcIndex] = lastPageptr.p->word32[texcTmp];
+ texcTmp = texcTmp + tlastForward;
+ }//for
+ tidrPageindex = fragrecptr.p->expReceiveIndex;
+ idrPageptr.i = fragrecptr.p->expReceivePageptr;
+ ptrCheckGuard(idrPageptr, cpagesize, page8);
+ tidrForward = fragrecptr.p->expReceiveForward;
+ insertElement(signal);
+ fragrecptr.p->expReceiveIndex = tidrPageindex;
+ fragrecptr.p->expReceivePageptr = idrPageptr.i;
+ fragrecptr.p->expReceiveForward = tidrForward;
+ goto REMOVE_LAST_LOOP;
+ }//if
+ NEXT_ELEMENT:
+ arrGuard(cexcContainerptr, 2048);
+ cexcContainerhead = excPageptr.p->word32[cexcContainerptr];
+ cexcMovedLen = cexcMovedLen + fragrecptr.p->elementLength;
+ if ((cexcContainerhead >> 26) > cexcMovedLen) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE HAVE NOT YET MOVED THE COMPLETE CONTAINER. WE PROCEED WITH THE NEXT */
+ /* ELEMENT IN THE CONTAINER. IT IS IMPORTANT TO READ THE CONTAINER LENGTH */
+ /* FROM THE CONTAINER HEADER SINCE IT MIGHT CHANGE BY REMOVING THE LAST */
+ /* ELEMENT IN THE BUCKET. */
+ /* --------------------------------------------------------------------------------- */
+ cexcElementptr = cexcElementptr + (cexcForward * fragrecptr.p->elementLength);
+ goto NEXT_ELEMENT_LOOP;
+ }//if
+ if (((cexcContainerhead >> 7) & 3) != 0) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE PROCEED TO THE NEXT CONTAINER IN THE BUCKET. */
+ /* --------------------------------------------------------------------------------- */
+ cexcPrevpageptr = excPageptr.i;
+ cexcPrevconptr = cexcContainerptr;
+ nextcontainerinfoExp(signal);
+ goto EXP_CONTAINER_LOOP;
+ }//if
+}//Dbacc::expandcontainer()
+
+/* ******************--------------------------------------------------------------- */
+/* SHRINKCHECK JOIN BUCKET ORD */
+/* SENDER: ACC, LEVEL B */
+/* INPUT: FRAGRECPTR, POINTS TO A FRAGMENT RECORD. */
+/* DESCRIPTION: TWO BUCKETS OF A FRAGMENT PAGE WILL BE JOINED TOGETHER */
+/* ACCORDING TO LH3. */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* SHRINKCHECK JOIN BUCKET ORD */
+/* ******************------------------------------+ */
+/* SENDER: ACC, LEVEL B */
+/* TWO BUCKETS OF THE FRAGMENT */
+/* WILL BE JOINED ACCORDING TO LH3 */
+/* AND COMMIT TRANSACTION PROCESS */
+/* WILL BE CONTINUED */
+Uint32 Dbacc::checkScanShrink(Signal* signal)
+{
+ Uint32 Ti;
+ Uint32 TreturnCode = 0;
+ Uint32 TPageIndex;
+ Uint32 TDirInd;
+ Uint32 TmergeDest;
+ Uint32 TmergeSource;
+ Uint32 TreleaseScanBucket;
+ Uint32 TreleaseInd = 0;
+ Uint32 TreleaseScanIndicator[4];
+ DirectoryarrayPtr TDirptr;
+ DirRangePtr TDirRangePtr;
+ Page8Ptr TPageptr;
+ ScanRecPtr TscanPtr;
+ RootfragmentrecPtr Trootfragrecptr;
+
+ Trootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(Trootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (fragrecptr.p->p == 0) {
+ jam();
+ TmergeDest = fragrecptr.p->maxp >> 1;
+ } else {
+ jam();
+ TmergeDest = fragrecptr.p->p - 1;
+ }//if
+ TmergeSource = fragrecptr.p->maxp + fragrecptr.p->p;
+ for (Ti = 0; Ti < 4; Ti++) {
+ TreleaseScanIndicator[Ti] = 0;
+ if (Trootfragrecptr.p->scan[Ti] != RNIL) {
+ TscanPtr.i = Trootfragrecptr.p->scan[Ti];
+ ptrCheckGuard(TscanPtr, cscanRecSize, scanRec);
+ if (TscanPtr.p->activeLocalFrag == fragrecptr.i) {
+ //-------------------------------------------------------------
+ // A scan is ongoing on this particular local fragment. We have
+ // to check its current state.
+ //-------------------------------------------------------------
+ if (TscanPtr.p->scanBucketState == ScanRec::FIRST_LAP) {
+ jam();
+ if ((TmergeDest == TscanPtr.p->nextBucketIndex) ||
+ (TmergeSource == TscanPtr.p->nextBucketIndex)) {
+ jam();
+ //-------------------------------------------------------------
+ // We are currently scanning one of the buckets involved in the
+ // merge. We cannot merge while simultaneously performing a scan.
+ // We have to pass this offer for merging the buckets.
+ //-------------------------------------------------------------
+ TreturnCode = 1;
+ return TreturnCode;
+ } else if (TmergeDest < TscanPtr.p->nextBucketIndex) {
+ jam();
+ TreleaseScanIndicator[Ti] = 1;
+ TreleaseInd = 1;
+ }//if
+ } else if (TscanPtr.p->scanBucketState == ScanRec::SECOND_LAP) {
+ jam();
+ //-------------------------------------------------------------
+ // We are performing a second lap to handle buckets that were
+ // merged during the first lap of scanning. During this second
+ // lap we do not allow any splits or merges.
+ //-------------------------------------------------------------
+ TreturnCode = 1;
+ return TreturnCode;
+ } else if (TscanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) {
+ jam();
+ //-------------------------------------------------------------
+ // The scan is completed and we can thus go ahead and perform
+ // the merge.
+ //-------------------------------------------------------------
+ } else {
+ jam();
+ sendSystemerror(signal);
+ return TreturnCode;
+ }//if
+ }//if
+ }//if
+ }//for
+ if (TreleaseInd == 1) {
+ jam();
+ TreleaseScanBucket = TmergeSource;
+ TDirRangePtr.i = fragrecptr.p->directory;
+ TPageIndex = TreleaseScanBucket & ((1 << fragrecptr.p->k) - 1); /* PAGE INDEX OBS K = 6 */
+ TDirInd = TreleaseScanBucket >> fragrecptr.p->k; /* DIRECTORY INDEX OBS K = 6 */
+ ptrCheckGuard(TDirRangePtr, cdirrangesize, dirRange);
+ arrGuard((TDirInd >> 8), 256);
+ TDirptr.i = TDirRangePtr.p->dirArray[TDirInd >> 8];
+ ptrCheckGuard(TDirptr, cdirarraysize, directoryarray);
+ TPageptr.i = TDirptr.p->pagep[TDirInd & 0xff];
+ ptrCheckGuard(TPageptr, cpagesize, page8);
+ for (Ti = 0; Ti < 4; Ti++) {
+ if (TreleaseScanIndicator[Ti] == 1) {
+ jam();
+ scanPtr.i = Trootfragrecptr.p->scan[Ti];
+ ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
+ rsbPageidptr.i = TPageptr.i;
+ rsbPageidptr.p = TPageptr.p;
+ trsbPageindex = TPageIndex;
+ releaseScanBucket(signal);
+ if (TmergeDest < scanPtr.p->minBucketIndexToRescan) {
+ jam();
+ //-------------------------------------------------------------
+ // We have to keep track of the starting bucket to Rescan in the
+ // second lap.
+ //-------------------------------------------------------------
+ scanPtr.p->minBucketIndexToRescan = TmergeDest;
+ }//if
+ if (TmergeDest > scanPtr.p->maxBucketIndexToRescan) {
+ jam();
+ //-------------------------------------------------------------
+ // We have to keep track of the ending bucket to Rescan in the
+ // second lap.
+ //-------------------------------------------------------------
+ scanPtr.p->maxBucketIndexToRescan = TmergeDest;
+ }//if
+ }//if
+ }//for
+ }//if
+ return TreturnCode;
+}//Dbacc::checkScanShrink()
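+
+// Illustrative sketch (editorial addition, not code from this patch): how
+// checkScanShrink() above selects the two buckets of an LH3 merge. The last bucket
+// (maxp + p) is the merge source and the bucket it was once split from is the merge
+// destination. Names are hypothetical.
+#include <cstdint>
+#include <utility>
+
+// Returns {mergeDest, mergeSource} for split pointer p and mask maxp.
+inline std::pair<uint32_t, uint32_t> sketchMergeBuckets(uint32_t maxp, uint32_t p)
+{
+  const uint32_t dest   = (p == 0) ? (maxp >> 1) : (p - 1);
+  const uint32_t source = maxp + p;
+  return {dest, source};
+}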
+
+void Dbacc::execSHRINKCHECK2(Signal* signal)
+{
+ Uint32 tshrTmp1;
+
+ jamEntry();
+ fragrecptr.i = signal->theData[0];
+ Uint32 oldFlag = signal->theData[3];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ fragrecptr.p->expandFlag = oldFlag;
+ tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
+ if (fragrecptr.p->slack <= fragrecptr.p->slackCheck) {
+ jam();
+ /* TIME FOR JOIN BUCKETS PROCESS */
+ /*--------------------------------------------------------------*/
+ /* NO LONGER NECESSARY TO SHRINK THE FRAGMENT. */
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ if (fragrecptr.p->slack > (1u << 31)) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* THE SLACK IS NEGATIVE, IN THIS CASE WE WILL NOT NEED ANY */
+ /* SHRINK. */
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ texpDirInd = (fragrecptr.p->maxp + fragrecptr.p->p) >> fragrecptr.p->k;
+ if (((fragrecptr.p->maxp + fragrecptr.p->p) & ((1 << fragrecptr.p->k) - 1)) == 0) {
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (fragrecptr.p->fragState == LCP_SEND_PAGES) {
+ if (fragrecptr.p->lcpMaxDirIndex > texpDirInd) {
+ if (fragrecptr.p->lcpDirIndex <= texpDirInd) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* WE DO NOT ALLOW ANY SHRINKS THAT REMOVE PAGES THAT ARE */
+ /* NEEDED AS PART OF THE LOCAL CHECKPOINT. */
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ }//if
+ }//if
+ }//if
+ }//if
+ if (fragrecptr.p->firstOverflowRec == RNIL) {
+ jam();
+ allocOverflowPage(signal);
+ if (tresult > ZLIMIT_OF_ERROR) {
+ jam();
+ return;
+ }//if
+ }//if
+ if (cfirstfreepage == RNIL) {
+ if (cfreepage >= cpagesize) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* WE HAVE TO STOP THE SHRINK PROCESS SINCE THERE ARE NO FREE */
+ /* PAGES. THIS MEANS THAT WE COULD BE FORCED TO CRASH SINCE WE */
+ /* CANNOT COMPLETE THE SHRINK. TO AVOID THE CRASH WE EXIT HERE. */
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ }//if
+ if (checkScanShrink(signal) == 1) {
+ jam();
+ /*--------------------------------------------------------------*/
+ // A scan state was inconsistent with performing a shrink
+ // operation.
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_EXPAND) {
+ jam();
+ /*--------------------------------------------------------------*/
+ // We did not have enough undo log buffers to start up a
+ // shrink operation
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ }//if
+ if (fragrecptr.p->p == 0) {
+ jam();
+ fragrecptr.p->maxp = fragrecptr.p->maxp >> 1;
+ fragrecptr.p->p = fragrecptr.p->maxp;
+ fragrecptr.p->lhdirbits--;
+ fragrecptr.p->hashcheckbit--;
+ } else {
+ jam();
+ fragrecptr.p->p--;
+ }//if
+
+ /*--------------------------------------------------------------------------*/
+ /* WE START BY FINDING THE NECESSARY INFORMATION OF THE BUCKET TO BE */
+ /* REMOVED WHICH WILL SEND ITS ELEMENTS TO THE RECEIVING BUCKET. */
+ /*--------------------------------------------------------------------------*/
+ expDirRangePtr.i = fragrecptr.p->directory;
+ cexcPageindex = ((fragrecptr.p->maxp + fragrecptr.p->p) + 1) & ((1 << fragrecptr.p->k) - 1);
+ texpDirInd = ((fragrecptr.p->maxp + fragrecptr.p->p) + 1) >> fragrecptr.p->k;
+ texpDirRangeIndex = texpDirInd >> 8;
+ texpDirPageIndex = texpDirInd & 0xff;
+ ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(texpDirRangeIndex, 256);
+ expDirptr.i = expDirRangePtr.p->dirArray[texpDirRangeIndex];
+ ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
+ excPageptr.i = expDirptr.p->pagep[texpDirPageIndex];
+ fragrecptr.p->expSenderDirptr = expDirptr.i;
+ fragrecptr.p->expSenderIndex = cexcPageindex;
+ fragrecptr.p->expSenderPageptr = excPageptr.i;
+ fragrecptr.p->expSenderDirIndex = texpDirInd;
+ /*--------------------------------------------------------------------------*/
+ /* WE NOW PROCEED BY FINDING THE NECESSARY INFORMATION ABOUT THE */
+ /* RECEIVING BUCKET. */
+ /*--------------------------------------------------------------------------*/
+ expDirRangePtr.i = fragrecptr.p->directory;
+ texpReceivedBucket = fragrecptr.p->p >> fragrecptr.p->k;
+ ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
+ arrGuard((texpReceivedBucket >> 8), 256);
+ expDirptr.i = expDirRangePtr.p->dirArray[texpReceivedBucket >> 8];
+ ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
+ fragrecptr.p->expReceivePageptr = expDirptr.p->pagep[texpReceivedBucket & 0xff];
+ fragrecptr.p->expReceiveIndex = fragrecptr.p->p & ((1 << fragrecptr.p->k) - 1);
+ fragrecptr.p->expReceiveForward = ZTRUE;
+ if (excPageptr.i == RNIL) {
+ jam();
+ endofshrinkbucketLab(signal); /* EMPTY BUCKET */
+ return;
+ }//if
+ /*--------------------------------------------------------------------------*/
+ /* INITIALISE THE VARIABLES FOR THE SHRINK PROCESS. */
+ /*--------------------------------------------------------------------------*/
+ ptrCheckGuard(excPageptr, cpagesize, page8);
+ cexcForward = ZTRUE;
+ cexcContainerptr = (cexcPageindex << ZSHIFT_PLUS) - (cexcPageindex << ZSHIFT_MINUS);
+ cexcContainerptr = cexcContainerptr + ZHEAD_SIZE;
+ arrGuard(cexcContainerptr, 2048);
+ cexcContainerhead = excPageptr.p->word32[cexcContainerptr];
+ cexcContainerlen = cexcContainerhead >> 26;
+ if (cexcContainerlen <= ZCON_HEAD_SIZE) {
+ ndbrequire(cexcContainerlen == ZCON_HEAD_SIZE);
+ } else {
+ jam();
+ shrinkcontainer(signal);
+ }//if
+ /*--------------------------------------------------------------------------*/
+ /* THIS CONTAINER IS NOT YET EMPTY AND WE REMOVE ALL THE ELEMENTS. */
+ /*--------------------------------------------------------------------------*/
+ if (((cexcContainerhead >> 10) & 1) == 1) {
+ jam();
+ rlPageptr = excPageptr;
+ trlPageindex = cexcPageindex;
+ trlRelCon = ZFALSE;
+ turlIndex = cexcContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
+ releaseRightlist(signal);
+ }//if
+ tshrTmp1 = ZCON_HEAD_SIZE;
+ tshrTmp1 = tshrTmp1 << 26;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = excPageptr.p;
+ cundoinfolength = 1;
+ cundoElemIndex = cexcContainerptr;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(excPageptr, cexcContainerptr, tshrTmp1);
+ arrGuard(cexcContainerptr, 2048);
+ excPageptr.p->word32[cexcContainerptr] = tshrTmp1;
+ if (((cexcContainerhead >> 7) & 0x3) == 0) {
+ jam();
+ endofshrinkbucketLab(signal);
+ return;
+ }//if
+ nextcontainerinfoExp(signal);
+ do {
+ cexcContainerptr = (cexcPageindex << ZSHIFT_PLUS) - (cexcPageindex << ZSHIFT_MINUS);
+ if (cexcForward == ZTRUE) {
+ jam();
+ cexcContainerptr = cexcContainerptr + ZHEAD_SIZE;
+ } else {
+ jam();
+ cexcContainerptr = ((cexcContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE;
+ }//if
+ arrGuard(cexcContainerptr, 2048);
+ cexcContainerhead = excPageptr.p->word32[cexcContainerptr];
+ cexcContainerlen = cexcContainerhead >> 26;
+ ndbrequire(cexcContainerlen > ZCON_HEAD_SIZE);
+ /*--------------------------------------------------------------------------*/
+ /* THIS CONTAINER IS NOT YET EMPTY AND WE REMOVE ALL THE ELEMENTS. */
+ /*--------------------------------------------------------------------------*/
+ shrinkcontainer(signal);
+ cexcPrevpageptr = excPageptr.i;
+ cexcPrevpageindex = cexcPageindex;
+ cexcPrevforward = cexcForward;
+ if (((cexcContainerhead >> 7) & 0x3) != 0) {
+ jam();
+ /*--------------------------------------------------------------------------*/
+ /* WE MUST CALL THE NEXT CONTAINER INFO ROUTINE BEFORE WE RELEASE THE */
+ /* CONTAINER SINCE THE RELEASE WILL OVERWRITE THE NEXT POINTER. */
+ /*--------------------------------------------------------------------------*/
+ nextcontainerinfoExp(signal);
+ }//if
+ rlPageptr.i = cexcPrevpageptr;
+ ptrCheckGuard(rlPageptr, cpagesize, page8);
+ trlPageindex = cexcPrevpageindex;
+ if (cexcPrevforward == ZTRUE) {
+ jam();
+ if (((cexcContainerhead >> 10) & 1) == 1) {
+ jam();
+ trlRelCon = ZFALSE;
+ turlIndex = cexcContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
+ releaseRightlist(signal);
+ }//if
+ trlRelCon = ZTRUE;
+ tullIndex = cexcContainerptr;
+ releaseLeftlist(signal);
+ } else {
+ jam();
+ if (((cexcContainerhead >> 10) & 1) == 1) {
+ jam();
+ trlRelCon = ZFALSE;
+ tullIndex = cexcContainerptr - (ZBUF_SIZE - ZCON_HEAD_SIZE);
+ releaseLeftlist(signal);
+ }//if
+ trlRelCon = ZTRUE;
+ turlIndex = cexcContainerptr;
+ releaseRightlist(signal);
+ }//if
+ } while (((cexcContainerhead >> 7) & 0x3) != 0);
+ endofshrinkbucketLab(signal);
+ return;
+}//Dbacc::execSHRINKCHECK2()
+
+void Dbacc::endofshrinkbucketLab(Signal* signal)
+{
+ fragrecptr.p->expandCounter--;
+ fragrecptr.p->slack -= fragrecptr.p->maxloadfactor;
+ if (fragrecptr.p->expSenderIndex == 0) {
+ jam();
+ fragrecptr.p->dirsize--;
+ if (fragrecptr.p->expSenderPageptr != RNIL) {
+ jam();
+ rpPageptr.i = fragrecptr.p->expSenderPageptr;
+ ptrCheckGuard(rpPageptr, cpagesize, page8);
+ releasePage(signal);
+ expDirptr.i = fragrecptr.p->expSenderDirptr;
+ ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
+ expDirptr.p->pagep[fragrecptr.p->expSenderDirIndex & 0xff] = RNIL;
+ }//if
+ if (((((fragrecptr.p->p + fragrecptr.p->maxp) + 1) >> fragrecptr.p->k) & 0xff) == 0) {
+ jam();
+ rdDirptr.i = fragrecptr.p->expSenderDirptr;
+ releaseDirectory(signal);
+ expDirRangePtr.i = fragrecptr.p->directory;
+ ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
+ arrGuard((fragrecptr.p->expSenderDirIndex >> 8), 256);
+ expDirRangePtr.p->dirArray[fragrecptr.p->expSenderDirIndex >> 8] = RNIL;
+ }//if
+ }//if
+ if (fragrecptr.p->slack < (1u << 31)) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* THE SLACK IS POSITIVE, IN THIS CASE WE WILL CHECK WHETHER */
+ /* WE WILL CONTINUE PERFORM ANOTHER SHRINK. */
+ /*--------------------------------------------------------------*/
+ Uint32 noOfBuckets = (fragrecptr.p->maxp + 1) + fragrecptr.p->p;
+ Uint32 Thysteresis = fragrecptr.p->maxloadfactor - fragrecptr.p->minloadfactor;
+ fragrecptr.p->slackCheck = noOfBuckets * Thysteresis;
+ if (fragrecptr.p->slack > Thysteresis) {
+ /*--------------------------------------------------------------*/
+ /* IT IS STILL NECESSARY TO SHRINK THE FRAGMENT MORE. THIS*/
+ /* CAN HAPPEN WHEN A NUMBER OF SHRINKS GET REJECTED */
+ /* DURING A LOCAL CHECKPOINT. WE START A NEW SHRINK */
+ /* IMMEDIATELY FROM HERE WITHOUT WAITING FOR A COMMIT TO */
+ /* START IT. */
+ /*--------------------------------------------------------------*/
+ if (fragrecptr.p->expandCounter > 0) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* IT IS VERY IMPORTANT TO NOT TRY TO SHRINK MORE THAN */
+ /* WAS EXPANDED. IF MAXP IS SET TO A VALUE BELOW 63 THEN */
+ /* WE WILL LOSE RECORDS SINCE GETDIRINDEX CANNOT HANDLE */
+ /* SHRINKING BELOW 2^K - 1 (NOW 63). THIS WAS A BUG THAT */
+ /* WAS REMOVED 2000-05-12. */
+ /*--------------------------------------------------------------*/
+ signal->theData[0] = fragrecptr.i;
+ signal->theData[1] = fragrecptr.p->p;
+ signal->theData[2] = fragrecptr.p->maxp;
+ signal->theData[3] = fragrecptr.p->expandFlag;
+ ndbrequire(fragrecptr.p->expandFlag < 2);
+ fragrecptr.p->expandFlag = 2;
+ sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 4, JBB);
+ }//if
+ }//if
+ }//if
+ ndbrequire(fragrecptr.p->maxp >= (Uint32)((1 << fragrecptr.p->k) - 1));
+ return;
+}//Dbacc::endofshrinkbucketLab()
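+
+// Illustrative sketch (editorial addition, not code from this patch): "slack" is kept
+// in an unsigned word and the code above treats values greater than 2^31 as negative
+// (a two's-complement view). These helpers mirror the tests at the start of
+// execSHRINKCHECK2() and the slackCheck value recomputed in this function; the names
+// are hypothetical.
+#include <cstdint>
+
+// True when the unsigned slack word encodes a negative value.
+inline bool sketchSlackIsNegative(uint32_t slack) { return slack > (1u << 31); }
+
+// slackCheck as recomputed above: number of buckets times the load-factor hysteresis.
+inline uint32_t sketchSlackCheck(uint32_t maxp, uint32_t p,
+                                 uint32_t maxLoadFactor, uint32_t minLoadFactor)
+{
+  const uint32_t noOfBuckets = (maxp + 1) + p;
+  return noOfBuckets * (maxLoadFactor - minLoadFactor);
+}
+
+// A shrink is considered only when slack is non-negative and above slackCheck,
+// mirroring the first two tests in execSHRINKCHECK2().
+inline bool sketchShrinkWanted(uint32_t slack, uint32_t slackCheck)
+{
+  return !sketchSlackIsNegative(slack) && slack > slackCheck;
+}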
+
+/* --------------------------------------------------------------------------------- */
+/* SHRINKCONTAINER */
+/* INPUT: EXC_PAGEPTR (POINTER TO THE ACTIVE PAGE RECORD) */
+/* CEXC_CONTAINERLEN (LENGTH OF THE CONTAINER). */
+/* CEXC_CONTAINERPTR (ARRAY INDEX OF THE CONTAINER). */
+/* CEXC_FORWARD (CONTAINER FORWARD (+1) OR BACKWARD (-1)) */
+/* */
+/* DESCRIPTION: ALL ELEMENTS OF THE ACTIVE CONTAINER HAVE TO MOVE TO THE NEW */
+/* CONTAINER. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::shrinkcontainer(Signal* signal)
+{
+ Uint32 tshrElementptr;
+ Uint32 tshrRemLen;
+ Uint32 tshrInc;
+ Uint32 tshrTmp;
+ Uint32 tshrIndex;
+ Uint32 guard21;
+
+ tshrRemLen = cexcContainerlen - ZCON_HEAD_SIZE;
+ tshrInc = fragrecptr.p->elementLength;
+ if (cexcForward == ZTRUE) {
+ jam();
+ tshrElementptr = cexcContainerptr + ZCON_HEAD_SIZE;
+ } else {
+ jam();
+ tshrElementptr = cexcContainerptr - 1;
+ }//if
+ SHR_LOOP:
+ idrOperationRecPtr.i = RNIL;
+ ptrNull(idrOperationRecPtr);
+ /* --------------------------------------------------------------------------------- */
+ /* THE CODE BELOW IS ALL USED TO PREPARE FOR THE CALL TO INSERT_ELEMENT AND */
+ /* HANDLE THE RESULT FROM INSERT_ELEMENT. INSERT_ELEMENT INSERTS THE ELEMENT */
+ /* INTO ANOTHER BUCKET. */
+ /* --------------------------------------------------------------------------------- */
+ arrGuard(tshrElementptr, 2048);
+ tidrElemhead = excPageptr.p->word32[tshrElementptr];
+ if (ElementHeader::getLocked(tidrElemhead)) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* IF THE ELEMENT IS LOCKED WE MUST UPDATE THE ELEMENT INFO IN THE OPERATION */
+ /* RECORD OWNING THE LOCK. WE DO THIS BY READING THE OPERATION RECORD POINTER */
+ /* FROM THE ELEMENT HEADER. */
+ /* --------------------------------------------------------------------------------- */
+ idrOperationRecPtr.i = ElementHeader::getOpPtrI(tidrElemhead);
+ ptrCheckGuard(idrOperationRecPtr, coprecsize, operationrec);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // During local checkpoints we must ensure that we restore the element header in
+ // the unlocked state, with the hash value part in place and the tuple status zeroed.
+ // Otherwise a later insert over the same element will write an UNDO log that will
+ // ensure that the now removed element is restored together with its locked element
+ // header and without the hash value part.
+ /* --------------------------------------------------------------------------------- */
+ const Uint32 hv = idrOperationRecPtr.p->hashvaluePart;
+ const Uint32 eh = ElementHeader::setUnlocked(hv, 0);
+ excPageptr.p->word32[tshrElementptr] = eh;
+ }//if
+ }//if
+ tshrTmp = tshrElementptr + cexcForward;
+ guard21 = fragrecptr.p->localkeylen - 1;
+ for (tshrIndex = 0; tshrIndex <= guard21; tshrIndex++) {
+ arrGuard(tshrIndex, 2);
+ arrGuard(tshrTmp, 2048);
+ clocalkey[tshrIndex] = excPageptr.p->word32[tshrTmp];
+ tshrTmp = tshrTmp + cexcForward;
+ }//for
+ tidrPageindex = fragrecptr.p->expReceiveIndex;
+ idrPageptr.i = fragrecptr.p->expReceivePageptr;
+ ptrCheckGuard(idrPageptr, cpagesize, page8);
+ tidrForward = fragrecptr.p->expReceiveForward;
+ insertElement(signal);
+ /* --------------------------------------------------------------------------------- */
+ /* TAKE CARE OF RESULT FROM INSERT_ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ fragrecptr.p->expReceiveIndex = tidrPageindex;
+ fragrecptr.p->expReceivePageptr = idrPageptr.i;
+ fragrecptr.p->expReceiveForward = tidrForward;
+ if (tshrRemLen < tshrInc) {
+ jam();
+ sendSystemerror(signal);
+ }//if
+ tshrRemLen = tshrRemLen - tshrInc;
+ if (tshrRemLen != 0) {
+ jam();
+ tshrElementptr = tshrTmp;
+ goto SHR_LOOP;
+ }//if
+}//Dbacc::shrinkcontainer()
+
+/* --------------------------------------------------------------------------------- */
+/* NEXTCONTAINERINFO_EXP */
+/* DESCRIPTION: THE CONTAINER HEAD WILL BE CHECKED TO CALCULATE INFORMATION */
+/* ABOUT THE NEXT CONTAINER IN THE BUCKET. */
+/* INPUT: CEXC_CONTAINERHEAD */
+/* CEXC_CONTAINERPTR */
+/* EXC_PAGEPTR */
+/* OUTPUT: */
+/* CEXC_PAGEINDEX (INDEX FROM WHICH PAGE INDEX CAN BE CALCULATED). */
+/* EXC_PAGEPTR (PAGE REFERENCE OF NEXT CONTAINER) */
+/* CEXC_FORWARD */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::nextcontainerinfoExp(Signal* signal)
+{
+ tnciNextSamePage = (cexcContainerhead >> 9) & 0x1; /* CHECK BIT FOR CHECKING WHERE */
+ /* THE NEXT CONTAINER IS IN THE SAME PAGE */
+ cexcPageindex = cexcContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */
+ if (((cexcContainerhead >> 7) & 3) == ZLEFT) {
+ jam();
+ cexcForward = ZTRUE;
+ } else if (((cexcContainerhead >> 7) & 3) == ZRIGHT) {
+ jam();
+ cexcForward = cminusOne;
+ } else {
+ jam();
+ sendSystemerror(signal);
+ cexcForward = 0; /* DUMMY FOR COMPILER */
+ }//if
+ if (tnciNextSamePage == ZFALSE) {
+ jam();
+ /* NEXT CONTAINER IS IN AN OVERFLOW PAGE */
+ arrGuard(cexcContainerptr + 1, 2048);
+ tnciTmp = excPageptr.p->word32[cexcContainerptr + 1];
+ nciOverflowrangeptr.i = fragrecptr.p->overflowdir;
+ ptrCheckGuard(nciOverflowrangeptr, cdirrangesize, dirRange);
+ arrGuard((tnciTmp >> 8), 256);
+ nciOverflowDirptr.i = nciOverflowrangeptr.p->dirArray[tnciTmp >> 8];
+ ptrCheckGuard(nciOverflowDirptr, cdirarraysize, directoryarray);
+ excPageptr.i = nciOverflowDirptr.p->pagep[tnciTmp & 0xff];
+ ptrCheckGuard(excPageptr, cpagesize, page8);
+ }//if
+}//Dbacc::nextcontainerinfoExp()
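+
+// Illustrative sketch (editorial addition, not code from this patch): the container
+// header bit fields as they are read by expandcontainer(), execSHRINKCHECK2() and
+// nextcontainerinfoExp() above. ContainerHeadSketch is a hypothetical name.
+#include <cstdint>
+
+struct ContainerHeadSketch {
+  uint32_t word;
+
+  // Bits 26..31: container length in 32-bit words, including the container head.
+  uint32_t length() const { return word >> 26; }
+
+  // Bits 7..8: side of the next container (ZLEFT/ZRIGHT); zero means no next container.
+  uint32_t nextSide() const { return (word >> 7) & 0x3; }
+  bool hasNext() const { return nextSide() != 0; }
+
+  // Bit 9: the next container is on the same page as this one.
+  bool nextOnSamePage() const { return ((word >> 9) & 0x1) != 0; }
+
+  // Bits 0..6: page index of the next container.
+  uint32_t nextPageIndex() const { return word & 0x7f; }
+};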
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF EXPAND/SHRINK MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* LOCAL CHECKPOINT MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* LCP_FRAGIDREQ */
+/* SENDER: LQH, LEVEL B */
+/* ENTER LCP_FRAGIDREQ WITH */
+/* TUSERPTR LQH CONNECTION PTR */
+/* TUSERBLOCKREF, LQH BLOCK REFERENCE */
+/* TCHECKPOINTID, THE CHECKPOINT NUMBER TO USE */
+/* (E.G. 1,2 OR 3) */
+/* TABPTR, TABLE ID = TABLE RECORD POINTER */
+/* TFID ROOT FRAGMENT ID */
+/* CACTIVE_UNDO_FILE_VERSION UNDO FILE VERSION 0,1,2 OR 3. */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* LCP_FRAGIDREQ REQUEST FOR LIST OF STOPPED OPERATIONS */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execLCP_FRAGIDREQ(Signal* signal)
+{
+ jamEntry();
+ tuserptr = signal->theData[0]; /* LQH CONNECTION PTR */
+ tuserblockref = signal->theData[1]; /* LQH BLOCK REFERENCE */
+ tcheckpointid = signal->theData[2]; /* THE CHECKPOINT NUMBER TO USE */
+ /* (E.G. 1,2 OR 3) */
+ tabptr.i = signal->theData[3]; /* TABLE ID = TABLE RECORD POINTER */
+ ptrCheck(tabptr, ctablesize, tabrec);
+ tfid = signal->theData[4]; /* ROOT FRAGMENT ID */
+ cactiveUndoFileVersion = signal->theData[5]; /* UNDO FILE VERSION 0,1,2 OR 3. */
+ tresult = 0;
+ ndbrequire(getrootfragmentrec(signal, rootfragrecptr, tfid));
+ ndbrequire(rootfragrecptr.p->rootState == ACTIVEROOT);
+ seizeLcpConnectRec(signal);
+ initLcpConnRec(signal);
+ lcpConnectptr.p->rootrecptr = rootfragrecptr.i;
+ rootfragrecptr.p->lcpPtr = lcpConnectptr.i;
+ lcpConnectptr.p->localCheckPid = tcheckpointid;
+ lcpConnectptr.p->lcpstate = LCP_ACTIVE;
+ rootfragrecptr.p->rootState = LCP_CREATION;
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ /* D6 AT FSOPENREQ =#010003FF. */
+ tlfrTmp1 = 0x010003ff; /* FILE TYPE = .DATA ,VERSION OF FILENAME = 1 */
+ tlfrTmp2 = 0x301; /* D7 CREATE, WRITE ONLY, TRUNCATE TO ZERO */
+ ndbrequire(cfsFirstfreeconnect != RNIL);
+ seizeFsConnectRec(signal);
+ fsConnectptr.p->fragrecPtr = fragrecptr.i;
+ fsConnectptr.p->fsState = WAIT_OPEN_DATA_FILE_FOR_WRITE;
+ /* ----------- FILENAME (FILESYSTEM)/D3/DBACC/"T"TABID/"F"FRAGID/"S"VERSIONID.DATA ------------ */
+ /* ************************ */
+ /* FSOPENREQ */
+ /* ************************ */
+ signal->theData[0] = cownBlockref;
+ signal->theData[1] = fsConnectptr.i;
+ signal->theData[2] = tabptr.i; /* TABLE IDENTITY */
+ signal->theData[3] = rootfragrecptr.p->fragmentid[0]; /* FRAGMENT IDENTITY */
+ signal->theData[4] = lcpConnectptr.p->localCheckPid; /* CHECKPOINT ID */
+ signal->theData[5] = tlfrTmp1;
+ signal->theData[6] = tlfrTmp2;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ return;
+}//Dbacc::execLCP_FRAGIDREQ()
+
+/* ******************--------------------------------------------------------------- */
+/* FSOPENCONF OPENFILE CONF */
+/* SENDER: FS, LEVEL B */
+/* ENTER FSOPENCONF WITH */
+/* FS_CONNECTPTR, FS_CONNECTION PTR */
+/* TUSERPOINTER, FILE POINTER */
+/* ******************--------------------------------------------------------------- */
+void Dbacc::lcpFsOpenConfLab(Signal* signal)
+{
+ fsConnectptr.p->fsPtr = tuserptr;
+ fragrecptr.i = fsConnectptr.p->fragrecPtr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ fragrecptr.p->activeDataFilePage = 1; /* ZERO IS KEPT FOR PAGE_ZERO */
+ fragrecptr.p->fsConnPtr = fsConnectptr.i;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ if (rootfragrecptr.p->fragmentptr[0] == fragrecptr.i) {
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
+ ptrCheck(fragrecptr, cfragmentsize, fragmentrec);
+ /* ----------- FILENAME (FILESYSTEM)/D3/DBACC/"T"TABID/"F"FRAGID/"S"VERSIONID.DATA ------------ */
+ /* D6 AT FSOPENREQ =#010003FF. */
+ tlfrTmp1 = 0x010003ff; /* FILE TYPE = .DATA ,VERSION OF FILENAME = 1 */
+ tlfrTmp2 = 0x301; /* D7 CREATE, WRITE ONLY, TRUNCATE TO ZERO */
+ ndbrequire(cfsFirstfreeconnect != RNIL);
+ seizeFsConnectRec(signal);
+ fsConnectptr.p->fragrecPtr = fragrecptr.i;
+ fsConnectptr.p->fsState = WAIT_OPEN_DATA_FILE_FOR_WRITE;
+ /* ************************ */
+ /* FSOPENREQ */
+ /* ************************ */
+ signal->theData[0] = cownBlockref;
+ signal->theData[1] = fsConnectptr.i;
+ signal->theData[2] = rootfragrecptr.p->mytabptr; /* TABLE IDENTITY */
+ signal->theData[3] = rootfragrecptr.p->fragmentid[1]; /* FRAGMENT IDENTITY */
+ signal->theData[4] = lcpConnectptr.p->localCheckPid; /* CHECKPOINT ID */
+ signal->theData[5] = tlfrTmp1;
+ signal->theData[6] = tlfrTmp2;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ return;
+ } else {
+ ndbrequire(rootfragrecptr.p->fragmentptr[1] == fragrecptr.i);
+ }//if
+ /*---- BOTH DATA FILES ARE OPEN------*/
+ /* ----IF THE UNDO FILE IS CLOSED , OPEN IT.----- */
+ if (cactiveOpenUndoFsPtr != RNIL) {
+ jam();
+ sendLcpFragidconfLab(signal);
+ return;
+ }//if
+ cactiveUndoFilePage = 0;
+ cprevUndoaddress = cminusOne;
+ cundoposition = 0;
+ clastUndoPageIdWritten = 0;
+ ndbrequire(cfsFirstfreeconnect != RNIL);
+ seizeFsConnectRec(signal);
+ fsConnectptr.p->fsState = WAIT_OPEN_UNDO_LCP;
+ fsConnectptr.p->fsPart = 0; /* FILE INDEX, SECOND FILE IN THE DIRECTORY */
+ cactiveOpenUndoFsPtr = fsConnectptr.i;
+ cactiveRootfrag = rootfragrecptr.i;
+ tlfrTmp1 = 1; /* FILE VERSION */
+ tlfrTmp1 = (tlfrTmp1 << 8) + ZLOCALLOGFILE; /* .LOCLOG = 2 */
+ tlfrTmp1 = (tlfrTmp1 << 8) + 4; /* ROOT DIRECTORY = D4 */
+ tlfrTmp1 = (tlfrTmp1 << 8) + fsConnectptr.p->fsPart; /* P2 */
+ tlfrTmp2 = 0x302; /* D7 CREATE , READ / WRITE , TRUNCATE TO ZERO */
+ /* ---FILE NAME "D4"/"DBACC"/LCP_CONNECTPTR:LOCAL_CHECK_PID/FS_CONNECTPTR:FS_PART".LOCLOG-- */
+ /* ************************ */
+ /* FSOPENREQ */
+ /* ************************ */
+ signal->theData[0] = cownBlockref;
+ signal->theData[1] = fsConnectptr.i;
+ signal->theData[2] = cminusOne; /* #FFFFFFFF */
+ signal->theData[3] = cminusOne; /* #FFFFFFFF */
+ signal->theData[4] = cactiveUndoFileVersion;
+ /* A GROUP OF UNDO FILES WHICH ARE UPDATED */
+ signal->theData[5] = tlfrTmp1;
+ signal->theData[6] = tlfrTmp2;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ return;
+}//Dbacc::lcpFsOpenConfLab()
+
+void Dbacc::lcpOpenUndofileConfLab(Signal* signal)
+{
+ ptrGuard(fsConnectptr);
+ fsConnectptr.p->fsState = WAIT_NOTHING;
+ rootfragrecptr.i = cactiveRootfrag;
+ ptrCheck(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ fsConnectptr.p->fsPtr = tuserptr;
+ sendLcpFragidconfLab(signal);
+ return;
+}//Dbacc::lcpOpenUndofileConfLab()
+
+void Dbacc::sendLcpFragidconfLab(Signal* signal)
+{
+ ptrGuard(rootfragrecptr);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ /* ************************ */
+ /* LCP_FRAGIDCONF */
+ /* ************************ */
+ signal->theData[0] = lcpConnectptr.p->lcpUserptr;
+ signal->theData[1] = lcpConnectptr.i;
+ signal->theData[2] = 2;
+ /* NO OF LOCAL FRAGMENTS */
+ signal->theData[3] = rootfragrecptr.p->fragmentid[0];
+ signal->theData[4] = rootfragrecptr.p->fragmentid[1];
+ signal->theData[5] = RNIL;
+ signal->theData[6] = RNIL;
+ sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_LCP_FRAGIDCONF, signal, 7, JBB);
+ return;
+}//Dbacc::sendLcpFragidconfLab()
+
+/* ******************--------------------------------------------------------------- */
+/* LCP_HOLDOPERATION                      REQUEST FOR LIST OF STOPPED OPERATIONS     */
+/* SENDER: LQH, LEVEL B */
+/* ENTER LCP_HOLDOPREQ WITH */
+/* LCP_CONNECTPTR CONNECTION POINTER */
+/* TFID, LOCAL FRAGMENT ID */
+/* THOLD_PREV_SENT_OP NR OF SENT OPERATIONS AT */
+/* PREVIOUS SIGNALS */
+/* TLQH_POINTER LQH USER POINTER */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/*  LCP_HOLDOPERATION                     REQUEST FOR LIST OF STOPPED OPERATIONS     */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execLCP_HOLDOPREQ(Signal* signal)
+{
+ Uint32 tholdPrevSentOp;
+
+ jamEntry();
+ lcpConnectptr.i = signal->theData[0]; /* CONNECTION POINTER */
+ tfid = signal->theData[1]; /* LOCAL FRAGMENT ID */
+ tholdPrevSentOp = signal->theData[2]; /* NR OF SENT OPERATIONS AT */
+ /* PREVIOUS SIGNALS */
+ tlqhPointer = signal->theData[3]; /* LQH USER POINTER */
+
+ tresult = 0;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
+ rootfragrecptr.i = lcpConnectptr.p->rootrecptr;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (rootfragrecptr.p->fragmentid[0] == tfid) {
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ } else {
+ ndbrequire(rootfragrecptr.p->fragmentid[1] == tfid);
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
+ }//if
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ fragrecptr.p->lcpLqhPtr = tlqhPointer;
+ if (tholdPrevSentOp != 0) {
+ ndbrequire(fragrecptr.p->fragState == SEND_QUE_OP);
+ } else if (tholdPrevSentOp == 0) {
+ jam();
+ fragrecptr.p->fragState = SEND_QUE_OP;
+ fragrecptr.p->stopQueOp = ZTRUE;
+ fragrecptr.p->sentWaitInQueOp = fragrecptr.p->firstWaitInQueOp;
+ }//if
+ tholdSentOp = 0; /* NR OF OPERATION WHICH ARE SENT THIS TIME */
+ operationRecPtr.i = fragrecptr.p->sentWaitInQueOp;
+
+ /* --------------------------------------------- */
+  /*       GO THROUGH ALL OPERATIONS IN THE WAIT  */
+  /*       LIST AND SEND THE LQH CONNECTION PTR OF */
+  /*       THE OPERATIONS TO THE LQH BLOCK. MAX 23 */
+  /*       OPERATIONS PER SIGNAL                   */
+ /* --------------------------------------------- */
+ while (operationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ ckeys[tholdSentOp] = operationRecPtr.p->userptr;
+ operationRecPtr.i = operationRecPtr.p->nextQueOp;
+ tholdSentOp++;
+ if ((tholdSentOp >= 23) &&
+ (operationRecPtr.i != RNIL)) {
+ jam();
+ /* ----------------------------------------------- */
+      /*       THERE ARE MORE THAN 23 WAITING OPERATIONS. WE       */
+      /*       HAVE TO SEND THESE 23 AND WAIT FOR THE NEXT SIGNAL  */
+      /* ----------------------------------------------- */
+      tholdMore = ZTRUE;      /* SECOND DATA AT THE CONF SIGNAL, = MORE */
+ fragrecptr.p->sentWaitInQueOp = operationRecPtr.i;
+ sendholdconfsignalLab(signal);
+ return;
+ }//if
+ }//while
+ /* ----------------------------------------------- */
+ /* OPERATION_REC_PTR = RNIL */
+  /*       THERE ARE NO MORE WAITING OPERATIONS; THE */
+  /*       STATE OF THE FRAGMENT RECORD IS CHANGED   */
+  /*       AND A RETURN                              */
+ /* SIGNAL IS SENT */
+ /* ----------------------------------------------- */
+ fragrecptr.p->sentWaitInQueOp = RNIL;
+ tholdMore = ZFALSE; /* SECOND DATA AT THE CONF SIGNAL , = NOT MORE */
+ fragrecptr.p->fragState = WAIT_ACC_LCPREQ;
+ sendholdconfsignalLab(signal);
+ return;
+}//Dbacc::execLCP_HOLDOPREQ()
+
+void Dbacc::sendholdconfsignalLab(Signal* signal)
+{
+ tholdMore = (tholdMore << 16) + tholdSentOp;
+ /* SECOND SIGNAL DATA, LENGTH + MORE */
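+  /* theData[1] IS PACKED BELOW: THE MORE-FLAG (tholdMore) IN THE BITS ABOVE 15 */
+  /* AND THE NUMBER OF OPERATIONS SENT IN THIS SIGNAL (AT MOST 23) IN THE LOW   */
+  /* 16 BITS.                                                                   */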
+ /* ************************ */
+ /* LCP_HOLDOPCONF */
+ /* ************************ */
+ signal->theData[0] = fragrecptr.p->lcpLqhPtr;
+ signal->theData[1] = tholdMore;
+ signal->theData[2] = ckeys[0];
+ signal->theData[3] = ckeys[1];
+ signal->theData[4] = ckeys[2];
+ signal->theData[5] = ckeys[3];
+ signal->theData[6] = ckeys[4];
+ signal->theData[7] = ckeys[5];
+ signal->theData[8] = ckeys[6];
+ signal->theData[9] = ckeys[7];
+ signal->theData[10] = ckeys[8];
+ signal->theData[11] = ckeys[9];
+ signal->theData[12] = ckeys[10];
+ signal->theData[13] = ckeys[11];
+ signal->theData[14] = ckeys[12];
+ signal->theData[15] = ckeys[13];
+ signal->theData[16] = ckeys[14];
+ signal->theData[17] = ckeys[15];
+ signal->theData[18] = ckeys[16];
+ signal->theData[19] = ckeys[17];
+ signal->theData[20] = ckeys[18];
+ signal->theData[21] = ckeys[19];
+ signal->theData[22] = ckeys[20];
+ signal->theData[23] = ckeys[21];
+ signal->theData[24] = ckeys[22];
+ sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_LCP_HOLDOPCONF, signal, 25, JBA);
+ return;
+}//Dbacc::sendholdconfsignalLab()
+
+/**
+ * execACC_LCPREQ
+ * Perform local checkpoint of a fragment
+ *
+ * SENDER: LQH, LEVEL B
+ * ENTER ACC_LCPREQ WITH
+ * LCP_CONNECTPTR, OPERATION RECORD PTR
+ * TLCP_LQH_CHECK_V, LQH'S LOCAL FRAG CHECK VALUE
+ * TLCP_LOCAL_FRAG_ID, LOCAL FRAG ID
+ *
+ */
+void Dbacc::execACC_LCPREQ(Signal* signal)
+{
+ Uint32 tlcpLocalFragId;
+ Uint32 tlcpLqhCheckV;
+
+ jamEntry();
+ lcpConnectptr.i = signal->theData[0]; // CONNECTION PTR
+ tlcpLqhCheckV = signal->theData[1]; // LQH'S LOCAL FRAG CHECK VALUE
+ tlcpLocalFragId = signal->theData[2]; // LOCAL FRAG ID
+ tresult = 0;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
+
+ rootfragrecptr.i = lcpConnectptr.p->rootrecptr;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (rootfragrecptr.p->fragmentid[0] == tlcpLocalFragId) {
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ } else {
+ ndbrequire(rootfragrecptr.p->fragmentid[1] == tlcpLocalFragId);
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
+ }//if
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ ndbrequire(fragrecptr.p->fragState == WAIT_ACC_LCPREQ);
+ fragrecptr.p->lcpLqhPtr = tlcpLqhCheckV;
+
+ Page8Ptr zeroPagePtr;
+ seizeLcpPage(zeroPagePtr);
+ fragrecptr.p->zeroPagePtr = zeroPagePtr.i;
+ fragrecptr.p->prevUndoposition = cminusOne;
+ initRootFragPageZero(rootfragrecptr, zeroPagePtr);
+ initFragPageZero(fragrecptr, zeroPagePtr);
+ /*-----------------------------------------------------------------*/
+ /* SEIZE ZERO PAGE FIRST AND THEN SEIZE DATA PAGES IN */
+ /* BACKWARDS ORDER. THIS IS TO ENSURE THAT WE GET THE PAGES */
+ /* IN ORDER. ON WINDOWS NT THIS WILL BE A BENEFIT SINCE WE */
+ /* CAN THEN DO 1 WRITE_FILE INSTEAD OF 8. */
+ /* WHEN WE RELEASE THE PAGES WE RELEASE THEM IN THE OPPOSITE */
+ /* ORDER. */
+ /*-----------------------------------------------------------------*/
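+  /* THE LOOP BELOW COUNTS DOWN FROM ZWRITEPAGESIZE - 1 TO ZERO. THE CONDITION  */
+  /* (Uint32)~taspTmp FIRST BECOMES ZERO WHEN taspTmp WRAPS TO 0xFFFFFFFF AFTER */
+  /* THE ZERO ITERATION, WHICH TERMINATES THE LOOP.                             */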
+ for (Uint32 taspTmp = ZWRITEPAGESIZE - 1; (Uint32)~taspTmp; taspTmp--) {
+ Page8Ptr dataPagePtr;
+ jam();
+ ndbrequire(fragrecptr.p->datapages[taspTmp] == RNIL);
+ seizeLcpPage(dataPagePtr);
+ fragrecptr.p->datapages[taspTmp] = dataPagePtr.i;
+ }//for
+ fragrecptr.p->lcpMaxDirIndex = fragrecptr.p->dirsize;
+ fragrecptr.p->lcpMaxOverDirIndex = fragrecptr.p->lastOverIndex;
+ fragrecptr.p->createLcp = ZTRUE;
+ operationRecPtr.i = fragrecptr.p->lockOwnersList;
+ lcp_write_op_to_undolog(signal);
+}
+
+void
+Dbacc::lcp_write_op_to_undolog(Signal* signal)
+{
+ bool delay_continueb= false;
+ Uint32 i, j;
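+  /* WRITE UNDO LOG RECORDS FOR THE LOCK OWNERS IN BATCHES OF AT MOST 16 x 32   */
+  /* OPERATIONS AND THEN TAKE A REAL-TIME BREAK VIA CONTINUEB. IF THE FREE UNDO */
+  /* PAGES RUN LOW, THE CONTINUEB SIGNAL IS SENT WITH A DELAY INSTEAD.          */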
+ for (i= 0; i < 16; i++) {
+ jam();
+ if (remainingUndoPages() <= ZMIN_UNDO_PAGES_AT_COMMIT) {
+ jam();
+ delay_continueb= true;
+ break;
+ }
+ for (j= 0; j < 32; j++) {
+ if (operationRecPtr.i == RNIL) {
+ jam();
+ break;
+ }
+ jam();
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+
+ if ((operationRecPtr.p->operation == ZINSERT) ||
+ (operationRecPtr.p->elementIsDisappeared == ZTRUE)){
+ /*******************************************************************
+       * Only log inserts and elements that are marked as disappeared.
+ * All other operations update the element header and that is handled
+ * when pages are written to disk
+ ********************************************************************/
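+        /* cundoposition PACKS THE ACTIVE UNDO PAGE ID IN THE HIGH BITS AND THE */
+        /* WORD INDEX WITHIN THAT PAGE IN THE LOW ZUNDOPAGEINDEXBITS BITS; THE  */
+        /* PAGE ID IS MASKED INTO THE IN-MEMORY WINDOW OF cundopagesize PAGES.  */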
+ undopageptr.i = (cundoposition>>ZUNDOPAGEINDEXBITS) & (cundopagesize-1);
+ ptrAss(undopageptr, undopage);
+ theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
+ tundoindex = theadundoindex + ZUNDOHEADSIZE;
+
+ writeUndoOpInfo(signal);/* THE INFORMATION ABOUT ELEMENT HEADER, STORED*/
+ /* IN OP REC, IS WRITTEN AT UNDO PAGES */
+        cundoElemIndex = 0;/* DEFAULT VALUE USED BY WRITE_UNDO_HEADER SUBROUTINE */
+ writeUndoHeader(signal, RNIL, UndoHeader::ZOP_INFO); /* WRITE THE HEAD OF THE UNDO ELEMENT */
+ checkUndoPages(signal); /* SEND UNDO PAGE TO DISK WHEN A GROUP OF */
+ /* UNDO PAGES,CURRENTLY 8, IS FILLED */
+ }
+ operationRecPtr.i = operationRecPtr.p->nextLockOwnerOp;
+ }
+ if (operationRecPtr.i == RNIL) {
+ jam();
+ break;
+ }
+ }
+ if (operationRecPtr.i != RNIL) {
+ jam();
+ signal->theData[0]= ZLCP_OP_WRITE_RT_BREAK;
+ signal->theData[1]= operationRecPtr.i;
+ signal->theData[2]= fragrecptr.i;
+ signal->theData[3]= lcpConnectptr.i;
+ if (delay_continueb) {
+ jam();
+ sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 10, 4);
+ } else {
+ jam();
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 4, JBB);
+ }
+ return;
+ }
+
+ signal->theData[0] = fragrecptr.p->lcpLqhPtr;
+ sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_LCPSTARTED,
+ signal, 1, JBA);
+
+ fragrecptr.p->activeDataPage = 0;
+ fragrecptr.p->lcpDirIndex = 0;
+ fragrecptr.p->fragState = LCP_SEND_PAGES;
+
+ signal->theData[0] = lcpConnectptr.i;
+ signal->theData[1] = fragrecptr.i;
+ sendSignal(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 2, JBB);
+}
+
+/* ******************--------------------------------------------------------------- */
+/* ACC_SAVE_PAGES A GROUP OF PAGES IS ALLOCATED. THE PAGES AND OVERFLOW */
+/*                          PAGES OF THE FRAGMENT ARE COPIED INTO THEM AND SENT TO   */
+/* THE DATA FILE OF THE CHECK POINT. */
+/* SENDER: ACC, LEVEL B */
+/* ENTER ACC_SAVE_PAGES WITH */
+/* LCP_CONNECTPTR, CONNECTION RECORD PTR */
+/* FRAGRECPTR FRAGMENT RECORD PTR */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACC_SAVE_PAGES REQUEST TO SEND THE PAGE TO DISK */
+/* ******************------------------------------+ UNDO PAGES */
+/* SENDER: ACC, LEVEL B */
+void Dbacc::execACC_SAVE_PAGES(Signal* signal)
+{
+ jamEntry();
+ lcpConnectptr.i = signal->theData[0];
+ /* CONNECTION RECORD PTR */
+ fragrecptr.i = signal->theData[1];
+ /* FRAGMENT RECORD PTR */
+ tresult = 0;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ if (lcpConnectptr.p->lcpstate != LCP_ACTIVE) {
+ jam();
+ sendSystemerror(signal);
+ return;
+ }//if
+ if (ERROR_INSERTED(3000)) {
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (rootfragrecptr.p->mytabptr == c_errorInsert3000_TableId){
+ ndbout << "Delay writing of datapages" << endl;
+ // Delay writing of pages
+ jam();
+ sendSignalWithDelay(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 1000, 2);
+ return;
+ }
+ }
+ if (clblPageCounter == 0) {
+ jam();
+ signal->theData[0] = lcpConnectptr.i;
+ signal->theData[1] = fragrecptr.i;
+ sendSignalWithDelay(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 100, 2);
+ return;
+ } else {
+ jam();
+ clblPageCounter = clblPageCounter - 1;
+ }//if
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ if (fragrecptr.p->fragState == LCP_SEND_PAGES) {
+ jam();
+ savepagesLab(signal);
+ return;
+ } else {
+ if (fragrecptr.p->fragState == LCP_SEND_OVER_PAGES) {
+ jam();
+ saveOverPagesLab(signal);
+ return;
+ } else {
+ ndbrequire(fragrecptr.p->fragState == LCP_SEND_ZERO_PAGE);
+ jam();
+ saveZeroPageLab(signal);
+ return;
+ }//if
+ }//if
+}//Dbacc::execACC_SAVE_PAGES()
+
+void Dbacc::savepagesLab(Signal* signal)
+{
+ DirRangePtr spDirRangePtr;
+ DirectoryarrayPtr spDirptr;
+ Page8Ptr aspPageptr;
+ Page8Ptr aspCopyPageptr;
+ Uint32 taspDirindex;
+ Uint32 taspDirIndex;
+ Uint32 taspIndex;
+
+ if ((fragrecptr.p->lcpDirIndex >= fragrecptr.p->dirsize) ||
+ (fragrecptr.p->lcpDirIndex >= fragrecptr.p->lcpMaxDirIndex)) {
+ jam();
+ endsavepageLab(signal);
+ return;
+ }//if
+ /* SOME EXPAND PROCESSES HAVE BEEN PERFORMED. */
+  /*     THE ADDED PAGES ARE NOT SENT TO DISK                   */
+ arrGuard(fragrecptr.p->activeDataPage, 8);
+ aspCopyPageptr.i = fragrecptr.p->datapages[fragrecptr.p->activeDataPage];
+ ptrCheckGuard(aspCopyPageptr, cpagesize, page8);
+ taspDirindex = fragrecptr.p->lcpDirIndex; /* DIRECTORY OF ACTIVE PAGE */
+ spDirRangePtr.i = fragrecptr.p->directory;
+ taspDirIndex = taspDirindex >> 8;
+ taspIndex = taspDirindex & 0xff;
+ ptrCheckGuard(spDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(taspDirIndex, 256);
+ spDirptr.i = spDirRangePtr.p->dirArray[taspDirIndex];
+ ptrCheckGuard(spDirptr, cdirarraysize, directoryarray);
+ aspPageptr.i = spDirptr.p->pagep[taspIndex];
+ ptrCheckGuard(aspPageptr, cpagesize, page8);
+ ndbrequire(aspPageptr.p->word32[ZPOS_PAGE_ID] == fragrecptr.p->lcpDirIndex);
+ lcnPageptr = aspPageptr;
+ lcnCopyPageptr = aspCopyPageptr;
+ lcpCopyPage(signal);
+ fragrecptr.p->lcpDirIndex++;
+ fragrecptr.p->activeDataPage++;
+ if (fragrecptr.p->activeDataPage < ZWRITEPAGESIZE) {
+ jam();
+ signal->theData[0] = lcpConnectptr.i;
+ signal->theData[1] = fragrecptr.i;
+ sendSignal(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 2, JBB);
+ return;
+ }//if
+ senddatapagesLab(signal);
+ return;
+}//Dbacc::savepagesLab()
+
+/* FRAGRECPTR:ACTIVE_DATA_PAGE = ZWRITEPAGESIZE */
+/* SEND A GROUP OF PAGES TO DISK */
+void Dbacc::senddatapagesLab(Signal* signal)
+{
+ fsConnectptr.i = fragrecptr.p->fsConnPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ seizeFsOpRec(signal);
+ initFsOpRec(signal);
+ fsOpptr.p->fsOpstate = WAIT_WRITE_DATA;
+ ndbrequire(fragrecptr.p->activeDataPage <= 8);
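+  /* theData[6 .. 6+n-1] HOLD THE i-VALUES OF THE n PAGES TO WRITE AND          */
+  /* theData[6+n] HOLDS THE CURRENT POSITION IN THE DATA FILE                   */
+  /* (activeDataFilePage).                                                      */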
+ for (Uint32 i = 0; i < fragrecptr.p->activeDataPage; i++) {
+ signal->theData[i + 6] = fragrecptr.p->datapages[i];
+ }//for
+ signal->theData[fragrecptr.p->activeDataPage + 6] = fragrecptr.p->activeDataFilePage;
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsOpptr.i;
+ signal->theData[3] = 0x2;
+ signal->theData[4] = ZPAGE8_BASE_ADD;
+ signal->theData[5] = fragrecptr.p->activeDataPage;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 15, JBA);
+ return;
+}//Dbacc::senddatapagesLab()
+
+void Dbacc::endsavepageLab(Signal* signal)
+{
+ Page8Ptr espPageidptr;
+
+ espPageidptr.i = fragrecptr.p->zeroPagePtr;
+ ptrCheckGuard(espPageidptr, cpagesize, page8);
+ dbgWord32(espPageidptr, ZPAGEZERO_NO_PAGES, fragrecptr.p->lcpDirIndex);
+ espPageidptr.p->word32[ZPAGEZERO_NO_PAGES] = fragrecptr.p->lcpDirIndex;
+ fragrecptr.p->fragState = LCP_SEND_OVER_PAGES;
+ fragrecptr.p->noOfStoredOverPages = 0;
+ fragrecptr.p->lcpDirIndex = 0;
+ saveOverPagesLab(signal);
+ return;
+}//Dbacc::endsavepageLab()
+
+/* ******************--------------------------------------------------------------- */
+/* ACC_SAVE_OVER_PAGES CONTINUE SAVING THE LEFT OVERPAGES. */
+/* ******************--------------------------------------------------------------- */
+void Dbacc::saveOverPagesLab(Signal* signal)
+{
+ DirRangePtr sopDirRangePtr;
+ DirectoryarrayPtr sopOverflowDirptr;
+ Page8Ptr sopPageptr;
+ Page8Ptr sopCopyPageptr;
+ Uint32 tsopDirindex;
+ Uint32 tsopDirInd;
+ Uint32 tsopIndex;
+
+ if ((fragrecptr.p->lcpDirIndex >= fragrecptr.p->lastOverIndex) ||
+ (fragrecptr.p->lcpDirIndex >= fragrecptr.p->lcpMaxOverDirIndex)) {
+ jam();
+ endsaveoverpageLab(signal);
+ return;
+ }//if
+ arrGuard(fragrecptr.p->activeDataPage, 8);
+ sopCopyPageptr.i = fragrecptr.p->datapages[fragrecptr.p->activeDataPage];
+ ptrCheckGuard(sopCopyPageptr, cpagesize, page8);
+ tsopDirindex = fragrecptr.p->lcpDirIndex;
+ sopDirRangePtr.i = fragrecptr.p->overflowdir;
+ tsopDirInd = tsopDirindex >> 8;
+ tsopIndex = tsopDirindex & 0xff;
+ ptrCheckGuard(sopDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(tsopDirInd, 256);
+ sopOverflowDirptr.i = sopDirRangePtr.p->dirArray[tsopDirInd];
+ ptrCheckGuard(sopOverflowDirptr, cdirarraysize, directoryarray);
+ sopPageptr.i = sopOverflowDirptr.p->pagep[tsopIndex];
+ fragrecptr.p->lcpDirIndex++;
+ if (sopPageptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(sopPageptr, cpagesize, page8);
+ ndbrequire(sopPageptr.p->word32[ZPOS_PAGE_ID] == tsopDirindex);
+ ndbrequire(((sopPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) != ZNORMAL_PAGE_TYPE);
+ lcnPageptr = sopPageptr;
+ lcnCopyPageptr = sopCopyPageptr;
+ lcpCopyPage(signal);
+ fragrecptr.p->noOfStoredOverPages++;
+ fragrecptr.p->activeDataPage++;
+ if ((sopPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] == 0)) {
+ //ndbrequire(((sopPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == ZOVERFLOW_PAGE_TYPE);
+ if (((sopPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) ==
+ ZOVERFLOW_PAGE_TYPE) {
+ /*--------------------------------------------------------------------------------*/
+ /* THE PAGE IS EMPTY AND WAITING TO BE RELEASED. IT COULD NOT BE RELEASED */
+ /* EARLIER SINCE IT WAS PART OF A LOCAL CHECKPOINT. */
+ /*--------------------------------------------------------------------------------*/
+ jam();
+ ropPageptr = sopPageptr;
+ releaseOverpage(signal);
+ } else {
+ jam();
+ sendSystemerror(signal);
+ }
+ }//if
+ }
+ if (fragrecptr.p->activeDataPage == ZWRITEPAGESIZE) {
+ jam();
+ senddatapagesLab(signal);
+ return;
+ }//if
+ signal->theData[0] = lcpConnectptr.i;
+ signal->theData[1] = fragrecptr.i;
+ sendSignal(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 2, JBB);
+ return;
+}//Dbacc::saveOverPagesLab()
+
+void Dbacc::endsaveoverpageLab(Signal* signal)
+{
+ Page8Ptr esoPageidptr;
+
+ esoPageidptr.i = fragrecptr.p->zeroPagePtr;
+ ptrCheckGuard(esoPageidptr, cpagesize, page8);
+ dbgWord32(esoPageidptr, ZPAGEZERO_NO_OVER_PAGE, fragrecptr.p->noOfStoredOverPages);
+ esoPageidptr.p->word32[ZPAGEZERO_NO_OVER_PAGE] = fragrecptr.p->noOfStoredOverPages;
+ fragrecptr.p->fragState = LCP_SEND_ZERO_PAGE;
+ if (fragrecptr.p->activeDataPage != 0) {
+ jam();
+    senddatapagesLab(signal);	/* SEND REMAINING PAGES TO DISK */
+ return;
+ }//if
+ saveZeroPageLab(signal);
+ return;
+}//Dbacc::endsaveoverpageLab()
+
+/* ******************--------------------------------------------------------------- */
+/*       ACC_SAVE_ZERO_PAGE        PAGE ZERO IS SENT TO DISK. IT IS THE LAST STAGE   */
+/*                                 OF LCP CREATION. ACC_LCPCONF IS RETURNED.         */
+/* ******************--------------------------------------------------------------- */
+void Dbacc::saveZeroPageLab(Signal* signal)
+{
+ Page8Ptr szpPageidptr;
+ Uint32 Tchs;
+ Uint32 Ti;
+
+ fragrecptr.p->createLcp = ZFALSE;
+ fsConnectptr.i = fragrecptr.p->fsConnPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ szpPageidptr.i = fragrecptr.p->zeroPagePtr;
+ ptrCheckGuard(szpPageidptr, cpagesize, page8);
+ dbgWord32(szpPageidptr, ZPAGEZERO_PREV_UNDOP, fragrecptr.p->prevUndoposition);
+ szpPageidptr.p->word32[ZPAGEZERO_PREV_UNDOP] = fragrecptr.p->prevUndoposition;
+ dbgWord32(szpPageidptr, ZPAGEZERO_NEXT_UNDO_FILE, cactiveUndoFileVersion);
+ szpPageidptr.p->word32[ZPAGEZERO_NEXT_UNDO_FILE] = cactiveUndoFileVersion;
+ fragrecptr.p->fragState = WAIT_ZERO_PAGE_STORED;
+
+ /* --------------------------------------------------------------------------------- */
+ // Calculate the checksum and store it for the zero page of the fragment.
+ /* --------------------------------------------------------------------------------- */
+ szpPageidptr.p->word32[ZPOS_CHECKSUM] = 0;
+ Tchs = 0;
+ for (Ti = 0; Ti < 2048; Ti++) {
+ Tchs = Tchs ^ szpPageidptr.p->word32[Ti];
+ }//for
+ szpPageidptr.p->word32[ZPOS_CHECKSUM] = Tchs;
+ dbgWord32(szpPageidptr, ZPOS_CHECKSUM, Tchs);
+
+ seizeFsOpRec(signal);
+ initFsOpRec(signal);
+ fsOpptr.p->fsOpstate = WAIT_WRITE_DATA;
+ if (clblPageCounter > 0) {
+ jam();
+ clblPageCounter = clblPageCounter - 1;
+ } else {
+ jam();
+ clblPageOver = clblPageOver + 1;
+ }//if
+ /* ************************ */
+ /* FSWRITEREQ */
+ /* ************************ */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsOpptr.i;
+ signal->theData[3] = 0x10;
+ /* FLAG = LIST MEM PAGES, LIST FILE PAGES */
+ /* SYNC FILE AFTER WRITING */
+ signal->theData[4] = ZPAGE8_BASE_ADD;
+ signal->theData[5] = 1;
+ /* NO OF PAGES */
+ signal->theData[6] = fragrecptr.p->zeroPagePtr;
+ /* ZERO PAGE */
+ signal->theData[7] = 0;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+ /* ZERO PAGE AT DATA FILE */
+ return;
+}//Dbacc::saveZeroPageLab()
+
+/* ******************--------------------------------------------------------------- */
+/*       FSWRITECONF                       WRITE FILE CONF                           */
+/* ENTER FSWRITECONF WITH SENDER: FS, LEVEL B */
+/* FS_OPPTR FS_CONNECTION PTR */
+/* ******************--------------------------------------------------------------- */
+void Dbacc::lcpCloseDataFileLab(Signal* signal)
+{
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ fsConnectptr.p->fsState = LCP_CLOSE_DATA;
+ /* ************************ */
+ /* FSCLOSEREQ */
+ /* ************************ */
+ /* CLOSE DATA FILE */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = ZFALSE;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+ /* FLAG = 0, DO NOT DELETE FILE */
+ return;
+}//Dbacc::lcpCloseDataFileLab()
+
+void Dbacc::checkSyncUndoPagesLab(Signal* signal)
+{
+ fragrecptr.i = fsConnectptr.p->fragrecPtr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ releaseFsConnRec(signal);
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ switch (lcpConnectptr.p->syncUndopageState) {
+ case WAIT_NOTHING:
+ jam();
+ lcpConnectptr.p->syncUndopageState = WAIT_ONE_CONF;
+ break;
+ case WAIT_ONE_CONF:
+ jam();
+ lcpConnectptr.p->syncUndopageState = WAIT_TWO_CONF;
+ break;
+ default:
+ jam();
+ sendSystemerror(signal);
+ return;
+ break;
+ }//switch
+
+ /* ACTIVE UNDO PAGE ID */
+ Uint32 tundoPageId = cundoposition >> ZUNDOPAGEINDEXBITS;
+ tmp1 = tundoPageId - (tundoPageId & (ZWRITE_UNDOPAGESIZE - 1));
+ /* START PAGE OF THE LAST UNDO PAGES GROUP */
+ tmp2 = (tundoPageId - tmp1) + 1; /* NO OF LEFT UNDO PAGES */
+ tmp1 = tmp1 & (cundopagesize - 1); /* 1 MBYTE PAGE WINDOW IN MEMORY */
+ fsConnectptr.i = cactiveOpenUndoFsPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ seizeFsOpRec(signal);
+ initFsOpRec(signal);
+ fsOpptr.p->fsOpstate = WAIT_WRITE_UNDO;
+ fsOpptr.p->fsOpMemPage = tundoPageId; /* RECORD MEMORY PAGE WRITTEN */
+ if (clblPageCounter >= (4 * tmp2)) {
+ jam();
+ clblPageCounter = clblPageCounter - (4 * tmp2);
+ } else {
+ jam();
+ clblPageOver = clblPageOver + ((4 * tmp2) - clblPageCounter);
+ clblPageCounter = 0;
+ }//if
+ /* ************************ */
+ /* FSWRITEREQ */
+ /* ************************ */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsOpptr.i;
+ /* FLAG = START MEM PAGES, START FILE PAGES */
+ /* SYNC FILE AFTER WRITING */
+ signal->theData[3] = 0x11;
+ signal->theData[4] = ZUNDOPAGE_BASE_ADD;
+ /* NO OF UNDO PAGES */
+ signal->theData[5] = tmp2;
+ /* FIRST MEMORY PAGE */
+ signal->theData[6] = tmp1;
+ /* ACTIVE PAGE AT UNDO FILE */
+ signal->theData[7] = cactiveUndoFilePage;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+
+ return;
+}//Dbacc::checkSyncUndoPagesLab()
+
+void Dbacc::checkSendLcpConfLab(Signal* signal)
+{
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
+ switch (lcpConnectptr.p->syncUndopageState) {
+ case WAIT_ONE_CONF:
+ jam();
+ lcpConnectptr.p->syncUndopageState = WAIT_NOTHING;
+ break;
+ case WAIT_TWO_CONF:
+ jam();
+ lcpConnectptr.p->syncUndopageState = WAIT_ONE_CONF;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ lcpConnectptr.p->noOfLcpConf++;
+ ndbrequire(lcpConnectptr.p->noOfLcpConf <= 4);
+ fragrecptr.p->fragState = ACTIVEFRAG;
+ rlpPageptr.i = fragrecptr.p->zeroPagePtr;
+ ptrCheckGuard(rlpPageptr, cpagesize, page8);
+ releaseLcpPage(signal);
+ fragrecptr.p->zeroPagePtr = RNIL;
+ for (Uint32 i = 0; i < ZWRITEPAGESIZE; i++) {
+ jam();
+ if (fragrecptr.p->datapages[i] != RNIL) {
+ jam();
+ rlpPageptr.i = fragrecptr.p->datapages[i];
+ ptrCheckGuard(rlpPageptr, cpagesize, page8);
+ releaseLcpPage(signal);
+ fragrecptr.p->datapages[i] = RNIL;
+ }//if
+ }//for
+ signal->theData[0] = fragrecptr.p->lcpLqhPtr;
+ sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_LCPCONF, signal, 1, JBB);
+ if (lcpConnectptr.p->noOfLcpConf == 4) {
+ jam();
+ releaseLcpConnectRec(signal);
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ rootfragrecptr.p->rootState = ACTIVEROOT;
+ }//if
+}//Dbacc::checkSendLcpConfLab()
+
+/* ******************--------------------------------------------------------------- */
+/* ACC_CONTOPREQ */
+/* SENDER: LQH, LEVEL B */
+/* ENTER ACC_CONTOPREQ WITH */
+/* LCP_CONNECTPTR */
+/* TMP1 LOCAL FRAG ID */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACC_CONTOPREQ COMMIT TRANSACTION */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execACC_CONTOPREQ(Signal* signal)
+{
+ Uint32 tcorLocalFrag;
+
+ jamEntry();
+ lcpConnectptr.i = signal->theData[0];
+ /* CONNECTION PTR */
+ tcorLocalFrag = signal->theData[1];
+ /* LOCAL FRAG ID */
+ tresult = 0;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ if(ERROR_INSERTED(3002) && lcpConnectptr.p->noOfLcpConf < 2)
+ {
+ sendSignalWithDelay(cownBlockref, GSN_ACC_CONTOPREQ, signal, 300,
+ signal->getLength());
+ return;
+ }
+
+ ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
+ rootfragrecptr.i = lcpConnectptr.p->rootrecptr;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (rootfragrecptr.p->fragmentid[0] == tcorLocalFrag) {
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ } else {
+ ndbrequire(rootfragrecptr.p->fragmentid[1] == tcorLocalFrag);
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ }//if
+ operationRecPtr.i = fragrecptr.p->firstWaitInQueOp;
+ fragrecptr.p->sentWaitInQueOp = RNIL;
+ fragrecptr.p->stopQueOp = ZFALSE;
+ while (operationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ if (operationRecPtr.p->opState == WAIT_EXE_OP) {
+ jam();
+ //------------------------------------------------------------
+ // Indicate that we are now a normal waiter in the queue. We
+ // will remove the operation from the queue as part of starting
+ // operation again.
+ //------------------------------------------------------------
+ operationRecPtr.p->opState = WAIT_IN_QUEUE;
+ executeNextOperation(signal);
+ }//if
+ operationRecPtr.i = operationRecPtr.p->nextQueOp;
+ }//while
+ signal->theData[0] = fragrecptr.p->lcpLqhPtr;
+ sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_CONTOPCONF, signal, 1, JBA);
+
+ lcpConnectptr.p->noOfLcpConf++;
+ if (lcpConnectptr.p->noOfLcpConf == 4) {
+ jam();
+ releaseLcpConnectRec(signal);
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ rootfragrecptr.p->rootState = ACTIVEROOT;
+ }//if
+  return;	/* ALL QUEUED OPERATIONS ARE RESTARTED IF NEEDED. */
+}//Dbacc::execACC_CONTOPREQ()
+
+/* ******************--------------------------------------------------------------- */
+/* END_LCPREQ END OF LOCAL CHECK POINT */
+/* ENTER END_LCPREQ WITH SENDER: LQH, LEVEL B */
+/* CLQH_PTR, LQH PTR */
+/* CLQH_BLOCK_REF LQH BLOCK REF */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* END_LCPREQ PERFORM A LOCAL CHECK POINT */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execEND_LCPREQ(Signal* signal)
+{
+ jamEntry();
+ clqhPtr = signal->theData[0];
+ /* LQH PTR */
+ clqhBlockRef = signal->theData[1];
+ /* LQH BLOCK REF */
+ tresult = 0;
+ fsConnectptr.i = cactiveOpenUndoFsPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+  fsConnectptr.p->fsState = WAIT_CLOSE_UNDO;	/* CLOSE FILE AFTER WRITING */
+ /* ************************ */
+ /* FSCLOSEREQ */
+ /* ************************ */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = ZFALSE;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+ /* FLAG = 0, DO NOT DELETE FILE */
+ cactiveUndoFileVersion = RNIL;
+ cactiveOpenUndoFsPtr = RNIL;
+ /* ************************ */
+ /* END_LCPCONF */
+ /* ************************ */
+ signal->theData[0] = clqhPtr;
+ sendSignal(clqhBlockRef, GSN_END_LCPCONF, signal, 1, JBB);
+ return;
+}//Dbacc::execEND_LCPREQ()
+
+/*-----------------------------------------------------------------*/
+/* WHEN WE COPY THE PAGE WE ALSO WRITE THE ELEMENT HEADER AS */
+/* UNLOCKED IF THEY ARE CURRENTLY LOCKED. */
+/*-----------------------------------------------------------------*/
+void Dbacc::lcpCopyPage(Signal* signal)
+{
+ Uint32 tlcnNextContainer;
+ Uint32 tlcnTmp;
+ Uint32 tlcnConIndex;
+ Uint32 tlcnIndex;
+ Uint32 Tmp1;
+ Uint32 Tmp2;
+ Uint32 Tmp3;
+ Uint32 Tmp4;
+ Uint32 Ti;
+ Uint32 Tchs;
+ Uint32 Tlimit;
+
+ Tchs = 0;
+ lupPageptr.p = lcnCopyPageptr.p;
+ lcnPageptr.p->word32[ZPOS_CHECKSUM] = Tchs;
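+  /* COPY ALL 2048 WORDS OF THE PAGE IN 32 BLOCKS OF 64 WORDS, READING FOUR     */
+  /* 16-WORD STRIPES PER BLOCK, WHILE XOR-ING EVERY WORD INTO THE CHECKSUM.     */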
+ for (Ti = 0; Ti < 32 ; Ti++) {
+ Tlimit = 16 + (Ti << 6);
+ for (tlcnTmp = (Ti << 6); tlcnTmp < Tlimit; tlcnTmp ++) {
+ Tmp1 = lcnPageptr.p->word32[tlcnTmp];
+ Tmp2 = lcnPageptr.p->word32[tlcnTmp + 16];
+ Tmp3 = lcnPageptr.p->word32[tlcnTmp + 32];
+ Tmp4 = lcnPageptr.p->word32[tlcnTmp + 48];
+
+ lcnCopyPageptr.p->word32[tlcnTmp] = Tmp1;
+ lcnCopyPageptr.p->word32[tlcnTmp + 16] = Tmp2;
+ lcnCopyPageptr.p->word32[tlcnTmp + 32] = Tmp3;
+ lcnCopyPageptr.p->word32[tlcnTmp + 48] = Tmp4;
+
+ Tchs = Tchs ^ Tmp1;
+ Tchs = Tchs ^ Tmp2;
+ Tchs = Tchs ^ Tmp3;
+ Tchs = Tchs ^ Tmp4;
+ }//for
+ }//for
+ tlcnChecksum = Tchs;
+ if (((lcnCopyPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == ZNORMAL_PAGE_TYPE) {
+ jam();
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL 64 BUFFERS ADDRESSED BY ALGORITHM IN */
+ /* FIRST PAGE. IF THEY ARE EMPTY THEY STILL HAVE A CONTAINER */
+ /* HEADER OF 2 WORDS. */
+ /*-----------------------------------------------------------------*/
+ tlcnConIndex = ZHEAD_SIZE;
+ tlupForward = 1;
+ for (tlcnIndex = 0; tlcnIndex <= ZNO_CONTAINERS - 1; tlcnIndex++) {
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
+ lcpUpdatePage(signal);
+ tlcnConIndex = tlcnConIndex + ZBUF_SIZE;
+ }//for
+ }//if
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL USED BUFFERS ON THE LEFT SIDE. */
+ /*-----------------------------------------------------------------*/
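+  /* THE FIRST USED CONTAINER ON THE LEFT SIDE IS TAKEN FROM BITS 23-29 OF THE  */
+  /* EMPTY LIST WORD (BITS 16-22 FOR THE RIGHT SIDE BELOW); EACH CONTAINER      */
+  /* HEADER KEEPS THE NEXT CONTAINER NUMBER IN BITS 11-17, AND ZEMPTYLIST       */
+  /* MARKS THE END OF THE LIST.                                                 */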
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 23) & 0x7f;
+ while (tlcnNextContainer < ZEMPTYLIST) {
+ tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
+ tlcnConIndex = tlcnConIndex + ZHEAD_SIZE;
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
+ tlupForward = 1;
+ lcpUpdatePage(signal);
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
+ }//while
+ if (tlcnNextContainer == ZEMPTYLIST) {
+ jam();
+ /*empty*/;
+ } else {
+ jam();
+ sendSystemerror(signal);
+ return;
+ }//if
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL USED BUFFERS ON THE RIGHT SIDE. */
+ /*-----------------------------------------------------------------*/
+ tlupForward = cminusOne;
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 16) & 0x7f;
+ while (tlcnNextContainer < ZEMPTYLIST) {
+ tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
+ tlcnConIndex = tlcnConIndex + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex - 1;
+ lcpUpdatePage(signal);
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
+ }//while
+ if (tlcnNextContainer == ZEMPTYLIST) {
+ jam();
+ /*empty*/;
+ } else {
+ jam();
+ sendSystemerror(signal);
+ return;
+ }//if
+ lcnCopyPageptr.p->word32[ZPOS_CHECKSUM] = tlcnChecksum;
+}//Dbacc::lcpCopyPage()
+
+/* --------------------------------------------------------------------------------- */
+/* THIS SUBROUTINE GOES THROUGH ONE CONTAINER, CHECKS FOR LOCKED ELEMENTS AND        */
+/* UPDATES THEM SO THAT ALL ELEMENTS ARE WRITTEN AS UNLOCKED ON DISK.                */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::lcpUpdatePage(Signal* signal)
+{
+ OperationrecPtr lupOperationRecPtr;
+ Uint32 tlupElemHead;
+ Uint32 tlupElemLen;
+ Uint32 tlupElemStep;
+ Uint32 tlupConLen;
+
+ tlupConLen = lupPageptr.p->word32[tlupIndex] >> 26;
+ tlupElemLen = fragrecptr.p->elementLength;
+ tlupElemStep = tlupForward * tlupElemLen;
+ while (tlupConLen > ZCON_HEAD_SIZE) {
+ jam();
+ tlupElemHead = lupPageptr.p->word32[tlupElemIndex];
+ if (ElementHeader::getLocked(tlupElemHead)) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WHEN CHANGING THE ELEMENT HEADER WE ALSO HAVE TO UPDATE THE CHECKSUM. IN */
+ /* DOING THIS WE USE THE FORMULA (A XOR B) XOR B = A WHICH MEANS THAT IF WE */
+ /* XOR SOMETHING TWICE WITH THE SAME OPERAND THEN WE RETURN TO THE ORIGINAL */
+ /* VALUE. THEN WE ALSO HAVE TO USE THE NEW ELEMENT HEADER IN THE CHECKSUM */
+ /* CALCULATION. */
+ /* --------------------------------------------------------------------------------- */
+ tlcnChecksum = tlcnChecksum ^ tlupElemHead;
+ lupOperationRecPtr.i = ElementHeader::getOpPtrI(tlupElemHead);
+ ptrCheckGuard(lupOperationRecPtr, coprecsize, operationrec);
+ const Uint32 hv = lupOperationRecPtr.p->hashvaluePart;
+ tlupElemHead = ElementHeader::setUnlocked(hv , 0);
+ arrGuard(tlupElemIndex, 2048);
+ lupPageptr.p->word32[tlupElemIndex] = tlupElemHead;
+ tlcnChecksum = tlcnChecksum ^ tlupElemHead;
+ }//if
+ tlupConLen = tlupConLen - tlupElemLen;
+ tlupElemIndex = tlupElemIndex + tlupElemStep;
+ }//while
+ if (tlupConLen < ZCON_HEAD_SIZE) {
+ jam();
+ sendSystemerror(signal);
+ }//if
+}//Dbacc::lcpUpdatePage()
+
+/*-----------------------------------------------------------------*/
+// At a system restart we check that the page does not contain any
+// locks that hinder the system restart procedure.
+/*-----------------------------------------------------------------*/
+void Dbacc::srCheckPage(Signal* signal)
+{
+ Uint32 tlcnNextContainer;
+ Uint32 tlcnConIndex;
+ Uint32 tlcnIndex;
+
+ lupPageptr.p = lcnCopyPageptr.p;
+ if (((lcnCopyPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == ZNORMAL_PAGE_TYPE) {
+ jam();
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL 64 BUFFERS ADDRESSED BY ALGORITHM IN */
+ /* FIRST PAGE. IF THEY ARE EMPTY THEY STILL HAVE A CONTAINER */
+ /* HEADER OF 2 WORDS. */
+ /*-----------------------------------------------------------------*/
+ tlcnConIndex = ZHEAD_SIZE;
+ tlupForward = 1;
+ for (tlcnIndex = 0; tlcnIndex <= ZNO_CONTAINERS - 1; tlcnIndex++) {
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
+ srCheckContainer(signal);
+ if (tresult != 0) {
+ jam();
+ return;
+ }//if
+ tlcnConIndex = tlcnConIndex + ZBUF_SIZE;
+ }//for
+ }//if
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL USED BUFFERS ON THE LEFT SIDE. */
+ /*-----------------------------------------------------------------*/
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 23) & 0x7f;
+ while (tlcnNextContainer < ZEMPTYLIST) {
+ tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
+ tlcnConIndex = tlcnConIndex + ZHEAD_SIZE;
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
+ tlupForward = 1;
+ srCheckContainer(signal);
+ if (tresult != 0) {
+ jam();
+ return;
+ }//if
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
+ }//while
+ if (tlcnNextContainer == ZEMPTYLIST) {
+ jam();
+ /*empty*/;
+ } else {
+ jam();
+ tresult = 4;
+ return;
+ }//if
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL USED BUFFERS ON THE RIGHT SIDE. */
+ /*-----------------------------------------------------------------*/
+ tlupForward = cminusOne;
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 16) & 0x7f;
+ while (tlcnNextContainer < ZEMPTYLIST) {
+ tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
+ tlcnConIndex = tlcnConIndex + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex - 1;
+ srCheckContainer(signal);
+ if (tresult != 0) {
+ jam();
+ return;
+ }//if
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
+ }//while
+ if (tlcnNextContainer == ZEMPTYLIST) {
+ jam();
+ /*empty*/;
+ } else {
+ jam();
+ tresult = 4;
+ return;
+ }//if
+}//Dbacc::srCheckPage()
+
+/* --------------------------------------------------------------------------------- */
+/* THIS SUBROUTINE GOES THROUGH ONE CONTAINER AND CHECKS THAT NO ELEMENTS ARE STILL  */
+/* LOCKED; A LOCK REMAINING AFTER THE SYSTEM RESTART IS REPORTED AS AN ERROR.        */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::srCheckContainer(Signal* signal)
+{
+ Uint32 tlupElemLen;
+ Uint32 tlupElemStep;
+ Uint32 tlupConLen;
+
+ tlupConLen = lupPageptr.p->word32[tlupIndex] >> 26;
+ tlupElemLen = fragrecptr.p->elementLength;
+ tlupElemStep = tlupForward * tlupElemLen;
+ while (tlupConLen > ZCON_HEAD_SIZE) {
+ jam();
+ const Uint32 tlupElemHead = lupPageptr.p->word32[tlupElemIndex];
+ if (ElementHeader::getLocked(tlupElemHead)){
+ jam();
+ //-------------------------------------------------------
+ // This is absolutely undesirable. We have a lock remaining
+ // after the system restart. We send a crash signal that will
+ // enter the trace file.
+ //-------------------------------------------------------
+ tresult = 2;
+ return;
+ }//if
+ tlupConLen = tlupConLen - tlupElemLen;
+ tlupElemIndex = tlupElemIndex + tlupElemStep;
+ }//while
+ if (tlupConLen < ZCON_HEAD_SIZE) {
+ jam();
+ tresult = 3;
+ }//if
+ return;
+}//Dbacc::srCheckContainer()
+
+/* ------------------------------------------------------------------------- */
+/* CHECK_UNDO_PAGES */
+/*       DESCRIPTION: CHECKS WHEN A PAGE OR A GROUP OF UNDO PAGES IS FILLED. WHEN    */
+/*              A PAGE IS FILLED, CUNDOPOSITION IS UPDATED; THE NEW                  */
+/*              POSITION IS THE BEGINNING OF THE NEXT UNDO PAGE.                     */
+/*              IF A GROUP IS FILLED, THE PAGES ARE SENT TO DISK,                    */
+/* AND A NEW GROUP IS CHOSEN. */
+/* ------------------------------------------------------------------------- */
+void Dbacc::checkUndoPages(Signal* signal)
+{
+
+ fragrecptr.p->prevUndoposition = cundoposition;
+ cprevUndoaddress = cundoposition;
+
+ // Calculate active undo page id
+ Uint32 tundoPageId = cundoposition >> ZUNDOPAGEINDEXBITS;
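+  // The low ZUNDOPAGEINDEXBITS bits of cundoposition give the word index within
+  // the page. While there is room on the page (below ZMAXUNDOPAGEINDEX) the
+  // position is advanced to tundoindex on the same page, otherwise it is moved
+  // to the start of the next undo page.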
+
+ /**
+ * WE WILL WRITE UNTIL WE HAVE ABOUT 8 KBYTE REMAINING ON THE 32 KBYTE
+ * PAGE. THIS IS TO ENSURE THAT WE DO NOT HAVE ANY UNDO LOG RECORDS THAT PASS
+   * A PAGE BOUNDARY. THIS SIMPLIFIES THE CODE AT THE COST OF SOME INEFFICIENCY.
+ */
+ static const Uint32 ZMAXUNDOPAGEINDEX = 7100;
+ if (tundoindex < ZMAXUNDOPAGEINDEX) {
+ jam();
+ cundoposition = (tundoPageId << ZUNDOPAGEINDEXBITS) + tundoindex;
+ return;
+ }//if
+
+ /**
+ * WE CHECK IF MORE THAN 1 MBYTE OF WRITES ARE OUTSTANDING TO THE UNDO FILE.
+ * IF SO WE HAVE TO CRASH SINCE WE HAVE NO MORE SPACE TO WRITE UNDO LOG
+ * RECORDS IN
+ */
+ Uint16 nextUndoPageId = tundoPageId + 1;
+ updateUndoPositionPage(signal, nextUndoPageId << ZUNDOPAGEINDEXBITS);
+
+ if ((tundoPageId & (ZWRITE_UNDOPAGESIZE - 1)) == (ZWRITE_UNDOPAGESIZE - 1)) {
+ jam();
+ /* ---------- SEND A GROUP OF UNDO PAGES TO DISK --------- */
+ fsConnectptr.i = cactiveOpenUndoFsPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ Uint32 tcupTmp1 = (tundoPageId - ZWRITE_UNDOPAGESIZE) + 1;
+ tcupTmp1 = tcupTmp1 & (cundopagesize - 1); /* 1 MBYTE PAGE WINDOW */
+ seizeFsOpRec(signal);
+ initFsOpRec(signal);
+ fsOpptr.p->fsOpstate = WAIT_WRITE_UNDO_EXIT;
+ fsOpptr.p->fsOpMemPage = tundoPageId;
+ fragrecptr.p->nrWaitWriteUndoExit++;
+ if (clblPageCounter >= 8) {
+ jam();
+ clblPageCounter = clblPageCounter - 8;
+ } else {
+ jam();
+ clblPageOver = clblPageOver + (8 - clblPageCounter);
+ clblPageCounter = 0;
+ }//if
+ /* ************************ */
+ /* FSWRITEREQ */
+ /* ************************ */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsOpptr.i;
+ signal->theData[3] = 0x1;
+ /* FLAG = START MEM PAGES, START FILE PAGES */
+ signal->theData[4] = ZUNDOPAGE_BASE_ADD;
+ signal->theData[5] = ZWRITE_UNDOPAGESIZE;
+ signal->theData[6] = tcupTmp1;
+ signal->theData[7] = cactiveUndoFilePage;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+ cactiveUndoFilePage = cactiveUndoFilePage + ZWRITE_UNDOPAGESIZE;
+ }//if
+}//Dbacc::checkUndoPages()
+
+/* --------------------------------------------------------------------------------- */
+/* UNDO_WRITING_PROCESS */
+/* INPUT: FRAGRECPTR, CUNDO_ELEM_INDEX, DATAPAGEPTR, CUNDOINFOLENGTH */
+/*       DESCRIPTION: CALLED WHEN THE PROCESS OF CREATING A LOCAL CHECKPOINT HAS     */
+/*                  STARTED. IF THE ACTIVE PAGE HAS NOT ALREADY BEEN SENT TO DISK,   */
+/*                  THE OLD VALUE OF THE ITEM THAT IS ABOUT TO BE UPDATED IS STORED  */
+/*                  ON THE ACTIVE UNDO PAGE. INFORMATION ABOUT THE UNDO PROCESS IN   */
+/*                  THE BLOCK AND IN THE FRAGMENT WILL BE UPDATED.                   */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::undoWritingProcess(Signal* signal)
+{
+ const Uint32 tactivePageDir = datapageptr.p->word32[ZPOS_PAGE_ID];
+ const Uint32 tpageType = (datapageptr.p->word32[ZPOS_EMPTY_LIST] >> ZPOS_PAGE_TYPE_BIT) & 3;
+ if (fragrecptr.p->fragState == LCP_SEND_PAGES) {
+ if (tpageType == ZNORMAL_PAGE_TYPE) {
+ /* --------------------------------------------------------------------------- */
+ /* HANDLING OF LOG OF NORMAL PAGES DURING WRITE OF NORMAL PAGES. */
+ /* --------------------------------------------------------------------------- */
+ if (tactivePageDir < fragrecptr.p->lcpDirIndex) {
+ jam();
+ /* ------------------------------------------------------------------- */
+ /* THIS PAGE HAS ALREADY BEEN WRITTEN IN THE LOCAL CHECKPOINT. */
+ /* ------------------------------------------------------------------- */
+ /*empty*/;
+ } else {
+ if (tactivePageDir >= fragrecptr.p->lcpMaxDirIndex) {
+ jam();
+ /* --------------------------------------------------------------------------- */
+ /* OBVIOUSLY THE FRAGMENT HAS EXPANDED SINCE THE START OF THE LOCAL CHECKPOINT.*/
+ /* WE NEED NOT LOG ANY UPDATES OF PAGES THAT DID NOT EXIST AT START OF LCP. */
+ /* --------------------------------------------------------------------------- */
+ /*empty*/;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------------- */
+ /* IN ALL OTHER CASES WE HAVE TO WRITE TO THE UNDO LOG. */
+ /* --------------------------------------------------------------------------- */
+ undopageptr.i = (cundoposition >> ZUNDOPAGEINDEXBITS) & (cundopagesize - 1);
+ ptrAss(undopageptr, undopage);
+ theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
+ tundoindex = theadundoindex + ZUNDOHEADSIZE;
+ writeUndoHeader(signal, tactivePageDir, UndoHeader::ZPAGE_INFO);
+ tundoElemIndex = cundoElemIndex;
+ writeUndoDataInfo(signal);
+ checkUndoPages(signal);
+ }//if
+ }//if
+ } else if (tpageType == ZOVERFLOW_PAGE_TYPE) {
+ /* --------------------------------------------------------------------------------- */
+ /* OVERFLOW PAGE HANDLING DURING WRITE OF NORMAL PAGES. */
+ /* --------------------------------------------------------------------------------- */
+ if (tactivePageDir >= fragrecptr.p->lcpMaxOverDirIndex) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* OBVIOUSLY THE FRAGMENT HAS EXPANDED THE NUMBER OF OVERFLOW PAGES SINCE THE */
+ /* START OF THE LOCAL CHECKPOINT. WE NEED NOT LOG ANY UPDATES OF PAGES THAT DID*/
+ /* NOT EXIST AT START OF LCP. */
+ /* --------------------------------------------------------------------------------- */
+ /*empty*/;
+ } else {
+ jam();
+ undopageptr.i = (cundoposition >> ZUNDOPAGEINDEXBITS) & (cundopagesize - 1);
+ ptrAss(undopageptr, undopage);
+ theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
+ tundoindex = theadundoindex + ZUNDOHEADSIZE;
+ writeUndoHeader(signal, tactivePageDir, UndoHeader::ZOVER_PAGE_INFO);
+ tundoElemIndex = cundoElemIndex;
+ writeUndoDataInfo(signal);
+ checkUndoPages(signal);
+ }//if
+ } else {
+ jam();
+ /* --------------------------------------------------------------------------- */
+ /* ONLY PAGE INFO AND OVERFLOW PAGE INFO CAN BE LOGGED BY THIS ROUTINE. A */
+ /* SERIOUS ERROR. */
+ /* --------------------------------------------------------------------------- */
+ sendSystemerror(signal);
+ }
+ } else {
+ if (fragrecptr.p->fragState == LCP_SEND_OVER_PAGES) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* DURING WRITE OF OVERFLOW PAGES WE NEED NOT WORRY ANYMORE ABOUT NORMAL PAGES.*/
+ /* --------------------------------------------------------------------------------- */
+ if (tpageType == ZOVERFLOW_PAGE_TYPE) {
+ if (tactivePageDir < fragrecptr.p->lcpDirIndex) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THIS PAGE HAS ALREADY BEEN WRITTEN IN THE LOCAL CHECKPOINT. */
+ /* --------------------------------------------------------------------------------- */
+ /*empty*/;
+ } else {
+ if (tactivePageDir >= fragrecptr.p->lcpMaxOverDirIndex) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* OBVIOUSLY THE FRAGMENT HAS EXPANDED THE NUMBER OF OVERFLOW PAGES SINCE THE */
+ /* START OF THE LOCAL CHECKPOINT. WE NEED NOT LOG ANY UPDATES OF PAGES THAT DID*/
+ /* NOT EXIST AT START OF LCP. */
+ /* --------------------------------------------------------------------------------- */
+ /*empty*/;
+ } else {
+ jam();
+ undopageptr.i = (cundoposition >> ZUNDOPAGEINDEXBITS) & (cundopagesize - 1);
+ ptrAss(undopageptr, undopage);
+ theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
+ tundoindex = theadundoindex + ZUNDOHEADSIZE;
+ writeUndoHeader(signal, tactivePageDir, UndoHeader::ZOVER_PAGE_INFO);
+ tundoElemIndex = cundoElemIndex;
+ writeUndoDataInfo(signal);
+ checkUndoPages(signal);
+ }//if
+ }//if
+ }
+ }//if
+ }//if
+}//Dbacc::undoWritingProcess()
+
+/* --------------------------------------------------------------------------------- */
+/*       OTHER STATES MEAN THAT WE HAVE ALREADY WRITTEN ALL PAGES BUT NOT YET RESET  */
+/* THE CREATE_LCP FLAG. */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* WRITE_UNDO_DATA_INFO */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::writeUndoDataInfo(Signal* signal)
+{
+ Uint32 twudiIndex;
+ Uint32 guard22;
+
+ guard22 = cundoinfolength;
+ arrGuard((tundoindex + guard22 - 1), 8192);
+ arrGuard((tundoElemIndex + guard22 - 1), 2048);
+ for (twudiIndex = 1; twudiIndex <= guard22; twudiIndex++) {
+ undopageptr.p->undoword[tundoindex] = datapageptr.p->word32[tundoElemIndex];
+ tundoindex++;
+ tundoElemIndex++;
+ }//for
+}//Dbacc::writeUndoDataInfo()
+
+/* --------------------------------------------------------------------------------- */
+/* WRITE_UNDO_HEADER */
+/* THE HEAD OF UNDO ELEMENT IS 24 BYTES AND CONTAINS THE FOLLOWING INFORMATION: */
+/* TABLE IDENTITY 32 BITS */
+/* ROOT FRAGMENT IDENTITY 32 BITS */
+/* LOCAL FRAGMENT IDENTITY 32 BITS */
+/*       LENGTH OF ELEMENT INFO (BIT 31 - 18)                            14 BITS     */
+/* INFO TYPE (BIT 17 - 14) 4 BITS */
+/* PAGE INDEX OF THE FIRST FIELD IN THE FRAGMENT (BIT 13 - 0) 14 BITS */
+/* DIRECTORY INDEX OF THE PAGE IN THE FRAGMENT 32 BITS */
+/* ADDRESS OF THE PREVIOUS ELEMENT OF THE FRAGMENT 64 BITS */
+/* ADDRESS OF THE PREVIOUS ELEMENT IN THE UNDO PAGES 64 BITS */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::writeUndoHeader(Signal* signal,
+ Uint32 logicalPageId,
+ UndoHeader::UndoHeaderType pageType)
+{
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ arrGuard(theadundoindex + 6, 8192);
+
+ // Set the structpointer to point at the undo page at the right address.
+ UndoHeader * const & undoHeaderPtr =
+ (UndoHeader *) &undopageptr.p->undoword[theadundoindex];
+
+ undoHeaderPtr->tableId = rootfragrecptr.p->mytabptr;
+ undoHeaderPtr->rootFragId = rootfragrecptr.p->fragmentid[0] >> 1;
+ undoHeaderPtr->localFragId = fragrecptr.p->myfid;
+ ndbrequire((undoHeaderPtr->localFragId >> 1) == undoHeaderPtr->rootFragId);
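+  /* PACK variousInfo ACCORDING TO THE LAYOUT ABOVE: ELEMENT INFO LENGTH IN     */
+  /* BITS 31-18, INFO TYPE IN BITS 17-14 AND THE ELEMENT PAGE INDEX             */
+  /* (cundoElemIndex) IN BITS 13-0.                                             */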
+ Uint32 Ttmp = cundoinfolength;
+ Ttmp = (Ttmp << 4) + pageType;
+ Ttmp = Ttmp << 14;
+ undoHeaderPtr->variousInfo = Ttmp + cundoElemIndex;
+ undoHeaderPtr->logicalPageId = logicalPageId;
+ undoHeaderPtr->prevUndoAddressForThisFrag = fragrecptr.p->prevUndoposition;
+ undoHeaderPtr->prevUndoAddress = cprevUndoaddress;
+}//Dbacc::writeUndoHeader()
+
+/* --------------------------------------------------------------------------------- */
+/* WRITE_UNDO_OP_INFO */
+/* FOR A LOCKED ELEMENT, OPERATION TYPE, UNDO OF ELEMENT HEADER AND THE LENGTH OF*/
+/* THE TUPLE KEY HAVE TO BE SAVED IN UNDO PAGES. IN THIS CASE AN UNDO ELEMENT */
+/*       INCLUDES THE FOLLOWING ITEMS.                                                */
+/* OPERATION TYPE 32 BITS */
+/* HASH VALUE 32 BITS */
+/* LENGTH OF THE TUPLE = N 32 BITS */
+/* TUPLE KEYS N * 32 BITS */
+/* */
+/* --------------------------------------------------------------------------------- */
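+// The code below stores, in order: the operation type, the hash value, the tuple
+// key length and one word read from the element itself (localkey1), and then sets
+// cundoinfolength = ZOP_HEAD_INFO_LN + 1 (ZOP_HEAD_INFO_LN presumably covering the
+// first three words).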
+void Dbacc::writeUndoOpInfo(Signal* signal)
+{
+ Page8Ptr locPageptr;
+
+ arrGuard((tundoindex + 3), 8192);
+ undopageptr.p->undoword[tundoindex] = operationRecPtr.p->operation;
+ undopageptr.p->undoword[tundoindex + 1] = operationRecPtr.p->hashValue;
+ undopageptr.p->undoword[tundoindex + 2] = operationRecPtr.p->tupkeylen;
+ tundoindex = tundoindex + 3;
+ // log localkey1
+ locPageptr.i = operationRecPtr.p->elementPage;
+ ptrCheckGuard(locPageptr, cpagesize, page8);
+ Uint32 Tforward = operationRecPtr.p->elementIsforward;
+ Uint32 TelemPtr = operationRecPtr.p->elementPointer;
+ TelemPtr += Tforward; // ZELEM_HEAD_SIZE
+ arrGuard(tundoindex+1, 8192);
+ undopageptr.p->undoword[tundoindex] = locPageptr.p->word32[TelemPtr];
+ tundoindex++;
+ cundoinfolength = ZOP_HEAD_INFO_LN + 1;
+}//Dbacc::writeUndoOpInfo()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF LOCAL CHECKPOINT MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* SYSTEM RESTART MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* SR_FRAGIDREQ REQUEST FOR RESTART OF A FRAGMENT */
+/* SENDER: LQH, LEVEL B */
+/* ENTER SR_FRAGIDREQ WITH */
+/* TUSERPTR, LQH CONNECTION PTR */
+/* TUSERBLOCKREF, LQH BLOCK REFERENCE */
+/* TCHECKPOINTID, THE CHECKPOINT NUMBER TO USE */
+/* (E.G. 1,2 OR 3) */
+/* TABPTR, TABLE ID = TABLE RECORD POINTER */
+/* TFID, ROOT FRAGMENT ID */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* SR_FRAGIDREQ REQUEST FOR LIST OF STOPPED OPERATIONS */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execSR_FRAGIDREQ(Signal* signal)
+{
+ jamEntry();
+ tuserptr = signal->theData[0]; /* LQH CONNECTION PTR */
+ tuserblockref = signal->theData[1]; /* LQH BLOCK REFERENCE */
+ tcheckpointid = signal->theData[2]; /* THE CHECKPOINT NUMBER TO USE */
+ /* (E.G. 1,2 OR 3) */
+ tabptr.i = signal->theData[3];
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+ /* TABLE ID = TABLE RECORD POINTER */
+ tfid = signal->theData[4]; /* ROOT FRAGMENT ID */
+ tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
+ seizeLcpConnectRec(signal);
+ initLcpConnRec(signal);
+
+ ndbrequire(getrootfragmentrec(signal, rootfragrecptr, tfid));
+ rootfragrecptr.p->lcpPtr = lcpConnectptr.i;
+ lcpConnectptr.p->rootrecptr = rootfragrecptr.i;
+ lcpConnectptr.p->localCheckPid = tcheckpointid;
+ for (Uint32 i = 0; i < 2; i++) {
+ Page8Ptr zeroPagePtr;
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[i];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ seizeLcpPage(zeroPagePtr);
+ fragrecptr.p->zeroPagePtr = zeroPagePtr.i;
+ }//for
+
+ /* -------------------- OPEN THE DATA FILE WHICH BELONGS TO TFID AND TCHECKPOINTID -------- */
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ tfid = rootfragrecptr.p->fragmentid[0];
+ tmp = 0;
+ srOpenDataFileLoopLab(signal);
+
+ return;
+}//Dbacc::execSR_FRAGIDREQ()
+
+void Dbacc::srOpenDataFileLoopLab(Signal* signal)
+{
+ /* D6 AT FSOPENREQ. FILE TYPE = .DATA */
+ tmp1 = 0x010003ff; /* VERSION OF FILENAME = 1 */
+ tmp2 = 0x0; /* D7 DON'T CREATE, READ ONLY */
+ ndbrequire(cfsFirstfreeconnect != RNIL);
+ seizeFsConnectRec(signal);
+
+ fragrecptr.p->fsConnPtr = fsConnectptr.i;
+ fsConnectptr.p->fragrecPtr = fragrecptr.i;
+ fsConnectptr.p->fsState = WAIT_OPEN_DATA_FILE_FOR_READ;
+ fsConnectptr.p->activeFragId = tmp; /* LOCAL FRAG INDEX */
+ /* ************************ */
+ /* FSOPENREQ */
+ /* ************************ */
+ signal->theData[0] = cownBlockref;
+ signal->theData[1] = fsConnectptr.i;
+ signal->theData[2] = rootfragrecptr.p->mytabptr; /* TABLE IDENTITY */
+ signal->theData[3] = tfid; /* FRAGMENT IDENTITY */
+ signal->theData[4] = lcpConnectptr.p->localCheckPid; /* CHECKPOINT ID */
+ signal->theData[5] = tmp1;
+ signal->theData[6] = tmp2;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ return;
+}//Dbacc::srOpenDataFileLoopLab()
+
+void Dbacc::srFsOpenConfLab(Signal* signal)
+{
+ fsConnectptr.p->fsState = WAIT_READ_PAGE_ZERO;
+ /* ------------------------ READ ZERO PAGE ---------- */
+ fragrecptr.i = fsConnectptr.p->fragrecPtr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = 0x0;
+ /* FLAG = LIST MEM PAGES, LIST FILE PAGES */
+ signal->theData[4] = ZPAGE8_BASE_ADD;
+ signal->theData[5] = 1; /* NO OF PAGES */
+ signal->theData[6] = fragrecptr.p->zeroPagePtr; /* ZERO PAGE */
+ signal->theData[7] = 0; /* PAGE ZERO OF THE DATA FILE */
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
+ return;
+}//Dbacc::srFsOpenConfLab()
+
+void Dbacc::srReadPageZeroLab(Signal* signal)
+{
+ Page8Ptr srzPageptr;
+
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ fragrecptr.p->activeDataFilePage = 1;
+ srzPageptr.i = fragrecptr.p->zeroPagePtr;
+ ptrCheckGuard(srzPageptr, cpagesize, page8);
+ /* --------------------------------------------------------------------------------- */
+ // Check that the checksum of the zero page is ok.
+ /* --------------------------------------------------------------------------------- */
+ ccoPageptr.p = srzPageptr.p;
+ checksumControl(signal, (Uint32)0);
+ if (tresult > 0) {
+ jam();
+ return; // We will crash through a DEBUG_SIG
+ }//if
+
+ ndbrequire(srzPageptr.p->word32[ZPAGEZERO_FRAGID0] == rootfragrecptr.p->fragmentid[0]);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ if (fsConnectptr.p->activeFragId == 0) {
+ jam();
+ rootfragrecptr.p->fragmentid[1] = srzPageptr.p->word32[ZPAGEZERO_FRAGID1];
+ /* --------------------------- OPEN THE DATA FILE FOR THE NEXT LOCAL FRAGMENT ------------- */
+ tfid = rootfragrecptr.p->fragmentid[1];
+ tmp = 1; /* LOCAL FRAG INDEX */
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ srOpenDataFileLoopLab(signal);
+ return;
+ } else {
+ jam();
+ lcpConnectptr.p->lcpstate = LCP_ACTIVE;
+ signal->theData[0] = lcpConnectptr.p->lcpUserptr;
+ signal->theData[1] = lcpConnectptr.i;
+ signal->theData[2] = 2; /* NO OF LOCAL FRAGMENTS */
+ signal->theData[3] = srzPageptr.p->word32[ZPAGEZERO_FRAGID0];
+ /* ROOTFRAGRECPTR:FRAGMENTID(0) */
+ signal->theData[4] = srzPageptr.p->word32[ZPAGEZERO_FRAGID1];
+ /* ROOTFRAGRECPTR:FRAGMENTID(1) */
+ signal->theData[5] = RNIL;
+ signal->theData[6] = RNIL;
+ signal->theData[7] = rootfragrecptr.p->fragmentptr[0];
+ signal->theData[8] = rootfragrecptr.p->fragmentptr[1];
+ signal->theData[9] = srzPageptr.p->word32[ZPAGEZERO_HASH_CHECK];
+ sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_SR_FRAGIDCONF, signal, 10, JBB);
+ }//if
+ return;
+}//Dbacc::srReadPageZeroLab()
+
+void Dbacc::initFragAdd(Signal* signal,
+ Uint32 rootFragIndex,
+ Uint32 rootIndex,
+ FragmentrecPtr regFragPtr)
+{
+ const AccFragReq * const req = (AccFragReq*)&signal->theData[0];
+ Uint32 lhFragBits = req->lhFragBits + 1;
+ Uint32 minLoadFactor = (req->minLoadFactor * ZBUF_SIZE) / 100;
+ Uint32 maxLoadFactor = (req->maxLoadFactor * ZBUF_SIZE) / 100;
+ if (minLoadFactor >= maxLoadFactor) {
+ jam();
+ minLoadFactor = maxLoadFactor - 1;
+ }//if
+ regFragPtr.p->fragState = ACTIVEFRAG;
+ // NOTE: next line must match calculation in Dblqh::execLQHFRAGREQ
+ regFragPtr.p->myfid = (req->fragId << 1) | rootFragIndex;
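+ // A root fragment id F therefore maps to local fragment ids 2*F and 2*F + 1,
+ // which is what the (localFragId >> 1) == rootFragId checks elsewhere in this
+ // file rely on.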
+ regFragPtr.p->myroot = rootIndex;
+ regFragPtr.p->myTableId = req->tableId;
+ ndbrequire(req->kValue == 6);
+ regFragPtr.p->k = req->kValue; /* TK_SIZE = 6 IN THIS VERSION */
+ regFragPtr.p->expandCounter = 0;
+
+ /**
+ * Only allow shrink during SR
+ * - to make sure we don't run out of pages during REDO log execution
+ *
+ * Is later restored to 0 by LQH at end of REDO log execution
+ */
+ regFragPtr.p->expandFlag = (getNodeState().getSystemRestartInProgress()?1:0);
+ regFragPtr.p->p = 0;
+ regFragPtr.p->maxp = (1 << req->kValue) - 1;
+ regFragPtr.p->minloadfactor = minLoadFactor;
+ regFragPtr.p->maxloadfactor = maxLoadFactor;
+ regFragPtr.p->slack = (regFragPtr.p->maxp + 1) * maxLoadFactor;
+ regFragPtr.p->lhfragbits = lhFragBits;
+ regFragPtr.p->lhdirbits = 0;
+ regFragPtr.p->hashcheckbit = 0; //lhFragBits;
+ regFragPtr.p->localkeylen = req->localKeyLen;
+ regFragPtr.p->nodetype = (req->reqInfo >> 4) & 0x3;
+ regFragPtr.p->lastOverIndex = 0;
+ regFragPtr.p->dirsize = 1;
+ regFragPtr.p->loadingFlag = ZFALSE;
+ regFragPtr.p->keyLength = req->keyLength;
+ ndbrequire(req->keyLength != 0);
+ regFragPtr.p->elementLength = ZELEM_HEAD_SIZE + regFragPtr.p->localkeylen;
+ Uint32 Tmp1 = (regFragPtr.p->maxp + 1) + regFragPtr.p->p;
+ Uint32 Tmp2 = regFragPtr.p->maxloadfactor - regFragPtr.p->minloadfactor;
+ Tmp2 = Tmp1 * Tmp2;
+ regFragPtr.p->slackCheck = Tmp2;
+}//Dbacc::initFragAdd()
+
+void Dbacc::initFragGeneral(FragmentrecPtr regFragPtr)
+{
+ regFragPtr.p->directory = RNIL;
+ regFragPtr.p->overflowdir = RNIL;
+ regFragPtr.p->fsConnPtr = RNIL;
+ regFragPtr.p->firstOverflowRec = RNIL;
+ regFragPtr.p->lastOverflowRec = RNIL;
+ regFragPtr.p->firstWaitInQueOp = RNIL;
+ regFragPtr.p->lastWaitInQueOp = RNIL;
+ regFragPtr.p->sentWaitInQueOp = RNIL;
+ regFragPtr.p->lockOwnersList = RNIL;
+ regFragPtr.p->firstFreeDirindexRec = RNIL;
+ regFragPtr.p->zeroPagePtr = RNIL;
+
+ regFragPtr.p->activeDataPage = 0;
+ regFragPtr.p->createLcp = ZFALSE;
+ regFragPtr.p->stopQueOp = ZFALSE;
+ regFragPtr.p->hasCharAttr = ZFALSE;
+ regFragPtr.p->nextAllocPage = 0;
+ regFragPtr.p->nrWaitWriteUndoExit = 0;
+ regFragPtr.p->lastUndoIsStored = ZFALSE;
+ regFragPtr.p->loadingFlag = ZFALSE;
+ regFragPtr.p->fragState = FREEFRAG;
+ for (Uint32 i = 0; i < ZWRITEPAGESIZE; i++) {
+ regFragPtr.p->datapages[i] = RNIL;
+ }//for
+ for (Uint32 j = 0; j < 4; j++) {
+ regFragPtr.p->longKeyPageArray[j] = RNIL;
+ }//for
+}//Dbacc::initFragGeneral()
+
+void Dbacc::initFragSr(FragmentrecPtr regFragPtr, Page8Ptr regPagePtr)
+{
+ regFragPtr.p->prevUndoposition = regPagePtr.p->word32[ZPAGEZERO_PREV_UNDOP];
+ regFragPtr.p->noOfStoredOverPages = regPagePtr.p->word32[ZPAGEZERO_NO_OVER_PAGE];
+ regFragPtr.p->noStoredPages = regPagePtr.p->word32[ZPAGEZERO_NO_PAGES];
+ regFragPtr.p->dirsize = regPagePtr.p->word32[ZPAGEZERO_DIRSIZE];
+ regFragPtr.p->expandCounter = regPagePtr.p->word32[ZPAGEZERO_EXPCOUNTER];
+ regFragPtr.p->slack = regPagePtr.p->word32[ZPAGEZERO_SLACK];
+ regFragPtr.p->hashcheckbit = regPagePtr.p->word32[ZPAGEZERO_HASHCHECKBIT];
+ regFragPtr.p->k = regPagePtr.p->word32[ZPAGEZERO_K];
+ regFragPtr.p->lhfragbits = regPagePtr.p->word32[ZPAGEZERO_LHFRAGBITS];
+ regFragPtr.p->lhdirbits = regPagePtr.p->word32[ZPAGEZERO_LHDIRBITS];
+ regFragPtr.p->localkeylen = regPagePtr.p->word32[ZPAGEZERO_LOCALKEYLEN];
+ regFragPtr.p->maxp = regPagePtr.p->word32[ZPAGEZERO_MAXP];
+ regFragPtr.p->maxloadfactor = regPagePtr.p->word32[ZPAGEZERO_MAXLOADFACTOR];
+ regFragPtr.p->minloadfactor = regPagePtr.p->word32[ZPAGEZERO_MINLOADFACTOR];
+ regFragPtr.p->myfid = regPagePtr.p->word32[ZPAGEZERO_MYFID];
+ regFragPtr.p->lastOverIndex = regPagePtr.p->word32[ZPAGEZERO_LAST_OVER_INDEX];
+ regFragPtr.p->nodetype = regPagePtr.p->word32[ZPAGEZERO_NODETYPE];
+ regFragPtr.p->p = regPagePtr.p->word32[ZPAGEZERO_P];
+ regFragPtr.p->elementLength = regPagePtr.p->word32[ZPAGEZERO_ELEMENT_LENGTH];
+ regFragPtr.p->keyLength = regPagePtr.p->word32[ZPAGEZERO_KEY_LENGTH];
+ regFragPtr.p->slackCheck = regPagePtr.p->word32[ZPAGEZERO_SLACK_CHECK];
+
+ regFragPtr.p->loadingFlag = ZTRUE;
+
+}//Dbacc::initFragSr()
+
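+// initFragPageZero() below is the write-side counterpart of initFragSr() above:
+// the same ZPAGEZERO_* words are written when page zero is prepared and read back
+// at system restart, so the two field lists have to stay in sync.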
+void Dbacc::initFragPageZero(FragmentrecPtr regFragPtr, Page8Ptr regPagePtr)
+{
+ //------------------------------------------------------------------
+ // PREV_UNDOP, NEXT_UNDO_FILE, NO_OVER_PAGE, NO_PAGES
+ // is set at end of copy phase
+ //------------------------------------------------------------------
+ regPagePtr.p->word32[ZPAGEZERO_DIRSIZE] = regFragPtr.p->dirsize;
+ regPagePtr.p->word32[ZPAGEZERO_EXPCOUNTER] = regFragPtr.p->expandCounter;
+ regPagePtr.p->word32[ZPAGEZERO_SLACK] = regFragPtr.p->slack;
+ regPagePtr.p->word32[ZPAGEZERO_HASHCHECKBIT] = regFragPtr.p->hashcheckbit;
+ regPagePtr.p->word32[ZPAGEZERO_K] = regFragPtr.p->k;
+ regPagePtr.p->word32[ZPAGEZERO_LHFRAGBITS] = regFragPtr.p->lhfragbits;
+ regPagePtr.p->word32[ZPAGEZERO_LHDIRBITS] = regFragPtr.p->lhdirbits;
+ regPagePtr.p->word32[ZPAGEZERO_LOCALKEYLEN] = regFragPtr.p->localkeylen;
+ regPagePtr.p->word32[ZPAGEZERO_MAXP] = regFragPtr.p->maxp;
+ regPagePtr.p->word32[ZPAGEZERO_MAXLOADFACTOR] = regFragPtr.p->maxloadfactor;
+ regPagePtr.p->word32[ZPAGEZERO_MINLOADFACTOR] = regFragPtr.p->minloadfactor;
+ regPagePtr.p->word32[ZPAGEZERO_MYFID] = regFragPtr.p->myfid;
+ regPagePtr.p->word32[ZPAGEZERO_LAST_OVER_INDEX] = regFragPtr.p->lastOverIndex;
+ regPagePtr.p->word32[ZPAGEZERO_NODETYPE] = regFragPtr.p->nodetype;
+ regPagePtr.p->word32[ZPAGEZERO_P] = regFragPtr.p->p;
+ regPagePtr.p->word32[ZPAGEZERO_ELEMENT_LENGTH] = regFragPtr.p->elementLength;
+ regPagePtr.p->word32[ZPAGEZERO_KEY_LENGTH] = regFragPtr.p->keyLength;
+ regPagePtr.p->word32[ZPAGEZERO_SLACK_CHECK] = regFragPtr.p->slackCheck;
+}//Dbacc::initFragPageZero()
+
+void Dbacc::initRootFragPageZero(RootfragmentrecPtr rootPtr, Page8Ptr regPagePtr)
+{
+ regPagePtr.p->word32[ZPAGEZERO_TABID] = rootPtr.p->mytabptr;
+ regPagePtr.p->word32[ZPAGEZERO_FRAGID0] = rootPtr.p->fragmentid[0];
+ regPagePtr.p->word32[ZPAGEZERO_FRAGID1] = rootPtr.p->fragmentid[1];
+ regPagePtr.p->word32[ZPAGEZERO_HASH_CHECK] = rootPtr.p->roothashcheck;
+ regPagePtr.p->word32[ZPAGEZERO_NO_OF_ELEMENTS] = rootPtr.p->noOfElements;
+}//Dbacc::initRootFragPageZero()
+
+void Dbacc::initRootFragSr(RootfragmentrecPtr rootPtr, Page8Ptr regPagePtr)
+{
+ rootPtr.p->roothashcheck = regPagePtr.p->word32[ZPAGEZERO_HASH_CHECK];
+ rootPtr.p->noOfElements = regPagePtr.p->word32[ZPAGEZERO_NO_OF_ELEMENTS];
+}//Dbacc::initRootFragSr()
+
+/* ******************--------------------------------------------------------------- */
+/* ACC_SRREQ SYSTEM RESTART OF A LOCAL CHECK POINT */
+/* SENDER: LQH, LEVEL B */
+/* ENTER ACC_SRREQ WITH */
+/* LCP_CONNECTPTR, OPERATION RECORD PTR */
+/* TMP2, LQH'S LOCAL FRAG CHECK VALUE */
+/* TFID, LOCAL FRAG ID */
+/* TMP1, LOCAL CHECKPOINT ID */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACC_SRREQ PERFORM A LOCAL CHECK POINT */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execACC_SRREQ(Signal* signal)
+{
+ Page8Ptr asrPageidptr;
+ jamEntry();
+ lcpConnectptr.i = signal->theData[0];
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ Uint32 lqhPtr = signal->theData[1];
+ Uint32 fragId = signal->theData[2];
+ Uint32 lcpId = signal->theData[3];
+ tresult = 0;
+ ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
+ rootfragrecptr.i = lcpConnectptr.p->rootrecptr;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (rootfragrecptr.p->fragmentid[0] == fragId) {
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ } else {
+ ndbrequire(rootfragrecptr.p->fragmentid[1] == fragId);
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
+ }//if
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ fragrecptr.p->lcpLqhPtr = lqhPtr;
+ fragrecptr.p->localCheckpId = lcpId;
+ asrPageidptr.i = fragrecptr.p->zeroPagePtr;
+ ptrCheckGuard(asrPageidptr, cpagesize, page8);
+ ndbrequire(asrPageidptr.p->word32[ZPAGEZERO_TABID] == rootfragrecptr.p->mytabptr);
+ ndbrequire(asrPageidptr.p->word32[ZPAGEZERO_FRAGID0] == rootfragrecptr.p->fragmentid[0]);
+ ndbrequire(asrPageidptr.p->word32[ZPAGEZERO_FRAGID1] == rootfragrecptr.p->fragmentid[1]);
+ initRootFragSr(rootfragrecptr, asrPageidptr);
+ initFragSr(fragrecptr, asrPageidptr);
+ for (Uint32 i = 0; i < ZMAX_UNDO_VERSION; i++) {
+ jam();
+ if (csrVersList[i] != RNIL) {
+ jam();
+ srVersionPtr.i = csrVersList[i];
+ ptrCheckGuard(srVersionPtr, csrVersionRecSize, srVersionRec);
+ if (fragrecptr.p->localCheckpId == srVersionPtr.p->checkPointId) {
+ jam();
+ ndbrequire(srVersionPtr.p->checkPointId == asrPageidptr.p->word32[ZPAGEZERO_NEXT_UNDO_FILE]);
+ /*--------------------------------------------------------------------------------*/
+ /* SINCE -1 IS THE END-OF-LOG CODE WE MUST TREAT IT WITH CARE. WHEN */
+ /* COMPARING, IT IS LARGER THAN EVERYTHING ELSE BUT SHOULD BE TREATED AS THE */
+ /* SMALLEST POSSIBLE VALUE, MEANING EMPTY. */
+ /*--------------------------------------------------------------------------------*/
+ if (fragrecptr.p->prevUndoposition != cminusOne) {
+ if (srVersionPtr.p->prevAddress < fragrecptr.p->prevUndoposition) {
+ jam();
+ srVersionPtr.p->prevAddress = fragrecptr.p->prevUndoposition;
+ } else if (srVersionPtr.p->prevAddress == cminusOne) {
+ jam();
+ srVersionPtr.p->prevAddress = fragrecptr.p->prevUndoposition;
+ }//if
+ }//if
+ srAllocPage0011Lab(signal);
+ return;
+ }//if
+ } else {
+ jam();
+ seizeSrVerRec(signal);
+ srVersionPtr.p->checkPointId = fragrecptr.p->localCheckpId;
+ srVersionPtr.p->prevAddress = fragrecptr.p->prevUndoposition;
+ csrVersList[i] = srVersionPtr.i;
+ srAllocPage0011Lab(signal);
+ return;
+ }//if
+ }//for
+ ndbrequire(false);
+}//Dbacc::execACC_SRREQ()
+
+void
+Dbacc::releaseLogicalPage(Fragmentrec * fragP, Uint32 logicalPageId){
+ Ptr<struct DirRange> dirRangePtr;
+ dirRangePtr.i = fragP->directory;
+ ptrCheckGuard(dirRangePtr, cdirrangesize, dirRange);
+
+ const Uint32 lp1 = logicalPageId >> 8;
+ const Uint32 lp2 = logicalPageId & 0xFF;
+ ndbrequire(lp1 < 256);
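+ // The page directory is two-level: the upper 8 bits of the logical page id
+ // select a Directoryarray within the DirRange and the lower 8 bits select the
+ // page slot within that array, giving 256 * 256 addressable logical pages.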
+
+ Ptr<struct Directoryarray> dirArrPtr;
+ dirArrPtr.i = dirRangePtr.p->dirArray[lp1];
+ ptrCheckGuard(dirArrPtr, cdirarraysize, directoryarray);
+
+ const Uint32 physicalPageId = dirArrPtr.p->pagep[lp2];
+
+ rpPageptr.i = physicalPageId;
+ ptrCheckGuard(rpPageptr, cpagesize, page8);
+ releasePage(0);
+
+ dirArrPtr.p->pagep[lp2] = RNIL;
+}
+
+void Dbacc::srAllocPage0011Lab(Signal* signal)
+{
+ releaseLogicalPage(fragrecptr.p, 0);
+
+#if JONAS
+ ndbrequire(cfirstfreeDirrange != RNIL);
+ seizeDirrange(signal);
+ fragrecptr.p->directory = newDirRangePtr.i;
+ ndbrequire(cfirstfreeDirrange != RNIL);
+ seizeDirrange(signal);
+ fragrecptr.p->overflowdir = newDirRangePtr.i;
+ seizeDirectory(signal);
+ ndbrequire(tresult < ZLIMIT_OF_ERROR);
+ newDirRangePtr.p->dirArray[0] = sdDirptr.i;
+#endif
+
+ fragrecptr.p->nextAllocPage = 0;
+ fragrecptr.p->fragState = SR_READ_PAGES;
+ srReadPagesLab(signal);
+ return;
+}//Dbacc::srAllocPage0011Lab()
+
+void Dbacc::srReadPagesLab(Signal* signal)
+{
+ if (fragrecptr.p->nextAllocPage >= fragrecptr.p->noStoredPages) {
+ /*--------------------------------------------------------------------------------*/
+ /* WE HAVE NOW READ ALL NORMAL PAGES FROM THE FILE. */
+ /*--------------------------------------------------------------------------------*/
+ if (fragrecptr.p->nextAllocPage == fragrecptr.p->dirsize) {
+ jam();
+ /*--------------------------------------------------------------------------------*/
+ /* WE HAVE NOW READ ALL NORMAL PAGES AND ALLOCATED ALL THE NEEDED PAGES. */
+ /*--------------------------------------------------------------------------------*/
+ fragrecptr.p->nextAllocPage = 0; /* THE NEXT OVER FLOW PAGE WHICH WILL BE READ */
+ fragrecptr.p->fragState = SR_READ_OVER_PAGES;
+ srReadOverPagesLab(signal);
+ } else {
+ ndbrequire(fragrecptr.p->nextAllocPage < fragrecptr.p->dirsize);
+ jam();
+ /*--------------------------------------------------------------------------------*/
+ /* WE NEEDED TO ALLOCATE PAGES THAT WERE DEALLOCATED DURING THE LOCAL */
+ /* CHECKPOINT. */
+ /* ALLOCATE THE PAGE AND INITIALISE IT. THEN WE INSERT A REAL-TIME BREAK. */
+ /*--------------------------------------------------------------------------------*/
+ seizePage(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ tipPageId = fragrecptr.p->nextAllocPage;
+ inpPageptr.i = spPageptr.i;
+ ptrCheckGuard(inpPageptr, cpagesize, page8);
+ initPage(signal);
+ fragrecptr.p->noOfExpectedPages = 1;
+ fragrecptr.p->datapages[0] = spPageptr.i;
+ signal->theData[0] = ZSR_READ_PAGES_ALLOC;
+ signal->theData[1] = fragrecptr.i;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+ return;
+ }//if
+ Uint32 limitLoop;
+ if ((fragrecptr.p->noStoredPages - fragrecptr.p->nextAllocPage) < ZWRITEPAGESIZE) {
+ jam();
+ limitLoop = fragrecptr.p->noStoredPages - fragrecptr.p->nextAllocPage;
+ } else {
+ jam();
+ limitLoop = ZWRITEPAGESIZE;
+ }//if
+ ndbrequire(limitLoop <= 8);
+ for (Uint32 i = 0; i < limitLoop; i++) {
+ jam();
+ seizePage(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ fragrecptr.p->datapages[i] = spPageptr.i;
+ signal->theData[i + 6] = spPageptr.i;
+ }//for
+ signal->theData[limitLoop + 6] = fragrecptr.p->activeDataFilePage;
+ fragrecptr.p->noOfExpectedPages = limitLoop;
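+ // With format flag 2 ("list of memory pages, range of file pages") the request
+ // below appears to carry the seized page i-values in theData[6 .. limitLoop + 5]
+ // and the first file page of the range in theData[limitLoop + 6].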
+ /* -----------------SEND READ PAGES SIGNAL TO THE FILE MANAGER --------- */
+ fsConnectptr.i = fragrecptr.p->fsConnPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ fsConnectptr.p->fsState = WAIT_READ_DATA;
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = 2;
+ /* FLAG = LIST MEM PAGES, RANGE OF FILE PAGES */
+ signal->theData[4] = ZPAGE8_BASE_ADD;
+ signal->theData[5] = fragrecptr.p->noOfExpectedPages;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 15, JBA);
+ return;
+}//Dbacc::srReadPagesLab()
+
+void Dbacc::storeDataPageInDirectoryLab(Signal* signal)
+{
+ fragrecptr.p->activeDataFilePage += fragrecptr.p->noOfExpectedPages;
+ srReadPagesAllocLab(signal);
+ return;
+}//Dbacc::storeDataPageInDirectoryLab()
+
+void Dbacc::srReadPagesAllocLab(Signal* signal)
+{
+ DirRangePtr srpDirRangePtr;
+ DirectoryarrayPtr srpDirptr;
+ DirectoryarrayPtr srpOverflowDirptr;
+ Page8Ptr srpPageidptr;
+
+ if (fragrecptr.p->fragState == SR_READ_PAGES) {
+ jam();
+ for (Uint32 i = 0; i < fragrecptr.p->noOfExpectedPages; i++) {
+ jam();
+ tmpP = fragrecptr.p->nextAllocPage;
+ srpDirRangePtr.i = fragrecptr.p->directory;
+ tmpP2 = tmpP >> 8;
+ tmp = tmpP & 0xff;
+ ptrCheckGuard(srpDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(tmpP2, 256);
+ if (srpDirRangePtr.p->dirArray[tmpP2] == RNIL) {
+ seizeDirectory(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ srpDirptr.i = sdDirptr.i;
+ srpDirRangePtr.p->dirArray[tmpP2] = srpDirptr.i;
+ } else {
+ jam();
+ srpDirptr.i = srpDirRangePtr.p->dirArray[tmpP2];
+ }//if
+ ptrCheckGuard(srpDirptr, cdirarraysize, directoryarray);
+ arrGuard(i, 8);
+ srpDirptr.p->pagep[tmp] = fragrecptr.p->datapages[i];
+ srpPageidptr.i = fragrecptr.p->datapages[i];
+ ptrCheckGuard(srpPageidptr, cpagesize, page8);
+ ndbrequire(srpPageidptr.p->word32[ZPOS_PAGE_ID] == fragrecptr.p->nextAllocPage);
+ ndbrequire(((srpPageidptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == 0);
+ ccoPageptr.p = srpPageidptr.p;
+ checksumControl(signal, (Uint32)1);
+ if (tresult > 0) {
+ jam();
+ return; // We will crash through a DEBUG_SIG
+ }//if
+ dbgWord32(srpPageidptr, ZPOS_OVERFLOWREC, RNIL);
+ srpPageidptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
+ fragrecptr.p->datapages[i] = RNIL;
+ fragrecptr.p->nextAllocPage++;
+ }//for
+ srReadPagesLab(signal);
+ return;
+ } else {
+ ndbrequire(fragrecptr.p->fragState == SR_READ_OVER_PAGES);
+ for (Uint32 i = 0; i < fragrecptr.p->noOfExpectedPages; i++) {
+ jam();
+ arrGuard(i, 8);
+ srpPageidptr.i = fragrecptr.p->datapages[i];
+ ptrCheckGuard(srpPageidptr, cpagesize, page8);
+ tmpP = srpPageidptr.p->word32[ZPOS_PAGE_ID]; /* DIR INDEX OF THE OVERFLOW PAGE */
+ /*--------------------------------------------------------------------------------*/
+ /* IT IS POSSIBLE THAT WE HAVE LOGICAL PAGES WHICH ARE NOT PART OF THE LOCAL*/
+ /* CHECKPOINT. THUS WE USE THE LOGICAL PAGE ID FROM THE PAGE HERE. */
+ /*--------------------------------------------------------------------------------*/
+ srpDirRangePtr.i = fragrecptr.p->overflowdir;
+ tmpP2 = tmpP >> 8;
+ tmpP = tmpP & 0xff;
+ ptrCheckGuard(srpDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(tmpP2, 256);
+ if (srpDirRangePtr.p->dirArray[tmpP2] == RNIL) {
+ jam();
+ seizeDirectory(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ srpDirRangePtr.p->dirArray[tmpP2] = sdDirptr.i;
+ }//if
+ srpOverflowDirptr.i = srpDirRangePtr.p->dirArray[tmpP2];
+ ndbrequire(((srpPageidptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) != 0);
+ ndbrequire(((srpPageidptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) != 3);
+ ptrCheckGuard(srpOverflowDirptr, cdirarraysize, directoryarray);
+ ndbrequire(srpOverflowDirptr.p->pagep[tmpP] == RNIL);
+ srpOverflowDirptr.p->pagep[tmpP] = srpPageidptr.i;
+ ccoPageptr.p = srpPageidptr.p;
+ checksumControl(signal, (Uint32)1);
+ ndbrequire(tresult == 0);
+ dbgWord32(srpPageidptr, ZPOS_OVERFLOWREC, RNIL);
+ srpPageidptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
+ fragrecptr.p->nextAllocPage++;
+ }//for
+ srReadOverPagesLab(signal);
+ return;
+ }//if
+}//Dbacc::srReadPagesAllocLab()
+
+void Dbacc::srReadOverPagesLab(Signal* signal)
+{
+ if (fragrecptr.p->nextAllocPage >= fragrecptr.p->noOfStoredOverPages) {
+ fragrecptr.p->nextAllocPage = 0;
+ if (fragrecptr.p->prevUndoposition == cminusOne) {
+ jam();
+ /* ************************ */
+ /* ACC_OVER_REC */
+ /* ************************ */
+ /*--------------------------------------------------------------------------------*/
+ /* UPDATE FREE LIST OF OVERFLOW PAGES AS PART OF SYSTEM RESTART AFTER */
+ /* READING PAGES AND EXECUTING THE UNDO LOG. */
+ /*--------------------------------------------------------------------------------*/
+ signal->theData[0] = fragrecptr.i;
+ sendSignal(cownBlockref, GSN_ACC_OVER_REC, signal, 1, JBB);
+ } else {
+ jam();
+ srCloseDataFileLab(signal);
+ }//if
+ return;
+ }//if
+ Uint32 limitLoop;
+ if ((fragrecptr.p->noOfStoredOverPages - fragrecptr.p->nextAllocPage) < ZWRITEPAGESIZE) {
+ jam();
+ limitLoop = fragrecptr.p->noOfStoredOverPages - fragrecptr.p->nextAllocPage;
+ } else {
+ jam();
+ limitLoop = ZWRITEPAGESIZE;
+ }//if
+ ndbrequire(limitLoop <= 8);
+ for (Uint32 i = 0; i < limitLoop; i++) {
+ jam();
+ seizePage(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ fragrecptr.p->datapages[i] = spPageptr.i;
+ signal->theData[i + 6] = spPageptr.i;
+ }//for
+ fragrecptr.p->noOfExpectedPages = limitLoop;
+ signal->theData[limitLoop + 6] = fragrecptr.p->activeDataFilePage;
+ /* -----------------SEND READ PAGES SIGNAL TO THE FILE MANAGER --------- */
+ fsConnectptr.i = fragrecptr.p->fsConnPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ fsConnectptr.p->fsState = WAIT_READ_DATA;
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = 2;
+ signal->theData[4] = ZPAGE8_BASE_ADD;
+ signal->theData[5] = fragrecptr.p->noOfExpectedPages;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 15, JBA);
+ return;
+}//Dbacc::srReadOverPagesLab()
+
+void Dbacc::srCloseDataFileLab(Signal* signal)
+{
+ fsConnectptr.i = fragrecptr.p->fsConnPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ fsConnectptr.p->fsState = SR_CLOSE_DATA;
+ /* ************************ */
+ /* FSCLOSEREQ */
+ /* ************************ */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = 0;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+ return;
+}//Dbacc::srCloseDataFileLab()
+
+/* ************************ */
+/* ACC_SRCONF */
+/* ************************ */
+void Dbacc::sendaccSrconfLab(Signal* signal)
+{
+ fragrecptr.i = fsConnectptr.p->fragrecPtr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ releaseFsConnRec(signal);
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ fragrecptr.p->fragState = ACTIVEFRAG;
+ fragrecptr.p->fsConnPtr = RNIL;
+ for (Uint32 i = 0; i < ZWRITEPAGESIZE; i++) {
+ fragrecptr.p->datapages[i] = RNIL;
+ }//for
+ rlpPageptr.i = fragrecptr.p->zeroPagePtr;
+ ptrCheckGuard(rlpPageptr, cpagesize, page8);
+ releaseLcpPage(signal);
+ fragrecptr.p->zeroPagePtr = RNIL;
+ signal->theData[0] = fragrecptr.p->lcpLqhPtr;
+ sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_SRCONF, signal, 1, JBB);
+ lcpConnectptr.p->noOfLcpConf++;
+ if (lcpConnectptr.p->noOfLcpConf == 2) {
+ jam();
+ releaseLcpConnectRec(signal);
+ rootfragrecptr.p->lcpPtr = RNIL;
+ rootfragrecptr.p->rootState = ACTIVEROOT;
+ }//if
+ return;
+}//Dbacc::sendaccSrconfLab()
+
+/* --------------------------------------------------------------------------------- */
+/* CHECKSUM_CONTROL */
+/* INPUT: CCO_PAGEPTR */
+/* OUTPUT: TRESULT */
+/* */
+/* CHECK THAT THE CHECKSUM OF THE PAGE IS CORRECT TO ENSURE THAT NO ONE HAS */
+/* CORRUPTED THE PAGE INFORMATION. THE STORED CHECKSUM IS THE XOR OF ALL OTHER */
+/* WORDS IN THE PAGE, SO XOR'ING OVER THE WHOLE PAGE (CHECKSUM WORD INCLUDED) */
+/* MUST YIELD ZERO FOR AN UNCORRUPTED PAGE. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::checksumControl(Signal* signal, Uint32 checkPage)
+{
+ Uint32 Tchs;
+ Uint32 tccoIndex;
+ Uint32 Ti;
+ Uint32 Tmp1;
+ Uint32 Tmp2;
+ Uint32 Tmp3;
+ Uint32 Tmp4;
+ Uint32 Tlimit;
+
+ Tchs = 0;
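+ // The nested loops below walk the whole 2048-word page in an unrolled pattern:
+ // 32 chunks of 64 words, XOR'ing words i, i + 16, i + 32 and i + 48 for the
+ // first 16 indices of each chunk, i.e. 32 * 16 * 4 = 2048 words in total.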
+ for (Ti = 0; Ti < 32 ; Ti++) {
+ Tlimit = 16 + (Ti << 6);
+ for (tccoIndex = (Ti << 6); tccoIndex < Tlimit; tccoIndex ++) {
+ Tmp1 = ccoPageptr.p->word32[tccoIndex];
+ Tmp2 = ccoPageptr.p->word32[tccoIndex + 16];
+ Tmp3 = ccoPageptr.p->word32[tccoIndex + 32];
+ Tmp4 = ccoPageptr.p->word32[tccoIndex + 48];
+
+ Tchs = Tchs ^ Tmp1;
+ Tchs = Tchs ^ Tmp2;
+ Tchs = Tchs ^ Tmp3;
+ Tchs = Tchs ^ Tmp4;
+ }//for
+ }//for
+ if (Tchs == 0) {
+ tresult = 0;
+ if (checkPage != 0) {
+ jam();
+ lcnCopyPageptr.p = ccoPageptr.p;
+ srCheckPage(signal);
+ }//if
+ } else {
+ tresult = 1;
+ }//if
+ if (tresult != 0) {
+ jam();
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ signal->theData[0] = RNIL;
+ signal->theData[1] = rootfragrecptr.p->mytabptr;
+ signal->theData[2] = fragrecptr.p->myfid;
+ signal->theData[3] = ccoPageptr.p->word32[ZPOS_PAGE_ID];
+ signal->theData[4] = tlupElemIndex;
+ signal->theData[5] = ccoPageptr.p->word32[ZPOS_PAGE_TYPE];
+ signal->theData[6] = tresult;
+ sendSignal(cownBlockref, GSN_DEBUG_SIG, signal, 7, JBA);
+ }//if
+}//Dbacc::checksumControl()
+
+/* ******************--------------------------------------------------------------- */
+/* START_RECREQ REQUEST TO START UNDO PROCESS */
+/* SENDER: LQH, LEVEL B */
+/* ENTER START_RECREQ WITH */
+/* CLQH_PTR, LQH CONNECTION PTR */
+/* CLQH_BLOCK_REF, LQH BLOCK REFERENCE */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* START_RECREQ REQUEST TO START UNDO PROCESS */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execSTART_RECREQ(Signal* signal)
+{
+ jamEntry();
+ clqhPtr = signal->theData[0]; /* LQH CONNECTION PTR */
+ clqhBlockRef = signal->theData[1]; /* LQH BLOCK REFERENCE */
+ tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
+ for (int i = 0; i < UndoHeader::ZNO_UNDORECORD_TYPES; i++)
+ cSrUndoRecords[i] = 0;
+ startUndoLab(signal);
+ return;
+}//Dbacc::execSTART_RECREQ()
+
+void Dbacc::startUndoLab(Signal* signal)
+{
+ cundoLogActive = ZTRUE;
+ /* ----- OPEN UNDO FILES --------- */
+ for (tmp = 0; tmp <= ZMAX_UNDO_VERSION - 1; tmp++) {
+ jam();
+ if (csrVersList[tmp] != RNIL) {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* SELECT THE NEXT SYSTEM RESTART RECORD WHICH CONTAINS AN UNDO LOG */
+ /* THAT NEEDS TO BE EXECUTED AND SET UP THE DATA TO EXECUTE IT. */
+ /*---------------------------------------------------------------------------*/
+ srVersionPtr.i = csrVersList[tmp];
+ csrVersList[tmp] = RNIL;
+ ptrCheckGuard(srVersionPtr, csrVersionRecSize, srVersionRec);
+ cactiveUndoFilePage = srVersionPtr.p->prevAddress >> 13;
+ cprevUndoaddress = srVersionPtr.p->prevAddress;
+ cactiveCheckpId = srVersionPtr.p->checkPointId;
+
+ releaseSrRec(signal);
+ startActiveUndo(signal);
+ return;
+ }//if
+ }//for
+
+ // Send report of how many undo log records were executed
+ signal->theData[0] = NDB_LE_UNDORecordsExecuted;
+ signal->theData[1] = DBACC; // From block
+ signal->theData[2] = 0; // Total records executed
+ for (int i = 0; i < 10; i++){
+ if (i < UndoHeader::ZNO_UNDORECORD_TYPES){
+ signal->theData[i+3] = cSrUndoRecords[i];
+ signal->theData[2] += cSrUndoRecords[i];
+ }else{
+ signal->theData[i+3] = 0;
+ }
+ }
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 12, JBB);
+
+ /* ******************************< */
+ /* START_RECCONF */
+ /* ******************************< */
+ /*---------------------------------------------------------------------------*/
+ /* REPORT COMPLETION OF UNDO LOG EXECUTION. */
+ /*---------------------------------------------------------------------------*/
+ cundoLogActive = ZFALSE;
+ signal->theData[0] = clqhPtr;
+ sendSignal(clqhBlockRef, GSN_START_RECCONF, signal, 1, JBB);
+ /* LQH CONNECTION PTR */
+ return;
+}//Dbacc::startUndoLab()
+
+/*---------------------------------------------------------------------------*/
+/* START THE UNDO OF AN UNDO LOG FILE BY OPENING THE UNDO LOG FILE. */
+/*---------------------------------------------------------------------------*/
+void Dbacc::startActiveUndo(Signal* signal)
+{
+ if (cprevUndoaddress == cminusOne) {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* THERE WAS NO UNDO LOG INFORMATION IN THIS LOG FILE. WE GET THE NEXT */
+ /* OR REPORT COMPLETION. */
+ /*---------------------------------------------------------------------------*/
+ signal->theData[0] = ZSTART_UNDO;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 1, JBB);
+ } else {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* OPEN THE LOG FILE PERTAINING TO THIS UNDO LOG. */
+ /*---------------------------------------------------------------------------*/
+ if (cfsFirstfreeconnect == RNIL) {
+ jam();
+ sendSystemerror(signal);
+ }//if
+ seizeFsConnectRec(signal);
+ cactiveSrFsPtr = fsConnectptr.i;
+ fsConnectptr.p->fsState = OPEN_UNDO_FILE_SR;
+ fsConnectptr.p->fsPart = 0;
+ tmp1 = 1; /* FILE VERSION ? */
+ tmp1 = (tmp1 << 8) + ZLOCALLOGFILE; /* .LOCLOG = 2 */
+ tmp1 = (tmp1 << 8) + 4; /* ROOT DIRECTORY = D4 */
+ tmp1 = (tmp1 << 8) + fsConnectptr.p->fsPart; /* P2 */
+ tmp2 = 0x0; /* D7 DON'T CREATE , READ ONLY */
+ /* DON'T TRUNCATE TO ZERO */
+ /* ---FILE NAME "D4"/"DBACC"/LCP_CONNECTPTR:LOCAL_CHECK_PID/FS_CONNECTPTR:FS_PART".LOCLOG-- */
+ /* ************************ */
+ /* FSOPENREQ */
+ /* ************************ */
+ signal->theData[0] = cownBlockref;
+ signal->theData[1] = fsConnectptr.i;
+ signal->theData[2] = cminusOne; /* #FFFFFFFF */
+ signal->theData[3] = cminusOne; /* #FFFFFFFF */
+ signal->theData[4] = cactiveCheckpId; /* CHECKPOINT VERSION */
+ signal->theData[5] = tmp1;
+ signal->theData[6] = tmp2;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ }//if
+}//Dbacc::startActiveUndo()
+
+/* ------- READ A GROUP OF UNDO PAGES --------------- */
+void Dbacc::srStartUndoLab(Signal* signal)
+{
+ /*---------------------------------------------------------------------------*/
+ /* ALL LOG FILES HAVE BEEN OPENED. WE CAN NOW READ DATA FROM THE LAST */
+ /* PAGE IN THE LAST LOG FILE AND BACKWARDS UNTIL WE REACH THE VERY */
+ /* FIRST UNDO LOG RECORD. */
+ /*---------------------------------------------------------------------------*/
+ if (cactiveUndoFilePage >= ZWRITE_UNDOPAGESIZE) {
+ jam();
+ tmp1 = ZWRITE_UNDOPAGESIZE; /* NO OF READ UNDO PAGES */
+ cactiveSrUndoPage = ZWRITE_UNDOPAGESIZE - 1; /* LAST PAGE */
+ } else {
+ jam();
+ tmp1 = cactiveUndoFilePage + 1; /* NO OF READ UNDO PAGES */
+ cactiveSrUndoPage = cactiveUndoFilePage;
+ }//if
+ fsConnectptr.i = cactiveSrFsPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = 0;
+ /* FLAG = LIST MEM PAGES, LIST FILE PAGES */
+ signal->theData[4] = ZUNDOPAGE_BASE_ADD;
+ signal->theData[5] = tmp1;
+ signal->theData[6] = 0;
+ signal->theData[7] = (cactiveUndoFilePage - tmp1) + 1;
+ signal->theData[8] = 1;
+ signal->theData[9] = cactiveUndoFilePage;
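+ // With format flag 0 ("list of memory pages, list of file pages") theData[6..9]
+ // appears to hold (memory page, file page) pairs, so the last tmp1 file pages
+ // are read in ascending order into undo pages 0 and 1; presumably only the
+ // first theData[5] pairs are consumed.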
+
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 10, JBA);
+ if (tmp1 > cactiveUndoFilePage) {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* THIS IS THE LAST READ IN THIS LOG FILE. WE SET THE ACTIVE FILE */
+ /* POINTER. IF IT IS THE FIRST WE SHOULD NEVER ATTEMPT ANY MORE READS */
+ /* SINCE WE SHOULD ENCOUNTER A FIRST LOG RECORD WITH PREVIOUS PAGE ID */
+ /* EQUAL TO RNIL. */
+ /*---------------------------------------------------------------------------*/
+ cactiveSrFsPtr = RNIL;
+ fsConnectptr.p->fsState = READ_UNDO_PAGE_AND_CLOSE;
+ } else {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* WE STILL HAVE MORE INFORMATION IN THIS LOG FILE. WE ONLY MOVE BACK */
+ /* THE FILE PAGE. */
+ /*---------------------------------------------------------------------------*/
+ cactiveUndoFilePage = cactiveUndoFilePage - tmp1;
+ fsConnectptr.p->fsState = READ_UNDO_PAGE;
+ }//if
+ return;
+}//Dbacc::srStartUndoLab()
+
+/* ------- DO UNDO ---------------------------*/
+/* ******************--------------------------------------------------------------- */
+/* NEXTOPERATION ORD FOR EXECUTION OF NEXT OP */
+/* ******************------------------------------+ */
+/* SENDER: ACC, LEVEL B */
+void Dbacc::execNEXTOPERATION(Signal* signal)
+{
+ jamEntry();
+ tresult = 0;
+ srDoUndoLab(signal);
+ return;
+}//Dbacc::execNEXTOPERATION()
+
+void Dbacc::srDoUndoLab(Signal* signal)
+{
+ DirRangePtr souDirRangePtr;
+ DirectoryarrayPtr souDirptr;
+ Page8Ptr souPageidptr;
+ Uint32 tundoPageindex;
+ UndoHeader *undoHeaderPtr;
+ Uint32 tmpindex;
+
+ jam();
+ undopageptr.i = cactiveSrUndoPage;
+ ptrCheckGuard(undopageptr, cundopagesize, undopage);
+ /*---------------------------------------------------------------------------*/
+ /* LAYOUT OF AN UNDO LOG RECORD: */
+ /* ***************************** */
+ /* */
+ /* |----------------------------------------------------| */
+ /* | TABLE ID | */
+ /* |----------------------------------------------------| */
+ /* | ROOT FRAGMENT ID | */
+ /* |----------------------------------------------------| */
+ /* | LOCAL FRAGMENT ID | */
+ /* |----------------------------------------------------| */
+ /* | UNDO INFO LEN 14 b | TYPE 4 b | PAGE INDEX 14 b | */
+ /* |----------------------------------------------------| */
+ /* | INDEX INTO PAGE DIRECTORY (LOGICAL PAGE ID) | */
+ /* |----------------------------------------------------| */
+ /* | PREVIOUS UNDO LOG RECORD FOR THE FRAGMENT | */
+ /* |----------------------------------------------------| */
+ /* | PREVIOUS UNDO LOG RECORD FOR ALL FRAGMENTS | */
+ /* |----------------------------------------------------| */
+ /* | TYPE SPECIFIC PART | */
+ /* |----------------------------------------------------| */
+ /*---------------------------------------------------------------------------*/
+ /*---------------------------------------------------------------------------*/
+ /* SET THE PAGE POINTER. WE ONLY WORK WITH TWO PAGES IN THIS RESTART */
+ /* ACTIVITY. GET THE PAGE POINTER AND THE PAGE INDEX TO READ FROM. */
+ /*---------------------------------------------------------------------------*/
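+ // An undo log address is a single 32-bit word, apparently split into a page part
+ // (address >> 13, cf. the ">> 13" in startUndoLab()) and a word index within the
+ // 8192-word undo page (address & ZUNDOPAGEINDEX_MASK, i.e. the 13 low bits).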
+ tundoindex = cprevUndoaddress & ZUNDOPAGEINDEX_MASK; //0x1fff, 13 bits.
+ undoHeaderPtr = (UndoHeader *) &undopageptr.p->undoword[tundoindex];
+ tundoindex = tundoindex + ZUNDOHEADSIZE;
+
+ /*------------------------------------------------------------------------*/
+ /* READ TABLE ID AND ROOT FRAGMENT ID AND USE THIS TO GET ROOT RECORD. */
+ /*------------------------------------------------------------------------*/
+ arrGuard((tundoindex + 6), 8192);
+
+ // TABLE ID
+ tabptr.i = undoHeaderPtr->tableId;
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+
+ // ROOT FRAGMENT ID
+ tfid = undoHeaderPtr->rootFragId;
+ ndbrequire((undoHeaderPtr->localFragId >> 1) == undoHeaderPtr->rootFragId);
+ if (!getrootfragmentrec(signal, rootfragrecptr, tfid)) {
+ jam();
+ /*---------------------------------------------------------------------*/
+ /* THE ROOT RECORD WAS NOT FOUND. OBVIOUSLY WE ARE NOT RESTARTING THIS */
+ /* FRAGMENT. WE THUS IGNORE THIS LOG RECORD AND PROCEED WITH THE NEXT. */
+ /*---------------------------------------------------------------------*/
+ creadyUndoaddress = cprevUndoaddress;
+ // PREVIOUS UNDO LOG RECORD FOR ALL FRAGMENTS
+ cprevUndoaddress = undoHeaderPtr->prevUndoAddress;
+ undoNext2Lab(signal);
+#ifdef VM_TRACE
+ ndbout_c("ignoring root fid %d", (int)tfid);
+#endif
+ return;
+ }//if
+ /*-----------------------------------------------------------------------*/
+ /* READ THE LOCAL FRAGMENT ID AND VERIFY THAT IT IS CORRECT. */
+ /*-----------------------------------------------------------------------*/
+ if (rootfragrecptr.p->fragmentid[0] == undoHeaderPtr->localFragId) {
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ } else {
+ if (rootfragrecptr.p->fragmentid[1] == undoHeaderPtr->localFragId) {
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ } else {
+ jam();
+ progError(__LINE__, 0, "Invalid local fragment id in undo log");
+ return;
+ }//if
+ }//if
+ /*------------------------------------------------------------------------*/
+ /* READ UNDO INFO LENGTH, TYPE OF LOG RECORD AND PAGE INDEX WHERE TO */
+ /* APPLY THIS LOG RECORD. ALSO STEP INDEX TO PREPARE READ OF LOGICAL */
+ /* PAGE ID. SET TMPINDEX TO INDEX THE FIRST WORD IN THE TYPE SPECIFIC */
+ /* PART. */
+ /*------------------------------------------------------------------------*/
+ // UNDO INFO LENGTH 14 b | TYPE 4 b | PAGE INDEX 14 b
+ const Uint32 tmp1 = undoHeaderPtr->variousInfo;
+ cundoinfolength = tmp1 >> 18;
+ const Uint32 tpageType = (tmp1 >> 14) & 0xf;
+ tundoPageindex = tmp1 & 0x3fff;
+
+ // INDEX INTO PAGE DIRECTORY (LOGICAL PAGE ID)
+ tmpP = undoHeaderPtr->logicalPageId ;
+ tmpindex = tundoindex;
+ arrGuard((tmpindex + cundoinfolength - 1), 8192);
+ if (fragrecptr.p->localCheckpId != cactiveCheckpId) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ /* THE FRAGMENT DID EXIST BUT IS NOT AFFECTED BY THIS UNDO LOG */
+ /* EXECUTION. EITHER IT BELONGS TO ANOTHER CHECKPOINT OR IT WAS CREATED AND IS ONLY IN */
+ /* NEED OF EXECUTION OF REDO LOG RECORDS FROM LQH. */
+ /*-----------------------------------------------------------------------*/
+ creadyUndoaddress = cprevUndoaddress;
+ // PREVIOUS UNDO LOG RECORD FOR ALL FRAGMENTS
+ cprevUndoaddress = undoHeaderPtr->prevUndoAddress;
+
+ undoNext2Lab(signal);
+ return;
+ }//if
+ /*-----------------------------------------------------------------------*/
+ /* VERIFY CONSISTENCY OF UNDO LOG RECORDS. */
+ /*-----------------------------------------------------------------------*/
+ ndbrequire(fragrecptr.p->prevUndoposition == cprevUndoaddress);
+ cSrUndoRecords[tpageType]++;
+ switch(tpageType){
+
+ case UndoHeader::ZPAGE_INFO:{
+ jam();
+ /*----------------------------------------------------------------------*/
+ /* WE HAVE TO UNDO UPDATES IN A NORMAL PAGE. GET THE PAGE POINTER BY */
+ /* USING THE LOGICAL PAGE ID. THEN RESET THE OLD VALUE IN THE PAGE BY */
+ /* USING THE OLD DATA WHICH IS STORED IN THIS UNDO LOG RECORD. */
+ /*----------------------------------------------------------------------*/
+ souDirRangePtr.i = fragrecptr.p->directory;
+ tmpP2 = tmpP >> 8;
+ tmpP = tmpP & 0xff;
+ ptrCheckGuard(souDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(tmpP2, 256);
+ souDirptr.i = souDirRangePtr.p->dirArray[tmpP2];
+ ptrCheckGuard(souDirptr, cdirarraysize, directoryarray);
+ souPageidptr.i = souDirptr.p->pagep[tmpP];
+ ptrCheckGuard(souPageidptr, cpagesize, page8);
+ Uint32 loopLimit = tundoPageindex + cundoinfolength;
+ ndbrequire(loopLimit <= 2048);
+ for (Uint32 tmp = tundoPageindex; tmp < loopLimit; tmp++) {
+ dbgWord32(souPageidptr, tmp, undopageptr.p->undoword[tmpindex]);
+ souPageidptr.p->word32[tmp] = undopageptr.p->undoword[tmpindex];
+ tmpindex = tmpindex + 1;
+ }//for
+ break;
+ }
+
+ case UndoHeader::ZOVER_PAGE_INFO:{
+ jam();
+ /*----------------------------------------------------------------------*/
+ /* WE HAVE TO UNDO UPDATES IN AN OVERFLOW PAGE. GET THE PAGE POINTER BY*/
+ /* USING THE LOGICAL PAGE ID. THEN RESET THE OLD VALUE IN THE PAGE BY */
+ /* USING THE OLD DATA WHICH IS STORED IN THIS UNDO LOG RECORD. */
+ /*----------------------------------------------------------------------*/
+ souDirRangePtr.i = fragrecptr.p->overflowdir;
+ tmpP2 = tmpP >> 8;
+ tmpP = tmpP & 0xff;
+ ptrCheckGuard(souDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(tmpP2, 256);
+ souDirptr.i = souDirRangePtr.p->dirArray[tmpP2];
+ ptrCheckGuard(souDirptr, cdirarraysize, directoryarray);
+ souPageidptr.i = souDirptr.p->pagep[tmpP];
+ ptrCheckGuard(souPageidptr, cpagesize, page8);
+ Uint32 loopLimit = tundoPageindex + cundoinfolength;
+ ndbrequire(loopLimit <= 2048);
+ for (Uint32 tmp = tundoPageindex; tmp < loopLimit; tmp++) {
+ dbgWord32(souPageidptr, tmp, undopageptr.p->undoword[tmpindex]);
+ souPageidptr.p->word32[tmp] = undopageptr.p->undoword[tmpindex];
+ tmpindex = tmpindex + 1;
+ }//for
+ break;
+ }
+
+ case UndoHeader::ZOP_INFO: {
+ jam();
+ /*---------------------------------------------------------------------*/
+ /* AN OPERATION WAS ACTIVE WHEN LOCAL CHECKPOINT WAS EXECUTED. WE NEED */
+ /* TO RESET THE LOCKS IT HAS SET. IF THE OPERATION WAS AN INSERT OR */
+ /* THE ELEMENT WAS MARKED AS DISAPPEARED, IT WILL ALSO BE REMOVED */
+ /* FROM THE PAGE */
+ /* */
+ /* BEGIN BY SEARCHING AFTER THE ELEMENT, WHEN FOUND UNDO THE */
+ /* CHANGES ON THE ELEMENT HEADER. IF IT WAS AN INSERT OPERATION OR */
+ /* MARKED AS DISAPPEARED, PROCEED BY REMOVING THE ELEMENT. */
+ /*---------------------------------------------------------------------*/
+ seizeOpRec(signal);
+ // Initialise the opRec
+ operationRecPtr.p->transId1 = 0;
+ operationRecPtr.p->transId2 = RNIL;
+ operationRecPtr.p->transactionstate = ACTIVE;
+ operationRecPtr.p->commitDeleteCheckFlag = ZFALSE;
+ operationRecPtr.p->lockMode = 0;
+ operationRecPtr.p->dirtyRead = 0;
+ operationRecPtr.p->nodeType = 0;
+ operationRecPtr.p->fid = fragrecptr.p->myfid;
+ operationRecPtr.p->nextParallelQue = RNIL;
+ operationRecPtr.p->prevParallelQue = RNIL;
+ operationRecPtr.p->nextQueOp = RNIL;
+ operationRecPtr.p->prevQueOp = RNIL;
+ operationRecPtr.p->nextSerialQue = RNIL;
+ operationRecPtr.p->prevSerialQue = RNIL;
+ operationRecPtr.p->elementPage = RNIL;
+ operationRecPtr.p->keyinfoPage = RNIL;
+ operationRecPtr.p->insertIsDone = ZFALSE;
+ operationRecPtr.p->lockOwner = ZFALSE;
+ operationRecPtr.p->elementIsDisappeared = ZFALSE;
+ operationRecPtr.p->insertDeleteLen = fragrecptr.p->elementLength;
+ operationRecPtr.p->longPagePtr = RNIL;
+ operationRecPtr.p->longKeyPageIndex = RNIL;
+ operationRecPtr.p->scanRecPtr = RNIL;
+ operationRecPtr.p->isAccLockReq = ZFALSE;
+ operationRecPtr.p->isUndoLogReq = ZTRUE;
+
+ // Read operation values from undo page
+ operationRecPtr.p->operation = undopageptr.p->undoword[tmpindex];
+ tmpindex++;
+ operationRecPtr.p->hashValue = undopageptr.p->undoword[tmpindex];
+ tmpindex++;
+ const Uint32 tkeylen = undopageptr.p->undoword[tmpindex];
+ tmpindex++;
+ operationRecPtr.p->tupkeylen = tkeylen;
+ operationRecPtr.p->xfrmtupkeylen = 0; // not used
+ operationRecPtr.p->fragptr = fragrecptr.i;
+
+ ndbrequire(fragrecptr.p->keyLength != 0 &&
+ fragrecptr.p->keyLength == tkeylen);
+
+ // Read localkey1 from undo page
+ signal->theData[7 + 0] = undopageptr.p->undoword[tmpindex];
+ tmpindex = tmpindex + 1;
+ arrGuard((tmpindex - 1), 8192);
+ getElement(signal);
+ if (tgeResult != ZTRUE) {
+ jam();
+ signal->theData[0] = RNIL;
+ signal->theData[1] = tabptr.i;
+ signal->theData[2] = cactiveCheckpId;
+ signal->theData[3] = cprevUndoaddress;
+ signal->theData[4] = operationRecPtr.p->operation;
+ signal->theData[5] = operationRecPtr.p->hashValue;
+ signal->theData[6] = operationRecPtr.p->tupkeylen;
+ sendSignal(cownBlockref, GSN_DEBUG_SIG, signal, 11, JBA);
+ return;
+ }//if
+
+ operationRecPtr.p->elementPage = gePageptr.i;
+ operationRecPtr.p->elementContainer = tgeContainerptr;
+ operationRecPtr.p->elementPointer = tgeElementptr;
+ operationRecPtr.p->elementIsforward = tgeForward;
+
+ commitdelete(signal, true);
+ releaseOpRec(signal);
+ break;
+ }
+
+ default:
+ jam();
+ progError(__LINE__, 0, "Invalid pagetype in undo log");
+ break;
+
+ }//switch(tpageType)
+
+ /*----------------------------------------------------------------------*/
+ /* READ THE PAGE ID AND THE PAGE INDEX OF THE PREVIOUS UNDO LOG RECORD */
+ /* FOR THIS FRAGMENT. */
+ /*----------------------------------------------------------------------*/
+ fragrecptr.p->prevUndoposition = undoHeaderPtr->prevUndoAddressForThisFrag;
+ /*----------------------------------------------------------------------*/
+ /* READ THE PAGE ID AND THE PAGE INDEX OF THE PREVIOUS UNDO LOG RECORD */
+ /* FOR THIS UNDO LOG. */
+ /*----------------------------------------------------------------------*/
+ creadyUndoaddress = cprevUndoaddress;
+ cprevUndoaddress = undoHeaderPtr->prevUndoAddress;
+
+ if (fragrecptr.p->prevUndoposition == cminusOne) {
+ jam();
+ /*---------------------------------------------------------------------*/
+ /* WE HAVE NOW EXECUTED ALL UNDO LOG RECORDS FOR THIS FRAGMENT. WE */
+ /* NOW NEED TO UPDATE THE FREE LIST OF OVERFLOW PAGES. */
+ /*---------------------------------------------------------------------*/
+ ndbrequire(fragrecptr.p->nextAllocPage == 0);
+
+ signal->theData[0] = fragrecptr.i;
+ sendSignal(cownBlockref, GSN_ACC_OVER_REC, signal, 1, JBB);
+ return;
+ }//if
+ undoNext2Lab(signal);
+ return;
+}//Dbacc::srDoUndoLab()
+
+void Dbacc::undoNext2Lab(Signal* signal)
+{
+ /*---------------------------------------------------------------------------*/
+ /* EXECUTE NEXT UNDO LOG RECORD. */
+ /*---------------------------------------------------------------------------*/
+ if (cprevUndoaddress == cminusOne) {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* WE HAVE EXECUTED THIS UNDO LOG TO COMPLETION. IT IS NOW TIME TO TAKE CARE */
+ /* OF THE NEXT UNDO LOG OR REPORT COMPLETION OF UNDO LOG EXECUTION. */
+ /*---------------------------------------------------------------------------*/
+ signal->theData[0] = ZSTART_UNDO;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 1, JBB);
+ return;
+ }//if
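+ /*---------------------------------------------------------------------------*/
+ /* THE PART OF AN UNDO ADDRESS ABOVE THE 13-BIT WORD INDEX IDENTIFIES THE */
+ /* UNDO PAGE, SO A CHANGE IN (ADDRESS >> 13) MEANS THE PREVIOUS RECORD IS */
+ /* LOCATED ON ANOTHER UNDO PAGE. */
+ /*---------------------------------------------------------------------------*/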
+ if ((creadyUndoaddress >> 13) != (cprevUndoaddress >> 13)) {
+ /*---------------------------------------------------------------------------*/
+ /* WE ARE CHANGING PAGE. */
+ /*---------------------------------------------------------------------------*/
+ if (cactiveSrUndoPage == 0) {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* WE HAVE READ AND EXECUTED ALL UNDO LOG INFORMATION IN THE CURRENTLY */
+ /* READ PAGES. WE STILL HAVE MORE INFORMATION TO READ FROM FILE SINCE */
+ /* WE HAVEN'T FOUND THE FIRST LOG RECORD IN THE LOG FILE YET. */
+ /*---------------------------------------------------------------------------*/
+ srStartUndoLab(signal);
+ return;
+ } else {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* WE HAVE ANOTHER PAGE READ THAT WE NEED TO EXECUTE. */
+ /*---------------------------------------------------------------------------*/
+ cactiveSrUndoPage = cactiveSrUndoPage - 1;
+ }//if
+ }//if
+ /*---------------------------------------------------------------------------*/
+ /* REAL-TIME BREAK */
+ /*---------------------------------------------------------------------------*/
+ /* ******************************< */
+ /* NEXTOPERATION */
+ /* ******************************< */
+ sendSignal(cownBlockref, GSN_NEXTOPERATION, signal, 1, JBB);
+ return;
+}//Dbacc::undoNext2Lab()
+
+/*-----------------------------------------------------------------------------------*/
+/* AFTER COMPLETING THE READING OF DATA PAGES FROM DISK AND EXECUTING THE UNDO */
+/* LOG WE ARE READY TO UPDATE THE FREE LIST OF OVERFLOW PAGES. THIS LIST MUST */
+/* BE BUILT AGAIN SINCE IT IS NOT CHECKPOINTED. WHEN THE PAGES ARE ALLOCATED */
+/* THEY ARE NOT PART OF ANY LIST. A PAGE CAN EITHER BE PUT IN THE FREE LIST, */
+/* LEFT OUT OF THE FREE LIST, OR BE PUT INTO THE LIST OF LONG KEY PAGES. */
+/*-----------------------------------------------------------------------------------*/
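+// A sketch of the decision taken per overflow page in the loop below: if the
+// directory slot is empty (RNIL) an overflow record is put into the free
+// directory-index list; if the page has more allocated containers than
+// ZFREE_LIMIT it is left out of the free list; otherwise it gets an overflow
+// record in the fragment's free list and, if completely empty, is released.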
+void Dbacc::execACC_OVER_REC(Signal* signal)
+{
+ DirRangePtr pnoDirRangePtr;
+ DirectoryarrayPtr pnoOverflowDirptr;
+ Page8Ptr pnoPageidptr;
+ Uint32 tpnoPageType;
+ Uint32 toverPageCheck;
+
+ jamEntry();
+ fragrecptr.i = signal->theData[0];
+ toverPageCheck = 0;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ ndbrequire((fragrecptr.p->nextAllocPage != 0) ||
+ (fragrecptr.p->firstOverflowRec == RNIL));
+ /*-----------------------------------------------------------------------------------*/
+ /* NOBODY SHOULD HAVE PUT ANYTHING INTO THE OVERFLOW LIST BEFORE WE HAVE EVEN */
+ /* STARTED PUTTING THINGS THERE. */
+ /*-----------------------------------------------------------------------------------*/
+ ndbrequire(fragrecptr.p->loadingFlag == ZTRUE);
+ /*---------------------------------------------------------------------------*/
+ /* IF LOADING HAS STOPPED BEFORE WE HAVE FINISHED LOADING, IT IS A SYSTEM ERROR. */
+ /*---------------------------------------------------------------------------*/
+ while (toverPageCheck < ZNO_OF_OP_PER_SIGNAL) {
+ jam();
+ if (fragrecptr.p->nextAllocPage >= fragrecptr.p->lastOverIndex) {
+ jam();
+ fragrecptr.p->loadingFlag = ZFALSE;
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (rootfragrecptr.p->lcpPtr != RNIL) {
+ jam();
+ srCloseDataFileLab(signal);
+ } else {
+ jam();
+ undoNext2Lab(signal);
+ }//if
+ return;
+ }//if
+ tmpP = fragrecptr.p->nextAllocPage;
+ pnoDirRangePtr.i = fragrecptr.p->overflowdir;
+ tmpP2 = tmpP >> 8;
+ tmpP = tmpP & 0xff;
+ arrGuard(tmpP2, 256);
+ ptrCheckGuard(pnoDirRangePtr, cdirrangesize, dirRange);
+ if (pnoDirRangePtr.p->dirArray[tmpP2] == RNIL) {
+ jam();
+ pnoPageidptr.i = RNIL;
+ } else {
+ pnoOverflowDirptr.i = pnoDirRangePtr.p->dirArray[tmpP2];
+ if (pnoOverflowDirptr.i == RNIL) {
+ jam();
+ pnoPageidptr.i = RNIL;
+ } else {
+ jam();
+ ptrCheckGuard(pnoOverflowDirptr, cdirarraysize, directoryarray);
+ pnoPageidptr.i = pnoOverflowDirptr.p->pagep[tmpP];
+ }//if
+ }//if
+ if (pnoPageidptr.i == RNIL) {
+ jam();
+ seizeOverRec(signal);
+ sorOverflowRecPtr.p->dirindex = fragrecptr.p->nextAllocPage;
+ sorOverflowRecPtr.p->overpage = RNIL;
+ priOverflowRecPtr = sorOverflowRecPtr;
+ putRecInFreeOverdir(signal);
+ } else {
+ ptrCheckGuard(pnoPageidptr, cpagesize, page8);
+ tpnoPageType = pnoPageidptr.p->word32[ZPOS_PAGE_TYPE];
+ tpnoPageType = (tpnoPageType >> ZPOS_PAGE_TYPE_BIT) & 3;
+ if (pnoPageidptr.p->word32[ZPOS_ALLOC_CONTAINERS] > ZFREE_LIMIT) {
+ jam();
+ dbgWord32(pnoPageidptr, ZPOS_OVERFLOWREC, RNIL);
+ pnoPageidptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
+ ndbrequire(pnoPageidptr.p->word32[ZPOS_PAGE_ID] == fragrecptr.p->nextAllocPage);
+ } else {
+ jam();
+ seizeOverRec(signal);
+ sorOverflowRecPtr.p->dirindex = pnoPageidptr.p->word32[ZPOS_PAGE_ID];
+ ndbrequire(sorOverflowRecPtr.p->dirindex == fragrecptr.p->nextAllocPage);
+ dbgWord32(pnoPageidptr, ZPOS_OVERFLOWREC, sorOverflowRecPtr.i);
+ pnoPageidptr.p->word32[ZPOS_OVERFLOWREC] = sorOverflowRecPtr.i;
+ sorOverflowRecPtr.p->overpage = pnoPageidptr.i;
+ porOverflowRecPtr = sorOverflowRecPtr;
+ putOverflowRecInFrag(signal);
+ if (pnoPageidptr.p->word32[ZPOS_ALLOC_CONTAINERS] == 0) {
+ jam();
+ ropPageptr = pnoPageidptr;
+ releaseOverpage(signal);
+ }//if
+ }//if
+ }//if
+ fragrecptr.p->nextAllocPage++;
+ toverPageCheck++;
+ }//while
+ signal->theData[0] = fragrecptr.i;
+ sendSignal(cownBlockref, GSN_ACC_OVER_REC, signal, 1, JBB);
+}//Dbacc::execACC_OVER_REC()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF SYSTEM RESTART MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* SCAN MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACC_SCANREQ START OF A SCAN PROCESS */
+/* SENDER: LQH, LEVEL B */
+/* ENTER ACC_SCANREQ WITH */
+/* TUSERPTR, LQH SCAN_CONNECT POINTER */
+/* TUSERBLOCKREF, LQH BLOCK REFERENCE */
+/* TABPTR, TABLE IDENTITY AND PTR */
+/* TFID ROOT FRAGMENT IDENTITY */
+/* TSCAN_FLAG , = ZCOPY, ZSCAN, ZSCAN_LOCK_ALL */
+/* ZREADLOCK, ZWRITELOCK */
+/* TSCAN_TRID1 , TRANSACTION ID PART 1 */
+/* TSCAN_TRID2 TRANSACTION ID PART 2 */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACC_SCANREQ START OF A SCAN PROCESS */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execACC_SCANREQ(Signal* signal)
+{
+ jamEntry();
+ AccScanReq * req = (AccScanReq*)&signal->theData[0];
+ tuserptr = req->senderData;
+ tuserblockref = req->senderRef;
+ tabptr.i = req->tableId;
+ tfid = req->fragmentNo;
+ tscanFlag = req->requestInfo;
+ tscanTrid1 = req->transId1;
+ tscanTrid2 = req->transId2;
+
+ tresult = 0;
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+ ndbrequire(getrootfragmentrec(signal,rootfragrecptr, tfid));
+
+ Uint32 i;
+ for (i = 0; i < MAX_PARALLEL_SCANS_PER_FRAG; i++) {
+ jam();
+ if (rootfragrecptr.p->scan[i] == RNIL) {
+ jam();
+ break;
+ }
+ }
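+  /* i NOW INDEXES A FREE SCAN SLOT IN THE ROOT FRAGMENT. THE CHECKS BELOW REQUIRE */
+  /* THAT SUCH A SLOT AND A FREE SCAN RECORD EXIST.                                */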
+ ndbrequire(i != MAX_PARALLEL_SCANS_PER_FRAG);
+ ndbrequire(cfirstFreeScanRec != RNIL);
+ seizeScanRec(signal);
+
+ rootfragrecptr.p->scan[i] = scanPtr.i;
+ scanPtr.p->scanBucketState = ScanRec::FIRST_LAP;
+ scanPtr.p->scanLockMode = AccScanReq::getLockMode(tscanFlag);
+ scanPtr.p->scanReadCommittedFlag = AccScanReq::getReadCommittedFlag(tscanFlag);
+
+ /* TWELVE BITS OF THE ELEMENT HEAD ARE SCAN */
+ /* CHECK BITS. THE MASK NOTES WHICH BIT IS */
+ /* ALLOCATED FOR THE ACTIVE SCAN */
+ scanPtr.p->scanMask = 1 << i;
+ scanPtr.p->scanUserptr = tuserptr;
+ scanPtr.p->scanUserblockref = tuserblockref;
+ scanPtr.p->scanTrid1 = tscanTrid1;
+ scanPtr.p->scanTrid2 = tscanTrid2;
+ scanPtr.p->rootPtr = rootfragrecptr.i;
+ scanPtr.p->scanLockHeld = 0;
+ scanPtr.p->scanOpsAllocated = 0;
+ scanPtr.p->scanFirstActiveOp = RNIL;
+ scanPtr.p->scanFirstQueuedOp = RNIL;
+ scanPtr.p->scanLastQueuedOp = RNIL;
+ scanPtr.p->scanFirstLockedOp = RNIL;
+ scanPtr.p->scanLastLockedOp = RNIL;
+ scanPtr.p->scanState = ScanRec::WAIT_NEXT;
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ initScanFragmentPart(signal);
+
+ /*------------------------------------------------------*/
+ /* We start the timeout loop for the scan process here. */
+ /*------------------------------------------------------*/
+ ndbrequire(scanPtr.p->scanTimer == 0);
+ if (scanPtr.p->scanContinuebCounter == 0) {
+ jam();
+ scanPtr.p->scanContinuebCounter = 1;
+ signal->theData[0] = ZSEND_SCAN_HBREP;
+ signal->theData[1] = scanPtr.i;
+ sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 100, 2);
+ }//if
+ scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter;
+ /* ************************ */
+ /* ACC_SCANCONF */
+ /* ************************ */
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ signal->theData[1] = scanPtr.i;
+ signal->theData[2] = 2;
+ /* NR OF LOCAL FRAGMENT */
+ signal->theData[3] = rootfragrecptr.p->fragmentid[0];
+ signal->theData[4] = rootfragrecptr.p->fragmentid[1];
+ signal->theData[7] = AccScanConf::ZNOT_EMPTY_FRAGMENT;
+ sendSignal(scanPtr.p->scanUserblockref, GSN_ACC_SCANCONF, signal, 8, JBB);
+ /* NOT EMPTY FRAGMENT */
+ return;
+}//Dbacc::execACC_SCANREQ()
+
+/* ******************--------------------------------------------------------------- */
+/* NEXT_SCANREQ REQUEST FOR NEXT ELEMENT OF */
+/* ******************------------------------------+ A FRAGMENT. */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execNEXT_SCANREQ(Signal* signal)
+{
+ Uint32 tscanNextFlag;
+ jamEntry();
+ scanPtr.i = signal->theData[0];
+ operationRecPtr.i = signal->theData[1];
+ tscanNextFlag = signal->theData[2];
+ /* ------------------------------------------ */
+ /* 1 = ZCOPY_NEXT GET NEXT ELEMENT */
+ /* 2 = ZCOPY_NEXT_COMMIT COMMIT THE */
+ /* ACTIVE ELEMENT AND GET THE NEXT ONE */
+ /* 3 = ZCOPY_COMMIT COMMIT THE ACTIVE ELEMENT */
+ /* 4 = ZCOPY_REPEAT GET THE ACTIVE ELEMENT */
+ /* 5 = ZCOPY_ABORT RELOCK THE ACTIVE ELEMENT */
+ /* 6 = ZCOPY_CLOSE THE SCAN PROCESS IS READY */
+ /* ------------------------------------------ */
+ tresult = 0;
+ ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
+ ndbrequire(scanPtr.p->scanState == ScanRec::WAIT_NEXT);
+
+ scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter;
+ switch (tscanNextFlag) {
+ case ZCOPY_NEXT:
+ jam();
+ /*empty*/;
+ break;
+ case ZCOPY_NEXT_COMMIT:
+ case ZCOPY_COMMIT:
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* COMMIT ACTIVE OPERATION. SEND NEXT SCAN ELEMENT IF IT IS ZCOPY_NEXT_COMMIT. */
+ /* --------------------------------------------------------------------------------- */
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ if (!scanPtr.p->scanReadCommittedFlag) {
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_COMMIT) {
+ jam();
+ /*--------------------------------------------------------------*/
+ // We did not have enough undo log buffers to safely commit an
+ // operation. Try again in 10 milliseconds.
+ /*--------------------------------------------------------------*/
+ sendSignalWithDelay(cownBlockref, GSN_NEXT_SCANREQ, signal, 10, 3);
+ return;
+ }//if
+ }//if
+ commitOperation(signal);
+ }//if
+ takeOutActiveScanOp(signal);
+ releaseOpRec(signal);
+ scanPtr.p->scanOpsAllocated--;
+ if (tscanNextFlag == ZCOPY_COMMIT) {
+ jam();
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ Uint32 blockNo = refToBlock(scanPtr.p->scanUserblockref);
+ EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 1);
+ return;
+ }//if
+ break;
+ case ZCOPY_CLOSE:
+ jam();
+ fragrecptr.i = scanPtr.p->activeLocalFrag;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ if (!scanPtr.p->scanReadCommittedFlag) {
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_OPERATION) {
+ jam();
+ /*--------------------------------------------------------------*/
+ // We did not have enough undo log buffers to commit a set of
+ // operations. Try again in 10 milliseconds.
+ /*--------------------------------------------------------------*/
+ sendSignalWithDelay(cownBlockref, GSN_NEXT_SCANREQ, signal, 10, 3);
+ return;
+ }//if
+ }//if
+ }//if
+ /* --------------------------------------------------------------------------------- */
+    /* THE SCAN PROCESS IS FINISHED. RELEASE ALL LOCKED ELEMENTS AND ALL INVOLVED RECORDS.  */
+ /* --------------------------------------------------------------------------------- */
+ releaseScanLab(signal);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
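+  /* ZCOPY_NEXT AND ZCOPY_NEXT_COMMIT CONTINUE HERE: FETCH THE NEXT ELEMENT */
+  /* THROUGH ACC_CHECK_SCAN.                                                */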
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZNOT_CHECK_LCP_STOP;
+ execACC_CHECK_SCAN(signal);
+ return;
+}//Dbacc::execNEXT_SCANREQ()
+
+void Dbacc::checkNextBucketLab(Signal* signal)
+{
+ DirRangePtr cscDirRangePtr;
+ DirectoryarrayPtr cscDirptr;
+ DirectoryarrayPtr tnsDirptr;
+ Page8Ptr nsPageptr;
+ Page8Ptr cscPageidptr;
+ Page8Ptr gnsPageidptr;
+ Page8Ptr tnsPageidptr;
+ Uint32 tnsElementptr;
+ Uint32 tnsContainerptr;
+ Uint32 tnsIsLocked;
+ Uint32 tnsTmp1;
+ Uint32 tnsTmp2;
+ Uint32 tnsCopyIndex1;
+ Uint32 tnsCopyIndex2;
+ Uint32 tnsCopyDir;
+
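+  /* EACH PAGE HOLDS 2^K BUCKETS. THE UPPER BITS OF NEXT_BUCKET_INDEX SELECT THE PAGE       */
+  /* THROUGH THE TWO-LEVEL DIRECTORY AND THE LOWEST K BITS GIVE THE BUCKET WITHIN THE PAGE. */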
+ tnsCopyDir = scanPtr.p->nextBucketIndex >> fragrecptr.p->k;
+ tnsCopyIndex1 = tnsCopyDir >> 8;
+ tnsCopyIndex2 = tnsCopyDir & 0xff;
+ arrGuard(tnsCopyIndex1, 256);
+ tnsDirptr.i = gnsDirRangePtr.p->dirArray[tnsCopyIndex1];
+ ptrCheckGuard(tnsDirptr, cdirarraysize, directoryarray);
+ tnsPageidptr.i = tnsDirptr.p->pagep[tnsCopyIndex2];
+ ptrCheckGuard(tnsPageidptr, cpagesize, page8);
+ gnsPageidptr.i = tnsPageidptr.i;
+ gnsPageidptr.p = tnsPageidptr.p;
+ tnsTmp1 = (1 << fragrecptr.p->k) - 1;
+ tgsePageindex = scanPtr.p->nextBucketIndex & tnsTmp1;
+ gsePageidptr.i = gnsPageidptr.i;
+ gsePageidptr.p = gnsPageidptr.p;
+ if (!getScanElement(signal)) {
+ scanPtr.p->nextBucketIndex++;
+ if (scanPtr.p->scanBucketState == ScanRec::SECOND_LAP) {
+ if (scanPtr.p->nextBucketIndex > scanPtr.p->maxBucketIndexToRescan) {
+ /* --------------------------------------------------------------------------------- */
+ // We have finished the rescan phase. We are ready to proceed with the next fragment part.
+ /* --------------------------------------------------------------------------------- */
+ jam();
+ checkNextFragmentLab(signal);
+ return;
+ }//if
+ } else if (scanPtr.p->scanBucketState == ScanRec::FIRST_LAP) {
+ if ((fragrecptr.p->p + fragrecptr.p->maxp) < scanPtr.p->nextBucketIndex) {
+ /* --------------------------------------------------------------------------------- */
+ // All buckets have been scanned a first time.
+ /* --------------------------------------------------------------------------------- */
+ if (scanPtr.p->minBucketIndexToRescan == 0xFFFFFFFF) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+        // There have been no merges behind the scan, so it is not necessary to rescan
+        // any buckets and we can proceed immediately with the next fragment part.
+ /* --------------------------------------------------------------------------------- */
+ checkNextFragmentLab(signal);
+ return;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+        // Some buckets need rescanning because merges have moved records from in front of
+        // the scan to behind it. During the merges we kept track of which buckets need a
+        // rescan. We start with the minimum and end with the maximum.
+ /* --------------------------------------------------------------------------------- */
+ scanPtr.p->nextBucketIndex = scanPtr.p->minBucketIndexToRescan;
+ scanPtr.p->scanBucketState = ScanRec::SECOND_LAP;
+ if (scanPtr.p->maxBucketIndexToRescan > (fragrecptr.p->p + fragrecptr.p->maxp)) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+          // If there have been so many merges that the maximum is bigger than the number of
+          // buckets, we simply scan to the end. This can only happen after the total number
+          // of buckets has been brought down to less than half, and then the minimum must
+          // be 0; otherwise there is some problem.
+ /* --------------------------------------------------------------------------------- */
+ if (scanPtr.p->minBucketIndexToRescan != 0) {
+ jam();
+ sendSystemerror(signal);
+ return;
+ }//if
+ scanPtr.p->maxBucketIndexToRescan = fragrecptr.p->p + fragrecptr.p->maxp;
+ }//if
+ }//if
+ }//if
+ }//if
+ if ((scanPtr.p->scanBucketState == ScanRec::FIRST_LAP) &&
+ (scanPtr.p->nextBucketIndex <= scanPtr.p->startNoOfBuckets)) {
+ /* --------------------------------------------------------------------------------- */
+ // We will only reset the scan indicator on the buckets that existed at the start of the
+ // scan. The others will be handled by the split and merge code.
+ /* --------------------------------------------------------------------------------- */
+ tnsTmp2 = (1 << fragrecptr.p->k) - 1;
+ trsbPageindex = scanPtr.p->nextBucketIndex & tnsTmp2;
+ if (trsbPageindex != 0) {
+ jam();
+ rsbPageidptr.i = gnsPageidptr.i;
+ rsbPageidptr.p = gnsPageidptr.p;
+ } else {
+ jam();
+ cscDirRangePtr.i = fragrecptr.p->directory;
+ tmpP = scanPtr.p->nextBucketIndex >> fragrecptr.p->k;
+ tmpP2 = tmpP >> 8;
+ tmpP = tmpP & 0xff;
+ ptrCheckGuard(cscDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(tmpP2, 256);
+ cscDirptr.i = cscDirRangePtr.p->dirArray[tmpP2];
+ ptrCheckGuard(cscDirptr, cdirarraysize, directoryarray);
+ cscPageidptr.i = cscDirptr.p->pagep[tmpP];
+ ptrCheckGuard(cscPageidptr, cpagesize, page8);
+ tmp1 = (1 << fragrecptr.p->k) - 1;
+ trsbPageindex = scanPtr.p->nextBucketIndex & tmp1;
+ rsbPageidptr.i = cscPageidptr.i;
+ rsbPageidptr.p = cscPageidptr.p;
+ }//if
+ releaseScanBucket(signal);
+ }//if
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ return;
+ }//if
+ /* ----------------------------------------------------------------------- */
+  /* AN ELEMENT THAT HAS NOT BEEN SCANNED WAS FOUND. WE WILL PREPARE IT           */
+  /* TO BE SENT TO THE LQH BLOCK FOR FURTHER PROCESSING.                          */
+  /* WE ASSUME THERE ARE OPERATION RECORDS AVAILABLE SINCE LQH SHOULD HAVE        */
+  /* GUARANTEED THAT THROUGH EARLY BOOKING.                                       */
+ /* ----------------------------------------------------------------------- */
+ tnsIsLocked = tgseIsLocked;
+ tnsElementptr = tgseElementptr;
+ tnsContainerptr = tgseContainerptr;
+ nsPageptr.i = gsePageidptr.i;
+ nsPageptr.p = gsePageidptr.p;
+ seizeOpRec(signal);
+ tisoIsforward = tgseIsforward;
+ tisoContainerptr = tnsContainerptr;
+ tisoElementptr = tnsElementptr;
+ isoPageptr.i = nsPageptr.i;
+ isoPageptr.p = nsPageptr.p;
+ initScanOpRec(signal);
+
+ if (!tnsIsLocked){
+ if (!scanPtr.p->scanReadCommittedFlag) {
+ jam();
+ slPageidptr = nsPageptr;
+ tslElementptr = tnsElementptr;
+ setlock(signal);
+ insertLockOwnersList(signal, operationRecPtr);
+ }//if
+ } else {
+ arrGuard(tnsElementptr, 2048);
+ queOperPtr.i =
+ ElementHeader::getOpPtrI(nsPageptr.p->word32[tnsElementptr]);
+ ptrCheckGuard(queOperPtr, coprecsize, operationrec);
+ if (queOperPtr.p->elementIsDisappeared == ZTRUE) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+      // If the lock owner indicates that the element has disappeared then we will not report this
+ // tuple. We will continue with the next tuple.
+ /* --------------------------------------------------------------------------------- */
+ releaseOpRec(signal);
+ scanPtr.p->scanOpsAllocated--;
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ return;
+ }//if
+ if (!scanPtr.p->scanReadCommittedFlag) {
+ Uint32 return_result;
+ if (scanPtr.p->scanLockMode == ZREADLOCK) {
+ jam();
+ priPageptr = nsPageptr;
+ tpriElementptr = tnsElementptr;
+ return_result = placeReadInLockQueue(signal);
+ } else {
+ jam();
+ pwiPageptr = nsPageptr;
+ tpwiElementptr = tnsElementptr;
+ return_result = placeWriteInLockQueue(signal);
+ }//if
+ if (return_result == ZSERIAL_QUEUE) {
+ /* --------------------------------------------------------------------------------- */
+ /* WE PLACED THE OPERATION INTO A SERIAL QUEUE AND THUS WE HAVE TO WAIT FOR */
+ /* THE LOCK TO BE RELEASED. WE CONTINUE WITH THE NEXT ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ putOpScanLockQue(); /* PUT THE OP IN A QUE IN THE SCAN REC */
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ return;
+ } else if (return_result == ZWRITE_ERROR) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // The tuple is either not committed yet or a delete in the same transaction (not
+ // possible here since we are a scan). Thus we simply continue with the next tuple.
+ /* --------------------------------------------------------------------------------- */
+ releaseOpRec(signal);
+ scanPtr.p->scanOpsAllocated--;
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ return;
+ }//if
+ ndbassert(return_result == ZPARALLEL_QUEUE);
+ }//if
+ }//if
+ /* --------------------------------------------------------------------------------- */
+  // Committed reads proceed directly to this point without caring about locks, except when
+  // the tuple has been deleted permanently and no new operation has inserted it again.
+ /* --------------------------------------------------------------------------------- */
+ putActiveScanOp(signal);
+ sendNextScanConf(signal);
+ return;
+}//Dbacc::checkNextBucketLab()
+
+
+void Dbacc::checkNextFragmentLab(Signal* signal)
+{
+ RootfragmentrecPtr cnfRootfragrecptr;
+
+ cnfRootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(cnfRootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (scanPtr.p->activeLocalFrag == cnfRootfragrecptr.p->fragmentptr[0]) {
+ jam();
+ fragrecptr.i = cnfRootfragrecptr.p->fragmentptr[1];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ initScanFragmentPart(signal);
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ return;
+ } else {
+ if (scanPtr.p->activeLocalFrag == cnfRootfragrecptr.p->fragmentptr[1]) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // Both fragments have completed their scan part and we can indicate that the scan is
+ // now completed.
+ /* --------------------------------------------------------------------------------- */
+ scanPtr.p->scanBucketState = ScanRec::SCAN_COMPLETED;
+ /*empty*/;
+ } else {
+ jam();
+ /* ALL ELEMENTS ARE SENT */
+ sendSystemerror(signal);
+ }//if
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ // The scan is completed. ACC_CHECK_SCAN will perform all the necessary checks to see
+ // what the next step is.
+ /* --------------------------------------------------------------------------------- */
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ execACC_CHECK_SCAN(signal);
+ return;
+}//Dbacc::checkNextFragmentLab()
+
+void Dbacc::initScanFragmentPart(Signal* signal)
+{
+ DirRangePtr cnfDirRangePtr;
+ DirectoryarrayPtr cnfDirptr;
+ Page8Ptr cnfPageidptr;
+ /* --------------------------------------------------------------------------------- */
+ // Set the active fragment part.
+ // Set the current bucket scanned to the first.
+ // Start with the first lap.
+ // Remember the number of buckets at start of the scan.
+ // Set the minimum and maximum to values that will always be smaller and larger than.
+ // Reset the scan indicator on the first bucket.
+ /* --------------------------------------------------------------------------------- */
+ scanPtr.p->activeLocalFrag = fragrecptr.i;
+ scanPtr.p->nextBucketIndex = 0; /* INDEX OF SCAN BUCKET */
+ scanPtr.p->scanBucketState = ScanRec::FIRST_LAP;
+ scanPtr.p->startNoOfBuckets = fragrecptr.p->p + fragrecptr.p->maxp;
+ scanPtr.p->minBucketIndexToRescan = 0xFFFFFFFF;
+ scanPtr.p->maxBucketIndexToRescan = 0;
+ cnfDirRangePtr.i = fragrecptr.p->directory;
+ ptrCheckGuard(cnfDirRangePtr, cdirrangesize, dirRange);
+ cnfDirptr.i = cnfDirRangePtr.p->dirArray[0];
+ ptrCheckGuard(cnfDirptr, cdirarraysize, directoryarray);
+ cnfPageidptr.i = cnfDirptr.p->pagep[0];
+ ptrCheckGuard(cnfPageidptr, cpagesize, page8);
+ trsbPageindex = scanPtr.p->nextBucketIndex & ((1 << fragrecptr.p->k) - 1);
+ rsbPageidptr.i = cnfPageidptr.i;
+ rsbPageidptr.p = cnfPageidptr.p;
+ releaseScanBucket(signal);
+}//Dbacc::initScanFragmentPart()
+
+/* --------------------------------------------------------------------------------- */
+/* FLAG = 6 = ZCOPY_CLOSE: THE SCAN PROCESS IS READY OR ABORTED. ALL OPERATIONS IN THE      */
+/* ACTIVE OR WAIT QUEUES ARE RELEASED, THE SCAN FLAG OF THE ROOT FRAGMENT IS RESET AND THE  */
+/* SCAN RECORD IS RELEASED.                                                                 */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseScanLab(Signal* signal)
+{
+ releaseAndCommitActiveOps(signal);
+ releaseAndCommitQueuedOps(signal);
+ releaseAndAbortLockedOps(signal);
+
+ rootfragrecptr.i = scanPtr.p->rootPtr;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ for (tmp = 0; tmp < MAX_PARALLEL_SCANS_PER_FRAG; tmp++) {
+ jam();
+ if (rootfragrecptr.p->scan[tmp] == scanPtr.i) {
+ jam();
+ rootfragrecptr.p->scan[tmp] = RNIL;
+ }//if
+ }//for
+ // Stops the heartbeat.
+ scanPtr.p->scanTimer = 0;
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ signal->theData[1] = RNIL;
+ signal->theData[2] = RNIL;
+ sendSignal(scanPtr.p->scanUserblockref, GSN_NEXT_SCANCONF, signal, 3, JBB);
+ releaseScanRec(signal);
+ return;
+}//Dbacc::releaseScanLab()
+
+
+void Dbacc::releaseAndCommitActiveOps(Signal* signal)
+{
+ OperationrecPtr trsoOperPtr;
+ operationRecPtr.i = scanPtr.p->scanFirstActiveOp;
+ while (operationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ trsoOperPtr.i = operationRecPtr.p->nextOp;
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ if (!scanPtr.p->scanReadCommittedFlag) {
+ jam();
+ commitOperation(signal);
+ }//if
+ takeOutActiveScanOp(signal);
+ releaseOpRec(signal);
+ scanPtr.p->scanOpsAllocated--;
+ operationRecPtr.i = trsoOperPtr.i;
+  }//while
+}//Dbacc::releaseAndCommitActiveOps()
+
+
+void Dbacc::releaseAndCommitQueuedOps(Signal* signal)
+{
+ OperationrecPtr trsoOperPtr;
+ operationRecPtr.i = scanPtr.p->scanFirstQueuedOp;
+ while (operationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ trsoOperPtr.i = operationRecPtr.p->nextOp;
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ if (!scanPtr.p->scanReadCommittedFlag) {
+ jam();
+ commitOperation(signal);
+ }//if
+ takeOutReadyScanQueue(signal);
+ releaseOpRec(signal);
+ scanPtr.p->scanOpsAllocated--;
+ operationRecPtr.i = trsoOperPtr.i;
+  }//while
+}//Dbacc::releaseAndCommitQueuedOps()
+
+void Dbacc::releaseAndAbortLockedOps(Signal* signal) {
+
+ OperationrecPtr trsoOperPtr;
+ operationRecPtr.i = scanPtr.p->scanFirstLockedOp;
+ while (operationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ trsoOperPtr.i = operationRecPtr.p->nextOp;
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ if (!scanPtr.p->scanReadCommittedFlag) {
+ jam();
+ abortOperation(signal);
+ }//if
+ takeOutScanLockQueue(scanPtr.i);
+ releaseOpRec(signal);
+ scanPtr.p->scanOpsAllocated--;
+ operationRecPtr.i = trsoOperPtr.i;
+  }//while
+}//Dbacc::releaseAndAbortLockedOps()
+
+/* 3.18.3 ACC_CHECK_SCAN */
+/* ******************--------------------------------------------------------------- */
+/* ACC_CHECK_SCAN */
+/* ENTER ACC_CHECK_SCAN WITH */
+/* SCAN_PTR */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACC_CHECK_SCAN */
+/* ******************------------------------------+ */
+void Dbacc::execACC_CHECK_SCAN(Signal* signal)
+{
+ Uint32 TcheckLcpStop;
+ jamEntry();
+ scanPtr.i = signal->theData[0];
+ TcheckLcpStop = signal->theData[1];
+ ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
+ while (scanPtr.p->scanFirstQueuedOp != RNIL) {
+ jam();
+ //----------------------------------------------------------------------------
+ // An operation has been released from the lock queue. We are in the parallel
+ // queue of this tuple. We are ready to report the tuple now.
+ //----------------------------------------------------------------------------
+ operationRecPtr.i = scanPtr.p->scanFirstQueuedOp;
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ takeOutReadyScanQueue(signal);
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ if (operationRecPtr.p->elementIsDisappeared == ZTRUE) {
+ jam();
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_COMMIT) {
+ jam();
+ /*--------------------------------------------------------------*/
+ // We did not have enough undo log buffers to safely abort an
+ // operation. Try again in 10 milliseconds.
+ /*--------------------------------------------------------------*/
+ sendSignalWithDelay(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 10, 2);
+ return;
+ }//if
+ }//if
+ abortOperation(signal);
+ releaseOpRec(signal);
+ scanPtr.p->scanOpsAllocated--;
+ continue;
+ }//if
+ putActiveScanOp(signal);
+ sendNextScanConf(signal);
+ return;
+ }//while
+
+
+ if ((scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) &&
+ (scanPtr.p->scanLockHeld == 0)) {
+ jam();
+ //----------------------------------------------------------------------------
+    // The scan is now completed and there are no more locks outstanding. Thus we
+    // will report the scan as completed to LQH.
+ //----------------------------------------------------------------------------
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ signal->theData[1] = RNIL;
+ signal->theData[2] = RNIL;
+ sendSignal(scanPtr.p->scanUserblockref, GSN_NEXT_SCANCONF, signal, 3, JBB);
+ return;
+ }//if
+ if (TcheckLcpStop == AccCheckScan::ZCHECK_LCP_STOP) {
+ //---------------------------------------------------------------------------
+    // To ensure that the blocking of the fragment that occurs at the start of a local
+    // checkpoint is not held for too long, we insert a release and reacquisition of
+    // that lock here. This is performed in LQH. If we are blocked or if we have
+    // requested a sleep, we will receive RNIL in the returned signal word.
+ //---------------------------------------------------------------------------
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ signal->theData[1] =
+ ((scanPtr.p->scanLockHeld >= ZSCAN_MAX_LOCK) ||
+ (scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED));
+ EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
+ jamEntry();
+ if (signal->theData[0] == RNIL) {
+ jam();
+ return;
+ }//if
+ }//if
+ /**
+   * If we have more than max locks held OR
+   * there are no free operation records OR
+   * scan is completed AND at least one lock held
+   *  - Inform LQH about this condition
+ */
+ if ((scanPtr.p->scanLockHeld >= ZSCAN_MAX_LOCK) ||
+ (cfreeopRec == RNIL) ||
+ ((scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) &&
+ (scanPtr.p->scanLockHeld > 0))) {
+ jam();
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ signal->theData[1] = RNIL; // No operation is returned
+ signal->theData[2] = 512; // MASV
+ sendSignal(scanPtr.p->scanUserblockref, GSN_NEXT_SCANCONF, signal, 3, JBB);
+ return;
+ }
+ if (scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) {
+ jam();
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ execACC_CHECK_SCAN(signal);
+ return;
+ }//if
+
+ scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter;
+
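+  /* NO QUEUED OPERATIONS AND THE SCAN IS NOT COMPLETE. CONTINUE WITH THE NEXT BUCKET OF    */
+  /* THE ACTIVE FRAGMENT PART.                                                              */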
+ fragrecptr.i = scanPtr.p->activeLocalFrag;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ gnsDirRangePtr.i = fragrecptr.p->directory;
+ ptrCheckGuard(gnsDirRangePtr, cdirrangesize, dirRange);
+ checkNextBucketLab(signal);
+ return;
+}//Dbacc::execACC_CHECK_SCAN()
+
+/* ******************---------------------------------------------------- */
+/* ACC_TO_REQ PERFORM A TAKE OVER */
+/* ******************-------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execACC_TO_REQ(Signal* signal)
+{
+ OperationrecPtr tatrOpPtr;
+
+ jamEntry();
+ tatrOpPtr.i = signal->theData[1]; /* OPER PTR OF ACC */
+ ptrCheckGuard(tatrOpPtr, coprecsize, operationrec);
+ if (tatrOpPtr.p->operation == ZSCAN_OP) {
+ tatrOpPtr.p->transId1 = signal->theData[2];
+ tatrOpPtr.p->transId2 = signal->theData[3];
+ } else {
+ jam();
+ signal->theData[0] = cminusOne;
+ signal->theData[1] = ZTO_OP_STATE_ERROR;
+ }//if
+ return;
+}//Dbacc::execACC_TO_REQ()
+
+/* --------------------------------------------------------------------------------- */
+/* CONTAINERINFO */
+/* INPUT: */
+/* CI_PAGEIDPTR (PAGE POINTER WHERE CONTAINER RESIDES) */
+/* TCI_PAGEINDEX (INDEX OF CONTAINER, USED TO CALCULATE PAGE INDEX) */
+/* TCI_ISFORWARD (DIRECTION OF CONTAINER FORWARD OR BACKWARD) */
+/* */
+/* OUTPUT: */
+/* TCI_CONTAINERPTR (A POINTER TO THE HEAD OF THE CONTAINER) */
+/*               TCI_CONTAINERLEN (LENGTH OF THE CONTAINER)                                 */
+/* TCI_CONTAINERHEAD (THE HEADER OF THE CONTAINER) */
+/* */
+/* DESCRIPTION: THE ADDRESS OF THE CONTAINER WILL BE CALCULATED AND */
+/* ALL INFORMATION ABOUT THE CONTAINER WILL BE READ */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::containerinfo(Signal* signal)
+{
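+  /* FORWARD CONTAINERS ARE ADDRESSED FROM THE START OF THE BUFFER AND BACKWARD CONTAINERS  */
+  /* FROM ITS END. THE CONTAINER LENGTH IS KEPT IN THE TOP SIX BITS OF THE CONTAINER HEADER.*/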
+ tciContainerptr = (tciPageindex << ZSHIFT_PLUS) - (tciPageindex << ZSHIFT_MINUS);
+ if (tciIsforward == ZTRUE) {
+ jam();
+ tciContainerptr = tciContainerptr + ZHEAD_SIZE;
+ } else {
+ jam();
+ tciContainerptr = ((tciContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE;
+ }//if
+ arrGuard(tciContainerptr, 2048);
+ tciContainerhead = ciPageidptr.p->word32[tciContainerptr];
+ tciContainerlen = tciContainerhead >> 26;
+}//Dbacc::containerinfo()
+
+/* --------------------------------------------------------------------------------- */
+/* GET_SCAN_ELEMENT */
+/* INPUT: GSE_PAGEIDPTR */
+/* TGSE_PAGEINDEX */
+/* OUTPUT: TGSE_IS_LOCKED (IF TRESULT /= ZFALSE) */
+/* GSE_PAGEIDPTR */
+/* TGSE_PAGEINDEX */
+/* --------------------------------------------------------------------------------- */
+bool Dbacc::getScanElement(Signal* signal)
+{
+ tgseIsforward = ZTRUE;
+ NEXTSEARCH_SCAN_LOOP:
+ ciPageidptr.i = gsePageidptr.i;
+ ciPageidptr.p = gsePageidptr.p;
+ tciPageindex = tgsePageindex;
+ tciIsforward = tgseIsforward;
+ containerinfo(signal);
+ sscPageidptr.i = gsePageidptr.i;
+ sscPageidptr.p = gsePageidptr.p;
+ tsscContainerlen = tciContainerlen;
+ tsscContainerptr = tciContainerptr;
+ tsscIsforward = tciIsforward;
+ if (searchScanContainer(signal)) {
+ jam();
+ tgseIsLocked = tsscIsLocked;
+ tgseElementptr = tsscElementptr;
+ tgseContainerptr = tsscContainerptr;
+ return true;
+ }//if
+ if (((tciContainerhead >> 7) & 0x3) != 0) {
+ jam();
+ nciPageidptr.i = gsePageidptr.i;
+ nciPageidptr.p = gsePageidptr.p;
+ tnciContainerhead = tciContainerhead;
+ tnciContainerptr = tciContainerptr;
+ nextcontainerinfo(signal);
+ tgsePageindex = tnciPageindex;
+ gsePageidptr.i = nciPageidptr.i;
+ gsePageidptr.p = nciPageidptr.p;
+ tgseIsforward = tnciIsforward;
+ goto NEXTSEARCH_SCAN_LOOP;
+ }//if
+ return false;
+}//Dbacc::getScanElement()
+
+/* --------------------------------------------------------------------------------- */
+/* INIT_SCAN_OP_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initScanOpRec(Signal* signal)
+{
+ Uint32 tisoTmp;
+ Uint32 tisoLocalPtr;
+ Uint32 guard24;
+
+ scanPtr.p->scanOpsAllocated++;
+
+ operationRecPtr.p->scanRecPtr = scanPtr.i;
+ operationRecPtr.p->operation = ZSCAN_OP;
+ operationRecPtr.p->transactionstate = ACTIVE;
+ operationRecPtr.p->commitDeleteCheckFlag = ZFALSE;
+ operationRecPtr.p->lockMode = scanPtr.p->scanLockMode;
+ operationRecPtr.p->fid = fragrecptr.p->myfid;
+ operationRecPtr.p->fragptr = fragrecptr.i;
+ operationRecPtr.p->elementIsDisappeared = ZFALSE;
+ operationRecPtr.p->nextParallelQue = RNIL;
+ operationRecPtr.p->prevParallelQue = RNIL;
+ operationRecPtr.p->nextSerialQue = RNIL;
+ operationRecPtr.p->prevSerialQue = RNIL;
+ operationRecPtr.p->prevQueOp = RNIL;
+ operationRecPtr.p->nextQueOp = RNIL;
+ operationRecPtr.p->keyinfoPage = RNIL; // Safety precaution
+ operationRecPtr.p->transId1 = scanPtr.p->scanTrid1;
+ operationRecPtr.p->transId2 = scanPtr.p->scanTrid2;
+ operationRecPtr.p->lockOwner = ZFALSE;
+ operationRecPtr.p->dirtyRead = 0;
+ operationRecPtr.p->nodeType = 0; // Not a stand-by node
+ operationRecPtr.p->elementIsforward = tisoIsforward;
+ operationRecPtr.p->elementContainer = tisoContainerptr;
+ operationRecPtr.p->elementPointer = tisoElementptr;
+ operationRecPtr.p->elementPage = isoPageptr.i;
+ operationRecPtr.p->isAccLockReq = ZFALSE;
+ operationRecPtr.p->isUndoLogReq = ZFALSE;
+ tisoLocalPtr = tisoElementptr + tisoIsforward;
+ guard24 = fragrecptr.p->localkeylen - 1;
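+  /* COPY THE LOCAL KEY WORDS STORED NEXT TO THE ELEMENT HEADER, STEPPING IN THE            */
+  /* DIRECTION OF THE CONTAINER (TISO_ISFORWARD).                                           */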
+ for (tisoTmp = 0; tisoTmp <= guard24; tisoTmp++) {
+ arrGuard(tisoTmp, 2);
+ arrGuard(tisoLocalPtr, 2048);
+ operationRecPtr.p->localdata[tisoTmp] = isoPageptr.p->word32[tisoLocalPtr];
+ tisoLocalPtr = tisoLocalPtr + tisoIsforward;
+ }//for
+ arrGuard(tisoLocalPtr, 2048);
+ operationRecPtr.p->keydata[0] = isoPageptr.p->word32[tisoLocalPtr];
+ operationRecPtr.p->tupkeylen = fragrecptr.p->keyLength;
+ operationRecPtr.p->xfrmtupkeylen = 0; // not used
+}//Dbacc::initScanOpRec()
+
+/* --------------------------------------------------------------------------------- */
+/* NEXTCONTAINERINFO */
+/* DESCRIPTION:THE CONTAINER HEAD WILL BE CHECKED TO CALCULATE INFORMATION */
+/* ABOUT NEXT CONTAINER IN THE BUCKET. */
+/* INPUT: TNCI_CONTAINERHEAD */
+/* NCI_PAGEIDPTR */
+/* TNCI_CONTAINERPTR */
+/* OUTPUT: */
+/* TNCI_PAGEINDEX (INDEX FROM WHICH PAGE INDEX CAN BE CALCULATED). */
+/* TNCI_ISFORWARD (IS THE NEXT CONTAINER FORWARD (+1) OR BACKWARD (-1) */
+/* NCI_PAGEIDPTR (PAGE REFERENCE OF NEXT CONTAINER) */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::nextcontainerinfo(Signal* signal)
+{
+ tnciNextSamePage = (tnciContainerhead >> 9) & 0x1; /* CHECK BIT FOR CHECKING WHERE */
+ /* THE NEXT CONTAINER IS IN THE SAME PAGE */
+ tnciPageindex = tnciContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */
+ if (((tnciContainerhead >> 7) & 3) == ZLEFT) {
+ jam();
+ tnciIsforward = ZTRUE;
+ } else {
+ jam();
+ tnciIsforward = cminusOne;
+ }//if
+ if (tnciNextSamePage == ZFALSE) {
+ jam();
+ /* NEXT CONTAINER IS IN AN OVERFLOW PAGE */
+ arrGuard(tnciContainerptr + 1, 2048);
+ tnciTmp = nciPageidptr.p->word32[tnciContainerptr + 1];
+ nciOverflowrangeptr.i = fragrecptr.p->overflowdir;
+ ptrCheckGuard(nciOverflowrangeptr, cdirrangesize, dirRange);
+ arrGuard((tnciTmp >> 8), 256);
+ nciOverflowDirptr.i = nciOverflowrangeptr.p->dirArray[tnciTmp >> 8];
+ ptrCheckGuard(nciOverflowDirptr, cdirarraysize, directoryarray);
+ nciPageidptr.i = nciOverflowDirptr.p->pagep[tnciTmp & 0xff];
+ ptrCheckGuard(nciPageidptr, cpagesize, page8);
+ }//if
+}//Dbacc::nextcontainerinfo()
+
+/* --------------------------------------------------------------------------------- */
+/* PUT_ACTIVE_SCAN_OP */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::putActiveScanOp(Signal* signal)
+{
+ OperationrecPtr pasOperationRecPtr;
+ pasOperationRecPtr.i = scanPtr.p->scanFirstActiveOp;
+ if (pasOperationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(pasOperationRecPtr, coprecsize, operationrec);
+ pasOperationRecPtr.p->prevOp = operationRecPtr.i;
+ }//if
+ operationRecPtr.p->nextOp = pasOperationRecPtr.i;
+ operationRecPtr.p->prevOp = RNIL;
+ scanPtr.p->scanFirstActiveOp = operationRecPtr.i;
+}//Dbacc::putActiveScanOp()
+
+/**
+ * putOpScanLockQueue
+ *
+ * Description: Put an operation in the doubly linked
+ * lock list on a scan record. The list is used to
+ *              keep track of which operations belonging
+ *              to the scan have been put in the serial lock list of
+ *              another operation
+ *
+ * @note Use takeOutScanLockQueue to remove an operation
+ * from the list
+ *
+ */
+void Dbacc::putOpScanLockQue()
+{
+
+#ifdef VM_TRACE
+ // DEBUG CODE
+ // Check that there are as many operations in the lockqueue as
+ // scanLockHeld indicates
+ OperationrecPtr tmpOp;
+ int numLockedOpsBefore = 0;
+ tmpOp.i = scanPtr.p->scanFirstLockedOp;
+ while(tmpOp.i != RNIL){
+ numLockedOpsBefore++;
+ ptrCheckGuard(tmpOp, coprecsize, operationrec);
+ if (tmpOp.p->nextOp == RNIL)
+ ndbrequire(tmpOp.i == scanPtr.p->scanLastLockedOp);
+ tmpOp.i = tmpOp.p->nextOp;
+ }
+ ndbrequire(numLockedOpsBefore==scanPtr.p->scanLockHeld);
+#endif
+
+ OperationrecPtr pslOperationRecPtr;
+ ScanRec theScanRec;
+ theScanRec = *scanPtr.p;
+
+ pslOperationRecPtr.i = scanPtr.p->scanLastLockedOp;
+ operationRecPtr.p->prevOp = pslOperationRecPtr.i;
+ operationRecPtr.p->nextOp = RNIL;
+ if (pslOperationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(pslOperationRecPtr, coprecsize, operationrec);
+ pslOperationRecPtr.p->nextOp = operationRecPtr.i;
+ } else {
+ jam();
+ scanPtr.p->scanFirstLockedOp = operationRecPtr.i;
+ }//if
+ scanPtr.p->scanLastLockedOp = operationRecPtr.i;
+ scanPtr.p->scanLockHeld++;
+
+}//Dbacc::putOpScanLockQue()
+
+/* --------------------------------------------------------------------------------- */
+/* PUT_READY_SCAN_QUEUE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::putReadyScanQueue(Signal* signal, Uint32 scanRecIndex)
+{
+ OperationrecPtr prsOperationRecPtr;
+ ScanRecPtr TscanPtr;
+
+ TscanPtr.i = scanRecIndex;
+ ptrCheckGuard(TscanPtr, cscanRecSize, scanRec);
+
+ prsOperationRecPtr.i = TscanPtr.p->scanLastQueuedOp;
+ operationRecPtr.p->prevOp = prsOperationRecPtr.i;
+ operationRecPtr.p->nextOp = RNIL;
+ TscanPtr.p->scanLastQueuedOp = operationRecPtr.i;
+ if (prsOperationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(prsOperationRecPtr, coprecsize, operationrec);
+ prsOperationRecPtr.p->nextOp = operationRecPtr.i;
+ } else {
+ jam();
+ TscanPtr.p->scanFirstQueuedOp = operationRecPtr.i;
+ }//if
+}//Dbacc::putReadyScanQueue()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_SCAN_BUCKET */
+// Input:
+// rsbPageidptr.i Index to page where buckets starts
+// rsbPageidptr.p Pointer to page where bucket starts
+// trsbPageindex Page index of starting container in bucket
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseScanBucket(Signal* signal)
+{
+ Uint32 trsbIsforward;
+
+ trsbIsforward = ZTRUE;
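+  /* WALK ALL CONTAINERS IN THE BUCKET, RESETTING THIS SCAN'S SCAN BIT IN EVERY ELEMENT     */
+  /* AND FOLLOWING THE NEXT CONTAINER LINKS UNTIL THERE ARE NO MORE.                        */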
+ NEXTRELEASESCANLOOP:
+ ciPageidptr.i = rsbPageidptr.i;
+ ciPageidptr.p = rsbPageidptr.p;
+ tciPageindex = trsbPageindex;
+ tciIsforward = trsbIsforward;
+ containerinfo(signal);
+ rscPageidptr.i = rsbPageidptr.i;
+ rscPageidptr.p = rsbPageidptr.p;
+ trscContainerlen = tciContainerlen;
+ trscContainerptr = tciContainerptr;
+ trscIsforward = trsbIsforward;
+ releaseScanContainer(signal);
+ if (((tciContainerhead >> 7) & 0x3) != 0) {
+ jam();
+ nciPageidptr.i = rsbPageidptr.i;
+ nciPageidptr.p = rsbPageidptr.p;
+ tnciContainerhead = tciContainerhead;
+ tnciContainerptr = tciContainerptr;
+ nextcontainerinfo(signal);
+ rsbPageidptr.i = nciPageidptr.i;
+ rsbPageidptr.p = nciPageidptr.p;
+ trsbPageindex = tnciPageindex;
+ trsbIsforward = tnciIsforward;
+ goto NEXTRELEASESCANLOOP;
+ }//if
+}//Dbacc::releaseScanBucket()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_SCAN_CONTAINER */
+/* INPUT: TRSC_CONTAINERLEN */
+/* RSC_PAGEIDPTR */
+/* TRSC_CONTAINERPTR */
+/* TRSC_ISFORWARD */
+/* SCAN_PTR */
+/* */
+/*               DESCRIPTION: SEARCHES A CONTAINER AND RESETS THE SCAN BIT OF THE           */
+/*                            ELEMENTS OF THE CONTAINER                                     */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseScanContainer(Signal* signal)
+{
+ OperationrecPtr rscOperPtr;
+ Uint32 trscElemStep;
+ Uint32 trscElementptr;
+ Uint32 trscElemlens;
+ Uint32 trscElemlen;
+
+ if (trscContainerlen < 4) {
+ if (trscContainerlen != ZCON_HEAD_SIZE) {
+ jam();
+ sendSystemerror(signal);
+ }//if
+ return; /* 2 IS THE MINIMUM SIZE OF THE ELEMENT */
+ }//if
+ trscElemlens = trscContainerlen - ZCON_HEAD_SIZE;
+ trscElemlen = fragrecptr.p->elementLength;
+ if (trscIsforward == 1) {
+ jam();
+ trscElementptr = trscContainerptr + ZCON_HEAD_SIZE;
+ trscElemStep = trscElemlen;
+ } else {
+ jam();
+ trscElementptr = trscContainerptr - 1;
+ trscElemStep = 0 - trscElemlen;
+ }//if
+ do {
+ arrGuard(trscElementptr, 2048);
+ const Uint32 eh = rscPageidptr.p->word32[trscElementptr];
+ const Uint32 scanMask = scanPtr.p->scanMask;
+ if (ElementHeader::getUnlocked(eh)) {
+ jam();
+ const Uint32 tmp = ElementHeader::clearScanBit(eh, scanMask);
+ dbgWord32(rscPageidptr, trscElementptr, tmp);
+ rscPageidptr.p->word32[trscElementptr] = tmp;
+ } else {
+ jam();
+ rscOperPtr.i = ElementHeader::getOpPtrI(eh);
+ ptrCheckGuard(rscOperPtr, coprecsize, operationrec);
+ rscOperPtr.p->scanBits &= ~scanMask;
+ }//if
+ trscElemlens = trscElemlens - trscElemlen;
+ trscElementptr = trscElementptr + trscElemStep;
+ } while (trscElemlens > 1);
+ if (trscElemlens != 0) {
+ jam();
+ sendSystemerror(signal);
+ }//if
+}//Dbacc::releaseScanContainer()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_SCAN_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseScanRec(Signal* signal)
+{
+ // Check that all ops this scan has allocated have been
+ // released
+ ndbrequire(scanPtr.p->scanOpsAllocated==0);
+
+  // Check that all locks this scan might have acquired
+  // have been properly released
+ ndbrequire(scanPtr.p->scanLockHeld == 0);
+ ndbrequire(scanPtr.p->scanFirstLockedOp == RNIL);
+ ndbrequire(scanPtr.p->scanLastLockedOp == RNIL);
+
+ // Check that all active operations have been
+ // properly released
+ ndbrequire(scanPtr.p->scanFirstActiveOp == RNIL);
+
+ // Check that all queued operations have been
+ // properly released
+ ndbrequire(scanPtr.p->scanFirstQueuedOp == RNIL);
+ ndbrequire(scanPtr.p->scanLastQueuedOp == RNIL);
+
+ // Put scan record in free list
+ scanPtr.p->scanNextfreerec = cfirstFreeScanRec;
+ scanPtr.p->scanState = ScanRec::SCAN_DISCONNECT;
+ cfirstFreeScanRec = scanPtr.i;
+
+}//Dbacc::releaseScanRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEARCH_SCAN_CONTAINER */
+/* INPUT: TSSC_CONTAINERLEN */
+/* TSSC_CONTAINERPTR */
+/* TSSC_ISFORWARD */
+/* SSC_PAGEIDPTR */
+/* SCAN_PTR */
+/* OUTPUT: TSSC_IS_LOCKED */
+/* */
+/* DESCRIPTION: SEARCH IN A CONTAINER TO FIND THE NEXT SCAN ELEMENT. */
+/* TO DO THIS THE SCAN BIT OF THE ELEMENT HEADER IS CHECKED. IF */
+/* THIS BIT IS ZERO, IT IS SET TO ONE AND THE ELEMENT IS RETURNED.*/
+/* --------------------------------------------------------------------------------- */
+bool Dbacc::searchScanContainer(Signal* signal)
+{
+ OperationrecPtr sscOperPtr;
+ Uint32 tsscScanBits;
+ Uint32 tsscElemlens;
+ Uint32 tsscElemlen;
+ Uint32 tsscElemStep;
+
+ if (tsscContainerlen < 4) {
+ jam();
+ return false; /* 2 IS THE MINIMUM SIZE OF THE ELEMENT */
+ }//if
+ tsscElemlens = tsscContainerlen - ZCON_HEAD_SIZE;
+ tsscElemlen = fragrecptr.p->elementLength;
+ /* LENGTH OF THE ELEMENT */
+ if (tsscIsforward == 1) {
+ jam();
+ tsscElementptr = tsscContainerptr + ZCON_HEAD_SIZE;
+ tsscElemStep = tsscElemlen;
+ } else {
+ jam();
+ tsscElementptr = tsscContainerptr - 1;
+ tsscElemStep = 0 - tsscElemlen;
+ }//if
+ SCANELEMENTLOOP001:
+ arrGuard(tsscElementptr, 2048);
+ const Uint32 eh = sscPageidptr.p->word32[tsscElementptr];
+ tsscIsLocked = ElementHeader::getLocked(eh);
+ if (!tsscIsLocked){
+ jam();
+ tsscScanBits = ElementHeader::getScanBits(eh);
+ if ((scanPtr.p->scanMask & tsscScanBits) == 0) {
+ jam();
+ const Uint32 tmp = ElementHeader::setScanBit(eh, scanPtr.p->scanMask);
+ dbgWord32(sscPageidptr, tsscElementptr, tmp);
+ sscPageidptr.p->word32[tsscElementptr] = tmp;
+ return true;
+ }//if
+ } else {
+ jam();
+ sscOperPtr.i = ElementHeader::getOpPtrI(eh);
+ ptrCheckGuard(sscOperPtr, coprecsize, operationrec);
+ if ((sscOperPtr.p->scanBits & scanPtr.p->scanMask) == 0) {
+ jam();
+ sscOperPtr.p->scanBits |= scanPtr.p->scanMask;
+ return true;
+ }//if
+ }//if
+ /* THE ELEMENT IS ALREADY SENT. */
+ /* SEARCH FOR NEXT ONE */
+ tsscElemlens = tsscElemlens - tsscElemlen;
+ if (tsscElemlens > 1) {
+ jam();
+ tsscElementptr = tsscElementptr + tsscElemStep;
+ goto SCANELEMENTLOOP001;
+ }//if
+ return false;
+}//Dbacc::searchScanContainer()
+
+/* --------------------------------------------------------------------------------- */
+/* SEND THE RESPONSE NEXT_SCANCONF AND POSSIBLE KEYINFO SIGNALS AS WELL. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::sendNextScanConf(Signal* signal)
+{
+ scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter;
+ Uint32 blockNo = refToBlock(scanPtr.p->scanUserblockref);
+ jam();
+ /** ---------------------------------------------------------------------
+ * LQH WILL NOT HAVE ANY USE OF THE TUPLE KEY LENGTH IN THIS CASE AND
+ * SO WE DO NOT PROVIDE IT. IN THIS CASE THESE VALUES ARE UNDEFINED.
+ * ---------------------------------------------------------------------- */
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ signal->theData[1] = operationRecPtr.i;
+ signal->theData[2] = operationRecPtr.p->fid;
+ signal->theData[3] = operationRecPtr.p->localdata[0];
+ signal->theData[4] = operationRecPtr.p->localdata[1];
+ signal->theData[5] = fragrecptr.p->localkeylen;
+ EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 6);
+ return;
+}//Dbacc::sendNextScanConf()
+
+/*---------------------------------------------------------------------------
+ * sendScanHbRep
+ * Description: Using Dispatcher::execute() to send a heartbeat to DBTC
+ *              from DBLQH telling it that the scan is alive. We use the
+ *              sendScanHbRep() in DBLQH; this needs to be done here in DBACC
+ *              since it can take a while before LQH receives an answer the
+ *              normal way from ACC.
+ *--------------------------------------------------------------------------*/
+void Dbacc::sendScanHbRep(Signal* signal, Uint32 scanPtrIndex)
+{
+ scanPtr.i = scanPtrIndex;
+ ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
+
+ // If the timer status is on we continue with a new heartbeat in one second,
+ // else the loop stops and we will not send a new CONTINUEB
+ if (scanPtr.p->scanTimer != 0){
+ if (scanPtr.p->scanTimer == scanPtr.p->scanContinuebCounter){
+ jam();
+ ndbrequire(scanPtr.p->scanState != ScanRec::SCAN_DISCONNECT);
+
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ signal->theData[1] = scanPtr.p->scanTrid1;
+ signal->theData[2] = scanPtr.p->scanTrid2;
+ EXECUTE_DIRECT(DBLQH, GSN_SCAN_HBREP, signal, 3);
+ jamEntry();
+ }//if
+ scanPtr.p->scanContinuebCounter++;
+ signal->theData[0] = ZSEND_SCAN_HBREP;
+ signal->theData[1] = scanPtr.i;
+ sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 100, 2);
+ } else {
+ jam();
+ scanPtr.p->scanContinuebCounter = 0;
+ }//if
+}//Dbacc::sendScanHbRep()
+
+/* --------------------------------------------------------------------------------- */
+/* SETLOCK */
+/*       DESCRIPTION: SETS LOCK ON AN ELEMENT. INFORMATION ABOUT THE ELEMENT IS             */
+/*                    SAVED IN THE ELEMENT HEAD. A COPY OF THIS INFORMATION WILL            */
+/*                    BE PUT IN THE OPERATION RECORD. A FIELD IN THE HEADER OF              */
+/*                    THE ELEMENT POINTS TO THE OPERATION RECORD.                           */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::setlock(Signal* signal)
+{
+ Uint32 tselTmp1;
+
+ arrGuard(tslElementptr, 2048);
+ tselTmp1 = slPageidptr.p->word32[tslElementptr];
+ operationRecPtr.p->scanBits = ElementHeader::getScanBits(tselTmp1);
+ operationRecPtr.p->hashvaluePart = ElementHeader::getHashValuePart(tselTmp1);
+
+ tselTmp1 = ElementHeader::setLocked(operationRecPtr.i);
+ dbgWord32(slPageidptr, tslElementptr, tselTmp1);
+ slPageidptr.p->word32[tslElementptr] = tselTmp1;
+}//Dbacc::setlock()
+
+/* --------------------------------------------------------------------------------- */
+/* TAKE_OUT_ACTIVE_SCAN_OP */
+/* DESCRIPTION: AN ACTIVE SCAN OPERATION BELONGS TO THE ACTIVE LIST OF THE                  */
+/*              SCAN RECORD. THIS SUBROUTINE TAKES THE OPERATION OUT OF THAT LIST.          */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::takeOutActiveScanOp(Signal* signal)
+{
+ OperationrecPtr tasOperationRecPtr;
+
+ if (operationRecPtr.p->prevOp != RNIL) {
+ jam();
+ tasOperationRecPtr.i = operationRecPtr.p->prevOp;
+ ptrCheckGuard(tasOperationRecPtr, coprecsize, operationrec);
+ tasOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp;
+ } else {
+ jam();
+ scanPtr.p->scanFirstActiveOp = operationRecPtr.p->nextOp;
+ }//if
+ if (operationRecPtr.p->nextOp != RNIL) {
+ jam();
+ tasOperationRecPtr.i = operationRecPtr.p->nextOp;
+ ptrCheckGuard(tasOperationRecPtr, coprecsize, operationrec);
+ tasOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp;
+ }//if
+}//Dbacc::takeOutActiveScanOp()
+
+/**
+ * takeOutScanLockQueue
+ *
+ * Description: Take out an operation from the doubly linked
+ * lock list on a scan record.
+ *
+ * @note Use putOpScanLockQue to insert an operation in
+ * the list
+ *
+ */
+void Dbacc::takeOutScanLockQueue(Uint32 scanRecIndex)
+{
+ OperationrecPtr tslOperationRecPtr;
+ ScanRecPtr TscanPtr;
+
+ TscanPtr.i = scanRecIndex;
+ ptrCheckGuard(TscanPtr, cscanRecSize, scanRec);
+
+ if (operationRecPtr.p->prevOp != RNIL) {
+ jam();
+ tslOperationRecPtr.i = operationRecPtr.p->prevOp;
+ ptrCheckGuard(tslOperationRecPtr, coprecsize, operationrec);
+ tslOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp;
+ } else {
+ jam();
+    // Check that the first pointer points at the operation to take out
+ ndbrequire(TscanPtr.p->scanFirstLockedOp==operationRecPtr.i);
+ TscanPtr.p->scanFirstLockedOp = operationRecPtr.p->nextOp;
+ }//if
+ if (operationRecPtr.p->nextOp != RNIL) {
+ jam();
+ tslOperationRecPtr.i = operationRecPtr.p->nextOp;
+ ptrCheckGuard(tslOperationRecPtr, coprecsize, operationrec);
+ tslOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp;
+ } else {
+ jam();
+    // Check that the last pointer points at the operation to take out
+ ndbrequire(TscanPtr.p->scanLastLockedOp==operationRecPtr.i);
+ TscanPtr.p->scanLastLockedOp = operationRecPtr.p->prevOp;
+ }//if
+ TscanPtr.p->scanLockHeld--;
+
+#ifdef VM_TRACE
+ // DEBUG CODE
+ // Check that there are as many operations in the lockqueue as
+ // scanLockHeld indicates
+ OperationrecPtr tmpOp;
+ int numLockedOps = 0;
+ tmpOp.i = TscanPtr.p->scanFirstLockedOp;
+ while(tmpOp.i != RNIL){
+ numLockedOps++;
+ ptrCheckGuard(tmpOp, coprecsize, operationrec);
+ if (tmpOp.p->nextOp == RNIL)
+ ndbrequire(tmpOp.i == TscanPtr.p->scanLastLockedOp);
+ tmpOp.i = tmpOp.p->nextOp;
+ }
+ ndbrequire(numLockedOps==TscanPtr.p->scanLockHeld);
+#endif
+}//Dbacc::takeOutScanLockQueue()
+
+/* --------------------------------------------------------------------------------- */
+/* TAKE_OUT_READY_SCAN_QUEUE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::takeOutReadyScanQueue(Signal* signal)
+{
+ OperationrecPtr trsOperationRecPtr;
+
+ if (operationRecPtr.p->prevOp != RNIL) {
+ jam();
+ trsOperationRecPtr.i = operationRecPtr.p->prevOp;
+ ptrCheckGuard(trsOperationRecPtr, coprecsize, operationrec);
+ trsOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp;
+ } else {
+ jam();
+ scanPtr.p->scanFirstQueuedOp = operationRecPtr.p->nextOp;
+ }//if
+ if (operationRecPtr.p->nextOp != RNIL) {
+ jam();
+ trsOperationRecPtr.i = operationRecPtr.p->nextOp;
+ ptrCheckGuard(trsOperationRecPtr, coprecsize, operationrec);
+ trsOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp;
+ } else {
+ jam();
+    scanPtr.p->scanLastQueuedOp = operationRecPtr.p->prevOp;
+ }//if
+}//Dbacc::takeOutReadyScanQueue()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF SCAN MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+
+bool Dbacc::getrootfragmentrec(Signal* signal, RootfragmentrecPtr& rootPtr, Uint32 fid)
+{
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+ jam();
+ if (tabptr.p->fragholder[i] == fid) {
+ jam();
+ rootPtr.i = tabptr.p->fragptrholder[i];
+ ptrCheckGuard(rootPtr, crootfragmentsize, rootfragmentrec);
+ return true;
+ }//if
+ }//for
+ return false;
+}//Dbacc::getrootfragmentrec()
+
+/* --------------------------------------------------------------------------------- */
+/* INIT_FS_OP_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initFsOpRec(Signal* signal)
+{
+ fsOpptr.p->fsOpfragrecPtr = fragrecptr.i;
+ fsOpptr.p->fsConptr = fsConnectptr.i;
+}//Dbacc::initFsOpRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INIT_LCP_CONN_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initLcpConnRec(Signal* signal)
+{
+ lcpConnectptr.p->lcpUserblockref = tuserblockref;
+ lcpConnectptr.p->lcpUserptr = tuserptr;
+ lcpConnectptr.p->noOfLcpConf = 0; /* NO OF RETUREND CONF SIGNALS */
+ lcpConnectptr.p->syncUndopageState = WAIT_NOTHING;
+}//Dbacc::initLcpConnRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INIT_OVERPAGE */
+/* INPUT. IOP_PAGEPTR, POINTER TO AN OVERFLOW PAGE RECORD */
+/*         DESCRIPTION: THE CONTAINERS AND FREE LISTS OF THE PAGE GET THEIR INITIAL         */
+/*                      VALUES ACCORDING TO LH3 AND THE PAGE STRUCTURE DESCRIPTION OF       */
+/*                      THE NDBACC BLOCK                                                    */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initOverpage(Signal* signal)
+{
+ Uint32 tiopTmp;
+ Uint32 tiopPrevFree;
+ Uint32 tiopNextFree;
+
+ for (tiopIndex = 0; tiopIndex <= 2047; tiopIndex++) {
+ iopPageptr.p->word32[tiopIndex] = 0;
+ }//for
+ iopPageptr.p->word32[ZPOS_OVERFLOWREC] = iopOverflowRecPtr.i;
+ iopPageptr.p->word32[ZPOS_CHECKSUM] = 0;
+ iopPageptr.p->word32[ZPOS_PAGE_ID] = tiopPageId;
+ iopPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = 0;
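+  /* PACK THE LEFT AND RIGHT EMPTY LIST HEADS AND THE PAGE TYPE BIT (MARKING THIS AS AN     */
+  /* OVERFLOW PAGE) INTO ZPOS_EMPTY_LIST. SEE THE LAYOUT DESCRIBED IN INIT_PAGE.            */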
+ tiopTmp = ZEMPTYLIST;
+ tiopTmp = (tiopTmp << 16) + (tiopTmp << 23);
+ iopPageptr.p->word32[ZPOS_EMPTY_LIST] = tiopTmp + (1 << ZPOS_PAGE_TYPE_BIT);
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */
+ /* --------------------------------------------------------------------------------- */
+ tiopIndex = ZHEAD_SIZE + 1;
+ iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST;
+ for (tiopPrevFree = 0; tiopPrevFree <= ZEMPTYLIST - 2; tiopPrevFree++) {
+ tiopIndex = tiopIndex + ZBUF_SIZE;
+ iopPageptr.p->word32[tiopIndex] = tiopPrevFree;
+ }//for
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */
+ /* --------------------------------------------------------------------------------- */
+ tiopIndex = ZHEAD_SIZE;
+ for (tiopNextFree = 1; tiopNextFree <= ZEMPTYLIST - 1; tiopNextFree++) {
+ iopPageptr.p->word32[tiopIndex] = tiopNextFree;
+ tiopIndex = tiopIndex + ZBUF_SIZE;
+ }//for
+ iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST; /* LEFT_LIST IS UPDATED */
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */
+ /* --------------------------------------------------------------------------------- */
+ tiopIndex = (ZBUF_SIZE + ZHEAD_SIZE) - 1;
+ iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST;
+ for (tiopPrevFree = 0; tiopPrevFree <= ZEMPTYLIST - 2; tiopPrevFree++) {
+ tiopIndex = tiopIndex + ZBUF_SIZE;
+ iopPageptr.p->word32[tiopIndex] = tiopPrevFree;
+ }//for
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */
+ /* --------------------------------------------------------------------------------- */
+ tiopIndex = (ZBUF_SIZE + ZHEAD_SIZE) - 2;
+ for (tiopNextFree = 1; tiopNextFree <= ZEMPTYLIST - 1; tiopNextFree++) {
+ iopPageptr.p->word32[tiopIndex] = tiopNextFree;
+ tiopIndex = tiopIndex + ZBUF_SIZE;
+ }//for
+ iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST; /* RIGHT_LIST IS UPDATED */
+}//Dbacc::initOverpage()
+
+/* --------------------------------------------------------------------------------- */
+/* INIT_PAGE */
+/* INPUT: INP_PAGEPTR, POINTER TO A PAGE RECORD */
+/* DESCRIPTION: CONTAINERS AND FREE LISTS OF THE PAGE GET THEIR INITIAL VALUES */
+/* ACCORDING TO LH3 AND THE PAGE STRUCTURE DESCRIPTION OF THE NDBACC BLOCK. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initPage(Signal* signal)
+{
+ Uint32 tinpTmp1;
+ Uint32 tinpIndex;
+ Uint32 tinpTmp;
+ Uint32 tinpPrevFree;
+ Uint32 tinpNextFree;
+
+ for (tiopIndex = 0; tiopIndex <= 2047; tiopIndex++) {
+ inpPageptr.p->word32[tiopIndex] = 0;
+ }//for
+ /* --------------------------------------------------------------------------------- */
+ /* SET PAGE ID FOR USE OF CHECKPOINTER. */
+ /* PREPARE CONTAINER HEADERS INDICATING EMPTY CONTAINERS WITHOUT NEXT. */
+ /* --------------------------------------------------------------------------------- */
+ inpPageptr.p->word32[ZPOS_PAGE_ID] = tipPageId;
+ tinpTmp1 = ZCON_HEAD_SIZE;
+ tinpTmp1 = tinpTmp1 << 26;
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE ZNO_CONTAINERS PREDEFINED HEADERS ON LEFT SIDE. */
+ /* --------------------------------------------------------------------------------- */
+ tinpIndex = ZHEAD_SIZE;
+ for (tinpTmp = 0; tinpTmp <= ZNO_CONTAINERS - 1; tinpTmp++) {
+ inpPageptr.p->word32[tinpIndex] = tinpTmp1;
+ tinpIndex = tinpIndex + ZBUF_SIZE;
+ }//for
+ /* WORD32(ZPOS_EMPTY_LIST) DATA STRUCTURE:*/
+ /*--------------------------------------- */
+ /*| PAGE TYPE | LEFT FREE LIST | RIGHT FREE LIST |*/
+ /*|   1 BIT   |     7 BITS     |     7 BITS      |*/
+ /*--------------------------------------- */
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE FIRST POINTER TO DOUBLY LINKED LIST OF FREE CONTAINERS. */
+ /* INITIALISE EMPTY LISTS OF USED CONTAINERS. */
+ /* INITIALISE LEFT FREE LIST TO 64 AND RIGHT FREE LIST TO ZERO. */
+ /* ALSO INITIALISE PAGE TYPE TO NOT OVERFLOW PAGE. */
+ /* --------------------------------------------------------------------------------- */
+ tinpTmp = ZEMPTYLIST;
+ tinpTmp = (tinpTmp << 16) + (tinpTmp << 23);
+ tinpTmp = tinpTmp + (ZNO_CONTAINERS << 7);
+ inpPageptr.p->word32[ZPOS_EMPTY_LIST] = tinpTmp;
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */
+ /* --------------------------------------------------------------------------------- */
+ tinpIndex = (ZHEAD_SIZE + ZBUF_SIZE) - 1;
+ inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST;
+ for (tinpPrevFree = 0; tinpPrevFree <= ZEMPTYLIST - 2; tinpPrevFree++) {
+ tinpIndex = tinpIndex + ZBUF_SIZE;
+ inpPageptr.p->word32[tinpIndex] = tinpPrevFree;
+ }//for
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */
+ /* --------------------------------------------------------------------------------- */
+ tinpIndex = (ZHEAD_SIZE + ZBUF_SIZE) - 2;
+ for (tinpNextFree = 1; tinpNextFree <= ZEMPTYLIST - 1; tinpNextFree++) {
+ inpPageptr.p->word32[tinpIndex] = tinpNextFree;
+ tinpIndex = tinpIndex + ZBUF_SIZE;
+ }//for
+ inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST;
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */
+ /* THE FIRST ZNO_CONTAINERS ARE NOT PUT INTO FREE LIST SINCE THEY ARE */
+ /* PREDEFINED AS OCCUPIED. */
+ /* --------------------------------------------------------------------------------- */
+ tinpIndex = (ZNO_CONTAINERS * ZBUF_SIZE) + ZHEAD_SIZE;
+ for (tinpNextFree = ZNO_CONTAINERS + 1; tinpNextFree <= ZEMPTYLIST - 1; tinpNextFree++) {
+ inpPageptr.p->word32[tinpIndex] = tinpNextFree;
+ tinpIndex = tinpIndex + ZBUF_SIZE;
+ }//for
+ inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST;
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */
+ /* THE FIRST ZNO_CONTAINERS ARE NOT PUT INTO FREE LIST SINCE THEY ARE */
+ /* PREDEFINED AS OCCUPIED. */
+ /* --------------------------------------------------------------------------------- */
+ tinpIndex = ((ZNO_CONTAINERS * ZBUF_SIZE) + ZHEAD_SIZE) + 1;
+ inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST;
+ for (tinpPrevFree = ZNO_CONTAINERS; tinpPrevFree <= ZEMPTYLIST - 2; tinpPrevFree++) {
+ tinpIndex = tinpIndex + ZBUF_SIZE;
+ inpPageptr.p->word32[tinpIndex] = tinpPrevFree;
+ }//for
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE HEADER POSITIONS NOT CURRENTLY USED AND ENSURE USE OF OVERFLOW */
+ /* RECORD POINTER ON THIS PAGE LEADS TO ERROR. */
+ /* --------------------------------------------------------------------------------- */
+ inpPageptr.p->word32[ZPOS_CHECKSUM] = 0;
+ inpPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = 0;
+ inpPageptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
+}//Dbacc::initPage()
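+
+/* --------------------------------------------------------------------------------- */
+/* NOTE ON THE TWO INIT ROUTINES ABOVE (DESCRIPTIVE SUMMARY ONLY): BOTH ZERO THE */
+/* 2048-WORD PAGE, WRITE THE PAGE HEADER AND THREAD THE LEFT AND RIGHT CONTAINER */
+/* HEADERS OF EVERY ZBUF_SIZE-WORD BUFFER INTO DOUBLY LINKED FREE LISTS TERMINATED */
+/* BY ZEMPTYLIST. THE DIFFERENCE IS THAT INIT_OVERPAGE MARKS THE PAGE AS AN OVERFLOW */
+/* PAGE AND PUTS ALL CONTAINERS ON THE FREE LISTS, WHILE INIT_PAGE PRE-ALLOCATES THE */
+/* FIRST ZNO_CONTAINERS LEFT CONTAINERS AS OCCUPIED AND ONLY FREE-LISTS THE REST. */
+/* --------------------------------------------------------------------------------- */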
+
+/* --------------------------------------------------------------------------------- */
+/* PUT_OP_IN_FRAG_WAIT_QUE */
+/* DESCRIPTION: AN OPERATION WHICH OWNS A LOCK ON AN ELEMENT IS PUT IN A */
+/* WAIT LIST OF THE FRAGMENT. THIS LIST IS USED DURING THE CREATE */
+/* CHECKPOINT PROCESS TO STOP AND RESTART THE QUEUED */
+/* OPERATIONS. */
+/* */
+/* IF CONTINUEB SIGNALS ARE INTRODUCED AFTER STARTING TO EXECUTE ACCKEYREQ WE */
+/* MUST PUT THE OPERATION IN THIS LIST BEFORE EXITING, TO ENSURE THAT IT IS NOT */
+/* LEFT LOCKED AFTER LQH HAS RECEIVED ALL LCP_HOLDOP'S; OTHERWISE THE LCP WOULD */
+/* NEVER PROCEED. WE ALSO PUT IT INTO THIS LIST WHEN WAITING FOR LONG KEYS. THIS */
+/* IS ONLY NEEDED IF SIGNALS CAN ENTER BETWEEN THE KEYDATA CARRYING SIGNALS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::putOpInFragWaitQue(Signal* signal)
+{
+ OperationrecPtr tpiwOperRecPtr;
+
+ if (operationRecPtr.p->operation != ZSCAN_OP) {
+ if (fragrecptr.p->firstWaitInQueOp == RNIL) {
+ jam();
+ fragrecptr.p->firstWaitInQueOp = operationRecPtr.i;
+ } else {
+ jam();
+ tpiwOperRecPtr.i = fragrecptr.p->lastWaitInQueOp;
+ ptrCheckGuard(tpiwOperRecPtr, coprecsize, operationrec);
+ tpiwOperRecPtr.p->nextQueOp = operationRecPtr.i;
+ }//if
+ operationRecPtr.p->opState = WAIT_IN_QUEUE;
+ operationRecPtr.p->nextQueOp = RNIL;
+ operationRecPtr.p->prevQueOp = fragrecptr.p->lastWaitInQueOp;
+ fragrecptr.p->lastWaitInQueOp = operationRecPtr.i;
+ }//if
+}//Dbacc::putOpInFragWaitQue()
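+
+/* --------------------------------------------------------------------------------- */
+/* NOTE: THE RESULTING WAIT QUEUE IS A DOUBLY LINKED LIST PER FRAGMENT, HEADED BY */
+/* FIRST_WAIT_IN_QUE_OP / LAST_WAIT_IN_QUE_OP AND THREADED THROUGH THE NEXT_QUE_OP / */
+/* PREV_QUE_OP FIELDS OF THE OPERATION RECORDS. SCAN OPERATIONS (ZSCAN_OP) ARE NEVER */
+/* PLACED IN THIS QUEUE. */
+/* --------------------------------------------------------------------------------- */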
+
+/* --------------------------------------------------------------------------------- */
+/* PUT_OVERFLOW_REC_IN_FRAG */
+/* DESCRIPTION: AN OVERFLOW RECORD WHICH IS USED TO KEEP INFORMATION ABOUT AN */
+/* OVERFLOW PAGE WILL BE PUT IN A LIST OF OVERFLOW RECORDS IN */
+/* THE FRAGMENT RECORD. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::putOverflowRecInFrag(Signal* signal)
+{
+ OverflowRecordPtr tpifNextOverrecPtr;
+ OverflowRecordPtr tpifPrevOverrecPtr;
+
+ tpifNextOverrecPtr.i = fragrecptr.p->firstOverflowRec;
+ tpifPrevOverrecPtr.i = RNIL;
+ while (tpifNextOverrecPtr.i != RNIL) {
+ ptrCheckGuard(tpifNextOverrecPtr, coverflowrecsize, overflowRecord);
+ if (tpifNextOverrecPtr.p->dirindex < porOverflowRecPtr.p->dirindex) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* PROCEED TO THE NEXT ENTRY IN THE LIST SINCE THIS ENTRY HAS A LOWER PAGE ID. */
+ /* WE WANT TO ENSURE THAT LOWER PAGE ID'S ARE KEPT FULL RATHER THAN THE */
+ /* OPPOSITE TO ENSURE THAT HIGH PAGE ID'S CAN BE REMOVED WHEN SHRINKS ARE */
+ /* PERFORMED. */
+ /* --------------------------------------------------------------------------------- */
+ tpifPrevOverrecPtr = tpifNextOverrecPtr;
+ tpifNextOverrecPtr.i = tpifNextOverrecPtr.p->nextOverRec;
+ } else {
+ jam();
+ ndbrequire(tpifNextOverrecPtr.p->dirindex != porOverflowRecPtr.p->dirindex);
+ /* --------------------------------------------------------------------------------- */
+ /* TRYING TO INSERT THE SAME PAGE TWICE. SYSTEM ERROR. */
+ /* --------------------------------------------------------------------------------- */
+ break;
+ }//if
+ }//while
+ if (tpifNextOverrecPtr.i == RNIL) {
+ jam();
+ fragrecptr.p->lastOverflowRec = porOverflowRecPtr.i;
+ } else {
+ jam();
+ tpifNextOverrecPtr.p->prevOverRec = porOverflowRecPtr.i;
+ }//if
+ if (tpifPrevOverrecPtr.i == RNIL) {
+ jam();
+ fragrecptr.p->firstOverflowRec = porOverflowRecPtr.i;
+ } else {
+ jam();
+ tpifPrevOverrecPtr.p->nextOverRec = porOverflowRecPtr.i;
+ }//if
+ porOverflowRecPtr.p->prevOverRec = tpifPrevOverrecPtr.i;
+ porOverflowRecPtr.p->nextOverRec = tpifNextOverrecPtr.i;
+}//Dbacc::putOverflowRecInFrag()
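+
+/* --------------------------------------------------------------------------------- */
+/* NOTE: THE LIST ABOVE IS KEPT SORTED IN ASCENDING DIRINDEX (PAGE ID) ORDER, SO */
+/* THAT LOW PAGE IDS ARE FILLED FIRST AND HIGH PAGE IDS CAN BE RELEASED ON SHRINK. */
+/* INSERTING THE SAME DIRINDEX TWICE IS TREATED AS A SYSTEM ERROR (NDBREQUIRE). */
+/* --------------------------------------------------------------------------------- */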
+
+/* --------------------------------------------------------------------------------- */
+/* PUT_REC_IN_FREE_OVERDIR */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::putRecInFreeOverdir(Signal* signal)
+{
+ OverflowRecordPtr tpfoNextOverrecPtr;
+ OverflowRecordPtr tpfoPrevOverrecPtr;
+
+ tpfoNextOverrecPtr.i = fragrecptr.p->firstFreeDirindexRec;
+ tpfoPrevOverrecPtr.i = RNIL;
+ while (tpfoNextOverrecPtr.i != RNIL) {
+ ptrCheckGuard(tpfoNextOverrecPtr, coverflowrecsize, overflowRecord);
+ if (tpfoNextOverrecPtr.p->dirindex < priOverflowRecPtr.p->dirindex) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* PROCEED TO THE NEXT ENTRY IN THE LIST SINCE THIS ENTRY HAS A LOWER PAGE ID. */
+ /* WE WANT TO ENSURE THAT LOWER PAGE ID'S ARE KEPT FULL RATHER THAN THE */
+ /* OPPOSITE TO ENSURE THAT HIGH PAGE ID'S CAN BE REMOVED WHEN SHRINKS ARE */
+ /* PERFORMED. */
+ /* --------------------------------------------------------------------------------- */
+ tpfoPrevOverrecPtr = tpfoNextOverrecPtr;
+ tpfoNextOverrecPtr.i = tpfoNextOverrecPtr.p->nextOverList;
+ } else {
+ jam();
+ ndbrequire(tpfoNextOverrecPtr.p->dirindex != priOverflowRecPtr.p->dirindex);
+ /* --------------------------------------------------------------------------------- */
+ /* ENSURE WE ARE NOT TRYING TO INSERT THE SAME PAGE TWICE. */
+ /* --------------------------------------------------------------------------------- */
+ break;
+ }//if
+ }//while
+ if (tpfoNextOverrecPtr.i != RNIL) {
+ jam();
+ tpfoNextOverrecPtr.p->prevOverList = priOverflowRecPtr.i;
+ }//if
+ if (tpfoPrevOverrecPtr.i == RNIL) {
+ jam();
+ fragrecptr.p->firstFreeDirindexRec = priOverflowRecPtr.i;
+ } else {
+ jam();
+ tpfoPrevOverrecPtr.p->nextOverList = priOverflowRecPtr.i;
+ }//if
+ priOverflowRecPtr.p->prevOverList = tpfoPrevOverrecPtr.i;
+ priOverflowRecPtr.p->nextOverList = tpfoNextOverrecPtr.i;
+}//Dbacc::putRecInFreeOverdir()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_DIRECTORY */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseDirectory(Signal* signal)
+{
+ ptrCheckGuard(rdDirptr, cdirarraysize, directoryarray);
+ rdDirptr.p->pagep[0] = cfirstfreedir;
+ cfirstfreedir = rdDirptr.i;
+}//Dbacc::releaseDirectory()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_DIRRANGE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseDirrange(Signal* signal)
+{
+ ptrCheckGuard(rdDirRangePtr, cdirrangesize, dirRange);
+ rdDirRangePtr.p->dirArray[0] = cfirstfreeDirrange;
+ cfirstfreeDirrange = rdDirRangePtr.i;
+}//Dbacc::releaseDirrange()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_FS_CONN_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseFsConnRec(Signal* signal)
+{
+ fsConnectptr.p->fsNext = cfsFirstfreeconnect;
+ cfsFirstfreeconnect = fsConnectptr.i;
+ fsConnectptr.p->fsState = WAIT_NOTHING;
+}//Dbacc::releaseFsConnRec()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_FS_OP_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseFsOpRec(Signal* signal)
+{
+ fsOpptr.p->fsOpnext = cfsFirstfreeop;
+ cfsFirstfreeop = fsOpptr.i;
+ fsOpptr.p->fsOpstate = WAIT_NOTHING;
+}//Dbacc::releaseFsOpRec()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_LCP_CONNECT_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseLcpConnectRec(Signal* signal)
+{
+ lcpConnectptr.p->lcpstate = LCP_FREE;
+ lcpConnectptr.p->nextLcpConn = cfirstfreelcpConnect;
+ cfirstfreelcpConnect = lcpConnectptr.i;
+}//Dbacc::releaseLcpConnectRec()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE OP RECORD */
+/* PUT A FREE OPERATION IN A FREE LIST OF THE OPERATIONS */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseOpRec(Signal* signal)
+{
+#if 0
+ // DEBUG CODE
+ // Check that the operation to be released isn't
+ // already in the list of free operations
+ // Since this code loops through the entire list of free operations
+ // it is disabled by default (#if 0) and only intended for debugging
+ OperationrecPtr opRecPtr;
+ bool opInList = false;
+ opRecPtr.i = cfreeopRec;
+ while (opRecPtr.i != RNIL){
+ if (opRecPtr.i == operationRecPtr.i){
+ opInList = true;
+ break;
+ }
+ ptrCheckGuard(opRecPtr, coprecsize, operationrec);
+ opRecPtr.i = opRecPtr.p->nextOp;
+ }
+ ndbrequire(opInList == false);
+#endif
+ ndbrequire(operationRecPtr.p->lockOwner == ZFALSE);
+
+ operationRecPtr.p->nextOp = cfreeopRec;
+ cfreeopRec = operationRecPtr.i; /* UPDATE FREE LIST OF OP RECORDS */
+ operationRecPtr.p->prevOp = RNIL;
+ operationRecPtr.p->opState = FREE_OP;
+ operationRecPtr.p->transactionstate = IDLE;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+}//Dbacc::releaseOpRec()
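+
+// The seizeXxx/releaseXxx routines in this block all follow the same intrusive
+// free-list pattern. A minimal sketch with simplified, illustrative names (not
+// actual block code):
+//
+//   // seize: pop the head of the singly linked free list
+//   recPtr.i = cfirstFreeRec;
+//   ptrCheckGuard(recPtr, cpoolSize, recordPool);
+//   cfirstFreeRec = recPtr.p->nextFree;
+//   recPtr.p->nextFree = RNIL;
+//
+//   // release: push the record back as the new head (LIFO)
+//   recPtr.p->nextFree = cfirstFreeRec;
+//   cfirstFreeRec = recPtr.i;
+//
+// RNIL terminates the list and ptrCheckGuard validates the index against the
+// pool size before the record is dereferenced.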
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_OVERFLOW_REC */
+/* PUT A FREE OVERFLOW REC IN A FREE LIST OF THE OVERFLOW RECORDS */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseOverflowRec(Signal* signal)
+{
+ rorOverflowRecPtr.p->nextfreeoverrec = cfirstfreeoverrec;
+ cfirstfreeoverrec = rorOverflowRecPtr.i;
+}//Dbacc::releaseOverflowRec()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_OVERPAGE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseOverpage(Signal* signal)
+{
+ DirRangePtr ropOverflowrangeptr;
+ DirectoryarrayPtr ropOverflowDirptr;
+ OverflowRecordPtr ropOverflowRecPtr;
+ OverflowRecordPtr tuodOverflowRecPtr;
+ Uint32 tropTmp;
+ Uint32 tropTmp1;
+ Uint32 tropTmp2;
+
+ ropOverflowRecPtr.i = ropPageptr.p->word32[ZPOS_OVERFLOWREC];
+ ndbrequire(ropOverflowRecPtr.i != RNIL);
+ /* THE OVERFLOW REC WILL BE TAKEN OUT OF THE */
+ /* FREELIST OF OVERFLOW PAGE WITH FREE */
+ /* CONTAINER AND WILL BE PUT IN THE FREE LIST */
+ /* OF THE FREE DIRECTORY INDEXES. */
+ if ((fragrecptr.p->lastOverflowRec == ropOverflowRecPtr.i) &&
+ (fragrecptr.p->firstOverflowRec == ropOverflowRecPtr.i)) {
+ jam();
+ return; /* THERE IS ONLY ONE OVERFLOW PAGE */
+ }//if
+ if ((fragrecptr.p->createLcp == ZTRUE) &&
+ (fragrecptr.p->lcpMaxOverDirIndex > ropPageptr.p->word32[ZPOS_PAGE_ID])) {
+ /* --------------------------------------------------------------------------------- */
+ /* THE PAGE PARTICIPATES IN THE LOCAL CHECKPOINT. */
+ /* --------------------------------------------------------------------------------- */
+ if (fragrecptr.p->fragState == LCP_SEND_PAGES) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE PAGE PARTICIPATES IN THE LOCAL CHECKPOINT AND THE WRITE TO DISK HAS NOT */
+ /* YET BEEN COMPLETED. WE MUST KEEP IT A WHILE LONGER, SINCE AN EMPTY PAGE IS */
+ /* NOT EQUIVALENT TO AN INITIALISED PAGE (THE FREE LISTS CAN DIFFER). */
+ /* --------------------------------------------------------------------------------- */
+ return;
+ } else {
+ if ((fragrecptr.p->fragState == LCP_SEND_OVER_PAGES) &&
+ (fragrecptr.p->lcpDirIndex <= ropPageptr.p->word32[ZPOS_PAGE_ID])) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* SEE COMMENT ABOVE */
+ /* --------------------------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ }//if
+#if kalle
+ logicalPage = 0;
+
+ i = fragrecptr.p->directory;
+ p = dirRange.getPtr(i);
+
+ i1 = logicalPage >> 8;
+ i2 = logicalPage & 0xFF;
+
+ ndbrequire(i1 < 256);
+
+ i = p->dirArray[i1];
+ p = directoryarray.getPtr(i);
+
+ physicPageId = p->pagep[i2];
+ physicPageP = page8.getPtr(physicPageId);
+
+ p->pagep[i2] = RNIL;
+ rpPageptr = { physicPageId, physicPageP };
+ releasePage(signal);
+
+#endif
+
+ /* --------------------------------------------------------------------------------- */
+ /* IT WAS OK TO RELEASE THE PAGE. */
+ /* --------------------------------------------------------------------------------- */
+ ptrCheckGuard(ropOverflowRecPtr, coverflowrecsize, overflowRecord);
+ tfoOverflowRecPtr = ropOverflowRecPtr;
+ takeRecOutOfFreeOverpage(signal);
+ ropOverflowRecPtr.p->overpage = RNIL;
+ priOverflowRecPtr = ropOverflowRecPtr;
+ putRecInFreeOverdir(signal);
+ tropTmp = ropPageptr.p->word32[ZPOS_PAGE_ID];
+ ropOverflowrangeptr.i = fragrecptr.p->overflowdir;
+ tropTmp1 = tropTmp >> 8;
+ tropTmp2 = tropTmp & 0xff;
+ ptrCheckGuard(ropOverflowrangeptr, cdirrangesize, dirRange);
+ arrGuard(tropTmp1, 256);
+ ropOverflowDirptr.i = ropOverflowrangeptr.p->dirArray[tropTmp1];
+ ptrCheckGuard(ropOverflowDirptr, cdirarraysize, directoryarray);
+ ropOverflowDirptr.p->pagep[tropTmp2] = RNIL;
+ rpPageptr = ropPageptr;
+ releasePage(signal);
+ if (ropOverflowRecPtr.p->dirindex != (fragrecptr.p->lastOverIndex - 1)) {
+ jam();
+ return;
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* THE LAST PAGE IN THE DIRECTORY WAS RELEASED. IT IS NOW NECESSARY TO REMOVE */
+ /* ALL RELEASED OVERFLOW DIRECTORIES AT THE END OF THE LIST. */
+ /* --------------------------------------------------------------------------------- */
+ do {
+ fragrecptr.p->lastOverIndex--;
+ if (tropTmp2 == 0) {
+ jam();
+ ndbrequire(tropTmp1 != 0);
+ ropOverflowrangeptr.p->dirArray[tropTmp1] = RNIL;
+ rdDirptr.i = ropOverflowDirptr.i;
+ releaseDirectory(signal);
+ tropTmp1--;
+ tropTmp2 = 255;
+ } else {
+ jam();
+ tropTmp2--;
+ }//if
+ ropOverflowDirptr.i = ropOverflowrangeptr.p->dirArray[tropTmp1];
+ ptrCheckGuard(ropOverflowDirptr, cdirarraysize, directoryarray);
+ } while (ropOverflowDirptr.p->pagep[tropTmp2] == RNIL);
+ /* --------------------------------------------------------------------------------- */
+ /* RELEASE ANY OVERFLOW RECORDS THAT ARE PART OF THE FREE INDEX LIST WHICH */
+ /* DIRECTORY INDEX NOW HAS BEEN RELEASED. */
+ /* --------------------------------------------------------------------------------- */
+ tuodOverflowRecPtr.i = fragrecptr.p->firstFreeDirindexRec;
+ jam();
+ while (tuodOverflowRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(tuodOverflowRecPtr, coverflowrecsize, overflowRecord);
+ if (tuodOverflowRecPtr.p->dirindex >= fragrecptr.p->lastOverIndex) {
+ jam();
+ rorOverflowRecPtr = tuodOverflowRecPtr;
+ troOverflowRecPtr.p = tuodOverflowRecPtr.p;
+ tuodOverflowRecPtr.i = troOverflowRecPtr.p->nextOverList;
+ takeRecOutOfFreeOverdir(signal);
+ releaseOverflowRec(signal);
+ } else {
+ jam();
+ tuodOverflowRecPtr.i = tuodOverflowRecPtr.p->nextOverList;
+ }//if
+ }//while
+}//Dbacc::releaseOverpage()
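+
+/* --------------------------------------------------------------------------------- */
+/* SUMMARY OF RELEASE_OVERPAGE ABOVE: THE PAGE IS KEPT IF IT IS THE ONLY OVERFLOW */
+/* PAGE OR IF IT STILL PARTICIPATES IN AN ONGOING LCP WRITE. OTHERWISE ITS OVERFLOW */
+/* RECORD IS MOVED FROM THE FREE OVERFLOW PAGE LIST TO THE FREE DIRINDEX LIST, THE */
+/* DIRECTORY ENTRY IS CLEARED AND THE PAGE IS RETURNED TO THE FREE PAGE POOL. IF */
+/* THE RELEASED PAGE HELD THE LAST DIRECTORY INDEX, THE OVERFLOW DIRECTORY IS */
+/* SHRUNK AND FREE DIRINDEX RECORDS BEYOND THE NEW LAST INDEX ARE RELEASED AS WELL. */
+/* --------------------------------------------------------------------------------- */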
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_PAGE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releasePage(Signal* signal)
+{
+#ifdef VM_TRACE
+ bool inList = false;
+ Uint32 numInList = 0;
+ Page8Ptr tmpPagePtr;
+ tmpPagePtr.i = cfirstfreepage;
+ while (tmpPagePtr.i != RNIL){
+ ptrCheckGuard(tmpPagePtr, cpagesize, page8);
+ if (tmpPagePtr.i == rpPageptr.i){
+ jam(); inList = true;
+ break;
+ }
+ numInList++;
+ tmpPagePtr.i = tmpPagePtr.p->word32[0];
+ }
+ ndbrequire(inList == false);
+ // ndbrequire(numInList == cnoOfAllocatedPages);
+#endif
+ rpPageptr.p->word32[0] = cfirstfreepage;
+ cfirstfreepage = rpPageptr.i;
+ cnoOfAllocatedPages--;
+}//Dbacc::releasePage()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_LCP_PAGE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseLcpPage(Signal* signal)
+{
+ rlpPageptr.p->word32[0] = cfirstfreeLcpPage;
+ cfirstfreeLcpPage = rlpPageptr.i;
+}//Dbacc::releaseLcpPage()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_SR_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseSrRec(Signal* signal)
+{
+ srVersionPtr.p->nextFreeSr = cfirstFreeSrVersionRec;
+ cfirstFreeSrVersionRec = srVersionPtr.i;
+}//Dbacc::releaseSrRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_DIRECTORY */
+/* DESCRIPTION: A DIRECTORY BLOCK (ZDIRBLOCKSIZE DIRECTORY RECORDS) WILL BE */
+/* ALLOCATED AND RETURNED. */
+/* THE DIRECTORY SIZE ERROR CODE (ZDIRSIZE_ERROR) WILL BE RETURNED */
+/* IF THERE IS NO FREE BLOCK. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeDirectory(Signal* signal)
+{
+ Uint32 tsdyIndex;
+
+ if (cfirstfreedir == RNIL) {
+ jam();
+ if (cdirarraysize <= cdirmemory) {
+ jam();
+ tresult = ZDIRSIZE_ERROR;
+ return;
+ } else {
+ jam();
+ sdDirptr.i = cdirmemory;
+ ptrCheckGuard(sdDirptr, cdirarraysize, directoryarray);
+ cdirmemory = cdirmemory + 1;
+ }//if
+ } else {
+ jam();
+ sdDirptr.i = cfirstfreedir;
+ ptrCheckGuard(sdDirptr, cdirarraysize, directoryarray);
+ cfirstfreedir = sdDirptr.p->pagep[0];
+ sdDirptr.p->pagep[0] = RNIL;
+ }//if
+ for (tsdyIndex = 0; tsdyIndex <= 255; tsdyIndex++) {
+ sdDirptr.p->pagep[tsdyIndex] = RNIL;
+ }//for
+}//Dbacc::seizeDirectory()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_DIRRANGE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeDirrange(Signal* signal)
+{
+ Uint32 tsdeIndex;
+
+ newDirRangePtr.i = cfirstfreeDirrange;
+ ptrCheckGuard(newDirRangePtr, cdirrangesize, dirRange);
+ cfirstfreeDirrange = newDirRangePtr.p->dirArray[0];
+ for (tsdeIndex = 0; tsdeIndex <= 255; tsdeIndex++) {
+ newDirRangePtr.p->dirArray[tsdeIndex] = RNIL;
+ }//for
+}//Dbacc::seizeDirrange()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE FRAGREC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeFragrec(Signal* signal)
+{
+ fragrecptr.i = cfirstfreefrag;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ cfirstfreefrag = fragrecptr.p->nextfreefrag;
+ fragrecptr.p->nextfreefrag = RNIL;
+}//Dbacc::seizeFragrec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_FS_CONNECT_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeFsConnectRec(Signal* signal)
+{
+ fsConnectptr.i = cfsFirstfreeconnect;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ cfsFirstfreeconnect = fsConnectptr.p->fsNext;
+ fsConnectptr.p->fsNext = RNIL;
+ fsConnectptr.p->fsState = WAIT_NOTHING;
+}//Dbacc::seizeFsConnectRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_FS_OP_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeFsOpRec(Signal* signal)
+{
+ fsOpptr.i = cfsFirstfreeop;
+ ptrCheckGuard(fsOpptr, cfsOpsize, fsOprec);
+ cfsFirstfreeop = fsOpptr.p->fsOpnext;
+ fsOpptr.p->fsOpnext = RNIL;
+}//Dbacc::seizeFsOpRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_LCP_CONNECT_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeLcpConnectRec(Signal* signal)
+{
+ lcpConnectptr.i = cfirstfreelcpConnect;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ cfirstfreelcpConnect = lcpConnectptr.p->nextLcpConn;
+ lcpConnectptr.p->nextLcpConn = RNIL;
+}//Dbacc::seizeLcpConnectRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_OP_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeOpRec(Signal* signal)
+{
+ operationRecPtr.i = cfreeopRec;
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ cfreeopRec = operationRecPtr.p->nextOp; /* UPDATE FREE LIST OF OP RECORDS */
+ /* PUTS THE OPERATION RECORD PTR IN THE LIST */
+ /* OF OPERATIONS IN THE CONNECTION RECORD */
+ operationRecPtr.p->nextOp = RNIL;
+}//Dbacc::seizeOpRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE OVERFLOW RECORD */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeOverRec(Signal* signal) {
+ sorOverflowRecPtr.i = cfirstfreeoverrec;
+ ptrCheckGuard(sorOverflowRecPtr, coverflowrecsize, overflowRecord);
+ cfirstfreeoverrec = sorOverflowRecPtr.p->nextfreeoverrec;
+ sorOverflowRecPtr.p->nextfreeoverrec = RNIL;
+ sorOverflowRecPtr.p->prevOverRec = RNIL;
+ sorOverflowRecPtr.p->nextOverRec = RNIL;
+}//Dbacc::seizeOverRec()
+
+
+/**
+ * A ZPAGESIZE_ERROR has occurred: out of index pages.
+ * Print some debug info if debug compiled
+ */
+void Dbacc::zpagesize_error(const char* where){
+ DEBUG(where << endl
+ << " ZPAGESIZE_ERROR" << endl
+ << " cfirstfreepage=" << cfirstfreepage << endl
+ << " cfreepage=" <<cfreepage<<endl
+ << " cpagesize=" <<cpagesize<<endl
+ << " cnoOfAllocatedPages="<<cnoOfAllocatedPages);
+}
+
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_PAGE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizePage(Signal* signal)
+{
+ tresult = 0;
+ if (cfirstfreepage == RNIL) {
+ if (cfreepage < cpagesize) {
+ jam();
+ spPageptr.i = cfreepage;
+ ptrCheckGuard(spPageptr, cpagesize, page8);
+ cfreepage++;
+ cnoOfAllocatedPages++;
+ } else {
+ jam();
+ zpagesize_error("Dbacc::seizePage");
+ tresult = ZPAGESIZE_ERROR;
+ }//if
+ } else {
+ jam();
+ spPageptr.i = cfirstfreepage;
+ ptrCheckGuard(spPageptr, cpagesize, page8);
+ cfirstfreepage = spPageptr.p->word32[0];
+ cnoOfAllocatedPages++;
+ }//if
+}//Dbacc::seizePage()
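+
+/* --------------------------------------------------------------------------------- */
+/* NOTE: SEIZE_PAGE FIRST REUSES A PAGE FROM THE FREE LIST (CFIRSTFREEPAGE). IF THE */
+/* FREE LIST IS EMPTY IT EXTENDS THE USED AREA (CFREEPAGE) UP TO CPAGESIZE, AND IF */
+/* THAT IS EXHAUSTED AS WELL IT REPORTS ZPAGESIZE_ERROR IN TRESULT. */
+/* --------------------------------------------------------------------------------- */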
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_LCP_PAGE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeLcpPage(Page8Ptr& regPagePtr)
+{
+ regPagePtr.i = cfirstfreeLcpPage;
+ ptrCheckGuard(regPagePtr, cpagesize, page8);
+ cfirstfreeLcpPage = regPagePtr.p->word32[0];
+}//Dbacc::seizeLcpPage()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_ROOTFRAGREC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeRootfragrec(Signal* signal)
+{
+ rootfragrecptr.i = cfirstfreerootfrag;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ cfirstfreerootfrag = rootfragrecptr.p->nextroot;
+ rootfragrecptr.p->nextroot = RNIL;
+}//Dbacc::seizeRootfragrec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_SCAN_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeScanRec(Signal* signal)
+{
+ scanPtr.i = cfirstFreeScanRec;
+ ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
+ ndbrequire(scanPtr.p->scanState == ScanRec::SCAN_DISCONNECT);
+ cfirstFreeScanRec = scanPtr.p->scanNextfreerec;
+}//Dbacc::seizeScanRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_SR_VERSION_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeSrVerRec(Signal* signal)
+{
+ srVersionPtr.i = cfirstFreeSrVersionRec;
+ ptrCheckGuard(srVersionPtr, csrVersionRecSize, srVersionRec);
+ cfirstFreeSrVersionRec = srVersionPtr.p->nextFreeSr;
+}//Dbacc::seizeSrVerRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEND_SYSTEMERROR */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::sendSystemerror(Signal* signal)
+{
+ progError(0, 0);
+}//Dbacc::sendSystemerror()
+
+/* --------------------------------------------------------------------------------- */
+/* TAKE_REC_OUT_OF_FREE_OVERDIR */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::takeRecOutOfFreeOverdir(Signal* signal)
+{
+ OverflowRecordPtr tofoOverrecPtr;
+ if (troOverflowRecPtr.p->nextOverList != RNIL) {
+ jam();
+ tofoOverrecPtr.i = troOverflowRecPtr.p->nextOverList;
+ ptrCheckGuard(tofoOverrecPtr, coverflowrecsize, overflowRecord);
+ tofoOverrecPtr.p->prevOverList = troOverflowRecPtr.p->prevOverList;
+ }//if
+ if (troOverflowRecPtr.p->prevOverList != RNIL) {
+ jam();
+ tofoOverrecPtr.i = troOverflowRecPtr.p->prevOverList;
+ ptrCheckGuard(tofoOverrecPtr, coverflowrecsize, overflowRecord);
+ tofoOverrecPtr.p->nextOverList = troOverflowRecPtr.p->nextOverList;
+ } else {
+ jam();
+ fragrecptr.p->firstFreeDirindexRec = troOverflowRecPtr.p->nextOverList;
+ }//if
+}//Dbacc::takeRecOutOfFreeOverdir()
+
+/* --------------------------------------------------------------------------------- */
+/* TAKE_REC_OUT_OF_FREE_OVERPAGE */
+/* DESCRIPTION: AN OVERFLOW PAGE WHICH IS EMPTY HAS TO BE TAKEN OUT OF THE */
+/* FREE LIST OF OVERFLOW PAGES. THIS SUBROUTINE UPDATES THAT */
+/* LIST. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::takeRecOutOfFreeOverpage(Signal* signal)
+{
+ OverflowRecordPtr tfoNextOverflowRecPtr;
+ OverflowRecordPtr tfoPrevOverflowRecPtr;
+
+ if (tfoOverflowRecPtr.p->nextOverRec != RNIL) {
+ jam();
+ tfoNextOverflowRecPtr.i = tfoOverflowRecPtr.p->nextOverRec;
+ ptrCheckGuard(tfoNextOverflowRecPtr, coverflowrecsize, overflowRecord);
+ tfoNextOverflowRecPtr.p->prevOverRec = tfoOverflowRecPtr.p->prevOverRec;
+ } else {
+ ndbrequire(fragrecptr.p->lastOverflowRec == tfoOverflowRecPtr.i);
+ jam();
+ fragrecptr.p->lastOverflowRec = tfoOverflowRecPtr.p->prevOverRec;
+ }//if
+ if (tfoOverflowRecPtr.p->prevOverRec != RNIL) {
+ jam();
+ tfoPrevOverflowRecPtr.i = tfoOverflowRecPtr.p->prevOverRec;
+ ptrCheckGuard(tfoPrevOverflowRecPtr, coverflowrecsize, overflowRecord);
+ tfoPrevOverflowRecPtr.p->nextOverRec = tfoOverflowRecPtr.p->nextOverRec;
+ } else {
+ ndbrequire(fragrecptr.p->firstOverflowRec == tfoOverflowRecPtr.i);
+ jam();
+ fragrecptr.p->firstOverflowRec = tfoOverflowRecPtr.p->nextOverRec;
+ }//if
+}//Dbacc::takeRecOutOfFreeOverpage()
+
+void
+Dbacc::reportMemoryUsage(Signal* signal, int gth){
+ signal->theData[0] = NDB_LE_MemoryUsage;
+ signal->theData[1] = gth;
+ signal->theData[2] = sizeof(* rpPageptr.p);
+ signal->theData[3] = cnoOfAllocatedPages;
+ signal->theData[4] = cpagesize;
+ signal->theData[5] = DBACC;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 6, JBB);
+}
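+
+// reportMemoryUsage packs the DBACC page pool status (page size in bytes, number
+// of allocated pages, total number of pages) into an NDB_LE_MemoryUsage event
+// report and sends it to CMVMI. The gth argument is forwarded unchanged.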
+
+void
+Dbacc::execDUMP_STATE_ORD(Signal* signal)
+{
+ DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0];
+ if (dumpState->args[0] == DumpStateOrd::AccDumpOneScanRec){
+ Uint32 recordNo = RNIL;
+ if (signal->length() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ if (recordNo >= cscanRecSize)
+ return;
+
+ scanPtr.i = recordNo;
+ ptrAss(scanPtr, scanRec);
+ infoEvent("Dbacc::ScanRec[%d]: state=%d, transid(0x%x, 0x%x)",
+ scanPtr.i, scanPtr.p->scanState,scanPtr.p->scanTrid1,
+ scanPtr.p->scanTrid2);
+ infoEvent(" timer=%d, continueBCount=%d, "
+ "activeLocalFrag=%d, root=%d, nextBucketIndex=%d",
+ scanPtr.p->scanTimer,
+ scanPtr.p->scanContinuebCounter,
+ scanPtr.p->activeLocalFrag,
+ scanPtr.p->rootPtr,
+ scanPtr.p->nextBucketIndex);
+ infoEvent(" scanNextfreerec=%d firstActOp=%d firstLockedOp=%d, "
+ "scanLastLockedOp=%d firstQOp=%d lastQOp=%d",
+ scanPtr.p->scanNextfreerec,
+ scanPtr.p->scanFirstActiveOp,
+ scanPtr.p->scanFirstLockedOp,
+ scanPtr.p->scanLastLockedOp,
+ scanPtr.p->scanFirstQueuedOp,
+ scanPtr.p->scanLastQueuedOp);
+ infoEvent(" scanUserP=%d, startNoBuck=%d, minBucketIndexToRescan=%d, "
+ "maxBucketIndexToRescan=%d",
+ scanPtr.p->scanUserptr,
+ scanPtr.p->startNoOfBuckets,
+ scanPtr.p->minBucketIndexToRescan,
+ scanPtr.p->maxBucketIndexToRescan);
+ infoEvent(" scanBucketState=%d, scanLockHeld=%d, userBlockRef=%d, "
+ "scanMask=%d scanLockMode=%d",
+ scanPtr.p->scanBucketState,
+ scanPtr.p->scanLockHeld,
+ scanPtr.p->scanUserblockref,
+ scanPtr.p->scanMask,
+ scanPtr.p->scanLockMode);
+ return;
+ }
+
+ // Dump all ScanRec(ords)
+ if (dumpState->args[0] == DumpStateOrd::AccDumpAllScanRec){
+ Uint32 recordNo = 0;
+ if (signal->length() == 1)
+ infoEvent("ACC: Dump all ScanRec - size: %d",
+ cscanRecSize);
+ else if (signal->length() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ dumpState->args[0] = DumpStateOrd::AccDumpOneScanRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+
+ if (recordNo < cscanRecSize-1){
+ dumpState->args[0] = DumpStateOrd::AccDumpAllScanRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ return;
+ }
+
+ // Dump all active ScanRec(ords)
+ if (dumpState->args[0] == DumpStateOrd::AccDumpAllActiveScanRec){
+ Uint32 recordNo = 0;
+ if (signal->length() == 1)
+ infoEvent("ACC: Dump active ScanRec - size: %d",
+ cscanRecSize);
+ else if (signal->length() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ ScanRecPtr sp;
+ sp.i = recordNo;
+ ptrAss(sp, scanRec);
+ if (sp.p->scanState != ScanRec::SCAN_DISCONNECT){
+ dumpState->args[0] = DumpStateOrd::AccDumpOneScanRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+ }
+
+ if (recordNo < cscanRecSize-1){
+ dumpState->args[0] = DumpStateOrd::AccDumpAllActiveScanRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ return;
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::DumpPageMemory){
+ reportMemoryUsage(signal, 0);
+ return;
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::EnableUndoDelayDataWrite){
+ ndbout << "Dbacc:: delay write of datapages for table = "
+ << dumpState->args[1]<< endl;
+ c_errorInsert3000_TableId = dumpState->args[1];
+ SET_ERROR_INSERT_VALUE(3000);
+ return;
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::AccDumpOneOperationRec){
+ Uint32 recordNo = RNIL;
+ if (signal->length() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ if (recordNo >= coprecsize)
+ return;
+
+ OperationrecPtr tmpOpPtr;
+ tmpOpPtr.i = recordNo;
+ ptrAss(tmpOpPtr, operationrec);
+ infoEvent("Dbacc::operationrec[%d]: opState=%d, transid(0x%x, 0x%x)",
+ tmpOpPtr.i, tmpOpPtr.p->opState, tmpOpPtr.p->transId1,
+ tmpOpPtr.p->transId2);
+ infoEvent("elementIsforward=%d, elementPage=%d, elementPointer=%d ",
+ tmpOpPtr.p->elementIsforward, tmpOpPtr.p->elementPage,
+ tmpOpPtr.p->elementPointer);
+ infoEvent("fid=%d, fragptr=%d, hashvaluePart=%d ",
+ tmpOpPtr.p->fid, tmpOpPtr.p->fragptr,
+ tmpOpPtr.p->hashvaluePart);
+ infoEvent("hashValue=%d, insertDeleteLen=%d, keyinfoPage=%d ",
+ tmpOpPtr.p->hashValue, tmpOpPtr.p->insertDeleteLen,
+ tmpOpPtr.p->keyinfoPage);
+ infoEvent("nextLockOwnerOp=%d, nextOp=%d, nextParallelQue=%d ",
+ tmpOpPtr.p->nextLockOwnerOp, tmpOpPtr.p->nextOp,
+ tmpOpPtr.p->nextParallelQue);
+ infoEvent("nextQueOp=%d, nextSerialQue=%d, prevOp=%d ",
+ tmpOpPtr.p->nextQueOp, tmpOpPtr.p->nextSerialQue,
+ tmpOpPtr.p->prevOp);
+ infoEvent("prevLockOwnerOp=%d, prevParallelQue=%d, prevQueOp=%d ",
+ tmpOpPtr.p->prevLockOwnerOp, tmpOpPtr.p->prevParallelQue,
+ tmpOpPtr.p->prevQueOp);
+ infoEvent("prevSerialQue=%d, scanRecPtr=%d, longPagePtr=%d ",
+ tmpOpPtr.p->prevSerialQue, tmpOpPtr.p->scanRecPtr,
+ tmpOpPtr.p->longPagePtr);
+ infoEvent("transactionstate=%d, elementIsDisappeared=%d, insertIsDone=%d ",
+ tmpOpPtr.p->transactionstate, tmpOpPtr.p->elementIsDisappeared,
+ tmpOpPtr.p->insertIsDone);
+ infoEvent("lockMode=%d, lockOwner=%d, nodeType=%d ",
+ tmpOpPtr.p->lockMode, tmpOpPtr.p->lockOwner,
+ tmpOpPtr.p->nodeType);
+ infoEvent("operation=%d, opSimple=%d, dirtyRead=%d,scanBits=%d ",
+ tmpOpPtr.p->operation, tmpOpPtr.p->opSimple,
+ tmpOpPtr.p->dirtyRead, tmpOpPtr.p->scanBits);
+ return;
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::AccDumpNumOpRecs){
+
+ Uint32 freeOpRecs = 0;
+ OperationrecPtr opRecPtr;
+ opRecPtr.i = cfreeopRec;
+ while (opRecPtr.i != RNIL){
+ freeOpRecs++;
+ ptrCheckGuard(opRecPtr, coprecsize, operationrec);
+ opRecPtr.i = opRecPtr.p->nextOp;
+ }
+
+ infoEvent("Dbacc::OperationRecords: num=%d, free=%d",
+ coprecsize, freeOpRecs);
+
+ return;
+ }
+ if(dumpState->args[0] == DumpStateOrd::AccDumpFreeOpRecs){
+
+ OperationrecPtr opRecPtr;
+ opRecPtr.i = cfreeopRec;
+ while (opRecPtr.i != RNIL){
+
+ dumpState->args[0] = DumpStateOrd::AccDumpOneOperationRec;
+ dumpState->args[1] = opRecPtr.i;
+ execDUMP_STATE_ORD(signal);
+
+ ptrCheckGuard(opRecPtr, coprecsize, operationrec);
+ opRecPtr.i = opRecPtr.p->nextOp;
+ }
+
+
+ return;
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::AccDumpNotFreeOpRecs){
+ Uint32 recordStart = RNIL;
+ if (signal->length() == 2)
+ recordStart = dumpState->args[1];
+ else
+ return;
+
+ if (recordStart >= coprecsize)
+ return;
+
+ for (Uint32 i = recordStart; i < coprecsize; i++){
+
+ bool inFreeList = false;
+ OperationrecPtr opRecPtr;
+ opRecPtr.i = cfreeopRec;
+ while (opRecPtr.i != RNIL){
+ if (opRecPtr.i == i){
+ inFreeList = true;
+ break;
+ }
+ ptrCheckGuard(opRecPtr, coprecsize, operationrec);
+ opRecPtr.i = opRecPtr.p->nextOp;
+ }
+ if (inFreeList == false){
+ dumpState->args[0] = DumpStateOrd::AccDumpOneOperationRec;
+ dumpState->args[1] = i;
+ execDUMP_STATE_ORD(signal);
+ }
+ }
+ return;
+ }
+
+#if 0
+ if (type == 100) {
+ RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend();
+ req->primaryTableId = 2;
+ req->secondaryTableId = RNIL;
+ req->userPtr = 2;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal,
+ RelTabMemReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 101) {
+ RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend();
+ req->primaryTableId = 4;
+ req->secondaryTableId = 5;
+ req->userPtr = 4;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal,
+ RelTabMemReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 102) {
+ RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend();
+ req->primaryTableId = 6;
+ req->secondaryTableId = 8;
+ req->userPtr = 6;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal,
+ RelTabMemReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 103) {
+ DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend();
+ req->primaryTableId = 2;
+ req->secondaryTableId = RNIL;
+ req->userPtr = 2;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal,
+ DropTabFileReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 104) {
+ DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend();
+ req->primaryTableId = 4;
+ req->secondaryTableId = 5;
+ req->userPtr = 4;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal,
+ DropTabFileReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 105) {
+ DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend();
+ req->primaryTableId = 6;
+ req->secondaryTableId = 8;
+ req->userPtr = 6;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal,
+ DropTabFileReq::SignalLength, JBB);
+ return;
+ }//if
+#endif
+}//Dbacc::execDUMP_STATE_ORD()
+
+void Dbacc::execSET_VAR_REQ(Signal* signal)
+{
+#if 0
+ SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
+ ConfigParamId var = setVarReq->variable();
+ int val = setVarReq->value();
+
+
+ switch (var) {
+
+ case NoOfDiskPagesToDiskAfterRestartACC:
+ clblPagesPerTick = val;
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ case NoOfDiskPagesToDiskDuringRestartACC:
+ // Valid only during start so value not set.
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ default:
+ sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
+ } // switch
+#endif
+
+}//execSET_VAR_REQ()
+
+void
+Dbacc::execREAD_PSEUDO_REQ(Signal* signal){
+ jamEntry();
+ fragrecptr.i = signal->theData[0];
+ Uint32 attrId = signal->theData[1];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ Uint64 tmp;
+ switch(attrId){
+ case AttributeHeader::ROW_COUNT:
+ tmp = rootfragrecptr.p->noOfElements;
+ break;
+ case AttributeHeader::COMMIT_COUNT:
+ tmp = rootfragrecptr.p->m_commit_count;
+ break;
+ default:
+ tmp = 0;
+ }
+ memcpy(signal->theData, &tmp, 8); /* must be memcpy, gives strange results on
+ * Itanium gcc (GCC) 3.4.1 smp linux 2.4
+ * otherwise
+ */
+ // Uint32 * src = (Uint32*)&tmp;
+ // signal->theData[0] = src[0];
+ // signal->theData[1] = src[1];
+}
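+
+// execREAD_PSEUDO_REQ serves the pseudo columns ROW_COUNT and COMMIT_COUNT from
+// the root fragment record and returns the 64-bit value in signal->theData[0..1]
+// (written with memcpy, see the note above). Unknown attribute ids return 0.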
+
diff --git a/storage/ndb/src/kernel/blocks/dbacc/Makefile.am b/storage/ndb/src/kernel/blocks/dbacc/Makefile.am
new file mode 100644
index 00000000000..75a457da5bb
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbacc/Makefile.am
@@ -0,0 +1,26 @@
+
+noinst_LIBRARIES = libdbacc.a
+
+libdbacc_a_SOURCES = DbaccInit.cpp DbaccMain.cpp
+
+INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/kernel/blocks/dbtup
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdbacc.dsp
+
+libdbacc.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libdbacc_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbdict/CreateIndex.txt b/storage/ndb/src/kernel/blocks/dbdict/CreateIndex.txt
index 3d11e501c07..3d11e501c07 100644
--- a/ndb/src/kernel/blocks/dbdict/CreateIndex.txt
+++ b/storage/ndb/src/kernel/blocks/dbdict/CreateIndex.txt
diff --git a/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt b/storage/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt
index d37732dcda1..d37732dcda1 100644
--- a/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt
+++ b/storage/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt
diff --git a/ndb/src/kernel/blocks/dbdict/CreateTable.txt b/storage/ndb/src/kernel/blocks/dbdict/CreateTable.txt
index 0b37e5d767f..0b37e5d767f 100644
--- a/ndb/src/kernel/blocks/dbdict/CreateTable.txt
+++ b/storage/ndb/src/kernel/blocks/dbdict/CreateTable.txt
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
new file mode 100644
index 00000000000..2c93afc4afd
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -0,0 +1,12144 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+#include <my_sys.h>
+
+#define DBDICT_C
+#include "Dbdict.hpp"
+
+#include <ndb_limits.h>
+#include <NdbOut.hpp>
+#include <Properties.hpp>
+#include <Configuration.hpp>
+#include <SectionReader.hpp>
+#include <SimpleProperties.hpp>
+#include <AttributeHeader.hpp>
+#include <signaldata/DictSchemaInfo.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/DropTabFile.hpp>
+
+#include <signaldata/EventReport.hpp>
+#include <signaldata/FsCloseReq.hpp>
+#include <signaldata/FsConf.hpp>
+#include <signaldata/FsOpenReq.hpp>
+#include <signaldata/FsReadWriteReq.hpp>
+#include <signaldata/FsRef.hpp>
+#include <signaldata/GetTabInfo.hpp>
+#include <signaldata/GetTableId.hpp>
+#include <signaldata/HotSpareRep.hpp>
+#include <signaldata/NFCompleteRep.hpp>
+#include <signaldata/NodeFailRep.hpp>
+#include <signaldata/ReadNodesConf.hpp>
+#include <signaldata/RelTabMem.hpp>
+#include <signaldata/WaitGCP.hpp>
+#include <signaldata/ListTables.hpp>
+
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/AlterTrig.hpp>
+#include <signaldata/DropTrig.hpp>
+#include <signaldata/CreateIndx.hpp>
+#include <signaldata/DropIndx.hpp>
+#include <signaldata/BuildIndx.hpp>
+
+#include <signaldata/CreateEvnt.hpp>
+#include <signaldata/UtilPrepare.hpp>
+#include <signaldata/UtilExecute.hpp>
+#include <signaldata/UtilRelease.hpp>
+#include <signaldata/SumaImpl.hpp>
+#include <GrepError.hpp>
+//#include <signaldata/DropEvnt.hpp>
+
+#include <signaldata/LqhFrag.hpp>
+
+#include <signaldata/DiAddTab.hpp>
+#include <signaldata/DihStartTab.hpp>
+
+#include <signaldata/DropTable.hpp>
+#include <signaldata/DropTab.hpp>
+#include <signaldata/PrepDropTab.hpp>
+
+#include <signaldata/CreateTable.hpp>
+#include <signaldata/AlterTable.hpp>
+#include <signaldata/AlterTab.hpp>
+#include <signaldata/CreateFragmentation.hpp>
+#include <signaldata/CreateTab.hpp>
+#include <NdbSleep.h>
+
+#define ZNOT_FOUND 626
+#define ZALREADYEXIST 630
+
+//#define EVENT_PH2_DEBUG
+//#define EVENT_PH3_DEBUG
+//#define EVENT_DEBUG
+
+#define EVENT_TRACE \
+// ndbout_c("Event debug trace: File: %s Line: %u", __FILE__, __LINE__)
+
+#define DIV(x,y) (((x)+(y)-1)/(y))
+#include <ndb_version.h>
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: GENERAL MODULE -------------------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains general stuff. Mostly debug signals and */
+/* general signals that go into a specific module after checking a */
+/* state variable. Also general subroutines used by many. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+/* ---------------------------------------------------------------- */
+// This signal is used to dump states of various variables in the
+// block by command.
+/* ---------------------------------------------------------------- */
+void
+Dbdict::execDUMP_STATE_ORD(Signal* signal)
+{
+ jamEntry();
+
+#ifdef VM_TRACE
+ if(signal->theData[0] == 1222){
+ const Uint32 tab = signal->theData[1];
+ PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
+ req->senderRef = reference();
+ req->senderData = 1222;
+ req->tableId = tab;
+ sendSignal(DBLQH_REF, GSN_PREP_DROP_TAB_REQ, signal,
+ PrepDropTabReq::SignalLength, JBB);
+ }
+
+ if(signal->theData[0] == 1223){
+ const Uint32 tab = signal->theData[1];
+ PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
+ req->senderRef = reference();
+ req->senderData = 1222;
+ req->tableId = tab;
+ sendSignal(DBTC_REF, GSN_PREP_DROP_TAB_REQ, signal,
+ PrepDropTabReq::SignalLength, JBB);
+ }
+
+ if(signal->theData[0] == 1224){
+ const Uint32 tab = signal->theData[1];
+ PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
+ req->senderRef = reference();
+ req->senderData = 1222;
+ req->tableId = tab;
+ sendSignal(DBDIH_REF, GSN_PREP_DROP_TAB_REQ, signal,
+ PrepDropTabReq::SignalLength, JBB);
+ }
+
+ if(signal->theData[0] == 1225){
+ const Uint32 tab = signal->theData[1];
+ const Uint32 ver = signal->theData[2];
+ TableRecordPtr tabRecPtr;
+ c_tableRecordPool.getPtr(tabRecPtr, tab);
+ DropTableReq * req = (DropTableReq*)signal->getDataPtr();
+ req->senderData = 1225;
+ req->senderRef = numberToRef(1,1);
+ req->tableId = tab;
+ req->tableVersion = tabRecPtr.p->tableVersion + ver;
+ sendSignal(DBDICT_REF, GSN_DROP_TABLE_REQ, signal,
+ DropTableReq::SignalLength, JBB);
+ }
+#endif
+
+ return;
+}//Dbdict::execDUMP_STATE_ORD()
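+
+/* ---------------------------------------------------------------- */
+// Note: the dump codes 1222-1225 above are only available in VM_TRACE
+// builds. Codes 1222-1224 inject PREP_DROP_TAB_REQ towards LQH, TC and
+// DIH respectively, and 1225 injects DROP_TABLE_REQ towards DICT,
+// apparently for test purposes.
+/* ---------------------------------------------------------------- */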
+
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+// CONTINUEB is used when a real-time break is needed for long
+// processes.
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+void Dbdict::execCONTINUEB(Signal* signal)
+{
+ jamEntry();
+ switch (signal->theData[0]) {
+ case ZPACK_TABLE_INTO_PAGES :
+ jam();
+ packTableIntoPages(signal, signal->theData[1], signal->theData[2]);
+ break;
+
+ case ZSEND_GET_TAB_RESPONSE :
+ jam();
+ sendGetTabResponse(signal);
+ break;
+
+ default :
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//execCONTINUEB()
+
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+// Routine to handle pack table into pages.
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+
+void Dbdict::packTableIntoPages(Signal* signal, Uint32 tableId, Uint32 pageId)
+{
+
+ PageRecordPtr pagePtr;
+ TableRecordPtr tablePtr;
+ c_pageRecordArray.getPtr(pagePtr, pageId);
+
+ memset(&pagePtr.p->word[0], 0, 4 * ZPAGE_HEADER_SIZE);
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ LinearWriter w(&pagePtr.p->word[ZPAGE_HEADER_SIZE],
+ 8 * ZSIZE_OF_PAGES_IN_WORDS);
+
+ w.first();
+ packTableIntoPagesImpl(w, tablePtr, signal);
+
+ Uint32 wordsOfTable = w.getWordsUsed();
+ Uint32 pagesUsed =
+ DIV(wordsOfTable + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
+ pagePtr.p->word[ZPOS_CHECKSUM] =
+ computeChecksum(&pagePtr.p->word[0], pagesUsed * ZSIZE_OF_PAGES_IN_WORDS);
+
+ switch (c_packTable.m_state) {
+ case PackTable::PTS_IDLE:
+ case PackTable::PTS_ADD_TABLE_MASTER:
+ case PackTable::PTS_ADD_TABLE_SLAVE:
+ case PackTable::PTS_RESTART:
+ ndbrequire(false);
+ break;
+ case PackTable::PTS_GET_TAB:
+ jam();
+ c_retrieveRecord.retrievedNoOfPages = pagesUsed;
+ c_retrieveRecord.retrievedNoOfWords = wordsOfTable;
+ sendGetTabResponse(signal);
+ return;
+ break;
+ }//switch
+ ndbrequire(false);
+ return;
+}//packTableIntoPages()
+
+void
+Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
+ TableRecordPtr tablePtr,
+ Signal* signal){
+
+ w.add(DictTabInfo::TableName, tablePtr.p->tableName);
+ w.add(DictTabInfo::TableId, tablePtr.i);
+#ifdef HAVE_TABLE_REORG
+ w.add(DictTabInfo::SecondTableId, tablePtr.p->secondTable);
+#else
+ w.add(DictTabInfo::SecondTableId, (Uint32)0);
+#endif
+ w.add(DictTabInfo::TableVersion, tablePtr.p->tableVersion);
+ w.add(DictTabInfo::NoOfKeyAttr, tablePtr.p->noOfPrimkey);
+ w.add(DictTabInfo::NoOfAttributes, tablePtr.p->noOfAttributes);
+ w.add(DictTabInfo::NoOfNullable, tablePtr.p->noOfNullAttr);
+ w.add(DictTabInfo::NoOfVariable, (Uint32)0);
+ w.add(DictTabInfo::KeyLength, tablePtr.p->tupKeyLength);
+
+ w.add(DictTabInfo::TableLoggedFlag, tablePtr.p->storedTable);
+ w.add(DictTabInfo::MinLoadFactor, tablePtr.p->minLoadFactor);
+ w.add(DictTabInfo::MaxLoadFactor, tablePtr.p->maxLoadFactor);
+ w.add(DictTabInfo::TableKValue, tablePtr.p->kValue);
+ w.add(DictTabInfo::FragmentTypeVal, tablePtr.p->fragmentType);
+ w.add(DictTabInfo::TableTypeVal, tablePtr.p->tableType);
+
+ if(!signal)
+ {
+ w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount);
+ }
+ else
+ {
+ Uint32 * theData = signal->getDataPtrSend();
+ CreateFragmentationReq * const req = (CreateFragmentationReq*)theData;
+ req->senderRef = 0;
+ req->senderData = RNIL;
+ req->fragmentationType = tablePtr.p->fragmentType;
+ req->noOfFragments = 0;
+ req->primaryTableId = tablePtr.i;
+ EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal,
+ CreateFragmentationReq::SignalLength);
+ if(signal->theData[0] == 0)
+ {
+ Uint16 *data = (Uint16*)&signal->theData[25];
+ Uint32 count = 2 + data[0] * data[1];
+ w.add(DictTabInfo::FragmentDataLen, 2*count);
+ w.add(DictTabInfo::FragmentData, data, 2*count);
+ }
+ }
+
+ if (tablePtr.p->primaryTableId != RNIL){
+ TableRecordPtr primTab;
+ c_tableRecordPool.getPtr(primTab, tablePtr.p->primaryTableId);
+ w.add(DictTabInfo::PrimaryTable, primTab.p->tableName);
+ w.add(DictTabInfo::PrimaryTableId, tablePtr.p->primaryTableId);
+ w.add(DictTabInfo::IndexState, tablePtr.p->indexState);
+ w.add(DictTabInfo::InsertTriggerId, tablePtr.p->insertTriggerId);
+ w.add(DictTabInfo::UpdateTriggerId, tablePtr.p->updateTriggerId);
+ w.add(DictTabInfo::DeleteTriggerId, tablePtr.p->deleteTriggerId);
+ w.add(DictTabInfo::CustomTriggerId, tablePtr.p->customTriggerId);
+ }
+ w.add(DictTabInfo::FrmLen, tablePtr.p->frmLen);
+ w.add(DictTabInfo::FrmData, tablePtr.p->frmData, tablePtr.p->frmLen);
+
+ Uint32 nextAttribute = tablePtr.p->firstAttribute;
+ AttributeRecordPtr attrPtr;
+ do {
+ jam();
+ c_attributeRecordPool.getPtr(attrPtr, nextAttribute);
+
+ w.add(DictTabInfo::AttributeName, attrPtr.p->attributeName);
+ w.add(DictTabInfo::AttributeId, attrPtr.p->attributeId);
+ w.add(DictTabInfo::AttributeKeyFlag, attrPtr.p->tupleKey > 0);
+
+ const Uint32 desc = attrPtr.p->attributeDescriptor;
+ const Uint32 attrType = AttributeDescriptor::getType(desc);
+ const Uint32 attrSize = AttributeDescriptor::getSize(desc);
+ const Uint32 arraySize = AttributeDescriptor::getArraySize(desc);
+ const Uint32 nullable = AttributeDescriptor::getNullable(desc);
+ const Uint32 DKey = AttributeDescriptor::getDKey(desc);
+
+ // AttributeType deprecated
+ w.add(DictTabInfo::AttributeSize, attrSize);
+ w.add(DictTabInfo::AttributeArraySize, arraySize);
+ w.add(DictTabInfo::AttributeNullableFlag, nullable);
+ w.add(DictTabInfo::AttributeDKey, DKey);
+ w.add(DictTabInfo::AttributeExtType, attrType);
+ w.add(DictTabInfo::AttributeExtPrecision, attrPtr.p->extPrecision);
+ w.add(DictTabInfo::AttributeExtScale, attrPtr.p->extScale);
+ w.add(DictTabInfo::AttributeExtLength, attrPtr.p->extLength);
+ w.add(DictTabInfo::AttributeAutoIncrement,
+ (Uint32)attrPtr.p->autoIncrement);
+ w.add(DictTabInfo::AttributeDefaultValue, attrPtr.p->defaultValue);
+
+ w.add(DictTabInfo::AttributeEnd, 1);
+ nextAttribute = attrPtr.p->nextAttrInTable;
+ } while (nextAttribute != RNIL);
+
+ w.add(DictTabInfo::TableEnd, 1);
+}
+
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+// The routines to handle responses from the file system.
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+
+/* ---------------------------------------------------------------- */
+// A file was successfully closed.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSCLOSECONF(Signal* signal)
+{
+ FsConnectRecordPtr fsPtr;
+ FsConf * const fsConf = (FsConf *)&signal->theData[0];
+ jamEntry();
+ c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer);
+ switch (fsPtr.p->fsState) {
+ case FsConnectRecord::CLOSE_WRITE_SCHEMA:
+ jam();
+ closeWriteSchemaConf(signal, fsPtr);
+ break;
+ case FsConnectRecord::CLOSE_READ_SCHEMA:
+ jam();
+ closeReadSchemaConf(signal, fsPtr);
+ break;
+ case FsConnectRecord::CLOSE_READ_TAB_FILE:
+ jam();
+ closeReadTableConf(signal, fsPtr);
+ break;
+ case FsConnectRecord::CLOSE_WRITE_TAB_FILE:
+ jam();
+ closeWriteTableConf(signal, fsPtr);
+ break;
+ default:
+ jamLine((fsPtr.p->fsState & 0xFFF));
+ ndbrequire(false);
+ break;
+ }//switch
+}//execFSCLOSECONF()
+
+/* ---------------------------------------------------------------- */
+// A close file was refused.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSCLOSEREF(Signal* signal)
+{
+ jamEntry();
+ progError(0, 0);
+}//execFSCLOSEREF()
+
+/* ---------------------------------------------------------------- */
+// A file was successfully opened.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSOPENCONF(Signal* signal)
+{
+ FsConnectRecordPtr fsPtr;
+ jamEntry();
+ FsConf * const fsConf = (FsConf *)&signal->theData[0];
+ c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer);
+
+ Uint32 filePointer = fsConf->filePointer;
+ fsPtr.p->filePtr = filePointer;
+ switch (fsPtr.p->fsState) {
+ case FsConnectRecord::OPEN_WRITE_SCHEMA:
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::WRITE_SCHEMA;
+ writeSchemaFile(signal, filePointer, fsPtr.i);
+ break;
+ case FsConnectRecord::OPEN_READ_SCHEMA1:
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::READ_SCHEMA1;
+ readSchemaFile(signal, filePointer, fsPtr.i);
+ break;
+ case FsConnectRecord::OPEN_READ_SCHEMA2:
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::READ_SCHEMA2;
+ readSchemaFile(signal, filePointer, fsPtr.i);
+ break;
+ case FsConnectRecord::OPEN_READ_TAB_FILE1:
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::READ_TAB_FILE1;
+ readTableFile(signal, filePointer, fsPtr.i);
+ break;
+ case FsConnectRecord::OPEN_READ_TAB_FILE2:
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::READ_TAB_FILE2;
+ readTableFile(signal, filePointer, fsPtr.i);
+ break;
+ case FsConnectRecord::OPEN_WRITE_TAB_FILE:
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::WRITE_TAB_FILE;
+ writeTableFile(signal, filePointer, fsPtr.i);
+ break;
+ default:
+ jamLine((fsPtr.p->fsState & 0xFFF));
+ ndbrequire(false);
+ break;
+ }//switch
+}//execFSOPENCONF()
+
+/* ---------------------------------------------------------------- */
+// An open file was refused.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSOPENREF(Signal* signal)
+{
+ jamEntry();
+ FsRef * const fsRef = (FsRef *)&signal->theData[0];
+ FsConnectRecordPtr fsPtr;
+ c_fsConnectRecordPool.getPtr(fsPtr, fsRef->userPointer);
+ switch (fsPtr.p->fsState) {
+ case FsConnectRecord::OPEN_READ_SCHEMA1:
+ openReadSchemaRef(signal, fsPtr);
+ break;
+ case FsConnectRecord::OPEN_READ_TAB_FILE1:
+ jam();
+ openReadTableRef(signal, fsPtr);
+ break;
+ default:
+ jamLine((fsPtr.p->fsState & 0xFFF));
+ ndbrequire(false);
+ break;
+ }//switch
+}//execFSOPENREF()
+
+/* ---------------------------------------------------------------- */
+// A file was successfully read.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSREADCONF(Signal* signal)
+{
+ jamEntry();
+ FsConf * const fsConf = (FsConf *)&signal->theData[0];
+ FsConnectRecordPtr fsPtr;
+ c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer);
+ switch (fsPtr.p->fsState) {
+ case FsConnectRecord::READ_SCHEMA1:
+ case FsConnectRecord::READ_SCHEMA2:
+ readSchemaConf(signal, fsPtr);
+ break;
+ case FsConnectRecord::READ_TAB_FILE1:
+ case FsConnectRecord::READ_TAB_FILE2:
+ jam();
+ readTableConf(signal, fsPtr);
+ break;
+ default:
+ jamLine((fsPtr.p->fsState & 0xFFF));
+ ndbrequire(false);
+ break;
+ }//switch
+}//execFSREADCONF()
+
+/* ---------------------------------------------------------------- */
+// A read file was refused.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSREADREF(Signal* signal)
+{
+ jamEntry();
+ FsRef * const fsRef = (FsRef *)&signal->theData[0];
+ FsConnectRecordPtr fsPtr;
+ c_fsConnectRecordPool.getPtr(fsPtr, fsRef->userPointer);
+ switch (fsPtr.p->fsState) {
+ case FsConnectRecord::READ_SCHEMA1:
+ readSchemaRef(signal, fsPtr);
+ break;
+ case FsConnectRecord::READ_TAB_FILE1:
+ jam();
+ readTableRef(signal, fsPtr);
+ break;
+ default:
+ jamLine((fsPtr.p->fsState & 0xFFF));
+ ndbrequire(false);
+ break;
+ }//switch
+}//execFSREADREF()
+
+/* ---------------------------------------------------------------- */
+// A file was successfully written.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSWRITECONF(Signal* signal)
+{
+ FsConf * const fsConf = (FsConf *)&signal->theData[0];
+ FsConnectRecordPtr fsPtr;
+ jamEntry();
+ c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer);
+ switch (fsPtr.p->fsState) {
+ case FsConnectRecord::WRITE_TAB_FILE:
+ writeTableConf(signal, fsPtr);
+ break;
+ case FsConnectRecord::WRITE_SCHEMA:
+ jam();
+ writeSchemaConf(signal, fsPtr);
+ break;
+ default:
+ jamLine((fsPtr.p->fsState & 0xFFF));
+ ndbrequire(false);
+ break;
+ }//switch
+}//execFSWRITECONF()
+
+/* ---------------------------------------------------------------- */
+// A write file was refused.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSWRITEREF(Signal* signal)
+{
+ jamEntry();
+ progError(0, 0);
+}//execFSWRITEREF()
+
+/* ---------------------------------------------------------------- */
+// Routines to handle Read/Write of Table Files
+/* ---------------------------------------------------------------- */
+void
+Dbdict::writeTableFile(Signal* signal, Uint32 tableId,
+ SegmentedSectionPtr tabInfoPtr, Callback* callback){
+
+ ndbrequire(c_writeTableRecord.tableWriteState == WriteTableRecord::IDLE);
+
+ Uint32 sz = tabInfoPtr.sz + ZPAGE_HEADER_SIZE;
+
+ c_writeTableRecord.noOfPages = DIV(sz, ZSIZE_OF_PAGES_IN_WORDS);
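+ // DIV() is taken to be integer division rounding upwards, so the page
+ // count covers all of tabInfoPtr plus the page header.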
+ c_writeTableRecord.tableWriteState = WriteTableRecord::TWR_CALLBACK;
+ c_writeTableRecord.m_callback = * callback;
+
+ c_writeTableRecord.pageId = 0;
+ ndbrequire(c_writeTableRecord.noOfPages < 8);
+
+ PageRecordPtr pageRecPtr;
+ c_pageRecordArray.getPtr(pageRecPtr, c_writeTableRecord.pageId);
+ copy(&pageRecPtr.p->word[ZPAGE_HEADER_SIZE], tabInfoPtr);
+
+ memset(&pageRecPtr.p->word[0], 0, 4 * ZPAGE_HEADER_SIZE);
+ pageRecPtr.p->word[ZPOS_CHECKSUM] =
+ computeChecksum(&pageRecPtr.p->word[0],
+ c_writeTableRecord.noOfPages * ZSIZE_OF_PAGES_IN_WORDS);
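+ // The checksum word is expected to make computeChecksum() over the
+ // same range (checksum included) return 0, which is what
+ // readTableConf() verifies when the file is read back.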
+
+ startWriteTableFile(signal, tableId);
+
+}
+
+void Dbdict::startWriteTableFile(Signal* signal, Uint32 tableId)
+{
+ FsConnectRecordPtr fsPtr;
+ c_writeTableRecord.tableId = tableId;
+ c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
+ fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_TAB_FILE;
+ openTableFile(signal, 0, fsPtr.i, tableId, true);
+ c_writeTableRecord.noOfTableFilesHandled = 0;
+}//Dbdict::startWriteTableFile()
+
+void Dbdict::openTableFile(Signal* signal,
+ Uint32 fileNo,
+ Uint32 fsConPtr,
+ Uint32 tableId,
+ bool writeFlag)
+{
+ TableRecordPtr tablePtr;
+ FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0];
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+
+ fsOpenReq->userReference = reference();
+ fsOpenReq->userPointer = fsConPtr;
+ if (writeFlag) {
+ jam();
+ fsOpenReq->fileFlags =
+ FsOpenReq::OM_WRITEONLY |
+ FsOpenReq::OM_TRUNCATE |
+ FsOpenReq::OM_CREATE |
+ FsOpenReq::OM_SYNC;
+ } else {
+ jam();
+ fsOpenReq->fileFlags = FsOpenReq::OM_READONLY;
+ }//if
+ ndbrequire(tablePtr.p->tableVersion < ZNIL);
+ fsOpenReq->fileNumber[3] = 0; // Initialise before byte changes
+ FsOpenReq::setVersion(fsOpenReq->fileNumber, 1);
+ FsOpenReq::setSuffix(fsOpenReq->fileNumber, FsOpenReq::S_TABLELIST);
+ FsOpenReq::v1_setDisk(fsOpenReq->fileNumber, (fileNo + 1));
+ FsOpenReq::v1_setTable(fsOpenReq->fileNumber, tableId);
+ FsOpenReq::v1_setFragment(fsOpenReq->fileNumber, (Uint32)-1);
+ FsOpenReq::v1_setS(fsOpenReq->fileNumber, tablePtr.p->tableVersion);
+ FsOpenReq::v1_setP(fsOpenReq->fileNumber, 255);
+/* ---------------------------------------------------------------- */
+// File name : D1/DBDICT/T0/S1.TableList
+// D1 means Disk 1 (set by fileNo + 1)
+// T0 means table id = 0
+// S1 means tableVersion 1
+// TableList indicates that this is a file for a table description.
+/* ---------------------------------------------------------------- */
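+// For example, with the v1 encoding above, fileNo = 0, tableId = 2 and
+// tableVersion = 3 would be expected to give D1/DBDICT/T2/S3.TableList;
+// the second copy (fileNo = 1) gets the same name under D2.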
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
+}//openTableFile()
+
+void Dbdict::writeTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
+{
+ FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
+
+ fsRWReq->filePointer = filePtr;
+ fsRWReq->userReference = reference();
+ fsRWReq->userPointer = fsConPtr;
+ fsRWReq->operationFlag = 0; // Initialise before bit changes
+ FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1);
+ FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
+ FsReadWriteReq::fsFormatArrayOfPages);
+ fsRWReq->varIndex = ZBAT_TABLE_FILE;
+ fsRWReq->numberOfPages = c_writeTableRecord.noOfPages;
+ fsRWReq->data.arrayOfPages.varIndex = c_writeTableRecord.pageId;
+ fsRWReq->data.arrayOfPages.fileOffset = 0; // Write to file page 0
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+}//writeTableFile()
+
+void Dbdict::writeTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ fsPtr.p->fsState = FsConnectRecord::CLOSE_WRITE_TAB_FILE;
+ closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
+ return;
+}//Dbdict::writeTableConf()
+
+void Dbdict::closeWriteTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ c_writeTableRecord.noOfTableFilesHandled++;
+ if (c_writeTableRecord.noOfTableFilesHandled < 2) {
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_TAB_FILE;
+ openTableFile(signal, 1, fsPtr.i, c_writeTableRecord.tableId, true);
+ return;
+ }
+ ndbrequire(c_writeTableRecord.noOfTableFilesHandled == 2);
+ c_fsConnectRecordPool.release(fsPtr);
+ WriteTableRecord::TableWriteState state = c_writeTableRecord.tableWriteState;
+ c_writeTableRecord.tableWriteState = WriteTableRecord::IDLE;
+ switch (state) {
+ case WriteTableRecord::IDLE:
+ case WriteTableRecord::WRITE_ADD_TABLE_MASTER :
+ case WriteTableRecord::WRITE_ADD_TABLE_SLAVE :
+ case WriteTableRecord::WRITE_RESTART_FROM_MASTER :
+ case WriteTableRecord::WRITE_RESTART_FROM_OWN :
+ ndbrequire(false);
+ break;
+ case WriteTableRecord::TWR_CALLBACK:
+ jam();
+ execute(signal, c_writeTableRecord.m_callback, 0);
+ return;
+ }
+ ndbrequire(false);
+}//Dbdict::closeWriteTableConf()
+
+void Dbdict::startReadTableFile(Signal* signal, Uint32 tableId)
+{
+ //globalSignalLoggers.log(number(), "startReadTableFile");
+ ndbrequire(!c_readTableRecord.inUse);
+
+ FsConnectRecordPtr fsPtr;
+ c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
+ c_readTableRecord.inUse = true;
+ c_readTableRecord.tableId = tableId;
+ fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE1;
+ openTableFile(signal, 0, fsPtr.i, tableId, false);
+}//Dbdict::startReadTableFile()
+
+void Dbdict::openReadTableRef(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2;
+ openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false);
+ return;
+}//Dbdict::openReadTableRef()
+
+void Dbdict::readTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
+{
+ FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
+
+ fsRWReq->filePointer = filePtr;
+ fsRWReq->userReference = reference();
+ fsRWReq->userPointer = fsConPtr;
+ fsRWReq->operationFlag = 0; // Initialise before bit changes
+ FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0);
+ FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
+ FsReadWriteReq::fsFormatArrayOfPages);
+ fsRWReq->varIndex = ZBAT_TABLE_FILE;
+ fsRWReq->numberOfPages = c_readTableRecord.noOfPages;
+ fsRWReq->data.arrayOfPages.varIndex = c_readTableRecord.pageId;
+ fsRWReq->data.arrayOfPages.fileOffset = 0; // Read from file page 0
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
+}//readTableFile()
+
+void Dbdict::readTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ /* ---------------------------------------------------------------- */
+ // Verify the data read from disk
+ /* ---------------------------------------------------------------- */
+ bool crashInd;
+ if (fsPtr.p->fsState == FsConnectRecord::READ_TAB_FILE1) {
+ jam();
+ crashInd = false;
+ } else {
+ jam();
+ crashInd = true;
+ }//if
+
+ PageRecordPtr tmpPagePtr;
+ c_pageRecordArray.getPtr(tmpPagePtr, c_readTableRecord.pageId);
+ Uint32 sz = c_readTableRecord.noOfPages * ZSIZE_OF_PAGES_IN_WORDS;
+ Uint32 chk = computeChecksum((const Uint32*)tmpPagePtr.p, sz);
+
+ ndbrequire((chk == 0) || !crashInd);
+ if(chk != 0){
+ jam();
+ ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_TAB_FILE1);
+ readTableRef(signal, fsPtr);
+ return;
+ }//if
+
+ fsPtr.p->fsState = FsConnectRecord::CLOSE_READ_TAB_FILE;
+ closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
+ return;
+}//Dbdict::readTableConf()
+
+void Dbdict::readTableRef(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2;
+ openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false);
+ return;
+}//Dbdict::readTableRef()
+
+void Dbdict::closeReadTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ c_fsConnectRecordPool.release(fsPtr);
+ c_readTableRecord.inUse = false;
+
+ execute(signal, c_readTableRecord.m_callback, 0);
+ return;
+}//Dbdict::closeReadTableConf()
+
+/* ---------------------------------------------------------------- */
+// Routines to handle Read/Write of Schema Files
+/* ---------------------------------------------------------------- */
+void
+Dbdict::updateSchemaState(Signal* signal, Uint32 tableId,
+ SchemaFile::TableEntry* te, Callback* callback){
+
+ jam();
+ ndbrequire(tableId < c_tableRecordPool.getSize());
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tableId);
+
+ SchemaFile::TableState newState =
+ (SchemaFile::TableState)te->m_tableState;
+ SchemaFile::TableState oldState =
+ (SchemaFile::TableState)tableEntry->m_tableState;
+
+ Uint32 newVersion = te->m_tableVersion;
+ Uint32 oldVersion = tableEntry->m_tableVersion;
+
+ bool ok = false;
+ switch(newState){
+ case SchemaFile::ADD_STARTED:
+ jam();
+ ok = true;
+ ndbrequire((oldVersion + 1) == newVersion);
+ ndbrequire(oldState == SchemaFile::INIT ||
+ oldState == SchemaFile::DROP_TABLE_COMMITTED);
+ break;
+ case SchemaFile::TABLE_ADD_COMMITTED:
+ jam();
+ ok = true;
+ ndbrequire(newVersion == oldVersion);
+ ndbrequire(oldState == SchemaFile::ADD_STARTED);
+ break;
+ case SchemaFile::ALTER_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ ndbrequire((oldVersion + 1) == newVersion);
+ ndbrequire(oldState == SchemaFile::TABLE_ADD_COMMITTED ||
+ oldState == SchemaFile::ALTER_TABLE_COMMITTED);
+ break;
+ case SchemaFile::DROP_TABLE_STARTED:
+ jam();
+ case SchemaFile::DROP_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ ndbrequire(false);
+ break;
+ case SchemaFile::INIT:
+ jam();
+ ok = true;
+ ndbrequire((oldState == SchemaFile::ADD_STARTED));
+ }//switch
+ ndbrequire(ok);
+
+ * tableEntry = * te;
+ computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES);
+
+ ndbrequire(c_writeSchemaRecord.inUse == false);
+ c_writeSchemaRecord.inUse = true;
+
+ c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+ c_writeSchemaRecord.newFile = false;
+ c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES;
+ c_writeSchemaRecord.noOfPages = 1;
+ c_writeSchemaRecord.m_callback = * callback;
+
+ startWriteSchemaFile(signal);
+}
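+
+// Minimal sketch of the page arithmetic used in updateSchemaState()
+// above: table entry N lives on schema page N / NDB_SF_PAGE_ENTRIES, so
+// only that single page has to be re-checksummed and rewritten. The
+// helper is illustrative only and is not called by the block.
+static inline Uint32
+schemaPageForTableEntry(Uint32 tableId)
+{
+ return tableId / NDB_SF_PAGE_ENTRIES;
+}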
+
+void Dbdict::startWriteSchemaFile(Signal* signal)
+{
+ FsConnectRecordPtr fsPtr;
+ c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
+ fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_SCHEMA;
+ openSchemaFile(signal, 0, fsPtr.i, true, c_writeSchemaRecord.newFile);
+ c_writeSchemaRecord.noOfSchemaFilesHandled = 0;
+}//Dbdict::startWriteSchemaFile()
+
+void Dbdict::openSchemaFile(Signal* signal,
+ Uint32 fileNo,
+ Uint32 fsConPtr,
+ bool writeFlag,
+ bool newFile)
+{
+ FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0];
+ fsOpenReq->userReference = reference();
+ fsOpenReq->userPointer = fsConPtr;
+ if (writeFlag) {
+ jam();
+ fsOpenReq->fileFlags =
+ FsOpenReq::OM_WRITEONLY |
+ FsOpenReq::OM_SYNC;
+ if (newFile)
+ fsOpenReq->fileFlags |=
+ FsOpenReq::OM_TRUNCATE |
+ FsOpenReq::OM_CREATE;
+ } else {
+ jam();
+ fsOpenReq->fileFlags = FsOpenReq::OM_READONLY;
+ }//if
+ fsOpenReq->fileNumber[3] = 0; // Initialise before byte changes
+ FsOpenReq::setVersion(fsOpenReq->fileNumber, 1);
+ FsOpenReq::setSuffix(fsOpenReq->fileNumber, FsOpenReq::S_SCHEMALOG);
+ FsOpenReq::v1_setDisk(fsOpenReq->fileNumber, (fileNo + 1));
+ FsOpenReq::v1_setTable(fsOpenReq->fileNumber, (Uint32)-1);
+ FsOpenReq::v1_setFragment(fsOpenReq->fileNumber, (Uint32)-1);
+ FsOpenReq::v1_setS(fsOpenReq->fileNumber, (Uint32)-1);
+ FsOpenReq::v1_setP(fsOpenReq->fileNumber, 0);
+/* ---------------------------------------------------------------- */
+// File name : D1/DBDICT/P0.SchemaLog
+// D1 means Disk 1 (set by fileNo + 1). Writes to both D1 and D2
+// SchemaLog indicates that this is a file giving a list of current tables.
+/* ---------------------------------------------------------------- */
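+// E.g. fileNo = 0 is expected to give D1/DBDICT/P0.SchemaLog and
+// fileNo = 1 the corresponding copy under D2.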
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
+}//openSchemaFile()
+
+void Dbdict::writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
+{
+ FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
+
+ // check write record
+ WriteSchemaRecord & wr = c_writeSchemaRecord;
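+ // wr.pageId is either 0 (schema file copy 0) or NDB_SF_MAX_PAGES
+ // (copy 1), matching the two in-memory copies set up in
+ // execREAD_CONFIG_REQ(); the checks below guard that invariant.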
+ ndbrequire(wr.pageId == (wr.pageId != 0) * NDB_SF_MAX_PAGES);
+ ndbrequire(wr.noOfPages != 0);
+ ndbrequire(wr.firstPage + wr.noOfPages <= NDB_SF_MAX_PAGES);
+
+ fsRWReq->filePointer = filePtr;
+ fsRWReq->userReference = reference();
+ fsRWReq->userPointer = fsConPtr;
+ fsRWReq->operationFlag = 0; // Initialise before bit changes
+ FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1);
+ FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
+ FsReadWriteReq::fsFormatArrayOfPages);
+ fsRWReq->varIndex = ZBAT_SCHEMA_FILE;
+ fsRWReq->numberOfPages = wr.noOfPages;
+ // Write from memory page
+ fsRWReq->data.arrayOfPages.varIndex = wr.pageId + wr.firstPage;
+ fsRWReq->data.arrayOfPages.fileOffset = wr.firstPage;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+}//writeSchemaFile()
+
+void Dbdict::writeSchemaConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ fsPtr.p->fsState = FsConnectRecord::CLOSE_WRITE_SCHEMA;
+ closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
+ return;
+}//Dbdict::writeSchemaConf()
+
+void Dbdict::closeFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
+{
+ FsCloseReq * const fsCloseReq = (FsCloseReq *)&signal->theData[0];
+ fsCloseReq->filePointer = filePtr;
+ fsCloseReq->userReference = reference();
+ fsCloseReq->userPointer = fsConPtr;
+ FsCloseReq::setRemoveFileFlag(fsCloseReq->fileFlag, false);
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, FsCloseReq::SignalLength, JBA);
+ return;
+}//closeFile()
+
+void Dbdict::closeWriteSchemaConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ c_writeSchemaRecord.noOfSchemaFilesHandled++;
+ if (c_writeSchemaRecord.noOfSchemaFilesHandled < 2) {
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_SCHEMA;
+ openSchemaFile(signal, 1, fsPtr.i, true, c_writeSchemaRecord.newFile);
+ return;
+ }
+ ndbrequire(c_writeSchemaRecord.noOfSchemaFilesHandled == 2);
+
+ c_fsConnectRecordPool.release(fsPtr);
+
+ c_writeSchemaRecord.inUse = false;
+ execute(signal, c_writeSchemaRecord.m_callback, 0);
+ return;
+}//Dbdict::closeWriteSchemaConf()
+
+void Dbdict::startReadSchemaFile(Signal* signal)
+{
+ //globalSignalLoggers.log(number(), "startReadSchemaFile");
+ FsConnectRecordPtr fsPtr;
+ c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
+ fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA1;
+ openSchemaFile(signal, 0, fsPtr.i, false, false);
+}//Dbdict::startReadSchemaFile()
+
+void Dbdict::openReadSchemaRef(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA2;
+ openSchemaFile(signal, 1, fsPtr.i, false, false);
+}//Dbdict::openReadSchemaRef()
+
+void Dbdict::readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
+{
+ FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
+
+ // check read record
+ ReadSchemaRecord & rr = c_readSchemaRecord;
+ ndbrequire(rr.pageId == (rr.pageId != 0) * NDB_SF_MAX_PAGES);
+ ndbrequire(rr.noOfPages != 0);
+ ndbrequire(rr.firstPage + rr.noOfPages <= NDB_SF_MAX_PAGES);
+
+ fsRWReq->filePointer = filePtr;
+ fsRWReq->userReference = reference();
+ fsRWReq->userPointer = fsConPtr;
+ fsRWReq->operationFlag = 0; // Initialise before bit changes
+ FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0);
+ FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
+ FsReadWriteReq::fsFormatArrayOfPages);
+ fsRWReq->varIndex = ZBAT_SCHEMA_FILE;
+ fsRWReq->numberOfPages = rr.noOfPages;
+ fsRWReq->data.arrayOfPages.varIndex = rr.pageId + rr.firstPage;
+ fsRWReq->data.arrayOfPages.fileOffset = rr.firstPage;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
+}//readSchemaFile()
+
+void Dbdict::readSchemaConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+/* ---------------------------------------------------------------- */
+// Verify the data read from disk
+/* ---------------------------------------------------------------- */
+ bool crashInd;
+ if (fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1) {
+ jam();
+ crashInd = false;
+ } else {
+ jam();
+ crashInd = true;
+ }//if
+
+ ReadSchemaRecord & rr = c_readSchemaRecord;
+ XSchemaFile * xsf = &c_schemaFile[rr.pageId != 0];
+
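+ // First pass (INITIAL_READ_HEAD) has only read page 0. Use its header
+ // to work out how many pages the file holds, then read the rest before
+ // validating the complete image.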
+ if (rr.schemaReadState == ReadSchemaRecord::INITIAL_READ_HEAD) {
+ jam();
+ ndbrequire(rr.firstPage == 0);
+ SchemaFile * sf = &xsf->schemaPage[0];
+ Uint32 noOfPages;
+ if (sf->NdbVersion < NDB_SF_VERSION_5_0_6) {
+ jam();
+ const Uint32 pageSize_old = 32 * 1024;
+ noOfPages = pageSize_old / NDB_SF_PAGE_SIZE - 1;
+ } else {
+ noOfPages = sf->FileSize / NDB_SF_PAGE_SIZE - 1;
+ }
+ rr.schemaReadState = ReadSchemaRecord::INITIAL_READ;
+ if (noOfPages != 0) {
+ rr.firstPage = 1;
+ rr.noOfPages = noOfPages;
+ readSchemaFile(signal, fsPtr.p->filePtr, fsPtr.i);
+ return;
+ }
+ }
+
+ SchemaFile * sf0 = &xsf->schemaPage[0];
+ xsf->noOfPages = sf0->FileSize / NDB_SF_PAGE_SIZE;
+
+ if (sf0->NdbVersion < NDB_SF_VERSION_5_0_6 &&
+ ! convertSchemaFileTo_5_0_6(xsf)) {
+ jam();
+ ndbrequire(! crashInd);
+ ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1);
+ readSchemaRef(signal, fsPtr);
+ return;
+ }
+
+ for (Uint32 n = 0; n < xsf->noOfPages; n++) {
+ SchemaFile * sf = &xsf->schemaPage[n];
+ bool ok =
+ memcmp(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)) == 0 &&
+ sf->FileSize != 0 &&
+ sf->FileSize % NDB_SF_PAGE_SIZE == 0 &&
+ sf->FileSize == sf0->FileSize &&
+ sf->PageNumber == n &&
+ computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS) == 0;
+ ndbrequire(ok || !crashInd);
+ if (! ok) {
+ jam();
+ ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1);
+ readSchemaRef(signal, fsPtr);
+ return;
+ }
+ }
+
+ fsPtr.p->fsState = FsConnectRecord::CLOSE_READ_SCHEMA;
+ closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
+ return;
+}//Dbdict::readSchemaConf()
+
+void Dbdict::readSchemaRef(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA2;
+ openSchemaFile(signal, 1, fsPtr.i, false, false);
+ return;
+}//Dbdict::readSchemaRef()
+
+void Dbdict::closeReadSchemaConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ c_fsConnectRecordPool.release(fsPtr);
+ ReadSchemaRecord::SchemaReadState state = c_readSchemaRecord.schemaReadState;
+ c_readSchemaRecord.schemaReadState = ReadSchemaRecord::IDLE;
+
+ switch(state) {
+ case ReadSchemaRecord::INITIAL_READ :
+ jam();
+ {
+ // write back both copies
+
+ ndbrequire(c_writeSchemaRecord.inUse == false);
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0 ];
+ Uint32 noOfPages =
+ (c_tableRecordPool.getSize() + NDB_SF_PAGE_ENTRIES - 1) /
+ NDB_SF_PAGE_ENTRIES;
+ resizeSchemaFile(xsf, noOfPages);
+
+ c_writeSchemaRecord.inUse = true;
+ c_writeSchemaRecord.pageId = c_schemaRecord.oldSchemaPage;
+ c_writeSchemaRecord.newFile = true;
+ c_writeSchemaRecord.firstPage = 0;
+ c_writeSchemaRecord.noOfPages = xsf->noOfPages;
+
+ c_writeSchemaRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::initSchemaFile_conf);
+
+ startWriteSchemaFile(signal);
+ }
+ break;
+
+ default :
+ ndbrequire(false);
+ break;
+
+ }//switch
+}//Dbdict::closeReadSchemaConf()
+
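+/* ---------------------------------------------------------------- */
+// Convert a pre-5.0.6 schema file image (a single 32 kB page with the
+// old table-entry layout) into the current multi-page format, in place
+// in the XSchemaFile. Returns false if the image does not have the
+// expected old size.
+/* ---------------------------------------------------------------- */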
+bool
+Dbdict::convertSchemaFileTo_5_0_6(XSchemaFile * xsf)
+{
+ const Uint32 pageSize_old = 32 * 1024;
+ Uint32 page_old[pageSize_old >> 2];
+ SchemaFile * sf_old = (SchemaFile *)page_old;
+
+ if (xsf->noOfPages * NDB_SF_PAGE_SIZE != pageSize_old)
+ return false;
+ SchemaFile * sf0 = &xsf->schemaPage[0];
+ memcpy(sf_old, sf0, pageSize_old);
+
+ // initialise with the maximum number of new pages needed
+ xsf->noOfPages = (sf_old->NoOfTableEntries + NDB_SF_PAGE_ENTRIES - 1) /
+ NDB_SF_PAGE_ENTRIES;
+ initSchemaFile(xsf, 0, xsf->noOfPages, true);
+
+ Uint32 noOfPages = 1;
+ Uint32 n, i, j;
+ for (n = 0; n < xsf->noOfPages; n++) {
+ jam();
+ for (i = 0; i < NDB_SF_PAGE_ENTRIES; i++) {
+ j = n * NDB_SF_PAGE_ENTRIES + i;
+ if (j >= sf_old->NoOfTableEntries)
+ continue;
+ const SchemaFile::TableEntry_old & te_old = sf_old->TableEntries_old[j];
+ if (te_old.m_tableState == SchemaFile::INIT ||
+ te_old.m_tableState == SchemaFile::DROP_TABLE_COMMITTED ||
+ te_old.m_noOfPages == 0)
+ continue;
+ SchemaFile * sf = &xsf->schemaPage[n];
+ SchemaFile::TableEntry & te = sf->TableEntries[i];
+ te.m_tableState = te_old.m_tableState;
+ te.m_tableVersion = te_old.m_tableVersion;
+ te.m_tableType = te_old.m_tableType;
+ te.m_info_words = te_old.m_noOfPages * ZSIZE_OF_PAGES_IN_WORDS -
+ ZPAGE_HEADER_SIZE;
+ te.m_gcp = te_old.m_gcp;
+ if (noOfPages < n)
+ noOfPages = n;
+ }
+ }
+ xsf->noOfPages = noOfPages;
+ initSchemaFile(xsf, 0, xsf->noOfPages, false);
+
+ return true;
+}
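+
+// Minimal sketch of the sizing rule used by the conversion above and by
+// initSchemaFile(): the number of new-format pages needed for a given
+// number of table entries is a round-up division by NDB_SF_PAGE_ENTRIES.
+// Illustrative only; not called by the block.
+static inline Uint32
+schemaPagesForEntries(Uint32 noOfTableEntries)
+{
+ return (noOfTableEntries + NDB_SF_PAGE_ENTRIES - 1) / NDB_SF_PAGE_ENTRIES;
+}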
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: INITIALISATION MODULE ------------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains initialisation of data at start/restart. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+Dbdict::Dbdict(const class Configuration & conf):
+ SimulatedBlock(DBDICT, conf),
+ c_tableRecordHash(c_tableRecordPool),
+ c_attributeRecordHash(c_attributeRecordPool),
+ c_triggerRecordHash(c_triggerRecordPool),
+ c_opCreateTable(c_opRecordPool),
+ c_opDropTable(c_opRecordPool),
+ c_opCreateIndex(c_opRecordPool),
+ c_opDropIndex(c_opRecordPool),
+ c_opAlterIndex(c_opRecordPool),
+ c_opBuildIndex(c_opRecordPool),
+ c_opCreateEvent(c_opRecordPool),
+ c_opSubEvent(c_opRecordPool),
+ c_opDropEvent(c_opRecordPool),
+ c_opSignalUtil(c_opRecordPool),
+ c_opCreateTrigger(c_opRecordPool),
+ c_opDropTrigger(c_opRecordPool),
+ c_opAlterTrigger(c_opRecordPool),
+ c_opRecordSequence(0)
+{
+ BLOCK_CONSTRUCTOR(Dbdict);
+
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS, &c_maxNoOfTriggers);
+ // Transit signals
+ addRecSignal(GSN_DUMP_STATE_ORD, &Dbdict::execDUMP_STATE_ORD);
+ addRecSignal(GSN_GET_TABINFOREQ, &Dbdict::execGET_TABINFOREQ);
+ addRecSignal(GSN_GET_TABLEID_REQ, &Dbdict::execGET_TABLEDID_REQ);
+ addRecSignal(GSN_GET_TABINFO_CONF, &Dbdict::execGET_TABINFO_CONF);
+ addRecSignal(GSN_CONTINUEB, &Dbdict::execCONTINUEB);
+
+ addRecSignal(GSN_CREATE_TABLE_REQ, &Dbdict::execCREATE_TABLE_REQ);
+ addRecSignal(GSN_CREATE_TAB_REQ, &Dbdict::execCREATE_TAB_REQ);
+ addRecSignal(GSN_CREATE_TAB_REF, &Dbdict::execCREATE_TAB_REF);
+ addRecSignal(GSN_CREATE_TAB_CONF, &Dbdict::execCREATE_TAB_CONF);
+ addRecSignal(GSN_CREATE_FRAGMENTATION_REF, &Dbdict::execCREATE_FRAGMENTATION_REF);
+ addRecSignal(GSN_CREATE_FRAGMENTATION_CONF, &Dbdict::execCREATE_FRAGMENTATION_CONF);
+ addRecSignal(GSN_DIADDTABCONF, &Dbdict::execDIADDTABCONF);
+ addRecSignal(GSN_DIADDTABREF, &Dbdict::execDIADDTABREF);
+ addRecSignal(GSN_ADD_FRAGREQ, &Dbdict::execADD_FRAGREQ);
+ addRecSignal(GSN_TAB_COMMITCONF, &Dbdict::execTAB_COMMITCONF);
+ addRecSignal(GSN_TAB_COMMITREF, &Dbdict::execTAB_COMMITREF);
+ addRecSignal(GSN_ALTER_TABLE_REQ, &Dbdict::execALTER_TABLE_REQ);
+ addRecSignal(GSN_ALTER_TAB_REQ, &Dbdict::execALTER_TAB_REQ);
+ addRecSignal(GSN_ALTER_TAB_REF, &Dbdict::execALTER_TAB_REF);
+ addRecSignal(GSN_ALTER_TAB_CONF, &Dbdict::execALTER_TAB_CONF);
+
+ // Index signals
+ addRecSignal(GSN_CREATE_INDX_REQ, &Dbdict::execCREATE_INDX_REQ);
+ addRecSignal(GSN_CREATE_INDX_CONF, &Dbdict::execCREATE_INDX_CONF);
+ addRecSignal(GSN_CREATE_INDX_REF, &Dbdict::execCREATE_INDX_REF);
+
+ addRecSignal(GSN_ALTER_INDX_REQ, &Dbdict::execALTER_INDX_REQ);
+ addRecSignal(GSN_ALTER_INDX_CONF, &Dbdict::execALTER_INDX_CONF);
+ addRecSignal(GSN_ALTER_INDX_REF, &Dbdict::execALTER_INDX_REF);
+
+ addRecSignal(GSN_CREATE_TABLE_CONF, &Dbdict::execCREATE_TABLE_CONF);
+ addRecSignal(GSN_CREATE_TABLE_REF, &Dbdict::execCREATE_TABLE_REF);
+
+ addRecSignal(GSN_DROP_INDX_REQ, &Dbdict::execDROP_INDX_REQ);
+ addRecSignal(GSN_DROP_INDX_CONF, &Dbdict::execDROP_INDX_CONF);
+ addRecSignal(GSN_DROP_INDX_REF, &Dbdict::execDROP_INDX_REF);
+
+ addRecSignal(GSN_DROP_TABLE_CONF, &Dbdict::execDROP_TABLE_CONF);
+ addRecSignal(GSN_DROP_TABLE_REF, &Dbdict::execDROP_TABLE_REF);
+
+ addRecSignal(GSN_BUILDINDXREQ, &Dbdict::execBUILDINDXREQ);
+ addRecSignal(GSN_BUILDINDXCONF, &Dbdict::execBUILDINDXCONF);
+ addRecSignal(GSN_BUILDINDXREF, &Dbdict::execBUILDINDXREF);
+
+ // Util signals
+ addRecSignal(GSN_UTIL_PREPARE_CONF, &Dbdict::execUTIL_PREPARE_CONF);
+ addRecSignal(GSN_UTIL_PREPARE_REF, &Dbdict::execUTIL_PREPARE_REF);
+
+ addRecSignal(GSN_UTIL_EXECUTE_CONF, &Dbdict::execUTIL_EXECUTE_CONF);
+ addRecSignal(GSN_UTIL_EXECUTE_REF, &Dbdict::execUTIL_EXECUTE_REF);
+
+ addRecSignal(GSN_UTIL_RELEASE_CONF, &Dbdict::execUTIL_RELEASE_CONF);
+ addRecSignal(GSN_UTIL_RELEASE_REF, &Dbdict::execUTIL_RELEASE_REF);
+
+ // Event signals
+ addRecSignal(GSN_CREATE_EVNT_REQ, &Dbdict::execCREATE_EVNT_REQ);
+ addRecSignal(GSN_CREATE_EVNT_CONF, &Dbdict::execCREATE_EVNT_CONF);
+ addRecSignal(GSN_CREATE_EVNT_REF, &Dbdict::execCREATE_EVNT_REF);
+
+ addRecSignal(GSN_CREATE_SUBID_CONF, &Dbdict::execCREATE_SUBID_CONF);
+ addRecSignal(GSN_CREATE_SUBID_REF, &Dbdict::execCREATE_SUBID_REF);
+
+ addRecSignal(GSN_SUB_CREATE_CONF, &Dbdict::execSUB_CREATE_CONF);
+ addRecSignal(GSN_SUB_CREATE_REF, &Dbdict::execSUB_CREATE_REF);
+
+ addRecSignal(GSN_SUB_START_REQ, &Dbdict::execSUB_START_REQ);
+ addRecSignal(GSN_SUB_START_CONF, &Dbdict::execSUB_START_CONF);
+ addRecSignal(GSN_SUB_START_REF, &Dbdict::execSUB_START_REF);
+
+ addRecSignal(GSN_SUB_STOP_REQ, &Dbdict::execSUB_STOP_REQ);
+ addRecSignal(GSN_SUB_STOP_CONF, &Dbdict::execSUB_STOP_CONF);
+ addRecSignal(GSN_SUB_STOP_REF, &Dbdict::execSUB_STOP_REF);
+
+ addRecSignal(GSN_SUB_SYNC_CONF, &Dbdict::execSUB_SYNC_CONF);
+ addRecSignal(GSN_SUB_SYNC_REF, &Dbdict::execSUB_SYNC_REF);
+
+ addRecSignal(GSN_DROP_EVNT_REQ, &Dbdict::execDROP_EVNT_REQ);
+
+ addRecSignal(GSN_SUB_REMOVE_REQ, &Dbdict::execSUB_REMOVE_REQ);
+ addRecSignal(GSN_SUB_REMOVE_CONF, &Dbdict::execSUB_REMOVE_CONF);
+ addRecSignal(GSN_SUB_REMOVE_REF, &Dbdict::execSUB_REMOVE_REF);
+
+ // Trigger signals
+ addRecSignal(GSN_CREATE_TRIG_REQ, &Dbdict::execCREATE_TRIG_REQ);
+ addRecSignal(GSN_CREATE_TRIG_CONF, &Dbdict::execCREATE_TRIG_CONF);
+ addRecSignal(GSN_CREATE_TRIG_REF, &Dbdict::execCREATE_TRIG_REF);
+ addRecSignal(GSN_ALTER_TRIG_REQ, &Dbdict::execALTER_TRIG_REQ);
+ addRecSignal(GSN_ALTER_TRIG_CONF, &Dbdict::execALTER_TRIG_CONF);
+ addRecSignal(GSN_ALTER_TRIG_REF, &Dbdict::execALTER_TRIG_REF);
+ addRecSignal(GSN_DROP_TRIG_REQ, &Dbdict::execDROP_TRIG_REQ);
+ addRecSignal(GSN_DROP_TRIG_CONF, &Dbdict::execDROP_TRIG_CONF);
+ addRecSignal(GSN_DROP_TRIG_REF, &Dbdict::execDROP_TRIG_REF);
+
+ // Received signals
+ addRecSignal(GSN_HOT_SPAREREP, &Dbdict::execHOT_SPAREREP);
+ addRecSignal(GSN_GET_SCHEMA_INFOREQ, &Dbdict::execGET_SCHEMA_INFOREQ);
+ addRecSignal(GSN_SCHEMA_INFO, &Dbdict::execSCHEMA_INFO);
+ addRecSignal(GSN_SCHEMA_INFOCONF, &Dbdict::execSCHEMA_INFOCONF);
+ addRecSignal(GSN_DICTSTARTREQ, &Dbdict::execDICTSTARTREQ);
+ addRecSignal(GSN_READ_NODESCONF, &Dbdict::execREAD_NODESCONF);
+ addRecSignal(GSN_FSOPENCONF, &Dbdict::execFSOPENCONF);
+ addRecSignal(GSN_FSOPENREF, &Dbdict::execFSOPENREF);
+ addRecSignal(GSN_FSCLOSECONF, &Dbdict::execFSCLOSECONF);
+ addRecSignal(GSN_FSCLOSEREF, &Dbdict::execFSCLOSEREF);
+ addRecSignal(GSN_FSWRITECONF, &Dbdict::execFSWRITECONF);
+ addRecSignal(GSN_FSWRITEREF, &Dbdict::execFSWRITEREF);
+ addRecSignal(GSN_FSREADCONF, &Dbdict::execFSREADCONF);
+ addRecSignal(GSN_FSREADREF, &Dbdict::execFSREADREF);
+ addRecSignal(GSN_LQHFRAGCONF, &Dbdict::execLQHFRAGCONF);
+ addRecSignal(GSN_LQHADDATTCONF, &Dbdict::execLQHADDATTCONF);
+ addRecSignal(GSN_LQHADDATTREF, &Dbdict::execLQHADDATTREF);
+ addRecSignal(GSN_LQHFRAGREF, &Dbdict::execLQHFRAGREF);
+ addRecSignal(GSN_NDB_STTOR, &Dbdict::execNDB_STTOR);
+ addRecSignal(GSN_READ_CONFIG_REQ, &Dbdict::execREAD_CONFIG_REQ, true);
+ addRecSignal(GSN_STTOR, &Dbdict::execSTTOR);
+ addRecSignal(GSN_TC_SCHVERCONF, &Dbdict::execTC_SCHVERCONF);
+ addRecSignal(GSN_NODE_FAILREP, &Dbdict::execNODE_FAILREP);
+ addRecSignal(GSN_INCL_NODEREQ, &Dbdict::execINCL_NODEREQ);
+ addRecSignal(GSN_API_FAILREQ, &Dbdict::execAPI_FAILREQ);
+
+ addRecSignal(GSN_WAIT_GCP_REF, &Dbdict::execWAIT_GCP_REF);
+ addRecSignal(GSN_WAIT_GCP_CONF, &Dbdict::execWAIT_GCP_CONF);
+
+ addRecSignal(GSN_LIST_TABLES_REQ, &Dbdict::execLIST_TABLES_REQ);
+
+ addRecSignal(GSN_DROP_TABLE_REQ, &Dbdict::execDROP_TABLE_REQ);
+
+ addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dbdict::execPREP_DROP_TAB_REQ);
+ addRecSignal(GSN_PREP_DROP_TAB_REF, &Dbdict::execPREP_DROP_TAB_REF);
+ addRecSignal(GSN_PREP_DROP_TAB_CONF, &Dbdict::execPREP_DROP_TAB_CONF);
+
+ addRecSignal(GSN_DROP_TAB_REQ, &Dbdict::execDROP_TAB_REQ);
+ addRecSignal(GSN_DROP_TAB_REF, &Dbdict::execDROP_TAB_REF);
+ addRecSignal(GSN_DROP_TAB_CONF, &Dbdict::execDROP_TAB_CONF);
+}//Dbdict::Dbdict()
+
+Dbdict::~Dbdict()
+{
+}//Dbdict::~Dbdict()
+
+BLOCK_FUNCTIONS(Dbdict)
+
+void Dbdict::initCommonData()
+{
+/* ---------------------------------------------------------------- */
+// Initialise all common variables.
+/* ---------------------------------------------------------------- */
+ initRetrieveRecord(0, 0, 0);
+ initSchemaRecord();
+ initRestartRecord();
+ initSendSchemaRecord();
+ initReadTableRecord();
+ initWriteTableRecord();
+ initReadSchemaRecord();
+ initWriteSchemaRecord();
+
+ c_masterNodeId = ZNIL;
+ c_numberNode = 0;
+ c_noNodesFailed = 0;
+ c_failureNr = 0;
+ c_blockState = BS_IDLE;
+ c_packTable.m_state = PackTable::PTS_IDLE;
+ c_startPhase = 0;
+ c_restartType = 255; // guard value: restart type not yet known
+ c_tabinfoReceived = 0;
+ c_initialStart = false;
+ c_systemRestart = false;
+ c_initialNodeRestart = false;
+ c_nodeRestart = false;
+}//Dbdict::initCommonData()
+
+void Dbdict::initRecords()
+{
+ initNodeRecords();
+ initPageRecords();
+ initTableRecords();
+ initTriggerRecords();
+}//Dbdict::initRecords()
+
+void Dbdict::initSendSchemaRecord()
+{
+ c_sendSchemaRecord.noOfWords = (Uint32)-1;
+ c_sendSchemaRecord.pageId = RNIL;
+ c_sendSchemaRecord.noOfWordsCurrentlySent = 0;
+ c_sendSchemaRecord.noOfSignalsSentSinceDelay = 0;
+ c_sendSchemaRecord.inUse = false;
+ //c_sendSchemaRecord.sendSchemaState = SendSchemaRecord::IDLE;
+}//initSendSchemaRecord()
+
+void Dbdict::initReadTableRecord()
+{
+ c_readTableRecord.noOfPages = (Uint32)-1;
+ c_readTableRecord.pageId = RNIL;
+ c_readTableRecord.tableId = ZNIL;
+ c_readTableRecord.inUse = false;
+}//initReadTableRecord()
+
+void Dbdict::initWriteTableRecord()
+{
+ c_writeTableRecord.noOfPages = (Uint32)-1;
+ c_writeTableRecord.pageId = RNIL;
+ c_writeTableRecord.noOfTableFilesHandled = 3;
+ c_writeTableRecord.tableId = ZNIL;
+ c_writeTableRecord.tableWriteState = WriteTableRecord::IDLE;
+}//initWriteTableRecord()
+
+void Dbdict::initReadSchemaRecord()
+{
+ c_readSchemaRecord.pageId = RNIL;
+ c_readSchemaRecord.schemaReadState = ReadSchemaRecord::IDLE;
+}//initReadSchemaRecord()
+
+void Dbdict::initWriteSchemaRecord()
+{
+ c_writeSchemaRecord.inUse = false;
+ c_writeSchemaRecord.pageId = RNIL;
+ c_writeSchemaRecord.noOfSchemaFilesHandled = 3;
+}//initWriteSchemaRecord()
+
+void Dbdict::initRetrieveRecord(Signal* signal, Uint32 i, Uint32 returnCode)
+{
+ c_retrieveRecord.busyState = false;
+ c_retrieveRecord.blockRef = 0;
+ c_retrieveRecord.m_senderData = RNIL;
+ c_retrieveRecord.tableId = RNIL;
+ c_retrieveRecord.currentSent = 0;
+ c_retrieveRecord.retrievedNoOfPages = 0;
+ c_retrieveRecord.retrievedNoOfWords = 0;
+ c_retrieveRecord.m_useLongSig = false;
+}//initRetrieveRecord()
+
+void Dbdict::initSchemaRecord()
+{
+ c_schemaRecord.schemaPage = RNIL;
+ c_schemaRecord.oldSchemaPage = RNIL;
+}//Dbdict::initSchemaRecord()
+
+void Dbdict::initRestartRecord()
+{
+ c_restartRecord.gciToRestart = 0;
+ c_restartRecord.activeTable = ZNIL;
+}//Dbdict::initRestartRecord()
+
+void Dbdict::initNodeRecords()
+{
+ jam();
+ for (unsigned i = 1; i < MAX_NODES; i++) {
+ NodeRecordPtr nodePtr;
+ c_nodes.getPtr(nodePtr, i);
+ nodePtr.p->hotSpare = false;
+ nodePtr.p->nodeState = NodeRecord::API_NODE;
+ }//for
+}//Dbdict::initNodeRecords()
+
+void Dbdict::initPageRecords()
+{
+ c_retrieveRecord.retrievePage = ZMAX_PAGES_OF_TABLE_DEFINITION;
+ ndbrequire(ZNUMBER_OF_PAGES >= (ZMAX_PAGES_OF_TABLE_DEFINITION + 1));
+ c_schemaRecord.schemaPage = 0;
+ c_schemaRecord.oldSchemaPage = NDB_SF_MAX_PAGES;
+}//Dbdict::initPageRecords()
+
+void Dbdict::initTableRecords()
+{
+ TableRecordPtr tablePtr;
+ while (1) {
+ jam();
+ refresh_watch_dog();
+ c_tableRecordPool.seize(tablePtr);
+ if (tablePtr.i == RNIL) {
+ jam();
+ break;
+ }//if
+ initialiseTableRecord(tablePtr);
+ }//while
+}//Dbdict::initTableRecords()
+
+void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
+{
+ tablePtr.p->activePage = RNIL;
+ tablePtr.p->filePtr[0] = RNIL;
+ tablePtr.p->filePtr[1] = RNIL;
+ tablePtr.p->firstAttribute = RNIL;
+ tablePtr.p->firstPage = RNIL;
+ tablePtr.p->lastAttribute = RNIL;
+ tablePtr.p->tableId = tablePtr.i;
+ tablePtr.p->tableVersion = (Uint32)-1;
+ tablePtr.p->tabState = TableRecord::NOT_DEFINED;
+ tablePtr.p->tabReturnState = TableRecord::TRS_IDLE;
+ tablePtr.p->myConnect = RNIL;
+ tablePtr.p->fragmentType = DictTabInfo::AllNodesSmallTable;
+ memset(tablePtr.p->tableName, 0, sizeof(tablePtr.p->tableName));
+ tablePtr.p->gciTableCreated = 0;
+ tablePtr.p->noOfAttributes = ZNIL;
+ tablePtr.p->noOfNullAttr = 0;
+ tablePtr.p->ngLen = 0;
+ memset(tablePtr.p->ngData, 0, sizeof(tablePtr.p->ngData));
+ tablePtr.p->frmLen = 0;
+ memset(tablePtr.p->frmData, 0, sizeof(tablePtr.p->frmData));
+ tablePtr.p->fragmentCount = 0;
+ /*
+ tablePtr.p->lh3PageIndexBits = 0;
+ tablePtr.p->lh3DistrBits = 0;
+ tablePtr.p->lh3PageBits = 6;
+ */
+ tablePtr.p->kValue = 6;
+ tablePtr.p->localKeyLen = 1;
+ tablePtr.p->maxLoadFactor = 80;
+ tablePtr.p->minLoadFactor = 70;
+ tablePtr.p->noOfPrimkey = 1;
+ tablePtr.p->tupKeyLength = 1;
+ tablePtr.p->storedTable = true;
+ tablePtr.p->tableType = DictTabInfo::UserTable;
+ tablePtr.p->primaryTableId = RNIL;
+ // volatile elements
+ tablePtr.p->indexState = TableRecord::IS_UNDEFINED;
+ tablePtr.p->insertTriggerId = RNIL;
+ tablePtr.p->updateTriggerId = RNIL;
+ tablePtr.p->deleteTriggerId = RNIL;
+ tablePtr.p->customTriggerId = RNIL;
+ tablePtr.p->buildTriggerId = RNIL;
+ tablePtr.p->indexLocal = 0;
+}//Dbdict::initialiseTableRecord()
+
+void Dbdict::initTriggerRecords()
+{
+ TriggerRecordPtr triggerPtr;
+ while (1) {
+ jam();
+ refresh_watch_dog();
+ c_triggerRecordPool.seize(triggerPtr);
+ if (triggerPtr.i == RNIL) {
+ jam();
+ break;
+ }//if
+ initialiseTriggerRecord(triggerPtr);
+ }//while
+}
+
+void Dbdict::initialiseTriggerRecord(TriggerRecordPtr triggerPtr)
+{
+ triggerPtr.p->triggerState = TriggerRecord::TS_NOT_DEFINED;
+ triggerPtr.p->triggerLocal = 0;
+ memset(triggerPtr.p->triggerName, 0, sizeof(triggerPtr.p->triggerName));
+ triggerPtr.p->triggerId = RNIL;
+ triggerPtr.p->tableId = RNIL;
+ triggerPtr.p->triggerType = (TriggerType::Value)~0;
+ triggerPtr.p->triggerActionTime = (TriggerActionTime::Value)~0;
+ triggerPtr.p->triggerEvent = (TriggerEvent::Value)~0;
+ triggerPtr.p->monitorReplicas = false;
+ triggerPtr.p->monitorAllAttributes = false;
+ triggerPtr.p->attributeMask.clear();
+ triggerPtr.p->indexId = RNIL;
+}
+
+Uint32 Dbdict::getFsConnRecord()
+{
+ FsConnectRecordPtr fsPtr;
+ c_fsConnectRecordPool.seize(fsPtr);
+ ndbrequire(fsPtr.i != RNIL);
+ fsPtr.p->filePtr = (Uint32)-1;
+ fsPtr.p->ownerPtr = RNIL;
+ fsPtr.p->fsState = FsConnectRecord::IDLE;
+ return fsPtr.i;
+}//Dbdict::getFsConnRecord()
+
+Uint32 Dbdict::getFreeTableRecord(Uint32 primaryTableId)
+{
+ Uint32 minId = (primaryTableId == RNIL ? 0 : primaryTableId + 1);
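+ // When a primary table id is given (i.e. an index is being created),
+ // start the scan just past it, so the index gets a table id above its
+ // primary table.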
+ TableRecordPtr tablePtr;
+ TableRecordPtr firstTablePtr;
+ bool firstFound = false;
+ Uint32 tabSize = c_tableRecordPool.getSize();
+ for (tablePtr.i = minId; tablePtr.i < tabSize ; tablePtr.i++) {
+ jam();
+ c_tableRecordPool.getPtr(tablePtr);
+ if (tablePtr.p->tabState == TableRecord::NOT_DEFINED) {
+ jam();
+ initialiseTableRecord(tablePtr);
+ tablePtr.p->tabState = TableRecord::DEFINING;
+ firstFound = true;
+ firstTablePtr.i = tablePtr.i;
+ firstTablePtr.p = tablePtr.p;
+ break;
+ }//if
+ }//for
+ if (!firstFound) {
+ jam();
+ return RNIL;
+ }//if
+#ifdef HAVE_TABLE_REORG
+ bool secondFound = false;
+ for (tablePtr.i = firstTablePtr.i + 1; tablePtr.i < tabSize ; tablePtr.i++) {
+ jam();
+ c_tableRecordPool.getPtr(tablePtr);
+ if (tablePtr.p->tabState == TableRecord::NOT_DEFINED) {
+ jam();
+ initialiseTableRecord(tablePtr);
+ tablePtr.p->tabState = TableRecord::REORG_TABLE_PREPARED;
+ tablePtr.p->secondTable = firstTablePtr.i;
+ firstTablePtr.p->secondTable = tablePtr.i;
+ secondFound = true;
+ break;
+ }//if
+ }//for
+ if (!secondFound) {
+ jam();
+ firstTablePtr.p->tabState = TableRecord::NOT_DEFINED;
+ return RNIL;
+ }//if
+#endif
+ return firstTablePtr.i;
+}//Dbdict::getFreeTableRecord()
+
+Uint32 Dbdict::getFreeTriggerRecord()
+{
+ const Uint32 size = c_triggerRecordPool.getSize();
+ TriggerRecordPtr triggerPtr;
+ for (triggerPtr.i = 0; triggerPtr.i < size; triggerPtr.i++) {
+ jam();
+ c_triggerRecordPool.getPtr(triggerPtr);
+ if (triggerPtr.p->triggerState == TriggerRecord::TS_NOT_DEFINED) {
+ jam();
+ initialiseTriggerRecord(triggerPtr);
+ return triggerPtr.i;
+ }
+ }
+ return RNIL;
+}
+
+bool
+Dbdict::getNewAttributeRecord(TableRecordPtr tablePtr,
+ AttributeRecordPtr & attrPtr)
+{
+ c_attributeRecordPool.seize(attrPtr);
+ if(attrPtr.i == RNIL){
+ return false;
+ }
+
+ memset(attrPtr.p->attributeName, 0, sizeof(attrPtr.p->attributeName));
+ attrPtr.p->attributeDescriptor = 0x00012255; //Default value
+ attrPtr.p->attributeId = ZNIL;
+ attrPtr.p->nextAttrInTable = RNIL;
+ attrPtr.p->tupleKey = 0;
+ memset(attrPtr.p->defaultValue, 0, sizeof(attrPtr.p->defaultValue));
+
+ /* ---------------------------------------------------------------- */
+ // A free attribute record has been acquired. We will now link it
+ // to the table record.
+ /* ---------------------------------------------------------------- */
+ if (tablePtr.p->lastAttribute == RNIL) {
+ jam();
+ tablePtr.p->firstAttribute = attrPtr.i;
+ } else {
+ jam();
+ AttributeRecordPtr lastAttrPtr;
+ c_attributeRecordPool.getPtr(lastAttrPtr, tablePtr.p->lastAttribute);
+ lastAttrPtr.p->nextAttrInTable = attrPtr.i;
+ }//if
+ tablePtr.p->lastAttribute = attrPtr.i;
+ return true;
+}//Dbdict::getNewAttributeRecord()
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: START/RESTART HANDLING ------------------------ */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains the code that is common for all */
+/* start/restart types. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+/* ---------------------------------------------------------------- */
+// This is sent as the first signal during start/restart.
+/* ---------------------------------------------------------------- */
+void Dbdict::execSTTOR(Signal* signal)
+{
+ jamEntry();
+ c_startPhase = signal->theData[1];
+ switch (c_startPhase) {
+ case 1:
+ break;
+ case 3:
+ c_restartType = signal->theData[7]; /* only valid in start phase 3 */
+ ndbrequire(c_restartType == NodeState::ST_INITIAL_START ||
+ c_restartType == NodeState::ST_SYSTEM_RESTART ||
+ c_restartType == NodeState::ST_INITIAL_NODE_RESTART ||
+ c_restartType == NodeState::ST_NODE_RESTART);
+ break;
+ }
+ sendSTTORRY(signal);
+}//execSTTOR()
+
+void Dbdict::sendSTTORRY(Signal* signal)
+{
+ signal->theData[0] = 0; /* garbage SIGNAL KEY */
+ signal->theData[1] = 0; /* garbage SIGNAL VERSION NUMBER */
+ signal->theData[2] = 0; /* garbage */
+ signal->theData[3] = 1; /* first wanted start phase */
+ signal->theData[4] = 3; /* get type of start */
+ signal->theData[5] = ZNOMOREPHASES;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 6, JBB);
+}
+
+/* ---------------------------------------------------------------- */
+// We receive information about sizes of records.
+/* ---------------------------------------------------------------- */
+void Dbdict::execREAD_CONFIG_REQ(Signal* signal)
+{
+ const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
+ Uint32 ref = req->senderRef;
+ Uint32 senderData = req->senderData;
+ ndbrequire(req->noOfParameters == 0);
+
+ jamEntry();
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ Uint32 attributesize, tablerecSize;
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_ATTRIBUTE,&attributesize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &tablerecSize));
+
+ c_attributeRecordPool.setSize(attributesize);
+ c_attributeRecordHash.setSize(64);
+ c_fsConnectRecordPool.setSize(ZFS_CONNECT_SIZE);
+ c_nodes.setSize(MAX_NODES);
+ c_pageRecordArray.setSize(ZNUMBER_OF_PAGES);
+ c_schemaPageRecordArray.setSize(2 * NDB_SF_MAX_PAGES);
+ c_tableRecordPool.setSize(tablerecSize);
+ c_tableRecordHash.setSize(tablerecSize);
+ c_triggerRecordPool.setSize(c_maxNoOfTriggers);
+ c_triggerRecordHash.setSize(c_maxNoOfTriggers);
+ c_opRecordPool.setSize(256); // XXX need config params
+ c_opCreateTable.setSize(8);
+ c_opDropTable.setSize(8);
+ c_opCreateIndex.setSize(8);
+ c_opCreateEvent.setSize(8);
+ c_opSubEvent.setSize(8);
+ c_opDropEvent.setSize(8);
+ c_opSignalUtil.setSize(8);
+ c_opDropIndex.setSize(8);
+ c_opAlterIndex.setSize(8);
+ c_opBuildIndex.setSize(8);
+ c_opCreateTrigger.setSize(8);
+ c_opDropTrigger.setSize(8);
+ c_opAlterTrigger.setSize(8);
+
+ // Initialize schema file copies
+ c_schemaFile[0].schemaPage =
+ (SchemaFile*)c_schemaPageRecordArray.getPtr(0 * NDB_SF_MAX_PAGES);
+ c_schemaFile[0].noOfPages = 0;
+ c_schemaFile[1].schemaPage =
+ (SchemaFile*)c_schemaPageRecordArray.getPtr(1 * NDB_SF_MAX_PAGES);
+ c_schemaFile[1].noOfPages = 0;
+
+ // Initialize BAT for interface to file system
+ NewVARIABLE* bat = allocateBat(2);
+ bat[0].WA = &c_schemaPageRecordArray.getPtr(0)->word[0];
+ bat[0].nrr = 2 * NDB_SF_MAX_PAGES;
+ bat[0].ClusterSize = NDB_SF_PAGE_SIZE;
+ bat[0].bits.q = NDB_SF_PAGE_SIZE_IN_WORDS_LOG2;
+ bat[0].bits.v = 5; // 32 bits per element
+ bat[1].WA = &c_pageRecordArray.getPtr(0)->word[0];
+ bat[1].nrr = ZNUMBER_OF_PAGES;
+ bat[1].ClusterSize = ZSIZE_OF_PAGES_IN_WORDS * 4;
+ bat[1].bits.q = ZLOG_SIZE_OF_PAGES_IN_WORDS; // 2**13 = 8192 elements
+ bat[1].bits.v = 5; // 32 bits per element
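+ // bat[0] backs the schema file pages and bat[1] the table definition
+ // pages; these presumably correspond to the ZBAT_SCHEMA_FILE and
+ // ZBAT_TABLE_FILE varIndex values used in the FSREADREQ/FSWRITEREQ
+ // signals above.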
+
+ initCommonData();
+ initRecords();
+
+ ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
+ ReadConfigConf::SignalLength, JBB);
+}//execREAD_CONFIG_REQ()
+
+/* ---------------------------------------------------------------- */
+// Start phase signals sent by CNTR. We reply with NDB_STTORRY when
+// we have completed this phase.
+/* ---------------------------------------------------------------- */
+void Dbdict::execNDB_STTOR(Signal* signal)
+{
+ jamEntry();
+ c_startPhase = signal->theData[2];
+ const Uint32 restartType = signal->theData[3];
+ if (restartType == NodeState::ST_INITIAL_START) {
+ jam();
+ c_initialStart = true;
+ } else if (restartType == NodeState::ST_SYSTEM_RESTART) {
+ jam();
+ c_systemRestart = true;
+ } else if (restartType == NodeState::ST_INITIAL_NODE_RESTART) {
+ jam();
+ c_initialNodeRestart = true;
+ } else if (restartType == NodeState::ST_NODE_RESTART) {
+ jam();
+ c_nodeRestart = true;
+ } else {
+ ndbrequire(false);
+ }//if
+ switch (c_startPhase) {
+ case 1:
+ jam();
+ initSchemaFile(signal);
+ break;
+ case 3:
+ jam();
+ signal->theData[0] = reference();
+ sendSignal(NDBCNTR_REF, GSN_READ_NODESREQ, signal, 1, JBB);
+ break;
+ case 6:
+ jam();
+ c_initialStart = false;
+ c_systemRestart = false;
+ c_initialNodeRestart = false;
+ c_nodeRestart = false;
+ sendNDB_STTORRY(signal);
+ break;
+ case 7:
+ // uses c_restartType
+ if(restartType == NodeState::ST_SYSTEM_RESTART &&
+ c_masterNodeId == getOwnNodeId()){
+ rebuildIndexes(signal, 0);
+ return;
+ }
+ sendNDB_STTORRY(signal);
+ break;
+ default:
+ jam();
+ sendNDB_STTORRY(signal);
+ break;
+ }//switch
+}//execNDB_STTOR()
+
+void Dbdict::sendNDB_STTORRY(Signal* signal)
+{
+ signal->theData[0] = reference();
+ sendSignal(NDBCNTR_REF, GSN_NDB_STTORRY, signal, 1, JBB);
+ return;
+}//sendNDB_STTORRY()
+
+/* ---------------------------------------------------------------- */
+// We receive the information about which nodes are up and down.
+/* ---------------------------------------------------------------- */
+void Dbdict::execREAD_NODESCONF(Signal* signal)
+{
+ jamEntry();
+
+ ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
+ c_numberNode = readNodes->noOfNodes;
+ c_masterNodeId = readNodes->masterNodeId;
+
+ c_noNodesFailed = 0;
+ c_aliveNodes.clear();
+ for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
+ jam();
+ NodeRecordPtr nodePtr;
+ c_nodes.getPtr(nodePtr, i);
+
+ if (NodeBitmask::get(readNodes->allNodes, i)) {
+ jam();
+ nodePtr.p->nodeState = NodeRecord::NDB_NODE_ALIVE;
+ if (NodeBitmask::get(readNodes->inactiveNodes, i)) {
+ jam();
+ /**-------------------------------------------------------------------
+ *
+ * THIS NODE IS DEFINED IN THE CLUSTER BUT IS NOT ALIVE CURRENTLY.
+ * WE ADD THE NODE TO THE SET OF FAILED NODES AND ALSO SET THE
+ * BLOCKSTATE TO BUSY TO AVOID ADDING TABLES WHILE NOT ALL NODES ARE
+ * ALIVE.
+ *------------------------------------------------------------------*/
+ nodePtr.p->nodeState = NodeRecord::NDB_NODE_DEAD;
+ c_noNodesFailed++;
+ } else {
+ c_aliveNodes.set(i);
+ }
+ }//if
+ }//for
+ sendNDB_STTORRY(signal);
+}//execREAD_NODESCONF()
+
+/* ---------------------------------------------------------------- */
+// HOT_SPAREREP informs DBDICT about which nodes have become
+// hot spare nodes.
+/* ---------------------------------------------------------------- */
+void Dbdict::execHOT_SPAREREP(Signal* signal)
+{
+ Uint32 hotSpareNodes = 0;
+ jamEntry();
+ HotSpareRep * const hotSpare = (HotSpareRep*)&signal->theData[0];
+ for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
+ if (NodeBitmask::get(hotSpare->theHotSpareNodes, i)) {
+ NodeRecordPtr nodePtr;
+ c_nodes.getPtr(nodePtr, i);
+ nodePtr.p->hotSpare = true;
+ hotSpareNodes++;
+ }//if
+ }//for
+ ndbrequire(hotSpareNodes == hotSpare->noHotSpareNodes);
+ c_noHotSpareNodes = hotSpareNodes;
+ return;
+}//execHOT_SPAREREP()
+
+void Dbdict::initSchemaFile(Signal* signal)
+{
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ xsf->noOfPages = (c_tableRecordPool.getSize() + NDB_SF_PAGE_ENTRIES - 1)
+ / NDB_SF_PAGE_ENTRIES;
+ initSchemaFile(xsf, 0, xsf->noOfPages, true);
+ // initialise the alternate copy too, for initial node restart (INR)
+ XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
+ oldxsf->noOfPages = xsf->noOfPages;
+ memcpy(&oldxsf->schemaPage[0], &xsf->schemaPage[0], xsf->schemaPage[0].FileSize);
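+ // The schema pages of one copy are contiguous in memory, so copying
+ // FileSize bytes from page 0 is assumed to duplicate the whole image.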
+
+ if (c_initialStart || c_initialNodeRestart) {
+ jam();
+ ndbrequire(c_writeSchemaRecord.inUse == false);
+ c_writeSchemaRecord.inUse = true;
+ c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+ c_writeSchemaRecord.newFile = true;
+ c_writeSchemaRecord.firstPage = 0;
+ c_writeSchemaRecord.noOfPages = xsf->noOfPages;
+
+ c_writeSchemaRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::initSchemaFile_conf);
+
+ startWriteSchemaFile(signal);
+ } else if (c_systemRestart || c_nodeRestart) {
+ jam();
+ ndbrequire(c_readSchemaRecord.schemaReadState == ReadSchemaRecord::IDLE);
+ c_readSchemaRecord.pageId = c_schemaRecord.oldSchemaPage;
+ c_readSchemaRecord.firstPage = 0;
+ c_readSchemaRecord.noOfPages = 1;
+ c_readSchemaRecord.schemaReadState = ReadSchemaRecord::INITIAL_READ_HEAD;
+ startReadSchemaFile(signal);
+ } else {
+ ndbrequire(false);
+ }//if
+}//Dbdict::initSchemaFile()
+
+void
+Dbdict::initSchemaFile_conf(Signal* signal, Uint32 callbackData, Uint32 rv){
+ jam();
+ sendNDB_STTORRY(signal);
+}
+
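+/* ---------------------------------------------------------------- */
+// Bring indexes online after restart. One ALTER_INDX_REQ is issued per
+// index (sent to ourselves) and the scan then stops; the confirmation
+// is presumably what re-enters this routine with the next index, until
+// all indexes are handled and DICTSTARTCONF is returned to the starter.
+/* ---------------------------------------------------------------- */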
+void
+Dbdict::activateIndexes(Signal* signal, Uint32 i)
+{
+ AlterIndxReq* req = (AlterIndxReq*)signal->getDataPtrSend();
+ TableRecordPtr tablePtr;
+ for (; i < c_tableRecordPool.getSize(); i++) {
+ tablePtr.i = i;
+ c_tableRecordPool.getPtr(tablePtr);
+ if (tablePtr.p->tabState != TableRecord::DEFINED)
+ continue;
+ if (! tablePtr.p->isIndex())
+ continue;
+ jam();
+ req->setUserRef(reference());
+ req->setConnectionPtr(i);
+ req->setTableId(tablePtr.p->primaryTableId);
+ req->setIndexId(tablePtr.i);
+ req->setIndexVersion(tablePtr.p->tableVersion);
+ req->setOnline(true);
+ if (c_restartType == NodeState::ST_SYSTEM_RESTART) {
+ if (c_masterNodeId != getOwnNodeId())
+ continue;
+ // the index state read from file is currently not defined
+ req->setRequestType(AlterIndxReq::RT_SYSTEMRESTART);
+ req->addRequestFlag((Uint32)RequestFlag::RF_NOBUILD);
+ }
+ else if (
+ c_restartType == NodeState::ST_NODE_RESTART ||
+ c_restartType == NodeState::ST_INITIAL_NODE_RESTART) {
+ // only activate if the index is online according to the master
+ if (tablePtr.p->indexState != TableRecord::IS_ONLINE)
+ continue;
+ req->setRequestType(AlterIndxReq::RT_NODERESTART);
+ // activate locally, rebuild not needed
+ req->addRequestFlag((Uint32)RequestFlag::RF_LOCAL);
+ req->addRequestFlag((Uint32)RequestFlag::RF_NOBUILD);
+ } else {
+ ndbrequire(false);
+ }
+ sendSignal(reference(), GSN_ALTER_INDX_REQ,
+ signal, AlterIndxReq::SignalLength, JBB);
+ return;
+ }
+ signal->theData[0] = reference();
+ sendSignal(c_restartRecord.returnBlockRef, GSN_DICTSTARTCONF,
+ signal, 1, JBB);
+}
+
+void
+Dbdict::rebuildIndexes(Signal* signal, Uint32 i){
+ BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
+
+ TableRecordPtr indexPtr;
+ for (; i < c_tableRecordPool.getSize(); i++) {
+ indexPtr.i = i;
+ c_tableRecordPool.getPtr(indexPtr);
+ if (indexPtr.p->tabState != TableRecord::DEFINED)
+ continue;
+ if (! indexPtr.p->isIndex())
+ continue;
+
+ jam();
+
+ req->setUserRef(reference());
+ req->setConnectionPtr(i);
+ req->setRequestType(BuildIndxReq::RT_SYSTEMRESTART);
+ req->setBuildId(0); // not used
+ req->setBuildKey(0); // not used
+ req->setIndexType(indexPtr.p->tableType);
+ req->setIndexId(indexPtr.i);
+ req->setTableId(indexPtr.p->primaryTableId);
+ req->setParallelism(16);
+
+ // the index state read from file is currently not defined
+ if (indexPtr.p->storedTable) {
+ // rebuild not needed
+ req->addRequestFlag((Uint32)RequestFlag::RF_NOBUILD);
+ }
+
+ // send
+ sendSignal(reference(), GSN_BUILDINDXREQ,
+ signal, BuildIndxReq::SignalLength, JBB);
+ return;
+ }
+ sendNDB_STTORRY(signal);
+}
+
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: SYSTEM RESTART MODULE ------------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains code specific for system restart */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+/* ---------------------------------------------------------------- */
+// DIH asks DICT to read in table data from disk during system
+// restart. DIH also asks DICT to send information about which
+// tables should be started as part of this system restart.
+// DICT will also activate the tables in TC as part of this process.
+/* ---------------------------------------------------------------- */
+void Dbdict::execDICTSTARTREQ(Signal* signal)
+{
+ jamEntry();
+ c_restartRecord.gciToRestart = signal->theData[0];
+ c_restartRecord.returnBlockRef = signal->theData[1];
+ if (c_nodeRestart || c_initialNodeRestart) {
+ jam();
+
+ CRASH_INSERTION(6000);
+
+ BlockReference dictRef = calcDictBlockRef(c_masterNodeId);
+ signal->theData[0] = getOwnNodeId();
+ sendSignal(dictRef, GSN_GET_SCHEMA_INFOREQ, signal, 1, JBB);
+ return;
+ }
+ ndbrequire(c_systemRestart);
+ ndbrequire(c_masterNodeId == getOwnNodeId());
+
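+ // System restart on the master: walk through the old schema file and
+ // check the status of every table. When the walk completes, the
+ // callback distributes the schema info to the other alive nodes.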
+ c_schemaRecord.m_callback.m_callbackData = 0;
+ c_schemaRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::masterRestart_checkSchemaStatusComplete);
+
+ c_restartRecord.activeTable = 0;
+ c_schemaRecord.schemaPage = c_schemaRecord.oldSchemaPage; // ugly
+ checkSchemaStatus(signal);
+}//execDICTSTARTREQ()
+
+void
+Dbdict::masterRestart_checkSchemaStatusComplete(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+
+ c_schemaRecord.schemaPage = 0; // ugly
+ XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
+ ndbrequire(oldxsf->noOfPages != 0);
+
+ LinearSectionPtr ptr[3];
+ ptr[0].p = (Uint32*)&oldxsf->schemaPage[0];
+ ptr[0].sz = oldxsf->noOfPages * NDB_SF_PAGE_SIZE_IN_WORDS;
+
+ c_sendSchemaRecord.m_SCHEMAINFO_Counter = c_aliveNodes;
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+
+ rg.m_nodes.clear(getOwnNodeId());
+ Callback c = { 0, 0 };
+ sendFragmentedSignal(rg,
+ GSN_SCHEMA_INFO,
+ signal,
+ 1, //SchemaInfo::SignalLength,
+ JBB,
+ ptr,
+ 1,
+ c);
+
+ XSchemaFile * newxsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ newxsf->noOfPages = oldxsf->noOfPages;
+ memcpy(&newxsf->schemaPage[0], &oldxsf->schemaPage[0],
+ oldxsf->noOfPages * NDB_SF_PAGE_SIZE);
+
+ signal->theData[0] = getOwnNodeId();
+ sendSignal(reference(), GSN_SCHEMA_INFOCONF, signal, 1, JBB);
+}
+
+void
+Dbdict::execGET_SCHEMA_INFOREQ(Signal* signal){
+
+ const Uint32 ref = signal->getSendersBlockRef();
+ //const Uint32 senderData = signal->theData[0];
+
+ ndbrequire(c_sendSchemaRecord.inUse == false);
+ c_sendSchemaRecord.inUse = true;
+
+ LinearSectionPtr ptr[3];
+
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ ndbrequire(xsf->noOfPages != 0);
+
+ ptr[0].p = (Uint32*)&xsf->schemaPage[0];
+ ptr[0].sz = xsf->noOfPages * NDB_SF_PAGE_SIZE_IN_WORDS;
+
+ Callback c = { safe_cast(&Dbdict::sendSchemaComplete), 0 };
+ sendFragmentedSignal(ref,
+ GSN_SCHEMA_INFO,
+ signal,
+ 1, //GetSchemaInfoConf::SignalLength,
+ JBB,
+ ptr,
+ 1,
+ c);
+}//Dbdict::execGET_SCHEMA_INFOREQ()
+
+void
+Dbdict::sendSchemaComplete(Signal * signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ ndbrequire(c_sendSchemaRecord.inUse == true);
+ c_sendSchemaRecord.inUse = false;
+
+}
+
+
+/* ---------------------------------------------------------------- */
+// We receive the schema info from master as part of all restarts
+// except the initial start, where no tables exist.
+/* ---------------------------------------------------------------- */
+void Dbdict::execSCHEMA_INFO(Signal* signal)
+{
+ jamEntry();
+ if(!assembleFragments(signal)){
+ jam();
+ return;
+ }
+
+ if(getNodeState().getNodeRestartInProgress()){
+ CRASH_INSERTION(6001);
+ }
+
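+ // Copy the received schema pages into our current schema file,
+ // upgrade the format if it was written by a pre-5.0.6 node and
+ // validate the checksum.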
+ SegmentedSectionPtr schemaDataPtr;
+ signal->getSection(schemaDataPtr, 0);
+
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ ndbrequire(schemaDataPtr.sz % NDB_SF_PAGE_SIZE_IN_WORDS == 0);
+ xsf->noOfPages = schemaDataPtr.sz / NDB_SF_PAGE_SIZE_IN_WORDS;
+ copy((Uint32*)&xsf->schemaPage[0], schemaDataPtr);
+ releaseSections(signal);
+
+ SchemaFile * sf0 = &xsf->schemaPage[0];
+ if (sf0->NdbVersion < NDB_SF_VERSION_5_0_6) {
+ bool ok = convertSchemaFileTo_5_0_6(xsf);
+ ndbrequire(ok);
+ }
+
+ validateChecksum(xsf);
+
+ XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
+ resizeSchemaFile(xsf, oldxsf->noOfPages);
+
+ ndbrequire(signal->getSendersBlockRef() != reference());
+
+ /* ---------------------------------------------------------------- */
+ // Synchronise our view on data with other nodes in the cluster.
+ // This is an important part of restart handling where we will handle
+ // cases where a table has been added but only partially, where
+ // tables have been deleted but the deletion has not yet completed, and
+ // other scenarios needing synchronisation.
+ /* ---------------------------------------------------------------- */
+ c_schemaRecord.m_callback.m_callbackData = 0;
+ c_schemaRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::restart_checkSchemaStatusComplete);
+ c_restartRecord.activeTable = 0;
+ checkSchemaStatus(signal);
+}//execSCHEMA_INFO()
+
+void
+Dbdict::restart_checkSchemaStatusComplete(Signal * signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+
+ ndbrequire(c_writeSchemaRecord.inUse == false);
+ c_writeSchemaRecord.inUse = true;
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+ c_writeSchemaRecord.newFile = true;
+ c_writeSchemaRecord.firstPage = 0;
+ c_writeSchemaRecord.noOfPages = xsf->noOfPages;
+ c_writeSchemaRecord.m_callback.m_callbackData = 0;
+ c_writeSchemaRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::restart_writeSchemaConf);
+
+ startWriteSchemaFile(signal);
+}
+
+void
+Dbdict::restart_writeSchemaConf(Signal * signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+
+ if(c_systemRestart){
+ jam();
+ signal->theData[0] = getOwnNodeId();
+ sendSignal(calcDictBlockRef(c_masterNodeId), GSN_SCHEMA_INFOCONF,
+ signal, 1, JBB);
+ return;
+ }
+
+ ndbrequire(c_nodeRestart || c_initialNodeRestart);
+ c_blockState = BS_IDLE;
+ activateIndexes(signal, 0);
+ return;
+}
+
+void Dbdict::execSCHEMA_INFOCONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 0);
+
+/* ---------------------------------------------------------------- */
+// This signal is received in the master as part of system restart
+// from all nodes (including the master) after they have synchronised
+// their data with the master node's schema information.
+/* ---------------------------------------------------------------- */
+ const Uint32 nodeId = signal->theData[0];
+ c_sendSchemaRecord.m_SCHEMAINFO_Counter.clearWaitingFor(nodeId);
+
+ if (!c_sendSchemaRecord.m_SCHEMAINFO_Counter.done()){
+ jam();
+ return;
+ }//if
+ activateIndexes(signal, 0);
+}//execSCHEMA_INFOCONF()
+
+void Dbdict::checkSchemaStatus(Signal* signal)
+{
+ XSchemaFile * newxsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
+ ndbrequire(newxsf->noOfPages == oldxsf->noOfPages);
+ const Uint32 noOfEntries = newxsf->noOfPages * NDB_SF_PAGE_ENTRIES;
+
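+ // For every table entry, compare the state in the current schema file
+ // (on non-master nodes this is the copy just received from the master)
+ // with the state in our old schema file and decide whether the table
+ // must be dropped, re-created (from the master or from our own disk)
+ // or can be kept. Drop/create runs asynchronously and re-enters
+ // checkSchemaStatus() with the next activeTable.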
+ for (; c_restartRecord.activeTable < noOfEntries;
+ c_restartRecord.activeTable++) {
+ jam();
+
+ Uint32 tableId = c_restartRecord.activeTable;
+ SchemaFile::TableEntry *newEntry = getTableEntry(newxsf, tableId);
+ SchemaFile::TableEntry *oldEntry = getTableEntry(oldxsf, tableId);
+ SchemaFile::TableState schemaState =
+ (SchemaFile::TableState)newEntry->m_tableState;
+ SchemaFile::TableState oldSchemaState =
+ (SchemaFile::TableState)oldEntry->m_tableState;
+
+ if (c_restartRecord.activeTable >= c_tableRecordPool.getSize()) {
+ jam();
+ ndbrequire(schemaState == SchemaFile::INIT);
+ ndbrequire(oldSchemaState == SchemaFile::INIT);
+ continue;
+ }//if
+
+ switch(schemaState){
+ case SchemaFile::INIT:{
+ jam();
+ bool ok = false;
+ switch(oldSchemaState) {
+ case SchemaFile::INIT:
+ jam();
+ case SchemaFile::DROP_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ jam();
+ break;
+
+ case SchemaFile::ADD_STARTED:
+ jam();
+ case SchemaFile::TABLE_ADD_COMMITTED:
+ jam();
+ case SchemaFile::DROP_TABLE_STARTED:
+ jam();
+ case SchemaFile::ALTER_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ jam();
+ newEntry->m_tableState = SchemaFile::INIT;
+ restartDropTab(signal, tableId);
+ return;
+ }//switch
+ ndbrequire(ok);
+ break;
+ }
+ case SchemaFile::ADD_STARTED:{
+ jam();
+ bool ok = false;
+ switch(oldSchemaState) {
+ case SchemaFile::INIT:
+ jam();
+ case SchemaFile::DROP_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ break;
+ case SchemaFile::ADD_STARTED:
+ jam();
+ case SchemaFile::DROP_TABLE_STARTED:
+ jam();
+ case SchemaFile::TABLE_ADD_COMMITTED:
+ jam();
+ case SchemaFile::ALTER_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ //------------------------------------------------------------------
+ // Add Table was started but not completed. Will be dropped in all
+ // nodes. Update schema information (restore table version).
+ //------------------------------------------------------------------
+ newEntry->m_tableState = SchemaFile::INIT;
+ restartDropTab(signal, tableId);
+ return;
+ }
+ ndbrequire(ok);
+ break;
+ }
+ case SchemaFile::TABLE_ADD_COMMITTED:{
+ jam();
+ bool ok = false;
+ switch(oldSchemaState) {
+ case SchemaFile::INIT:
+ jam();
+ case SchemaFile::ADD_STARTED:
+ jam();
+ case SchemaFile::DROP_TABLE_STARTED:
+ jam();
+ case SchemaFile::DROP_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ //------------------------------------------------------------------
+ // Table was added in the master node but not in our node. We can
+ // retrieve the table definition from the master.
+ //------------------------------------------------------------------
+ restartCreateTab(signal, tableId, oldEntry, false);
+ return;
+ break;
+ case SchemaFile::TABLE_ADD_COMMITTED:
+ jam();
+ case SchemaFile::ALTER_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ //------------------------------------------------------------------
+ // Table was added in both our node and the master node. We can
+ // retrieve the table definition from our own disk.
+ //------------------------------------------------------------------
+ if(* newEntry == * oldEntry){
+ jam();
+
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ tablePtr.p->tableVersion = oldEntry->m_tableVersion;
+ tablePtr.p->tableType = (DictTabInfo::TableType)oldEntry->m_tableType;
+
+ // On NR get index from master because index state is not on file
+ const bool file = c_systemRestart || tablePtr.p->isTable();
+ restartCreateTab(signal, tableId, oldEntry, file);
+
+ return;
+ } else {
+ //------------------------------------------------------------------
+ // If anything differs this must be a new version of the table; both
+ // the table version and the global checkpoint must differ. This should
+ // not happen on the master node. It can happen after a drop table
+ // followed by an add table, or after an alter table.
+ // Not supported in this version.
+ //------------------------------------------------------------------
+ ndbrequire(c_masterNodeId != getOwnNodeId());
+ ndbrequire(newEntry->m_tableVersion != oldEntry->m_tableVersion);
+ jam();
+
+ restartCreateTab(signal, tableId, oldEntry, false);
+ return;
+ }//if
+ }
+ ndbrequire(ok);
+ break;
+ }
+ case SchemaFile::DROP_TABLE_STARTED:
+ jam();
+ case SchemaFile::DROP_TABLE_COMMITTED:{
+ jam();
+ bool ok = false;
+ switch(oldSchemaState){
+ case SchemaFile::INIT:
+ jam();
+ case SchemaFile::DROP_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ break;
+ case SchemaFile::ADD_STARTED:
+ jam();
+ case SchemaFile::TABLE_ADD_COMMITTED:
+ jam();
+ case SchemaFile::DROP_TABLE_STARTED:
+ jam();
+ case SchemaFile::ALTER_TABLE_COMMITTED:
+ jam();
+ newEntry->m_tableState = SchemaFile::INIT;
+ restartDropTab(signal, tableId);
+ return;
+ }
+ ndbrequire(ok);
+ break;
+ }
+ case SchemaFile::ALTER_TABLE_COMMITTED: {
+ jam();
+ bool ok = false;
+ switch(oldSchemaState) {
+ case SchemaFile::INIT:
+ jam();
+ case SchemaFile::ADD_STARTED:
+ jam();
+ case SchemaFile::DROP_TABLE_STARTED:
+ jam();
+ case SchemaFile::DROP_TABLE_COMMITTED:
+ jam();
+ case SchemaFile::TABLE_ADD_COMMITTED:
+ jam();
+ ok = true;
+ //------------------------------------------------------------------
+ // Table was altered in the master node but not in our node. We can
+ // retrieve the altered table definition from the master.
+ //------------------------------------------------------------------
+ restartCreateTab(signal, tableId, oldEntry, false);
+ return;
+ break;
+ case SchemaFile::ALTER_TABLE_COMMITTED:
+ jam();
+ ok = true;
+
+ //------------------------------------------------------------------
+ // Table was altered in both our node and the master node. We can
+ // retrieve the table definition from our own disk.
+ //------------------------------------------------------------------
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ tablePtr.p->tableVersion = oldEntry->m_tableVersion;
+ tablePtr.p->tableType = (DictTabInfo::TableType)oldEntry->m_tableType;
+
+ // On NR get index from master because index state is not on file
+ const bool file = c_systemRestart || tablePtr.p->isTable();
+ restartCreateTab(signal, tableId, oldEntry, file);
+
+ return;
+ }
+ ndbrequire(ok);
+ break;
+ }
+ }
+ }
+
+ execute(signal, c_schemaRecord.m_callback, 0);
+}//checkSchemaStatus()
+
+void
+Dbdict::restartCreateTab(Signal* signal, Uint32 tableId,
+ const SchemaFile::TableEntry * te, bool file){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ c_opCreateTable.seize(createTabPtr);
+ ndbrequire(!createTabPtr.isNull());
+
+ createTabPtr.p->key = ++c_opRecordSequence;
+ c_opCreateTable.add(createTabPtr);
+
+ createTabPtr.p->m_errorCode = 0;
+ createTabPtr.p->m_tablePtrI = tableId;
+ createTabPtr.p->m_coordinatorRef = reference();
+ createTabPtr.p->m_senderRef = 0;
+ createTabPtr.p->m_senderData = RNIL;
+ createTabPtr.p->m_tabInfoPtrI = RNIL;
+ createTabPtr.p->m_dihAddFragPtr = RNIL;
+
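+ // Either read the table definition from our own table file on disk,
+ // or, when it is not available locally, fetch it from the master
+ // with GET_TABINFOREQ.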
+ if(file && !ERROR_INSERTED(6002)){
+ jam();
+
+ c_readTableRecord.noOfPages =
+ DIV(te->m_info_words + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
+ c_readTableRecord.pageId = 0;
+ c_readTableRecord.m_callback.m_callbackData = createTabPtr.p->key;
+ c_readTableRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartCreateTab_readTableConf);
+
+ startReadTableFile(signal, tableId);
+ return;
+ } else {
+
+ ndbrequire(c_masterNodeId != getOwnNodeId());
+
+ /**
+ * Get from master
+ */
+ GetTabInfoReq * const req = (GetTabInfoReq *)&signal->theData[0];
+ req->senderRef = reference();
+ req->senderData = createTabPtr.p->key;
+ req->requestType = GetTabInfoReq::RequestById |
+ GetTabInfoReq::LongSignalConf;
+ req->tableId = tableId;
+ sendSignal(calcDictBlockRef(c_masterNodeId), GSN_GET_TABINFOREQ, signal,
+ GetTabInfoReq::SignalLength, JBB);
+
+ if(ERROR_INSERTED(6002)){
+ NdbSleep_MilliSleep(10);
+ CRASH_INSERTION(6002);
+ }
+ }
+}
+
+void
+Dbdict::restartCreateTab_readTableConf(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ PageRecordPtr pageRecPtr;
+ c_pageRecordArray.getPtr(pageRecPtr, c_readTableRecord.pageId);
+
+ ParseDictTabInfoRecord parseRecord;
+ parseRecord.requestType = DictTabInfo::GetTabInfoConf;
+ parseRecord.errorCode = 0;
+
+ Uint32 sz = c_readTableRecord.noOfPages * ZSIZE_OF_PAGES_IN_WORDS;
+ SimplePropertiesLinearReader r(&pageRecPtr.p->word[0], sz);
+ handleTabInfoInit(r, &parseRecord);
+ ndbrequire(parseRecord.errorCode == 0);
+
+ /* ---------------------------------------------------------------- */
+ // We have read the table description from disk as part of system restart.
+ // We will also write it back again to ensure that both copies are ok.
+ /* ---------------------------------------------------------------- */
+ ndbrequire(c_writeTableRecord.tableWriteState == WriteTableRecord::IDLE);
+ c_writeTableRecord.noOfPages = c_readTableRecord.noOfPages;
+ c_writeTableRecord.pageId = c_readTableRecord.pageId;
+ c_writeTableRecord.tableWriteState = WriteTableRecord::TWR_CALLBACK;
+ c_writeTableRecord.m_callback.m_callbackData = callbackData;
+ c_writeTableRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartCreateTab_writeTableConf);
+ startWriteTableFile(signal, c_readTableRecord.tableId);
+}
+
+void
+Dbdict::execGET_TABINFO_CONF(Signal* signal){
+ jamEntry();
+
+ if(!assembleFragments(signal)){
+ jam();
+ return;
+ }
+
+ GetTabInfoConf * const conf = (GetTabInfoConf*)signal->getDataPtr();
+
+ const Uint32 tableId = conf->tableId;
+ const Uint32 senderData = conf->senderData;
+
+ SegmentedSectionPtr tabInfoPtr;
+ signal->getSection(tabInfoPtr, GetTabInfoConf::DICT_TAB_INFO);
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, senderData));
+ ndbrequire(!createTabPtr.isNull());
+ ndbrequire(createTabPtr.p->m_tablePtrI == tableId);
+
+ /**
+ * Put data into table record
+ */
+ ParseDictTabInfoRecord parseRecord;
+ parseRecord.requestType = DictTabInfo::GetTabInfoConf;
+ parseRecord.errorCode = 0;
+
+ SimplePropertiesSectionReader r(tabInfoPtr, getSectionSegmentPool());
+ handleTabInfoInit(r, &parseRecord);
+ ndbrequire(parseRecord.errorCode == 0);
+
+ Callback callback;
+ callback.m_callbackData = createTabPtr.p->key;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartCreateTab_writeTableConf);
+
+ signal->header.m_noOfSections = 0;
+ writeTableFile(signal, createTabPtr.p->m_tablePtrI, tabInfoPtr, &callback);
+ signal->setSection(tabInfoPtr, 0);
+ releaseSections(signal);
+}
+
+void
+Dbdict::restartCreateTab_writeTableConf(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ Callback callback;
+ callback.m_callbackData = callbackData;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartCreateTab_dihComplete);
+
+ SegmentedSectionPtr fragDataPtr;
+ fragDataPtr.sz = 0;
+ fragDataPtr.setNull();
+ createTab_dih(signal, createTabPtr, fragDataPtr, &callback);
+}
+
+void
+Dbdict::restartCreateTab_dihComplete(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ //@todo check error
+ ndbrequire(createTabPtr.p->m_errorCode == 0);
+
+ Callback callback;
+ callback.m_callbackData = callbackData;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartCreateTab_activateComplete);
+
+ alterTab_activate(signal, createTabPtr, &callback);
+}
+
+void
+Dbdict::restartCreateTab_activateComplete(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+ tabPtr.p->tabState = TableRecord::DEFINED;
+
+ c_opCreateTable.release(createTabPtr);
+
+ c_restartRecord.activeTable++;
+ checkSchemaStatus(signal);
+}
+
+void
+Dbdict::restartDropTab(Signal* signal, Uint32 tableId){
+
+ const Uint32 key = ++c_opRecordSequence;
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.seize(dropTabPtr));
+
+ dropTabPtr.p->key = key;
+ c_opDropTable.add(dropTabPtr);
+
+ dropTabPtr.p->m_errorCode = 0;
+ dropTabPtr.p->m_request.tableId = tableId;
+ dropTabPtr.p->m_coordinatorRef = 0;
+ dropTabPtr.p->m_requestType = DropTabReq::RestartDropTab;
+ dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_REQ;
+
+
+ dropTabPtr.p->m_participantData.m_block = 0;
+ dropTabPtr.p->m_participantData.m_callback.m_callbackData = key;
+ dropTabPtr.p->m_participantData.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartDropTab_complete);
+ dropTab_nextStep(signal, dropTabPtr);
+}
+
+void
+Dbdict::restartDropTab_complete(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, callbackData));
+
+ //@todo check error
+
+ c_opDropTable.release(dropTabPtr);
+
+ c_restartRecord.activeTable++;
+ checkSchemaStatus(signal);
+}
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: NODE FAILURE HANDLING ------------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains the code that is used when nodes */
+/* (kernel/api) fail. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+/* ---------------------------------------------------------------- */
+// We receive a report of an API node that failed.
+/* ---------------------------------------------------------------- */
+void Dbdict::execAPI_FAILREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 failedApiNode = signal->theData[0];
+ BlockReference retRef = signal->theData[1];
+
+#if 0
+ Uint32 userNode = refToNode(c_connRecord.userBlockRef);
+ if (userNode == failedApiNode) {
+ jam();
+ c_connRecord.userBlockRef = (Uint32)-1;
+ }//if
+#endif
+
+ signal->theData[0] = failedApiNode;
+ signal->theData[1] = reference();
+ sendSignal(retRef, GSN_API_FAILCONF, signal, 2, JBB);
+}//execAPI_FAILREQ()
+
+/* ---------------------------------------------------------------- */
+// We receive a report of one or more kernel node failures.
+/* ---------------------------------------------------------------- */
+void Dbdict::execNODE_FAILREP(Signal* signal)
+{
+ jamEntry();
+ NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
+
+ c_failureNr = nodeFail->failNo;
+ const Uint32 numberOfFailedNodes = nodeFail->noOfNodes;
+ const bool masterFailed = (c_masterNodeId != nodeFail->masterNodeId);
+ c_masterNodeId = nodeFail->masterNodeId;
+
+ c_noNodesFailed += numberOfFailedNodes;
+ Uint32 theFailedNodes[NodeBitmask::Size];
+ memcpy(theFailedNodes, nodeFail->theNodes, sizeof(theFailedNodes));
+
+ c_counterMgr.execNODE_FAILREP(signal);
+
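+ // Adjust the block state: if operation records are in use, if the
+ // master failed while a CREATE TAB was in progress, or if the block
+ // was already busy, enter BS_NODE_FAILURE to block new schema changes.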
+ bool ok = false;
+ switch(c_blockState){
+ case BS_IDLE:
+ jam();
+ ok = true;
+ if(c_opRecordPool.getSize() != c_opRecordPool.getNoOfFree()){
+ jam();
+ c_blockState = BS_NODE_FAILURE;
+ }
+ break;
+ case BS_CREATE_TAB:
+ jam();
+ ok = true;
+ if(!masterFailed)
+ break;
+ // fall through
+ case BS_BUSY:
+ case BS_NODE_FAILURE:
+ jam();
+ c_blockState = BS_NODE_FAILURE;
+ ok = true;
+ break;
+ }
+ ndbrequire(ok);
+
+ for(unsigned i = 1; i < MAX_NDB_NODES; i++) {
+ jam();
+ if(NodeBitmask::get(theFailedNodes, i)) {
+ jam();
+ NodeRecordPtr nodePtr;
+ c_nodes.getPtr(nodePtr, i);
+
+ nodePtr.p->nodeState = NodeRecord::NDB_NODE_DEAD;
+ NFCompleteRep * const nfCompRep = (NFCompleteRep *)&signal->theData[0];
+ nfCompRep->blockNo = DBDICT;
+ nfCompRep->nodeId = getOwnNodeId();
+ nfCompRep->failedNodeId = nodePtr.i;
+ sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+
+ c_aliveNodes.clear(i);
+ }//if
+ }//for
+
+}//execNODE_FAILREP()
+
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: NODE START HANDLING --------------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains the code that is used when kernel nodes */
+/* start. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+/* ---------------------------------------------------------------- */
+// Include a starting node in the list of nodes that take part in adding
+// and dropping tables.
+/* ---------------------------------------------------------------- */
+void Dbdict::execINCL_NODEREQ(Signal* signal)
+{
+ jamEntry();
+ NodeRecordPtr nodePtr;
+ BlockReference retRef = signal->theData[0];
+ nodePtr.i = signal->theData[1];
+
+ ndbrequire(c_noNodesFailed > 0);
+ c_noNodesFailed--;
+
+ c_nodes.getPtr(nodePtr);
+ ndbrequire(nodePtr.p->nodeState == NodeRecord::NDB_NODE_DEAD);
+ nodePtr.p->nodeState = NodeRecord::NDB_NODE_ALIVE;
+ signal->theData[0] = reference();
+ sendSignal(retRef, GSN_INCL_NODECONF, signal, 1, JBB);
+
+ c_aliveNodes.set(nodePtr.i);
+}//execINCL_NODEREQ()
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: ADD TABLE HANDLING ---------------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains the code that is used when adding a table. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+/* ---------------------------------------------------------------- */
+// This signal carries information about a table and comes from either
+// the API, Ndbcntr or another DICT.
+/* ---------------------------------------------------------------- */
+void
+Dbdict::execCREATE_TABLE_REQ(Signal* signal){
+ jamEntry();
+ if(!assembleFragments(signal)){
+ return;
+ }
+
+ CreateTableReq* const req = (CreateTableReq*)signal->getDataPtr();
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+
+ ParseDictTabInfoRecord parseRecord;
+ do {
+ if(getOwnNodeId() != c_masterNodeId){
+ jam();
+ parseRecord.errorCode = CreateTableRef::NotMaster;
+ break;
+ }
+
+ if (c_blockState != BS_IDLE){
+ jam();
+ parseRecord.errorCode = CreateTableRef::Busy;
+ break;
+ }
+
+ CreateTableRecordPtr createTabPtr;
+ c_opCreateTable.seize(createTabPtr);
+
+ if(createTabPtr.isNull()){
+ jam();
+ parseRecord.errorCode = CreateTableRef::Busy;
+ break;
+ }
+
+ parseRecord.requestType = DictTabInfo::CreateTableFromAPI;
+ parseRecord.errorCode = 0;
+
+ SegmentedSectionPtr ptr;
+ signal->getSection(ptr, CreateTableReq::DICT_TAB_INFO);
+ SimplePropertiesSectionReader r(ptr, getSectionSegmentPool());
+
+ handleTabInfoInit(r, &parseRecord);
+ releaseSections(signal);
+
+ if(parseRecord.errorCode != 0){
+ jam();
+ c_opCreateTable.release(createTabPtr);
+ break;
+ }
+
+ createTabPtr.p->key = ++c_opRecordSequence;
+ c_opCreateTable.add(createTabPtr);
+ createTabPtr.p->m_errorCode = 0;
+ createTabPtr.p->m_senderRef = senderRef;
+ createTabPtr.p->m_senderData = senderData;
+ createTabPtr.p->m_tablePtrI = parseRecord.tablePtr.i;
+ createTabPtr.p->m_coordinatorRef = reference();
+ createTabPtr.p->m_fragmentsPtrI = RNIL;
+ createTabPtr.p->m_dihAddFragPtr = RNIL;
+
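+ // Build a CREATE_FRAGMENTATION_REQ and execute it directly in DIH;
+ // on return theData[0] holds the error code (0 on success).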
+ Uint32 *theData = signal->getDataPtrSend(), i;
+ Uint16 *node_group= (Uint16*)&signal->theData[25];
+ CreateFragmentationReq * const req = (CreateFragmentationReq*)theData;
+ req->senderRef = reference();
+ req->senderData = createTabPtr.p->key;
+ req->primaryTableId = parseRecord.tablePtr.p->primaryTableId;
+ req->noOfFragments = parseRecord.tablePtr.p->ngLen >> 1;
+ req->fragmentationType = parseRecord.tablePtr.p->fragmentType;
+ for (i = 0; i < req->noOfFragments; i++)
+ node_group[i] = parseRecord.tablePtr.p->ngData[i];
+ if (parseRecord.tablePtr.p->isOrderedIndex()) {
+ jam();
+ // ordered index has same fragmentation as the table
+ req->primaryTableId = parseRecord.tablePtr.p->primaryTableId;
+ req->fragmentationType = DictTabInfo::DistrKeyOrderedIndex;
+ }
+ else if (parseRecord.tablePtr.p->isHashIndex())
+ {
+ jam();
+ /*
+ Unique hash indexes have the same number of fragments as the primary
+ table and are distributed in the same manner, but always use normal
+ hash fragmentation.
+ */
+ req->primaryTableId = parseRecord.tablePtr.p->primaryTableId;
+ req->fragmentationType = DictTabInfo::DistrKeyUniqueHashIndex;
+ }
+ else
+ {
+ jam();
+ /*
+ Blob tables come here with primaryTableId != RNIL, but we only need
+ it for creating the fragments, so we reset it to RNIL now that we
+ have what we need from it, to avoid other side effects.
+ */
+ parseRecord.tablePtr.p->primaryTableId = RNIL;
+ }
+ EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal,
+ CreateFragmentationReq::SignalLength);
+ jamEntry();
+ if (signal->theData[0] != 0)
+ {
+ jam();
+ parseRecord.errorCode= signal->theData[0];
+ break;
+ }
+
+ c_blockState = BS_CREATE_TAB;
+ return;
+ } while(0);
+
+ /**
+ * Something went wrong
+ */
+ releaseSections(signal);
+
+ CreateTableRef * ref = (CreateTableRef*)signal->getDataPtrSend();
+ ref->senderData = senderData;
+ ref->senderRef = reference();
+ ref->masterNodeId = c_masterNodeId;
+ ref->errorCode = parseRecord.errorCode;
+ ref->errorLine = parseRecord.errorLine;
+ ref->errorKey = parseRecord.errorKey;
+ ref->status = parseRecord.status;
+ sendSignal(senderRef, GSN_CREATE_TABLE_REF, signal,
+ CreateTableRef::SignalLength, JBB);
+}
+
+void
+Dbdict::execALTER_TABLE_REQ(Signal* signal)
+{
+ // Received by master
+ jamEntry();
+ if(!assembleFragments(signal)){
+ return;
+ }
+ AlterTableReq* const req = (AlterTableReq*)signal->getDataPtr();
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+ const Uint32 changeMask = req->changeMask;
+ const Uint32 tableId = req->tableId;
+ const Uint32 tableVersion = req->tableVersion;
+ ParseDictTabInfoRecord* aParseRecord;
+
+ // Get table definition
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId, false);
+ if(tablePtr.isNull()){
+ jam();
+ alterTableRef(signal, req, AlterTableRef::NoSuchTable);
+ return;
+ }
+
+ if(getOwnNodeId() != c_masterNodeId){
+ jam();
+ alterTableRef(signal, req, AlterTableRef::NotMaster);
+ return;
+ }
+
+ if(c_blockState != BS_IDLE){
+ jam();
+ alterTableRef(signal, req, AlterTableRef::Busy);
+ return;
+ }
+
+ const TableRecord::TabState tabState = tablePtr.p->tabState;
+ bool ok = false;
+ switch(tabState){
+ case TableRecord::NOT_DEFINED:
+ case TableRecord::REORG_TABLE_PREPARED:
+ case TableRecord::DEFINING:
+ case TableRecord::CHECKED:
+ jam();
+ alterTableRef(signal, req, AlterTableRef::NoSuchTable);
+ return;
+ case TableRecord::DEFINED:
+ ok = true;
+ jam();
+ break;
+ case TableRecord::PREPARE_DROPPING:
+ case TableRecord::DROPPING:
+ jam();
+ alterTableRef(signal, req, AlterTableRef::DropInProgress);
+ return;
+ }
+ ndbrequire(ok);
+
+ if(tablePtr.p->tableVersion != tableVersion){
+ jam();
+ alterTableRef(signal, req, AlterTableRef::InvalidTableVersion);
+ return;
+ }
+ // Parse the new table definition
+ ParseDictTabInfoRecord parseRecord;
+ aParseRecord = &parseRecord;
+
+ CreateTableRecordPtr alterTabPtr; // Reuse create table records
+ c_opCreateTable.seize(alterTabPtr);
+ CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
+
+ if(alterTabPtr.isNull()){
+ jam();
+ alterTableRef(signal, req, AlterTableRef::Busy);
+ return;
+ }
+
+ regAlterTabPtr->m_changeMask = changeMask;
+ parseRecord.requestType = DictTabInfo::AlterTableFromAPI;
+ parseRecord.errorCode = 0;
+
+ SegmentedSectionPtr ptr;
+ signal->getSection(ptr, AlterTableReq::DICT_TAB_INFO);
+ SimplePropertiesSectionReader r(ptr, getSectionSegmentPool());
+
+ handleTabInfoInit(r, &parseRecord, false); // Will not save info
+
+ if(parseRecord.errorCode != 0){
+ jam();
+ c_opCreateTable.release(alterTabPtr);
+ alterTableRef(signal, req,
+ (AlterTableRef::ErrorCode) parseRecord.errorCode,
+ aParseRecord);
+ return;
+ }
+
+ releaseSections(signal);
+ regAlterTabPtr->key = ++c_opRecordSequence;
+ c_opCreateTable.add(alterTabPtr);
+ ndbrequire(c_opCreateTable.find(alterTabPtr, regAlterTabPtr->key));
+ regAlterTabPtr->m_errorCode = 0;
+ regAlterTabPtr->m_senderRef = senderRef;
+ regAlterTabPtr->m_senderData = senderData;
+ regAlterTabPtr->m_tablePtrI = parseRecord.tablePtr.i;
+ regAlterTabPtr->m_alterTableFailed = false;
+ regAlterTabPtr->m_coordinatorRef = reference();
+ regAlterTabPtr->m_fragmentsPtrI = RNIL;
+ regAlterTabPtr->m_dihAddFragPtr = RNIL;
+
+ // Alter table on all nodes
+ c_blockState = BS_BUSY;
+
+ // Send prepare request to all alive nodes
+ SimplePropertiesSectionWriter w(getSectionSegmentPool());
+ packTableIntoPagesImpl(w, parseRecord.tablePtr);
+
+ SegmentedSectionPtr tabInfoPtr;
+ w.getPtr(tabInfoPtr);
+ signal->setSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO);
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ regAlterTabPtr->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ;
+ SafeCounter safeCounter(c_counterMgr, regAlterTabPtr->m_coordinatorData.m_counter);
+ safeCounter.init<AlterTabRef>(rg, regAlterTabPtr->key);
+
+ AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend();
+ lreq->senderRef = reference();
+ lreq->senderData = regAlterTabPtr->key;
+ lreq->clientRef = regAlterTabPtr->m_senderRef;
+ lreq->clientData = regAlterTabPtr->m_senderData;
+ lreq->changeMask = changeMask;
+ lreq->tableId = tableId;
+ lreq->tableVersion = tableVersion + 1;
+ lreq->gci = tablePtr.p->gciTableCreated;
+ lreq->requestType = AlterTabReq::AlterTablePrepare;
+
+ sendSignal(rg, GSN_ALTER_TAB_REQ, signal,
+ AlterTabReq::SignalLength, JBB);
+
+}
+
+void Dbdict::alterTableRef(Signal * signal,
+ AlterTableReq * req,
+ AlterTableRef::ErrorCode errCode,
+ ParseDictTabInfoRecord* parseRecord)
+{
+ jam();
+ releaseSections(signal);
+ AlterTableRef * ref = (AlterTableRef*)signal->getDataPtrSend();
+ Uint32 senderRef = req->senderRef;
+ ref->senderData = req->senderData;
+ ref->senderRef = reference();
+ ref->masterNodeId = c_masterNodeId;
+ if (parseRecord) {
+ ref->errorCode = parseRecord->errorCode;
+ ref->errorLine = parseRecord->errorLine;
+ ref->errorKey = parseRecord->errorKey;
+ ref->status = parseRecord->status;
+ }
+ else {
+ ref->errorCode = errCode;
+ ref->errorLine = 0;
+ ref->errorKey = 0;
+ ref->status = 0;
+ }
+ sendSignal(senderRef, GSN_ALTER_TABLE_REF, signal,
+ AlterTableRef::SignalLength, JBB);
+}
+
+void
+Dbdict::execALTER_TAB_REQ(Signal * signal)
+{
+ // Received in all nodes to handle change locally
+ jamEntry();
+
+ if(!assembleFragments(signal)){
+ return;
+ }
+ AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr();
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+ const Uint32 changeMask = req->changeMask;
+ const Uint32 tableId = req->tableId;
+ const Uint32 tableVersion = req->tableVersion;
+ const Uint32 gci = req->gci;
+ AlterTabReq::RequestType requestType =
+ (AlterTabReq::RequestType) req->requestType;
+
+ SegmentedSectionPtr tabInfoPtr;
+ signal->getSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO);
+
+ CreateTableRecordPtr alterTabPtr; // Reuse create table records
+
+ if (senderRef != reference()) {
+ jam();
+ c_blockState = BS_BUSY;
+ }
+ if ((requestType == AlterTabReq::AlterTablePrepare)
+ && (senderRef != reference())) {
+ jam();
+ c_opCreateTable.seize(alterTabPtr);
+ if(!alterTabPtr.isNull())
+ alterTabPtr.p->m_changeMask = changeMask;
+ }
+ else {
+ jam();
+ ndbrequire(c_opCreateTable.find(alterTabPtr, senderData));
+ }
+ if(alterTabPtr.isNull()){
+ jam();
+ alterTabRef(signal, req, AlterTableRef::Busy);
+ return;
+ }
+ CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
+ regAlterTabPtr->m_alterTableId = tableId;
+ regAlterTabPtr->m_coordinatorRef = senderRef;
+
+ // Get table definition
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId, false);
+ if(tablePtr.isNull()){
+ jam();
+ alterTabRef(signal, req, AlterTableRef::NoSuchTable);
+ return;
+ }
+
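+ // The alter table protocol runs in three phases, selected by
+ // requestType: prepare (parse and apply the change locally, then
+ // forward to LQH), commit (write the new schema to disk) and revert
+ // (undo the local change and confirm back to the coordinator).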
+ switch(requestType) {
+ case(AlterTabReq::AlterTablePrepare): {
+ ParseDictTabInfoRecord* aParseRecord;
+
+ const TableRecord::TabState tabState = tablePtr.p->tabState;
+ bool ok = false;
+ switch(tabState){
+ case TableRecord::NOT_DEFINED:
+ case TableRecord::REORG_TABLE_PREPARED:
+ case TableRecord::DEFINING:
+ case TableRecord::CHECKED:
+ jam();
+ alterTabRef(signal, req, AlterTableRef::NoSuchTable);
+ return;
+ case TableRecord::DEFINED:
+ ok = true;
+ jam();
+ break;
+ case TableRecord::PREPARE_DROPPING:
+ case TableRecord::DROPPING:
+ jam();
+ alterTabRef(signal, req, AlterTableRef::DropInProgress);
+ return;
+ }
+ ndbrequire(ok);
+
+ if(tablePtr.p->tableVersion + 1 != tableVersion){
+ jam();
+ alterTabRef(signal, req, AlterTableRef::InvalidTableVersion);
+ return;
+ }
+ TableRecordPtr newTablePtr;
+ if (senderRef != reference()) {
+ jam();
+ // Parse the altered table definition
+ ParseDictTabInfoRecord parseRecord;
+ aParseRecord = &parseRecord;
+
+ parseRecord.requestType = DictTabInfo::AlterTableFromAPI;
+ parseRecord.errorCode = 0;
+
+ SimplePropertiesSectionReader r(tabInfoPtr, getSectionSegmentPool());
+
+ handleTabInfoInit(r, &parseRecord, false); // Will not save info
+
+ if(parseRecord.errorCode != 0){
+ jam();
+ c_opCreateTable.release(alterTabPtr);
+ alterTabRef(signal, req,
+ (AlterTableRef::ErrorCode) parseRecord.errorCode,
+ aParseRecord);
+ return;
+ }
+ regAlterTabPtr->key = senderData;
+ c_opCreateTable.add(alterTabPtr);
+ regAlterTabPtr->m_errorCode = 0;
+ regAlterTabPtr->m_senderRef = senderRef;
+ regAlterTabPtr->m_senderData = senderData;
+ regAlterTabPtr->m_tablePtrI = parseRecord.tablePtr.i;
+ regAlterTabPtr->m_fragmentsPtrI = RNIL;
+ regAlterTabPtr->m_dihAddFragPtr = RNIL;
+ newTablePtr = parseRecord.tablePtr;
+ newTablePtr.p->tableVersion = tableVersion;
+ }
+ else { // (req->senderRef == reference())
+ jam();
+ c_tableRecordPool.getPtr(newTablePtr, regAlterTabPtr->m_tablePtrI);
+ newTablePtr.p->tableVersion = tableVersion;
+ }
+ if (handleAlterTab(req, regAlterTabPtr, tablePtr, newTablePtr) == -1) {
+ jam();
+ c_opCreateTable.release(alterTabPtr);
+ alterTabRef(signal, req, AlterTableRef::UnsupportedChange);
+ return;
+ }
+ releaseSections(signal);
+ // Propagate alter table to other local blocks
+ AlterTabReq * req = (AlterTabReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = senderData;
+ req->changeMask = changeMask;
+ req->tableId = tableId;
+ req->tableVersion = tableVersion;
+ req->gci = gci;
+ req->requestType = requestType;
+ sendSignal(DBLQH_REF, GSN_ALTER_TAB_REQ, signal,
+ AlterTabReq::SignalLength, JBB);
+ return;
+ }
+ case(AlterTabReq::AlterTableCommit): {
+ jam();
+ // Write schema for altered table to disk
+ SegmentedSectionPtr tabInfoPtr;
+ signal->getSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO);
+ regAlterTabPtr->m_tabInfoPtrI = tabInfoPtr.i;
+
+ signal->header.m_noOfSections = 0;
+
+ // Update table record
+ tablePtr.p->packedSize = tabInfoPtr.sz;
+ tablePtr.p->tableVersion = tableVersion;
+ tablePtr.p->gciTableCreated = gci;
+
+ SchemaFile::TableEntry tabEntry;
+ tabEntry.m_tableVersion = tableVersion;
+ tabEntry.m_tableType = tablePtr.p->tableType;
+ tabEntry.m_tableState = SchemaFile::ALTER_TABLE_COMMITTED;
+ tabEntry.m_gcp = gci;
+ tabEntry.m_info_words = tabInfoPtr.sz;
+ memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused));
+
+ Callback callback;
+ callback.m_callbackData = senderData;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::alterTab_writeSchemaConf);
+
+ updateSchemaState(signal, tableId, &tabEntry, &callback);
+ break;
+ }
+ case(AlterTabReq::AlterTableRevert): {
+ jam();
+ // Revert failed alter table
+ revertAlterTable(signal, changeMask, tableId, regAlterTabPtr);
+ // Acknowledge the reverted alter table
+ AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->changeMask = changeMask;
+ conf->tableId = tableId;
+ conf->tableVersion = tableVersion;
+ conf->gci = gci;
+ conf->requestType = requestType;
+ sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal,
+ AlterTabConf::SignalLength, JBB);
+ break;
+ }
+ default: ndbrequire(false);
+ }
+}
+
+void Dbdict::alterTabRef(Signal * signal,
+ AlterTabReq * req,
+ AlterTableRef::ErrorCode errCode,
+ ParseDictTabInfoRecord* parseRecord)
+{
+ jam();
+ releaseSections(signal);
+ AlterTabRef * ref = (AlterTabRef*)signal->getDataPtrSend();
+ Uint32 senderRef = req->senderRef;
+ ref->senderData = req->senderData;
+ ref->senderRef = reference();
+ if (parseRecord) {
+ jam();
+ ref->errorCode = parseRecord->errorCode;
+ ref->errorLine = parseRecord->errorLine;
+ ref->errorKey = parseRecord->errorKey;
+ ref->errorStatus = parseRecord->status;
+ }
+ else {
+ jam();
+ ref->errorCode = errCode;
+ ref->errorLine = 0;
+ ref->errorKey = 0;
+ ref->errorStatus = 0;
+ }
+ sendSignal(senderRef, GSN_ALTER_TAB_REF, signal,
+ AlterTabRef::SignalLength, JBB);
+
+ c_blockState = BS_IDLE;
+}
+
+void Dbdict::execALTER_TAB_REF(Signal * signal){
+ jamEntry();
+
+ AlterTabRef * ref = (AlterTabRef*)signal->getDataPtr();
+
+ Uint32 senderRef = ref->senderRef;
+ Uint32 senderData = ref->senderData;
+ Uint32 errorCode = ref->errorCode;
+ Uint32 errorLine = ref->errorLine;
+ Uint32 errorKey = ref->errorKey;
+ Uint32 errorStatus = ref->errorStatus;
+ AlterTabReq::RequestType requestType =
+ (AlterTabReq::RequestType) ref->requestType;
+ CreateTableRecordPtr alterTabPtr;
+ ndbrequire(c_opCreateTable.find(alterTabPtr, senderData));
+ CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
+ Uint32 changeMask = regAlterTabPtr->m_changeMask;
+ SafeCounter safeCounter(c_counterMgr, regAlterTabPtr->m_coordinatorData.m_counter);
+ safeCounter.clearWaitingFor(refToNode(senderRef));
+ switch (requestType) {
+ case(AlterTabReq::AlterTablePrepare): {
+ if (safeCounter.done()) {
+ jam();
+ // Send revert request to all alive nodes
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, regAlterTabPtr->m_alterTableId);
+ Uint32 tableId = tablePtr.p->tableId;
+ Uint32 tableVersion = tablePtr.p->tableVersion;
+ Uint32 gci = tablePtr.p->gciTableCreated;
+ SimplePropertiesSectionWriter w(getSectionSegmentPool());
+ packTableIntoPagesImpl(w, tablePtr);
+ SegmentedSectionPtr spDataPtr;
+ w.getPtr(spDataPtr);
+ signal->setSection(spDataPtr, AlterTabReq::DICT_TAB_INFO);
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ regAlterTabPtr->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ;
+ safeCounter.init<AlterTabRef>(rg, regAlterTabPtr->key);
+
+ AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend();
+ lreq->senderRef = reference();
+ lreq->senderData = regAlterTabPtr->key;
+ lreq->clientRef = regAlterTabPtr->m_senderRef;
+ lreq->clientData = regAlterTabPtr->m_senderData;
+ lreq->changeMask = changeMask;
+ lreq->tableId = tableId;
+ lreq->tableVersion = tableVersion;
+ lreq->gci = gci;
+ lreq->requestType = AlterTabReq::AlterTableRevert;
+
+ sendSignal(rg, GSN_ALTER_TAB_REQ, signal,
+ AlterTabReq::SignalLength, JBB);
+ }
+ else {
+ jam();
+ regAlterTabPtr->m_alterTableFailed = true;
+ }
+ break;
+ }
+ case(AlterTabReq::AlterTableCommit):
+ jam();
+ case(AlterTabReq::AlterTableRevert): {
+ AlterTableRef * apiRef = (AlterTableRef*)signal->getDataPtrSend();
+
+ apiRef->senderData = senderData;
+ apiRef->senderRef = reference();
+ apiRef->masterNodeId = c_masterNodeId;
+ apiRef->errorCode = errorCode;
+ apiRef->errorLine = errorLine;
+ apiRef->errorKey = errorKey;
+ apiRef->status = errorStatus;
+ if (safeCounter.done()) {
+ jam();
+ sendSignal(senderRef, GSN_ALTER_TABLE_REF, signal,
+ AlterTableRef::SignalLength, JBB);
+ c_blockState = BS_IDLE;
+ }
+ else {
+ jam();
+ regAlterTabPtr->m_alterTableFailed = true;
+ regAlterTabPtr->m_alterTableRef = *apiRef;
+ }
+ break;
+ }
+ default: ndbrequire(false);
+ }
+}
+
+void
+Dbdict::execALTER_TAB_CONF(Signal * signal){
+ jamEntry();
+ AlterTabConf * const conf = (AlterTabConf*)signal->getDataPtr();
+ Uint32 senderRef = conf->senderRef;
+ Uint32 senderData = conf->senderData;
+ Uint32 changeMask = conf->changeMask;
+ Uint32 tableId = conf->tableId;
+ Uint32 tableVersion = conf->tableVersion;
+ Uint32 gci = conf->gci;
+ AlterTabReq::RequestType requestType =
+ (AlterTabReq::RequestType) conf->requestType;
+ CreateTableRecordPtr alterTabPtr;
+ ndbrequire(c_opCreateTable.find(alterTabPtr, senderData));
+ CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
+
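+ // During prepare the confirmations chain through the local blocks
+ // LQH -> DIH -> TC before the participant replies to the coordinator.
+ // The coordinator collects replies from all nodes and then broadcasts
+ // either commit or revert; those confirmations are counted the same
+ // way before the API client is answered.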
+ switch (requestType) {
+ case(AlterTabReq::AlterTablePrepare): {
+ switch(refToBlock(signal->getSendersBlockRef())) {
+ case DBLQH: {
+ jam();
+ AlterTabReq * req = (AlterTabReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = senderData;
+ req->changeMask = changeMask;
+ req->tableId = tableId;
+ req->tableVersion = tableVersion;
+ req->gci = gci;
+ req->requestType = requestType;
+ sendSignal(DBDIH_REF, GSN_ALTER_TAB_REQ, signal,
+ AlterTabReq::SignalLength, JBB);
+ return;
+ }
+ case DBDIH: {
+ jam();
+ AlterTabReq * req = (AlterTabReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = senderData;
+ req->changeMask = changeMask;
+ req->tableId = tableId;
+ req->tableVersion = tableVersion;
+ req->gci = gci;
+ req->requestType = requestType;
+ sendSignal(DBTC_REF, GSN_ALTER_TAB_REQ, signal,
+ AlterTabReq::SignalLength, JBB);
+ return;
+ }
+ case DBTC: {
+ jam();
+ // Participant is done with prepare phase, send conf to coordinator
+ AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->changeMask = changeMask;
+ conf->tableId = tableId;
+ conf->tableVersion = tableVersion;
+ conf->gci = gci;
+ conf->requestType = requestType;
+ sendSignal(regAlterTabPtr->m_coordinatorRef, GSN_ALTER_TAB_CONF, signal,
+ AlterTabConf::SignalLength, JBB);
+ return;
+ }
+ default :break;
+ }
+ // Coordinator only
+ SafeCounter safeCounter(c_counterMgr, regAlterTabPtr->m_coordinatorData.m_counter);
+ safeCounter.clearWaitingFor(refToNode(senderRef));
+ if (safeCounter.done()) {
+ jam();
+ // We have received all local confirmations
+ if (regAlterTabPtr->m_alterTableFailed) {
+ jam();
+ // Send revert request to all alive nodes
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, regAlterTabPtr->m_alterTableId);
+ Uint32 tableId = tablePtr.p->tableId;
+ Uint32 tableVersion = tablePtr.p->tableVersion;
+ Uint32 gci = tablePtr.p->gciTableCreated;
+ SimplePropertiesSectionWriter w(getSectionSegmentPool());
+ packTableIntoPagesImpl(w, tablePtr);
+ SegmentedSectionPtr spDataPtr;
+ w.getPtr(spDataPtr);
+ signal->setSection(spDataPtr, AlterTabReq::DICT_TAB_INFO);
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ regAlterTabPtr->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ;
+ safeCounter.init<AlterTabRef>(rg, regAlterTabPtr->key);
+
+ AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend();
+ lreq->senderRef = reference();
+ lreq->senderData = regAlterTabPtr->key;
+ lreq->clientRef = regAlterTabPtr->m_senderRef;
+ lreq->clientData = regAlterTabPtr->m_senderData;
+ lreq->changeMask = changeMask;
+ lreq->tableId = tableId;
+ lreq->tableVersion = tableVersion;
+ lreq->gci = gci;
+ lreq->requestType = AlterTabReq::AlterTableRevert;
+
+ sendSignal(rg, GSN_ALTER_TAB_REQ, signal,
+ AlterTabReq::SignalLength, JBB);
+ }
+ else {
+ jam();
+ // Send commit request to all alive nodes
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ SimplePropertiesSectionWriter w(getSectionSegmentPool());
+ packTableIntoPagesImpl(w, tablePtr);
+ SegmentedSectionPtr spDataPtr;
+ w.getPtr(spDataPtr);
+ signal->setSection(spDataPtr, AlterTabReq::DICT_TAB_INFO);
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ regAlterTabPtr->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ;
+ safeCounter.init<AlterTabRef>(rg, regAlterTabPtr->key);
+
+ AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend();
+ lreq->senderRef = reference();
+ lreq->senderData = regAlterTabPtr->key;
+ lreq->clientRef = regAlterTabPtr->m_senderRef;
+ lreq->clientData = regAlterTabPtr->m_senderData;
+ lreq->changeMask = changeMask;
+ lreq->tableId = tableId;
+ lreq->tableVersion = tableVersion;
+ lreq->gci = gci;
+ lreq->requestType = AlterTabReq::AlterTableCommit;
+
+ sendSignal(rg, GSN_ALTER_TAB_REQ, signal,
+ AlterTabReq::SignalLength, JBB);
+ }
+ }
+ else {
+ // (!safeCounter.done())
+ jam();
+ }
+ break;
+ }
+ case(AlterTabReq::AlterTableRevert):
+ jam();
+ case(AlterTabReq::AlterTableCommit): {
+ SafeCounter safeCounter(c_counterMgr, regAlterTabPtr->m_coordinatorData.m_counter);
+ safeCounter.clearWaitingFor(refToNode(senderRef));
+ if (safeCounter.done()) {
+ jam();
+ // We have received all local confirmations
+ releaseSections(signal);
+ if (regAlterTabPtr->m_alterTableFailed) {
+ jam();
+ AlterTableRef * apiRef =
+ (AlterTableRef*)signal->getDataPtrSend();
+ *apiRef = regAlterTabPtr->m_alterTableRef;
+ sendSignal(regAlterTabPtr->m_senderRef, GSN_ALTER_TABLE_REF, signal,
+ AlterTableRef::SignalLength, JBB);
+ }
+ else {
+ jam();
+ // Alter table completed, inform API
+ AlterTableConf * const apiConf =
+ (AlterTableConf*)signal->getDataPtrSend();
+ apiConf->senderRef = reference();
+ apiConf->senderData = regAlterTabPtr->m_senderData;
+ apiConf->tableId = tableId;
+ apiConf->tableVersion = tableVersion;
+
+ //@todo check api failed
+ sendSignal(regAlterTabPtr->m_senderRef, GSN_ALTER_TABLE_CONF, signal,
+ AlterTableConf::SignalLength, JBB);
+ }
+
+ // Release resources
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_tablePtrI);
+ releaseTableObject(tabPtr.i, false);
+ c_opCreateTable.release(alterTabPtr);
+ c_blockState = BS_IDLE;
+ }
+ else {
+ // (!safeCounter.done())
+ jam();
+ }
+ break;
+ }
+ default: ndbrequire(false);
+ }
+}
+
+// For debugging
+inline
+void Dbdict::printTables()
+{
+ DLHashTable<TableRecord>::Iterator iter;
+ bool moreTables = c_tableRecordHash.first(iter);
+ printf("TABLES IN DICT:\n");
+ while (moreTables) {
+ TableRecordPtr tablePtr = iter.curr;
+ printf("%s ", tablePtr.p->tableName);
+ moreTables = c_tableRecordHash.next(iter);
+ }
+ printf("\n");
+}
+
+int Dbdict::handleAlterTab(AlterTabReq * req,
+ CreateTableRecord * regAlterTabPtr,
+ TableRecordPtr origTablePtr,
+ TableRecordPtr newTablePtr)
+{
+ Uint32 changeMask = req->changeMask;
+
+ if (AlterTableReq::getNameFlag(changeMask)) {
+ jam();
+ // Table rename
+ // Remove from hashtable
+#ifdef VM_TRACE
+ TableRecordPtr tmp;
+ ndbrequire(c_tableRecordHash.find(tmp, *origTablePtr.p));
+#endif
+ c_tableRecordHash.remove(origTablePtr);
+ strcpy(regAlterTabPtr->previousTableName, origTablePtr.p->tableName);
+ strcpy(origTablePtr.p->tableName, newTablePtr.p->tableName);
+ // Set new schema version
+ origTablePtr.p->tableVersion = newTablePtr.p->tableVersion;
+ // Put it back
+#ifdef VM_TRACE
+ ndbrequire(!c_tableRecordHash.find(tmp, *origTablePtr.p));
+#endif
+ c_tableRecordHash.add(origTablePtr);
+
+ return 0;
+ }
+ jam();
+ return -1;
+}
+
+void Dbdict::revertAlterTable(Signal * signal,
+ Uint32 changeMask,
+ Uint32 tableId,
+ CreateTableRecord * regAlterTabPtr)
+{
+ if (AlterTableReq::getNameFlag(changeMask)) {
+ jam();
+ // Table rename
+ // Restore previous name
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ // Remove from hashtable
+#ifdef VM_TRACE
+ TableRecordPtr tmp;
+ ndbrequire(c_tableRecordHash.find(tmp, * tablePtr.p));
+#endif
+ c_tableRecordHash.remove(tablePtr);
+ // Restore name
+ strcpy(tablePtr.p->tableName, regAlterTabPtr->previousTableName);
+ // Revert schema version
+ tablePtr.p->tableVersion = tablePtr.p->tableVersion - 1;
+ // Put it back
+#ifdef VM_TRACE
+ ndbrequire(!c_tableRecordHash.find(tmp, * tablePtr.p));
+#endif
+ c_tableRecordHash.add(tablePtr);
+
+ return;
+ }
+
+ ndbrequire(false);
+}
+
+void
+Dbdict::alterTab_writeSchemaConf(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ Uint32 key = callbackData;
+ CreateTableRecordPtr alterTabPtr;
+ ndbrequire(c_opCreateTable.find(alterTabPtr, key));
+ CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
+ Uint32 tableId = regAlterTabPtr->m_alterTableId;
+
+ Callback callback;
+ callback.m_callbackData = regAlterTabPtr->key;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::alterTab_writeTableConf);
+
+ SegmentedSectionPtr tabInfoPtr;
+ getSection(tabInfoPtr, regAlterTabPtr->m_tabInfoPtrI);
+
+ writeTableFile(signal, tableId, tabInfoPtr, &callback);
+
+ signal->setSection(tabInfoPtr, 0);
+ releaseSections(signal);
+}
+
+void
+Dbdict::alterTab_writeTableConf(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ CreateTableRecordPtr alterTabPtr;
+ ndbrequire(c_opCreateTable.find(alterTabPtr, callbackData));
+ CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
+ Uint32 coordinatorRef = regAlterTabPtr->m_coordinatorRef;
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_alterTableId);
+
+ // Alter table commit request handled successfully
+ AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = callbackData;
+ conf->tableId = tabPtr.p->tableId;
+ conf->tableVersion = tabPtr.p->tableVersion;
+ conf->gci = tabPtr.p->gciTableCreated;
+ conf->requestType = AlterTabReq::AlterTableCommit;
+ sendSignal(coordinatorRef, GSN_ALTER_TAB_CONF, signal,
+ AlterTabConf::SignalLength, JBB);
+ if(coordinatorRef != reference()) {
+ jam();
+ // Release resources
+ c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_tablePtrI);
+ releaseTableObject(tabPtr.i, false);
+ c_opCreateTable.release(alterTabPtr);
+ c_blockState = BS_IDLE;
+ }
+}
+
+void
+Dbdict::execCREATE_FRAGMENTATION_REF(Signal * signal){
+ jamEntry();
+ const Uint32 * theData = signal->getDataPtr();
+ CreateFragmentationRef * const ref = (CreateFragmentationRef*)theData;
+ (void)ref;
+ ndbrequire(false);
+}
+
+void
+Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){
+ jamEntry();
+ const Uint32 * theData = signal->getDataPtr();
+ CreateFragmentationConf * const conf = (CreateFragmentationConf*)theData;
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
+
+ ndbrequire(signal->getNoOfSections() == 1);
+
+ SegmentedSectionPtr fragDataPtr;
+ signal->getSection(fragDataPtr, CreateFragmentationConf::FRAGMENTS);
+ signal->header.m_noOfSections = 0;
+
+ /**
+ * Get table
+ */
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+
+ /**
+ * Save fragment count
+ */
+ tabPtr.p->fragmentCount = conf->noOfFragments;
+
+ /**
+ * Update table version
+ */
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ SchemaFile::TableEntry * tabEntry = getTableEntry(xsf, tabPtr.i);
+
+ tabPtr.p->tableVersion = tabEntry->m_tableVersion + 1;
+
+ /**
+ * Pack
+ */
+ SimplePropertiesSectionWriter w(getSectionSegmentPool());
+ packTableIntoPagesImpl(w, tabPtr);
+
+ SegmentedSectionPtr spDataPtr;
+ w.getPtr(spDataPtr);
+
+ signal->setSection(spDataPtr, CreateTabReq::DICT_TAB_INFO);
+ signal->setSection(fragDataPtr, CreateTabReq::FRAGMENTATION);
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter);
+ createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ;
+ createTabPtr.p->m_coordinatorData.m_requestType = CreateTabReq::CreateTablePrepare;
+ tmp.init<CreateTabRef>(rg, GSN_CREATE_TAB_REF, createTabPtr.p->key);
+
+ CreateTabReq * const req = (CreateTabReq*)theData;
+ req->senderRef = reference();
+ req->senderData = createTabPtr.p->key;
+ req->clientRef = createTabPtr.p->m_senderRef;
+ req->clientData = createTabPtr.p->m_senderData;
+ req->requestType = CreateTabReq::CreateTablePrepare;
+
+ req->gci = 0;
+ req->tableId = tabPtr.i;
+ req->tableVersion = tabEntry->m_tableVersion + 1;
+
+ sendFragmentedSignal(rg, GSN_CREATE_TAB_REQ, signal,
+ CreateTabReq::SignalLength, JBB);
+
+ return;
+}
+
+void
+Dbdict::execCREATE_TAB_REF(Signal* signal){
+ jamEntry();
+
+ CreateTabRef * const ref = (CreateTabRef*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData));
+
+ ndbrequire(createTabPtr.p->m_coordinatorRef == reference());
+ ndbrequire(createTabPtr.p->m_coordinatorData.m_gsn == GSN_CREATE_TAB_REQ);
+
+ if(ref->errorCode != CreateTabRef::NF_FakeErrorREF){
+ createTabPtr.p->setErrorCode(ref->errorCode);
+ }
+ createTab_reply(signal, createTabPtr, refToNode(ref->senderRef));
+}
+
+void
+Dbdict::execCREATE_TAB_CONF(Signal* signal){
+ jamEntry();
+
+ ndbrequire(signal->getNoOfSections() == 0);
+
+ CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
+
+ ndbrequire(createTabPtr.p->m_coordinatorRef == reference());
+ ndbrequire(createTabPtr.p->m_coordinatorData.m_gsn == GSN_CREATE_TAB_REQ);
+
+ createTab_reply(signal, createTabPtr, refToNode(conf->senderRef));
+}
+
+void
+Dbdict::createTab_reply(Signal* signal,
+ CreateTableRecordPtr createTabPtr,
+ Uint32 nodeId)
+{
+
+ SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter);
+ if(!tmp.clearWaitingFor(nodeId)){
+ jam();
+ return;
+ }
+
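+  // All participants have replied; continue according to the current phase.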
+ switch(createTabPtr.p->m_coordinatorData.m_requestType){
+ case CreateTabReq::CreateTablePrepare:{
+
+ if(createTabPtr.p->m_errorCode != 0){
+ jam();
+ /**
+       * Failed to prepare on at least one node -> abort on all
+ */
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ;
+ createTabPtr.p->m_coordinatorData.m_requestType = CreateTabReq::CreateTableDrop;
+ ndbrequire(tmp.init<CreateTabRef>(rg, createTabPtr.p->key));
+
+ CreateTabReq * const req = (CreateTabReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = createTabPtr.p->key;
+ req->requestType = CreateTabReq::CreateTableDrop;
+
+ sendSignal(rg, GSN_CREATE_TAB_REQ, signal,
+ CreateTabReq::SignalLength, JBB);
+ return;
+ }
+
+ /**
+     * Lock mutex before committing the table
+ */
+ Mutex mutex(signal, c_mutexMgr, createTabPtr.p->m_startLcpMutex);
+ Callback c = { safe_cast(&Dbdict::createTab_startLcpMutex_locked),
+ createTabPtr.p->key};
+
+ ndbrequire(mutex.lock(c));
+ return;
+ }
+ case CreateTabReq::CreateTableCommit:{
+ jam();
+ ndbrequire(createTabPtr.p->m_errorCode == 0);
+
+ /**
+     * Unlock mutex now that the table has been committed on all participants
+ */
+ Mutex mutex(signal, c_mutexMgr, createTabPtr.p->m_startLcpMutex);
+ Callback c = { safe_cast(&Dbdict::createTab_startLcpMutex_unlocked),
+ createTabPtr.p->key};
+ mutex.unlock(c);
+ return;
+ }
+ case CreateTabReq::CreateTableDrop:{
+ jam();
+ CreateTableRef * const ref = (CreateTableRef*)signal->getDataPtr();
+ ref->senderRef = reference();
+ ref->senderData = createTabPtr.p->m_senderData;
+ ref->errorCode = createTabPtr.p->m_errorCode;
+ ref->masterNodeId = c_masterNodeId;
+ ref->status = 0;
+ ref->errorKey = 0;
+ ref->errorLine = 0;
+
+ //@todo check api failed
+ sendSignal(createTabPtr.p->m_senderRef, GSN_CREATE_TABLE_REF, signal,
+ CreateTableRef::SignalLength, JBB);
+ c_opCreateTable.release(createTabPtr);
+ c_blockState = BS_IDLE;
+ return;
+ }
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::createTab_startLcpMutex_locked(Signal* signal,
+ Uint32 callbackData,
+ Uint32 retValue){
+ jamEntry();
+
+ ndbrequire(retValue == 0);
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
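+  // Mutex acquired; broadcast the commit phase to all alive DICT instances.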
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ;
+ createTabPtr.p->m_coordinatorData.m_requestType = CreateTabReq::CreateTableCommit;
+ SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter);
+ tmp.init<CreateTabRef>(rg, GSN_CREATE_TAB_REF, createTabPtr.p->key);
+
+ CreateTabReq * const req = (CreateTabReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = createTabPtr.p->key;
+ req->requestType = CreateTabReq::CreateTableCommit;
+
+ sendSignal(rg, GSN_CREATE_TAB_REQ, signal,
+ CreateTabReq::SignalLength, JBB);
+}
+
+void
+Dbdict::createTab_startLcpMutex_unlocked(Signal* signal,
+ Uint32 callbackData,
+ Uint32 retValue){
+ jamEntry();
+
+ ndbrequire(retValue == 0);
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ createTabPtr.p->m_startLcpMutex.release(c_mutexMgr);
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+
+ CreateTableConf * const conf = (CreateTableConf*)signal->getDataPtr();
+ conf->senderRef = reference();
+ conf->senderData = createTabPtr.p->m_senderData;
+ conf->tableId = createTabPtr.p->m_tablePtrI;
+ conf->tableVersion = tabPtr.p->tableVersion;
+
+ //@todo check api failed
+ sendSignal(createTabPtr.p->m_senderRef, GSN_CREATE_TABLE_CONF, signal,
+ CreateTableConf::SignalLength, JBB);
+ c_opCreateTable.release(createTabPtr);
+ c_blockState = BS_IDLE;
+ return;
+}
+
+/***********************************************************
+ * CreateTable participant code
+ **********************************************************/
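+// A participant handles three request types: CreateTablePrepare (parse and
+// store the table definition), CreateTableCommit (activate the table) and
+// CreateTableDrop (roll back a failed create).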
+void
+Dbdict::execCREATE_TAB_REQ(Signal* signal){
+ jamEntry();
+
+ if(!assembleFragments(signal)){
+ jam();
+ return;
+ }
+
+ CreateTabReq * const req = (CreateTabReq*)signal->getDataPtr();
+
+ CreateTabReq::RequestType rt = (CreateTabReq::RequestType)req->requestType;
+ switch(rt){
+ case CreateTabReq::CreateTablePrepare:
+ CRASH_INSERTION2(6003, getOwnNodeId() != c_masterNodeId);
+ createTab_prepare(signal, req);
+ return;
+ case CreateTabReq::CreateTableCommit:
+ CRASH_INSERTION2(6004, getOwnNodeId() != c_masterNodeId);
+ createTab_commit(signal, req);
+ return;
+ case CreateTabReq::CreateTableDrop:
+ CRASH_INSERTION2(6005, getOwnNodeId() != c_masterNodeId);
+ createTab_drop(signal, req);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::createTab_prepare(Signal* signal, CreateTabReq * req){
+
+ const Uint32 gci = req->gci;
+ const Uint32 tableId = req->tableId;
+ const Uint32 tableVersion = req->tableVersion;
+
+ SegmentedSectionPtr tabInfoPtr;
+ signal->getSection(tabInfoPtr, CreateTabReq::DICT_TAB_INFO);
+
+ CreateTableRecordPtr createTabPtr;
+ if(req->senderRef == reference()){
+ jam();
+ ndbrequire(c_opCreateTable.find(createTabPtr, req->senderData));
+ } else {
+ jam();
+ c_opCreateTable.seize(createTabPtr);
+
+ ndbrequire(!createTabPtr.isNull());
+
+ createTabPtr.p->key = req->senderData;
+ c_opCreateTable.add(createTabPtr);
+ createTabPtr.p->m_errorCode = 0;
+ createTabPtr.p->m_tablePtrI = tableId;
+ createTabPtr.p->m_coordinatorRef = req->senderRef;
+ createTabPtr.p->m_senderRef = req->clientRef;
+ createTabPtr.p->m_senderData = req->clientData;
+ createTabPtr.p->m_dihAddFragPtr = RNIL;
+
+ /**
+ * Put data into table record
+ */
+ ParseDictTabInfoRecord parseRecord;
+ parseRecord.requestType = DictTabInfo::AddTableFromDict;
+ parseRecord.errorCode = 0;
+
+ SimplePropertiesSectionReader r(tabInfoPtr, getSectionSegmentPool());
+
+ handleTabInfoInit(r, &parseRecord);
+
+ ndbrequire(parseRecord.errorCode == 0);
+ }
+
+ ndbrequire(!createTabPtr.isNull());
+
+ SegmentedSectionPtr fragPtr;
+ signal->getSection(fragPtr, CreateTabReq::FRAGMENTATION);
+
+ createTabPtr.p->m_tabInfoPtrI = tabInfoPtr.i;
+ createTabPtr.p->m_fragmentsPtrI = fragPtr.i;
+
+ signal->header.m_noOfSections = 0;
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, tableId);
+ tabPtr.p->packedSize = tabInfoPtr.sz;
+ tabPtr.p->tableVersion = tableVersion;
+ tabPtr.p->gciTableCreated = gci;
+
+ SchemaFile::TableEntry tabEntry;
+ tabEntry.m_tableVersion = tableVersion;
+ tabEntry.m_tableType = tabPtr.p->tableType;
+ tabEntry.m_tableState = SchemaFile::ADD_STARTED;
+ tabEntry.m_gcp = gci;
+ tabEntry.m_info_words = tabInfoPtr.sz;
+ memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused));
+
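+  // Record the table as ADD_STARTED in the schema file; the flow continues
+  // in createTab_writeSchemaConf1 once the write completes.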
+ Callback callback;
+ callback.m_callbackData = createTabPtr.p->key;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::createTab_writeSchemaConf1);
+
+ updateSchemaState(signal, tableId, &tabEntry, &callback);
+}
+
+void getSection(SegmentedSectionPtr & ptr, Uint32 i);
+
+void
+Dbdict::createTab_writeSchemaConf1(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ Callback callback;
+ callback.m_callbackData = createTabPtr.p->key;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::createTab_writeTableConf);
+
+ SegmentedSectionPtr tabInfoPtr;
+ getSection(tabInfoPtr, createTabPtr.p->m_tabInfoPtrI);
+ writeTableFile(signal, createTabPtr.p->m_tablePtrI, tabInfoPtr, &callback);
+
+ createTabPtr.p->m_tabInfoPtrI = RNIL;
+ signal->setSection(tabInfoPtr, 0);
+ releaseSections(signal);
+}
+
+void
+Dbdict::createTab_writeTableConf(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ SegmentedSectionPtr fragDataPtr;
+ getSection(fragDataPtr, createTabPtr.p->m_fragmentsPtrI);
+
+ Callback callback;
+ callback.m_callbackData = callbackData;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::createTab_dihComplete);
+
+ createTab_dih(signal, createTabPtr, fragDataPtr, &callback);
+}
+
+void
+Dbdict::createTab_dih(Signal* signal,
+ CreateTableRecordPtr createTabPtr,
+ SegmentedSectionPtr fragDataPtr,
+ Callback * c){
+ jam();
+
+ createTabPtr.p->m_callback = * c;
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+
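+  // Ask DIH to add the table; the fragmentation section, if any, is attached
+  // to the request below.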
+ DiAddTabReq * req = (DiAddTabReq*)signal->getDataPtrSend();
+ req->connectPtr = createTabPtr.p->key;
+ req->tableId = tabPtr.i;
+ req->fragType = tabPtr.p->fragmentType;
+ req->kValue = tabPtr.p->kValue;
+ req->noOfReplicas = 0;
+ req->storedTable = tabPtr.p->storedTable;
+ req->tableType = tabPtr.p->tableType;
+ req->schemaVersion = tabPtr.p->tableVersion;
+ req->primaryTableId = tabPtr.p->primaryTableId;
+
+ if(!fragDataPtr.isNull()){
+ signal->setSection(fragDataPtr, DiAddTabReq::FRAGMENTATION);
+ }
+
+ sendSignal(DBDIH_REF, GSN_DIADDTABREQ, signal,
+ DiAddTabReq::SignalLength, JBB);
+}
+
+static
+void
+calcLHbits(Uint32 * lhPageBits, Uint32 * lhDistrBits,
+ Uint32 fid, Uint32 totalFragments)
+{
+ Uint32 distrBits = 0;
+ Uint32 pageBits = 0;
+
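+  // distrBits becomes the number of bits needed to address totalFragments
+  // fragments (ceil(log2)); pageBits is currently always zero.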
+ Uint32 tmp = 1;
+ while (tmp < totalFragments) {
+ jam();
+ tmp <<= 1;
+ distrBits++;
+ }//while
+#ifdef ndb_classical_lhdistrbits
+ if (tmp != totalFragments) {
+ tmp >>= 1;
+ if ((fid >= (totalFragments - tmp)) && (fid < (tmp - 1))) {
+ distrBits--;
+ }//if
+ }//if
+#endif
+ * lhPageBits = pageBits;
+ * lhDistrBits = distrBits;
+
+}//calcLHbits()
+
+
+void
+Dbdict::execADD_FRAGREQ(Signal* signal) {
+ jamEntry();
+
+ AddFragReq * const req = (AddFragReq*)signal->getDataPtr();
+
+ Uint32 dihPtr = req->dihPtr;
+ Uint32 senderData = req->senderData;
+ Uint32 tableId = req->tableId;
+ Uint32 fragId = req->fragmentId;
+ Uint32 node = req->nodeId;
+ Uint32 lcpNo = req->nextLCP;
+ Uint32 fragCount = req->totalFragments;
+ Uint32 requestInfo = req->requestInfo;
+ Uint32 startGci = req->startGci;
+
+ ndbrequire(node == getOwnNodeId());
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, senderData));
+
+ createTabPtr.p->m_dihAddFragPtr = dihPtr;
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, tableId);
+
+#if 0
+  tabPtr.p->gciTableCreated = (startGci > tabPtr.p->gciTableCreated ?
+                               startGci : tabPtr.p->gciTableCreated);
+#endif
+
+ /**
+ * Calc lh3PageBits
+ */
+ Uint32 lhDistrBits = 0;
+ Uint32 lhPageBits = 0;
+ ::calcLHbits(&lhPageBits, &lhDistrBits, fragId, fragCount);
+
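+  // Forward the fragment definition to the local LQH.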
+ {
+ LqhFragReq* req = (LqhFragReq*)signal->getDataPtrSend();
+ req->senderData = senderData;
+ req->senderRef = reference();
+ req->fragmentId = fragId;
+ req->requestInfo = requestInfo;
+ req->tableId = tableId;
+ req->localKeyLength = tabPtr.p->localKeyLen;
+ req->maxLoadFactor = tabPtr.p->maxLoadFactor;
+ req->minLoadFactor = tabPtr.p->minLoadFactor;
+ req->kValue = tabPtr.p->kValue;
+ req->lh3DistrBits = 0; //lhDistrBits;
+ req->lh3PageBits = 0; //lhPageBits;
+ req->noOfAttributes = tabPtr.p->noOfAttributes;
+ req->noOfNullAttributes = tabPtr.p->noOfNullBits;
+ req->noOfPagesToPreAllocate = 0;
+ req->schemaVersion = tabPtr.p->tableVersion;
+ Uint32 keyLen = tabPtr.p->tupKeyLength;
+ req->keyLength = keyLen; // wl-2066 no more "long keys"
+ req->nextLCP = lcpNo;
+
+ req->noOfKeyAttr = tabPtr.p->noOfPrimkey;
+ req->noOfNewAttr = 0;
+ // noOfCharsets passed to TUP in upper half
+ req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16);
+ req->checksumIndicator = 1;
+ req->noOfAttributeGroups = 1;
+ req->GCPIndicator = 0;
+ req->startGci = startGci;
+ req->tableType = tabPtr.p->tableType;
+ req->primaryTableId = tabPtr.p->primaryTableId;
+ sendSignal(DBLQH_REF, GSN_LQHFRAGREQ, signal,
+ LqhFragReq::SignalLength, JBB);
+ }
+}
+
+void
+Dbdict::execLQHFRAGREF(Signal * signal){
+ jamEntry();
+ LqhFragRef * const ref = (LqhFragRef*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData));
+
+ createTabPtr.p->setErrorCode(ref->errorCode);
+
+ {
+ AddFragRef * const ref = (AddFragRef*)signal->getDataPtr();
+ ref->dihPtr = createTabPtr.p->m_dihAddFragPtr;
+ sendSignal(DBDIH_REF, GSN_ADD_FRAGREF, signal,
+ AddFragRef::SignalLength, JBB);
+ }
+}
+
+void
+Dbdict::execLQHFRAGCONF(Signal * signal){
+ jamEntry();
+ LqhFragConf * const conf = (LqhFragConf*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
+
+ createTabPtr.p->m_lqhFragPtr = conf->lqhFragPtr;
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+ sendLQHADDATTRREQ(signal, createTabPtr, tabPtr.p->firstAttribute);
+}
+
+void
+Dbdict::sendLQHADDATTRREQ(Signal* signal,
+ CreateTableRecordPtr createTabPtr,
+ Uint32 attributePtrI){
+ jam();
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+ LqhAddAttrReq * const req = (LqhAddAttrReq*)signal->getDataPtrSend();
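+  // Pack at most LqhAddAttrReq::MAX_ATTRIBUTES attributes per request; the
+  // rest of the chain is continued from execLQHADDATTCONF via senderAttrPtr.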
+ Uint32 i = 0;
+ for(i = 0; i<LqhAddAttrReq::MAX_ATTRIBUTES && attributePtrI != RNIL; i++){
+ jam();
+ AttributeRecordPtr attrPtr;
+ c_attributeRecordPool.getPtr(attrPtr, attributePtrI);
+ LqhAddAttrReq::Entry& entry = req->attributes[i];
+ entry.attrId = attrPtr.p->attributeId;
+ entry.attrDescriptor = attrPtr.p->attributeDescriptor;
+ entry.extTypeInfo = 0;
+ // charset number passed to TUP, TUX in upper half
+ entry.extTypeInfo |= (attrPtr.p->extPrecision & ~0xFFFF);
+ if (tabPtr.p->isIndex()) {
+ Uint32 primaryAttrId;
+ if (attrPtr.p->nextAttrInTable != RNIL) {
+ getIndexAttr(tabPtr, attributePtrI, &primaryAttrId);
+ } else {
+ primaryAttrId = ZNIL;
+ if (tabPtr.p->isOrderedIndex())
+ entry.attrId = 0; // attribute goes to TUP
+ }
+ entry.attrId |= (primaryAttrId << 16);
+ }
+ attributePtrI = attrPtr.p->nextAttrInTable;
+ }
+ req->lqhFragPtr = createTabPtr.p->m_lqhFragPtr;
+ req->senderData = createTabPtr.p->key;
+ req->senderAttrPtr = attributePtrI;
+ req->noOfAttributes = i;
+
+ sendSignal(DBLQH_REF, GSN_LQHADDATTREQ, signal,
+ LqhAddAttrReq::HeaderLength + LqhAddAttrReq::EntryLength * i, JBB);
+}
+
+void
+Dbdict::execLQHADDATTREF(Signal * signal){
+ jamEntry();
+ LqhAddAttrRef * const ref = (LqhAddAttrRef*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData));
+
+ createTabPtr.p->setErrorCode(ref->errorCode);
+
+ {
+ AddFragRef * const ref = (AddFragRef*)signal->getDataPtr();
+ ref->dihPtr = createTabPtr.p->m_dihAddFragPtr;
+ sendSignal(DBDIH_REF, GSN_ADD_FRAGREF, signal,
+ AddFragRef::SignalLength, JBB);
+ }
+
+}
+
+void
+Dbdict::execLQHADDATTCONF(Signal * signal){
+ jamEntry();
+ LqhAddAttrConf * const conf = (LqhAddAttrConf*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
+
+ const Uint32 fragId = conf->fragId;
+ const Uint32 nextAttrPtr = conf->senderAttrPtr;
+ if(nextAttrPtr != RNIL){
+ jam();
+ sendLQHADDATTRREQ(signal, createTabPtr, nextAttrPtr);
+ return;
+ }
+
+ {
+ AddFragConf * const conf = (AddFragConf*)signal->getDataPtr();
+ conf->dihPtr = createTabPtr.p->m_dihAddFragPtr;
+ conf->fragId = fragId;
+ sendSignal(DBDIH_REF, GSN_ADD_FRAGCONF, signal,
+ AddFragConf::SignalLength, JBB);
+ }
+}
+
+void
+Dbdict::execDIADDTABREF(Signal* signal){
+ jam();
+
+ DiAddTabRef * const ref = (DiAddTabRef*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData));
+
+ createTabPtr.p->setErrorCode(ref->errorCode);
+ execute(signal, createTabPtr.p->m_callback, 0);
+}
+
+void
+Dbdict::execDIADDTABCONF(Signal* signal){
+ jam();
+
+ DiAddTabConf * const conf = (DiAddTabConf*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
+
+ signal->theData[0] = createTabPtr.p->key;
+ signal->theData[1] = reference();
+ signal->theData[2] = createTabPtr.p->m_tablePtrI;
+
+ if(createTabPtr.p->m_dihAddFragPtr != RNIL){
+ jam();
+
+ /**
+ * We did perform at least one LQHFRAGREQ
+ */
+ sendSignal(DBLQH_REF, GSN_TAB_COMMITREQ, signal, 3, JBB);
+ return;
+ } else {
+ /**
+ * No local fragment (i.e. no LQHFRAGREQ)
+ */
+ execute(signal, createTabPtr.p->m_callback, 0);
+ return;
+ //sendSignal(DBDIH_REF, GSN_TAB_COMMITREQ, signal, 3, JBB);
+ }
+}
+
+void
+Dbdict::execTAB_COMMITREF(Signal* signal) {
+ jamEntry();
+ ndbrequire(false);
+}//execTAB_COMMITREF()
+
+void
+Dbdict::execTAB_COMMITCONF(Signal* signal){
+ jamEntry();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, signal->theData[0]));
+
+ if(refToBlock(signal->getSendersBlockRef()) == DBLQH){
+
+ execute(signal, createTabPtr.p->m_callback, 0);
+ return;
+ }
+
+ if(refToBlock(signal->getSendersBlockRef()) == DBDIH){
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+
+ signal->theData[0] = tabPtr.i;
+ signal->theData[1] = tabPtr.p->tableVersion;
+ signal->theData[2] = (Uint32)tabPtr.p->storedTable;
+ signal->theData[3] = reference();
+ signal->theData[4] = (Uint32)tabPtr.p->tableType;
+ signal->theData[5] = createTabPtr.p->key;
+ signal->theData[6] = (Uint32)tabPtr.p->noOfPrimkey;
+
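+    // Collect the descriptor and charset number of each key attribute and
+    // ship them as a linear section with TC_SCHVERREQ.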
+ Uint32 buf[2 * MAX_ATTRIBUTES_IN_INDEX];
+ Uint32 sz = 0;
+ Uint32 tAttr = tabPtr.p->firstAttribute;
+ while (tAttr != RNIL) {
+ jam();
+ AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
+ if (aRec->tupleKey) {
+ buf[sz++] = aRec->attributeDescriptor;
+ buf[sz++] = (aRec->extPrecision >> 16); // charset number
+ }
+ tAttr = aRec->nextAttrInTable;
+ }
+ ndbrequire((int)sz == 2 * tabPtr.p->noOfPrimkey);
+
+ LinearSectionPtr lsPtr[3];
+ lsPtr[0].p = buf;
+ lsPtr[0].sz = sz;
+ // note: ACC does not reply
+ if (tabPtr.p->isTable() || tabPtr.p->isHashIndex())
+ sendSignal(DBACC_REF, GSN_TC_SCHVERREQ, signal, 7, JBB, lsPtr, 1);
+ sendSignal(DBTC_REF, GSN_TC_SCHVERREQ, signal, 7, JBB, lsPtr, 1);
+ return;
+ }
+
+ ndbrequire(false);
+}
+
+void
+Dbdict::createTab_dihComplete(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ //@todo check for master failed
+
+ if(createTabPtr.p->m_errorCode == 0){
+ jam();
+
+ CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr();
+ conf->senderRef = reference();
+ conf->senderData = createTabPtr.p->key;
+ sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_CONF,
+ signal, CreateTabConf::SignalLength, JBB);
+ return;
+ }
+
+ CreateTabRef * const ref = (CreateTabRef*)signal->getDataPtr();
+ ref->senderRef = reference();
+ ref->senderData = createTabPtr.p->key;
+ ref->errorCode = createTabPtr.p->m_errorCode;
+ ref->errorLine = 0;
+ ref->errorKey = 0;
+ ref->errorStatus = 0;
+
+ sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_REF,
+ signal, CreateTabRef::SignalLength, JBB);
+}
+
+void
+Dbdict::createTab_commit(Signal * signal, CreateTabReq * req){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, req->senderData));
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+
+ SchemaFile::TableEntry tabEntry;
+ tabEntry.m_tableVersion = tabPtr.p->tableVersion;
+ tabEntry.m_tableType = tabPtr.p->tableType;
+ tabEntry.m_tableState = SchemaFile::TABLE_ADD_COMMITTED;
+ tabEntry.m_gcp = tabPtr.p->gciTableCreated;
+ tabEntry.m_info_words = tabPtr.p->packedSize;
+ memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused));
+
+ Callback callback;
+ callback.m_callbackData = createTabPtr.p->key;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::createTab_writeSchemaConf2);
+
+ updateSchemaState(signal, tabPtr.i, &tabEntry, &callback);
+}
+
+void
+Dbdict::createTab_writeSchemaConf2(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ Callback c;
+ c.m_callbackData = callbackData;
+ c.m_callbackFunction = safe_cast(&Dbdict::createTab_alterComplete);
+ alterTab_activate(signal, createTabPtr, &c);
+}
+
+void
+Dbdict::createTab_alterComplete(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+ tabPtr.p->tabState = TableRecord::DEFINED;
+
+ //@todo check error
+ //@todo check master failed
+
+ CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr();
+ conf->senderRef = reference();
+ conf->senderData = createTabPtr.p->key;
+ sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_CONF,
+ signal, CreateTabConf::SignalLength, JBB);
+
+ if(createTabPtr.p->m_coordinatorRef != reference()){
+ jam();
+ c_opCreateTable.release(createTabPtr);
+ }
+}
+
+void
+Dbdict::createTab_drop(Signal* signal, CreateTabReq * req){
+ jam();
+
+ const Uint32 key = req->senderData;
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, key));
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+ tabPtr.p->tabState = TableRecord::DROPPING;
+
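+  // Roll back the failed create by reusing the drop-table participant
+  // machinery with requestType CreateTabDrop.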
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.seize(dropTabPtr));
+
+ dropTabPtr.p->key = key;
+ c_opDropTable.add(dropTabPtr);
+
+ dropTabPtr.p->m_errorCode = 0;
+ dropTabPtr.p->m_request.tableId = createTabPtr.p->m_tablePtrI;
+ dropTabPtr.p->m_requestType = DropTabReq::CreateTabDrop;
+ dropTabPtr.p->m_coordinatorRef = createTabPtr.p->m_coordinatorRef;
+ dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_REQ;
+
+ dropTabPtr.p->m_participantData.m_block = 0;
+ dropTabPtr.p->m_participantData.m_callback.m_callbackData = req->senderData;
+ dropTabPtr.p->m_participantData.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::createTab_dropComplete);
+ dropTab_nextStep(signal, dropTabPtr);
+}
+
+void
+Dbdict::createTab_dropComplete(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, callbackData));
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+
+ releaseTableObject(tabPtr.i);
+
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tabPtr.i);
+ tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED;
+
+ //@todo check error
+ //@todo check master failed
+
+ CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr();
+ conf->senderRef = reference();
+ conf->senderData = createTabPtr.p->key;
+ sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_CONF,
+ signal, CreateTabConf::SignalLength, JBB);
+
+ if(createTabPtr.p->m_coordinatorRef != reference()){
+ jam();
+ c_opCreateTable.release(createTabPtr);
+ }
+
+ c_opDropTable.release(dropTabPtr);
+}
+
+void
+Dbdict::alterTab_activate(Signal* signal, CreateTableRecordPtr createTabPtr,
+ Callback * c){
+
+ createTabPtr.p->m_callback = * c;
+
+ signal->theData[0] = createTabPtr.p->key;
+ signal->theData[1] = reference();
+ signal->theData[2] = createTabPtr.p->m_tablePtrI;
+ sendSignal(DBDIH_REF, GSN_TAB_COMMITREQ, signal, 3, JBB);
+}
+
+void
+Dbdict::execTC_SCHVERCONF(Signal* signal){
+ jamEntry();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, signal->theData[1]));
+
+ execute(signal, createTabPtr.p->m_callback, 0);
+}
+
+#define tabRequire(cond, error) \
+ if (!(cond)) { \
+ jam(); \
+ parseP->errorCode = error; parseP->errorLine = __LINE__; \
+ parseP->errorKey = it.getKey(); \
+ return; \
+ }//if
+
+// handleAddTableFailure(signal, __LINE__, allocatedTable);
+
+void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
+ ParseDictTabInfoRecord * parseP,
+ bool checkExist)
+{
+/* ---------------------------------------------------------------- */
+// We always start by handling the table name since this must be the first
+// item in the list. From the table name we can determine whether it is a
+// valid name, a new table or an already existing table.
+/* ---------------------------------------------------------------- */
+
+ it.first();
+
+ SimpleProperties::UnpackStatus status;
+ DictTabInfo::Table tableDesc; tableDesc.init();
+ status = SimpleProperties::unpack(it, &tableDesc,
+ DictTabInfo::TableMapping,
+ DictTabInfo::TableMappingSize,
+ true, true);
+
+ if(status != SimpleProperties::Break){
+ parseP->errorCode = CreateTableRef::InvalidFormat;
+ parseP->status = status;
+ parseP->errorKey = it.getKey();
+ parseP->errorLine = __LINE__;
+ return;
+ }
+
+ if(parseP->requestType == DictTabInfo::AlterTableFromAPI)
+ {
+ ndbrequire(!checkExist);
+ }
+ if(!checkExist)
+ {
+ ndbrequire(parseP->requestType == DictTabInfo::AlterTableFromAPI);
+ }
+
+ /* ---------------------------------------------------------------- */
+ // Verify that table name is an allowed table name.
+ // TODO
+ /* ---------------------------------------------------------------- */
+ const Uint32 tableNameLength = strlen(tableDesc.TableName) + 1;
+
+ TableRecord keyRecord;
+ tabRequire(tableNameLength <= sizeof(keyRecord.tableName),
+ CreateTableRef::TableNameTooLong);
+ strcpy(keyRecord.tableName, tableDesc.TableName);
+
+ TableRecordPtr tablePtr;
+ c_tableRecordHash.find(tablePtr, keyRecord);
+
+ if (checkExist){
+ jam();
+ /* ---------------------------------------------------------------- */
+ // Check if table already existed.
+ /* ---------------------------------------------------------------- */
+ tabRequire(tablePtr.i == RNIL, CreateTableRef::TableAlreadyExist);
+ }
+
+ switch (parseP->requestType) {
+ case DictTabInfo::CreateTableFromAPI: {
+ jam();
+ }
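+  // Fall through: CreateTableFromAPI allocates a table record the same way.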
+ case DictTabInfo::AlterTableFromAPI:{
+ jam();
+ tablePtr.i = getFreeTableRecord(tableDesc.PrimaryTableId);
+ /* ---------------------------------------------------------------- */
+ // Check if no free tables existed.
+ /* ---------------------------------------------------------------- */
+ tabRequire(tablePtr.i != RNIL, CreateTableRef::NoMoreTableRecords);
+
+ c_tableRecordPool.getPtr(tablePtr);
+ break;
+ }
+ case DictTabInfo::AddTableFromDict:
+ case DictTabInfo::ReadTableFromDiskSR:
+ case DictTabInfo::GetTabInfoConf:
+ {
+/* ---------------------------------------------------------------- */
+// Get table id and check that table doesn't already exist
+/* ---------------------------------------------------------------- */
+ tablePtr.i = tableDesc.TableId;
+
+ if (parseP->requestType == DictTabInfo::ReadTableFromDiskSR) {
+ ndbrequire(tablePtr.i == c_restartRecord.activeTable);
+ }//if
+ if (parseP->requestType == DictTabInfo::GetTabInfoConf) {
+ ndbrequire(tablePtr.i == c_restartRecord.activeTable);
+ }//if
+
+ c_tableRecordPool.getPtr(tablePtr);
+ ndbrequire(tablePtr.p->tabState == TableRecord::NOT_DEFINED);
+
+ //Uint32 oldTableVersion = tablePtr.p->tableVersion;
+ initialiseTableRecord(tablePtr);
+ if (parseP->requestType == DictTabInfo::AddTableFromDict) {
+ jam();
+ tablePtr.p->tabState = TableRecord::DEFINING;
+ }//if
+#ifdef HAVE_TABLE_REORG
+/* ---------------------------------------------------------------- */
+// Get id of second table id and check that table doesn't already exist
+// and set up links between first and second table.
+/* ---------------------------------------------------------------- */
+ TableRecordPtr secondTablePtr;
+ secondTablePtr.i = tableDesc.SecondTableId;
+ c_tableRecordPool.getPtr(secondTablePtr);
+ ndbrequire(secondTablePtr.p->tabState == TableRecord::NOT_DEFINED);
+
+ initialiseTableRecord(secondTablePtr);
+ secondTablePtr.p->tabState = TableRecord::REORG_TABLE_PREPARED;
+ secondTablePtr.p->secondTable = tablePtr.i;
+ tablePtr.p->secondTable = secondTablePtr.i;
+#endif
+/* ---------------------------------------------------------------- */
+// Set table version
+/* ---------------------------------------------------------------- */
+ Uint32 tableVersion = tableDesc.TableVersion;
+ tablePtr.p->tableVersion = tableVersion;
+
+ break;
+ }
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ parseP->tablePtr = tablePtr;
+
+ strcpy(tablePtr.p->tableName, keyRecord.tableName);
+ if (parseP->requestType != DictTabInfo::AlterTableFromAPI) {
+ jam();
+#ifdef VM_TRACE
+ ndbout_c("Dbdict: name=%s,id=%u", tablePtr.p->tableName, tablePtr.i);
+ TableRecordPtr tmp;
+ ndbrequire(!c_tableRecordHash.find(tmp, * tablePtr.p));
+#endif
+ c_tableRecordHash.add(tablePtr);
+ }
+
+ //tablePtr.p->noOfPrimkey = tableDesc.NoOfKeyAttr;
+ //tablePtr.p->noOfNullAttr = tableDesc.NoOfNullable;
+ //tablePtr.p->tupKeyLength = tableDesc.KeyLength;
+ tablePtr.p->noOfAttributes = tableDesc.NoOfAttributes;
+ tablePtr.p->storedTable = tableDesc.TableLoggedFlag;
+ tablePtr.p->minLoadFactor = tableDesc.MinLoadFactor;
+ tablePtr.p->maxLoadFactor = tableDesc.MaxLoadFactor;
+ tablePtr.p->fragmentType = (DictTabInfo::FragmentType)tableDesc.FragmentType;
+ tablePtr.p->tableType = (DictTabInfo::TableType)tableDesc.TableType;
+ tablePtr.p->kValue = tableDesc.TableKValue;
+ tablePtr.p->fragmentCount = tableDesc.FragmentCount;
+
+ tablePtr.p->frmLen = tableDesc.FrmLen;
+ memcpy(tablePtr.p->frmData, tableDesc.FrmData, tableDesc.FrmLen);
+
+ tablePtr.p->ngLen = tableDesc.FragmentDataLen;
+ memcpy(tablePtr.p->ngData, tableDesc.FragmentData,
+ tableDesc.FragmentDataLen);
+
+ if(tableDesc.PrimaryTableId != RNIL) {
+
+ tablePtr.p->primaryTableId = tableDesc.PrimaryTableId;
+ tablePtr.p->indexState = (TableRecord::IndexState)tableDesc.IndexState;
+ tablePtr.p->insertTriggerId = tableDesc.InsertTriggerId;
+ tablePtr.p->updateTriggerId = tableDesc.UpdateTriggerId;
+ tablePtr.p->deleteTriggerId = tableDesc.DeleteTriggerId;
+ tablePtr.p->customTriggerId = tableDesc.CustomTriggerId;
+ } else {
+ tablePtr.p->primaryTableId = RNIL;
+ tablePtr.p->indexState = TableRecord::IS_UNDEFINED;
+ tablePtr.p->insertTriggerId = RNIL;
+ tablePtr.p->updateTriggerId = RNIL;
+ tablePtr.p->deleteTriggerId = RNIL;
+ tablePtr.p->customTriggerId = RNIL;
+ }
+ tablePtr.p->buildTriggerId = RNIL;
+ tablePtr.p->indexLocal = 0;
+
+ handleTabInfo(it, parseP);
+
+ if(parseP->errorCode != 0)
+ {
+ /**
+ * Release table
+ */
+ releaseTableObject(tablePtr.i, checkExist);
+ }
+}//handleTabInfoInit()
+
+void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
+ ParseDictTabInfoRecord * parseP)
+{
+ TableRecordPtr tablePtr = parseP->tablePtr;
+
+ SimpleProperties::UnpackStatus status;
+
+ Uint32 keyCount = 0;
+ Uint32 keyLength = 0;
+ Uint32 attrCount = tablePtr.p->noOfAttributes;
+ Uint32 nullCount = 0;
+ Uint32 nullBits = 0;
+ Uint32 noOfCharsets = 0;
+ Uint16 charsets[128];
+ Uint32 recordLength = 0;
+ AttributeRecordPtr attrPtr;
+ c_attributeRecordHash.removeAll();
+
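+  // Unpack each attribute in turn, check for duplicate names and build the
+  // attribute descriptor word.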
+ for(Uint32 i = 0; i<attrCount; i++){
+ /**
+ * Attribute Name
+ */
+ DictTabInfo::Attribute attrDesc; attrDesc.init();
+ status = SimpleProperties::unpack(it, &attrDesc,
+ DictTabInfo::AttributeMapping,
+ DictTabInfo::AttributeMappingSize,
+ true, true);
+ if(status != SimpleProperties::Break){
+ parseP->errorCode = CreateTableRef::InvalidFormat;
+ parseP->status = status;
+ parseP->errorKey = it.getKey();
+ parseP->errorLine = __LINE__;
+ return;
+ }
+
+ /**
+ * Check that attribute is not defined twice
+ */
+ AttributeRecord tmpAttr;
+ {
+ strcpy(tmpAttr.attributeName, attrDesc.AttributeName);
+
+ AttributeRecordPtr attrPtr;
+ c_attributeRecordHash.find(attrPtr, tmpAttr);
+
+ if(attrPtr.i != RNIL){
+ parseP->errorCode = CreateTableRef::AttributeNameTwice;
+ return;
+ }
+ }
+
+ if(!getNewAttributeRecord(tablePtr, attrPtr)){
+ jam();
+ parseP->errorCode = CreateTableRef::NoMoreAttributeRecords;
+ return;
+ }
+
+ /**
+ * TmpAttrib to Attribute mapping
+ */
+ strcpy(attrPtr.p->attributeName, attrDesc.AttributeName);
+ attrPtr.p->attributeId = attrDesc.AttributeId;
+ attrPtr.p->tupleKey = (keyCount + 1) * attrDesc.AttributeKeyFlag;
+
+ attrPtr.p->extPrecision = attrDesc.AttributeExtPrecision;
+ attrPtr.p->extScale = attrDesc.AttributeExtScale;
+ attrPtr.p->extLength = attrDesc.AttributeExtLength;
+ // charset in upper half of precision
+ unsigned csNumber = (attrPtr.p->extPrecision >> 16);
+ if (csNumber != 0) {
+ /*
+ * A new charset is first accessed here on this node.
+       * TODO use a separate thread (e.g. via NDBFS) if we need to load it from file
+ */
+ CHARSET_INFO* cs = get_charset(csNumber, MYF(0));
+ if (cs == NULL) {
+ parseP->errorCode = CreateTableRef::InvalidCharset;
+ parseP->errorLine = __LINE__;
+ return;
+ }
+ // XXX should be done somewhere in mysql
+ all_charsets[cs->number] = cs;
+ unsigned i = 0;
+ while (i < noOfCharsets) {
+ if (charsets[i] == csNumber)
+ break;
+ i++;
+ }
+ if (i == noOfCharsets) {
+ noOfCharsets++;
+ if (noOfCharsets > sizeof(charsets)/sizeof(charsets[0])) {
+ parseP->errorCode = CreateTableRef::InvalidFormat;
+ parseP->errorLine = __LINE__;
+ return;
+ }
+ charsets[i] = csNumber;
+ }
+ }
+
+ // compute attribute size and array size
+ bool translateOk = attrDesc.translateExtType();
+ tabRequire(translateOk, CreateTableRef::Inconsistency);
+
+ if(attrDesc.AttributeArraySize > 65535){
+ parseP->errorCode = CreateTableRef::ArraySizeTooBig;
+ parseP->status = status;
+ parseP->errorKey = it.getKey();
+ parseP->errorLine = __LINE__;
+ return;
+ }
+
+ Uint32 desc = 0;
+ AttributeDescriptor::setType(desc, attrDesc.AttributeExtType);
+ AttributeDescriptor::setSize(desc, attrDesc.AttributeSize);
+ AttributeDescriptor::setArray(desc, attrDesc.AttributeArraySize);
+ AttributeDescriptor::setNullable(desc, attrDesc.AttributeNullableFlag);
+ AttributeDescriptor::setDKey(desc, attrDesc.AttributeDKey);
+ AttributeDescriptor::setPrimaryKey(desc, attrDesc.AttributeKeyFlag);
+ attrPtr.p->attributeDescriptor = desc;
+ attrPtr.p->autoIncrement = attrDesc.AttributeAutoIncrement;
+ strcpy(attrPtr.p->defaultValue, attrDesc.AttributeDefaultValue);
+
+ tabRequire(attrDesc.AttributeId == i, CreateTableRef::InvalidFormat);
+
+ attrCount ++;
+ keyCount += attrDesc.AttributeKeyFlag;
+ nullCount += attrDesc.AttributeNullableFlag;
+
+ const Uint32 aSz = (1 << attrDesc.AttributeSize);
+ Uint32 sz;
+ if(aSz != 1)
+ {
+ sz = ((aSz * attrDesc.AttributeArraySize) + 31) >> 5;
+ }
+ else
+ {
+ sz = 0;
+ nullBits += attrDesc.AttributeArraySize;
+ }
+
+ if(attrDesc.AttributeArraySize == 0)
+ {
+ parseP->errorCode = CreateTableRef::InvalidArraySize;
+ parseP->status = status;
+ parseP->errorKey = it.getKey();
+ parseP->errorLine = __LINE__;
+ return;
+ }
+
+ recordLength += sz;
+ if(attrDesc.AttributeKeyFlag){
+ keyLength += sz;
+
+ if(attrDesc.AttributeNullableFlag){
+ parseP->errorCode = CreateTableRef::NullablePrimaryKey;
+ parseP->status = status;
+ parseP->errorKey = it.getKey();
+ parseP->errorLine = __LINE__;
+ return;
+ }
+ }
+
+ if (parseP->requestType != DictTabInfo::AlterTableFromAPI)
+ c_attributeRecordHash.add(attrPtr);
+
+ if(!it.next())
+ break;
+
+ if(it.getKey() != DictTabInfo::AttributeName)
+ break;
+  }//for
+
+ tablePtr.p->noOfPrimkey = keyCount;
+ tablePtr.p->noOfNullAttr = nullCount;
+ tablePtr.p->noOfCharsets = noOfCharsets;
+ tablePtr.p->tupKeyLength = keyLength;
+ tablePtr.p->noOfNullBits = nullCount + nullBits;
+
+  tabRequire(recordLength <= MAX_TUPLE_SIZE_IN_WORDS,
+             CreateTableRef::RecordTooBig);
+ tabRequire(keyLength <= MAX_KEY_SIZE_IN_WORDS,
+ CreateTableRef::InvalidPrimaryKeySize);
+ tabRequire(keyLength > 0,
+ CreateTableRef::InvalidPrimaryKeySize);
+
+}//handleTabInfo()
+
+
+/* ---------------------------------------------------------------- */
+// DICTTABCONF is sent when participants have received all DICTTABINFO
+// and successfully handled it.
+// Also sent to self (DICT master) when the index table has been created.
+/* ---------------------------------------------------------------- */
+void Dbdict::execCREATE_TABLE_CONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 0);
+
+ CreateTableConf * const conf = (CreateTableConf *)signal->getDataPtr();
+ // assume part of create index operation
+ OpCreateIndexPtr opPtr;
+ c_opCreateIndex.find(opPtr, conf->senderData);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->m_request.setIndexId(conf->tableId);
+ opPtr.p->m_request.setIndexVersion(conf->tableVersion);
+ createIndex_fromCreateTable(signal, opPtr);
+}//execCREATE_TABLE_CONF()
+
+void Dbdict::execCREATE_TABLE_REF(Signal* signal)
+{
+ jamEntry();
+
+ CreateTableRef * const ref = (CreateTableRef *)signal->getDataPtr();
+ // assume part of create index operation
+ OpCreateIndexPtr opPtr;
+ c_opCreateIndex.find(opPtr, ref->senderData);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ createIndex_fromCreateTable(signal, opPtr);
+}//execCREATE_TABLE_REF()
+
+/* ---------------------------------------------------------------- */
+// New global checkpoint created.
+/* ---------------------------------------------------------------- */
+void Dbdict::execWAIT_GCP_CONF(Signal* signal)
+{
+#if 0
+ TableRecordPtr tablePtr;
+ jamEntry();
+ WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0];
+ c_tableRecordPool.getPtr(tablePtr, c_connRecord.connTableId);
+ tablePtr.p->gciTableCreated = conf->gcp;
+ sendUpdateSchemaState(signal,
+ tablePtr.i,
+ SchemaFile::TABLE_ADD_COMMITTED,
+ c_connRecord.noOfPagesForTable,
+ conf->gcp);
+#endif
+}//execWAIT_GCP_CONF()
+
+/* ---------------------------------------------------------------- */
+// Refused new global checkpoint.
+/* ---------------------------------------------------------------- */
+void Dbdict::execWAIT_GCP_REF(Signal* signal)
+{
+ jamEntry();
+ WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0];
+/* ---------------------------------------------------------------- */
+// Error Handling code needed
+/* ---------------------------------------------------------------- */
+ progError(ref->errorCode, 0);
+}//execWAIT_GCP_REF()
+
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: DROP TABLE -------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains the code used to drop a table. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
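+/*
+ * The coordinator first runs a prepare round (PREP_DROP_TAB_REQ) through
+ * DBDICT, DBLQH, DBTC and DBDIH on all alive nodes, and then a drop round
+ * (DROP_TAB_REQ) that is handled by the participant code further below.
+ */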
+void
+Dbdict::execDROP_TABLE_REQ(Signal* signal){
+ jamEntry();
+ DropTableReq* req = (DropTableReq*)signal->getDataPtr();
+
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, req->tableId, false);
+ if(tablePtr.isNull()){
+ jam();
+ dropTableRef(signal, req, DropTableRef::NoSuchTable);
+ return;
+ }
+
+ if(getOwnNodeId() != c_masterNodeId){
+ jam();
+ dropTableRef(signal, req, DropTableRef::NotMaster);
+ return;
+ }
+
+ if(c_blockState != BS_IDLE){
+ jam();
+ dropTableRef(signal, req, DropTableRef::Busy);
+ return;
+ }
+
+ const TableRecord::TabState tabState = tablePtr.p->tabState;
+ bool ok = false;
+ switch(tabState){
+ case TableRecord::NOT_DEFINED:
+ case TableRecord::REORG_TABLE_PREPARED:
+ case TableRecord::DEFINING:
+ case TableRecord::CHECKED:
+ jam();
+ dropTableRef(signal, req, DropTableRef::NoSuchTable);
+ return;
+ case TableRecord::DEFINED:
+ ok = true;
+ jam();
+ break;
+ case TableRecord::PREPARE_DROPPING:
+ case TableRecord::DROPPING:
+ jam();
+ dropTableRef(signal, req, DropTableRef::DropInProgress);
+ return;
+ }
+ ndbrequire(ok);
+
+ if(tablePtr.p->tableVersion != req->tableVersion){
+ jam();
+ dropTableRef(signal, req, DropTableRef::InvalidTableVersion);
+ return;
+ }
+
+ /**
+ * Seems ok
+ */
+ DropTableRecordPtr dropTabPtr;
+ c_opDropTable.seize(dropTabPtr);
+
+ if(dropTabPtr.isNull()){
+ jam();
+ dropTableRef(signal, req, DropTableRef::NoDropTableRecordAvailable);
+ return;
+ }
+
+ c_blockState = BS_BUSY;
+
+ dropTabPtr.p->key = ++c_opRecordSequence;
+ c_opDropTable.add(dropTabPtr);
+
+ tablePtr.p->tabState = TableRecord::PREPARE_DROPPING;
+
+ dropTabPtr.p->m_request = * req;
+ dropTabPtr.p->m_errorCode = 0;
+ dropTabPtr.p->m_requestType = DropTabReq::OnlineDropTab;
+ dropTabPtr.p->m_coordinatorRef = reference();
+ dropTabPtr.p->m_coordinatorData.m_gsn = GSN_PREP_DROP_TAB_REQ;
+ dropTabPtr.p->m_coordinatorData.m_block = 0;
+ prepDropTab_nextStep(signal, dropTabPtr);
+}
+
+void
+Dbdict::dropTableRef(Signal * signal,
+ DropTableReq * req, DropTableRef::ErrorCode errCode){
+
+ Uint32 tableId = req->tableId;
+ Uint32 tabVersion = req->tableVersion;
+ Uint32 senderData = req->senderData;
+ Uint32 senderRef = req->senderRef;
+
+ DropTableRef * ref = (DropTableRef*)signal->getDataPtrSend();
+ ref->tableId = tableId;
+ ref->tableVersion = tabVersion;
+ ref->senderData = senderData;
+ ref->senderRef = reference();
+ ref->errorCode = errCode;
+ ref->masterNodeId = c_masterNodeId;
+ sendSignal(senderRef, GSN_DROP_TABLE_REF, signal,
+ DropTableRef::SignalLength, JBB);
+}
+
+void
+Dbdict::prepDropTab_nextStep(Signal* signal, DropTableRecordPtr dropTabPtr){
+
+ /**
+ * No errors currently allowed
+ */
+ ndbrequire(dropTabPtr.p->m_errorCode == 0);
+
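+  // The prepare phase visits DBDICT, DBLQH, DBTC and DBDIH in turn; each
+  // round is broadcast to that block on all alive nodes.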
+ Uint32 block = 0;
+ switch(dropTabPtr.p->m_coordinatorData.m_block){
+ case 0:
+ jam();
+ block = dropTabPtr.p->m_coordinatorData.m_block = DBDICT;
+ break;
+ case DBDICT:
+ jam();
+ block = dropTabPtr.p->m_coordinatorData.m_block = DBLQH;
+ break;
+ case DBLQH:
+ jam();
+ block = dropTabPtr.p->m_coordinatorData.m_block = DBTC;
+ break;
+ case DBTC:
+ jam();
+ block = dropTabPtr.p->m_coordinatorData.m_block = DBDIH;
+ break;
+ case DBDIH:
+ jam();
+ prepDropTab_complete(signal, dropTabPtr);
+ return;
+ default:
+ ndbrequire(false);
+ }
+
+ PrepDropTabReq * prep = (PrepDropTabReq*)signal->getDataPtrSend();
+ prep->senderRef = reference();
+ prep->senderData = dropTabPtr.p->key;
+ prep->tableId = dropTabPtr.p->m_request.tableId;
+ prep->requestType = dropTabPtr.p->m_requestType;
+
+ dropTabPtr.p->m_coordinatorData.m_signalCounter = c_aliveNodes;
+ NodeReceiverGroup rg(block, c_aliveNodes);
+ sendSignal(rg, GSN_PREP_DROP_TAB_REQ, signal,
+ PrepDropTabReq::SignalLength, JBB);
+
+#if 0
+ for (Uint32 i = 1; i < MAX_NDB_NODES; i++){
+ if(c_aliveNodes.get(i)){
+ jam();
+ BlockReference ref = numberToRef(block, i);
+
+ dropTabPtr.p->m_coordinatorData.m_signalCounter.setWaitingFor(i);
+ }
+ }
+#endif
+}
+
+void
+Dbdict::execPREP_DROP_TAB_CONF(Signal * signal){
+ jamEntry();
+
+ PrepDropTabConf * prep = (PrepDropTabConf*)signal->getDataPtr();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, prep->senderData));
+
+ ndbrequire(dropTabPtr.p->m_coordinatorRef == reference());
+ ndbrequire(dropTabPtr.p->m_request.tableId == prep->tableId);
+ ndbrequire(dropTabPtr.p->m_coordinatorData.m_gsn == GSN_PREP_DROP_TAB_REQ);
+
+ Uint32 nodeId = refToNode(prep->senderRef);
+ dropTabPtr.p->m_coordinatorData.m_signalCounter.clearWaitingFor(nodeId);
+
+ if(!dropTabPtr.p->m_coordinatorData.m_signalCounter.done()){
+ jam();
+ return;
+ }
+ prepDropTab_nextStep(signal, dropTabPtr);
+}
+
+void
+Dbdict::execPREP_DROP_TAB_REF(Signal* signal){
+ jamEntry();
+
+ PrepDropTabRef * prep = (PrepDropTabRef*)signal->getDataPtr();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, prep->senderData));
+
+ ndbrequire(dropTabPtr.p->m_coordinatorRef == reference());
+ ndbrequire(dropTabPtr.p->m_request.tableId == prep->tableId);
+ ndbrequire(dropTabPtr.p->m_coordinatorData.m_gsn == GSN_PREP_DROP_TAB_REQ);
+
+ Uint32 nodeId = refToNode(prep->senderRef);
+ dropTabPtr.p->m_coordinatorData.m_signalCounter.clearWaitingFor(nodeId);
+
+ Uint32 block = refToBlock(prep->senderRef);
+ if((prep->errorCode == PrepDropTabRef::NoSuchTable && block == DBLQH) ||
+ (prep->errorCode == PrepDropTabRef::NF_FakeErrorREF)){
+ jam();
+ /**
+ * Ignore errors:
+     * 1) NoSuchTable from LQH, since the table might not exist in every LQH
+ * 2) node failure...
+ */
+ } else {
+ dropTabPtr.p->setErrorCode((Uint32)prep->errorCode);
+ }
+
+ if(!dropTabPtr.p->m_coordinatorData.m_signalCounter.done()){
+ jam();
+ return;
+ }
+ prepDropTab_nextStep(signal, dropTabPtr);
+}
+
+void
+Dbdict::prepDropTab_complete(Signal* signal, DropTableRecordPtr dropTabPtr){
+ jam();
+
+ dropTabPtr.p->m_coordinatorData.m_gsn = GSN_DROP_TAB_REQ;
+ dropTabPtr.p->m_coordinatorData.m_block = DBDICT;
+
+ DropTabReq * req = (DropTabReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = dropTabPtr.p->key;
+ req->tableId = dropTabPtr.p->m_request.tableId;
+ req->requestType = dropTabPtr.p->m_requestType;
+
+ dropTabPtr.p->m_coordinatorData.m_signalCounter = c_aliveNodes;
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_DROP_TAB_REQ, signal,
+ DropTabReq::SignalLength, JBB);
+}
+
+void
+Dbdict::execDROP_TAB_REF(Signal* signal){
+ jamEntry();
+
+ DropTabRef * const req = (DropTabRef*)signal->getDataPtr();
+
+ Uint32 block = refToBlock(req->senderRef);
+ ndbrequire(req->errorCode == DropTabRef::NF_FakeErrorREF ||
+ (req->errorCode == DropTabRef::NoSuchTable &&
+ (block == DBTUP || block == DBACC || block == DBLQH)));
+
+ if(block != DBDICT){
+ jam();
+ ndbrequire(refToNode(req->senderRef) == getOwnNodeId());
+ dropTab_localDROP_TAB_CONF(signal);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::execDROP_TAB_CONF(Signal* signal){
+ jamEntry();
+
+ DropTabConf * const req = (DropTabConf*)signal->getDataPtr();
+
+ if(refToBlock(req->senderRef) != DBDICT){
+ jam();
+ ndbrequire(refToNode(req->senderRef) == getOwnNodeId());
+ dropTab_localDROP_TAB_CONF(signal);
+ return;
+ }
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, req->senderData));
+
+ ndbrequire(dropTabPtr.p->m_coordinatorRef == reference());
+ ndbrequire(dropTabPtr.p->m_request.tableId == req->tableId);
+ ndbrequire(dropTabPtr.p->m_coordinatorData.m_gsn == GSN_DROP_TAB_REQ);
+
+ Uint32 nodeId = refToNode(req->senderRef);
+ dropTabPtr.p->m_coordinatorData.m_signalCounter.clearWaitingFor(nodeId);
+
+ if(!dropTabPtr.p->m_coordinatorData.m_signalCounter.done()){
+ jam();
+ return;
+ }
+
+ DropTableConf* conf = (DropTableConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = dropTabPtr.p->m_request.senderData;
+ conf->tableId = dropTabPtr.p->m_request.tableId;
+ conf->tableVersion = dropTabPtr.p->m_request.tableVersion;
+
+ Uint32 ref = dropTabPtr.p->m_request.senderRef;
+ sendSignal(ref, GSN_DROP_TABLE_CONF, signal,
+ DropTableConf::SignalLength, JBB);
+
+ c_opDropTable.release(dropTabPtr);
+ c_blockState = BS_IDLE;
+}
+
+/**
+ * DROP TABLE PARTICIPANT CODE
+ */
+void
+Dbdict::execPREP_DROP_TAB_REQ(Signal* signal){
+ jamEntry();
+ PrepDropTabReq * prep = (PrepDropTabReq*)signal->getDataPtrSend();
+
+ DropTableRecordPtr dropTabPtr;
+ if(prep->senderRef == reference()){
+ jam();
+ ndbrequire(c_opDropTable.find(dropTabPtr, prep->senderData));
+ ndbrequire(dropTabPtr.p->m_requestType == prep->requestType);
+ } else {
+ jam();
+ c_opDropTable.seize(dropTabPtr);
+ if(!dropTabPtr.isNull()){
+ dropTabPtr.p->key = prep->senderData;
+ c_opDropTable.add(dropTabPtr);
+ }
+ }
+
+ ndbrequire(!dropTabPtr.isNull());
+
+ dropTabPtr.p->m_errorCode = 0;
+ dropTabPtr.p->m_request.tableId = prep->tableId;
+ dropTabPtr.p->m_requestType = prep->requestType;
+ dropTabPtr.p->m_coordinatorRef = prep->senderRef;
+ dropTabPtr.p->m_participantData.m_gsn = GSN_PREP_DROP_TAB_REQ;
+
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, prep->tableId);
+ tablePtr.p->tabState = TableRecord::PREPARE_DROPPING;
+
+ /**
+ * Modify schema
+ */
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tablePtr.i);
+ SchemaFile::TableState tabState =
+ (SchemaFile::TableState)tableEntry->m_tableState;
+ ndbrequire(tabState == SchemaFile::TABLE_ADD_COMMITTED ||
+ tabState == SchemaFile::ALTER_TABLE_COMMITTED);
+ tableEntry->m_tableState = SchemaFile::DROP_TABLE_STARTED;
+ computeChecksum(xsf, tablePtr.i / NDB_SF_PAGE_ENTRIES);
+
+ ndbrequire(c_writeSchemaRecord.inUse == false);
+ c_writeSchemaRecord.inUse = true;
+
+ c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+ c_writeSchemaRecord.newFile = false;
+ c_writeSchemaRecord.firstPage = tablePtr.i / NDB_SF_PAGE_ENTRIES;
+ c_writeSchemaRecord.noOfPages = 1;
+ c_writeSchemaRecord.m_callback.m_callbackData = dropTabPtr.p->key;
+ c_writeSchemaRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::prepDropTab_writeSchemaConf);
+ startWriteSchemaFile(signal);
+}
+
+void
+Dbdict::prepDropTab_writeSchemaConf(Signal* signal,
+ Uint32 dropTabPtrI,
+ Uint32 returnCode){
+ jam();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, dropTabPtrI));
+
+ ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_PREP_DROP_TAB_REQ);
+
+ /**
+   * There should probably be node failure handling here,
+   * to check that the coordinator hasn't died.
+ */
+
+ PrepDropTabConf * prep = (PrepDropTabConf*)signal->getDataPtr();
+ prep->senderRef = reference();
+ prep->senderData = dropTabPtrI;
+ prep->tableId = dropTabPtr.p->m_request.tableId;
+
+ dropTabPtr.p->m_participantData.m_gsn = GSN_PREP_DROP_TAB_CONF;
+ sendSignal(dropTabPtr.p->m_coordinatorRef, GSN_PREP_DROP_TAB_CONF, signal,
+ PrepDropTabConf::SignalLength, JBB);
+}
+
+void
+Dbdict::execDROP_TAB_REQ(Signal* signal){
+ jamEntry();
+ DropTabReq * req = (DropTabReq*)signal->getDataPtrSend();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, req->senderData));
+
+ ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_PREP_DROP_TAB_CONF);
+ dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_REQ;
+
+ ndbrequire(dropTabPtr.p->m_requestType == req->requestType);
+
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, dropTabPtr.p->m_request.tableId);
+ tablePtr.p->tabState = TableRecord::DROPPING;
+
+ dropTabPtr.p->m_participantData.m_block = 0;
+ dropTabPtr.p->m_participantData.m_callback.m_callbackData = dropTabPtr.p->key;
+ dropTabPtr.p->m_participantData.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::dropTab_complete);
+ dropTab_nextStep(signal, dropTabPtr);
+}
+
+#include <DebuggerNames.hpp>
+
+void
+Dbdict::dropTab_nextStep(Signal* signal, DropTableRecordPtr dropTabPtr){
+
+ /**
+ * No errors currently allowed
+ */
+ ndbrequire(dropTabPtr.p->m_errorCode == 0);
+
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, dropTabPtr.p->m_request.tableId);
+
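+  // The local drop visits TC, then ACC/TUP (TUP/TUX for ordered indexes),
+  // then LQH and finally DIH before executing the participant callback.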
+ Uint32 block = 0;
+ switch(dropTabPtr.p->m_participantData.m_block){
+ case 0:
+ jam();
+ block = DBTC;
+ break;
+ case DBTC:
+ jam();
+ if (tablePtr.p->isTable() || tablePtr.p->isHashIndex())
+ block = DBACC;
+ if (tablePtr.p->isOrderedIndex())
+ block = DBTUP;
+ break;
+ case DBACC:
+ jam();
+ block = DBTUP;
+ break;
+ case DBTUP:
+ jam();
+ if (tablePtr.p->isTable() || tablePtr.p->isHashIndex())
+ block = DBLQH;
+ if (tablePtr.p->isOrderedIndex())
+ block = DBTUX;
+ break;
+ case DBTUX:
+ jam();
+ block = DBLQH;
+ break;
+ case DBLQH:
+ jam();
+ block = DBDIH;
+ break;
+ case DBDIH:
+ jam();
+ execute(signal, dropTabPtr.p->m_participantData.m_callback, 0);
+ return;
+ }
+ ndbrequire(block != 0);
+ dropTabPtr.p->m_participantData.m_block = block;
+
+ DropTabReq * req = (DropTabReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = dropTabPtr.p->key;
+ req->tableId = dropTabPtr.p->m_request.tableId;
+ req->requestType = dropTabPtr.p->m_requestType;
+
+ const Uint32 nodeId = getOwnNodeId();
+ dropTabPtr.p->m_participantData.m_signalCounter.clearWaitingFor();
+ dropTabPtr.p->m_participantData.m_signalCounter.setWaitingFor(nodeId);
+ BlockReference ref = numberToRef(block, 0);
+ sendSignal(ref, GSN_DROP_TAB_REQ, signal, DropTabReq::SignalLength, JBB);
+}
+
+void
+Dbdict::dropTab_localDROP_TAB_CONF(Signal* signal){
+ jamEntry();
+
+ DropTabConf * conf = (DropTabConf*)signal->getDataPtr();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, conf->senderData));
+
+ ndbrequire(dropTabPtr.p->m_request.tableId == conf->tableId);
+ ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_DROP_TAB_REQ);
+
+ Uint32 nodeId = refToNode(conf->senderRef);
+ dropTabPtr.p->m_participantData.m_signalCounter.clearWaitingFor(nodeId);
+
+ if(!dropTabPtr.p->m_participantData.m_signalCounter.done()){
+ jam();
+ ndbrequire(false);
+ return;
+ }
+ dropTab_nextStep(signal, dropTabPtr);
+}
+
+void
+Dbdict::dropTab_complete(Signal* signal,
+ Uint32 dropTabPtrI,
+ Uint32 returnCode){
+ jam();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, dropTabPtrI));
+
+ Uint32 tableId = dropTabPtr.p->m_request.tableId;
+
+ /**
+ * Write to schema file
+ */
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tableId);
+ SchemaFile::TableState tabState =
+ (SchemaFile::TableState)tableEntry->m_tableState;
+ ndbrequire(tabState == SchemaFile::DROP_TABLE_STARTED);
+ tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED;
+ computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES);
+
+ ndbrequire(c_writeSchemaRecord.inUse == false);
+ c_writeSchemaRecord.inUse = true;
+
+ c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+ c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES;
+ c_writeSchemaRecord.noOfPages = 1;
+ c_writeSchemaRecord.m_callback.m_callbackData = dropTabPtr.p->key;
+ c_writeSchemaRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::dropTab_writeSchemaConf);
+ startWriteSchemaFile(signal);
+}
+
+void
+Dbdict::dropTab_writeSchemaConf(Signal* signal,
+ Uint32 dropTabPtrI,
+ Uint32 returnCode){
+ jam();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, dropTabPtrI));
+
+ ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_DROP_TAB_REQ);
+
+ dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_CONF;
+
+ releaseTableObject(dropTabPtr.p->m_request.tableId);
+
+ DropTabConf * conf = (DropTabConf*)signal->getDataPtr();
+ conf->senderRef = reference();
+ conf->senderData = dropTabPtrI;
+ conf->tableId = dropTabPtr.p->m_request.tableId;
+
+ sendSignal(dropTabPtr.p->m_coordinatorRef, GSN_DROP_TAB_CONF, signal,
+ DropTabConf::SignalLength, JBB);
+
+ if(dropTabPtr.p->m_coordinatorRef != reference()){
+ c_opDropTable.release(dropTabPtr);
+ }
+}
+
+void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash)
+{
+ TableRecordPtr tablePtr;
+ AttributeRecordPtr attrPtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ if (removeFromHash)
+ {
+#ifdef VM_TRACE
+ TableRecordPtr tmp;
+ ndbrequire(c_tableRecordHash.find(tmp, * tablePtr.p));
+#endif
+ c_tableRecordHash.remove(tablePtr);
+ }
+ tablePtr.p->tabState = TableRecord::NOT_DEFINED;
+
+ Uint32 nextAttrRecord = tablePtr.p->firstAttribute;
+ while (nextAttrRecord != RNIL) {
+ jam();
+/* ---------------------------------------------------------------- */
+// Release all attribute records
+/* ---------------------------------------------------------------- */
+ c_attributeRecordPool.getPtr(attrPtr, nextAttrRecord);
+ nextAttrRecord = attrPtr.p->nextAttrInTable;
+ c_attributeRecordPool.release(attrPtr);
+ }//if
+#ifdef HAVE_TABLE_REORG
+ Uint32 secondTableId = tablePtr.p->secondTable;
+ initialiseTableRecord(tablePtr);
+ c_tableRecordPool.getPtr(tablePtr, secondTableId);
+ initialiseTableRecord(tablePtr);
+#endif
+ return;
+}//releaseTableObject()
+
+/**
+ * DICT receives these on index create and drop.
+ */
+void Dbdict::execDROP_TABLE_CONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 0);
+
+ DropTableConf * const conf = (DropTableConf *)signal->getDataPtr();
+ // assume part of drop index operation
+ OpDropIndexPtr opPtr;
+ c_opDropIndex.find(opPtr, conf->senderData);
+ ndbrequire(! opPtr.isNull());
+ ndbrequire(opPtr.p->m_request.getIndexId() == conf->tableId);
+ ndbrequire(opPtr.p->m_request.getIndexVersion() == conf->tableVersion);
+ dropIndex_fromDropTable(signal, opPtr);
+}
+
+void Dbdict::execDROP_TABLE_REF(Signal* signal)
+{
+ jamEntry();
+
+ DropTableRef * const ref = (DropTableRef *)signal->getDataPtr();
+ // assume part of drop index operation
+ OpDropIndexPtr opPtr;
+ c_opDropIndex.find(opPtr, ref->senderData);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ opPtr.p->m_errorLine = __LINE__;
+ dropIndex_fromDropTable(signal, opPtr);
+}
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: EXTERNAL INTERFACE TO DATA -------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains the code that is used by other modules to */
+/* access the data within DBDICT. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
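+
+/**
+ * In outline: GET_TABLEID_REQ resolves a table name to a table id and
+ * schema version, while GET_TABINFOREQ returns the packed DictTabInfo
+ * for a table looked up by id or by name, returned in a (possibly
+ * fragmented) GET_TABINFO_CONF.
+ */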
+
+void Dbdict::execGET_TABLEDID_REQ(Signal * signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 1);
+ GetTableIdReq const * req = (GetTableIdReq *)signal->getDataPtr();
+ Uint32 senderData = req->senderData;
+ Uint32 senderRef = req->senderRef;
+ Uint32 len = req->len;
+
+ if(len>MAX_TAB_NAME_SIZE)
+ {
+ jam();
+ sendGET_TABLEID_REF((Signal*)signal,
+ (GetTableIdReq *)req,
+ GetTableIdRef::TableNameTooLong);
+ return;
+ }
+
+ char tableName[MAX_TAB_NAME_SIZE];
+ TableRecord keyRecord;
+ SegmentedSectionPtr ssPtr;
+ signal->getSection(ssPtr,GetTableIdReq::TABLE_NAME);
+ copy((Uint32*)tableName, ssPtr);
+ strcpy(keyRecord.tableName, tableName);
+ releaseSections(signal);
+
+ if(len > sizeof(keyRecord.tableName)){
+ jam();
+ sendGET_TABLEID_REF((Signal*)signal,
+ (GetTableIdReq *)req,
+ GetTableIdRef::TableNameTooLong);
+ return;
+ }
+
+ TableRecordPtr tablePtr;
+ if(!c_tableRecordHash.find(tablePtr, keyRecord)) {
+ jam();
+ sendGET_TABLEID_REF((Signal*)signal,
+ (GetTableIdReq *)req,
+ GetTableIdRef::TableNotDefined);
+ return;
+ }
+ GetTableIdConf * conf = (GetTableIdConf *)req;
+ conf->tableId = tablePtr.p->tableId;
+ conf->schemaVersion = tablePtr.p->tableVersion;
+ conf->senderData = senderData;
+ sendSignal(senderRef, GSN_GET_TABLEID_CONF, signal,
+ GetTableIdConf::SignalLength, JBB);
+
+}
+
+
+void Dbdict::sendGET_TABLEID_REF(Signal* signal,
+ GetTableIdReq * req,
+ GetTableIdRef::ErrorCode errorCode)
+{
+ GetTableIdRef * const ref = (GetTableIdRef *)req;
+ /**
+ * The format of GetTableId Req/Ref is the same
+ */
+ BlockReference retRef = req->senderRef;
+ ref->err = errorCode;
+ sendSignal(retRef, GSN_GET_TABLEID_REF, signal,
+ GetTableIdRef::SignalLength, JBB);
+}//sendGET_TABLEID_REF()
+
+/* ---------------------------------------------------------------- */
+// Get a full table description.
+/* ---------------------------------------------------------------- */
+void Dbdict::execGET_TABINFOREQ(Signal* signal)
+{
+ jamEntry();
+ if(!assembleFragments(signal))
+ {
+ return;
+ }
+
+ GetTabInfoReq * const req = (GetTabInfoReq *)&signal->theData[0];
+
+ /**
+ * If I get a GET_TABINFO_REQ from myself
+ * it is one that comes from the time queue
+ */
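+ // In short: while a retrieve is in progress the request is parked on
+ // the time queue (re-sent to ourselves with a 30 ms delay), and at most
+ // MAX_WAITERS new requests are parked before a Busy ref is returned.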
+ bool fromTimeQueue = (signal->senderBlockRef() == reference());
+
+ if (c_retrieveRecord.busyState && fromTimeQueue == true) {
+ jam();
+
+ sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30,
+ signal->length());
+ return;
+ }//if
+
+ const Uint32 MAX_WAITERS = 5;
+
+ if(c_retrieveRecord.busyState && fromTimeQueue == false){
+ jam();
+ if(c_retrieveRecord.noOfWaiters < MAX_WAITERS){
+ jam();
+ c_retrieveRecord.noOfWaiters++;
+
+ sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30,
+ signal->length());
+ return;
+ }
+
+ sendGET_TABINFOREF(signal, req, GetTabInfoRef::Busy);
+ return;
+ }
+
+ if(fromTimeQueue){
+ jam();
+ c_retrieveRecord.noOfWaiters--;
+ }
+
+ const bool useLongSig = (req->requestType & GetTabInfoReq::LongSignalConf);
+ const Uint32 reqType = req->requestType & (~GetTabInfoReq::LongSignalConf);
+
+ TableRecordPtr tablePtr;
+ if(reqType == GetTabInfoReq::RequestByName){
+ jam();
+ ndbrequire(signal->getNoOfSections() == 1);
+ const Uint32 len = req->tableNameLen;
+
+ TableRecord keyRecord;
+ if(len > sizeof(keyRecord.tableName)){
+ jam();
+ releaseSections(signal);
+ sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNameTooLong);
+ return;
+ }
+
+ char tableName[MAX_TAB_NAME_SIZE];
+ SegmentedSectionPtr ssPtr;
+ signal->getSection(ssPtr,GetTabInfoReq::TABLE_NAME);
+ SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
+ r0.reset(); // undo implicit first()
+ if(r0.getWords((Uint32*)tableName, ((len + 3)/4)))
+ memcpy(keyRecord.tableName, tableName, len);
+ else {
+ jam();
+ releaseSections(signal);
+ sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNotDefined);
+ return;
+ }
+ releaseSections(signal);
+ // memcpy(keyRecord.tableName, req->tableName, len);
+ //ntohS(&keyRecord.tableName[0], len);
+
+ c_tableRecordHash.find(tablePtr, keyRecord);
+ } else {
+ jam();
+ c_tableRecordPool.getPtr(tablePtr, req->tableId, false);
+ }
+
+ // The table searched for was not found
+ if(tablePtr.i == RNIL){
+ jam();
+ sendGET_TABINFOREF(signal, req, GetTabInfoRef::InvalidTableId);
+ return;
+ }//if
+
+ if (tablePtr.p->tabState != TableRecord::DEFINED) {
+ jam();
+ sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNotDefined);
+ return;
+ }//if
+
+ c_retrieveRecord.busyState = true;
+ c_retrieveRecord.blockRef = req->senderRef;
+ c_retrieveRecord.m_senderData = req->senderData;
+ c_retrieveRecord.tableId = tablePtr.i;
+ c_retrieveRecord.currentSent = 0;
+ c_retrieveRecord.m_useLongSig = useLongSig;
+
+ c_packTable.m_state = PackTable::PTS_GET_TAB;
+
+ signal->theData[0] = ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tablePtr.i;
+ signal->theData[2] = c_retrieveRecord.retrievePage;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+}//execGET_TABINFOREQ()
+
+void Dbdict::sendGetTabResponse(Signal* signal)
+{
+ PageRecordPtr pagePtr;
+ DictTabInfo * const conf = (DictTabInfo *)&signal->theData[0];
+ conf->senderRef = reference();
+ conf->senderData = c_retrieveRecord.m_senderData;
+ conf->requestType = DictTabInfo::GetTabInfoConf;
+ conf->totalLen = c_retrieveRecord.retrievedNoOfWords;
+
+ c_pageRecordArray.getPtr(pagePtr, c_retrieveRecord.retrievePage);
+ Uint32* pagePointer = (Uint32*)&pagePtr.p->word[0] + ZPAGE_HEADER_SIZE;
+
+ if(c_retrieveRecord.m_useLongSig){
+ jam();
+ GetTabInfoConf* conf = (GetTabInfoConf*)signal->getDataPtr();
+ conf->gci = 0;
+ conf->tableId = c_retrieveRecord.tableId;
+ conf->senderData = c_retrieveRecord.m_senderData;
+ conf->totalLen = c_retrieveRecord.retrievedNoOfWords;
+
+ Callback c = { safe_cast(&Dbdict::initRetrieveRecord), 0 };
+ LinearSectionPtr ptr[3];
+ ptr[0].p = pagePointer;
+ ptr[0].sz = c_retrieveRecord.retrievedNoOfWords;
+ sendFragmentedSignal(c_retrieveRecord.blockRef,
+ GSN_GET_TABINFO_CONF,
+ signal,
+ GetTabInfoConf::SignalLength,
+ JBB,
+ ptr,
+ 1,
+ c);
+ return;
+ }
+
+ ndbrequire(false);
+}//sendGetTabResponse()
+
+void Dbdict::sendGET_TABINFOREF(Signal* signal,
+ GetTabInfoReq * req,
+ GetTabInfoRef::ErrorCode errorCode)
+{
+ jamEntry();
+ GetTabInfoRef * const ref = (GetTabInfoRef *)&signal->theData[0];
+ /**
+ * The format of GetTabInfo Req/Ref is the same
+ */
+ BlockReference retRef = req->senderRef;
+ ref->errorCode = errorCode;
+
+ sendSignal(retRef, GSN_GET_TABINFOREF, signal, signal->length(), JBB);
+}//sendGET_TABINFOREF()
+
+Uint32 convertEndian(Uint32 in) {
+#ifdef WORDS_BIGENDIAN
+ Uint32 ut = 0;
+ ut += ((in >> 24) & 255);
+ ut += (((in >> 16) & 255) << 8);
+ ut += (((in >> 8) & 255) << 16);
+ ut += ((in & 255) << 24);
+ return ut;
+#else
+ return in;
+#endif
+}
+
+void
+Dbdict::execLIST_TABLES_REQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 i;
+ ListTablesReq * req = (ListTablesReq*)signal->getDataPtr();
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+ // save req flags
+ const Uint32 reqTableId = req->getTableId();
+ const Uint32 reqTableType = req->getTableType();
+ const bool reqListNames = req->getListNames();
+ const bool reqListIndexes = req->getListIndexes();
+ // init the confs
+ ListTablesConf * conf = (ListTablesConf *)signal->getDataPtrSend();
+ conf->senderData = senderData;
+ conf->counter = 0;
+ Uint32 pos = 0;
+ for (i = 0; i < c_tableRecordPool.getSize(); i++) {
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, i);
+ // filter
+ if (tablePtr.p->tabState == TableRecord::NOT_DEFINED ||
+ tablePtr.p->tabState == TableRecord::REORG_TABLE_PREPARED)
+ continue;
+
+ if ((reqTableType != (Uint32)0) && (reqTableType != (unsigned)tablePtr.p->tableType))
+ continue;
+ if (reqListIndexes && reqTableId != tablePtr.p->primaryTableId)
+ continue;
+ conf->tableData[pos] = 0;
+ // id
+ conf->setTableId(pos, tablePtr.i);
+ // type
+ conf->setTableType(pos, tablePtr.p->tableType);
+ // state
+ if (tablePtr.p->isTable()) {
+ switch (tablePtr.p->tabState) {
+ case TableRecord::DEFINING:
+ case TableRecord::CHECKED:
+ conf->setTableState(pos, DictTabInfo::StateBuilding);
+ break;
+ case TableRecord::PREPARE_DROPPING:
+ case TableRecord::DROPPING:
+ conf->setTableState(pos, DictTabInfo::StateDropping);
+ break;
+ case TableRecord::DEFINED:
+ conf->setTableState(pos, DictTabInfo::StateOnline);
+ break;
+ default:
+ conf->setTableState(pos, DictTabInfo::StateBroken);
+ break;
+ }
+ }
+ if (tablePtr.p->isIndex()) {
+ switch (tablePtr.p->indexState) {
+ case TableRecord::IS_OFFLINE:
+ conf->setTableState(pos, DictTabInfo::StateOffline);
+ break;
+ case TableRecord::IS_BUILDING:
+ conf->setTableState(pos, DictTabInfo::StateBuilding);
+ break;
+ case TableRecord::IS_DROPPING:
+ conf->setTableState(pos, DictTabInfo::StateDropping);
+ break;
+ case TableRecord::IS_ONLINE:
+ conf->setTableState(pos, DictTabInfo::StateOnline);
+ break;
+ default:
+ conf->setTableState(pos, DictTabInfo::StateBroken);
+ break;
+ }
+ }
+ // store
+ if (! tablePtr.p->storedTable) {
+ conf->setTableStore(pos, DictTabInfo::StoreTemporary);
+ } else {
+ conf->setTableStore(pos, DictTabInfo::StorePermanent);
+ }
+ pos++;
+ if (pos >= ListTablesConf::DataLength) {
+ sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
+ ListTablesConf::SignalLength, JBB);
+ conf->counter++;
+ pos = 0;
+ }
+ if (! reqListNames)
+ continue;
+ const Uint32 size = strlen(tablePtr.p->tableName) + 1;
+ conf->tableData[pos] = size;
+ pos++;
+ if (pos >= ListTablesConf::DataLength) {
+ sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
+ ListTablesConf::SignalLength, JBB);
+ conf->counter++;
+ pos = 0;
+ }
+ Uint32 k = 0;
+ while (k < size) {
+ char* p = (char*)&conf->tableData[pos];
+ for (Uint32 j = 0; j < 4; j++) {
+ if (k < size)
+ *p++ = tablePtr.p->tableName[k++];
+ else
+ *p++ = 0;
+ }
+ pos++;
+ if (pos >= ListTablesConf::DataLength) {
+ sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
+ ListTablesConf::SignalLength, JBB);
+ conf->counter++;
+ pos = 0;
+ }
+ }
+ }
+ // XXX merge with above somehow
+ for (i = 0; i < c_triggerRecordPool.getSize(); i++) {
+ if (reqListIndexes)
+ break;
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, i);
+ if (triggerPtr.p->triggerState == TriggerRecord::TS_NOT_DEFINED)
+ continue;
+ // constant 10 hardcoded
+ Uint32 type = 10 + triggerPtr.p->triggerType;
+ if (reqTableType != 0 && reqTableType != type)
+ continue;
+ conf->tableData[pos] = 0;
+ conf->setTableId(pos, triggerPtr.i);
+ conf->setTableType(pos, type);
+ switch (triggerPtr.p->triggerState) {
+ case TriggerRecord::TS_OFFLINE:
+ conf->setTableState(pos, DictTabInfo::StateOffline);
+ break;
+ case TriggerRecord::TS_ONLINE:
+ conf->setTableState(pos, DictTabInfo::StateOnline);
+ break;
+ default:
+ conf->setTableState(pos, DictTabInfo::StateBroken);
+ break;
+ }
+ conf->setTableStore(pos, DictTabInfo::StoreTemporary);
+ pos++;
+ if (pos >= ListTablesConf::DataLength) {
+ sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
+ ListTablesConf::SignalLength, JBB);
+ conf->counter++;
+ pos = 0;
+ }
+ if (! reqListNames)
+ continue;
+ const Uint32 size = strlen(triggerPtr.p->triggerName) + 1;
+ conf->tableData[pos] = size;
+ pos++;
+ if (pos >= ListTablesConf::DataLength) {
+ sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
+ ListTablesConf::SignalLength, JBB);
+ conf->counter++;
+ pos = 0;
+ }
+ Uint32 k = 0;
+ while (k < size) {
+ char* p = (char*)&conf->tableData[pos];
+ for (Uint32 j = 0; j < 4; j++) {
+ if (k < size)
+ *p++ = triggerPtr.p->triggerName[k++];
+ else
+ *p++ = 0;
+ }
+ pos++;
+ if (pos >= ListTablesConf::DataLength) {
+ sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
+ ListTablesConf::SignalLength, JBB);
+ conf->counter++;
+ pos = 0;
+ }
+ }
+ }
+ // the last signal must be shorter than the maximum length
+ sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
+ ListTablesConf::HeaderLength + pos, JBB);
+}
+
+/**
+ * MODULE: Create index
+ *
+ * Create index in DICT via a create table operation. Then invoke an alter
+ * index operation to bring the index online.
+ *
+ * Request type in CREATE_INDX signals:
+ *
+ * RT_USER - from API to DICT master
+ * RT_DICT_PREPARE - prepare participants
+ * RT_DICT_COMMIT - commit participants
+ * RT_TC - create index in TC (part of alter index operation)
+ */
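+
+/**
+ * Simplified flow for RT_USER (illustrative only; error paths omitted):
+ *
+ *   sender            master DICT                     all DICTs
+ *     |--CREATE_INDX_REQ-->|                              |
+ *     |                    |---REQ + opKey (prepare)----->|
+ *     |                    |<--CONF/REF-------------------|
+ *     |                    | create index table, alter index online
+ *     |                    |---REQ RT_DICT_COMMIT-------->|
+ *     |                    |<--CONF/REF-------------------|
+ *     |<--CONF or REF------|                              |
+ */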
+
+void
+Dbdict::execCREATE_INDX_REQ(Signal* signal)
+{
+ jamEntry();
+ CreateIndxReq* const req = (CreateIndxReq*)signal->getDataPtrSend();
+ OpCreateIndexPtr opPtr;
+ const Uint32 senderRef = signal->senderBlockRef();
+ const CreateIndxReq::RequestType requestType = req->getRequestType();
+ if (requestType == CreateIndxReq::RT_USER) {
+ jam();
+ if (! assembleFragments(signal)) {
+ jam();
+ return;
+ }
+ if (signal->getLength() == CreateIndxReq::SignalLength) {
+ jam();
+ if (getOwnNodeId() != c_masterNodeId) {
+ jam();
+
+ releaseSections(signal);
+ OpCreateIndex opBusy;
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = 0;
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_PREPARE;
+ opPtr.p->m_errorCode = CreateIndxRef::NotMaster;
+ opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
+ createIndex_sendReply(signal, opPtr, true);
+ return;
+ }
+
+ // forward initial request plus operation key to all
+ req->setOpKey(++c_opRecordSequence);
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_CREATE_INDX_REQ,
+ signal, CreateIndxReq::SignalLength + 1, JBB);
+ return;
+ }
+ // seize operation record
+ ndbrequire(signal->getLength() == CreateIndxReq::SignalLength + 1);
+ const Uint32 opKey = req->getOpKey();
+ OpCreateIndex opBusy;
+ if (! c_opCreateIndex.seize(opPtr))
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_coordinatorRef = senderRef;
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = opKey;
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_PREPARE;
+ if (opPtr.p == &opBusy) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::Busy;
+ opPtr.p->m_errorLine = __LINE__;
+ releaseSections(signal);
+ createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ c_opCreateIndex.add(opPtr);
+ // save attribute list
+ SegmentedSectionPtr ssPtr;
+ signal->getSection(ssPtr, CreateIndxReq::ATTRIBUTE_LIST_SECTION);
+ SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
+ r0.reset(); // undo implicit first()
+ if (! r0.getWord(&opPtr.p->m_attrList.sz) ||
+ ! r0.getWords(opPtr.p->m_attrList.id, opPtr.p->m_attrList.sz)) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidName;
+ opPtr.p->m_errorLine = __LINE__;
+ releaseSections(signal);
+ createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ // save name and index table properties
+ signal->getSection(ssPtr, CreateIndxReq::INDEX_NAME_SECTION);
+ SimplePropertiesSectionReader r1(ssPtr, getSectionSegmentPool());
+ DictTabInfo::Table tableDesc;
+ tableDesc.init();
+ SimpleProperties::UnpackStatus status = SimpleProperties::unpack(
+ r1, &tableDesc,
+ DictTabInfo::TableMapping, DictTabInfo::TableMappingSize,
+ true, true);
+ if (status != SimpleProperties::Eof) {
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidName;
+ opPtr.p->m_errorLine = __LINE__;
+ releaseSections(signal);
+ createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ memcpy(opPtr.p->m_indexName, tableDesc.TableName, MAX_TAB_NAME_SIZE);
+ opPtr.p->m_storedIndex = tableDesc.TableLoggedFlag;
+ releaseSections(signal);
+ // master expects to hear from all
+ if (opPtr.p->m_isMaster)
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ createIndex_slavePrepare(signal, opPtr);
+ createIndex_sendReply(signal, opPtr, false);
+ return;
+ }
+ c_opCreateIndex.find(opPtr, req->getConnectionPtr());
+ if (! opPtr.isNull()) {
+ opPtr.p->m_requestType = requestType;
+ if (requestType == CreateIndxReq::RT_DICT_COMMIT ||
+ requestType == CreateIndxReq::RT_DICT_ABORT) {
+ jam();
+ if (requestType == CreateIndxReq::RT_DICT_COMMIT) {
+ opPtr.p->m_request.setIndexId(req->getIndexId());
+ opPtr.p->m_request.setIndexVersion(req->getIndexVersion());
+ createIndex_slaveCommit(signal, opPtr);
+ } else {
+ createIndex_slaveAbort(signal, opPtr);
+ }
+ createIndex_sendReply(signal, opPtr, false);
+ // done in slave
+ if (! opPtr.p->m_isMaster)
+ c_opCreateIndex.release(opPtr);
+ return;
+ }
+ }
+ jam();
+ // return to sender
+ releaseSections(signal);
+ OpCreateIndex opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = CreateIndxRef::BadRequestType;
+ opPtr.p->m_errorLine = __LINE__;
+ createIndex_sendReply(signal, opPtr, true);
+}
+
+void
+Dbdict::execCREATE_INDX_CONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 0);
+ CreateIndxConf* conf = (CreateIndxConf*)signal->getDataPtrSend();
+ createIndex_recvReply(signal, conf, 0);
+}
+
+void
+Dbdict::execCREATE_INDX_REF(Signal* signal)
+{
+ jamEntry();
+ CreateIndxRef* ref = (CreateIndxRef*)signal->getDataPtrSend();
+ createIndex_recvReply(signal, ref->getConf(), ref);
+}
+
+void
+Dbdict::createIndex_recvReply(Signal* signal, const CreateIndxConf* conf,
+ const CreateIndxRef* ref)
+{
+ jam();
+ const Uint32 senderRef = signal->senderBlockRef();
+ const CreateIndxReq::RequestType requestType = conf->getRequestType();
+ const Uint32 key = conf->getConnectionPtr();
+ if (requestType == CreateIndxReq::RT_TC) {
+ jam();
+ // part of alter index operation
+ OpAlterIndexPtr opPtr;
+ c_opAlterIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ alterIndex_fromCreateTc(signal, opPtr);
+ return;
+ }
+ OpCreateIndexPtr opPtr;
+ c_opCreateIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ ndbrequire(opPtr.p->m_isMaster);
+ ndbrequire(opPtr.p->m_requestType == requestType);
+ opPtr.p->setError(ref);
+ opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
+ if (! opPtr.p->m_signalCounter.done()) {
+ jam();
+ return;
+ }
+ if (requestType == CreateIndxReq::RT_DICT_COMMIT ||
+ requestType == CreateIndxReq::RT_DICT_ABORT) {
+ jam();
+ // send reply to user
+ createIndex_sendReply(signal, opPtr, true);
+ c_opCreateIndex.release(opPtr);
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT;
+ createIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == CreateIndxReq::RT_DICT_PREPARE) {
+ jam();
+ // start index table create
+ createIndex_toCreateTable(signal, opPtr);
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT;
+ createIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::createIndex_slavePrepare(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ jam();
+}
+
+void
+Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ Uint32 attrid_map[MAX_ATTRIBUTES_IN_INDEX];
+ Uint32 k;
+ jam();
+ const CreateIndxReq* const req = &opPtr.p->m_request;
+ // signal data writer
+ Uint32* wbuffer = &c_indexPage.word[0];
+ LinearWriter w(wbuffer, sizeof(c_indexPage) >> 2);
+ w.first();
+ // get table being indexed
+ if (! (req->getTableId() < c_tableRecordPool.getSize())) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidPrimaryTable;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, req->getTableId());
+ if (tablePtr.p->tabState != TableRecord::DEFINED) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidPrimaryTable;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ if (! tablePtr.p->isTable()) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidPrimaryTable;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ // compute index table record
+ TableRecord indexRec;
+ TableRecordPtr indexPtr;
+ indexPtr.i = RNIL; // invalid
+ indexPtr.p = &indexRec;
+ initialiseTableRecord(indexPtr);
+ if (req->getIndexType() == DictTabInfo::UniqueHashIndex) {
+ indexPtr.p->storedTable = opPtr.p->m_storedIndex;
+ indexPtr.p->fragmentType = DictTabInfo::DistrKeyUniqueHashIndex;
+ } else if (req->getIndexType() == DictTabInfo::OrderedIndex) {
+ // the first version does not support logging
+ if (opPtr.p->m_storedIndex) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ indexPtr.p->storedTable = false;
+ indexPtr.p->fragmentType = DictTabInfo::DistrKeyOrderedIndex;
+ } else {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ indexPtr.p->tableType = (DictTabInfo::TableType)req->getIndexType();
+ indexPtr.p->primaryTableId = req->getTableId();
+ indexPtr.p->noOfAttributes = opPtr.p->m_attrList.sz;
+ indexPtr.p->tupKeyLength = 0;
+ if (indexPtr.p->noOfAttributes == 0) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ if (indexPtr.p->isOrderedIndex()) {
+ // tree node size in words (make configurable later)
+ indexPtr.p->tupKeyLength = MAX_TTREE_NODE_SIZE;
+ }
+
+ AttributeMask mask;
+ mask.clear();
+ for (k = 0; k < opPtr.p->m_attrList.sz; k++) {
+ jam();
+ unsigned current_id= opPtr.p->m_attrList.id[k];
+ AttributeRecord* aRec= NULL;
+ Uint32 tAttr= tablePtr.p->firstAttribute;
+ for (; tAttr != RNIL; tAttr= aRec->nextAttrInTable)
+ {
+ aRec = c_attributeRecordPool.getPtr(tAttr);
+ if (aRec->attributeId != current_id)
+ continue;
+ jam();
+ break;
+ }
+ if (tAttr == RNIL) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::BadRequestType;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ if (mask.get(current_id))
+ {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::DuplicateAttributes;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ mask.set(current_id);
+
+ const Uint32 a = aRec->attributeDescriptor;
+ unsigned kk= k;
+ if (indexPtr.p->isHashIndex()) {
+ const Uint32 s1 = AttributeDescriptor::getSize(a);
+ const Uint32 s2 = AttributeDescriptor::getArraySize(a);
+ indexPtr.p->tupKeyLength += ((1 << s1) * s2 + 31) >> 5;
+ // for unique indexes, reorder the attributes according to their
+ // attribute id order in the primary table
+ for (; kk > 0 && current_id < attrid_map[kk-1]>>16; kk--)
+ attrid_map[kk]= attrid_map[kk-1];
+ }
+ attrid_map[kk]= k | (current_id << 16);
+ }
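+ // Hypothetical example: a unique-index attribute list (7, 2, 5) gives
+ // attrid_map = { 1|(2<<16), 2|(5<<16), 0|(7<<16) }, i.e. sorted by
+ // primary table attribute id with the original position kept in the
+ // low 16 bits.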
+ indexPtr.p->noOfPrimkey = indexPtr.p->noOfAttributes;
+ // plus concatenated primary table key attribute
+ indexPtr.p->noOfAttributes += 1;
+ indexPtr.p->noOfNullAttr = 0;
+ // write index table
+ w.add(DictTabInfo::TableName, opPtr.p->m_indexName);
+ w.add(DictTabInfo::TableLoggedFlag, indexPtr.p->storedTable);
+ w.add(DictTabInfo::FragmentTypeVal, indexPtr.p->fragmentType);
+ w.add(DictTabInfo::TableTypeVal, indexPtr.p->tableType);
+ w.add(DictTabInfo::PrimaryTable, tablePtr.p->tableName);
+ w.add(DictTabInfo::PrimaryTableId, tablePtr.i);
+ w.add(DictTabInfo::NoOfAttributes, indexPtr.p->noOfAttributes);
+ w.add(DictTabInfo::NoOfKeyAttr, indexPtr.p->noOfPrimkey);
+ w.add(DictTabInfo::NoOfNullable, indexPtr.p->noOfNullAttr);
+ w.add(DictTabInfo::KeyLength, indexPtr.p->tupKeyLength);
+ // write index key attributes
+ AttributeRecordPtr aRecPtr;
+ c_attributeRecordPool.getPtr(aRecPtr, tablePtr.p->firstAttribute);
+ for (k = 0; k < opPtr.p->m_attrList.sz; k++) {
+ // insert the attributes in the order decided above in attrid_map
+ // k is new order, current_id is in previous order
+ // ToDo: make sure "current_id" is stored with the table and
+ // passed up to NdbDictionary
+ unsigned current_id= opPtr.p->m_attrList.id[attrid_map[k] & 0xffff];
+ jam();
+ for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
+ AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
+ tAttr = aRec->nextAttrInTable;
+ if (aRec->attributeId != current_id)
+ continue;
+ jam();
+ const Uint32 a = aRec->attributeDescriptor;
+ bool isNullable = AttributeDescriptor::getNullable(a);
+ Uint32 attrType = AttributeDescriptor::getType(a);
+ w.add(DictTabInfo::AttributeName, aRec->attributeName);
+ w.add(DictTabInfo::AttributeId, k);
+ if (indexPtr.p->isHashIndex()) {
+ w.add(DictTabInfo::AttributeKeyFlag, (Uint32)true);
+ w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false);
+ }
+ if (indexPtr.p->isOrderedIndex()) {
+ w.add(DictTabInfo::AttributeKeyFlag, (Uint32)false);
+ w.add(DictTabInfo::AttributeNullableFlag, (Uint32)isNullable);
+ }
+ w.add(DictTabInfo::AttributeExtType, attrType);
+ w.add(DictTabInfo::AttributeExtPrecision, aRec->extPrecision);
+ w.add(DictTabInfo::AttributeExtScale, aRec->extScale);
+ w.add(DictTabInfo::AttributeExtLength, aRec->extLength);
+ w.add(DictTabInfo::AttributeEnd, (Uint32)true);
+ }
+ }
+ if (indexPtr.p->isHashIndex()) {
+ jam();
+ // write concatenated primary table key attribute
+ w.add(DictTabInfo::AttributeName, "NDB$PK");
+ w.add(DictTabInfo::AttributeId, opPtr.p->m_attrList.sz);
+ w.add(DictTabInfo::AttributeKeyFlag, (Uint32)false);
+ w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false);
+ w.add(DictTabInfo::AttributeExtType, (Uint32)DictTabInfo::ExtUnsigned);
+ w.add(DictTabInfo::AttributeExtLength, tablePtr.p->tupKeyLength+1);
+ w.add(DictTabInfo::AttributeEnd, (Uint32)true);
+ }
+ if (indexPtr.p->isOrderedIndex()) {
+ jam();
+ // write index tree node as Uint32 array attribute
+ w.add(DictTabInfo::AttributeName, "NDB$TNODE");
+ w.add(DictTabInfo::AttributeId, opPtr.p->m_attrList.sz);
+ w.add(DictTabInfo::AttributeKeyFlag, (Uint32)true);
+ w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false);
+ w.add(DictTabInfo::AttributeExtType, (Uint32)DictTabInfo::ExtUnsigned);
+ w.add(DictTabInfo::AttributeExtLength, indexPtr.p->tupKeyLength);
+ w.add(DictTabInfo::AttributeEnd, (Uint32)true);
+ }
+ // finish
+ w.add(DictTabInfo::TableEnd, (Uint32)true);
+ // remember to release the sections before reusing the signal buffer
+ releaseSections(signal);
+ // send create index table request
+ CreateTableReq * const cre = (CreateTableReq*)signal->getDataPtrSend();
+ cre->senderRef = reference();
+ cre->senderData = opPtr.p->key;
+ LinearSectionPtr lsPtr[3];
+ lsPtr[0].p = wbuffer;
+ lsPtr[0].sz = w.getWordsUsed();
+ sendSignal(DBDICT_REF, GSN_CREATE_TABLE_REQ,
+ signal, CreateTableReq::SignalLength, JBB, lsPtr, 1);
+}
+
+void
+Dbdict::createIndex_fromCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT;
+ createIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (! opPtr.p->m_request.getOnline()) {
+ jam();
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_COMMIT;
+ createIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ createIndex_toAlterIndex(signal, opPtr);
+}
+
+void
+Dbdict::createIndex_toAlterIndex(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ jam();
+ AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(AlterIndxReq::RT_CREATE_INDEX);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ req->setTableId(opPtr.p->m_request.getTableId());
+ req->setIndexId(opPtr.p->m_request.getIndexId());
+ req->setIndexVersion(opPtr.p->m_request.getIndexVersion());
+ req->setOnline(true);
+ sendSignal(reference(), GSN_ALTER_INDX_REQ,
+ signal, AlterIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::createIndex_fromAlterIndex(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT;
+ createIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_COMMIT;
+ createIndex_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::createIndex_slaveCommit(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ jam();
+ const Uint32 indexId = opPtr.p->m_request.getIndexId();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, indexId);
+ if (! opPtr.p->m_request.getOnline()) {
+ ndbrequire(indexPtr.p->indexState == TableRecord::IS_UNDEFINED);
+ indexPtr.p->indexState = TableRecord::IS_OFFLINE;
+ } else {
+ ndbrequire(indexPtr.p->indexState == TableRecord::IS_ONLINE);
+ }
+}
+
+void
+Dbdict::createIndex_slaveAbort(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ jam();
+ CreateIndxReq* const req = &opPtr.p->m_request;
+ const Uint32 indexId = req->getIndexId();
+ if (indexId >= c_tableRecordPool.getSize()) {
+ jam();
+ return;
+ }
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, indexId);
+ if (! indexPtr.p->isIndex()) {
+ jam();
+ return;
+ }
+ indexPtr.p->indexState = TableRecord::IS_BROKEN;
+}
+
+void
+Dbdict::createIndex_sendSlaveReq(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ jam();
+ CreateIndxReq* const req = (CreateIndxReq*)signal->getDataPtrSend();
+ *req = opPtr.p->m_request;
+ req->setUserRef(opPtr.p->m_coordinatorRef);
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(opPtr.p->m_requestType);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_CREATE_INDX_REQ,
+ signal, CreateIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::createIndex_sendReply(Signal* signal, OpCreateIndexPtr opPtr,
+ bool toUser)
+{
+ CreateIndxRef* rep = (CreateIndxRef*)signal->getDataPtrSend();
+ Uint32 gsn = GSN_CREATE_INDX_CONF;
+ Uint32 length = CreateIndxConf::InternalLength;
+ bool sendRef = opPtr.p->hasError();
+ if (! toUser) {
+ rep->setUserRef(opPtr.p->m_coordinatorRef);
+ rep->setConnectionPtr(opPtr.p->key);
+ rep->setRequestType(opPtr.p->m_requestType);
+ if (opPtr.p->m_requestType == CreateIndxReq::RT_DICT_ABORT)
+ sendRef = false;
+ } else {
+ rep->setUserRef(opPtr.p->m_request.getUserRef());
+ rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
+ rep->setRequestType(opPtr.p->m_request.getRequestType());
+ length = CreateIndxConf::SignalLength;
+ }
+ rep->setTableId(opPtr.p->m_request.getTableId());
+ rep->setIndexId(opPtr.p->m_request.getIndexId());
+ rep->setIndexVersion(opPtr.p->m_request.getIndexVersion());
+ if (sendRef) {
+ if (opPtr.p->m_errorNode == 0)
+ opPtr.p->m_errorNode = getOwnNodeId();
+ rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->setErrorLine(opPtr.p->m_errorLine);
+ rep->setErrorNode(opPtr.p->m_errorNode);
+ gsn = GSN_CREATE_INDX_REF;
+ length = CreateIndxRef::SignalLength;
+ }
+ sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
+}
+
+/**
+ * MODULE: Drop index.
+ *
+ * Drop index. First alters the index offline (i.e. drops metadata in
+ * other blocks) and then drops the index table.
+ */
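+
+/**
+ * Simplified flow: prepare on all DICTs, then on the master an
+ * ALTER_INDX_REQ (RT_DROP_INDEX, offline) followed by a DROP_TABLE_REQ
+ * for the index table, and finally RT_DICT_COMMIT to all DICTs.
+ */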
+
+void
+Dbdict::execDROP_INDX_REQ(Signal* signal)
+{
+ jamEntry();
+ DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend();
+ OpDropIndexPtr opPtr;
+
+ int err = DropIndxRef::BadRequestType;
+ const Uint32 senderRef = signal->senderBlockRef();
+ const DropIndxReq::RequestType requestType = req->getRequestType();
+ if (requestType == DropIndxReq::RT_USER) {
+ jam();
+ if (signal->getLength() == DropIndxReq::SignalLength) {
+ jam();
+ if (getOwnNodeId() != c_masterNodeId) {
+ jam();
+
+ err = DropIndxRef::NotMaster;
+ goto error;
+ }
+ // forward initial request plus operation key to all
+ Uint32 indexId= req->getIndexId();
+ Uint32 indexVersion= req->getIndexVersion();
+ TableRecordPtr tmp;
+ int res = getMetaTablePtr(tmp, indexId, indexVersion);
+ switch(res){
+ case MetaData::InvalidArgument:
+ err = DropIndxRef::IndexNotFound;
+ goto error;
+ case MetaData::TableNotFound:
+ case MetaData::InvalidTableVersion:
+ err = DropIndxRef::InvalidIndexVersion;
+ goto error;
+ }
+
+ if (! tmp.p->isIndex()) {
+ jam();
+ err = DropIndxRef::NotAnIndex;
+ goto error;
+ }
+
+ if (tmp.p->indexState == TableRecord::IS_DROPPING){
+ jam();
+ err = DropIndxRef::IndexNotFound;
+ goto error;
+ }
+
+ tmp.p->indexState = TableRecord::IS_DROPPING;
+
+ req->setOpKey(++c_opRecordSequence);
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_DROP_INDX_REQ,
+ signal, DropIndxReq::SignalLength + 1, JBB);
+ return;
+ }
+ // seize operation record
+ ndbrequire(signal->getLength() == DropIndxReq::SignalLength + 1);
+ const Uint32 opKey = req->getOpKey();
+ OpDropIndex opBusy;
+ if (! c_opDropIndex.seize(opPtr))
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_coordinatorRef = senderRef;
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = opKey;
+ opPtr.p->m_requestType = DropIndxReq::RT_DICT_PREPARE;
+ if (opPtr.p == &opBusy) {
+ jam();
+ opPtr.p->m_errorCode = DropIndxRef::Busy;
+ opPtr.p->m_errorLine = __LINE__;
+ dropIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ c_opDropIndex.add(opPtr);
+ // master expects to hear from all
+ if (opPtr.p->m_isMaster)
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ dropIndex_slavePrepare(signal, opPtr);
+ dropIndex_sendReply(signal, opPtr, false);
+ return;
+ }
+ c_opDropIndex.find(opPtr, req->getConnectionPtr());
+ if (! opPtr.isNull()) {
+ opPtr.p->m_requestType = requestType;
+ if (requestType == DropIndxReq::RT_DICT_COMMIT ||
+ requestType == DropIndxReq::RT_DICT_ABORT) {
+ jam();
+ if (requestType == DropIndxReq::RT_DICT_COMMIT)
+ dropIndex_slaveCommit(signal, opPtr);
+ else
+ dropIndex_slaveAbort(signal, opPtr);
+ dropIndex_sendReply(signal, opPtr, false);
+ // done in slave
+ if (! opPtr.p->m_isMaster)
+ c_opDropIndex.release(opPtr);
+ return;
+ }
+ }
+error:
+ jam();
+ // return to sender
+ OpDropIndex opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = (DropIndxRef::ErrorCode)err;
+ opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
+ dropIndex_sendReply(signal, opPtr, true);
+}
+
+void
+Dbdict::execDROP_INDX_CONF(Signal* signal)
+{
+ jamEntry();
+ DropIndxConf* conf = (DropIndxConf*)signal->getDataPtrSend();
+ dropIndex_recvReply(signal, conf, 0);
+}
+
+void
+Dbdict::execDROP_INDX_REF(Signal* signal)
+{
+ jamEntry();
+ DropIndxRef* ref = (DropIndxRef*)signal->getDataPtrSend();
+ dropIndex_recvReply(signal, ref->getConf(), ref);
+}
+
+void
+Dbdict::dropIndex_recvReply(Signal* signal, const DropIndxConf* conf,
+ const DropIndxRef* ref)
+{
+ jam();
+ const Uint32 senderRef = signal->senderBlockRef();
+ const DropIndxReq::RequestType requestType = conf->getRequestType();
+ const Uint32 key = conf->getConnectionPtr();
+ if (requestType == DropIndxReq::RT_TC) {
+ jam();
+ // part of alter index operation
+ OpAlterIndexPtr opPtr;
+ c_opAlterIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ alterIndex_fromDropTc(signal, opPtr);
+ return;
+ }
+ OpDropIndexPtr opPtr;
+ c_opDropIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ ndbrequire(opPtr.p->m_isMaster);
+ ndbrequire(opPtr.p->m_requestType == requestType);
+ opPtr.p->setError(ref);
+ opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
+ if (! opPtr.p->m_signalCounter.done()) {
+ jam();
+ return;
+ }
+ if (requestType == DropIndxReq::RT_DICT_COMMIT ||
+ requestType == DropIndxReq::RT_DICT_ABORT) {
+ jam();
+ // send reply to user
+ dropIndex_sendReply(signal, opPtr, true);
+ c_opDropIndex.release(opPtr);
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = DropIndxReq::RT_DICT_ABORT;
+ dropIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == DropIndxReq::RT_DICT_PREPARE) {
+ jam();
+ // start alter offline
+ dropIndex_toAlterIndex(signal, opPtr);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::dropIndex_slavePrepare(Signal* signal, OpDropIndexPtr opPtr)
+{
+ jam();
+ DropIndxReq* const req = &opPtr.p->m_request;
+ // check index exists
+ TableRecordPtr indexPtr;
+ if (! (req->getIndexId() < c_tableRecordPool.getSize())) {
+ jam();
+ opPtr.p->m_errorCode = DropIndxRef::IndexNotFound;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ c_tableRecordPool.getPtr(indexPtr, req->getIndexId());
+ if (indexPtr.p->tabState != TableRecord::DEFINED) {
+ jam();
+ opPtr.p->m_errorCode = DropIndxRef::IndexNotFound;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ if (! indexPtr.p->isIndex()) {
+ jam();
+ opPtr.p->m_errorCode = DropIndxRef::NotAnIndex;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ // ignore incoming primary table id
+ req->setTableId(indexPtr.p->primaryTableId);
+}
+
+void
+Dbdict::dropIndex_toAlterIndex(Signal* signal, OpDropIndexPtr opPtr)
+{
+ jam();
+ AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(AlterIndxReq::RT_DROP_INDEX);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ req->setTableId(opPtr.p->m_request.getTableId());
+ req->setIndexId(opPtr.p->m_request.getIndexId());
+ req->setIndexVersion(opPtr.p->m_request.getIndexVersion());
+ req->setOnline(false);
+ sendSignal(reference(), GSN_ALTER_INDX_REQ,
+ signal, AlterIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::dropIndex_fromAlterIndex(Signal* signal, OpDropIndexPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = DropIndxReq::RT_DICT_ABORT;
+ dropIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ dropIndex_toDropTable(signal, opPtr);
+}
+
+void
+Dbdict::dropIndex_toDropTable(Signal* signal, OpDropIndexPtr opPtr)
+{
+ jam();
+ DropTableReq* const req = (DropTableReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = opPtr.p->key;
+ req->tableId = opPtr.p->m_request.getIndexId();
+ req->tableVersion = opPtr.p->m_request.getIndexVersion();
+ sendSignal(reference(), GSN_DROP_TABLE_REQ,
+ signal,DropTableReq::SignalLength, JBB);
+}
+
+void
+Dbdict::dropIndex_fromDropTable(Signal* signal, OpDropIndexPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = DropIndxReq::RT_DICT_ABORT;
+ dropIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = DropIndxReq::RT_DICT_COMMIT;
+ dropIndex_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::dropIndex_slaveCommit(Signal* signal, OpDropIndexPtr opPtr)
+{
+ jam();
+}
+
+void
+Dbdict::dropIndex_slaveAbort(Signal* signal, OpDropIndexPtr opPtr)
+{
+ jam();
+ DropIndxReq* const req = &opPtr.p->m_request;
+ const Uint32 indexId = req->getIndexId();
+ if (indexId >= c_tableRecordPool.getSize()) {
+ jam();
+ return;
+ }
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, indexId);
+ indexPtr.p->indexState = TableRecord::IS_BROKEN;
+}
+
+void
+Dbdict::dropIndex_sendSlaveReq(Signal* signal, OpDropIndexPtr opPtr)
+{
+ DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend();
+ *req = opPtr.p->m_request;
+ req->setUserRef(opPtr.p->m_coordinatorRef);
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(opPtr.p->m_requestType);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_DROP_INDX_REQ,
+ signal, DropIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::dropIndex_sendReply(Signal* signal, OpDropIndexPtr opPtr,
+ bool toUser)
+{
+ DropIndxRef* rep = (DropIndxRef*)signal->getDataPtrSend();
+ Uint32 gsn = GSN_DROP_INDX_CONF;
+ Uint32 length = DropIndxConf::InternalLength;
+ bool sendRef = opPtr.p->hasError();
+ if (! toUser) {
+ rep->setUserRef(opPtr.p->m_coordinatorRef);
+ rep->setConnectionPtr(opPtr.p->key);
+ rep->setRequestType(opPtr.p->m_requestType);
+ if (opPtr.p->m_requestType == DropIndxReq::RT_DICT_ABORT)
+ sendRef = false;
+ } else {
+ rep->setUserRef(opPtr.p->m_request.getUserRef());
+ rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
+ rep->setRequestType(opPtr.p->m_request.getRequestType());
+ length = DropIndxConf::SignalLength;
+ }
+ rep->setTableId(opPtr.p->m_request.getTableId());
+ rep->setIndexId(opPtr.p->m_request.getIndexId());
+ rep->setIndexVersion(opPtr.p->m_request.getIndexVersion());
+ if (sendRef) {
+ if (opPtr.p->m_errorNode == 0)
+ opPtr.p->m_errorNode = getOwnNodeId();
+ rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->setErrorLine(opPtr.p->m_errorLine);
+ rep->setErrorNode(opPtr.p->m_errorNode);
+ gsn = GSN_DROP_INDX_REF;
+ length = DropIndxRef::SignalLength;
+ }
+ sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
+}
+
+/*****************************************************
+ *
+ * Util signalling
+ *
+ *****************************************************/
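+
+/**
+ * Pattern used below: the caller's senderData is saved in an
+ * OpSignalUtil record and replaced by that record's index, so that the
+ * UTIL_*_CONF/REF can be routed back here, the original senderData
+ * restored and the saved callback executed.
+ */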
+
+int
+Dbdict::sendSignalUtilReq(Callback *pcallback,
+ BlockReference ref,
+ GlobalSignalNumber gsn,
+ Signal* signal,
+ Uint32 length,
+ JobBufferLevel jbuf,
+ LinearSectionPtr ptr[3],
+ Uint32 noOfSections)
+{
+ jam();
+ EVENT_TRACE;
+ OpSignalUtilPtr utilRecPtr;
+
+ // Seize a Util Send record
+ if (!c_opSignalUtil.seize(utilRecPtr)) {
+ // Failed to allocate util record
+ return -1;
+ }
+ utilRecPtr.p->m_callback = *pcallback;
+
+ // should work for all util signal classes
+ UtilPrepareReq *req = (UtilPrepareReq*)signal->getDataPtrSend();
+ utilRecPtr.p->m_userData = req->getSenderData();
+ req->setSenderData(utilRecPtr.i);
+
+ if (ptr) {
+ jam();
+ sendSignal(ref, gsn, signal, length, jbuf, ptr, noOfSections);
+ } else {
+ jam();
+ sendSignal(ref, gsn, signal, length, jbuf);
+ }
+
+ return 0;
+}
+
+int
+Dbdict::recvSignalUtilReq(Signal* signal, Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ UtilPrepareConf * const req = (UtilPrepareConf*)signal->getDataPtr();
+ OpSignalUtilPtr utilRecPtr;
+ utilRecPtr.i = req->getSenderData();
+ if ((utilRecPtr.p = c_opSignalUtil.getPtr(utilRecPtr.i)) == NULL) {
+ jam();
+ return -1;
+ }
+
+ req->setSenderData(utilRecPtr.p->m_userData);
+ Callback c = utilRecPtr.p->m_callback;
+ c_opSignalUtil.release(utilRecPtr);
+
+ execute(signal, c, returnCode);
+ return 0;
+}
+
+void Dbdict::execUTIL_PREPARE_CONF(Signal *signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ ndbrequire(recvSignalUtilReq(signal, 0) == 0);
+}
+
+void
+Dbdict::execUTIL_PREPARE_REF(Signal *signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ ndbrequire(recvSignalUtilReq(signal, 1) == 0);
+}
+
+void Dbdict::execUTIL_EXECUTE_CONF(Signal *signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ ndbrequire(recvSignalUtilReq(signal, 0) == 0);
+}
+
+void Dbdict::execUTIL_EXECUTE_REF(Signal *signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+
+#ifdef EVENT_DEBUG
+ UtilExecuteRef * ref = (UtilExecuteRef *)signal->getDataPtrSend();
+
+ ndbout_c("execUTIL_EXECUTE_REF");
+ ndbout_c("senderData %u",ref->getSenderData());
+ ndbout_c("errorCode %u",ref->getErrorCode());
+ ndbout_c("TCErrorCode %u",ref->getTCErrorCode());
+#endif
+
+ ndbrequire(recvSignalUtilReq(signal, 1) == 0);
+}
+
+void Dbdict::execUTIL_RELEASE_CONF(Signal *signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ ndbrequire(false);
+ ndbrequire(recvSignalUtilReq(signal, 0) == 0);
+}
+
+void Dbdict::execUTIL_RELEASE_REF(Signal *signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ ndbrequire(false);
+ ndbrequire(recvSignalUtilReq(signal, 1) == 0);
+}
+
+/**
+ * MODULE: Create event
+ *
+ * Create event in DICT.
+ *
+ *
+ * Request type in CREATE_EVNT signals:
+ *
+ * For the signal flow, see Dbdict.txt
+ *
+ */
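+
+/**
+ * Roughly: RT_USER_CREATE asks SUMA for a subscription id and then
+ * inserts the event into the NDB$EVENTS_0 system table via DBUTIL,
+ * RT_USER_GET reads that table, and RT_DICT_AFTER_GET is the
+ * master-to-slave leg.
+ */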
+
+/*****************************************************************
+ *
+ * Systable stuff
+ *
+ */
+
+const Uint32 Dbdict::sysTab_NDBEVENTS_0_szs[EVENT_SYSTEM_TABLE_LENGTH] = {
+ sizeof(((sysTab_NDBEVENTS_0*)0)->NAME),
+ sizeof(((sysTab_NDBEVENTS_0*)0)->EVENT_TYPE),
+ sizeof(((sysTab_NDBEVENTS_0*)0)->TABLE_NAME),
+ sizeof(((sysTab_NDBEVENTS_0*)0)->ATTRIBUTE_MASK),
+ sizeof(((sysTab_NDBEVENTS_0*)0)->SUBID),
+ sizeof(((sysTab_NDBEVENTS_0*)0)->SUBKEY)
+};
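+
+// The array above holds the byte size of each NDB$EVENTS_0 column, in
+// column order; executeTransEventSysTable() divides these by 4 to get
+// the AttributeHeader word counts.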
+
+void
+Dbdict::prepareTransactionEventSysTable (Callback *pcallback,
+ Signal* signal,
+ Uint32 senderData,
+ UtilPrepareReq::OperationTypeValue prepReq)
+{
+ // find table id for event system table
+ TableRecord keyRecord;
+ strcpy(keyRecord.tableName, EVENT_SYSTEM_TABLE_NAME);
+
+ TableRecordPtr tablePtr;
+ c_tableRecordHash.find(tablePtr, keyRecord);
+
+ ndbrequire(tablePtr.i != RNIL); // system table must exist
+
+ Uint32 tableId = tablePtr.p->tableId; /* System table */
+ Uint32 noAttr = tablePtr.p->noOfAttributes;
+ ndbrequire(noAttr == EVENT_SYSTEM_TABLE_LENGTH);
+
+ switch (prepReq) {
+ case UtilPrepareReq::Update:
+ case UtilPrepareReq::Insert:
+ case UtilPrepareReq::Write:
+ case UtilPrepareReq::Read:
+ jam();
+ break;
+ case UtilPrepareReq::Delete:
+ jam();
+ noAttr = 1; // only involves the primary key, which should be the first attribute
+ break;
+ }
+ prepareUtilTransaction(pcallback, signal, senderData, tableId, NULL,
+ prepReq, noAttr, NULL, NULL);
+}
+
+void
+Dbdict::prepareUtilTransaction(Callback *pcallback,
+ Signal* signal,
+ Uint32 senderData,
+ Uint32 tableId,
+ const char* tableName,
+ UtilPrepareReq::OperationTypeValue prepReq,
+ Uint32 noAttr,
+ Uint32 attrIds[],
+ const char *attrNames[])
+{
+ jam();
+ EVENT_TRACE;
+
+ UtilPrepareReq * utilPrepareReq =
+ (UtilPrepareReq *)signal->getDataPtrSend();
+
+ utilPrepareReq->setSenderRef(reference());
+ utilPrepareReq->setSenderData(senderData);
+
+ const Uint32 pageSizeInWords = 128;
+ Uint32 propPage[pageSizeInWords];
+ LinearWriter w(&propPage[0],128);
+ w.first();
+ w.add(UtilPrepareReq::NoOfOperations, 1);
+ w.add(UtilPrepareReq::OperationType, prepReq);
+ if (tableName) {
+ jam();
+ w.add(UtilPrepareReq::TableName, tableName);
+ } else {
+ jam();
+ w.add(UtilPrepareReq::TableId, tableId);
+ }
+ for(Uint32 i = 0; i < noAttr; i++)
+ if (tableName) {
+ jam();
+ w.add(UtilPrepareReq::AttributeName, attrNames[i]);
+ } else {
+ if (attrIds) {
+ jam();
+ w.add(UtilPrepareReq::AttributeId, attrIds[i]);
+ } else {
+ jam();
+ w.add(UtilPrepareReq::AttributeId, i);
+ }
+ }
+#ifdef EVENT_DEBUG
+ // Debugging
+ SimplePropertiesLinearReader reader(propPage, w.getWordsUsed());
+ printf("Dict::prepareInsertTransactions: Sent SimpleProperties:\n");
+ reader.printAll(ndbout);
+#endif
+
+ struct LinearSectionPtr sectionsPtr[UtilPrepareReq::NoOfSections];
+ sectionsPtr[UtilPrepareReq::PROPERTIES_SECTION].p = propPage;
+ sectionsPtr[UtilPrepareReq::PROPERTIES_SECTION].sz = w.getWordsUsed();
+
+ sendSignalUtilReq(pcallback, DBUTIL_REF, GSN_UTIL_PREPARE_REQ, signal,
+ UtilPrepareReq::SignalLength, JBB,
+ sectionsPtr, UtilPrepareReq::NoOfSections);
+}
+
+/*****************************************************************
+ *
+ * CREATE_EVNT_REQ has three request types: RT_USER_CREATE and RT_USER_GET
+ * (from the user) and RT_DICT_AFTER_GET, sent from the master DICT to the
+ * slaves.
+ *
+ * This function just dispatches these to
+ *
+ * createEvent_RT_USER_CREATE
+ * createEvent_RT_USER_GET
+ * createEvent_RT_DICT_AFTER_GET
+ *
+ * respectively
+ *
+ */
+
+void
+Dbdict::execCREATE_EVNT_REQ(Signal* signal)
+{
+ jamEntry();
+
+#if 0
+ {
+ SafeCounterHandle handle;
+ {
+ SafeCounter tmp(c_counterMgr, handle);
+ tmp.init<CreateEvntRef>(CMVMI, GSN_DUMP_STATE_ORD, /* senderData */ 13);
+ tmp.clearWaitingFor();
+ tmp.setWaitingFor(3);
+ ndbrequire(!tmp.done());
+ ndbout_c("Allocted");
+ }
+ ndbrequire(!handle.done());
+ {
+ SafeCounter tmp(c_counterMgr, handle);
+ tmp.clearWaitingFor(3);
+ ndbrequire(tmp.done());
+ ndbout_c("Deallocted");
+ }
+ ndbrequire(handle.done());
+ }
+ {
+ NodeBitmask nodes;
+ nodes.clear();
+
+ nodes.set(2);
+ nodes.set(3);
+ nodes.set(4);
+ nodes.set(5);
+
+ {
+ Uint32 i = 0;
+ while((i = nodes.find(i)) != NodeBitmask::NotFound){
+ ndbout_c("1 Node id = %u", i);
+ i++;
+ }
+ }
+
+ NodeReceiverGroup rg(DBDICT, nodes);
+ RequestTracker rt2;
+ ndbrequire(rt2.done());
+ ndbrequire(!rt2.hasRef());
+ ndbrequire(!rt2.hasConf());
+ rt2.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF, 13);
+
+ RequestTracker rt3;
+ rt3.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF, 13);
+
+ ndbrequire(!rt2.done());
+ ndbrequire(!rt3.done());
+
+ rt2.reportRef(c_counterMgr, 2);
+ rt3.reportConf(c_counterMgr, 2);
+
+ ndbrequire(!rt2.done());
+ ndbrequire(!rt3.done());
+
+ rt2.reportConf(c_counterMgr, 3);
+ rt3.reportConf(c_counterMgr, 3);
+
+ ndbrequire(!rt2.done());
+ ndbrequire(!rt3.done());
+
+ rt2.reportConf(c_counterMgr, 4);
+ rt3.reportConf(c_counterMgr, 4);
+
+ ndbrequire(!rt2.done());
+ ndbrequire(!rt3.done());
+
+ rt2.reportConf(c_counterMgr, 5);
+ rt3.reportConf(c_counterMgr, 5);
+
+ ndbrequire(rt2.done());
+ ndbrequire(rt3.done());
+ }
+#endif
+
+ if (! assembleFragments(signal)) {
+ jam();
+ return;
+ }
+
+ CreateEvntReq *req = (CreateEvntReq*)signal->getDataPtr();
+ const CreateEvntReq::RequestType requestType = req->getRequestType();
+ const Uint32 requestFlag = req->getRequestFlag();
+
+ OpCreateEventPtr evntRecPtr;
+ // Seize a Create Event record
+ if (!c_opCreateEvent.seize(evntRecPtr)) {
+ // Failed to allocate event record
+ jam();
+ releaseSections(signal);
+
+ CreateEvntRef * ret = (CreateEvntRef *)signal->getDataPtrSend();
+ ret->senderRef = reference();
+ ret->setErrorCode(CreateEvntRef::SeizeError);
+ ret->setErrorLine(__LINE__);
+ ret->setErrorNode(reference());
+ sendSignal(signal->senderBlockRef(), GSN_CREATE_EVNT_REF, signal,
+ CreateEvntRef::SignalLength, JBB);
+ return;
+ }
+
+#ifdef EVENT_DEBUG
+ ndbout_c("DBDICT::execCREATE_EVNT_REQ from %u evntRecId = (%d)", refToNode(signal->getSendersBlockRef()), evntRecPtr.i);
+#endif
+
+ ndbrequire(req->getUserRef() == signal->getSendersBlockRef());
+
+ evntRecPtr.p->init(req,this);
+
+ if (requestFlag & (Uint32)CreateEvntReq::RT_DICT_AFTER_GET) {
+ jam();
+ EVENT_TRACE;
+ createEvent_RT_DICT_AFTER_GET(signal, evntRecPtr);
+ return;
+ }
+ if (requestType == CreateEvntReq::RT_USER_GET) {
+ jam();
+ EVENT_TRACE;
+ createEvent_RT_USER_GET(signal, evntRecPtr);
+ return;
+ }
+ if (requestType == CreateEvntReq::RT_USER_CREATE) {
+ jam();
+ EVENT_TRACE;
+ createEvent_RT_USER_CREATE(signal, evntRecPtr);
+ return;
+ }
+
+#ifdef EVENT_DEBUG
+ ndbout << "Dbdict.cpp: Dbdict::execCREATE_EVNT_REQ other" << endl;
+#endif
+ jam();
+ releaseSections(signal);
+
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+}
+
+/********************************************************************
+ *
+ * Event creation
+ *
+ *****************************************************************/
+
+void
+Dbdict::createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr){
+ jam();
+ evntRecPtr.p->m_request.setUserRef(signal->senderBlockRef());
+
+#ifdef EVENT_DEBUG
+ ndbout << "Dbdict.cpp: Dbdict::execCREATE_EVNT_REQ RT_USER" << endl;
+ char buf[128] = {0};
+ AttributeMask mask = evntRecPtr.p->m_request.getAttrListBitmask();
+ mask.getText(buf);
+ ndbout_c("mask = %s", buf);
+#endif
+
+ // Interpret the long signal
+
+ SegmentedSectionPtr ssPtr;
+ // save name and event properties
+ signal->getSection(ssPtr, CreateEvntReq::EVENT_NAME_SECTION);
+
+ SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
+#ifdef EVENT_DEBUG
+ r0.printAll(ndbout);
+#endif
+ // event name
+ if ((!r0.first()) ||
+ (r0.getValueType() != SimpleProperties::StringValue) ||
+ (r0.getValueLen() <= 0)) {
+ jam();
+ releaseSections(signal);
+
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+ r0.getString(evntRecPtr.p->m_eventRec.NAME);
+ {
+ int len = strlen(evntRecPtr.p->m_eventRec.NAME);
+ memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
+#ifdef EVENT_DEBUG
+ printf("CreateEvntReq::RT_USER_CREATE; EventName %s, len %u\n",
+ evntRecPtr.p->m_eventRec.NAME, len);
+ for(int i = 0; i < MAX_TAB_NAME_SIZE/4; i++)
+ printf("H'%.8x ", ((Uint32*)evntRecPtr.p->m_eventRec.NAME)[i]);
+ printf("\n");
+#endif
+ }
+ // table name
+ if ((!r0.next()) ||
+ (r0.getValueType() != SimpleProperties::StringValue) ||
+ (r0.getValueLen() <= 0)) {
+ jam();
+ releaseSections(signal);
+
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+ r0.getString(evntRecPtr.p->m_eventRec.TABLE_NAME);
+ {
+ int len = strlen(evntRecPtr.p->m_eventRec.TABLE_NAME);
+ memset(evntRecPtr.p->m_eventRec.TABLE_NAME+len, 0, MAX_TAB_NAME_SIZE-len);
+ }
+
+#ifdef EVENT_DEBUG
+ ndbout_c("event name: %s",evntRecPtr.p->m_eventRec.NAME);
+ ndbout_c("table name: %s",evntRecPtr.p->m_eventRec.TABLE_NAME);
+#endif
+
+ releaseSections(signal);
+
+ // Send request to SUMA
+
+ CreateSubscriptionIdReq * sumaIdReq =
+ (CreateSubscriptionIdReq *)signal->getDataPtrSend();
+
+ // make sure we save the original sender for later
+ sumaIdReq->senderData = evntRecPtr.i;
+#ifdef EVENT_DEBUG
+ ndbout << "sumaIdReq->senderData = " << sumaIdReq->senderData << endl;
+#endif
+ sendSignal(SUMA_REF, GSN_CREATE_SUBID_REQ, signal,
+ CreateSubscriptionIdReq::SignalLength, JBB);
+ // we should now return in either execCREATE_SUBID_CONF
+ // or execCREATE_SUBID_REF
+}
+
+void Dbdict::execCREATE_SUBID_REF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ CreateSubscriptionIdRef * const ref =
+ (CreateSubscriptionIdRef *)signal->getDataPtr();
+ OpCreateEventPtr evntRecPtr;
+
+ evntRecPtr.i = ref->senderData;
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+}
+
+void Dbdict::execCREATE_SUBID_CONF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+
+ CreateSubscriptionIdConf const * sumaIdConf =
+ (CreateSubscriptionIdConf *)signal->getDataPtr();
+
+ Uint32 evntRecId = sumaIdConf->senderData;
+ OpCreateEvent *evntRec;
+
+ ndbrequire((evntRec = c_opCreateEvent.getPtr(evntRecId)) != NULL);
+
+ evntRec->m_request.setEventId(sumaIdConf->subscriptionId);
+ evntRec->m_request.setEventKey(sumaIdConf->subscriptionKey);
+
+ releaseSections(signal);
+
+ Callback c = { safe_cast(&Dbdict::createEventUTIL_PREPARE), 0 };
+
+ prepareTransactionEventSysTable(&c, signal, evntRecId,
+ UtilPrepareReq::Insert);
+}
+
+void
+Dbdict::createEventComplete_RT_USER_CREATE(Signal* signal,
+ OpCreateEventPtr evntRecPtr){
+ jam();
+ createEvent_sendReply(signal, evntRecPtr);
+}
+
+/*********************************************************************
+ *
+ * UTIL_PREPARE, UTIL_EXECUTE
+ *
+ * insert or read systable NDB$EVENTS_0
+ */
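+
+/**
+ * For a Read only the NAME column (the primary key) carries data and
+ * the remaining attribute headers are initialised with zero size; for
+ * an Insert every column of sysTab_NDBEVENTS_0 is sent.
+ */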
+
+void interpretUtilPrepareErrorCode(UtilPrepareRef::ErrorCode errorCode,
+ bool& temporary, Uint32& line)
+{
+ switch (errorCode) {
+ case UtilPrepareRef::NO_ERROR:
+ jam();
+ line = __LINE__;
+ EVENT_TRACE;
+ break;
+ case UtilPrepareRef::PREPARE_SEIZE_ERROR:
+ jam();
+ temporary = true;
+ line = __LINE__;
+ EVENT_TRACE;
+ break;
+ case UtilPrepareRef::PREPARE_PAGES_SEIZE_ERROR:
+ jam();
+ line = __LINE__;
+ EVENT_TRACE;
+ break;
+ case UtilPrepareRef::PREPARED_OPERATION_SEIZE_ERROR:
+ jam();
+ line = __LINE__;
+ EVENT_TRACE;
+ break;
+ case UtilPrepareRef::DICT_TAB_INFO_ERROR:
+ jam();
+ line = __LINE__;
+ EVENT_TRACE;
+ break;
+ case UtilPrepareRef::MISSING_PROPERTIES_SECTION:
+ jam();
+ line = __LINE__;
+ EVENT_TRACE;
+ break;
+ default:
+ jam();
+ line = __LINE__;
+ EVENT_TRACE;
+ break;
+ }
+}
+
+void
+Dbdict::createEventUTIL_PREPARE(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ if (returnCode == 0) {
+ UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr();
+ OpCreateEventPtr evntRecPtr;
+ jam();
+ evntRecPtr.i = req->getSenderData();
+ const Uint32 prepareId = req->getPrepareId();
+
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ Callback c = { safe_cast(&Dbdict::createEventUTIL_EXECUTE), 0 };
+
+ switch (evntRecPtr.p->m_requestType) {
+ case CreateEvntReq::RT_USER_GET:
+#ifdef EVENT_DEBUG
+ printf("get type = %d\n", CreateEvntReq::RT_USER_GET);
+#endif
+ jam();
+ executeTransEventSysTable(&c, signal,
+ evntRecPtr.i, evntRecPtr.p->m_eventRec,
+ prepareId, UtilPrepareReq::Read);
+ break;
+ case CreateEvntReq::RT_USER_CREATE:
+#ifdef EVENT_DEBUG
+ printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE);
+#endif
+ {
+ evntRecPtr.p->m_eventRec.EVENT_TYPE = evntRecPtr.p->m_request.getEventType();
+ AttributeMask m = evntRecPtr.p->m_request.getAttrListBitmask();
+ memcpy(evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK, &m,
+ sizeof(evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK));
+ evntRecPtr.p->m_eventRec.SUBID = evntRecPtr.p->m_request.getEventId();
+ evntRecPtr.p->m_eventRec.SUBKEY = evntRecPtr.p->m_request.getEventKey();
+ }
+ jam();
+ executeTransEventSysTable(&c, signal,
+ evntRecPtr.i, evntRecPtr.p->m_eventRec,
+ prepareId, UtilPrepareReq::Insert);
+ break;
+ default:
+#ifdef EVENT_DEBUG
+ printf("type = %d\n", evntRecPtr.p->m_requestType);
+    printf("get type = %d\n", CreateEvntReq::RT_USER_GET);
+ printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE);
+#endif
+ ndbrequire(false);
+ }
+ } else { // returnCode != 0
+ UtilPrepareRef* const ref = (UtilPrepareRef*)signal->getDataPtr();
+
+ const UtilPrepareRef::ErrorCode errorCode =
+ (UtilPrepareRef::ErrorCode)ref->getErrorCode();
+
+ OpCreateEventPtr evntRecPtr;
+ evntRecPtr.i = ref->getSenderData();
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ bool temporary = false;
+ interpretUtilPrepareErrorCode(errorCode,
+ temporary, evntRecPtr.p->m_errorLine);
+ if (temporary) {
+ evntRecPtr.p->m_errorCode =
+ CreateEvntRef::makeTemporary(CreateEvntRef::Undefined);
+ }
+
+ if (evntRecPtr.p->m_errorCode == 0) {
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ }
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+ }
+}
+
+void Dbdict::executeTransEventSysTable(Callback *pcallback, Signal *signal,
+ const Uint32 ptrI,
+ sysTab_NDBEVENTS_0& m_eventRec,
+ const Uint32 prepareId,
+ UtilPrepareReq::OperationTypeValue prepReq)
+{
+ jam();
+ const Uint32 noAttr = EVENT_SYSTEM_TABLE_LENGTH;
+ Uint32 total_len = 0;
+
+ Uint32* attrHdr = signal->theData + 25;
+ Uint32* attrPtr = attrHdr;
+
+ Uint32 id=0;
+ // attribute 0 event name: Primary Key
+ {
+ AttributeHeader::init(attrPtr, id, sysTab_NDBEVENTS_0_szs[id]/4);
+ total_len += sysTab_NDBEVENTS_0_szs[id];
+ attrPtr++; id++;
+ }
+
+ switch (prepReq) {
+ case UtilPrepareReq::Read:
+ jam();
+ EVENT_TRACE;
+    // no more values supplied; add the remaining attribute headers with zero size
+ while ( id < noAttr )
+ AttributeHeader::init(attrPtr++, id++, 0);
+ ndbrequire(id == (Uint32) noAttr);
+ break;
+ case UtilPrepareReq::Insert:
+ jam();
+ EVENT_TRACE;
+ while ( id < noAttr ) {
+ AttributeHeader::init(attrPtr, id, sysTab_NDBEVENTS_0_szs[id]/4);
+ total_len += sysTab_NDBEVENTS_0_szs[id];
+ attrPtr++; id++;
+ }
+ ndbrequire(id == (Uint32) noAttr);
+ break;
+ case UtilPrepareReq::Delete:
+ ndbrequire(id == 1);
+ break;
+ default:
+ ndbrequire(false);
+ }
+
+ LinearSectionPtr headerPtr;
+ LinearSectionPtr dataPtr;
+
+ headerPtr.p = attrHdr;
+ headerPtr.sz = noAttr;
+
+ dataPtr.p = (Uint32*)&m_eventRec;
+ dataPtr.sz = total_len/4;
+
+ ndbrequire((total_len == sysTab_NDBEVENTS_0_szs[0]) ||
+ (total_len == sizeof(sysTab_NDBEVENTS_0)));
+
+#if 0
+ printf("Header size %u\n", headerPtr.sz);
+ for(int i = 0; i < (int)headerPtr.sz; i++)
+ printf("H'%.8x ", attrHdr[i]);
+ printf("\n");
+
+ printf("Data size %u\n", dataPtr.sz);
+ for(int i = 0; i < (int)dataPtr.sz; i++)
+    printf("H'%.8x ", dataPtr.p[i]);
+ printf("\n");
+#endif
+
+ executeTransaction(pcallback, signal,
+ ptrI,
+ prepareId,
+ id,
+ headerPtr,
+ dataPtr);
+}
+
+void Dbdict::executeTransaction(Callback *pcallback,
+ Signal* signal,
+ Uint32 senderData,
+ Uint32 prepareId,
+ Uint32 noAttr,
+ LinearSectionPtr headerPtr,
+ LinearSectionPtr dataPtr)
+{
+ jam();
+ EVENT_TRACE;
+
+ UtilExecuteReq * utilExecuteReq =
+ (UtilExecuteReq *)signal->getDataPtrSend();
+
+ utilExecuteReq->setSenderRef(reference());
+ utilExecuteReq->setSenderData(senderData);
+ utilExecuteReq->setPrepareId(prepareId);
+ utilExecuteReq->setReleaseFlag(); // must be done after setting prepareId
+
+#if 0
+ printf("Header size %u\n", headerPtr.sz);
+ for(int i = 0; i < (int)headerPtr.sz; i++)
+    printf("H'%.8x ", headerPtr.p[i]);
+ printf("\n");
+
+ printf("Data size %u\n", dataPtr.sz);
+ for(int i = 0; i < (int)dataPtr.sz; i++)
+    printf("H'%.8x ", dataPtr.p[i]);
+ printf("\n");
+#endif
+
+ struct LinearSectionPtr sectionsPtr[UtilExecuteReq::NoOfSections];
+ sectionsPtr[UtilExecuteReq::HEADER_SECTION].p = headerPtr.p;
+ sectionsPtr[UtilExecuteReq::HEADER_SECTION].sz = noAttr;
+ sectionsPtr[UtilExecuteReq::DATA_SECTION].p = dataPtr.p;
+ sectionsPtr[UtilExecuteReq::DATA_SECTION].sz = dataPtr.sz;
+
+ sendSignalUtilReq(pcallback, DBUTIL_REF, GSN_UTIL_EXECUTE_REQ, signal,
+ UtilExecuteReq::SignalLength, JBB,
+ sectionsPtr, UtilExecuteReq::NoOfSections);
+}
+
+void Dbdict::parseReadEventSys(Signal* signal, sysTab_NDBEVENTS_0& m_eventRec)
+{
+ SegmentedSectionPtr headerPtr, dataPtr;
+ jam();
+ signal->getSection(headerPtr, UtilExecuteReq::HEADER_SECTION);
+ SectionReader headerReader(headerPtr, getSectionSegmentPool());
+
+ signal->getSection(dataPtr, UtilExecuteReq::DATA_SECTION);
+ SectionReader dataReader(dataPtr, getSectionSegmentPool());
+
+ AttributeHeader header;
+ Uint32 *dst = (Uint32*)&m_eventRec;
+
+ for (int i = 0; i < EVENT_SYSTEM_TABLE_LENGTH; i++) {
+ headerReader.getWord((Uint32 *)&header);
+ int sz = header.getDataSize();
+    for (int j = 0; j < sz; j++)
+ dataReader.getWord(dst++);
+ }
+
+ ndbrequire( ((char*)dst-(char*)&m_eventRec) == sizeof(m_eventRec) );
+
+ releaseSections(signal);
+}
+
+void Dbdict::createEventUTIL_EXECUTE(Signal *signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ if (returnCode == 0) {
+ // Entry into system table all set
+ UtilExecuteConf* const conf = (UtilExecuteConf*)signal->getDataPtr();
+ jam();
+ OpCreateEventPtr evntRecPtr;
+ evntRecPtr.i = conf->getSenderData();
+
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+ OpCreateEvent *evntRec = evntRecPtr.p;
+
+ switch (evntRec->m_requestType) {
+ case CreateEvntReq::RT_USER_GET: {
+#ifdef EVENT_DEBUG
+ printf("get type = %d\n", CreateEvntReq::RT_USER_GET);
+#endif
+ parseReadEventSys(signal, evntRecPtr.p->m_eventRec);
+
+ evntRec->m_request.setEventType(evntRecPtr.p->m_eventRec.EVENT_TYPE);
+ evntRec->m_request.setAttrListBitmask(*(AttributeMask*)evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK);
+ evntRec->m_request.setEventId(evntRecPtr.p->m_eventRec.SUBID);
+ evntRec->m_request.setEventKey(evntRecPtr.p->m_eventRec.SUBKEY);
+
+#ifdef EVENT_DEBUG
+ printf("EventName: %s\n", evntRec->m_eventRec.NAME);
+ printf("TableName: %s\n", evntRec->m_eventRec.TABLE_NAME);
+#endif
+
+ // find table id for event table
+ TableRecord keyRecord;
+ strcpy(keyRecord.tableName, evntRecPtr.p->m_eventRec.TABLE_NAME);
+
+ TableRecordPtr tablePtr;
+ c_tableRecordHash.find(tablePtr, keyRecord);
+
+ if (tablePtr.i == RNIL) {
+ jam();
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+
+ evntRec->m_request.setTableId(tablePtr.p->tableId);
+
+ createEventComplete_RT_USER_GET(signal, evntRecPtr);
+ return;
+ }
+ case CreateEvntReq::RT_USER_CREATE: {
+#ifdef EVENT_DEBUG
+ printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE);
+#endif
+ jam();
+ createEventComplete_RT_USER_CREATE(signal, evntRecPtr);
+ return;
+ }
+ break;
+ default:
+ ndbrequire(false);
+ }
+ } else { // returnCode != 0
+ UtilExecuteRef * const ref = (UtilExecuteRef *)signal->getDataPtr();
+ OpCreateEventPtr evntRecPtr;
+ evntRecPtr.i = ref->getSenderData();
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+ jam();
+ evntRecPtr.p->m_errorNode = reference();
+ evntRecPtr.p->m_errorLine = __LINE__;
+
+ switch (ref->getErrorCode()) {
+ case UtilExecuteRef::TCError:
+ switch (ref->getTCErrorCode()) {
+ case ZNOT_FOUND:
+ jam();
+ evntRecPtr.p->m_errorCode = CreateEvntRef::EventNotFound;
+ break;
+ case ZALREADYEXIST:
+ jam();
+ evntRecPtr.p->m_errorCode = CreateEvntRef::EventNameExists;
+ break;
+ default:
+ jam();
+ evntRecPtr.p->m_errorCode = CreateEvntRef::UndefinedTCError;
+ break;
+ }
+ break;
+ default:
+ jam();
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ break;
+ }
+
+ createEvent_sendReply(signal, evntRecPtr);
+ }
+}
+
+/***********************************************************************
+ *
+ * NdbEventOperation, reading systable, creating event in suma
+ *
+ */
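+
+/* RT_USER_GET in outline: the event name is read from the signal section,
+ * the NDB$EVENTS_0 row is fetched via the UTIL Read path above, the table
+ * id is resolved from the stored table name, and
+ * createEventComplete_RT_USER_GET() then fans the request out to all
+ * alive DICT nodes as RT_DICT_AFTER_GET.
+ */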
+
+void
+Dbdict::createEvent_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr){
+ jam();
+ EVENT_TRACE;
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_REQ::RT_USER_GET evntRecPtr.i = (%d), ref = %u", evntRecPtr.i, evntRecPtr.p->m_request.getUserRef());
+#endif
+
+ SegmentedSectionPtr ssPtr;
+
+ signal->getSection(ssPtr, 0);
+
+ SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
+#ifdef EVENT_DEBUG
+ r0.printAll(ndbout);
+#endif
+ if ((!r0.first()) ||
+ (r0.getValueType() != SimpleProperties::StringValue) ||
+ (r0.getValueLen() <= 0)) {
+ jam();
+ releaseSections(signal);
+
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+
+ r0.getString(evntRecPtr.p->m_eventRec.NAME);
+ int len = strlen(evntRecPtr.p->m_eventRec.NAME);
+ memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
+
+ releaseSections(signal);
+
+ Callback c = { safe_cast(&Dbdict::createEventUTIL_PREPARE), 0 };
+
+ prepareTransactionEventSysTable(&c, signal, evntRecPtr.i,
+ UtilPrepareReq::Read);
+ /*
+ * Will read systable and fill an OpCreateEventPtr
+ * and return below
+ */
+}
+
+void
+Dbdict::createEventComplete_RT_USER_GET(Signal* signal,
+ OpCreateEventPtr evntRecPtr){
+ jam();
+
+  // Send to oneself and the other DICTs
+ CreateEvntReq * req = (CreateEvntReq *)signal->getDataPtrSend();
+
+ *req = evntRecPtr.p->m_request;
+ req->senderRef = reference();
+ req->senderData = evntRecPtr.i;
+
+ req->addRequestFlag(CreateEvntReq::RT_DICT_AFTER_GET);
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT(Coordinator) sending GSN_CREATE_EVNT_REQ::RT_DICT_AFTER_GET to DBDICT participants evntRecPtr.i = (%d)", evntRecPtr.i);
+#endif
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ RequestTracker & p = evntRecPtr.p->m_reqTracker;
+ p.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF, evntRecPtr.i);
+
+ sendSignal(rg, GSN_CREATE_EVNT_REQ, signal, CreateEvntReq::SignalLength, JBB);
+}
+
+void
+Dbdict::createEvent_nodeFailCallback(Signal* signal, Uint32 eventRecPtrI,
+ Uint32 returnCode){
+ OpCreateEventPtr evntRecPtr;
+ c_opCreateEvent.getPtr(evntRecPtr, eventRecPtrI);
+ createEvent_sendReply(signal, evntRecPtr);
+}
+
+void Dbdict::execCREATE_EVNT_REF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ CreateEvntRef * const ref = (CreateEvntRef *)signal->getDataPtr();
+ OpCreateEventPtr evntRecPtr;
+
+ evntRecPtr.i = ref->getUserData();
+
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_REF evntRecPtr.i = (%d)", evntRecPtr.i);
+#endif
+
+ if (ref->errorCode == CreateEvntRef::NF_FakeErrorREF){
+ jam();
+ evntRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(ref->senderRef));
+ } else {
+ jam();
+ evntRecPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(ref->senderRef));
+ }
+ createEvent_sendReply(signal, evntRecPtr);
+
+ return;
+}
+
+void Dbdict::execCREATE_EVNT_CONF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ CreateEvntConf * const conf = (CreateEvntConf *)signal->getDataPtr();
+ OpCreateEventPtr evntRecPtr;
+
+ evntRecPtr.i = conf->getUserData();
+
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_CONF evntRecPtr.i = (%d)", evntRecPtr.i);
+#endif
+
+ evntRecPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(conf->senderRef));
+
+  // we will only have a valid table name if it is the master DICT
+  // sending this, but that's ok
+ LinearSectionPtr ptr[1];
+ ptr[0].p = (Uint32 *)evntRecPtr.p->m_eventRec.TABLE_NAME;
+ ptr[0].sz =
+    (strlen(evntRecPtr.p->m_eventRec.TABLE_NAME)+4)/4; // word count incl. terminating null
+
+ createEvent_sendReply(signal, evntRecPtr, ptr, 1);
+
+ return;
+}
+
+/************************************************
+ *
+ * Participant stuff
+ *
+ */
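+
+/* Each participant (including the coordinator's own node) handles
+ * RT_DICT_AFTER_GET by asking its local SUMA to set up the subscription,
+ * roughly:
+ *
+ *   SUB_CREATE_REQ  -> SUB_CREATE_CONF/REF
+ *   SUB_SYNC_REQ (MetaData part) -> SUB_SYNC_CONF/REF
+ *
+ * and finally reports back to the coordinator via createEvent_sendReply().
+ */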
+
+void
+Dbdict::createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPtr){
+ jam();
+ evntRecPtr.p->m_request.setUserRef(signal->senderBlockRef());
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT(Participant) got CREATE_EVNT_REQ::RT_DICT_AFTER_GET evntRecPtr.i = (%d)", evntRecPtr.i);
+#endif
+
+ // the signal comes from the DICT block that got the first user request!
+ // This code runs on all DICT nodes, including oneself
+
+ // Seize a Create Event record, the Coordinator will now have two seized
+ // but that's ok, it's like a recursion
+
+ SubCreateReq * sumaReq = (SubCreateReq *)signal->getDataPtrSend();
+
+ sumaReq->subscriberRef = reference(); // reference to DICT
+ sumaReq->subscriberData = evntRecPtr.i;
+ sumaReq->subscriptionId = evntRecPtr.p->m_request.getEventId();
+ sumaReq->subscriptionKey = evntRecPtr.p->m_request.getEventKey();
+ sumaReq->subscriptionType = SubCreateReq::TableEvent;
+ sumaReq->tableId = evntRecPtr.p->m_request.getTableId();
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("sending GSN_SUB_CREATE_REQ");
+#endif
+
+ sendSignal(SUMA_REF, GSN_SUB_CREATE_REQ, signal,
+ SubCreateReq::SignalLength+1 /*to get table Id*/, JBB);
+}
+
+void Dbdict::execSUB_CREATE_REF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ SubCreateRef * const ref = (SubCreateRef *)signal->getDataPtr();
+ OpCreateEventPtr evntRecPtr;
+
+ evntRecPtr.i = ref->subscriberData;
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT(Participant) got SUB_CREATE_REF evntRecPtr.i = (%d)", evntRecPtr.i);
+#endif
+
+ if (ref->err == GrepError::SUBSCRIPTION_ID_NOT_UNIQUE) {
+ jam();
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("SUBSCRIPTION_ID_NOT_UNIQUE");
+#endif
+ createEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("Other error");
+#endif
+
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+}
+
+void Dbdict::execSUB_CREATE_CONF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+
+ SubCreateConf * const sumaConf = (SubCreateConf *)signal->getDataPtr();
+
+ const Uint32 subscriptionId = sumaConf->subscriptionId;
+ const Uint32 subscriptionKey = sumaConf->subscriptionKey;
+ const Uint32 evntRecId = sumaConf->subscriberData;
+
+ OpCreateEvent *evntRec;
+ ndbrequire((evntRec = c_opCreateEvent.getPtr(evntRecId)) != NULL);
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT(Participant) got SUB_CREATE_CONF evntRecPtr.i = (%d)", evntRecId);
+#endif
+
+ SubSyncReq *sumaSync = (SubSyncReq *)signal->getDataPtrSend();
+
+ sumaSync->subscriptionId = subscriptionId;
+ sumaSync->subscriptionKey = subscriptionKey;
+ sumaSync->part = (Uint32) SubscriptionData::MetaData;
+ sumaSync->subscriberData = evntRecId;
+
+ sendSignal(SUMA_REF, GSN_SUB_SYNC_REQ, signal,
+ SubSyncReq::SignalLength, JBB);
+}
+
+void Dbdict::execSUB_SYNC_REF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ SubSyncRef * const ref = (SubSyncRef *)signal->getDataPtr();
+ OpCreateEventPtr evntRecPtr;
+
+ evntRecPtr.i = ref->subscriberData;
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+}
+
+void Dbdict::execSUB_SYNC_CONF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+
+ SubSyncConf * const sumaSyncConf = (SubSyncConf *)signal->getDataPtr();
+
+ // Uint32 subscriptionId = sumaSyncConf->subscriptionId;
+ // Uint32 subscriptionKey = sumaSyncConf->subscriptionKey;
+ OpCreateEventPtr evntRecPtr;
+
+ evntRecPtr.i = sumaSyncConf->subscriberData;
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ ndbrequire(sumaSyncConf->part == (Uint32)SubscriptionData::MetaData);
+
+ createEvent_sendReply(signal, evntRecPtr);
+}
+
+/****************************************************
+ *
+ * common create reply method
+ *
+ *******************************************************/
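+
+/* createEvent_sendReply() is the single exit point of the create-event
+ * protocol: it waits until the request tracker is done, sends
+ * CREATE_EVNT_REF (with error code/line/node) if any participant failed,
+ * otherwise CREATE_EVNT_CONF, optionally attaching the table name as a
+ * linear section, and then releases the operation record.
+ */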
+
+void Dbdict::createEvent_sendReply(Signal* signal,
+ OpCreateEventPtr evntRecPtr,
+ LinearSectionPtr *ptr, int noLSP)
+{
+ jam();
+ EVENT_TRACE;
+
+  // check if we're ready to send the reply;
+  // if we are the master DICT we might be waiting for CONF/REF
+
+ if (!evntRecPtr.p->m_reqTracker.done()) {
+ jam();
+ return; // there's more to come
+ }
+
+ if (evntRecPtr.p->m_reqTracker.hasRef()) {
+ ptr = NULL; // we don't want to return anything if there's an error
+ if (!evntRecPtr.p->hasError()) {
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+ jam();
+ } else
+ jam();
+ }
+
+ // reference to API if master DICT
+ // else reference to master DICT
+ Uint32 senderRef = evntRecPtr.p->m_request.getUserRef();
+ Uint32 signalLength;
+ Uint32 gsn;
+
+ if (evntRecPtr.p->hasError()) {
+ jam();
+ EVENT_TRACE;
+ CreateEvntRef * ret = (CreateEvntRef *)signal->getDataPtrSend();
+
+ ret->setEventId(evntRecPtr.p->m_request.getEventId());
+ ret->setEventKey(evntRecPtr.p->m_request.getEventKey());
+ ret->setUserData(evntRecPtr.p->m_request.getUserData());
+ ret->senderRef = reference();
+ ret->setTableId(evntRecPtr.p->m_request.getTableId());
+ ret->setEventType(evntRecPtr.p->m_request.getEventType());
+ ret->setRequestType(evntRecPtr.p->m_request.getRequestType());
+
+ ret->setErrorCode(evntRecPtr.p->m_errorCode);
+ ret->setErrorLine(evntRecPtr.p->m_errorLine);
+ ret->setErrorNode(evntRecPtr.p->m_errorNode);
+
+ signalLength = CreateEvntRef::SignalLength;
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT sending GSN_CREATE_EVNT_REF to evntRecPtr.i = (%d) node = %u ref = %u", evntRecPtr.i, refToNode(senderRef), senderRef);
+ ndbout_c("errorCode = %u", evntRecPtr.p->m_errorCode);
+ ndbout_c("errorLine = %u", evntRecPtr.p->m_errorLine);
+#endif
+ gsn = GSN_CREATE_EVNT_REF;
+
+ } else {
+ jam();
+ EVENT_TRACE;
+ CreateEvntConf * evntConf = (CreateEvntConf *)signal->getDataPtrSend();
+
+ evntConf->setEventId(evntRecPtr.p->m_request.getEventId());
+ evntConf->setEventKey(evntRecPtr.p->m_request.getEventKey());
+ evntConf->setUserData(evntRecPtr.p->m_request.getUserData());
+ evntConf->senderRef = reference();
+ evntConf->setTableId(evntRecPtr.p->m_request.getTableId());
+ evntConf->setAttrListBitmask(evntRecPtr.p->m_request.getAttrListBitmask());
+ evntConf->setEventType(evntRecPtr.p->m_request.getEventType());
+ evntConf->setRequestType(evntRecPtr.p->m_request.getRequestType());
+
+ signalLength = CreateEvntConf::SignalLength;
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT sending GSN_CREATE_EVNT_CONF to evntRecPtr.i = (%d) node = %u ref = %u", evntRecPtr.i, refToNode(senderRef), senderRef);
+#endif
+ gsn = GSN_CREATE_EVNT_CONF;
+ }
+
+ if (ptr) {
+ jam();
+ sendSignal(senderRef, gsn, signal, signalLength, JBB, ptr, noLSP);
+ } else {
+ jam();
+ sendSignal(senderRef, gsn, signal, signalLength, JBB);
+ }
+
+ c_opCreateEvent.release(evntRecPtr);
+}
+
+/*************************************************************/
+
+/********************************************************************
+ *
+ * Start event
+ *
+ *******************************************************************/
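+
+/* SUB_START uses the same coordinator/participant split as create event:
+ * a request arriving from outside DBDICT makes this node the coordinator
+ * and is fanned out to all alive DICT nodes; a request arriving from
+ * another DICT is forwarded to the local SUMA.  Replies are collected in
+ * the request tracker and completed in completeSubStartReq().
+ */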
+
+void Dbdict::execSUB_START_REQ(Signal* signal)
+{
+ jamEntry();
+
+ Uint32 origSenderRef = signal->senderBlockRef();
+
+ OpSubEventPtr subbPtr;
+ if (!c_opSubEvent.seize(subbPtr)) {
+ SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend();
+    { // fix: echo subscriberRef from the request back in the REF
+      Uint32 subscriberRef = ((SubStartReq*)signal->getDataPtr())->subscriberRef;
+      ref->subscriberRef = subscriberRef;
+ }
+ jam();
+ // ret->setErrorCode(SubStartRef::SeizeError);
+ // ret->setErrorLine(__LINE__);
+ // ret->setErrorNode(reference());
+ ref->senderRef = reference();
+ ref->setTemporary(SubStartRef::Busy);
+
+ sendSignal(origSenderRef, GSN_SUB_START_REF, signal,
+ SubStartRef::SignalLength2, JBB);
+ return;
+ }
+
+ {
+ const SubStartReq* req = (SubStartReq*) signal->getDataPtr();
+ subbPtr.p->m_senderRef = req->senderRef;
+ subbPtr.p->m_senderData = req->senderData;
+ subbPtr.p->m_errorCode = 0;
+ }
+
+ if (refToBlock(origSenderRef) != DBDICT) {
+ /*
+ * Coordinator
+ */
+ jam();
+
+    subbPtr.p->m_senderRef = origSenderRef; // not sure if the API sets this correctly
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ RequestTracker & p = subbPtr.p->m_reqTracker;
+ p.init<SubStartRef>(c_counterMgr, rg, GSN_SUB_START_REF, subbPtr.i);
+
+ SubStartReq* req = (SubStartReq*) signal->getDataPtrSend();
+
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+
+#ifdef EVENT_PH3_DEBUG
+ ndbout_c("DBDICT(Coordinator) sending GSN_SUB_START_REQ to DBDICT participants subbPtr.i = (%d)", subbPtr.i);
+#endif
+
+ sendSignal(rg, GSN_SUB_START_REQ, signal, SubStartReq::SignalLength2, JBB);
+ return;
+ }
+ /*
+ * Participant
+ */
+ ndbrequire(refToBlock(origSenderRef) == DBDICT);
+
+ {
+ SubStartReq* req = (SubStartReq*) signal->getDataPtrSend();
+
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+
+#ifdef EVENT_PH3_DEBUG
+ ndbout_c("DBDICT(Participant) sending GSN_SUB_START_REQ to SUMA subbPtr.i = (%d)", subbPtr.i);
+#endif
+ sendSignal(SUMA_REF, GSN_SUB_START_REQ, signal, SubStartReq::SignalLength2, JBB);
+ }
+}
+
+void Dbdict::execSUB_START_REF(Signal* signal)
+{
+ jamEntry();
+
+ const SubStartRef* ref = (SubStartRef*) signal->getDataPtr();
+ Uint32 senderRef = ref->senderRef;
+
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, ref->senderData);
+
+ if (refToBlock(senderRef) == SUMA) {
+ /*
+ * Participant
+ */
+ jam();
+
+#ifdef EVENT_PH3_DEBUG
+ ndbout_c("DBDICT(Participant) got GSN_SUB_START_REF = (%d)", subbPtr.i);
+#endif
+
+ if (ref->isTemporary()){
+ jam();
+ SubStartReq* req = (SubStartReq*)signal->getDataPtrSend();
+      { // fix: carry over subscriberRef when retrying towards SUMA
+ Uint32 subscriberRef = ref->subscriberRef;
+ req->subscriberRef = subscriberRef;
+ }
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+ sendSignal(SUMA_REF, GSN_SUB_START_REQ,
+ signal, SubStartReq::SignalLength2, JBB);
+ } else {
+ jam();
+
+ SubStartRef* ref = (SubStartRef*) signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF,
+ signal, SubStartRef::SignalLength2, JBB);
+ c_opSubEvent.release(subbPtr);
+ }
+ return;
+ }
+ /*
+ * Coordinator
+ */
+ ndbrequire(refToBlock(senderRef) == DBDICT);
+#ifdef EVENT_PH3_DEBUG
+ ndbout_c("DBDICT(Coordinator) got GSN_SUB_START_REF = (%d)", subbPtr.i);
+#endif
+ if (ref->errorCode == SubStartRef::NF_FakeErrorREF){
+ jam();
+ subbPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef));
+ } else {
+ jam();
+ subbPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef));
+ }
+ completeSubStartReq(signal,subbPtr.i,0);
+}
+
+void Dbdict::execSUB_START_CONF(Signal* signal)
+{
+ jamEntry();
+
+ const SubStartConf* conf = (SubStartConf*) signal->getDataPtr();
+ Uint32 senderRef = conf->senderRef;
+
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, conf->senderData);
+
+ if (refToBlock(senderRef) == SUMA) {
+ /*
+ * Participant
+ */
+ jam();
+ SubStartConf* conf = (SubStartConf*) signal->getDataPtrSend();
+
+#ifdef EVENT_PH3_DEBUG
+ ndbout_c("DBDICT(Participant) got GSN_SUB_START_CONF = (%d)", subbPtr.i);
+#endif
+
+ conf->senderRef = reference();
+ conf->senderData = subbPtr.p->m_senderData;
+
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_CONF,
+ signal, SubStartConf::SignalLength2, JBB);
+ c_opSubEvent.release(subbPtr);
+ return;
+ }
+ /*
+ * Coordinator
+ */
+ ndbrequire(refToBlock(senderRef) == DBDICT);
+#ifdef EVENT_PH3_DEBUG
+ ndbout_c("DBDICT(Coordinator) got GSN_SUB_START_CONF = (%d)", subbPtr.i);
+#endif
+ subbPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef));
+ completeSubStartReq(signal,subbPtr.i,0);
+}
+
+/*
+ * Coordinator
+ */
+void Dbdict::completeSubStartReq(Signal* signal,
+ Uint32 ptrI,
+ Uint32 returnCode){
+ jam();
+
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, ptrI);
+
+ if (!subbPtr.p->m_reqTracker.done()){
+ jam();
+ return;
+ }
+
+ if (subbPtr.p->m_reqTracker.hasRef()) {
+ jam();
+#ifdef EVENT_DEBUG
+ ndbout_c("SUB_START_REF");
+#endif
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF,
+ signal, SubStartRef::SignalLength, JBB);
+ if (subbPtr.p->m_reqTracker.hasConf()) {
+ // stopStartedNodes(signal);
+ }
+ c_opSubEvent.release(subbPtr);
+ return;
+ }
+#ifdef EVENT_DEBUG
+ ndbout_c("SUB_START_CONF");
+#endif
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_CONF,
+ signal, SubStartConf::SignalLength, JBB);
+ c_opSubEvent.release(subbPtr);
+}
+
+/********************************************************************
+ *
+ * Stop event
+ *
+ *******************************************************************/
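+
+/* SUB_STOP mirrors SUB_START: the coordinator fans out to all DICT nodes,
+ * participants forward to their local SUMA, and completeSubStopReq()
+ * sends the final SUB_STOP_CONF/REF once the tracker is done.
+ */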
+
+void Dbdict::execSUB_STOP_REQ(Signal* signal)
+{
+ jamEntry();
+
+ Uint32 origSenderRef = signal->senderBlockRef();
+
+ OpSubEventPtr subbPtr;
+ if (!c_opSubEvent.seize(subbPtr)) {
+ SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend();
+ jam();
+ // ret->setErrorCode(SubStartRef::SeizeError);
+ // ret->setErrorLine(__LINE__);
+ // ret->setErrorNode(reference());
+ ref->senderRef = reference();
+ ref->setTemporary(SubStopRef::Busy);
+
+ sendSignal(origSenderRef, GSN_SUB_STOP_REF, signal,
+ SubStopRef::SignalLength, JBB);
+ return;
+ }
+
+ {
+ const SubStopReq* req = (SubStopReq*) signal->getDataPtr();
+ subbPtr.p->m_senderRef = req->senderRef;
+ subbPtr.p->m_senderData = req->senderData;
+ subbPtr.p->m_errorCode = 0;
+ }
+
+ if (refToBlock(origSenderRef) != DBDICT) {
+ /*
+ * Coordinator
+ */
+ jam();
+#ifdef EVENT_DEBUG
+ ndbout_c("SUB_STOP_REQ 1");
+#endif
+    subbPtr.p->m_senderRef = origSenderRef; // not sure if the API sets this correctly
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ RequestTracker & p = subbPtr.p->m_reqTracker;
+ p.init<SubStopRef>(c_counterMgr, rg, GSN_SUB_STOP_REF, subbPtr.i);
+
+ SubStopReq* req = (SubStopReq*) signal->getDataPtrSend();
+
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+
+ sendSignal(rg, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB);
+ return;
+ }
+ /*
+ * Participant
+ */
+#ifdef EVENT_DEBUG
+ ndbout_c("SUB_STOP_REQ 2");
+#endif
+ ndbrequire(refToBlock(origSenderRef) == DBDICT);
+ {
+ SubStopReq* req = (SubStopReq*) signal->getDataPtrSend();
+
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+
+ sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB);
+ }
+}
+
+void Dbdict::execSUB_STOP_REF(Signal* signal)
+{
+ jamEntry();
+ const SubStopRef* ref = (SubStopRef*) signal->getDataPtr();
+ Uint32 senderRef = ref->senderRef;
+
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, ref->senderData);
+
+ if (refToBlock(senderRef) == SUMA) {
+ /*
+ * Participant
+ */
+ jam();
+ if (ref->isTemporary()){
+ jam();
+ SubStopReq* req = (SubStopReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+ sendSignal(SUMA_REF, GSN_SUB_STOP_REQ,
+ signal, SubStopReq::SignalLength, JBB);
+ } else {
+ jam();
+ SubStopRef* ref = (SubStopRef*) signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF,
+ signal, SubStopRef::SignalLength, JBB);
+ c_opSubEvent.release(subbPtr);
+ }
+ return;
+ }
+ /*
+ * Coordinator
+ */
+ ndbrequire(refToBlock(senderRef) == DBDICT);
+ if (ref->errorCode == SubStopRef::NF_FakeErrorREF){
+ jam();
+ subbPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef));
+ } else {
+ jam();
+ subbPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef));
+ }
+ completeSubStopReq(signal,subbPtr.i,0);
+}
+
+void Dbdict::execSUB_STOP_CONF(Signal* signal)
+{
+ jamEntry();
+
+ const SubStopConf* conf = (SubStopConf*) signal->getDataPtr();
+ Uint32 senderRef = conf->senderRef;
+
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, conf->senderData);
+
+ if (refToBlock(senderRef) == SUMA) {
+ /*
+ * Participant
+ */
+ jam();
+ SubStopConf* conf = (SubStopConf*) signal->getDataPtrSend();
+
+ conf->senderRef = reference();
+ conf->senderData = subbPtr.p->m_senderData;
+
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_CONF,
+ signal, SubStopConf::SignalLength, JBB);
+ c_opSubEvent.release(subbPtr);
+ return;
+ }
+ /*
+ * Coordinator
+ */
+ ndbrequire(refToBlock(senderRef) == DBDICT);
+ subbPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef));
+ completeSubStopReq(signal,subbPtr.i,0);
+}
+
+/*
+ * Coordinator
+ */
+void Dbdict::completeSubStopReq(Signal* signal,
+ Uint32 ptrI,
+ Uint32 returnCode){
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, ptrI);
+
+ if (!subbPtr.p->m_reqTracker.done()){
+ jam();
+ return;
+ }
+
+ if (subbPtr.p->m_reqTracker.hasRef()) {
+ jam();
+#ifdef EVENT_DEBUG
+ ndbout_c("SUB_STOP_REF");
+#endif
+ SubStopRef* ref = (SubStopRef*)signal->getDataPtrSend();
+
+ ref->senderRef = reference();
+ ref->senderData = subbPtr.p->m_senderData;
+ /*
+ ref->subscriptionId = subbPtr.p->m_senderData;
+ ref->subscriptionKey = subbPtr.p->m_senderData;
+ ref->part = subbPtr.p->m_part; // SubscriptionData::Part
+ ref->subscriberData = subbPtr.p->m_subscriberData;
+ ref->subscriberRef = subbPtr.p->m_subscriberRef;
+ */
+ ref->errorCode = subbPtr.p->m_errorCode;
+
+
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF,
+ signal, SubStopRef::SignalLength, JBB);
+ if (subbPtr.p->m_reqTracker.hasConf()) {
+ // stopStartedNodes(signal);
+ }
+ c_opSubEvent.release(subbPtr);
+ return;
+ }
+#ifdef EVENT_DEBUG
+ ndbout_c("SUB_STOP_CONF");
+#endif
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_CONF,
+ signal, SubStopConf::SignalLength, JBB);
+ c_opSubEvent.release(subbPtr);
+}
+
+/***************************************************************
+ * MODULE: Drop event.
+ *
+ * Drop event.
+ *
+ * TODO
+ */
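+
+/* Drop event in outline:
+ *   1) read the NDB$EVENTS_0 row for the named event (UTIL Read),
+ *   2) fan out SUB_REMOVE_REQ to all DICT nodes, each of which forwards
+ *      it to its local SUMA,
+ *   3) delete the NDB$EVENTS_0 row (UTIL Delete),
+ *   4) reply with DROP_EVNT_CONF/REF via dropEvent_sendReply().
+ */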
+
+void
+Dbdict::execDROP_EVNT_REQ(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+
+ DropEvntReq *req = (DropEvntReq*)signal->getDataPtr();
+ const Uint32 senderRef = signal->senderBlockRef();
+ OpDropEventPtr evntRecPtr;
+
+  // Seize a Drop Event record
+ if (!c_opDropEvent.seize(evntRecPtr)) {
+ // Failed to allocate event record
+ jam();
+ releaseSections(signal);
+
+ DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend();
+ ret->setErrorCode(DropEvntRef::SeizeError);
+ ret->setErrorLine(__LINE__);
+ ret->setErrorNode(reference());
+ sendSignal(senderRef, GSN_DROP_EVNT_REF, signal,
+ DropEvntRef::SignalLength, JBB);
+ return;
+ }
+
+#ifdef EVENT_DEBUG
+ ndbout_c("DBDICT::execDROP_EVNT_REQ evntRecId = (%d)", evntRecPtr.i);
+#endif
+
+ OpDropEvent* evntRec = evntRecPtr.p;
+ evntRec->init(req);
+
+ SegmentedSectionPtr ssPtr;
+
+ signal->getSection(ssPtr, 0);
+
+ SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
+#ifdef EVENT_DEBUG
+ r0.printAll(ndbout);
+#endif
+ // event name
+ if ((!r0.first()) ||
+ (r0.getValueType() != SimpleProperties::StringValue) ||
+ (r0.getValueLen() <= 0)) {
+ jam();
+ releaseSections(signal);
+
+ evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ dropEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+ r0.getString(evntRecPtr.p->m_eventRec.NAME);
+ {
+ int len = strlen(evntRecPtr.p->m_eventRec.NAME);
+ memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
+#ifdef EVENT_DEBUG
+ printf("DropEvntReq; EventName %s, len %u\n",
+ evntRecPtr.p->m_eventRec.NAME, len);
+ for(int i = 0; i < MAX_TAB_NAME_SIZE/4; i++)
+ printf("H'%.8x ", ((Uint32*)evntRecPtr.p->m_eventRec.NAME)[i]);
+ printf("\n");
+#endif
+ }
+
+ releaseSections(signal);
+
+ Callback c = { safe_cast(&Dbdict::dropEventUTIL_PREPARE_READ), 0 };
+
+ prepareTransactionEventSysTable(&c, signal, evntRecPtr.i,
+ UtilPrepareReq::Read);
+}
+
+void
+Dbdict::dropEventUTIL_PREPARE_READ(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ if (returnCode != 0) {
+ EVENT_TRACE;
+ dropEventUtilPrepareRef(signal, callbackData, returnCode);
+ return;
+ }
+
+ UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr();
+ OpDropEventPtr evntRecPtr;
+ evntRecPtr.i = req->getSenderData();
+ const Uint32 prepareId = req->getPrepareId();
+
+ ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ Callback c = { safe_cast(&Dbdict::dropEventUTIL_EXECUTE_READ), 0 };
+
+ executeTransEventSysTable(&c, signal,
+ evntRecPtr.i, evntRecPtr.p->m_eventRec,
+ prepareId, UtilPrepareReq::Read);
+}
+
+void
+Dbdict::dropEventUTIL_EXECUTE_READ(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ if (returnCode != 0) {
+ EVENT_TRACE;
+ dropEventUtilExecuteRef(signal, callbackData, returnCode);
+ return;
+ }
+
+ OpDropEventPtr evntRecPtr;
+ UtilExecuteConf * const ref = (UtilExecuteConf *)signal->getDataPtr();
+ jam();
+ evntRecPtr.i = ref->getSenderData();
+ ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ parseReadEventSys(signal, evntRecPtr.p->m_eventRec);
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ RequestTracker & p = evntRecPtr.p->m_reqTracker;
+ p.init<SubRemoveRef>(c_counterMgr, rg, GSN_SUB_REMOVE_REF,
+ evntRecPtr.i);
+
+ SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend();
+
+ req->senderRef = reference();
+ req->senderData = evntRecPtr.i;
+ req->subscriptionId = evntRecPtr.p->m_eventRec.SUBID;
+ req->subscriptionKey = evntRecPtr.p->m_eventRec.SUBKEY;
+
+ sendSignal(rg, GSN_SUB_REMOVE_REQ, signal, SubRemoveReq::SignalLength, JBB);
+}
+
+/*
+ * Participant
+ */
+
+void
+Dbdict::execSUB_REMOVE_REQ(Signal* signal)
+{
+ jamEntry();
+
+ Uint32 origSenderRef = signal->senderBlockRef();
+
+ OpSubEventPtr subbPtr;
+ if (!c_opSubEvent.seize(subbPtr)) {
+ SubRemoveRef * ref = (SubRemoveRef *)signal->getDataPtrSend();
+ jam();
+ ref->senderRef = reference();
+ ref->setTemporary(SubRemoveRef::Busy);
+
+ sendSignal(origSenderRef, GSN_SUB_REMOVE_REF, signal,
+ SubRemoveRef::SignalLength, JBB);
+ return;
+ }
+
+ {
+ const SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtr();
+ subbPtr.p->m_senderRef = req->senderRef;
+ subbPtr.p->m_senderData = req->senderData;
+ subbPtr.p->m_errorCode = 0;
+ }
+
+ SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+
+ sendSignal(SUMA_REF, GSN_SUB_REMOVE_REQ, signal, SubRemoveReq::SignalLength, JBB);
+}
+
+/*
+ * Coordinator/Participant
+ */
+
+void
+Dbdict::execSUB_REMOVE_REF(Signal* signal)
+{
+ jamEntry();
+ const SubRemoveRef* ref = (SubRemoveRef*) signal->getDataPtr();
+ Uint32 senderRef = ref->senderRef;
+
+ if (refToBlock(senderRef) == SUMA) {
+ /*
+ * Participant
+ */
+ jam();
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, ref->senderData);
+ if (ref->errorCode == (Uint32) GrepError::SUBSCRIPTION_ID_NOT_FOUND) {
+      // send CONF since this may occur if a node failure occurred
+      // earlier so that the systable was not cleared
+ SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_CONF,
+ signal, SubRemoveConf::SignalLength, JBB);
+ } else {
+ SubRemoveRef* ref = (SubRemoveRef*) signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_REF,
+ signal, SubRemoveRef::SignalLength, JBB);
+ }
+ c_opSubEvent.release(subbPtr);
+ return;
+ }
+ /*
+ * Coordinator
+ */
+ ndbrequire(refToBlock(senderRef) == DBDICT);
+ OpDropEventPtr eventRecPtr;
+ c_opDropEvent.getPtr(eventRecPtr, ref->senderData);
+ if (ref->errorCode == SubRemoveRef::NF_FakeErrorREF){
+ jam();
+ eventRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef));
+ } else {
+ jam();
+ eventRecPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef));
+ }
+ completeSubRemoveReq(signal,eventRecPtr.i,0);
+}
+
+void
+Dbdict::execSUB_REMOVE_CONF(Signal* signal)
+{
+ jamEntry();
+ const SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtr();
+ Uint32 senderRef = conf->senderRef;
+
+ if (refToBlock(senderRef) == SUMA) {
+ /*
+ * Participant
+ */
+ jam();
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, conf->senderData);
+ SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_CONF,
+ signal, SubRemoveConf::SignalLength, JBB);
+ c_opSubEvent.release(subbPtr);
+ return;
+ }
+ /*
+ * Coordinator
+ */
+ ndbrequire(refToBlock(senderRef) == DBDICT);
+ OpDropEventPtr eventRecPtr;
+ c_opDropEvent.getPtr(eventRecPtr, conf->senderData);
+ eventRecPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef));
+ completeSubRemoveReq(signal,eventRecPtr.i,0);
+}
+
+void
+Dbdict::completeSubRemoveReq(Signal* signal, Uint32 ptrI, Uint32 xxx)
+{
+ OpDropEventPtr evntRecPtr;
+ c_opDropEvent.getPtr(evntRecPtr, ptrI);
+
+ if (!evntRecPtr.p->m_reqTracker.done()){
+ jam();
+ return;
+ }
+
+ if (evntRecPtr.p->m_reqTracker.hasRef()) {
+ jam();
+ evntRecPtr.p->m_errorNode = reference();
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
+ dropEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+
+ Callback c = { safe_cast(&Dbdict::dropEventUTIL_PREPARE_DELETE), 0 };
+
+ prepareTransactionEventSysTable(&c, signal, evntRecPtr.i,
+ UtilPrepareReq::Delete);
+}
+
+void
+Dbdict::dropEventUTIL_PREPARE_DELETE(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ if (returnCode != 0) {
+ EVENT_TRACE;
+ dropEventUtilPrepareRef(signal, callbackData, returnCode);
+ return;
+ }
+
+ UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr();
+ OpDropEventPtr evntRecPtr;
+ jam();
+ evntRecPtr.i = req->getSenderData();
+ const Uint32 prepareId = req->getPrepareId();
+
+ ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
+#ifdef EVENT_DEBUG
+  printf("DropEvntUTIL_PREPARE; evntRecPtr.i = %u\n", evntRecPtr.i);
+#endif
+
+ Callback c = { safe_cast(&Dbdict::dropEventUTIL_EXECUTE_DELETE), 0 };
+
+ executeTransEventSysTable(&c, signal,
+ evntRecPtr.i, evntRecPtr.p->m_eventRec,
+ prepareId, UtilPrepareReq::Delete);
+}
+
+void
+Dbdict::dropEventUTIL_EXECUTE_DELETE(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ if (returnCode != 0) {
+ EVENT_TRACE;
+ dropEventUtilExecuteRef(signal, callbackData, returnCode);
+ return;
+ }
+
+ OpDropEventPtr evntRecPtr;
+ UtilExecuteConf * const ref = (UtilExecuteConf *)signal->getDataPtr();
+ jam();
+ evntRecPtr.i = ref->getSenderData();
+ ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ dropEvent_sendReply(signal, evntRecPtr);
+}
+
+void
+Dbdict::dropEventUtilPrepareRef(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ UtilPrepareRef * const ref = (UtilPrepareRef *)signal->getDataPtr();
+ OpDropEventPtr evntRecPtr;
+ evntRecPtr.i = ref->getSenderData();
+ ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ bool temporary = false;
+ interpretUtilPrepareErrorCode((UtilPrepareRef::ErrorCode)ref->getErrorCode(),
+ temporary, evntRecPtr.p->m_errorLine);
+ if (temporary) {
+ evntRecPtr.p->m_errorCode = (DropEvntRef::ErrorCode)
+ ((Uint32) DropEvntRef::Undefined | (Uint32) DropEvntRef::Temporary);
+ }
+
+ if (evntRecPtr.p->m_errorCode == 0) {
+ evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ }
+ evntRecPtr.p->m_errorNode = reference();
+
+ dropEvent_sendReply(signal, evntRecPtr);
+}
+
+void
+Dbdict::dropEventUtilExecuteRef(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ OpDropEventPtr evntRecPtr;
+ UtilExecuteRef * const ref = (UtilExecuteRef *)signal->getDataPtr();
+ jam();
+ evntRecPtr.i = ref->getSenderData();
+ ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ evntRecPtr.p->m_errorNode = reference();
+ evntRecPtr.p->m_errorLine = __LINE__;
+
+ switch (ref->getErrorCode()) {
+ case UtilExecuteRef::TCError:
+ switch (ref->getTCErrorCode()) {
+ case ZNOT_FOUND:
+ jam();
+ evntRecPtr.p->m_errorCode = DropEvntRef::EventNotFound;
+ break;
+ default:
+ jam();
+ evntRecPtr.p->m_errorCode = DropEvntRef::UndefinedTCError;
+ break;
+ }
+ break;
+ default:
+ jam();
+ evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
+ break;
+ }
+ dropEvent_sendReply(signal, evntRecPtr);
+}
+
+void Dbdict::dropEvent_sendReply(Signal* signal,
+ OpDropEventPtr evntRecPtr)
+{
+ jam();
+ EVENT_TRACE;
+ Uint32 senderRef = evntRecPtr.p->m_request.getUserRef();
+
+ if (evntRecPtr.p->hasError()) {
+ jam();
+ DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend();
+
+ ret->setUserData(evntRecPtr.p->m_request.getUserData());
+ ret->setUserRef(evntRecPtr.p->m_request.getUserRef());
+
+ ret->setErrorCode(evntRecPtr.p->m_errorCode);
+ ret->setErrorLine(evntRecPtr.p->m_errorLine);
+ ret->setErrorNode(evntRecPtr.p->m_errorNode);
+
+ sendSignal(senderRef, GSN_DROP_EVNT_REF, signal,
+ DropEvntRef::SignalLength, JBB);
+ } else {
+ jam();
+ DropEvntConf * evntConf = (DropEvntConf *)signal->getDataPtrSend();
+
+ evntConf->setUserData(evntRecPtr.p->m_request.getUserData());
+ evntConf->setUserRef(evntRecPtr.p->m_request.getUserRef());
+
+ sendSignal(senderRef, GSN_DROP_EVNT_CONF, signal,
+ DropEvntConf::SignalLength, JBB);
+ }
+
+ c_opDropEvent.release(evntRecPtr);
+}
+
+/**
+ * MODULE: Alter index
+ *
+ * Alter index state. Alter online creates the index in each TC and
+ * then invokes create trigger and alter trigger protocols to activate
+ * the 3 triggers. Alter offline does the opposite.
+ *
+ * Request type received in REQ and returned in CONF/REF:
+ *
+ * RT_USER - from API to DICT master
+ * RT_CREATE_INDEX - part of create index operation
+ * RT_DROP_INDEX - part of drop index operation
+ * RT_NODERESTART - node restart, activate locally only
+ * RT_SYSTEMRESTART - system restart, activate and build if not logged
+ * RT_DICT_PREPARE - prepare participants
+ * RT_DICT_TC - to local TC via each participant
+ * RT_DICT_COMMIT - commit in each participant
+ */
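+
+/* Master-side outline, as driven by alterIndex_recvReply() and the
+ * trigger callbacks below:
+ *
+ *   RT_DICT_PREPARE: fan out the request (plus opKey) to all participants
+ *   online:  hash index    -> RT_DICT_TC (create in local TC)
+ *                             -> create triggers -> build index
+ *            ordered index -> create triggers -> build index
+ *   offline: hash index    -> drop triggers -> RT_DICT_TC (drop in TC)
+ *                             -> RT_DICT_COMMIT
+ *            ordered index -> drop triggers -> RT_DICT_COMMIT
+ *
+ * Any recorded error switches the operation to RT_DICT_ABORT, and during
+ * system restart the build step is skipped (alterIndex_fromCreateTrigger).
+ */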
+
+void
+Dbdict::execALTER_INDX_REQ(Signal* signal)
+{
+ jamEntry();
+ AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
+ OpAlterIndexPtr opPtr;
+ const Uint32 senderRef = signal->senderBlockRef();
+ const AlterIndxReq::RequestType requestType = req->getRequestType();
+ if (requestType == AlterIndxReq::RT_USER ||
+ requestType == AlterIndxReq::RT_CREATE_INDEX ||
+ requestType == AlterIndxReq::RT_DROP_INDEX ||
+ requestType == AlterIndxReq::RT_NODERESTART ||
+ requestType == AlterIndxReq::RT_SYSTEMRESTART) {
+ jam();
+ const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL;
+ NdbNodeBitmask receiverNodes = c_aliveNodes;
+ if (isLocal) {
+ receiverNodes.clear();
+ receiverNodes.set(getOwnNodeId());
+ }
+ if (signal->getLength() == AlterIndxReq::SignalLength) {
+ jam();
+ if (! isLocal && getOwnNodeId() != c_masterNodeId) {
+ jam();
+
+ releaseSections(signal);
+ OpAlterIndex opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = AlterIndxRef::NotMaster;
+ opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
+ alterIndex_sendReply(signal, opPtr, true);
+ return;
+ }
+ // forward initial request plus operation key to all
+ req->setOpKey(++c_opRecordSequence);
+ NodeReceiverGroup rg(DBDICT, receiverNodes);
+ sendSignal(rg, GSN_ALTER_INDX_REQ,
+ signal, AlterIndxReq::SignalLength + 1, JBB);
+ return;
+ }
+ // seize operation record
+ ndbrequire(signal->getLength() == AlterIndxReq::SignalLength + 1);
+ const Uint32 opKey = req->getOpKey();
+ OpAlterIndex opBusy;
+ if (! c_opAlterIndex.seize(opPtr))
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_coordinatorRef = senderRef;
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = opKey;
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_PREPARE;
+ if (opPtr.p == &opBusy) {
+ jam();
+ opPtr.p->m_errorCode = AlterIndxRef::Busy;
+ opPtr.p->m_errorLine = __LINE__;
+ alterIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ c_opAlterIndex.add(opPtr);
+ // master expects to hear from all
+ if (opPtr.p->m_isMaster)
+ opPtr.p->m_signalCounter = receiverNodes;
+ // check request in all participants
+ alterIndex_slavePrepare(signal, opPtr);
+ alterIndex_sendReply(signal, opPtr, false);
+ return;
+ }
+ c_opAlterIndex.find(opPtr, req->getConnectionPtr());
+ if (! opPtr.isNull()) {
+ opPtr.p->m_requestType = requestType;
+ if (requestType == AlterIndxReq::RT_DICT_TC) {
+ jam();
+ if (opPtr.p->m_request.getOnline())
+ alterIndex_toCreateTc(signal, opPtr);
+ else
+ alterIndex_toDropTc(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterIndxReq::RT_DICT_COMMIT ||
+ requestType == AlterIndxReq::RT_DICT_ABORT) {
+ jam();
+ if (requestType == AlterIndxReq::RT_DICT_COMMIT)
+ alterIndex_slaveCommit(signal, opPtr);
+ else
+ alterIndex_slaveAbort(signal, opPtr);
+ alterIndex_sendReply(signal, opPtr, false);
+ // done in slave
+ if (! opPtr.p->m_isMaster)
+ c_opAlterIndex.release(opPtr);
+ return;
+ }
+ }
+ jam();
+ // return to sender
+ OpAlterIndex opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = AlterIndxRef::BadRequestType;
+ opPtr.p->m_errorLine = __LINE__;
+ alterIndex_sendReply(signal, opPtr, true);
+}
+
+void
+Dbdict::execALTER_INDX_CONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 0);
+ AlterIndxConf* conf = (AlterIndxConf*)signal->getDataPtrSend();
+ alterIndex_recvReply(signal, conf, 0);
+}
+
+void
+Dbdict::execALTER_INDX_REF(Signal* signal)
+{
+ jamEntry();
+ AlterIndxRef* ref = (AlterIndxRef*)signal->getDataPtrSend();
+ alterIndex_recvReply(signal, ref->getConf(), ref);
+}
+
+void
+Dbdict::alterIndex_recvReply(Signal* signal, const AlterIndxConf* conf,
+ const AlterIndxRef* ref)
+{
+ jam();
+ const Uint32 senderRef = signal->senderBlockRef();
+ const AlterIndxReq::RequestType requestType = conf->getRequestType();
+ const Uint32 key = conf->getConnectionPtr();
+ if (requestType == AlterIndxReq::RT_CREATE_INDEX) {
+ jam();
+ // part of create index operation
+ OpCreateIndexPtr opPtr;
+ c_opCreateIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ createIndex_fromAlterIndex(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterIndxReq::RT_DROP_INDEX) {
+ jam();
+ // part of drop index operation
+ OpDropIndexPtr opPtr;
+ c_opDropIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ dropIndex_fromAlterIndex(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterIndxReq::RT_TC ||
+ requestType == AlterIndxReq::RT_TUX) {
+ jam();
+ // part of build index operation
+ OpBuildIndexPtr opPtr;
+ c_opBuildIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ buildIndex_fromOnline(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterIndxReq::RT_NODERESTART) {
+ jam();
+ if (ref == 0) {
+ infoEvent("DICT: index %u activated", (unsigned)key);
+ } else {
+ warningEvent("DICT: index %u activation failed: code=%d line=%d",
+ (unsigned)key,
+ ref->getErrorCode(), ref->getErrorLine());
+ }
+ activateIndexes(signal, key + 1);
+ return;
+ }
+ if (requestType == AlterIndxReq::RT_SYSTEMRESTART) {
+ jam();
+ if (ref == 0) {
+      infoEvent("DICT: index %u activation done", (unsigned)key);
+    } else {
+      warningEvent("DICT: index %u activation failed: code=%d line=%d node=%d",
+ (unsigned)key,
+ ref->getErrorCode(), ref->getErrorLine(), ref->getErrorNode());
+ }
+ activateIndexes(signal, key + 1);
+ return;
+ }
+ OpAlterIndexPtr opPtr;
+ c_opAlterIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ ndbrequire(opPtr.p->m_isMaster);
+ ndbrequire(opPtr.p->m_requestType == requestType);
+ opPtr.p->setError(ref);
+ opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
+ if (! opPtr.p->m_signalCounter.done()) {
+ jam();
+ return;
+ }
+ if (requestType == AlterIndxReq::RT_DICT_COMMIT ||
+ requestType == AlterIndxReq::RT_DICT_ABORT) {
+ jam();
+ // send reply to user
+ alterIndex_sendReply(signal, opPtr, true);
+ c_opAlterIndex.release(opPtr);
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_ABORT;
+ alterIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ if (indexPtr.p->isHashIndex()) {
+ if (requestType == AlterIndxReq::RT_DICT_PREPARE) {
+ jam();
+ if (opPtr.p->m_request.getOnline()) {
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_TC;
+ alterIndex_sendSlaveReq(signal, opPtr);
+ } else {
+ // start drop triggers
+ alterIndex_toDropTrigger(signal, opPtr);
+ }
+ return;
+ }
+ if (requestType == AlterIndxReq::RT_DICT_TC) {
+ jam();
+ if (opPtr.p->m_request.getOnline()) {
+ // start create triggers
+ alterIndex_toCreateTrigger(signal, opPtr);
+ } else {
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_COMMIT;
+ alterIndex_sendSlaveReq(signal, opPtr);
+ }
+ return;
+ }
+ }
+ if (indexPtr.p->isOrderedIndex()) {
+ if (requestType == AlterIndxReq::RT_DICT_PREPARE) {
+ jam();
+ if (opPtr.p->m_request.getOnline()) {
+ // start create triggers
+ alterIndex_toCreateTrigger(signal, opPtr);
+ } else {
+ // start drop triggers
+ alterIndex_toDropTrigger(signal, opPtr);
+ }
+ return;
+ }
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::alterIndex_slavePrepare(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ const AlterIndxReq* const req = &opPtr.p->m_request;
+ if (! (req->getIndexId() < c_tableRecordPool.getSize())) {
+ jam();
+ opPtr.p->m_errorCode = AlterIndxRef::Inconsistency;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, req->getIndexId());
+ if (indexPtr.p->tabState != TableRecord::DEFINED) {
+ jam();
+ opPtr.p->m_errorCode = AlterIndxRef::IndexNotFound;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ if (! indexPtr.p->isIndex()) {
+ jam();
+ opPtr.p->m_errorCode = AlterIndxRef::NotAnIndex;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ if (req->getOnline())
+ indexPtr.p->indexState = TableRecord::IS_BUILDING;
+ else
+ indexPtr.p->indexState = TableRecord::IS_DROPPING;
+}
+
+void
+Dbdict::alterIndex_toCreateTc(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ // request to create index in local TC
+ CreateIndxReq* const req = (CreateIndxReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(CreateIndxReq::RT_TC);
+ req->setIndexType(indexPtr.p->tableType);
+ req->setTableId(indexPtr.p->primaryTableId);
+ req->setIndexId(indexPtr.i);
+ req->setOnline(true);
+ getIndexAttrList(indexPtr, opPtr.p->m_attrList);
+ // send
+ LinearSectionPtr lsPtr[3];
+ lsPtr[0].p = (Uint32*)&opPtr.p->m_attrList;
+ lsPtr[0].sz = 1 + opPtr.p->m_attrList.sz;
+ sendSignal(calcTcBlockRef(getOwnNodeId()), GSN_CREATE_INDX_REQ,
+ signal, CreateIndxReq::SignalLength, JBB, lsPtr, 1);
+}
+
+void
+Dbdict::alterIndex_fromCreateTc(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ // mark created in local TC
+ if (! opPtr.p->hasError()) {
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ indexPtr.p->indexLocal |= TableRecord::IL_CREATED_TC;
+ }
+ // forward CONF or REF to master
+ ndbrequire(opPtr.p->m_requestType == AlterIndxReq::RT_DICT_TC);
+ alterIndex_sendReply(signal, opPtr, false);
+}
+
+void
+Dbdict::alterIndex_toDropTc(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ // broken index
+ if (! (indexPtr.p->indexLocal & TableRecord::IL_CREATED_TC)) {
+ jam();
+ alterIndex_sendReply(signal, opPtr, false);
+ return;
+ }
+ // request to drop in local TC
+ DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(DropIndxReq::RT_TC);
+ req->setTableId(indexPtr.p->primaryTableId);
+ req->setIndexId(indexPtr.i);
+ req->setIndexVersion(indexPtr.p->tableVersion);
+ // send
+ sendSignal(calcTcBlockRef(getOwnNodeId()), GSN_DROP_INDX_REQ,
+ signal, DropIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::alterIndex_fromDropTc(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ ndbrequire(opPtr.p->m_requestType == AlterIndxReq::RT_DICT_TC);
+ if (! opPtr.p->hasError()) {
+ // mark dropped in local TC
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ indexPtr.p->indexLocal &= ~TableRecord::IL_CREATED_TC;
+ }
+ // forward CONF or REF to master
+ alterIndex_sendReply(signal, opPtr, false);
+}
+
+void
+Dbdict::alterIndex_toCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ // start creation of index triggers
+ CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(CreateTrigReq::RT_ALTER_INDEX);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ req->setTableId(opPtr.p->m_request.getTableId());
+ req->setIndexId(opPtr.p->m_request.getIndexId());
+ req->setTriggerId(RNIL);
+ req->setTriggerActionTime(TriggerActionTime::TA_AFTER);
+ req->setMonitorAllAttributes(false);
+ req->setOnline(true); // alter online after create
+ req->setReceiverRef(0); // implicit for index triggers
+ getIndexAttrMask(indexPtr, req->getAttributeMask());
+ // name section
+ char triggerName[MAX_TAB_NAME_SIZE];
+ Uint32 buffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SP string
+ LinearWriter w(buffer, sizeof(buffer) >> 2);
+ LinearSectionPtr lsPtr[3];
+ if (indexPtr.p->isHashIndex()) {
+ req->setTriggerType(TriggerType::SECONDARY_INDEX);
+ req->setMonitorReplicas(false);
+ // insert
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
+ req->setTriggerId(indexPtr.p->insertTriggerId);
+ req->setTriggerEvent(TriggerEvent::TE_INSERT);
+ sprintf(triggerName, "NDB$INDEX_%u_INSERT", opPtr.p->m_request.getIndexId());
+ w.reset();
+ w.add(CreateTrigReq::TriggerNameKey, triggerName);
+ lsPtr[0].p = buffer;
+ lsPtr[0].sz = w.getWordsUsed();
+ sendSignal(reference(), GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
+ // update
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
+ req->setTriggerId(indexPtr.p->updateTriggerId);
+ req->setTriggerEvent(TriggerEvent::TE_UPDATE);
+ sprintf(triggerName, "NDB$INDEX_%u_UPDATE", opPtr.p->m_request.getIndexId());
+ w.reset();
+ w.add(CreateTrigReq::TriggerNameKey, triggerName);
+ lsPtr[0].p = buffer;
+ lsPtr[0].sz = w.getWordsUsed();
+ sendSignal(reference(), GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
+ // delete
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
+ req->setTriggerId(indexPtr.p->deleteTriggerId);
+ req->setTriggerEvent(TriggerEvent::TE_DELETE);
+ sprintf(triggerName, "NDB$INDEX_%u_DELETE", opPtr.p->m_request.getIndexId());
+ w.reset();
+ w.add(CreateTrigReq::TriggerNameKey, triggerName);
+ lsPtr[0].p = buffer;
+ lsPtr[0].sz = w.getWordsUsed();
+ sendSignal(reference(), GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
+ // triggers left to create
+ opPtr.p->m_triggerCounter = 3;
+ return;
+ }
+ if (indexPtr.p->isOrderedIndex()) {
+ req->addRequestFlag(RequestFlag::RF_NOTCTRIGGER);
+ req->setTriggerType(TriggerType::ORDERED_INDEX);
+ req->setTriggerActionTime(TriggerActionTime::TA_CUSTOM);
+ req->setMonitorReplicas(true);
+ // one trigger for 5 events (insert, update, delete, commit, abort)
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
+ req->setTriggerId(indexPtr.p->customTriggerId);
+ req->setTriggerEvent(TriggerEvent::TE_CUSTOM);
+ sprintf(triggerName, "NDB$INDEX_%u_CUSTOM", opPtr.p->m_request.getIndexId());
+ w.reset();
+ w.add(CreateTrigReq::TriggerNameKey, triggerName);
+ lsPtr[0].p = buffer;
+ lsPtr[0].sz = w.getWordsUsed();
+ sendSignal(reference(), GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
+ // triggers left to create
+ opPtr.p->m_triggerCounter = 1;
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::alterIndex_fromCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ ndbrequire(opPtr.p->m_triggerCounter != 0);
+ if (--opPtr.p->m_triggerCounter != 0) {
+ jam();
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_ABORT;
+ alterIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if(opPtr.p->m_requestType != AlterIndxReq::RT_SYSTEMRESTART){
+ // send build request
+ alterIndex_toBuildIndex(signal, opPtr);
+ return;
+ }
+
+ /**
+ * During system restart, leave the index activated
+ * but not yet built.
+ *
+ * The build is done later, once the REDO log has been applied.
+ */
+ alterIndex_sendReply(signal, opPtr, true);
+}
+
+void
+Dbdict::alterIndex_toDropTrigger(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ // start drop of index triggers
+ DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(DropTrigReq::RT_ALTER_INDEX);
+ req->setTableId(opPtr.p->m_request.getTableId());
+ req->setIndexId(opPtr.p->m_request.getIndexId());
+ req->setTriggerInfo(0); // not used
+ opPtr.p->m_triggerCounter = 0;
+ // insert
+ if (indexPtr.p->insertTriggerId != RNIL) {
+ req->setTriggerId(indexPtr.p->insertTriggerId);
+ sendSignal(reference(), GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+ opPtr.p->m_triggerCounter++;
+ }
+ // update
+ if (indexPtr.p->updateTriggerId != RNIL) {
+ req->setTriggerId(indexPtr.p->updateTriggerId);
+ sendSignal(reference(), GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+ opPtr.p->m_triggerCounter++;
+ }
+ // delete
+ if (indexPtr.p->deleteTriggerId != RNIL) {
+ req->setTriggerId(indexPtr.p->deleteTriggerId);
+ sendSignal(reference(), GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+ opPtr.p->m_triggerCounter++;
+ }
+ // custom
+ if (indexPtr.p->customTriggerId != RNIL) {
+ req->setTriggerId(indexPtr.p->customTriggerId);
+ sendSignal(reference(), GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+ opPtr.p->m_triggerCounter++;
+ }
+ // build
+ if (indexPtr.p->buildTriggerId != RNIL) {
+ req->setTriggerId(indexPtr.p->buildTriggerId);
+ sendSignal(reference(), GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+ opPtr.p->m_triggerCounter++;
+ }
+ if (opPtr.p->m_triggerCounter == 0) {
+ // drop in each TC
+ jam();
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_TC;
+ alterIndex_sendSlaveReq(signal, opPtr);
+ }
+}
+
+void
+Dbdict::alterIndex_fromDropTrigger(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ ndbrequire(opPtr.p->m_triggerCounter != 0);
+ if (--opPtr.p->m_triggerCounter != 0) {
+ jam();
+ return;
+ }
+ // finally drop index in each TC
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ const bool isHashIndex = indexPtr.p->isHashIndex();
+ const bool isOrderedIndex = indexPtr.p->isOrderedIndex();
+ ndbrequire(isHashIndex != isOrderedIndex); // xor
+ if (isHashIndex)
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_TC;
+ if (isOrderedIndex)
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_COMMIT;
+ alterIndex_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::alterIndex_toBuildIndex(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ // get index and table records
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
+ // build request to self (short signal)
+ BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(BuildIndxReq::RT_ALTER_INDEX);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ req->setBuildId(0); // not used
+ req->setBuildKey(0); // not used
+ req->setIndexType(indexPtr.p->tableType);
+ req->setIndexId(indexPtr.i);
+ req->setTableId(indexPtr.p->primaryTableId);
+ req->setParallelism(16);
+ // send
+ sendSignal(reference(), GSN_BUILDINDXREQ,
+ signal, BuildIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::alterIndex_fromBuildIndex(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_ABORT;
+ alterIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_COMMIT;
+ alterIndex_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::alterIndex_slaveCommit(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ // get index record
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ indexPtr.p->indexState = TableRecord::IS_ONLINE;
+}
+
+void
+Dbdict::alterIndex_slaveAbort(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ // find index record
+ const Uint32 indexId = opPtr.p->m_request.getIndexId();
+ if (indexId >= c_tableRecordPool.getSize())
+ return;
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, indexId);
+ if (! indexPtr.p->isIndex())
+ return;
+ // mark broken
+ indexPtr.p->indexState = TableRecord::IS_BROKEN;
+}
+
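+// Broadcast the current alter index request to DICT on all alive nodes,
+// or only to this node when RF_LOCAL is set, and record the nodes that
+// are expected to reply in m_signalCounter.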
+void
+Dbdict::alterIndex_sendSlaveReq(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
+ *req = opPtr.p->m_request;
+ req->setUserRef(opPtr.p->m_coordinatorRef);
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(opPtr.p->m_requestType);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ NdbNodeBitmask receiverNodes = c_aliveNodes;
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) {
+ receiverNodes.clear();
+ receiverNodes.set(getOwnNodeId());
+ }
+ opPtr.p->m_signalCounter = receiverNodes;
+ NodeReceiverGroup rg(DBDICT, receiverNodes);
+ sendSignal(rg, GSN_ALTER_INDX_REQ,
+ signal, AlterIndxReq::SignalLength, JBB);
+}
+
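+// Send ALTER_INDX_CONF or ALTER_INDX_REF: as a participant (toUser ==
+// false) to the coordinator, or (toUser == true) to the original
+// requester once the operation is complete or has been rejected.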
+void
+Dbdict::alterIndex_sendReply(Signal* signal, OpAlterIndexPtr opPtr,
+ bool toUser)
+{
+ AlterIndxRef* rep = (AlterIndxRef*)signal->getDataPtrSend();
+ Uint32 gsn = GSN_ALTER_INDX_CONF;
+ Uint32 length = AlterIndxConf::InternalLength;
+ bool sendRef = opPtr.p->hasError();
+ if (! toUser) {
+ rep->setUserRef(opPtr.p->m_coordinatorRef);
+ rep->setConnectionPtr(opPtr.p->key);
+ rep->setRequestType(opPtr.p->m_requestType);
+ if (opPtr.p->m_requestType == AlterIndxReq::RT_DICT_ABORT)
+ sendRef = false;
+ } else {
+ rep->setUserRef(opPtr.p->m_request.getUserRef());
+ rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
+ rep->setRequestType(opPtr.p->m_request.getRequestType());
+ length = AlterIndxConf::SignalLength;
+ }
+ rep->setTableId(opPtr.p->m_request.getTableId());
+ rep->setIndexId(opPtr.p->m_request.getIndexId());
+ if (sendRef) {
+ if (opPtr.p->m_errorNode == 0)
+ opPtr.p->m_errorNode = getOwnNodeId();
+ rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->setErrorLine(opPtr.p->m_errorLine);
+ rep->setErrorNode(opPtr.p->m_errorNode);
+ gsn = GSN_ALTER_INDX_REF;
+ length = AlterIndxRef::SignalLength;
+ }
+ sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
+}
+
+/**
+ * MODULE: Build index
+ *
+ * Build one index, or all indexes on a table. Request type:
+ *
+ * RT_USER - normal user request, not yet used
+ * RT_ALTER_INDEX - from alter index
+ * RT_SYSTEMRESTART - rebuild indexes during system restart
+ * RT_DICT_PREPARE - prepare participants
+ * RT_DICT_TRIX - to participant on way to local TRIX
+ * RT_DICT_COMMIT - commit in each participant
+ * RT_DICT_ABORT - abort
+ * RT_TRIX - to local TRIX
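+ *
+ * Summary of the master flow in buildIndex_recvReply() below:
+ *
+ * Hash index: RT_DICT_PREPARE -> create build constraint trigger ->
+ * RT_DICT_TRIX (each DICT builds via its local TRIX) -> drop build
+ * constraint trigger -> RT_DICT_TC (set online in TC) -> RT_DICT_COMMIT.
+ *
+ * Ordered index: RT_DICT_PREPARE -> RT_DICT_TRIX (build via local TUP)
+ * -> RT_DICT_TUX (set online in TUX) -> RT_DICT_COMMIT.
+ *
+ * With RF_NOBUILD the build steps are skipped; any error leads to
+ * RT_DICT_ABORT.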
+ */
+
+void
+Dbdict::execBUILDINDXREQ(Signal* signal)
+{
+ jamEntry();
+ BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
+ OpBuildIndexPtr opPtr;
+ const Uint32 senderRef = signal->senderBlockRef();
+ const BuildIndxReq::RequestType requestType = req->getRequestType();
+ if (requestType == BuildIndxReq::RT_USER ||
+ requestType == BuildIndxReq::RT_ALTER_INDEX ||
+ requestType == BuildIndxReq::RT_SYSTEMRESTART) {
+ jam();
+
+ const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL;
+ NdbNodeBitmask receiverNodes = c_aliveNodes;
+ if (isLocal) {
+ receiverNodes.clear();
+ receiverNodes.set(getOwnNodeId());
+ }
+
+ if (signal->getLength() == BuildIndxReq::SignalLength) {
+ jam();
+
+ if (!isLocal && getOwnNodeId() != c_masterNodeId) {
+ jam();
+
+ releaseSections(signal);
+ OpBuildIndex opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = BuildIndxRef::NotMaster;
+ opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
+ buildIndex_sendReply(signal, opPtr, true);
+ return;
+ }
+ // forward initial request plus operation key to all
+ req->setOpKey(++c_opRecordSequence);
+ NodeReceiverGroup rg(DBDICT, receiverNodes);
+ sendSignal(rg, GSN_BUILDINDXREQ,
+ signal, BuildIndxReq::SignalLength + 1, JBB);
+ return;
+ }
+ // seize operation record
+ ndbrequire(signal->getLength() == BuildIndxReq::SignalLength + 1);
+ const Uint32 opKey = req->getOpKey();
+ OpBuildIndex opBusy;
+ if (! c_opBuildIndex.seize(opPtr))
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_coordinatorRef = senderRef;
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = opKey;
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_PREPARE;
+ if (opPtr.p == &opBusy) {
+ jam();
+ opPtr.p->m_errorCode = BuildIndxRef::Busy;
+ opPtr.p->m_errorLine = __LINE__;
+ buildIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ c_opBuildIndex.add(opPtr);
+ // master expects to hear from all
+ opPtr.p->m_signalCounter = receiverNodes;
+ buildIndex_sendReply(signal, opPtr, false);
+ return;
+ }
+ c_opBuildIndex.find(opPtr, req->getConnectionPtr());
+ if (! opPtr.isNull()) {
+ opPtr.p->m_requestType = requestType;
+ if (requestType == BuildIndxReq::RT_DICT_TRIX) {
+ jam();
+ buildIndex_buildTrix(signal, opPtr);
+ return;
+ }
+ if (requestType == BuildIndxReq::RT_DICT_TC ||
+ requestType == BuildIndxReq::RT_DICT_TUX) {
+ jam();
+ buildIndex_toOnline(signal, opPtr);
+ return;
+ }
+ if (requestType == BuildIndxReq::RT_DICT_COMMIT ||
+ requestType == BuildIndxReq::RT_DICT_ABORT) {
+ jam();
+ buildIndex_sendReply(signal, opPtr, false);
+ // done in slave
+ if (! opPtr.p->m_isMaster)
+ c_opBuildIndex.release(opPtr);
+ return;
+ }
+ }
+ jam();
+ // return to sender
+ OpBuildIndex opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = BuildIndxRef::BadRequestType;
+ opPtr.p->m_errorLine = __LINE__;
+ buildIndex_sendReply(signal, opPtr, true);
+}
+
+void
+Dbdict::execBUILDINDXCONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 0);
+ BuildIndxConf* conf = (BuildIndxConf*)signal->getDataPtrSend();
+ buildIndex_recvReply(signal, conf, 0);
+}
+
+void
+Dbdict::execBUILDINDXREF(Signal* signal)
+{
+ jamEntry();
+ BuildIndxRef* ref = (BuildIndxRef*)signal->getDataPtrSend();
+ buildIndex_recvReply(signal, ref->getConf(), ref);
+}
+
+void
+Dbdict::buildIndex_recvReply(Signal* signal, const BuildIndxConf* conf,
+ const BuildIndxRef* ref)
+{
+ jam();
+ const Uint32 senderRef = signal->senderBlockRef();
+ const BuildIndxReq::RequestType requestType = conf->getRequestType();
+ const Uint32 key = conf->getConnectionPtr();
+ if (requestType == BuildIndxReq::RT_ALTER_INDEX) {
+ jam();
+ // part of alter index operation
+ OpAlterIndexPtr opPtr;
+ c_opAlterIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ alterIndex_fromBuildIndex(signal, opPtr);
+ return;
+ }
+
+ if (requestType == BuildIndxReq::RT_SYSTEMRESTART) {
+ jam();
+ if (ref == 0) {
+ infoEvent("DICT: index %u rebuild done", (unsigned)key);
+ } else {
+ warningEvent("DICT: index %u rebuild failed: code=%d",
+ (unsigned)key, ref->getErrorCode());
+ }
+ rebuildIndexes(signal, key + 1);
+ return;
+ }
+
+ OpBuildIndexPtr opPtr;
+ c_opBuildIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ if (requestType == BuildIndxReq::RT_TRIX) {
+ jam();
+ // forward to master
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TRIX;
+ buildIndex_sendReply(signal, opPtr, false);
+ return;
+ }
+ ndbrequire(opPtr.p->m_isMaster);
+ ndbrequire(opPtr.p->m_requestType == requestType);
+ opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
+ if (! opPtr.p->m_signalCounter.done()) {
+ jam();
+ return;
+ }
+ if (requestType == BuildIndxReq::RT_DICT_COMMIT ||
+ requestType == BuildIndxReq::RT_DICT_ABORT) {
+ jam();
+ // send reply to user
+ buildIndex_sendReply(signal, opPtr, true);
+ c_opBuildIndex.release(opPtr);
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_ABORT;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ if (indexPtr.p->isHashIndex()) {
+ if (requestType == BuildIndxReq::RT_DICT_PREPARE) {
+ jam();
+ if (! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD)) {
+ buildIndex_toCreateConstr(signal, opPtr);
+ } else {
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TC;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ }
+ return;
+ }
+ if (requestType == BuildIndxReq::RT_DICT_TRIX) {
+ jam();
+ ndbrequire(! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD));
+ buildIndex_toDropConstr(signal, opPtr);
+ return;
+ }
+ if (requestType == BuildIndxReq::RT_DICT_TC) {
+ jam();
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_COMMIT;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ }
+ if (indexPtr.p->isOrderedIndex()) {
+ if (requestType == BuildIndxReq::RT_DICT_PREPARE) {
+ jam();
+ if (! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD)) {
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TRIX;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ } else {
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TUX;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ }
+ return;
+ }
+ if (requestType == BuildIndxReq::RT_DICT_TRIX) {
+ jam();
+ ndbrequire(! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD));
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TUX;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == BuildIndxReq::RT_DICT_TUX) {
+ jam();
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_COMMIT;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::buildIndex_toCreateConstr(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ // request to create constraint trigger
+ CreateTrigReq* req = (CreateTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(CreateTrigReq::RT_BUILD_INDEX);
+ req->addRequestFlag(0); // none
+ req->setTableId(indexPtr.i);
+ req->setIndexId(RNIL);
+ req->setTriggerId(RNIL);
+ req->setTriggerType(TriggerType::READ_ONLY_CONSTRAINT);
+ req->setTriggerActionTime(TriggerActionTime::TA_AFTER);
+ req->setTriggerEvent(TriggerEvent::TE_UPDATE);
+ req->setMonitorReplicas(false);
+ req->setMonitorAllAttributes(false);
+ req->setOnline(true); // alter online after create
+ req->setReceiverRef(0); // no receiver, REF-ed by TUP
+ req->getAttributeMask().clear();
+ // NDB$PK is last attribute
+ req->getAttributeMask().set(indexPtr.p->noOfAttributes - 1);
+ // name section
+ char triggerName[MAX_TAB_NAME_SIZE];
+ Uint32 buffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SP string
+ LinearWriter w(buffer, sizeof(buffer) >> 2);
+ LinearSectionPtr lsPtr[3];
+ sprintf(triggerName, "NDB$INDEX_%u_BUILD", indexPtr.i);
+ w.reset();
+ w.add(CreateTrigReq::TriggerNameKey, triggerName);
+ lsPtr[0].p = buffer;
+ lsPtr[0].sz = w.getWordsUsed();
+ sendSignal(reference(), GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
+}
+
+void
+Dbdict::buildIndex_fromCreateConstr(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_ABORT;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TRIX;
+ buildIndex_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::buildIndex_buildTrix(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
+ // build request
+ BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(BuildIndxReq::RT_TRIX);
+ req->setBuildId(0); // not yet..
+ req->setBuildKey(0); // ..in use
+ req->setIndexType(indexPtr.p->tableType);
+ req->setIndexId(indexPtr.i);
+ req->setTableId(indexPtr.p->primaryTableId);
+ req->setParallelism(16);
+ if (indexPtr.p->isHashIndex()) {
+ jam();
+ getIndexAttrList(indexPtr, opPtr.p->m_attrList);
+ getTableKeyList(tablePtr, opPtr.p->m_tableKeyList);
+ // send
+ LinearSectionPtr lsPtr[3];
+ lsPtr[0].sz = opPtr.p->m_attrList.sz;
+ lsPtr[0].p = opPtr.p->m_attrList.id;
+ lsPtr[1].sz = opPtr.p->m_tableKeyList.sz;
+ lsPtr[1].p = opPtr.p->m_tableKeyList.id;
+ sendSignal(calcTrixBlockRef(getOwnNodeId()), GSN_BUILDINDXREQ,
+ signal, BuildIndxReq::SignalLength, JBB, lsPtr, 2);
+ return;
+ }
+ if (indexPtr.p->isOrderedIndex()) {
+ jam();
+ sendSignal(calcTupBlockRef(getOwnNodeId()), GSN_BUILDINDXREQ,
+ signal, BuildIndxReq::SignalLength, JBB);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::buildIndex_toDropConstr(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ // request to drop constraint trigger
+ DropTrigReq* req = (DropTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(DropTrigReq::RT_BUILD_INDEX);
+ req->addRequestFlag(0); // none
+ req->setTableId(indexPtr.i);
+ req->setIndexId(RNIL);
+ req->setTriggerId(opPtr.p->m_constrTriggerId);
+ req->setTriggerInfo(0); // not used
+ sendSignal(reference(), GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::buildIndex_fromDropConstr(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_ABORT;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TC;
+ buildIndex_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::buildIndex_toOnline(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
+ // request to set index online in TC or TUX
+ AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TC) {
+ req->setRequestType(AlterIndxReq::RT_TC);
+ } else if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TUX) {
+ req->setRequestType(AlterIndxReq::RT_TUX);
+ } else {
+ ndbrequire(false);
+ }
+ req->setTableId(tablePtr.i);
+ req->setIndexId(indexPtr.i);
+ req->setIndexVersion(indexPtr.p->tableVersion);
+ req->setOnline(true);
+ BlockReference blockRef = 0;
+ if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TC) {
+ blockRef = calcTcBlockRef(getOwnNodeId());
+ } else if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TUX) {
+ blockRef = calcTuxBlockRef(getOwnNodeId());
+ } else {
+ ndbrequire(false);
+ }
+ // send
+ sendSignal(blockRef, GSN_ALTER_INDX_REQ,
+ signal, BuildIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::buildIndex_fromOnline(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ jam();
+ // forward to master
+ buildIndex_sendReply(signal, opPtr, false);
+}
+
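+// Broadcast the current build index request to DICT on all alive nodes,
+// or only to this node when RF_LOCAL is set.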
+void
+Dbdict::buildIndex_sendSlaveReq(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
+ *req = opPtr.p->m_request;
+ req->setUserRef(opPtr.p->m_coordinatorRef);
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(opPtr.p->m_requestType);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ if(opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
+ {
+ opPtr.p->m_signalCounter.clearWaitingFor();
+ opPtr.p->m_signalCounter.setWaitingFor(getOwnNodeId());
+ sendSignal(reference(), GSN_BUILDINDXREQ,
+ signal, BuildIndxReq::SignalLength, JBB);
+ }
+ else
+ {
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_BUILDINDXREQ,
+ signal, BuildIndxReq::SignalLength, JBB);
+ }
+}
+
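+// Send BUILDINDXCONF or BUILDINDXREF to the coordinator (toUser == false)
+// or to the original requester (toUser == true).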
+void
+Dbdict::buildIndex_sendReply(Signal* signal, OpBuildIndexPtr opPtr,
+ bool toUser)
+{
+ BuildIndxRef* rep = (BuildIndxRef*)signal->getDataPtrSend();
+ Uint32 gsn = GSN_BUILDINDXCONF;
+ Uint32 length = BuildIndxConf::InternalLength;
+ bool sendRef = opPtr.p->hasError();
+ if (! toUser) {
+ rep->setUserRef(opPtr.p->m_coordinatorRef);
+ rep->setConnectionPtr(opPtr.p->key);
+ rep->setRequestType(opPtr.p->m_requestType);
+ if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_ABORT)
+ sendRef = false;
+ } else {
+ rep->setUserRef(opPtr.p->m_request.getUserRef());
+ rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
+ rep->setRequestType(opPtr.p->m_request.getRequestType());
+ length = BuildIndxConf::SignalLength;
+ }
+ rep->setIndexType(opPtr.p->m_request.getIndexType());
+ rep->setTableId(opPtr.p->m_request.getTableId());
+ rep->setIndexId(opPtr.p->m_request.getIndexId());
+ if (sendRef) {
+ rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->masterNodeId = opPtr.p->m_errorNode;
+ gsn = GSN_BUILDINDXREF;
+ length = BuildIndxRef::SignalLength;
+ }
+ sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
+}
+
+/**
+ * MODULE: Create trigger
+ *
+ * Create trigger in all DICT blocks. Optionally start alter trigger
+ * operation to set the trigger online.
+ *
+ * Request type received in REQ and returned in CONF/REF:
+ *
+ * RT_USER - normal user e.g. BACKUP
+ * RT_ALTER_INDEX - from alter index online
+ * RT_DICT_PREPARE - seize operation in each DICT
+ * RT_DICT_COMMIT - commit create in each DICT
+ * RT_TC - sending to TC (operation alter trigger)
+ * RT_LQH - sending to LQH (operation alter trigger)
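+ *
+ * Summary of the master flow in createTrigger_recvReply() below:
+ * RT_DICT_PREPARE (participants check request) -> master seizes a
+ * trigger id -> RT_DICT_CREATE (participants fill in the trigger
+ * record) -> optionally alter trigger online -> RT_DICT_COMMIT,
+ * or RT_DICT_ABORT on error.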
+ */
+
+void
+Dbdict::execCREATE_TRIG_REQ(Signal* signal)
+{
+ jamEntry();
+ CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend();
+ OpCreateTriggerPtr opPtr;
+ const Uint32 senderRef = signal->senderBlockRef();
+ const CreateTrigReq::RequestType requestType = req->getRequestType();
+ if (requestType == CreateTrigReq::RT_USER ||
+ requestType == CreateTrigReq::RT_ALTER_INDEX ||
+ requestType == CreateTrigReq::RT_BUILD_INDEX) {
+ jam();
+ if (! assembleFragments(signal)) {
+ jam();
+ return;
+ }
+ const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL;
+ NdbNodeBitmask receiverNodes = c_aliveNodes;
+ if (isLocal) {
+ receiverNodes.clear();
+ receiverNodes.set(getOwnNodeId());
+ }
+ if (signal->getLength() == CreateTrigReq::SignalLength) {
+ jam();
+ if (! isLocal && getOwnNodeId() != c_masterNodeId) {
+ jam();
+
+ releaseSections(signal);
+ OpCreateTrigger opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = CreateTrigRef::NotMaster;
+ opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
+ createTrigger_sendReply(signal, opPtr, true);
+ return;
+ }
+ // forward initial request plus operation key to all
+ req->setOpKey(++c_opRecordSequence);
+ NodeReceiverGroup rg(DBDICT, receiverNodes);
+ sendSignal(rg, GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength + 1, JBB);
+ return;
+ }
+ // seize operation record
+ ndbrequire(signal->getLength() == CreateTrigReq::SignalLength + 1);
+ const Uint32 opKey = req->getOpKey();
+ OpCreateTrigger opBusy;
+ if (! c_opCreateTrigger.seize(opPtr))
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_coordinatorRef = senderRef;
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = opKey;
+ opPtr.p->m_requestType = CreateTrigReq::RT_DICT_PREPARE;
+ if (opPtr.p == &opBusy) {
+ jam();
+ opPtr.p->m_errorCode = CreateTrigRef::Busy;
+ opPtr.p->m_errorLine = __LINE__;
+ releaseSections(signal);
+ createTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ c_opCreateTrigger.add(opPtr);
+ {
+ // save name
+ SegmentedSectionPtr ssPtr;
+ signal->getSection(ssPtr, CreateTrigReq::TRIGGER_NAME_SECTION);
+ SimplePropertiesSectionReader ssReader(ssPtr, getSectionSegmentPool());
+ if (ssReader.getKey() != CreateTrigReq::TriggerNameKey ||
+ ! ssReader.getString(opPtr.p->m_triggerName)) {
+ jam();
+ opPtr.p->m_errorCode = CreateTrigRef::InvalidName;
+ opPtr.p->m_errorLine = __LINE__;
+ releaseSections(signal);
+ createTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ }
+ releaseSections(signal);
+ {
+ // check that trigger name is unique
+ TriggerRecordPtr triggerPtr;
+ TriggerRecord keyRecord;
+ strcpy(keyRecord.triggerName, opPtr.p->m_triggerName);
+ c_triggerRecordHash.find(triggerPtr, keyRecord);
+ if (triggerPtr.i != RNIL) {
+ jam();
+ opPtr.p->m_errorCode = CreateTrigRef::TriggerExists;
+ opPtr.p->m_errorLine = __LINE__;
+ createTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ }
+
+ // master expects to hear from all
+ if (opPtr.p->m_isMaster)
+ opPtr.p->m_signalCounter = receiverNodes;
+ // check request in all participants
+ createTrigger_slavePrepare(signal, opPtr);
+ createTrigger_sendReply(signal, opPtr, false);
+ return;
+ }
+ c_opCreateTrigger.find(opPtr, req->getConnectionPtr());
+ if (! opPtr.isNull()) {
+ opPtr.p->m_requestType = requestType;
+ if (requestType == CreateTrigReq::RT_DICT_CREATE) {
+ jam();
+ // master has set trigger id
+ opPtr.p->m_request.setTriggerId(req->getTriggerId());
+ createTrigger_slaveCreate(signal, opPtr);
+ createTrigger_sendReply(signal, opPtr, false);
+ return;
+ }
+ if (requestType == CreateTrigReq::RT_DICT_COMMIT ||
+ requestType == CreateTrigReq::RT_DICT_ABORT) {
+ jam();
+ if (requestType == CreateTrigReq::RT_DICT_COMMIT)
+ createTrigger_slaveCommit(signal, opPtr);
+ else
+ createTrigger_slaveAbort(signal, opPtr);
+ createTrigger_sendReply(signal, opPtr, false);
+ // done in slave
+ if (! opPtr.p->m_isMaster)
+ c_opCreateTrigger.release(opPtr);
+ return;
+ }
+ }
+ jam();
+ // return to sender
+ releaseSections(signal);
+ OpCreateTrigger opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = CreateTrigRef::BadRequestType;
+ opPtr.p->m_errorLine = __LINE__;
+ createTrigger_sendReply(signal, opPtr, true);
+}
+
+void
+Dbdict::execCREATE_TRIG_CONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 0);
+ CreateTrigConf* conf = (CreateTrigConf*)signal->getDataPtrSend();
+ createTrigger_recvReply(signal, conf, 0);
+}
+
+void
+Dbdict::execCREATE_TRIG_REF(Signal* signal)
+{
+ jamEntry();
+ CreateTrigRef* ref = (CreateTrigRef*)signal->getDataPtrSend();
+ createTrigger_recvReply(signal, ref->getConf(), ref);
+}
+
+void
+Dbdict::createTrigger_recvReply(Signal* signal, const CreateTrigConf* conf,
+ const CreateTrigRef* ref)
+{
+ jam();
+ const Uint32 senderRef = signal->senderBlockRef();
+ const CreateTrigReq::RequestType requestType = conf->getRequestType();
+ const Uint32 key = conf->getConnectionPtr();
+ if (requestType == CreateTrigReq::RT_ALTER_INDEX) {
+ jam();
+ // part of alter index operation
+ OpAlterIndexPtr opPtr;
+ c_opAlterIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ alterIndex_fromCreateTrigger(signal, opPtr);
+ return;
+ }
+ if (requestType == CreateTrigReq::RT_BUILD_INDEX) {
+ jam();
+ // part of build index operation
+ OpBuildIndexPtr opPtr;
+ c_opBuildIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ // fill in trigger id
+ opPtr.p->m_constrTriggerId = conf->getTriggerId();
+ buildIndex_fromCreateConstr(signal, opPtr);
+ return;
+ }
+ if (requestType == CreateTrigReq::RT_TC ||
+ requestType == CreateTrigReq::RT_LQH) {
+ jam();
+ // part of alter trigger operation
+ OpAlterTriggerPtr opPtr;
+ c_opAlterTrigger.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ alterTrigger_fromCreateLocal(signal, opPtr);
+ return;
+ }
+ OpCreateTriggerPtr opPtr;
+ c_opCreateTrigger.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ ndbrequire(opPtr.p->m_isMaster);
+ ndbrequire(opPtr.p->m_requestType == requestType);
+ opPtr.p->setError(ref);
+ opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
+ if (! opPtr.p->m_signalCounter.done()) {
+ jam();
+ return;
+ }
+ if (requestType == CreateTrigReq::RT_DICT_COMMIT ||
+ requestType == CreateTrigReq::RT_DICT_ABORT) {
+ jam();
+ // send reply to user
+ createTrigger_sendReply(signal, opPtr, true);
+ c_opCreateTrigger.release(opPtr);
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = CreateTrigReq::RT_DICT_ABORT;
+ createTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == CreateTrigReq::RT_DICT_PREPARE) {
+ jam();
+ // seize trigger id in master
+ createTrigger_masterSeize(signal, opPtr);
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = CreateTrigReq::RT_DICT_ABORT;
+ createTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = CreateTrigReq::RT_DICT_CREATE;
+ createTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == CreateTrigReq::RT_DICT_CREATE) {
+ jam();
+ if (opPtr.p->m_request.getOnline()) {
+ jam();
+ // start alter online
+ createTrigger_toAlterTrigger(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = CreateTrigReq::RT_DICT_COMMIT;
+ createTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::createTrigger_slavePrepare(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ jam();
+ const CreateTrigReq* const req = &opPtr.p->m_request;
+ // check trigger type
+ if ((req->getRequestType() == CreateTrigReq::RT_USER &&
+ req->getTriggerType() == TriggerType::SUBSCRIPTION) ||
+ (req->getRequestType() == CreateTrigReq::RT_ALTER_INDEX &&
+ req->getTriggerType() == TriggerType::SECONDARY_INDEX) ||
+ (req->getRequestType() == CreateTrigReq::RT_ALTER_INDEX &&
+ req->getTriggerType() == TriggerType::ORDERED_INDEX) ||
+ (req->getRequestType() == CreateTrigReq::RT_BUILD_INDEX &&
+ req->getTriggerType() == TriggerType::READ_ONLY_CONSTRAINT)) {
+ ; // supported combination
+ } else {
+ jam();
+ opPtr.p->m_errorCode = CreateTrigRef::UnsupportedTriggerType;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ // check the table
+ const Uint32 tableId = req->getTableId();
+ if (! (tableId < c_tableRecordPool.getSize())) {
+ jam();
+ opPtr.p->m_errorCode = CreateTrigRef::InvalidTable;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ if (tablePtr.p->tabState != TableRecord::DEFINED) {
+ jam();
+ opPtr.p->m_errorCode = CreateTrigRef::InvalidTable;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+}
+
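+// Master only: seize a free trigger record (or reuse the id given in the
+// request when RF_LOCAL is set), mark it TS_DEFINING and store its id in
+// the saved request.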
+void
+Dbdict::createTrigger_masterSeize(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ TriggerRecordPtr triggerPtr;
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) {
+ triggerPtr.i = opPtr.p->m_request.getTriggerId();
+ } else {
+ triggerPtr.i = getFreeTriggerRecord();
+ if (triggerPtr.i == RNIL) {
+ jam();
+ opPtr.p->m_errorCode = CreateTrigRef::TooManyTriggers;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ }
+ c_triggerRecordPool.getPtr(triggerPtr);
+ initialiseTriggerRecord(triggerPtr);
+ triggerPtr.p->triggerState = TriggerRecord::TS_DEFINING;
+ opPtr.p->m_request.setTriggerId(triggerPtr.i);
+}
+
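+// Each participant: fill in the trigger record from the request, add it
+// to the trigger name hash and, for index and build triggers, connect it
+// to the owning index record.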
+void
+Dbdict::createTrigger_slaveCreate(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ jam();
+ const CreateTrigReq* const req = &opPtr.p->m_request;
+ // get the trigger record
+ const Uint32 triggerId = req->getTriggerId();
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, triggerId);
+ initialiseTriggerRecord(triggerPtr);
+ // fill in trigger data
+ strcpy(triggerPtr.p->triggerName, opPtr.p->m_triggerName);
+ triggerPtr.p->triggerId = triggerId;
+ triggerPtr.p->tableId = req->getTableId();
+ triggerPtr.p->indexId = RNIL;
+ triggerPtr.p->triggerType = req->getTriggerType();
+ triggerPtr.p->triggerActionTime = req->getTriggerActionTime();
+ triggerPtr.p->triggerEvent = req->getTriggerEvent();
+ triggerPtr.p->monitorReplicas = req->getMonitorReplicas();
+ triggerPtr.p->monitorAllAttributes = req->getMonitorAllAttributes();
+ triggerPtr.p->attributeMask = req->getAttributeMask();
+ triggerPtr.p->triggerState = TriggerRecord::TS_OFFLINE;
+ // add to hash table
+ // ndbout_c("++++++++++++ Adding trigger id %u, %s", triggerPtr.p->triggerId, triggerPtr.p->triggerName);
+ c_triggerRecordHash.add(triggerPtr);
+ if (triggerPtr.p->triggerType == TriggerType::SECONDARY_INDEX ||
+ triggerPtr.p->triggerType == TriggerType::ORDERED_INDEX) {
+ jam();
+ // connect to index record XXX should be done in caller instead
+ triggerPtr.p->indexId = req->getIndexId();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId);
+ switch (triggerPtr.p->triggerEvent) {
+ case TriggerEvent::TE_INSERT:
+ indexPtr.p->insertTriggerId = triggerPtr.p->triggerId;
+ break;
+ case TriggerEvent::TE_UPDATE:
+ indexPtr.p->updateTriggerId = triggerPtr.p->triggerId;
+ break;
+ case TriggerEvent::TE_DELETE:
+ indexPtr.p->deleteTriggerId = triggerPtr.p->triggerId;
+ break;
+ case TriggerEvent::TE_CUSTOM:
+ indexPtr.p->customTriggerId = triggerPtr.p->triggerId;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+ }
+ if (triggerPtr.p->triggerType == TriggerType::READ_ONLY_CONSTRAINT) {
+ jam();
+ // connect to index record XXX should be done in caller instead
+ triggerPtr.p->indexId = req->getTableId();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId);
+ indexPtr.p->buildTriggerId = triggerPtr.p->triggerId;
+ }
+}
+
+void
+Dbdict::createTrigger_toAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ jam();
+ AlterTrigReq* req = (AlterTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(AlterTrigReq::RT_CREATE_TRIGGER);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ req->setTableId(opPtr.p->m_request.getTableId());
+ req->setTriggerId(opPtr.p->m_request.getTriggerId());
+ req->setTriggerInfo(0); // not used
+ req->setOnline(true);
+ req->setReceiverRef(opPtr.p->m_request.getReceiverRef());
+ sendSignal(reference(), GSN_ALTER_TRIG_REQ,
+ signal, AlterTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::createTrigger_fromAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = CreateTrigReq::RT_DICT_ABORT;
+ createTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = CreateTrigReq::RT_DICT_COMMIT;
+ createTrigger_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::createTrigger_slaveCommit(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ jam();
+ const CreateTrigReq* const req = &opPtr.p->m_request;
+ // get the trigger record
+ const Uint32 triggerId = req->getTriggerId();
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, triggerId);
+ if (! req->getOnline()) {
+ triggerPtr.p->triggerState = TriggerRecord::TS_OFFLINE;
+ } else {
+ ndbrequire(triggerPtr.p->triggerState == TriggerRecord::TS_ONLINE);
+ }
+}
+
+void
+Dbdict::createTrigger_slaveAbort(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ jam();
+}
+
+void
+Dbdict::createTrigger_sendSlaveReq(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend();
+ *req = opPtr.p->m_request;
+ req->setUserRef(opPtr.p->m_coordinatorRef);
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(opPtr.p->m_requestType);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ NdbNodeBitmask receiverNodes = c_aliveNodes;
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) {
+ receiverNodes.clear();
+ receiverNodes.set(getOwnNodeId());
+ }
+ opPtr.p->m_signalCounter = receiverNodes;
+ NodeReceiverGroup rg(DBDICT, receiverNodes);
+ sendSignal(rg, GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::createTrigger_sendReply(Signal* signal, OpCreateTriggerPtr opPtr,
+ bool toUser)
+{
+ CreateTrigRef* rep = (CreateTrigRef*)signal->getDataPtrSend();
+ Uint32 gsn = GSN_CREATE_TRIG_CONF;
+ Uint32 length = CreateTrigConf::InternalLength;
+ bool sendRef = opPtr.p->hasError();
+ if (! toUser) {
+ rep->setUserRef(opPtr.p->m_coordinatorRef);
+ rep->setConnectionPtr(opPtr.p->key);
+ rep->setRequestType(opPtr.p->m_requestType);
+ if (opPtr.p->m_requestType == CreateTrigReq::RT_DICT_ABORT)
+ sendRef = false;
+ } else {
+ rep->setUserRef(opPtr.p->m_request.getUserRef());
+ rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
+ rep->setRequestType(opPtr.p->m_request.getRequestType());
+ length = CreateTrigConf::SignalLength;
+ }
+ rep->setTableId(opPtr.p->m_request.getTableId());
+ rep->setIndexId(opPtr.p->m_request.getIndexId());
+ rep->setTriggerId(opPtr.p->m_request.getTriggerId());
+ rep->setTriggerInfo(opPtr.p->m_request.getTriggerInfo());
+ if (sendRef) {
+ if (opPtr.p->m_errorNode == 0)
+ opPtr.p->m_errorNode = getOwnNodeId();
+ rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->setErrorLine(opPtr.p->m_errorLine);
+ rep->setErrorNode(opPtr.p->m_errorNode);
+ gsn = GSN_CREATE_TRIG_REF;
+ length = CreateTrigRef::SignalLength;
+ }
+ sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
+}
+
+/**
+ * MODULE: Drop trigger.
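+ *
+ * A trigger name section, when present, is first resolved to a trigger id.
+ * Master flow (see dropTrigger_recvReply below): RT_DICT_PREPARE ->
+ * alter trigger offline -> RT_DICT_COMMIT, where each participant
+ * disconnects the trigger from its index and removes the trigger record;
+ * RT_DICT_ABORT on error.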
+ */
+
+void
+Dbdict::execDROP_TRIG_REQ(Signal* signal)
+{
+ jamEntry();
+ DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend();
+ OpDropTriggerPtr opPtr;
+ const Uint32 senderRef = signal->senderBlockRef();
+ const DropTrigReq::RequestType requestType = req->getRequestType();
+
+ if (signal->getNoOfSections() > 0) {
+ ndbrequire(signal->getNoOfSections() == 1);
+ jam();
+ TriggerRecord keyRecord;
+ OpDropTrigger opTmp;
+ opPtr.p=&opTmp;
+
+ SegmentedSectionPtr ssPtr;
+ signal->getSection(ssPtr, DropTrigReq::TRIGGER_NAME_SECTION);
+ SimplePropertiesSectionReader ssReader(ssPtr, getSectionSegmentPool());
+ if (ssReader.getKey() != DropTrigReq::TriggerNameKey ||
+ ! ssReader.getString(keyRecord.triggerName)) {
+ jam();
+ opPtr.p->m_errorCode = DropTrigRef::InvalidName;
+ opPtr.p->m_errorLine = __LINE__;
+ releaseSections(signal);
+ dropTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ releaseSections(signal);
+
+ TriggerRecordPtr triggerPtr;
+
+ // ndbout_c("++++++++++++++ Looking for trigger %s", keyRecord.triggerName);
+ c_triggerRecordHash.find(triggerPtr, keyRecord);
+ if (triggerPtr.i == RNIL) {
+ jam();
+ req->setTriggerId(RNIL);
+ } else {
+ jam();
+ // ndbout_c("++++++++++ Found trigger %s", triggerPtr.p->triggerName);
+ req->setTriggerId(triggerPtr.p->triggerId);
+ req->setTableId(triggerPtr.p->tableId);
+ }
+ }
+ if (requestType == DropTrigReq::RT_USER ||
+ requestType == DropTrigReq::RT_ALTER_INDEX ||
+ requestType == DropTrigReq::RT_BUILD_INDEX) {
+ jam();
+ if (signal->getLength() == DropTrigReq::SignalLength) {
+ if (getOwnNodeId() != c_masterNodeId) {
+ jam();
+ // forward to DICT master
+ sendSignal(calcDictBlockRef(c_masterNodeId), GSN_DROP_TRIG_REQ,
+ signal, signal->getLength(), JBB);
+ return;
+ }
+ if (!c_triggerRecordPool.findId(req->getTriggerId())) {
+ jam();
+ // return to sender
+ OpDropTrigger opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = DropTrigRef::TriggerNotFound;
+ opPtr.p->m_errorLine = __LINE__;
+ dropTrigger_sendReply(signal, opPtr, true);
+ return;
+ }
+ // forward initial request plus operation key to all
+ req->setOpKey(++c_opRecordSequence);
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength + 1, JBB);
+ return;
+ }
+ // seize operation record
+ ndbrequire(signal->getLength() == DropTrigReq::SignalLength + 1);
+ const Uint32 opKey = req->getOpKey();
+ OpDropTrigger opBusy;
+ if (! c_opDropTrigger.seize(opPtr))
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_coordinatorRef = senderRef;
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = opKey;
+ opPtr.p->m_requestType = DropTrigReq::RT_DICT_PREPARE;
+ if (opPtr.p == &opBusy) {
+ jam();
+ opPtr.p->m_errorCode = DropTrigRef::Busy;
+ opPtr.p->m_errorLine = __LINE__;
+ dropTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ c_opDropTrigger.add(opPtr);
+ // master expects to hear from all
+ if (opPtr.p->m_isMaster)
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ dropTrigger_slavePrepare(signal, opPtr);
+ dropTrigger_sendReply(signal, opPtr, false);
+ return;
+ }
+ c_opDropTrigger.find(opPtr, req->getConnectionPtr());
+ if (! opPtr.isNull()) {
+ opPtr.p->m_requestType = requestType;
+ if (requestType == DropTrigReq::RT_DICT_COMMIT ||
+ requestType == DropTrigReq::RT_DICT_ABORT) {
+ jam();
+ if (requestType == DropTrigReq::RT_DICT_COMMIT)
+ dropTrigger_slaveCommit(signal, opPtr);
+ else
+ dropTrigger_slaveAbort(signal, opPtr);
+ dropTrigger_sendReply(signal, opPtr, false);
+ // done in slave
+ if (! opPtr.p->m_isMaster)
+ c_opDropTrigger.release(opPtr);
+ return;
+ }
+ }
+ jam();
+ // return to sender
+ OpDropTrigger opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = DropTrigRef::BadRequestType;
+ opPtr.p->m_errorLine = __LINE__;
+ dropTrigger_sendReply(signal, opPtr, true);
+}
+
+void
+Dbdict::execDROP_TRIG_CONF(Signal* signal)
+{
+ jamEntry();
+ DropTrigConf* conf = (DropTrigConf*)signal->getDataPtrSend();
+ dropTrigger_recvReply(signal, conf, 0);
+}
+
+void
+Dbdict::execDROP_TRIG_REF(Signal* signal)
+{
+ jamEntry();
+ DropTrigRef* ref = (DropTrigRef*)signal->getDataPtrSend();
+ dropTrigger_recvReply(signal, ref->getConf(), ref);
+}
+
+void
+Dbdict::dropTrigger_recvReply(Signal* signal, const DropTrigConf* conf,
+ const DropTrigRef* ref)
+{
+ jam();
+ const Uint32 senderRef = signal->senderBlockRef();
+ const DropTrigReq::RequestType requestType = conf->getRequestType();
+ const Uint32 key = conf->getConnectionPtr();
+ if (requestType == DropTrigReq::RT_ALTER_INDEX) {
+ jam();
+ // part of alter index operation
+ OpAlterIndexPtr opPtr;
+ c_opAlterIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ alterIndex_fromDropTrigger(signal, opPtr);
+ return;
+ }
+ if (requestType == DropTrigReq::RT_BUILD_INDEX) {
+ jam();
+ // part of build index operation
+ OpBuildIndexPtr opPtr;
+ c_opBuildIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ buildIndex_fromDropConstr(signal, opPtr);
+ return;
+ }
+ if (requestType == DropTrigReq::RT_TC ||
+ requestType == DropTrigReq::RT_LQH) {
+ jam();
+ // part of alter trigger operation
+ OpAlterTriggerPtr opPtr;
+ c_opAlterTrigger.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ alterTrigger_fromDropLocal(signal, opPtr);
+ return;
+ }
+ OpDropTriggerPtr opPtr;
+ c_opDropTrigger.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ ndbrequire(opPtr.p->m_isMaster);
+ ndbrequire(opPtr.p->m_requestType == requestType);
+ opPtr.p->setError(ref);
+ opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
+ if (! opPtr.p->m_signalCounter.done()) {
+ jam();
+ return;
+ }
+ if (requestType == DropTrigReq::RT_DICT_COMMIT ||
+ requestType == DropTrigReq::RT_DICT_ABORT) {
+ jam();
+ // send reply to user
+ dropTrigger_sendReply(signal, opPtr, true);
+ c_opDropTrigger.release(opPtr);
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = DropTrigReq::RT_DICT_ABORT;
+ dropTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == DropTrigReq::RT_DICT_PREPARE) {
+ jam();
+ // start alter offline
+ dropTrigger_toAlterTrigger(signal, opPtr);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::dropTrigger_slavePrepare(Signal* signal, OpDropTriggerPtr opPtr)
+{
+ jam();
+}
+
+void
+Dbdict::dropTrigger_toAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr)
+{
+ jam();
+ AlterTrigReq* req = (AlterTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(AlterTrigReq::RT_DROP_TRIGGER);
+ req->setTableId(opPtr.p->m_request.getTableId());
+ req->setTriggerId(opPtr.p->m_request.getTriggerId());
+ req->setTriggerInfo(0); // not used
+ req->setOnline(false);
+ req->setReceiverRef(0);
+ sendSignal(reference(), GSN_ALTER_TRIG_REQ,
+ signal, AlterTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::dropTrigger_fromAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr)
+{
+ jam();
+ // remove in all
+ opPtr.p->m_requestType = DropTrigReq::RT_DICT_COMMIT;
+ dropTrigger_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::dropTrigger_sendSlaveReq(Signal* signal, OpDropTriggerPtr opPtr)
+{
+ DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend();
+ *req = opPtr.p->m_request;
+ req->setUserRef(opPtr.p->m_coordinatorRef);
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(opPtr.p->m_requestType);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+}
+
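+// Each participant: disconnect the trigger from its index record (for
+// index and build triggers), remove it from the trigger name hash and
+// mark it TS_NOT_DEFINED.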
+void
+Dbdict::dropTrigger_slaveCommit(Signal* signal, OpDropTriggerPtr opPtr)
+{
+ jam();
+ const DropTrigReq* const req = &opPtr.p->m_request;
+ // get trigger record
+ const Uint32 triggerId = req->getTriggerId();
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, triggerId);
+ if (triggerPtr.p->triggerType == TriggerType::SECONDARY_INDEX ||
+ triggerPtr.p->triggerType == TriggerType::ORDERED_INDEX) {
+ jam();
+ // disconnect from index if index trigger XXX move to drop index
+ triggerPtr.p->indexId = req->getIndexId();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId);
+ ndbrequire(! indexPtr.isNull());
+ switch (triggerPtr.p->triggerEvent) {
+ case TriggerEvent::TE_INSERT:
+ indexPtr.p->insertTriggerId = RNIL;
+ break;
+ case TriggerEvent::TE_UPDATE:
+ indexPtr.p->updateTriggerId = RNIL;
+ break;
+ case TriggerEvent::TE_DELETE:
+ indexPtr.p->deleteTriggerId = RNIL;
+ break;
+ case TriggerEvent::TE_CUSTOM:
+ indexPtr.p->customTriggerId = RNIL;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+ }
+ if (triggerPtr.p->triggerType == TriggerType::READ_ONLY_CONSTRAINT) {
+ jam();
+ // disconnect from index record XXX should be done in caller instead
+ triggerPtr.p->indexId = req->getTableId();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId);
+ indexPtr.p->buildTriggerId = RNIL;
+ }
+ // remove trigger
+ // ndbout_c("++++++++++++ Removing trigger id %u, %s", triggerPtr.p->triggerId, triggerPtr.p->triggerName);
+ c_triggerRecordHash.remove(triggerPtr);
+ triggerPtr.p->triggerState = TriggerRecord::TS_NOT_DEFINED;
+}
+
+void
+Dbdict::dropTrigger_slaveAbort(Signal* signal, OpDropTriggerPtr opPtr)
+{
+ jam();
+}
+
+void
+Dbdict::dropTrigger_sendReply(Signal* signal, OpDropTriggerPtr opPtr,
+ bool toUser)
+{
+ DropTrigRef* rep = (DropTrigRef*)signal->getDataPtrSend();
+ Uint32 gsn = GSN_DROP_TRIG_CONF;
+ Uint32 length = DropTrigConf::InternalLength;
+ bool sendRef = opPtr.p->hasError();
+ if (! toUser) {
+ rep->setUserRef(opPtr.p->m_coordinatorRef);
+ rep->setConnectionPtr(opPtr.p->key);
+ rep->setRequestType(opPtr.p->m_requestType);
+ if (opPtr.p->m_requestType == DropTrigReq::RT_DICT_ABORT)
+ sendRef = false;
+ } else {
+ rep->setUserRef(opPtr.p->m_request.getUserRef());
+ rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
+ rep->setRequestType(opPtr.p->m_request.getRequestType());
+ length = DropTrigConf::SignalLength;
+ }
+ rep->setTableId(opPtr.p->m_request.getTableId());
+ rep->setIndexId(opPtr.p->m_request.getIndexId());
+ rep->setTriggerId(opPtr.p->m_request.getTriggerId());
+ if (sendRef) {
+ if (opPtr.p->m_errorNode == 0)
+ opPtr.p->m_errorNode = getOwnNodeId();
+ rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->setErrorLine(opPtr.p->m_errorLine);
+ rep->setErrorNode(opPtr.p->m_errorNode);
+ gsn = GSN_DROP_TRIG_REF;
+ length = DropTrigRef::SignalLength;
+ }
+ sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
+}
+
+/**
+ * MODULE: Alter trigger.
+ *
+ * Alter trigger state. Alter online creates the trigger first in all
+ * TC (if index trigger) and then in all LQH-TUP.
+ *
+ * Request type received in REQ and returned in CONF/REF:
+ *
+ * RT_USER - normal user e.g. BACKUP
+ * RT_CREATE_TRIGGER - from create trigger
+ * RT_DROP_TRIGGER - from drop trigger
+ * RT_DICT_PREPARE - seize operations and check request
+ * RT_DICT_TC - master to each DICT on way to TC
+ * RT_DICT_LQH - master to each DICT on way to LQH-TUP
+ * RT_DICT_COMMIT - commit the state change in each DICT
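+ *
+ * Summary of the master flow in alterTrigger_recvReply() below:
+ * online: RT_DICT_PREPARE -> RT_DICT_TC (create in local TC) ->
+ * RT_DICT_LQH (create in local LQH-TUP) -> RT_DICT_COMMIT
+ * offline: RT_DICT_PREPARE -> RT_DICT_LQH (drop in LQH-TUP) ->
+ * RT_DICT_TC (drop in TC) -> RT_DICT_COMMIT
+ * With RF_NOTCTRIGGER (ordered index triggers) the TC step is skipped.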
+ */
+
+void
+Dbdict::execALTER_TRIG_REQ(Signal* signal)
+{
+ jamEntry();
+ AlterTrigReq* const req = (AlterTrigReq*)signal->getDataPtrSend();
+ OpAlterTriggerPtr opPtr;
+ const Uint32 senderRef = signal->senderBlockRef();
+ const AlterTrigReq::RequestType requestType = req->getRequestType();
+ if (requestType == AlterTrigReq::RT_USER ||
+ requestType == AlterTrigReq::RT_CREATE_TRIGGER ||
+ requestType == AlterTrigReq::RT_DROP_TRIGGER) {
+ jam();
+ const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL;
+ NdbNodeBitmask receiverNodes = c_aliveNodes;
+ if (isLocal) {
+ receiverNodes.clear();
+ receiverNodes.set(getOwnNodeId());
+ }
+ if (signal->getLength() == AlterTrigReq::SignalLength) {
+ jam();
+ if (! isLocal && getOwnNodeId() != c_masterNodeId) {
+ jam();
+ // forward to DICT master
+ sendSignal(calcDictBlockRef(c_masterNodeId), GSN_ALTER_TRIG_REQ,
+ signal, AlterTrigReq::SignalLength, JBB);
+ return;
+ }
+ // forward initial request plus operation key to all
+ req->setOpKey(++c_opRecordSequence);
+ NodeReceiverGroup rg(DBDICT, receiverNodes);
+ sendSignal(rg, GSN_ALTER_TRIG_REQ,
+ signal, AlterTrigReq::SignalLength + 1, JBB);
+ return;
+ }
+ // seize operation record
+ ndbrequire(signal->getLength() == AlterTrigReq::SignalLength + 1);
+ const Uint32 opKey = req->getOpKey();
+ OpAlterTrigger opBusy;
+ if (! c_opAlterTrigger.seize(opPtr))
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_coordinatorRef = senderRef;
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = opKey;
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_PREPARE;
+ if (opPtr.p == &opBusy) {
+ jam();
+ opPtr.p->m_errorCode = AlterTrigRef::Busy;
+ opPtr.p->m_errorLine = __LINE__;
+ alterTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ c_opAlterTrigger.add(opPtr);
+ // master expects to hear from all
+ if (opPtr.p->m_isMaster) {
+ opPtr.p->m_nodes = receiverNodes;
+ opPtr.p->m_signalCounter = receiverNodes;
+ }
+ alterTrigger_slavePrepare(signal, opPtr);
+ alterTrigger_sendReply(signal, opPtr, false);
+ return;
+ }
+ c_opAlterTrigger.find(opPtr, req->getConnectionPtr());
+ if (! opPtr.isNull()) {
+ opPtr.p->m_requestType = requestType;
+ if (requestType == AlterTrigReq::RT_DICT_TC ||
+ requestType == AlterTrigReq::RT_DICT_LQH) {
+ jam();
+ if (req->getOnline())
+ alterTrigger_toCreateLocal(signal, opPtr);
+ else
+ alterTrigger_toDropLocal(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterTrigReq::RT_DICT_COMMIT ||
+ requestType == AlterTrigReq::RT_DICT_ABORT) {
+ jam();
+ if (requestType == AlterTrigReq::RT_DICT_COMMIT)
+ alterTrigger_slaveCommit(signal, opPtr);
+ else
+ alterTrigger_slaveAbort(signal, opPtr);
+ alterTrigger_sendReply(signal, opPtr, false);
+ // done in slave
+ if (! opPtr.p->m_isMaster)
+ c_opAlterTrigger.release(opPtr);
+ return;
+ }
+ }
+ jam();
+ // return to sender
+ OpAlterTrigger opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = AlterTrigRef::BadRequestType;
+ opPtr.p->m_errorLine = __LINE__;
+ alterTrigger_sendReply(signal, opPtr, true);
+ return;
+}
+
+void
+Dbdict::execALTER_TRIG_CONF(Signal* signal)
+{
+ jamEntry();
+ AlterTrigConf* conf = (AlterTrigConf*)signal->getDataPtrSend();
+ alterTrigger_recvReply(signal, conf, 0);
+}
+
+void
+Dbdict::execALTER_TRIG_REF(Signal* signal)
+{
+ jamEntry();
+ AlterTrigRef* ref = (AlterTrigRef*)signal->getDataPtrSend();
+ alterTrigger_recvReply(signal, ref->getConf(), ref);
+}
+
+void
+Dbdict::alterTrigger_recvReply(Signal* signal, const AlterTrigConf* conf,
+ const AlterTrigRef* ref)
+{
+ jam();
+ const Uint32 senderRef = signal->senderBlockRef();
+ const AlterTrigReq::RequestType requestType = conf->getRequestType();
+ const Uint32 key = conf->getConnectionPtr();
+ if (requestType == AlterTrigReq::RT_CREATE_TRIGGER) {
+ jam();
+ // part of create trigger operation
+ OpCreateTriggerPtr opPtr;
+ c_opCreateTrigger.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ createTrigger_fromAlterTrigger(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterTrigReq::RT_DROP_TRIGGER) {
+ jam();
+ // part of drop trigger operation
+ OpDropTriggerPtr opPtr;
+ c_opDropTrigger.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ dropTrigger_fromAlterTrigger(signal, opPtr);
+ return;
+ }
+ OpAlterTriggerPtr opPtr;
+ c_opAlterTrigger.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ ndbrequire(opPtr.p->m_isMaster);
+ ndbrequire(opPtr.p->m_requestType == requestType);
+ /*
+ * If a refusal on drop trigger, caused by a non-existent trigger,
+ * comes from any node but the master, ignore it and remove that
+ * node from further ALTER_TRIG communication. This can happen if
+ * a new node has started since the trigger was created.
+ */
+ if (ref &&
+ refToNode(senderRef) != refToNode(reference()) &&
+ opPtr.p->m_request.getRequestType() == AlterTrigReq::RT_DROP_TRIGGER &&
+ ref->getErrorCode() == AlterTrigRef::TriggerNotFound) {
+ jam();
+ ref = 0; // ignore this error
+ opPtr.p->m_nodes.clear(refToNode(senderRef)); // remove this from group
+ }
+ opPtr.p->setError(ref);
+ opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
+ if (! opPtr.p->m_signalCounter.done()) {
+ jam();
+ return;
+ }
+ if (requestType == AlterTrigReq::RT_DICT_COMMIT ||
+ requestType == AlterTrigReq::RT_DICT_ABORT) {
+ jam();
+ // send reply to user
+ alterTrigger_sendReply(signal, opPtr, true);
+ c_opAlterTrigger.release(opPtr);
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_ABORT;
+ alterTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (! (opPtr.p->m_request.getRequestFlag() & RequestFlag::RF_NOTCTRIGGER)) {
+ if (requestType == AlterTrigReq::RT_DICT_PREPARE) {
+ jam();
+ if (opPtr.p->m_request.getOnline())
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_TC;
+ else
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_LQH;
+ alterTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterTrigReq::RT_DICT_TC) {
+ jam();
+ if (opPtr.p->m_request.getOnline())
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_LQH;
+ else
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_COMMIT;
+ alterTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterTrigReq::RT_DICT_LQH) {
+ jam();
+ if (opPtr.p->m_request.getOnline())
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_COMMIT;
+ else
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_TC;
+ alterTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ } else {
+ if (requestType == AlterTrigReq::RT_DICT_PREPARE) {
+ jam();
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_LQH;
+ alterTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterTrigReq::RT_DICT_LQH) {
+ jam();
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_COMMIT;
+ alterTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::alterTrigger_slavePrepare(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ jam();
+ const AlterTrigReq* const req = &opPtr.p->m_request;
+ const Uint32 triggerId = req->getTriggerId();
+ TriggerRecordPtr triggerPtr;
+ if (! (triggerId < c_triggerRecordPool.getSize())) {
+ jam();
+ opPtr.p->m_errorCode = AlterTrigRef::TriggerNotFound;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ c_triggerRecordPool.getPtr(triggerPtr, triggerId);
+ if (triggerPtr.p->triggerState == TriggerRecord::TS_NOT_DEFINED) {
+ jam();
+ opPtr.p->m_errorCode = AlterTrigRef::TriggerNotFound;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+}
+
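+// Create the trigger in the local TC (RT_DICT_TC) or LQH (RT_DICT_LQH)
+// block by sending CREATE_TRIG_REQ with the data from the trigger record.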
+void
+Dbdict::alterTrigger_toCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ jam();
+ // find trigger record
+ const Uint32 triggerId = opPtr.p->m_request.getTriggerId();
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, triggerId);
+ CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
+ req->setRequestType(CreateTrigReq::RT_TC);
+ } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
+ req->setRequestType(CreateTrigReq::RT_LQH);
+ } else {
+ ndbassert(false);
+ }
+ req->setTableId(triggerPtr.p->tableId);
+ req->setIndexId(triggerPtr.p->indexId);
+ req->setTriggerId(triggerPtr.i);
+ req->setTriggerType(triggerPtr.p->triggerType);
+ req->setTriggerActionTime(triggerPtr.p->triggerActionTime);
+ req->setTriggerEvent(triggerPtr.p->triggerEvent);
+ req->setMonitorReplicas(triggerPtr.p->monitorReplicas);
+ req->setMonitorAllAttributes(triggerPtr.p->monitorAllAttributes);
+ req->setOnline(true);
+ req->setReceiverRef(opPtr.p->m_request.getReceiverRef());
+ BlockReference blockRef = 0;
+ if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
+ blockRef = calcTcBlockRef(getOwnNodeId());
+ } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
+ blockRef = calcLqhBlockRef(getOwnNodeId());
+ } else {
+ ndbassert(false);
+ }
+ req->setAttributeMask(triggerPtr.p->attributeMask);
+ sendSignal(blockRef, GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::alterTrigger_fromCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ jam();
+ if (! opPtr.p->hasError()) {
+ // mark created locally
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId());
+ if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
+ triggerPtr.p->triggerLocal |= TriggerRecord::TL_CREATED_TC;
+ } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
+ triggerPtr.p->triggerLocal |= TriggerRecord::TL_CREATED_LQH;
+ } else {
+ ndbrequire(false);
+ }
+ }
+ // forward CONF or REF to master
+ alterTrigger_sendReply(signal, opPtr, false);
+}
+
+void
+Dbdict::alterTrigger_toDropLocal(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ jam();
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId());
+ DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
+ // broken trigger
+ if (! (triggerPtr.p->triggerLocal & TriggerRecord::TL_CREATED_TC)) {
+ jam();
+ alterTrigger_sendReply(signal, opPtr, false);
+ return;
+ }
+ req->setRequestType(DropTrigReq::RT_TC);
+ } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
+ // broken trigger
+ if (! (triggerPtr.p->triggerLocal & TriggerRecord::TL_CREATED_LQH)) {
+ jam();
+ alterTrigger_sendReply(signal, opPtr, false);
+ return;
+ }
+ req->setRequestType(DropTrigReq::RT_LQH);
+ } else {
+ ndbassert(false);
+ }
+ req->setTableId(triggerPtr.p->tableId);
+ req->setIndexId(triggerPtr.p->indexId);
+ req->setTriggerId(triggerPtr.i);
+ req->setTriggerType(triggerPtr.p->triggerType);
+ req->setTriggerActionTime(triggerPtr.p->triggerActionTime);
+ req->setTriggerEvent(triggerPtr.p->triggerEvent);
+ req->setMonitorReplicas(triggerPtr.p->monitorReplicas);
+ req->setMonitorAllAttributes(triggerPtr.p->monitorAllAttributes);
+ BlockReference blockRef = 0;
+ if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
+ blockRef = calcTcBlockRef(getOwnNodeId());
+ } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
+ blockRef = calcLqhBlockRef(getOwnNodeId());
+ } else {
+ ndbassert(false);
+ }
+ sendSignal(blockRef, GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::alterTrigger_fromDropLocal(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ jam();
+ if (! opPtr.p->hasError()) {
+ // mark dropped locally
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId());
+ if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
+ triggerPtr.p->triggerLocal &= ~TriggerRecord::TL_CREATED_TC;
+ } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
+ triggerPtr.p->triggerLocal &= ~TriggerRecord::TL_CREATED_LQH;
+ } else {
+ ndbrequire(false);
+ }
+ }
+ // forward CONF or REF to master
+ alterTrigger_sendReply(signal, opPtr, false);
+}
+
+void
+Dbdict::alterTrigger_slaveCommit(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ jam();
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId());
+ // set state
+ triggerPtr.p->triggerState = TriggerRecord::TS_ONLINE;
+}
+
+void
+Dbdict::alterTrigger_slaveAbort(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ jam();
+}
+
+void
+Dbdict::alterTrigger_sendSlaveReq(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ AlterTrigReq* const req = (AlterTrigReq*)signal->getDataPtrSend();
+ *req = opPtr.p->m_request;
+ req->setUserRef(opPtr.p->m_coordinatorRef);
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(opPtr.p->m_requestType);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
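+ // with RF_LOCAL only this node participates; otherwise the request
+ // goes to the operation's nodes, restricted to those currently alive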
+ NdbNodeBitmask receiverNodes = c_aliveNodes;
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) {
+ receiverNodes.clear();
+ receiverNodes.set(getOwnNodeId());
+ } else {
+ opPtr.p->m_nodes.bitAND(receiverNodes);
+ receiverNodes = opPtr.p->m_nodes;
+ }
+ opPtr.p->m_signalCounter = receiverNodes;
+ NodeReceiverGroup rg(DBDICT, receiverNodes);
+ sendSignal(rg, GSN_ALTER_TRIG_REQ,
+ signal, AlterTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::alterTrigger_sendReply(Signal* signal, OpAlterTriggerPtr opPtr,
+ bool toUser)
+{
+ jam();
+ AlterTrigRef* rep = (AlterTrigRef*)signal->getDataPtrSend();
+ Uint32 gsn = GSN_ALTER_TRIG_CONF;
+ Uint32 length = AlterTrigConf::InternalLength;
+ bool sendRef = opPtr.p->hasError();
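+ // replies to the coordinator use the internal signal length;
+ // an abort is always acknowledged with CONF, never REF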
+ if (! toUser) {
+ rep->setUserRef(opPtr.p->m_coordinatorRef);
+ rep->setConnectionPtr(opPtr.p->key);
+ rep->setRequestType(opPtr.p->m_requestType);
+ if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_ABORT) {
+ jam();
+ sendRef = false;
+ } else {
+ jam();
+ }
+ } else {
+ jam();
+ rep->setUserRef(opPtr.p->m_request.getUserRef());
+ rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
+ rep->setRequestType(opPtr.p->m_request.getRequestType());
+ length = AlterTrigConf::SignalLength;
+ }
+ rep->setTableId(opPtr.p->m_request.getTableId());
+ rep->setTriggerId(opPtr.p->m_request.getTriggerId());
+ if (sendRef) {
+ if (opPtr.p->m_errorNode == 0) {
+ jam();
+ opPtr.p->m_errorNode = getOwnNodeId();
+ } else {
+ jam();
+ }
+ rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->setErrorLine(opPtr.p->m_errorLine);
+ rep->setErrorNode(opPtr.p->m_errorNode);
+ gsn = GSN_ALTER_TRIG_REF;
+ length = AlterTrigRef::SignalLength;
+ }
+ sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
+}
+
+/**
+ * MODULE: Support routines for index and trigger.
+ */
+
+/*
+ This routine sets up the primary key attributes of the unique
+ hash index. Since the fragment id is stored as part of the primary key,
+ the pseudo column that fetches the fragment id is inserted first in the
+ array. This routine is used as part of building the index.
+*/
+
+void
+Dbdict::getTableKeyList(TableRecordPtr tablePtr, AttributeList& list)
+{
+ jam();
+ list.sz = 0;
+ list.id[list.sz++] = AttributeHeader::FRAGMENT;
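+ // the table's key attributes follow, in table order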
+ for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
+ AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
+ if (aRec->tupleKey)
+ list.id[list.sz++] = aRec->attributeId;
+ tAttr = aRec->nextAttrInTable;
+ }
+}
+
+// XXX should store the primary attribute id
+void
+Dbdict::getIndexAttr(TableRecordPtr indexPtr, Uint32 itAttr, Uint32* id)
+{
+ jam();
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
+ AttributeRecord* iaRec = c_attributeRecordPool.getPtr(itAttr);
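+ // linear search: match the index attribute against the primary table's
+ // attributes by name to recover the primary table attribute id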
+ for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
+ AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
+ if (iaRec->equal(*aRec)) {
+ id[0] = aRec->attributeId;
+ return;
+ }
+ tAttr = aRec->nextAttrInTable;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::getIndexAttrList(TableRecordPtr indexPtr, AttributeList& list)
+{
+ jam();
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
+ list.sz = 0;
+ memset(list.id, 0, sizeof(list.id));
+ ndbrequire(indexPtr.p->noOfAttributes >= 2);
+ Uint32 itAttr = indexPtr.p->firstAttribute;
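+ // only the first noOfAttributes - 1 index attributes map back to primary
+ // table attributes; the last one is the extra key attribute of the index table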
+ for (Uint32 i = 0; i < (Uint32)indexPtr.p->noOfAttributes - 1; i++) {
+ getIndexAttr(indexPtr, itAttr, &list.id[list.sz++]);
+ AttributeRecord* iaRec = c_attributeRecordPool.getPtr(itAttr);
+ itAttr = iaRec->nextAttrInTable;
+ }
+}
+
+void
+Dbdict::getIndexAttrMask(TableRecordPtr indexPtr, AttributeMask& mask)
+{
+ jam();
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
+ mask.clear();
+ ndbrequire(indexPtr.p->noOfAttributes >= 2);
+ Uint32 itAttr = indexPtr.p->firstAttribute;
+ for (Uint32 i = 0; i < (Uint32)indexPtr.p->noOfAttributes - 1; i++) {
+ Uint32 id;
+ getIndexAttr(indexPtr, itAttr, &id);
+ mask.set(id);
+ AttributeRecord* iaRec = c_attributeRecordPool.getPtr(itAttr);
+ itAttr = iaRec->nextAttrInTable;
+ }
+}
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: STORE/RESTORE SCHEMA FILE---------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* General module used to store the schema file on disk */
+/* and to restore it from disk. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+void
+Dbdict::initSchemaFile(XSchemaFile * xsf, Uint32 firstPage, Uint32 lastPage,
+ bool initEntries)
+{
+ ndbrequire(lastPage <= xsf->noOfPages);
+ for (Uint32 n = firstPage; n < lastPage; n++) {
+ SchemaFile * sf = &xsf->schemaPage[n];
+ if (initEntries)
+ memset(sf, 0, NDB_SF_PAGE_SIZE);
+
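+ // never write an older on-disk format than the 5.0.6 schema file version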
+ Uint32 ndb_version = NDB_VERSION;
+ if (ndb_version < NDB_SF_VERSION_5_0_6)
+ ndb_version = NDB_SF_VERSION_5_0_6;
+
+ memcpy(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic));
+ sf->ByteOrder = 0x12345678;
+ sf->NdbVersion = ndb_version;
+ sf->FileSize = xsf->noOfPages * NDB_SF_PAGE_SIZE;
+ sf->PageNumber = n;
+ sf->CheckSum = 0;
+ sf->NoOfTableEntries = NDB_SF_PAGE_ENTRIES;
+
+ computeChecksum(xsf, n);
+ }
+}
+
+void
+Dbdict::resizeSchemaFile(XSchemaFile * xsf, Uint32 noOfPages)
+{
+ ndbrequire(noOfPages <= NDB_SF_MAX_PAGES);
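+ // growing: re-stamp headers of existing pages, zero-initialize the new ones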
+ if (xsf->noOfPages < noOfPages) {
+ jam();
+ Uint32 firstPage = xsf->noOfPages;
+ xsf->noOfPages = noOfPages;
+ initSchemaFile(xsf, 0, firstPage, false);
+ initSchemaFile(xsf, firstPage, xsf->noOfPages, true);
+ }
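+ // shrinking: only allowed if every table entry on the dropped pages is unused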
+ if (xsf->noOfPages > noOfPages) {
+ jam();
+ Uint32 tableId = noOfPages * NDB_SF_PAGE_ENTRIES;
+ while (tableId < xsf->noOfPages * NDB_SF_PAGE_ENTRIES) {
+ SchemaFile::TableEntry * te = getTableEntry(xsf, tableId);
+ if (te->m_tableState != SchemaFile::INIT &&
+ te->m_tableState != SchemaFile::DROP_TABLE_COMMITTED) {
+ ndbrequire(false);
+ }
+ tableId++;
+ }
+ xsf->noOfPages = noOfPages;
+ initSchemaFile(xsf, 0, xsf->noOfPages, false);
+ }
+}
+
+void
+Dbdict::computeChecksum(XSchemaFile * xsf, Uint32 pageNo){
+ SchemaFile * sf = &xsf->schemaPage[pageNo];
+ sf->CheckSum = 0;
+ sf->CheckSum = computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS);
+}
+
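+/*
+ * The page checksum is a plain XOR over all words of the page. The stored
+ * CheckSum word is chosen so that XOR-ing the complete page yields zero,
+ * hence validation only needs to check for a zero result.
+ */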
+bool
+Dbdict::validateChecksum(const XSchemaFile * xsf){
+
+ for (Uint32 n = 0; n < xsf->noOfPages; n++) {
+ SchemaFile * sf = &xsf->schemaPage[n];
+ Uint32 c = computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS);
+ if ( c != 0)
+ return false;
+ }
+ return true;
+}
+
+Uint32
+Dbdict::computeChecksum(const Uint32 * src, Uint32 len){
+ Uint32 ret = 0;
+ for(Uint32 i = 0; i<len; i++)
+ ret ^= src[i];
+ return ret;
+}
+
+SchemaFile::TableEntry *
+Dbdict::getTableEntry(XSchemaFile * xsf, Uint32 tableId)
+{
+ Uint32 n = tableId / NDB_SF_PAGE_ENTRIES;
+ Uint32 i = tableId % NDB_SF_PAGE_ENTRIES;
+ ndbrequire(n < xsf->noOfPages);
+
+ SchemaFile * sf = &xsf->schemaPage[n];
+ return &sf->TableEntries[i];
+}
+
+// global metadata support
+
+int
+Dbdict::getMetaTablePtr(TableRecordPtr& tablePtr, Uint32 tableId, Uint32 tableVersion)
+{
+ if (tableId >= c_tableRecordPool.getSize()) {
+ return MetaData::InvalidArgument;
+ }
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ if (tablePtr.p->tabState == TableRecord::NOT_DEFINED) {
+ return MetaData::TableNotFound;
+ }
+ if (tablePtr.p->tableVersion != tableVersion) {
+ return MetaData::InvalidTableVersion;
+ }
+ // online flag is not maintained by DICT
+ tablePtr.p->online =
+ (tablePtr.p->isTable() && tablePtr.p->tabState == TableRecord::DEFINED) ||
+ (tablePtr.p->isIndex() && tablePtr.p->indexState == TableRecord::IS_ONLINE);
+ return 0;
+}
+
+int
+Dbdict::getMetaTable(MetaData::Table& table, Uint32 tableId, Uint32 tableVersion)
+{
+ int ret;
+ TableRecordPtr tablePtr;
+ if ((ret = getMetaTablePtr(tablePtr, tableId, tableVersion)) < 0) {
+ return ret;
+ }
+ new (&table) MetaData::Table(*tablePtr.p);
+ return 0;
+}
+
+int
+Dbdict::getMetaTable(MetaData::Table& table, const char* tableName)
+{
+ int ret;
+ TableRecordPtr tablePtr;
+ if (strlen(tableName) + 1 > MAX_TAB_NAME_SIZE) {
+ return MetaData::InvalidArgument;
+ }
+ TableRecord keyRecord;
+ strcpy(keyRecord.tableName, tableName);
+ c_tableRecordHash.find(tablePtr, keyRecord);
+ if (tablePtr.i == RNIL) {
+ return MetaData::TableNotFound;
+ }
+ if ((ret = getMetaTablePtr(tablePtr, tablePtr.i, tablePtr.p->tableVersion)) < 0) {
+ return ret;
+ }
+ new (&table) MetaData::Table(*tablePtr.p);
+ return 0;
+}
+
+int
+Dbdict::getMetaAttribute(MetaData::Attribute& attr, const MetaData::Table& table, Uint32 attributeId)
+{
+ int ret;
+ TableRecordPtr tablePtr;
+ if ((ret = getMetaTablePtr(tablePtr, table.tableId, table.tableVersion)) < 0) {
+ return ret;
+ }
+ AttributeRecordPtr attrPtr;
+ attrPtr.i = tablePtr.p->firstAttribute;
+ while (attrPtr.i != RNIL) {
+ c_attributeRecordPool.getPtr(attrPtr);
+ if (attrPtr.p->attributeId == attributeId)
+ break;
+ attrPtr.i = attrPtr.p->nextAttrInTable;
+ }
+ if (attrPtr.i == RNIL) {
+ return MetaData::AttributeNotFound;
+ }
+ new (&attr) MetaData::Attribute(*attrPtr.p);
+ return 0;
+}
+
+int
+Dbdict::getMetaAttribute(MetaData::Attribute& attr, const MetaData::Table& table, const char* attributeName)
+{
+ int ret;
+ TableRecordPtr tablePtr;
+ if ((ret = getMetaTablePtr(tablePtr, table.tableId, table.tableVersion)) < 0) {
+ return ret;
+ }
+ AttributeRecordPtr attrPtr;
+ attrPtr.i = tablePtr.p->firstAttribute;
+ while (attrPtr.i != RNIL) {
+ c_attributeRecordPool.getPtr(attrPtr);
+ if (strcmp(attrPtr.p->attributeName, attributeName) == 0)
+ break;
+ attrPtr.i = attrPtr.p->nextAttrInTable;
+ }
+ if (attrPtr.i == RNIL) {
+ return MetaData::AttributeNotFound;
+ }
+ new (&attr) MetaData::Attribute(*attrPtr.p);
+ return 0;
+}
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
new file mode 100644
index 00000000000..4ef3791a51d
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -0,0 +1,2025 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBDICT_H
+#define DBDICT_H
+
+/**
+ * Dict : Dictionary Block
+ */
+
+#include <ndb_limits.h>
+#include <trigger_definitions.h>
+#include <pc.hpp>
+#include <ArrayList.hpp>
+#include <DLHashTable.hpp>
+#include <CArray.hpp>
+#include <KeyTable2.hpp>
+#include <SimulatedBlock.hpp>
+#include <SimpleProperties.hpp>
+#include <SignalCounter.hpp>
+#include <Bitmask.hpp>
+#include <AttributeList.hpp>
+#include <signaldata/GetTableId.hpp>
+#include <signaldata/GetTabInfo.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/CreateTable.hpp>
+#include <signaldata/CreateTab.hpp>
+#include <signaldata/DropTable.hpp>
+#include <signaldata/AlterTable.hpp>
+#include <signaldata/AlterTab.hpp>
+#include <signaldata/CreateIndx.hpp>
+#include <signaldata/DropIndx.hpp>
+#include <signaldata/AlterIndx.hpp>
+#include <signaldata/BuildIndx.hpp>
+#include <signaldata/UtilPrepare.hpp>
+#include <signaldata/CreateEvnt.hpp>
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/DropTrig.hpp>
+#include <signaldata/AlterTrig.hpp>
+#include "SchemaFile.hpp"
+#include <blocks/mutexes.hpp>
+#include <SafeCounter.hpp>
+#include <RequestTracker.hpp>
+
+#ifdef DBDICT_C
+// Debug Macros
+
+/*--------------------------------------------------------------*/
+// Constants for CONTINUEB
+/*--------------------------------------------------------------*/
+#define ZPACK_TABLE_INTO_PAGES 0
+#define ZSEND_GET_TAB_RESPONSE 3
+
+
+/*--------------------------------------------------------------*/
+// Other constants in alphabetical order
+/*--------------------------------------------------------------*/
+#define ZNOMOREPHASES 255
+
+/*--------------------------------------------------------------*/
+// Schema file defines
+/*--------------------------------------------------------------*/
+#define ZSCHEMA_WORDS 4
+
+/*--------------------------------------------------------------*/
+// Page constants
+/*--------------------------------------------------------------*/
+#define ZBAT_SCHEMA_FILE 0 //Variable number of page for NDBFS
+#define ZBAT_TABLE_FILE 1 //Variable number of page for NDBFS
+#define ZPAGE_HEADER_SIZE 32
+#define ZPOS_PAGE_SIZE 16
+#define ZPOS_CHECKSUM 17
+#define ZPOS_VERSION 18
+#define ZPOS_PAGE_HEADER_SIZE 19
+
+/*--------------------------------------------------------------*/
+// Size constants
+/*--------------------------------------------------------------*/
+#define ZFS_CONNECT_SIZE 4
+#define ZSIZE_OF_PAGES_IN_WORDS 8192
+#define ZLOG_SIZE_OF_PAGES_IN_WORDS 13
+#define ZMAX_PAGES_OF_TABLE_DEFINITION 8
+#define ZNUMBER_OF_PAGES (ZMAX_PAGES_OF_TABLE_DEFINITION + 1)
+#define ZNO_OF_FRAGRECORD 5
+
+/*--------------------------------------------------------------*/
+// Error codes
+/*--------------------------------------------------------------*/
+#define ZNODE_FAILURE_ERROR 704
+#endif
+
+/**
+ * Systable NDB$EVENTS_0
+ */
+
+#define EVENT_SYSTEM_TABLE_NAME "sys/def/NDB$EVENTS_0"
+#define EVENT_SYSTEM_TABLE_LENGTH 6
+
+struct sysTab_NDBEVENTS_0 {
+ char NAME[MAX_TAB_NAME_SIZE];
+ Uint32 EVENT_TYPE;
+ char TABLE_NAME[MAX_TAB_NAME_SIZE];
+ Uint32 ATTRIBUTE_MASK[MAXNROFATTRIBUTESINWORDS];
+ Uint32 SUBID;
+ Uint32 SUBKEY;
+};
+
+/**
+ * DICT - This blocks handles all metadata
+ */
+class Dbdict: public SimulatedBlock {
+public:
+ /*
+ * 2.3 RECORD AND FILESIZES
+ */
+ /**
+ * Shared table / index record. Most of this is permanent data stored
+ * on disk. Index trigger ids are volatile.
+ */
+ struct TableRecord : public MetaData::Table {
+ /****************************************************
+ * Support variables for table handling
+ ****************************************************/
+
+ /* Active page which is sent to disk */
+ Uint32 activePage;
+
+ /** File pointer received from disk */
+ Uint32 filePtr[2];
+
+ /** Pointer to first attribute in table */
+ Uint32 firstAttribute;
+
+ /* Pointer to first page of table description */
+ Uint32 firstPage;
+
+ /** Pointer to last attribute in table */
+ Uint32 lastAttribute;
+
+ /* Temporary record used during add/drop table */
+ Uint32 myConnect;
+#ifdef HAVE_TABLE_REORG
+ /* Second table used by this table (for table reorg) */
+ Uint32 secondTable;
+#endif
+ /* Next record in Pool */
+ Uint32 nextPool;
+
+ /* Next record in hash table */
+ Uint32 nextHash;
+
+ /* Previous record in Pool */
+ Uint32 prevPool;
+
+ /* Previous record in hash table */
+ Uint32 prevHash;
+
+ enum TabState {
+ NOT_DEFINED = 0,
+ REORG_TABLE_PREPARED = 1,
+ DEFINING = 2,
+ CHECKED = 3,
+ DEFINED = 4,
+ PREPARE_DROPPING = 5,
+ DROPPING = 6
+ };
+ TabState tabState;
+
+ /* State when returning from TC_SCHVERREQ */
+ enum TabReturnState {
+ TRS_IDLE = 0,
+ ADD_TABLE = 1,
+ SLAVE_SYSTEM_RESTART = 2,
+ MASTER_SYSTEM_RESTART = 3
+ };
+ TabReturnState tabReturnState;
+
+ /** Number of words */
+ Uint32 packedSize;
+
+ /** Index state (volatile data) */
+ enum IndexState {
+ IS_UNDEFINED = 0, // initial
+ IS_OFFLINE = 1, // index table created
+ IS_BUILDING = 2, // building (local state)
+ IS_DROPPING = 3, // dropping (local state)
+ IS_ONLINE = 4, // online
+ IS_BROKEN = 9 // build or drop aborted
+ };
+ IndexState indexState;
+
+ /** Trigger ids of index (volatile data) */
+ Uint32 insertTriggerId;
+ Uint32 updateTriggerId;
+ Uint32 deleteTriggerId;
+ Uint32 customTriggerId; // ordered index
+ Uint32 buildTriggerId; // temp during build
+
+ /** Index state in other blocks on this node */
+ enum IndexLocal {
+ IL_CREATED_TC = 1 << 0 // created in TC
+ };
+ Uint32 indexLocal;
+
+ Uint32 noOfNullBits;
+
+ inline bool equal(TableRecord & rec) const {
+ return strcmp(tableName, rec.tableName) == 0;
+ }
+
+ inline Uint32 hashValue() const {
+ Uint32 h = 0;
+ for (const char* p = tableName; *p != 0; p++)
+ h = (h << 5) + h + (*p);
+ return h;
+ }
+
+ /** frm data for this table */
+ /** TODO Could preferably be made dynamic size */
+ Uint32 frmLen;
+ char frmData[MAX_FRM_DATA_SIZE];
+ /** Node Group and Tablespace id for this table */
+ /** TODO Could preferably be made dynamic size */
+ Uint32 ngLen;
+ Uint16 ngData[MAX_NDB_PARTITIONS];
+
+ Uint32 fragmentCount;
+ };
+
+ typedef Ptr<TableRecord> TableRecordPtr;
+ ArrayPool<TableRecord> c_tableRecordPool;
+ DLHashTable<TableRecord> c_tableRecordHash;
+
+ /**
+ * Table attributes. Permanent data.
+ *
+ * Indexes have an attribute list which duplicates primary table
+ * attributes. This is wrong but convenient.
+ */
+ struct AttributeRecord : public MetaData::Attribute {
+ union {
+ /** Pointer to the next attribute used by ArrayPool */
+ Uint32 nextPool;
+
+ /** Pointer to the next attribute used by DLHash */
+ Uint32 nextHash;
+ };
+
+ /** Pointer to the previous attribute used by DLHash */
+ Uint32 prevHash;
+
+ /** Pointer to the next attribute in table */
+ Uint32 nextAttrInTable;
+
+ inline bool equal(AttributeRecord & rec) const {
+ return strcmp(attributeName, rec.attributeName) == 0;
+ }
+
+ inline Uint32 hashValue() const {
+ Uint32 h = 0;
+ for (const char* p = attributeName; *p != 0; p++)
+ h = (h << 5) + h + (*p);
+ return h;
+ }
+ };
+
+ typedef Ptr<AttributeRecord> AttributeRecordPtr;
+ ArrayPool<AttributeRecord> c_attributeRecordPool;
+ DLHashTable<AttributeRecord> c_attributeRecordHash;
+
+ /**
+ * Triggers. This is volatile data not saved on disk. Setting a
+ * trigger online creates the trigger in TC (if index) and LQH-TUP.
+ */
+ struct TriggerRecord {
+
+ /** Trigger state */
+ enum TriggerState {
+ TS_NOT_DEFINED = 0,
+ TS_DEFINING = 1,
+ TS_OFFLINE = 2, // created globally in DICT
+ TS_BUILDING = 3,
+ TS_DROPPING = 4,
+ TS_ONLINE = 5 // activated globally
+ };
+ TriggerState triggerState;
+
+ /** Trigger state in other blocks on this node */
+ enum IndexLocal {
+ TL_CREATED_TC = 1 << 0, // created in TC
+ TL_CREATED_LQH = 1 << 1 // created in LQH-TUP
+ };
+ Uint32 triggerLocal;
+
+ /** Trigger name, used by DICT to identify the trigger */
+ char triggerName[MAX_TAB_NAME_SIZE];
+
+ /** Trigger id, used by TRIX, TC, LQH, and TUP to identify the trigger */
+ Uint32 triggerId;
+
+ /** Table id, the table the trigger is defined on */
+ Uint32 tableId;
+
+ /** Trigger type, defines what the trigger is used for */
+ TriggerType::Value triggerType;
+
+ /** Trigger action time, defines when the trigger should fire */
+ TriggerActionTime::Value triggerActionTime;
+
+ /** Trigger event, defines what events the trigger should monitor */
+ TriggerEvent::Value triggerEvent;
+
+ /** Monitor all replicas */
+ bool monitorReplicas;
+
+ /** Monitor all, the trigger monitors changes of all attributes in table */
+ bool monitorAllAttributes;
+
+ /**
+ * Attribute mask, defines what attributes are to be monitored.
+ * Can be seen as a compact representation of SQL column name list.
+ */
+ AttributeMask attributeMask;
+
+ /** Index id, only used by secondary_index triggers */
+ Uint32 indexId;
+
+ union {
+ /** Pointer to the next attribute used by ArrayPool */
+ Uint32 nextPool;
+
+ /** Next record in hash table */
+ Uint32 nextHash;
+ };
+
+ /** Previous record in hash table */
+ Uint32 prevHash;
+
+ /** Equal function, used by DLHashTable */
+ inline bool equal(TriggerRecord & rec) const {
+ return strcmp(triggerName, rec.triggerName) == 0;
+ }
+
+ /** Hash value function, used by DLHashTable */
+ inline Uint32 hashValue() const {
+ Uint32 h = 0;
+ for (const char* p = triggerName; *p != 0; p++)
+ h = (h << 5) + h + (*p);
+ return h;
+ }
+ };
+
+ Uint32 c_maxNoOfTriggers;
+ typedef Ptr<TriggerRecord> TriggerRecordPtr;
+ ArrayPool<TriggerRecord> c_triggerRecordPool;
+ DLHashTable<TriggerRecord> c_triggerRecordHash;
+
+ /**
+ * Information for each FS connection.
+ ****************************************************************************/
+ struct FsConnectRecord {
+ enum FsState {
+ IDLE = 0,
+ OPEN_WRITE_SCHEMA = 1,
+ WRITE_SCHEMA = 2,
+ CLOSE_WRITE_SCHEMA = 3,
+ OPEN_READ_SCHEMA1 = 4,
+ OPEN_READ_SCHEMA2 = 5,
+ READ_SCHEMA1 = 6,
+ READ_SCHEMA2 = 7,
+ CLOSE_READ_SCHEMA = 8,
+ OPEN_READ_TAB_FILE1 = 9,
+ OPEN_READ_TAB_FILE2 = 10,
+ READ_TAB_FILE1 = 11,
+ READ_TAB_FILE2 = 12,
+ CLOSE_READ_TAB_FILE = 13,
+ OPEN_WRITE_TAB_FILE = 14,
+ WRITE_TAB_FILE = 15,
+ CLOSE_WRITE_TAB_FILE = 16
+ };
+ /** File Pointer for this file system connection */
+ Uint32 filePtr;
+
+ /** Reference of owner record */
+ Uint32 ownerPtr;
+
+ /** State of file system connection */
+ FsState fsState;
+
+ /** Used by Array Pool for free list handling */
+ Uint32 nextPool;
+ };
+
+ typedef Ptr<FsConnectRecord> FsConnectRecordPtr;
+ ArrayPool<FsConnectRecord> c_fsConnectRecordPool;
+
+ /**
+ * This record stores all the information DICT keeps about a node
+ ****************************************************************************/
+ struct NodeRecord {
+ enum NodeState {
+ API_NODE = 0,
+ NDB_NODE_ALIVE = 1,
+ NDB_NODE_DEAD = 2
+ };
+ bool hotSpare;
+ NodeState nodeState;
+ };
+
+ typedef Ptr<NodeRecord> NodeRecordPtr;
+ CArray<NodeRecord> c_nodes;
+ NdbNodeBitmask c_aliveNodes;
+
+ /**
+ * A page buffer used for table definition data
+ ****************************************************************************/
+ struct PageRecord {
+ Uint32 word[8192];
+ };
+
+ typedef Ptr<PageRecord> PageRecordPtr;
+ CArray<PageRecord> c_pageRecordArray;
+
+ struct SchemaPageRecord {
+ Uint32 word[NDB_SF_PAGE_SIZE_IN_WORDS];
+ };
+
+ CArray<SchemaPageRecord> c_schemaPageRecordArray;
+
+ /**
+ * A page for create index table signal.
+ */
+ PageRecord c_indexPage;
+
+public:
+ Dbdict(const class Configuration &);
+ virtual ~Dbdict();
+
+private:
+ BLOCK_DEFINES(Dbdict);
+
+ // Signal receivers
+ void execDICTSTARTREQ(Signal* signal);
+
+ void execGET_TABINFOREQ(Signal* signal);
+ void execGET_TABLEDID_REQ(Signal* signal);
+ void execGET_TABINFO_REF(Signal* signal);
+ void execGET_TABINFO_CONF(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execHOT_SPAREREP(Signal* signal);
+ void execDIADDTABCONF(Signal* signal);
+ void execDIADDTABREF(Signal* signal);
+ void execTAB_COMMITCONF(Signal* signal);
+ void execTAB_COMMITREF(Signal* signal);
+ void execGET_SCHEMA_INFOREQ(Signal* signal);
+ void execSCHEMA_INFO(Signal* signal);
+ void execSCHEMA_INFOCONF(Signal* signal);
+ void execREAD_NODESCONF(Signal* signal);
+ void execFSCLOSECONF(Signal* signal);
+ void execFSCLOSEREF(Signal* signal);
+ void execFSOPENCONF(Signal* signal);
+ void execFSOPENREF(Signal* signal);
+ void execFSREADCONF(Signal* signal);
+ void execFSREADREF(Signal* signal);
+ void execFSWRITECONF(Signal* signal);
+ void execFSWRITEREF(Signal* signal);
+ void execNDB_STTOR(Signal* signal);
+ void execREAD_CONFIG_REQ(Signal* signal);
+ void execSTTOR(Signal* signal);
+ void execTC_SCHVERCONF(Signal* signal);
+ void execNODE_FAILREP(Signal* signal);
+ void execINCL_NODEREQ(Signal* signal);
+ void execAPI_FAILREQ(Signal* signal);
+
+ void execWAIT_GCP_REF(Signal* signal);
+ void execWAIT_GCP_CONF(Signal* signal);
+
+ void execLIST_TABLES_REQ(Signal* signal);
+
+ // Index signals
+ void execCREATE_INDX_REQ(Signal* signal);
+ void execCREATE_INDX_CONF(Signal* signal);
+ void execCREATE_INDX_REF(Signal* signal);
+
+ void execALTER_INDX_REQ(Signal* signal);
+ void execALTER_INDX_CONF(Signal* signal);
+ void execALTER_INDX_REF(Signal* signal);
+
+ void execCREATE_TABLE_CONF(Signal* signal);
+ void execCREATE_TABLE_REF(Signal* signal);
+
+ void execDROP_INDX_REQ(Signal* signal);
+ void execDROP_INDX_CONF(Signal* signal);
+ void execDROP_INDX_REF(Signal* signal);
+
+ void execDROP_TABLE_CONF(Signal* signal);
+ void execDROP_TABLE_REF(Signal* signal);
+
+ void execBUILDINDXREQ(Signal* signal);
+ void execBUILDINDXCONF(Signal* signal);
+ void execBUILDINDXREF(Signal* signal);
+
+ // Util signals used by Event code
+ void execUTIL_PREPARE_CONF(Signal* signal);
+ void execUTIL_PREPARE_REF (Signal* signal);
+ void execUTIL_EXECUTE_CONF(Signal* signal);
+ void execUTIL_EXECUTE_REF (Signal* signal);
+ void execUTIL_RELEASE_CONF(Signal* signal);
+ void execUTIL_RELEASE_REF (Signal* signal);
+
+
+ // Event signals from API
+ void execCREATE_EVNT_REQ (Signal* signal);
+ void execCREATE_EVNT_CONF(Signal* signal);
+ void execCREATE_EVNT_REF (Signal* signal);
+
+ void execDROP_EVNT_REQ (Signal* signal);
+
+ void execSUB_START_REQ (Signal* signal);
+ void execSUB_START_CONF (Signal* signal);
+ void execSUB_START_REF (Signal* signal);
+
+ void execSUB_STOP_REQ (Signal* signal);
+ void execSUB_STOP_CONF (Signal* signal);
+ void execSUB_STOP_REF (Signal* signal);
+
+ // Event signals from SUMA
+
+ void execCREATE_SUBID_CONF(Signal* signal);
+ void execCREATE_SUBID_REF (Signal* signal);
+
+ void execSUB_CREATE_CONF(Signal* signal);
+ void execSUB_CREATE_REF (Signal* signal);
+
+ void execSUB_SYNC_CONF(Signal* signal);
+ void execSUB_SYNC_REF (Signal* signal);
+
+ void execSUB_REMOVE_REQ(Signal* signal);
+ void execSUB_REMOVE_CONF(Signal* signal);
+ void execSUB_REMOVE_REF(Signal* signal);
+
+ // Trigger signals
+ void execCREATE_TRIG_REQ(Signal* signal);
+ void execCREATE_TRIG_CONF(Signal* signal);
+ void execCREATE_TRIG_REF(Signal* signal);
+ void execALTER_TRIG_REQ(Signal* signal);
+ void execALTER_TRIG_CONF(Signal* signal);
+ void execALTER_TRIG_REF(Signal* signal);
+ void execDROP_TRIG_REQ(Signal* signal);
+ void execDROP_TRIG_CONF(Signal* signal);
+ void execDROP_TRIG_REF(Signal* signal);
+
+ void execDROP_TABLE_REQ(Signal* signal);
+
+ void execPREP_DROP_TAB_REQ(Signal* signal);
+ void execPREP_DROP_TAB_REF(Signal* signal);
+ void execPREP_DROP_TAB_CONF(Signal* signal);
+
+ void execDROP_TAB_REQ(Signal* signal);
+ void execDROP_TAB_REF(Signal* signal);
+ void execDROP_TAB_CONF(Signal* signal);
+
+ void execCREATE_TABLE_REQ(Signal* signal);
+ void execALTER_TABLE_REQ(Signal* signal);
+ void execCREATE_FRAGMENTATION_REF(Signal*);
+ void execCREATE_FRAGMENTATION_CONF(Signal*);
+ void execCREATE_TAB_REQ(Signal* signal);
+ void execADD_FRAGREQ(Signal* signal);
+ void execLQHFRAGREF(Signal* signal);
+ void execLQHFRAGCONF(Signal* signal);
+ void execLQHADDATTREF(Signal* signal);
+ void execLQHADDATTCONF(Signal* signal);
+ void execCREATE_TAB_REF(Signal* signal);
+ void execCREATE_TAB_CONF(Signal* signal);
+ void execALTER_TAB_REQ(Signal* signal);
+ void execALTER_TAB_REF(Signal* signal);
+ void execALTER_TAB_CONF(Signal* signal);
+
+ /*
+ * 2.4 COMMON STORED VARIABLES
+ */
+
+ /**
+ * This record stores all the state needed
+ * when the schema page is being sent to other nodes
+ ***************************************************************************/
+ struct SendSchemaRecord {
+ /** Number of words of schema data */
+ Uint32 noOfWords;
+ /** Page Id of schema data */
+ Uint32 pageId;
+
+ Uint32 nodeId;
+ SignalCounter m_SCHEMAINFO_Counter;
+
+ Uint32 noOfWordsCurrentlySent;
+ Uint32 noOfSignalsSentSinceDelay;
+
+ bool inUse;
+ };
+ SendSchemaRecord c_sendSchemaRecord;
+
+ /**
+ * This record stores all the state needed
+ * when a table file is being read from disk
+ ****************************************************************************/
+ struct ReadTableRecord {
+ /** Number of Pages */
+ Uint32 noOfPages;
+ /** Page Id*/
+ Uint32 pageId;
+ /** Table Id of read table */
+ Uint32 tableId;
+
+ bool inUse;
+ Callback m_callback;
+ };
+ ReadTableRecord c_readTableRecord;
+
+ /**
+ * This record stores all the state needed
+ * when a table file is being written to disk
+ ****************************************************************************/
+ struct WriteTableRecord {
+ /** Number of Pages */
+ Uint32 noOfPages;
+ /** Page Id*/
+ Uint32 pageId;
+ /** Table Files Handled, local state variable */
+ Uint32 noOfTableFilesHandled;
+ /** Table Id of written table */
+ Uint32 tableId;
+ /** State, indicates from where it was called */
+ enum TableWriteState {
+ IDLE = 0,
+ WRITE_ADD_TABLE_MASTER = 1,
+ WRITE_ADD_TABLE_SLAVE = 2,
+ WRITE_RESTART_FROM_MASTER = 3,
+ WRITE_RESTART_FROM_OWN = 4,
+ TWR_CALLBACK = 5
+ };
+ TableWriteState tableWriteState;
+ Callback m_callback;
+ };
+ WriteTableRecord c_writeTableRecord;
+
+ /**
+ * This record stores all the state needed
+ * when a schema file is being read from disk
+ ****************************************************************************/
+ struct ReadSchemaRecord {
+ /** Page Id of schema page */
+ Uint32 pageId;
+ /** First page to read */
+ Uint32 firstPage;
+ /** Number of pages to read */
+ Uint32 noOfPages;
+ /** State, indicates from where it was called */
+ enum SchemaReadState {
+ IDLE = 0,
+ INITIAL_READ_HEAD = 1,
+ INITIAL_READ = 2
+ };
+ SchemaReadState schemaReadState;
+ };
+ ReadSchemaRecord c_readSchemaRecord;
+
+ /**
+ * This record stores all the state needed
+ * when a schema file is being written to disk
+ ****************************************************************************/
+ struct WriteSchemaRecord {
+ /** Page Id of schema page */
+ Uint32 pageId;
+ /** Rewrite entire file */
+ Uint32 newFile;
+ /** First page to write */
+ Uint32 firstPage;
+ /** Number of pages to write */
+ Uint32 noOfPages;
+ /** Schema Files Handled, local state variable */
+ Uint32 noOfSchemaFilesHandled;
+
+ bool inUse;
+ Callback m_callback;
+ };
+ WriteSchemaRecord c_writeSchemaRecord;
+
+ /**
+ * This record stores all the state needed during the restart process,
+ * when table definitions are read back from disk
+ ****************************************************************************/
+ struct RestartRecord {
+ /** Global check point identity */
+ Uint32 gciToRestart;
+
+ /** The active table at restart process */
+ Uint32 activeTable;
+
+ /** The active table at restart process */
+ BlockReference returnBlockRef;
+ };
+ RestartRecord c_restartRecord;
+
+ /**
+ * This record stores all the state needed when a table definition
+ * is being retrieved and sent to a requester
+ ****************************************************************************/
+ struct RetrieveRecord {
+ RetrieveRecord(){ noOfWaiters = 0;}
+
+ /** Only one table definition retrieval at a time */
+ bool busyState;
+
+ /**
+ * Number of requests waiting in the time queue
+ */
+ Uint32 noOfWaiters;
+
+ /** Block Reference of retriever */
+ BlockReference blockRef;
+
+ /** Id of retriever */
+ Uint32 m_senderData;
+
+ /** Table id of retrieved table */
+ Uint32 tableId;
+
+ /** Starting page to retrieve data from */
+ Uint32 retrievePage;
+
+ /** Number of pages retrieved */
+ Uint32 retrievedNoOfPages;
+
+ /** Number of words retrieved */
+ Uint32 retrievedNoOfWords;
+
+ /** Number of words sent currently */
+ Uint32 currentSent;
+
+ /**
+ * Long signal stuff
+ */
+ bool m_useLongSig;
+ };
+ RetrieveRecord c_retrieveRecord;
+
+ /**
+ * This record keeps track of the schema file pages
+ * (current and old versions).
+ *
+ * This is the info stored in one entry of the schema
+ * page. Each table has 4 words of info.
+ * Word 1: Schema version (upper 16 bits)
+ * Table State (lower 16 bits)
+ * Word 2: Number of pages of table description
+ * Word 3: Global checkpoint id table was created
+ * Word 4: Currently zero
+ ****************************************************************************/
+ struct SchemaRecord {
+ /** Schema file first page (0) */
+ Uint32 schemaPage;
+
+ /** Old Schema file first page (used at node restart) */
+ Uint32 oldSchemaPage;
+
+ Callback m_callback;
+ };
+ SchemaRecord c_schemaRecord;
+
+ /*
+ * Schema file, list of schema pages. Use an array until a pool
+ * exists and NDBFS interface can use it.
+ */
+ struct XSchemaFile {
+ SchemaFile* schemaPage;
+ Uint32 noOfPages;
+ };
+ // 0-normal 1-old
+ XSchemaFile c_schemaFile[2];
+
+ void initSchemaFile(XSchemaFile *, Uint32 firstPage, Uint32 lastPage,
+ bool initEntries);
+ void resizeSchemaFile(XSchemaFile * xsf, Uint32 noOfPages);
+ void computeChecksum(XSchemaFile *, Uint32 pageNo);
+ bool validateChecksum(const XSchemaFile *);
+ SchemaFile::TableEntry * getTableEntry(XSchemaFile *, Uint32 tableId);
+
+ Uint32 computeChecksum(const Uint32 * src, Uint32 len);
+
+
+ /* ----------------------------------------------------------------------- */
+ // Node References
+ /* ----------------------------------------------------------------------- */
+ Uint16 c_masterNodeId;
+
+ /* ----------------------------------------------------------------------- */
+ // Various current system properties
+ /* ----------------------------------------------------------------------- */
+ Uint16 c_numberNode;
+ Uint16 c_noHotSpareNodes;
+ Uint16 c_noNodesFailed;
+ Uint32 c_failureNr;
+
+ /* ----------------------------------------------------------------------- */
+ // State variables
+ /* ----------------------------------------------------------------------- */
+
+ enum BlockState {
+ BS_IDLE = 0,
+ BS_CREATE_TAB = 1,
+ BS_BUSY = 2,
+ BS_NODE_FAILURE = 3
+ };
+ BlockState c_blockState;
+
+ struct PackTable {
+
+ enum PackTableState {
+ PTS_IDLE = 0,
+ PTS_ADD_TABLE_MASTER = 1,
+ PTS_ADD_TABLE_SLAVE = 2,
+ PTS_GET_TAB = 3,
+ PTS_RESTART = 4
+ } m_state;
+
+ } c_packTable;
+
+ Uint32 c_startPhase;
+ Uint32 c_restartType;
+ bool c_initialStart;
+ bool c_systemRestart;
+ bool c_nodeRestart;
+ bool c_initialNodeRestart;
+ Uint32 c_tabinfoReceived;
+
+ /**
+ * Temporary structure used when parsing table info
+ */
+ struct ParseDictTabInfoRecord {
+ DictTabInfo::RequestType requestType;
+ Uint32 errorCode;
+ Uint32 errorLine;
+
+ SimpleProperties::UnpackStatus status;
+ Uint32 errorKey;
+ TableRecordPtr tablePtr;
+ };
+
+ // Operation records
+
+ /**
+ * Common part of operation records. Uses KeyTable2. Note that each
+ * seize/release invokes ctor/dtor automatically.
+ */
+ struct OpRecordCommon {
+ Uint32 key; // key shared between master and slaves
+ Uint32 nextHash;
+ Uint32 prevHash;
+ Uint32 hashValue() const {
+ return key;
+ }
+ bool equal(const OpRecordCommon& rec) const {
+ return key == rec.key;
+ }
+ };
+
+ /**
+ * Create table record
+ */
+ struct CreateTableRecord : OpRecordCommon {
+ Uint32 m_senderRef;
+ Uint32 m_senderData;
+ Uint32 m_coordinatorRef;
+
+ Uint32 m_errorCode;
+ void setErrorCode(Uint32 c){ if(m_errorCode == 0) m_errorCode = c;}
+
+ // For alter table
+ Uint32 m_changeMask;
+ bool m_alterTableFailed;
+ AlterTableRef m_alterTableRef;
+ Uint32 m_alterTableId;
+
+ /* Previous table name (used for reverting failed table rename) */
+ char previousTableName[MAX_TAB_NAME_SIZE];
+
+ Uint32 m_tablePtrI;
+ Uint32 m_tabInfoPtrI;
+ Uint32 m_fragmentsPtrI;
+
+ Uint32 m_dihAddFragPtr; // Connect ptr towards DIH
+ Uint32 m_lqhFragPtr; // Connect ptr towards LQH
+
+ Callback m_callback; // Who's using local create tab
+ MutexHandle2<DIH_START_LCP_MUTEX> m_startLcpMutex;
+
+ struct CoordinatorData {
+ Uint32 m_gsn;
+ SafeCounterHandle m_counter;
+ CreateTabReq::RequestType m_requestType;
+ } m_coordinatorData;
+ };
+ typedef Ptr<CreateTableRecord> CreateTableRecordPtr;
+
+ /**
+ * Drop table record
+ */
+ struct DropTableRecord : OpRecordCommon {
+ DropTableReq m_request;
+
+ Uint32 m_requestType;
+ Uint32 m_coordinatorRef;
+
+ Uint32 m_errorCode;
+ void setErrorCode(Uint32 c){ if(m_errorCode == 0) m_errorCode = c;}
+
+ /**
+ * When sending stuff around
+ */
+ struct CoordinatorData {
+ Uint32 m_gsn;
+ Uint32 m_block;
+ SignalCounter m_signalCounter;
+ } m_coordinatorData;
+
+ struct ParticipantData {
+ Uint32 m_gsn;
+ Uint32 m_block;
+ SignalCounter m_signalCounter;
+
+ Callback m_callback;
+ } m_participantData;
+ };
+ typedef Ptr<DropTableRecord> DropTableRecordPtr;
+
+ /**
+ * Request flags passed in signals along with request type and
+ * propagated across operations.
+ */
+ struct RequestFlag {
+ enum {
+ RF_LOCAL = 1 << 0, // create on local node only
+ RF_NOBUILD = 1 << 1, // no need to build index
+ RF_NOTCTRIGGER = 1 << 2 // alter trigger: no trigger in TC
+ };
+ };
+
+ /**
+ * Operation record for create index.
+ */
+ struct OpCreateIndex : OpRecordCommon {
+ // original request (index id will be added)
+ CreateIndxReq m_request;
+ AttributeList m_attrList;
+ char m_indexName[MAX_TAB_NAME_SIZE];
+ bool m_storedIndex;
+ // coordinator DICT
+ Uint32 m_coordinatorRef;
+ bool m_isMaster;
+ // state info
+ CreateIndxReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ // error info
+ CreateIndxRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // counters
+ SignalCounter m_signalCounter;
+ // ctor
+ OpCreateIndex() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_coordinatorRef = 0;
+ m_requestType = CreateIndxReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_errorCode = CreateIndxRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void save(const CreateIndxReq* req) {
+ m_request = *req;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != CreateIndxRef::NoError;
+ }
+ void setError(const CreateIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const CreateTableRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ switch (ref->getErrorCode()) {
+ case CreateTableRef::TableAlreadyExist:
+ m_errorCode = CreateIndxRef::IndexExists;
+ break;
+ default:
+ m_errorCode = (CreateIndxRef::ErrorCode)ref->getErrorCode();
+ break;
+ }
+ m_errorLine = ref->getErrorLine();
+ }
+ }
+ void setError(const AlterIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (CreateIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpCreateIndex> OpCreateIndexPtr;
+
+ /**
+ * Operation record for drop index.
+ */
+ struct OpDropIndex : OpRecordCommon {
+ // original request
+ DropIndxReq m_request;
+ // coordinator DICT
+ Uint32 m_coordinatorRef;
+ bool m_isMaster;
+ // state info
+ DropIndxReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ // error info
+ DropIndxRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // counters
+ SignalCounter m_signalCounter;
+ // ctor
+ OpDropIndex() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_coordinatorRef = 0;
+ m_requestType = DropIndxReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_errorCode = DropIndxRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void save(const DropIndxReq* req) {
+ m_request = *req;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != DropIndxRef::NoError;
+ }
+ void setError(const DropIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const AlterIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (DropIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const DropTableRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ switch(ref->errorCode) {
+ case(DropTableRef::Busy):
+ m_errorCode = DropIndxRef::Busy;
+ break;
+ case(DropTableRef::NoSuchTable):
+ m_errorCode = DropIndxRef::IndexNotFound;
+ break;
+ case(DropTableRef::DropInProgress):
+ m_errorCode = DropIndxRef::Busy;
+ break;
+ case(DropTableRef::NoDropTableRecordAvailable):
+ m_errorCode = DropIndxRef::Busy;
+ break;
+ default:
+ m_errorCode = (DropIndxRef::ErrorCode)ref->errorCode;
+ break;
+ }
+ //m_errorLine = ref->getErrorLine();
+ //m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpDropIndex> OpDropIndexPtr;
+
+ /**
+ * Operation record for alter index.
+ */
+ struct OpAlterIndex : OpRecordCommon {
+ // original request plus buffer for attribute lists
+ AlterIndxReq m_request;
+ AttributeList m_attrList;
+ AttributeList m_tableKeyList;
+ // coordinator DICT
+ Uint32 m_coordinatorRef;
+ bool m_isMaster;
+ // state info
+ AlterIndxReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ // error info
+ AlterIndxRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // counters
+ SignalCounter m_signalCounter;
+ Uint32 m_triggerCounter;
+ // ctor
+ OpAlterIndex() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_coordinatorRef = 0;
+ m_requestType = AlterIndxReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_errorCode = AlterIndxRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ m_triggerCounter = 0;
+ }
+ void save(const AlterIndxReq* req) {
+ m_request = *req;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != AlterIndxRef::NoError;
+ }
+ void setError(const AlterIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const CreateIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const DropIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const BuildIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
+ }
+ }
+ void setError(const CreateTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const DropTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpAlterIndex> OpAlterIndexPtr;
+
+ /**
+ * Operation record for build index.
+ */
+ struct OpBuildIndex : OpRecordCommon {
+ // original request plus buffer for attribute lists
+ BuildIndxReq m_request;
+ AttributeList m_attrList;
+ AttributeList m_tableKeyList;
+ // coordinator DICT
+ Uint32 m_coordinatorRef;
+ bool m_isMaster;
+ // state info
+ BuildIndxReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ Uint32 m_constrTriggerId;
+ // error info
+ BuildIndxRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // counters
+ SignalCounter m_signalCounter;
+ // ctor
+ OpBuildIndex() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_coordinatorRef = 0;
+ m_requestType = BuildIndxReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+// Uint32 m_constrTriggerId = RNIL;
+ m_errorCode = BuildIndxRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void save(const BuildIndxReq* req) {
+ m_request = *req;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != BuildIndxRef::NoError;
+ }
+ void setError(const BuildIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ }
+ }
+ void setError(const AlterIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (BuildIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const CreateTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (BuildIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const DropTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (BuildIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpBuildIndex> OpBuildIndexPtr;
+
+ /**
+ * Operation record for Util Signals.
+ */
+ struct OpSignalUtil : OpRecordCommon{
+ Callback m_callback;
+ Uint32 m_userData;
+ };
+ typedef Ptr<OpSignalUtil> OpSignalUtilPtr;
+
+ /**
+ * Operation record for subscribe-start-stop
+ */
+ struct OpSubEvent : OpRecordCommon {
+ Uint32 m_senderRef;
+ Uint32 m_senderData;
+ Uint32 m_errorCode;
+ RequestTracker m_reqTracker;
+ };
+ typedef Ptr<OpSubEvent> OpSubEventPtr;
+
+ static const Uint32 sysTab_NDBEVENTS_0_szs[];
+
+ /**
+ * Operation record for create event.
+ */
+ struct OpCreateEvent : OpRecordCommon {
+ // original request (event id will be added)
+ CreateEvntReq m_request;
+ //AttributeMask m_attrListBitmask;
+ // AttributeList m_attrList;
+ sysTab_NDBEVENTS_0 m_eventRec;
+ // char m_eventName[MAX_TAB_NAME_SIZE];
+ // char m_tableName[MAX_TAB_NAME_SIZE];
+
+ // coordinator DICT
+ RequestTracker m_reqTracker;
+ // state info
+ CreateEvntReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ // error info
+ CreateEvntRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // ctor
+ OpCreateEvent() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_requestType = CreateEvntReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_errorCode = CreateEvntRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void init(const CreateEvntReq* req, Dbdict* dp) {
+ m_request = *req;
+ m_errorCode = CreateEvntRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != CreateEvntRef::NoError;
+ }
+ void setError(const CreateEvntRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+
+ };
+ typedef Ptr<OpCreateEvent> OpCreateEventPtr;
+
+ /**
+ * Operation record for drop event.
+ */
+ struct OpDropEvent : OpRecordCommon {
+ // original request
+ DropEvntReq m_request;
+ // char m_eventName[MAX_TAB_NAME_SIZE];
+ sysTab_NDBEVENTS_0 m_eventRec;
+ RequestTracker m_reqTracker;
+ // error info
+ DropEvntRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // ctor
+ OpDropEvent() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_errorCode = DropEvntRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void init(const DropEvntReq* req) {
+ m_request = *req;
+ m_errorCode = DropEvntRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ bool hasError() {
+ return m_errorCode != DropEvntRef::NoError;
+ }
+ void setError(const DropEvntRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpDropEvent> OpDropEventPtr;
+
+ /**
+ * Operation record for create trigger.
+ */
+ struct OpCreateTrigger : OpRecordCommon {
+ // original request (trigger id will be added)
+ CreateTrigReq m_request;
+ char m_triggerName[MAX_TAB_NAME_SIZE];
+ // coordinator DICT
+ Uint32 m_coordinatorRef;
+ bool m_isMaster;
+ // state info
+ CreateTrigReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ // error info
+ CreateTrigRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // counters
+ SignalCounter m_signalCounter;
+ // ctor
+ OpCreateTrigger() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_coordinatorRef = 0;
+ m_requestType = CreateTrigReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_errorCode = CreateTrigRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void save(const CreateTrigReq* req) {
+ m_request = *req;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != CreateTrigRef::NoError;
+ }
+ void setError(const CreateTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const AlterTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (CreateTrigRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpCreateTrigger> OpCreateTriggerPtr;
+
+ /**
+ * Operation record for drop trigger.
+ */
+ struct OpDropTrigger : OpRecordCommon {
+ // original request
+ DropTrigReq m_request;
+ // coordinator DICT
+ Uint32 m_coordinatorRef;
+ bool m_isMaster;
+ // state info
+ DropTrigReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ // error info
+ DropTrigRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // counters
+ SignalCounter m_signalCounter;
+ // ctor
+ OpDropTrigger() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_coordinatorRef = 0;
+ m_requestType = DropTrigReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_errorCode = DropTrigRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void save(const DropTrigReq* req) {
+ m_request = *req;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != DropTrigRef::NoError;
+ }
+ void setError(const DropTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const AlterTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (DropTrigRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpDropTrigger> OpDropTriggerPtr;
+
+ /**
+ * Operation record for alter trigger.
+ */
+ struct OpAlterTrigger : OpRecordCommon {
+ // original request
+ AlterTrigReq m_request;
+ // nodes participating in operation
+ NdbNodeBitmask m_nodes;
+ // coordinator DICT
+ Uint32 m_coordinatorRef;
+ bool m_isMaster;
+ // state info
+ AlterTrigReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ // error info
+ AlterTrigRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // counters
+ SignalCounter m_signalCounter;
+ // ctor
+ OpAlterTrigger() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_coordinatorRef = 0;
+ m_requestType = AlterTrigReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_errorCode = AlterTrigRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void save(const AlterTrigReq* req) {
+ m_request = *req;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != AlterTrigRef::NoError;
+ }
+ void setError(const AlterTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterTrigRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const CreateTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterTrigRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const DropTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterTrigRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpAlterTrigger> OpAlterTriggerPtr;
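The three trigger operation records above share one error-capture idiom: each setError() overload records only the first failure it sees, casting a foreign *Ref error code into the record's own error enum so a later error cannot overwrite the original cause. Below is a minimal, self-contained sketch of that idiom; the FirstError type and the numeric codes in main() are illustrative assumptions, not part of this block.

// Sketch of the "first error wins" idiom used by OpCreateTrigger,
// OpDropTrigger and OpAlterTrigger above. Nothing here is NDB API;
// FirstError and the codes below are made up for illustration.
#include <cstdio>

struct FirstError {
  int code;   // 0 plays the role of *Ref::NoError
  int line;
  int node;
  FirstError() : code(0), line(0), node(0) {}
  bool hasError() const { return code != 0; }
  // Subsequent errors are ignored so the original cause is what gets reported.
  void setError(int c, int l, int n) {
    if (c != 0 && !hasError()) { code = c; line = l; node = n; }
  }
};

int main() {
  FirstError e;
  e.setError(4247, 120, 3);  // first failure is captured
  e.setError(703, 200, 5);   // later failure is dropped
  std::printf("code=%d line=%d node=%d\n", e.code, e.line, e.node);
  return 0;
}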
+
+ // Common operation record pool
+public:
+ STATIC_CONST( opCreateTableSize = sizeof(CreateTableRecord) );
+ STATIC_CONST( opDropTableSize = sizeof(DropTableRecord) );
+ STATIC_CONST( opCreateIndexSize = sizeof(OpCreateIndex) );
+ STATIC_CONST( opDropIndexSize = sizeof(OpDropIndex) );
+ STATIC_CONST( opAlterIndexSize = sizeof(OpAlterIndex) );
+ STATIC_CONST( opBuildIndexSize = sizeof(OpBuildIndex) );
+ STATIC_CONST( opCreateEventSize = sizeof(OpCreateEvent) );
+ STATIC_CONST( opSubEventSize = sizeof(OpSubEvent) );
+ STATIC_CONST( opDropEventSize = sizeof(OpDropEvent) );
+ STATIC_CONST( opSignalUtilSize = sizeof(OpSignalUtil) );
+ STATIC_CONST( opCreateTriggerSize = sizeof(OpCreateTrigger) );
+ STATIC_CONST( opDropTriggerSize = sizeof(OpDropTrigger) );
+ STATIC_CONST( opAlterTriggerSize = sizeof(OpAlterTrigger) );
+private:
+#define PTR_ALIGN(n) ((((n)+sizeof(void*)-1)>>2)&~((sizeof(void*)-1)>>2))
+ union OpRecordUnion {
+ Uint32 u_opCreateTable [PTR_ALIGN(opCreateTableSize)];
+ Uint32 u_opDropTable [PTR_ALIGN(opDropTableSize)];
+ Uint32 u_opCreateIndex [PTR_ALIGN(opCreateIndexSize)];
+ Uint32 u_opDropIndex [PTR_ALIGN(opDropIndexSize)];
+ Uint32 u_opCreateEvent [PTR_ALIGN(opCreateEventSize)];
+ Uint32 u_opSubEvent [PTR_ALIGN(opSubEventSize)];
+ Uint32 u_opDropEvent [PTR_ALIGN(opDropEventSize)];
+ Uint32 u_opSignalUtil [PTR_ALIGN(opSignalUtilSize)];
+ Uint32 u_opAlterIndex [PTR_ALIGN(opAlterIndexSize)];
+ Uint32 u_opBuildIndex [PTR_ALIGN(opBuildIndexSize)];
+ Uint32 u_opCreateTrigger[PTR_ALIGN(opCreateTriggerSize)];
+ Uint32 u_opDropTrigger [PTR_ALIGN(opDropTriggerSize)];
+ Uint32 u_opAlterTrigger [PTR_ALIGN(opAlterTriggerSize)];
+ Uint32 nextPool;
+ };
+ ArrayPool<OpRecordUnion> c_opRecordPool;
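PTR_ALIGN converts a record size in bytes into a count of Uint32 words, rounded up so the resulting size is a multiple of sizeof(void*); that keeps every member of OpRecordUnion pointer-aligned inside the shared c_opRecordPool. A small worked check of the arithmetic follows; ptrAlignWords is a stand-alone stand-in for the macro, not code from this header.

// Stand-alone check of the PTR_ALIGN arithmetic: byte size -> 32-bit word
// count, rounded up to a multiple of the pointer size.
#include <cstddef>
#include <cstdio>

typedef unsigned int Uint32;

static size_t ptrAlignWords(size_t n) {
  return ((n + sizeof(void*) - 1) >> 2) & ~((sizeof(void*) - 1) >> 2);
}

int main() {
  const size_t samples[] = {1, 4, 7, 8, 100, 101};
  for (size_t k = 0; k < sizeof(samples) / sizeof(samples[0]); k++) {
    size_t n = samples[k];
    size_t words = ptrAlignWords(n);
    // On a 64-bit build, 100 and 101 bytes both map to 26 words = 104 bytes.
    std::printf("%3zu bytes -> %2zu words (%3zu bytes)\n",
                n, words, words * sizeof(Uint32));
  }
  return 0;
}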
+
+ // Operation records
+ KeyTable2<CreateTableRecord, OpRecordUnion> c_opCreateTable;
+ KeyTable2<DropTableRecord, OpRecordUnion> c_opDropTable;
+ KeyTable2<OpCreateIndex, OpRecordUnion> c_opCreateIndex;
+ KeyTable2<OpDropIndex, OpRecordUnion> c_opDropIndex;
+ KeyTable2<OpAlterIndex, OpRecordUnion> c_opAlterIndex;
+ KeyTable2<OpBuildIndex, OpRecordUnion> c_opBuildIndex;
+ KeyTable2<OpCreateEvent, OpRecordUnion> c_opCreateEvent;
+ KeyTable2<OpSubEvent, OpRecordUnion> c_opSubEvent;
+ KeyTable2<OpDropEvent, OpRecordUnion> c_opDropEvent;
+ KeyTable2<OpSignalUtil, OpRecordUnion> c_opSignalUtil;
+ KeyTable2<OpCreateTrigger, OpRecordUnion> c_opCreateTrigger;
+ KeyTable2<OpDropTrigger, OpRecordUnion> c_opDropTrigger;
+ KeyTable2<OpAlterTrigger, OpRecordUnion> c_opAlterTrigger;
+
+  // Unique key for operation. XXX: move to some system table
+ Uint32 c_opRecordSequence;
+
+ // Statement blocks
+
+ /* ------------------------------------------------------------ */
+ // Start/Restart Handling
+ /* ------------------------------------------------------------ */
+ void sendSTTORRY(Signal* signal);
+ void sendNDB_STTORRY(Signal* signal);
+ void initSchemaFile(Signal* signal);
+
+ /* ------------------------------------------------------------ */
+ // Drop Table Handling
+ /* ------------------------------------------------------------ */
+ void releaseTableObject(Uint32 tableId, bool removeFromHash = true);
+
+ /* ------------------------------------------------------------ */
+ // General Stuff
+ /* ------------------------------------------------------------ */
+ Uint32 getFreeTableRecord(Uint32 primaryTableId);
+ Uint32 getFreeTriggerRecord();
+ bool getNewAttributeRecord(TableRecordPtr tablePtr,
+ AttributeRecordPtr & attrPtr);
+ void packTableIntoPages(Signal* signal, Uint32 tableId, Uint32 pageId);
+ void packTableIntoPagesImpl(SimpleProperties::Writer &, TableRecordPtr,
+ Signal* signal= 0);
+
+ void sendGET_TABINFOREQ(Signal* signal,
+ Uint32 tableId);
+ void sendTC_SCHVERREQ(Signal* signal,
+ Uint32 tableId,
+ BlockReference tcRef);
+
+ /* ------------------------------------------------------------ */
+ // System Restart Handling
+ /* ------------------------------------------------------------ */
+ void initSendSchemaData(Signal* signal);
+ void sendSchemaData(Signal* signal);
+ Uint32 sendSCHEMA_INFO(Signal* signal, Uint32 nodeId, Uint32* pagePointer);
+ void checkSchemaStatus(Signal* signal);
+ void sendDIHSTARTTAB_REQ(Signal* signal);
+
+ /* ------------------------------------------------------------ */
+ // Receive Table Handling
+ /* ------------------------------------------------------------ */
+ void handleTabInfoInit(SimpleProperties::Reader &,
+ ParseDictTabInfoRecord *,
+ bool checkExist = true);
+ void handleTabInfo(SimpleProperties::Reader & it, ParseDictTabInfoRecord *);
+
+ void handleAddTableFailure(Signal* signal,
+ Uint32 failureLine,
+ Uint32 tableId);
+ bool verifyTableCorrect(Signal* signal, Uint32 tableId);
+
+ /* ------------------------------------------------------------ */
+ // Add Table Handling
+ /* ------------------------------------------------------------ */
+
+ /* ------------------------------------------------------------ */
+ // Add Fragment Handling
+ /* ------------------------------------------------------------ */
+ void sendLQHADDATTRREQ(Signal*, CreateTableRecordPtr, Uint32 attributePtrI);
+
+ /* ------------------------------------------------------------ */
+ // Read/Write Schema and Table files
+ /* ------------------------------------------------------------ */
+ void updateSchemaState(Signal* signal, Uint32 tableId,
+ SchemaFile::TableEntry*, Callback*);
+ void startWriteSchemaFile(Signal* signal);
+ void openSchemaFile(Signal* signal,
+ Uint32 fileNo,
+ Uint32 fsPtr,
+ bool writeFlag,
+ bool newFile);
+ void writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
+ void writeSchemaConf(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ void closeFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
+ void closeWriteSchemaConf(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ void initSchemaFile_conf(Signal* signal, Uint32 i, Uint32 returnCode);
+
+ void writeTableFile(Signal* signal, Uint32 tableId,
+ SegmentedSectionPtr tabInfo, Callback*);
+ void startWriteTableFile(Signal* signal, Uint32 tableId);
+ void openTableFile(Signal* signal,
+ Uint32 fileNo,
+ Uint32 fsPtr,
+ Uint32 tableId,
+ bool writeFlag);
+ void writeTableFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
+ void writeTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ void closeWriteTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+
+ void startReadTableFile(Signal* signal, Uint32 tableId);
+ void openReadTableRef(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ void readTableFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
+ void readTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ void readTableRef(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ void closeReadTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+
+ void startReadSchemaFile(Signal* signal);
+ void openReadSchemaRef(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ void readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
+ void readSchemaConf(Signal* signal, FsConnectRecordPtr fsPtr);
+ void readSchemaRef(Signal* signal, FsConnectRecordPtr fsPtr);
+ void closeReadSchemaConf(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ bool convertSchemaFileTo_5_0_6(XSchemaFile*);
+
+ /* ------------------------------------------------------------ */
+ // Get table definitions
+ /* ------------------------------------------------------------ */
+ void sendGET_TABINFOREF(Signal* signal,
+ GetTabInfoReq*,
+ GetTabInfoRef::ErrorCode errorCode);
+
+ void sendGET_TABLEID_REF(Signal* signal,
+ GetTableIdReq * req,
+ GetTableIdRef::ErrorCode errorCode);
+
+ void sendGetTabResponse(Signal* signal);
+
+ /* ------------------------------------------------------------ */
+ // Indexes and triggers
+ /* ------------------------------------------------------------ */
+
+ // reactivate and rebuild indexes on start up
+ void activateIndexes(Signal* signal, Uint32 i);
+ void rebuildIndexes(Signal* signal, Uint32 i);
+
+ // create index
+ void createIndex_recvReply(Signal* signal, const CreateIndxConf* conf,
+ const CreateIndxRef* ref);
+ void createIndex_slavePrepare(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_fromCreateTable(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_toAlterIndex(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_fromAlterIndex(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_slaveCommit(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_slaveAbort(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_sendSlaveReq(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_sendReply(Signal* signal, OpCreateIndexPtr opPtr, bool);
+ // drop index
+ void dropIndex_recvReply(Signal* signal, const DropIndxConf* conf,
+ const DropIndxRef* ref);
+ void dropIndex_slavePrepare(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_toAlterIndex(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_fromAlterIndex(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_toDropTable(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_fromDropTable(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_slaveCommit(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_slaveAbort(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_sendSlaveReq(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_sendReply(Signal* signal, OpDropIndexPtr opPtr, bool);
+ // alter index
+ void alterIndex_recvReply(Signal* signal, const AlterIndxConf* conf,
+ const AlterIndxRef* ref);
+ void alterIndex_slavePrepare(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_toCreateTc(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_fromCreateTc(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_toDropTc(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_fromDropTc(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_toCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_fromCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_toDropTrigger(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_fromDropTrigger(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_toBuildIndex(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_fromBuildIndex(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_slaveCommit(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_slaveAbort(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_sendSlaveReq(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_sendReply(Signal* signal, OpAlterIndexPtr opPtr, bool);
+ // build index
+ void buildIndex_recvReply(Signal* signal, const BuildIndxConf* conf,
+ const BuildIndxRef* ref);
+ void buildIndex_toCreateConstr(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_fromCreateConstr(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_buildTrix(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_toDropConstr(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_fromDropConstr(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_toOnline(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_fromOnline(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_sendSlaveReq(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_sendReply(Signal* signal, OpBuildIndexPtr opPtr, bool);
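The createIndex_/dropIndex_/alterIndex_/buildIndex_ declarations above follow a recurring shape: a *_sendSlaveReq fans a request out to the participating DICT nodes, *_recvReply is driven by each CONF or REF, a SignalCounter tracks the outstanding replies, and *_sendReply answers the original requester once the last participant has reported. Below is a generic, hedged sketch of that fan-out-and-collect pattern; none of the names in it are real NDB signal APIs.

// Generic fan-out/collect sketch in the spirit of the *_sendSlaveReq() /
// *_recvReply() pairs and SignalCounter members declared above.
#include <cstdio>

typedef unsigned int Uint32;

struct SlaveFanoutSketch {
  Uint32 outstanding;
  bool   failed;

  void sendSlaveReq(Uint32 nodeCount) {
    outstanding = nodeCount;   // one request per participating node
    failed = false;
  }
  // Called once per CONF/REF; the requester is answered only when the
  // last participant has replied.
  void recvReply(bool isRef) {
    if (isRef) failed = true;
    if (--outstanding == 0)
      std::puts(failed ? "send REF to requester" : "send CONF to requester");
  }
};

int main() {
  SlaveFanoutSketch op;
  op.sendSlaveReq(3);
  op.recvReply(false);
  op.recvReply(true);    // one participant refused
  op.recvReply(false);   // last reply -> REF goes back
  return 0;
}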
+
+ // Events
+ void
+ createEventUTIL_PREPARE(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void
+ createEventUTIL_EXECUTE(Signal *signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void
+ dropEventUTIL_PREPARE_READ(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void
+ dropEventUTIL_EXECUTE_READ(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void
+ dropEventUTIL_PREPARE_DELETE(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void
+ dropEventUTIL_EXECUTE_DELETE(Signal *signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void
+ dropEventUtilPrepareRef(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void
+ dropEventUtilExecuteRef(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ int
+ sendSignalUtilReq(Callback *c,
+ BlockReference ref,
+ GlobalSignalNumber gsn,
+ Signal* signal,
+ Uint32 length,
+ JobBufferLevel jbuf,
+ LinearSectionPtr ptr[3],
+ Uint32 noOfSections);
+ int
+ recvSignalUtilReq(Signal* signal, Uint32 returnCode);
+
+ void completeSubStartReq(Signal* signal, Uint32 ptrI, Uint32 returnCode);
+ void completeSubStopReq(Signal* signal, Uint32 ptrI, Uint32 returnCode);
+ void completeSubRemoveReq(Signal* signal, Uint32 ptrI, Uint32 returnCode);
+
+ void dropEvent_sendReply(Signal* signal,
+ OpDropEventPtr evntRecPtr);
+
+ void createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr);
+ void createEventComplete_RT_USER_CREATE(Signal* signal,
+ OpCreateEventPtr evntRecPtr);
+ void createEvent_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr);
+ void createEventComplete_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr);
+
+ void createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPtr);
+
+ void createEvent_nodeFailCallback(Signal* signal, Uint32 eventRecPtrI,
+ Uint32 returnCode);
+ void createEvent_sendReply(Signal* signal, OpCreateEventPtr evntRecPtr,
+ LinearSectionPtr *ptr = NULL, int noLSP = 0);
+
+ void prepareTransactionEventSysTable (Callback *c,
+ Signal* signal,
+ Uint32 senderData,
+ UtilPrepareReq::OperationTypeValue prepReq);
+ void prepareUtilTransaction(Callback *c,
+ Signal* signal,
+ Uint32 senderData,
+ Uint32 tableId,
+ const char *tableName,
+ UtilPrepareReq::OperationTypeValue prepReq,
+ Uint32 noAttr,
+ Uint32 attrIds[],
+ const char *attrNames[]);
+
+ void executeTransEventSysTable(Callback *c,
+ Signal *signal,
+ const Uint32 ptrI,
+ sysTab_NDBEVENTS_0& m_eventRec,
+ const Uint32 prepareId,
+ UtilPrepareReq::OperationTypeValue prepReq);
+ void executeTransaction(Callback *c,
+ Signal* signal,
+ Uint32 senderData,
+ Uint32 prepareId,
+ Uint32 noAttr,
+ LinearSectionPtr headerPtr,
+ LinearSectionPtr dataPtr);
+
+ void parseReadEventSys(Signal *signal, sysTab_NDBEVENTS_0& m_eventRec);
+
+ // create trigger
+ void createTrigger_recvReply(Signal* signal, const CreateTrigConf* conf,
+ const CreateTrigRef* ref);
+ void createTrigger_slavePrepare(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_masterSeize(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_slaveCreate(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_toAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_fromAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_slaveCommit(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_slaveAbort(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_sendSlaveReq(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_sendReply(Signal* signal, OpCreateTriggerPtr opPtr, bool);
+ // drop trigger
+ void dropTrigger_recvReply(Signal* signal, const DropTrigConf* conf,
+ const DropTrigRef* ref);
+ void dropTrigger_slavePrepare(Signal* signal, OpDropTriggerPtr opPtr);
+ void dropTrigger_toAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr);
+ void dropTrigger_fromAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr);
+ void dropTrigger_slaveCommit(Signal* signal, OpDropTriggerPtr opPtr);
+ void dropTrigger_slaveAbort(Signal* signal, OpDropTriggerPtr opPtr);
+ void dropTrigger_sendSlaveReq(Signal* signal, OpDropTriggerPtr opPtr);
+ void dropTrigger_sendReply(Signal* signal, OpDropTriggerPtr opPtr, bool);
+ // alter trigger
+ void alterTrigger_recvReply(Signal* signal, const AlterTrigConf* conf,
+ const AlterTrigRef* ref);
+ void alterTrigger_slavePrepare(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_toCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_fromCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_toDropLocal(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_fromDropLocal(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_slaveCommit(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_slaveAbort(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_sendSlaveReq(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_sendReply(Signal* signal, OpAlterTriggerPtr opPtr, bool);
+ // support
+ void getTableKeyList(TableRecordPtr tablePtr, AttributeList& list);
+ void getIndexAttr(TableRecordPtr indexPtr, Uint32 itAttr, Uint32* id);
+ void getIndexAttrList(TableRecordPtr indexPtr, AttributeList& list);
+ void getIndexAttrMask(TableRecordPtr indexPtr, AttributeMask& mask);
+
+ /* ------------------------------------------------------------ */
+ // Initialisation
+ /* ------------------------------------------------------------ */
+ void initCommonData();
+ void initRecords();
+ void initConnectRecord();
+ void initRetrieveRecord(Signal*, Uint32, Uint32 returnCode);
+ void initSchemaRecord();
+ void initRestartRecord();
+ void initSendSchemaRecord();
+ void initReadTableRecord();
+ void initWriteTableRecord();
+ void initReadSchemaRecord();
+ void initWriteSchemaRecord();
+
+ void initNodeRecords();
+ void initTableRecords();
+ void initialiseTableRecord(TableRecordPtr tablePtr);
+ void initTriggerRecords();
+ void initialiseTriggerRecord(TriggerRecordPtr triggerPtr);
+ void initPageRecords();
+
+ Uint32 getFsConnRecord();
+
+ bool getIsFailed(Uint32 nodeId) const;
+
+ void dropTableRef(Signal * signal, DropTableReq *, DropTableRef::ErrorCode);
+ void printTables(); // For debugging only
+ int handleAlterTab(AlterTabReq * req,
+ CreateTableRecord * regAlterTabPtr,
+ TableRecordPtr origTablePtr,
+ TableRecordPtr newTablePtr);
+ void revertAlterTable(Signal * signal,
+ Uint32 changeMask,
+ Uint32 tableId,
+ CreateTableRecord * regAlterTabPtr);
+ void alterTableRef(Signal * signal,
+ AlterTableReq *, AlterTableRef::ErrorCode,
+ ParseDictTabInfoRecord* parseRecord = NULL);
+ void alterTabRef(Signal * signal,
+ AlterTabReq *, AlterTableRef::ErrorCode,
+ ParseDictTabInfoRecord* parseRecord = NULL);
+ void alterTab_writeSchemaConf(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void alterTab_writeTableConf(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+
+ void prepDropTab_nextStep(Signal* signal, DropTableRecordPtr);
+ void prepDropTab_complete(Signal* signal, DropTableRecordPtr);
+ void prepDropTab_writeSchemaConf(Signal* signal, Uint32 dropTabPtrI, Uint32);
+
+ void dropTab_localDROP_TAB_CONF(Signal* signal);
+ void dropTab_nextStep(Signal* signal, DropTableRecordPtr);
+ void dropTab_complete(Signal* signal, Uint32 dropTabPtrI, Uint32);
+ void dropTab_writeSchemaConf(Signal* signal, Uint32 dropTabPtrI, Uint32);
+
+ void createTab_prepare(Signal* signal, CreateTabReq * req);
+ void createTab_writeSchemaConf1(Signal* signal, Uint32 callback, Uint32);
+ void createTab_writeTableConf(Signal* signal, Uint32 callbackData, Uint32);
+ void createTab_dih(Signal*, CreateTableRecordPtr,
+ SegmentedSectionPtr, Callback*);
+ void createTab_dihComplete(Signal* signal, Uint32 callbackData, Uint32);
+
+ void createTab_startLcpMutex_locked(Signal* signal, Uint32, Uint32);
+ void createTab_startLcpMutex_unlocked(Signal* signal, Uint32, Uint32);
+
+ void createTab_commit(Signal* signal, CreateTabReq * req);
+ void createTab_writeSchemaConf2(Signal* signal, Uint32 callbackData, Uint32);
+ void createTab_alterComplete(Signal*, Uint32 callbackData, Uint32);
+
+ void createTab_drop(Signal* signal, CreateTabReq * req);
+ void createTab_dropComplete(Signal* signal, Uint32 callbackData, Uint32);
+
+ void createTab_reply(Signal* signal, CreateTableRecordPtr, Uint32 nodeId);
+ void alterTab_activate(Signal*, CreateTableRecordPtr, Callback*);
+
+ void restartCreateTab(Signal*, Uint32, const SchemaFile::TableEntry *, bool);
+ void restartCreateTab_readTableConf(Signal* signal, Uint32 callback, Uint32);
+ void restartCreateTab_writeTableConf(Signal* signal, Uint32 callback, Uint32);
+ void restartCreateTab_dihComplete(Signal* signal, Uint32 callback, Uint32);
+ void restartCreateTab_activateComplete(Signal*, Uint32 callback, Uint32);
+
+ void restartDropTab(Signal* signal, Uint32 tableId);
+ void restartDropTab_complete(Signal*, Uint32 callback, Uint32);
+
+ void restart_checkSchemaStatusComplete(Signal*, Uint32 callback, Uint32);
+ void restart_writeSchemaConf(Signal*, Uint32 callbackData, Uint32);
+ void masterRestart_checkSchemaStatusComplete(Signal*, Uint32, Uint32);
+
+ void sendSchemaComplete(Signal*, Uint32 callbackData, Uint32);
+
+ // global metadata support
+ friend class MetaData;
+ int getMetaTablePtr(TableRecordPtr& tablePtr, Uint32 tableId, Uint32 tableVersion);
+ int getMetaTable(MetaData::Table& table, Uint32 tableId, Uint32 tableVersion);
+ int getMetaTable(MetaData::Table& table, const char* tableName);
+ int getMetaAttribute(MetaData::Attribute& attribute, const MetaData::Table& table, Uint32 attributeId);
+ int getMetaAttribute(MetaData::Attribute& attribute, const MetaData::Table& table, const char* attributeName);
+};
+
+#endif
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.txt b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.txt
index 8d4267a1c42..8d4267a1c42 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.txt
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.txt
diff --git a/ndb/src/kernel/blocks/dbdict/DropTable.txt b/storage/ndb/src/kernel/blocks/dbdict/DropTable.txt
index 8d364d15c57..8d364d15c57 100644
--- a/ndb/src/kernel/blocks/dbdict/DropTable.txt
+++ b/storage/ndb/src/kernel/blocks/dbdict/DropTable.txt
diff --git a/ndb/src/kernel/blocks/dbdict/Event.txt b/storage/ndb/src/kernel/blocks/dbdict/Event.txt
index 553c915d9c5..553c915d9c5 100644
--- a/ndb/src/kernel/blocks/dbdict/Event.txt
+++ b/storage/ndb/src/kernel/blocks/dbdict/Event.txt
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Makefile.am b/storage/ndb/src/kernel/blocks/dbdict/Makefile.am
new file mode 100644
index 00000000000..0c22e06b855
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/Makefile.am
@@ -0,0 +1,25 @@
+#SUBDIRS = printSchemafile
+
+noinst_LIBRARIES = libdbdict.a
+
+libdbdict_a_SOURCES = Dbdict.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdbdict.dsp
+
+libdbdict.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libdbdict_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl b/storage/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl
index 1bcec156ef7..1bcec156ef7 100644
--- a/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl
+++ b/storage/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl
diff --git a/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp b/storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp
index 0226991a073..0226991a073 100644
--- a/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp
diff --git a/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl b/storage/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl
index 8740be9595d..8740be9595d 100644
--- a/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl
+++ b/storage/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl
diff --git a/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp
index b9b144cd977..b9b144cd977 100644
--- a/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp
diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
new file mode 100644
index 00000000000..345d1bdac0e
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -0,0 +1,1606 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBDIH_H
+#define DBDIH_H
+
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <SimulatedBlock.hpp>
+#include "Sysfile.hpp"
+#include <ArrayList.hpp>
+#include <SignalCounter.hpp>
+
+#include <signaldata/MasterLCP.hpp>
+#include <signaldata/CopyGCIReq.hpp>
+#include <blocks/mutexes.hpp>
+
+#ifdef DBDIH_C
+
+/*###################*/
+/* FILE SYSTEM FLAGS */
+/*###################*/
+#define ZLIST_OF_PAIRS 0
+#define ZLIST_OF_PAIRS_SYNCH 16
+#define ZOPEN_READ_WRITE 2
+#define ZCREATE_READ_WRITE 0x302
+#define ZCLOSE_NO_DELETE 0
+#define ZCLOSE_DELETE 1
+
+/*###############*/
+/* NODE STATES */
+/*###############*/
+#define ZIDLE 0
+#define ZACTIVE 1
+
+/*#########*/
+/* GENERAL */
+/*#########*/
+#define ZVAR_NO_WORD 1
+#define ZVAR_NO_CRESTART_INFO 20
+#define ZVAR_NO_CRESTART_INFO_TO_FILE 21
+#define ZVALID 1
+#define ZINVALID 2
+
+/*###############*/
+/* ERROR CODES */
+/*###############*/
+// ------------------------------------------
+// Error Codes for Transactions (none so far)
+// ------------------------------------------
+#define ZUNDEFINED_FRAGMENT_ERROR 311
+
+// --------------------------------------
+// Error Codes for Add Table
+// --------------------------------------
+#define ZREPLERROR1 306
+#define ZNOTIMPLEMENTED 307
+#define ZTABLEINSTALLED 310
+// --------------------------------------
+// Error Codes for Scan Table
+// --------------------------------------
+#define ZERRONOUSSTATE 308
+
+// --------------------------------------
+// Crash Codes
+// --------------------------------------
+#define ZCOULD_NOT_OCCUR_ERROR 300
+#define ZNOT_MASTER_ERROR 301
+#define ZWRONG_FAILURE_NUMBER_ERROR 302
+#define ZWRONG_START_NODE_ERROR 303
+#define ZNO_REPLICA_FOUND_ERROR 304
+#define ZNODE_ALREADY_STARTING_ERROR 305
+#define ZNODE_START_DISALLOWED_ERROR 309
+
+// --------------------------------------
+// Codes from LQH
+// --------------------------------------
+#define ZNODE_FAILURE_ERROR 400
+
+
+/*#########*/
+/* PHASES */
+/*#########*/
+#define ZNDB_SPH1 1
+#define ZNDB_SPH2 2
+#define ZNDB_SPH3 3
+#define ZNDB_SPH4 4
+#define ZNDB_SPH5 5
+#define ZNDB_SPH6 6
+#define ZNDB_SPH7 7
+#define ZNDB_SPH8 8
+/*#########*/
+/* SIZES */
+/*#########*/
+#define ZPAGEREC 100
+#define ZCREATE_REPLICA_FILE_SIZE 4
+#define ZPROXY_MASTER_FILE_SIZE 10
+#define ZPROXY_FILE_SIZE 10
+#endif
+
+class Dbdih: public SimulatedBlock {
+public:
+
+ // Records
+
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤
+ * THE API CONNECT RECORD USES THE SAME RECORD POINTER AS IS USED IN THE TC BLOCK.
+ *
+ * IT KEEPS TRACK OF ALL THE OPERATIONS CONNECTED TO THIS TRANSACTION.
+ * IT IS LINKED INTO A QUEUE IN CASE THE GLOBAL CHECKPOINT IS CURRENTLY
+ * ONGOING */
+ struct ApiConnectRecord {
+ Uint32 apiGci;
+ Uint32 nextApi;
+ };
+ typedef Ptr<ApiConnectRecord> ApiConnectRecordPtr;
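ApiConnectRecord is just a GCI plus a nextApi link: a node of an intrusive, index-based queue that can be chained while a global checkpoint is ongoing. The sketch below shows that intrusive-queue pattern over a pool of records; the pool size, the RNIL_SKETCH sentinel and the push-front policy are illustrative assumptions, not taken from this block.

// Intrusive queue over pool indices, in the spirit of ApiConnectRecord::nextApi.
#include <vector>
#include <cstdio>

typedef unsigned int Uint32;
static const Uint32 RNIL_SKETCH = 0xffffffff;  // "no record" sentinel (assumption)

struct ApiConnectSketch {
  Uint32 apiGci;
  Uint32 nextApi;
};

int main() {
  std::vector<ApiConnectSketch> pool(4);
  for (Uint32 i = 0; i < pool.size(); i++) pool[i].nextApi = RNIL_SKETCH;

  // Queue records 2 and 0 while a GCP is ongoing (push front).
  Uint32 head = RNIL_SKETCH;
  const Uint32 toQueue[2] = {2, 0};
  for (int k = 0; k < 2; k++) {
    Uint32 i = toQueue[k];
    pool[i].apiGci  = 100 + i;
    pool[i].nextApi = head;
    head = i;
  }

  // Drain the queue once the GCP completes.
  for (Uint32 i = head; i != RNIL_SKETCH; i = pool[i].nextApi)
    std::printf("record %u waits for GCI %u\n", i, pool[i].apiGci);
  return 0;
}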
+
+ /*############## CONNECT_RECORD ##############*/
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* THE CONNECT RECORD IS CREATED WHEN A TRANSACTION HAS TO START. IT KEEPS
+ ALL INTERMEDIATE INFORMATION NECESSARY FOR THE TRANSACTION FROM THE
+ DISTRIBUTED MANAGER. THE RECORD KEEPS INFORMATION ABOUT THE
+ OPERATIONS THAT HAVE TO BE CARRIED OUT BY THE TRANSACTION AND
+     ALSO THE TRAIL OF NODES FOR EACH OPERATION IN THE
+     TRANSACTION.
+ */
+ struct ConnectRecord {
+ enum ConnectState {
+ INUSE = 0,
+ FREE = 1,
+ STARTED = 2
+ };
+ Uint32 nodes[MAX_REPLICAS];
+ ConnectState connectState;
+ Uint32 nfConnect;
+ Uint32 table;
+ Uint32 userpointer;
+ BlockReference userblockref;
+ };
+ typedef Ptr<ConnectRecord> ConnectRecordPtr;
+
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* THESE RECORDS ARE USED WHEN CREATING REPLICAS DURING SYSTEM */
+ /* RESTART. I NEED A COMPLEX DATA STRUCTURE DESCRIBING THE REPLICAS */
+ /* I WILL TRY TO CREATE FOR EACH FRAGMENT. */
+ /* */
+ /* I STORE A REFERENCE TO THE FOUR POSSIBLE CREATE REPLICA RECORDS */
+ /* IN A COMMON STORED VARIABLE. I ALLOW A MAXIMUM OF 4 REPLICAS TO */
+ /* BE RESTARTED PER FRAGMENT. */
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ struct CreateReplicaRecord {
+ Uint32 logStartGci[MAX_LOG_EXEC];
+ Uint32 logStopGci[MAX_LOG_EXEC];
+ Uint16 logNodeId[MAX_LOG_EXEC];
+ Uint32 createLcpId;
+
+ bool hotSpareUse;
+ Uint32 replicaRec;
+ Uint16 dataNodeId;
+ Uint16 lcpNo;
+ Uint16 noLogNodes;
+ };
+ typedef Ptr<CreateReplicaRecord> CreateReplicaRecordPtr;
+
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* THIS RECORD CONTAINS A FILE DESCRIPTION. THERE ARE TWO */
+  /* FILES PER TABLE TO IMPROVE RESILIENCE AGAINST DISK CRASHES.          */
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ struct FileRecord {
+ enum FileStatus {
+ CLOSED = 0,
+ CRASHED = 1,
+ OPEN = 2
+ };
+ enum FileType {
+ TABLE_FILE = 0,
+ GCP_FILE = 1
+ };
+ enum ReqStatus {
+ IDLE = 0,
+ CREATING_GCP = 1,
+ OPENING_GCP = 2,
+ OPENING_COPY_GCI = 3,
+ WRITING_COPY_GCI = 4,
+ CREATING_COPY_GCI = 5,
+ OPENING_TABLE = 6,
+ READING_GCP = 7,
+ READING_TABLE = 8,
+ WRITE_INIT_GCP = 9,
+ TABLE_CREATE = 10,
+ TABLE_WRITE = 11,
+ TABLE_CLOSE = 12,
+ CLOSING_GCP = 13,
+ CLOSING_TABLE_CRASH = 14,
+ CLOSING_TABLE_SR = 15,
+ CLOSING_GCP_CRASH = 16,
+ TABLE_OPEN_FOR_DELETE = 17,
+ TABLE_CLOSE_DELETE = 18
+ };
+ Uint32 fileName[4];
+ Uint32 fileRef;
+ FileStatus fileStatus;
+ FileType fileType;
+ Uint32 nextFile;
+ ReqStatus reqStatus;
+ Uint32 tabRef;
+ };
+ typedef Ptr<FileRecord> FileRecordPtr;
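The comment above notes that there are two files per table; the usual point of such a scheme is that writes alternate between the two copies, so a crash in the middle of a write still leaves one consistent copy on disk. The sketch below shows that alternating-copy idea with plain stdio; the file names and the write path are illustrative assumptions, not the block's FSOPEN/FSWRITE signal flow.

// Illustrative ping-pong between two file copies; the real code path goes
// through file-system signals, so this is only a sketch of the redundancy idea.
#include <cstdio>
#include <string>

static bool writeCopy(const std::string& name, unsigned version) {
  std::FILE* f = std::fopen(name.c_str(), "wb");
  if (f == nullptr) return false;
  bool ok = std::fwrite(&version, sizeof(version), 1, f) == 1;
  ok = (std::fclose(f) == 0) && ok;
  return ok;
}

int main() {
  const std::string copies[2] = {"tab.Data.0", "tab.Data.1"};
  unsigned version = 0;
  for (int round = 0; round < 4; ++round) {
    ++version;
    // Alternate targets; the copy not being written still holds version - 1.
    const std::string& target = copies[version % 2];
    if (!writeCopy(target, version))
      std::fprintf(stderr, "write of %s failed, other copy intact\n",
                   target.c_str());
  }
  return 0;
}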
+
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+  /* THIS RECORD KEEPS THE STORAGE AND DECISION INFORMATION OF A FRAGMENT  */
+  /* AND ITS REPLICAS. IF A FRAGMENT HAS MORE THAN ONE BACKUP              */
+  /* REPLICA, THEN A LIST OF ADDITIONAL NODES IS ATTACHED TO THIS RECORD.  */
+  /* EACH RECORD IN THAT LIST HAS INFORMATION ABOUT ONE BACKUP. THIS RECORD*/
+  /* ALSO HOLDS THE STATUS OF THE FRAGMENT.                                */
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* */
+ /* FRAGMENTSTORE RECORD ALIGNED TO BE 64 BYTES */
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ struct Fragmentstore {
+ Uint16 activeNodes[MAX_REPLICAS];
+ Uint32 preferredPrimary;
+
+ Uint32 oldStoredReplicas; /* "DEAD" STORED REPLICAS */
+ Uint32 storedReplicas; /* "ALIVE" STORED REPLICAS */
+ Uint32 nextFragmentChunk;
+
+ Uint8 distributionKey;
+ Uint8 fragReplicas;
+ Uint8 noOldStoredReplicas; /* NUMBER OF "DEAD" STORED REPLICAS */
+ Uint8 noStoredReplicas; /* NUMBER OF "ALIVE" STORED REPLICAS*/
+ Uint8 noLcpReplicas; ///< No of replicas remaining to be LCP:ed
+ };
+ typedef Ptr<Fragmentstore> FragmentstorePtr;
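The banner above says the Fragmentstore record is aligned to be 64 bytes. Whether a given build actually hits that size depends on MAX_REPLICAS and padding, so when the cache-line layout matters it can be asserted instead of stated in a comment. A sketch of such a check follows; FragmentstoreSketch is a trimmed stand-in and MAX_REPLICAS_SKETCH = 4 is an assumption (the real limit lives in ndb_limits.h).

// Make the size/alignment intent explicit with a compile-time check.
#include <cstdint>
#include <cstdio>

static const unsigned MAX_REPLICAS_SKETCH = 4;  // assumption for the sketch

struct FragmentstoreSketch {
  uint16_t activeNodes[MAX_REPLICAS_SKETCH];
  uint32_t preferredPrimary;
  uint32_t oldStoredReplicas;
  uint32_t storedReplicas;
  uint32_t nextFragmentChunk;
  uint8_t  distributionKey;
  uint8_t  fragReplicas;
  uint8_t  noOldStoredReplicas;
  uint8_t  noStoredReplicas;
  uint8_t  noLcpReplicas;
};

// Assert the cache-line intent rather than relying on a banner comment.
static_assert(sizeof(FragmentstoreSketch) <= 64,
              "Fragmentstore sketch should fit in one 64-byte cache line");

int main() {
  std::printf("sizeof(FragmentstoreSketch) = %zu bytes\n",
              sizeof(FragmentstoreSketch));
  return 0;
}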
+
+  /*########### NODE GROUP RECORD ############*/
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* THIS RECORD KEEPS INFORMATION ABOUT NODE GROUPS. */
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ struct NodeGroupRecord {
+ Uint32 nodesInGroup[MAX_REPLICAS + 1];
+ Uint32 nextReplicaNode;
+ Uint32 nodeCount;
+ bool activeTakeOver;
+ };
+ typedef Ptr<NodeGroupRecord> NodeGroupRecordPtr;
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* THIS RECORD KEEPS INFORMATION ABOUT NODES. */
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* RECORD ALIGNED TO BE 64 BYTES. */
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ enum NodefailHandlingStep {
+ NF_REMOVE_NODE_FROM_TABLE = 1,
+ NF_GCP_TAKE_OVER = 2,
+ NF_LCP_TAKE_OVER = 4
+ };
+
+ struct NodeRecord {
+ NodeRecord();
+
+ enum NodeStatus {
+ NOT_IN_CLUSTER = 0,
+ ALIVE = 1,
+ STARTING = 2,
+ DIED_NOW = 3,
+ DYING = 4,
+ DEAD = 5
+ };
+
+ struct FragmentCheckpointInfo {
+ Uint32 tableId;
+ Uint32 fragId;
+ Uint32 replicaPtr;
+ };
+
+ enum GcpState {
+ READY = 0,
+ PREPARE_SENT = 1,
+ PREPARE_RECEIVED = 2,
+ COMMIT_SENT = 3,
+ NODE_FINISHED = 4,
+ SAVE_REQ_SENT = 5,
+ SAVE_RECEIVED = 6,
+ COPY_GCI_SENT = 7
+ };
+
+ GcpState gcpstate;
+ Sysfile::ActiveStatus activeStatus;
+
+ NodeStatus nodeStatus;
+ bool useInTransactions;
+ bool allowNodeStart;
+ bool copyCompleted;
+ bool m_inclDihLcp;
+
+ FragmentCheckpointInfo startedChkpt[2];
+ FragmentCheckpointInfo queuedChkpt[2];
+
+ Bitmask<1> m_nodefailSteps;
+ Uint32 activeTabptr;
+ Uint32 nextNode;
+ Uint32 nodeGroup;
+
+ SignalCounter m_NF_COMPLETE_REP;
+
+ Uint8 dbtcFailCompleted;
+ Uint8 dblqhFailCompleted;
+ Uint8 dbdihFailCompleted;
+ Uint8 dbdictFailCompleted;
+ Uint8 recNODE_FAILREP;
+
+ Uint8 noOfQueuedChkpt;
+ Uint8 noOfStartedChkpt;
+
+ MasterLCPConf::State lcpStateAtTakeOver;
+ };
+ typedef Ptr<NodeRecord> NodeRecordPtr;
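NodefailHandlingStep values are powers of two and NodeRecord keeps them in the m_nodefailSteps bitmask, which suggests the usual pattern: set a flag per independent node-failure sub-task, clear it when that sub-task reports back, and treat local handling as complete once no flags remain. A plain-unsigned sketch of that bookkeeping follows; treating the enum values as OR-able flags is an assumption made for illustration only.

// "Start steps, clear steps, done when none remain" sketch suggested by
// NodefailHandlingStep and NodeRecord::m_nodefailSteps.
#include <cstdio>

enum NodefailStepSketch {
  NF_REMOVE_NODE_FROM_TABLE_S = 1,
  NF_GCP_TAKE_OVER_S          = 2,
  NF_LCP_TAKE_OVER_S          = 4
};

int main() {
  unsigned steps = 0;
  // Node failure detected: schedule the independent sub-tasks.
  steps |= NF_REMOVE_NODE_FROM_TABLE_S | NF_GCP_TAKE_OVER_S | NF_LCP_TAKE_OVER_S;

  // Each sub-task clears its own flag when it finishes.
  steps &= ~unsigned(NF_GCP_TAKE_OVER_S);
  steps &= ~unsigned(NF_REMOVE_NODE_FROM_TABLE_S);
  steps &= ~unsigned(NF_LCP_TAKE_OVER_S);

  if (steps == 0)
    std::printf("local node-failure handling complete\n");
  return 0;
}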
+ /**********************************************************************/
+  /*     THIS RECORD KEEPS ONE 8 KBYTE PAGE USED TO STORE TABLE DESCRIPTIONS */
+ /**********************************************************************/
+ struct PageRecord {
+ Uint32 word[2048];
+ /* 8 KBYTE PAGE*/
+ Uint32 nextfreepage;
+ };
+ typedef Ptr<PageRecord> PageRecordPtr;
+
+ /************ REPLICA RECORD *************/
+ /**********************************************************************/
+ /* THIS RECORD KEEPS THE INFORMATION ABOUT A REPLICA OF A FRAGMENT */
+ /**********************************************************************/
+ struct ReplicaRecord {
+ /* -------------------------------------------------------------------- */
+ /* THE GLOBAL CHECKPOINT IDENTITY WHEN THIS REPLICA WAS CREATED. */
+    /* THERE IS ONE INDEX PER REPLICA. A NEW REPLICA INDEX IS CREATED     */
+    /* WHEN A NODE CRASH OCCURS (SEE THE INTERVAL SKETCH AFTER THIS STRUCT).*/
+ /* -------------------------------------------------------------------- */
+ Uint32 createGci[8];
+ /* -------------------------------------------------------------------- */
+ /* THE LAST GLOBAL CHECKPOINT IDENTITY WHICH HAS BEEN SAVED ON DISK. */
+    /* THIS VARIABLE IS ONLY VALID FOR REPLICAS WHICH HAVE "DIED". A REPLICA*/
+    /* "DIES" EITHER WHEN THE NODE THAT KEPT THE REPLICA CRASHES OR WHEN IT */
+    /* IS STOPPED IN A CONTROLLED MANNER.                                   */
+    /* THERE IS ONE INDEX PER REPLICA. A NEW REPLICA INDEX IS CREATED       */
+    /* WHEN A NODE CRASH OCCURS.                                            */
+ /* -------------------------------------------------------------------- */
+ Uint32 replicaLastGci[8];
+ /* -------------------------------------------------------------------- */
+ /* THE LOCAL CHECKPOINT IDENTITY OF A LOCAL CHECKPOINT. */
+ /* -------------------------------------------------------------------- */
+ Uint32 lcpId[MAX_LCP_STORED];
+ /* -------------------------------------------------------------------- */
+ /* THIS VARIABLE KEEPS TRACK OF THE MAXIMUM GLOBAL CHECKPOINT COMPLETED */
+ /* FOR EACH OF THE LOCAL CHECKPOINTS IN THIS FRAGMENT REPLICA. */
+ /* -------------------------------------------------------------------- */
+ Uint32 maxGciCompleted[MAX_LCP_STORED];
+ /* -------------------------------------------------------------------- */
+    /* THIS VARIABLE KEEPS TRACK OF THE MINIMUM GLOBAL CHECKPOINT STARTED FOR*/
+ /* EACH OF THE LOCAL CHECKPOINTS IN THIS FRAGMENT REPLICA. */
+ /* -------------------------------------------------------------------- */
+ Uint32 maxGciStarted[MAX_LCP_STORED];
+ /* -------------------------------------------------------------------- */
+ /* THE GLOBAL CHECKPOINT IDENTITY WHEN THE TABLE WAS CREATED. */
+ /* -------------------------------------------------------------------- */
+ Uint32 initialGci;
+
+ /* -------------------------------------------------------------------- */
+ /* THE REFERENCE TO THE NEXT REPLICA. EITHER IT REFERS TO THE NEXT IN */
+ /* THE FREE LIST OR IT REFERS TO THE NEXT IN A LIST OF REPLICAS ON A */
+ /* FRAGMENT. */
+ /* -------------------------------------------------------------------- */
+ Uint32 nextReplica;
+
+ /* -------------------------------------------------------------------- */
+ /* THE NODE ID WHERE THIS REPLICA IS STORED. */
+ /* -------------------------------------------------------------------- */
+ Uint16 procNode;
+
+ /* -------------------------------------------------------------------- */
+ /* The last local checkpoint id started or queued on this replica. */
+ /* -------------------------------------------------------------------- */
+ Uint32 lcpIdStarted; // Started or queued
+
+ /* -------------------------------------------------------------------- */
+    /* THIS VARIABLE SPECIFIES WHAT THE STATUS OF THE LOCAL CHECKPOINT IS. IT*/
+    /* CAN EITHER BE VALID OR INVALID. AT CREATION OF A FRAGMENT REPLICA ALL */
+    /* LCP'S ARE INVALID. ALSO, IF INDEX >= NO_LCP, THEN THE LOCAL CHECKPOINT*/
+    /* IS ALWAYS INVALID. IF THE LCP BEFORE THE NEXT_LCP HAS AN LCP_ID THAT  */
+    /* DIFFERS FROM THE LATEST LCP_ID STARTED, THEN THE NEXT_LCP IS ALSO     */
+    /* INVALID.                                                             */
+ /* -------------------------------------------------------------------- */
+ Uint8 lcpStatus[MAX_LCP_STORED];
+
+ /* -------------------------------------------------------------------- */
+ /* THE NEXT LOCAL CHECKPOINT TO EXECUTE IN THIS FRAGMENT REPLICA. */
+ /* -------------------------------------------------------------------- */
+ Uint8 nextLcp;
+
+ /* -------------------------------------------------------------------- */
+    /* THE NUMBER OF CRASHES OF THIS REPLICA SO FAR.                        */
+ /* -------------------------------------------------------------------- */
+ Uint8 noCrashedReplicas;
+
+ /**
+     * Is an LCP currently ongoing on this fragment?
+ */
+ Uint8 lcpOngoingFlag;
+ };
+ typedef Ptr<ReplicaRecord> ReplicaRecordPtr;
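The createGci[i]/replicaLastGci[i] pairs above describe the GCI intervals during which this replica was alive, one pair per crash so far (noCrashedReplicas of them are closed). The sketch below scans those pairs to find which interval, if any, contains a given GCI; it only illustrates the data layout, while the real findLogInterval()/findStartGci() routines declared later in this block have their own exact rules.

// Sketch: scan the (createGci, replicaLastGci) pairs of a replica to find
// the alive-interval containing a given GCI. Illustration only.
#include <cstdio>

typedef unsigned int Uint32;
static const Uint32 NO_INTERVAL = 0xffffffff;

struct ReplicaIntervalsSketch {
  Uint32 createGci[8];
  Uint32 replicaLastGci[8];
  Uint32 noCrashedReplicas;  // number of closed intervals
};

static Uint32 findAliveInterval(const ReplicaIntervalsSketch& r, Uint32 gci) {
  for (Uint32 i = 0; i <= r.noCrashedReplicas && i < 8; i++) {
    if (gci >= r.createGci[i] && gci <= r.replicaLastGci[i])
      return i;
  }
  return NO_INTERVAL;
}

int main() {
  ReplicaIntervalsSketch r = {};
  r.createGci[0] = 10;  r.replicaLastGci[0] = 99;          // crashed once
  r.createGci[1] = 120; r.replicaLastGci[1] = 0xffffffff;  // still alive
  r.noCrashedReplicas = 1;
  std::printf("gci 50  -> interval %u\n", findAliveInterval(r, 50));
  std::printf("gci 110 -> interval %u\n", findAliveInterval(r, 110));
  return 0;
}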
+
+ /*************************************************************************
+ * TAB_DESCRIPTOR IS A DESCRIPTOR OF THE LOCATION OF THE FRAGMENTS BELONGING
+   * TO THE TABLE. THE INFORMATION ABOUT THE FRAGMENTS OF A TABLE IS STORED IN
+   * CHUNKS OF FRAGMENTSTORE RECORDS.
+   * THIS RECORD ALSO HAS THE NECESSARY INFORMATION TO LOCATE A FRAGMENT AND
+   * TO TRANSLATE A KEY OF A TUPLE TO THE FRAGMENT IT BELONGS TO.
+ */
+ struct TabRecord {
+ /**
+ * State for copying table description into pages
+ */
+ enum CopyStatus {
+ CS_IDLE,
+ CS_SR_PHASE1_READ_PAGES,
+ CS_SR_PHASE2_READ_TABLE,
+ CS_SR_PHASE3_COPY_TABLE,
+ CS_REMOVE_NODE,
+ CS_LCP_READ_TABLE,
+ CS_COPY_TAB_REQ,
+ CS_COPY_NODE_STATE,
+ CS_ADD_TABLE_MASTER,
+ CS_ADD_TABLE_SLAVE,
+ CS_INVALIDATE_NODE_LCP
+ };
+ /**
+ * State for copying pages to disk
+ */
+ enum UpdateState {
+ US_IDLE,
+ US_LOCAL_CHECKPOINT,
+ US_REMOVE_NODE,
+ US_COPY_TAB_REQ,
+ US_ADD_TABLE_MASTER,
+ US_ADD_TABLE_SLAVE,
+ US_INVALIDATE_NODE_LCP
+ };
+ enum TabLcpStatus {
+ TLS_ACTIVE = 1,
+ TLS_WRITING_TO_FILE = 2,
+ TLS_COMPLETED = 3
+ };
+ enum TabStatus {
+ TS_IDLE = 0,
+ TS_ACTIVE = 1,
+ TS_CREATING = 2,
+ TS_DROPPING = 3
+ };
+ enum Method {
+ LINEAR_HASH = 0,
+ NOTDEFINED = 1,
+ NORMAL_HASH = 2,
+ USER_DEFINED = 3
+ };
+ CopyStatus tabCopyStatus;
+ UpdateState tabUpdateState;
+ TabLcpStatus tabLcpStatus;
+ TabStatus tabStatus;
+ Method method;
+
+ Uint32 pageRef[8];
+//-----------------------------------------------------------------------------
+// Each entry in this array contains a reference to a chunk of 16 fragment
+// records in a row, so the correct record can be found quickly given the
+// fragment id (see the lookup sketch after this struct).
+//-----------------------------------------------------------------------------
+ Uint32 startFid[MAX_NDB_NODES];
+
+ Uint32 tabFile[2];
+ Uint32 connectrec;
+ Uint32 hashpointer;
+ Uint32 mask;
+ Uint32 noOfWords;
+ Uint32 schemaVersion;
+ Uint32 tabRemoveNode;
+ Uint32 totalfragments;
+ Uint32 noOfFragChunks;
+ Uint32 tabErrorCode;
+ struct {
+ Uint32 tabUserRef;
+ Uint32 tabUserPtr;
+ } m_dropTab;
+
+ struct DropTable {
+ Uint32 senderRef;
+ Uint32 senderData;
+ SignalCounter waitDropTabCount;
+ } m_prepDropTab;
+
+ Uint8 kvalue;
+ Uint8 noOfBackups;
+ Uint8 noPages;
+ Uint8 storedTable; /* 0 IF THE TABLE IS A TEMPORARY TABLE */
+ Uint16 tableType;
+ Uint16 primaryTableId;
+ };
+ typedef Ptr<TabRecord> TabRecordPtr;
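As the startFid comment inside the struct notes (and as referenced there), fragment records are kept in chunks of 16, so locating a fragment record is two lookups plus simple arithmetic: chunk = fragId / 16, offset = fragId % 16. A worked sketch of that lookup follows; the pool indices and chunk layout in it are illustrative assumptions.

// Sketch of the chunked fragment lookup implied by TabRecord::startFid.
#include <cstdio>

typedef unsigned int Uint32;
static const Uint32 CHUNK_SIZE = 16;   // "16 fragment records in a row"

struct TabSketch {
  Uint32 startFid[8];  // first fragment-record index of each chunk
};

static Uint32 fragmentRecordIndex(const TabSketch& tab, Uint32 fragId) {
  Uint32 chunk  = fragId >> 4;                 // fragId / 16
  Uint32 offset = fragId & (CHUNK_SIZE - 1);   // fragId % 16
  return tab.startFid[chunk] + offset;         // index into the fragment pool
}

int main() {
  TabSketch tab = {{100, 340, 512, 0, 0, 0, 0, 0}};
  std::printf("fragId 5  -> record %u\n", fragmentRecordIndex(tab, 5));   // 105
  std::printf("fragId 21 -> record %u\n", fragmentRecordIndex(tab, 21));  // 345
  return 0;
}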
+
+ /***************************************************************************/
+  /* THIS RECORD IS USED TO KEEP TRACK OF TAKE-OVER AND NODE START.        */
+ /* WE KEEP IT IN A RECORD TO ENABLE IT TO BE PARALLELISED IN THE FUTURE. */
+ /**************************************************************************/
+ struct TakeOverRecord {
+ enum ToMasterStatus {
+ IDLE = 0,
+ TO_WAIT_START_TAKE_OVER = 1,
+ TO_START_COPY = 2,
+ TO_START_COPY_ONGOING = 3,
+ TO_WAIT_START = 4,
+ STARTING = 5,
+ SELECTING_NEXT = 6,
+ TO_WAIT_PREPARE_CREATE = 9,
+ PREPARE_CREATE = 10,
+ COPY_FRAG = 11,
+ TO_WAIT_UPDATE_TO = 12,
+ TO_UPDATE_TO = 13,
+ COPY_ACTIVE = 14,
+ TO_WAIT_COMMIT_CREATE = 15,
+ LOCK_MUTEX = 23,
+ COMMIT_CREATE = 16,
+ TO_COPY_COMPLETED = 17,
+ WAIT_LCP = 18,
+ TO_END_COPY = 19,
+ TO_END_COPY_ONGOING = 20,
+ TO_WAIT_ENDING = 21,
+ ENDING = 22
+ };
+ enum ToSlaveStatus {
+ TO_SLAVE_IDLE = 0,
+ TO_SLAVE_STARTED = 1,
+ TO_SLAVE_CREATE_PREPARE = 2,
+ TO_SLAVE_COPY_FRAG_COMPLETED = 3,
+ TO_SLAVE_CREATE_COMMIT = 4,
+ TO_SLAVE_COPY_COMPLETED = 5
+ };
+ Uint32 startGci;
+ Uint32 toCopyNode;
+ Uint32 toCurrentFragid;
+ Uint32 toCurrentReplica;
+ Uint32 toCurrentTabref;
+ Uint32 toFailedNode;
+ Uint32 toStartingNode;
+ Uint32 nextTakeOver;
+ Uint32 prevTakeOver;
+ bool toNodeRestart;
+ ToMasterStatus toMasterStatus;
+ ToSlaveStatus toSlaveStatus;
+ MutexHandle2<DIH_SWITCH_PRIMARY_MUTEX> m_switchPrimaryMutexHandle;
+ };
+ typedef Ptr<TakeOverRecord> TakeOverRecordPtr;
+
+public:
+ Dbdih(const class Configuration &);
+ virtual ~Dbdih();
+
+ struct RWFragment {
+ Uint32 pageIndex;
+ Uint32 wordIndex;
+ Uint32 fragId;
+ TabRecordPtr rwfTabPtr;
+ PageRecordPtr rwfPageptr;
+ };
+ struct CopyTableNode {
+ Uint32 pageIndex;
+ Uint32 wordIndex;
+ Uint32 noOfWords;
+ TabRecordPtr ctnTabPtr;
+ PageRecordPtr ctnPageptr;
+ };
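RWFragment and CopyTableNode both carry a (pageIndex, wordIndex) cursor over 2048-word pages, which is how table descriptions are streamed word by word through the 8 KB PageRecord pages. Below is a minimal cursor sketch in that spirit; readPageWord() here is an illustration only, not the readPageWord(RWFragment*) member declared further down in this header.

// Sketch of a word cursor over 2048-word pages, mirroring the
// pageIndex/wordIndex pair used by RWFragment and CopyTableNode.
#include <vector>
#include <cstdio>

typedef unsigned int Uint32;
static const Uint32 WORDS_PER_PAGE = 2048;   // PageRecord::word[2048]

struct PageCursorSketch {
  const std::vector<std::vector<Uint32> >* pages;
  Uint32 pageIndex;
  Uint32 wordIndex;

  Uint32 readPageWord() {
    if (wordIndex >= WORDS_PER_PAGE) {  // step to the next page
      pageIndex++;
      wordIndex = 0;
    }
    return (*pages)[pageIndex][wordIndex++];
  }
};

int main() {
  std::vector<std::vector<Uint32> > pages(2, std::vector<Uint32>(WORDS_PER_PAGE, 7));
  pages[1][0] = 42;
  PageCursorSketch c = {&pages, 0, WORDS_PER_PAGE - 1};
  std::printf("%u %u\n", c.readPageWord(), c.readPageWord());  // 7 then 42
  return 0;
}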
+
+private:
+ BLOCK_DEFINES(Dbdih);
+
+ void execDUMP_STATE_ORD(Signal *);
+ void execNDB_TAMPER(Signal *);
+ void execDEBUG_SIG(Signal *);
+ void execEMPTY_LCP_CONF(Signal *);
+ void execMASTER_GCPREF(Signal *);
+ void execMASTER_GCPREQ(Signal *);
+ void execMASTER_GCPCONF(Signal *);
+ void execMASTER_LCPREF(Signal *);
+ void execMASTER_LCPREQ(Signal *);
+ void execMASTER_LCPCONF(Signal *);
+ void execNF_COMPLETEREP(Signal *);
+ void execSTART_PERMREQ(Signal *);
+ void execSTART_PERMCONF(Signal *);
+ void execSTART_PERMREF(Signal *);
+ void execINCL_NODEREQ(Signal *);
+ void execINCL_NODECONF(Signal *);
+ void execEND_TOREQ(Signal *);
+ void execEND_TOCONF(Signal *);
+ void execSTART_TOREQ(Signal *);
+ void execSTART_TOCONF(Signal *);
+ void execSTART_MEREQ(Signal *);
+ void execSTART_MECONF(Signal *);
+ void execSTART_MEREF(Signal *);
+ void execSTART_COPYREQ(Signal *);
+ void execSTART_COPYCONF(Signal *);
+ void execSTART_COPYREF(Signal *);
+ void execCREATE_FRAGREQ(Signal *);
+ void execCREATE_FRAGCONF(Signal *);
+ void execDIVERIFYREQ(Signal *);
+ void execGCP_SAVECONF(Signal *);
+ void execGCP_PREPARECONF(Signal *);
+ void execGCP_PREPARE(Signal *);
+ void execGCP_NODEFINISH(Signal *);
+ void execGCP_COMMIT(Signal *);
+ void execDIHNDBTAMPER(Signal *);
+ void execCONTINUEB(Signal *);
+ void execCOPY_GCIREQ(Signal *);
+ void execCOPY_GCICONF(Signal *);
+ void execCOPY_TABREQ(Signal *);
+ void execCOPY_TABCONF(Signal *);
+ void execTCGETOPSIZECONF(Signal *);
+ void execTC_CLOPSIZECONF(Signal *);
+
+ void execLCP_FRAG_REP(Signal *);
+ void execLCP_COMPLETE_REP(Signal *);
+ void execSTART_LCP_REQ(Signal *);
+ void execSTART_LCP_CONF(Signal *);
+ MutexHandle2<DIH_START_LCP_MUTEX> c_startLcpMutexHandle;
+ void startLcpMutex_locked(Signal* signal, Uint32, Uint32);
+ void startLcpMutex_unlocked(Signal* signal, Uint32, Uint32);
+
+ MutexHandle2<DIH_SWITCH_PRIMARY_MUTEX> c_switchPrimaryMutexHandle;
+ void switchPrimaryMutex_locked(Signal* signal, Uint32, Uint32);
+ void switchPrimaryMutex_unlocked(Signal* signal, Uint32, Uint32);
+ void switch_primary_stop_node(Signal* signal, Uint32, Uint32);
+
+ void execBLOCK_COMMIT_ORD(Signal *);
+ void execUNBLOCK_COMMIT_ORD(Signal *);
+
+ void execDIH_SWITCH_REPLICA_REQ(Signal *);
+ void execDIH_SWITCH_REPLICA_REF(Signal *);
+ void execDIH_SWITCH_REPLICA_CONF(Signal *);
+
+ void execSTOP_PERM_REQ(Signal *);
+ void execSTOP_PERM_REF(Signal *);
+ void execSTOP_PERM_CONF(Signal *);
+
+ void execSTOP_ME_REQ(Signal *);
+ void execSTOP_ME_REF(Signal *);
+ void execSTOP_ME_CONF(Signal *);
+
+ void execREAD_CONFIG_REQ(Signal *);
+ void execUNBLO_DICTCONF(Signal *);
+ void execCOPY_ACTIVECONF(Signal *);
+ void execTAB_COMMITREQ(Signal *);
+ void execNODE_FAILREP(Signal *);
+ void execCOPY_FRAGCONF(Signal *);
+ void execCOPY_FRAGREF(Signal *);
+ void execDIADDTABREQ(Signal *);
+ void execDIGETNODESREQ(Signal *);
+ void execDIRELEASEREQ(Signal *);
+ void execDISEIZEREQ(Signal *);
+ void execSTTOR(Signal *);
+ void execDI_FCOUNTREQ(Signal *);
+ void execDIGETPRIMREQ(Signal *);
+ void execGCP_SAVEREF(Signal *);
+ void execGCP_TCFINISHED(Signal *);
+ void execREAD_NODESCONF(Signal *);
+ void execNDB_STTOR(Signal *);
+ void execDICTSTARTCONF(Signal *);
+ void execNDB_STARTREQ(Signal *);
+ void execGETGCIREQ(Signal *);
+ void execDIH_RESTARTREQ(Signal *);
+ void execSTART_RECCONF(Signal *);
+ void execSTART_FRAGCONF(Signal *);
+ void execADD_FRAGCONF(Signal *);
+ void execADD_FRAGREF(Signal *);
+ void execFSOPENCONF(Signal *);
+ void execFSOPENREF(Signal *);
+ void execFSCLOSECONF(Signal *);
+ void execFSCLOSEREF(Signal *);
+ void execFSREADCONF(Signal *);
+ void execFSREADREF(Signal *);
+ void execFSWRITECONF(Signal *);
+ void execFSWRITEREF(Signal *);
+ void execSET_VAR_REQ(Signal *);
+ void execCHECKNODEGROUPSREQ(Signal *);
+ void execSTART_INFOREQ(Signal*);
+ void execSTART_INFOREF(Signal*);
+ void execSTART_INFOCONF(Signal*);
+ void execWAIT_GCP_REQ(Signal* signal);
+ void execWAIT_GCP_REF(Signal* signal);
+ void execWAIT_GCP_CONF(Signal* signal);
+ void execUPDATE_TOREQ(Signal* signal);
+ void execUPDATE_TOCONF(Signal* signal);
+
+ void execPREP_DROP_TAB_REQ(Signal* signal);
+ void execWAIT_DROP_TAB_REF(Signal* signal);
+ void execWAIT_DROP_TAB_CONF(Signal* signal);
+ void execDROP_TAB_REQ(Signal* signal);
+
+ void execALTER_TAB_REQ(Signal* signal);
+
+ void execCREATE_FRAGMENTATION_REQ(Signal*);
+
+ void waitDropTabWritingToFile(Signal *, TabRecordPtr tabPtr);
+ void checkPrepDropTabComplete(Signal *, TabRecordPtr tabPtr);
+ void checkWaitDropTabFailedLqh(Signal *, Uint32 nodeId, Uint32 tableId);
+
+ // Statement blocks
+//------------------------------------
+// Methods that send signals
+//------------------------------------
+ void nullRoutine(Signal *, Uint32 nodeId);
+ void sendCOPY_GCIREQ(Signal *, Uint32 nodeId);
+ void sendDIH_SWITCH_REPLICA_REQ(Signal *, Uint32 nodeId);
+ void sendEMPTY_LCP_REQ(Signal *, Uint32 nodeId);
+ void sendEND_TOREQ(Signal *, Uint32 nodeId);
+ void sendGCP_COMMIT(Signal *, Uint32 nodeId);
+ void sendGCP_PREPARE(Signal *, Uint32 nodeId);
+ void sendGCP_SAVEREQ(Signal *, Uint32 nodeId);
+ void sendINCL_NODEREQ(Signal *, Uint32 nodeId);
+ void sendMASTER_GCPREQ(Signal *, Uint32 nodeId);
+ void sendMASTER_LCPREQ(Signal *, Uint32 nodeId);
+ void sendMASTER_LCPCONF(Signal * signal);
+ void sendSTART_RECREQ(Signal *, Uint32 nodeId);
+ void sendSTART_INFOREQ(Signal *, Uint32 nodeId);
+ void sendSTART_TOREQ(Signal *, Uint32 nodeId);
+ void sendSTOP_ME_REQ(Signal *, Uint32 nodeId);
+ void sendTC_CLOPSIZEREQ(Signal *, Uint32 nodeId);
+ void sendTCGETOPSIZEREQ(Signal *, Uint32 nodeId);
+ void sendUPDATE_TOREQ(Signal *, Uint32 nodeId);
+ void sendSTART_LCP_REQ(Signal *, Uint32 nodeId);
+
+ void sendLCP_FRAG_ORD(Signal*, NodeRecord::FragmentCheckpointInfo info);
+ void sendLastLCP_FRAG_ORD(Signal *);
+
+ void sendCopyTable(Signal *, CopyTableNode* ctn,
+ BlockReference ref, Uint32 reqinfo);
+ void sendCreateFragReq(Signal *,
+ Uint32 startGci,
+ Uint32 storedType,
+ Uint32 takeOverPtr);
+ void sendDihfragreq(Signal *,
+ TabRecordPtr regTabPtr,
+ Uint32 fragId);
+ void sendStartFragreq(Signal *,
+ TabRecordPtr regTabPtr,
+ Uint32 fragId);
+ void sendHOT_SPAREREP(Signal *);
+ void sendAddFragreq(Signal *,
+ TabRecordPtr regTabPtr,
+ Uint32 fragId,
+ Uint32 lcpNo,
+ Uint32 param);
+
+ void sendAddFragreq(Signal*, ConnectRecordPtr, TabRecordPtr, Uint32 fragId);
+ void addTable_closeConf(Signal* signal, Uint32 tabPtrI);
+ void resetReplicaSr(TabRecordPtr tabPtr);
+ void resetReplicaLcp(ReplicaRecord * replicaP, Uint32 stopGci);
+
+//------------------------------------
+// Methods for LCP functionality
+//------------------------------------
+ void checkKeepGci(Uint32 replicaStartIndex);
+ void checkLcpStart(Signal *, Uint32 lineNo);
+ void checkStartMoreLcp(Signal *, Uint32 nodeId);
+ bool reportLcpCompletion(const class LcpFragRep *);
+ void sendLCP_COMPLETE_REP(Signal *);
+
+//------------------------------------
+// Methods for Delete Table Files
+//------------------------------------
+ void startDeleteFile(Signal* signal, TabRecordPtr tabPtr);
+ void openTableFileForDelete(Signal* signal, Uint32 fileIndex);
+ void tableOpenLab(Signal* signal, FileRecordPtr regFilePtr);
+ void tableDeleteLab(Signal* signal, FileRecordPtr regFilePtr);
+
+//------------------------------------
+// File Record specific methods
+//------------------------------------
+ void closeFile(Signal *, FileRecordPtr regFilePtr);
+ void closeFileDelete(Signal *, FileRecordPtr regFilePtr);
+ void createFileRw(Signal *, FileRecordPtr regFilePtr);
+ void openFileRw(Signal *, FileRecordPtr regFilePtr);
+ void openFileRo(Signal *, FileRecordPtr regFilePtr);
+ void seizeFile(FileRecordPtr& regFilePtr);
+ void releaseFile(Uint32 fileIndex);
+
+//------------------------------------
+// Methods called when completing file
+// operation.
+//------------------------------------
+ void creatingGcpLab(Signal *, FileRecordPtr regFilePtr);
+ void openingGcpLab(Signal *, FileRecordPtr regFilePtr);
+ void openingTableLab(Signal *, FileRecordPtr regFilePtr);
+ void tableCreateLab(Signal *, FileRecordPtr regFilePtr);
+ void creatingGcpErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void openingCopyGciErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void creatingCopyGciErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void openingGcpErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void openingTableErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void tableCreateErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void closingGcpLab(Signal *, FileRecordPtr regFilePtr);
+ void closingGcpCrashLab(Signal *, FileRecordPtr regFilePtr);
+ void closingTableCrashLab(Signal *, FileRecordPtr regFilePtr);
+ void closingTableSrLab(Signal *, FileRecordPtr regFilePtr);
+ void tableCloseLab(Signal *, FileRecordPtr regFilePtr);
+ void tableCloseErrorLab(FileRecordPtr regFilePtr);
+ void readingGcpLab(Signal *, FileRecordPtr regFilePtr);
+ void readingTableLab(Signal *, FileRecordPtr regFilePtr);
+ void readingGcpErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void readingTableErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void writingCopyGciLab(Signal *, FileRecordPtr regFilePtr);
+ void writeInitGcpLab(Signal *, FileRecordPtr regFilePtr);
+ void tableWriteLab(Signal *, FileRecordPtr regFilePtr);
+ void writeInitGcpErrorLab(Signal *, FileRecordPtr regFilePtr);
+
+
+ void calculateHotSpare();
+ void checkEscalation();
+ void clearRestartInfoBits(Signal *);
+ void invalidateLcpInfoAfterSr();
+
+ bool isMaster();
+ bool isActiveMaster();
+
+ void emptyverificbuffer(Signal *, bool aContintueB);
+ Uint32 findHotSpare();
+ void handleGcpStateInMaster(Signal *, NodeRecordPtr failedNodeptr);
+ void initRestartInfo();
+ void initRestorableGciFiles();
+ void makeNodeGroups(Uint32 nodeArray[]);
+ void makePrnList(class ReadNodesConf * readNodes, Uint32 nodeArray[]);
+ void nodeResetStart();
+ void releaseTabPages(Uint32 tableId);
+ void replication(Uint32 noOfReplicas,
+ NodeGroupRecordPtr NGPtr,
+ FragmentstorePtr regFragptr);
+ void selectMasterCandidateAndSend(Signal *);
+ void setInitialActiveStatus();
+ void setLcpActiveStatusEnd();
+ void setLcpActiveStatusStart(Signal *);
+ void setNodeActiveStatus();
+ void setNodeGroups();
+ void setNodeInfo(Signal *);
+ void setNodeLcpActiveStatus();
+ void setNodeRestartInfoBits();
+ void startGcp(Signal *);
+
+ void readFragment(RWFragment* rf, FragmentstorePtr regFragptr);
+ Uint32 readPageWord(RWFragment* rf);
+ void readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr);
+ void readReplicas(RWFragment* rf, FragmentstorePtr regFragptr);
+ void readRestorableGci(Signal *, FileRecordPtr regFilePtr);
+ void readTabfile(Signal *, TabRecord* tab, FileRecordPtr regFilePtr);
+ void writeFragment(RWFragment* wf, FragmentstorePtr regFragptr);
+ void writePageWord(RWFragment* wf, Uint32 dataWord);
+ void writeReplicas(RWFragment* wf, Uint32 replicaStartIndex);
+ void writeRestorableGci(Signal *, FileRecordPtr regFilePtr);
+ void writeTabfile(Signal *, TabRecord* tab, FileRecordPtr regFilePtr);
+ void copyTabReq_complete(Signal* signal, TabRecordPtr tabPtr);
+
+ void gcpcommitreqLab(Signal *);
+ void gcpsavereqLab(Signal *);
+ void copyGciLab(Signal *, CopyGCIReq::CopyReason reason);
+ void storeNewLcpIdLab(Signal *);
+ void startLcpRoundLoopLab(Signal *, Uint32 startTableId, Uint32 startFragId);
+
+ void nodeFailCompletedCheckLab(Signal*, NodeRecordPtr failedNodePtr);
+
+ /**
+ *
+ */
+ void setLocalNodefailHandling(Signal*, Uint32 failedNodeId,
+ NodefailHandlingStep step);
+ void checkLocalNodefailComplete(Signal*, Uint32 failedNodeId,
+ NodefailHandlingStep step);
+
+ void ndbsttorry10Lab(Signal *, Uint32 _line);
+ void createMutexes(Signal* signal, Uint32 no);
+ void createMutex_done(Signal* signal, Uint32 no, Uint32 retVal);
+ void crashSystemAtGcpStop(Signal *);
+ void sendFirstDictfragsreq(Signal *, TabRecordPtr regTabPtr);
+ void addtabrefuseLab(Signal *, ConnectRecordPtr regConnectPtr, Uint32 errorCode);
+ void GCP_SAVEhandling(Signal *, Uint32 nodeId);
+ void packTableIntoPagesLab(Signal *, Uint32 tableId);
+ void readPagesIntoTableLab(Signal *, Uint32 tableId);
+ void readPagesIntoFragLab(Signal *, RWFragment* rf);
+ void readTabDescriptionLab(Signal *, Uint32 tableId);
+ void copyTableLab(Signal *, Uint32 tableId);
+ void breakCopyTableLab(Signal *,
+ TabRecordPtr regTabPtr,
+ Uint32 nodeId);
+ void checkAddfragCompletedLab(Signal *,
+ TabRecordPtr regTabPtr,
+ Uint32 fragId);
+ void completeRestartLab(Signal *);
+ void readTableFromPagesLab(Signal *, TabRecordPtr regTabPtr);
+ void srPhase2ReadTableLab(Signal *, TabRecordPtr regTabPtr);
+ void checkTcCounterLab(Signal *);
+ void calculateKeepGciLab(Signal *, Uint32 tableId, Uint32 fragId);
+ void tableUpdateLab(Signal *, TabRecordPtr regTabPtr);
+ void checkLcpCompletedLab(Signal *);
+ void initLcpLab(Signal *, Uint32 masterRef, Uint32 tableId);
+ void startGcpLab(Signal *, Uint32 aWaitTime);
+ void checkGcpStopLab(Signal *);
+ void MASTER_GCPhandling(Signal *, Uint32 failedNodeId);
+ void MASTER_LCPhandling(Signal *, Uint32 failedNodeId);
+ void rnfTableNotReadyLab(Signal *, TabRecordPtr regTabPtr, Uint32 removeNodeId);
+ void startLcpTakeOverLab(Signal *, Uint32 failedNodeId);
+
+ void startLcpMasterTakeOver(Signal *, Uint32 failedNodeId);
+ void startGcpMasterTakeOver(Signal *, Uint32 failedNodeId);
+ void checkGcpOutstanding(Signal*, Uint32 failedNodeId);
+
+ void checkEmptyLcpComplete(Signal *);
+ void lcpBlockedLab(Signal *);
+ void breakCheckTabCompletedLab(Signal *, TabRecordPtr regTabptr);
+ void readGciFileLab(Signal *);
+ void openingCopyGciSkipInitLab(Signal *, FileRecordPtr regFilePtr);
+ void startLcpRoundLab(Signal *);
+ void gcpBlockedLab(Signal *);
+ void initialStartCompletedLab(Signal *);
+ void allNodesLcpCompletedLab(Signal *);
+ void nodeRestartPh2Lab(Signal *);
+ void initGciFilesLab(Signal *);
+ void dictStartConfLab(Signal *);
+ void nodeDictStartConfLab(Signal *);
+ void ndbStartReqLab(Signal *, BlockReference ref);
+ void nodeRestartStartRecConfLab(Signal *);
+ void dihCopyCompletedLab(Signal *);
+ void release_connect(ConnectRecordPtr ptr);
+ void copyTableNode(Signal *,
+ CopyTableNode* ctn,
+ NodeRecordPtr regNodePtr);
+ void startFragment(Signal *, Uint32 tableId, Uint32 fragId);
+ bool checkLcpAllTablesDoneInLqh();
+
+ void lcpStateAtNodeFailureLab(Signal *, Uint32 nodeId);
+ void copyNodeLab(Signal *, Uint32 tableId);
+ void copyGciReqLab(Signal *);
+ void allLab(Signal *,
+ ConnectRecordPtr regConnectPtr,
+ TabRecordPtr regTabPtr);
+ void tableCopyNodeLab(Signal *, TabRecordPtr regTabPtr);
+
+ void removeNodeFromTables(Signal *, Uint32 tableId, Uint32 nodeId);
+ void removeNodeFromTable(Signal *, Uint32 tableId, TabRecordPtr tabPtr);
+ void removeNodeFromTablesComplete(Signal* signal, Uint32 nodeId);
+
+ void packFragIntoPagesLab(Signal *, RWFragment* wf);
+ void startNextChkpt(Signal *);
+ void failedNodeLcpHandling(Signal*, NodeRecordPtr failedNodePtr);
+ void failedNodeSynchHandling(Signal *, NodeRecordPtr failedNodePtr);
+ void checkCopyTab(NodeRecordPtr failedNodePtr);
+
+ void initCommonData();
+ void initialiseRecordsLab(Signal *, Uint32 stepNo, Uint32, Uint32);
+
+ void findReplica(ReplicaRecordPtr& regReplicaPtr,
+ Fragmentstore* fragPtrP, Uint32 nodeId);
+//------------------------------------
+// Node failure handling methods
+//------------------------------------
+ void startRemoveFailedNode(Signal *, NodeRecordPtr failedNodePtr);
+ void handleGcpTakeOver(Signal *, NodeRecordPtr failedNodePtr);
+ void handleLcpTakeOver(Signal *, NodeRecordPtr failedNodePtr);
+ void handleNewMaster(Signal *, NodeRecordPtr failedNodePtr);
+ void checkTakeOverInMasterAllNodeFailure(Signal*, NodeRecordPtr failedNode);
+ void checkTakeOverInMasterCopyNodeFailure(Signal*, Uint32 failedNodeId);
+ void checkTakeOverInMasterStartNodeFailure(Signal*, Uint32 takeOverPtr);
+ void checkTakeOverInNonMasterStartNodeFailure(Signal*, Uint32 takeOverPtr);
+ void handleLcpMasterTakeOver(Signal *, Uint32 nodeId);
+
+//------------------------------------
+// Replica record specific methods
+//------------------------------------
+ Uint32 findLogInterval(ConstPtr<ReplicaRecord> regReplicaPtr,
+ Uint32 startGci);
+ void findMinGci(ReplicaRecordPtr fmgReplicaPtr,
+ Uint32& keeGci,
+ Uint32& oldestRestorableGci);
+ bool findStartGci(ConstPtr<ReplicaRecord> fstReplicaPtr,
+ Uint32 tfstStopGci,
+ Uint32& tfstStartGci,
+ Uint32& tfstLcp);
+ void newCrashedReplica(Uint32 nodeId, ReplicaRecordPtr ncrReplicaPtr);
+ void packCrashedReplicas(ReplicaRecordPtr pcrReplicaPtr);
+ void releaseReplicas(Uint32 replicaPtr);
+ void removeOldCrashedReplicas(ReplicaRecordPtr rocReplicaPtr);
+ void removeTooNewCrashedReplicas(ReplicaRecordPtr rtnReplicaPtr);
+ void seizeReplicaRec(ReplicaRecordPtr& replicaPtr);
+
+//------------------------------------
+// Methods operating on a fragment and
+// its connected replicas and nodes.
+//------------------------------------
+ void allocStoredReplica(FragmentstorePtr regFragptr,
+ ReplicaRecordPtr& newReplicaPtr,
+ Uint32 nodeId);
+ Uint32 extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[]);
+ bool findBestLogNode(CreateReplicaRecord* createReplica,
+ FragmentstorePtr regFragptr,
+ Uint32 startGci,
+ Uint32 stopGci,
+ Uint32 logNode,
+ Uint32& fblStopGci);
+ bool findLogNodes(CreateReplicaRecord* createReplica,
+ FragmentstorePtr regFragptr,
+ Uint32 startGci,
+ Uint32 stopGci);
+ void findToReplica(TakeOverRecord* regTakeOver,
+ Uint32 replicaType,
+ FragmentstorePtr regFragptr,
+ ReplicaRecordPtr& ftrReplicaPtr);
+ void initFragstore(FragmentstorePtr regFragptr);
+ void insertBackup(FragmentstorePtr regFragptr, Uint32 nodeId);
+ void insertfraginfo(FragmentstorePtr regFragptr,
+ Uint32 noOfBackups,
+ Uint32* nodeArray);
+ void linkOldStoredReplica(FragmentstorePtr regFragptr,
+ ReplicaRecordPtr replicaPtr);
+ void linkStoredReplica(FragmentstorePtr regFragptr,
+ ReplicaRecordPtr replicaPtr);
+ void prepareReplicas(FragmentstorePtr regFragptr);
+ void removeNodeFromStored(Uint32 nodeId,
+ FragmentstorePtr regFragptr,
+ ReplicaRecordPtr replicaPtr);
+ void removeOldStoredReplica(FragmentstorePtr regFragptr,
+ ReplicaRecordPtr replicaPtr);
+ void removeStoredReplica(FragmentstorePtr regFragptr,
+ ReplicaRecordPtr replicaPtr);
+ void searchStoredReplicas(FragmentstorePtr regFragptr);
+ void updateNodeInfo(FragmentstorePtr regFragptr);
+
+//------------------------------------
+// Fragment allocation, deallocation and
+// find methods
+//------------------------------------
+ void allocFragments(Uint32 noOfFragments, TabRecordPtr regTabPtr);
+ void releaseFragments(TabRecordPtr regTabPtr);
+ void getFragstore(TabRecord *, Uint32 fragNo, FragmentstorePtr & ptr);
+ void initialiseFragstore();
+
+//------------------------------------
+// Page Record specific methods
+//------------------------------------
+ void allocpage(PageRecordPtr& regPagePtr);
+ void releasePage(Uint32 pageIndex);
+
+//------------------------------------
+// Table Record specific methods
+//------------------------------------
+ void initTable(TabRecordPtr regTabPtr);
+ void initTableFile(TabRecordPtr regTabPtr);
+ void releaseTable(TabRecordPtr tabPtr);
+ Uint32 findTakeOver(Uint32 failedNodeId);
+ void handleTakeOverMaster(Signal *, Uint32 takeOverPtr);
+ void handleTakeOverNewMaster(Signal *, Uint32 takeOverPtr);
+
+//------------------------------------
+// TakeOver Record specific methods
+//------------------------------------
+ void initTakeOver(TakeOverRecordPtr regTakeOverptr);
+ void seizeTakeOver(TakeOverRecordPtr& regTakeOverptr);
+ void allocateTakeOver(TakeOverRecordPtr& regTakeOverptr);
+ void releaseTakeOver(Uint32 takeOverPtr);
+ bool anyActiveTakeOver();
+ void checkToCopy();
+ void checkToCopyCompleted(Signal *);
+ bool checkToInterrupted(TakeOverRecordPtr& regTakeOverptr);
+ Uint32 getStartNode(Uint32 takeOverPtr);
+
+//------------------------------------
+// Methods for take over functionality
+//------------------------------------
+ void changeNodeGroups(Uint32 startNode, Uint32 nodeTakenOver);
+ void endTakeOver(Uint32 takeOverPtr);
+ void initStartTakeOver(const class StartToReq *,
+ TakeOverRecordPtr regTakeOverPtr);
+
+ void nodeRestartTakeOver(Signal *, Uint32 startNodeId);
+ void systemRestartTakeOverLab(Signal *);
+ void startTakeOver(Signal *,
+ Uint32 takeOverPtr,
+ Uint32 startNode,
+ Uint32 toNode);
+ void sendStartTo(Signal *, Uint32 takeOverPtr);
+ void startNextCopyFragment(Signal *, Uint32 takeOverPtr);
+ void toCopyFragLab(Signal *, Uint32 takeOverPtr);
+ void startHsAddFragConfLab(Signal *);
+ void prepareSendCreateFragReq(Signal *, Uint32 takeOverPtr);
+ void sendUpdateTo(Signal *, Uint32 takeOverPtr, Uint32 updateState);
+ void toCopyCompletedLab(Signal *, TakeOverRecordPtr regTakeOverptr);
+ void takeOverCompleted(Uint32 aNodeId);
+ void sendEndTo(Signal *, Uint32 takeOverPtr);
+
+//------------------------------------
+// Node Record specific methods
+//------------------------------------
+ void checkStartTakeOver(Signal *);
+ void insertAlive(NodeRecordPtr newNodePtr);
+ void insertDeadNode(NodeRecordPtr removeNodePtr);
+ void removeAlive(NodeRecordPtr removeNodePtr);
+ void removeDeadNode(NodeRecordPtr removeNodePtr);
+
+ NodeRecord::NodeStatus getNodeStatus(Uint32 nodeId);
+ void setNodeStatus(Uint32 nodeId, NodeRecord::NodeStatus);
+ Sysfile::ActiveStatus getNodeActiveStatus(Uint32 nodeId);
+ void setNodeActiveStatus(Uint32 nodeId, Sysfile::ActiveStatus newStatus);
+ void setNodeLcpActiveStatus(Uint32 nodeId, bool newState);
+ bool getNodeLcpActiveStatus(Uint32 nodeId);
+ bool getAllowNodeStart(Uint32 nodeId);
+ void setAllowNodeStart(Uint32 nodeId, bool newState);
+ bool getNodeCopyCompleted(Uint32 nodeId);
+ void setNodeCopyCompleted(Uint32 nodeId, bool newState);
+ bool checkNodeAlive(Uint32 nodeId);
+
+ // Initialisation
+ void initData();
+ void initRecords();
+
+ // Variables to support record structures and their free lists
+
+ ApiConnectRecord *apiConnectRecord;
+ Uint32 capiConnectFileSize;
+
+ ConnectRecord *connectRecord;
+ Uint32 cfirstconnect;
+ Uint32 cconnectFileSize;
+
+ CreateReplicaRecord *createReplicaRecord;
+ Uint32 cnoOfCreateReplicas;
+
+ FileRecord *fileRecord;
+ Uint32 cfirstfreeFile;
+ Uint32 cfileFileSize;
+
+ Fragmentstore *fragmentstore;
+ Uint32 cfirstfragstore;
+ Uint32 cfragstoreFileSize;
+
+ Uint32 c_nextNodeGroup;
+ NodeGroupRecord *nodeGroupRecord;
+
+ NodeRecord *nodeRecord;
+
+ PageRecord *pageRecord;
+ Uint32 cfirstfreepage;
+ Uint32 cpageFileSize;
+
+ ReplicaRecord *replicaRecord;
+ Uint32 cfirstfreeReplica;
+ Uint32 cnoFreeReplicaRec;
+ Uint32 creplicaFileSize;
+
+ TabRecord *tabRecord;
+ Uint32 ctabFileSize;
+
+ TakeOverRecord *takeOverRecord;
+ Uint32 cfirstfreeTakeOver;
+
+ /*
+ 2.4 C O M M O N S T O R E D V A R I A B L E S
+ ----------------------------------------------------
+ */
+ Uint32 cfirstVerifyQueue;
+ Uint32 clastVerifyQueue;
+ Uint32 cverifyQueueCounter;
+
+ /*------------------------------------------------------------------------*/
+ /* THIS VARIABLE KEEPS THE REFERENCES TO FILE RECORDS THAT DESCRIBE */
+ /* THE TWO FILES THAT ARE USED TO STORE THE VARIABLE CRESTART_INFO */
+ /* ON DISK. */
+ /*------------------------------------------------------------------------*/
+ Uint32 crestartInfoFile[2];
+ /*------------------------------------------------------------------------*/
+ /* THIS VARIABLE KEEPS TRACK OF THE STATUS OF A GLOBAL CHECKPOINT */
+ /* PARTICIPANT. THIS IS NEEDED TO HANDLE A NODE FAILURE. WHEN A NODE*/
+ /* FAILURE OCCURS THE PROTOCOL CAN EASILY STALL IF NO ACTION IS */
+ /* TAKEN TO PREVENT THIS. THIS VARIABLE ENSURES SUCH ACTION CAN BE */
+ /* TAKEN. */
+ /*------------------------------------------------------------------------*/
+ enum GcpParticipantState {
+ GCP_PARTICIPANT_READY = 0,
+ GCP_PARTICIPANT_PREPARE_RECEIVED = 1,
+ GCP_PARTICIPANT_COMMIT_RECEIVED = 2,
+ GCP_PARTICIPANT_TC_FINISHED = 3,
+ GCP_PARTICIPANT_COPY_GCI_RECEIVED = 4
+ };
+ GcpParticipantState cgcpParticipantState;
+ /*------------------------------------------------------------------------*/
+ /* THESE VARIABLES ARE USED TO CHECK THAT GCP PROCESSING DOES NOT */
+ /* STOP FOR SOME REASON. */
+ /*------------------------------------------------------------------------*/
+ enum GcpStatus {
+ GCP_READY = 0,
+ GCP_PREPARE_SENT = 1,
+ GCP_COMMIT_SENT = 2,
+ GCP_NODE_FINISHED = 3,
+ GCP_SAVE_LQH_FINISHED = 4
+ };
+ GcpStatus cgcpStatus;
+ Uint32 cgcpStartCounter;
+ Uint32 coldGcpStatus;
+ Uint32 coldGcpId;
+ /*------------------------------------------------------------------------*/
+ /* THIS VARIABLE KEEPS TRACK OF THE STATE OF THIS NODE AS MASTER. */
+ /*------------------------------------------------------------------------*/
+ enum MasterState {
+ MASTER_IDLE = 0, /* NODE IS NOT MASTER */
+ MASTER_ACTIVE = 1, /* NODE IS ACTIVE AS MASTER */
+ MASTER_TAKE_OVER_GCP = 2 /* NODE IS TAKING OVER AS MASTER */
+ };
+ MasterState cmasterState;
+ Uint16 cmasterTakeOverNode;
+
+ struct CopyGCIMaster {
+ CopyGCIMaster(){ m_copyReason = m_waiting = CopyGCIReq::IDLE;}
+ /*------------------------------------------------------------------------*/
+ /* THIS STATE VARIABLE IS USED TO INDICATE IF COPYING OF RESTART */
+ /* INFO WAS STARTED BY A LOCAL CHECKPOINT OR AS PART OF A SYSTEM */
+ /* RESTART. */
+ /*------------------------------------------------------------------------*/
+ CopyGCIReq::CopyReason m_copyReason;
+
+ /*------------------------------------------------------------------------*/
+ /* COPYING RESTART INFO CAN BE STARTED BY LOCAL CHECKPOINTS AND BY */
+ /* GLOBAL CHECKPOINTS. WE CAN HOWEVER ONLY HANDLE ONE SUCH COPY AT */
+ /* A TIME. THUS WE HAVE TO KEEP WAIT INFORMATION IN THIS VARIABLE. */
+ /*------------------------------------------------------------------------*/
+ CopyGCIReq::CopyReason m_waiting;
+ } c_copyGCIMaster;
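The comments above state a single-outstanding-copy rule: only one round of copying the restart information can be in flight, so a second request merely records its reason in m_waiting until the current round completes. A minimal master-side sketch of that rule (requestCopyGci is a hypothetical helper used only for illustration; the copyGciLab method declared earlier presumably fills this role in the real block):

    // Illustrative sketch, assuming the one-copy-at-a-time rule described above.
    void Dbdih::requestCopyGci(Signal* signal, CopyGCIReq::CopyReason reason) // hypothetical
    {
      if (c_copyGCIMaster.m_copyReason != CopyGCIReq::IDLE) {
        jam();
        c_copyGCIMaster.m_waiting = reason;      // remember it; start it when the
        return;                                  // ongoing copy has completed
      }
      c_copyGCIMaster.m_copyReason = reason;     // nothing in flight: start now
      sendLoopMacro(COPY_GCIREQ, sendCOPY_GCIREQ);
    }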
+
+ struct CopyGCISlave {
+ CopyGCISlave(){ m_copyReason = CopyGCIReq::IDLE; m_expectedNextWord = 0;}
+ /*------------------------------------------------------------------------*/
+ /* THIS STATE VARIABLE IS USED TO INDICATE IF COPYING OF RESTART */
+ /* INFO WAS STARTED BY A LOCAL CHECKPOINT OR AS PART OF A SYSTEM */
+ /* RESTART. THIS VARIABLE IS USED BY THE NODE THAT RECEIVES */
+ /* COPY_GCI_REQ. */
+ /*------------------------------------------------------------------------*/
+ Uint32 m_senderData;
+ BlockReference m_senderRef;
+ CopyGCIReq::CopyReason m_copyReason;
+
+ Uint32 m_expectedNextWord;
+ } c_copyGCISlave;
+
+ /*------------------------------------------------------------------------*/
+ /* THIS VARIABLE IS USED TO KEEP TRACK OF THE STATE OF LOCAL */
+ /* CHECKPOINTS. */
+ /*------------------------------------------------------------------------*/
+public:
+ enum LcpStatus {
+ LCP_STATUS_IDLE = 0,
+ LCP_TCGET = 1, // Only master
+ LCP_STATUS_ACTIVE = 2,
+ LCP_CALCULATE_KEEP_GCI = 4, // Only master
+ LCP_COPY_GCI = 5,
+ LCP_INIT_TABLES = 6,
+ LCP_TC_CLOPSIZE = 7, // Only master
+ LCP_START_LCP_ROUND = 8,
+ LCP_TAB_COMPLETED = 9,
+ LCP_TAB_SAVED = 10
+ };
+private:
+
+ struct LcpState {
+ LcpStatus lcpStatus;
+ Uint32 lcpStatusUpdatedPlace;
+
+ void setLcpStatus(LcpStatus status, Uint32 line){
+ lcpStatus = status;
+ lcpStatusUpdatedPlace = line;
+ }
+
+ Uint32 lcpStart;
+ Uint32 lcpStartGcp;
+ Uint32 keepGci; /* USED TO CALCULATE THE GCI TO KEEP AFTER AN LCP */
+ Uint32 oldestRestorableGci;
+
+ struct CurrentFragment {
+ Uint32 tableId;
+ Uint32 fragmentId;
+ } currentFragment;
+
+ Uint32 noOfLcpFragRepOutstanding;
+
+ /*------------------------------------------------------------------------*/
+ /* USED TO ENSURE THAT LCP'S ARE EXECUTED WITH CERTAIN TIME INTERVALS */
+ /* EVEN WHEN SYSTEM IS NOT DOING ANYTHING. */
+ /*------------------------------------------------------------------------*/
+ Uint32 ctimer;
+ Uint32 ctcCounter;
+ Uint32 clcpDelay; /* MAX. 2^(CLCP_DELAY - 2) SEC BETWEEN LCP'S */
+
+ /*------------------------------------------------------------------------*/
+ /* THIS STATE IS USED TO TELL IF THE FIRST LCP AFTER START/RESTART */
+ /* HAS BEEN RUN. AFTER A NODE RESTART THE NODE DOES NOT ENTER */
+ /* STARTED STATE BEFORE THIS IS DONE. */
+ /*------------------------------------------------------------------------*/
+ bool immediateLcpStart;
+ bool m_LCP_COMPLETE_REP_From_Master_Received;
+ SignalCounter m_LCP_COMPLETE_REP_Counter_DIH;
+ SignalCounter m_LCP_COMPLETE_REP_Counter_LQH;
+ SignalCounter m_LAST_LCP_FRAG_ORD;
+ NdbNodeBitmask m_participatingLQH;
+ NdbNodeBitmask m_participatingDIH;
+
+ Uint32 m_masterLcpDihRef;
+ bool m_MASTER_LCPREQ_Received;
+ Uint32 m_MASTER_LCPREQ_FailedNodeId;
+ } c_lcpState;
+
+ /*------------------------------------------------------------------------*/
+ /* THIS VARIABLE KEEPS TRACK OF HOW MANY TABLES ARE ACTIVATED WHEN */
+ /* STARTING A LOCAL CHECKPOINT. WE SHOULD AVOID STARTING A CHECKPOINT */
+ /* WHEN NO TABLES ARE ACTIVATED. */
+ /*------------------------------------------------------------------------*/
+ Uint32 cnoOfActiveTables;
+ Uint32 cgcpDelay; /* Delay between global checkpoints */
+
+ BlockReference cdictblockref; /* DICTIONARY BLOCK REFERENCE */
+ Uint32 cfailurenr; /* EVERY TIME A NODE FAILURE IS REPORTED
+ THIS NUMBER IS INCREMENTED. AT THE START OF
+ THE SYSTEM THIS NUMBER MUST BE INITIALISED TO
+ ZERO */
+ bool cgckptflag; /* A FLAG WHICH IS SET WHILE A NEW GLOBAL CHECK
+ POINT IS BEING CREATED. NO VERIFICATION IS ALLOWED
+ IF THE FLAG IS SET*/
+ Uint32 cgcpOrderBlocked;
+ BlockReference clocallqhblockref;
+ BlockReference clocaltcblockref;
+ BlockReference cmasterdihref;
+ Uint16 cownNodeId;
+ Uint32 cnewgcp;
+ BlockReference cndbStartReqBlockref;
+ BlockReference cntrlblockref;
+ Uint32 cgcpSameCounter;
+ Uint32 coldgcp;
+ Uint32 con_lineNodes;
+ Uint32 creceivedfrag;
+ Uint32 cremainingfrags;
+ Uint32 cstarttype;
+ Uint32 csystemnodes;
+ Uint32 currentgcp;
+
+ enum GcpMasterTakeOverState {
+ GMTOS_IDLE = 0,
+ GMTOS_INITIAL = 1,
+ ALL_READY = 2,
+ ALL_PREPARED = 3,
+ COMMIT_STARTED_NOT_COMPLETED = 4,
+ COMMIT_COMPLETED = 5,
+ PREPARE_STARTED_NOT_COMMITTED = 6,
+ SAVE_STARTED_NOT_COMPLETED = 7
+ };
+ GcpMasterTakeOverState cgcpMasterTakeOverState;
+
+public:
+ enum LcpMasterTakeOverState {
+ LMTOS_IDLE = 0,
+ LMTOS_WAIT_EMPTY_LCP = 1, // Currently doing empty LCP
+ LMTOS_WAIT_LCP_FRAG_REP = 2,// Currently waiting for outst. LCP_FRAG_REP
+ LMTOS_INITIAL = 3,
+ LMTOS_ALL_IDLE = 4,
+ LMTOS_ALL_ACTIVE = 5,
+ LMTOS_LCP_CONCLUDING = 6,
+ LMTOS_COPY_ONGOING = 7
+ };
+private:
+ class MasterTakeOverState {
+ public:
+ void set(LcpMasterTakeOverState s, Uint32 line) {
+ state = s; updatePlace = line;
+ }
+
+ LcpMasterTakeOverState state;
+ Uint32 updatePlace;
+
+ Uint32 minTableId;
+ Uint32 minFragId;
+ Uint32 failedNodeId;
+ } c_lcpMasterTakeOverState;
+
+ Uint16 cmasterNodeId;
+ Uint8 cnoHotSpare;
+
+ struct NodeStartMasterRecord {
+ Uint32 startNode;
+ Uint32 wait;
+ Uint32 failNr;
+ bool activeState;
+ bool blockLcp;
+ bool blockGcp;
+ Uint32 startInfoErrorCode;
+ Uint32 m_outstandingGsn;
+ };
+ NodeStartMasterRecord c_nodeStartMaster;
+
+ struct NodeStartSlaveRecord {
+ NodeStartSlaveRecord() { nodeId = 0;}
+
+ Uint32 nodeId;
+ };
+ NodeStartSlaveRecord c_nodeStartSlave;
+
+ Uint32 cfirstAliveNode;
+ Uint32 cfirstDeadNode;
+ Uint32 cstartPhase;
+ Uint32 cnoReplicas;
+
+ Uint32 c_startToLock;
+ Uint32 c_endToLock;
+ Uint32 c_createFragmentLock;
+ Uint32 c_updateToLock;
+
+ bool cwaitLcpSr;
+ Uint32 cnoOfNodeGroups;
+ bool cstartGcpNow;
+
+ Uint32 crestartGci; /* VALUE OF GCI WHEN SYSTEM RESTARTED OR STARTED */
+ Uint32 cminHotSpareNodes;
+
+ /**
+ * Counter variables keeping track of the number of outstanding replies
+ * for particular signals in various protocols.
+ */
+ SignalCounter c_COPY_GCIREQ_Counter;
+ SignalCounter c_COPY_TABREQ_Counter;
+ SignalCounter c_CREATE_FRAGREQ_Counter;
+ SignalCounter c_DIH_SWITCH_REPLICA_REQ_Counter;
+ SignalCounter c_EMPTY_LCP_REQ_Counter;
+ SignalCounter c_END_TOREQ_Counter;
+ SignalCounter c_GCP_COMMIT_Counter;
+ SignalCounter c_GCP_PREPARE_Counter;
+ SignalCounter c_GCP_SAVEREQ_Counter;
+ SignalCounter c_INCL_NODEREQ_Counter;
+ SignalCounter c_MASTER_GCPREQ_Counter;
+ SignalCounter c_MASTER_LCPREQ_Counter;
+ SignalCounter c_START_INFOREQ_Counter;
+ SignalCounter c_START_RECREQ_Counter;
+ SignalCounter c_START_TOREQ_Counter;
+ SignalCounter c_STOP_ME_REQ_Counter;
+ SignalCounter c_TC_CLOPSIZEREQ_Counter;
+ SignalCounter c_TCGETOPSIZEREQ_Counter;
+ SignalCounter c_UPDATE_TOREQ_Counter;
+ SignalCounter c_START_LCP_REQ_Counter;
+
+ bool c_blockCommit;
+ Uint32 c_blockCommitNo;
+
+ bool getBlockCommit() const {
+ return c_blockCommit || cgckptflag;
+ }
+
+ /**
+ * SwitchReplicaRecord - Should only be used by master
+ */
+ struct SwitchReplicaRecord {
+ void clear(){}
+
+ Uint32 nodeId;
+ Uint32 tableId;
+ Uint32 fragNo;
+ };
+ SwitchReplicaRecord c_switchReplicas;
+
+ struct StopPermProxyRecord {
+ StopPermProxyRecord() { clientRef = 0; }
+
+ Uint32 clientData;
+ BlockReference clientRef;
+ BlockReference masterRef;
+ };
+
+ struct StopPermMasterRecord {
+ StopPermMasterRecord() { clientRef = 0;}
+
+ Uint32 returnValue;
+
+ Uint32 clientData;
+ BlockReference clientRef;
+ };
+
+ StopPermProxyRecord c_stopPermProxy;
+ StopPermMasterRecord c_stopPermMaster;
+
+ void checkStopPermProxy(Signal*, NodeId failedNodeId);
+ void checkStopPermMaster(Signal*, NodeRecordPtr failedNodePtr);
+
+ void switchReplica(Signal*,
+ Uint32 nodeId,
+ Uint32 tableId,
+ Uint32 fragNo);
+
+ void switchReplicaReply(Signal*, NodeId nodeId);
+
+ /**
+ * Wait GCP (proxy)
+ */
+ struct WaitGCPProxyRecord {
+ WaitGCPProxyRecord() { clientRef = 0;}
+
+ Uint32 clientData;
+ BlockReference clientRef;
+ BlockReference masterRef;
+
+ union { Uint32 nextPool; Uint32 nextList; };
+ Uint32 prevList;
+ };
+ typedef Ptr<WaitGCPProxyRecord> WaitGCPProxyPtr;
+
+ /**
+ * Wait GCP (master)
+ */
+ struct WaitGCPMasterRecord {
+ WaitGCPMasterRecord() { clientRef = 0;}
+ Uint32 clientData;
+ BlockReference clientRef;
+
+ union { Uint32 nextPool; Uint32 nextList; };
+ Uint32 prevList;
+ };
+ typedef Ptr<WaitGCPMasterRecord> WaitGCPMasterPtr;
+
+ /**
+ * Pool/list of WaitGCPProxyRecord records
+ */
+ ArrayPool<WaitGCPProxyRecord> waitGCPProxyPool;
+ ArrayList<WaitGCPProxyRecord> c_waitGCPProxyList;
+
+ /**
+ * Pool/list of WaitGCPMasterRecord records
+ */
+ ArrayPool<WaitGCPMasterRecord> waitGCPMasterPool;
+ ArrayList<WaitGCPMasterRecord> c_waitGCPMasterList;
+
+ void checkWaitGCPProxy(Signal*, NodeId failedNodeId);
+ void checkWaitGCPMaster(Signal*, NodeId failedNodeId);
+ void emptyWaitGCPMasterQueue(Signal*);
+
+ /**
+ * Stop me
+ */
+ struct StopMeRecord {
+ StopMeRecord() { clientRef = 0;}
+
+ BlockReference clientRef;
+ Uint32 clientData;
+ };
+ StopMeRecord c_stopMe;
+
+ void checkStopMe(Signal *, NodeRecordPtr failedNodePtr);
+
+#define DIH_CDATA_SIZE 128
+ /**
+ * This variable must be at least the size of Sysfile::SYSFILE_SIZE32
+ */
+ Uint32 cdata[DIH_CDATA_SIZE]; /* TEMPORARY ARRAY VARIABLE */
+
+ /**
+ * Sys file data
+ */
+ Uint32 sysfileData[DIH_CDATA_SIZE];
+ Uint32 sysfileDataToFile[DIH_CDATA_SIZE];
+
+ /**
+ * When a node comes up without a filesystem
+ * we have to clear all LCP information for that node
+ */
+ void invalidateNodeLCP(Signal *, Uint32 nodeId, Uint32 tableId);
+ void invalidateNodeLCP(Signal *, Uint32 nodeId, TabRecordPtr);
+
+ /**
+ * Reply from nodeId
+ */
+ void startInfoReply(Signal *, Uint32 nodeId);
+};
+
+#if (DIH_CDATA_SIZE < _SYSFILE_SIZE32)
+#error "cdata is to small compared to Sysfile size"
+#endif
+
+#endif
+
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
index 9a5efebc56e..9a5efebc56e 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
new file mode 100644
index 00000000000..4441452422e
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -0,0 +1,14352 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBDIH_C
+#include <ndb_limits.h>
+#include <ndb_version.h>
+#include <NdbOut.hpp>
+
+#include "Dbdih.hpp"
+#include "Configuration.hpp"
+
+#include <signaldata/BlockCommitOrd.hpp>
+#include <signaldata/CheckNodeGroups.hpp>
+#include <signaldata/CreateFrag.hpp>
+#include <signaldata/CopyActive.hpp>
+#include <signaldata/CopyFrag.hpp>
+#include <signaldata/CopyGCIReq.hpp>
+#include <signaldata/DiAddTab.hpp>
+#include <signaldata/DictStart.hpp>
+#include <signaldata/DiGetNodes.hpp>
+#include <signaldata/DihContinueB.hpp>
+#include <signaldata/DihSwitchReplica.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+#include <signaldata/EmptyLcp.hpp>
+#include <signaldata/EndTo.hpp>
+#include <signaldata/EventReport.hpp>
+#include <signaldata/GCPSave.hpp>
+#include <signaldata/HotSpareRep.hpp>
+#include <signaldata/MasterGCP.hpp>
+#include <signaldata/MasterLCP.hpp>
+#include <signaldata/NFCompleteRep.hpp>
+#include <signaldata/NodeFailRep.hpp>
+#include <signaldata/ReadNodesConf.hpp>
+#include <signaldata/StartFragReq.hpp>
+#include <signaldata/StartInfo.hpp>
+#include <signaldata/StartMe.hpp>
+#include <signaldata/StartPerm.hpp>
+#include <signaldata/StartRec.hpp>
+#include <signaldata/StartTo.hpp>
+#include <signaldata/StopPerm.hpp>
+#include <signaldata/StopMe.hpp>
+#include <signaldata/TestOrd.hpp>
+#include <signaldata/UpdateTo.hpp>
+#include <signaldata/WaitGCP.hpp>
+#include <signaldata/DihStartTab.hpp>
+#include <signaldata/LCP.hpp>
+#include <signaldata/SystemError.hpp>
+
+#include <signaldata/DropTab.hpp>
+#include <signaldata/AlterTab.hpp>
+#include <signaldata/PrepDropTab.hpp>
+#include <signaldata/SumaImpl.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/CreateFragmentation.hpp>
+#include <signaldata/LqhFrag.hpp>
+#include <signaldata/FsOpenReq.hpp>
+#include <DebuggerNames.hpp>
+
+#include <EventLogger.hpp>
+extern EventLogger g_eventLogger;
+
+#define SYSFILE ((Sysfile *)&sysfileData[0])
+
+#define RETURN_IF_NODE_NOT_ALIVE(node) \
+ if (!checkNodeAlive((node))) { \
+ jam(); \
+ return; \
+ } \
+
+#define RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverIndex, regTOPtr) \
+ regTOPtr.i = takeOverIndex; \
+ ptrCheckGuard(regTOPtr, MAX_NDB_NODES, takeOverRecord); \
+ if (checkToInterrupted(regTOPtr)) { \
+ jam(); \
+ return; \
+ } \
+
+#define receiveLoopMacro(sigName, receiveNodeId)\
+{ \
+ c_##sigName##_Counter.clearWaitingFor(receiveNodeId); \
+ if(c_##sigName##_Counter.done() == false){ \
+ jam(); \
+ return; \
+ } \
+}
+
+#define sendLoopMacro(sigName, signalRoutine) \
+{ \
+ c_##sigName##_Counter.clearWaitingFor(); \
+ NodeRecordPtr specNodePtr; \
+ specNodePtr.i = cfirstAliveNode; \
+ do { \
+ jam(); \
+ ptrCheckGuard(specNodePtr, MAX_NDB_NODES, nodeRecord); \
+ c_##sigName##_Counter.setWaitingFor(specNodePtr.i); \
+ signalRoutine(signal, specNodePtr.i); \
+ specNodePtr.i = specNodePtr.p->nextNode; \
+ } while (specNodePtr.i != RNIL); \
+}
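Taken together, receiveLoopMacro and sendLoopMacro implement the block's fan-out/fan-in pattern: sendLoopMacro walks the alive-node list, marks each node as waiting in the matching SignalCounter and calls the per-node send routine, while receiveLoopMacro clears the replying node and only falls through once the counter is done. A minimal sketch of how the pair composes for one of the counters declared in Dbdih.hpp (the wrapper and CONF-handler names below are assumptions, not taken from this patch):

    // Illustrative sketch: fan out GCP_PREPARE to every alive node, then
    // proceed only when c_GCP_PREPARE_Counter reports that all CONFs arrived.
    void Dbdih::startGcpPrepareRound(Signal* signal)      // hypothetical wrapper
    {
      sendLoopMacro(GCP_PREPARE, sendGCP_PREPARE);        // sets waiting-for bits
    }

    void Dbdih::execGCP_PREPARECONF(Signal* signal)       // handler name assumed
    {
      jamEntry();
      const Uint32 senderNodeId = signal->theData[0];
      receiveLoopMacro(GCP_PREPARE, senderNodeId);        // returns until counter done
      // all participants have prepared; the commit phase can be started here
    }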
+
+static
+Uint32
+prevLcpNo(Uint32 lcpNo){
+ if(lcpNo == 0)
+ return MAX_LCP_STORED - 1;
+ return lcpNo - 1;
+}
+
+static
+Uint32
+nextLcpNo(Uint32 lcpNo){
+ lcpNo++;
+ if(lcpNo == MAX_LCP_STORED)
+ return 0;
+ return lcpNo;
+}
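prevLcpNo and nextLcpNo simply step through the stored-LCP numbers cyclically, wrapping at MAX_LCP_STORED. A small sanity sketch, assuming MAX_LCP_STORED is 3 (treat the concrete value as an assumption here):

    // Illustrative only: with MAX_LCP_STORED == 3 the cycle is 0 -> 1 -> 2 -> 0.
    ndbrequire(nextLcpNo(2) == 0);
    ndbrequire(prevLcpNo(0) == 2);
    ndbrequire(prevLcpNo(nextLcpNo(1)) == 1);   // the two helpers are inverses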
+
+#define gth(x, y) ndbrequire(((int)x)>((int)y))
+
+void Dbdih::nullRoutine(Signal* signal, Uint32 nodeId)
+{
+}//Dbdih::nullRoutine()
+
+void Dbdih::sendCOPY_GCIREQ(Signal* signal, Uint32 nodeId)
+{
+ ndbrequire(c_copyGCIMaster.m_copyReason != CopyGCIReq::IDLE);
+
+ const BlockReference ref = calcDihBlockRef(nodeId);
+ const Uint32 wordPerSignal = CopyGCIReq::DATA_SIZE;
+ const Uint32 noOfSignals = ((Sysfile::SYSFILE_SIZE32 + (wordPerSignal - 1)) /
+ wordPerSignal);
+
+ CopyGCIReq * const copyGCI = (CopyGCIReq *)&signal->theData[0];
+ copyGCI->anyData = nodeId;
+ copyGCI->copyReason = c_copyGCIMaster.m_copyReason;
+ copyGCI->startWord = 0;
+
+ for(Uint32 i = 0; i < noOfSignals; i++) {
+ jam();
+ { // Do copy
+ const int startWord = copyGCI->startWord;
+ for(Uint32 j = 0; j < wordPerSignal; j++) {
+ copyGCI->data[j] = sysfileData[j+startWord];
+ }//for
+ }
+ sendSignal(ref, GSN_COPY_GCIREQ, signal, 25, JBB);
+ copyGCI->startWord += wordPerSignal;
+ }//for
+}//Dbdih::sendCOPY_GCIREQ()
+
+
+void Dbdih::sendDIH_SWITCH_REPLICA_REQ(Signal* signal, Uint32 nodeId)
+{
+ const BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_DIH_SWITCH_REPLICA_REQ, signal,
+ DihSwitchReplicaReq::SignalLength, JBB);
+}//Dbdih::sendDIH_SWITCH_REPLICA_REQ()
+
+void Dbdih::sendEMPTY_LCP_REQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcLqhBlockRef(nodeId);
+ sendSignal(ref, GSN_EMPTY_LCP_REQ, signal, EmptyLcpReq::SignalLength, JBB);
+}//Dbdih::sendEMPTY_LCP_REQ()
+
+void Dbdih::sendEND_TOREQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_END_TOREQ, signal, EndToReq::SignalLength, JBB);
+}//Dbdih::sendEND_TOREQ()
+
+void Dbdih::sendGCP_COMMIT(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcDihBlockRef(nodeId);
+ signal->theData[0] = cownNodeId;
+ signal->theData[1] = cnewgcp;
+ sendSignal(ref, GSN_GCP_COMMIT, signal, 2, JBA);
+}//Dbdih::sendGCP_COMMIT()
+
+void Dbdih::sendGCP_PREPARE(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcDihBlockRef(nodeId);
+ signal->theData[0] = cownNodeId;
+ signal->theData[1] = cnewgcp;
+ sendSignal(ref, GSN_GCP_PREPARE, signal, 2, JBA);
+}//Dbdih::sendGCP_PREPARE()
+
+void Dbdih::sendGCP_SAVEREQ(Signal* signal, Uint32 nodeId)
+{
+ GCPSaveReq * const saveReq = (GCPSaveReq*)&signal->theData[0];
+ BlockReference ref = calcLqhBlockRef(nodeId);
+ saveReq->dihBlockRef = reference();
+ saveReq->dihPtr = nodeId;
+ saveReq->gci = coldgcp;
+ sendSignal(ref, GSN_GCP_SAVEREQ, signal, GCPSaveReq::SignalLength, JBB);
+}//Dbdih::sendGCP_SAVEREQ()
+
+void Dbdih::sendINCL_NODEREQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference nodeDihRef = calcDihBlockRef(nodeId);
+ signal->theData[0] = reference();
+ signal->theData[1] = c_nodeStartMaster.startNode;
+ signal->theData[2] = c_nodeStartMaster.failNr;
+ signal->theData[3] = 0;
+ signal->theData[4] = currentgcp;
+ sendSignal(nodeDihRef, GSN_INCL_NODEREQ, signal, 5, JBB);
+}//Dbdih::sendINCL_NODEREQ()
+
+void Dbdih::sendMASTER_GCPREQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_MASTER_GCPREQ, signal, MasterGCPReq::SignalLength, JBB);
+}//Dbdih::sendMASTER_GCPREQ()
+
+void Dbdih::sendMASTER_LCPREQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_MASTER_LCPREQ, signal, MasterLCPReq::SignalLength, JBB);
+}//Dbdih::sendMASTER_LCPREQ()
+
+void Dbdih::sendSTART_INFOREQ(Signal* signal, Uint32 nodeId)
+{
+ const BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_START_INFOREQ, signal, StartInfoReq::SignalLength, JBB);
+}//sendSTART_INFOREQ()
+
+void Dbdih::sendSTART_RECREQ(Signal* signal, Uint32 nodeId)
+{
+ StartRecReq * const req = (StartRecReq*)&signal->theData[0];
+ BlockReference ref = calcLqhBlockRef(nodeId);
+ req->receivingNodeId = nodeId;
+ req->senderRef = reference();
+ req->keepGci = SYSFILE->keepGCI;
+ req->lastCompletedGci = SYSFILE->lastCompletedGCI[nodeId];
+ req->newestGci = SYSFILE->newestRestorableGCI;
+ sendSignal(ref, GSN_START_RECREQ, signal, StartRecReq::SignalLength, JBB);
+
+ signal->theData[0] = NDB_LE_StartREDOLog;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = SYSFILE->keepGCI;
+ signal->theData[3] = SYSFILE->lastCompletedGCI[nodeId];
+ signal->theData[4] = SYSFILE->newestRestorableGCI;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 5, JBB);
+}//Dbdih::sendSTART_RECREQ()
+
+void Dbdih::sendSTART_TOREQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_START_TOREQ, signal, StartToReq::SignalLength, JBB);
+}//Dbdih::sendSTART_TOREQ()
+
+void Dbdih::sendSTOP_ME_REQ(Signal* signal, Uint32 nodeId)
+{
+ if (nodeId != getOwnNodeId()) {
+ jam();
+ const BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_STOP_ME_REQ, signal, StopMeReq::SignalLength, JBB);
+ }//if
+}//Dbdih::sendSTOP_ME_REQ()
+
+void Dbdih::sendTC_CLOPSIZEREQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcTcBlockRef(nodeId);
+ signal->theData[0] = nodeId;
+ signal->theData[1] = reference();
+ sendSignal(ref, GSN_TC_CLOPSIZEREQ, signal, 2, JBB);
+}//Dbdih::sendTC_CLOPSIZEREQ()
+
+void Dbdih::sendTCGETOPSIZEREQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcTcBlockRef(nodeId);
+ signal->theData[0] = nodeId;
+ signal->theData[1] = reference();
+ sendSignal(ref, GSN_TCGETOPSIZEREQ, signal, 2, JBB);
+}//Dbdih::sendTCGETOPSIZEREQ()
+
+void Dbdih::sendUPDATE_TOREQ(Signal* signal, Uint32 nodeId)
+{
+ const BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_UPDATE_TOREQ, signal, UpdateToReq::SignalLength, JBB);
+}//sendUPDATE_TOREQ()
+
+void Dbdih::execCONTINUEB(Signal* signal)
+{
+ jamEntry();
+ switch ((DihContinueB::Type)signal->theData[0]) {
+ case DihContinueB::ZPACK_TABLE_INTO_PAGES:
+ {
+ jam();
+ Uint32 tableId = signal->theData[1];
+ packTableIntoPagesLab(signal, tableId);
+ return;
+ break;
+ }
+ case DihContinueB::ZPACK_FRAG_INTO_PAGES:
+ {
+ RWFragment wf;
+ jam();
+ wf.rwfTabPtr.i = signal->theData[1];
+ ptrCheckGuard(wf.rwfTabPtr, ctabFileSize, tabRecord);
+ wf.fragId = signal->theData[2];
+ wf.pageIndex = signal->theData[3];
+ wf.wordIndex = signal->theData[4];
+ packFragIntoPagesLab(signal, &wf);
+ return;
+ break;
+ }
+ case DihContinueB::ZREAD_PAGES_INTO_TABLE:
+ {
+ jam();
+ Uint32 tableId = signal->theData[1];
+ readPagesIntoTableLab(signal, tableId);
+ return;
+ break;
+ }
+ case DihContinueB::ZREAD_PAGES_INTO_FRAG:
+ {
+ RWFragment rf;
+ jam();
+ rf.rwfTabPtr.i = signal->theData[1];
+ ptrCheckGuard(rf.rwfTabPtr, ctabFileSize, tabRecord);
+ rf.fragId = signal->theData[2];
+ rf.pageIndex = signal->theData[3];
+ rf.wordIndex = signal->theData[4];
+ readPagesIntoFragLab(signal, &rf);
+ return;
+ break;
+ }
+ case DihContinueB::ZCOPY_TABLE:
+ {
+ jam();
+ Uint32 tableId = signal->theData[1];
+ copyTableLab(signal, tableId);
+ return;
+ }
+ case DihContinueB::ZCOPY_TABLE_NODE:
+ {
+ NodeRecordPtr nodePtr;
+ CopyTableNode ctn;
+ jam();
+ ctn.ctnTabPtr.i = signal->theData[1];
+ ptrCheckGuard(ctn.ctnTabPtr, ctabFileSize, tabRecord);
+ nodePtr.i = signal->theData[2];
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ ctn.pageIndex = signal->theData[3];
+ ctn.wordIndex = signal->theData[4];
+ ctn.noOfWords = signal->theData[5];
+ copyTableNode(signal, &ctn, nodePtr);
+ return;
+ }
+ case DihContinueB::ZSTART_FRAGMENT:
+ {
+ jam();
+ Uint32 tableId = signal->theData[1];
+ Uint32 fragId = signal->theData[2];
+ startFragment(signal, tableId, fragId);
+ return;
+ }
+ case DihContinueB::ZCOMPLETE_RESTART:
+ jam();
+ completeRestartLab(signal);
+ return;
+ case DihContinueB::ZREAD_TABLE_FROM_PAGES:
+ {
+ TabRecordPtr tabPtr;
+ jam();
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ readTableFromPagesLab(signal, tabPtr);
+ return;
+ }
+ case DihContinueB::ZSR_PHASE2_READ_TABLE:
+ {
+ TabRecordPtr tabPtr;
+ jam();
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ srPhase2ReadTableLab(signal, tabPtr);
+ return;
+ }
+ case DihContinueB::ZCHECK_TC_COUNTER:
+ jam();
+#ifndef NO_LCP
+ checkTcCounterLab(signal);
+#endif
+ return;
+ case DihContinueB::ZCALCULATE_KEEP_GCI:
+ {
+ jam();
+ Uint32 tableId = signal->theData[1];
+ Uint32 fragId = signal->theData[2];
+ calculateKeepGciLab(signal, tableId, fragId);
+ return;
+ }
+ case DihContinueB::ZSTORE_NEW_LCP_ID:
+ jam();
+ storeNewLcpIdLab(signal);
+ return;
+ case DihContinueB::ZTABLE_UPDATE:
+ {
+ TabRecordPtr tabPtr;
+ jam();
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ tableUpdateLab(signal, tabPtr);
+ return;
+ }
+ case DihContinueB::ZCHECK_LCP_COMPLETED:
+ {
+ jam();
+ checkLcpCompletedLab(signal);
+ return;
+ }
+ case DihContinueB::ZINIT_LCP:
+ {
+ jam();
+ Uint32 senderRef = signal->theData[1];
+ Uint32 tableId = signal->theData[2];
+ initLcpLab(signal, senderRef, tableId);
+ return;
+ }
+ case DihContinueB::ZADD_TABLE_MASTER_PAGES:
+ {
+ TabRecordPtr tabPtr;
+ jam();
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ tabPtr.p->tabUpdateState = TabRecord::US_ADD_TABLE_MASTER;
+ tableUpdateLab(signal, tabPtr);
+ return;
+ break;
+ }
+ case DihContinueB::ZDIH_ADD_TABLE_MASTER:
+ {
+ jam();
+ addTable_closeConf(signal, signal->theData[1]);
+ return;
+ }
+ case DihContinueB::ZADD_TABLE_SLAVE_PAGES:
+ {
+ TabRecordPtr tabPtr;
+ jam();
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ tabPtr.p->tabUpdateState = TabRecord::US_ADD_TABLE_SLAVE;
+ tableUpdateLab(signal, tabPtr);
+ return;
+ }
+ case DihContinueB::ZDIH_ADD_TABLE_SLAVE:
+ {
+ ndbrequire(false);
+ return;
+ }
+ case DihContinueB::ZSTART_GCP:
+ jam();
+#ifndef NO_GCP
+ startGcpLab(signal, signal->theData[1]);
+#endif
+ return;
+ break;
+ case DihContinueB::ZCOPY_GCI:{
+ jam();
+ CopyGCIReq::CopyReason reason = (CopyGCIReq::CopyReason)signal->theData[1];
+ ndbrequire(c_copyGCIMaster.m_copyReason == reason);
+ sendLoopMacro(COPY_GCIREQ, sendCOPY_GCIREQ);
+ return;
+ }
+ break;
+ case DihContinueB::ZEMPTY_VERIFY_QUEUE:
+ jam();
+ emptyverificbuffer(signal, true);
+ return;
+ break;
+ case DihContinueB::ZCHECK_GCP_STOP:
+ jam();
+#ifndef NO_GCP
+ checkGcpStopLab(signal);
+#endif
+ return;
+ break;
+ case DihContinueB::ZREMOVE_NODE_FROM_TABLE:
+ {
+ jam();
+ Uint32 nodeId = signal->theData[1];
+ Uint32 tableId = signal->theData[2];
+ removeNodeFromTables(signal, nodeId, tableId);
+ return;
+ }
+ case DihContinueB::ZCOPY_NODE:
+ {
+ jam();
+ Uint32 tableId = signal->theData[1];
+ copyNodeLab(signal, tableId);
+ return;
+ }
+ case DihContinueB::ZSTART_TAKE_OVER:
+ {
+ jam();
+ Uint32 takeOverPtrI = signal->theData[1];
+ Uint32 startNode = signal->theData[2];
+ Uint32 toNode = signal->theData[3];
+ startTakeOver(signal, takeOverPtrI, startNode, toNode);
+ return;
+ break;
+ }
+ case DihContinueB::ZCHECK_START_TAKE_OVER:
+ jam();
+ checkStartTakeOver(signal);
+ break;
+ case DihContinueB::ZTO_START_COPY_FRAG:
+ {
+ jam();
+ Uint32 takeOverPtrI = signal->theData[1];
+ startNextCopyFragment(signal, takeOverPtrI);
+ return;
+ }
+ case DihContinueB::ZINVALIDATE_NODE_LCP:
+ {
+ jam();
+ const Uint32 nodeId = signal->theData[1];
+ const Uint32 tableId = signal->theData[2];
+ invalidateNodeLCP(signal, nodeId, tableId);
+ return;
+ }
+ case DihContinueB::ZINITIALISE_RECORDS:
+ jam();
+ initialiseRecordsLab(signal,
+ signal->theData[1],
+ signal->theData[2],
+ signal->theData[3]);
+ return;
+ break;
+ case DihContinueB::ZSTART_PERMREQ_AGAIN:
+ jam();
+ nodeRestartPh2Lab(signal);
+ return;
+ break;
+ case DihContinueB::SwitchReplica:
+ {
+ jam();
+ const Uint32 nodeId = signal->theData[1];
+ const Uint32 tableId = signal->theData[2];
+ const Uint32 fragNo = signal->theData[3];
+ switchReplica(signal, nodeId, tableId, fragNo);
+ return;
+ }
+ case DihContinueB::ZSEND_START_TO:
+ {
+ jam();
+ Uint32 takeOverPtrI = signal->theData[1];
+ sendStartTo(signal, takeOverPtrI);
+ return;
+ }
+ case DihContinueB::ZSEND_ADD_FRAG:
+ {
+ jam();
+ Uint32 takeOverPtrI = signal->theData[1];
+ toCopyFragLab(signal, takeOverPtrI);
+ return;
+ }
+ case DihContinueB::ZSEND_UPDATE_TO:
+ {
+ jam();
+ Uint32 takeOverPtrI = signal->theData[1];
+ Uint32 updateState = signal->theData[4];
+ sendUpdateTo(signal, takeOverPtrI, updateState);
+ return;
+ }
+ case DihContinueB::ZSEND_END_TO:
+ {
+ jam();
+ Uint32 takeOverPtrI = signal->theData[1];
+ sendEndTo(signal, takeOverPtrI);
+ return;
+ }
+ case DihContinueB::ZSEND_CREATE_FRAG:
+ {
+ jam();
+ Uint32 takeOverPtrI = signal->theData[1];
+ Uint32 storedType = signal->theData[2];
+ Uint32 startGci = signal->theData[3];
+ sendCreateFragReq(signal, startGci, storedType, takeOverPtrI);
+ return;
+ }
+ case DihContinueB::WAIT_DROP_TAB_WRITING_TO_FILE:{
+ jam();
+ TabRecordPtr tabPtr;
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ waitDropTabWritingToFile(signal, tabPtr);
+ return;
+ }
+ case DihContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH:{
+ jam();
+ Uint32 nodeId = signal->theData[1];
+ Uint32 tableId = signal->theData[2];
+ checkWaitDropTabFailedLqh(signal, nodeId, tableId);
+ return;
+ }
+ }//switch
+
+ ndbrequire(false);
+ return;
+}//Dbdih::execCONTINUEB()
+
+void Dbdih::execCOPY_GCIREQ(Signal* signal)
+{
+ CopyGCIReq * const copyGCI = (CopyGCIReq *)&signal->theData[0];
+ jamEntry();
+ CopyGCIReq::CopyReason reason = (CopyGCIReq::CopyReason)copyGCI->copyReason;
+ const Uint32 tstart = copyGCI->startWord;
+
+ ndbrequire(cmasterdihref == signal->senderBlockRef()) ;
+ ndbrequire(c_copyGCISlave.m_copyReason == CopyGCIReq::IDLE);
+ ndbrequire(c_copyGCISlave.m_expectedNextWord == tstart);
+ ndbrequire(reason != CopyGCIReq::IDLE);
+
+ arrGuard(tstart + CopyGCIReq::DATA_SIZE, sizeof(sysfileData)/4);
+ for(Uint32 i = 0; i<CopyGCIReq::DATA_SIZE; i++)
+ cdata[tstart+i] = copyGCI->data[i];
+
+ if ((tstart + CopyGCIReq::DATA_SIZE) >= Sysfile::SYSFILE_SIZE32) {
+ jam();
+ c_copyGCISlave.m_expectedNextWord = 0;
+ } else {
+ jam();
+ c_copyGCISlave.m_expectedNextWord += CopyGCIReq::DATA_SIZE;
+ return;
+ }//if
+
+ memcpy(sysfileData, cdata, sizeof(sysfileData));
+
+ c_copyGCISlave.m_copyReason = reason;
+ c_copyGCISlave.m_senderRef = signal->senderBlockRef();
+ c_copyGCISlave.m_senderData = copyGCI->anyData;
+
+ CRASH_INSERTION2(7020, reason==CopyGCIReq::LOCAL_CHECKPOINT);
+ CRASH_INSERTION2(7008, reason==CopyGCIReq::GLOBAL_CHECKPOINT);
+
+ /* -------------------------------------------------------------------------*/
+ /* WE SET THE REQUESTER OF THE COPY GCI TO THE CURRENT MASTER. IF THE */
+ /* CURRENT MASTER FAILS WE DO NOT WANT THE NEW MASTER TO RECEIVE A CONFIRM */
+ /* FOR SOMETHING IT HAS NOT SENT. THE TAKE OVER MUST BE CAREFUL. */
+ /* -------------------------------------------------------------------------*/
+ bool ok = false;
+ switch(reason){
+ case CopyGCIReq::IDLE:
+ ok = true;
+ jam();
+ ndbrequire(false);
+ break;
+ case CopyGCIReq::LOCAL_CHECKPOINT: {
+ ok = true;
+ jam();
+ c_lcpState.setLcpStatus(LCP_COPY_GCI, __LINE__);
+ c_lcpState.m_masterLcpDihRef = cmasterdihref;
+ setNodeInfo(signal);
+ break;
+ }
+ case CopyGCIReq::RESTART: {
+ ok = true;
+ jam();
+ coldgcp = SYSFILE->newestRestorableGCI;
+ crestartGci = SYSFILE->newestRestorableGCI;
+ Sysfile::setRestartOngoing(SYSFILE->systemRestartBits);
+ currentgcp = coldgcp + 1;
+ cnewgcp = coldgcp + 1;
+ setNodeInfo(signal);
+ if ((Sysfile::getLCPOngoing(SYSFILE->systemRestartBits))) {
+ jam();
+ /* -------------------------------------------------------------------- */
+ // IF THERE WAS A LOCAL CHECKPOINT ONGOING AT THE CRASH MOMENT WE WILL
+ // INVALIDATE THAT LOCAL CHECKPOINT.
+ /* -------------------------------------------------------------------- */
+ invalidateLcpInfoAfterSr();
+ }//if
+ break;
+ }
+ case CopyGCIReq::GLOBAL_CHECKPOINT: {
+ ok = true;
+ jam();
+ cgcpParticipantState = GCP_PARTICIPANT_COPY_GCI_RECEIVED;
+ setNodeInfo(signal);
+ break;
+ }//if
+ case CopyGCIReq::INITIAL_START_COMPLETED:
+ ok = true;
+ jam();
+ break;
+ }
+ ndbrequire(ok);
+
+ /* ----------------------------------------------------------------------- */
+ /* WE START BY TRYING TO OPEN THE FIRST RESTORABLE GCI FILE. */
+ /* ----------------------------------------------------------------------- */
+ FileRecordPtr filePtr;
+ filePtr.i = crestartInfoFile[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ if (filePtr.p->fileStatus == FileRecord::OPEN) {
+ jam();
+ openingCopyGciSkipInitLab(signal, filePtr);
+ return;
+ }//if
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_COPY_GCI;
+ return;
+}//Dbdih::execCOPY_GCIREQ()
+
+void Dbdih::execDICTSTARTCONF(Signal* signal)
+{
+ jamEntry();
+ Uint32 nodeId = refToNode(signal->getSendersBlockRef());
+ if (nodeId != getOwnNodeId()) {
+ jam();
+ nodeDictStartConfLab(signal);
+ } else {
+ jam();
+ dictStartConfLab(signal);
+ }//if
+}//Dbdih::execDICTSTARTCONF()
+
+void Dbdih::execFSCLOSECONF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ filePtr.p->fileStatus = FileRecord::CLOSED;
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::CLOSING_GCP:
+ jam();
+ closingGcpLab(signal, filePtr);
+ break;
+ case FileRecord::CLOSING_GCP_CRASH:
+ jam();
+ closingGcpCrashLab(signal, filePtr);
+ break;
+ case FileRecord::CLOSING_TABLE_CRASH:
+ jam();
+ closingTableCrashLab(signal, filePtr);
+ break;
+ case FileRecord::CLOSING_TABLE_SR:
+ jam();
+ closingTableSrLab(signal, filePtr);
+ break;
+ case FileRecord::TABLE_CLOSE:
+ jam();
+ tableCloseLab(signal, filePtr);
+ break;
+ case FileRecord::TABLE_CLOSE_DELETE:
+ jam();
+ tableDeleteLab(signal, filePtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSCLOSECONF()
+
+void Dbdih::execFSCLOSEREF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::CLOSING_GCP:
+ ndbrequire(false);
+ break;
+ case FileRecord::CLOSING_GCP_CRASH:
+ jam();
+ closingGcpCrashLab(signal, filePtr);
+ break;
+ case FileRecord::CLOSING_TABLE_CRASH:
+ jam();
+ closingTableCrashLab(signal, filePtr);
+ break;
+ case FileRecord::CLOSING_TABLE_SR:
+ ndbrequire(false);
+ break;
+ case FileRecord::TABLE_CLOSE:
+ ndbrequire(false);
+ break;
+ case FileRecord::TABLE_CLOSE_DELETE:
+ ndbrequire(false);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSCLOSEREF()
+
+void Dbdih::execFSOPENCONF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ filePtr.p->fileRef = signal->theData[1];
+ filePtr.p->fileStatus = FileRecord::OPEN;
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::CREATING_GCP:
+ jam();
+ creatingGcpLab(signal, filePtr);
+ break;
+ case FileRecord::OPENING_COPY_GCI:
+ jam();
+ openingCopyGciSkipInitLab(signal, filePtr);
+ break;
+ case FileRecord::CREATING_COPY_GCI:
+ jam();
+ openingCopyGciSkipInitLab(signal, filePtr);
+ break;
+ case FileRecord::OPENING_GCP:
+ jam();
+ openingGcpLab(signal, filePtr);
+ break;
+ case FileRecord::OPENING_TABLE:
+ jam();
+ openingTableLab(signal, filePtr);
+ break;
+ case FileRecord::TABLE_CREATE:
+ jam();
+ tableCreateLab(signal, filePtr);
+ break;
+ case FileRecord::TABLE_OPEN_FOR_DELETE:
+ jam();
+ tableOpenLab(signal, filePtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSOPENCONF()
+
+void Dbdih::execFSOPENREF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::CREATING_GCP:
+ /* --------------------------------------------------------------------- */
+ /* WE DID NOT MANAGE TO CREATE A GLOBAL CHECKPOINT FILE. SERIOUS ERROR */
+ /* WHICH CAUSES A SYSTEM RESTART. */
+ /* --------------------------------------------------------------------- */
+ ndbrequire(false);
+ break;
+ case FileRecord::OPENING_COPY_GCI:
+ jam();
+ openingCopyGciErrorLab(signal, filePtr);
+ break;
+ case FileRecord::CREATING_COPY_GCI:
+ ndbrequire(false);
+ break;
+ case FileRecord::OPENING_GCP:
+ jam();
+ openingGcpErrorLab(signal, filePtr);
+ break;
+ case FileRecord::OPENING_TABLE:
+ jam();
+ openingTableErrorLab(signal, filePtr);
+ break;
+ case FileRecord::TABLE_CREATE:
+ ndbrequire(false);
+ break;
+ case FileRecord::TABLE_OPEN_FOR_DELETE:
+ jam();
+ tableDeleteLab(signal, filePtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSOPENREF()
+
+void Dbdih::execFSREADCONF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::READING_GCP:
+ jam();
+ readingGcpLab(signal, filePtr);
+ break;
+ case FileRecord::READING_TABLE:
+ jam();
+ readingTableLab(signal, filePtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSREADCONF()
+
+void Dbdih::execFSREADREF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::READING_GCP:
+ jam();
+ readingGcpErrorLab(signal, filePtr);
+ break;
+ case FileRecord::READING_TABLE:
+ jam();
+ readingTableErrorLab(signal, filePtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSREADREF()
+
+void Dbdih::execFSWRITECONF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::WRITING_COPY_GCI:
+ jam();
+ writingCopyGciLab(signal, filePtr);
+ break;
+ case FileRecord::WRITE_INIT_GCP:
+ jam();
+ writeInitGcpLab(signal, filePtr);
+ break;
+ case FileRecord::TABLE_WRITE:
+ jam();
+ tableWriteLab(signal, filePtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSWRITECONF()
+
+void Dbdih::execFSWRITEREF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::WRITING_COPY_GCI:
+ /* --------------------------------------------------------------------- */
+ /* EVEN CREATING THE FILE DID NOT WORK. WE WILL THEN CRASH. */
+ /* ERROR IN WRITING FILE. WE WILL NOT CONTINUE FROM HERE. */
+ /* --------------------------------------------------------------------- */
+ ndbrequire(false);
+ break;
+ case FileRecord::WRITE_INIT_GCP:
+ /* --------------------------------------------------------------------- */
+ /* AN ERROR OCCURRED IN WRITING A GCI FILE WHICH IS A SERIOUS ERROR */
+ /* THAT CAUSES A SYSTEM RESTART. */
+ /* --------------------------------------------------------------------- */
+ ndbrequire(false);
+ break;
+ case FileRecord::TABLE_WRITE:
+ ndbrequire(false);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSWRITEREF()
+
+void Dbdih::execGETGCIREQ(Signal* signal)
+{
+
+ jamEntry();
+ Uint32 userPtr = signal->theData[0];
+ BlockReference userRef = signal->theData[1];
+
+ signal->theData[0] = userPtr;
+ signal->theData[1] = SYSFILE->newestRestorableGCI;
+ sendSignal(userRef, GSN_GETGCICONF, signal, 2, JBB);
+}//Dbdih::execGETGCIREQ()
+
+void Dbdih::execREAD_CONFIG_REQ(Signal* signal)
+{
+ const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
+ Uint32 ref = req->senderRef;
+ Uint32 senderData = req->senderData;
+ ndbrequire(req->noOfParameters == 0);
+
+ jamEntry();
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_API_CONNECT,
+ &capiConnectFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_CONNECT,&cconnectFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_FRAG_CONNECT,
+ &cfragstoreFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_REPLICAS,
+ &creplicaFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_TABLE, &ctabFileSize));
+ cfileFileSize = (2 * ctabFileSize) + 2;
+ initRecords();
+ initialiseRecordsLab(signal, 0, ref, senderData);
+ return;
+}//Dbdih::execREAD_CONFIG_REQ()
+
+void Dbdih::execSTART_COPYREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dbdih::execSTART_COPYREF()
+
+void Dbdih::execSTART_FRAGCONF(Signal* signal)
+{
+ (void)signal; // Don't want compiler warning
+ /* ********************************************************************* */
+ /* If anyone wants to add functionality in this method, be aware that */
+ /* for temporary tables no START_FRAGREQ is sent and therefore no */
+ /* START_FRAGCONF signal will be received for those tables!! */
+ /* ********************************************************************* */
+ jamEntry();
+ return;
+}//Dbdih::execSTART_FRAGCONF()
+
+void Dbdih::execSTART_MEREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dbdih::execSTART_MEREF()
+
+void Dbdih::execTAB_COMMITREQ(Signal* signal)
+{
+ TabRecordPtr tabPtr;
+ jamEntry();
+ Uint32 tdictPtr = signal->theData[0];
+ BlockReference tdictBlockref = signal->theData[1];
+ tabPtr.i = signal->theData[2];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_CREATING);
+ tabPtr.p->tabStatus = TabRecord::TS_ACTIVE;
+ signal->theData[0] = tdictPtr;
+ signal->theData[1] = cownNodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(tdictBlockref, GSN_TAB_COMMITCONF, signal, 3, JBB);
+ return;
+}//Dbdih::execTAB_COMMITREQ()
+
+/*
+ 3.2 S T A N D A R D S U B P R O G R A M S I N P L E X
+ *************************************************************
+ */
+/*
+ 3.2.1 S T A R T / R E S T A R T
+ **********************************
+ */
+/*****************************************************************************/
+/* ********** START / RESTART MODULE *************/
+/*****************************************************************************/
+/*
+ 3.2.1.1 LOADING O W N B L O C K R E F E R E N C E (ABSOLUTE PHASE 1)
+ *****************************************************************************
+ */
+void Dbdih::execDIH_RESTARTREQ(Signal* signal)
+{
+ jamEntry();
+ cntrlblockref = signal->theData[0];
+ if(theConfiguration.getInitialStart()){
+ sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB);
+ } else {
+ readGciFileLab(signal);
+ }
+ return;
+}//Dbdih::execDIH_RESTARTREQ()
+
+void Dbdih::execSTTOR(Signal* signal)
+{
+ jamEntry();
+
+ signal->theData[0] = 0;
+ signal->theData[1] = 0;
+ signal->theData[2] = 0;
+ signal->theData[3] = 1; // Next start phase
+ signal->theData[4] = 255; // Next start phase
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
+ return;
+}//Dbdih::execSTTOR()
+
+void Dbdih::initialStartCompletedLab(Signal* signal)
+{
+ /*-------------------------------------------------------------------------*/
+ /* NOW THAT (RE)START IS COMPLETED WE CAN START THE LCP.*/
+ /*-------------------------------------------------------------------------*/
+ return;
+}//Dbdih::initialStartCompletedLab()
+
+/*
+ * ***************************************************************************
+ * S E N D I N G R E P L Y T O S T A R T / R E S T A R T R E Q U E S T S
+ * ****************************************************************************
+ */
+void Dbdih::ndbsttorry10Lab(Signal* signal, Uint32 _line)
+{
+ /*-------------------------------------------------------------------------*/
+ // AN NDB START PHASE HAS BEEN COMPLETED. WHEN START PHASE 6 IS COMPLETED WE
+ // RECORD THAT THE SYSTEM IS RUNNING.
+ /*-------------------------------------------------------------------------*/
+ signal->theData[0] = reference();
+ sendSignal(cntrlblockref, GSN_NDB_STTORRY, signal, 1, JBB);
+ return;
+}//Dbdih::ndbsttorry10Lab()
+
+/*
+****************************************
+I N T E R N A L P H A S E S
+****************************************
+*/
+/*---------------------------------------------------------------------------*/
+/*NDB_STTOR START SIGNAL AT START/RESTART */
+/*---------------------------------------------------------------------------*/
+void Dbdih::execNDB_STTOR(Signal* signal)
+{
+ jamEntry();
+ BlockReference cntrRef = signal->theData[0]; /* SENDERS BLOCK REFERENCE */
+ Uint32 ownNodeId = signal->theData[1]; /* OWN PROCESSOR ID*/
+ Uint32 phase = signal->theData[2]; /* INTERNAL START PHASE*/
+ Uint32 typestart = signal->theData[3];
+
+ cstarttype = typestart;
+ cstartPhase = phase;
+
+ switch (phase){
+ case ZNDB_SPH1:
+ jam();
+ /*----------------------------------------------------------------------*/
+ /* Set the delay between local checkpoints in ndb startphase 1. */
+ /*----------------------------------------------------------------------*/
+ cownNodeId = ownNodeId;
+ /*-----------------------------------------------------------------------*/
+ // Compute all static block references in this node as part of
+ // ndb start phase 1.
+ /*-----------------------------------------------------------------------*/
+ cntrlblockref = cntrRef;
+ clocaltcblockref = calcTcBlockRef(ownNodeId);
+ clocallqhblockref = calcLqhBlockRef(ownNodeId);
+ cdictblockref = calcDictBlockRef(ownNodeId);
+ ndbsttorry10Lab(signal, __LINE__);
+ break;
+
+ case ZNDB_SPH2:
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // Set the number of replicas, maximum is 4 replicas.
+ // Read the ndb nodes from the configuration.
+ /*-----------------------------------------------------------------------*/
+
+ /*-----------------------------------------------------------------------*/
+ // For node restarts we will also add a request for permission
+ // to continue the system restart.
+ // The permission is given by the master node in the alive set.
+ /*-----------------------------------------------------------------------*/
+ createMutexes(signal, 0);
+ break;
+
+ case ZNDB_SPH3:
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // Non-master nodes performing an initial start will execute the start
+ // request here since the initial start does not synchronise as much with
+ // the master. In the master node the start request will be sent directly
+ // to dih (in ndb_startreq) when all nodes have completed phase 3 of the
+ // start.
+ /*-----------------------------------------------------------------------*/
+ cmasterState = MASTER_IDLE;
+ if(cstarttype == NodeState::ST_INITIAL_START ||
+ cstarttype == NodeState::ST_SYSTEM_RESTART){
+ jam();
+ cmasterState = isMaster() ? MASTER_ACTIVE : MASTER_IDLE;
+ }
+ if (!isMaster() && cstarttype == NodeState::ST_INITIAL_START) {
+ jam();
+ ndbStartReqLab(signal, cntrRef);
+ return;
+ }//if
+ ndbsttorry10Lab(signal, __LINE__);
+ break;
+
+ case ZNDB_SPH4:
+ jam();
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+ cmasterTakeOverNode = ZNIL;
+ switch(typestart){
+ case NodeState::ST_INITIAL_START:
+ jam();
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ case NodeState::ST_SYSTEM_RESTART:
+ jam();
+ if (isMaster()) {
+ jam();
+ systemRestartTakeOverLab(signal);
+ if (anyActiveTakeOver() && false) {
+ jam();
+ ndbout_c("1 - anyActiveTakeOver == true");
+ return;
+ }
+ }
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ case NodeState::ST_INITIAL_NODE_RESTART:
+ case NodeState::ST_NODE_RESTART:
+ jam();
+ /***********************************************************************
+ * When starting nodes while the system is operational we must be controlled
+ * by the master since only one node restart is allowed at a time.
+ * When this signal is confirmed the master has also copied the
+ * dictionary and the distribution information.
+ */
+ StartMeReq * req = (StartMeReq*)&signal->theData[0];
+ req->startingRef = reference();
+ req->startingVersion = 0; // Obsolete
+ sendSignal(cmasterdihref, GSN_START_MEREQ, signal,
+ StartMeReq::SignalLength, JBB);
+ return;
+ }
+ ndbrequire(false);
+ break;
+ case ZNDB_SPH5:
+ jam();
+ switch(typestart){
+ case NodeState::ST_INITIAL_START:
+ case NodeState::ST_SYSTEM_RESTART:
+ jam();
+ /*---------------------------------------------------------------------*/
+ // WE EXECUTE A LOCAL CHECKPOINT AS PART OF A SYSTEM RESTART. THE IDEA IS
+ // TO ENSURE THAT WE CAN RECOVER FROM PROBLEMS CAUSED BY MANY NODE CRASHES
+ // THAT MAKE THE LOG GROW AND THE NUMBER OF LOG ROUNDS TO EXECUTE GROW.
+ // THIS COULD OTHERWISE PUT US IN AN UNREPAIRABLE SITUATION. THUS WE
+ // EXECUTE A CHECKPOINT BEFORE ALLOWING ANY TRANSACTIONS TO START.
+ /*---------------------------------------------------------------------*/
+ if (!isMaster()) {
+ jam();
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ }//if
+
+ c_lcpState.immediateLcpStart = true;
+ cwaitLcpSr = true;
+ checkLcpStart(signal, __LINE__);
+ return;
+ case NodeState::ST_NODE_RESTART:
+ case NodeState::ST_INITIAL_NODE_RESTART:
+ jam();
+ signal->theData[0] = cownNodeId;
+ signal->theData[1] = reference();
+ sendSignal(cmasterdihref, GSN_START_COPYREQ, signal, 2, JBB);
+ return;
+ }
+ ndbrequire(false);
+ case ZNDB_SPH6:
+ jam();
+ switch(typestart){
+ case NodeState::ST_INITIAL_START:
+ case NodeState::ST_SYSTEM_RESTART:
+ jam();
+ if(isMaster()){
+ jam();
+ startGcp(signal);
+ }
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ case NodeState::ST_NODE_RESTART:
+ case NodeState::ST_INITIAL_NODE_RESTART:
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ }
+ ndbrequire(false);
+ break;
+ default:
+ jam();
+ ndbsttorry10Lab(signal, __LINE__);
+ break;
+ }//switch
+}//Dbdih::execNDB_STTOR()
+
+void
+Dbdih::createMutexes(Signal * signal, Uint32 count){
+ Callback c = { safe_cast(&Dbdih::createMutex_done), count };
+
+ switch(count){
+ case 0:{
+ Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
+ mutex.create(c);
+ return;
+ }
+ case 1:{
+ Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle);
+ mutex.create(c);
+ return;
+ }
+ }
+
+ signal->theData[0] = reference();
+ sendSignal(cntrlblockref, GSN_READ_NODESREQ, signal, 1, JBB);
+}
+
+void
+Dbdih::createMutex_done(Signal* signal, Uint32 senderData, Uint32 retVal){
+ jamEntry();
+ ndbrequire(retVal == 0);
+
+ switch(senderData){
+ case 0:{
+ Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
+ mutex.release();
+ break;
+ }
+ case 1:{
+ Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle);
+ mutex.release();
+ break;
+ }
+ }
+
+ createMutexes(signal, senderData + 1);
+}
+
+/*****************************************************************************/
+/* ------------------------------------------------------------------------- */
+/* WE HAVE BEEN REQUESTED BY NDBCNTR TO PERFORM A RESTART OF THE */
+/* DATABASE TABLES. */
+/* THIS SIGNAL IS SENT AFTER COMPLETING PHASE 3 IN ALL BLOCKS IN A */
+/* SYSTEM RESTART. WE WILL ALSO JUMP TO THIS LABEL FROM PHASE 3 IN AN */
+/* INITIAL START. */
+/* ------------------------------------------------------------------------- */
+/*****************************************************************************/
+void Dbdih::execNDB_STARTREQ(Signal* signal)
+{
+ jamEntry();
+ BlockReference ref = signal->theData[0];
+ cstarttype = signal->theData[1];
+ ndbStartReqLab(signal, ref);
+}//Dbdih::execNDB_STARTREQ()
+
+void Dbdih::ndbStartReqLab(Signal* signal, BlockReference ref)
+{
+ cndbStartReqBlockref = ref;
+ if (cstarttype == NodeState::ST_INITIAL_START) {
+ jam();
+ initRestartInfo();
+ initGciFilesLab(signal);
+ return;
+ }
+
+ ndbrequire(isMaster());
+ copyGciLab(signal, CopyGCIReq::RESTART); // We have already read the file!
+}//Dbdih::ndbStartReqLab()
+
+void Dbdih::execREAD_NODESCONF(Signal* signal)
+{
+ unsigned i;
+ ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
+ jamEntry();
+ Uint32 nodeArray[MAX_NDB_NODES];
+
+ csystemnodes = readNodes->noOfNodes;
+ cmasterNodeId = readNodes->masterNodeId;
+ int index = 0;
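+ /* Build nodeArray[] with all nodes defined in the configuration and */
+ /* count the nodes that are not inactive in con_lineNodes. */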
+ NdbNodeBitmask tmp; tmp.assign(2, readNodes->allNodes);
+ for (i = 1; i < MAX_NDB_NODES; i++){
+ jam();
+ if(tmp.get(i)){
+ jam();
+ nodeArray[index] = i;
+ if(NodeBitmask::get(readNodes->inactiveNodes, i) == false){
+ jam();
+ con_lineNodes++;
+ }//if
+ index++;
+ }//if
+ }//for
+
+ if(cstarttype == NodeState::ST_SYSTEM_RESTART ||
+ cstarttype == NodeState::ST_NODE_RESTART){
+
+ for(i = 1; i<MAX_NDB_NODES; i++){
+ const Uint32 stat = Sysfile::getNodeStatus(i, SYSFILE->nodeStatus);
+ if(stat == Sysfile::NS_NotDefined && !tmp.get(i)){
+ jam();
+ continue;
+ }
+
+ if(tmp.get(i) && stat != Sysfile::NS_NotDefined){
+ jam();
+ continue;
+ }
+ char buf[255];
+ BaseString::snprintf(buf, sizeof(buf),
+ "Illegal configuration change."
+ " Initial start needs to be performed "
+ " when changing no of storage nodes (node %d)", i);
+ progError(__LINE__,
+ ERR_INVALID_CONFIG,
+ buf);
+ }
+ }
+
+ ndbrequire(csystemnodes >= 1 && csystemnodes < MAX_NDB_NODES);
+ if (cstarttype == NodeState::ST_INITIAL_START) {
+ jam();
+ ndbrequire(cnoReplicas <= csystemnodes);
+ calculateHotSpare();
+ ndbrequire(cnoReplicas <= (csystemnodes - cnoHotSpare));
+ }//if
+
+ cmasterdihref = calcDihBlockRef(cmasterNodeId);
+ /*-------------------------------------------------------------------------*/
+ /* MAKE THE LIST OF PRN-RECORDS, WHICH IS ONE OF THE NODE LISTS IN THIS BLOCK */
+ /*-------------------------------------------------------------------------*/
+ makePrnList(readNodes, nodeArray);
+ if (cstarttype == NodeState::ST_INITIAL_START) {
+ jam();
+ /**----------------------------------------------------------------------
+ * WHEN WE INITIALLY START A DATABASE WE WILL CREATE NODE GROUPS.
+ * ALL NODES ARE PUT INTO NODE GROUPS, ALTHOUGH HOT SPARE NODES ARE PUT
+ * INTO A SPECIAL NODE GROUP. EACH NODE GROUP CONTAINS AS MANY NODES AS
+ * THERE ARE REPLICAS.
+ * ONE POSSIBLE USAGE OF NODE GROUPS IS TO MAKE A NODE GROUP A COMPLETE
+ * FRAGMENT OF THE DATABASE. THIS MEANS THAT ALL REPLICAS WILL BE STORED
+ * IN THE NODE GROUP.
+ *-----------------------------------------------------------------------*/
+ makeNodeGroups(nodeArray);
+ }//if
+ ndbrequire(checkNodeAlive(cmasterNodeId));
+ if (cstarttype == NodeState::ST_INITIAL_START) {
+ jam();
+ /**-----------------------------------------------------------------------
+ * INITIALISE THE SECOND NODE-LIST AND SET NODE BITS AND SOME NODE STATUS.
+ * THIS IS CLOSELY CONNECTED WITH MAKE_NODE_GROUPS; CHANGING ONE WILL
+ * AFFECT THE OTHER AS WELL.
+ *-----------------------------------------------------------------------*/
+ setInitialActiveStatus();
+ } else if (cstarttype == NodeState::ST_SYSTEM_RESTART) {
+ jam();
+ /*empty*/;
+ } else if ((cstarttype == NodeState::ST_NODE_RESTART) ||
+ (cstarttype == NodeState::ST_INITIAL_NODE_RESTART)) {
+ jam();
+ nodeRestartPh2Lab(signal);
+ return;
+ } else {
+ ndbrequire(false);
+ }//if
+ /**------------------------------------------------------------------------
+ * ESTABLISH CONNECTIONS WITH THE OTHER DIH BLOCKS AND INITIALISE THIS
+ * NODE-LIST THAT HANDLES CONNECTION WITH OTHER DIH BLOCKS.
+ *-------------------------------------------------------------------------*/
+ ndbsttorry10Lab(signal, __LINE__);
+}//Dbdih::execREAD_NODESCONF()
+
+/*---------------------------------------------------------------------------*/
+/* START NODE LOGIC FOR NODE RESTART */
+/*---------------------------------------------------------------------------*/
+void Dbdih::nodeRestartPh2Lab(Signal* signal)
+{
+ /*------------------------------------------------------------------------*/
+ // REQUEST FOR PERMISSION FROM MASTER TO START A NODE IN AN ALREADY
+ // RUNNING SYSTEM.
+ /*------------------------------------------------------------------------*/
+ StartPermReq * const req = (StartPermReq *)&signal->theData[0];
+
+ req->blockRef = reference();
+ req->nodeId = cownNodeId;
+ req->startType = cstarttype;
+ sendSignal(cmasterdihref, GSN_START_PERMREQ, signal, 3, JBB);
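+ /* The master answers with START_PERMCONF or START_PERMREF, handled below. */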
+}//Dbdih::nodeRestartPh2Lab()
+
+void Dbdih::execSTART_PERMCONF(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(7121);
+ Uint32 nodeId = signal->theData[0];
+ cfailurenr = signal->theData[1];
+ ndbrequire(nodeId == cownNodeId);
+ ndbsttorry10Lab(signal, __LINE__);
+}//Dbdih::execSTART_PERMCONF()
+
+void Dbdih::execSTART_PERMREF(Signal* signal)
+{
+ jamEntry();
+ Uint32 errorCode = signal->theData[1];
+ if (errorCode == ZNODE_ALREADY_STARTING_ERROR) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // The master was busy adding another node. We will wait for three seconds
+ // and then try again.
+ /*-----------------------------------------------------------------------*/
+ signal->theData[0] = DihContinueB::ZSTART_PERMREQ_AGAIN;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 3000, 1);
+ return;
+ }//if
+ /*------------------------------------------------------------------------*/
+ // Some process in another node involving our node was still active. We
+ // will recover from this by crashing here. This is a controlled restart
+ // that uses the already existing node crash handling. Getting here is not
+ // a bug.
+ /*-------------------------------------------------------------------------*/
+ ndbrequire(false);
+ return;
+}//Dbdih::execSTART_PERMREF()
+
+/*---------------------------------------------------------------------------*/
+/* THIS SIGNAL IS RECEIVED IN THE STARTING NODE WHEN THE START_MEREQ */
+/* HAS BEEN EXECUTED IN THE MASTER NODE. */
+/*---------------------------------------------------------------------------*/
+void Dbdih::execSTART_MECONF(Signal* signal)
+{
+ jamEntry();
+ StartMeConf * const startMe = (StartMeConf *)&signal->theData[0];
+ Uint32 nodeId = startMe->startingNodeId;
+ const Uint32 startWord = startMe->startWord;
+ Uint32 i;
+
+ CRASH_INSERTION(7130);
+ ndbrequire(nodeId == cownNodeId);
+ arrGuard(startWord + StartMeConf::DATA_SIZE, sizeof(cdata)/4);
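+ /* The sysfile arrives split over several START_MECONF signals, each */
+ /* carrying StartMeConf::DATA_SIZE words; collect them in cdata[] until */
+ /* all Sysfile::SYSFILE_SIZE32 words have been received. */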
+ for(i = 0; i < StartMeConf::DATA_SIZE; i++)
+ cdata[startWord+i] = startMe->data[i];
+
+ if(startWord + StartMeConf::DATA_SIZE < Sysfile::SYSFILE_SIZE32){
+ jam();
+ /**
+ * We are still waiting for data
+ */
+ return;
+ }
+ jam();
+
+ /**
+ * Copy into sysfile
+ *
+ * But don't copy the lastCompletedGCI values
+ */
+ Uint32 tempGCP[MAX_NDB_NODES];
+ for(i = 0; i < MAX_NDB_NODES; i++)
+ tempGCP[i] = SYSFILE->lastCompletedGCI[i];
+
+ for(i = 0; i < Sysfile::SYSFILE_SIZE32; i++)
+ sysfileData[i] = cdata[i];
+ for(i = 0; i < MAX_NDB_NODES; i++)
+ SYSFILE->lastCompletedGCI[i] = tempGCP[i];
+
+ setNodeActiveStatus();
+ setNodeGroups();
+ ndbsttorry10Lab(signal, __LINE__);
+}//Dbdih::execSTART_MECONF()
+
+void Dbdih::execSTART_COPYCONF(Signal* signal)
+{
+ jamEntry();
+ Uint32 nodeId = signal->theData[0];
+ ndbrequire(nodeId == cownNodeId);
+ CRASH_INSERTION(7132);
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+}//Dbdih::execSTART_COPYCONF()
+
+/*---------------------------------------------------------------------------*/
+/* MASTER LOGIC FOR NODE RESTART */
+/*---------------------------------------------------------------------------*/
+/* NODE RESTART PERMISSION REQUEST */
+/*---------------------------------------------------------------------------*/
+// A REQUEST FROM A STARTING NODE TO PERFORM A NODE RESTART. IF NO OTHER NODE
+// IS ACTIVE IN PERFORMING A NODE RESTART AND THERE ARE NO ACTIVE PROCESSES IN
+// THIS NODE INVOLVING THE STARTING NODE THIS REQUEST WILL BE GRANTED.
+/*---------------------------------------------------------------------------*/
+void Dbdih::execSTART_PERMREQ(Signal* signal)
+{
+ StartPermReq * const req = (StartPermReq*)&signal->theData[0];
+ jamEntry();
+ const BlockReference retRef = req->blockRef;
+ const Uint32 nodeId = req->nodeId;
+ const Uint32 typeStart = req->startType;
+
+ CRASH_INSERTION(7122);
+ ndbrequire(isMaster());
+ ndbrequire(refToNode(retRef) == nodeId);
+ if ((c_nodeStartMaster.activeState) ||
+ (c_nodeStartMaster.wait != ZFALSE)) {
+ jam();
+ signal->theData[0] = nodeId;
+ signal->theData[1] = ZNODE_ALREADY_STARTING_ERROR;
+ sendSignal(retRef, GSN_START_PERMREF, signal, 2, JBB);
+ return;
+ }//if
+ if (getNodeStatus(nodeId) != NodeRecord::DEAD){
+ ndbout << "nodeStatus in START_PERMREQ = "
+ << (Uint32) getNodeStatus(nodeId) << endl;
+ ndbrequire(false);
+ }//if
+
+ /*----------------------------------------------------------------------
+ * WE START THE INCLUSION PROCEDURE
+ * ---------------------------------------------------------------------*/
+ c_nodeStartMaster.failNr = cfailurenr;
+ c_nodeStartMaster.wait = ZFALSE;
+ c_nodeStartMaster.startInfoErrorCode = 0;
+ c_nodeStartMaster.startNode = nodeId;
+ c_nodeStartMaster.activeState = true;
+ c_nodeStartMaster.m_outstandingGsn = GSN_START_INFOREQ;
+
+ setNodeStatus(nodeId, NodeRecord::STARTING);
+ /**
+ * If it is a NodeState::ST_INITIAL_NODE_RESTART we first have to clear
+ * the LCP information for the starting node.
+ * For a normal node restart we simply ensure that all nodes are informed
+ * of the node restart.
+ */
+ StartInfoReq *const r =(StartInfoReq*)&signal->theData[0];
+ r->startingNodeId = nodeId;
+ r->typeStart = typeStart;
+ r->systemFailureNo = cfailurenr;
+ sendLoopMacro(START_INFOREQ, sendSTART_INFOREQ);
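+ /* The reply to the starting node (START_PERMCONF or START_PERMREF) is */
+ /* sent from startInfoReply() once every alive node has answered the */
+ /* START_INFOREQ broadcast above. */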
+}//Dbdih::execSTART_PERMREQ()
+
+void Dbdih::execSTART_INFOREF(Signal* signal)
+{
+ StartInfoRef * ref = (StartInfoRef*)&signal->theData[0];
+ if (getNodeStatus(ref->startingNodeId) != NodeRecord::STARTING) {
+ jam();
+ return;
+ }//if
+ ndbrequire(c_nodeStartMaster.startNode == ref->startingNodeId);
+ c_nodeStartMaster.startInfoErrorCode = ref->errorCode;
+ startInfoReply(signal, ref->sendingNodeId);
+}//Dbdih::execSTART_INFOREF()
+
+void Dbdih::execSTART_INFOCONF(Signal* signal)
+{
+ jamEntry();
+ StartInfoConf * conf = (StartInfoConf*)&signal->theData[0];
+ if (getNodeStatus(conf->startingNodeId) != NodeRecord::STARTING) {
+ jam();
+ return;
+ }//if
+ ndbrequire(c_nodeStartMaster.startNode == conf->startingNodeId);
+ startInfoReply(signal, conf->sendingNodeId);
+}//Dbdih::execSTART_INFOCONF()
+
+void Dbdih::startInfoReply(Signal* signal, Uint32 nodeId)
+{
+ receiveLoopMacro(START_INFOREQ, nodeId);
+ /**
+ * We're finished with the START_INFOREQ's
+ */
+ if (c_nodeStartMaster.startInfoErrorCode == 0) {
+ jam();
+ /**
+ * Everything has been a success so far
+ */
+ StartPermConf * conf = (StartPermConf*)&signal->theData[0];
+ conf->startingNodeId = c_nodeStartMaster.startNode;
+ conf->systemFailureNo = cfailurenr;
+ sendSignal(calcDihBlockRef(c_nodeStartMaster.startNode),
+ GSN_START_PERMCONF, signal, StartPermConf::SignalLength, JBB);
+ c_nodeStartMaster.m_outstandingGsn = GSN_START_PERMCONF;
+ } else {
+ jam();
+ StartPermRef * ref = (StartPermRef*)&signal->theData[0];
+ ref->startingNodeId = c_nodeStartMaster.startNode;
+ ref->errorCode = c_nodeStartMaster.startInfoErrorCode;
+ sendSignal(calcDihBlockRef(c_nodeStartMaster.startNode),
+ GSN_START_PERMREF, signal, StartPermRef::SignalLength, JBB);
+ nodeResetStart();
+ }//if
+}//Dbdih::startInfoReply()
+
+/*---------------------------------------------------------------------------*/
+/* NODE RESTART CONTINUE REQUEST */
+/*---------------------------------------------------------------------------*/
+ // THIS SIGNAL AND THE CODE BELOW ARE EXECUTED BY THE MASTER WHEN IT HAS BEEN
+// REQUESTED TO START UP A NEW NODE. The master instructs the starting node
+// how to set up its log for continued execution.
+/*---------------------------------------------------------------------------*/
+void Dbdih::execSTART_MEREQ(Signal* signal)
+{
+ StartMeReq * req = (StartMeReq*)&signal->theData[0];
+ jamEntry();
+ const BlockReference Tblockref = req->startingRef;
+ const Uint32 Tnodeid = refToNode(Tblockref);
+
+ ndbrequire(isMaster());
+ ndbrequire(c_nodeStartMaster.startNode == Tnodeid);
+ ndbrequire(getNodeStatus(Tnodeid) == NodeRecord::STARTING);
+
+ sendSTART_RECREQ(signal, Tnodeid);
+}//Dbdih::execSTART_MEREQ()
+
+void Dbdih::nodeRestartStartRecConfLab(Signal* signal)
+{
+ c_nodeStartMaster.blockLcp = true;
+ if ((c_lcpState.lcpStatus != LCP_STATUS_IDLE) &&
+ (c_lcpState.lcpStatus != LCP_TCGET)) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // WE WILL NOT ALLOW A NODE RESTART TO COME IN WHEN A LOCAL CHECKPOINT IS
+ // ONGOING. IT WOULD COMPLICATE THE LCP PROTOCOL TOO MUCH. WE WILL ADD THIS
+ // LATER.
+ /*-----------------------------------------------------------------------*/
+ return;
+ }//if
+ lcpBlockedLab(signal);
+}//Dbdih::nodeRestartStartRecConfLab()
+
+void Dbdih::lcpBlockedLab(Signal* signal)
+{
+ ndbrequire(getNodeStatus(c_nodeStartMaster.startNode)==NodeRecord::STARTING);
+ /*------------------------------------------------------------------------*/
+ // NOW WE HAVE COPIED ALL INFORMATION IN DICT WE ARE NOW READY TO COPY ALL
+ // INFORMATION IN DIH TO THE NEW NODE.
+ /*------------------------------------------------------------------------*/
+ c_nodeStartMaster.wait = 10;
+ signal->theData[0] = DihContinueB::ZCOPY_NODE;
+ signal->theData[1] = 0;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ c_nodeStartMaster.m_outstandingGsn = GSN_COPY_TABREQ;
+}//Dbdih::lcpBlockedLab()
+
+void Dbdih::nodeDictStartConfLab(Signal* signal)
+{
+ /*-------------------------------------------------------------------------*/
+ // NOW WE HAVE COPIED BOTH DIH AND DICT INFORMATION. WE ARE NOW READY TO
+ // INTEGRATE THE NODE INTO THE LCP AND GCP PROTOCOLS AND TO ALLOW UPDATES OF
+ // THE DICTIONARY AGAIN.
+ /*-------------------------------------------------------------------------*/
+ c_nodeStartMaster.wait = ZFALSE;
+ c_nodeStartMaster.blockGcp = true;
+ if (cgcpStatus != GCP_READY) {
+ /*-----------------------------------------------------------------------*/
+ // The global checkpoint is executing. Wait until it is completed before we
+ // continue processing the node recovery.
+ /*-----------------------------------------------------------------------*/
+ jam();
+ return;
+ }//if
+ gcpBlockedLab(signal);
+
+ /*-----------------------------------------------------------------*/
+ // Report that node restart has completed copy of dictionary.
+ /*-----------------------------------------------------------------*/
+ signal->theData[0] = NDB_LE_NR_CopyDict;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
+}//Dbdih::nodeDictStartConfLab()
+
+void Dbdih::dihCopyCompletedLab(Signal* signal)
+{
+ BlockReference ref = calcDictBlockRef(c_nodeStartMaster.startNode);
+ DictStartReq * req = (DictStartReq*)&signal->theData[0];
+ req->restartGci = cnewgcp;
+ req->senderRef = reference();
+ sendSignal(ref, GSN_DICTSTARTREQ,
+ signal, DictStartReq::SignalLength, JBB);
+ c_nodeStartMaster.m_outstandingGsn = GSN_DICTSTARTREQ;
+ c_nodeStartMaster.wait = 0;
+}//Dbdih::dihCopyCompletedLab()
+
+void Dbdih::gcpBlockedLab(Signal* signal)
+{
+ /*-----------------------------------------------------------------*/
+ // Report that node restart has completed copy of distribution info.
+ /*-----------------------------------------------------------------*/
+ signal->theData[0] = NDB_LE_NR_CopyDistr;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
+
+ /**
+ * The node DIH will be part of LCP
+ */
+ NodeRecordPtr nodePtr;
+ nodePtr.i = c_nodeStartMaster.startNode;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ nodePtr.p->m_inclDihLcp = true;
+
+ /*-------------------------------------------------------------------------*/
+ // NOW IT IS TIME TO INFORM ALL OTHER NODES IN THE CLUSTER OF THE STARTED
+ // NODE SUCH THAT THEY ALSO INCLUDE THE NODE IN THE NODE LISTS AND SO FORTH.
+ /*------------------------------------------------------------------------*/
+ sendLoopMacro(INCL_NODEREQ, sendINCL_NODEREQ);
+ /*-------------------------------------------------------------------------*/
+ // We also need to send to the starting node to ensure it is aware of the
+ // global checkpoint id and the correct state. We do not wait for any reply
+ // since the starting node will not send any.
+ /*-------------------------------------------------------------------------*/
+ sendINCL_NODEREQ(signal, c_nodeStartMaster.startNode);
+}//Dbdih::gcpBlockedLab()
+
+/*---------------------------------------------------------------------------*/
+// THIS SIGNAL IS EXECUTED IN BOTH SLAVES AND IN THE MASTER
+/*---------------------------------------------------------------------------*/
+void Dbdih::execINCL_NODECONF(Signal* signal)
+{
+ Uint32 TsendNodeId;
+ Uint32 TstartNode_or_blockref;
+
+ jamEntry();
+ TstartNode_or_blockref = signal->theData[0];
+ TsendNodeId = signal->theData[1];
+
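+ /* In the starting/slave node this CONF is chained through the local */
+ /* blocks: LQH -> TC -> DICT -> BACKUP (SUMA and GREP get the request */
+ /* without replying). When the chain is complete the result is reported */
+ /* to the master DIH, whose handling follows at the end of this routine. */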
+ if (TstartNode_or_blockref == clocallqhblockref) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // THIS SIGNAL CAME FROM THE LOCAL LQH BLOCK.
+ // WE WILL NOW SEND INCLUDE TO THE TC BLOCK.
+ /*-----------------------------------------------------------------------*/
+ signal->theData[0] = reference();
+ signal->theData[1] = c_nodeStartSlave.nodeId;
+ sendSignal(clocaltcblockref, GSN_INCL_NODEREQ, signal, 2, JBB);
+ return;
+ }//if
+ if (TstartNode_or_blockref == clocaltcblockref) {
+ jam();
+ /*----------------------------------------------------------------------*/
+ // THIS SIGNAL CAME FROM THE LOCAL TC BLOCK.
+ // WE WILL NOW SEND INCLUDE TO THE DICT BLOCK.
+ /*----------------------------------------------------------------------*/
+ signal->theData[0] = reference();
+ signal->theData[1] = c_nodeStartSlave.nodeId;
+ sendSignal(cdictblockref, GSN_INCL_NODEREQ, signal, 2, JBB);
+ return;
+ }//if
+ if (TstartNode_or_blockref == cdictblockref) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // THIS SIGNAL CAME FROM THE LOCAL DICT BLOCK. WE WILL NOW SEND CONF TO THE
+ // BACKUP.
+ /*-----------------------------------------------------------------------*/
+ signal->theData[0] = reference();
+ signal->theData[1] = c_nodeStartSlave.nodeId;
+ sendSignal(BACKUP_REF, GSN_INCL_NODEREQ, signal, 2, JBB);
+
+ // Suma will not send response to this for now, later...
+ sendSignal(SUMA_REF, GSN_INCL_NODEREQ, signal, 2, JBB);
+ // Grep will not send response to this for now, later...
+ sendSignal(GREP_REF, GSN_INCL_NODEREQ, signal, 2, JBB);
+ return;
+ }//if
+ if (TstartNode_or_blockref == numberToRef(BACKUP, getOwnNodeId())){
+ jam();
+ signal->theData[0] = c_nodeStartSlave.nodeId;
+ signal->theData[1] = cownNodeId;
+ sendSignal(cmasterdihref, GSN_INCL_NODECONF, signal, 2, JBB);
+ c_nodeStartSlave.nodeId = 0;
+ return;
+ }
+
+ ndbrequire(cmasterdihref == reference());
+ receiveLoopMacro(INCL_NODEREQ, TsendNodeId);
+
+ CRASH_INSERTION(7128);
+ /*-------------------------------------------------------------------------*/
+ // Now that we have included the starting node in the node lists in the
+ // various blocks we are ready to start the global checkpoint protocol
+ /*------------------------------------------------------------------------*/
+ c_nodeStartMaster.wait = 11;
+ c_nodeStartMaster.blockGcp = false;
+
+ signal->theData[0] = reference();
+ sendSignal(reference(), GSN_UNBLO_DICTCONF, signal, 1, JBB);
+}//Dbdih::execINCL_NODECONF()
+
+void Dbdih::execUNBLO_DICTCONF(Signal* signal)
+{
+ jamEntry();
+ c_nodeStartMaster.wait = ZFALSE;
+ if (!c_nodeStartMaster.activeState) {
+ jam();
+ return;
+ }//if
+
+ CRASH_INSERTION(7129);
+ /**-----------------------------------------------------------------------
+ * WE HAVE NOW PREPARED IT FOR INCLUSION IN THE LCP PROTOCOL.
+ * WE CAN NOW START THE LCP PROTOCOL AGAIN.
+ * WE HAVE ALSO MADE THIS FOR THE GCP PROTOCOL.
+ * WE ARE READY TO START THE PROTOCOLS AND RESPOND TO THE START REQUEST
+ * FROM THE STARTING NODE.
+ *------------------------------------------------------------------------*/
+
+ StartMeConf * const startMe = (StartMeConf *)&signal->theData[0];
+
+ const Uint32 wordPerSignal = StartMeConf::DATA_SIZE;
+ const int noOfSignals = ((Sysfile::SYSFILE_SIZE32 + (wordPerSignal - 1)) /
+ wordPerSignal);
+
+ startMe->startingNodeId = c_nodeStartMaster.startNode;
+ startMe->startWord = 0;
+
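+ /* Send the sysfile to the starting node in chunks of */
+ /* StartMeConf::DATA_SIZE words; startWord tells the receiver where in */
+ /* the sysfile each chunk belongs (see execSTART_MECONF). */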
+ const Uint32 ref = calcDihBlockRef(c_nodeStartMaster.startNode);
+ for(int i = 0; i < noOfSignals; i++){
+ jam();
+ { // Do copy
+ const int startWord = startMe->startWord;
+ for(Uint32 j = 0; j < wordPerSignal; j++){
+ startMe->data[j] = sysfileData[j+startWord];
+ }
+ }
+ sendSignal(ref, GSN_START_MECONF, signal, StartMeConf::SignalLength, JBB);
+ startMe->startWord += wordPerSignal;
+ }//for
+ c_nodeStartMaster.m_outstandingGsn = GSN_START_MECONF;
+}//Dbdih::execUNBLO_DICTCONF()
+
+/*---------------------------------------------------------------------------*/
+/* NODE RESTART COPY REQUEST */
+/*---------------------------------------------------------------------------*/
+// A NODE RESTART HAS REACHED ITS FINAL PHASE WHEN THE DATA IS TO BE COPIED
+// TO THE NODE. START_COPYREQ IS EXECUTED BY THE MASTER NODE.
+/*---------------------------------------------------------------------------*/
+void Dbdih::execSTART_COPYREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 startNodeId = signal->theData[0];
+ //BlockReference startingRef = signal->theData[1];
+ ndbrequire(c_nodeStartMaster.startNode == startNodeId);
+ /*-------------------------------------------------------------------------*/
+ // REPORT Copy process of node restart is now about to start up.
+ /*-------------------------------------------------------------------------*/
+ signal->theData[0] = NDB_LE_NR_CopyFragsStarted;
+ signal->theData[1] = startNodeId;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ CRASH_INSERTION(7131);
+ nodeRestartTakeOver(signal, startNodeId);
+ // BlockReference ref = calcQmgrBlockRef(startNodeId);
+ // signal->theData[0] = cownNodeId;
+ // Remove comments as soon as I open up the Qmgr block
+ // TODO_RONM
+ // sendSignal(ref, GSN_ALLOW_NODE_CRASHORD, signal, 1, JBB);
+}//Dbdih::execSTART_COPYREQ()
+
+/*---------------------------------------------------------------------------*/
+/* SLAVE LOGIC FOR NODE RESTART */
+/*---------------------------------------------------------------------------*/
+void Dbdih::execSTART_INFOREQ(Signal* signal)
+{
+ jamEntry();
+ StartInfoReq *const req =(StartInfoReq*)&signal->theData[0];
+ Uint32 startNode = req->startingNodeId;
+ if (cfailurenr != req->systemFailureNo) {
+ jam();
+ //---------------------------------------------------------------
+ // A failure has occurred since the master sent this request. We will
+ // ignore this request since the starting node is already dead.
+ //---------------------------------------------------------------
+ return;
+ }//if
+ CRASH_INSERTION(7123);
+ if (isMaster()) {
+ jam();
+ ndbrequire(getNodeStatus(startNode) == NodeRecord::STARTING);
+ } else {
+ jam();
+ ndbrequire(getNodeStatus(startNode) == NodeRecord::DEAD);
+ }//if
+ if ((!getAllowNodeStart(startNode)) ||
+ (c_nodeStartSlave.nodeId != 0) ||
+ (ERROR_INSERTED(7124))) {
+ jam();
+ StartInfoRef *const ref =(StartInfoRef*)&signal->theData[0];
+ ref->startingNodeId = startNode;
+ ref->sendingNodeId = cownNodeId;
+ ref->errorCode = ZNODE_START_DISALLOWED_ERROR;
+ sendSignal(cmasterdihref, GSN_START_INFOREF, signal,
+ StartInfoRef::SignalLength, JBB);
+ return;
+ }//if
+ setNodeStatus(startNode, NodeRecord::STARTING);
+ if (req->typeStart == NodeState::ST_INITIAL_NODE_RESTART) {
+ jam();
+ setAllowNodeStart(startNode, false);
+ invalidateNodeLCP(signal, startNode, 0);
+ } else {
+ jam();
+ StartInfoConf * c = (StartInfoConf*)&signal->theData[0];
+ c->sendingNodeId = cownNodeId;
+ c->startingNodeId = startNode;
+ sendSignal(cmasterdihref, GSN_START_INFOCONF, signal,
+ StartInfoConf::SignalLength, JBB);
+ return;
+ }//if
+}//Dbdih::execSTART_INFOREQ()
+
+void Dbdih::execINCL_NODEREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 retRef = signal->theData[0];
+ Uint32 nodeId = signal->theData[1];
+ Uint32 tnodeStartFailNr = signal->theData[2];
+ currentgcp = signal->theData[4];
+ CRASH_INSERTION(7127);
+ cnewgcp = currentgcp;
+ coldgcp = currentgcp - 1;
+ if (!isMaster()) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // We don't want to change the state of the master since it can be in the
+ // state LCP_TCGET at this time.
+ /*-----------------------------------------------------------------------*/
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+ }//if
+
+ /*-------------------------------------------------------------------------*/
+ // When a node is restarted we must ensure that an LCP is run as soon as
+ // possible and then reset the delay according to the original
+ // configuration.
+ // Without an initial local checkpoint the new node will not be available.
+ /*-------------------------------------------------------------------------*/
+ if (getOwnNodeId() == nodeId) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // We are the starting node. We came here only to set the global checkpoint
+ // id's and the lcp status.
+ /*-----------------------------------------------------------------------*/
+ CRASH_INSERTION(7171);
+ return;
+ }//if
+ if (getNodeStatus(nodeId) != NodeRecord::STARTING) {
+ jam();
+ return;
+ }//if
+ ndbrequire(cfailurenr == tnodeStartFailNr);
+ ndbrequire (c_nodeStartSlave.nodeId == 0);
+ c_nodeStartSlave.nodeId = nodeId;
+
+ ndbrequire (retRef == cmasterdihref);
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ Sysfile::ActiveStatus TsaveState = nodePtr.p->activeStatus;
+ Uint32 TnodeGroup = nodePtr.p->nodeGroup;
+
+ new (nodePtr.p) NodeRecord();
+ nodePtr.p->nodeGroup = TnodeGroup;
+ nodePtr.p->activeStatus = TsaveState;
+ nodePtr.p->nodeStatus = NodeRecord::ALIVE;
+ nodePtr.p->useInTransactions = true;
+ nodePtr.p->m_inclDihLcp = true;
+
+ removeDeadNode(nodePtr);
+ insertAlive(nodePtr);
+ con_lineNodes++;
+
+ /*-------------------------------------------------------------------------*/
+ // WE WILL ALSO SEND THE INCLUDE NODE REQUEST TO THE LOCAL LQH BLOCK.
+ /*-------------------------------------------------------------------------*/
+ signal->theData[0] = reference();
+ signal->theData[1] = nodeId;
+ signal->theData[2] = currentgcp;
+ sendSignal(clocallqhblockref, GSN_INCL_NODEREQ, signal, 3, JBB);
+}//Dbdih::execINCL_NODEREQ()
+
+/* ------------------------------------------------------------------------- */
+// execINCL_NODECONF() is found in the master logic part since it is used by
+// both the master and the slaves.
+/* ------------------------------------------------------------------------- */
+
+/*****************************************************************************/
+/*********** TAKE OVER DECISION MODULE *************/
+/*****************************************************************************/
+// This module contains the subroutines that take the decision whether to take
+// over a node now or not.
+/* ------------------------------------------------------------------------- */
+/* MASTER LOGIC FOR SYSTEM RESTART */
+/* ------------------------------------------------------------------------- */
+// WE ONLY COME HERE IF WE ARE THE MASTER AND WE ARE PERFORMING A SYSTEM
+// RESTART. WE ALSO COME HERE DURING THIS SYSTEM RESTART ONE TIME PER NODE
+// THAT NEEDS TAKE OVER.
+/*---------------------------------------------------------------------------*/
+// WE CHECK IF ANY NODE NEEDS TO BE TAKEN OVER AND THE TAKE OVER HAS NOT YET
+// BEEN STARTED OR COMPLETED.
+/*---------------------------------------------------------------------------*/
+void
+Dbdih::systemRestartTakeOverLab(Signal* signal)
+{
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ jam();
+ break;
+ /*---------------------------------------------------------------------*/
+ // WE HAVE NOT REACHED A STATE YET WHERE THIS NODE NEEDS TO BE TAKEN OVER
+ /*---------------------------------------------------------------------*/
+ case Sysfile::NS_ActiveMissed_2:
+ case Sysfile::NS_NotActive_NotTakenOver:
+ jam();
+ /*---------------------------------------------------------------------*/
+ // THIS NODE IS IN TROUBLE.
+ // WE MUST SUCCEED WITH A LOCAL CHECKPOINT WITH THIS NODE TO REMOVE THE
+ // DANGER. IF THE NODE IS NOT ALIVE THEN THIS WILL NOT BE
+ // POSSIBLE AND WE CAN START THE TAKE OVER IMMEDIATELY IF WE HAVE ANY
+ // NODES THAT CAN PERFORM A TAKE OVER.
+ /*---------------------------------------------------------------------*/
+ if (nodePtr.p->nodeStatus != NodeRecord::ALIVE) {
+ jam();
+ Uint32 ThotSpareNode = findHotSpare();
+ if (ThotSpareNode != RNIL) {
+ jam();
+ startTakeOver(signal, RNIL, ThotSpareNode, nodePtr.i);
+ }//if
+ } else if(nodePtr.p->activeStatus == Sysfile::NS_NotActive_NotTakenOver){
+ jam();
+ /*-------------------------------------------------------------------*/
+ // NOT ACTIVE NODES THAT HAVE NOT YET BEEN TAKEN OVER NEED TAKE OVER
+ // IMMEDIATELY. IF WE ARE ALIVE WE TAKE OVER OUR OWN NODE.
+ /*-------------------------------------------------------------------*/
+ startTakeOver(signal, RNIL, nodePtr.i, nodePtr.i);
+ }//if
+ break;
+ case Sysfile::NS_TakeOver:
+ /**-------------------------------------------------------------------
+ * WE MUST HAVE FAILED IN THE MIDDLE OF THE TAKE OVER PROCESS.
+ * WE WILL CONCLUDE THE TAKE OVER PROCESS NOW.
+ *-------------------------------------------------------------------*/
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ Uint32 takeOverNode = Sysfile::getTakeOverNode(nodePtr.i,
+ SYSFILE->takeOver);
+ if(takeOverNode == 0){
+ jam();
+ warningEvent("Bug in take-over code restarting");
+ takeOverNode = nodePtr.i;
+ }
+ startTakeOver(signal, RNIL, nodePtr.i, takeOverNode);
+ } else {
+ jam();
+ /**-------------------------------------------------------------------
+ * We are not currently taking over, change our active status.
+ *-------------------------------------------------------------------*/
+ nodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
+ setNodeRestartInfoBits();
+ }//if
+ break;
+ case Sysfile::NS_HotSpare:
+ jam();
+ break;
+ /*---------------------------------------------------------------------*/
+ // WE NEED NOT TAKE OVER NODES THAT ARE HOT SPARE.
+ /*---------------------------------------------------------------------*/
+ case Sysfile::NS_NotDefined:
+ jam();
+ break;
+ /*---------------------------------------------------------------------*/
+ // WE NEED NOT TAKE OVER NODES THAT DO NOT EVEN EXIST IN THE CLUSTER.
+ /*---------------------------------------------------------------------*/
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ }//for
+ /*-------------------------------------------------------------------------*/
+ /* NO TAKE OVER HAS BEEN INITIATED. */
+ /*-------------------------------------------------------------------------*/
+}//Dbdih::systemRestartTakeOverLab()
+
+/*---------------------------------------------------------------------------*/
+// This subroutine is called as part of node restart in the master node.
+/*---------------------------------------------------------------------------*/
+void Dbdih::nodeRestartTakeOver(Signal* signal, Uint32 startNodeId)
+{
+ switch (getNodeActiveStatus(startNodeId)) {
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // AN ACTIVE NODE HAS BEEN STARTED. THE ACTIVE NODE MUST THEN GET ALL DATA
+ // IT HAD BEFORE ITS CRASH. WE START THE TAKE OVER IMMEDIATELY.
+ // SINCE WE ARE AN ACTIVE NODE WE WILL TAKE OVER OUR OWN NODE THAT
+ // PREVIOUSLY CRASHED.
+ /*-----------------------------------------------------------------------*/
+ startTakeOver(signal, RNIL, startNodeId, startNodeId);
+ break;
+ case Sysfile::NS_HotSpare:{
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // WHEN STARTING UP A HOT SPARE WE WILL CHECK IF ANY NODE NEEDS TO BE TAKEN
+ // OVER. IF SO THEN WE WILL START THE TAKE OVER.
+ /*-----------------------------------------------------------------------*/
+ bool takeOverStarted = false;
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->activeStatus == Sysfile::NS_NotActive_NotTakenOver) {
+ jam();
+ takeOverStarted = true;
+ startTakeOver(signal, RNIL, startNodeId, nodePtr.i);
+ }//if
+ }//for
+ if (!takeOverStarted) {
+ jam();
+ /*-------------------------------------------------------------------*/
+ // NO TAKE OVER WAS NEEDED AT THE MOMENT. WE START UP AND WAIT UNTIL A
+ // TAKE OVER IS NEEDED.
+ /*-------------------------------------------------------------------*/
+ BlockReference ref = calcDihBlockRef(startNodeId);
+ signal->theData[0] = startNodeId;
+ sendSignal(ref, GSN_START_COPYCONF, signal, 1, JBB);
+ }//if
+ break;
+ }
+ case Sysfile::NS_NotActive_NotTakenOver:
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // ALL DATA IN THE NODE IS LOST BUT WE HAVE NOT TAKEN OVER YET. WE WILL
+ // TAKE OVER OUR OWN NODE
+ /*-----------------------------------------------------------------------*/
+ startTakeOver(signal, RNIL, startNodeId, startNodeId);
+ break;
+ case Sysfile::NS_TakeOver:{
+ jam();
+ /*--------------------------------------------------------------------
+ * We were in the process of taking over but it was not completed.
+ * We will complete it now instead.
+ *--------------------------------------------------------------------*/
+ Uint32 takeOverNode = Sysfile::getTakeOverNode(startNodeId,
+ SYSFILE->takeOver);
+ startTakeOver(signal, RNIL, startNodeId, takeOverNode);
+ break;
+ }
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ nodeResetStart();
+}//Dbdih::nodeRestartTakeOver()
+
+/*************************************************************************/
+ // This routine is called when starting a local checkpoint.
+/*************************************************************************/
+void Dbdih::checkStartTakeOver(Signal* signal)
+{
+ NodeRecordPtr csoNodeptr;
+ Uint32 tcsoHotSpareNode;
+ Uint32 tcsoTakeOverNode;
+ if (isMaster()) {
+ /*-----------------------------------------------------------------*/
+ /* WE WILL ONLY START TAKE OVER IF WE ARE MASTER. */
+ /*-----------------------------------------------------------------*/
+ /* WE WILL ONLY START THE TAKE OVER IF THERE WERE A NEED OF */
+ /* A TAKE OVER. */
+ /*-----------------------------------------------------------------*/
+ /* WE CAN ONLY PERFORM THE TAKE OVER IF WE HAVE A HOT SPARE */
+ /* AVAILABLE. */
+ /*-----------------------------------------------------------------*/
+ tcsoTakeOverNode = 0;
+ tcsoHotSpareNode = 0;
+ for (csoNodeptr.i = 1; csoNodeptr.i < MAX_NDB_NODES; csoNodeptr.i++) {
+ ptrAss(csoNodeptr, nodeRecord);
+ if (csoNodeptr.p->activeStatus == Sysfile::NS_NotActive_NotTakenOver) {
+ jam();
+ tcsoTakeOverNode = csoNodeptr.i;
+ } else {
+ jam();
+ if (csoNodeptr.p->activeStatus == Sysfile::NS_HotSpare) {
+ jam();
+ tcsoHotSpareNode = csoNodeptr.i;
+ }//if
+ }//if
+ }//for
+ if ((tcsoTakeOverNode != 0) &&
+ (tcsoHotSpareNode != 0)) {
+ jam();
+ startTakeOver(signal, RNIL, tcsoHotSpareNode, tcsoTakeOverNode);
+ }//if
+ }//if
+}//Dbdih::checkStartTakeOver()
+
+/*****************************************************************************/
+/*********** NODE ADDING MODULE *************/
+/*********** CODE TO HANDLE TAKE OVER *************/
+/*****************************************************************************/
+// A take over can be initiated by a number of things:
+// 1) A node restart, usually the node takes over itself but can also take
+// over somebody else if its own data was already taken over
+// 2) At system restart it is necessary to use the take over code to recover
+// nodes which had too old checkpoints to be restorable by the usual
+// restoration from disk.
+// 3) When a node has missed too many local checkpoints and is decided by the
+// master to be taken over by a hot spare node that sits around waiting
+// for this to happen.
+//
+// To support multiple node failures efficiently the code is written such that
+ // only one take over at a time can handle state transitions, but during a
+ // copy fragment other take overs can perform state transitions.
+/*****************************************************************************/
+void Dbdih::startTakeOver(Signal* signal,
+ Uint32 takeOverPtrI,
+ Uint32 startNode,
+ Uint32 nodeTakenOver)
+{
+ NodeRecordPtr toNodePtr;
+ NodeGroupRecordPtr NGPtr;
+ toNodePtr.i = nodeTakenOver;
+ ptrCheckGuard(toNodePtr, MAX_NDB_NODES, nodeRecord);
+ NGPtr.i = toNodePtr.p->nodeGroup;
+ ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
+ TakeOverRecordPtr takeOverPtr;
+ if (takeOverPtrI == RNIL) {
+ jam();
+ setAllowNodeStart(startNode, false);
+ seizeTakeOver(takeOverPtr);
+ if (startNode == c_nodeStartMaster.startNode) {
+ jam();
+ takeOverPtr.p->toNodeRestart = true;
+ }//if
+ takeOverPtr.p->toStartingNode = startNode;
+ takeOverPtr.p->toFailedNode = nodeTakenOver;
+ } else {
+ jam();
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+ ndbrequire(takeOverPtr.p->toStartingNode == startNode);
+ ndbrequire(takeOverPtr.p->toFailedNode == nodeTakenOver);
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_WAIT_START_TAKE_OVER);
+ }//if
+ if ((NGPtr.p->activeTakeOver) || (ERROR_INSERTED(7157))) {
+ jam();
+ /**------------------------------------------------------------------------
+ * A take over is already active in this node group. We only allow one
+ * take over per node group. Otherwise we would overload the node group and
+ * also require many more checks when starting up copying of fragments.
+ * The parallelism for take over is mainly there to ensure that we can
+ * handle take over efficiently in large systems with 4 nodes and above.
+ * A typical case is an 8 node system executing on two 8-cpu boxes.
+ * A box crash in one of the boxes means 4 node crashes.
+ * We want to be able to restart those four nodes to some extent in parallel.
+ *
+ * We will wait for a few seconds and then try again.
+ */
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_START_TAKE_OVER;
+ signal->theData[0] = DihContinueB::ZSTART_TAKE_OVER;
+ signal->theData[1] = takeOverPtr.i;
+ signal->theData[2] = startNode;
+ signal->theData[3] = nodeTakenOver;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 5000, 4);
+ return;
+ }//if
+ NGPtr.p->activeTakeOver = true;
+ if (startNode == nodeTakenOver) {
+ jam();
+ switch (getNodeActiveStatus(nodeTakenOver)) {
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ break;
+ case Sysfile::NS_NotActive_NotTakenOver:
+ case Sysfile::NS_TakeOver:
+ jam();
+ setNodeActiveStatus(nodeTakenOver, Sysfile::NS_TakeOver);
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+ } else {
+ jam();
+ setNodeActiveStatus(nodeTakenOver, Sysfile::NS_HotSpare);
+ setNodeActiveStatus(startNode, Sysfile::NS_TakeOver);
+ changeNodeGroups(startNode, nodeTakenOver);
+ }//if
+ setNodeRestartInfoBits();
+ /* ---------------------------------------------------------------------- */
+ /* WE SET THE RESTART INFORMATION TO INDICATE THAT WE ARE ABOUT TO TAKE */
+ /* OVER THE FAILED NODE. WE SET THIS INFORMATION AND WAIT UNTIL THE */
+ /* GLOBAL CHECKPOINT HAS WRITTEN THE RESTART INFORMATION. */
+ /* ---------------------------------------------------------------------- */
+ Sysfile::setTakeOverNode(takeOverPtr.p->toFailedNode, SYSFILE->takeOver,
+ startNode);
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_START_COPY;
+
+ cstartGcpNow = true;
+}//Dbdih::startTakeOver()
+
+void Dbdih::changeNodeGroups(Uint32 startNode, Uint32 nodeTakenOver)
+{
+ NodeRecordPtr startNodePtr;
+ NodeRecordPtr toNodePtr;
+ startNodePtr.i = startNode;
+ ptrCheckGuard(startNodePtr, MAX_NDB_NODES, nodeRecord);
+ toNodePtr.i = nodeTakenOver;
+ ptrCheckGuard(toNodePtr, MAX_NDB_NODES, nodeRecord);
+ ndbrequire(startNodePtr.p->nodeGroup == ZNIL);
+ NodeGroupRecordPtr NGPtr;
+
+ NGPtr.i = toNodePtr.p->nodeGroup;
+ ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
+ bool nodeFound = false;
+ for (Uint32 i = 0; i < NGPtr.p->nodeCount; i++) {
+ jam();
+ if (NGPtr.p->nodesInGroup[i] == nodeTakenOver) {
+ jam();
+ NGPtr.p->nodesInGroup[i] = startNode;
+ nodeFound = true;
+ }//if
+ }//for
+ ndbrequire(nodeFound);
+ Sysfile::setNodeGroup(startNodePtr.i, SYSFILE->nodeGroups, toNodePtr.p->nodeGroup);
+ startNodePtr.p->nodeGroup = toNodePtr.p->nodeGroup;
+ Sysfile::setNodeGroup(toNodePtr.i, SYSFILE->nodeGroups, NO_NODE_GROUP_ID);
+ toNodePtr.p->nodeGroup = ZNIL;
+}//Dbdih::changeNodeGroups()
+
+void Dbdih::checkToCopy()
+{
+ TakeOverRecordPtr takeOverPtr;
+ for (takeOverPtr.i = 0; takeOverPtr.i < MAX_NDB_NODES; takeOverPtr.i++) {
+ ptrAss(takeOverPtr, takeOverRecord);
+ /*----------------------------------------------------------------------*/
+ // TAKE OVER HANDLING WRITES RESTART INFORMATION THROUGH
+ // THE GLOBAL CHECKPOINT
+ // PROTOCOL. WE CHECK HERE BEFORE STARTING A WRITE OF THE RESTART
+ // INFORMATION.
+ /*-----------------------------------------------------------------------*/
+ if (takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_START_COPY) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_START_COPY_ONGOING;
+ } else if (takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_END_COPY) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_END_COPY_ONGOING;
+ }//if
+ }//for
+}//Dbdih::checkToCopy()
+
+void Dbdih::checkToCopyCompleted(Signal* signal)
+{
+ /* ------------------------------------------------------------------------*/
+ /* WE CHECK HERE IF THE WRITING OF TAKE OVER INFORMATION ALSO HAS BEEN */
+ /* COMPLETED. */
+ /* ------------------------------------------------------------------------*/
+ TakeOverRecordPtr toPtr;
+ for (toPtr.i = 0; toPtr.i < MAX_NDB_NODES; toPtr.i++) {
+ ptrAss(toPtr, takeOverRecord);
+ if (toPtr.p->toMasterStatus == TakeOverRecord::TO_START_COPY_ONGOING){
+ jam();
+ sendStartTo(signal, toPtr.i);
+ } else if (toPtr.p->toMasterStatus == TakeOverRecord::TO_END_COPY_ONGOING){
+ jam();
+ sendEndTo(signal, toPtr.i);
+ } else {
+ jam();
+ }//if
+ }//for
+}//Dbdih::checkToCopyCompleted()
+
+bool Dbdih::checkToInterrupted(TakeOverRecordPtr& takeOverPtr)
+{
+ if (checkNodeAlive(takeOverPtr.p->toStartingNode)) {
+ jam();
+ return false;
+ } else {
+ jam();
+ endTakeOver(takeOverPtr.i);
+ return true;
+ }//if
+}//Dbdih::checkToInterrupted()
+
+void Dbdih::sendStartTo(Signal* signal, Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ CRASH_INSERTION(7155);
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+ if ((c_startToLock != RNIL) || (ERROR_INSERTED(7158))) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_START;
+ signal->theData[0] = DihContinueB::ZSEND_START_TO;
+ signal->theData[1] = takeOverPtrI;
+ signal->theData[2] = takeOverPtr.p->toStartingNode;
+ signal->theData[3] = takeOverPtr.p->toFailedNode;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 30, 4);
+ return;
+ }//if
+ c_startToLock = takeOverPtrI;
+ StartToReq * const req = (StartToReq *)&signal->theData[0];
+ req->userPtr = takeOverPtr.i;
+ req->userRef = reference();
+ req->startingNodeId = takeOverPtr.p->toStartingNode;
+ req->nodeTakenOver = takeOverPtr.p->toFailedNode;
+ req->nodeRestart = takeOverPtr.p->toNodeRestart;
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::STARTING;
+ sendLoopMacro(START_TOREQ, sendSTART_TOREQ);
+}//Dbdih::sendStartTo()
+
+void Dbdih::execSTART_TOREQ(Signal* signal)
+{
+ TakeOverRecordPtr takeOverPtr;
+ jamEntry();
+ const StartToReq * const req = (StartToReq *)&signal->theData[0];
+ takeOverPtr.i = req->userPtr;
+ BlockReference ref = req->userRef;
+ Uint32 startingNode = req->startingNodeId;
+
+ CRASH_INSERTION(7133);
+ RETURN_IF_NODE_NOT_ALIVE(req->startingNodeId);
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ allocateTakeOver(takeOverPtr);
+ initStartTakeOver(req, takeOverPtr);
+
+ StartToConf * const conf = (StartToConf *)&signal->theData[0];
+ conf->userPtr = takeOverPtr.i;
+ conf->sendingNodeId = cownNodeId;
+ conf->startingNodeId = startingNode;
+ sendSignal(ref, GSN_START_TOCONF, signal, StartToConf::SignalLength, JBB);
+}//Dbdih::execSTART_TOREQ()
+
+void Dbdih::execSTART_TOCONF(Signal* signal)
+{
+ TakeOverRecordPtr takeOverPtr;
+ jamEntry();
+ const StartToConf * const conf = (StartToConf *)&signal->theData[0];
+
+ CRASH_INSERTION(7147);
+
+ RETURN_IF_NODE_NOT_ALIVE(conf->startingNodeId);
+
+ takeOverPtr.i = conf->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::STARTING);
+ ndbrequire(takeOverPtr.p->toStartingNode == conf->startingNodeId);
+ receiveLoopMacro(START_TOREQ, conf->sendingNodeId);
+ CRASH_INSERTION(7134);
+ c_startToLock = RNIL;
+
+ startNextCopyFragment(signal, takeOverPtr.i);
+}//Dbdih::execSTART_TOCONF()
+
+void Dbdih::initStartTakeOver(const StartToReq * req,
+ TakeOverRecordPtr takeOverPtr)
+{
+ takeOverPtr.p->toCurrentTabref = 0;
+ takeOverPtr.p->toCurrentFragid = 0;
+ takeOverPtr.p->toStartingNode = req->startingNodeId;
+ takeOverPtr.p->toFailedNode = req->nodeTakenOver;
+ takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_STARTED;
+ takeOverPtr.p->toCopyNode = RNIL;
+ takeOverPtr.p->toCurrentReplica = RNIL;
+ takeOverPtr.p->toNodeRestart = req->nodeRestart;
+}//Dbdih::initStartTakeOver()
+
+void Dbdih::startNextCopyFragment(Signal* signal, Uint32 takeOverPtrI)
+{
+ TabRecordPtr tabPtr;
+ TakeOverRecordPtr takeOverPtr;
+ Uint32 loopCount;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::SELECTING_NEXT;
+ loopCount = 0;
+ if (ERROR_INSERTED(7159)) {
+ loopCount = 100;
+ }//if
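+ /* Scan tables and fragments from the saved position, looking among the */
+ /* old stored replicas for one on the failed node (or one already moved */
+ /* to the starting node). At most 100 fragments are examined per round; */
+ /* if none is found the scan continues via a CONTINUEB signal below. */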
+ while (loopCount++ < 100) {
+ tabPtr.i = takeOverPtr.p->toCurrentTabref;
+ if (tabPtr.i >= ctabFileSize) {
+ jam();
+ CRASH_INSERTION(7136);
+ sendUpdateTo(signal, takeOverPtr.i, UpdateToReq::TO_COPY_COMPLETED);
+ return;
+ }//if
+ ptrAss(tabPtr, tabRecord);
+ if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE){
+ jam();
+ takeOverPtr.p->toCurrentFragid = 0;
+ takeOverPtr.p->toCurrentTabref++;
+ continue;
+ }//if
+ Uint32 fragId = takeOverPtr.p->toCurrentFragid;
+ if (fragId >= tabPtr.p->totalfragments) {
+ jam();
+ takeOverPtr.p->toCurrentFragid = 0;
+ takeOverPtr.p->toCurrentTabref++;
+ if (ERROR_INSERTED(7135)) {
+ if (takeOverPtr.p->toCurrentTabref == 1) {
+ ndbrequire(false);
+ }//if
+ }//if
+ continue;
+ }//if
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ ReplicaRecordPtr loopReplicaPtr;
+ loopReplicaPtr.i = fragPtr.p->oldStoredReplicas;
+ while (loopReplicaPtr.i != RNIL) {
+ ptrCheckGuard(loopReplicaPtr, creplicaFileSize, replicaRecord);
+ if (loopReplicaPtr.p->procNode == takeOverPtr.p->toFailedNode) {
+ jam();
+ /* ----------------------------------------------------------------- */
+ /* WE HAVE FOUND A REPLICA THAT BELONGED TO THE FAILED NODE AND NEEDS */
+ /* TAKE OVER. WE TAKE OVER THIS REPLICA TO THE NEW NODE. */
+ /* ----------------------------------------------------------------- */
+ takeOverPtr.p->toCurrentReplica = loopReplicaPtr.i;
+ toCopyFragLab(signal, takeOverPtr.i);
+ return;
+ } else if (loopReplicaPtr.p->procNode == takeOverPtr.p->toStartingNode) {
+ jam();
+ /* ----------------------------------------------------------------- */
+ /* WE HAVE OBVIOUSLY STARTED TAKING OVER THIS REPLICA WITHOUT */
+ /* COMPLETING IT. WE NEED TO COMPLETE THE TAKE OVER OF THIS REPLICA. */
+ /* ----------------------------------------------------------------- */
+ takeOverPtr.p->toCurrentReplica = loopReplicaPtr.i;
+ toCopyFragLab(signal, takeOverPtr.i);
+ return;
+ } else {
+ jam();
+ loopReplicaPtr.i = loopReplicaPtr.p->nextReplica;
+ }//if
+ }//while
+ takeOverPtr.p->toCurrentFragid++;
+ }//while
+ signal->theData[0] = DihContinueB::ZTO_START_COPY_FRAG;
+ signal->theData[1] = takeOverPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+}//Dbdih::startNextCopyFragment()
+
+void Dbdih::toCopyFragLab(Signal* signal,
+ Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+
+ CreateReplicaRecordPtr createReplicaPtr;
+ createReplicaPtr.i = 0;
+ ptrAss(createReplicaPtr, createReplicaRecord);
+
+ ReplicaRecordPtr replicaPtr;
+ replicaPtr.i = takeOverPtr.p->toCurrentReplica;
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = takeOverPtr.p->toCurrentTabref;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE FOUND A REPLICA THAT NEEDS TAKE OVER. WE WILL START THIS TAKE */
+ /* OVER BY ADDING THE FRAGMENT WHEREAFTER WE WILL ORDER THE PRIMARY */
+ /* REPLICA TO COPY ITS CONTENT TO THE NEW STARTING REPLICA. */
+ /* THIS OPERATION IS A SINGLE USER OPERATION UNTIL WE HAVE SENT */
+ /* COPY_FRAGREQ. AFTER SENDING COPY_FRAGREQ WE ARE READY TO START A NEW */
+ /* FRAGMENT REPLICA. WE WILL NOT IMPLEMENT THIS IN THE FIRST PHASE. */
+ /* ----------------------------------------------------------------------- */
+ cnoOfCreateReplicas = 1;
+ createReplicaPtr.p->hotSpareUse = true;
+ createReplicaPtr.p->dataNodeId = takeOverPtr.p->toStartingNode;
+
+ prepareSendCreateFragReq(signal, takeOverPtrI);
+}//Dbdih::toCopyFragLab()
+
+void Dbdih::prepareSendCreateFragReq(Signal* signal, Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = takeOverPtr.p->toCurrentTabref;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ FragmentstorePtr fragPtr;
+
+ getFragstore(tabPtr.p, takeOverPtr.p->toCurrentFragid, fragPtr);
+ Uint32 nodes[MAX_REPLICAS];
+ extractNodeInfo(fragPtr.p, nodes);
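+ /* The first node returned by extractNodeInfo() is used as the copy */
+ /* source, i.e. the replica that will copy its content to the starting */
+ /* node (see the comment in toCopyFragLab above). */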
+ takeOverPtr.p->toCopyNode = nodes[0];
+ sendCreateFragReq(signal, 0, CreateFragReq::STORED, takeOverPtr.i);
+}//Dbdih::prepareSendCreateFragReq()
+
+void Dbdih::sendCreateFragReq(Signal* signal,
+ Uint32 startGci,
+ Uint32 replicaType,
+ Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+ if ((c_createFragmentLock != RNIL) ||
+ ((ERROR_INSERTED(7161))&&(replicaType == CreateFragReq::STORED)) ||
+ ((ERROR_INSERTED(7162))&&(replicaType == CreateFragReq::COMMIT_STORED))){
+ if (replicaType == CreateFragReq::STORED) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_PREPARE_CREATE;
+ } else {
+ ndbrequire(replicaType == CreateFragReq::COMMIT_STORED);
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_COMMIT_CREATE;
+ }//if
+ signal->theData[0] = DihContinueB::ZSEND_CREATE_FRAG;
+ signal->theData[1] = takeOverPtr.i;
+ signal->theData[2] = replicaType;
+ signal->theData[3] = startGci;
+ signal->theData[4] = takeOverPtr.p->toStartingNode;
+ signal->theData[5] = takeOverPtr.p->toFailedNode;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 50, 6);
+ return;
+ }//if
+ c_createFragmentLock = takeOverPtr.i;
+ sendLoopMacro(CREATE_FRAGREQ, nullRoutine);
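+ // The macro only records which alive nodes we are waiting for
+ // (nullRoutine sends nothing); the request itself is built below and
+ // sent explicitly to the DIH block of every alive node.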
+
+ CreateFragReq * const req = (CreateFragReq *)&signal->theData[0];
+ req->userPtr = takeOverPtr.i;
+ req->userRef = reference();
+ req->tableId = takeOverPtr.p->toCurrentTabref;
+ req->fragId = takeOverPtr.p->toCurrentFragid;
+ req->startingNodeId = takeOverPtr.p->toStartingNode;
+ req->copyNodeId = takeOverPtr.p->toCopyNode;
+ req->startGci = startGci;
+ req->replicaType = replicaType;
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = cfirstAliveNode;
+ do {
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ BlockReference ref = calcDihBlockRef(nodePtr.i);
+ sendSignal(ref, GSN_CREATE_FRAGREQ, signal,
+ CreateFragReq::SignalLength, JBB);
+ nodePtr.i = nodePtr.p->nextNode;
+ } while (nodePtr.i != RNIL);
+
+ if (replicaType == CreateFragReq::STORED) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::PREPARE_CREATE;
+ } else {
+ ndbrequire(replicaType == CreateFragReq::COMMIT_STORED);
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::COMMIT_CREATE;
+ }
+}//Dbdih::sendCreateFragReq()
+
+/* --------------------------------------------------------------------------*/
+/* AN ORDER TO START OR COMMIT THE REPLICA CREATION ARRIVED FROM THE */
+/* MASTER. */
+/* --------------------------------------------------------------------------*/
+void Dbdih::execCREATE_FRAGREQ(Signal* signal)
+{
+ jamEntry();
+ CreateFragReq * const req = (CreateFragReq *)&signal->theData[0];
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = req->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ BlockReference retRef = req->userRef;
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ Uint32 fragId = req->fragId;
+ Uint32 tdestNodeid = req->startingNodeId;
+ Uint32 tsourceNodeid = req->copyNodeId;
+ Uint32 startGci = req->startGci;
+ Uint32 replicaType = req->replicaType;
+
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ RETURN_IF_NODE_NOT_ALIVE(tdestNodeid);
+ ReplicaRecordPtr frReplicaPtr;
+ findToReplica(takeOverPtr.p, replicaType, fragPtr, frReplicaPtr);
+ ndbrequire(frReplicaPtr.i != RNIL);
+
+ switch (replicaType) {
+ case CreateFragReq::STORED:
+ jam();
+ CRASH_INSERTION(7138);
+ /* ----------------------------------------------------------------------*/
+ /* HERE WE ARE INSERTING THE NEW BACKUP NODE IN THE EXECUTION OF ALL */
+ /* OPERATIONS. FROM HERE ON ALL OPERATIONS ON THIS FRAGMENT WILL INCLUDE*/
+ /* USE OF THE NEW REPLICA. */
+ /* --------------------------------------------------------------------- */
+ insertBackup(fragPtr, tdestNodeid);
+ takeOverPtr.p->toCopyNode = tsourceNodeid;
+ takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_CREATE_PREPARE;
+
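+ // Bump the fragment's distribution key (wraps at 255) so that cached
+ // distribution information referring to the old replica set can be
+ // detected as out of date.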
+ fragPtr.p->distributionKey++;
+ fragPtr.p->distributionKey &= 255;
+ break;
+ case CreateFragReq::COMMIT_STORED:
+ jam();
+ CRASH_INSERTION(7139);
+ /* ----------------------------------------------------------------------*/
+ /* HERE WE ARE MOVING THE REPLICA TO THE STORED SECTION SINCE IT IS NOW */
+ /* FULLY LOADED WITH ALL DATA NEEDED. */
+ // We also update the order of the replicas here so that if the new
+ // replica is the desired primary we insert it as primary.
+ /* ----------------------------------------------------------------------*/
+ takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_CREATE_COMMIT;
+ removeOldStoredReplica(fragPtr, frReplicaPtr);
+ linkStoredReplica(fragPtr, frReplicaPtr);
+ updateNodeInfo(fragPtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+
+ /* ------------------------------------------------------------------------*/
+ /* THE NEW NODE OF THIS REPLICA IS THE STARTING NODE. */
+ /* ------------------------------------------------------------------------*/
+ if (frReplicaPtr.p->procNode != takeOverPtr.p->toStartingNode) {
+ jam();
+ /* ---------------------------------------------------------------------*/
+ /* IF WE ARE STARTING A TAKE OVER NODE WE MUST INVALIDATE ALL LCP'S. */
+ /* OTHERWISE WE WILL TRY TO START LCP'S THAT DO NOT EXIST. */
+ /* ---------------------------------------------------------------------*/
+ frReplicaPtr.p->procNode = takeOverPtr.p->toStartingNode;
+ frReplicaPtr.p->noCrashedReplicas = 0;
+ frReplicaPtr.p->createGci[0] = startGci;
+ ndbrequire(startGci != 0xF1F1F1F1);
+ frReplicaPtr.p->replicaLastGci[0] = (Uint32)-1;
+ for (Uint32 i = 0; i < MAX_LCP_STORED; i++) {
+ frReplicaPtr.p->lcpStatus[i] = ZINVALID;
+ }//for
+ } else {
+ jam();
+ const Uint32 noCrashed = frReplicaPtr.p->noCrashedReplicas;
+ arrGuard(noCrashed, 8);
+ frReplicaPtr.p->createGci[noCrashed] = startGci;
+ ndbrequire(startGci != 0xF1F1F1F1);
+ frReplicaPtr.p->replicaLastGci[noCrashed] = (Uint32)-1;
+ }//if
+ takeOverPtr.p->toCurrentTabref = tabPtr.i;
+ takeOverPtr.p->toCurrentFragid = fragId;
+ CreateFragConf * const conf = (CreateFragConf *)&signal->theData[0];
+ conf->userPtr = takeOverPtr.i;
+ conf->tableId = tabPtr.i;
+ conf->fragId = fragId;
+ conf->sendingNodeId = cownNodeId;
+ conf->startingNodeId = tdestNodeid;
+ sendSignal(retRef, GSN_CREATE_FRAGCONF, signal,
+ CreateFragConf::SignalLength, JBB);
+}//Dbdih::execCREATE_FRAGREQ()
+
+void Dbdih::execCREATE_FRAGCONF(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(7148);
+ const CreateFragConf * const conf = (CreateFragConf *)&signal->theData[0];
+ Uint32 fragId = conf->fragId;
+
+ RETURN_IF_NODE_NOT_ALIVE(conf->startingNodeId);
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = conf->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = conf->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ ndbrequire(tabPtr.i == takeOverPtr.p->toCurrentTabref);
+ ndbrequire(fragId == takeOverPtr.p->toCurrentFragid);
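+ // receiveLoopMacro clears this sender from the wait bitmask and
+ // returns from this function unless all awaited nodes have confirmed.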
+ receiveLoopMacro(CREATE_FRAGREQ, conf->sendingNodeId);
+ c_createFragmentLock = RNIL;
+
+ if (takeOverPtr.p->toMasterStatus == TakeOverRecord::PREPARE_CREATE) {
+ jam();
+ CRASH_INSERTION(7140);
+ /* --------------------------------------------------------------------- */
+ /* ALL NODES HAVE PREPARED THE INTRODUCTION OF THIS NEW NODE AND IT IS */
+ /* ALREADY IN USE. WE CAN NOW START COPYING THE FRAGMENT. */
+ /*---------------------------------------------------------------------- */
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_FRAG;
+ BlockReference ref = calcLqhBlockRef(takeOverPtr.p->toCopyNode);
+ CopyFragReq * const copyFragReq = (CopyFragReq *)&signal->theData[0];
+ copyFragReq->userPtr = takeOverPtr.i;
+ copyFragReq->userRef = reference();
+ copyFragReq->tableId = tabPtr.i;
+ copyFragReq->fragId = fragId;
+ copyFragReq->nodeId = takeOverPtr.p->toStartingNode;
+ copyFragReq->schemaVersion = tabPtr.p->schemaVersion;
+ copyFragReq->distributionKey = fragPtr.p->distributionKey;
+ sendSignal(ref, GSN_COPY_FRAGREQ, signal, CopyFragReq::SignalLength, JBB);
+ } else {
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COMMIT_CREATE);
+ jam();
+ CRASH_INSERTION(7141);
+ /* --------------------------------------------------------------------- */
+ // REPORT that copy of fragment has been completed.
+ /* --------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_NR_CopyFragDone;
+ signal->theData[1] = takeOverPtr.p->toStartingNode;
+ signal->theData[2] = tabPtr.i;
+ signal->theData[3] = takeOverPtr.p->toCurrentFragid;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
+ /* --------------------------------------------------------------------- */
+ /* WE HAVE NOW CREATED THIS NEW REPLICA AND WE ARE READY TO TAKE OVER */
+ /* THE NEXT REPLICA. */
+ /* --------------------------------------------------------------------- */
+
+ Mutex mutex(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle);
+ mutex.unlock(); // ignore result
+
+ takeOverPtr.p->toCurrentFragid++;
+ startNextCopyFragment(signal, takeOverPtr.i);
+ }//if
+}//Dbdih::execCREATE_FRAGCONF()
+
+void Dbdih::execCOPY_FRAGREF(Signal* signal)
+{
+ const CopyFragRef * const ref = (CopyFragRef *)&signal->theData[0];
+ jamEntry();
+ Uint32 takeOverPtrI = ref->userPtr;
+ Uint32 startingNodeId = ref->startingNodeId;
+ Uint32 errorCode = ref->errorCode;
+
+ TakeOverRecordPtr takeOverPtr;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+ ndbrequire(errorCode != ZNODE_FAILURE_ERROR);
+ ndbrequire(ref->tableId == takeOverPtr.p->toCurrentTabref);
+ ndbrequire(ref->fragId == takeOverPtr.p->toCurrentFragid);
+ ndbrequire(ref->startingNodeId == takeOverPtr.p->toStartingNode);
+ ndbrequire(ref->sendingNodeId == takeOverPtr.p->toCopyNode);
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG);
+ endTakeOver(takeOverPtrI);
+ //--------------------------------------------------------------------------
+ // For some reason we did not succeed in copying a fragment. We treat this
+ // as a serious failure and crash the starting node.
+ //--------------------------------------------------------------------------
+ BlockReference cntrRef = calcNdbCntrBlockRef(startingNodeId);
+ SystemError * const sysErr = (SystemError*)&signal->theData[0];
+ sysErr->errorCode = SystemError::CopyFragRefError;
+ sysErr->errorRef = reference();
+ sysErr->data1 = errorCode;
+ sysErr->data2 = 0;
+ sendSignal(cntrRef, GSN_SYSTEM_ERROR, signal,
+ SystemError::SignalLength, JBB);
+ return;
+}//Dbdih::execCOPY_FRAGREF()
+
+void Dbdih::execCOPY_FRAGCONF(Signal* signal)
+{
+ const CopyFragConf * const conf = (CopyFragConf *)&signal->theData[0];
+ jamEntry();
+ CRASH_INSERTION(7142);
+
+ TakeOverRecordPtr takeOverPtr;
+ Uint32 takeOverPtrI = conf->userPtr;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+
+ ndbrequire(conf->tableId == takeOverPtr.p->toCurrentTabref);
+ ndbrequire(conf->fragId == takeOverPtr.p->toCurrentFragid);
+ ndbrequire(conf->startingNodeId == takeOverPtr.p->toStartingNode);
+ ndbrequire(conf->sendingNodeId == takeOverPtr.p->toCopyNode);
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG);
+ sendUpdateTo(signal, takeOverPtr.i,
+ (Uint32)UpdateToReq::TO_COPY_FRAG_COMPLETED);
+}//Dbdih::execCOPY_FRAGCONF()
+
+void Dbdih::sendUpdateTo(Signal* signal,
+ Uint32 takeOverPtrI, Uint32 updateState)
+{
+ TakeOverRecordPtr takeOverPtr;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
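+ // Only one UPDATE_TOREQ round may be outstanding at a time. If the
+ // lock is held (or an error insert simulates that), park the request
+ // and retry it through a delayed CONTINUEB signal.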
+ if ((c_updateToLock != RNIL) ||
+ ((ERROR_INSERTED(7163)) &&
+ (updateState == UpdateToReq::TO_COPY_FRAG_COMPLETED)) ||
+ ((ERROR_INSERTED(7169)) &&
+ (updateState == UpdateToReq::TO_COPY_COMPLETED))) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_UPDATE_TO;
+ signal->theData[0] = DihContinueB::ZSEND_UPDATE_TO;
+ signal->theData[1] = takeOverPtrI;
+ signal->theData[2] = takeOverPtr.p->toStartingNode;
+ signal->theData[3] = takeOverPtr.p->toFailedNode;
+ signal->theData[4] = updateState;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 30, 5);
+ return;
+ }//if
+ c_updateToLock = takeOverPtrI;
+ if (updateState == UpdateToReq::TO_COPY_FRAG_COMPLETED) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_UPDATE_TO;
+ } else {
+ jam();
+ ndbrequire(updateState == UpdateToReq::TO_COPY_COMPLETED);
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_COPY_COMPLETED;
+ }//if
+
+ UpdateToReq * const req = (UpdateToReq *)&signal->theData[0];
+ req->userPtr = takeOverPtr.i;
+ req->userRef = reference();
+ req->updateState = (UpdateToReq::UpdateState)updateState;
+ req->startingNodeId = takeOverPtr.p->toStartingNode;
+ req->tableId = takeOverPtr.p->toCurrentTabref;
+ req->fragmentNo = takeOverPtr.p->toCurrentFragid;
+ sendLoopMacro(UPDATE_TOREQ, sendUPDATE_TOREQ);
+}//Dbdih::sendUpdateTo()
+
+void Dbdih::execUPDATE_TOREQ(Signal* signal)
+{
+ jamEntry();
+ const UpdateToReq * const req = (UpdateToReq *)&signal->theData[0];
+ BlockReference ref = req->userRef;
+ ndbrequire(cmasterdihref == ref);
+
+ CRASH_INSERTION(7154);
+ RETURN_IF_NODE_NOT_ALIVE(req->startingNodeId);
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = req->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ ndbrequire(req->startingNodeId == takeOverPtr.p->toStartingNode);
+ if (req->updateState == UpdateToReq::TO_COPY_FRAG_COMPLETED) {
+ jam();
+ ndbrequire(takeOverPtr.p->toSlaveStatus == TakeOverRecord::TO_SLAVE_CREATE_PREPARE);
+ takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_COPY_FRAG_COMPLETED;
+ takeOverPtr.p->toCurrentTabref = req->tableId;
+ takeOverPtr.p->toCurrentFragid = req->fragmentNo;
+ } else {
+ jam();
+ ndbrequire(req->updateState == UpdateToReq::TO_COPY_COMPLETED);
+ takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_COPY_COMPLETED;
+ setNodeCopyCompleted(takeOverPtr.p->toStartingNode, true);
+ }//if
+
+
+ UpdateToConf * const conf = (UpdateToConf *)&signal->theData[0];
+ conf->userPtr = takeOverPtr.i;
+ conf->sendingNodeId = cownNodeId;
+ conf->startingNodeId = takeOverPtr.p->toStartingNode;
+ sendSignal(ref, GSN_UPDATE_TOCONF, signal, UpdateToConf::SignalLength, JBB);
+}//Dbdih::execUPDATE_TOREQ()
+
+void Dbdih::execUPDATE_TOCONF(Signal* signal)
+{
+ const UpdateToConf * const conf = (UpdateToConf *)&signal->theData[0];
+ CRASH_INSERTION(7152);
+
+ RETURN_IF_NODE_NOT_ALIVE(conf->startingNodeId);
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = conf->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ receiveLoopMacro(UPDATE_TOREQ, conf->sendingNodeId);
+ CRASH_INSERTION(7153);
+ c_updateToLock = RNIL;
+
+ if (takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_COPY_COMPLETED) {
+ jam();
+ toCopyCompletedLab(signal, takeOverPtr);
+ return;
+ } else {
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_UPDATE_TO);
+ }//if
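+ // All nodes have acknowledged the copied fragment. Ask the starting
+ // node's LQH to activate the fragment through COPY_ACTIVEREQ.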
+ TabRecordPtr tabPtr;
+ tabPtr.i = takeOverPtr.p->toCurrentTabref;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, takeOverPtr.p->toCurrentFragid, fragPtr);
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_ACTIVE;
+ BlockReference lqhRef = calcLqhBlockRef(takeOverPtr.p->toStartingNode);
+ CopyActiveReq * const req = (CopyActiveReq *)&signal->theData[0];
+ req->userPtr = takeOverPtr.i;
+ req->userRef = reference();
+ req->tableId = takeOverPtr.p->toCurrentTabref;
+ req->fragId = takeOverPtr.p->toCurrentFragid;
+ req->distributionKey = fragPtr.p->distributionKey;
+
+ sendSignal(lqhRef, GSN_COPY_ACTIVEREQ, signal,
+ CopyActiveReq::SignalLength, JBB);
+}//Dbdih::execUPDATE_TOCONF()
+
+void Dbdih::execCOPY_ACTIVECONF(Signal* signal)
+{
+ const CopyActiveConf * const conf = (CopyActiveConf *)&signal->theData[0];
+ jamEntry();
+ CRASH_INSERTION(7143);
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = conf->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ ndbrequire(conf->tableId == takeOverPtr.p->toCurrentTabref);
+ ndbrequire(conf->fragId == takeOverPtr.p->toCurrentFragid);
+ ndbrequire(checkNodeAlive(conf->startingNodeId));
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_ACTIVE);
+
+ takeOverPtr.p->startGci = conf->startGci;
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::LOCK_MUTEX;
+
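+ // Take the switch primary mutex before committing the new replica;
+ // switchPrimaryMutex_locked() continues once the lock is granted.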
+ Mutex mutex(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle);
+ Callback c = { safe_cast(&Dbdih::switchPrimaryMutex_locked), takeOverPtr.i };
+ ndbrequire(mutex.lock(c));
+}//Dbdih::execCOPY_ACTIVECONF()
+
+void
+Dbdih::switchPrimaryMutex_locked(Signal* signal, Uint32 toPtrI, Uint32 retVal){
+ jamEntry();
+ ndbrequire(retVal == 0);
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = toPtrI;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::LOCK_MUTEX);
+
+ if (!checkNodeAlive((takeOverPtr.p->toStartingNode))) {
+ // We have mutex
+ Mutex mutex(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle);
+ mutex.unlock(); // Ignore result
+
+ c_createFragmentLock = RNIL;
+ c_CREATE_FRAGREQ_Counter.clearWaitingFor();
+ endTakeOver(takeOverPtr.i);
+ return;
+ }
+
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::COMMIT_CREATE;
+ sendCreateFragReq(signal, takeOverPtr.p->startGci,
+ CreateFragReq::COMMIT_STORED, takeOverPtr.i);
+}
+
+void Dbdih::toCopyCompletedLab(Signal * signal, TakeOverRecordPtr takeOverPtr)
+{
+ signal->theData[0] = NDB_LE_NR_CopyFragsCompleted;
+ signal->theData[1] = takeOverPtr.p->toStartingNode;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ c_lcpState.immediateLcpStart = true;
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::WAIT_LCP;
+
+ /*-----------------------------------------------------------------------*/
+ /* NOW WE CAN ALLOW THE NEW NODE TO PARTICIPATE IN LOCAL CHECKPOINTS. */
+ /* WHEN THE FIRST LOCAL CHECKPOINT IS READY WE DECLARE THE TAKE OVER AS */
+ /* COMPLETED. SINCE LOCAL CHECKPOINTS HAVE BEEN BLOCKED DURING THE COPY */
+ /* PROCESS WE MUST ALSO START A NEW LOCAL CHECKPOINT PROCESS BY ENSURING */
+ /* THAT IT LOOKS LIKE IT IS TIME FOR A NEW LOCAL CHECKPOINT AND BY */
+ /* UNBLOCKING THE LOCAL CHECKPOINT AGAIN. */
+ /* --------------------------------------------------------------------- */
+}//Dbdih::toCopyCompletedLab()
+
+void Dbdih::sendEndTo(Signal* signal, Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ CRASH_INSERTION(7156);
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
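+ // Only one END_TOREQ round may be outstanding at a time. If the lock
+ // is held (or an error insert simulates that), park the request and
+ // retry it through a delayed CONTINUEB signal.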
+ if ((c_endToLock != RNIL) || (ERROR_INSERTED(7164))) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_ENDING;
+ signal->theData[0] = DihContinueB::ZSEND_END_TO;
+ signal->theData[1] = takeOverPtrI;
+ signal->theData[2] = takeOverPtr.p->toStartingNode;
+ signal->theData[3] = takeOverPtr.p->toFailedNode;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 30, 4);
+ return;
+ }//if
+ c_endToLock = takeOverPtr.i;
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::ENDING;
+ EndToReq * const req = (EndToReq *)&signal->theData[0];
+ req->userPtr = takeOverPtr.i;
+ req->userRef = reference();
+ req->startingNodeId = takeOverPtr.p->toStartingNode;
+ sendLoopMacro(END_TOREQ, sendEND_TOREQ);
+}//Dbdih::sendEndTo()
+
+void Dbdih::execEND_TOREQ(Signal* signal)
+{
+ jamEntry();
+ const EndToReq * const req = (EndToReq *)&signal->theData[0];
+ BlockReference ref = req->userRef;
+ Uint32 startingNodeId = req->startingNodeId;
+
+ CRASH_INSERTION(7144);
+ RETURN_IF_NODE_NOT_ALIVE(startingNodeId);
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = req->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ ndbrequire(startingNodeId == takeOverPtr.p->toStartingNode);
+ takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_IDLE;
+
+ if (!isMaster()) {
+ jam();
+ endTakeOver(takeOverPtr.i);
+ }//if
+
+ EndToConf * const conf = (EndToConf *)&signal->theData[0];
+ conf->userPtr = takeOverPtr.i;
+ conf->sendingNodeId = cownNodeId;
+ conf->startingNodeId = startingNodeId;
+ sendSignal(ref, GSN_END_TOCONF, signal, EndToConf::SignalLength, JBB);
+}//Dbdih::execEND_TOREQ()
+
+void Dbdih::execEND_TOCONF(Signal* signal)
+{
+ const EndToConf * const conf = (EndToConf *)&signal->theData[0];
+ jamEntry();
+
+ const Uint32 nodeId = conf->startingNodeId;
+ CRASH_INSERTION(7145);
+
+ RETURN_IF_NODE_NOT_ALIVE(nodeId);
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = conf->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::ENDING);
+ ndbrequire(nodeId == takeOverPtr.p->toStartingNode);
+
+ receiveLoopMacro(END_TOREQ, conf->sendingNodeId);
+ CRASH_INSERTION(7146);
+ c_endToLock = RNIL;
+
+ /* -----------------------------------------------------------------------*/
+ /* WE HAVE FINALLY COMPLETED THE TAKE OVER. WE RESET THE STATUS AND CHECK*/
+ /* IF ANY MORE TAKE OVERS ARE NEEDED AT THE MOMENT. */
+ /* FIRST WE CHECK IF A RESTART IS ONGOING. IN THAT CASE WE RESTART PHASE */
+ /* 4 AND CHECK IF ANY MORE TAKE OVERS ARE NEEDED BEFORE WE START NDB */
+ /* CLUSTER. THIS CAN ONLY HAPPEN IN A SYSTEM RESTART. */
+ /* ---------------------------------------------------------------------- */
+ if (takeOverPtr.p->toNodeRestart) {
+ jam();
+ /* ----------------------------------------------------------------------*/
+ /* THE TAKE OVER NODE WAS A STARTING NODE. WE WILL SEND START_COPYCONF */
+ /* TO THE STARTING NODE SUCH THAT THE NODE CAN COMPLETE THE START-UP. */
+ /* --------------------------------------------------------------------- */
+ BlockReference ref = calcDihBlockRef(takeOverPtr.p->toStartingNode);
+ signal->theData[0] = takeOverPtr.p->toStartingNode;
+ sendSignal(ref, GSN_START_COPYCONF, signal, 1,JBB);
+ }//if
+ endTakeOver(takeOverPtr.i);
+
+ ndbout_c("2 - endTakeOver");
+ if (cstartPhase == ZNDB_SPH4) {
+ jam();
+ ndbrequire(false);
+ if (anyActiveTakeOver()) {
+ jam();
+ ndbout_c("4 - anyActiveTakeOver == true");
+ return;
+ }//if
+ ndbout_c("5 - anyActiveTakeOver == false -> ndbsttorry10Lab");
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ }//if
+ checkStartTakeOver(signal);
+}//Dbdih::execEND_TOCONF()
+
+void Dbdih::allocateTakeOver(TakeOverRecordPtr& takeOverPtr)
+{
+ if (isMaster()) {
+ jam();
+ //--------------------------------------------
+ // Master already seized the take over record.
+ //--------------------------------------------
+ return;
+ }//if
+ if (takeOverPtr.i == cfirstfreeTakeOver) {
+ jam();
+ seizeTakeOver(takeOverPtr);
+ } else {
+ TakeOverRecordPtr nextTakeOverptr;
+ TakeOverRecordPtr prevTakeOverptr;
+ nextTakeOverptr.i = takeOverPtr.p->nextTakeOver;
+ prevTakeOverptr.i = takeOverPtr.p->prevTakeOver;
+ if (prevTakeOverptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(prevTakeOverptr, MAX_NDB_NODES, takeOverRecord);
+ prevTakeOverptr.p->nextTakeOver = nextTakeOverptr.i;
+ }//if
+ if (nextTakeOverptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(nextTakeOverptr, MAX_NDB_NODES, takeOverRecord);
+ nextTakeOverptr.p->prevTakeOver = prevTakeOverptr.i;
+ }//if
+ }//if
+}//Dbdih::allocateTakeOver()
+
+void Dbdih::seizeTakeOver(TakeOverRecordPtr& takeOverPtr)
+{
+ TakeOverRecordPtr nextTakeOverptr;
+ ndbrequire(cfirstfreeTakeOver != RNIL);
+ takeOverPtr.i = cfirstfreeTakeOver;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ cfirstfreeTakeOver = takeOverPtr.p->nextTakeOver;
+ nextTakeOverptr.i = takeOverPtr.p->nextTakeOver;
+ if (nextTakeOverptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(nextTakeOverptr, MAX_NDB_NODES, takeOverRecord);
+ nextTakeOverptr.p->prevTakeOver = RNIL;
+ }//if
+ takeOverPtr.p->nextTakeOver = RNIL;
+ takeOverPtr.p->prevTakeOver = RNIL;
+}//Dbdih::seizeTakeOver()
+
+void Dbdih::endTakeOver(Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = takeOverPtrI;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ releaseTakeOver(takeOverPtrI);
+ if ((takeOverPtr.p->toMasterStatus != TakeOverRecord::IDLE) &&
+ (takeOverPtr.p->toMasterStatus != TakeOverRecord::TO_WAIT_START_TAKE_OVER)) {
+ jam();
+ NodeGroupRecordPtr NGPtr;
+ NodeRecordPtr nodePtr;
+ nodePtr.i = takeOverPtr.p->toStartingNode;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ NGPtr.i = nodePtr.p->nodeGroup;
+ ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
+ NGPtr.p->activeTakeOver = false;
+ }//if
+ setAllowNodeStart(takeOverPtr.p->toStartingNode, true);
+ initTakeOver(takeOverPtr);
+}//Dbdih::endTakeOver()
+
+void Dbdih::releaseTakeOver(Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = takeOverPtrI;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ takeOverPtr.p->nextTakeOver = cfirstfreeTakeOver;
+ cfirstfreeTakeOver = takeOverPtr.i;
+}//Dbdih::releaseTakeOver()
+
+void Dbdih::initTakeOver(TakeOverRecordPtr takeOverPtr)
+{
+ takeOverPtr.p->toCopyNode = RNIL;
+ takeOverPtr.p->toCurrentFragid = RNIL;
+ takeOverPtr.p->toCurrentReplica = RNIL;
+ takeOverPtr.p->toCurrentTabref = RNIL;
+ takeOverPtr.p->toFailedNode = RNIL;
+ takeOverPtr.p->toStartingNode = RNIL;
+ takeOverPtr.p->prevTakeOver = RNIL;
+ takeOverPtr.p->nextTakeOver = RNIL;
+ takeOverPtr.p->toNodeRestart = false;
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::IDLE;
+ takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_IDLE;
+}//Dbdih::initTakeOver()
+
+bool Dbdih::anyActiveTakeOver()
+{
+ TakeOverRecordPtr takeOverPtr;
+ for (takeOverPtr.i = 0; takeOverPtr.i < MAX_NDB_NODES; takeOverPtr.i++) {
+ ptrAss(takeOverPtr, takeOverRecord);
+ if (takeOverPtr.p->toMasterStatus != TakeOverRecord::IDLE) {
+ jam();
+ return true;
+ }//if
+ }//for
+ return false;
+}//Dbdih::anyActiveTakeOver()
+
+/*****************************************************************************/
+/* ------------------------------------------------------------------------- */
+/* WE HAVE BEEN REQUESTED TO PERFORM A SYSTEM RESTART. WE START BY */
+/* READING THE GCI FILES. THIS REQUEST WILL ONLY BE SENT TO THE MASTER */
+/* DIH. THAT MEANS WE HAVE TO REPLICATE THE INFORMATION WE READ FROM */
+/* OUR FILES TO ENSURE THAT ALL NODES HAVE THE SAME DISTRIBUTION */
+/* INFORMATION. */
+/* ------------------------------------------------------------------------- */
+/*****************************************************************************/
+void Dbdih::readGciFileLab(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ filePtr.i = crestartInfoFile[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ filePtr.p->reqStatus = FileRecord::OPENING_GCP;
+
+ openFileRo(signal, filePtr);
+}//Dbdih::readGciFileLab()
+
+void Dbdih::openingGcpLab(Signal* signal, FileRecordPtr filePtr)
+{
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE SUCCESSFULLY OPENED A FILE CONTAINING INFORMATION ABOUT */
+ /* THE GLOBAL CHECKPOINTS THAT ARE POSSIBLE TO RESTART. */
+ /* ----------------------------------------------------------------------- */
+ readRestorableGci(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::READING_GCP;
+}//Dbdih::openingGcpLab()
+
+void Dbdih::readingGcpLab(Signal* signal, FileRecordPtr filePtr)
+{
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE NOW SUCCESSFULLY MANAGED TO READ IN THE GLOBAL CHECKPOINT */
+ /* INFORMATION FROM FILE. LATER WE WILL ADD SOME FUNCTIONALITY THAT */
+ /* CHECKS THE RESTART TIMERS TO DEDUCE FROM WHERE TO RESTART. */
+ /* NOW WE WILL SIMPLY RESTART FROM THE NEWEST GLOBAL CHECKPOINT */
+ /* POSSIBLE TO RESTORE. */
+ /* */
+ /* BEFORE WE INVOKE DICT WE NEED TO COPY CRESTART_INFO TO ALL NODES. */
+ /* WE ALSO COPY TO OUR OWN NODE. TO ENABLE US TO DO THIS PROPERLY WE */
+ /* START BY CLOSING THIS FILE. */
+ /* ----------------------------------------------------------------------- */
+ closeFile(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::CLOSING_GCP;
+}//Dbdih::readingGcpLab()
+
+void Dbdih::closingGcpLab(Signal* signal, FileRecordPtr filePtr)
+{
+ if (Sysfile::getInitialStartOngoing(SYSFILE->systemRestartBits) == false){
+ jam();
+ selectMasterCandidateAndSend(signal);
+ return;
+ } else {
+ jam();
+ sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB);
+ return;
+ }//if
+}//Dbdih::closingGcpLab()
+
+/* ------------------------------------------------------------------------- */
+/* SELECT THE MASTER CANDIDATE TO BE USED IN SYSTEM RESTARTS. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::selectMasterCandidateAndSend(Signal* signal)
+{
+ Uint32 gci = 0;
+ Uint32 masterCandidateId = 0;
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (SYSFILE->lastCompletedGCI[nodePtr.i] > gci) {
+ jam();
+ masterCandidateId = nodePtr.i;
+ gci = SYSFILE->lastCompletedGCI[nodePtr.i];
+ }//if
+ }//for
+ ndbrequire(masterCandidateId != 0);
+ setNodeGroups();
+ signal->theData[0] = masterCandidateId;
+ signal->theData[1] = gci;
+ sendSignal(cntrlblockref, GSN_DIH_RESTARTCONF, signal, 2, JBB);
+
+ Uint32 node_groups[MAX_NDB_NODES];
+ memset(node_groups, 0, sizeof(node_groups));
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ const Uint32 ng = Sysfile::getNodeGroup(nodePtr.i, SYSFILE->nodeGroups);
+ if(ng != NO_NODE_GROUP_ID){
+ ndbrequire(ng < MAX_NDB_NODES);
+ node_groups[ng]++;
+ }
+ }
+
+ for (nodePtr.i = 0; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ Uint32 count = node_groups[nodePtr.i];
+ if(count != 0 && count != cnoReplicas){
+ char buf[255];
+ BaseString::snprintf(buf, sizeof(buf),
+ "Illegal configuration change."
+ " Initial start needs to be performed "
+ " when changing no of replicas (%d != %d)",
+ node_groups[nodePtr.i], cnoReplicas);
+ progError(__LINE__,
+ ERR_INVALID_CONFIG,
+ buf);
+ }
+ }
+}//Dbdih::selectMasterCandidateAndSend()
+
+/* ------------------------------------------------------------------------- */
+/* ERROR HANDLING DURING READING RESTORABLE GCI FROM FILE. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::openingGcpErrorLab(Signal* signal, FileRecordPtr filePtr)
+{
+ filePtr.p->fileStatus = FileRecord::CRASHED;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ if (crestartInfoFile[0] == filePtr.i) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* THE FIRST FILE WAS NOT ABLE TO BE OPENED. SET STATUS TO CRASHED AND */
+ /* TRY OPEN THE NEXT FILE. */
+ /* --------------------------------------------------------------------- */
+ filePtr.i = crestartInfoFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ openFileRo(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_GCP;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* WE FAILED IN OPENING THE SECOND FILE. BOTH FILES WERE CORRUPTED. WE */
+ /* CANNOT CONTINUE THE RESTART IN THIS CASE. TELL NDBCNTR OF OUR */
+ /* FAILURE. */
+ /*---------------------------------------------------------------------- */
+ sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB);
+ return;
+ }//if
+}//Dbdih::openingGcpErrorLab()
+
+void Dbdih::readingGcpErrorLab(Signal* signal, FileRecordPtr filePtr)
+{
+ filePtr.p->fileStatus = FileRecord::CRASHED;
+ /* ----------------------------------------------------------------------- */
+ /* WE FAILED IN READING THE FILE AS WELL. WE WILL CLOSE THIS FILE. */
+ /* ----------------------------------------------------------------------- */
+ closeFile(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::CLOSING_GCP_CRASH;
+}//Dbdih::readingGcpErrorLab()
+
+void Dbdih::closingGcpCrashLab(Signal* signal, FileRecordPtr filePtr)
+{
+ if (crestartInfoFile[0] == filePtr.i) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* ERROR IN FIRST FILE, TRY THE SECOND FILE. */
+ /* --------------------------------------------------------------------- */
+ filePtr.i = crestartInfoFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_GCP;
+ return;
+ }//if
+ /* ----------------------------------------------------------------------- */
+ /* WE DISCOVERED A FAILURE WITH THE SECOND FILE AS WELL. THIS IS A */
+ /* SERIOUS PROBLEM. REPORT FAILURE TO NDBCNTR. */
+ /* ----------------------------------------------------------------------- */
+ sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB);
+}//Dbdih::closingGcpCrashLab()
+
+/*****************************************************************************/
+/* ------------------------------------------------------------------------- */
+/* THIS IS AN INITIAL RESTART. WE WILL CREATE THE TWO FILES DESCRIBING */
+/* THE GLOBAL CHECKPOINTS THAT ARE RESTORABLE. */
+/* ------------------------------------------------------------------------- */
+/*****************************************************************************/
+void Dbdih::initGciFilesLab(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ filePtr.i = crestartInfoFile[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ createFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::CREATING_GCP;
+}//Dbdih::initGciFilesLab()
+
+/* ------------------------------------------------------------------------- */
+/* GLOBAL CHECKPOINT FILE HAVE BEEN SUCCESSFULLY CREATED. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::creatingGcpLab(Signal* signal, FileRecordPtr filePtr)
+{
+ if (filePtr.i == crestartInfoFile[0]) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* IF CREATED FIRST THEN ALSO CREATE THE SECOND FILE. */
+ /* --------------------------------------------------------------------- */
+ filePtr.i = crestartInfoFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ createFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::CREATING_GCP;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* BOTH FILES HAVE BEEN CREATED. NOW WRITE THE INITIAL DATA TO BOTH */
+ /* OF THE FILES. */
+ /* --------------------------------------------------------------------- */
+ filePtr.i = crestartInfoFile[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ writeRestorableGci(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::WRITE_INIT_GCP;
+ }//if
+}//Dbdih::creatingGcpLab()
+
+/* ------------------------------------------------------------------------- */
+/* WE HAVE SUCCESSFULLY WRITTEN A GCI FILE. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::writeInitGcpLab(Signal* signal, FileRecordPtr filePtr)
+{
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ if (filePtr.i == crestartInfoFile[0]) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* WE HAVE WRITTEN THE FIRST FILE NOW ALSO WRITE THE SECOND FILE. */
+ /* --------------------------------------------------------------------- */
+ filePtr.i = crestartInfoFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ writeRestorableGci(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::WRITE_INIT_GCP;
+ } else {
+ /* --------------------------------------------------------------------- */
+ /* WE HAVE WRITTEN BOTH FILES. LEAVE BOTH FILES OPEN AND CONFIRM OUR */
+ /* PART OF THE INITIAL START. */
+ /* --------------------------------------------------------------------- */
+ if (isMaster()) {
+ jam();
+ /*---------------------------------------------------------------------*/
+ // IN MASTER NODES THE START REQUEST IS RECEIVED FROM NDBCNTR AND WE MUST
+ // RESPOND WHEN COMPLETED.
+ /*---------------------------------------------------------------------*/
+ signal->theData[0] = reference();
+ sendSignal(cndbStartReqBlockref, GSN_NDB_STARTCONF, signal, 1, JBB);
+ } else {
+ jam();
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ }//if
+ }//if
+}//Dbdih::writeInitGcpLab()
+
+/*****************************************************************************/
+/* ********** NODES DELETION MODULE *************/
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/* LOGIC FOR NODE FAILURE */
+/*---------------------------------------------------------------------------*/
+void Dbdih::execNODE_FAILREP(Signal* signal)
+{
+ Uint32 i;
+ Uint32 failedNodes[MAX_NDB_NODES];
+ jamEntry();
+ NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
+
+ cfailurenr = nodeFail->failNo;
+ Uint32 newMasterId = nodeFail->masterNodeId;
+ const Uint32 noOfFailedNodes = nodeFail->noOfNodes;
+
+ /*-------------------------------------------------------------------------*/
+ // The first step is to convert from a bit mask to an array of failed nodes.
+ /*-------------------------------------------------------------------------*/
+ Uint32 index = 0;
+ for (i = 1; i < MAX_NDB_NODES; i++) {
+ jam();
+ if(NodeBitmask::get(nodeFail->theNodes, i)){
+ jam();
+ failedNodes[index] = i;
+ index++;
+ }//if
+ }//for
+ ndbrequire(noOfFailedNodes == index);
+ ndbrequire(noOfFailedNodes - 1 < MAX_NDB_NODES);
+
+ /*-------------------------------------------------------------------------*/
+ // The second step is to update the node status of the failed nodes, remove
+ // them from the alive node list and put them into the dead node list. Also
+ // update the number of nodes on-line.
+ // We also set certain state variables ensuring that the node no longer is
+ // used in transactions and also mark that we received this signal.
+ /*-------------------------------------------------------------------------*/
+ for (i = 0; i < noOfFailedNodes; i++) {
+ jam();
+ NodeRecordPtr TNodePtr;
+ TNodePtr.i = failedNodes[i];
+ ptrCheckGuard(TNodePtr, MAX_NDB_NODES, nodeRecord);
+ TNodePtr.p->useInTransactions = false;
+ TNodePtr.p->m_inclDihLcp = false;
+ TNodePtr.p->recNODE_FAILREP = ZTRUE;
+ if (TNodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ con_lineNodes--;
+ TNodePtr.p->nodeStatus = NodeRecord::DIED_NOW;
+ removeAlive(TNodePtr);
+ insertDeadNode(TNodePtr);
+ }//if
+ }//for
+
+ /*-------------------------------------------------------------------------*/
+ // Verify that we can continue to operate the cluster. If we cannot we will
+ // not return from checkEscalation.
+ /*-------------------------------------------------------------------------*/
+ checkEscalation();
+
+ /*------------------------------------------------------------------------*/
+ // Verify that a starting node has also crashed. Reset the node start record.
+ /*-------------------------------------------------------------------------*/
+ if (c_nodeStartMaster.startNode != RNIL) {
+ ndbrequire(getNodeStatus(c_nodeStartMaster.startNode)!= NodeRecord::ALIVE);
+ }//if
+
+ /*--------------------------------------------------*/
+ /* */
+ /* WE CHANGE THE REFERENCE TO MASTER DIH */
+ /* BLOCK AND POINTER AT THIS PLACE IN THE CODE*/
+ /*--------------------------------------------------*/
+ Uint32 oldMasterId = cmasterNodeId;
+ BlockReference oldMasterRef = cmasterdihref;
+ cmasterdihref = calcDihBlockRef(newMasterId);
+ cmasterNodeId = newMasterId;
+
+ const bool masterTakeOver = (oldMasterId != newMasterId);
+
+ for(i = 0; i < noOfFailedNodes; i++) {
+ NodeRecordPtr failedNodePtr;
+ failedNodePtr.i = failedNodes[i];
+ ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
+ Uint32 activeTakeOverPtr = findTakeOver(failedNodes[i]);
+ if (oldMasterRef == reference()) {
+ /*-------------------------------------------------------*/
+ // Functions that need to be called only for master nodes.
+ /*-------------------------------------------------------*/
+ checkCopyTab(failedNodePtr);
+ checkStopPermMaster(signal, failedNodePtr);
+ checkWaitGCPMaster(signal, failedNodes[i]);
+ checkTakeOverInMasterAllNodeFailure(signal, failedNodePtr);
+ checkTakeOverInMasterCopyNodeFailure(signal, failedNodePtr.i);
+ checkTakeOverInMasterStartNodeFailure(signal, activeTakeOverPtr);
+ checkGcpOutstanding(signal, failedNodePtr.i);
+ } else {
+ jam();
+ /*-----------------------------------------------------------*/
+ // Functions that need to be called only for nodes that were
+ // not master before these failures.
+ /*-----------------------------------------------------------*/
+ checkStopPermProxy(signal, failedNodes[i]);
+ checkWaitGCPProxy(signal, failedNodes[i]);
+ if (isMaster()) {
+ /*-----------------------------------------------------------*/
+ // We take over as master since old master has failed
+ /*-----------------------------------------------------------*/
+ handleTakeOverNewMaster(signal, activeTakeOverPtr);
+ } else {
+ /*-----------------------------------------------------------*/
+ // We are not master and will not become master.
+ /*-----------------------------------------------------------*/
+ checkTakeOverInNonMasterStartNodeFailure(signal, activeTakeOverPtr);
+ }//if
+ }//if
+ /*--------------------------------------------------*/
+ // Functions that need to be called for all nodes.
+ /*--------------------------------------------------*/
+ checkStopMe(signal, failedNodePtr);
+ failedNodeLcpHandling(signal, failedNodePtr);
+ checkWaitDropTabFailedLqh(signal, failedNodePtr.i, 0); // 0 = start w/ tab 0
+ startRemoveFailedNode(signal, failedNodePtr);
+
+ /**
+ * This is the last function called
+ * It modifies failedNodePtr.p->nodeStatus
+ */
+ failedNodeSynchHandling(signal, failedNodePtr);
+ }//for
+
+ if(masterTakeOver){
+ jam();
+ startLcpMasterTakeOver(signal, oldMasterId);
+ startGcpMasterTakeOver(signal, oldMasterId);
+
+ if(getNodeState().getNodeRestartInProgress()){
+ jam();
+ progError(__LINE__,
+ ERR_SYSTEM_ERROR,
+ "Unhandle master failure during node restart");
+ }
+ }
+
+
+ if (isMaster()) {
+ jam();
+ setNodeRestartInfoBits();
+ }//if
+}//Dbdih::execNODE_FAILREP()
+
+void Dbdih::checkCopyTab(NodeRecordPtr failedNodePtr)
+{
+ jam();
+
+ if(c_nodeStartMaster.startNode != failedNodePtr.i){
+ jam();
+ return;
+ }
+
+ switch(c_nodeStartMaster.m_outstandingGsn){
+ case GSN_COPY_TABREQ:
+ jam();
+ ndbrequire(c_COPY_TABREQ_Counter.isWaitingFor(failedNodePtr.i));
+ releaseTabPages(failedNodePtr.p->activeTabptr);
+ c_COPY_TABREQ_Counter.clearWaitingFor(failedNodePtr.i);
+ c_nodeStartMaster.wait = ZFALSE;
+ break;
+ case GSN_START_INFOREQ:
+ case GSN_START_PERMCONF:
+ case GSN_DICTSTARTREQ:
+ case GSN_START_MECONF:
+ jam();
+ break;
+ default:
+ ndbout_c("outstanding gsn: %s(%d)",
+ getSignalName(c_nodeStartMaster.m_outstandingGsn),
+ c_nodeStartMaster.m_outstandingGsn);
+ ndbrequire(false);
+ }
+
+ nodeResetStart();
+}//Dbdih::checkCopyTab()
+
+void Dbdih::checkStopMe(Signal* signal, NodeRecordPtr failedNodePtr)
+{
+ jam();
+ if (c_STOP_ME_REQ_Counter.isWaitingFor(failedNodePtr.i)){
+ jam();
+ ndbrequire(c_stopMe.clientRef != 0);
+ StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0];
+ stopMeConf->senderRef = calcDihBlockRef(failedNodePtr.i);
+ stopMeConf->senderData = c_stopMe.clientData;
+ sendSignal(reference(), GSN_STOP_ME_CONF, signal,
+ StopMeConf::SignalLength, JBB);
+ }//if
+}//Dbdih::checkStopMe()
+
+void Dbdih::checkStopPermMaster(Signal* signal, NodeRecordPtr failedNodePtr)
+{
+ DihSwitchReplicaRef* const ref = (DihSwitchReplicaRef*)&signal->theData[0];
+ jam();
+ if (c_DIH_SWITCH_REPLICA_REQ_Counter.isWaitingFor(failedNodePtr.i)){
+ jam();
+ ndbrequire(c_stopPermMaster.clientRef != 0);
+ ref->senderNode = failedNodePtr.i;
+ ref->errorCode = StopPermRef::NF_CausedAbortOfStopProcedure;
+ sendSignal(reference(), GSN_DIH_SWITCH_REPLICA_REF, signal,
+ DihSwitchReplicaRef::SignalLength, JBB);
+ return;
+ }//if
+}//Dbdih::checkStopPermMaster()
+
+void Dbdih::checkStopPermProxy(Signal* signal, NodeId failedNodeId)
+{
+ jam();
+ if(c_stopPermProxy.clientRef != 0 &&
+ refToNode(c_stopPermProxy.masterRef) == failedNodeId){
+
+ /**
+ * The master has failed; report this to the proxy client
+ */
+ jam();
+ StopPermRef* const ref = (StopPermRef*)&signal->theData[0];
+
+ ref->senderData = c_stopPermProxy.clientData;
+ ref->errorCode = StopPermRef::NF_CausedAbortOfStopProcedure;
+ sendSignal(c_stopPermProxy.clientRef, GSN_STOP_PERM_REF, signal, 2, JBB);
+ c_stopPermProxy.clientRef = 0;
+ }//if
+}//Dbdih::checkStopPermProxy()
+
+void
+Dbdih::checkTakeOverInMasterAllNodeFailure(Signal* signal,
+ NodeRecordPtr failedNodePtr)
+{
+ //------------------------------------------------------------------------
+ // This code is used to handle the failure of "all" nodes during the
+ // take over when "all" nodes are informed about state changes in
+ // the take over protocol.
+ //--------------------------------------------------------------------------
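+ // For each protocol round still waiting for the failed node we
+ // fabricate the CONF it would have sent, addressed to ourselves, so
+ // that the signal counters can complete.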
+ if (c_START_TOREQ_Counter.isWaitingFor(failedNodePtr.i)){
+ jam();
+ StartToConf * const conf = (StartToConf *)&signal->theData[0];
+ conf->userPtr = c_startToLock;
+ conf->sendingNodeId = failedNodePtr.i;
+ conf->startingNodeId = getStartNode(c_startToLock);
+ sendSignal(reference(), GSN_START_TOCONF, signal,
+ StartToConf::SignalLength, JBB);
+ }//if
+ if (c_CREATE_FRAGREQ_Counter.isWaitingFor(failedNodePtr.i)){
+ jam();
+ CreateFragConf * const conf = (CreateFragConf *)&signal->theData[0];
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = c_createFragmentLock;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ conf->userPtr = takeOverPtr.i;
+ conf->tableId = takeOverPtr.p->toCurrentTabref;
+ conf->fragId = takeOverPtr.p->toCurrentFragid;
+ conf->sendingNodeId = failedNodePtr.i;
+ conf->startingNodeId = takeOverPtr.p->toStartingNode;
+ sendSignal(reference(), GSN_CREATE_FRAGCONF, signal,
+ CreateFragConf::SignalLength, JBB);
+ }//if
+ if (c_UPDATE_TOREQ_Counter.isWaitingFor(failedNodePtr.i)){
+ jam();
+ UpdateToConf * const conf = (UpdateToConf *)&signal->theData[0];
+ conf->userPtr = c_updateToLock;
+ conf->sendingNodeId = failedNodePtr.i;
+ conf->startingNodeId = getStartNode(c_updateToLock);
+ sendSignal(reference(), GSN_UPDATE_TOCONF, signal,
+ UpdateToConf::SignalLength, JBB);
+ }//if
+
+ if (c_END_TOREQ_Counter.isWaitingFor(failedNodePtr.i)){
+ jam();
+ EndToConf * const conf = (EndToConf *)&signal->theData[0];
+ conf->userPtr = c_endToLock;
+ conf->sendingNodeId = failedNodePtr.i;
+ conf->startingNodeId = getStartNode(c_endToLock);
+ sendSignal(reference(), GSN_END_TOCONF, signal,
+ EndToConf::SignalLength, JBB);
+ }//if
+}//Dbdih::checkTakeOverInMasterAllNodeFailure()
+
+void Dbdih::checkTakeOverInMasterCopyNodeFailure(Signal* signal,
+ Uint32 failedNodeId)
+{
+ //---------------------------------------------------------------------------
+ // This code is used to handle failure of the copying node during a take over
+ //---------------------------------------------------------------------------
+ TakeOverRecordPtr takeOverPtr;
+ for (Uint32 i = 0; i < MAX_NDB_NODES; i++) {
+ jam();
+ takeOverPtr.i = i;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ if ((takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG) &&
+ (takeOverPtr.p->toCopyNode == failedNodeId)) {
+ jam();
+ /**
+ * The copying node failed but the system is still operational.
+ * We restart the copy process by selecting a new copy node.
+ * We do not need to add a fragment however since it is already added.
+ * We start again from the prepare create fragment phase.
+ */
+ prepareSendCreateFragReq(signal, takeOverPtr.i);
+ }//if
+ }//for
+}//Dbdih::checkTakeOverInMasterCopyNodeFailure()
+
+void Dbdih::checkTakeOverInMasterStartNodeFailure(Signal* signal,
+ Uint32 takeOverPtrI)
+{
+ jam();
+ if (takeOverPtrI == RNIL) {
+ jam();
+ return;
+ }
+ //-----------------------------------------------------------------------
+ // We are the master and the starting node has failed during a take over.
+ // We need to handle this failure in different ways depending on the state.
+ //-----------------------------------------------------------------------
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = takeOverPtrI;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ bool ok = false;
+ switch (takeOverPtr.p->toMasterStatus) {
+ case TakeOverRecord::IDLE:
+ //-----------------------------------------------------------------------
+ // The state cannot be idle when it has a starting node.
+ //-----------------------------------------------------------------------
+ ndbrequire(false);
+ break;
+ case TakeOverRecord::TO_WAIT_START_TAKE_OVER:
+ jam();
+ case TakeOverRecord::TO_START_COPY:
+ jam();
+ case TakeOverRecord::TO_START_COPY_ONGOING:
+ jam();
+ case TakeOverRecord::TO_WAIT_START:
+ jam();
+ case TakeOverRecord::TO_WAIT_PREPARE_CREATE:
+ jam();
+ case TakeOverRecord::TO_WAIT_UPDATE_TO:
+ jam();
+ case TakeOverRecord::TO_WAIT_COMMIT_CREATE:
+ jam();
+ case TakeOverRecord::TO_END_COPY:
+ jam();
+ case TakeOverRecord::TO_END_COPY_ONGOING:
+ jam();
+ case TakeOverRecord::TO_WAIT_ENDING:
+ jam();
+ //-----------------------------------------------------------------------
+ // We will not do anything since an internal signal process is outstanding.
+ // When the signal arrives the take over will be released.
+ //-----------------------------------------------------------------------
+ ok = true;
+ break;
+ case TakeOverRecord::STARTING:
+ jam();
+ ok = true;
+ c_startToLock = RNIL;
+ c_START_TOREQ_Counter.clearWaitingFor();
+ endTakeOver(takeOverPtr.i);
+ break;
+ case TakeOverRecord::TO_UPDATE_TO:
+ jam();
+ ok = true;
+ c_updateToLock = RNIL;
+ c_UPDATE_TOREQ_Counter.clearWaitingFor();
+ endTakeOver(takeOverPtr.i);
+ break;
+ case TakeOverRecord::ENDING:
+ jam();
+ ok = true;
+ c_endToLock = RNIL;
+ c_END_TOREQ_Counter.clearWaitingFor();
+ endTakeOver(takeOverPtr.i);
+ break;
+ case TakeOverRecord::COMMIT_CREATE:
+ ok = true;
+ jam();
+ {// We have mutex
+ Mutex m(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle);
+ m.unlock(); // Ignore result
+ }
+ // Fall through
+ case TakeOverRecord::PREPARE_CREATE:
+ ok = true;
+ jam();
+ c_createFragmentLock = RNIL;
+ c_CREATE_FRAGREQ_Counter.clearWaitingFor();
+ endTakeOver(takeOverPtr.i);
+ break;
+ case TakeOverRecord::LOCK_MUTEX:
+ ok = true;
+ jam();
+ // Lock mutex will return and do endTakeOver
+ break;
+
+ //-----------------------------------------------------------------------
+ // Signals are outstanding to external nodes. These signals carry the node
+ // id of the starting node and will not use the take over record if the
+ // starting node has failed.
+ //-----------------------------------------------------------------------
+ case TakeOverRecord::COPY_FRAG:
+ ok = true;
+ jam();
+ //-----------------------------------------------------------------------
+ // The starting node will discover the problem. We will receive either
+ // COPY_FRAGREQ or COPY_FRAGCONF and then we can release the take over
+ // record and end the process. If the copying node should also die then
+ // we will try to send prepare create fragment and will then discover
+ // that the starting node has failed.
+ //-----------------------------------------------------------------------
+ break;
+ case TakeOverRecord::COPY_ACTIVE:
+ ok = true;
+ jam();
+ //-----------------------------------------------------------------------
+ // In this state we are waiting for a signal from the starting node. Thus we
+ // can release the take over record and end the process.
+ //-----------------------------------------------------------------------
+ endTakeOver(takeOverPtr.i);
+ break;
+ case TakeOverRecord::WAIT_LCP:
+ ok = true;
+ jam();
+ //-----------------------------------------------------------------------
+ // The starting node failed while we were waiting for its first local
+ // checkpoint to complete. Simply release the take over record.
+ //-----------------------------------------------------------------------
+ endTakeOver(takeOverPtr.i);
+ break;
+ /**
+ * The following are states that it should not be possible to "be" in
+ */
+ case TakeOverRecord::SELECTING_NEXT:
+ jam();
+ case TakeOverRecord::TO_COPY_COMPLETED:
+ jam();
+ ndbrequire(false);
+ }
+ if(!ok){
+ jamLine(takeOverPtr.p->toMasterStatus);
+ ndbrequire(ok);
+ }
+}//Dbdih::checkTakeOverInMasterStartNodeFailure()
+
+void Dbdih::checkTakeOverInNonMasterStartNodeFailure(Signal* signal,
+ Uint32 takeOverPtrI)
+{
+ jam();
+ if (takeOverPtrI == RNIL) {
+ jam();
+ return;
+ }
+ //-----------------------------------------------------------------------
+ // We are not master and not taking over as master. A take over was ongoing
+ // but the starting node has now failed. Handle it according to the state
+ // of the take over.
+ //-----------------------------------------------------------------------
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = takeOverPtrI;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ bool ok = false;
+ switch (takeOverPtr.p->toSlaveStatus) {
+ case TakeOverRecord::TO_SLAVE_IDLE:
+ ndbrequire(false);
+ break;
+ case TakeOverRecord::TO_SLAVE_STARTED:
+ jam();
+ case TakeOverRecord::TO_SLAVE_CREATE_PREPARE:
+ jam();
+ case TakeOverRecord::TO_SLAVE_COPY_FRAG_COMPLETED:
+ jam();
+ case TakeOverRecord::TO_SLAVE_CREATE_COMMIT:
+ jam();
+ case TakeOverRecord::TO_SLAVE_COPY_COMPLETED:
+ jam();
+ ok = true;
+ endTakeOver(takeOverPtr.i);
+ break;
+ }//switch
+ if(!ok){
+ jamLine(takeOverPtr.p->toSlaveStatus);
+ ndbrequire(ok);
+ }
+}//Dbdih::checkTakeOverInNonMasterStartNodeFailure()
+
+void Dbdih::failedNodeSynchHandling(Signal* signal,
+ NodeRecordPtr failedNodePtr)
+{
+ jam();
+ /*----------------------------------------------------*/
+ /* INITIALISE THE VARIABLES THAT KEEP TRACK OF */
+ /* WHEN A NODE FAILURE IS COMPLETED. */
+ /*----------------------------------------------------*/
+ failedNodePtr.p->dbdictFailCompleted = ZFALSE;
+ failedNodePtr.p->dbtcFailCompleted = ZFALSE;
+ failedNodePtr.p->dbdihFailCompleted = ZFALSE;
+ failedNodePtr.p->dblqhFailCompleted = ZFALSE;
+
+ failedNodePtr.p->m_NF_COMPLETE_REP.clearWaitingFor();
+
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ /**
+ * We're waiting for nodePtr.i to complete
+ * handling of failedNodePtr.i's death
+ */
+
+ failedNodePtr.p->m_NF_COMPLETE_REP.setWaitingFor(nodePtr.i);
+ } else {
+ jam();
+ if ((nodePtr.p->nodeStatus == NodeRecord::DYING) &&
+ (nodePtr.p->m_NF_COMPLETE_REP.isWaitingFor(failedNodePtr.i))){
+ jam();
+ /*----------------------------------------------------*/
+ /* THE NODE FAILED BEFORE REPORTING THE FAILURE */
+ /* HANDLING COMPLETED ON THIS FAILED NODE. */
+ /* REPORT THAT NODE FAILURE HANDLING WAS */
+ /* COMPLETED ON THE NEW FAILED NODE FOR THIS */
+ /* PARTICULAR OLD FAILED NODE. */
+ /*----------------------------------------------------*/
+ NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0];
+ nf->blockNo = 0;
+ nf->nodeId = failedNodePtr.i;
+ nf->failedNodeId = nodePtr.i;
+ nf->from = __LINE__;
+ sendSignal(reference(), GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+ }//if
+ }//if
+ }//for
+ if (failedNodePtr.p->nodeStatus == NodeRecord::DIED_NOW) {
+ jam();
+ failedNodePtr.p->nodeStatus = NodeRecord::DYING;
+ } else {
+ jam();
+ /*----------------------------------------------------*/
+ // No more processing needed when node not even started
+ // yet. We set the node status to DEAD since we do not
+ // care whether all nodes complete the node failure
+ // handling. The node has not been included in the
+ // node failure protocols.
+ /*----------------------------------------------------*/
+ failedNodePtr.p->nodeStatus = NodeRecord::DEAD;
+ /**-----------------------------------------------------------------------
+ * WE HAVE COMPLETED HANDLING THE NODE FAILURE IN DIH. WE CAN REPORT THIS
+ * TO DIH, WHICH WAITS FOR THE OTHER BLOCKS TO BE CONCLUDED AS WELL.
+ *-----------------------------------------------------------------------*/
+ NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0];
+ nf->blockNo = DBDIH;
+ nf->nodeId = cownNodeId;
+ nf->failedNodeId = failedNodePtr.i;
+ nf->from = __LINE__;
+ sendSignal(reference(), GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+ }//if
+}//Dbdih::failedNodeSynchHandling()
+
+Uint32 Dbdih::findTakeOver(Uint32 failedNodeId)
+{
+ for (Uint32 i = 0; i < MAX_NDB_NODES; i++) {
+ jam();
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = i;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ if (takeOverPtr.p->toStartingNode == failedNodeId) {
+ jam();
+ return i;
+ }//if
+ }//for
+ return RNIL;
+}//Dbdih::findTakeOver()
+
+Uint32 Dbdih::getStartNode(Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = takeOverPtrI;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ return takeOverPtr.p->toStartingNode;
+}//Dbdih::getStartNode()
+
+void Dbdih::failedNodeLcpHandling(Signal* signal, NodeRecordPtr failedNodePtr)
+{
+ jam();
+ const Uint32 nodeId = failedNodePtr.i;
+
+ if (c_lcpState.m_participatingLQH.get(failedNodePtr.i)){
+ /*----------------------------------------------------*/
+ /* THE NODE WAS INVOLVED IN A LOCAL CHECKPOINT. WE */
+ /* MUST UPDATE THE ACTIVE STATUS TO INDICATE THAT */
+ /* THE NODE HAS MISSED A LOCAL CHECKPOINT. */
+ /*----------------------------------------------------*/
+ switch (failedNodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ jam();
+ failedNodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_1;
+ break;
+ case Sysfile::NS_ActiveMissed_1:
+ jam();
+ failedNodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_2;
+ break;
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ failedNodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
+ break;
+ case Sysfile::NS_TakeOver:
+ jam();
+ failedNodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
+ break;
+ default:
+ ndbout << "activeStatus = " << (Uint32) failedNodePtr.p->activeStatus;
+ ndbout << " at failure after NODE_FAILREP of node = ";
+ ndbout << failedNodePtr.i << endl;
+ ndbrequire(false);
+ break;
+ }//switch
+ }//if
+
+ c_lcpState.m_participatingDIH.clear(failedNodePtr.i);
+ c_lcpState.m_participatingLQH.clear(failedNodePtr.i);
+
+ if(c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.isWaitingFor(failedNodePtr.i)){
+ jam();
+ LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend();
+ rep->nodeId = failedNodePtr.i;
+ rep->lcpId = SYSFILE->latestLCP_ID;
+ rep->blockNo = DBDIH;
+ sendSignal(reference(), GSN_LCP_COMPLETE_REP, signal,
+ LcpCompleteRep::SignalLength, JBB);
+ }
+
+ /**
+ * Check if we're waiting for the failed node's LQH to complete
+ *
+ * Note that this is run "before" the LCP master take over
+ */
+ if(c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.isWaitingFor(nodeId)){
+ jam();
+
+ LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend();
+ rep->nodeId = nodeId;
+ rep->lcpId = SYSFILE->latestLCP_ID;
+ rep->blockNo = DBLQH;
+ sendSignal(reference(), GSN_LCP_COMPLETE_REP, signal,
+ LcpCompleteRep::SignalLength, JBB);
+
+ if(c_lcpState.m_LAST_LCP_FRAG_ORD.isWaitingFor(nodeId)){
+ jam();
+ /**
+ * Make sure we're ready to accept it
+ */
+ c_lcpState.m_LAST_LCP_FRAG_ORD.clearWaitingFor(nodeId);
+ }
+ }
+
+ if (c_TCGETOPSIZEREQ_Counter.isWaitingFor(failedNodePtr.i)) {
+ jam();
+ signal->theData[0] = failedNodePtr.i;
+ signal->theData[1] = 0;
+ sendSignal(reference(), GSN_TCGETOPSIZECONF, signal, 2, JBB);
+ }//if
+
+ if (c_TC_CLOPSIZEREQ_Counter.isWaitingFor(failedNodePtr.i)) {
+ jam();
+ signal->theData[0] = failedNodePtr.i;
+ sendSignal(reference(), GSN_TC_CLOPSIZECONF, signal, 1, JBB);
+ }//if
+
+ if (c_START_LCP_REQ_Counter.isWaitingFor(failedNodePtr.i)) {
+ jam();
+ StartLcpConf * conf = (StartLcpConf*)signal->getDataPtrSend();
+ conf->senderRef = numberToRef(DBLQH, failedNodePtr.i);
+ conf->lcpId = SYSFILE->latestLCP_ID;
+ sendSignal(reference(), GSN_START_LCP_CONF, signal,
+ StartLcpConf::SignalLength, JBB);
+ }//if
+
+ if (c_EMPTY_LCP_REQ_Counter.isWaitingFor(failedNodePtr.i)) {
+ jam();
+ EmptyLcpConf * const rep = (EmptyLcpConf *)&signal->theData[0];
+ rep->senderNodeId = failedNodePtr.i;
+ rep->tableId = ~0;
+ rep->fragmentId = ~0;
+ rep->lcpNo = 0;
+ rep->lcpId = SYSFILE->latestLCP_ID;
+ rep->idle = true;
+ sendSignal(reference(), GSN_EMPTY_LCP_CONF, signal,
+ EmptyLcpConf::SignalLength, JBB);
+ }//if
+
+ if (c_MASTER_LCPREQ_Counter.isWaitingFor(failedNodePtr.i)) {
+ jam();
+ MasterLCPRef * const ref = (MasterLCPRef *)&signal->theData[0];
+ ref->senderNodeId = failedNodePtr.i;
+ ref->failedNodeId = cmasterTakeOverNode;
+ sendSignal(reference(), GSN_MASTER_LCPREF, signal,
+ MasterLCPRef::SignalLength, JBB);
+ }//if
+
+}//Dbdih::failedNodeLcpHandling()
+
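+/**
+ * Fake the GCP protocol replies that the failed node can no longer send,
+ * so that any counter still waiting for it (GCP_PREPARE, GCP_COMMIT,
+ * GCP_SAVEREQ, COPY_GCIREQ, MASTER_GCPREQ) can complete.
+ */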
+void Dbdih::checkGcpOutstanding(Signal* signal, Uint32 failedNodeId){
+ if (c_GCP_PREPARE_Counter.isWaitingFor(failedNodeId)){
+ jam();
+ signal->theData[0] = failedNodeId;
+ signal->theData[1] = cnewgcp;
+ sendSignal(reference(), GSN_GCP_PREPARECONF, signal, 2, JBB);
+ }//if
+
+ if (c_GCP_COMMIT_Counter.isWaitingFor(failedNodeId)) {
+ jam();
+ signal->theData[0] = failedNodeId;
+ signal->theData[1] = coldgcp;
+ signal->theData[2] = cfailurenr;
+ sendSignal(reference(), GSN_GCP_NODEFINISH, signal, 3, JBB);
+ }//if
+
+ if (c_GCP_SAVEREQ_Counter.isWaitingFor(failedNodeId)) {
+ jam();
+ GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
+ saveRef->dihPtr = failedNodeId;
+ saveRef->nodeId = failedNodeId;
+ saveRef->gci = coldgcp;
+ saveRef->errorCode = GCPSaveRef::FakedSignalDueToNodeFailure;
+ sendSignal(reference(), GSN_GCP_SAVEREF, signal,
+ GCPSaveRef::SignalLength, JBB);
+ }//if
+
+ if (c_COPY_GCIREQ_Counter.isWaitingFor(failedNodeId)) {
+ jam();
+ signal->theData[0] = failedNodeId;
+ sendSignal(reference(), GSN_COPY_GCICONF, signal, 1, JBB);
+ }//if
+
+ if (c_MASTER_GCPREQ_Counter.isWaitingFor(failedNodeId)){
+ jam();
+ MasterGCPRef * const ref = (MasterGCPRef *)&signal->theData[0];
+ ref->senderNodeId = failedNodeId;
+ ref->failedNodeId = cmasterTakeOverNode;
+ sendSignal(reference(), GSN_MASTER_GCPREF, signal,
+ MasterGCPRef::SignalLength, JBB);
+ }//if
+}//Dbdih::checkGcpOutstanding()
+
+
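+/**
+ * Begin the LCP part of the master take over: remember the failed node,
+ * enter LMTOS_WAIT_EMPTY_LCP and, unless a previous EMPTY_LCP_REQ round is
+ * still outstanding, broadcast EMPTY_LCP_REQ to collect the LCP progress of
+ * all nodes. Finally register NF_LCP_TAKE_OVER as an ongoing node failure
+ * handling step.
+ */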
+void
+Dbdih::startLcpMasterTakeOver(Signal* signal, Uint32 nodeId){
+ jam();
+
+ c_lcpMasterTakeOverState.minTableId = ~0;
+ c_lcpMasterTakeOverState.minFragId = ~0;
+ c_lcpMasterTakeOverState.failedNodeId = nodeId;
+
+ c_lcpMasterTakeOverState.set(LMTOS_WAIT_EMPTY_LCP, __LINE__);
+
+ if(c_EMPTY_LCP_REQ_Counter.done()){
+ jam();
+ c_lcpState.m_LAST_LCP_FRAG_ORD.clearWaitingFor();
+
+ EmptyLcpReq* req = (EmptyLcpReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ sendLoopMacro(EMPTY_LCP_REQ, sendEMPTY_LCP_REQ);
+ ndbrequire(!c_EMPTY_LCP_REQ_Counter.done());
+ } else {
+ /**
+ * Node failure during master take over...
+ */
+ ndbout_c("Nodefail during master take over");
+ }
+
+ setLocalNodefailHandling(signal, nodeId, NF_LCP_TAKE_OVER);
+}
+
+void Dbdih::startGcpMasterTakeOver(Signal* signal, Uint32 oldMasterId){
+ jam();
+ /*--------------------------------------------------*/
+ /* */
+ /* THE MASTER HAS FAILED AND WE WERE ELECTED */
+ /* TO BE THE NEW MASTER NODE. WE NEED TO QUERY*/
+ /* ALL THE OTHER NODES ABOUT THEIR STATUS IN */
+ /* ORDER TO BE ABLE TO TAKE OVER CONTROL OF */
+ /* THE GLOBAL CHECKPOINT PROTOCOL AND THE */
+ /* LOCAL CHECKPOINT PROTOCOL. */
+ /*--------------------------------------------------*/
+ if(!isMaster()){
+ jam();
+ return;
+ }
+ cmasterState = MASTER_TAKE_OVER_GCP;
+ cmasterTakeOverNode = oldMasterId;
+ MasterGCPReq * const req = (MasterGCPReq *)&signal->theData[0];
+ req->masterRef = reference();
+ req->failedNodeId = oldMasterId;
+ sendLoopMacro(MASTER_GCPREQ, sendMASTER_GCPREQ);
+ cgcpMasterTakeOverState = GMTOS_INITIAL;
+
+ signal->theData[0] = NDB_LE_GCP_TakeoverStarted;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
+
+ setLocalNodefailHandling(signal, oldMasterId, NF_GCP_TAKE_OVER);
+}//Dbdih::startGcpMasterTakeOver()
+
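+/**
+ * As new master, adopt a take over that involved the failed node. An
+ * unfinished copy phase is aborted by stopping the starting node with a
+ * SYSTEM_ERROR, while a completed copy simply proceeds to wait for the LCP.
+ */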
+void Dbdih::handleTakeOverNewMaster(Signal* signal, Uint32 takeOverPtrI)
+{
+ jam();
+ if (takeOverPtrI != RNIL) {
+ jam();
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = takeOverPtrI;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ bool ok = false;
+ switch (takeOverPtr.p->toSlaveStatus) {
+ case TakeOverRecord::TO_SLAVE_IDLE:
+ ndbrequire(false);
+ break;
+ case TakeOverRecord::TO_SLAVE_STARTED:
+ jam();
+ case TakeOverRecord::TO_SLAVE_CREATE_PREPARE:
+ jam();
+ case TakeOverRecord::TO_SLAVE_COPY_FRAG_COMPLETED:
+ jam();
+ case TakeOverRecord::TO_SLAVE_CREATE_COMMIT:
+ jam();
+ ok = true;
+ infoEvent("Unhandled MasterTO of TO slaveStatus=%d killing node %d",
+ takeOverPtr.p->toSlaveStatus,
+ takeOverPtr.p->toStartingNode);
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_ACTIVE;
+
+ {
+ BlockReference cntrRef = calcNdbCntrBlockRef(takeOverPtr.p->toStartingNode);
+ SystemError * const sysErr = (SystemError*)&signal->theData[0];
+ sysErr->errorCode = SystemError::CopyFragRefError;
+ sysErr->errorRef = reference();
+ sysErr->data1= 0;
+ sysErr->data2= __LINE__;
+ sendSignal(cntrRef, GSN_SYSTEM_ERROR, signal,
+ SystemError::SignalLength, JBB);
+ }
+ break;
+ case TakeOverRecord::TO_SLAVE_COPY_COMPLETED:
+ ok = true;
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::WAIT_LCP;
+ break;
+ }
+ ndbrequire(ok);
+ }//if
+}//Dbdih::handleTakeOverNewMaster()
+
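+/**
+ * Start removing the failed node's replicas from all tables via a CONTINUEB
+ * loop, but only if the node was alive until now (DIED_NOW); for a node
+ * that was already dead there is nothing more to remove.
+ */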
+void Dbdih::startRemoveFailedNode(Signal* signal, NodeRecordPtr failedNodePtr)
+{
+ Uint32 nodeId = failedNodePtr.i;
+ if(failedNodePtr.p->nodeStatus != NodeRecord::DIED_NOW){
+ jam();
+ /**
+ * If the node isn't alive, it can't be part of the LCP
+ */
+ ndbrequire(!c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.isWaitingFor(nodeId));
+
+ /**
+ * And there is no point in removing any replicas;
+ * it's dead...
+ */
+ return;
+ }
+
+ jam();
+ signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+ signal->theData[1] = failedNodePtr.i;
+ signal->theData[2] = 0; // Tab id
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+
+ setLocalNodefailHandling(signal, failedNodePtr.i, NF_REMOVE_NODE_FROM_TABLE);
+}//Dbdih::startRemoveFailedNode()
+
+/*--------------------------------------------------*/
+/* THE MASTER HAS FAILED AND THE NEW MASTER IS*/
+/* QUERYING THIS NODE ABOUT THE STATE OF THE */
+/* GLOBAL CHECKPOINT PROTOCOL */
+/*--------------------------------------------------*/
+void Dbdih::execMASTER_GCPREQ(Signal* signal)
+{
+ NodeRecordPtr failedNodePtr;
+ MasterGCPReq * const masterGCPReq = (MasterGCPReq *)&signal->theData[0];
+ jamEntry();
+ const BlockReference newMasterBlockref = masterGCPReq->masterRef;
+ const Uint32 failedNodeId = masterGCPReq->failedNodeId;
+ if (c_copyGCISlave.m_copyReason != CopyGCIReq::IDLE) {
+ jam();
+ /*--------------------------------------------------*/
+ /* WE ARE CURRENTLY WRITING THE RESTART INFO */
+ /* IN THIS NODE. SINCE ONLY ONE PROCESS IS */
+ /* ALLOWED TO DO THIS AT A TIME WE MUST ENSURE*/
+ /* THAT THIS IS NOT ONGOING WHEN THE NEW */
+ /* MASTER TAKES OVER CONTROL. IF NOT ALL NODES*/
+ /* RECEIVE THE SAME RESTART INFO DUE TO THE */
+ /* FAILURE OF THE MASTER IT IS TAKEN CARE OF */
+ /* BY THE NEW MASTER. */
+ /*--------------------------------------------------*/
+ sendSignalWithDelay(reference(), GSN_MASTER_GCPREQ,
+ signal, 10, MasterGCPReq::SignalLength);
+ return;
+ }//if
+ failedNodePtr.i = failedNodeId;
+ ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
+ if (failedNodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ /*--------------------------------------------------*/
+ /* ENSURE THAT WE HAVE PROCESSED THE SIGNAL */
+ /* NODE_FAILURE BEFORE WE PROCESS THIS REQUEST*/
+ /* FROM THE NEW MASTER. THIS ENSURES THAT WE */
+ /* HAVE REMOVED THE FAILED NODE FROM THE LIST */
+ /* OF ACTIVE NODES AND SO FORTH. */
+ /*--------------------------------------------------*/
+ sendSignalWithDelay(reference(), GSN_MASTER_GCPREQ,
+ signal, 10, MasterGCPReq::SignalLength);
+ return;
+ } else {
+ ndbrequire(failedNodePtr.p->nodeStatus == NodeRecord::DYING);
+ }//if
+ MasterGCPConf::State gcpState;
+ switch (cgcpParticipantState) {
+ case GCP_PARTICIPANT_READY:
+ jam();
+ /*--------------------------------------------------*/
+ /* THE GLOBAL CHECKPOINT IS NOT ACTIVE SINCE */
+ /* THE PREVIOUS GLOBAL CHECKPOINT IS COMPLETED*/
+ /* AND THE NEW ONE HAS NOT STARTED YET. */
+ /*--------------------------------------------------*/
+ gcpState = MasterGCPConf::GCP_READY;
+ break;
+ case GCP_PARTICIPANT_PREPARE_RECEIVED:
+ jam();
+ /*--------------------------------------------------*/
+ /* GCP_PREPARE HAS BEEN RECEIVED AND A RESPONSE*/
+ /* HAS BEEN SENT. */
+ /*--------------------------------------------------*/
+ gcpState = MasterGCPConf::GCP_PREPARE_RECEIVED;
+ break;
+ case GCP_PARTICIPANT_COMMIT_RECEIVED:
+ jam();
+ /*------------------------------------------------*/
+ /* GCP_COMMIT HAS BEEN RECEIVED BUT NOT YET*/
+ /* GCP_TCFINISHED FROM THE LOCAL TC. */
+ /*------------------------------------------------*/
+ gcpState = MasterGCPConf::GCP_COMMIT_RECEIVED;
+ break;
+ case GCP_PARTICIPANT_TC_FINISHED:
+ jam();
+ /*------------------------------------------------*/
+ /* GCP_COMMIT HAS BEEN RECEIVED AND ALSO */
+ /* GCP_TCFINISHED HAS BEEN RECEIVED. */
+ /*------------------------------------------------*/
+ gcpState = MasterGCPConf::GCP_TC_FINISHED;
+ break;
+ case GCP_PARTICIPANT_COPY_GCI_RECEIVED:
+ /*--------------------------------------------------*/
+ /* COPY RESTART INFORMATION HAS BEEN RECEIVED */
+ /* BUT NOT YET COMPLETED. */
+ /*--------------------------------------------------*/
+ ndbrequire(false);
+ gcpState= MasterGCPConf::GCP_READY; // remove warning
+ break;
+ default:
+ /*------------------------------------------------*/
+ /* */
+ /* THIS SHOULD NOT OCCUR SINCE THE ABOVE */
+ /* STATES ARE THE ONLY POSSIBLE STATES AT A */
+ /* NODE WHICH WAS NOT A MASTER NODE. */
+ /*------------------------------------------------*/
+ ndbrequire(false);
+ gcpState= MasterGCPConf::GCP_READY; // remove warning
+ break;
+ }//switch
+ MasterGCPConf * const masterGCPConf = (MasterGCPConf *)&signal->theData[0];
+ masterGCPConf->gcpState = gcpState;
+ masterGCPConf->senderNodeId = cownNodeId;
+ masterGCPConf->failedNodeId = failedNodeId;
+ masterGCPConf->newGCP = cnewgcp;
+ masterGCPConf->latestLCP = SYSFILE->latestLCP_ID;
+ masterGCPConf->oldestRestorableGCI = SYSFILE->oldestRestorableGCI;
+ masterGCPConf->keepGCI = SYSFILE->keepGCI;
+ for(Uint32 i = 0; i < NdbNodeBitmask::Size; i++)
+ masterGCPConf->lcpActive[i] = SYSFILE->lcpActive[i];
+ sendSignal(newMasterBlockref, GSN_MASTER_GCPCONF, signal,
+ MasterGCPConf::SignalLength, JBB);
+}//Dbdih::execMASTER_GCPREQ()
+
+void Dbdih::execMASTER_GCPCONF(Signal* signal)
+{
+ NodeRecordPtr senderNodePtr;
+ MasterGCPConf * const masterGCPConf = (MasterGCPConf *)&signal->theData[0];
+ jamEntry();
+ senderNodePtr.i = masterGCPConf->senderNodeId;
+ ptrCheckGuard(senderNodePtr, MAX_NDB_NODES, nodeRecord);
+
+ MasterGCPConf::State gcpState = (MasterGCPConf::State)masterGCPConf->gcpState;
+ const Uint32 failedNodeId = masterGCPConf->failedNodeId;
+ const Uint32 newGcp = masterGCPConf->newGCP;
+ const Uint32 latestLcpId = masterGCPConf->latestLCP;
+ const Uint32 oldestRestorableGci = masterGCPConf->oldestRestorableGCI;
+ const Uint32 oldestKeepGci = masterGCPConf->keepGCI;
+ if (latestLcpId > SYSFILE->latestLCP_ID) {
+ jam();
+#if 0
+ ndbout_c("Dbdih: Setting SYSFILE->latestLCP_ID to %d", latestLcpId);
+ SYSFILE->latestLCP_ID = latestLcpId;
+#endif
+ SYSFILE->keepGCI = oldestKeepGci;
+ SYSFILE->oldestRestorableGCI = oldestRestorableGci;
+ for(Uint32 i = 0; i < NdbNodeBitmask::Size; i++)
+ SYSFILE->lcpActive[i] = masterGCPConf->lcpActive[i];
+ }//if
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ jam();
+ senderNodePtr.p->gcpstate = NodeRecord::READY;
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ jam();
+ senderNodePtr.p->gcpstate = NodeRecord::PREPARE_RECEIVED;
+ cnewgcp = newGcp;
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ jam();
+ senderNodePtr.p->gcpstate = NodeRecord::COMMIT_SENT;
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ jam();
+ senderNodePtr.p->gcpstate = NodeRecord::NODE_FINISHED;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ switch (cgcpMasterTakeOverState) {
+ case GMTOS_INITIAL:
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ jam();
+ cgcpMasterTakeOverState = ALL_READY;
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ jam();
+ cgcpMasterTakeOverState = ALL_PREPARED;
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ jam();
+ cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ jam();
+ cgcpMasterTakeOverState = COMMIT_COMPLETED;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ break;
+ case ALL_READY:
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ jam();
+ /*empty*/;
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ jam();
+ cgcpMasterTakeOverState = PREPARE_STARTED_NOT_COMMITTED;
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ ndbrequire(false);
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ jam();
+ cgcpMasterTakeOverState = SAVE_STARTED_NOT_COMPLETED;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ break;
+ case PREPARE_STARTED_NOT_COMMITTED:
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ jam();
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ jam();
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ ndbrequire(false);
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ ndbrequire(false);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ break;
+ case ALL_PREPARED:
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ jam();
+ cgcpMasterTakeOverState = PREPARE_STARTED_NOT_COMMITTED;
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ jam();
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ jam();
+ cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ jam();
+ cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ break;
+ case COMMIT_STARTED_NOT_COMPLETED:
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ ndbrequire(false);
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ jam();
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ jam();
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ jam();
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ break;
+ case COMMIT_COMPLETED:
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ cgcpMasterTakeOverState = SAVE_STARTED_NOT_COMPLETED;
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ jam();
+ cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ jam();
+ cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ jam();
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ break;
+ case SAVE_STARTED_NOT_COMPLETED:
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ jam();
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ ndbrequire(false);
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ ndbrequire(false);
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ jam();
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ receiveLoopMacro(MASTER_GCPREQ, senderNodePtr.i);
+ /*-------------------------------------------------------------------------*/
+ // We have now received all responses and are ready to take over the GCP
+ // protocol as master.
+ /*-------------------------------------------------------------------------*/
+ MASTER_GCPhandling(signal, failedNodeId);
+ return;
+}//Dbdih::execMASTER_GCPCONF()
+
+void Dbdih::execMASTER_GCPREF(Signal* signal)
+{
+ const MasterGCPRef * const ref = (MasterGCPRef *)&signal->theData[0];
+ jamEntry();
+ receiveLoopMacro(MASTER_GCPREQ, ref->senderNodeId);
+ /*-------------------------------------------------------------------------*/
+ // We have now received all responses and are ready to take over the GCP
+ // protocol as master.
+ /*-------------------------------------------------------------------------*/
+ MASTER_GCPhandling(signal, ref->failedNodeId);
+}//Dbdih::execMASTER_GCPREF()
+
+void Dbdih::MASTER_GCPhandling(Signal* signal, Uint32 failedNodeId)
+{
+ NodeRecordPtr failedNodePtr;
+ cmasterState = MASTER_ACTIVE;
+ /*----------------------------------------------------------*/
+ /* REMOVE ALL ACTIVE STATUS ON ALREADY FAILED NODES */
+ /* THIS IS PERFORMED HERE SINCE WE GET THE LCP ACTIVE */
+ /* STATUS AS PART OF THE COPY RESTART INFO AND THIS IS*/
+ /* HANDLED BY THE MASTER GCP TAKE OVER PROTOCOL. */
+ /*----------------------------------------------------------*/
+
+ failedNodePtr.i = failedNodeId;
+ ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
+ switch (cgcpMasterTakeOverState) {
+ case ALL_READY:
+ jam();
+ startGcp(signal);
+ break;
+ case PREPARE_STARTED_NOT_COMMITTED:
+ {
+ NodeRecordPtr nodePtr;
+ jam();
+ c_GCP_PREPARE_Counter.clearWaitingFor();
+ nodePtr.i = cfirstAliveNode;
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->gcpstate == NodeRecord::READY) {
+ jam();
+ c_GCP_PREPARE_Counter.setWaitingFor(nodePtr.i);
+ sendGCP_PREPARE(signal, nodePtr.i);
+ }//if
+ nodePtr.i = nodePtr.p->nextNode;
+ } while(nodePtr.i != RNIL);
+ if (c_GCP_PREPARE_Counter.done()) {
+ jam();
+ gcpcommitreqLab(signal);
+ }//if
+ break;
+ }
+ case ALL_PREPARED:
+ jam();
+ gcpcommitreqLab(signal);
+ break;
+ case COMMIT_STARTED_NOT_COMPLETED:
+ {
+ NodeRecordPtr nodePtr;
+ jam();
+ c_GCP_COMMIT_Counter.clearWaitingFor();
+ nodePtr.i = cfirstAliveNode;
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->gcpstate == NodeRecord::PREPARE_RECEIVED) {
+ jam();
+ sendGCP_COMMIT(signal, nodePtr.i);
+ c_GCP_COMMIT_Counter.setWaitingFor(nodePtr.i);
+ } else {
+ ndbrequire((nodePtr.p->gcpstate == NodeRecord::NODE_FINISHED) ||
+ (nodePtr.p->gcpstate == NodeRecord::COMMIT_SENT));
+ }//if
+ nodePtr.i = nodePtr.p->nextNode;
+ } while(nodePtr.i != RNIL);
+ if (c_GCP_COMMIT_Counter.done()){
+ jam();
+ gcpsavereqLab(signal);
+ }//if
+ break;
+ }
+ case COMMIT_COMPLETED:
+ jam();
+ gcpsavereqLab(signal);
+ break;
+ case SAVE_STARTED_NOT_COMPLETED:
+ {
+ NodeRecordPtr nodePtr;
+ jam();
+ SYSFILE->newestRestorableGCI = coldgcp;
+ nodePtr.i = cfirstAliveNode;
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ SYSFILE->lastCompletedGCI[nodePtr.i] = coldgcp;
+ nodePtr.i = nodePtr.p->nextNode;
+ } while (nodePtr.i != RNIL);
+ /**-------------------------------------------------------------------
+ * THE FAILED NODE DID ALSO PARTICIPATE IN THIS GLOBAL CHECKPOINT
+ * WHICH IS RECORDED.
+ *-------------------------------------------------------------------*/
+ SYSFILE->lastCompletedGCI[failedNodeId] = coldgcp;
+ copyGciLab(signal, CopyGCIReq::GLOBAL_CHECKPOINT);
+ break;
+ }
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+
+ signal->theData[0] = NDB_LE_GCP_TakeoverCompleted;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
+
+ /*--------------------------------------------------*/
+ /* WE SEPARATE HANDLING OF GLOBAL CHECKPOINTS */
+ /* AND LOCAL CHECKPOINTS HERE. LCP'S HAVE TO */
+ /* REMOVE ALL FAILED FRAGMENTS BEFORE WE CAN */
+ /* HANDLE THE LCP PROTOCOL. */
+ /*--------------------------------------------------*/
+ checkLocalNodefailComplete(signal, failedNodeId, NF_GCP_TAKE_OVER);
+
+ return;
+}//Dbdih::MASTER_GCPhandling()
+
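+/**
+ * Invalidate the LCP information stored for nodeId, scanning the table
+ * records in batches of RT_BREAK. When the last table has been processed
+ * the node is allowed to start again and, if it is already STARTING, the
+ * master is informed with START_INFOCONF. Active tables are handled by the
+ * per-table variant below; between batches the scan is rescheduled with
+ * CONTINUEB.
+ */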
+void
+Dbdih::invalidateNodeLCP(Signal* signal, Uint32 nodeId, Uint32 tableId)
+{
+ jamEntry();
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ const Uint32 RT_BREAK = 64;
+ if (ERROR_INSERTED(7125)) {
+ return;
+ }//if
+ for (Uint32 i = 0; i<RT_BREAK; i++) {
+ jam();
+ if (tabPtr.i >= ctabFileSize){
+ jam();
+ /**
+ * Done with the entire loop
+ * Return to the master
+ */
+ setAllowNodeStart(nodeId, true);
+ if (getNodeStatus(nodeId) == NodeRecord::STARTING) {
+ jam();
+ StartInfoConf * conf = (StartInfoConf*)&signal->theData[0];
+ conf->sendingNodeId = cownNodeId;
+ conf->startingNodeId = nodeId;
+ sendSignal(cmasterdihref, GSN_START_INFOCONF, signal,
+ StartInfoConf::SignalLength, JBB);
+ }//if
+ return;
+ }//if
+ ptrAss(tabPtr, tabRecord);
+ if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) {
+ jam();
+ invalidateNodeLCP(signal, nodeId, tabPtr);
+ return;
+ }//if
+ tabPtr.i++;
+ }//for
+ signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+}//Dbdih::invalidateNodeLCP()
+
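+/**
+ * Per-table variant: once the table descriptor is free, mark all stored
+ * LCPs of the old stored replicas belonging to nodeId as invalid and reset
+ * their LCP bookkeeping. If anything was modified the table description is
+ * written to disk, otherwise the scan continues with the next table.
+ */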
+void
+Dbdih::invalidateNodeLCP(Signal* signal, Uint32 nodeId, TabRecordPtr tabPtr)
+{
+ /**
+ * Check that no one else is using the table descriptor
+ */
+ if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
+ jam();
+ signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 20, 3);
+ return;
+ }//if
+
+ /**
+ * For each fragment
+ */
+ bool modified = false;
+ FragmentstorePtr fragPtr;
+ for(Uint32 fragNo = 0; fragNo < tabPtr.p->totalfragments; fragNo++){
+ jam();
+ getFragstore(tabPtr.p, fragNo, fragPtr);
+ /**
+ * For each replica record
+ */
+ ReplicaRecordPtr replicaPtr;
+ for(replicaPtr.i = fragPtr.p->oldStoredReplicas; replicaPtr.i != RNIL;
+ replicaPtr.i = replicaPtr.p->nextReplica) {
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if(replicaPtr.p->procNode == nodeId){
+ jam();
+ /**
+ * Found one with correct node id
+ */
+ /**
+ * Invalidate all LCP's
+ */
+ modified = true;
+ for(int i = 0; i < MAX_LCP_STORED; i++) {
+ replicaPtr.p->lcpStatus[i] = ZINVALID;
+ }//for
+ /**
+ * And reset nextLcp
+ */
+ replicaPtr.p->nextLcp = 0;
+ replicaPtr.p->noCrashedReplicas = 0;
+ }//if
+ }//for
+ }//for
+
+ if (modified) {
+ jam();
+ /**
+ * Save table description to disk
+ */
+ tabPtr.p->tabCopyStatus = TabRecord::CS_INVALIDATE_NODE_LCP;
+ tabPtr.p->tabUpdateState = TabRecord::US_INVALIDATE_NODE_LCP;
+ tabPtr.p->tabRemoveNode = nodeId;
+ signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }
+
+ jam();
+ /**
+ * Move to next table
+ */
+ tabPtr.i++;
+ signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+}//Dbdih::invalidateNodeLCP()
+
+/*------------------------------------------------*/
+/* INPUT: NODEID */
+/*        TABLEID */
+/*------------------------------------------------*/
+void Dbdih::removeNodeFromTables(Signal* signal,
+ Uint32 nodeId, Uint32 tableId)
+{
+ jamEntry();
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ const Uint32 RT_BREAK = 64;
+ for (Uint32 i = 0; i<RT_BREAK; i++) {
+ jam();
+ if (tabPtr.i >= ctabFileSize){
+ jam();
+ removeNodeFromTablesComplete(signal, nodeId);
+ return;
+ }//if
+
+ ptrAss(tabPtr, tabRecord);
+ if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) {
+ jam();
+ removeNodeFromTable(signal, nodeId, tabPtr);
+ return;
+ }//if
+ tabPtr.i++;
+ }//for
+ signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+}
+
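+/**
+ * Remove the failed node's stored replicas from one table. Replicas that
+ * are part of the ongoing LCP are also removed from the LCP bookkeeping.
+ * If any replica was removed the table description is written to disk, and
+ * if no LCP replicas remain for the table it is marked as written so the
+ * LCP can complete.
+ */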
+void Dbdih::removeNodeFromTable(Signal* signal,
+ Uint32 nodeId, TabRecordPtr tabPtr){
+
+ /**
+ * Check that no one else is using the table descriptor
+ */
+ if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
+ jam();
+ signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 20, 3);
+ return;
+ }//if
+
+ /**
+ * For each fragment
+ */
+ Uint32 noOfRemovedReplicas = 0; // No of replicas removed
+ Uint32 noOfRemovedLcpReplicas = 0; // No of replicas in LCP removed
+ Uint32 noOfRemainingLcpReplicas = 0;// No of replicas in LCP remaining
+
+ //const Uint32 lcpId = SYSFILE->latestLCP_ID;
+ const bool lcpOngoingFlag = (tabPtr.p->tabLcpStatus== TabRecord::TLS_ACTIVE);
+
+ FragmentstorePtr fragPtr;
+ for(Uint32 fragNo = 0; fragNo < tabPtr.p->totalfragments; fragNo++){
+ jam();
+ getFragstore(tabPtr.p, fragNo, fragPtr);
+
+ /**
+ * For each replica record
+ */
+ Uint32 replicaNo = 0;
+ ReplicaRecordPtr replicaPtr;
+ for(replicaPtr.i = fragPtr.p->storedReplicas; replicaPtr.i != RNIL;
+ replicaPtr.i = replicaPtr.p->nextReplica, replicaNo++) {
+ jam();
+
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if(replicaPtr.p->procNode == nodeId){
+ jam();
+ noOfRemovedReplicas++;
+ removeNodeFromStored(nodeId, fragPtr, replicaPtr);
+ if(replicaPtr.p->lcpOngoingFlag){
+ jam();
+ /**
+ * This replica is currently part of the ongoing LCP
+ */
+ ndbrequire(fragPtr.p->noLcpReplicas > 0);
+ fragPtr.p->noLcpReplicas --;
+
+ noOfRemovedLcpReplicas ++;
+ replicaPtr.p->lcpOngoingFlag = false;
+ }
+ }
+ }
+ noOfRemainingLcpReplicas += fragPtr.p->noLcpReplicas;
+ }
+
+ if(noOfRemovedReplicas == 0){
+ jam();
+ /**
+ * The table had no replica on the failed node;
+ * continue with the next table
+ */
+ tabPtr.i++;
+ signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }
+
+ /**
+ * We did remove at least one replica
+ */
+ bool ok = false;
+ switch(tabPtr.p->tabLcpStatus){
+ case TabRecord::TLS_COMPLETED:
+ ok = true;
+ jam();
+ /**
+ * WE WILL WRITE THE TABLE DESCRIPTION TO DISK AT THIS TIME
+ * INDEPENDENT OF WHAT THE LOCAL CHECKPOINT NEEDED.
+ * THIS IS TO ENSURE THAT THE FAILED NODES ARE ALSO UPDATED ON DISK
+ * IN THE DIH DATA STRUCTURES BEFORE WE COMPLETE HANDLING OF THE
+ * NODE FAILURE.
+ */
+ ndbrequire(noOfRemovedLcpReplicas == 0);
+
+ tabPtr.p->tabCopyStatus = TabRecord::CS_REMOVE_NODE;
+ tabPtr.p->tabUpdateState = TabRecord::US_REMOVE_NODE;
+ tabPtr.p->tabRemoveNode = nodeId;
+ signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ case TabRecord::TLS_ACTIVE:
+ ok = true;
+ jam();
+ /**
+ * The table is participating in an LCP currently
+ */
+ // Fall through
+ break;
+ case TabRecord::TLS_WRITING_TO_FILE:
+ ok = true;
+ jam();
+ /**
+ * This should never happen since we check the tabCopyStatus
+ * at the beginning of this function
+ */
+ ndbrequire(lcpOngoingFlag);
+ ndbrequire(false);
+ break;
+ }
+ ndbrequire(ok);
+
+ /**
+ * The table is participating in an LCP currently
+ * and we removed some replicas that should have been checkpointed
+ */
+ ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE);
+ ndbrequire(tabPtr.p->tabLcpStatus == TabRecord::TLS_ACTIVE);
+
+ /**
+ * Save the table
+ */
+ tabPtr.p->tabCopyStatus = TabRecord::CS_REMOVE_NODE;
+ tabPtr.p->tabUpdateState = TabRecord::US_REMOVE_NODE;
+ tabPtr.p->tabRemoveNode = nodeId;
+ signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+
+ if(noOfRemainingLcpReplicas == 0){
+ jam();
+ /**
+ * The removal of the failed node's replicas made the LCP complete for this table
+ */
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_WRITING_TO_FILE;
+ checkLcpAllTablesDoneInLqh();
+ }
+}
+
+void
+Dbdih::removeNodeFromTablesComplete(Signal* signal, Uint32 nodeId){
+ jam();
+
+ /**
+ * Check if we "accidentally" completed an LCP
+ */
+ checkLcpCompletedLab(signal);
+
+ /**
+ * Check if we (DIH) are finished with node fail handling
+ */
+ checkLocalNodefailComplete(signal, nodeId, NF_REMOVE_NODE_FROM_TABLE);
+}
+
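+/**
+ * Clear one step of the local node failure handling for the failed node.
+ * When the last step has been cleared, report NF_COMPLETEREP for DBDIH to
+ * ourselves.
+ */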
+void
+Dbdih::checkLocalNodefailComplete(Signal* signal, Uint32 failedNodeId,
+ NodefailHandlingStep step){
+ jam();
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = failedNodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ ndbrequire(nodePtr.p->m_nodefailSteps.get(step));
+ nodePtr.p->m_nodefailSteps.clear(step);
+
+ if(nodePtr.p->m_nodefailSteps.count() > 0){
+ jam();
+ return;
+ }
+
+ NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0];
+ nf->blockNo = DBDIH;
+ nf->nodeId = cownNodeId;
+ nf->failedNodeId = failedNodeId;
+ nf->from = __LINE__;
+ sendSignal(reference(), GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+}
+
+
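+/**
+ * Mark one step of the local node failure handling as ongoing for the
+ * failed node; the step must not already be set.
+ */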
+void
+Dbdih::setLocalNodefailHandling(Signal* signal, Uint32 failedNodeId,
+ NodefailHandlingStep step){
+ jam();
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = failedNodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ ndbrequire(!nodePtr.p->m_nodefailSteps.get(step));
+ nodePtr.p->m_nodefailSteps.set(step);
+}
+
+void Dbdih::startLcpTakeOverLab(Signal* signal, Uint32 failedNodeId)
+{
+ /*--------------------------------------------------------------------*/
+ // Start LCP master take over process. Consists of the following steps.
+ // 1) Ensure that all LQH's have reported all fragments they have been
+ // told to checkpoint. Can be a fairly long step time-wise.
+ // 2) Query all nodes about their LCP status.
+ // During the query process we do not want our own state to change.
+ // This can change due to delayed reception of LCP_REPORT, completed
+ // save of table on disk or reception of DIH_LCPCOMPLETE from other
+ // node.
+ /*--------------------------------------------------------------------*/
+}//Dbdih::startLcpTakeOverLab()
+
+void Dbdih::execEMPTY_LCP_CONF(Signal* signal)
+{
+ jamEntry();
+
+ ndbrequire(c_lcpMasterTakeOverState.state == LMTOS_WAIT_EMPTY_LCP);
+
+ const EmptyLcpConf * const conf = (EmptyLcpConf *)&signal->theData[0];
+ Uint32 nodeId = conf->senderNodeId;
+
+ if(!conf->idle){
+ jam();
+ if (conf->tableId < c_lcpMasterTakeOverState.minTableId) {
+ jam();
+ c_lcpMasterTakeOverState.minTableId = conf->tableId;
+ c_lcpMasterTakeOverState.minFragId = conf->fragmentId;
+ } else if (conf->tableId == c_lcpMasterTakeOverState.minTableId &&
+ conf->fragmentId < c_lcpMasterTakeOverState.minFragId) {
+ jam();
+ c_lcpMasterTakeOverState.minFragId = conf->fragmentId;
+ }//if
+ if(isMaster()){
+ jam();
+ c_lcpState.m_LAST_LCP_FRAG_ORD.setWaitingFor(nodeId);
+ }
+ }
+
+ receiveLoopMacro(EMPTY_LCP_REQ, nodeId);
+ /*--------------------------------------------------------------------*/
+ // Received all EMPTY_LCPCONF. We can continue with next phase of the
+ // take over LCP master process.
+ /*--------------------------------------------------------------------*/
+ c_lcpMasterTakeOverState.set(LMTOS_WAIT_LCP_FRAG_REP, __LINE__);
+ checkEmptyLcpComplete(signal);
+ return;
+}//Dbdih::execEMPTY_LCP_CONF()
+
+void
+Dbdih::checkEmptyLcpComplete(Signal *signal){
+
+ ndbrequire(c_lcpMasterTakeOverState.state == LMTOS_WAIT_LCP_FRAG_REP);
+
+ if(c_lcpState.noOfLcpFragRepOutstanding > 0){
+ jam();
+ return;
+ }
+
+ if(isMaster()){
+ jam();
+
+ signal->theData[0] = NDB_LE_LCP_TakeoverStarted;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
+
+ signal->theData[0] = 7012;
+ execDUMP_STATE_ORD(signal);
+
+ c_lcpMasterTakeOverState.set(LMTOS_INITIAL, __LINE__);
+ MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0];
+ req->masterRef = reference();
+ req->failedNodeId = c_lcpMasterTakeOverState.failedNodeId;
+ sendLoopMacro(MASTER_LCPREQ, sendMASTER_LCPREQ);
+ } else {
+ sendMASTER_LCPCONF(signal);
+ }
+}
+
+/*--------------------------------------------------*/
+/* THE MASTER HAS FAILED AND THE NEW MASTER IS*/
+/* QUERYING THIS NODE ABOUT THE STATE OF THE */
+/* LOCAL CHECKPOINT PROTOCOL. */
+/*--------------------------------------------------*/
+void Dbdih::execMASTER_LCPREQ(Signal* signal)
+{
+ const MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0];
+ jamEntry();
+ const BlockReference newMasterBlockref = req->masterRef;
+
+ Uint32 failedNodeId = req->failedNodeId;
+
+ /**
+ * There can be no take over with the same master
+ */
+ ndbrequire(c_lcpState.m_masterLcpDihRef != newMasterBlockref);
+ c_lcpState.m_masterLcpDihRef = newMasterBlockref;
+ c_lcpState.m_MASTER_LCPREQ_Received = true;
+ c_lcpState.m_MASTER_LCPREQ_FailedNodeId = failedNodeId;
+
+ if(newMasterBlockref != cmasterdihref){
+ jam();
+ ndbrequire(0);
+ }
+
+ sendMASTER_LCPCONF(signal);
+}//Dbdih::execMASTER_LCPREQ()
+
+void
+Dbdih::sendMASTER_LCPCONF(Signal * signal){
+
+ if(!c_EMPTY_LCP_REQ_Counter.done()){
+ /**
+ * We have not yet received all EMPTY_LCP_CONF;
+ * we dare not send MASTER_LCPCONF yet
+ */
+ jam();
+ return;
+ }
+
+ if(!c_lcpState.m_MASTER_LCPREQ_Received){
+ jam();
+ /**
+ * Has not received MASTER_LCPREQ yet
+ */
+ return;
+ }
+
+ if(c_lcpState.lcpStatus == LCP_INIT_TABLES){
+ jam();
+ /**
+ * Still aborting old initLcpLab
+ */
+ return;
+ }
+
+ if(c_lcpState.lcpStatus == LCP_COPY_GCI){
+ jam();
+ /**
+ * Restart it
+ */
+ //Uint32 lcpId = SYSFILE->latestLCP_ID;
+ SYSFILE->latestLCP_ID--;
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+#if 0
+ if(c_copyGCISlave.m_copyReason == CopyGCIReq::LOCAL_CHECKPOINT){
+ ndbout_c("Dbdih: Also resetting c_copyGCISlave");
+ c_copyGCISlave.m_copyReason = CopyGCIReq::IDLE;
+ c_copyGCISlave.m_expectedNextWord = 0;
+ }
+#endif
+ }
+
+ MasterLCPConf::State lcpState;
+ switch (c_lcpState.lcpStatus) {
+ case LCP_STATUS_IDLE:
+ jam();
+ /*------------------------------------------------*/
+ /* LOCAL CHECKPOINT IS CURRENTLY NOT ACTIVE */
+ /* SINCE NO COPY OF RESTART INFORMATION HAS */
+ /* BEEN RECEIVED YET. ALSO THE PREVIOUS */
+ /* CHECKPOINT HAS BEEN FULLY COMPLETED. */
+ /*------------------------------------------------*/
+ lcpState = MasterLCPConf::LCP_STATUS_IDLE;
+ break;
+ case LCP_STATUS_ACTIVE:
+ jam();
+ /*--------------------------------------------------*/
+ /* COPY OF RESTART INFORMATION HAS BEEN */
+ /* PERFORMED AND A RESPONSE HAS ALSO BEEN SENT.*/
+ /*--------------------------------------------------*/
+ lcpState = MasterLCPConf::LCP_STATUS_ACTIVE;
+ break;
+ case LCP_TAB_COMPLETED:
+ jam();
+ /*--------------------------------------------------------*/
+ /* ALL LCP_REPORTS HAVE BEEN COMPLETED FOR */
+ /* ALL TABLES. THE SAVE OF AT LEAST ONE TABLE */
+ /* IS STILL ONGOING. */
+ /*--------------------------------------------------------*/
+ lcpState = MasterLCPConf::LCP_TAB_COMPLETED;
+ break;
+ case LCP_TAB_SAVED:
+ jam();
+ /*--------------------------------------------------------*/
+ /* ALL LCP_REPORTS HAVE BEEN COMPLETED FOR */
+ /* ALL TABLES. ALL TABLES HAVE ALSO BEEN SAVED. */
+ /* ALL OTHER NODES ARE NOT YET FINISHED WITH */
+ /* THE LOCAL CHECKPOINT. */
+ /*--------------------------------------------------------*/
+ lcpState = MasterLCPConf::LCP_TAB_SAVED;
+ break;
+ case LCP_TCGET:
+ case LCP_CALCULATE_KEEP_GCI:
+ case LCP_TC_CLOPSIZE:
+ case LCP_START_LCP_ROUND:
+ /**
+ * These states should only exist on the master,
+ * but since this is a master take over
+ * they are not allowed here
+ */
+ ndbrequire(false);
+ lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning
+ break;
+ case LCP_COPY_GCI:
+ case LCP_INIT_TABLES:
+ /**
+ * These two states are handled by if statements above
+ */
+ ndbrequire(false);
+ lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+
+ Uint32 failedNodeId = c_lcpState.m_MASTER_LCPREQ_FailedNodeId;
+ MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0];
+ conf->senderNodeId = cownNodeId;
+ conf->lcpState = lcpState;
+ conf->failedNodeId = failedNodeId;
+ sendSignal(c_lcpState.m_masterLcpDihRef, GSN_MASTER_LCPCONF,
+ signal, MasterLCPConf::SignalLength, JBB);
+
+ // Answer to MASTER_LCPREQ sent, reset flag so
+ // that it's not sent again before another request comes in
+ c_lcpState.m_MASTER_LCPREQ_Received = false;
+
+ if(c_lcpState.lcpStatus == LCP_TAB_SAVED){
+#ifdef VM_TRACE
+ ndbout_c("Sending extra GSN_LCP_COMPLETE_REP to new master");
+#endif
+ sendLCP_COMPLETE_REP(signal);
+ }
+
+ if(!isMaster()){
+ c_lcpMasterTakeOverState.set(LMTOS_IDLE, __LINE__);
+ checkLocalNodefailComplete(signal, failedNodeId, NF_LCP_TAKE_OVER);
+ }
+
+ return;
+}
+
+NdbOut&
+operator<<(NdbOut& out, const Dbdih::LcpMasterTakeOverState state){
+ switch(state){
+ case Dbdih::LMTOS_IDLE:
+ out << "LMTOS_IDLE";
+ break;
+ case Dbdih::LMTOS_WAIT_EMPTY_LCP:
+ out << "LMTOS_WAIT_EMPTY_LCP";
+ break;
+ case Dbdih::LMTOS_WAIT_LCP_FRAG_REP:
+ out << "LMTOS_WAIT_LCP_FRAG_REP";
+ break;
+ case Dbdih::LMTOS_INITIAL:
+ out << "LMTOS_INITIAL";
+ break;
+ case Dbdih::LMTOS_ALL_IDLE:
+ out << "LMTOS_ALL_IDLE";
+ break;
+ case Dbdih::LMTOS_ALL_ACTIVE:
+ out << "LMTOS_ALL_ACTIVE";
+ break;
+ case Dbdih::LMTOS_LCP_CONCLUDING:
+ out << "LMTOS_LCP_CONCLUDING";
+ break;
+ case Dbdih::LMTOS_COPY_ONGOING:
+ out << "LMTOS_COPY_ONGOING";
+ break;
+ }
+ return out;
+}
+
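+/**
+ * Valid combinations of the new master's current LCP take over state and a
+ * participant's reported LCP state, together with the resulting take over
+ * state. Any combination not listed here is treated as a fatal error in
+ * execMASTER_LCPCONF().
+ */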
+struct MASTERLCP_StateTransitions {
+ Dbdih::LcpMasterTakeOverState CurrentState;
+ MasterLCPConf::State ParticipantState;
+ Dbdih::LcpMasterTakeOverState NewState;
+};
+
+static const
+MASTERLCP_StateTransitions g_masterLCPTakeoverStateTransitions[] = {
+ /**
+ * Current = LMTOS_INITIAL
+ */
+ { Dbdih::LMTOS_INITIAL,
+ MasterLCPConf::LCP_STATUS_IDLE,
+ Dbdih::LMTOS_ALL_IDLE },
+
+ { Dbdih::LMTOS_INITIAL,
+ MasterLCPConf::LCP_STATUS_ACTIVE,
+ Dbdih::LMTOS_ALL_ACTIVE },
+
+ { Dbdih::LMTOS_INITIAL,
+ MasterLCPConf::LCP_TAB_COMPLETED,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ { Dbdih::LMTOS_INITIAL,
+ MasterLCPConf::LCP_TAB_SAVED,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ /**
+ * Current = LMTOS_ALL_IDLE
+ */
+ { Dbdih::LMTOS_ALL_IDLE,
+ MasterLCPConf::LCP_STATUS_IDLE,
+ Dbdih::LMTOS_ALL_IDLE },
+
+ { Dbdih::LMTOS_ALL_IDLE,
+ MasterLCPConf::LCP_STATUS_ACTIVE,
+ Dbdih::LMTOS_COPY_ONGOING },
+
+ { Dbdih::LMTOS_ALL_IDLE,
+ MasterLCPConf::LCP_TAB_COMPLETED,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ { Dbdih::LMTOS_ALL_IDLE,
+ MasterLCPConf::LCP_TAB_SAVED,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ /**
+ * Current = LMTOS_COPY_ONGOING
+ */
+ { Dbdih::LMTOS_COPY_ONGOING,
+ MasterLCPConf::LCP_STATUS_IDLE,
+ Dbdih::LMTOS_COPY_ONGOING },
+
+ { Dbdih::LMTOS_COPY_ONGOING,
+ MasterLCPConf::LCP_STATUS_ACTIVE,
+ Dbdih::LMTOS_COPY_ONGOING },
+
+ /**
+ * Current = LMTOS_ALL_ACTIVE
+ */
+ { Dbdih::LMTOS_ALL_ACTIVE,
+ MasterLCPConf::LCP_STATUS_IDLE,
+ Dbdih::LMTOS_COPY_ONGOING },
+
+ { Dbdih::LMTOS_ALL_ACTIVE,
+ MasterLCPConf::LCP_STATUS_ACTIVE,
+ Dbdih::LMTOS_ALL_ACTIVE },
+
+ { Dbdih::LMTOS_ALL_ACTIVE,
+ MasterLCPConf::LCP_TAB_COMPLETED,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ { Dbdih::LMTOS_ALL_ACTIVE,
+ MasterLCPConf::LCP_TAB_SAVED,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ /**
+ * Current = LMTOS_LCP_CONCLUDING
+ */
+ { Dbdih::LMTOS_LCP_CONCLUDING,
+ MasterLCPConf::LCP_STATUS_IDLE,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ { Dbdih::LMTOS_LCP_CONCLUDING,
+ MasterLCPConf::LCP_STATUS_ACTIVE,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ { Dbdih::LMTOS_LCP_CONCLUDING,
+ MasterLCPConf::LCP_TAB_COMPLETED,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ { Dbdih::LMTOS_LCP_CONCLUDING,
+ MasterLCPConf::LCP_TAB_SAVED,
+ Dbdih::LMTOS_LCP_CONCLUDING }
+};
+
+const Uint32 g_masterLCPTakeoverStateTransitionsRows =
+sizeof(g_masterLCPTakeoverStateTransitions) / sizeof(struct MASTERLCP_StateTransitions);
+
+void Dbdih::execMASTER_LCPCONF(Signal* signal)
+{
+ const MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0];
+ jamEntry();
+ Uint32 senderNodeId = conf->senderNodeId;
+ MasterLCPConf::State lcpState = (MasterLCPConf::State)conf->lcpState;
+ const Uint32 failedNodeId = conf->failedNodeId;
+ NodeRecordPtr nodePtr;
+ nodePtr.i = senderNodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ nodePtr.p->lcpStateAtTakeOver = lcpState;
+
+#ifdef VM_TRACE
+ ndbout_c("MASTER_LCPCONF");
+ printMASTER_LCP_CONF(stdout, &signal->theData[0], 0, 0);
+#endif
+
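+ /**
+ * Look up the (current take over state, participant state) pair in the
+ * transition table; an unknown combination is a fatal error.
+ */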
+ bool found = false;
+ for(Uint32 i = 0; i<g_masterLCPTakeoverStateTransitionsRows; i++){
+ const struct MASTERLCP_StateTransitions * valid =
+ &g_masterLCPTakeoverStateTransitions[i];
+
+ if(valid->CurrentState == c_lcpMasterTakeOverState.state &&
+ valid->ParticipantState == lcpState){
+ jam();
+ found = true;
+ c_lcpMasterTakeOverState.set(valid->NewState, __LINE__);
+ break;
+ }
+ }
+ ndbrequire(found);
+
+ bool ok = false;
+ switch(lcpState){
+ case MasterLCPConf::LCP_STATUS_IDLE:
+ ok = true;
+ break;
+ case MasterLCPConf::LCP_STATUS_ACTIVE:
+ case MasterLCPConf::LCP_TAB_COMPLETED:
+ case MasterLCPConf::LCP_TAB_SAVED:
+ ok = true;
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.setWaitingFor(nodePtr.i);
+ break;
+ }
+ ndbrequire(ok);
+
+ receiveLoopMacro(MASTER_LCPREQ, senderNodeId);
+ /*-------------------------------------------------------------------------*/
+ // We have now received all responses and are ready to take over the LCP
+ // protocol as master.
+ /*-------------------------------------------------------------------------*/
+ MASTER_LCPhandling(signal, failedNodeId);
+}//Dbdih::execMASTER_LCPCONF()
+
+void Dbdih::execMASTER_LCPREF(Signal* signal)
+{
+ const MasterLCPRef * const ref = (MasterLCPRef *)&signal->theData[0];
+ jamEntry();
+ receiveLoopMacro(MASTER_LCPREQ, ref->senderNodeId);
+ /*-------------------------------------------------------------------------*/
+ // We have now received all responses and are ready to take over the LCP
+ // protocol as master.
+ /*-------------------------------------------------------------------------*/
+ MASTER_LCPhandling(signal, ref->failedNodeId);
+}//Dbdih::execMASTER_LCPREF()
+
+void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
+{
+ /*-------------------------------------------------------------------------
+ *
+ * WE ARE NOW READY TO CONCLUDE THE TAKE OVER AS MASTER.
+ * WE HAVE ENOUGH INFO TO START UP ACTIVITIES IN THE PROPER PLACE.
+ * ALSO SET THE PROPER STATE VARIABLES.
+ *------------------------------------------------------------------------*/
+ c_lcpState.currentFragment.tableId = c_lcpMasterTakeOverState.minTableId;
+ c_lcpState.currentFragment.fragmentId = c_lcpMasterTakeOverState.minFragId;
+ c_lcpState.m_LAST_LCP_FRAG_ORD = c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH;
+
+ NodeRecordPtr failedNodePtr;
+ failedNodePtr.i = failedNodeId;
+ ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
+
+ switch (c_lcpMasterTakeOverState.state) {
+ case LMTOS_ALL_IDLE:
+ jam();
+ /* --------------------------------------------------------------------- */
+ // All nodes were idle in the LCP protocol. Start checking for start of LCP
+ // protocol.
+ /* --------------------------------------------------------------------- */
+#ifdef VM_TRACE
+ ndbout_c("MASTER_LCPhandling:: LMTOS_ALL_IDLE -> checkLcpStart");
+#endif
+ checkLcpStart(signal, __LINE__);
+ break;
+ case LMTOS_COPY_ONGOING:
+ jam();
+ /* --------------------------------------------------------------------- */
+ // We were in the starting process of the LCP protocol. We will restart the
+ // protocol by calculating the keep gci and storing the new lcp id.
+ /* --------------------------------------------------------------------- */
+#ifdef VM_TRACE
+ ndbout_c("MASTER_LCPhandling:: LMTOS_COPY_ONGOING -> storeNewLcpId");
+#endif
+ if (c_lcpState.lcpStatus == LCP_STATUS_ACTIVE) {
+ jam();
+ /*---------------------------------------------------------------------*/
+ /* WE NEED TO DECREASE THE LATEST LCP ID SINCE WE HAVE ALREADY */
+ /* STARTED THIS */
+ /* LOCAL CHECKPOINT. */
+ /*---------------------------------------------------------------------*/
+ Uint32 lcpId = SYSFILE->latestLCP_ID;
+#ifdef VM_TRACE
+ ndbout_c("Decreasing latestLCP_ID from %d to %d", lcpId, lcpId - 1);
+#endif
+ SYSFILE->latestLCP_ID--;
+ }//if
+ storeNewLcpIdLab(signal);
+ break;
+ case LMTOS_ALL_ACTIVE:
+ {
+ jam();
+ /* -------------------------------------------------------------------
+ * Everybody was in the active phase. We will restart sending
+ * LCP_FRAGORD to the nodes from the new master.
+ * We also need to set dihLcpStatus to ZACTIVE
+ * in the master node since the master will wait for all nodes to
+ * complete before finalising the LCP process.
+ * ------------------------------------------------------------------ */
+#ifdef VM_TRACE
+ ndbout_c("MASTER_LCPhandling:: LMTOS_ALL_ACTIVE -> "
+ "startLcpRoundLoopLab(table=%u, fragment=%u)",
+ c_lcpMasterTakeOverState.minTableId,
+ c_lcpMasterTakeOverState.minFragId);
+#endif
+
+ c_lcpState.keepGci = SYSFILE->keepGCI;
+ c_lcpState.setLcpStatus(LCP_START_LCP_ROUND, __LINE__);
+ startLcpRoundLoopLab(signal, 0, 0);
+ break;
+ }
+ case LMTOS_LCP_CONCLUDING:
+ {
+ jam();
+ /* ------------------------------------------------------------------- */
+ // The LCP process is in the finalisation phase. We simply wait for it to
+ // complete as signals arrive. We also need to check if we should
+ // change state due to table write completion during state
+ // collection phase.
+ /* ------------------------------------------------------------------- */
+ ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE);
+ startLcpRoundLoopLab(signal, 0, 0);
+ break;
+ }
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ signal->theData[0] = NDB_LE_LCP_TakeoverCompleted;
+ signal->theData[1] = c_lcpMasterTakeOverState.state;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ signal->theData[0] = 7012;
+ execDUMP_STATE_ORD(signal);
+
+ signal->theData[0] = 7015;
+ execDUMP_STATE_ORD(signal);
+
+ c_lcpMasterTakeOverState.set(LMTOS_IDLE, __LINE__);
+
+ checkLocalNodefailComplete(signal, failedNodePtr.i, NF_LCP_TAKE_OVER);
+}
+
+/* ------------------------------------------------------------------------- */
+/* A BLOCK OR A NODE HAS COMPLETED THE HANDLING OF THE NODE FAILURE. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::execNF_COMPLETEREP(Signal* signal)
+{
+ NodeRecordPtr failedNodePtr;
+ NFCompleteRep * const nfCompleteRep = (NFCompleteRep *)&signal->theData[0];
+ jamEntry();
+ const Uint32 blockNo = nfCompleteRep->blockNo;
+ Uint32 nodeId = nfCompleteRep->nodeId;
+ failedNodePtr.i = nfCompleteRep->failedNodeId;
+
+ ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
+ switch (blockNo) {
+ case DBTC:
+ jam();
+ ndbrequire(failedNodePtr.p->dbtcFailCompleted == ZFALSE);
+ /* -------------------------------------------------------------------- */
+ // Report the event that DBTC completed node failure handling.
+ /* -------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
+ signal->theData[1] = DBTC;
+ signal->theData[2] = failedNodePtr.i;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+
+ failedNodePtr.p->dbtcFailCompleted = ZTRUE;
+ break;
+ case DBDICT:
+ jam();
+ ndbrequire(failedNodePtr.p->dbdictFailCompleted == ZFALSE);
+ /* --------------------------------------------------------------------- */
+ // Report the event that DBDICT completed node failure handling.
+ /* --------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
+ signal->theData[1] = DBDICT;
+ signal->theData[2] = failedNodePtr.i;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+
+ failedNodePtr.p->dbdictFailCompleted = ZTRUE;
+ break;
+ case DBDIH:
+ jam();
+ ndbrequire(failedNodePtr.p->dbdihFailCompleted == ZFALSE);
+ /* --------------------------------------------------------------------- */
+ // Report the event that DBDIH completed node failure handling.
+ /* --------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
+ signal->theData[1] = DBDIH;
+ signal->theData[2] = failedNodePtr.i;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+
+ failedNodePtr.p->dbdihFailCompleted = ZTRUE;
+ break;
+ case DBLQH:
+ jam();
+ ndbrequire(failedNodePtr.p->dblqhFailCompleted == ZFALSE);
+ /* --------------------------------------------------------------------- */
+ // Report the event that DBLQH completed node failure handling.
+ /* --------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
+ signal->theData[1] = DBLQH;
+ signal->theData[2] = failedNodePtr.i;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+
+ failedNodePtr.p->dblqhFailCompleted = ZTRUE;
+ break;
+ case 0: /* Node has finished */
+ jam();
+ ndbrequire(nodeId < MAX_NDB_NODES);
+
+ if (failedNodePtr.p->recNODE_FAILREP == ZFALSE) {
+ jam();
+ /* ------------------------------------------------------------------- */
+ // We received a report about completion of node failure before we
+ // received the message about the NODE failure ourselves.
+ // We will send the signal to ourselves with a small delay
+ // (10 milliseconds).
+ /* ------------------------------------------------------------------- */
+ //nf->from = __LINE__;
+ sendSignalWithDelay(reference(), GSN_NF_COMPLETEREP, signal, 10,
+ signal->length());
+ return;
+ }//if
+
+ if (!failedNodePtr.p->m_NF_COMPLETE_REP.isWaitingFor(nodeId)){
+ jam();
+ return;
+ }
+
+ failedNodePtr.p->m_NF_COMPLETE_REP.clearWaitingFor(nodeId);
+
+ /* -------------------------------------------------------------------- */
+ // Report the event that nodeId has completed node failure handling.
+ /* -------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
+ signal->theData[1] = 0;
+ signal->theData[2] = failedNodePtr.i;
+ signal->theData[3] = nodeId;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
+
+ nodeFailCompletedCheckLab(signal, failedNodePtr);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ if (failedNodePtr.p->dbtcFailCompleted == ZFALSE) {
+ jam();
+ return;
+ }//if
+ if (failedNodePtr.p->dbdictFailCompleted == ZFALSE) {
+ jam();
+ return;
+ }//if
+ if (failedNodePtr.p->dbdihFailCompleted == ZFALSE) {
+ jam();
+ return;
+ }//if
+ if (failedNodePtr.p->dblqhFailCompleted == ZFALSE) {
+ jam();
+ return;
+ }//if
+ /* ----------------------------------------------------------------------- */
+ /* ALL BLOCKS IN THIS NODE HAVE COMPLETED THEIR PART OF HANDLING THE */
+ /* NODE FAILURE. WE CAN NOW REPORT THIS COMPLETION TO ALL OTHER NODES. */
+ /* ----------------------------------------------------------------------- */
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ BlockReference ref = calcDihBlockRef(nodePtr.i);
+ NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0];
+ nf->blockNo = 0;
+ nf->nodeId = cownNodeId;
+ nf->failedNodeId = failedNodePtr.i;
+ nf->from = __LINE__;
+ sendSignal(ref, GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+ }//if
+ }//for
+ return;
+}//Dbdih::execNF_COMPLETEREP()
+
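+/**
+ * Called each time some node reports completed node failure handling. When
+ * all alive nodes have reported, the failed node is marked DEAD, QMGR is
+ * informed with NDB_FAILCONF and, if we are master, an interrupted copy
+ * fragment is restarted or a new take over is considered.
+ */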
+void Dbdih::nodeFailCompletedCheckLab(Signal* signal,
+ NodeRecordPtr failedNodePtr)
+{
+ jam();
+ if (!failedNodePtr.p->m_NF_COMPLETE_REP.done()){
+ jam();
+ return;
+ }//if
+ /* ---------------------------------------------------------------------- */
+ /* ALL BLOCKS IN ALL NODES HAVE NOW REPORTED COMPLETION OF THE NODE */
+ /* FAILURE HANDLING. WE ARE NOW READY TO ACCEPT THAT THIS NODE STARTS */
+ /* AGAIN. */
+ /* ---------------------------------------------------------------------- */
+ jam();
+ failedNodePtr.p->nodeStatus = NodeRecord::DEAD;
+ failedNodePtr.p->recNODE_FAILREP = ZFALSE;
+
+ /* ---------------------------------------------------------------------- */
+ // Report the event that all nodes completed node failure handling.
+ /* ---------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
+ signal->theData[1] = 0;
+ signal->theData[2] = failedNodePtr.i;
+ signal->theData[3] = 0;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
+
+ /* ---------------------------------------------------------------------- */
+ // Report to QMGR that we have concluded recovery handling of this node.
+ /* ---------------------------------------------------------------------- */
+ signal->theData[0] = failedNodePtr.i;
+ sendSignal(QMGR_REF, GSN_NDB_FAILCONF, signal, 1, JBB);
+
+ if (isMaster()) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* IF WE ARE MASTER WE MUST CHECK IF COPY FRAGMENT WAS INTERRUPTED */
+ /* BY THE FAILED NODES. */
+ /* --------------------------------------------------------------------- */
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = 0;
+ ptrAss(takeOverPtr, takeOverRecord);
+ if ((takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG) &&
+ (failedNodePtr.i == takeOverPtr.p->toCopyNode)) {
+ jam();
+#ifdef VM_TRACE
+ ndbrequire("Tell jonas" == 0);
+#endif
+ /*------------------------------------------------------------------*/
+ /* WE ARE CURRENTLY IN THE PROCESS OF COPYING A FRAGMENT. WE */
+ /* WILL CHECK IF THE COPY NODE HAS FAILED. */
+ /*------------------------------------------------------------------*/
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::SELECTING_NEXT;
+ startNextCopyFragment(signal, takeOverPtr.i);
+ return;
+ }//if
+ checkStartTakeOver(signal);
+ }//if
+ return;
+}//Dbdih::nodeFailCompletedCheckLab()
+
+/*****************************************************************************/
+/* ********** SEIZING / RELEASING MODULE *************/
+/*****************************************************************************/
+/*
+ 3.4 L O C A L N O D E S E I Z E
+ ************************************
+ */
+/*
+ 3.4.1 L O C A L N O D E S E I Z E R E Q U E S T
+ ******************************************************
+ */
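+/**
+ * Seize a connect record from the free list on behalf of the requesting
+ * block and return its index in DISEIZECONF.
+ */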
+void Dbdih::execDISEIZEREQ(Signal* signal)
+{
+ ConnectRecordPtr connectPtr;
+ jamEntry();
+ Uint32 userPtr = signal->theData[0];
+ BlockReference userRef = signal->theData[1];
+ ndbrequire(cfirstconnect != RNIL);
+ connectPtr.i = cfirstconnect;
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+ cfirstconnect = connectPtr.p->nfConnect;
+ connectPtr.p->nfConnect = RNIL;
+ connectPtr.p->userpointer = userPtr;
+ connectPtr.p->userblockref = userRef;
+ connectPtr.p->connectState = ConnectRecord::INUSE;
+ signal->theData[0] = connectPtr.p->userpointer;
+ signal->theData[1] = connectPtr.i;
+ sendSignal(userRef, GSN_DISEIZECONF, signal, 2, JBB);
+}//Dbdih::execDISEIZEREQ()
+
+/*
+ 3.5 L O C A L N O D E R E L E A S E
+ ****************************************
+ */
+/*
+ 3.5.1 L O C A L N O D E R E L E A S E R E Q U E S T
+ *******************************************************=
+ */
+void Dbdih::execDIRELEASEREQ(Signal* signal)
+{
+ ConnectRecordPtr connectPtr;
+ jamEntry();
+ connectPtr.i = signal->theData[0];
+ Uint32 userRef = signal->theData[2];
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+ ndbrequire(connectPtr.p->connectState != ConnectRecord::FREE);
+ ndbrequire(connectPtr.p->userblockref == userRef);
+ signal->theData[0] = connectPtr.p->userpointer;
+ sendSignal(connectPtr.p->userblockref, GSN_DIRELEASECONF, signal, 1, JBB);
+ release_connect(connectPtr);
+}//Dbdih::execDIRELEASEREQ()
+
+/*
+ 3.7 A D D T A B L E
+ **********************=
+ */
+/*****************************************************************************/
+/* ********** TABLE ADDING MODULE *************/
+/*****************************************************************************/
+/*
+ 3.7.1 A D D T A B L E M A I N L Y
+ ***************************************
+ */
+
+#define UNDEF_NODEGROUP 65535
+static inline void inc_node_or_group(Uint32 &node, Uint32 max_node)
+{
+ Uint32 next = node + 1;
+ node = (next == max_node ? 0 : next);
+}
+
+/*
+ Spread fragments in backwards compatible mode
+*/
+static void set_default_node_groups(Signal *signal, Uint32 noFrags)
+{
+ Uint16 *node_group_array = (Uint16*)&signal->theData[25];
+ Uint32 i;
+ node_group_array[0] = 0;
+ for (i = 1; i < noFrags; i++)
+ node_group_array[i] = UNDEF_NODEGROUP;
+}
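+
+/*
+  Editor's note: illustrative sketch only, not part of this change and kept
+  disabled. It shows how the default spreading above behaves: fragments are
+  assigned to node groups round robin, wrapping via inc_node_or_group().
+  The function name and parameters below are assumptions for the example.
+*/
+#if 0
+static void
+spread_fragments_example(Uint16 ng_out[], Uint32 noFrags,
+                         Uint32 noOfNodeGroups, Uint32 firstNG)
+{
+  Uint32 current = firstNG;
+  for (Uint32 i = 0; i < noFrags; i++)
+  {
+    ng_out[i] = (Uint16)current;                /* fragment i -> node group */
+    inc_node_or_group(current, noOfNodeGroups); /* wraps back to group 0 */
+  }
+  /* e.g. noFrags = 4, noOfNodeGroups = 2, firstNG = 0 gives 0,1,0,1 */
+}
+#endif
+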
+void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal)
+{
+ Uint16 node_group_id[MAX_NDB_PARTITIONS];
+ jamEntry();
+ CreateFragmentationReq * const req =
+ (CreateFragmentationReq*)signal->getDataPtr();
+
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+ Uint32 noOfFragments = req->noOfFragments;
+ const Uint32 fragType = req->fragmentationType;
+ const Uint32 primaryTableId = req->primaryTableId;
+
+ Uint32 err = 0;
+
+ do {
+ NodeGroupRecordPtr NGPtr;
+ TabRecordPtr primTabPtr;
+ Uint32 count = 2;
+ Uint16 noOfReplicas = cnoReplicas;
+ Uint16 *fragments = (Uint16*)(signal->theData+25);
+ if (primaryTableId == RNIL) {
+ jam();
+ switch ((DictTabInfo::FragmentType)fragType)
+ {
+ /*
+ Backward compatibility and for all places in the code not yet changed.
+ */
+ case DictTabInfo::AllNodesSmallTable:
+ jam();
+ noOfFragments = csystemnodes;
+ set_default_node_groups(signal, noOfFragments);
+ break;
+ case DictTabInfo::AllNodesMediumTable:
+ jam();
+ noOfFragments = 2 * csystemnodes;
+ set_default_node_groups(signal, noOfFragments);
+ break;
+ case DictTabInfo::AllNodesLargeTable:
+ jam();
+ noOfFragments = 4 * csystemnodes;
+ set_default_node_groups(signal, noOfFragments);
+ break;
+ case DictTabInfo::SingleFragment:
+ jam();
+ noOfFragments = 1;
+ set_default_node_groups(signal, noOfFragments);
+ break;
+ default:
+ jam();
+ if (noOfFragments == 0)
+ {
+ jam();
+ err = CreateFragmentationRef::InvalidFragmentationType;
+ }
+ break;
+ }
+ if (err)
+ break;
+ /*
+ When we come here the exact partitioning is specified
+ and there is an array of node groups sent along as well.
+ */
+ memcpy(&node_group_id[0], &signal->theData[25], 2 * noOfFragments);
+ Uint16 next_replica_node[MAX_NDB_NODES];
+ memset(next_replica_node,0,sizeof(next_replica_node));
+ Uint32 default_node_group= c_nextNodeGroup;
+ for(Uint32 fragNo = 0; fragNo < noOfFragments; fragNo++)
+ {
+ jam();
+ NGPtr.i = node_group_id[fragNo];
+ if (NGPtr.i == UNDEF_NODEGROUP)
+ {
+ jam();
+ NGPtr.i = default_node_group;
+ }
+ if (NGPtr.i > cnoOfNodeGroups)
+ {
+ jam();
+ err = CreateFragmentationRef::InvalidNodeGroup;
+ break;
+ }
+ ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
+ const Uint32 max = NGPtr.p->nodeCount;
+
+ Uint32 tmp= next_replica_node[NGPtr.i];
+ for(Uint32 replicaNo = 0; replicaNo < noOfReplicas; replicaNo++)
+ {
+ jam();
+ const Uint16 nodeId = NGPtr.p->nodesInGroup[tmp];
+ fragments[count++]= nodeId;
+ inc_node_or_group(tmp, max);
+ }
+ inc_node_or_group(tmp, max);
+ next_replica_node[NGPtr.i]= tmp;
+
+ /**
+ * Next node group for next fragment
+ */
+ inc_node_or_group(default_node_group, cnoOfNodeGroups);
+ }
+ if (err)
+ {
+ jam();
+ break;
+ }
+ else
+ {
+ jam();
+ c_nextNodeGroup = default_node_group;
+ }
+ } else {
+ if (primaryTableId >= ctabFileSize) {
+ jam();
+ err = CreateFragmentationRef::InvalidPrimaryTable;
+ break;
+ }
+ primTabPtr.i = primaryTableId;
+ ptrAss(primTabPtr, tabRecord);
+ if (primTabPtr.p->tabStatus != TabRecord::TS_ACTIVE) {
+ jam();
+ err = CreateFragmentationRef::InvalidPrimaryTable;
+ break;
+ }
+ noOfFragments= primTabPtr.p->totalfragments;
+ for (Uint32 fragNo = 0;
+ fragNo < noOfFragments; fragNo++) {
+ jam();
+ FragmentstorePtr fragPtr;
+ ReplicaRecordPtr replicaPtr;
+ getFragstore(primTabPtr.p, fragNo, fragPtr);
+ fragments[count++]= fragPtr.p->preferredPrimary;
+ for (replicaPtr.i = fragPtr.p->storedReplicas;
+ replicaPtr.i != RNIL;
+ replicaPtr.i = replicaPtr.p->nextReplica) {
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if (replicaPtr.p->procNode != fragPtr.p->preferredPrimary) {
+ jam();
+ fragments[count++]= replicaPtr.p->procNode;
+ }
+ }
+ for (replicaPtr.i = fragPtr.p->oldStoredReplicas;
+ replicaPtr.i != RNIL;
+ replicaPtr.i = replicaPtr.p->nextReplica) {
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if (replicaPtr.p->procNode != fragPtr.p->preferredPrimary) {
+ jam();
+ fragments[count++]= replicaPtr.p->procNode;
+ }
+ }
+ }
+ }
+ ndbrequire(count == (2U + noOfReplicas * noOfFragments));
+
+ CreateFragmentationConf * const conf =
+ (CreateFragmentationConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->noOfReplicas = (Uint32)noOfReplicas;
+ conf->noOfFragments = (Uint32)noOfFragments;
+
+ fragments[0]= noOfReplicas;
+ fragments[1]= noOfFragments;
+
+ if(senderRef != 0)
+ {
+ jam();
+ LinearSectionPtr ptr[3];
+ ptr[0].p = (Uint32*)&fragments[0];
+ ptr[0].sz = (count + 1) / 2;
+ sendSignal(senderRef,
+ GSN_CREATE_FRAGMENTATION_CONF,
+ signal,
+ CreateFragmentationConf::SignalLength,
+ JBB,
+ ptr,
+ 1);
+ }
+ // Always ACK/NACK (here ACK)
+ signal->theData[0] = 0;
+ return;
+ } while(false);
+ // Always ACK/NACK (here NACK)
+ signal->theData[0] = err;
+}
+
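+/*
+  Editor's note: illustrative sketch only, not part of this change and kept
+  disabled. It documents the layout of the Uint16 array built above:
+  fragments[0] = noOfReplicas, fragments[1] = noOfFragments, followed by
+  noOfFragments groups of noOfReplicas node ids, where the first id in each
+  group is the preferred primary. The helper name is an assumption.
+*/
+#if 0
+static Uint16
+frag_replica_node(const Uint16 * fragments, Uint32 fragId, Uint32 replicaNo)
+{
+  const Uint16 noOfReplicas = fragments[0];
+  /* fragments[1] is noOfFragments; the node id data starts at index 2 */
+  return fragments[2 + fragId * noOfReplicas + replicaNo];
+}
+#endif
+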
+void Dbdih::execDIADDTABREQ(Signal* signal)
+{
+ Uint32 fragType;
+ jamEntry();
+
+ DiAddTabReq * const req = (DiAddTabReq*)signal->getDataPtr();
+
+ // Seize connect record
+ ndbrequire(cfirstconnect != RNIL);
+ ConnectRecordPtr connectPtr;
+ connectPtr.i = cfirstconnect;
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+ cfirstconnect = connectPtr.p->nfConnect;
+
+ const Uint32 userPtr = req->connectPtr;
+ const BlockReference userRef = signal->getSendersBlockRef();
+ connectPtr.p->nfConnect = RNIL;
+ connectPtr.p->userpointer = userPtr;
+ connectPtr.p->userblockref = userRef;
+ connectPtr.p->connectState = ConnectRecord::INUSE;
+ connectPtr.p->table = req->tableId;
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ tabPtr.p->connectrec = connectPtr.i;
+ tabPtr.p->tableType = req->tableType;
+ fragType= req->fragType;
+ tabPtr.p->schemaVersion = req->schemaVersion;
+ tabPtr.p->primaryTableId = req->primaryTableId;
+
+ if(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE){
+ jam();
+ tabPtr.p->tabStatus = TabRecord::TS_CREATING;
+ sendAddFragreq(signal, connectPtr, tabPtr, 0);
+ return;
+ }
+
+ if(getNodeState().getSystemRestartInProgress() &&
+ tabPtr.p->tabStatus == TabRecord::TS_IDLE){
+ jam();
+
+ ndbrequire(cmasterNodeId == getOwnNodeId());
+ tabPtr.p->tabStatus = TabRecord::TS_CREATING;
+
+ initTableFile(tabPtr);
+ FileRecordPtr filePtr;
+ filePtr.i = tabPtr.p->tabFile[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_TABLE;
+ return;
+ }
+
+ /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
+ /* AT INITIALISATION A TABLE DESCRIPTION FILE IS     */
+ /* CREATED WITH THE APPROPRIATE SIZE. EACH RECORD IN */
+ /* THIS FILE HOLDS THE INFORMATION ABOUT ONE TABLE.  */
+ /* THE POINTER TO THIS RECORD IS THE TABLE REFERENCE.*/
+ /* IN THE BEGINNING ALL RECORDS ARE CREATED BUT THEY */
+ /* DO NOT HOLD ANY INFORMATION ABOUT ANY TABLE.      */
+ /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
+ tabPtr.p->tabStatus = TabRecord::TS_CREATING;
+ tabPtr.p->storedTable = req->storedTable;
+ tabPtr.p->kvalue = req->kValue;
+
+ switch ((DictTabInfo::FragmentType)fragType)
+ {
+ case DictTabInfo::AllNodesSmallTable:
+ case DictTabInfo::AllNodesMediumTable:
+ case DictTabInfo::AllNodesLargeTable:
+ case DictTabInfo::SingleFragment:
+ jam();
+ case DictTabInfo::DistrKeyLin:
+ jam();
+ tabPtr.p->method= TabRecord::LINEAR_HASH;
+ break;
+ case DictTabInfo::DistrKeyHash:
+ case DictTabInfo::DistrKeyUniqueHashIndex:
+ case DictTabInfo::DistrKeyOrderedIndex:
+ jam();
+ tabPtr.p->method= TabRecord::NORMAL_HASH;
+ break;
+ case DictTabInfo::UserDefined:
+ jam();
+ tabPtr.p->method= TabRecord::USER_DEFINED;
+ break;
+ default:
+ ndbrequire(false);
+ }
+
+ union {
+ Uint16 fragments[2 + MAX_FRAG_PER_NODE*MAX_REPLICAS*MAX_NDB_NODES];
+ Uint32 align;
+ };
+ SegmentedSectionPtr fragDataPtr;
+ signal->getSection(fragDataPtr, DiAddTabReq::FRAGMENTATION);
+ copy((Uint32*)fragments, fragDataPtr);
+ releaseSections(signal);
+
+ const Uint32 noReplicas = fragments[0];
+ const Uint32 noFragments = fragments[1];
+
+ tabPtr.p->noOfBackups = noReplicas - 1;
+ tabPtr.p->totalfragments = noFragments;
+ ndbrequire(noReplicas == cnoReplicas); // Only the configured replica count is allowed
+
+ if (ERROR_INSERTED(7173)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
+ return;
+ }
+ if ((noReplicas * noFragments) > cnoFreeReplicaRec) {
+ jam();
+ addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
+ return;
+ }//if
+ if (noFragments > cremainingfrags) {
+ jam();
+ addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
+ return;
+ }//if
+
+ Uint32 logTotalFragments = 1;
+ while (logTotalFragments <= tabPtr.p->totalfragments) {
+ jam();
+ logTotalFragments <<= 1;
+ }
+ logTotalFragments >>= 1;
+ tabPtr.p->mask = logTotalFragments - 1;
+ tabPtr.p->hashpointer = tabPtr.p->totalfragments - logTotalFragments;
+ allocFragments(tabPtr.p->totalfragments, tabPtr);
+
+ Uint32 index = 2;
+ for (Uint32 fragId = 0; fragId < noFragments; fragId++) {
+ jam();
+ FragmentstorePtr fragPtr;
+ Uint32 activeIndex = 0;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ fragPtr.p->preferredPrimary = fragments[index];
+ for (Uint32 i = 0; i<noReplicas; i++) {
+ const Uint32 nodeId = fragments[index++];
+ ReplicaRecordPtr replicaPtr;
+ allocStoredReplica(fragPtr, replicaPtr, nodeId);
+ if (getNodeStatus(nodeId) == NodeRecord::ALIVE) {
+ jam();
+ ndbrequire(activeIndex < MAX_REPLICAS);
+ fragPtr.p->activeNodes[activeIndex] = nodeId;
+ activeIndex++;
+ } else {
+ jam();
+ removeStoredReplica(fragPtr, replicaPtr);
+ linkOldStoredReplica(fragPtr, replicaPtr);
+ }//if
+ }//for
+ fragPtr.p->fragReplicas = activeIndex;
+ ndbrequire(activeIndex > 0 && fragPtr.p->storedReplicas != RNIL);
+ }
+ initTableFile(tabPtr);
+ tabPtr.p->tabCopyStatus = TabRecord::CS_ADD_TABLE_MASTER;
+ signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+}
+
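+/*
+  Editor's note: minimal sketch, not part of this change and kept disabled.
+  It restates the LINEAR_HASH setup done above: mask is the largest 2^n - 1
+  not exceeding the fragment count and hashpointer is the split point.
+  E.g. totalfragments = 6 gives 2^n = 4, mask = 3, hashpointer = 2.
+  The helper name and out-parameters are assumptions for the example.
+*/
+#if 0
+static void
+linhash_setup(Uint32 totalfragments, Uint32 & mask, Uint32 & hashpointer)
+{
+  Uint32 logTotalFragments = 1;
+  while (logTotalFragments <= totalfragments)
+    logTotalFragments <<= 1;
+  logTotalFragments >>= 1;            /* largest power of two <= count */
+  mask = logTotalFragments - 1;
+  hashpointer = totalfragments - logTotalFragments;
+}
+#endif
+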
+void
+Dbdih::addTable_closeConf(Signal * signal, Uint32 tabPtrI){
+ TabRecordPtr tabPtr;
+ tabPtr.i = tabPtrI;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ConnectRecordPtr connectPtr;
+ connectPtr.i = tabPtr.p->connectrec;
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+
+ sendAddFragreq(signal, connectPtr, tabPtr, 0);
+}
+
+void
+Dbdih::sendAddFragreq(Signal* signal, ConnectRecordPtr connectPtr,
+ TabRecordPtr tabPtr, Uint32 fragId){
+ jam();
+ const Uint32 fragCount = tabPtr.p->totalfragments;
+ ReplicaRecordPtr replicaPtr; replicaPtr.i = RNIL;
+ for(; fragId<fragCount; fragId++){
+ jam();
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+
+ replicaPtr.i = fragPtr.p->storedReplicas;
+ while(replicaPtr.i != RNIL){
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if(replicaPtr.p->procNode == getOwnNodeId()){
+ break;
+ }
+ replicaPtr.i = replicaPtr.p->nextReplica;
+ }
+
+ if(replicaPtr.i != RNIL){
+ jam();
+ break;
+ }
+
+ replicaPtr.i = fragPtr.p->oldStoredReplicas;
+ while(replicaPtr.i != RNIL){
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if(replicaPtr.p->procNode == getOwnNodeId()){
+ break;
+ }
+ replicaPtr.i = replicaPtr.p->nextReplica;
+ }
+
+ if(replicaPtr.i != RNIL){
+ jam();
+ break;
+ }
+ }
+
+ if(replicaPtr.i != RNIL){
+ jam();
+ ndbrequire(fragId < fragCount);
+ ndbrequire(replicaPtr.p->procNode == getOwnNodeId());
+
+ Uint32 requestInfo = 0;
+ if(!tabPtr.p->storedTable){
+ requestInfo |= LqhFragReq::TemporaryTable;
+ }
+
+ if(getNodeState().getNodeRestartInProgress()){
+ requestInfo |= LqhFragReq::CreateInRunning;
+ }
+
+ AddFragReq* const req = (AddFragReq*)signal->getDataPtr();
+ req->dihPtr = connectPtr.i;
+ req->senderData = connectPtr.p->userpointer;
+ req->fragmentId = fragId;
+ req->requestInfo = requestInfo;
+ req->tableId = tabPtr.i;
+ req->nextLCP = 0;
+ req->nodeId = getOwnNodeId();
+ req->totalFragments = fragCount;
+ req->startGci = SYSFILE->newestRestorableGCI;
+ sendSignal(DBDICT_REF, GSN_ADD_FRAGREQ, signal,
+ AddFragReq::SignalLength, JBB);
+ return;
+ }
+
+ // Done
+ DiAddTabConf * const conf = (DiAddTabConf*)signal->getDataPtr();
+ conf->senderData = connectPtr.p->userpointer;
+ sendSignal(connectPtr.p->userblockref, GSN_DIADDTABCONF, signal,
+ DiAddTabConf::SignalLength, JBB);
+
+ // Release
+ release_connect(connectPtr);
+}
+void
+Dbdih::release_connect(ConnectRecordPtr ptr)
+{
+ ptr.p->userblockref = ZNIL;
+ ptr.p->userpointer = RNIL;
+ ptr.p->connectState = ConnectRecord::FREE;
+ ptr.p->nfConnect = cfirstconnect;
+ cfirstconnect = ptr.i;
+}
+
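+/*
+  Editor's note: illustrative sketch only, not part of this change and kept
+  disabled. Connect records are held on a singly linked free list headed by
+  cfirstconnect: seizing pops the head (execDISEIZEREQ, execDIADDTABREQ) and
+  release_connect() pushes the record back. The struct below is an assumed
+  stand-in for the record pool, showing just that pattern.
+*/
+#if 0
+struct FreeListDemo
+{
+  Uint32 next[16];                  /* next[i] = next free slot, RNIL ends */
+  Uint32 firstFree;
+
+  Uint32 seize()                    /* caller must check firstFree != RNIL */
+  {
+    Uint32 i = firstFree;           /* pop the head of the free list */
+    firstFree = next[i];
+    next[i] = RNIL;
+    return i;
+  }
+  void release(Uint32 i)
+  {
+    next[i] = firstFree;            /* push back onto the head */
+    firstFree = i;
+  }
+};
+#endif
+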
+void
+Dbdih::execADD_FRAGCONF(Signal* signal){
+ jamEntry();
+ AddFragConf * const conf = (AddFragConf*)signal->getDataPtr();
+
+ ConnectRecordPtr connectPtr;
+ connectPtr.i = conf->dihPtr;
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = connectPtr.p->table;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ sendAddFragreq(signal, connectPtr, tabPtr, conf->fragId + 1);
+}
+
+void
+Dbdih::execADD_FRAGREF(Signal* signal){
+ jamEntry();
+ AddFragRef * const ref = (AddFragRef*)signal->getDataPtr();
+
+ ConnectRecordPtr connectPtr;
+ connectPtr.i = ref->dihPtr;
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+
+ {
+ DiAddTabRef * const ref = (DiAddTabRef*)signal->getDataPtr();
+ ref->senderData = connectPtr.p->userpointer;
+ ref->errorCode = ~0;
+ sendSignal(connectPtr.p->userblockref, GSN_DIADDTABREF, signal,
+ DiAddTabRef::SignalLength, JBB);
+ }
+
+ // Release
+ release_connect(connectPtr);
+}
+
+/*
+ 3.7.1.3 R E F U S E
+ *********************
+ */
+void Dbdih::addtabrefuseLab(Signal* signal, ConnectRecordPtr connectPtr, Uint32 errorCode)
+{
+ signal->theData[0] = connectPtr.p->userpointer;
+ signal->theData[1] = errorCode;
+ sendSignal(connectPtr.p->userblockref, GSN_DIADDTABREF, signal, 2, JBB);
+ release_connect(connectPtr);
+ return;
+}//Dbdih::addtabrefuseLab()
+
+/*
+ 3.7.2 A D D T A B L E D U P L I C A T I O N
+ *************************************************
+ */
+/*
+ 3.7.2.1 A D D T A B L E D U P L I C A T I O N R E Q U E S T
+ *******************************************************************=
+ */
+
+/*
+ D E L E T E T A B L E
+ **********************=
+ */
+/*****************************************************************************/
+/*********** DELETE TABLE MODULE *************/
+/*****************************************************************************/
+void
+Dbdih::execDROP_TAB_REQ(Signal* signal){
+ jamEntry();
+ DropTabReq* req = (DropTabReq*)signal->getDataPtr();
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ tabPtr.p->m_dropTab.tabUserRef = req->senderRef;
+ tabPtr.p->m_dropTab.tabUserPtr = req->senderData;
+
+ DropTabReq::RequestType rt = (DropTabReq::RequestType)req->requestType;
+
+ switch(rt){
+ case DropTabReq::OnlineDropTab:
+ jam();
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_DROPPING);
+ releaseTable(tabPtr);
+ break;
+ case DropTabReq::CreateTabDrop:
+ jam();
+ releaseTable(tabPtr);
+ break;
+ case DropTabReq::RestartDropTab:
+ break;
+ }
+
+ startDeleteFile(signal, tabPtr);
+}
+
+void Dbdih::startDeleteFile(Signal* signal, TabRecordPtr tabPtr)
+{
+ if (tabPtr.p->tabFile[0] == RNIL) {
+ jam();
+ initTableFile(tabPtr);
+ }//if
+ openTableFileForDelete(signal, tabPtr.p->tabFile[0]);
+}//Dbdih::startDeleteFile()
+
+void Dbdih::openTableFileForDelete(Signal* signal, Uint32 fileIndex)
+{
+ FileRecordPtr filePtr;
+ filePtr.i = fileIndex;
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::TABLE_OPEN_FOR_DELETE;
+}//Dbdih::openTableFileForDelete()
+
+void Dbdih::tableOpenLab(Signal* signal, FileRecordPtr filePtr)
+{
+ closeFileDelete(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::TABLE_CLOSE_DELETE;
+ return;
+}//Dbdih::tableOpenLab()
+
+void Dbdih::tableDeleteLab(Signal* signal, FileRecordPtr filePtr)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if (filePtr.i == tabPtr.p->tabFile[0]) {
+ jam();
+ openTableFileForDelete(signal, tabPtr.p->tabFile[1]);
+ return;
+ }//if
+ ndbrequire(filePtr.i == tabPtr.p->tabFile[1]);
+
+ releaseFile(tabPtr.p->tabFile[0]);
+ releaseFile(tabPtr.p->tabFile[1]);
+ tabPtr.p->tabFile[0] = tabPtr.p->tabFile[1] = RNIL;
+
+ tabPtr.p->tabStatus = TabRecord::TS_IDLE;
+
+ DropTabConf * const dropConf = (DropTabConf *)signal->getDataPtrSend();
+ dropConf->senderRef = reference();
+ dropConf->senderData = tabPtr.p->m_dropTab.tabUserPtr;
+ dropConf->tableId = tabPtr.i;
+ sendSignal(tabPtr.p->m_dropTab.tabUserRef, GSN_DROP_TAB_CONF,
+ signal, DropTabConf::SignalLength, JBB);
+
+ tabPtr.p->m_dropTab.tabUserPtr = RNIL;
+ tabPtr.p->m_dropTab.tabUserRef = 0;
+}//Dbdih::tableDeleteLab()
+
+
+void Dbdih::releaseTable(TabRecordPtr tabPtr)
+{
+ FragmentstorePtr fragPtr;
+ if (tabPtr.p->noOfFragChunks > 0) {
+ for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
+ jam();
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ releaseReplicas(fragPtr.p->storedReplicas);
+ releaseReplicas(fragPtr.p->oldStoredReplicas);
+ }//for
+ releaseFragments(tabPtr);
+ }
+ if (tabPtr.p->tabFile[0] != RNIL) {
+ jam();
+ releaseFile(tabPtr.p->tabFile[0]);
+ releaseFile(tabPtr.p->tabFile[1]);
+ tabPtr.p->tabFile[0] = tabPtr.p->tabFile[1] = RNIL;
+ }//if
+}//Dbdih::releaseTable()
+
+void Dbdih::releaseReplicas(Uint32 replicaPtrI)
+{
+ ReplicaRecordPtr replicaPtr;
+ replicaPtr.i = replicaPtrI;
+ jam();
+ while (replicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ Uint32 tmp = replicaPtr.p->nextReplica;
+ replicaPtr.p->nextReplica = cfirstfreeReplica;
+ cfirstfreeReplica = replicaPtr.i;
+ replicaPtr.i = tmp;
+ cnoFreeReplicaRec++;
+ }//while
+}//Dbdih::releaseReplicas()
+
+void Dbdih::seizeReplicaRec(ReplicaRecordPtr& replicaPtr)
+{
+ replicaPtr.i = cfirstfreeReplica;
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ cfirstfreeReplica = replicaPtr.p->nextReplica;
+ cnoFreeReplicaRec--;
+ replicaPtr.p->nextReplica = RNIL;
+}//Dbdih::seizeReplicaRec()
+
+void Dbdih::releaseFile(Uint32 fileIndex)
+{
+ FileRecordPtr filePtr;
+ filePtr.i = fileIndex;
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ filePtr.p->nextFile = cfirstfreeFile;
+ cfirstfreeFile = filePtr.i;
+}//Dbdih::releaseFile()
+
+
+void Dbdih::execALTER_TAB_REQ(Signal * signal)
+{
+ AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr();
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+ const Uint32 changeMask = req->changeMask;
+ const Uint32 tableId = req->tableId;
+ const Uint32 tableVersion = req->tableVersion;
+ const Uint32 gci = req->gci;
+ AlterTabReq::RequestType requestType =
+ (AlterTabReq::RequestType) req->requestType;
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ tabPtr.p->schemaVersion = tableVersion;
+
+ // Request handled successfully
+ AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->changeMask = changeMask;
+ conf->tableId = tableId;
+ conf->tableVersion = tableVersion;
+ conf->gci = gci;
+ conf->requestType = requestType;
+ sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal,
+ AlterTabConf::SignalLength, JBB);
+}
+
+/*
+ G E T N O D E S
+ **********************=
+ */
+/*****************************************************************************/
+/* ********** TRANSACTION HANDLING MODULE *************/
+/*****************************************************************************/
+/*
+ 3.8.1 G E T N O D E S R E Q U E S T
+ ******************************************
+ Asks what nodes should be part of a transaction.
+*/
+void Dbdih::execDIGETNODESREQ(Signal* signal)
+{
+ const DiGetNodesReq * const req = (DiGetNodesReq *)&signal->theData[0];
+ FragmentstorePtr fragPtr;
+ TabRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ Uint32 hashValue = req->hashValue;
+ Uint32 ttabFileSize = ctabFileSize;
+ Uint32 fragId;
+ DiGetNodesConf * const conf = (DiGetNodesConf *)&signal->theData[0];
+ TabRecord* regTabDesc = tabRecord;
+ jamEntry();
+ ptrCheckGuard(tabPtr, ttabFileSize, regTabDesc);
+ if (tabPtr.p->method == TabRecord::LINEAR_HASH)
+ {
+ jam();
+ fragId = hashValue & tabPtr.p->mask;
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
+ if (fragId < tabPtr.p->hashpointer) {
+ jam();
+ fragId = hashValue & ((tabPtr.p->mask << 1) + 1);
+ }//if
+ }
+ else if (tabPtr.p->method == TabRecord::NORMAL_HASH)
+ {
+ jam();
+ fragId= hashValue % tabPtr.p->totalfragments;
+ }
+ else
+ {
+ jam();
+ ndbassert(tabPtr.p->method == TabRecord::USER_DEFINED);
+ fragId= hashValue;
+ if (fragId >= tabPtr.p->totalfragments)
+ {
+ jam();
+ conf->zero= 1; //Indicate error;
+ signal->theData[1]= ZUNDEFINED_FRAGMENT_ERROR;
+ return;
+ }
+ }
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ Uint32 nodeCount = extractNodeInfo(fragPtr.p, conf->nodes);
+ Uint32 sig2 = (nodeCount - 1) +
+ (fragPtr.p->distributionKey << 16);
+ conf->zero = 0;
+ conf->reqinfo = sig2;
+ conf->fragId = fragId;
+}//Dbdih::execDIGETNODESREQ()
+
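+/*
+  Editor's note: minimal sketch, not part of this change and kept disabled.
+  It restates the LINEAR_HASH lookup above: start with hashValue & mask and,
+  if the bucket lies below the split pointer, it has already been split, so
+  one more address bit is used. The helper name is an assumption.
+*/
+#if 0
+static Uint32
+linhash_fragment(Uint32 hashValue, Uint32 mask, Uint32 hashpointer)
+{
+  Uint32 fragId = hashValue & mask;
+  if (fragId < hashpointer)
+    fragId = hashValue & ((mask << 1) + 1);   /* use one extra bit */
+  return fragId;
+  /* e.g. mask = 3, hashpointer = 2: hashValue 5 -> 1 < 2 -> 5 & 7 = 5 */
+}
+#endif
+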
+Uint32 Dbdih::extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[])
+{
+ Uint32 nodeCount = 0;
+ for (Uint32 i = 0; i < fragPtr->fragReplicas; i++) {
+ jam();
+ NodeRecordPtr nodePtr;
+ ndbrequire(i < MAX_REPLICAS);
+ nodePtr.i = fragPtr->activeNodes[i];
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->useInTransactions) {
+ jam();
+ nodes[nodeCount] = nodePtr.i;
+ nodeCount++;
+ }//if
+ }//for
+ ndbrequire(nodeCount > 0);
+ return nodeCount;
+}//Dbdih::extractNodeInfo()
+
+void
+Dbdih::getFragstore(TabRecord * tab, //In parameter
+ Uint32 fragNo, //In parameter
+ FragmentstorePtr & fragptr) //Out parameter
+{
+ FragmentstorePtr fragPtr;
+ Uint32 chunkNo = fragNo >> LOG_NO_OF_FRAGS_PER_CHUNK;
+ Uint32 chunkIndex = fragNo & (NO_OF_FRAGS_PER_CHUNK - 1);
+ Uint32 TfragstoreFileSize = cfragstoreFileSize;
+ Fragmentstore* TfragStore = fragmentstore;
+ if (chunkNo < MAX_NDB_NODES) {
+ fragPtr.i = tab->startFid[chunkNo] + chunkIndex;
+ ptrCheckGuard(fragPtr, TfragstoreFileSize, TfragStore);
+ fragptr = fragPtr;
+ return;
+ }//if
+ ndbrequire(false);
+}//Dbdih::getFragstore()
+
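+/*
+  Editor's note: minimal sketch, not part of this change and kept disabled.
+  Fragment records are stored in chunks of NO_OF_FRAGS_PER_CHUNK; the lookup
+  above splits the fragment number into a chunk number (high bits) and an
+  index within the chunk (low bits). The helper name is an assumption.
+*/
+#if 0
+static void
+split_frag_no(Uint32 fragNo, Uint32 & chunkNo, Uint32 & chunkIndex)
+{
+  chunkNo    = fragNo >> LOG_NO_OF_FRAGS_PER_CHUNK;
+  chunkIndex = fragNo & (NO_OF_FRAGS_PER_CHUNK - 1);
+  /* the record index is then tab->startFid[chunkNo] + chunkIndex */
+}
+#endif
+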
+void Dbdih::allocFragments(Uint32 noOfFragments, TabRecordPtr tabPtr)
+{
+ FragmentstorePtr fragPtr;
+ Uint32 noOfChunks = (noOfFragments + (NO_OF_FRAGS_PER_CHUNK - 1)) >> LOG_NO_OF_FRAGS_PER_CHUNK;
+ ndbrequire(cremainingfrags >= noOfFragments);
+ for (Uint32 i = 0; i < noOfChunks; i++) {
+ jam();
+ Uint32 baseFrag = cfirstfragstore;
+ tabPtr.p->startFid[i] = baseFrag;
+ fragPtr.i = baseFrag;
+ ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
+ cfirstfragstore = fragPtr.p->nextFragmentChunk;
+ cremainingfrags -= NO_OF_FRAGS_PER_CHUNK;
+ for (Uint32 j = 0; j < NO_OF_FRAGS_PER_CHUNK; j++) {
+ jam();
+ fragPtr.i = baseFrag + j;
+ ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
+ initFragstore(fragPtr);
+ }//for
+ }//for
+ tabPtr.p->noOfFragChunks = noOfChunks;
+}//Dbdih::allocFragments()
+
+void Dbdih::releaseFragments(TabRecordPtr tabPtr)
+{
+ FragmentstorePtr fragPtr;
+ for (Uint32 i = 0; i < tabPtr.p->noOfFragChunks; i++) {
+ jam();
+ Uint32 baseFrag = tabPtr.p->startFid[i];
+ fragPtr.i = baseFrag;
+ ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
+ fragPtr.p->nextFragmentChunk = cfirstfragstore;
+ cfirstfragstore = baseFrag;
+ tabPtr.p->startFid[i] = RNIL;
+ cremainingfrags += NO_OF_FRAGS_PER_CHUNK;
+ }//for
+ tabPtr.p->noOfFragChunks = 0;
+}//Dbdih::releaseFragments()
+
+void Dbdih::initialiseFragstore()
+{
+ Uint32 i;
+ FragmentstorePtr fragPtr;
+ for (i = 0; i < cfragstoreFileSize; i++) {
+ fragPtr.i = i;
+ ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
+ initFragstore(fragPtr);
+ }//for
+ Uint32 noOfChunks = cfragstoreFileSize >> LOG_NO_OF_FRAGS_PER_CHUNK;
+ fragPtr.i = 0;
+ cfirstfragstore = RNIL;
+ cremainingfrags = 0;
+ for (i = 0; i < noOfChunks; i++) {
+ refresh_watch_dog();
+ ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
+ fragPtr.p->nextFragmentChunk = cfirstfragstore;
+ cfirstfragstore = fragPtr.i;
+ fragPtr.i += NO_OF_FRAGS_PER_CHUNK;
+ cremainingfrags += NO_OF_FRAGS_PER_CHUNK;
+ }//for
+}//Dbdih::initialiseFragstore()
+
+/*
+ 3.9 V E R I F I C A T I O N
+ ****************************=
+ */
+/****************************************************************************/
+/* ********** VERIFICATION SUB-MODULE *************/
+/****************************************************************************/
+/*
+ 3.9.1 R E C E I V I N G O F V E R I F I C A T I O N R E Q U E S T
+ *************************************************************************
+ */
+void Dbdih::execDIVERIFYREQ(Signal* signal)
+{
+
+ jamEntry();
+ if ((getBlockCommit() == false) &&
+ (cfirstVerifyQueue == RNIL)) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // We are not blocked and the verify queue is currently empty, so we can
+ // simply reply back to TC immediately. The method was called with
+ // EXECUTE_DIRECT so we reply back by setting signal data and returning.
+ // theData[0] already contains the correct information so
+ // we need not touch it.
+ /*-----------------------------------------------------------------------*/
+ signal->theData[1] = currentgcp;
+ signal->theData[2] = 0;
+ return;
+ }//if
+ /*-------------------------------------------------------------------------*/
+ // Since we are blocked we need to put this operation last in the verify
+ // queue to ensure that operation starts up in the correct order.
+ /*-------------------------------------------------------------------------*/
+ ApiConnectRecordPtr tmpApiConnectptr;
+ ApiConnectRecordPtr localApiConnectptr;
+
+ cverifyQueueCounter++;
+ localApiConnectptr.i = signal->theData[0];
+ tmpApiConnectptr.i = clastVerifyQueue;
+ ptrCheckGuard(localApiConnectptr, capiConnectFileSize, apiConnectRecord);
+ localApiConnectptr.p->apiGci = cnewgcp;
+ localApiConnectptr.p->nextApi = RNIL;
+ clastVerifyQueue = localApiConnectptr.i;
+ if (tmpApiConnectptr.i == RNIL) {
+ jam();
+ cfirstVerifyQueue = localApiConnectptr.i;
+ } else {
+ jam();
+ ptrCheckGuard(tmpApiConnectptr, capiConnectFileSize, apiConnectRecord);
+ tmpApiConnectptr.p->nextApi = localApiConnectptr.i;
+ }//if
+ emptyverificbuffer(signal, false);
+ signal->theData[2] = 1; // Indicate no immediate return
+ return;
+}//Dbdih::execDIVERIFYREQ()
+
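+/*
+  Editor's note: illustrative sketch only, not part of this change and kept
+  disabled. The verify queue above is a FIFO linked through nextApi, with
+  cfirstVerifyQueue/clastVerifyQueue as head and tail. The struct below is
+  an assumed stand-in for the api connect pool, showing just the append.
+*/
+#if 0
+struct VerifyQueueDemo
+{
+  Uint32 next[16];
+  Uint32 first, last;               /* both RNIL when the queue is empty */
+
+  void append(Uint32 i)
+  {
+    next[i] = RNIL;
+    if (last == RNIL)
+      first = i;                    /* the queue was empty */
+    else
+      next[last] = i;               /* link behind the old tail */
+    last = i;
+  }
+};
+#endif
+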
+void Dbdih::execDI_FCOUNTREQ(Signal* signal)
+{
+ ConnectRecordPtr connectPtr;
+ TabRecordPtr tabPtr;
+ jamEntry();
+ connectPtr.i = signal->theData[0];
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
+
+ if(connectPtr.i != RNIL){
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+ if (connectPtr.p->connectState == ConnectRecord::INUSE) {
+ jam();
+ signal->theData[0] = connectPtr.p->userpointer;
+ signal->theData[1] = tabPtr.p->totalfragments;
+ sendSignal(connectPtr.p->userblockref, GSN_DI_FCOUNTCONF, signal,2, JBB);
+ return;
+ }//if
+ signal->theData[0] = connectPtr.p->userpointer;
+ signal->theData[1] = ZERRONOUSSTATE;
+ sendSignal(connectPtr.p->userblockref, GSN_DI_FCOUNTREF, signal, 2, JBB);
+ return;
+ }//if
+
+ //connectPtr.i == RNIL -> question without connect record
+ const Uint32 senderData = signal->theData[2];
+ const BlockReference senderRef = signal->senderBlockRef();
+ signal->theData[0] = RNIL;
+ signal->theData[1] = tabPtr.p->totalfragments;
+ signal->theData[2] = tabPtr.i;
+ signal->theData[3] = senderData;
+ signal->theData[4] = tabPtr.p->noOfBackups;
+ sendSignal(senderRef, GSN_DI_FCOUNTCONF, signal, 5, JBB);
+}//Dbdih::execDI_FCOUNTREQ()
+
+void Dbdih::execDIGETPRIMREQ(Signal* signal)
+{
+ FragmentstorePtr fragPtr;
+ ConnectRecordPtr connectPtr;
+ TabRecordPtr tabPtr;
+ jamEntry();
+ Uint32 passThrough = signal->theData[1];
+ tabPtr.i = signal->theData[2];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if (DictTabInfo::isOrderedIndex(tabPtr.p->tableType)) {
+ jam();
+ tabPtr.i = tabPtr.p->primaryTableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ }
+ Uint32 fragId = signal->theData[3];
+
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
+ connectPtr.i = signal->theData[0];
+ if(connectPtr.i != RNIL)
+ {
+ jam();
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+ signal->theData[0] = connectPtr.p->userpointer;
+ }
+ else
+ {
+ jam();
+ signal->theData[0] = RNIL;
+ }
+
+ Uint32 nodes[MAX_REPLICAS];
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ Uint32 count = extractNodeInfo(fragPtr.p, nodes);
+
+ signal->theData[1] = passThrough;
+ signal->theData[2] = nodes[0];
+ signal->theData[3] = nodes[1];
+ signal->theData[4] = nodes[2];
+ signal->theData[5] = nodes[3];
+ signal->theData[6] = count;
+ signal->theData[7] = tabPtr.i;
+ signal->theData[8] = fragId;
+
+ const BlockReference senderRef = signal->senderBlockRef();
+ sendSignal(senderRef, GSN_DIGETPRIMCONF, signal, 9, JBB);
+}//Dbdih::execDIGETPRIMREQ()
+
+/****************************************************************************/
+/* ********** GLOBAL-CHECK-POINT HANDLING MODULE *************/
+/****************************************************************************/
+/*
+ 3.10 G L O B A L C H E C K P O I N T ( IN M A S T E R R O L E)
+ *******************************************************************
+ */
+void Dbdih::checkGcpStopLab(Signal* signal)
+{
+ Uint32 tgcpStatus;
+
+ tgcpStatus = cgcpStatus;
+ if (tgcpStatus == coldGcpStatus) {
+ jam();
+ if (coldGcpId == cnewgcp) {
+ jam();
+ if (cgcpStatus != GCP_READY) {
+ jam();
+ cgcpSameCounter++;
+ if (cgcpSameCounter == 1200) {
+ jam();
+#ifdef VM_TRACE
+ ndbout << "System crash due to GCP Stop in state = ";
+ ndbout << (Uint32) cgcpStatus << endl;
+#endif
+ crashSystemAtGcpStop(signal);
+ return;
+ }//if
+ } else {
+ jam();
+ if (cgcpOrderBlocked == 0) {
+ jam();
+ cgcpSameCounter++;
+ if (cgcpSameCounter == 1200) {
+ jam();
+#ifdef VM_TRACE
+ ndbout << "System crash due to GCP Stop in state = ";
+ ndbout << (Uint32) cgcpStatus << endl;
+#endif
+ crashSystemAtGcpStop(signal);
+ return;
+ }//if
+ } else {
+ jam();
+ cgcpSameCounter = 0;
+ }//if
+ }//if
+ } else {
+ jam();
+ cgcpSameCounter = 0;
+ }//if
+ } else {
+ jam();
+ cgcpSameCounter = 0;
+ }//if
+ signal->theData[0] = DihContinueB::ZCHECK_GCP_STOP;
+ signal->theData[1] = coldGcpStatus;
+ signal->theData[2] = cgcpStatus;
+ signal->theData[3] = coldGcpId;
+ signal->theData[4] = cnewgcp;
+ signal->theData[5] = cgcpSameCounter;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 6);
+ coldGcpStatus = cgcpStatus;
+ coldGcpId = cnewgcp;
+ return;
+}//Dbdih::checkGcpStopLab()
+
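+/*
+  Editor's note: not part of this change and kept disabled. The check above
+  reschedules itself every 100 ms and calls crashSystemAtGcpStop() once
+  cgcpSameCounter reaches 1200, i.e. after the GCP state has been unchanged
+  for roughly 1200 * 100 ms = 120 000 ms (two minutes). Names are assumed.
+*/
+#if 0
+static const Uint32 GCP_CHECK_INTERVAL_MS = 100;
+static const Uint32 GCP_STOP_LIMIT        = 1200;
+static const Uint32 GCP_STOP_TIMEOUT_MS   =
+  GCP_STOP_LIMIT * GCP_CHECK_INTERVAL_MS;  /* = 120 000 ms */
+#endif
+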
+void Dbdih::startGcpLab(Signal* signal, Uint32 aWaitTime)
+{
+ if ((cgcpOrderBlocked == 1) ||
+ (c_nodeStartMaster.blockGcp == true) ||
+ (cfirstVerifyQueue != RNIL)) {
+ /*************************************************************************/
+ // 1: Global Checkpoint has been stopped by management command
+ // 2: Global Checkpoint is blocked by node recovery activity
+ // 3: Previous global checkpoint is not yet completed.
+ // All this means that global checkpoint cannot start now.
+ /*************************************************************************/
+ jam();
+ cgcpStartCounter++;
+ signal->theData[0] = DihContinueB::ZSTART_GCP;
+ signal->theData[1] = aWaitTime > 100 ? (aWaitTime - 100) : 0;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2);
+ return;
+ }//if
+ if (cstartGcpNow == false && aWaitTime > 100){
+ /*************************************************************************/
+ // We still have more than 100 milliseconds before we start the next and
+ // nobody has ordered immediate start of a global checkpoint.
+ // During initial start we will use continuous global checkpoints to
+ // speed it up since we need to complete a global checkpoint after
+ // inserting a lot of records.
+ /*************************************************************************/
+ jam();
+ cgcpStartCounter++;
+ signal->theData[0] = DihContinueB::ZSTART_GCP;
+ signal->theData[1] = (aWaitTime - 100);
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2);
+ return;
+ }//if
+ cgcpStartCounter = 0;
+ cstartGcpNow = false;
+ /***************************************************************************/
+ // Report the event that a global checkpoint has started.
+ /***************************************************************************/
+ signal->theData[0] = NDB_LE_GlobalCheckpointStarted; //Event type
+ signal->theData[1] = cnewgcp;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ CRASH_INSERTION(7000);
+ cnewgcp++;
+ signal->setTrace(TestOrd::TraceGlobalCheckpoint);
+ sendLoopMacro(GCP_PREPARE, sendGCP_PREPARE);
+ cgcpStatus = GCP_PREPARE_SENT;
+}//Dbdih::startGcpLab()
+
+void Dbdih::execGCP_PREPARECONF(Signal* signal)
+{
+ jamEntry();
+ Uint32 senderNodeId = signal->theData[0];
+ Uint32 gci = signal->theData[1];
+ ndbrequire(gci == cnewgcp);
+ receiveLoopMacro(GCP_PREPARE, senderNodeId);
+ //-------------------------------------------------------------
+ // We have now received all replies. We are ready to continue
+ // with committing the global checkpoint.
+ //-------------------------------------------------------------
+ gcpcommitreqLab(signal);
+}//Dbdih::execGCP_PREPARECONF()
+
+void Dbdih::gcpcommitreqLab(Signal* signal)
+{
+ CRASH_INSERTION(7001);
+ sendLoopMacro(GCP_COMMIT, sendGCP_COMMIT);
+ cgcpStatus = GCP_COMMIT_SENT;
+ return;
+}//Dbdih::gcpcommitreqLab()
+
+void Dbdih::execGCP_NODEFINISH(Signal* signal)
+{
+ jamEntry();
+ const Uint32 senderNodeId = signal->theData[0];
+ const Uint32 gci = signal->theData[1];
+ const Uint32 failureNr = signal->theData[2];
+ if (!isMaster()) {
+ jam();
+ ndbrequire(failureNr > cfailurenr);
+ //-------------------------------------------------------------
+ // Another node thinks we are master. This could happen when that node
+ // has heard of a node failure which we have not yet heard of. Ignore
+ // signal in this case since we will discover it by sending
+ // MASTER_GCPREQ to the node.
+ //-------------------------------------------------------------
+ return;
+ } else if (cmasterState == MASTER_TAKE_OVER_GCP) {
+ jam();
+ //-------------------------------------------------------------
+ // We are currently taking over as master. We will delay the
+ // signal until we have completed the take over gcp handling.
+ //-------------------------------------------------------------
+ sendSignalWithDelay(reference(), GSN_GCP_NODEFINISH, signal, 20, 3);
+ return;
+ } else {
+ ndbrequire(cmasterState == MASTER_ACTIVE);
+ }//if
+ ndbrequire(gci == coldgcp);
+ receiveLoopMacro(GCP_COMMIT, senderNodeId);
+ //-------------------------------------------------------------
+ // We have now received all replies. We are ready to continue
+ // with saving the global checkpoint to disk.
+ //-------------------------------------------------------------
+ CRASH_INSERTION(7002);
+ gcpsavereqLab(signal);
+ return;
+}//Dbdih::execGCP_NODEFINISH()
+
+void Dbdih::gcpsavereqLab(Signal* signal)
+{
+ sendLoopMacro(GCP_SAVEREQ, sendGCP_SAVEREQ);
+ cgcpStatus = GCP_NODE_FINISHED;
+}//Dbdih::gcpsavereqLab()
+
+void Dbdih::execGCP_SAVECONF(Signal* signal)
+{
+ jamEntry();
+ const GCPSaveConf * const saveConf = (GCPSaveConf*)&signal->theData[0];
+ ndbrequire(saveConf->gci == coldgcp);
+ ndbrequire(saveConf->nodeId == saveConf->dihPtr);
+ SYSFILE->lastCompletedGCI[saveConf->nodeId] = saveConf->gci;
+ GCP_SAVEhandling(signal, saveConf->nodeId);
+}//Dbdih::execGCP_SAVECONF()
+
+void Dbdih::execGCP_SAVEREF(Signal* signal)
+{
+ jamEntry();
+ const GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
+ ndbrequire(saveRef->gci == coldgcp);
+ ndbrequire(saveRef->nodeId == saveRef->dihPtr);
+ /**
+ * Only allow reason not to save
+ */
+ ndbrequire(saveRef->errorCode == GCPSaveRef::NodeShutdownInProgress ||
+ saveRef->errorCode == GCPSaveRef::FakedSignalDueToNodeFailure ||
+ saveRef->errorCode == GCPSaveRef::NodeRestartInProgress);
+ GCP_SAVEhandling(signal, saveRef->nodeId);
+}//Dbdih::execGCP_SAVEREF()
+
+void Dbdih::GCP_SAVEhandling(Signal* signal, Uint32 nodeId)
+{
+ receiveLoopMacro(GCP_SAVEREQ, nodeId);
+ /*-------------------------------------------------------------------------*/
+ // All nodes have replied. We are ready to update the system file.
+ /*-------------------------------------------------------------------------*/
+ cgcpStatus = GCP_SAVE_LQH_FINISHED;
+ CRASH_INSERTION(7003);
+ checkToCopy();
+ /**------------------------------------------------------------------------
+ * SET NEW RECOVERABLE GCI. ALSO RESET RESTART COUNTER TO ZERO.
+ * THIS INDICATES THAT THE SYSTEM HAS BEEN RECOVERED AND SURVIVED AT
+ * LEAST ONE GLOBAL CHECKPOINT PERIOD. WE WILL USE THIS PARAMETER TO
+ * SET BACK THE RESTART GCI IF WE ENCOUNTER MORE THAN ONE UNSUCCESSFUL
+ * RESTART.
+ *------------------------------------------------------------------------*/
+ SYSFILE->newestRestorableGCI = coldgcp;
+ if(Sysfile::getInitialStartOngoing(SYSFILE->systemRestartBits) &&
+ getNodeState().startLevel == NodeState::SL_STARTED){
+ jam();
+#if 0
+ ndbout_c("Dbdih: Clearing initial start ongoing");
+#endif
+ Sysfile::clearInitialStartOngoing(SYSFILE->systemRestartBits);
+ }
+ copyGciLab(signal, CopyGCIReq::GLOBAL_CHECKPOINT);
+}//Dbdih::GCP_SAVEhandling()
+
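+/*
+  Editor's note: summary sketch only, not part of this change and kept
+  disabled. It lists the master side phases of one GCP round as implemented
+  above; the enum and its names are assumptions made for the overview.
+*/
+#if 0
+enum GcpMasterPhase
+{
+  PHASE_PREPARE,  /* GCP_PREPARE -> GCP_PREPARECONF, cgcpStatus = GCP_PREPARE_SENT */
+  PHASE_COMMIT,   /* GCP_COMMIT  -> GCP_NODEFINISH,  cgcpStatus = GCP_COMMIT_SENT  */
+  PHASE_SAVE,     /* GCP_SAVEREQ -> GCP_SAVECONF/REF, then GCP_SAVE_LQH_FINISHED   */
+  PHASE_COPY_GCI  /* COPY_GCIREQ -> COPY_GCICONF, back to GCP_READY after cgcpDelay */
+};
+#endif
+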
+/*
+ 3.11 G L O B A L C H E C K P O I N T (N O T - M A S T E R)
+ *************************************************************
+ */
+void Dbdih::execGCP_PREPARE(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(7005);
+ Uint32 masterNodeId = signal->theData[0];
+ Uint32 gci = signal->theData[1];
+ BlockReference retRef = calcDihBlockRef(masterNodeId);
+
+ ndbrequire (cmasterdihref == retRef);
+ ndbrequire (cgcpParticipantState == GCP_PARTICIPANT_READY);
+ ndbrequire (gci == (currentgcp + 1));
+
+ cgckptflag = true;
+ cgcpParticipantState = GCP_PARTICIPANT_PREPARE_RECEIVED;
+ cnewgcp = gci;
+
+ signal->theData[0] = cownNodeId;
+ signal->theData[1] = gci;
+ sendSignal(retRef, GSN_GCP_PREPARECONF, signal, 2, JBA);
+ return;
+}//Dbdih::execGCP_PREPARE()
+
+void Dbdih::execGCP_COMMIT(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(7006);
+ Uint32 masterNodeId = signal->theData[0];
+ Uint32 gci = signal->theData[1];
+
+ ndbrequire(gci == (currentgcp + 1));
+ ndbrequire(masterNodeId == cmasterNodeId);
+ ndbrequire(cgcpParticipantState == GCP_PARTICIPANT_PREPARE_RECEIVED);
+
+ coldgcp = currentgcp;
+ currentgcp = cnewgcp;
+ cgckptflag = false;
+ emptyverificbuffer(signal, true);
+ cgcpParticipantState = GCP_PARTICIPANT_COMMIT_RECEIVED;
+ signal->theData[1] = coldgcp;
+ sendSignal(clocaltcblockref, GSN_GCP_NOMORETRANS, signal, 2, JBB);
+ return;
+}//Dbdih::execGCP_COMMIT()
+
+void Dbdih::execGCP_TCFINISHED(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(7007);
+ Uint32 gci = signal->theData[1];
+ ndbrequire(gci == coldgcp);
+
+ cgcpParticipantState = GCP_PARTICIPANT_TC_FINISHED;
+ signal->theData[0] = cownNodeId;
+ signal->theData[1] = coldgcp;
+ signal->theData[2] = cfailurenr;
+ sendSignal(cmasterdihref, GSN_GCP_NODEFINISH, signal, 3, JBB);
+}//Dbdih::execGCP_TCFINISHED()
+
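+/*
+  Editor's note: summary sketch only, not part of this change and kept
+  disabled. It restates the participant state machine driven by the three
+  handlers above; the final step back to ready happens in writingCopyGciLab()
+  further below. The enum and function names are assumptions.
+*/
+#if 0
+enum GcpParticipantPhase
+{
+  P_READY, P_PREPARE_RECEIVED, P_COMMIT_RECEIVED, P_TC_FINISHED
+};
+static GcpParticipantPhase
+gcp_participant_next(GcpParticipantPhase s)
+{
+  switch (s) {
+  case P_READY:            return P_PREPARE_RECEIVED; /* execGCP_PREPARE    */
+  case P_PREPARE_RECEIVED: return P_COMMIT_RECEIVED;  /* execGCP_COMMIT     */
+  case P_COMMIT_RECEIVED:  return P_TC_FINISHED;      /* execGCP_TCFINISHED */
+  case P_TC_FINISHED:      return P_READY;            /* writingCopyGciLab  */
+  }
+  return P_READY;
+}
+#endif
+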
+/*****************************************************************************/
+//****** RECEIVING TAMPER REQUEST FROM NDBAPI ******
+/*****************************************************************************/
+void Dbdih::execDIHNDBTAMPER(Signal* signal)
+{
+ jamEntry();
+ Uint32 tcgcpblocked = signal->theData[0];
+ /* ACTION TO BE TAKEN BY DIH */
+ Uint32 tuserpointer = signal->theData[1];
+ BlockReference tuserblockref = signal->theData[2];
+ switch (tcgcpblocked) {
+ case 1:
+ jam();
+ if (isMaster()) {
+ jam();
+ cgcpOrderBlocked = 1;
+ } else {
+ jam();
+ /* TRANSFER THE REQUEST */
+ /* TO MASTER*/
+ signal->theData[0] = tcgcpblocked;
+ signal->theData[1] = tuserpointer;
+ signal->theData[2] = tuserblockref;
+ sendSignal(cmasterdihref, GSN_DIHNDBTAMPER, signal, 3, JBB);
+ }//if
+ break;
+ case 2:
+ jam();
+ if (isMaster()) {
+ jam();
+ cgcpOrderBlocked = 0;
+ } else {
+ jam();
+ /* TRANSFER THE REQUEST */
+ /* TO MASTER*/
+ signal->theData[0] = tcgcpblocked;
+ signal->theData[1] = tuserpointer;
+ signal->theData[2] = tuserblockref;
+ sendSignal(cmasterdihref, GSN_DIHNDBTAMPER, signal, 3, JBB);
+ }//if
+ break;
+ case 3:
+ ndbrequire(false);
+ return;
+ break;
+ case 4:
+ jam();
+ signal->theData[0] = tuserpointer;
+ signal->theData[1] = crestartGci;
+ sendSignal(tuserblockref, GSN_DIHNDBTAMPER, signal, 2, JBB);
+ break;
+#ifdef ERROR_INSERT
+ case 5:
+ jam();
+ if(tuserpointer == 0)
+ {
+ jam();
+ signal->theData[0] = 0;
+ sendSignal(QMGR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(NDBCNTR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(NDBFS_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBACC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBTUP_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBLQH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBDICT_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBDIH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBTC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(CMVMI_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ return;
+ }
+ /*----------------------------------------------------------------------*/
+ // Insert errors.
+ /*----------------------------------------------------------------------*/
+ if (tuserpointer < 1000) {
+ /*--------------------------------------------------------------------*/
+ // Insert errors into QMGR.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = QMGR_REF;
+ } else if (tuserpointer < 2000) {
+ /*--------------------------------------------------------------------*/
+ // Insert errors into NDBCNTR.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = NDBCNTR_REF;
+ } else if (tuserpointer < 3000) {
+ /*--------------------------------------------------------------------*/
+ // Insert errors into NDBFS.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = NDBFS_REF;
+ } else if (tuserpointer < 4000) {
+ /*--------------------------------------------------------------------*/
+ // Insert errors into DBACC.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = DBACC_REF;
+ } else if (tuserpointer < 5000) {
+ /*--------------------------------------------------------------------*/
+ // Insert errors into DBTUP.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = DBTUP_REF;
+ } else if (tuserpointer < 6000) {
+ /*---------------------------------------------------------------------*/
+ // Insert errors into DBLQH.
+ /*---------------------------------------------------------------------*/
+ jam();
+ tuserblockref = DBLQH_REF;
+ } else if (tuserpointer < 7000) {
+ /*---------------------------------------------------------------------*/
+ // Insert errors into DBDICT.
+ /*---------------------------------------------------------------------*/
+ jam();
+ tuserblockref = DBDICT_REF;
+ } else if (tuserpointer < 8000) {
+ /*---------------------------------------------------------------------*/
+ // Insert errors into DBDIH.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = DBDIH_REF;
+ } else if (tuserpointer < 9000) {
+ /*--------------------------------------------------------------------*/
+ // Insert errors into DBTC.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = DBTC_REF;
+ } else if (tuserpointer < 10000) {
+ /*--------------------------------------------------------------------*/
+ // Insert errors into CMVMI.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = CMVMI_REF;
+ } else if (tuserpointer < 11000) {
+ jam();
+ tuserblockref = BACKUP_REF;
+ } else if (tuserpointer < 12000) {
+ // DBUTIL_REF ?
+ jam();
+ } else if (tuserpointer < 13000) {
+ jam();
+ tuserblockref = DBTUX_REF;
+ } else if (tuserpointer < 14000) {
+ jam();
+ tuserblockref = SUMA_REF;
+ } else if (tuserpointer < 15000) {
+ jam();
+ tuserblockref = DBDICT_REF;
+ } else if (tuserpointer < 30000) {
+ /*--------------------------------------------------------------------*/
+ // Ignore errors in the 20000-range.
+ /*--------------------------------------------------------------------*/
+ jam();
+ return;
+ } else if (tuserpointer < 40000) {
+ jam();
+ /*--------------------------------------------------------------------*/
+ // Redirect errors to master DIH in the 30000-range.
+ /*--------------------------------------------------------------------*/
+ tuserblockref = cmasterdihref;
+ tuserpointer -= 30000;
+ signal->theData[0] = 5;
+ signal->theData[1] = tuserpointer;
+ signal->theData[2] = tuserblockref;
+ sendSignal(tuserblockref, GSN_DIHNDBTAMPER, signal, 3, JBB);
+ return;
+ } else if (tuserpointer < 50000) {
+ NodeRecordPtr localNodeptr;
+ Uint32 Tfound = 0;
+ jam();
+ /*--------------------------------------------------------------------*/
+ // Redirect errors to non-master DIH in the 40000-range.
+ /*--------------------------------------------------------------------*/
+ tuserpointer -= 40000;
+ for (localNodeptr.i = 1;
+ localNodeptr.i < MAX_NDB_NODES;
+ localNodeptr.i++) {
+ jam();
+ ptrAss(localNodeptr, nodeRecord);
+ if ((localNodeptr.p->nodeStatus == NodeRecord::ALIVE) &&
+ (localNodeptr.i != cmasterNodeId)) {
+ jam();
+ tuserblockref = calcDihBlockRef(localNodeptr.i);
+ Tfound = 1;
+ break;
+ }//if
+ }//for
+ if (Tfound == 0) {
+ jam();
+ /*-------------------------------------------------------------------*/
+ // Ignore since no non-master node existed.
+ /*-------------------------------------------------------------------*/
+ return;
+ }//if
+ signal->theData[0] = 5;
+ signal->theData[1] = tuserpointer;
+ signal->theData[2] = tuserblockref;
+ sendSignal(tuserblockref, GSN_DIHNDBTAMPER, signal, 3, JBB);
+ return;
+ } else {
+ jam();
+ return;
+ }//if
+ signal->theData[0] = tuserpointer;
+ if (tuserpointer != 0) {
+ sendSignal(tuserblockref, GSN_NDB_TAMPER, signal, 1, JBB);
+ } else {
+ sendSignal(QMGR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(NDBCNTR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(NDBFS_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBACC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBTUP_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBLQH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBDICT_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBDIH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBTC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(CMVMI_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ }//if
+ break;
+#endif
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execDIHNDBTAMPER()
+
+/*****************************************************************************/
+/* ********** FILE HANDLING MODULE *************/
+/*****************************************************************************/
+void Dbdih::copyGciLab(Signal* signal, CopyGCIReq::CopyReason reason)
+{
+ if(c_copyGCIMaster.m_copyReason != CopyGCIReq::IDLE){
+ /**
+ * There can currently only be one waiting
+ */
+ ndbrequire(c_copyGCIMaster.m_waiting == CopyGCIReq::IDLE);
+ c_copyGCIMaster.m_waiting = reason;
+ return;
+ }
+ c_copyGCIMaster.m_copyReason = reason;
+ sendLoopMacro(COPY_GCIREQ, sendCOPY_GCIREQ);
+
+}//Dbdih::copyGciLab()
+
+/* ------------------------------------------------------------------------- */
+/* COPY_GCICONF RESPONSE TO COPY_GCIREQ */
+/* ------------------------------------------------------------------------- */
+void Dbdih::execCOPY_GCICONF(Signal* signal)
+{
+ jamEntry();
+ NodeRecordPtr senderNodePtr;
+ senderNodePtr.i = signal->theData[0];
+ receiveLoopMacro(COPY_GCIREQ, senderNodePtr.i);
+
+ CopyGCIReq::CopyReason waiting = c_copyGCIMaster.m_waiting;
+ CopyGCIReq::CopyReason current = c_copyGCIMaster.m_copyReason;
+
+ c_copyGCIMaster.m_copyReason = CopyGCIReq::IDLE;
+ c_copyGCIMaster.m_waiting = CopyGCIReq::IDLE;
+
+ bool ok = false;
+ switch(current){
+ case CopyGCIReq::RESTART:{
+ ok = true;
+ jam();
+ DictStartReq * req = (DictStartReq*)&signal->theData[0];
+ req->restartGci = SYSFILE->newestRestorableGCI;
+ req->senderRef = reference();
+ sendSignal(cdictblockref, GSN_DICTSTARTREQ,
+ signal, DictStartReq::SignalLength, JBB);
+ break;
+ }
+ case CopyGCIReq::LOCAL_CHECKPOINT:{
+ ok = true;
+ jam();
+ startLcpRoundLab(signal);
+ break;
+ }
+ case CopyGCIReq::GLOBAL_CHECKPOINT:
+ ok = true;
+ jam();
+ checkToCopyCompleted(signal);
+
+ /************************************************************************/
+ // Report the event that a global checkpoint has completed.
+ /************************************************************************/
+ signal->setTrace(0);
+ signal->theData[0] = NDB_LE_GlobalCheckpointCompleted; //Event type
+ signal->theData[1] = coldgcp;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ CRASH_INSERTION(7004);
+ emptyWaitGCPMasterQueue(signal);
+ cgcpStatus = GCP_READY;
+ signal->theData[0] = DihContinueB::ZSTART_GCP;
+ signal->theData[1] = cgcpDelay;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2);
+ if (c_nodeStartMaster.blockGcp == true) {
+ jam();
+ /* ------------------------------------------------------------------ */
+ /* A NEW NODE WANTS IN AND WE MUST ALLOW IT TO COME IN NOW SINCE THE */
+ /* GCP IS COMPLETED. */
+ /* ------------------------------------------------------------------ */
+ gcpBlockedLab(signal);
+ }//if
+ break;
+ case CopyGCIReq::INITIAL_START_COMPLETED:
+ ok = true;
+ jam();
+ initialStartCompletedLab(signal);
+ break;
+ case CopyGCIReq::IDLE:
+ ok = false;
+ jam();
+ }
+ ndbrequire(ok);
+
+ /**
+ * Pop queue
+ */
+ if(waiting != CopyGCIReq::IDLE){
+ c_copyGCIMaster.m_copyReason = waiting;
+ signal->theData[0] = DihContinueB::ZCOPY_GCI;
+ signal->theData[1] = waiting;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ }
+}//Dbdih::execCOPY_GCICONF()
+
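+/*
+  Editor's note: illustrative sketch only, not part of this change and kept
+  disabled. c_copyGCIMaster above acts as a one-deep queue: if a COPY_GCIREQ
+  round is already running, the new reason is parked in m_waiting and popped
+  when the running round completes. The struct below is an assumed stand-in,
+  using 0 for IDLE.
+*/
+#if 0
+struct CopyGciQueueDemo
+{
+  Uint32 current, waiting;          /* both 0 (idle) when nothing runs */
+
+  bool start(Uint32 reason)         /* returns true if started at once */
+  {
+    if (current != 0) {
+      waiting = reason;             /* at most one waiter is allowed */
+      return false;
+    }
+    current = reason;
+    return true;
+  }
+  Uint32 complete()                 /* pop the waiter, 0 if none */
+  {
+    current = waiting;
+    waiting = 0;
+    return current;
+  }
+};
+#endif
+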
+void Dbdih::invalidateLcpInfoAfterSr()
+{
+ NodeRecordPtr nodePtr;
+ SYSFILE->latestLCP_ID--;
+ Sysfile::clearLCPOngoing(SYSFILE->systemRestartBits);
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (!NdbNodeBitmask::get(SYSFILE->lcpActive, nodePtr.i)){
+ jam();
+ /* ------------------------------------------------------------------- */
+ // The node was not active in the local checkpoint.
+ // To avoid stepping the active status too quickly towards not active,
+ // we step back one step from Sysfile::NS_ActiveMissed_x.
+ /* ------------------------------------------------------------------- */
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ /* ----------------------------------------------------------------- */
+ // Being inactive in the ongoing LCP while still marked active is a contradiction.
+ /* ----------------------------------------------------------------- */
+ ndbrequire(false);
+ case Sysfile::NS_ActiveMissed_1:
+ jam();
+ nodePtr.p->activeStatus = Sysfile::NS_Active;
+ break;
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ nodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_1;
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ }//if
+ }//for
+ setNodeRestartInfoBits();
+}//Dbdih::invalidateLcpInfoAfterSr()
+
+/* ------------------------------------------------------------------------- */
+/* THE NEXT STEP IS TO WRITE THE FILE. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::openingCopyGciSkipInitLab(Signal* signal, FileRecordPtr filePtr)
+{
+ writeRestorableGci(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::WRITING_COPY_GCI;
+ return;
+}//Dbdih::openingCopyGciSkipInitLab()
+
+void Dbdih::writingCopyGciLab(Signal* signal, FileRecordPtr filePtr)
+{
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE NOW WRITTEN THIS FILE. WRITE ALSO NEXT FILE IF THIS IS NOT */
+ /* ALREADY THE LAST. */
+ /* ----------------------------------------------------------------------- */
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ if (filePtr.i == crestartInfoFile[0]) {
+ jam();
+ filePtr.i = crestartInfoFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ if (filePtr.p->fileStatus == FileRecord::OPEN) {
+ jam();
+ openingCopyGciSkipInitLab(signal, filePtr);
+ return;
+ }//if
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_COPY_GCI;
+ return;
+ }//if
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE COMPLETED WRITING BOTH FILES SUCCESSFULLY. NOW REPORT OUR */
+ /* SUCCESS TO THE MASTER DIH. BUT FIRST WE NEED TO RESET A NUMBER OF */
+ /* VARIABLES USED BY THE LOCAL CHECKPOINT PROCESS (ONLY IF TRIGGERED */
+ /* BY LOCAL CHECKPOINT PROCESS. */
+ /* ----------------------------------------------------------------------- */
+ CopyGCIReq::CopyReason reason = c_copyGCISlave.m_copyReason;
+
+ if (reason == CopyGCIReq::GLOBAL_CHECKPOINT) {
+ jam();
+ cgcpParticipantState = GCP_PARTICIPANT_READY;
+
+ SubGcpCompleteRep * const rep = (SubGcpCompleteRep*)signal->getDataPtr();
+ rep->gci = coldgcp;
+ rep->senderData = 0;
+ sendSignal(SUMA_REF, GSN_SUB_GCP_COMPLETE_REP, signal,
+ SubGcpCompleteRep::SignalLength, JBB);
+ }
+
+ jam();
+ c_copyGCISlave.m_copyReason = CopyGCIReq::IDLE;
+
+ if(c_copyGCISlave.m_senderRef == cmasterdihref){
+ jam();
+ /**
+ * Only if same master
+ */
+ signal->theData[0] = c_copyGCISlave.m_senderData;
+ sendSignal(c_copyGCISlave.m_senderRef, GSN_COPY_GCICONF, signal, 1, JBB);
+
+ }
+ return;
+}//Dbdih::writingCopyGciLab()
+
+void Dbdih::execSTART_LCP_REQ(Signal* signal){
+ StartLcpReq * req = (StartLcpReq*)signal->getDataPtr();
+
+ CRASH_INSERTION2(7021, isMaster());
+ CRASH_INSERTION2(7022, !isMaster());
+
+ c_lcpState.m_masterLcpDihRef = req->senderRef;
+ ndbrequire(c_lcpState.m_masterLcpDihRef != 0);
+ c_lcpState.m_participatingDIH = req->participatingDIH;
+ c_lcpState.m_participatingLQH = req->participatingLQH;
+
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH = req->participatingLQH;
+ if(isMaster()){
+ jam();
+ ndbrequire(isActiveMaster());
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH = req->participatingDIH;
+
+ } else {
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.clearWaitingFor();
+ }
+
+ c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received = false;
+
+ c_lcpState.setLcpStatus(LCP_INIT_TABLES, __LINE__);
+
+ signal->theData[0] = DihContinueB::ZINIT_LCP;
+ signal->theData[1] = c_lcpState.m_masterLcpDihRef;
+ signal->theData[2] = 0;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+}
+
+void Dbdih::initLcpLab(Signal* signal, Uint32 senderRef, Uint32 tableId)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+
+ if(c_lcpState.m_masterLcpDihRef != senderRef){
+ jam();
+ /**
+ * This is LCP master takeover
+ */
+#ifdef VM_TRACE
+ ndbout_c("initLcpLab aborted due to LCP master takeover - 1");
+#endif
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+ sendMASTER_LCPCONF(signal);
+ return;
+ }
+
+ if(c_lcpState.m_masterLcpDihRef != cmasterdihref){
+ jam();
+ /**
+ * Master take over but has not yet received MASTER_LCPREQ
+ */
+#ifdef VM_TRACE
+ ndbout_c("initLcpLab aborted due to LCP master takeover - 2");
+#endif
+ return;
+ }
+
+ //const Uint32 lcpId = SYSFILE->latestLCP_ID;
+
+ for(; tabPtr.i < ctabFileSize; tabPtr.i++){
+
+ ptrAss(tabPtr, tabRecord);
+
+ if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) {
+ jam();
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
+ continue;
+ }
+
+ if (tabPtr.p->storedTable == 0) {
+ /**
+ * Temporary table
+ */
+ jam();
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
+ continue;
+ }
+
+ if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
+ /* ----------------------------------------------------------------- */
+ // We protect the updates of table data structures by this variable.
+ /* ----------------------------------------------------------------- */
+ jam();
+ signal->theData[0] = DihContinueB::ZINIT_LCP;
+ signal->theData[1] = senderRef;
+ signal->theData[2] = tabPtr.i;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 20, 3);
+ return;
+ }//if
+
+ /**
+ * Found a table
+ */
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_ACTIVE;
+
+ /**
+ * For each fragment
+ */
+ for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
+ jam();
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+
+ /**
+ * For each of replica record
+ */
+ Uint32 replicaCount = 0;
+ ReplicaRecordPtr replicaPtr;
+ for(replicaPtr.i = fragPtr.p->storedReplicas; replicaPtr.i != RNIL;
+ replicaPtr.i = replicaPtr.p->nextReplica) {
+ jam();
+
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ Uint32 nodeId = replicaPtr.p->procNode;
+ if(c_lcpState.m_participatingLQH.get(nodeId)){
+ jam();
+ replicaCount++;
+ replicaPtr.p->lcpOngoingFlag = true;
+ }
+ }
+
+ fragPtr.p->noLcpReplicas = replicaCount;
+ }//for
+
+ signal->theData[0] = DihContinueB::ZINIT_LCP;
+ signal->theData[1] = senderRef;
+ signal->theData[2] = tabPtr.i + 1;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }
+
+ /**
+ * No more tables
+ */
+ jam();
+
+ if (c_lcpState.m_masterLcpDihRef != reference()){
+ jam();
+ ndbrequire(!isMaster());
+ c_lcpState.setLcpStatus(LCP_STATUS_ACTIVE, __LINE__);
+ } else {
+ jam();
+ ndbrequire(isMaster());
+ }
+
+ CRASH_INSERTION2(7023, isMaster());
+ CRASH_INSERTION2(7024, !isMaster());
+
+ jam();
+ StartLcpConf * conf = (StartLcpConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ sendSignal(c_lcpState.m_masterLcpDihRef, GSN_START_LCP_CONF, signal,
+ StartLcpConf::SignalLength, JBB);
+ return;
+}//Dbdih::initLcpLab()
+
+/* ------------------------------------------------------------------------- */
+/* ERROR HANDLING FOR COPY RESTORABLE GCI FILE. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::openingCopyGciErrorLab(Signal* signal, FileRecordPtr filePtr)
+{
+ createFileRw(signal, filePtr);
+ /* ------------------------------------------------------------------------- */
+ /* ERROR IN OPENING THE FILE. WE WILL TRY CREATING THE FILE INSTEAD.         */
+ /* ------------------------------------------------------------------------- */
+ filePtr.p->reqStatus = FileRecord::CREATING_COPY_GCI;
+ return;
+}//Dbdih::openingCopyGciErrorLab()
+
+/* ------------------------------------------------------------------------- */
+/* ENTER DICTSTARTCONF WITH */
+/* TBLOCKREF */
+/* ------------------------------------------------------------------------- */
+void Dbdih::dictStartConfLab(Signal* signal)
+{
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE NOW RECEIVED ALL THE TABLES TO RESTART. */
+ /* ----------------------------------------------------------------------- */
+ signal->theData[0] = DihContinueB::ZSTART_FRAGMENT;
+ signal->theData[1] = 0; /* START WITH TABLE 0 */
+ signal->theData[2] = 0; /* AND FRAGMENT 0 */
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+}//Dbdih::dictStartConfLab()
+
+
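+/**
+ * Reading a table description file during system restart: openingTableLab
+ * reads the first page, readingTableLab then checks word[33] (the number of
+ * stored pages), allocates and reads any remaining pages and finally hands
+ * over to CONTINUEB(ZREAD_PAGES_INTO_TABLE) to unpack the pages.
+ */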
+void Dbdih::openingTableLab(Signal* signal, FileRecordPtr filePtr)
+{
+ /* ---------------------------------------------------------------------- */
+ /* SUCCESSFULLY OPENED A FILE. READ THE FIRST PAGE OF THIS FILE. */
+ /* ---------------------------------------------------------------------- */
+ TabRecordPtr tabPtr;
+ PageRecordPtr pagePtr;
+
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ tabPtr.p->noPages = 1;
+ allocpage(pagePtr);
+ tabPtr.p->pageRef[0] = pagePtr.i;
+ readTabfile(signal, tabPtr.p, filePtr);
+ filePtr.p->reqStatus = FileRecord::READING_TABLE;
+ return;
+}//Dbdih::openingTableLab()
+
+void Dbdih::openingTableErrorLab(Signal* signal, FileRecordPtr filePtr)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ /* ---------------------------------------------------------------------- */
+ /* WE FAILED IN OPENING A FILE. IF THE FIRST FILE THEN TRY WITH THE */
+ /* DUPLICATE FILE, OTHERWISE WE REPORT AN ERROR IN THE SYSTEM RESTART. */
+ /* ---------------------------------------------------------------------- */
+ ndbrequire(filePtr.i == tabPtr.p->tabFile[0]);
+ filePtr.i = tabPtr.p->tabFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_TABLE;
+}//Dbdih::openingTableErrorLab()
+
+void Dbdih::readingTableLab(Signal* signal, FileRecordPtr filePtr)
+{
+ TabRecordPtr tabPtr;
+ PageRecordPtr pagePtr;
+ /* ---------------------------------------------------------------------- */
+ /* WE HAVE SUCCESSFULLY READ A NUMBER OF PAGES IN THE TABLE FILE. IF */
+ /* MORE PAGES EXIST IN THE FILE THEN READ ALL PAGES IN THE FILE. */
+ /* ---------------------------------------------------------------------- */
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ pagePtr.i = tabPtr.p->pageRef[0];
+ ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
+ Uint32 noOfStoredPages = pagePtr.p->word[33];
+ if (tabPtr.p->noPages < noOfStoredPages) {
+ jam();
+ ndbrequire(noOfStoredPages <= 8);
+ for (Uint32 i = tabPtr.p->noPages; i < noOfStoredPages; i++) {
+ jam();
+ allocpage(pagePtr);
+ tabPtr.p->pageRef[i] = pagePtr.i;
+ }//for
+ tabPtr.p->noPages = noOfStoredPages;
+ readTabfile(signal, tabPtr.p, filePtr);
+ filePtr.p->reqStatus = FileRecord::READING_TABLE;
+ } else {
+ ndbrequire(tabPtr.p->noPages == pagePtr.p->word[33]);
+ ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE);
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* WE HAVE READ ALL PAGES. NOW READ FROM PAGES INTO TABLE AND FRAGMENT */
+ /* DATA STRUCTURES. */
+ /* --------------------------------------------------------------------- */
+ tabPtr.p->tabCopyStatus = TabRecord::CS_SR_PHASE1_READ_PAGES;
+ signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_TABLE;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }//if
+ return;
+}//Dbdih::readingTableLab()
+
+void Dbdih::readTableFromPagesLab(Signal* signal, TabRecordPtr tabPtr)
+{
+ FileRecordPtr filePtr;
+ filePtr.i = tabPtr.p->tabFile[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ /* ---------------------------------------------------------------------- */
+ /* WE HAVE NOW COPIED TO OUR NODE. WE HAVE NOW COMPLETED RESTORING */
+ /* THIS TABLE. CONTINUE WITH THE NEXT TABLE. */
+ /* WE ALSO NEED TO CLOSE THE TABLE FILE. */
+ /* ---------------------------------------------------------------------- */
+ if (filePtr.p->fileStatus != FileRecord::OPEN) {
+ jam();
+ filePtr.i = tabPtr.p->tabFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ }//if
+ closeFile(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::CLOSING_TABLE_SR;
+ return;
+}//Dbdih::readTableFromPagesLab()
+
+void Dbdih::closingTableSrLab(Signal* signal, FileRecordPtr filePtr)
+{
+ /**
+ * Update table/fragment info
+ */
+ TabRecordPtr tabPtr;
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ resetReplicaSr(tabPtr);
+
+ signal->theData[0] = DihContinueB::ZCOPY_TABLE;
+ signal->theData[1] = filePtr.p->tabRef;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+
+ return;
+}//Dbdih::closingTableSrLab()
+
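+/**
+ * For every fragment of the table: first move all replicas to the old
+ * stored replica list (prepareReplicas), then relink only replicas on alive
+ * and active nodes back into storedReplicas, adjusting the crashed-replica
+ * intervals relative to SYSFILE->newestRestorableGCI and resetting their
+ * local checkpoint information through resetReplicaLcp.
+ */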
+void
+Dbdih::resetReplicaSr(TabRecordPtr tabPtr){
+
+ const Uint32 newestRestorableGCI = SYSFILE->newestRestorableGCI;
+
+ for(Uint32 i = 0; i<tabPtr.p->totalfragments; i++){
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, i, fragPtr);
+
+ /**
+ * 1) Start by moving all replicas into oldStoredReplicas
+ */
+ prepareReplicas(fragPtr);
+
+ /**
+ * 2) Move all "alive" replicas into storedReplicas
+ * + update noCrashedReplicas...
+ */
+ ReplicaRecordPtr replicaPtr;
+ replicaPtr.i = fragPtr.p->oldStoredReplicas;
+ while (replicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ const Uint32 nextReplicaPtrI = replicaPtr.p->nextReplica;
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = replicaPtr.p->procNode;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ const Uint32 noCrashedReplicas = replicaPtr.p->noCrashedReplicas;
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ case Sysfile::NS_ActiveMissed_2:{
+ jam();
+ /* --------------------------------------------------------------- */
+ /* THE NODE IS ALIVE AND KICKING AND ACTIVE, LET'S USE IT. */
+ /* --------------------------------------------------------------- */
+ arrGuard(noCrashedReplicas, 8);
+ Uint32 lastGci = replicaPtr.p->replicaLastGci[noCrashedReplicas];
+ if(lastGci >= newestRestorableGCI){
+ jam();
+ /** -------------------------------------------------------------
+ * THE REPLICA WAS ALIVE AT THE SYSTEM FAILURE. WE WILL SET THE
+ * LAST REPLICA GCI TO MINUS ONE SINCE IT HASN'T FAILED YET IN THE
+ * NEW SYSTEM.
+ *-------------------------------------------------------------- */
+ replicaPtr.p->replicaLastGci[noCrashedReplicas] = (Uint32)-1;
+ } else {
+ jam();
+ /*--------------------------------------------------------------
+ * SINCE IT WAS NOT ALIVE AT THE TIME OF THE SYSTEM CRASH THIS IS
+ * A COMPLETELY NEW REPLICA. WE WILL SET THE CREATE GCI TO BE THE
+ * NEXT GCI TO BE EXECUTED.
+ *-------------------------------------------------------------- */
+ const Uint32 nextCrashed = noCrashedReplicas + 1;
+ replicaPtr.p->noCrashedReplicas = nextCrashed;
+ arrGuard(nextCrashed, 8);
+ replicaPtr.p->createGci[nextCrashed] = newestRestorableGCI + 1;
+ ndbrequire(newestRestorableGCI + 1 != 0xF1F1F1F1);
+ replicaPtr.p->replicaLastGci[nextCrashed] = (Uint32)-1;
+ }//if
+
+ resetReplicaLcp(replicaPtr.p, newestRestorableGCI);
+
+ /* -----------------------------------------------------------------
+ * LINK THE REPLICA INTO THE STORED REPLICA LIST. WE WILL USE THIS
+ * NODE AS A STORED REPLICA.
+ * WE MUST FIRST LINK IT OUT OF THE LIST OF OLD STORED REPLICAS.
+ * --------------------------------------------------------------- */
+ removeOldStoredReplica(fragPtr, replicaPtr);
+ linkStoredReplica(fragPtr, replicaPtr);
+
+ }
+ /* fall through to the (empty) default case */
+ default:
+ jam();
+ /*empty*/;
+ break;
+ }
+ }
+ replicaPtr.i = nextReplicaPtrI;
+ }//while
+ }
+}
+
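+/**
+ * Steps backwards through the replica's local checkpoint slots, starting
+ * just before nextLcp. The newest ZVALID checkpoint with
+ * maxGciStarted < stopGci is kept; every newer or invalid slot visited on
+ * the way is invalidated. If no usable checkpoint is found, nextLcp is
+ * reset to 0.
+ */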
+void
+Dbdih::resetReplicaLcp(ReplicaRecord * replicaP, Uint32 stopGci){
+
+ Uint32 lcpNo = replicaP->nextLcp;
+ const Uint32 startLcpNo = lcpNo;
+ do {
+ lcpNo = prevLcpNo(lcpNo);
+ ndbrequire(lcpNo < MAX_LCP_STORED);
+ if (replicaP->lcpStatus[lcpNo] == ZVALID) {
+ if (replicaP->maxGciStarted[lcpNo] < stopGci) {
+ jam();
+ /* ----------------------------------------------------------------- */
+ /* WE HAVE FOUND A USEFUL LOCAL CHECKPOINT THAT CAN BE USED FOR */
+ /* RESTARTING THIS FRAGMENT REPLICA. */
+ /* ----------------------------------------------------------------- */
+ return ;
+ }//if
+ }//if
+
+ /**
+ * WE COULD NOT USE THIS LOCAL CHECKPOINT. IT WAS TOO
+ * RECENT OR SIMPLY NOT A VALID CHECKPOINT.
+ * WE SHOULD THUS REMOVE THIS LOCAL CHECKPOINT SINCE IT WILL NEVER
+ * AGAIN BE USED. SET LCP_STATUS TO INVALID.
+ */
+ replicaP->nextLcp = lcpNo;
+ replicaP->lcpId[lcpNo] = 0;
+ replicaP->lcpStatus[lcpNo] = ZINVALID;
+ } while (lcpNo != startLcpNo);
+
+ replicaP->nextLcp = 0;
+}
+
+void Dbdih::readingTableErrorLab(Signal* signal, FileRecordPtr filePtr)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ /* ---------------------------------------------------------------------- */
+ /* READING THIS FILE FAILED. CLOSE IT AFTER RELEASING ALL PAGES. */
+ /* ---------------------------------------------------------------------- */
+ ndbrequire(tabPtr.p->noPages <= 8);
+ for (Uint32 i = 0; i < tabPtr.p->noPages; i++) {
+ jam();
+ releasePage(tabPtr.p->pageRef[i]);
+ }//for
+ closeFile(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::CLOSING_TABLE_CRASH;
+ return;
+}//Dbdih::readingTableErrorLab()
+
+void Dbdih::closingTableCrashLab(Signal* signal, FileRecordPtr filePtr)
+{
+ TabRecordPtr tabPtr;
+ /* ---------------------------------------------------------------------- */
+ /* WE HAVE NOW CLOSED A FILE WHICH WE HAD A READ ERROR WITH. PROCEED */
+ /* WITH NEXT FILE IF NOT THE LAST OTHERWISE REPORT ERROR. */
+ /* ---------------------------------------------------------------------- */
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ ndbrequire(filePtr.i == tabPtr.p->tabFile[0]);
+ filePtr.i = tabPtr.p->tabFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_TABLE;
+}//Dbdih::closingTableCrashLab()
+
+/*****************************************************************************/
+/* ********** COPY TABLE MODULE *************/
+/*****************************************************************************/
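+/**
+ * Receives the table description from the master in chunks of 16 words.
+ * The first COPY_TABREQ carries reqinfo = 1 and initialises the table file
+ * records. Words are appended to 2048-word pages until the signal's
+ * noOfWords field shows that at most 16 words remain, after which the pages
+ * are unpacked via CONTINUEB(ZREAD_PAGES_INTO_TABLE).
+ */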
+void Dbdih::execCOPY_TABREQ(Signal* signal)
+{
+ CRASH_INSERTION(7172);
+
+ TabRecordPtr tabPtr;
+ PageRecordPtr pagePtr;
+ jamEntry();
+ BlockReference ref = signal->theData[0];
+ Uint32 reqinfo = signal->theData[1];
+ tabPtr.i = signal->theData[2];
+ Uint32 schemaVersion = signal->theData[3];
+ Uint32 noOfWords = signal->theData[4];
+ ndbrequire(ref == cmasterdihref);
+ ndbrequire(!isMaster());
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if (reqinfo == 1) {
+ jam();
+ tabPtr.p->schemaVersion = schemaVersion;
+ initTableFile(tabPtr);
+ }//if
+ ndbrequire(tabPtr.p->noPages < 8);
+ if (tabPtr.p->noOfWords == 0) {
+ jam();
+ allocpage(pagePtr);
+ tabPtr.p->pageRef[tabPtr.p->noPages] = pagePtr.i;
+ tabPtr.p->noPages++;
+ } else {
+ jam();
+ pagePtr.i = tabPtr.p->pageRef[tabPtr.p->noPages - 1];
+ ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
+ }//if
+ ndbrequire(tabPtr.p->noOfWords + 15 < 2048);
+ ndbrequire(tabPtr.p->noOfWords < 2048);
+ MEMCOPY_NO_WORDS(&pagePtr.p->word[tabPtr.p->noOfWords], &signal->theData[5], 16);
+ tabPtr.p->noOfWords += 16;
+ if (tabPtr.p->noOfWords == 2048) {
+ jam();
+ tabPtr.p->noOfWords = 0;
+ }//if
+ if (noOfWords > 16) {
+ jam();
+ return;
+ }//if
+ tabPtr.p->noOfWords = 0;
+ ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE);
+ tabPtr.p->tabCopyStatus = TabRecord::CS_COPY_TAB_REQ;
+ signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_TABLE;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+}//Dbdih::execCOPY_TABREQ()
+
+void
+Dbdih::copyTabReq_complete(Signal* signal, TabRecordPtr tabPtr){
+ if (!isMaster()) {
+ jam();
+ //----------------------------------------------------------------------------
+ // In this particular case we do not release table pages if we are master. The
+ // reason is that the master could still be sending the table info to another
+ // node.
+ //----------------------------------------------------------------------------
+ releaseTabPages(tabPtr.i);
+ tabPtr.p->tabStatus = TabRecord::TS_ACTIVE;
+ for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
+ jam();
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ updateNodeInfo(fragPtr);
+ }//for
+ }//if
+ signal->theData[0] = cownNodeId;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(cmasterdihref, GSN_COPY_TABCONF, signal, 2, JBB);
+}
+
+/*****************************************************************************/
+/* ****** READ FROM A NUMBER OF PAGES INTO THE TABLE DATA STRUCTURES ********/
+/*****************************************************************************/
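+/**
+ * Layout of a packed table description (2048-word pages):
+ *   word[33]         number of pages used,
+ *   word[34]         total number of words written,
+ *   word[35] onward  table header (totalfragments, noOfBackups, hashpointer,
+ *                    kvalue, mask, method, storedTable) followed by the
+ *                    per-fragment and per-replica data.
+ */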
+void Dbdih::readPagesIntoTableLab(Signal* signal, Uint32 tableId)
+{
+ RWFragment rf;
+ rf.wordIndex = 35;
+ rf.pageIndex = 0;
+ rf.rwfTabPtr.i = tableId;
+ ptrCheckGuard(rf.rwfTabPtr, ctabFileSize, tabRecord);
+ rf.rwfPageptr.i = rf.rwfTabPtr.p->pageRef[0];
+ ptrCheckGuard(rf.rwfPageptr, cpageFileSize, pageRecord);
+ rf.rwfTabPtr.p->totalfragments = readPageWord(&rf);
+ rf.rwfTabPtr.p->noOfBackups = readPageWord(&rf);
+ rf.rwfTabPtr.p->hashpointer = readPageWord(&rf);
+ rf.rwfTabPtr.p->kvalue = readPageWord(&rf);
+ rf.rwfTabPtr.p->mask = readPageWord(&rf);
+ rf.rwfTabPtr.p->method = (TabRecord::Method)readPageWord(&rf);
+ /* ----------------------------------------------------- */
+ /* Type of table; storedTable == 0 means a temporary table */
+ /* ----------------------------------------------------- */
+ rf.rwfTabPtr.p->storedTable = readPageWord(&rf);
+
+ Uint32 noOfFrags = rf.rwfTabPtr.p->totalfragments;
+ ndbrequire(noOfFrags > 0);
+ ndbrequire((noOfFrags * (rf.rwfTabPtr.p->noOfBackups + 1)) <= cnoFreeReplicaRec);
+ allocFragments(noOfFrags, rf.rwfTabPtr);
+
+ signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_FRAG;
+ signal->theData[1] = rf.rwfTabPtr.i;
+ signal->theData[2] = 0;
+ signal->theData[3] = rf.pageIndex;
+ signal->theData[4] = rf.wordIndex;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
+ return;
+}//Dbdih::readPagesIntoTableLab()
+
+void Dbdih::readPagesIntoFragLab(Signal* signal, RWFragment* rf)
+{
+ ndbrequire(rf->pageIndex < 8);
+ rf->rwfPageptr.i = rf->rwfTabPtr.p->pageRef[rf->pageIndex];
+ ptrCheckGuard(rf->rwfPageptr, cpageFileSize, pageRecord);
+ FragmentstorePtr fragPtr;
+ getFragstore(rf->rwfTabPtr.p, rf->fragId, fragPtr);
+ readFragment(rf, fragPtr);
+ readReplicas(rf, fragPtr);
+ rf->fragId++;
+ if (rf->fragId == rf->rwfTabPtr.p->totalfragments) {
+ jam();
+ switch (rf->rwfTabPtr.p->tabCopyStatus) {
+ case TabRecord::CS_SR_PHASE1_READ_PAGES:
+ jam();
+ releaseTabPages(rf->rwfTabPtr.i);
+ rf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ signal->theData[0] = DihContinueB::ZREAD_TABLE_FROM_PAGES;
+ signal->theData[1] = rf->rwfTabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ case TabRecord::CS_COPY_TAB_REQ:
+ jam();
+ rf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ if(getNodeState().getSystemRestartInProgress()){
+ jam();
+ copyTabReq_complete(signal, rf->rwfTabPtr);
+ return;
+ }
+ rf->rwfTabPtr.p->tabUpdateState = TabRecord::US_COPY_TAB_REQ;
+ signal->theData[0] = DihContinueB::ZTABLE_UPDATE;
+ signal->theData[1] = rf->rwfTabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ } else {
+ jam();
+ signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_FRAG;
+ signal->theData[1] = rf->rwfTabPtr.i;
+ signal->theData[2] = rf->fragId;
+ signal->theData[3] = rf->pageIndex;
+ signal->theData[4] = rf->wordIndex;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
+ }//if
+ return;
+}//Dbdih::readPagesIntoFragLab()
+
+/*****************************************************************************/
+/***** WRITING FROM TABLE DATA STRUCTURES INTO A SET OF PAGES ******/
+// execCONTINUEB(ZPACK_TABLE_INTO_PAGES)
+/*****************************************************************************/
+void Dbdih::packTableIntoPagesLab(Signal* signal, Uint32 tableId)
+{
+ RWFragment wf;
+ TabRecordPtr tabPtr;
+ allocpage(wf.rwfPageptr);
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ tabPtr.p->pageRef[0] = wf.rwfPageptr.i;
+ tabPtr.p->noPages = 1;
+ wf.wordIndex = 35;
+ wf.pageIndex = 0;
+ writePageWord(&wf, tabPtr.p->totalfragments);
+ writePageWord(&wf, tabPtr.p->noOfBackups);
+ writePageWord(&wf, tabPtr.p->hashpointer);
+ writePageWord(&wf, tabPtr.p->kvalue);
+ writePageWord(&wf, tabPtr.p->mask);
+ writePageWord(&wf, tabPtr.p->method);
+ writePageWord(&wf, tabPtr.p->storedTable);
+
+ signal->theData[0] = DihContinueB::ZPACK_FRAG_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = 0;
+ signal->theData[3] = wf.pageIndex;
+ signal->theData[4] = wf.wordIndex;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
+}//Dbdih::packTableIntoPagesLab()
+
+/*****************************************************************************/
+// execCONTINUEB(ZPACK_FRAG_INTO_PAGES)
+/*****************************************************************************/
+void Dbdih::packFragIntoPagesLab(Signal* signal, RWFragment* wf)
+{
+ ndbrequire(wf->pageIndex < 8);
+ wf->rwfPageptr.i = wf->rwfTabPtr.p->pageRef[wf->pageIndex];
+ ptrCheckGuard(wf->rwfPageptr, cpageFileSize, pageRecord);
+ FragmentstorePtr fragPtr;
+ getFragstore(wf->rwfTabPtr.p, wf->fragId, fragPtr);
+ writeFragment(wf, fragPtr);
+ writeReplicas(wf, fragPtr.p->storedReplicas);
+ writeReplicas(wf, fragPtr.p->oldStoredReplicas);
+ wf->fragId++;
+ if (wf->fragId == wf->rwfTabPtr.p->totalfragments) {
+ jam();
+ PageRecordPtr pagePtr;
+ pagePtr.i = wf->rwfTabPtr.p->pageRef[0];
+ ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
+ pagePtr.p->word[33] = wf->rwfTabPtr.p->noPages;
+ pagePtr.p->word[34] = ((wf->rwfTabPtr.p->noPages - 1) * 2048) + wf->wordIndex;
+ switch (wf->rwfTabPtr.p->tabCopyStatus) {
+ case TabRecord::CS_SR_PHASE2_READ_TABLE:
+ /* -------------------------------------------------------------------*/
+ // We are performing a system restart and we are now ready to copy the
+ // table from this node (the master) to all other nodes.
+ /* -------------------------------------------------------------------*/
+ jam();
+ wf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ signal->theData[0] = DihContinueB::ZSR_PHASE2_READ_TABLE;
+ signal->theData[1] = wf->rwfTabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ case TabRecord::CS_COPY_NODE_STATE:
+ jam();
+ tableCopyNodeLab(signal, wf->rwfTabPtr);
+ return;
+ break;
+ case TabRecord::CS_LCP_READ_TABLE:
+ jam();
+ signal->theData[0] = DihContinueB::ZTABLE_UPDATE;
+ signal->theData[1] = wf->rwfTabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ case TabRecord::CS_REMOVE_NODE:
+ case TabRecord::CS_INVALIDATE_NODE_LCP:
+ jam();
+ signal->theData[0] = DihContinueB::ZTABLE_UPDATE;
+ signal->theData[1] = wf->rwfTabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ case TabRecord::CS_ADD_TABLE_MASTER:
+ jam();
+ wf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ signal->theData[0] = DihContinueB::ZADD_TABLE_MASTER_PAGES;
+ signal->theData[1] = wf->rwfTabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ case TabRecord::CS_ADD_TABLE_SLAVE:
+ jam();
+ wf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ signal->theData[0] = DihContinueB::ZADD_TABLE_SLAVE_PAGES;
+ signal->theData[1] = wf->rwfTabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ } else {
+ jam();
+ signal->theData[0] = DihContinueB::ZPACK_FRAG_INTO_PAGES;
+ signal->theData[1] = wf->rwfTabPtr.i;
+ signal->theData[2] = wf->fragId;
+ signal->theData[3] = wf->pageIndex;
+ signal->theData[4] = wf->wordIndex;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
+ }//if
+ return;
+}//Dbdih::packFragIntoPagesLab()
+
+/*****************************************************************************/
+/* ********** START FRAGMENT MODULE *************/
+/*****************************************************************************/
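+/**
+ * Restores the replica organisation of one fragment at a time during system
+ * restart. Tables that are not active or not stored are skipped, with a
+ * real-time break after 100 skipped tables. searchStoredReplicas selects
+ * the replicas to restart from and sendStartFragreq orders the restart; if
+ * no restorable replica exists for a fragment the node fails with a system
+ * error.
+ */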
+void Dbdih::startFragment(Signal* signal, Uint32 tableId, Uint32 fragId)
+{
+ Uint32 TloopCount = 0;
+ TabRecordPtr tabPtr;
+ while (true) {
+ if (TloopCount > 100) {
+ jam();
+ signal->theData[0] = DihContinueB::ZSTART_FRAGMENT;
+ signal->theData[1] = tableId;
+ signal->theData[2] = 0;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }
+
+ if (tableId >= ctabFileSize) {
+ jam();
+ signal->theData[0] = DihContinueB::ZCOMPLETE_RESTART;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
+ return;
+ }//if
+
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE){
+ jam();
+ TloopCount++;
+ tableId++;
+ fragId = 0;
+ continue;
+ }
+
+ if(tabPtr.p->storedTable == 0){
+ jam();
+ TloopCount++;
+ tableId++;
+ fragId = 0;
+ continue;
+ }
+
+ jam();
+ break;
+ }//while
+
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ /* ----------------------------------------------------------------------- */
+ /* WE NEED TO RESET THE REPLICA DATA STRUCTURES. THIS MEANS THAT WE */
+ /* MUST REMOVE REPLICAS THAT WERE NOT STARTED AT THE GCI TO RESTORE. WE */
+ /* NEED TO PUT ALL STORED REPLICAS ON THE LIST OF OLD STORED REPLICAS AND */
+ /* RESET THE NUMBER OF REPLICAS TO CREATE. */
+ /* ----------------------------------------------------------------------- */
+ cnoOfCreateReplicas = 0;
+ /* ----------------------------------------------------------------------- */
+ /* WE WILL NEVER START MORE THAN FOUR FRAGMENT REPLICAS WHATEVER THE */
+ /* DESIRED REPLICATION IS. */
+ /* ----------------------------------------------------------------------- */
+ ndbrequire(tabPtr.p->noOfBackups < 4);
+ /* ----------------------------------------------------------------------- */
+ /* SEARCH FOR STORED REPLICAS THAT CAN BE USED TO RESTART THE SYSTEM. */
+ /* ----------------------------------------------------------------------- */
+ searchStoredReplicas(fragPtr);
+ if (cnoOfCreateReplicas == 0) {
+ /* --------------------------------------------------------------------- */
+ /* THERE WERE NO STORED REPLICAS AVAILABLE THAT COULD SERVE AS A REPLICA */
+ /* TO RESTART THE SYSTEM FROM. IN A LATER RELEASE WE WILL ADD */
+ /* FUNCTIONALITY TO CHECK IF THERE ARE ANY STANDBY NODES THAT COULD DO */
+ /* THIS TASK INSTEAD. IN THIS IMPLEMENTATION WE SIMPLY CRASH THE SYSTEM. */
+ /* THIS WILL DECREASE THE GCI TO RESTORE, WHICH HOPEFULLY WILL MAKE IT */
+ /* POSSIBLE TO RESTORE THE SYSTEM. */
+ /* --------------------------------------------------------------------- */
+ char buf[100];
+ BaseString::snprintf(buf, sizeof(buf),
+ "Unable to find restorable replica for "
+ "table: %d fragment: %d gci: %d",
+ tableId, fragId, SYSFILE->newestRestorableGCI);
+ progError(__LINE__,
+ ERR_SYSTEM_ERROR,
+ buf);
+ ndbrequire(false);
+ return;
+ }//if
+
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE CHANGED THE NODE TO BE PRIMARY REPLICA AND THE NODES TO BE */
+ /* BACKUP NODES. WE MUST UPDATE THIS NODE'S DATA STRUCTURES SINCE WE */
+ /* WILL NOT COPY THE TABLE DATA TO OURSELF. */
+ /* ----------------------------------------------------------------------- */
+ updateNodeInfo(fragPtr);
+ /* ----------------------------------------------------------------------- */
+ /* NOW WE HAVE COLLECTED ALL THE REPLICAS WE COULD GET. WE WILL NOW */
+ /* RESTART THE FRAGMENT REPLICAS WE HAVE FOUND IRRESPECTIVE OF WHETHER */
+ /* THERE ARE ENOUGH ACCORDING TO THE DESIRED REPLICATION. */
+ /* ----------------------------------------------------------------------- */
+ /* WE START BY SENDING ADD_FRAGREQ FOR THOSE REPLICAS THAT NEED IT. */
+ /* ----------------------------------------------------------------------- */
+ CreateReplicaRecordPtr createReplicaPtr;
+ for (createReplicaPtr.i = 0;
+ createReplicaPtr.i < cnoOfCreateReplicas;
+ createReplicaPtr.i++) {
+ jam();
+ ptrCheckGuard(createReplicaPtr, 4, createReplicaRecord);
+ createReplicaPtr.p->hotSpareUse = false;
+ }//for
+
+ sendStartFragreq(signal, tabPtr, fragId);
+
+ /**
+ * Don't wait for START_FRAGCONF
+ */
+ fragId++;
+ if (fragId >= tabPtr.p->totalfragments) {
+ jam();
+ tabPtr.i++;
+ fragId = 0;
+ }//if
+ signal->theData[0] = DihContinueB::ZSTART_FRAGMENT;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = fragId;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+
+ return;
+}//Dbdih::startFragment()
+
+
+/*****************************************************************************/
+/* ********** COMPLETE RESTART MODULE *************/
+/*****************************************************************************/
+void Dbdih::completeRestartLab(Signal* signal)
+{
+ sendLoopMacro(START_RECREQ, sendSTART_RECREQ);
+}//Dbdih::completeRestartLab()
+
+/* ------------------------------------------------------------------------- */
+// SYSTEM RESTART:
+/* A NODE HAS COMPLETED RESTORING ALL DATABASE FRAGMENTS. */
+// NODE RESTART:
+// THE STARTING NODE HAS PREPARED ITS LOG FILES TO ENABLE EXECUTION
+// OF TRANSACTIONS.
+// Precondition:
+// This signal must be received by the master node.
+/* ------------------------------------------------------------------------- */
+void Dbdih::execSTART_RECCONF(Signal* signal)
+{
+ jamEntry();
+ Uint32 senderNodeId = signal->theData[0];
+ ndbrequire(isMaster());
+ if (getNodeState().startLevel >= NodeState::SL_STARTED){
+ /* --------------------------------------------------------------------- */
+ // Since our node is already up and running this must be a node restart.
+ // This means that we should be the master node,
+ // otherwise we have a problem.
+ /* --------------------------------------------------------------------- */
+ jam();
+ ndbrequire(senderNodeId == c_nodeStartMaster.startNode);
+ nodeRestartStartRecConfLab(signal);
+ return;
+ } else {
+ /* --------------------------------------------------------------------- */
+ // This was the system restart case. We set the state indicating that the
+ // node has completed restoration of all fragments.
+ /* --------------------------------------------------------------------- */
+ receiveLoopMacro(START_RECREQ, senderNodeId);
+
+ signal->theData[0] = reference();
+ sendSignal(cntrlblockref, GSN_NDB_STARTCONF, signal, 1, JBB);
+ return;
+ }//if
+}//Dbdih::execSTART_RECCONF()
+
+void Dbdih::copyNodeLab(Signal* signal, Uint32 tableId)
+{
+ /* ----------------------------------------------------------------------- */
+ // This code is executed by the master to assist a restarting node in
+ // receiving the table data held by the master.
+ /* ----------------------------------------------------------------------- */
+ Uint32 TloopCount = 0;
+
+ if (!c_nodeStartMaster.activeState) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ // Obviously the node crashed in the middle of its node restart. We will
+ // stop this process simply by returning after resetting the wait indicator.
+ /* ---------------------------------------------------------------------- */
+ c_nodeStartMaster.wait = ZFALSE;
+ return;
+ }//if
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ while (tabPtr.i < ctabFileSize) {
+ ptrAss(tabPtr, tabRecord);
+ if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) {
+ /* -------------------------------------------------------------------- */
+ // The table is defined. We will start by packing the table into pages.
+ // The tabCopyStatus tells the CONTINUEB(ZPACK_TABLE_INTO_PAGES) handler
+ // who requested the packing. After packing the table into page(s) it will
+ // be sent to the starting node by COPY_TABREQ signals. After the starting
+ // node has received the table we return to this subroutine and continue
+ // with the next table.
+ /* -------------------------------------------------------------------- */
+ ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE);
+ tabPtr.p->tabCopyStatus = TabRecord::CS_COPY_NODE_STATE;
+ signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ } else {
+ jam();
+ if (TloopCount > 100) {
+ /* ------------------------------------------------------------------ */
+ // Introduce a real-time break after looping through 100 tables that were not copied
+ /* ----------------------------------------------------------------- */
+ jam();
+ signal->theData[0] = DihContinueB::ZCOPY_NODE;
+ signal->theData[1] = tabPtr.i + 1;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ } else {
+ jam();
+ TloopCount++;
+ tabPtr.i++;
+ }//if
+ }//if
+ }//while
+ dihCopyCompletedLab(signal);
+ return;
+}//Dbdih::copyNodeLab()
+
+void Dbdih::tableCopyNodeLab(Signal* signal, TabRecordPtr tabPtr)
+{
+ /* ----------------------------------------------------------------------- */
+ /* COPY PAGES READ TO STARTING NODE. */
+ /* ----------------------------------------------------------------------- */
+ if (!c_nodeStartMaster.activeState) {
+ jam();
+ releaseTabPages(tabPtr.i);
+ c_nodeStartMaster.wait = ZFALSE;
+ return;
+ }//if
+ NodeRecordPtr copyNodePtr;
+ PageRecordPtr pagePtr;
+ copyNodePtr.i = c_nodeStartMaster.startNode;
+ ptrCheckGuard(copyNodePtr, MAX_NDB_NODES, nodeRecord);
+
+ copyNodePtr.p->activeTabptr = tabPtr.i;
+ pagePtr.i = tabPtr.p->pageRef[0];
+ ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
+
+ signal->theData[0] = DihContinueB::ZCOPY_TABLE_NODE;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = copyNodePtr.i;
+ signal->theData[3] = 0;
+ signal->theData[4] = 0;
+ signal->theData[5] = pagePtr.p->word[34];
+ sendSignal(reference(), GSN_CONTINUEB, signal, 6, JBB);
+}//Dbdih::tableCopyNodeLab()
+
+/* ------------------------------------------------------------------------- */
+// execCONTINUEB(ZCOPY_TABLE)
+// This routine is used to copy the table descriptions from the master to
+// other nodes. It is used in the system restart to copy from master to all
+// starting nodes.
+/* ------------------------------------------------------------------------- */
+void Dbdih::copyTableLab(Signal* signal, Uint32 tableId)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrAss(tabPtr, tabRecord);
+
+ ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE);
+ tabPtr.p->tabCopyStatus = TabRecord::CS_SR_PHASE2_READ_TABLE;
+ signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}//Dbdih::copyTableLab()
+
+/* ------------------------------------------------------------------------- */
+// execCONTINUEB(ZSR_PHASE2_READ_TABLE)
+/* ------------------------------------------------------------------------- */
+void Dbdih::srPhase2ReadTableLab(Signal* signal, TabRecordPtr tabPtr)
+{
+ /* ----------------------------------------------------------------------- */
+ // We set the sendCOPY_TABREQState to ZACTIVE for all nodes since it is a long
+ // process to send off all table descriptions. Thus we ensure that we do
+ // not encounter race conditions where one node is completed before the
+ // sending process is completed. Otherwise we could start the system before
+ // all table descriptions have actually been copied, which could lead to
+ // strange errors.
+ /* ----------------------------------------------------------------------- */
+
+ //sendLoopMacro(COPY_TABREQ, nullRoutine);
+
+ breakCopyTableLab(signal, tabPtr, cfirstAliveNode);
+ return;
+}//Dbdih::srPhase2ReadTableLab()
+
+/* ------------------------------------------------------------------------- */
+/* COPY PAGES READ TO ALL NODES. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::breakCopyTableLab(Signal* signal, TabRecordPtr tabPtr, Uint32 nodeId)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ while (nodePtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.i == getOwnNodeId()){
+ jam();
+ /* ------------------------------------------------------------------- */
+ /* NOT NECESSARY TO COPY TO MY OWN NODE. I ALREADY HAVE THE PAGES. */
+ /* I DO HOWEVER NEED TO STORE THE TABLE DESCRIPTION ONTO DISK. */
+ /* ------------------------------------------------------------------- */
+ /* IF WE ARE MASTER WE ONLY NEED TO SAVE THE TABLE ON DISK. WE ALREADY */
+ /* HAVE THE TABLE DESCRIPTION IN THE DATA STRUCTURES. */
+ // AFTER COMPLETING THE WRITE TO DISK THE MASTER WILL ALSO SEND
+ // COPY_TABCONF AS ALL THE OTHER NODES.
+ /* ------------------------------------------------------------------- */
+ c_COPY_TABREQ_Counter.setWaitingFor(nodePtr.i);
+ tabPtr.p->tabUpdateState = TabRecord::US_COPY_TAB_REQ;
+ signal->theData[0] = DihContinueB::ZTABLE_UPDATE;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ nodePtr.i = nodePtr.p->nextNode;
+ } else {
+ PageRecordPtr pagePtr;
+ /* -------------------------------------------------------------------- */
+ // RATHER THAN SENDING ALL COPY_TABREQ IN PARALLEL WE WILL SERIALISE THIS
+ // ACTIVITY AND WILL THUS CALL breakCopyTableLab AGAIN WHEN COMPLETED THE
+ // SENDING OF COPY_TABREQ'S.
+ /* -------------------------------------------------------------------- */
+ jam();
+ tabPtr.p->tabCopyStatus = TabRecord::CS_SR_PHASE3_COPY_TABLE;
+ pagePtr.i = tabPtr.p->pageRef[0];
+ ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
+ signal->theData[0] = DihContinueB::ZCOPY_TABLE_NODE;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = nodePtr.i;
+ signal->theData[3] = 0;
+ signal->theData[4] = 0;
+ signal->theData[5] = pagePtr.p->word[34];
+ sendSignal(reference(), GSN_CONTINUEB, signal, 6, JBB);
+ return;
+ }//if
+ }//while
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE NOW SENT THE TABLE PAGES TO ALL NODES. EXIT AND WAIT FOR ALL */
+ /* REPLIES. */
+ /* ----------------------------------------------------------------------- */
+ return;
+}//Dbdih::breakCopyTableLab()
+
+/* ------------------------------------------------------------------------- */
+// execCONTINUEB(ZCOPY_TABLE_NODE)
+/* ------------------------------------------------------------------------- */
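+/**
+ * Sends the packed table pages to one node, 16 words per COPY_TABREQ and at
+ * most 16 COPY_TABREQ per round, then continues itself through
+ * CONTINUEB(ZCOPY_TABLE_NODE). During system restart, breakCopyTableLab is
+ * called again when the table has been fully sent, so that the copy to the
+ * next alive node is serialised.
+ */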
+void Dbdih::copyTableNode(Signal* signal,
+ CopyTableNode* ctn, NodeRecordPtr nodePtr)
+{
+ if (getNodeState().startLevel >= NodeState::SL_STARTED){
+ /* --------------------------------------------------------------------- */
+ // We are in the process of performing a node restart and are copying a
+ // table description to a starting node. We will check that no nodes have
+ // crashed in this process.
+ /* --------------------------------------------------------------------- */
+ if (!c_nodeStartMaster.activeState) {
+ jam();
+ /** ------------------------------------------------------------------
+ * The starting node crashed. We will release table pages and stop this
+ * copy process and allow new node restarts to start.
+ * ------------------------------------------------------------------ */
+ releaseTabPages(ctn->ctnTabPtr.i);
+ c_nodeStartMaster.wait = ZFALSE;
+ return;
+ }//if
+ }//if
+ ndbrequire(ctn->pageIndex < 8);
+ ctn->ctnPageptr.i = ctn->ctnTabPtr.p->pageRef[ctn->pageIndex];
+ ptrCheckGuard(ctn->ctnPageptr, cpageFileSize, pageRecord);
+ /**
+ * If this is the first page and the first word, set reqinfo = 1 (first signal)
+ */
+ Uint32 reqinfo = (ctn->pageIndex == 0) && (ctn->wordIndex == 0);
+ if(reqinfo == 1){
+ c_COPY_TABREQ_Counter.setWaitingFor(nodePtr.i);
+ }
+
+ for (Uint32 i = 0; i < 16; i++) {
+ jam();
+ sendCopyTable(signal, ctn, calcDihBlockRef(nodePtr.i), reqinfo);
+ reqinfo = 0;
+ if (ctn->noOfWords <= 16) {
+ jam();
+ switch (ctn->ctnTabPtr.p->tabCopyStatus) {
+ case TabRecord::CS_SR_PHASE3_COPY_TABLE:
+ /* ------------------------------------------------------------------ */
+ // We have copied the table description to this node.
+ // We will now proceed
+ // with sending the table description to the next node in the node list.
+ /* ------------------------------------------------------------------ */
+ jam();
+ ctn->ctnTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ breakCopyTableLab(signal, ctn->ctnTabPtr, nodePtr.p->nextNode);
+ return;
+ break;
+ case TabRecord::CS_COPY_NODE_STATE:
+ jam();
+ ctn->ctnTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ } else {
+ jam();
+ ctn->wordIndex += 16;
+ if (ctn->wordIndex == 2048) {
+ jam();
+ ctn->wordIndex = 0;
+ ctn->pageIndex++;
+ ndbrequire(ctn->pageIndex < 8);
+ ctn->ctnPageptr.i = ctn->ctnTabPtr.p->pageRef[ctn->pageIndex];
+ ptrCheckGuard(ctn->ctnPageptr, cpageFileSize, pageRecord);
+ }//if
+ ctn->noOfWords -= 16;
+ }//if
+ }//for
+ signal->theData[0] = DihContinueB::ZCOPY_TABLE_NODE;
+ signal->theData[1] = ctn->ctnTabPtr.i;
+ signal->theData[2] = nodePtr.i;
+ signal->theData[3] = ctn->pageIndex;
+ signal->theData[4] = ctn->wordIndex;
+ signal->theData[5] = ctn->noOfWords;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 6, JBB);
+}//Dbdih::copyTableNode()
+
+void Dbdih::sendCopyTable(Signal* signal, CopyTableNode* ctn,
+ BlockReference ref, Uint32 reqinfo)
+{
+ signal->theData[0] = reference();
+ signal->theData[1] = reqinfo;
+ signal->theData[2] = ctn->ctnTabPtr.i;
+ signal->theData[3] = ctn->ctnTabPtr.p->schemaVersion;
+ signal->theData[4] = ctn->noOfWords;
+ ndbrequire(ctn->wordIndex + 15 < 2048);
+ MEMCOPY_NO_WORDS(&signal->theData[5], &ctn->ctnPageptr.p->word[ctn->wordIndex], 16);
+ sendSignal(ref, GSN_COPY_TABREQ, signal, 21, JBB);
+}//Dbdih::sendCopyTable()
+
+void Dbdih::execCOPY_TABCONF(Signal* signal)
+{
+ NodeRecordPtr nodePtr;
+ jamEntry();
+ nodePtr.i = signal->theData[0];
+ Uint32 tableId = signal->theData[1];
+ if (getNodeState().startLevel >= NodeState::SL_STARTED){
+ /* --------------------------------------------------------------------- */
+ // We are in the process of performing a node restart. Continue by copying
+ // the next table to the starting node.
+ /* --------------------------------------------------------------------- */
+ jam();
+ NodeRecordPtr nodePtr;
+ nodePtr.i = signal->theData[0];
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ c_COPY_TABREQ_Counter.clearWaitingFor(nodePtr.i);
+
+ releaseTabPages(tableId);
+ signal->theData[0] = DihContinueB::ZCOPY_NODE;
+ signal->theData[1] = tableId + 1;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ } else {
+ /* --------------------------------------------------------------------- */
+ // We are in the process of performing a system restart. Check if all nodes
+ // have saved the new table description to file and then continue with the
+ // next table.
+ /* --------------------------------------------------------------------- */
+ receiveLoopMacro(COPY_TABREQ, nodePtr.i);
+ /* --------------------------------------------------------------------- */
+ /* WE HAVE NOW COPIED TO ALL NODES. WE HAVE NOW COMPLETED RESTORING */
+ /* THIS TABLE. CONTINUE WITH THE NEXT TABLE. */
+ /* WE NEED TO RELEASE THE PAGES IN THE TABLE IN THIS NODE HERE. */
+ /* WE ALSO NEED TO CLOSE THE TABLE FILE. */
+ /* --------------------------------------------------------------------- */
+ releaseTabPages(tableId);
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ConnectRecordPtr connectPtr;
+ connectPtr.i = tabPtr.p->connectrec;
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+
+ sendAddFragreq(signal, connectPtr, tabPtr, 0);
+ return;
+ }//if
+}//Dbdih::execCOPY_TABCONF()
+
+/*
+ 3.13 L O C A L C H E C K P O I N T (M A S T E R)
+ ****************************************************
+ */
+/*****************************************************************************/
+/* ********** LOCAL-CHECK-POINT-HANDLING MODULE *************/
+/*****************************************************************************/
+/* ------------------------------------------------------------------------- */
+/* IT IS TIME TO CHECK IF IT IS TIME TO START A LOCAL CHECKPOINT. */
+/* WE WILL EITHER START AFTER 1 MILLION WORDS HAVE ARRIVED OR WE WILL */
+/* EXECUTE AFTER ABOUT 16 MINUTES HAVE PASSED BY. */
+/* ------------------------------------------------------------------------- */
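+/**
+ * Runs periodically through CONTINUEB(ZCHECK_TC_COUNTER). ctimer grows by 32
+ * each round and seeds ctcCounter, which is then increased by the operation
+ * sizes reported in TCGETOPSIZECONF. A new LCP round is started once
+ * ctcCounter reaches 1 << clcpDelay (or an immediate start was requested),
+ * provided LCP start is not blocked by an ongoing node restart and at least
+ * one global checkpoint has passed since the previous LCP started.
+ */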
+void Dbdih::checkTcCounterLab(Signal* signal)
+{
+ CRASH_INSERTION(7009);
+ if (c_lcpState.lcpStatus != LCP_STATUS_IDLE) {
+ ndbout << "lcpStatus = " << (Uint32) c_lcpState.lcpStatus;
+ ndbout << "lcpStatusUpdatedPlace = " <<
+ c_lcpState.lcpStatusUpdatedPlace << endl;
+ ndbrequire(false);
+ return;
+ }//if
+ c_lcpState.ctimer += 32;
+ if ((c_nodeStartMaster.blockLcp == true) ||
+ ((c_lcpState.lcpStartGcp + 1) > currentgcp)) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ // No reason to start juggling the states and checking for start of LCP if
+ // we are blocked to start an LCP anyway.
+ // We also block LCP start if we have not completed one global checkpoint
+ // before starting another local checkpoint.
+ /* --------------------------------------------------------------------- */
+ signal->theData[0] = DihContinueB::ZCHECK_TC_COUNTER;
+ signal->theData[1] = __LINE__;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1 * 100, 2);
+ return;
+ }//if
+ c_lcpState.setLcpStatus(LCP_TCGET, __LINE__);
+
+ c_lcpState.ctcCounter = c_lcpState.ctimer;
+ sendLoopMacro(TCGETOPSIZEREQ, sendTCGETOPSIZEREQ);
+}//Dbdih::checkTcCounterLab()
+
+void Dbdih::checkLcpStart(Signal* signal, Uint32 lineNo)
+{
+ /* ----------------------------------------------------------------------- */
+ // Verify that we are not attempting to start another instance of the LCP
+ // when it is not alright to do so.
+ /* ----------------------------------------------------------------------- */
+ ndbrequire(c_lcpState.lcpStart == ZIDLE);
+ c_lcpState.lcpStart = ZACTIVE;
+ signal->theData[0] = DihContinueB::ZCHECK_TC_COUNTER;
+ signal->theData[1] = lineNo;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 2);
+}//Dbdih::checkLcpStart()
+
+/* ------------------------------------------------------------------------- */
+/*TCGETOPSIZECONF HOW MUCH OPERATION SIZE HAVE BEEN EXECUTED BY TC */
+/* ------------------------------------------------------------------------- */
+void Dbdih::execTCGETOPSIZECONF(Signal* signal)
+{
+ jamEntry();
+ Uint32 senderNodeId = signal->theData[0];
+ c_lcpState.ctcCounter += signal->theData[1];
+
+ receiveLoopMacro(TCGETOPSIZEREQ, senderNodeId);
+
+ ndbrequire(c_lcpState.lcpStatus == LCP_TCGET);
+ ndbrequire(c_lcpState.lcpStart == ZACTIVE);
+ /* ----------------------------------------------------------------------- */
+ // If we were not actively starting another LCP and still received this
+ // signal, something would be wrong; the ndbrequires above guard against it.
+ /* ---------------------------------------------------------------------- */
+ /* ALL TC'S HAVE RESPONDED NOW. NOW WE WILL CHECK IF ENOUGH OPERATIONS */
+ /* HAVE EXECUTED TO ENABLE US TO START A NEW LOCAL CHECKPOINT. */
+ /* WHILE COPYING DICTIONARY AND DISTRIBUTION INFO TO A STARTING NODE */
+ /* WE WILL ALSO NOT ALLOW THE LOCAL CHECKPOINT TO PROCEED. */
+ /*----------------------------------------------------------------------- */
+ if (c_lcpState.immediateLcpStart == false) {
+ if ((c_lcpState.ctcCounter <
+ ((Uint32)1 << c_lcpState.clcpDelay)) ||
+ (c_nodeStartMaster.blockLcp == true)) {
+ jam();
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+
+ signal->theData[0] = DihContinueB::ZCHECK_TC_COUNTER;
+ signal->theData[1] = __LINE__;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1 * 100, 2);
+ return;
+ }//if
+ }//if
+ c_lcpState.lcpStart = ZIDLE;
+ c_lcpState.immediateLcpStart = false;
+ /* -----------------------------------------------------------------------
+ * Now the initial lcp is started,
+ * we can reset the delay to its original value
+ * --------------------------------------------------------------------- */
+ CRASH_INSERTION(7010);
+ /* ----------------------------------------------------------------------- */
+ /* IF MORE THAN 1 MILLION WORDS PASSED THROUGH THE TC'S THEN WE WILL */
+ /* START A NEW LOCAL CHECKPOINT. CLEAR CTIMER. START CHECKPOINT */
+ /* ACTIVITY BY CALCULATING THE KEEP GLOBAL CHECKPOINT. */
+ // Also remember the current global checkpoint to ensure that we run at least
+ // one global checkpoints between each local checkpoint that we start up.
+ /* ----------------------------------------------------------------------- */
+ c_lcpState.ctimer = 0;
+ c_lcpState.keepGci = coldgcp;
+ c_lcpState.lcpStartGcp = currentgcp;
+ /* ----------------------------------------------------------------------- */
+ /* UPDATE THE NEW LATEST LOCAL CHECKPOINT ID. */
+ /* ----------------------------------------------------------------------- */
+ cnoOfActiveTables = 0;
+ c_lcpState.setLcpStatus(LCP_CALCULATE_KEEP_GCI, __LINE__);
+ c_lcpState.oldestRestorableGci = SYSFILE->oldestRestorableGCI;
+ ndbrequire(((int)c_lcpState.oldestRestorableGci) > 0);
+
+ if (ERROR_INSERTED(7011)) {
+ signal->theData[0] = NDB_LE_LCPStoppedInCalcKeepGci;
+ signal->theData[1] = 0;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+ return;
+ }//if
+ signal->theData[0] = DihContinueB::ZCALCULATE_KEEP_GCI;
+ signal->theData[1] = 0; /* TABLE ID = 0 */
+ signal->theData[2] = 0; /* FRAGMENT ID = 0 */
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+}//Dbdih::execTCGETOPSIZECONF()
+
+/* ------------------------------------------------------------------------- */
+/* WE NEED TO CALCULATE THE OLDEST GLOBAL CHECKPOINT THAT WILL BE */
+/* COMPLETELY RESTORABLE AFTER EXECUTING THIS LOCAL CHECKPOINT. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::calculateKeepGciLab(Signal* signal, Uint32 tableId, Uint32 fragId)
+{
+ TabRecordPtr tabPtr;
+ Uint32 TloopCount = 1;
+ tabPtr.i = tableId;
+ do {
+ if (tabPtr.i >= ctabFileSize) {
+ if (cnoOfActiveTables > 0) {
+ jam();
+ signal->theData[0] = DihContinueB::ZSTORE_NEW_LCP_ID;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
+ return;
+ } else {
+ jam();
+ /* ------------------------------------------------------------------ */
+ /* THERE ARE NO TABLES TO CHECKPOINT. WE STOP THE CHECKPOINT ALREADY */
+ /* HERE TO AVOID STRANGE PROBLEMS LATER. */
+ /* ------------------------------------------------------------------ */
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+ checkLcpStart(signal, __LINE__);
+ return;
+ }//if
+ }//if
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE ||
+ tabPtr.p->storedTable == 0) {
+ if (TloopCount > 100) {
+ jam();
+ signal->theData[0] = DihContinueB::ZCALCULATE_KEEP_GCI;
+ signal->theData[1] = tabPtr.i + 1;
+ signal->theData[2] = 0;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ } else {
+ jam();
+ TloopCount++;
+ tabPtr.i++;
+ }//if
+ } else {
+ jam();
+ TloopCount = 0;
+ }//if
+ } while (TloopCount != 0);
+ cnoOfActiveTables++;
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ checkKeepGci(fragPtr.p->storedReplicas);
+ fragId++;
+ if (fragId >= tabPtr.p->totalfragments) {
+ jam();
+ tabPtr.i++;
+ fragId = 0;
+ }//if
+ signal->theData[0] = DihContinueB::ZCALCULATE_KEEP_GCI;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = fragId;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+}//Dbdih::calculateKeepGciLab()
+
+/* ------------------------------------------------------------------------- */
+/* WE NEED TO STORE ON DISK THE FACT THAT WE ARE STARTING THIS LOCAL */
+/* CHECKPOINT ROUND. THIS WILL INVALIDATE ALL THE LOCAL CHECKPOINTS */
+/* THAT WILL EVENTUALLY BE OVERWRITTEN AS PART OF THIS LOCAL CHECKPOINT*/
+/* ------------------------------------------------------------------------- */
+void Dbdih::storeNewLcpIdLab(Signal* signal)
+{
+ /***************************************************************************/
+ // Report the event that a local checkpoint has started.
+ /***************************************************************************/
+ signal->theData[0] = NDB_LE_LocalCheckpointStarted; //Event type
+ signal->theData[1] = SYSFILE->latestLCP_ID + 1;
+ signal->theData[2] = c_lcpState.keepGci;
+ signal->theData[3] = c_lcpState.oldestRestorableGci;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
+
+ signal->setTrace(TestOrd::TraceLocalCheckpoint);
+
+ CRASH_INSERTION(7013);
+ SYSFILE->keepGCI = c_lcpState.keepGci;
+ //Uint32 lcpId = SYSFILE->latestLCP_ID;
+ SYSFILE->latestLCP_ID++;
+ SYSFILE->oldestRestorableGCI = c_lcpState.oldestRestorableGci;
+
+ const Uint32 oldestRestorableGCI = SYSFILE->oldestRestorableGCI;
+ //const Uint32 newestRestorableGCI = SYSFILE->newestRestorableGCI;
+ //ndbrequire(newestRestorableGCI >= oldestRestorableGCI);
+
+ Int32 val = oldestRestorableGCI;
+ ndbrequire(val > 0);
+
+ /* ----------------------------------------------------------------------- */
+ /* SET BIT INDICATING THAT LOCAL CHECKPOINT IS ONGOING. THIS IS CLEARED */
+ /* AT THE END OF A LOCAL CHECKPOINT. */
+ /* ----------------------------------------------------------------------- */
+ SYSFILE->setLCPOngoing(SYSFILE->systemRestartBits);
+ /* ---------------------------------------------------------------------- */
+ /* CHECK IF ANY NODE MUST BE TAKEN OUT OF SERVICE AND REFILLED WITH */
+ /* NEW FRESH DATA FROM AN ACTIVE NODE. */
+ /* ---------------------------------------------------------------------- */
+ setLcpActiveStatusStart(signal);
+ c_lcpState.setLcpStatus(LCP_COPY_GCI, __LINE__);
+ //#ifdef VM_TRACE
+ // infoEvent("LocalCheckpoint %d started", SYSFILE->latestLCP_ID);
+ // signal->theData[0] = 7012;
+ // execDUMP_STATE_ORD(signal);
+ //#endif
+
+ copyGciLab(signal, CopyGCIReq::LOCAL_CHECKPOINT);
+}//Dbdih::storeNewLcpIdLab()
+
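+/**
+ * Master sequence for starting an LCP round: lock the start-LCP mutex,
+ * broadcast START_LCP_REQ (lcpId and the participating LQH/DIH bitmasks) to
+ * all DIH blocks, wait for every START_LCP_CONF, release the mutex, then
+ * clear the TC operation size counters with TC_CLOPSIZEREQ before the first
+ * LCP_FRAG_ORDs are sent in startLcpRoundLoopLab.
+ */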
+void Dbdih::startLcpRoundLab(Signal* signal) {
+ jam();
+
+ Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
+ Callback c = { safe_cast(&Dbdih::startLcpMutex_locked), 0 };
+ ndbrequire(mutex.lock(c));
+}
+
+void
+Dbdih::startLcpMutex_locked(Signal* signal, Uint32 senderData, Uint32 retVal){
+ jamEntry();
+ ndbrequire(retVal == 0);
+
+ StartLcpReq* req = (StartLcpReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->lcpId = SYSFILE->latestLCP_ID;
+ req->participatingLQH = c_lcpState.m_participatingLQH;
+ req->participatingDIH = c_lcpState.m_participatingDIH;
+ sendLoopMacro(START_LCP_REQ, sendSTART_LCP_REQ);
+}
+void
+Dbdih::sendSTART_LCP_REQ(Signal* signal, Uint32 nodeId){
+ BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_START_LCP_REQ, signal, StartLcpReq::SignalLength, JBB);
+}
+
+void
+Dbdih::execSTART_LCP_CONF(Signal* signal){
+ StartLcpConf * conf = (StartLcpConf*)signal->getDataPtr();
+
+ Uint32 nodeId = refToNode(conf->senderRef);
+ receiveLoopMacro(START_LCP_REQ, nodeId);
+
+ Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
+ Callback c = { safe_cast(&Dbdih::startLcpMutex_unlocked), 0 };
+ mutex.unlock(c);
+}
+
+void
+Dbdih::startLcpMutex_unlocked(Signal* signal, Uint32 data, Uint32 retVal){
+ jamEntry();
+ ndbrequire(retVal == 0);
+
+ Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
+ mutex.release();
+
+ CRASH_INSERTION(7014);
+ c_lcpState.setLcpStatus(LCP_TC_CLOPSIZE, __LINE__);
+ sendLoopMacro(TC_CLOPSIZEREQ, sendTC_CLOPSIZEREQ);
+}
+
+void Dbdih::execTC_CLOPSIZECONF(Signal* signal) {
+ jamEntry();
+ Uint32 senderNodeId = signal->theData[0];
+ receiveLoopMacro(TC_CLOPSIZEREQ, senderNodeId);
+
+ ndbrequire(c_lcpState.lcpStatus == LCP_TC_CLOPSIZE);
+ /* ----------------------------------------------------------------------- */
+ /* ALL TC'S HAVE CLEARED THEIR OPERATION SIZE COUNTERS. NOW PROCEED BY */
+ /* STARTING THE LOCAL CHECKPOINT IN EACH LQH. */
+ /* ----------------------------------------------------------------------- */
+ c_lcpState.m_LAST_LCP_FRAG_ORD = c_lcpState.m_participatingLQH;
+
+ CRASH_INSERTION(7015);
+ c_lcpState.setLcpStatus(LCP_START_LCP_ROUND, __LINE__);
+ startLcpRoundLoopLab(signal, 0, 0);
+}//Dbdih::execTC_CLOPSIZECONF()
+
+void Dbdih::startLcpRoundLoopLab(Signal* signal,
+ Uint32 startTableId, Uint32 startFragId)
+{
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ ndbrequire(nodePtr.p->noOfStartedChkpt == 0);
+ ndbrequire(nodePtr.p->noOfQueuedChkpt == 0);
+ }//if
+ }//for
+ c_lcpState.currentFragment.tableId = startTableId;
+ c_lcpState.currentFragment.fragmentId = startFragId;
+ startNextChkpt(signal);
+}//Dbdih::startLcpRoundLoopLab()
+
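+/**
+ * Orders LCP_FRAG_ORD for all replicas taking part in this LCP. Each LQH
+ * node may have at most 2 started and 2 queued fragment checkpoints; when
+ * every participating node is busy, the current table/fragment position is
+ * saved in c_lcpState.currentFragment and the routine returns so the scan
+ * can resume from there later. Once the whole table range has been scanned,
+ * sendLastLCP_FRAG_ORD distributes the last-fragment flag.
+ */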
+void Dbdih::startNextChkpt(Signal* signal)
+{
+ Uint32 lcpId = SYSFILE->latestLCP_ID;
+
+ NdbNodeBitmask busyNodes;
+ busyNodes.clear();
+ const Uint32 lcpNodes = c_lcpState.m_participatingLQH.count();
+
+ bool save = true;
+ LcpState::CurrentFragment curr = c_lcpState.currentFragment;
+
+ while (curr.tableId < ctabFileSize) {
+ TabRecordPtr tabPtr;
+ tabPtr.i = curr.tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if ((tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) ||
+ (tabPtr.p->tabLcpStatus != TabRecord::TLS_ACTIVE)) {
+ curr.tableId++;
+ curr.fragmentId = 0;
+ continue;
+ }//if
+
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, curr.fragmentId, fragPtr);
+
+ ReplicaRecordPtr replicaPtr;
+ for(replicaPtr.i = fragPtr.p->storedReplicas;
+ replicaPtr.i != RNIL ;
+ replicaPtr.i = replicaPtr.p->nextReplica){
+
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = replicaPtr.p->procNode;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ if (replicaPtr.p->lcpOngoingFlag &&
+ replicaPtr.p->lcpIdStarted < lcpId) {
+ jam();
+ //-------------------------------------------------------------------
+ // We have found a replica on an alive node that takes part in this
+ // local checkpoint and whose checkpoint has not yet been started.
+ //-------------------------------------------------------------------
+
+ if (nodePtr.p->noOfStartedChkpt < 2) {
+ jam();
+ /**
+ * Send LCP_FRAG_ORD to LQH
+ */
+
+ /**
+ * Mark the replica as started by setting lcpIdStarted to the current LCP id
+ */
+ replicaPtr.p->lcpIdStarted = lcpId;
+
+ Uint32 i = nodePtr.p->noOfStartedChkpt;
+ nodePtr.p->startedChkpt[i].tableId = tabPtr.i;
+ nodePtr.p->startedChkpt[i].fragId = curr.fragmentId;
+ nodePtr.p->startedChkpt[i].replicaPtr = replicaPtr.i;
+ nodePtr.p->noOfStartedChkpt = i + 1;
+
+ sendLCP_FRAG_ORD(signal, nodePtr.p->startedChkpt[i]);
+ } else if (nodePtr.p->noOfQueuedChkpt < 2) {
+ jam();
+ /**
+ * Put LCP_FRAG_ORD "in queue"
+ */
+
+ /**
+ * Mark the replica as started by setting lcpIdStarted to the current LCP id
+ */
+ replicaPtr.p->lcpIdStarted = lcpId;
+
+ Uint32 i = nodePtr.p->noOfQueuedChkpt;
+ nodePtr.p->queuedChkpt[i].tableId = tabPtr.i;
+ nodePtr.p->queuedChkpt[i].fragId = curr.fragmentId;
+ nodePtr.p->queuedChkpt[i].replicaPtr = replicaPtr.i;
+ nodePtr.p->noOfQueuedChkpt = i + 1;
+ } else {
+ jam();
+
+ if(save){
+ /**
+ * Stop advancing currentFragment at the first node that was "full"
+ */
+ c_lcpState.currentFragment = curr;
+ save = false;
+ }
+
+ busyNodes.set(nodePtr.i);
+ if(busyNodes.count() == lcpNodes){
+ /**
+ * There was no possibility to start the local checkpoint
+ * and it was not possible to queue it up either. In this case
+ * we stop starting local checkpoints until the nodes with a
+ * backlog have completed more checkpoints. We return here and
+ * do not continue the process of starting any more checkpoints.
+ */
+ return;
+ }//if
+ }//if
+ }
+ }//for
+ curr.fragmentId++;
+ if (curr.fragmentId >= tabPtr.p->totalfragments) {
+ jam();
+ curr.fragmentId = 0;
+ curr.tableId++;
+ }//if
+ }//while
+
+ sendLastLCP_FRAG_ORD(signal);
+}//Dbdih::startNextChkpt()
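+
+/**
+ * Illustrative walk-through of startNextChkpt() (assumed layout, for
+ * clarity only): with replicas of ten fragments spread evenly over two
+ * participating LQH nodes, each node first gets two LCP_FRAG_ORD sent
+ * (started) and two more queued. When a fifth replica is found for a
+ * node, the scan position is saved in c_lcpState.currentFragment and
+ * the node is marked busy; once every participating node is busy the
+ * method returns without calling sendLastLCP_FRAG_ORD, and a later
+ * checkStartMoreLcp() resumes from the saved position.
+ */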
+
+void Dbdih::sendLastLCP_FRAG_ORD(Signal* signal)
+{
+ LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
+ lcpFragOrd->tableId = RNIL;
+ lcpFragOrd->fragmentId = 0;
+ lcpFragOrd->lcpId = SYSFILE->latestLCP_ID;
+ lcpFragOrd->lcpNo = 0;
+ lcpFragOrd->keepGci = c_lcpState.keepGci;
+ lcpFragOrd->lastFragmentFlag = true;
+
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+
+ if(nodePtr.p->noOfQueuedChkpt == 0 &&
+ nodePtr.p->noOfStartedChkpt == 0 &&
+ c_lcpState.m_LAST_LCP_FRAG_ORD.isWaitingFor(nodePtr.i)){
+ jam();
+
+ CRASH_INSERTION(7028);
+
+ /**
+ * Nothing queued or started <=> Complete on that node
+ *
+ */
+ c_lcpState.m_LAST_LCP_FRAG_ORD.clearWaitingFor(nodePtr.i);
+ if(ERROR_INSERTED(7075)){
+ continue;
+ }
+ BlockReference ref = calcLqhBlockRef(nodePtr.i);
+ sendSignal(ref, GSN_LCP_FRAG_ORD, signal,LcpFragOrd::SignalLength, JBB);
+ }
+ }
+ if(ERROR_INSERTED(7075)){
+ if(c_lcpState.m_LAST_LCP_FRAG_ORD.done())
+ CRASH_INSERTION(7075);
+ }
+}//Dbdih::sendLastLCP_FRAG_ORD()
+
+/* ------------------------------------------------------------------------- */
+/* A FRAGMENT REPLICA HAS COMPLETED EXECUTING ITS LOCAL CHECKPOINT. */
+/* CHECK IF ALL REPLICAS IN THE TABLE HAVE COMPLETED. IF SO STORE THE */
+/* THE TABLE DISTRIBUTION ON DISK. ALSO SEND LCP_REPORT TO ALL OTHER */
+/* NODES SO THAT THEY CAN STORE THE TABLE ONTO DISK AS WELL. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::execLCP_FRAG_REP(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE);
+
+#if 0
+ printLCP_FRAG_REP(stdout,
+ signal->getDataPtr(),
+ signal->length(), number());
+#endif
+
+ LcpFragRep * const lcpReport = (LcpFragRep *)&signal->theData[0];
+ Uint32 nodeId = lcpReport->nodeId;
+ Uint32 tableId = lcpReport->tableId;
+ Uint32 fragId = lcpReport->fragId;
+
+ jamEntry();
+
+ CRASH_INSERTION2(7025, isMaster());
+ CRASH_INSERTION2(7016, !isMaster());
+
+ bool fromTimeQueue = (signal->senderBlockRef() == reference());
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if(tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // If the table is currently being copied to disk we also
+ // stop already here to avoid strange half-way updates
+ // of the table data structures.
+ /*-----------------------------------------------------------------------*/
+ /*
+ We need to send this signal without a delay since we have discovered
+ that we have run out of space in the short time queue. This problem
+ is very unlikely to happen, but it has happened and it results in a
+ node crash. This should be considered a "quick fix" and not a
+ permanent solution. A cleaner/better way would be to check whether
+ the time queue is full before sending this signal.
+ */
+ sendSignal(reference(), GSN_LCP_FRAG_REP, signal, signal->length(), JBB);
+ /* Kept here for reference
+ sendSignalWithDelay(reference(), GSN_LCP_FRAG_REP,
+ signal, 20, signal->length());
+ */
+
+ if(!fromTimeQueue){
+ c_lcpState.noOfLcpFragRepOutstanding++;
+ }
+
+ return;
+ }//if
+
+ if(fromTimeQueue){
+ jam();
+
+ ndbrequire(c_lcpState.noOfLcpFragRepOutstanding > 0);
+ c_lcpState.noOfLcpFragRepOutstanding--;
+ }
+
+ bool tableDone = reportLcpCompletion(lcpReport);
+
+ if(tableDone){
+ jam();
+
+ if(tabPtr.p->tabStatus == TabRecord::TS_DROPPING){
+ jam();
+ ndbout_c("TS_DROPPING - Neglecting to save Table: %d Frag: %d - ",
+ tableId,
+ fragId);
+ } else {
+ jam();
+ /**
+ * Write table description to file
+ */
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_WRITING_TO_FILE;
+ tabPtr.p->tabCopyStatus = TabRecord::CS_LCP_READ_TABLE;
+ tabPtr.p->tabUpdateState = TabRecord::US_LOCAL_CHECKPOINT;
+ signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+
+ checkLcpAllTablesDoneInLqh();
+ }
+ }
+
+#ifdef VM_TRACE
+ /* --------------------------------------------------------------------- */
+ // REPORT that the local checkpoint has completed for this fragment.
+ /* --------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_LCPFragmentCompleted;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tableId;
+ signal->theData[3] = fragId;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
+#endif
+
+ bool ok = false;
+ switch(c_lcpMasterTakeOverState.state){
+ case LMTOS_IDLE:
+ ok = true;
+ jam();
+ /**
+ * Proceed with the checks below the switch
+ */
+ break;
+ case LMTOS_WAIT_EMPTY_LCP: // LCP Take over waiting for EMPTY_LCPCONF
+ jam();
+ return;
+ case LMTOS_WAIT_LCP_FRAG_REP:
+ jam();
+ checkEmptyLcpComplete(signal);
+ return;
+ case LMTOS_INITIAL:
+ case LMTOS_ALL_IDLE:
+ case LMTOS_ALL_ACTIVE:
+ case LMTOS_LCP_CONCLUDING:
+ case LMTOS_COPY_ONGOING:
+ ndbrequire(false);
+ }
+ ndbrequire(ok);
+
+ /* ----------------------------------------------------------------------- */
+ // Check if there are more LCP's to start up.
+ /* ----------------------------------------------------------------------- */
+ if(isMaster()){
+ jam();
+
+ /**
+ * Remove from "running" array
+ */
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ const Uint32 outstanding = nodePtr.p->noOfStartedChkpt;
+ ndbrequire(outstanding > 0);
+ if(nodePtr.p->startedChkpt[0].tableId != tableId ||
+ nodePtr.p->startedChkpt[0].fragId != fragId){
+ jam();
+ ndbrequire(outstanding > 1);
+ ndbrequire(nodePtr.p->startedChkpt[1].tableId == tableId);
+ ndbrequire(nodePtr.p->startedChkpt[1].fragId == fragId);
+ } else {
+ jam();
+ nodePtr.p->startedChkpt[0] = nodePtr.p->startedChkpt[1];
+ }
+ nodePtr.p->noOfStartedChkpt--;
+ checkStartMoreLcp(signal, nodeId);
+ }
+}
+
+bool
+Dbdih::checkLcpAllTablesDoneInLqh(){
+ TabRecordPtr tabPtr;
+
+ /**
+ * Check if finished with all tables
+ */
+ for (tabPtr.i = 0; tabPtr.i < ctabFileSize; tabPtr.i++) {
+ jam();
+ ptrAss(tabPtr, tabRecord);
+ if ((tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) &&
+ (tabPtr.p->tabLcpStatus == TabRecord::TLS_ACTIVE)) {
+ jam();
+ /**
+ * Nope, not finished with all tables
+ */
+ return false;
+ }//if
+ }//for
+
+ CRASH_INSERTION2(7026, isMaster());
+ CRASH_INSERTION2(7017, !isMaster());
+
+ c_lcpState.setLcpStatus(LCP_TAB_COMPLETED, __LINE__);
+ return true;
+}
+
+void Dbdih::findReplica(ReplicaRecordPtr& replicaPtr,
+ Fragmentstore* fragPtrP, Uint32 nodeId)
+{
+ replicaPtr.i = fragPtrP->storedReplicas;
+ while(replicaPtr.i != RNIL){
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if (replicaPtr.p->procNode == nodeId) {
+ jam();
+ return;
+ } else {
+ jam();
+ replicaPtr.i = replicaPtr.p->nextReplica;
+ }//if
+ };
+
+#ifdef VM_TRACE
+ ndbout_c("Fragment Replica(node=%d) not found", nodeId);
+ replicaPtr.i = fragPtrP->oldStoredReplicas;
+ while(replicaPtr.i != RNIL){
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if (replicaPtr.p->procNode == nodeId) {
+ jam();
+ break;
+ } else {
+ jam();
+ replicaPtr.i = replicaPtr.p->nextReplica;
+ }//if
+ };
+ if(replicaPtr.i != RNIL){
+ ndbout_c("...But was found in oldStoredReplicas");
+ } else {
+ ndbout_c("...And wasn't found in oldStoredReplicas");
+ }
+#endif
+ ndbrequire(false);
+}//Dbdih::findReplica()
+
+/**
+ * Return true if all fragment replicas of the table have been
+ * checkpointed to disk (in all LQHs), false otherwise.
+ */
+bool
+Dbdih::reportLcpCompletion(const LcpFragRep* lcpReport)
+{
+ Uint32 lcpNo = lcpReport->lcpNo;
+ Uint32 lcpId = lcpReport->lcpId;
+ Uint32 maxGciStarted = lcpReport->maxGciStarted;
+ Uint32 maxGciCompleted = lcpReport->maxGciCompleted;
+ Uint32 tableId = lcpReport->tableId;
+ Uint32 fragId = lcpReport->fragId;
+ Uint32 nodeId = lcpReport->nodeId;
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+
+ ReplicaRecordPtr replicaPtr;
+ findReplica(replicaPtr, fragPtr.p, nodeId);
+
+ ndbrequire(replicaPtr.p->lcpOngoingFlag == true);
+ if(lcpNo != replicaPtr.p->nextLcp){
+ ndbout_c("lcpNo = %d replicaPtr.p->nextLcp = %d",
+ lcpNo, replicaPtr.p->nextLcp);
+ ndbrequire(false);
+ }
+ ndbrequire(lcpNo == replicaPtr.p->nextLcp);
+ ndbrequire(lcpNo < MAX_LCP_STORED);
+ ndbrequire(replicaPtr.p->lcpId[lcpNo] != lcpId);
+
+ replicaPtr.p->lcpIdStarted = lcpId;
+ replicaPtr.p->lcpOngoingFlag = false;
+
+ removeOldCrashedReplicas(replicaPtr);
+ replicaPtr.p->lcpId[lcpNo] = lcpId;
+ replicaPtr.p->lcpStatus[lcpNo] = ZVALID;
+ replicaPtr.p->maxGciStarted[lcpNo] = maxGciStarted;
+ gth(maxGciStarted + 1, 0);
+ replicaPtr.p->maxGciCompleted[lcpNo] = maxGciCompleted;
+ replicaPtr.p->nextLcp = nextLcpNo(replicaPtr.p->nextLcp);
+
+ ndbrequire(fragPtr.p->noLcpReplicas > 0);
+ fragPtr.p->noLcpReplicas --;
+
+ if(fragPtr.p->noLcpReplicas > 0){
+ jam();
+ return false;
+ }
+
+ for (Uint32 fid = 0; fid < tabPtr.p->totalfragments; fid++) {
+ jam();
+ getFragstore(tabPtr.p, fid, fragPtr);
+ if (fragPtr.p->noLcpReplicas > 0){
+ jam();
+ /* ----------------------------------------------------------------- */
+ // Not all fragments in table have been checkpointed.
+ /* ----------------------------------------------------------------- */
+ if(0)
+ ndbout_c("reportLcpCompletion: fragment %d not ready", fid);
+ return false;
+ }//if
+ }//for
+ return true;
+}//Dbdih::reportLcpCompletion()
+
+void Dbdih::checkStartMoreLcp(Signal* signal, Uint32 nodeId)
+{
+ ndbrequire(isMaster());
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ ndbrequire(nodePtr.p->noOfStartedChkpt < 2);
+
+ if (nodePtr.p->noOfQueuedChkpt > 0) {
+ jam();
+ nodePtr.p->noOfQueuedChkpt--;
+ Uint32 i = nodePtr.p->noOfStartedChkpt;
+ nodePtr.p->startedChkpt[i] = nodePtr.p->queuedChkpt[0];
+ nodePtr.p->queuedChkpt[0] = nodePtr.p->queuedChkpt[1];
+ //-------------------------------------------------------------------
+ // We can send a LCP_FRAGORD to the node ordering it to perform a
+ // local checkpoint on this fragment replica.
+ //-------------------------------------------------------------------
+ nodePtr.p->noOfStartedChkpt = i + 1;
+
+ sendLCP_FRAG_ORD(signal, nodePtr.p->startedChkpt[i]);
+ }
+
+ /* ----------------------------------------------------------------------- */
+ // When at least one node has no more outstanding LCP reports and nothing
+ // queued, we are ready to make sure all nodes again have at least two
+ // outstanding LCP requests per node and at least two queued for sending.
+ /* ----------------------------------------------------------------------- */
+ startNextChkpt(signal);
+}//Dbdih::checkStartMoreLcp()
+
+void
+Dbdih::sendLCP_FRAG_ORD(Signal* signal,
+ NodeRecord::FragmentCheckpointInfo info){
+
+ ReplicaRecordPtr replicaPtr;
+ replicaPtr.i = info.replicaPtr;
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+
+ BlockReference ref = calcLqhBlockRef(replicaPtr.p->procNode);
+
+ LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
+ lcpFragOrd->tableId = info.tableId;
+ lcpFragOrd->fragmentId = info.fragId;
+ lcpFragOrd->lcpId = SYSFILE->latestLCP_ID;
+ lcpFragOrd->lcpNo = replicaPtr.p->nextLcp;
+ lcpFragOrd->keepGci = c_lcpState.keepGci;
+ lcpFragOrd->lastFragmentFlag = false;
+ sendSignal(ref, GSN_LCP_FRAG_ORD, signal, LcpFragOrd::SignalLength, JBB);
+}
+
+void Dbdih::checkLcpCompletedLab(Signal* signal)
+{
+ if(c_lcpState.lcpStatus < LCP_TAB_COMPLETED){
+ jam();
+ return;
+ }
+
+ TabRecordPtr tabPtr;
+ for (tabPtr.i = 0; tabPtr.i < ctabFileSize; tabPtr.i++) {
+ jam();
+ ptrAss(tabPtr, tabRecord);
+ if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) {
+ if (tabPtr.p->tabLcpStatus != TabRecord::TLS_COMPLETED) {
+ jam();
+ return;
+ }//if
+ }//if
+ }//for
+
+ CRASH_INSERTION2(7027, isMaster());
+ CRASH_INSERTION2(7018, !isMaster());
+
+ if(c_lcpState.lcpStatus == LCP_TAB_COMPLETED){
+ /**
+ * We're done
+ */
+ c_lcpState.setLcpStatus(LCP_TAB_SAVED, __LINE__);
+ sendLCP_COMPLETE_REP(signal);
+ return;
+ }
+
+ ndbrequire(c_lcpState.lcpStatus == LCP_TAB_SAVED);
+ allNodesLcpCompletedLab(signal);
+ return;
+}//Dbdih::checkLcpCompletedLab()
+
+void
+Dbdih::sendLCP_COMPLETE_REP(Signal* signal){
+ jam();
+ LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend();
+ rep->nodeId = getOwnNodeId();
+ rep->lcpId = SYSFILE->latestLCP_ID;
+ rep->blockNo = DBDIH;
+
+ sendSignal(c_lcpState.m_masterLcpDihRef, GSN_LCP_COMPLETE_REP, signal,
+ LcpCompleteRep::SignalLength, JBB);
+}
+
+/*-------------------------------------------------------------------------- */
+/* COMP_LCP_ROUND A LQH HAS COMPLETED A LOCAL CHECKPOINT */
+/*------------------------------------------------------------------------- */
+void Dbdih::execLCP_COMPLETE_REP(Signal* signal)
+{
+ jamEntry();
+
+#if 0
+ ndbout_c("LCP_COMPLETE_REP");
+ printLCP_COMPLETE_REP(stdout,
+ signal->getDataPtr(),
+ signal->length(), number());
+#endif
+
+ LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtr();
+ Uint32 lcpId = rep->lcpId;
+ Uint32 nodeId = rep->nodeId;
+ Uint32 blockNo = rep->blockNo;
+
+ if(c_lcpMasterTakeOverState.state > LMTOS_WAIT_LCP_FRAG_REP){
+ jam();
+ /**
+ * Don't allow LCP_COMPLETE_REP to arrive during
+ * LCP master take over
+ */
+ ndbrequire(isMaster());
+ ndbrequire(blockNo == DBDIH);
+ sendSignalWithDelay(reference(), GSN_LCP_COMPLETE_REP, signal, 100,
+ signal->length());
+ return;
+ }
+
+ ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE);
+
+ switch(blockNo){
+ case DBLQH:
+ jam();
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.clearWaitingFor(nodeId);
+ ndbrequire(!c_lcpState.m_LAST_LCP_FRAG_ORD.isWaitingFor(nodeId));
+ break;
+ case DBDIH:
+ jam();
+ ndbrequire(isMaster());
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.clearWaitingFor(nodeId);
+ break;
+ case 0:
+ jam();
+ ndbrequire(!isMaster());
+ ndbrequire(c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received == false);
+ c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received = true;
+ break;
+ default:
+ ndbrequire(false);
+ }
+ ndbrequire(lcpId == SYSFILE->latestLCP_ID);
+
+ allNodesLcpCompletedLab(signal);
+ return;
+}
+
+void Dbdih::allNodesLcpCompletedLab(Signal* signal)
+{
+ jam();
+
+ if (c_lcpState.lcpStatus != LCP_TAB_SAVED) {
+ jam();
+ /**
+ * We have not sent LCP_COMPLETE_REP to master DIH yet
+ */
+ return;
+ }//if
+
+ if (!c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.done()){
+ jam();
+ return;
+ }
+
+ if (!c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.done()){
+ jam();
+ return;
+ }
+
+ if (!isMaster() &&
+ c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received == false){
+ jam();
+ /**
+ * Wait until master DIH has signaled lcp is complete
+ */
+ return;
+ }
+
+ if(c_lcpMasterTakeOverState.state != LMTOS_IDLE){
+ jam();
+#ifdef VM_TRACE
+ ndbout_c("Exiting from allNodesLcpCompletedLab");
+#endif
+ return;
+ }
+
+
+ /*------------------------------------------------------------------------ */
+ /* WE HAVE NOW COMPLETED A LOCAL CHECKPOINT. WE ARE NOW READY TO WAIT */
+ /* FOR THE NEXT LOCAL CHECKPOINT. SEND WITHOUT TIME-OUT SINCE IT MIGHT */
+ /* BE TIME TO START THE NEXT LOCAL CHECKPOINT IMMEDIATELY. */
+ /* CLEAR BIT 3 OF SYSTEM RESTART BITS TO INDICATE THAT THERE IS NO */
+ /* LOCAL CHECKPOINT ONGOING. THIS WILL BE WRITTEN AT SOME LATER TIME */
+ /* DURING A GLOBAL CHECKPOINT. IT IS NOT NECESSARY TO WRITE IT */
+ /* IMMEDIATELY. WE WILL ALSO CLEAR BIT 2 OF SYSTEM RESTART BITS IF ALL */
+ /* CURRENTLY ACTIVE NODES COMPLETED THE LOCAL CHECKPOINT. */
+ /*------------------------------------------------------------------------ */
+ CRASH_INSERTION(7019);
+ signal->setTrace(0);
+
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+ setLcpActiveStatusEnd();
+ Sysfile::clearLCPOngoing(SYSFILE->systemRestartBits);
+
+ if(!isMaster()){
+ jam();
+ /**
+ * We're not master, be content
+ */
+ return;
+ }
+
+ // Send LCP_COMPLETE_REP to all other nodes
+ // allowing them to set their lcpStatus to LCP_STATUS_IDLE
+ LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend();
+ rep->nodeId = getOwnNodeId();
+ rep->lcpId = SYSFILE->latestLCP_ID;
+ rep->blockNo = 0; // 0 = Sent from master
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = cfirstAliveNode;
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.i != cownNodeId){
+ BlockReference ref = calcDihBlockRef(nodePtr.i);
+ sendSignal(ref, GSN_LCP_COMPLETE_REP, signal,
+ LcpCompleteRep::SignalLength, JBB);
+ }
+ nodePtr.i = nodePtr.p->nextNode;
+ } while (nodePtr.i != RNIL);
+
+
+ jam();
+ /***************************************************************************/
+ // Report the event that a local checkpoint has completed.
+ /***************************************************************************/
+ signal->theData[0] = NDB_LE_LocalCheckpointCompleted; //Event type
+ signal->theData[1] = SYSFILE->latestLCP_ID;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ /**
+ * Start checking for next LCP
+ */
+ checkLcpStart(signal, __LINE__);
+
+ if (cwaitLcpSr == true) {
+ jam();
+ cwaitLcpSr = false;
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ }//if
+
+ if (c_nodeStartMaster.blockLcp == true) {
+ jam();
+ lcpBlockedLab(signal);
+ return;
+ }//if
+ return;
+}//Dbdih::allNodesLcpCompletedLab()
+
+/******************************************************************************/
+/* ********** TABLE UPDATE MODULE *************/
+/* ****************************************************************************/
+/* ------------------------------------------------------------------------- */
+/* THIS MODULE IS USED TO UPDATE THE TABLE DESCRIPTION. IT STARTS BY */
+/* CREATING THE FIRST TABLE FILE, THEN UPDATES THIS FILE AND CLOSES IT.*/
+/* AFTER THAT THE SAME HAPPENS WITH THE SECOND FILE. AFTER THAT THE */
+/* TABLE DISTRIBUTION HAS BEEN UPDATED. */
+/* */
+/* THE REASON FOR CREATING THE FILE AND NOT OPENING IT IS TO ENSURE */
+/* THAT WE DO NOT GET A MIX OF OLD AND NEW INFORMATION IN THE FILE IN */
+/* ERROR SITUATIONS. */
+/* ------------------------------------------------------------------------- */
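+/* ------------------------------------------------------------------------- */
+/* ILLUSTRATIVE OUTLINE (NO ADDITIONAL LOGIC): each table file passes */
+/* through reqStatus TABLE_CREATE -> TABLE_WRITE -> TABLE_CLOSE, first */
+/* for tabFile[0] and then for tabFile[1]. Only when the second file */
+/* has been closed does tableCloseLab() act on tabUpdateState, so an */
+/* error while writing one file cannot leave both files with a mix of */
+/* old and new information. */
+/* ------------------------------------------------------------------------- */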
+void Dbdih::tableUpdateLab(Signal* signal, TabRecordPtr tabPtr) {
+ FileRecordPtr filePtr;
+ filePtr.i = tabPtr.p->tabFile[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ createFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::TABLE_CREATE;
+ return;
+}//Dbdih::tableUpdateLab()
+
+void Dbdih::tableCreateLab(Signal* signal, FileRecordPtr filePtr)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ writeTabfile(signal, tabPtr.p, filePtr);
+ filePtr.p->reqStatus = FileRecord::TABLE_WRITE;
+ return;
+}//Dbdih::tableCreateLab()
+
+void Dbdih::tableWriteLab(Signal* signal, FileRecordPtr filePtr)
+{
+ closeFile(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::TABLE_CLOSE;
+ return;
+}//Dbdih::tableWriteLab()
+
+void Dbdih::tableCloseLab(Signal* signal, FileRecordPtr filePtr)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if (filePtr.i == tabPtr.p->tabFile[0]) {
+ jam();
+ filePtr.i = tabPtr.p->tabFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ createFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::TABLE_CREATE;
+ return;
+ }//if
+ switch (tabPtr.p->tabUpdateState) {
+ case TabRecord::US_LOCAL_CHECKPOINT:
+ jam();
+ releaseTabPages(tabPtr.i);
+ signal->theData[0] = DihContinueB::ZCHECK_LCP_COMPLETED;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
+
+ tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
+ return;
+ break;
+ case TabRecord::US_REMOVE_NODE:
+ jam();
+ releaseTabPages(tabPtr.i);
+ for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
+ jam();
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ updateNodeInfo(fragPtr);
+ }//for
+ tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
+ if (tabPtr.p->tabLcpStatus == TabRecord::TLS_WRITING_TO_FILE) {
+ jam();
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
+ signal->theData[0] = DihContinueB::ZCHECK_LCP_COMPLETED;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
+ }//if
+ signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+ signal->theData[1] = tabPtr.p->tabRemoveNode;
+ signal->theData[2] = tabPtr.i + 1;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ break;
+ case TabRecord::US_INVALIDATE_NODE_LCP:
+ jam();
+ releaseTabPages(tabPtr.i);
+ tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
+
+ signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP;
+ signal->theData[1] = tabPtr.p->tabRemoveNode;
+ signal->theData[2] = tabPtr.i + 1;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ case TabRecord::US_COPY_TAB_REQ:
+ jam();
+ tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
+ copyTabReq_complete(signal, tabPtr);
+ return;
+ break;
+ case TabRecord::US_ADD_TABLE_MASTER:
+ jam();
+ releaseTabPages(tabPtr.i);
+ tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
+ signal->theData[0] = DihContinueB::ZDIH_ADD_TABLE_MASTER;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ case TabRecord::US_ADD_TABLE_SLAVE:
+ jam();
+ releaseTabPages(tabPtr.i);
+ tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
+ signal->theData[0] = DihContinueB::ZDIH_ADD_TABLE_SLAVE;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+}//Dbdih::tableCloseLab()
+
+/**
+ * GCP stop detected,
+ * send SYSTEM_ERROR to all other alive nodes
+ */
+void Dbdih::crashSystemAtGcpStop(Signal* signal){
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ const BlockReference ref =
+ numberToRef(refToBlock(cntrlblockref), nodePtr.i);
+ SystemError * const sysErr = (SystemError*)&signal->theData[0];
+ sysErr->errorCode = SystemError::GCPStopDetected;
+ sysErr->errorRef = reference();
+ sysErr->data1 = cgcpStatus;
+ sysErr->data2 = cgcpOrderBlocked;
+ sendSignal(ref, GSN_SYSTEM_ERROR, signal,
+ SystemError::SignalLength, JBA);
+ }//if
+ }//for
+ return;
+}//Dbdih::crashSystemAtGcpStop()
+
+/*************************************************************************/
+/* */
+/* MODULE: ALLOCPAGE */
+/* DESCRIPTION: THE SUBROUTINE IS CALLED WITH POINTER TO PAGE */
+/* RECORD. A PAGE RECORD IS TAKEN FROM */
+/* THE FREE PAGE LIST */
+/*************************************************************************/
+void Dbdih::allocpage(PageRecordPtr& pagePtr)
+{
+ ndbrequire(cfirstfreepage != RNIL);
+ pagePtr.i = cfirstfreepage;
+ ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
+ cfirstfreepage = pagePtr.p->nextfreepage;
+ pagePtr.p->nextfreepage = RNIL;
+}//Dbdih::allocpage()
+
+/*************************************************************************/
+/* */
+/* MODULE: ALLOC_STORED_REPLICA */
+/* DESCRIPTION: THE SUBROUTINE IS CALLED TO GET A REPLICA RECORD, */
+/* TO INITIALISE IT AND TO LINK IT INTO THE FRAGMENT */
+/* STORE RECORD. USED FOR STORED REPLICAS. */
+/*************************************************************************/
+void Dbdih::allocStoredReplica(FragmentstorePtr fragPtr,
+ ReplicaRecordPtr& newReplicaPtr,
+ Uint32 nodeId)
+{
+ Uint32 i;
+ ReplicaRecordPtr arrReplicaPtr;
+ ReplicaRecordPtr arrPrevReplicaPtr;
+
+ seizeReplicaRec(newReplicaPtr);
+ for (i = 0; i < MAX_LCP_STORED; i++) {
+ newReplicaPtr.p->maxGciCompleted[i] = 0;
+ newReplicaPtr.p->maxGciStarted[i] = 0;
+ newReplicaPtr.p->lcpId[i] = 0;
+ newReplicaPtr.p->lcpStatus[i] = ZINVALID;
+ }//for
+ newReplicaPtr.p->noCrashedReplicas = 0;
+ newReplicaPtr.p->initialGci = currentgcp;
+ for (i = 0; i < 8; i++) {
+ newReplicaPtr.p->replicaLastGci[i] = (Uint32)-1;
+ newReplicaPtr.p->createGci[i] = 0;
+ }//for
+ newReplicaPtr.p->createGci[0] = currentgcp;
+ ndbrequire(currentgcp != 0xF1F1F1F1);
+ newReplicaPtr.p->nextLcp = 0;
+ newReplicaPtr.p->procNode = nodeId;
+ newReplicaPtr.p->lcpOngoingFlag = false;
+ newReplicaPtr.p->lcpIdStarted = 0;
+
+ arrPrevReplicaPtr.i = RNIL;
+ arrReplicaPtr.i = fragPtr.p->storedReplicas;
+ while (arrReplicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(arrReplicaPtr, creplicaFileSize, replicaRecord);
+ arrPrevReplicaPtr = arrReplicaPtr;
+ arrReplicaPtr.i = arrReplicaPtr.p->nextReplica;
+ }//while
+ if (arrPrevReplicaPtr.i == RNIL) {
+ jam();
+ fragPtr.p->storedReplicas = newReplicaPtr.i;
+ } else {
+ jam();
+ arrPrevReplicaPtr.p->nextReplica = newReplicaPtr.i;
+ }//if
+ fragPtr.p->noStoredReplicas++;
+}//Dbdih::allocStoredReplica()
+
+/*************************************************************************/
+/* CALCULATE HOW MANY HOT SPARES ARE TO BE ASSIGNED IN THIS SYSTEM */
+/*************************************************************************/
+void Dbdih::calculateHotSpare()
+{
+ Uint32 tchsTmp;
+ Uint32 tchsNoNodes;
+
+ switch (cnoReplicas) {
+ case 1:
+ jam();
+ cnoHotSpare = 0;
+ break;
+ case 2:
+ case 3:
+ case 4:
+ jam();
+ if (csystemnodes > cnoReplicas) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* WITH MORE NODES THAN REPLICAS WE WILL ALWAYS USE AT LEAST ONE HOT */
+ /* SPARE IF THAT HAS BEEN REQUESTED BY THE CONFIGURATION FILE. THE */
+ /* NUMBER OF NODES TO BE USED FOR NORMAL OPERATION IS ALWAYS */
+ /* A MULTIPLE OF THE NUMBER OF REPLICAS SINCE WE WILL ORGANISE NODES */
+ /* INTO NODE GROUPS. THE REMAINING NODES WILL BE HOT SPARE NODES. */
+ /* --------------------------------------------------------------------- */
+ if ((csystemnodes - cnoReplicas) >= cminHotSpareNodes) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ // We set the minimum number of hot spares according to users request
+ // through the configuration file.
+ /* --------------------------------------------------------------------- */
+ tchsNoNodes = csystemnodes - cminHotSpareNodes;
+ cnoHotSpare = cminHotSpareNodes;
+ } else if (cminHotSpareNodes > 0) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ // The user requested at least one hot spare node and we will support him
+ // in that.
+ /* --------------------------------------------------------------------- */
+ tchsNoNodes = csystemnodes - 1;
+ cnoHotSpare = 1;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------- */
+ // The user did not request any hot spare nodes so in this case we will
+ // only use hot spare nodes if the number of nodes is such that we cannot
+ // use all nodes as normal nodes.
+ /* --------------------------------------------------------------------- */
+ tchsNoNodes = csystemnodes;
+ cnoHotSpare = 0;
+ }//if
+ } else {
+ jam();
+ /* --------------------------------------------------------------------- */
+ // We only have enough to support the replicas. We will not have any hot
+ // spares.
+ /* --------------------------------------------------------------------- */
+ tchsNoNodes = csystemnodes;
+ cnoHotSpare = 0;
+ }//if
+ tchsTmp = tchsNoNodes - (cnoReplicas * (tchsNoNodes / cnoReplicas));
+ cnoHotSpare = cnoHotSpare + tchsTmp;
+ break;
+ default:
+ jam();
+ progError(0, 0);
+ break;
+ }//switch
+}//Dbdih::calculateHotSpare()
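+
+/*************************************************************************/
+/* WORKED EXAMPLE FOR calculateHotSpare() (ASSUMED CONFIGURATION, FOR */
+/* ILLUSTRATION ONLY): with csystemnodes = 7, cnoReplicas = 2 and */
+/* cminHotSpareNodes = 0 the branches above give tchsNoNodes = 7 and */
+/* cnoHotSpare = 0. The final step computes */
+/* tchsTmp = 7 - (2 * (7 / 2)) = 1, i.e. the one node that does not */
+/* fit into a complete node group becomes an extra hot spare, so */
+/* cnoHotSpare ends up as 1. */
+/*************************************************************************/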
+
+/*************************************************************************/
+/* CHECK IF THE NODE CRASH IS TO ESCALATE INTO A SYSTEM CRASH. WE COULD */
+/* DO THIS BECAUSE ALL REPLICAS OF SOME FRAGMENT ARE LOST. WE COULD ALSO */
+/* DO IT AFTER MANY NODE FAILURES THAT MAKE IT VERY DIFFICULT TO RESTORE */
+/* DATABASE AFTER A SYSTEM CRASH. IT MIGHT EVEN BE IMPOSSIBLE AND THIS */
+/* MUST BE AVOIDED EVEN MORE THAN AVOIDING SYSTEM CRASHES. */
+/*************************************************************************/
+void Dbdih::checkEscalation()
+{
+ Uint32 TnodeGroup[MAX_NDB_NODES];
+ NodeRecordPtr nodePtr;
+ Uint32 i;
+ for (i = 0; i < MAX_NDB_NODES; i++) {
+ TnodeGroup[i] = ZFALSE;
+ }//for
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE &&
+ nodePtr.p->activeStatus == Sysfile::NS_Active){
+ ndbrequire(nodePtr.p->nodeGroup < MAX_NDB_NODES);
+ TnodeGroup[nodePtr.p->nodeGroup] = ZTRUE;
+ }
+ }
+ for (i = 0; i < cnoOfNodeGroups; i++) {
+ jam();
+ if (TnodeGroup[i] == ZFALSE) {
+ jam();
+ progError(__LINE__, ERR_SYSTEM_ERROR, "Lost node group");
+ }//if
+ }//for
+}//Dbdih::checkEscalation()
+
+/*************************************************************************/
+/* */
+/* MODULE: CHECK_KEEP_GCI */
+/* DESCRIPTION: CHECK FOR MINIMUM GCI RESTORABLE WITH NEW LOCAL */
+/* CHECKPOINT. */
+/*************************************************************************/
+void Dbdih::checkKeepGci(Uint32 replicaStartIndex)
+{
+ ReplicaRecordPtr ckgReplicaPtr;
+ ckgReplicaPtr.i = replicaStartIndex;
+ while (ckgReplicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(ckgReplicaPtr, creplicaFileSize, replicaRecord);
+ Uint32 keepGci;
+ Uint32 oldestRestorableGci;
+ findMinGci(ckgReplicaPtr, keepGci, oldestRestorableGci);
+ if (keepGci < c_lcpState.keepGci) {
+ jam();
+ /* ------------------------------------------------------------------- */
+ /* WE MUST KEEP LOG RECORDS SO THAT WE CAN USE ALL LOCAL CHECKPOINTS */
+ /* THAT ARE AVAILABLE. THUS WE NEED TO CALCULATE THE MINIMUM OVER ALL */
+ /* FRAGMENTS. */
+ /* ------------------------------------------------------------------- */
+ c_lcpState.keepGci = keepGci;
+ }//if
+ if (oldestRestorableGci > c_lcpState.oldestRestorableGci) {
+ jam();
+ c_lcpState.oldestRestorableGci = oldestRestorableGci;
+ ndbrequire(((int)c_lcpState.oldestRestorableGci) >= 0);
+ }//if
+ ckgReplicaPtr.i = ckgReplicaPtr.p->nextReplica;
+ }//while
+}//Dbdih::checkKeepGci()
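+
+/*************************************************************************/
+/* ILLUSTRATIVE EXAMPLE FOR checkKeepGci() (ASSUMED VALUES): if one */
+/* replica can restore from its newest valid LCP with */
+/* maxGciCompleted = 100 while another only has an LCP with */
+/* maxGciCompleted = 80, c_lcpState.keepGci is pulled down to 80 (the */
+/* redo log must be kept from there), while */
+/* c_lcpState.oldestRestorableGci is raised to the largest */
+/* maxGciStarted reported by findMinGci over all replicas. */
+/*************************************************************************/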
+
+void Dbdih::closeFile(Signal* signal, FileRecordPtr filePtr)
+{
+ signal->theData[0] = filePtr.p->fileRef;
+ signal->theData[1] = reference();
+ signal->theData[2] = filePtr.i;
+ signal->theData[3] = ZCLOSE_NO_DELETE;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+}//Dbdih::closeFile()
+
+void Dbdih::closeFileDelete(Signal* signal, FileRecordPtr filePtr)
+{
+ signal->theData[0] = filePtr.p->fileRef;
+ signal->theData[1] = reference();
+ signal->theData[2] = filePtr.i;
+ signal->theData[3] = ZCLOSE_DELETE;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+}//Dbdih::closeFileDelete()
+
+void Dbdih::createFileRw(Signal* signal, FileRecordPtr filePtr)
+{
+ signal->theData[0] = reference();
+ signal->theData[1] = filePtr.i;
+ signal->theData[2] = filePtr.p->fileName[0];
+ signal->theData[3] = filePtr.p->fileName[1];
+ signal->theData[4] = filePtr.p->fileName[2];
+ signal->theData[5] = filePtr.p->fileName[3];
+ signal->theData[6] = ZCREATE_READ_WRITE;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+}//Dbdih::createFileRw()
+
+void Dbdih::emptyverificbuffer(Signal* signal, bool aContinueB)
+{
+ if(cfirstVerifyQueue == RNIL){
+ jam();
+ return;
+ }//if
+ ApiConnectRecordPtr localApiConnectptr;
+ if(getBlockCommit() == false){
+ jam();
+ ndbrequire(cverifyQueueCounter > 0);
+ cverifyQueueCounter--;
+ localApiConnectptr.i = cfirstVerifyQueue;
+ ptrCheckGuard(localApiConnectptr, capiConnectFileSize, apiConnectRecord);
+ ndbrequire(localApiConnectptr.p->apiGci <= currentgcp);
+ cfirstVerifyQueue = localApiConnectptr.p->nextApi;
+ if (cfirstVerifyQueue == RNIL) {
+ jam();
+ ndbrequire(cverifyQueueCounter == 0);
+ clastVerifyQueue = RNIL;
+ }//if
+ signal->theData[0] = localApiConnectptr.i;
+ signal->theData[1] = currentgcp;
+ sendSignal(clocaltcblockref, GSN_DIVERIFYCONF, signal, 2, JBB);
+ if (aContinueB == true) {
+ jam();
+ //-----------------------------------------------------------------------
+ // This emptying happens as part of a take-out process driven by CONTINUEB
+ // signals.
+ // This ensures that we will empty the queue eventually. We will also empty
+ // one item every time we insert one item to ensure that the list doesn't
+ // grow when it is not blocked.
+ //-----------------------------------------------------------------------
+ signal->theData[0] = DihContinueB::ZEMPTY_VERIFY_QUEUE;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
+ }//if
+ } else {
+ jam();
+ //-----------------------------------------------------------------------
+ // We are blocked so it is no use in continuing the emptying of the
+ // verify buffer. Whenever the block is removed the emptying will
+ // restart.
+ //-----------------------------------------------------------------------
+ }
+ return;
+}//Dbdih::emptyverificbuffer()
+
+/*----------------------------------------------------------------*/
+/* FIND A FREE HOT SPARE IF AVAILABLE AND ALIVE. */
+/*----------------------------------------------------------------*/
+Uint32 Dbdih::findHotSpare()
+{
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ if (nodePtr.p->activeStatus == Sysfile::NS_HotSpare) {
+ jam();
+ return nodePtr.i;
+ }//if
+ }//if
+ }//for
+ return RNIL;
+}//Dbdih::findHotSpare()
+
+/*************************************************************************/
+/* FIND THE NODES FROM WHICH WE CAN EXECUTE THE LOG TO RESTORE THE */
+/* DATA NODE IN A SYSTEM RESTART. */
+/*************************************************************************/
+bool Dbdih::findLogNodes(CreateReplicaRecord* createReplica,
+ FragmentstorePtr fragPtr,
+ Uint32 startGci,
+ Uint32 stopGci)
+{
+ ConstPtr<ReplicaRecord> flnReplicaPtr;
+ flnReplicaPtr.i = createReplica->replicaRec;
+ ptrCheckGuard(flnReplicaPtr, creplicaFileSize, replicaRecord);
+ /* --------------------------------------------------------------------- */
+ /* WE START BY CHECKING IF THE DATA NODE CAN HANDLE THE LOG ALL BY */
+ /* ITSELF. THIS IS THE DESIRED BEHAVIOUR. IF THIS IS NOT POSSIBLE */
+ /* THEN WE SEARCH FOR THE BEST POSSIBLE NODES AMONG THE NODES THAT */
+ /* ARE PART OF THIS SYSTEM RESTART. */
+ /* THIS CAN ONLY BE HANDLED BY THE LAST CRASHED REPLICA. */
+ /* The condition is that the replica was created before or at the */
+ /* time of the starting gci, in addition it must have been alive */
+ /* at the time of the stopping gci. This is checked by two */
+ /* conditions, the first checks replicaLastGci and the second */
+ /* checks that it is also smaller than the last gci the node was */
+ /* involved in. This is necessary to check since createGci is set */
+ /* to lastGci + 1 and sometimes startGci = stopGci + 1; in that case */
+ /* it could happen that replicaLastGci is set to -1 while createGci */
+ /* is set to lastGci + 1. */
+ /* --------------------------------------------------------------------- */
+ arrGuard(flnReplicaPtr.p->noCrashedReplicas, 8);
+ const Uint32 noCrashed = flnReplicaPtr.p->noCrashedReplicas;
+
+ if (!(ERROR_INSERTED(7073) || ERROR_INSERTED(7074))&&
+ (startGci >= flnReplicaPtr.p->createGci[noCrashed]) &&
+ (stopGci <= flnReplicaPtr.p->replicaLastGci[noCrashed]) &&
+ (stopGci <= SYSFILE->lastCompletedGCI[flnReplicaPtr.p->procNode])) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* WE FOUND ALL THE LOG RECORDS NEEDED IN THE DATA NODE. WE WILL */
+ /* USE THOSE. */
+ /* --------------------------------------------------------------------- */
+ createReplica->noLogNodes = 1;
+ createReplica->logStartGci[0] = startGci;
+ createReplica->logStopGci[0] = stopGci;
+ createReplica->logNodeId[0] = flnReplicaPtr.p->procNode;
+ return true;
+ }//if
+ Uint32 logNode = 0;
+ do {
+ Uint32 fblStopGci;
+ jam();
+ if(!findBestLogNode(createReplica,
+ fragPtr,
+ startGci,
+ stopGci,
+ logNode,
+ fblStopGci)){
+ jam();
+ return false;
+ }
+
+ logNode++;
+ if (fblStopGci >= stopGci) {
+ jam();
+ createReplica->noLogNodes = logNode;
+ return true;
+ }//if
+ startGci = fblStopGci + 1;
+ if (logNode >= 4) { // Why??
+ jam();
+ break;
+ }//if
+ } while (1);
+ /* --------------------------------------------------------------------- */
+ /* IT WAS NOT POSSIBLE TO RESTORE THE REPLICA. THIS CAN EITHER BE */
+ /* BECAUSE OF LACKING NODES OR BECAUSE OF A REALLY SERIOUS PROBLEM.*/
+ /* --------------------------------------------------------------------- */
+ return false;
+}//Dbdih::findLogNodes()
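+
+/*************************************************************************/
+/* ILLUSTRATIVE EXAMPLE FOR findLogNodes() (ASSUMED GCI VALUES): to */
+/* cover startGci = 10 and stopGci = 40, a first call to */
+/* findBestLogNode may return a node whose log only reaches GCI 25. */
+/* The interval [10, 25] is then assigned to log node 0, startGci is */
+/* advanced to 26 and a second node is chosen for [26, 40]. At most */
+/* four log nodes are chained in this way. */
+/*************************************************************************/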
+
+/*************************************************************************/
+/* FIND THE BEST POSSIBLE LOG NODE TO EXECUTE THE LOG AS SPECIFIED */
+/* BY THE INPUT PARAMETERS. WE SCAN THROUGH ALL ALIVE REPLICAS. */
+/* THIS MEANS STORED, OLD_STORED */
+/*************************************************************************/
+bool
+Dbdih::findBestLogNode(CreateReplicaRecord* createReplica,
+ FragmentstorePtr fragPtr,
+ Uint32 startGci,
+ Uint32 stopGci,
+ Uint32 logNode,
+ Uint32& fblStopGci)
+{
+ ConstPtr<ReplicaRecord> fblFoundReplicaPtr;
+ ConstPtr<ReplicaRecord> fblReplicaPtr;
+
+ /* --------------------------------------------------------------------- */
+ /* WE START WITH ZERO AS FOUND TO ENSURE THAT FIRST HIT WILL BE */
+ /* BETTER. */
+ /* --------------------------------------------------------------------- */
+ fblStopGci = 0;
+ fblReplicaPtr.i = fragPtr.p->storedReplicas;
+ while (fblReplicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(fblReplicaPtr, creplicaFileSize, replicaRecord);
+ if (checkNodeAlive(fblReplicaPtr.p->procNode)) {
+ jam();
+ Uint32 fliStopGci = findLogInterval(fblReplicaPtr, startGci);
+ if (fliStopGci > fblStopGci) {
+ jam();
+ fblStopGci = fliStopGci;
+ fblFoundReplicaPtr = fblReplicaPtr;
+ }//if
+ }//if
+ fblReplicaPtr.i = fblReplicaPtr.p->nextReplica;
+ }//while
+ fblReplicaPtr.i = fragPtr.p->oldStoredReplicas;
+ while (fblReplicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(fblReplicaPtr, creplicaFileSize, replicaRecord);
+ if (checkNodeAlive(fblReplicaPtr.p->procNode)) {
+ jam();
+ Uint32 fliStopGci = findLogInterval(fblReplicaPtr, startGci);
+ if (fliStopGci > fblStopGci) {
+ jam();
+ fblStopGci = fliStopGci;
+ fblFoundReplicaPtr = fblReplicaPtr;
+ }//if
+ }//if
+ fblReplicaPtr.i = fblReplicaPtr.p->nextReplica;
+ }//while
+ if (fblStopGci != 0) {
+ jam();
+ ndbrequire(logNode < MAX_LOG_EXEC);
+ createReplica->logNodeId[logNode] = fblFoundReplicaPtr.p->procNode;
+ createReplica->logStartGci[logNode] = startGci;
+ if (fblStopGci >= stopGci) {
+ jam();
+ createReplica->logStopGci[logNode] = stopGci;
+ } else {
+ jam();
+ createReplica->logStopGci[logNode] = fblStopGci;
+ }//if
+ }//if
+
+ return fblStopGci != 0;
+}//Dbdih::findBestLogNode()
+
+Uint32 Dbdih::findLogInterval(ConstPtr<ReplicaRecord> replicaPtr,
+ Uint32 startGci)
+{
+ ndbrequire(replicaPtr.p->noCrashedReplicas <= 8);
+ Uint32 loopLimit = replicaPtr.p->noCrashedReplicas + 1;
+ for (Uint32 i = 0; i < loopLimit; i++) {
+ jam();
+ if (replicaPtr.p->createGci[i] <= startGci) {
+ if (replicaPtr.p->replicaLastGci[i] >= startGci) {
+ jam();
+ return replicaPtr.p->replicaLastGci[i];
+ }//if
+ }//if
+ }//for
+ return 0;
+}//Dbdih::findLogInterval()
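+
+/*************************************************************************/
+/* ILLUSTRATIVE EXAMPLE FOR findLogInterval() (ASSUMED INTERVALS): for */
+/* a replica with [createGci, replicaLastGci] intervals [1, 15] and */
+/* [20, 35], findLogInterval(replica, 10) returns 15, */
+/* findLogInterval(replica, 25) returns 35, and */
+/* findLogInterval(replica, 17) returns 0 since no interval covers */
+/* GCI 17. */
+/*************************************************************************/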
+
+/*************************************************************************/
+/* */
+/* MODULE: FIND THE MINIMUM GCI THAT THIS NODE HAS LOG RECORDS FOR.*/
+/*************************************************************************/
+void Dbdih::findMinGci(ReplicaRecordPtr fmgReplicaPtr,
+ Uint32& keepGci,
+ Uint32& oldestRestorableGci)
+{
+ Uint32 nextLcpNo;
+ Uint32 lcpNo;
+ for (Uint32 i = 0; i < MAX_LCP_STORED; i++) {
+ jam();
+ if ((fmgReplicaPtr.p->lcpStatus[i] == ZVALID) &&
+ ((fmgReplicaPtr.p->lcpId[i] + MAX_LCP_STORED) <= (SYSFILE->latestLCP_ID + 1))) {
+ jam();
+ /*--------------------------------------------------------------------*/
+ // We invalidate the checkpoint we are preparing to overwrite.
+ // The LCP id is still the old lcp id,
+ // this is the reason of comparing with lcpId + 1.
+ /*---------------------------------------------------------------------*/
+ fmgReplicaPtr.p->lcpStatus[i] = ZINVALID;
+ }//if
+ }//for
+ keepGci = (Uint32)-1;
+ oldestRestorableGci = 0;
+ nextLcpNo = fmgReplicaPtr.p->nextLcp;
+ lcpNo = fmgReplicaPtr.p->nextLcp;
+ do {
+ ndbrequire(lcpNo < MAX_LCP_STORED);
+ if (fmgReplicaPtr.p->lcpStatus[lcpNo] == ZVALID) {
+ jam();
+ keepGci = fmgReplicaPtr.p->maxGciCompleted[lcpNo];
+ oldestRestorableGci = fmgReplicaPtr.p->maxGciStarted[lcpNo];
+ ndbrequire(((int)oldestRestorableGci) >= 0);
+ return;
+ } else {
+ jam();
+ ndbrequire(fmgReplicaPtr.p->lcpStatus[lcpNo] == ZINVALID);
+ if (fmgReplicaPtr.p->createGci[0] == fmgReplicaPtr.p->initialGci) {
+ jam();
+ /*-------------------------------------------------------------------
+ * WE CAN STILL RESTORE THIS REPLICA WITHOUT ANY LOCAL CHECKPOINTS BY
+ * ONLY USING THE LOG. IF THIS IS NOT POSSIBLE THEN WE REPORT THE LAST
+ * VALID LOCAL CHECKPOINT AS THE MINIMUM GCI RECOVERABLE.
+ *-----------------------------------------------------------------*/
+ keepGci = fmgReplicaPtr.p->createGci[0];
+ }//if
+ }//if
+ lcpNo = prevLcpNo(lcpNo);
+ } while (lcpNo != nextLcpNo);
+ return;
+}//Dbdih::findMinGci()
+
+bool Dbdih::findStartGci(ConstPtr<ReplicaRecord> replicaPtr,
+ Uint32 stopGci,
+ Uint32& startGci,
+ Uint32& lcpNo)
+{
+ lcpNo = replicaPtr.p->nextLcp;
+ const Uint32 startLcpNo = lcpNo;
+ do {
+ lcpNo = prevLcpNo(lcpNo);
+ ndbrequire(lcpNo < MAX_LCP_STORED);
+ if (replicaPtr.p->lcpStatus[lcpNo] == ZVALID) {
+ if (replicaPtr.p->maxGciStarted[lcpNo] < stopGci) {
+ jam();
+ /* ----------------------------------------------------------------- */
+ /* WE HAVE FOUND A USEFUL LOCAL CHECKPOINT THAT CAN BE USED FOR */
+ /* RESTARTING THIS FRAGMENT REPLICA. */
+ /* ----------------------------------------------------------------- */
+ startGci = replicaPtr.p->maxGciCompleted[lcpNo] + 1;
+ return true;
+ }
+ }
+ } while (lcpNo != startLcpNo);
+ /* --------------------------------------------------------------------- */
+ /* NO VALID LOCAL CHECKPOINT WAS AVAILABLE. WE WILL ADD THE */
+ /* FRAGMENT. THUS THE NEXT LCP MUST BE SET TO ZERO. */
+ /* WE MUST EXECUTE THE LOG FROM THE INITIAL GLOBAL CHECKPOINT WHEN */
+ /* THE TABLE WAS CREATED. */
+ /* --------------------------------------------------------------------- */
+ startGci = replicaPtr.p->initialGci;
+ ndbrequire(replicaPtr.p->nextLcp == 0);
+ return false;
+}//Dbdih::findStartGci()
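+
+/*************************************************************************/
+/* ILLUSTRATIVE EXAMPLE FOR findStartGci() (ASSUMED VALUES): if the */
+/* newest valid LCP of a replica has maxGciStarted = 50 and */
+/* maxGciCompleted = 45, then for stopGci = 60 that LCP is usable and */
+/* startGci becomes 46, i.e. the log is executed from GCI 46 up to 60. */
+/* For stopGci = 50 the condition maxGciStarted < stopGci fails and an */
+/* older LCP, or the log from initialGci if no valid LCP exists, must */
+/* be used instead. */
+/*************************************************************************/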
+
+/**************************************************************************/
+/* ---------------------------------------------------------------------- */
+/* FIND A TAKE OVER REPLICA WHICH IS TO BE STARTED OR COMMITTED WHEN*/
+/* TAKING OVER A FAILED NODE. */
+/* ---------------------------------------------------------------------- */
+/*************************************************************************/
+void Dbdih::findToReplica(TakeOverRecord* regTakeOver,
+ Uint32 replicaType,
+ FragmentstorePtr fragPtr,
+ ReplicaRecordPtr& ftrReplicaPtr)
+{
+ switch (replicaType) {
+ case CreateFragReq::STORED:
+ case CreateFragReq::COMMIT_STORED:
+ /* ----------------------------------------------------------------------*/
+ /* HERE WE SEARCH FOR STORED REPLICAS. THE REPLICA MUST BE STORED IN THE */
+ /* SECTION FOR OLD STORED REPLICAS SINCE WE HAVE NOT TAKEN OVER YET. */
+ /* ----------------------------------------------------------------------*/
+ ftrReplicaPtr.i = fragPtr.p->oldStoredReplicas;
+ while (ftrReplicaPtr.i != RNIL) {
+ ptrCheckGuard(ftrReplicaPtr, creplicaFileSize, replicaRecord);
+ if (ftrReplicaPtr.p->procNode == regTakeOver->toStartingNode) {
+ jam();
+ return;
+ } else {
+ if (ftrReplicaPtr.p->procNode == regTakeOver->toFailedNode) {
+ jam();
+ return;
+ } else {
+ jam();
+ ftrReplicaPtr.i = ftrReplicaPtr.p->nextReplica;
+ }//if
+ }//if
+ }//while
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dbdih::findToReplica()
+
+void Dbdih::initCommonData()
+{
+ c_blockCommit = false;
+ c_blockCommitNo = 0;
+ c_createFragmentLock = RNIL;
+ c_endToLock = RNIL;
+ cfailurenr = 1;
+ cfirstAliveNode = RNIL;
+ cfirstDeadNode = RNIL;
+ cfirstVerifyQueue = RNIL;
+ cgckptflag = false;
+ cgcpDelay = 0;
+ cgcpMasterTakeOverState = GMTOS_IDLE;
+ cgcpOrderBlocked = 0;
+ cgcpParticipantState = GCP_PARTICIPANT_READY;
+ cgcpSameCounter = 0;
+ cgcpStartCounter = 0;
+ cgcpStatus = GCP_READY;
+
+ clastVerifyQueue = RNIL;
+ c_lcpMasterTakeOverState.set(LMTOS_IDLE, __LINE__);
+
+ c_lcpState.clcpDelay = 0;
+ c_lcpState.lcpStart = ZIDLE;
+ c_lcpState.lcpStartGcp = 0;
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+ c_lcpState.currentFragment.tableId = 0;
+ c_lcpState.currentFragment.fragmentId = 0;
+ c_lcpState.noOfLcpFragRepOutstanding = 0;
+ c_lcpState.keepGci = 0;
+ c_lcpState.oldestRestorableGci = 0;
+ c_lcpState.ctcCounter = 0;
+ c_lcpState.ctimer = 0;
+ c_lcpState.immediateLcpStart = false;
+ c_lcpState.m_MASTER_LCPREQ_Received = false;
+
+ cmasterdihref = 0;
+ cmasterNodeId = 0;
+ cmasterState = MASTER_IDLE;
+ cmasterTakeOverNode = 0;
+ cnewgcp = 0;
+ cnoHotSpare = 0;
+ cnoOfActiveTables = 0;
+ cnoOfNodeGroups = 0;
+ c_nextNodeGroup = 0;
+ cnoReplicas = 0;
+ coldgcp = 0;
+ coldGcpId = 0;
+ coldGcpStatus = cgcpStatus;
+ con_lineNodes = 0;
+ creceivedfrag = 0;
+ crestartGci = 0;
+ crestartInfoFile[0] = RNIL;
+ crestartInfoFile[1] = RNIL;
+ cstartGcpNow = false;
+ cstartPhase = 0;
+ c_startToLock = RNIL;
+ cstarttype = (Uint32)-1;
+ csystemnodes = 0;
+ c_updateToLock = RNIL;
+ currentgcp = 0;
+ cverifyQueueCounter = 0;
+ cwaitLcpSr = false;
+
+ nodeResetStart();
+ c_nodeStartMaster.wait = ZFALSE;
+
+ memset(&sysfileData[0], 0, sizeof(sysfileData));
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ c_lcpState.clcpDelay = 20;
+ ndb_mgm_get_int_parameter(p, CFG_DB_LCP_INTERVAL, &c_lcpState.clcpDelay);
+ c_lcpState.clcpDelay = c_lcpState.clcpDelay > 31 ? 31 : c_lcpState.clcpDelay;
+
+ cminHotSpareNodes = 0;
+ //ndb_mgm_get_int_parameter(p, CFG_DB_MIN_HOT_SPARES, &cminHotSpareNodes);
+ cminHotSpareNodes = cminHotSpareNodes > 2 ? 2 : cminHotSpareNodes;
+
+ cnoReplicas = 1;
+ ndb_mgm_get_int_parameter(p, CFG_DB_NO_REPLICAS, &cnoReplicas);
+ cnoReplicas = cnoReplicas > 4 ? 4 : cnoReplicas;
+
+ cgcpDelay = 2000;
+ ndb_mgm_get_int_parameter(p, CFG_DB_GCP_INTERVAL, &cgcpDelay);
+ cgcpDelay = cgcpDelay > 60000 ? 60000 : (cgcpDelay < 10 ? 10 : cgcpDelay);
+}//Dbdih::initCommonData()
+
+void Dbdih::initFragstore(FragmentstorePtr fragPtr)
+{
+ fragPtr.p->storedReplicas = RNIL;
+ fragPtr.p->oldStoredReplicas = RNIL;
+
+ fragPtr.p->noStoredReplicas = 0;
+ fragPtr.p->noOldStoredReplicas = 0;
+ fragPtr.p->fragReplicas = 0;
+ fragPtr.p->preferredPrimary = 0;
+
+ for (Uint32 i = 0; i < MAX_REPLICAS; i++)
+ fragPtr.p->activeNodes[i] = 0;
+
+ fragPtr.p->noLcpReplicas = 0;
+ fragPtr.p->distributionKey = 0;
+}//Dbdih::initFragstore()
+
+/*************************************************************************/
+/* */
+/* MODULE: INIT_RESTART_INFO */
+/* DESCRIPTION: INITIATE RESTART INFO VARIABLE AND VARIABLES FOR */
+/* GLOBAL CHECKPOINTS. */
+/*************************************************************************/
+void Dbdih::initRestartInfo()
+{
+ Uint32 i;
+ for (i = 0; i < MAX_NDB_NODES; i++) {
+ SYSFILE->lastCompletedGCI[i] = 0;
+ }//for
+ NodeRecordPtr nodePtr;
+ nodePtr.i = cfirstAliveNode;
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ SYSFILE->lastCompletedGCI[nodePtr.i] = 1;
+ /* FIRST GCP = 1 ALREADY SET BY LQH */
+ nodePtr.i = nodePtr.p->nextNode;
+ } while (nodePtr.i != RNIL);
+ coldgcp = 1;
+ currentgcp = 2;
+ cnewgcp = 2;
+ crestartGci = 1;
+
+ SYSFILE->keepGCI = 1;
+ SYSFILE->oldestRestorableGCI = 1;
+ SYSFILE->newestRestorableGCI = 1;
+ SYSFILE->systemRestartBits = 0;
+ for (i = 0; i < NodeBitmask::Size; i++) {
+ SYSFILE->lcpActive[i] = 0;
+ }//for
+ for (i = 0; i < Sysfile::TAKE_OVER_SIZE; i++) {
+ SYSFILE->takeOver[i] = 0;
+ }//for
+ Sysfile::setInitialStartOngoing(SYSFILE->systemRestartBits);
+}//Dbdih::initRestartInfo()
+
+/*--------------------------------------------------------------------*/
+/* NODE GROUP BITS ARE INITIALISED BEFORE THIS. */
+/* NODE ACTIVE BITS ARE INITIALISED BEFORE THIS. */
+/*--------------------------------------------------------------------*/
+/*************************************************************************/
+/* */
+/* MODULE: INIT_RESTORABLE_GCI_FILES */
+/* DESCRIPTION: THE SUBROUTINE SETS UP THE FILES THAT REFERS TO THE*/
+/* FILES THAT KEEP THE VARIABLE CRESTART_INFO */
+/*************************************************************************/
+void Dbdih::initRestorableGciFiles()
+{
+ Uint32 tirgTmp;
+ FileRecordPtr filePtr;
+ seizeFile(filePtr);
+ filePtr.p->tabRef = RNIL;
+ filePtr.p->fileType = FileRecord::GCP_FILE;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ filePtr.p->fileStatus = FileRecord::CLOSED;
+ crestartInfoFile[0] = filePtr.i;
+ filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */
+ filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */
+ filePtr.p->fileName[2] = (Uint32)-1; /* S PART IGNORED */
+ tirgTmp = 1; /* FILE NAME VERSION 1 */
+ tirgTmp = (tirgTmp << 8) + 6; /* .SYSFILE */
+ tirgTmp = (tirgTmp << 8) + 1; /* D1 DIRECTORY */
+ tirgTmp = (tirgTmp << 8) + 0; /* P0 FILE NAME */
+ filePtr.p->fileName[3] = tirgTmp;
+ /* --------------------------------------------------------------------- */
+ /* THE NAME BECOMES /D1/DBDICT/P0.SYSFILE */
+ /* --------------------------------------------------------------------- */
+ seizeFile(filePtr);
+ filePtr.p->tabRef = RNIL;
+ filePtr.p->fileType = FileRecord::GCP_FILE;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ filePtr.p->fileStatus = FileRecord::CLOSED;
+ crestartInfoFile[1] = filePtr.i;
+ filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */
+ filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */
+ filePtr.p->fileName[2] = (Uint32)-1; /* S PART IGNORED */
+ tirgTmp = 1; /* FILE NAME VERSION 1 */
+ tirgTmp = (tirgTmp << 8) + 6; /* .SYSFILE */
+ tirgTmp = (tirgTmp << 8) + 2; /* D2 DIRECTORY */
+ tirgTmp = (tirgTmp << 8) + 0; /* P0 FILE NAME */
+ filePtr.p->fileName[3] = tirgTmp;
+ /* --------------------------------------------------------------------- */
+ /* THE NAME BECOMES /D2/DBDICT/P0.SYSFILE */
+ /* --------------------------------------------------------------------- */
+}//Dbdih::initRestorableGciFiles()
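+
+#if 0
+/**
+ * Illustrative sketch only, kept disabled: packFileNameWord is a
+ * hypothetical helper (not used anywhere) that shows how fileName[3]
+ * is packed above from four assumed byte fields. For the first sysfile:
+ * version = 1, suffix = 6 (.SYSFILE), dir = 1 (D1), fileNo = 0 (P0),
+ * giving 0x01060100 and the name /D1/DBDICT/P0.SYSFILE.
+ */
+static Uint32 packFileNameWord(Uint32 version, Uint32 suffix,
+                               Uint32 dir, Uint32 fileNo)
+{
+  Uint32 w = version;           // FILE NAME VERSION
+  w = (w << 8) + suffix;        // file type suffix, e.g. 6 = .SYSFILE
+  w = (w << 8) + dir;           // directory, e.g. 1 = D1, 2 = D2
+  w = (w << 8) + fileNo;        // file number, e.g. 0 = P0
+  return w;
+}
+#endif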
+
+void Dbdih::initTable(TabRecordPtr tabPtr)
+{
+ tabPtr.p->noOfFragChunks = 0;
+ tabPtr.p->method = TabRecord::NOTDEFINED;
+ tabPtr.p->tabStatus = TabRecord::TS_IDLE;
+ tabPtr.p->noOfWords = 0;
+ tabPtr.p->noPages = 0;
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
+ tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
+ tabPtr.p->noOfBackups = 0;
+ tabPtr.p->kvalue = 0;
+ tabPtr.p->hashpointer = (Uint32)-1;
+ tabPtr.p->mask = 0;
+ tabPtr.p->storedTable = 1;
+ tabPtr.p->tabErrorCode = 0;
+ tabPtr.p->schemaVersion = (Uint32)-1;
+ tabPtr.p->tabRemoveNode = RNIL;
+ tabPtr.p->totalfragments = (Uint32)-1;
+ tabPtr.p->connectrec = RNIL;
+ tabPtr.p->tabFile[0] = RNIL;
+ tabPtr.p->tabFile[1] = RNIL;
+ tabPtr.p->m_dropTab.tabUserRef = 0;
+ tabPtr.p->m_dropTab.tabUserPtr = RNIL;
+ Uint32 i;
+ for (i = 0; i < MAX_NDB_NODES; i++) {
+ tabPtr.p->startFid[i] = RNIL;
+ }//for
+ for (i = 0; i < 8; i++) {
+ tabPtr.p->pageRef[i] = RNIL;
+ }//for
+ tabPtr.p->tableType = DictTabInfo::UndefTableType;
+}//Dbdih::initTable()
+
+/*************************************************************************/
+/* */
+/* MODULE: INIT_TABLE_FILES */
+/* DESCRIPTION: THE SUBROUTINE SETS UP THE FILES THAT REFERS TO THE*/
+/* FILES THAT KEEP THE TABLE FRAGMENTATION DESCRIPTION. */
+/*************************************************************************/
+void Dbdih::initTableFile(TabRecordPtr tabPtr)
+{
+ Uint32 titfTmp;
+ FileRecordPtr filePtr;
+ seizeFile(filePtr);
+ filePtr.p->tabRef = tabPtr.i;
+ filePtr.p->fileType = FileRecord::TABLE_FILE;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ filePtr.p->fileStatus = FileRecord::CLOSED;
+ tabPtr.p->tabFile[0] = filePtr.i;
+ filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */
+ filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */
+ filePtr.p->fileName[2] = tabPtr.i; /* Stid FILE NAME */
+ titfTmp = 1; /* FILE NAME VERSION 1 */
+ titfTmp = (titfTmp << 8) + 3; /* .FRAGLIST */
+ titfTmp = (titfTmp << 8) + 1; /* D1 DIRECTORY */
+ titfTmp = (titfTmp << 8) + 255; /* P PART IGNORED */
+ filePtr.p->fileName[3] = titfTmp;
+ /* --------------------------------------------------------------------- */
+ /* THE NAME BECOMES /D1/DBDICT/Stid.FRAGLIST */
+ /* --------------------------------------------------------------------- */
+ seizeFile(filePtr);
+ filePtr.p->tabRef = tabPtr.i;
+ filePtr.p->fileType = FileRecord::TABLE_FILE;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ filePtr.p->fileStatus = FileRecord::CLOSED;
+ tabPtr.p->tabFile[1] = filePtr.i;
+ filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */
+ filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */
+ filePtr.p->fileName[2] = tabPtr.i; /* Stid FILE NAME */
+ titfTmp = 1; /* FILE NAME VERSION 1 */
+ titfTmp = (titfTmp << 8) + 3; /* .FRAGLIST */
+ titfTmp = (titfTmp << 8) + 2; /* D2 DIRECTORY */
+ titfTmp = (titfTmp << 8) + 255; /* P PART IGNORED */
+ filePtr.p->fileName[3] = titfTmp;
+ /* --------------------------------------------------------------------- */
+ /* THE NAME BECOMES /D2/DBDICT/Stid.FRAGLIST */
+ /* --------------------------------------------------------------------- */
+}//Dbdih::initTableFile()
+
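+/*************************************************************************/
+/* INITIALISE THE RECORD POOLS ONE STEP AT A TIME. EACH STEP RUNS IN */
+/* ITS OWN CONTINUEB REAL-TIME BREAK (SEE THE END OF THE ROUTINE) AND */
+/* THE FINAL STEP REPORTS BACK WITH READ_CONFIG_CONF. */
+/*************************************************************************/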
+void Dbdih::initialiseRecordsLab(Signal* signal,
+ Uint32 stepNo, Uint32 retRef, Uint32 retData)
+{
+ switch (stepNo) {
+ case 0:
+ jam();
+ initCommonData();
+ break;
+ case 1:{
+ ApiConnectRecordPtr apiConnectptr;
+ jam();
+ /******** INITIALIZING API CONNECT RECORDS ********/
+ for (apiConnectptr.i = 0; apiConnectptr.i < capiConnectFileSize; apiConnectptr.i++) {
+ refresh_watch_dog();
+ ptrAss(apiConnectptr, apiConnectRecord);
+ apiConnectptr.p->nextApi = RNIL;
+ }//for
+ jam();
+ break;
+ }
+ case 2:{
+ ConnectRecordPtr connectPtr;
+ jam();
+ /****** CONNECT ******/
+ for (connectPtr.i = 0; connectPtr.i < cconnectFileSize; connectPtr.i++) {
+ refresh_watch_dog();
+ ptrAss(connectPtr, connectRecord);
+ connectPtr.p->userpointer = RNIL;
+ connectPtr.p->userblockref = ZNIL;
+ connectPtr.p->connectState = ConnectRecord::FREE;
+ connectPtr.p->table = RNIL;
+ connectPtr.p->nfConnect = connectPtr.i + 1;
+ }//for
+ connectPtr.i = cconnectFileSize - 1;
+ ptrAss(connectPtr, connectRecord);
+ connectPtr.p->nfConnect = RNIL;
+ cfirstconnect = 0;
+ break;
+ }
+ case 3:
+ {
+ FileRecordPtr filePtr;
+ jam();
+ /******** INITIALIZING FILE RECORDS ********/
+ for (filePtr.i = 0; filePtr.i < cfileFileSize; filePtr.i++) {
+ ptrAss(filePtr, fileRecord);
+ filePtr.p->nextFile = filePtr.i + 1;
+ filePtr.p->fileStatus = FileRecord::CLOSED;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ }//for
+ filePtr.i = cfileFileSize - 1;
+ ptrAss(filePtr, fileRecord);
+ filePtr.p->nextFile = RNIL;
+ cfirstfreeFile = 0;
+ initRestorableGciFiles();
+ break;
+ }
+ case 4:
+ jam();
+ initialiseFragstore();
+ break;
+ case 5:
+ {
+ jam();
+ /******* NODE GROUP RECORD ******/
+ /******* NODE RECORD ******/
+ NodeGroupRecordPtr loopNGPtr;
+ for (loopNGPtr.i = 0; loopNGPtr.i < MAX_NDB_NODES; loopNGPtr.i++) {
+ ptrAss(loopNGPtr, nodeGroupRecord);
+ loopNGPtr.p->nodesInGroup[0] = RNIL;
+ loopNGPtr.p->nodesInGroup[1] = RNIL;
+ loopNGPtr.p->nodesInGroup[2] = RNIL;
+ loopNGPtr.p->nodesInGroup[3] = RNIL;
+ loopNGPtr.p->nextReplicaNode = 0;
+ loopNGPtr.p->nodeCount = 0;
+ loopNGPtr.p->activeTakeOver = false;
+ }//for
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 0; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ ptrAss(nodePtr, nodeRecord);
+ new (nodePtr.p) NodeRecord();
+ }//for
+ break;
+ }
+ case 6:
+ {
+ PageRecordPtr pagePtr;
+ jam();
+ /******* PAGE RECORD ******/
+ for (pagePtr.i = 0; pagePtr.i < cpageFileSize; pagePtr.i++) {
+ refresh_watch_dog();
+ ptrAss(pagePtr, pageRecord);
+ pagePtr.p->nextfreepage = pagePtr.i + 1;
+ }//for
+ pagePtr.i = cpageFileSize - 1;
+ ptrAss(pagePtr, pageRecord);
+ pagePtr.p->nextfreepage = RNIL;
+ cfirstfreepage = 0;
+ break;
+ }
+ case 7:
+ {
+ ReplicaRecordPtr initReplicaPtr;
+ jam();
+ /******* REPLICA RECORD ******/
+ for (initReplicaPtr.i = 0; initReplicaPtr.i < creplicaFileSize;
+ initReplicaPtr.i++) {
+ refresh_watch_dog();
+ ptrAss(initReplicaPtr, replicaRecord);
+ initReplicaPtr.p->lcpIdStarted = 0;
+ initReplicaPtr.p->lcpOngoingFlag = false;
+ initReplicaPtr.p->nextReplica = initReplicaPtr.i + 1;
+ }//for
+ initReplicaPtr.i = creplicaFileSize - 1;
+ ptrAss(initReplicaPtr, replicaRecord);
+ initReplicaPtr.p->nextReplica = RNIL;
+ cnoFreeReplicaRec = creplicaFileSize;
+ cfirstfreeReplica = 0;
+ break;
+ }
+ case 8:
+ {
+ TabRecordPtr loopTabptr;
+ jam();
+ /********* TAB-DESCRIPTOR ********/
+ for (loopTabptr.i = 0; loopTabptr.i < ctabFileSize; loopTabptr.i++) {
+ ptrAss(loopTabptr, tabRecord);
+ refresh_watch_dog();
+ initTable(loopTabptr);
+ }//for
+ break;
+ }
+ case 9:
+ {
+ TakeOverRecordPtr takeOverPtr;
+ jam();
+ cfirstfreeTakeOver = RNIL;
+ for (takeOverPtr.i = 0; takeOverPtr.i < MAX_NDB_NODES; takeOverPtr.i++) {
+ ptrAss(takeOverPtr, takeOverRecord);
+ initTakeOver(takeOverPtr);
+ releaseTakeOver(takeOverPtr.i);
+ }//for
+
+ ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = retData;
+ sendSignal(retRef, GSN_READ_CONFIG_CONF, signal,
+ ReadConfigConf::SignalLength, JBB);
+ return;
+ break;
+ }
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ jam();
+ /* ---------------------------------------------------------------------- */
+ /* SEND REAL-TIME BREAK DURING INIT OF VARIABLES DURING SYSTEM RESTART. */
+ /* ---------------------------------------------------------------------- */
+ signal->theData[0] = DihContinueB::ZINITIALISE_RECORDS;
+ signal->theData[1] = stepNo + 1;
+ signal->theData[2] = retRef;
+ signal->theData[3] = retData;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
+}//Dbdih::initialiseRecordsLab()
+
+/*************************************************************************/
+/* INSERT THE NODE INTO THE LINKED LIST OF NODES INVOLVED IN ALL */
+/* DISTRIBUTED PROTOCOLS (EXCEPT GCP PROTOCOL THAT USES THE DIH */
+/* LINKED LIST INSTEAD). */
+/*************************************************************************/
+void Dbdih::insertAlive(NodeRecordPtr newNodePtr)
+{
+ NodeRecordPtr nodePtr;
+
+ nodePtr.i = cfirstAliveNode;
+ if (nodePtr.i == RNIL) {
+ jam();
+ cfirstAliveNode = newNodePtr.i;
+ } else {
+ do {
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->nextNode == RNIL) {
+ jam();
+ nodePtr.p->nextNode = newNodePtr.i;
+ break;
+ } else {
+ jam();
+ nodePtr.i = nodePtr.p->nextNode;
+ }//if
+ } while (1);
+ }//if
+ newNodePtr.p->nextNode = RNIL;
+}//Dbdih::insertAlive()
+
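+/*************************************************************************/
+/* ADD A NODE AS A BACKUP REPLICA OF THE FRAGMENT. THE NEW NODE IS */
+/* PLACED FIRST AMONG THE BACKUPS (INDEX 1) WHILE THE PRIMARY IN */
+/* INDEX 0 IS LEFT IN PLACE. */
+/*************************************************************************/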
+void Dbdih::insertBackup(FragmentstorePtr fragPtr, Uint32 nodeId)
+{
+ for (Uint32 i = fragPtr.p->fragReplicas; i > 1; i--) {
+ jam();
+ ndbrequire(i < MAX_REPLICAS && i > 0);
+ fragPtr.p->activeNodes[i] = fragPtr.p->activeNodes[i - 1];
+ }//for
+ fragPtr.p->activeNodes[1] = nodeId;
+ fragPtr.p->fragReplicas++;
+}//Dbdih::insertBackup()
+
+void Dbdih::insertDeadNode(NodeRecordPtr newNodePtr)
+{
+ NodeRecordPtr nodePtr;
+
+ nodePtr.i = cfirstDeadNode;
+ if (nodePtr.i == RNIL) {
+ jam();
+ cfirstDeadNode = newNodePtr.i;
+ } else {
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->nextNode == RNIL) {
+ jam();
+ nodePtr.p->nextNode = newNodePtr.i;
+ break;
+ } else {
+ jam();
+ nodePtr.i = nodePtr.p->nextNode;
+ }//if
+ } while (1);
+ }//if
+ newNodePtr.p->nextNode = RNIL;
+}//Dbdih::insertDeadNode()
+
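+/*************************************************************************/
+/* APPEND THE REPLICA TO THE END OF THE LIST OF OLD STORED REPLICAS */
+/* OF THE FRAGMENT. */
+/*************************************************************************/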
+void Dbdih::linkOldStoredReplica(FragmentstorePtr fragPtr,
+ ReplicaRecordPtr replicatePtr)
+{
+ ReplicaRecordPtr losReplicaPtr;
+
+ replicatePtr.p->nextReplica = RNIL;
+ fragPtr.p->noOldStoredReplicas++;
+ losReplicaPtr.i = fragPtr.p->oldStoredReplicas;
+ if (losReplicaPtr.i == RNIL) {
+ jam();
+ fragPtr.p->oldStoredReplicas = replicatePtr.i;
+ return;
+ }//if
+ ptrCheckGuard(losReplicaPtr, creplicaFileSize, replicaRecord);
+ while (losReplicaPtr.p->nextReplica != RNIL) {
+ jam();
+ losReplicaPtr.i = losReplicaPtr.p->nextReplica;
+ ptrCheckGuard(losReplicaPtr, creplicaFileSize, replicaRecord);
+ }//while
+ losReplicaPtr.p->nextReplica = replicatePtr.i;
+}//Dbdih::linkOldStoredReplica()
+
+void Dbdih::linkStoredReplica(FragmentstorePtr fragPtr,
+ ReplicaRecordPtr replicatePtr)
+{
+ ReplicaRecordPtr lsrReplicaPtr;
+
+ fragPtr.p->noStoredReplicas++;
+ replicatePtr.p->nextReplica = RNIL;
+ lsrReplicaPtr.i = fragPtr.p->storedReplicas;
+ if (fragPtr.p->storedReplicas == RNIL) {
+ jam();
+ fragPtr.p->storedReplicas = replicatePtr.i;
+ return;
+ }//if
+ ptrCheckGuard(lsrReplicaPtr, creplicaFileSize, replicaRecord);
+ while (lsrReplicaPtr.p->nextReplica != RNIL) {
+ jam();
+ lsrReplicaPtr.i = lsrReplicaPtr.p->nextReplica;
+ ptrCheckGuard(lsrReplicaPtr, creplicaFileSize, replicaRecord);
+ }//while
+ lsrReplicaPtr.p->nextReplica = replicatePtr.i;
+}//Dbdih::linkStoredReplica()
+
+/*************************************************************************/
+/* MAKE NODE GROUPS BASED ON THE LIST OF NODES RECEIVED FROM CNTR */
+/*************************************************************************/
+void Dbdih::makeNodeGroups(Uint32 nodeArray[])
+{
+ NodeRecordPtr mngNodeptr;
+ Uint32 tmngNode;
+ Uint32 tmngNodeGroup;
+ Uint32 tmngLimit;
+ Uint32 i;
+
+ /**-----------------------------------------------------------------------
+ * ASSIGN ALL ACTIVE NODES INTO NODE GROUPS. HOT SPARE NODES ARE ASSIGNED
+ * TO NODE GROUP ZNIL
+ *-----------------------------------------------------------------------*/
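+ /**-----------------------------------------------------------------------
+ * EXAMPLE (FOLLOWS FROM THE LOOP BELOW): WITH TWO REPLICAS AND FOUR
+ * ACTIVE NODES IN nodeArray, NODE GROUP 0 GETS THE FIRST TWO NODES AND
+ * NODE GROUP 1 THE NEXT TWO, GIVING cnoOfNodeGroups = 2.
+ *-----------------------------------------------------------------------*/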
+ tmngNodeGroup = 0;
+ tmngLimit = csystemnodes - cnoHotSpare;
+ ndbrequire(tmngLimit < MAX_NDB_NODES);
+ for (i = 0; i < tmngLimit; i++) {
+ NodeGroupRecordPtr NGPtr;
+ jam();
+ tmngNode = nodeArray[i];
+ mngNodeptr.i = tmngNode;
+ ptrCheckGuard(mngNodeptr, MAX_NDB_NODES, nodeRecord);
+ mngNodeptr.p->nodeGroup = tmngNodeGroup;
+ NGPtr.i = tmngNodeGroup;
+ ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
+ arrGuard(NGPtr.p->nodeCount, MAX_REPLICAS);
+ NGPtr.p->nodesInGroup[NGPtr.p->nodeCount++] = mngNodeptr.i;
+ if (NGPtr.p->nodeCount == cnoReplicas) {
+ jam();
+ tmngNodeGroup++;
+ }//if
+ }//for
+ cnoOfNodeGroups = tmngNodeGroup;
+ ndbrequire(csystemnodes < MAX_NDB_NODES);
+ for (i = tmngLimit + 1; i < csystemnodes; i++) {
+ jam();
+ tmngNode = nodeArray[i];
+ mngNodeptr.i = tmngNode;
+ ptrCheckGuard(mngNodeptr, MAX_NDB_NODES, nodeRecord);
+ mngNodeptr.p->nodeGroup = ZNIL;
+ }//for
+ for(i = 0; i < MAX_NDB_NODES; i++){
+ jam();
+ Sysfile::setNodeGroup(i, SYSFILE->nodeGroups, NO_NODE_GROUP_ID);
+ }//for
+ for (mngNodeptr.i = 1; mngNodeptr.i < MAX_NDB_NODES; mngNodeptr.i++) {
+ jam();
+ ptrAss(mngNodeptr, nodeRecord);
+ if (mngNodeptr.p->nodeGroup != ZNIL) {
+ jam();
+ Sysfile::setNodeGroup(mngNodeptr.i, SYSFILE->nodeGroups, mngNodeptr.p->nodeGroup);
+ }//if
+ }//for
+}//Dbdih::makeNodeGroups()
+
+/**
+ * On node failure QMGR asks DIH about node groups. This is
+ * a direct signal (function call in same process). Input is
+ * bitmask of surviving nodes. The routine is not concerned
+ * about node count. Reply is one of:
+ * 1) win - we can survive, and nobody else can
+ * 2) lose - we cannot survive
+ * 3) partition - we can survive but there could be others
+ */
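+/**
+ * Example of the ArbitCheck logic below: with node groups {1,2} and
+ * {3,4}, a surviving mask {1,2,3} gives Win (one group is complete and
+ * none is empty), {1,3} gives Partitioning (no group complete, none
+ * empty) and {3,4} alone gives Lose (group {1,2} is empty).
+ */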
+void Dbdih::execCHECKNODEGROUPSREQ(Signal* signal)
+{
+ jamEntry();
+ CheckNodeGroups* sd = (CheckNodeGroups*)&signal->theData[0];
+
+ bool direct = (sd->requestType & CheckNodeGroups::Direct);
+ bool ok = false;
+ switch(sd->requestType & ~CheckNodeGroups::Direct){
+ case CheckNodeGroups::ArbitCheck:{
+ ok = true;
+ jam();
+ unsigned missall = 0;
+ unsigned haveall = 0;
+ for (Uint32 i = 0; i < cnoOfNodeGroups; i++) {
+ jam();
+ NodeGroupRecordPtr ngPtr;
+ ngPtr.i = i;
+ ptrAss(ngPtr, nodeGroupRecord);
+ Uint32 count = 0;
+ for (Uint32 j = 0; j < ngPtr.p->nodeCount; j++) {
+ jam();
+ Uint32 nodeId = ngPtr.p->nodesInGroup[j];
+ if (sd->mask.get(nodeId)) {
+ jam();
+ count++;
+ }//if
+ }//for
+ if (count == 0) {
+ jam();
+ missall++;
+ }//if
+ if (count == ngPtr.p->nodeCount) {
+ haveall++;
+ }//if
+ }//for
+
+ if (missall) {
+ jam();
+ sd->output = CheckNodeGroups::Lose;
+ } else if (haveall) {
+ jam();
+ sd->output = CheckNodeGroups::Win;
+ } else {
+ jam();
+ sd->output = CheckNodeGroups::Partitioning;
+ }//if
+ }
+ break;
+ case CheckNodeGroups::GetNodeGroup:
+ ok = true;
+ sd->output = Sysfile::getNodeGroup(getOwnNodeId(), SYSFILE->nodeGroups);
+ break;
+ case CheckNodeGroups::GetNodeGroupMembers: {
+ ok = true;
+ Uint32 ownNodeGroup =
+ Sysfile::getNodeGroup(sd->nodeId, SYSFILE->nodeGroups);
+
+ sd->output = ownNodeGroup;
+ sd->mask.clear();
+
+ NodeGroupRecordPtr ngPtr;
+ ngPtr.i = ownNodeGroup;
+ ptrAss(ngPtr, nodeGroupRecord);
+ for (Uint32 j = 0; j < ngPtr.p->nodeCount; j++) {
+ jam();
+ sd->mask.set(ngPtr.p->nodesInGroup[j]);
+ }
+#if 0
+ for (int i = 0; i < MAX_NDB_NODES; i++) {
+ if (ownNodeGroup ==
+ Sysfile::getNodeGroup(i, SYSFILE->nodeGroups)) {
+ sd->mask.set(i);
+ }
+ }
+#endif
+ }
+ break;
+ }
+ ndbrequire(ok);
+
+ if (!direct)
+ sendSignal(sd->blockRef, GSN_CHECKNODEGROUPSCONF, signal,
+ CheckNodeGroups::SignalLength, JBB);
+}//Dbdih::execCHECKNODEGROUPSREQ()
+
+void Dbdih::makePrnList(ReadNodesConf * readNodes, Uint32 nodeArray[])
+{
+ cfirstAliveNode = RNIL;
+ ndbrequire(con_lineNodes > 0);
+ ndbrequire(csystemnodes < MAX_NDB_NODES);
+ for (Uint32 i = 0; i < csystemnodes; i++) {
+ NodeRecordPtr nodePtr;
+ jam();
+ nodePtr.i = nodeArray[i];
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ new (nodePtr.p) NodeRecord();
+ if (NodeBitmask::get(readNodes->inactiveNodes, nodePtr.i) == false){
+ jam();
+ nodePtr.p->nodeStatus = NodeRecord::ALIVE;
+ nodePtr.p->useInTransactions = true;
+ nodePtr.p->copyCompleted = true;
+ nodePtr.p->m_inclDihLcp = true;
+ insertAlive(nodePtr);
+ } else {
+ jam();
+ nodePtr.p->nodeStatus = NodeRecord::DEAD;
+ insertDeadNode(nodePtr);
+ }//if
+ }//for
+}//Dbdih::makePrnList()
+
+/*************************************************************************/
+/* A NEW CRASHED REPLICA IS ADDED BY A NODE FAILURE. */
+/*************************************************************************/
+void Dbdih::newCrashedReplica(Uint32 nodeId, ReplicaRecordPtr ncrReplicaPtr)
+{
+ /*----------------------------------------------------------------------*/
+ /* SET THE REPLICA_LAST_GCI OF THE CRASHED REPLICA TO LAST GCI */
+ /* EXECUTED BY THE FAILED NODE. */
+ /*----------------------------------------------------------------------*/
+ /* WE HAVE A NEW CRASHED REPLICA. INITIATE CREATE GCI TO INDICATE */
+ /* THAT THE NEW REPLICA IS NOT STARTED YET AND REPLICA_LAST_GCI IS*/
+ /* SET TO -1 TO INDICATE THAT IT IS NOT DEAD YET. */
+ /*----------------------------------------------------------------------*/
+ arrGuard(ncrReplicaPtr.p->noCrashedReplicas + 1, 8);
+ ncrReplicaPtr.p->replicaLastGci[ncrReplicaPtr.p->noCrashedReplicas] =
+ SYSFILE->lastCompletedGCI[nodeId];
+ ncrReplicaPtr.p->noCrashedReplicas = ncrReplicaPtr.p->noCrashedReplicas + 1;
+ ncrReplicaPtr.p->createGci[ncrReplicaPtr.p->noCrashedReplicas] = 0;
+ ncrReplicaPtr.p->replicaLastGci[ncrReplicaPtr.p->noCrashedReplicas] =
+ (Uint32)-1;
+}//Dbdih::newCrashedReplica()
+
+/*************************************************************************/
+/* AT NODE FAILURE DURING START OF A NEW NODE WE NEED TO RESET A */
+/* SET OF VARIABLES CONTROLLING THE START AND INDICATING ONGOING */
+/* START OF A NEW NODE. */
+/*************************************************************************/
+void Dbdih::nodeResetStart()
+{
+ jam();
+ c_nodeStartMaster.startNode = RNIL;
+ c_nodeStartMaster.failNr = cfailurenr;
+ c_nodeStartMaster.activeState = false;
+ c_nodeStartMaster.blockGcp = false;
+ c_nodeStartMaster.blockLcp = false;
+ c_nodeStartMaster.m_outstandingGsn = 0;
+}//Dbdih::nodeResetStart()
+
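+/*************************************************************************/
+/* OPEN A FILE THROUGH NDBFS. THE FOUR FILE NAME WORDS PREPARED BY THE */
+/* INIT FILE ROUTINES ARE PASSED ON UNCHANGED IN THE FSOPENREQ SIGNAL. */
+/*************************************************************************/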
+void Dbdih::openFileRw(Signal* signal, FileRecordPtr filePtr)
+{
+ signal->theData[0] = reference();
+ signal->theData[1] = filePtr.i;
+ signal->theData[2] = filePtr.p->fileName[0];
+ signal->theData[3] = filePtr.p->fileName[1];
+ signal->theData[4] = filePtr.p->fileName[2];
+ signal->theData[5] = filePtr.p->fileName[3];
+ signal->theData[6] = FsOpenReq::OM_READWRITE;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+}//Dbdih::openFileRw()
+
+void Dbdih::openFileRo(Signal* signal, FileRecordPtr filePtr)
+{
+ signal->theData[0] = reference();
+ signal->theData[1] = filePtr.i;
+ signal->theData[2] = filePtr.p->fileName[0];
+ signal->theData[3] = filePtr.p->fileName[1];
+ signal->theData[4] = filePtr.p->fileName[2];
+ signal->theData[5] = filePtr.p->fileName[3];
+ signal->theData[6] = FsOpenReq::OM_READONLY;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+}//Dbdih::openFileRo()
+
+/*************************************************************************/
+/* REMOVE A CRASHED REPLICA BY PACKING THE ARRAY OF CREATED GCI AND*/
+/* THE LAST GCI OF THE CRASHED REPLICA. */
+/*************************************************************************/
+void Dbdih::packCrashedReplicas(ReplicaRecordPtr replicaPtr)
+{
+ ndbrequire(replicaPtr.p->noCrashedReplicas > 0);
+ ndbrequire(replicaPtr.p->noCrashedReplicas <= 8);
+ for (Uint32 i = 0; i < replicaPtr.p->noCrashedReplicas; i++) {
+ jam();
+ replicaPtr.p->createGci[i] = replicaPtr.p->createGci[i + 1];
+ replicaPtr.p->replicaLastGci[i] = replicaPtr.p->replicaLastGci[i + 1];
+ }//for
+ replicaPtr.p->noCrashedReplicas--;
+
+#ifdef VM_TRACE
+ for (Uint32 i = 0; i < replicaPtr.p->noCrashedReplicas; i++) {
+ jam();
+ ndbrequire(replicaPtr.p->createGci[i] != 0xF1F1F1F1);
+ ndbrequire(replicaPtr.p->replicaLastGci[i] != 0xF1F1F1F1);
+ }//for
+#endif
+}//Dbdih::packCrashedReplicas()
+
+void Dbdih::prepareReplicas(FragmentstorePtr fragPtr)
+{
+ ReplicaRecordPtr prReplicaPtr;
+ Uint32 prevReplica = RNIL;
+
+ /* --------------------------------------------------------------------- */
+ /* BEGIN BY LINKING ALL REPLICA RECORDS ONTO THE OLD STORED REPLICA*/
+ /* LIST. */
+ /* AT A SYSTEM RESTART OBVIOUSLY ALL NODES ARE OLD. */
+ /* --------------------------------------------------------------------- */
+ prReplicaPtr.i = fragPtr.p->storedReplicas;
+ while (prReplicaPtr.i != RNIL) {
+ jam();
+ prevReplica = prReplicaPtr.i;
+ ptrCheckGuard(prReplicaPtr, creplicaFileSize, replicaRecord);
+ prReplicaPtr.i = prReplicaPtr.p->nextReplica;
+ }//while
+ /* --------------------------------------------------------------------- */
+ /* LIST OF STORED REPLICAS WILL BE EMPTY NOW. */
+ /* --------------------------------------------------------------------- */
+ if (prevReplica != RNIL) {
+ prReplicaPtr.i = prevReplica;
+ ptrCheckGuard(prReplicaPtr, creplicaFileSize, replicaRecord);
+ prReplicaPtr.p->nextReplica = fragPtr.p->oldStoredReplicas;
+ fragPtr.p->oldStoredReplicas = fragPtr.p->storedReplicas;
+ fragPtr.p->storedReplicas = RNIL;
+ fragPtr.p->noOldStoredReplicas += fragPtr.p->noStoredReplicas;
+ fragPtr.p->noStoredReplicas = 0;
+ }//if
+}//Dbdih::prepareReplicas()
+
+void Dbdih::readFragment(RWFragment* rf, FragmentstorePtr fragPtr)
+{
+ Uint32 TreadFid = readPageWord(rf);
+ fragPtr.p->preferredPrimary = readPageWord(rf);
+ fragPtr.p->noStoredReplicas = readPageWord(rf);
+ fragPtr.p->noOldStoredReplicas = readPageWord(rf);
+ Uint32 TdistKey = readPageWord(rf);
+
+ ndbrequire(fragPtr.p->noStoredReplicas > 0);
+ ndbrequire(TreadFid == rf->fragId);
+ ndbrequire(TdistKey < 256);
+ if ((cstarttype == NodeState::ST_NODE_RESTART) ||
+ (cstarttype == NodeState::ST_INITIAL_NODE_RESTART)) {
+ jam();
+ fragPtr.p->distributionKey = TdistKey;
+ }//if
+}//Dbdih::readFragment()
+
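+/*************************************************************************/
+/* READ THE NEXT WORD FROM THE CURRENT TABLE PAGE. A PAGE HOLDS 2048 */
+/* WORDS AND READING RESUMES AT WORD 32 OF THE NEXT PAGE, SKIPPING */
+/* WHAT IS PRESUMABLY A PAGE HEADER AREA. */
+/*************************************************************************/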
+Uint32 Dbdih::readPageWord(RWFragment* rf)
+{
+ if (rf->wordIndex >= 2048) {
+ jam();
+ ndbrequire(rf->wordIndex == 2048);
+ rf->pageIndex++;
+ ndbrequire(rf->pageIndex < 8);
+ rf->rwfPageptr.i = rf->rwfTabPtr.p->pageRef[rf->pageIndex];
+ ptrCheckGuard(rf->rwfPageptr, cpageFileSize, pageRecord);
+ rf->wordIndex = 32;
+ }//if
+ Uint32 dataWord = rf->rwfPageptr.p->word[rf->wordIndex];
+ rf->wordIndex++;
+ return dataWord;
+}//Dbdih::readPageWord()
+
+void Dbdih::readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr)
+{
+ Uint32 i;
+ readReplicaPtr.p->procNode = readPageWord(rf);
+ readReplicaPtr.p->initialGci = readPageWord(rf);
+ readReplicaPtr.p->noCrashedReplicas = readPageWord(rf);
+ readReplicaPtr.p->nextLcp = readPageWord(rf);
+
+ for (i = 0; i < MAX_LCP_STORED; i++) {
+ readReplicaPtr.p->maxGciCompleted[i] = readPageWord(rf);
+ readReplicaPtr.p->maxGciStarted[i] = readPageWord(rf);
+ readReplicaPtr.p->lcpId[i] = readPageWord(rf);
+ readReplicaPtr.p->lcpStatus[i] = readPageWord(rf);
+ }//for
+ const Uint32 noCrashedReplicas = readReplicaPtr.p->noCrashedReplicas;
+ ndbrequire(noCrashedReplicas < 8);
+ for (i = 0; i < noCrashedReplicas; i++) {
+ readReplicaPtr.p->createGci[i] = readPageWord(rf);
+ readReplicaPtr.p->replicaLastGci[i] = readPageWord(rf);
+ ndbrequire(readReplicaPtr.p->createGci[i] != 0xF1F1F1F1);
+ ndbrequire(readReplicaPtr.p->replicaLastGci[i] != 0xF1F1F1F1);
+ }//for
+ for(i = noCrashedReplicas; i<8; i++){
+ readReplicaPtr.p->createGci[i] = readPageWord(rf);
+ readReplicaPtr.p->replicaLastGci[i] = readPageWord(rf);
+ // They are not initialized...
+ readReplicaPtr.p->createGci[i] = 0;
+ readReplicaPtr.p->replicaLastGci[i] = ~0;
+ }
+ /* ---------------------------------------------------------------------- */
+ /* IF THE LAST COMPLETED LOCAL CHECKPOINT IS VALID AND LARGER THAN */
+ /* THE LAST COMPLETED CHECKPOINT THEN WE WILL INVALIDATE THIS LOCAL */
+ /* CHECKPOINT FOR THIS REPLICA. */
+ /* ---------------------------------------------------------------------- */
+ Uint32 trraLcp = prevLcpNo(readReplicaPtr.p->nextLcp);
+ ndbrequire(trraLcp < MAX_LCP_STORED);
+ if ((readReplicaPtr.p->lcpStatus[trraLcp] == ZVALID) &&
+ (readReplicaPtr.p->lcpId[trraLcp] > SYSFILE->latestLCP_ID)) {
+ jam();
+ readReplicaPtr.p->lcpStatus[trraLcp] = ZINVALID;
+ }//if
+ /* ---------------------------------------------------------------------- */
+ /* WE ALSO HAVE TO INVALIDATE ANY LOCAL CHECKPOINTS THAT HAVE BEEN */
+ /* INVALIDATED BY MOVING BACK THE RESTART GCI. */
+ /* ---------------------------------------------------------------------- */
+ for (i = 0; i < MAX_LCP_STORED; i++) {
+ jam();
+ if ((readReplicaPtr.p->lcpStatus[i] == ZVALID) &&
+ (readReplicaPtr.p->maxGciStarted[i] > SYSFILE->newestRestorableGCI)) {
+ jam();
+ readReplicaPtr.p->lcpStatus[i] = ZINVALID;
+ }//if
+ }//for
+ /* ---------------------------------------------------------------------- */
+ /* WE WILL REMOVE ANY OCCURRENCES OF REPLICAS THAT HAVE CRASHED */
+ /* THAT ARE NO LONGER VALID DUE TO MOVING RESTART GCI BACKWARDS. */
+ /* ---------------------------------------------------------------------- */
+ removeTooNewCrashedReplicas(readReplicaPtr);
+ /* ---------------------------------------------------------------------- */
+ /* WE WILL REMOVE ANY OCCURRENCES OF REPLICAS THAT HAVE CRASHED */
+ /* THAT ARE NO LONGER VALID SINCE THEY ARE NO LONGER RESTORABLE. */
+ /* ---------------------------------------------------------------------- */
+ removeOldCrashedReplicas(readReplicaPtr);
+ /* --------------------------------------------------------------------- */
+ // We set the last GCI of the replica that was alive before the node
+ // crashed last time. We set it to the last GCI which the node participated in.
+ /* --------------------------------------------------------------------- */
+ ndbrequire(readReplicaPtr.p->noCrashedReplicas < 8);
+ readReplicaPtr.p->replicaLastGci[readReplicaPtr.p->noCrashedReplicas] =
+ SYSFILE->lastCompletedGCI[readReplicaPtr.p->procNode];
+ /* ---------------------------------------------------------------------- */
+ /* FIND PROCESSOR RECORD */
+ /* ---------------------------------------------------------------------- */
+}//Dbdih::readReplica()
+
+void Dbdih::readReplicas(RWFragment* rf, FragmentstorePtr fragPtr)
+{
+ Uint32 i;
+ ReplicaRecordPtr newReplicaPtr;
+ Uint32 noStoredReplicas = fragPtr.p->noStoredReplicas;
+ Uint32 noOldStoredReplicas = fragPtr.p->noOldStoredReplicas;
+ /* ----------------------------------------------------------------------- */
+ /* WE CLEAR THE NUMBER OF STORED REPLICAS SINCE IT WILL BE CALCULATED */
+ /* BY THE LINKING SUBROUTINES. */
+ /* ----------------------------------------------------------------------- */
+ fragPtr.p->noStoredReplicas = 0;
+ fragPtr.p->noOldStoredReplicas = 0;
+ Uint32 replicaIndex = 0;
+ ndbrequire(noStoredReplicas + noOldStoredReplicas <= MAX_REPLICAS);
+ for (i = 0; i < noStoredReplicas; i++) {
+ seizeReplicaRec(newReplicaPtr);
+ readReplica(rf, newReplicaPtr);
+ if (checkNodeAlive(newReplicaPtr.p->procNode)) {
+ jam();
+ ndbrequire(replicaIndex < MAX_REPLICAS);
+ fragPtr.p->activeNodes[replicaIndex] = newReplicaPtr.p->procNode;
+ replicaIndex++;
+ linkStoredReplica(fragPtr, newReplicaPtr);
+ } else {
+ jam();
+ linkOldStoredReplica(fragPtr, newReplicaPtr);
+ }//if
+ }//for
+ fragPtr.p->fragReplicas = noStoredReplicas;
+ for (i = 0; i < noOldStoredReplicas; i++) {
+ jam();
+ seizeReplicaRec(newReplicaPtr);
+ readReplica(rf, newReplicaPtr);
+ linkOldStoredReplica(fragPtr, newReplicaPtr);
+ }//for
+}//Dbdih::readReplicas()
+
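+/*************************************************************************/
+/* READ THE RESTART INFORMATION (ONE PAGE) FROM THE GIVEN OPEN FILE. */
+/*************************************************************************/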
+void Dbdih::readRestorableGci(Signal* signal, FileRecordPtr filePtr)
+{
+ signal->theData[0] = filePtr.p->fileRef;
+ signal->theData[1] = reference();
+ signal->theData[2] = filePtr.i;
+ signal->theData[3] = ZLIST_OF_PAIRS;
+ signal->theData[4] = ZVAR_NO_CRESTART_INFO;
+ signal->theData[5] = 1;
+ signal->theData[6] = 0;
+ signal->theData[7] = 0;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
+}//Dbdih::readRestorableGci()
+
+void Dbdih::readTabfile(Signal* signal, TabRecord* tab, FileRecordPtr filePtr)
+{
+ signal->theData[0] = filePtr.p->fileRef;
+ signal->theData[1] = reference();
+ signal->theData[2] = filePtr.i;
+ signal->theData[3] = ZLIST_OF_PAIRS;
+ signal->theData[4] = ZVAR_NO_WORD;
+ signal->theData[5] = tab->noPages;
+ for (Uint32 i = 0; i < tab->noPages; i++) {
+ signal->theData[6 + (2 * i)] = tab->pageRef[i];
+ signal->theData[7 + (2 * i)] = i;
+ }//for
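+ // The fixed signal length 22 presumably covers the worst case of
+ // 6 + 2 * 8 words, i.e. a table with the maximum of eight pages.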
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 22, JBA);
+}//Dbdih::readTabfile()
+
+void Dbdih::releasePage(Uint32 pageIndex)
+{
+ PageRecordPtr pagePtr;
+ pagePtr.i = pageIndex;
+ ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
+ pagePtr.p->nextfreepage = cfirstfreepage;
+ cfirstfreepage = pagePtr.i;
+}//Dbdih::releasePage()
+
+void Dbdih::releaseTabPages(Uint32 tableId)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ ndbrequire(tabPtr.p->noPages <= 8);
+ for (Uint32 i = 0; i < tabPtr.p->noPages; i++) {
+ jam();
+ releasePage(tabPtr.p->pageRef[i]);
+ }//for
+ tabPtr.p->noPages = 0;
+}//Dbdih::releaseTabPages()
+
+/*************************************************************************/
+/* REMOVE NODE FROM SET OF ALIVE NODES. */
+/*************************************************************************/
+void Dbdih::removeAlive(NodeRecordPtr removeNodePtr)
+{
+ NodeRecordPtr nodePtr;
+
+ nodePtr.i = cfirstAliveNode;
+ if (nodePtr.i == removeNodePtr.i) {
+ jam();
+ cfirstAliveNode = removeNodePtr.p->nextNode;
+ return;
+ }//if
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->nextNode == removeNodePtr.i) {
+ jam();
+ nodePtr.p->nextNode = removeNodePtr.p->nextNode;
+ break;
+ } else {
+ jam();
+ nodePtr.i = nodePtr.p->nextNode;
+ }//if
+ } while (1);
+}//Dbdih::removeAlive()
+
+/*************************************************************************/
+/* REMOVE NODE FROM SET OF DEAD NODES. */
+/*************************************************************************/
+void Dbdih::removeDeadNode(NodeRecordPtr removeNodePtr)
+{
+ NodeRecordPtr nodePtr;
+
+ nodePtr.i = cfirstDeadNode;
+ if (nodePtr.i == removeNodePtr.i) {
+ jam();
+ cfirstDeadNode = removeNodePtr.p->nextNode;
+ return;
+ }//if
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->nextNode == removeNodePtr.i) {
+ jam();
+ nodePtr.p->nextNode = removeNodePtr.p->nextNode;
+ break;
+ } else {
+ jam();
+ nodePtr.i = nodePtr.p->nextNode;
+ }//if
+ } while (1);
+}//Dbdih::removeDeadNode()
+
+/*---------------------------------------------------------------*/
+/* REMOVE REPLICAS OF A FAILED NODE FROM LIST OF STORED */
+/* REPLICAS AND MOVE IT TO THE LIST OF OLD STORED REPLICAS.*/
+/* ALSO UPDATE THE CRASHED REPLICA INFORMATION. */
+/*---------------------------------------------------------------*/
+void Dbdih::removeNodeFromStored(Uint32 nodeId,
+ FragmentstorePtr fragPtr,
+ ReplicaRecordPtr replicatePtr)
+{
+ newCrashedReplica(nodeId, replicatePtr);
+ removeStoredReplica(fragPtr, replicatePtr);
+ linkOldStoredReplica(fragPtr, replicatePtr);
+ ndbrequire(fragPtr.p->storedReplicas != RNIL);
+}//Dbdih::removeNodeFromStored()
+
+/*************************************************************************/
+/* REMOVE ANY OLD CRASHED REPLICAS THAT ARE NOT RESTORABLE ANY MORE*/
+/*************************************************************************/
+void Dbdih::removeOldCrashedReplicas(ReplicaRecordPtr rocReplicaPtr)
+{
+ while (rocReplicaPtr.p->noCrashedReplicas > 0) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* ONLY IF THERE IS AT LEAST ONE CRASHED REPLICA CAN WE REMOVE ANY. */
+ /* --------------------------------------------------------------------- */
+ if (rocReplicaPtr.p->replicaLastGci[0] < SYSFILE->oldestRestorableGCI){
+ jam();
+ /* ------------------------------------------------------------------- */
+ /* THIS CRASHED REPLICA HAS BECOME EXTINCT AND MUST BE REMOVED TO */
+ /* GIVE SPACE FOR NEW CRASHED REPLICAS. */
+ /* ------------------------------------------------------------------- */
+ packCrashedReplicas(rocReplicaPtr);
+ } else {
+ break;
+ }//if
+ }//while
+ if (rocReplicaPtr.p->createGci[0] < SYSFILE->keepGCI){
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* MOVE FORWARD THE CREATE GCI TO A GCI THAT CAN BE USED. WE HAVE */
+ /* NO CERTAINTY IN FINDING ANY LOG RECORDS FROM OLDER GCI'S. */
+ /* --------------------------------------------------------------------- */
+ rocReplicaPtr.p->createGci[0] = SYSFILE->keepGCI;
+ ndbrequire(SYSFILE->keepGCI != 0xF1F1F1F1);
+ }//if
+}//Dbdih::removeOldCrashedReplicas()
+
+void Dbdih::removeOldStoredReplica(FragmentstorePtr fragPtr,
+ ReplicaRecordPtr replicatePtr)
+{
+ ReplicaRecordPtr rosTmpReplicaPtr;
+ ReplicaRecordPtr rosPrevReplicaPtr;
+
+ fragPtr.p->noOldStoredReplicas--;
+ if (fragPtr.p->oldStoredReplicas == replicatePtr.i) {
+ jam();
+ fragPtr.p->oldStoredReplicas = replicatePtr.p->nextReplica;
+ } else {
+ rosPrevReplicaPtr.i = fragPtr.p->oldStoredReplicas;
+ ptrCheckGuard(rosPrevReplicaPtr, creplicaFileSize, replicaRecord);
+ rosTmpReplicaPtr.i = rosPrevReplicaPtr.p->nextReplica;
+ while (rosTmpReplicaPtr.i != replicatePtr.i) {
+ jam();
+ rosPrevReplicaPtr.i = rosTmpReplicaPtr.i;
+ ptrCheckGuard(rosPrevReplicaPtr, creplicaFileSize, replicaRecord);
+ ptrCheckGuard(rosTmpReplicaPtr, creplicaFileSize, replicaRecord);
+ rosTmpReplicaPtr.i = rosTmpReplicaPtr.p->nextReplica;
+ }//while
+ rosPrevReplicaPtr.p->nextReplica = replicatePtr.p->nextReplica;
+ }//if
+}//Dbdih::removeOldStoredReplica()
+
+void Dbdih::removeStoredReplica(FragmentstorePtr fragPtr,
+ ReplicaRecordPtr replicatePtr)
+{
+ ReplicaRecordPtr rsrTmpReplicaPtr;
+ ReplicaRecordPtr rsrPrevReplicaPtr;
+
+ fragPtr.p->noStoredReplicas--;
+ if (fragPtr.p->storedReplicas == replicatePtr.i) {
+ jam();
+ fragPtr.p->storedReplicas = replicatePtr.p->nextReplica;
+ } else {
+ jam();
+ rsrPrevReplicaPtr.i = fragPtr.p->storedReplicas;
+ rsrTmpReplicaPtr.i = fragPtr.p->storedReplicas;
+ ptrCheckGuard(rsrTmpReplicaPtr, creplicaFileSize, replicaRecord);
+ rsrTmpReplicaPtr.i = rsrTmpReplicaPtr.p->nextReplica;
+ while (rsrTmpReplicaPtr.i != replicatePtr.i) {
+ jam();
+ rsrPrevReplicaPtr.i = rsrTmpReplicaPtr.i;
+ ptrCheckGuard(rsrTmpReplicaPtr, creplicaFileSize, replicaRecord);
+ rsrTmpReplicaPtr.i = rsrTmpReplicaPtr.p->nextReplica;
+ }//while
+ ptrCheckGuard(rsrPrevReplicaPtr, creplicaFileSize, replicaRecord);
+ rsrPrevReplicaPtr.p->nextReplica = replicatePtr.p->nextReplica;
+ }//if
+}//Dbdih::removeStoredReplica()
+
+/*************************************************************************/
+/* REMOVE ALL TOO NEW CRASHED REPLICAS THAT ARE IN THIS REPLICA. */
+/*************************************************************************/
+void Dbdih::removeTooNewCrashedReplicas(ReplicaRecordPtr rtnReplicaPtr)
+{
+ while (rtnReplicaPtr.p->noCrashedReplicas > 0) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* REMOVE ALL REPLICAS THAT ONLY LIVED IN A PERIOD THAT HAS BEEN */
+ /* REMOVED FROM THE RESTART INFORMATION SINCE THE RESTART FAILED */
+ /* TOO MANY TIMES. */
+ /* --------------------------------------------------------------------- */
+ arrGuard(rtnReplicaPtr.p->noCrashedReplicas - 1, 8);
+ if (rtnReplicaPtr.p->createGci[rtnReplicaPtr.p->noCrashedReplicas - 1] >
+ SYSFILE->newestRestorableGCI){
+ jam();
+ rtnReplicaPtr.p->createGci[rtnReplicaPtr.p->noCrashedReplicas - 1] =
+ (Uint32)-1;
+ rtnReplicaPtr.p->replicaLastGci[rtnReplicaPtr.p->noCrashedReplicas - 1] =
+ (Uint32)-1;
+ rtnReplicaPtr.p->noCrashedReplicas--;
+ } else {
+ break;
+ }//if
+ }//while
+}//Dbdih::removeTooNewCrashedReplicas()
+
+/*************************************************************************/
+/* */
+/* MODULE: SEARCH FOR POSSIBLE REPLICAS THAT CAN HANDLE THE GLOBAL */
+/* CHECKPOINT WITHOUT NEEDING ANY EXTRA LOGGING FACILITIES.*/
+/* A MAXIMUM OF FOUR NODES IS RETRIEVED. */
+/*************************************************************************/
+void Dbdih::searchStoredReplicas(FragmentstorePtr fragPtr)
+{
+ Uint32 nextReplicaPtrI;
+ ConstPtr<ReplicaRecord> replicaPtr;
+
+ replicaPtr.i = fragPtr.p->storedReplicas;
+ while (replicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ nextReplicaPtrI = replicaPtr.p->nextReplica;
+ NodeRecordPtr nodePtr;
+ nodePtr.i = replicaPtr.p->procNode;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ case Sysfile::NS_ActiveMissed_2:{
+ /* ----------------------------------------------------------------- */
+ /* INITIALISE THE CREATE REPLICA STRUCTURE THAT IS USED FOR SENDING*/
+ /* TO LQH START_FRAGREQ. */
+ /* SET THE DATA NODE WHERE THE LOCAL CHECKPOINT IS FOUND. ALSO */
+ /* SET A REFERENCE TO THE REPLICA POINTER OF THAT. */
+ /* ----------------------------------------------------------------- */
+ CreateReplicaRecordPtr createReplicaPtr;
+ createReplicaPtr.i = cnoOfCreateReplicas;
+ ptrCheckGuard(createReplicaPtr, 4, createReplicaRecord);
+ cnoOfCreateReplicas++;
+ createReplicaPtr.p->dataNodeId = replicaPtr.p->procNode;
+ createReplicaPtr.p->replicaRec = replicaPtr.i;
+ /* ----------------------------------------------------------------- */
+ /* WE NEED TO SEARCH FOR A PROPER LOCAL CHECKPOINT TO USE FOR THE */
+ /* SYSTEM RESTART. */
+ /* ----------------------------------------------------------------- */
+ Uint32 startGci;
+ Uint32 startLcpNo;
+ Uint32 stopGci = SYSFILE->newestRestorableGCI;
+ bool result = findStartGci(replicaPtr,
+ stopGci,
+ startGci,
+ startLcpNo);
+ if (!result) {
+ jam();
+ /* --------------------------------------------------------------- */
+ /* WE COULD NOT FIND ANY LOCAL CHECKPOINT. THE FRAGMENT THUS DOES NOT*/
+ /* CONTAIN ANY VALID LOCAL CHECKPOINT. IT DOES HOWEVER CONTAIN A */
+ /* VALID FRAGMENT LOG. THUS BY FIRST CREATING THE FRAGMENT AND THEN*/
+ /* EXECUTING THE FRAGMENT LOG WE CAN CREATE THE FRAGMENT AS */
+ /* DESIRED. THIS SHOULD ONLY OCCUR AFTER CREATING A FRAGMENT. */
+ /* */
+ /* TO INDICATE THAT NO LOCAL CHECKPOINT IS TO BE USED WE SET THE */
+ /* LOCAL CHECKPOINT TO ZNIL. */
+ /* --------------------------------------------------------------- */
+ createReplicaPtr.p->lcpNo = ZNIL;
+ } else {
+ jam();
+ /* --------------------------------------------------------------- */
+ /* WE FOUND A PROPER LOCAL CHECKPOINT TO RESTART FROM. */
+ /* SET LOCAL CHECKPOINT ID AND LOCAL CHECKPOINT NUMBER. */
+ /* --------------------------------------------------------------- */
+ createReplicaPtr.p->lcpNo = startLcpNo;
+ arrGuard(startLcpNo, MAX_LCP_STORED);
+ createReplicaPtr.p->createLcpId = replicaPtr.p->lcpId[startLcpNo];
+ }//if
+
+ if(ERROR_INSERTED(7073) || ERROR_INSERTED(7074)){
+ jam();
+ nodePtr.p->nodeStatus = NodeRecord::DEAD;
+ }
+
+ /* ----------------------------------------------------------------- */
+ /* WE HAVE EITHER FOUND A LOCAL CHECKPOINT OR WE ARE PLANNING TO */
+ /* EXECUTE THE LOG FROM THE INITIAL CREATION OF THE TABLE. IN BOTH */
+ /* CASES WE NEED TO FIND A SET OF LOGS THAT CAN BE EXECUTED SUCH THAT */
+ /* WE RECOVER TO THE SYSTEM RESTART GLOBAL CHECKPOINT. */
+ /* ----------------------------------------------------------------- */
+ if (!findLogNodes(createReplicaPtr.p, fragPtr, startGci, stopGci)) {
+ jam();
+ /* --------------------------------------------------------------- */
+ /* WE WERE NOT ABLE TO FIND ANY WAY OF RESTORING THIS REPLICA. */
+ /* THIS IS A POTENTIAL SYSTEM ERROR. */
+ /* --------------------------------------------------------------- */
+ cnoOfCreateReplicas--;
+ return;
+ }//if
+
+ if(ERROR_INSERTED(7073) || ERROR_INSERTED(7074)){
+ jam();
+ nodePtr.p->nodeStatus = NodeRecord::ALIVE;
+ }
+
+ break;
+ }
+ default:
+ jam();
+ /*empty*/;
+ break;
+ }//switch
+ }
+ replicaPtr.i = nextReplicaPtrI;
+ }//while
+}//Dbdih::searchStoredReplicas()
+
+/*************************************************************************/
+/* */
+/* MODULE: SEIZE_FILE */
+/* DESCRIPTION: THE SUBROUTINE SEIZES A FILE RECORD FROM THE */
+/* FREE LIST. */
+/*************************************************************************/
+void Dbdih::seizeFile(FileRecordPtr& filePtr)
+{
+ filePtr.i = cfirstfreeFile;
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ cfirstfreeFile = filePtr.p->nextFile;
+ filePtr.p->nextFile = RNIL;
+}//Dbdih::seizeFile()
+
+/*************************************************************************/
+/* SEND CREATE_FRAGREQ TO ALL NODES IN THE NDB CLUSTER. */
+/*************************************************************************/
+/*************************************************************************/
+/* */
+/* MODULE: FIND THE START GCI AND LOCAL CHECKPOINT TO USE. */
+/*************************************************************************/
+void Dbdih::sendStartFragreq(Signal* signal,
+ TabRecordPtr tabPtr, Uint32 fragId)
+{
+ CreateReplicaRecordPtr replicaPtr;
+ for (replicaPtr.i = 0; replicaPtr.i < cnoOfCreateReplicas; replicaPtr.i++) {
+ jam();
+ ptrAss(replicaPtr, createReplicaRecord);
+ BlockReference ref = calcLqhBlockRef(replicaPtr.p->dataNodeId);
+ StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0];
+ startFragReq->userPtr = replicaPtr.p->replicaRec;
+ startFragReq->userRef = reference();
+ startFragReq->lcpNo = replicaPtr.p->lcpNo;
+ startFragReq->lcpId = replicaPtr.p->createLcpId;
+ startFragReq->tableId = tabPtr.i;
+ startFragReq->fragId = fragId;
+
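+ // Error insert 7072/7074 (test code): split the last redo log interval
+ // into several one-GCI pieces on the same log node, presumably to
+ // exercise execution of multiple log intervals during restart.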
+ if(ERROR_INSERTED(7072) || ERROR_INSERTED(7074)){
+ jam();
+ const Uint32 noNodes = replicaPtr.p->noLogNodes;
+ Uint32 start = replicaPtr.p->logStartGci[noNodes - 1];
+ const Uint32 stop = replicaPtr.p->logStopGci[noNodes - 1];
+
+ for(Uint32 i = noNodes; i < 4 && (stop - start) > 0; i++){
+ replicaPtr.p->noLogNodes++;
+ replicaPtr.p->logStopGci[i - 1] = start;
+
+ replicaPtr.p->logNodeId[i] = replicaPtr.p->logNodeId[i-1];
+ replicaPtr.p->logStartGci[i] = start + 1;
+ replicaPtr.p->logStopGci[i] = stop;
+ start += 1;
+ }
+ }
+
+ startFragReq->noOfLogNodes = replicaPtr.p->noLogNodes;
+
+ for (Uint32 i = 0; i < 4 ; i++) {
+ startFragReq->lqhLogNode[i] = replicaPtr.p->logNodeId[i];
+ startFragReq->startGci[i] = replicaPtr.p->logStartGci[i];
+ startFragReq->lastGci[i] = replicaPtr.p->logStopGci[i];
+ }//for
+
+ sendSignal(ref, GSN_START_FRAGREQ, signal,
+ StartFragReq::SignalLength, JBB);
+ }//for
+}//Dbdih::sendStartFragreq()
+
+/*************************************************************************/
+/* SET THE INITIAL ACTIVE STATUS ON ALL NODES AND PUT INTO LISTS. */
+/*************************************************************************/
+void Dbdih::setInitialActiveStatus()
+{
+ NodeRecordPtr siaNodeptr;
+ Uint32 tsiaNodeActiveStatus;
+ Uint32 tsiaNoActiveNodes;
+
+ tsiaNoActiveNodes = csystemnodes - cnoHotSpare;
+ for(Uint32 i = 0; i<Sysfile::NODE_STATUS_SIZE; i++)
+ SYSFILE->nodeStatus[i] = 0;
+ for (siaNodeptr.i = 1; siaNodeptr.i < MAX_NDB_NODES; siaNodeptr.i++) {
+ ptrAss(siaNodeptr, nodeRecord);
+ if (siaNodeptr.p->nodeStatus == NodeRecord::ALIVE) {
+ if (tsiaNoActiveNodes == 0) {
+ jam();
+ siaNodeptr.p->activeStatus = Sysfile::NS_HotSpare;
+ } else {
+ jam();
+ tsiaNoActiveNodes = tsiaNoActiveNodes - 1;
+ siaNodeptr.p->activeStatus = Sysfile::NS_Active;
+ }//if
+ } else {
+ jam();
+ siaNodeptr.p->activeStatus = Sysfile::NS_NotDefined;
+ }//if
+ switch (siaNodeptr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ jam();
+ tsiaNodeActiveStatus = Sysfile::NS_Active;
+ break;
+ case Sysfile::NS_HotSpare:
+ jam();
+ tsiaNodeActiveStatus = Sysfile::NS_HotSpare;
+ break;
+ case Sysfile::NS_NotDefined:
+ jam();
+ tsiaNodeActiveStatus = Sysfile::NS_NotDefined;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ Sysfile::setNodeStatus(siaNodeptr.i, SYSFILE->nodeStatus,
+ tsiaNodeActiveStatus);
+ }//for
+}//Dbdih::setInitialActiveStatus()
+
+/*************************************************************************/
+/* SET LCP ACTIVE STATUS AT THE END OF A LOCAL CHECKPOINT. */
+/*************************************************************************/
+void Dbdih::setLcpActiveStatusEnd()
+{
+ NodeRecordPtr nodePtr;
+
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (c_lcpState.m_participatingLQH.get(nodePtr.i)){
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ /*-------------------------------------------------------------------*/
+ /* THE NODE PARTICIPATED IN THIS CHECKPOINT.
+ * WE CAN SET ITS STATUS TO ACTIVE */
+ /*-------------------------------------------------------------------*/
+ nodePtr.p->activeStatus = Sysfile::NS_Active;
+ takeOverCompleted(nodePtr.i);
+ break;
+ case Sysfile::NS_TakeOver:
+ jam();
+ /*-------------------------------------------------------------------*/
+ /* THE NODE HAS COMPLETED A CHECKPOINT AFTER TAKE OVER. WE CAN NOW */
+ /* SET ITS STATUS TO ACTIVE. WE CAN ALSO COMPLETE THE TAKE OVER */
+ /* AND ALSO WE CLEAR THE TAKE OVER NODE IN THE RESTART INFO. */
+ /*-------------------------------------------------------------------*/
+ nodePtr.p->activeStatus = Sysfile::NS_Active;
+ takeOverCompleted(nodePtr.i);
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ }//if
+ }//for
+
+ if(getNodeState().getNodeRestartInProgress()){
+ jam();
+ if(c_lcpState.m_participatingLQH.get(getOwnNodeId())){
+ nodePtr.i = getOwnNodeId();
+ ptrAss(nodePtr, nodeRecord);
+ ndbrequire(nodePtr.p->activeStatus == Sysfile::NS_Active);
+ ndbout_c("NR: setLcpActiveStatusEnd - m_participatingLQH");
+ } else {
+ ndbout_c("NR: setLcpActiveStatusEnd - !m_participatingLQH");
+ }
+ }
+
+ c_lcpState.m_participatingDIH.clear();
+ c_lcpState.m_participatingLQH.clear();
+ if (isMaster()) {
+ jam();
+ setNodeRestartInfoBits();
+ }//if
+}//Dbdih::setLcpActiveStatusEnd()
+
+void Dbdih::takeOverCompleted(Uint32 aNodeId)
+{
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = findTakeOver(aNodeId);
+ if (takeOverPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ if (takeOverPtr.p->toMasterStatus != TakeOverRecord::WAIT_LCP) {
+ jam();
+ ndbrequire(!isMaster());
+ return;
+ }//if
+ ndbrequire(isMaster());
+ Sysfile::setTakeOverNode(aNodeId, SYSFILE->takeOver, 0);
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_END_COPY;
+ cstartGcpNow = true;
+ }//if
+}//Dbdih::takeOverCompleted()
+
+/*************************************************************************/
+/* SET LCP ACTIVE STATUS BEFORE STARTING A LOCAL CHECKPOINT. */
+/*************************************************************************/
+void Dbdih::setLcpActiveStatusStart(Signal* signal)
+{
+ NodeRecordPtr nodePtr;
+
+ c_lcpState.m_participatingLQH.clear();
+ c_lcpState.m_participatingDIH.clear();
+
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ ptrAss(nodePtr, nodeRecord);
+#if 0
+ if(nodePtr.p->nodeStatus != NodeRecord::NOT_IN_CLUSTER){
+ infoEvent("Node %d nodeStatus=%d activeStatus=%d copyCompleted=%d lcp=%d",
+ nodePtr.i,
+ nodePtr.p->nodeStatus,
+ nodePtr.p->activeStatus,
+ nodePtr.p->copyCompleted,
+ nodePtr.p->m_inclDihLcp);
+ }
+#endif
+ if(nodePtr.p->nodeStatus == NodeRecord::ALIVE && nodePtr.p->m_inclDihLcp){
+ jam();
+ c_lcpState.m_participatingDIH.set(nodePtr.i);
+ }
+
+ if ((nodePtr.p->nodeStatus == NodeRecord::ALIVE) &&
+ (nodePtr.p->copyCompleted)) {
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ jam();
+ /*-------------------------------------------------------------------*/
+ // The normal case. Starting a LCP for a started node which hasn't
+ // missed the previous LCP.
+ /*-------------------------------------------------------------------*/
+ c_lcpState.m_participatingLQH.set(nodePtr.i);
+ break;
+ case Sysfile::NS_ActiveMissed_1:
+ jam();
+ /*-------------------------------------------------------------------*/
+ // The node is starting up and is participating in a local checkpoint
+ // as the final phase of the start-up. We can still use the checkpoints
+ // on the node after a system restart.
+ /*-------------------------------------------------------------------*/
+ c_lcpState.m_participatingLQH.set(nodePtr.i);
+ break;
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ /*-------------------------------------------------------------------*/
+ // The node is starting up and is participating in a local checkpoint
+ // as the final phase of the start-up. We have missed so
+ // many checkpoints that we no longer can use this node to
+ // recreate fragments from disk.
+ // It must be taken over with the copy fragment process after a system
+ // crash. We indicate this by setting the active status to TAKE_OVER.
+ /*-------------------------------------------------------------------*/
+ nodePtr.p->activeStatus = Sysfile::NS_TakeOver;
+ //break; // Fall through
+ case Sysfile::NS_TakeOver:{
+ TakeOverRecordPtr takeOverPtr;
+ jam();
+ /*-------------------------------------------------------------------*/
+ /* THIS NODE IS CURRENTLY TAKING OVER A FAILED NODE. */
+ /*-------------------------------------------------------------------*/
+ takeOverPtr.i = findTakeOver(nodePtr.i);
+ if (takeOverPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ if (takeOverPtr.p->toMasterStatus == TakeOverRecord::WAIT_LCP) {
+ jam();
+ /*---------------------------------------------------------------
+ * ALL THE INFORMATION HAS BEEN REPLICATED TO THE NEW
+ * NODE AND WE ARE ONLY WAITING FOR A LOCAL CHECKPOINT TO BE
+ * PERFORMED ON THE NODE TO SET ITS STATUS TO ACTIVE.
+ */
+ infoEvent("Node %d is WAIT_LCP including in LCP", nodePtr.i);
+ c_lcpState.m_participatingLQH.set(nodePtr.i);
+ }//if
+ }//if
+ break;
+ }
+ default:
+ jam();
+ /*empty*/;
+ break;
+ }//switch
+ } else {
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ jam();
+ nodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_1;
+ break;
+ case Sysfile::NS_ActiveMissed_1:
+ jam();
+ nodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_2;
+ break;
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ if ((nodePtr.p->nodeStatus == NodeRecord::ALIVE) &&
+ (!nodePtr.p->copyCompleted)) {
+ jam();
+ /*-----------------------------------------------------------------*/
+ // The node is currently starting up and has not completed the
+ // copy phase.
+ // It will thus be in the TAKE_OVER state.
+ /*-----------------------------------------------------------------*/
+ ndbrequire(findTakeOver(nodePtr.i) != RNIL);
+ nodePtr.p->activeStatus = Sysfile::NS_TakeOver;
+ } else {
+ jam();
+ /*-----------------------------------------------------------------*/
+ /* THE NODE IS ACTIVE AND HAS NOT COMPLETED ANY OF THE LAST 3
+ * CHECKPOINTS */
+ /* WE MUST TAKE IT OUT OF ACTION AND START A NEW NODE TO TAKE OVER.*/
+ /*-----------------------------------------------------------------*/
+ nodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
+ }//if
+ break;
+ case Sysfile::NS_TakeOver:
+ jam();
+ break;
+ default:
+ jam();
+ /*empty*/;
+ break;
+ }//switch
+ }//if
+ }//for
+ if (isMaster()) {
+ jam();
+ checkStartTakeOver(signal);
+ setNodeRestartInfoBits();
+ }//if
+}//Dbdih::setLcpActiveStatusStart()
+
+/*************************************************************************/
+/* SET NODE ACTIVE STATUS AT SYSTEM RESTART AND WHEN UPDATED BY MASTER */
+/*************************************************************************/
+void Dbdih::setNodeActiveStatus()
+{
+ NodeRecordPtr snaNodeptr;
+
+ for (snaNodeptr.i = 1; snaNodeptr.i < MAX_NDB_NODES; snaNodeptr.i++) {
+ ptrAss(snaNodeptr, nodeRecord);
+ const Uint32 tsnaNodeBits = Sysfile::getNodeStatus(snaNodeptr.i,
+ SYSFILE->nodeStatus);
+ switch (tsnaNodeBits) {
+ case Sysfile::NS_Active:
+ jam();
+ snaNodeptr.p->activeStatus = Sysfile::NS_Active;
+ break;
+ case Sysfile::NS_ActiveMissed_1:
+ jam();
+ snaNodeptr.p->activeStatus = Sysfile::NS_ActiveMissed_1;
+ break;
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ snaNodeptr.p->activeStatus = Sysfile::NS_ActiveMissed_2;
+ break;
+ case Sysfile::NS_TakeOver:
+ jam();
+ snaNodeptr.p->activeStatus = Sysfile::NS_TakeOver;
+ break;
+ case Sysfile::NS_HotSpare:
+ jam();
+ snaNodeptr.p->activeStatus = Sysfile::NS_HotSpare;
+ break;
+ case Sysfile::NS_NotActive_NotTakenOver:
+ jam();
+ snaNodeptr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
+ break;
+ case Sysfile::NS_NotDefined:
+ jam();
+ snaNodeptr.p->activeStatus = Sysfile::NS_NotDefined;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ }//for
+}//Dbdih::setNodeActiveStatus()
+
+/***************************************************************************/
+/* SET THE NODE GROUP BASED ON THE RESTART INFORMATION OR AS SET BY MASTER */
+/***************************************************************************/
+void Dbdih::setNodeGroups()
+{
+ NodeGroupRecordPtr NGPtr;
+ NodeRecordPtr sngNodeptr;
+ Uint32 Ti;
+
+ for (Ti = 0; Ti < MAX_NDB_NODES; Ti++) {
+ NGPtr.i = Ti;
+ ptrAss(NGPtr, nodeGroupRecord);
+ NGPtr.p->nodeCount = 0;
+ }//for
+ for (sngNodeptr.i = 1; sngNodeptr.i < MAX_NDB_NODES; sngNodeptr.i++) {
+ ptrAss(sngNodeptr, nodeRecord);
+ Sysfile::ActiveStatus s =
+ (Sysfile::ActiveStatus)Sysfile::getNodeStatus(sngNodeptr.i,
+ SYSFILE->nodeStatus);
+ switch (s){
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ case Sysfile::NS_ActiveMissed_2:
+ case Sysfile::NS_NotActive_NotTakenOver:
+ case Sysfile::NS_TakeOver:
+ jam();
+ sngNodeptr.p->nodeGroup = Sysfile::getNodeGroup(sngNodeptr.i,
+ SYSFILE->nodeGroups);
+ NGPtr.i = sngNodeptr.p->nodeGroup;
+ ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
+ NGPtr.p->nodesInGroup[NGPtr.p->nodeCount] = sngNodeptr.i;
+ NGPtr.p->nodeCount++;
+ break;
+ case Sysfile::NS_HotSpare:
+ case Sysfile::NS_NotDefined:
+ jam();
+ sngNodeptr.p->nodeGroup = ZNIL;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ }//for
+ cnoOfNodeGroups = 0;
+ for (Ti = 0; Ti < MAX_NDB_NODES; Ti++) {
+ jam();
+ NGPtr.i = Ti;
+ ptrAss(NGPtr, nodeGroupRecord);
+ if (NGPtr.p->nodeCount != 0) {
+ jam();
+ cnoOfNodeGroups++;
+ }//if
+ }//for
+ cnoHotSpare = csystemnodes - (cnoOfNodeGroups * cnoReplicas);
+}//Dbdih::setNodeGroups()
+
+/*************************************************************************/
+/* SET NODE INFORMATION AFTER RECEIVING RESTART INFORMATION FROM MASTER. */
+/* WE TAKE THE OPPORTUNITY TO SYNCHRONISE OUR DATA WITH THE MASTER. IT */
+/* IS ONLY THE MASTER THAT WILL ACT ON THIS DATA. WE WILL KEEP THEM */
+/* UPDATED FOR THE CASE WHEN WE HAVE TO BECOME MASTER. */
+/*************************************************************************/
+void Dbdih::setNodeInfo(Signal* signal)
+{
+ setNodeActiveStatus();
+ setNodeGroups();
+ sendHOT_SPAREREP(signal);
+}//Dbdih::setNodeInfo()
+
+/*************************************************************************/
+// Keep also DBDICT informed about the Hot Spare situation in the cluster.
+/*************************************************************************/
+void Dbdih::sendHOT_SPAREREP(Signal* signal)
+{
+ NodeRecordPtr locNodeptr;
+ Uint32 Ti = 0;
+ HotSpareRep * const hotSpare = (HotSpareRep*)&signal->theData[0];
+ NodeBitmask::clear(hotSpare->theHotSpareNodes);
+ for (locNodeptr.i = 1; locNodeptr.i < MAX_NDB_NODES; locNodeptr.i++) {
+ ptrAss(locNodeptr, nodeRecord);
+ switch (locNodeptr.p->activeStatus) {
+ case Sysfile::NS_HotSpare:
+ jam();
+ NodeBitmask::set(hotSpare->theHotSpareNodes, locNodeptr.i);
+ Ti++;
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ }//for
+ hotSpare->noHotSpareNodes = Ti;
+ sendSignal(DBDICT_REF, GSN_HOT_SPAREREP,
+ signal, HotSpareRep::SignalLength, JBB);
+}//Dbdih::sendHOT_SPAREREP()
+
+/*************************************************************************/
+/* SET LCP ACTIVE STATUS FOR ALL NODES BASED ON THE INFORMATION IN */
+/* THE RESTART INFORMATION. */
+/*************************************************************************/
+#if 0
+void Dbdih::setNodeLcpActiveStatus()
+{
+ c_lcpState.m_lcpActiveStatus.clear();
+ for (Uint32 i = 1; i < MAX_NDB_NODES; i++) {
+ if (NodeBitmask::get(SYSFILE->lcpActive, i)) {
+ jam();
+ c_lcpState.m_lcpActiveStatus.set(i);
+ }//if
+ }//for
+}//Dbdih::setNodeLcpActiveStatus()
+#endif
+
+/*************************************************************************/
+/* SET THE RESTART INFO BITS BASED ON THE NODES ACTIVE STATUS. */
+/*************************************************************************/
+void Dbdih::setNodeRestartInfoBits()
+{
+ NodeRecordPtr nodePtr;
+ Uint32 tsnrNodeGroup;
+ Uint32 tsnrNodeActiveStatus;
+ Uint32 i;
+ for(i = 1; i < MAX_NDB_NODES; i++){
+ Sysfile::setNodeStatus(i, SYSFILE->nodeStatus, Sysfile::NS_Active);
+ }//for
+ for(i = 1; i < Sysfile::NODE_GROUPS_SIZE; i++){
+ SYSFILE->nodeGroups[i] = 0;
+ }//for
+ NdbNodeBitmask::clear(SYSFILE->lcpActive);
+
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ ptrAss(nodePtr, nodeRecord);
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ jam();
+ tsnrNodeActiveStatus = Sysfile::NS_Active;
+ break;
+ case Sysfile::NS_ActiveMissed_1:
+ jam();
+ tsnrNodeActiveStatus = Sysfile::NS_ActiveMissed_1;
+ break;
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ tsnrNodeActiveStatus = Sysfile::NS_ActiveMissed_2;
+ break;
+ case Sysfile::NS_HotSpare:
+ jam();
+ tsnrNodeActiveStatus = Sysfile::NS_HotSpare;
+ break;
+ case Sysfile::NS_TakeOver:
+ jam();
+ tsnrNodeActiveStatus = Sysfile::NS_TakeOver;
+ break;
+ case Sysfile::NS_NotActive_NotTakenOver:
+ jam();
+ tsnrNodeActiveStatus = Sysfile::NS_NotActive_NotTakenOver;
+ break;
+ case Sysfile::NS_NotDefined:
+ jam();
+ tsnrNodeActiveStatus = Sysfile::NS_NotDefined;
+ break;
+ default:
+ ndbrequire(false);
+ tsnrNodeActiveStatus = Sysfile::NS_NotDefined; // remove warning
+ break;
+ }//switch
+ Sysfile::setNodeStatus(nodePtr.i, SYSFILE->nodeStatus,
+ tsnrNodeActiveStatus);
+ if (nodePtr.p->nodeGroup == ZNIL) {
+ jam();
+ tsnrNodeGroup = NO_NODE_GROUP_ID;
+ } else {
+ jam();
+ tsnrNodeGroup = nodePtr.p->nodeGroup;
+ }//if
+ Sysfile::setNodeGroup(nodePtr.i, SYSFILE->nodeGroups, tsnrNodeGroup);
+ if (c_lcpState.m_participatingLQH.get(nodePtr.i)){
+ jam();
+ NodeBitmask::set(SYSFILE->lcpActive, nodePtr.i);
+ }//if
+ }//for
+}//Dbdih::setNodeRestartInfoBits()
+
+/*************************************************************************/
+/* START THE GLOBAL CHECKPOINT PROTOCOL IN MASTER AT START-UP */
+/*************************************************************************/
+void Dbdih::startGcp(Signal* signal)
+{
+ cgcpStatus = GCP_READY;
+ coldGcpStatus = cgcpStatus;
+ coldGcpId = cnewgcp;
+ cgcpSameCounter = 0;
+ signal->theData[0] = DihContinueB::ZSTART_GCP;
+ signal->theData[1] = 0;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ signal->theData[0] = DihContinueB::ZCHECK_GCP_STOP;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1);
+}//Dbdih::startGcp()
+
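+/*************************************************************************/
+/* REBUILD THE LIST OF ACTIVE NODES OF THE FRAGMENT FROM ITS STORED */
+/* REPLICAS AND MOVE THE PREFERRED PRIMARY, IF PRESENT, TO THE FRONT. */
+/*************************************************************************/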
+void Dbdih::updateNodeInfo(FragmentstorePtr fragPtr)
+{
+ ReplicaRecordPtr replicatePtr;
+ Uint32 index = 0;
+ replicatePtr.i = fragPtr.p->storedReplicas;
+ do {
+ jam();
+ ptrCheckGuard(replicatePtr, creplicaFileSize, replicaRecord);
+ ndbrequire(index < MAX_REPLICAS);
+ fragPtr.p->activeNodes[index] = replicatePtr.p->procNode;
+ index++;
+ replicatePtr.i = replicatePtr.p->nextReplica;
+ } while (replicatePtr.i != RNIL);
+ fragPtr.p->fragReplicas = index;
+
+ /* ----------------------------------------------------------------------- */
+ // We switch primary to the preferred primary if the preferred primary is
+ // in the list.
+ /* ----------------------------------------------------------------------- */
+ const Uint32 prefPrim = fragPtr.p->preferredPrimary;
+ for (Uint32 i = 1; i < index; i++) {
+ jam();
+ ndbrequire(i < MAX_REPLICAS);
+ if (fragPtr.p->activeNodes[i] == prefPrim){
+ jam();
+ Uint32 switchNode = fragPtr.p->activeNodes[0];
+ fragPtr.p->activeNodes[0] = prefPrim;
+ fragPtr.p->activeNodes[i] = switchNode;
+ break;
+ }//if
+ }//for
+}//Dbdih::updateNodeInfo()
+
+void Dbdih::writeFragment(RWFragment* wf, FragmentstorePtr fragPtr)
+{
+ writePageWord(wf, wf->fragId);
+ writePageWord(wf, fragPtr.p->preferredPrimary);
+ writePageWord(wf, fragPtr.p->noStoredReplicas);
+ writePageWord(wf, fragPtr.p->noOldStoredReplicas);
+ writePageWord(wf, fragPtr.p->distributionKey);
+}//Dbdih::writeFragment()
+
+void Dbdih::writePageWord(RWFragment* wf, Uint32 dataWord)
+{
+ if (wf->wordIndex >= 2048) {
+ jam();
+ ndbrequire(wf->wordIndex == 2048);
+ allocpage(wf->rwfPageptr);
+ wf->wordIndex = 32;
+ wf->pageIndex++;
+ ndbrequire(wf->pageIndex < 8);
+ wf->rwfTabPtr.p->pageRef[wf->pageIndex] = wf->rwfPageptr.i;
+ wf->rwfTabPtr.p->noPages++;
+ }//if
+ wf->rwfPageptr.p->word[wf->wordIndex] = dataWord;
+ wf->wordIndex++;
+}//Dbdih::writePageWord()
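+/**
+ * Page layout implied by writePageWord() (derived from the bounds checked
+ * above, assuming 32-bit page words): a table-description page holds 2048
+ * words, the first 32 of which are reserved as a page header, leaving 2016
+ * payload words per page, and a table description may span at most 8 pages
+ * (pageRef[]).
+ */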
+
+void Dbdih::writeReplicas(RWFragment* wf, Uint32 replicaStartIndex)
+{
+ ReplicaRecordPtr wfReplicaPtr;
+ wfReplicaPtr.i = replicaStartIndex;
+ while (wfReplicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(wfReplicaPtr, creplicaFileSize, replicaRecord);
+ writePageWord(wf, wfReplicaPtr.p->procNode);
+ writePageWord(wf, wfReplicaPtr.p->initialGci);
+ writePageWord(wf, wfReplicaPtr.p->noCrashedReplicas);
+ writePageWord(wf, wfReplicaPtr.p->nextLcp);
+ Uint32 i;
+ for (i = 0; i < MAX_LCP_STORED; i++) {
+ writePageWord(wf, wfReplicaPtr.p->maxGciCompleted[i]);
+ writePageWord(wf, wfReplicaPtr.p->maxGciStarted[i]);
+ writePageWord(wf, wfReplicaPtr.p->lcpId[i]);
+ writePageWord(wf, wfReplicaPtr.p->lcpStatus[i]);
+ }//for
+ for (i = 0; i < 8; i++) {
+ writePageWord(wf, wfReplicaPtr.p->createGci[i]);
+ writePageWord(wf, wfReplicaPtr.p->replicaLastGci[i]);
+ }//for
+
+ wfReplicaPtr.i = wfReplicaPtr.p->nextReplica;
+ }//while
+}//Dbdih::writeReplicas()
+
+void Dbdih::writeRestorableGci(Signal* signal, FileRecordPtr filePtr)
+{
+ for (Uint32 i = 0; i < Sysfile::SYSFILE_SIZE32; i++) {
+ sysfileDataToFile[i] = sysfileData[i];
+ }//for
+ signal->theData[0] = filePtr.p->fileRef;
+ signal->theData[1] = reference();
+ signal->theData[2] = filePtr.i;
+ signal->theData[3] = ZLIST_OF_PAIRS_SYNCH;
+ signal->theData[4] = ZVAR_NO_CRESTART_INFO_TO_FILE;
+ signal->theData[5] = 1; /* AMOUNT OF PAGES */
+ signal->theData[6] = 0; /* MEMORY PAGE = 0 SINCE COMMON STORED VARIABLE */
+ signal->theData[7] = 0;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+}//Dbdih::writeRestorableGci()
+
+void Dbdih::writeTabfile(Signal* signal, TabRecord* tab, FileRecordPtr filePtr)
+{
+ signal->theData[0] = filePtr.p->fileRef;
+ signal->theData[1] = reference();
+ signal->theData[2] = filePtr.i;
+ signal->theData[3] = ZLIST_OF_PAIRS;
+ signal->theData[4] = ZVAR_NO_WORD;
+ signal->theData[5] = tab->noPages;
+ for (Uint32 i = 0; i < tab->noPages; i++) {
+ jam();
+ signal->theData[6 + (2 * i)] = tab->pageRef[i];
+ signal->theData[7 + (2 * i)] = i;
+ }//for
+ Uint32 length = 6 + (2 * tab->noPages);
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, length, JBA);
+}//Dbdih::writeTabfile()
+
+void Dbdih::execDEBUG_SIG(Signal* signal)
+{
+ (void)signal; // Avoid compiler warnings
+}//Dbdih::execDEBUG_SIG()
+
+void
+Dbdih::execDUMP_STATE_ORD(Signal* signal)
+{
+ DumpStateOrd * const & dumpState = (DumpStateOrd *)&signal->theData[0];
+ if (dumpState->args[0] == DumpStateOrd::DihDumpNodeRestartInfo) {
+ infoEvent("c_nodeStartMaster.blockLcp = %d, c_nodeStartMaster.blockGcp = %d, c_nodeStartMaster.wait = %d",
+ c_nodeStartMaster.blockLcp, c_nodeStartMaster.blockGcp, c_nodeStartMaster.wait);
+ infoEvent("cstartGcpNow = %d, cgcpStatus = %d",
+ cstartGcpNow, cgcpStatus);
+ infoEvent("cfirstVerifyQueue = %d, cverifyQueueCounter = %d",
+ cfirstVerifyQueue, cverifyQueueCounter);
+ infoEvent("cgcpOrderBlocked = %d, cgcpStartCounter = %d",
+ cgcpOrderBlocked, cgcpStartCounter);
+ }//if
+ if (dumpState->args[0] == DumpStateOrd::DihDumpNodeStatusInfo) {
+ NodeRecordPtr localNodePtr;
+ infoEvent("Printing nodeStatus of all nodes");
+ for (localNodePtr.i = 1; localNodePtr.i < MAX_NDB_NODES; localNodePtr.i++) {
+ ptrAss(localNodePtr, nodeRecord);
+ if (localNodePtr.p->nodeStatus != NodeRecord::NOT_IN_CLUSTER) {
+ infoEvent("Node = %d has status = %d",
+ localNodePtr.i, localNodePtr.p->nodeStatus);
+ }//if
+ }//for
+ }//if
+
+ if (dumpState->args[0] == DumpStateOrd::DihPrintFragmentation){
+ infoEvent("Printing fragmentation of all tables --");
+ for(Uint32 i = 0; i<ctabFileSize; i++){
+ TabRecordPtr tabPtr;
+ tabPtr.i = i;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ if(tabPtr.p->tabStatus != TabRecord::TS_ACTIVE)
+ continue;
+
+ for(Uint32 j = 0; j < tabPtr.p->totalfragments; j++){
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, j, fragPtr);
+
+ Uint32 nodeOrder[MAX_REPLICAS];
+ const Uint32 noOfReplicas = extractNodeInfo(fragPtr.p, nodeOrder);
+ char buf[100];
+ BaseString::snprintf(buf, sizeof(buf), " Table %d Fragment %d - ", tabPtr.i, j);
+ for(Uint32 k = 0; k < noOfReplicas; k++){
+ char tmp[100];
+ BaseString::snprintf(tmp, sizeof(tmp), "%d ", nodeOrder[k]);
+ strcat(buf, tmp);
+ }
+ infoEvent(buf);
+ }
+ }
+ }
+
+ if (signal->theData[0] == 7000) {
+ infoEvent("ctimer = %d, cgcpParticipantState = %d, cgcpStatus = %d",
+ c_lcpState.ctimer, cgcpParticipantState, cgcpStatus);
+ infoEvent("coldGcpStatus = %d, coldGcpId = %d, cmasterState = %d",
+ coldGcpStatus, coldGcpId, cmasterState);
+ infoEvent("cmasterTakeOverNode = %d, ctcCounter = %d",
+ cmasterTakeOverNode, c_lcpState.ctcCounter);
+ }//if
+ if (signal->theData[0] == 7001) {
+ infoEvent("c_lcpState.keepGci = %d",
+ c_lcpState.keepGci);
+ infoEvent("c_lcpState.lcpStatus = %d, clcpStartGcp = %d",
+ c_lcpState.lcpStatus,
+ c_lcpState.lcpStartGcp);
+ infoEvent("cgcpStartCounter = %d, cimmediateLcpStart = %d",
+ cgcpStartCounter, c_lcpState.immediateLcpStart);
+ }//if
+ if (signal->theData[0] == 7002) {
+ infoEvent("cnoOfActiveTables = %d, cgcpDelay = %d",
+ cnoOfActiveTables, cgcpDelay);
+ infoEvent("cdictblockref = %d, cfailurenr = %d",
+ cdictblockref, cfailurenr);
+ infoEvent("con_lineNodes = %d, reference() = %d, creceivedfrag = %d",
+ con_lineNodes, reference(), creceivedfrag);
+ }//if
+ if (signal->theData[0] == 7003) {
+ infoEvent("cfirstAliveNode = %d, cgckptflag = %d",
+ cfirstAliveNode, cgckptflag);
+ infoEvent("clocallqhblockref = %d, clocaltcblockref = %d, cgcpOrderBlocked = %d",
+ clocallqhblockref, clocaltcblockref, cgcpOrderBlocked);
+ infoEvent("cstarttype = %d, csystemnodes = %d, currentgcp = %d",
+ cstarttype, csystemnodes, currentgcp);
+ }//if
+ if (signal->theData[0] == 7004) {
+ infoEvent("cmasterdihref = %d, cownNodeId = %d, cnewgcp = %d",
+ cmasterdihref, cownNodeId, cnewgcp);
+ infoEvent("cndbStartReqBlockref = %d, cremainingfrags = %d",
+ cndbStartReqBlockref, cremainingfrags);
+ infoEvent("cntrlblockref = %d, cgcpSameCounter = %d, coldgcp = %d",
+ cntrlblockref, cgcpSameCounter, coldgcp);
+ }//if
+ if (signal->theData[0] == 7005) {
+ infoEvent("crestartGci = %d",
+ crestartGci);
+ }//if
+ if (signal->theData[0] == 7006) {
+ infoEvent("clcpDelay = %d, cgcpMasterTakeOverState = %d",
+ c_lcpState.clcpDelay, cgcpMasterTakeOverState);
+ infoEvent("cmasterNodeId = %d", cmasterNodeId);
+ infoEvent("cnoHotSpare = %d, c_nodeStartMaster.startNode = %d, c_nodeStartMaster.wait = %d",
+ cnoHotSpare, c_nodeStartMaster.startNode, c_nodeStartMaster.wait);
+ }//if
+ if (signal->theData[0] == 7007) {
+ infoEvent("c_nodeStartMaster.failNr = %d", c_nodeStartMaster.failNr);
+ infoEvent("c_nodeStartMaster.startInfoErrorCode = %d",
+ c_nodeStartMaster.startInfoErrorCode);
+ infoEvent("c_nodeStartMaster.blockLcp = %d, c_nodeStartMaster.blockGcp = %d",
+ c_nodeStartMaster.blockLcp, c_nodeStartMaster.blockGcp);
+ }//if
+ if (signal->theData[0] == 7008) {
+ infoEvent("cfirstDeadNode = %d, cstartPhase = %d, cnoReplicas = %d",
+ cfirstDeadNode, cstartPhase, cnoReplicas);
+ infoEvent("cwaitLcpSr = %d",cwaitLcpSr);
+ }//if
+ if (signal->theData[0] == 7009) {
+ infoEvent("ccalcOldestRestorableGci = %d, cnoOfNodeGroups = %d",
+ c_lcpState.oldestRestorableGci, cnoOfNodeGroups);
+ infoEvent("cstartGcpNow = %d",
+ cstartGcpNow);
+ infoEvent("crestartGci = %d",
+ crestartGci);
+ }//if
+ if (signal->theData[0] == 7010) {
+ infoEvent("cminHotSpareNodes = %d, c_lcpState.lcpStatusUpdatedPlace = %d, cLcpStart = %d",
+ cminHotSpareNodes, c_lcpState.lcpStatusUpdatedPlace, c_lcpState.lcpStart);
+ infoEvent("c_blockCommit = %d, c_blockCommitNo = %d",
+ c_blockCommit, c_blockCommitNo);
+ }//if
+ if (signal->theData[0] == 7011){
+ infoEvent("c_COPY_GCIREQ_Counter = %s",
+ c_COPY_GCIREQ_Counter.getText());
+ infoEvent("c_COPY_TABREQ_Counter = %s",
+ c_COPY_TABREQ_Counter.getText());
+ infoEvent("c_CREATE_FRAGREQ_Counter = %s",
+ c_CREATE_FRAGREQ_Counter.getText());
+ infoEvent("c_DIH_SWITCH_REPLICA_REQ_Counter = %s",
+ c_DIH_SWITCH_REPLICA_REQ_Counter.getText());
+ infoEvent("c_EMPTY_LCP_REQ_Counter = %s",c_EMPTY_LCP_REQ_Counter.getText());
+ infoEvent("c_END_TOREQ_Counter = %s", c_END_TOREQ_Counter.getText());
+ infoEvent("c_GCP_COMMIT_Counter = %s", c_GCP_COMMIT_Counter.getText());
+ infoEvent("c_GCP_PREPARE_Counter = %s", c_GCP_PREPARE_Counter.getText());
+ infoEvent("c_GCP_SAVEREQ_Counter = %s", c_GCP_SAVEREQ_Counter.getText());
+ infoEvent("c_INCL_NODEREQ_Counter = %s", c_INCL_NODEREQ_Counter.getText());
+ infoEvent("c_MASTER_GCPREQ_Counter = %s",
+ c_MASTER_GCPREQ_Counter.getText());
+ infoEvent("c_MASTER_LCPREQ_Counter = %s",
+ c_MASTER_LCPREQ_Counter.getText());
+ infoEvent("c_START_INFOREQ_Counter = %s",
+ c_START_INFOREQ_Counter.getText());
+ infoEvent("c_START_RECREQ_Counter = %s", c_START_RECREQ_Counter.getText());
+ infoEvent("c_START_TOREQ_Counter = %s", c_START_TOREQ_Counter.getText());
+ infoEvent("c_STOP_ME_REQ_Counter = %s", c_STOP_ME_REQ_Counter.getText());
+ infoEvent("c_TC_CLOPSIZEREQ_Counter = %s",
+ c_TC_CLOPSIZEREQ_Counter.getText());
+ infoEvent("c_TCGETOPSIZEREQ_Counter = %s",
+ c_TCGETOPSIZEREQ_Counter.getText());
+ infoEvent("c_UPDATE_TOREQ_Counter = %s", c_UPDATE_TOREQ_Counter.getText());
+ }
+
+ if(signal->theData[0] == 7012){
+ char buf[8*_NDB_NODE_BITMASK_SIZE+1];
+ infoEvent("ParticipatingDIH = %s", c_lcpState.m_participatingDIH.getText(buf));
+ infoEvent("ParticipatingLQH = %s", c_lcpState.m_participatingLQH.getText(buf));
+ infoEvent("m_LCP_COMPLETE_REP_Counter_DIH = %s",
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.getText());
+ infoEvent("m_LCP_COMPLETE_REP_Counter_LQH = %s",
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.getText());
+ infoEvent("m_LAST_LCP_FRAG_ORD = %s",
+ c_lcpState.m_LAST_LCP_FRAG_ORD.getText());
+ infoEvent("m_LCP_COMPLETE_REP_From_Master_Received = %d",
+ c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received);
+
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if(nodePtr.p->nodeStatus == NodeRecord::ALIVE){
+ Uint32 i;
+ for(i = 0; i<nodePtr.p->noOfStartedChkpt; i++){
+ infoEvent("Node %d: started: table=%d fragment=%d replica=%d",
+ nodePtr.i,
+ nodePtr.p->startedChkpt[i].tableId,
+ nodePtr.p->startedChkpt[i].fragId,
+ nodePtr.p->startedChkpt[i].replicaPtr);
+ }
+
+ for(i = 0; i<nodePtr.p->noOfQueuedChkpt; i++){
+ infoEvent("Node %d: queued: table=%d fragment=%d replica=%d",
+ nodePtr.i,
+ nodePtr.p->queuedChkpt[i].tableId,
+ nodePtr.p->queuedChkpt[i].fragId,
+ nodePtr.p->queuedChkpt[i].replicaPtr);
+ }
+ }
+ }
+ }
+
+ if(dumpState->args[0] == 7019 && signal->getLength() == 2)
+ {
+ char buf2[8+1];
+ NodeRecordPtr nodePtr;
+ nodePtr.i = signal->theData[1];
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ infoEvent("NF Node %d tc: %d lqh: %d dih: %d dict: %d recNODE_FAILREP: %d",
+ nodePtr.i,
+ nodePtr.p->dbtcFailCompleted,
+ nodePtr.p->dblqhFailCompleted,
+ nodePtr.p->dbdihFailCompleted,
+ nodePtr.p->dbdictFailCompleted,
+ nodePtr.p->recNODE_FAILREP);
+ infoEvent(" m_NF_COMPLETE_REP: %s m_nodefailSteps: %s",
+ nodePtr.p->m_NF_COMPLETE_REP.getText(),
+ nodePtr.p->m_nodefailSteps.getText(buf2));
+ }
+
+ if(dumpState->args[0] == 7020 && signal->getLength() > 3)
+ {
+ Uint32 gsn= signal->theData[1];
+ Uint32 block= signal->theData[2];
+ Uint32 length= signal->length() - 3;
+ memmove(signal->theData, signal->theData+3, 4*length);
+ sendSignal(numberToRef(block, getOwnNodeId()), gsn, signal, length, JBB);
+
+ warningEvent("-- SENDING CUSTOM SIGNAL --");
+ char buf[100], buf2[100];
+ buf2[0]= 0;
+ for(Uint32 i = 0; i<length; i++)
+ {
+ snprintf(buf, 100, "%s %.8x", buf2, signal->theData[i]);
+ snprintf(buf2, 100, "%s", buf);
+ }
+ warningEvent("gsn: %d block: %s, length: %d theData: %s",
+ gsn, getBlockName(block, "UNKNOWN"), length, buf);
+
+ g_eventLogger.warning("-- SENDING CUSTOM SIGNAL --");
+ g_eventLogger.warning("gsn: %d block: %s, length: %d theData: %s",
+ gsn, getBlockName(block, "UNKNOWN"), length, buf);
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::DihDumpLCPState){
+ infoEvent("-- Node %d LCP STATE --", getOwnNodeId());
+ infoEvent("lcpStatus = %d (update place = %d) ",
+ c_lcpState.lcpStatus, c_lcpState.lcpStatusUpdatedPlace);
+ infoEvent
+ ("lcpStart = %d lcpStartGcp = %d keepGci = %d oldestRestorable = %d",
+ c_lcpState.lcpStart, c_lcpState.lcpStartGcp,
+ c_lcpState.keepGci, c_lcpState.oldestRestorableGci);
+
+ infoEvent
+ ("immediateLcpStart = %d masterLcpNodeId = %d",
+ c_lcpState.immediateLcpStart,
+ refToNode(c_lcpState.m_masterLcpDihRef));
+ infoEvent("-- Node %d LCP STATE --", getOwnNodeId());
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::DihDumpLCPMasterTakeOver){
+ infoEvent("-- Node %d LCP MASTER TAKE OVER STATE --", getOwnNodeId());
+ infoEvent
+ ("c_lcpMasterTakeOverState.state = %d updatePlace = %d failedNodeId = %d",
+ c_lcpMasterTakeOverState.state,
+ c_lcpMasterTakeOverState.updatePlace,
+ c_lcpMasterTakeOverState.failedNodeId);
+
+ infoEvent("c_lcpMasterTakeOverState.minTableId = %u minFragId = %u",
+ c_lcpMasterTakeOverState.minTableId,
+ c_lcpMasterTakeOverState.minFragId);
+
+ infoEvent("-- Node %d LCP MASTER TAKE OVER STATE --", getOwnNodeId());
+ }
+
+ if (signal->theData[0] == 7015){
+ for(Uint32 i = 0; i<ctabFileSize; i++){
+ TabRecordPtr tabPtr;
+ tabPtr.i = i;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ if(tabPtr.p->tabStatus != TabRecord::TS_ACTIVE)
+ continue;
+
+ infoEvent
+ ("Table %d: TabCopyStatus: %d TabUpdateStatus: %d TabLcpStatus: %d",
+ tabPtr.i,
+ tabPtr.p->tabCopyStatus,
+ tabPtr.p->tabUpdateState,
+ tabPtr.p->tabLcpStatus);
+
+ FragmentstorePtr fragPtr;
+ for (Uint32 fid = 0; fid < tabPtr.p->totalfragments; fid++) {
+ jam();
+ getFragstore(tabPtr.p, fid, fragPtr);
+
+ char buf[100], buf2[100];
+ BaseString::snprintf(buf, sizeof(buf), " Fragment %d: noLcpReplicas==%d ",
+ fid, fragPtr.p->noLcpReplicas);
+
+ Uint32 num=0;
+ ReplicaRecordPtr replicaPtr;
+ replicaPtr.i = fragPtr.p->storedReplicas;
+ do {
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ BaseString::snprintf(buf2, sizeof(buf2), "%s %d(on %d)=%d(%s)",
+ buf, num,
+ replicaPtr.p->procNode,
+ replicaPtr.p->lcpIdStarted,
+ replicaPtr.p->lcpOngoingFlag ? "Ongoing" : "Idle");
+ BaseString::snprintf(buf, sizeof(buf), "%s", buf2);
+
+ num++;
+ replicaPtr.i = replicaPtr.p->nextReplica;
+ } while (replicaPtr.i != RNIL);
+ infoEvent(buf);
+ }
+ }
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::EnableUndoDelayDataWrite){
+ ndbout << "Dbdih:: delay write of datapages for table = "
+ << dumpState->args[1]<< endl;
+ // Send this dump to ACC and TUP
+ EXECUTE_DIRECT(DBACC, GSN_DUMP_STATE_ORD, signal, 2);
+ EXECUTE_DIRECT(DBTUP, GSN_DUMP_STATE_ORD, signal, 2);
+
+ // Start immediate LCP
+ c_lcpState.ctimer += (1 << c_lcpState.clcpDelay);
+ return;
+ }
+
+ if (signal->theData[0] == DumpStateOrd::DihAllAllowNodeStart) {
+ for (Uint32 i = 1; i < MAX_NDB_NODES; i++)
+ setAllowNodeStart(i, true);
+ return;
+ }//if
+ if (signal->theData[0] == DumpStateOrd::DihMinTimeBetweenLCP) {
+ // Set time between LCP to min value
+ ndbout << "Set time between LCP to min value" << endl;
+ c_lcpState.clcpDelay = 0; // TimeBetweenLocalCheckpoints.min
+ return;
+ }
+ if (signal->theData[0] == DumpStateOrd::DihMaxTimeBetweenLCP) {
+ // Set time between LCP to max value
+ ndbout << "Set time between LCP to max value" << endl;
+ c_lcpState.clcpDelay = 31; // TimeBetweenLocalCheckpoints.max
+ return;
+ }
+
+ if(dumpState->args[0] == 7098){
+ if(signal->length() == 3){
+ jam();
+ infoEvent("startLcpRoundLoopLab(table=%d, fragment=%d)",
+ signal->theData[1], signal->theData[2]);
+ startLcpRoundLoopLab(signal, signal->theData[1], signal->theData[2]);
+ return;
+ } else {
+ infoEvent("Invalid no of arguments to 7098 - startLcpRoundLoopLab -"
+ " expected 2 (tableId, fragmentId)");
+ }
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::DihStartLcpImmediately){
+ c_lcpState.ctimer += (1 << c_lcpState.clcpDelay);
+ return;
+ }
+}//Dbdih::execDUMP_STATE_ORD()
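+/**
+ * Usage note (a sketch of the usual way to reach the handler above, not a
+ * definitive interface description): DUMP_STATE_ORD is normally injected
+ * from the management client, e.g.
+ *
+ *   ndb_mgm> ALL DUMP 7012
+ *
+ * which arrives here with theData[0] = 7012 and prints the LCP participant
+ * state to the cluster log via infoEvent().
+ */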
+
+void
+Dbdih::execPREP_DROP_TAB_REQ(Signal* signal){
+ jamEntry();
+
+ PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+
+ PrepDropTabRef::ErrorCode err = PrepDropTabRef::OK;
+ { /**
+ * Check table state
+ */
+ bool ok = false;
+ switch(tabPtr.p->tabStatus){
+ case TabRecord::TS_IDLE:
+ ok = true;
+ jam();
+ err = PrepDropTabRef::NoSuchTable;
+ break;
+ case TabRecord::TS_DROPPING:
+ ok = true;
+ jam();
+ err = PrepDropTabRef::PrepDropInProgress;
+ break;
+ case TabRecord::TS_CREATING:
+ jam();
+ ok = true;
+ break;
+ case TabRecord::TS_ACTIVE:
+ ok = true;
+ jam();
+ break;
+ }
+ ndbrequire(ok);
+ }
+
+ if(err != PrepDropTabRef::OK){
+ jam();
+ PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->tableId = tabPtr.i;
+ ref->errorCode = err;
+ sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal,
+ PrepDropTabRef::SignalLength, JBB);
+ return;
+ }
+
+ tabPtr.p->tabStatus = TabRecord::TS_DROPPING;
+ tabPtr.p->m_prepDropTab.senderRef = senderRef;
+ tabPtr.p->m_prepDropTab.senderData = senderData;
+
+ if(isMaster()){
+ /**
+ * Remove from queue
+ */
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (c_lcpState.m_participatingLQH.get(nodePtr.i)){
+
+ Uint32 index = 0;
+ Uint32 count = nodePtr.p->noOfQueuedChkpt;
+ while(index < count){
+ if(nodePtr.p->queuedChkpt[index].tableId == tabPtr.i){
+ jam();
+ // ndbout_c("Unqueuing %d", index);
+
+ count--;
+ for(Uint32 i = index; i<count; i++){
+ jam();
+ nodePtr.p->queuedChkpt[i] = nodePtr.p->queuedChkpt[i + 1];
+ }
+ } else {
+ index++;
+ }
+ }
+ nodePtr.p->noOfQueuedChkpt = count;
+ }
+ }
+ }
+
+ { /**
+ * Check table lcp state
+ */
+
+ bool ok = false;
+ switch(tabPtr.p->tabLcpStatus){
+ case TabRecord::TLS_COMPLETED:
+ case TabRecord::TLS_WRITING_TO_FILE:
+ ok = true;
+ jam();
+ break;
+ case TabRecord::TLS_ACTIVE:
+ ok = true;
+ jam();
+
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
+
+ /**
+ * First check if all fragments are done
+ */
+ if(checkLcpAllTablesDoneInLqh()){
+ jam();
+
+ ndbout_c("This is the last table");
+
+ /**
+ * Then check if saving of tab info is done for all tables
+ */
+ LcpStatus a = c_lcpState.lcpStatus;
+ checkLcpCompletedLab(signal);
+
+ if(a != c_lcpState.lcpStatus){
+ ndbout_c("And all tables have already been written to disk");
+ }
+ }
+ break;
+ }
+ ndbrequire(ok);
+ }
+
+ { /**
+ * Send WaitDropTabReq to all LQH
+ */
+ WaitDropTabReq * req = (WaitDropTabReq*)signal->getDataPtrSend();
+ req->tableId = tabPtr.i;
+ req->senderRef = reference();
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = cfirstAliveNode;
+ tabPtr.p->m_prepDropTab.waitDropTabCount.clearWaitingFor();
+ while(nodePtr.i != RNIL){
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ tabPtr.p->m_prepDropTab.waitDropTabCount.setWaitingFor(nodePtr.i);
+ sendSignal(calcLqhBlockRef(nodePtr.i), GSN_WAIT_DROP_TAB_REQ,
+ signal, WaitDropTabReq::SignalLength, JBB);
+
+ nodePtr.i = nodePtr.p->nextNode;
+ }
+ }
+
+ waitDropTabWritingToFile(signal, tabPtr);
+}
+
+void
+Dbdih::waitDropTabWritingToFile(Signal* signal, TabRecordPtr tabPtr){
+
+ if(tabPtr.p->tabLcpStatus == TabRecord::TLS_WRITING_TO_FILE){
+ jam();
+ signal->theData[0] = DihContinueB::WAIT_DROP_TAB_WRITING_TO_FILE;
+ signal->theData[1] = tabPtr.i;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2);
+ return;
+ }
+
+ ndbrequire(tabPtr.p->tabLcpStatus == TabRecord::TLS_COMPLETED);
+ checkPrepDropTabComplete(signal, tabPtr);
+}
+
+void
+Dbdih::checkPrepDropTabComplete(Signal* signal, TabRecordPtr tabPtr){
+
+ if(tabPtr.p->tabLcpStatus != TabRecord::TLS_COMPLETED){
+ jam();
+ return;
+ }
+
+ if(!tabPtr.p->m_prepDropTab.waitDropTabCount.done()){
+ jam();
+ return;
+ }
+
+ const Uint32 ref = tabPtr.p->m_prepDropTab.senderRef;
+ if(ref != 0){
+ PrepDropTabConf* conf = (PrepDropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ conf->senderData = tabPtr.p->m_prepDropTab.senderData;
+ sendSignal(tabPtr.p->m_prepDropTab.senderRef, GSN_PREP_DROP_TAB_CONF,
+ signal, PrepDropTabConf::SignalLength, JBB);
+ tabPtr.p->m_prepDropTab.senderRef = 0;
+ }
+}
+
+void
+Dbdih::execWAIT_DROP_TAB_REF(Signal* signal){
+ jamEntry();
+ WaitDropTabRef * ref = (WaitDropTabRef*)signal->getDataPtr();
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = ref->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_DROPPING);
+ Uint32 nodeId = refToNode(ref->senderRef);
+
+ ndbrequire(ref->errorCode == WaitDropTabRef::NoSuchTable ||
+ ref->errorCode == WaitDropTabRef::NF_FakeErrorREF);
+
+ tabPtr.p->m_prepDropTab.waitDropTabCount.clearWaitingFor(nodeId);
+ checkPrepDropTabComplete(signal, tabPtr);
+}
+
+void
+Dbdih::execWAIT_DROP_TAB_CONF(Signal* signal){
+ jamEntry();
+ WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr();
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = conf->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_DROPPING);
+ Uint32 nodeId = refToNode(conf->senderRef);
+ tabPtr.p->m_prepDropTab.waitDropTabCount.clearWaitingFor(nodeId);
+ checkPrepDropTabComplete(signal, tabPtr);
+}
+
+void
+Dbdih::checkWaitDropTabFailedLqh(Signal* signal, Uint32 nodeId, Uint32 tableId){
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+
+ WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr();
+ conf->tableId = tableId;
+
+ const Uint32 RT_BREAK = 16;
+ for(Uint32 i = 0; i<RT_BREAK && tabPtr.i < ctabFileSize; i++, tabPtr.i++){
+ ptrAss(tabPtr, tabRecord);
+ if(tabPtr.p->tabStatus == TabRecord::TS_DROPPING){
+ if(tabPtr.p->m_prepDropTab.waitDropTabCount.isWaitingFor(nodeId)){
+ conf->senderRef = calcLqhBlockRef(nodeId);
+ execWAIT_DROP_TAB_CONF(signal);
+ tabPtr.i++;
+ break;
+ }
+ }
+ }
+
+ if(tabPtr.i == ctabFileSize){
+ /**
+ * Finished
+ */
+ jam();
+ return;
+ }
+
+ signal->theData[0] = DihContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+}
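+/**
+ * The RT_BREAK idiom above is DIH's standard way of bounding the work done
+ * per signal: handle at most RT_BREAK table records, then reschedule the
+ * remainder by sending CONTINUEB to ourselves with the current position
+ * (nodeId, next tabPtr.i) as arguments, so the scan resumes where it left
+ * off without blocking the signal loop.
+ */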
+
+
+void
+Dbdih::execNDB_TAMPER(Signal* signal)
+{
+ if ((ERROR_INSERTED(7011)) &&
+ (signal->theData[0] == 7012)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ calculateKeepGciLab(signal, 0, 0);
+ return;
+ }//if
+ SET_ERROR_INSERT_VALUE(signal->theData[0]);
+ return;
+}//Dbdih::execNDB_TAMPER()
+
+void Dbdih::execSET_VAR_REQ(Signal* signal) {
+#if 0
+ SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
+ ConfigParamId var = setVarReq->variable();
+ int val = setVarReq->value();
+
+
+ switch (var) {
+ case TimeBetweenLocalCheckpoints:
+ c_lcpState.clcpDelay = val;
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ case TimeBetweenGlobalCheckpoints:
+ cgcpDelay = val;
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ default:
+ sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
+ } // switch
+#endif
+}
+
+void Dbdih::execBLOCK_COMMIT_ORD(Signal* signal){
+ BlockCommitOrd* const block = (BlockCommitOrd *)&signal->theData[0];
+
+ jamEntry();
+#if 0
+ ndbrequire(c_blockCommit == false ||
+ c_blockCommitNo == block->failNo);
+#else
+ if(!(c_blockCommit == false || c_blockCommitNo == block->failNo)){
+ infoEvent("Possible bug in Dbdih::execBLOCK_COMMIT_ORD c_blockCommit = %d c_blockCommitNo = %d"
+ " sig->failNo = %d", c_blockCommit, c_blockCommitNo, block->failNo);
+ }
+#endif
+ c_blockCommit = true;
+ c_blockCommitNo = block->failNo;
+}
+
+void Dbdih::execUNBLOCK_COMMIT_ORD(Signal* signal){
+ UnblockCommitOrd* const unblock = (UnblockCommitOrd *)&signal->theData[0];
+ (void)unblock;
+
+ jamEntry();
+
+ if(c_blockCommit == true){
+ jam();
+ // ndbrequire(c_blockCommitNo == unblock->failNo);
+
+ c_blockCommit = false;
+ emptyverificbuffer(signal, true);
+ }
+}
+
+void Dbdih::execSTOP_PERM_REQ(Signal* signal){
+
+ jamEntry();
+
+ StopPermReq* const req = (StopPermReq*)&signal->theData[0];
+ StopPermRef* const ref = (StopPermRef*)&signal->theData[0];
+
+ const Uint32 senderData = req->senderData;
+ const BlockReference senderRef = req->senderRef;
+ const NodeId nodeId = refToNode(senderRef);
+
+ if (isMaster()) {
+ /**
+ * Master
+ */
+ jam();
+ CRASH_INSERTION(7065);
+ if (c_stopPermMaster.clientRef != 0) {
+ jam();
+
+ ref->senderData = senderData;
+ ref->errorCode = StopPermRef::NodeShutdownInProgress;
+ sendSignal(senderRef, GSN_STOP_PERM_REF, signal,
+ StopPermRef::SignalLength, JBB);
+ return;
+ }//if
+
+ if (c_nodeStartMaster.activeState) {
+ jam();
+ ref->senderData = senderData;
+ ref->errorCode = StopPermRef::NodeStartInProgress;
+ sendSignal(senderRef, GSN_STOP_PERM_REF, signal,
+ StopPermRef::SignalLength, JBB);
+ return;
+ }//if
+
+ /**
+ * Lock
+ */
+ c_nodeStartMaster.activeState = true;
+ c_stopPermMaster.clientRef = senderRef;
+
+ c_stopPermMaster.clientData = senderData;
+ c_stopPermMaster.returnValue = 0;
+ c_switchReplicas.clear();
+
+ Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle);
+ Callback c = { safe_cast(&Dbdih::switch_primary_stop_node), nodeId };
+ ndbrequire(mutex.lock(c));
+ } else {
+ /**
+ * Proxy part
+ */
+ jam();
+ CRASH_INSERTION(7066);
+ if(c_stopPermProxy.clientRef != 0){
+ jam();
+ ref->senderData = senderData;
+ ref->errorCode = StopPermRef::NodeShutdownInProgress;
+ sendSignal(senderRef, GSN_STOP_PERM_REF, signal, 2, JBB);
+ return;
+ }//if
+
+ c_stopPermProxy.clientRef = senderRef;
+ c_stopPermProxy.masterRef = cmasterdihref;
+ c_stopPermProxy.clientData = senderData;
+
+ req->senderRef = reference();
+ req->senderData = senderData;
+ sendSignal(cmasterdihref, GSN_STOP_PERM_REQ, signal,
+ StopPermReq::SignalLength, JBB);
+ }//if
+}//Dbdih::execSTOP_PERM_REQ()
+
+void
+Dbdih::switch_primary_stop_node(Signal* signal, Uint32 node_id, Uint32 ret_val)
+{
+ ndbrequire(ret_val == 0);
+ signal->theData[0] = DihContinueB::SwitchReplica;
+ signal->theData[1] = node_id;
+ signal->theData[2] = 0; // table id
+ signal->theData[3] = 0; // fragment id
+ sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
+}
+
+void Dbdih::execSTOP_PERM_REF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(c_stopPermProxy.clientRef != 0);
+ ndbrequire(c_stopPermProxy.masterRef == signal->senderBlockRef());
+ sendSignal(c_stopPermProxy.clientRef, GSN_STOP_PERM_REF, signal, 2, JBB);
+ c_stopPermProxy.clientRef = 0;
+}//Dbdih::execSTOP_PERM_REF()
+
+void Dbdih::execSTOP_PERM_CONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(c_stopPermProxy.clientRef != 0);
+ ndbrequire(c_stopPermProxy.masterRef == signal->senderBlockRef());
+ sendSignal(c_stopPermProxy.clientRef, GSN_STOP_PERM_CONF, signal, 1, JBB);
+ c_stopPermProxy.clientRef = 0;
+}//Dbdih::execSTOP_PERM_CONF()
+
+void Dbdih::execDIH_SWITCH_REPLICA_REQ(Signal* signal)
+{
+ jamEntry();
+ DihSwitchReplicaReq* const req = (DihSwitchReplicaReq*)&signal->theData[0];
+ const Uint32 tableId = req->tableId;
+ const Uint32 fragNo = req->fragNo;
+ const BlockReference senderRef = req->senderRef;
+
+ CRASH_INSERTION(7067);
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
+ if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
+ jam();
+ sendSignal(reference(), GSN_DIH_SWITCH_REPLICA_REQ, signal,
+ DihSwitchReplicaReq::SignalLength, JBB);
+ return;
+ }//if
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragNo, fragPtr);
+
+ /**
+ * Do funky stuff
+ */
+ Uint32 oldOrder[MAX_REPLICAS];
+ const Uint32 noOfReplicas = extractNodeInfo(fragPtr.p, oldOrder);
+
+ if (noOfReplicas < req->noOfReplicas) {
+ jam();
+ //---------------------------------------------------------------------
+ // A crash occurred in the middle of our switch handling.
+ //---------------------------------------------------------------------
+ DihSwitchReplicaRef* const ref = (DihSwitchReplicaRef*)&signal->theData[0];
+ ref->senderNode = cownNodeId;
+ ref->errorCode = StopPermRef::NF_CausedAbortOfStopProcedure;
+ sendSignal(senderRef, GSN_DIH_SWITCH_REPLICA_REF, signal,
+ DihSwitchReplicaRef::SignalLength, JBB);
+ }//if
+ for (Uint32 i = 0; i < noOfReplicas; i++) {
+ jam();
+ ndbrequire(i < MAX_REPLICAS);
+ fragPtr.p->activeNodes[i] = req->newNodeOrder[i];
+ }//for
+ /**
+ * Reply
+ */
+ DihSwitchReplicaConf* const conf = (DihSwitchReplicaConf*)&signal->theData[0];
+ conf->senderNode = cownNodeId;
+ sendSignal(senderRef, GSN_DIH_SWITCH_REPLICA_CONF, signal,
+ DihSwitchReplicaConf::SignalLength, JBB);
+}//Dbdih::execDIH_SWITCH_REPLICA_REQ()
+
+void Dbdih::execDIH_SWITCH_REPLICA_CONF(Signal* signal)
+{
+ jamEntry();
+ /**
+ * Response to master
+ */
+ CRASH_INSERTION(7068);
+ DihSwitchReplicaConf* const conf = (DihSwitchReplicaConf*)&signal->theData[0];
+ switchReplicaReply(signal, conf->senderNode);
+}//Dbdih::execDIH_SWITCH_REPLICA_CONF()
+
+void Dbdih::execDIH_SWITCH_REPLICA_REF(Signal* signal)
+{
+ jamEntry();
+ DihSwitchReplicaRef* const ref = (DihSwitchReplicaRef*)&signal->theData[0];
+ if(c_stopPermMaster.returnValue == 0){
+ jam();
+ c_stopPermMaster.returnValue = ref->errorCode;
+ }//if
+ switchReplicaReply(signal, ref->senderNode);
+}//Dbdih::execDIH_SWITCH_REPLICA_REF()
+
+void Dbdih::switchReplicaReply(Signal* signal,
+ NodeId nodeId){
+ jam();
+ receiveLoopMacro(DIH_SWITCH_REPLICA_REQ, nodeId);
+ //------------------------------------------------------
+ // We have received all responses from the nodes. Thus
+ // we have completed switching replica roles. Continue
+ // with the next fragment.
+ //------------------------------------------------------
+ if(c_stopPermMaster.returnValue != 0){
+ jam();
+ c_switchReplicas.tableId = ctabFileSize + 1;
+ }//if
+ c_switchReplicas.fragNo++;
+
+ signal->theData[0] = DihContinueB::SwitchReplica;
+ signal->theData[1] = c_switchReplicas.nodeId;
+ signal->theData[2] = c_switchReplicas.tableId;
+ signal->theData[3] = c_switchReplicas.fragNo;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
+}//Dbdih::switchReplicaReply()
+
+void
+Dbdih::switchReplica(Signal* signal,
+ Uint32 nodeId,
+ Uint32 tableId,
+ Uint32 fragNo){
+ jam();
+ DihSwitchReplicaReq* const req = (DihSwitchReplicaReq*)&signal->theData[0];
+
+ const Uint32 RT_BREAK = 64;
+
+ for (Uint32 i = 0; i < RT_BREAK; i++) {
+ jam();
+ if (tableId >= ctabFileSize) {
+ jam();
+ StopPermConf* const conf = (StopPermConf*)&signal->theData[0];
+ StopPermRef* const ref = (StopPermRef*)&signal->theData[0];
+ /**
+ * Finished with all tables
+ */
+ if(c_stopPermMaster.returnValue == 0) {
+ jam();
+ conf->senderData = c_stopPermMaster.clientData;
+ sendSignal(c_stopPermMaster.clientRef, GSN_STOP_PERM_CONF,
+ signal, 1, JBB);
+ } else {
+ jam();
+ ref->senderData = c_stopPermMaster.clientData;
+ ref->errorCode = c_stopPermMaster.returnValue;
+ sendSignal(c_stopPermMaster.clientRef, GSN_STOP_PERM_REF, signal, 2,JBB);
+ }//if
+
+ /**
+ * UnLock
+ */
+ c_nodeStartMaster.activeState = false;
+ c_stopPermMaster.clientRef = 0;
+ c_stopPermMaster.clientData = 0;
+ c_stopPermMaster.returnValue = 0;
+ Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle);
+ mutex.unlock(); // ignore result
+ return;
+ }//if
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) {
+ jam();
+ tableId++;
+ fragNo = 0;
+ continue;
+ }//if
+ if (fragNo >= tabPtr.p->totalfragments) {
+ jam();
+ tableId++;
+ fragNo = 0;
+ continue;
+ }//if
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragNo, fragPtr);
+
+ Uint32 oldOrder[MAX_REPLICAS];
+ const Uint32 noOfReplicas = extractNodeInfo(fragPtr.p, oldOrder);
+
+ if(oldOrder[0] != nodeId) {
+ jam();
+ fragNo++;
+ continue;
+ }//if
+ req->tableId = tableId;
+ req->fragNo = fragNo;
+ req->noOfReplicas = noOfReplicas;
+ for (Uint32 i = 0; i < (noOfReplicas - 1); i++) {
+ req->newNodeOrder[i] = oldOrder[i+1];
+ }//for
+ req->newNodeOrder[noOfReplicas-1] = nodeId;
+ req->senderRef = reference();
+
+ /**
+ * Initialize struct
+ */
+ c_switchReplicas.tableId = tableId;
+ c_switchReplicas.fragNo = fragNo;
+ c_switchReplicas.nodeId = nodeId;
+
+ sendLoopMacro(DIH_SWITCH_REPLICA_REQ, sendDIH_SWITCH_REPLICA_REQ);
+ return;
+ }//for
+
+ signal->theData[0] = DihContinueB::SwitchReplica;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tableId;
+ signal->theData[3] = fragNo;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
+}//Dbdih::switchReplica()
+
+void Dbdih::execSTOP_ME_REQ(Signal* signal)
+{
+ jamEntry();
+ StopMeReq* const req = (StopMeReq*)&signal->theData[0];
+ const BlockReference senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+ const Uint32 nodeId = refToNode(senderRef);
+ {
+ /**
+ * Set node dead (remove from operations)
+ */
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ nodePtr.p->useInTransactions = false;
+ }
+ if (nodeId != getOwnNodeId()) {
+ jam();
+ StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0];
+ stopMeConf->senderData = senderData;
+ stopMeConf->senderRef = reference();
+ sendSignal(senderRef, GSN_STOP_ME_CONF, signal,
+ StopMeConf::SignalLength, JBB);
+ return;
+ }//if
+
+ /**
+ * Local signal
+ */
+ jam();
+ ndbrequire(c_stopMe.clientRef == 0);
+
+ c_stopMe.clientData = senderData;
+ c_stopMe.clientRef = senderRef;
+
+ req->senderData = senderData;
+ req->senderRef = reference();
+
+ sendLoopMacro(STOP_ME_REQ, sendSTOP_ME_REQ);
+
+ /**
+ * Send conf to self
+ */
+ StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0];
+ stopMeConf->senderData = senderData;
+ stopMeConf->senderRef = reference();
+ sendSignal(reference(), GSN_STOP_ME_CONF, signal,
+ StopMeConf::SignalLength, JBB);
+}//Dbdih::execSTOP_ME_REQ()
+
+void Dbdih::execSTOP_ME_REF(Signal* signal)
+{
+ ndbrequire(false);
+}
+
+void Dbdih::execSTOP_ME_CONF(Signal* signal)
+{
+ jamEntry();
+ StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0];
+
+ const Uint32 senderRef = stopMeConf->senderRef;
+ const Uint32 senderData = stopMeConf->senderData;
+ const Uint32 nodeId = refToNode(senderRef);
+
+ ndbrequire(c_stopMe.clientRef != 0);
+ ndbrequire(c_stopMe.clientData == senderData);
+
+ receiveLoopMacro(STOP_ME_REQ, nodeId);
+ //---------------------------------------------------------
+ // All STOP_ME_REQ have been received. We will send the
+ // confirmation back to the requesting block.
+ //---------------------------------------------------------
+
+ stopMeConf->senderRef = reference();
+ stopMeConf->senderData = c_stopMe.clientData;
+ sendSignal(c_stopMe.clientRef, GSN_STOP_ME_CONF, signal,
+ StopMeConf::SignalLength, JBB);
+ c_stopMe.clientRef = 0;
+}//Dbdih::execSTOP_ME_CONF()
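+/**
+ * STOP_ME handling follows the block's fan-out idiom (a summary, assuming
+ * the usual sendLoopMacro/receiveLoopMacro semantics): the request is sent
+ * to every participating node and the recipients are recorded in
+ * c_STOP_ME_REQ_Counter; receiveLoopMacro clears each responder and only
+ * falls through to the final confirmation once every node has answered.
+ */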
+
+void Dbdih::execWAIT_GCP_REQ(Signal* signal)
+{
+ jamEntry();
+ WaitGCPReq* const req = (WaitGCPReq*)&signal->theData[0];
+ WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0];
+ WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0];
+ const Uint32 senderData = req->senderData;
+ const BlockReference senderRef = req->senderRef;
+ const Uint32 requestType = req->requestType;
+
+ if(requestType == WaitGCPReq::CurrentGCI) {
+ jam();
+ conf->senderData = senderData;
+ conf->gcp = cnewgcp;
+ sendSignal(senderRef, GSN_WAIT_GCP_CONF, signal,
+ WaitGCPConf::SignalLength, JBB);
+ return;
+ }//if
+
+ if(isMaster()) {
+ /**
+ * Master
+ */
+ jam();
+
+ if((requestType == WaitGCPReq::CompleteIfRunning) &&
+ (cgcpStatus == GCP_READY)) {
+ jam();
+ conf->senderData = senderData;
+ conf->gcp = coldgcp;
+ sendSignal(senderRef, GSN_WAIT_GCP_CONF, signal,
+ WaitGCPConf::SignalLength, JBB);
+ return;
+ }//if
+
+ WaitGCPMasterPtr ptr;
+ if(c_waitGCPMasterList.seize(ptr) == false){
+ jam();
+ ref->senderData = senderData;
+ ref->errorCode = WaitGCPRef::NoWaitGCPRecords;
+ sendSignal(senderRef, GSN_WAIT_GCP_REF, signal,
+ WaitGCPRef::SignalLength, JBB);
+ return;
+ }//if
+ ptr.p->clientRef = senderRef;
+ ptr.p->clientData = senderData;
+
+ if((requestType == WaitGCPReq::CompleteForceStart) &&
+ (cgcpStatus == GCP_READY)) {
+ jam();
+ cstartGcpNow = true;
+ }//if
+ return;
+ } else {
+ /**
+ * Proxy part
+ */
+ jam();
+ WaitGCPProxyPtr ptr;
+ if (c_waitGCPProxyList.seize(ptr) == false) {
+ jam();
+ ref->senderData = senderData;
+ ref->errorCode = WaitGCPRef::NoWaitGCPRecords;
+ sendSignal(senderRef, GSN_WAIT_GCP_REF, signal,
+ WaitGCPRef::SignalLength, JBB);
+ return;
+ }//if
+ ptr.p->clientRef = senderRef;
+ ptr.p->clientData = senderData;
+ ptr.p->masterRef = cmasterdihref;
+
+ req->senderData = ptr.i;
+ req->senderRef = reference();
+ req->requestType = requestType;
+
+ sendSignal(cmasterdihref, GSN_WAIT_GCP_REQ, signal,
+ WaitGCPReq::SignalLength, JBB);
+ return;
+ }//if
+}//Dbdih::execWAIT_GCP_REQ()
+
+void Dbdih::execWAIT_GCP_REF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(!isMaster());
+ WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0];
+
+ const Uint32 proxyPtr = ref->senderData;
+ const Uint32 errorCode = ref->errorCode;
+
+ WaitGCPProxyPtr ptr;
+ ptr.i = proxyPtr;
+ c_waitGCPProxyList.getPtr(ptr);
+
+ ref->senderData = ptr.p->clientData;
+ ref->errorCode = errorCode;
+ sendSignal(ptr.p->clientRef, GSN_WAIT_GCP_REF, signal,
+ WaitGCPRef::SignalLength, JBB);
+
+ c_waitGCPProxyList.release(ptr);
+}//Dbdih::execWAIT_GCP_REF()
+
+void Dbdih::execWAIT_GCP_CONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(!isMaster());
+ WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0];
+ const Uint32 proxyPtr = conf->senderData;
+ const Uint32 gcp = conf->gcp;
+ WaitGCPProxyPtr ptr;
+
+ ptr.i = proxyPtr;
+ c_waitGCPProxyList.getPtr(ptr);
+
+ conf->senderData = ptr.p->clientData;
+ conf->gcp = gcp;
+ sendSignal(ptr.p->clientRef, GSN_WAIT_GCP_CONF, signal,
+ WaitGCPConf::SignalLength, JBB);
+
+ c_waitGCPProxyList.release(ptr);
+}//Dbdih::execWAIT_GCP_CONF()
+
+void Dbdih::checkWaitGCPProxy(Signal* signal, NodeId failedNodeId)
+{
+ jam();
+ WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0];
+ ref->errorCode = WaitGCPRef::NF_CausedAbortOfProcedure;
+
+ WaitGCPProxyPtr ptr;
+ c_waitGCPProxyList.first(ptr);
+ while(ptr.i != RNIL) {
+ jam();
+ const Uint32 i = ptr.i;
+ const Uint32 clientData = ptr.p->clientData;
+ const BlockReference clientRef = ptr.p->clientRef;
+ const BlockReference masterRef = ptr.p->masterRef;
+
+ c_waitGCPProxyList.next(ptr);
+ if(refToNode(masterRef) == failedNodeId) {
+ jam();
+ c_waitGCPProxyList.release(i);
+ ref->senderData = clientData;
+ sendSignal(clientRef, GSN_WAIT_GCP_REF, signal,
+ WaitGCPRef::SignalLength, JBB);
+ }//if
+ }//while
+}//Dbdih::checkWaitGCPProxy()
+
+void Dbdih::checkWaitGCPMaster(Signal* signal, NodeId failedNodeId)
+{
+ jam();
+ WaitGCPMasterPtr ptr;
+ c_waitGCPMasterList.first(ptr);
+
+ while (ptr.i != RNIL) {
+ jam();
+ const Uint32 i = ptr.i;
+ const NodeId nodeId = refToNode(ptr.p->clientRef);
+
+ c_waitGCPMasterList.next(ptr);
+ if (nodeId == failedNodeId) {
+ jam();
+ c_waitGCPMasterList.release(i);
+ }//if
+ }//while
+}//Dbdih::checkWaitGCPMaster()
+
+void Dbdih::emptyWaitGCPMasterQueue(Signal* signal)
+{
+ jam();
+ WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0];
+ conf->gcp = coldgcp;
+
+ WaitGCPMasterPtr ptr;
+ c_waitGCPMasterList.first(ptr);
+ while(ptr.i != RNIL) {
+ jam();
+ const Uint32 i = ptr.i;
+ const Uint32 clientData = ptr.p->clientData;
+ const BlockReference clientRef = ptr.p->clientRef;
+
+ c_waitGCPMasterList.next(ptr);
+ conf->senderData = clientData;
+ sendSignal(clientRef, GSN_WAIT_GCP_CONF, signal,
+ WaitGCPConf::SignalLength, JBB);
+
+ c_waitGCPMasterList.release(i);
+ }//while
+}//Dbdih::emptyWaitGCPMasterQueue()
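+/**
+ * Taken together, execWAIT_GCP_REQ/REF/CONF implement a simple proxy
+ * scheme: a non-master DIH parks the client in c_waitGCPProxyList and
+ * forwards the request to the master, which queues it in
+ * c_waitGCPMasterList until a GCP completes; emptyWaitGCPMasterQueue then
+ * answers every waiter with the completed GCI (coldgcp), while the
+ * checkWaitGCP* routines clean up both lists if either side fails first.
+ */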
+
+void Dbdih::setNodeStatus(Uint32 nodeId, NodeRecord::NodeStatus newStatus)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ nodePtr.p->nodeStatus = newStatus;
+}//Dbdih::setNodeStatus()
+
+Dbdih::NodeRecord::NodeStatus Dbdih::getNodeStatus(Uint32 nodeId)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ return nodePtr.p->nodeStatus;
+}//Dbdih::getNodeStatus()
+
+Sysfile::ActiveStatus
+Dbdih::getNodeActiveStatus(Uint32 nodeId)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ return nodePtr.p->activeStatus;
+}//Dbdih::getNodeActiveStatus()
+
+
+void
+Dbdih::setNodeActiveStatus(Uint32 nodeId, Sysfile::ActiveStatus newStatus)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ nodePtr.p->activeStatus = newStatus;
+}//Dbdih::setNodeActiveStatus()
+
+void Dbdih::setAllowNodeStart(Uint32 nodeId, bool newState)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ nodePtr.p->allowNodeStart = newState;
+}//Dbdih::setAllowNodeStart()
+
+void Dbdih::setNodeCopyCompleted(Uint32 nodeId, bool newState)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ nodePtr.p->copyCompleted = newState;
+}//Dbdih::setNodeCopyCompleted()
+
+bool Dbdih::getAllowNodeStart(Uint32 nodeId)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ return nodePtr.p->allowNodeStart;
+}//Dbdih::getAllowNodeStart()
+
+bool Dbdih::getNodeCopyCompleted(Uint32 nodeId)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ return nodePtr.p->copyCompleted;
+}//Dbdih::getNodeCopyCompleted()
+
+bool Dbdih::checkNodeAlive(Uint32 nodeId)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ndbrequire(nodeId > 0);
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->nodeStatus != NodeRecord::ALIVE) {
+ return false;
+ } else {
+ return true;
+ }//if
+}//Dbdih::checkNodeAlive()
+
+bool Dbdih::isMaster()
+{
+ return (reference() == cmasterdihref);
+}//Dbdih::isMaster()
+
+bool Dbdih::isActiveMaster()
+{
+ return ((reference() == cmasterdihref) && (cmasterState == MASTER_ACTIVE));
+}//Dbdih::isActiveMaster()
+
+Dbdih::NodeRecord::NodeRecord(){
+ m_nodefailSteps.clear();
+ gcpstate = NodeRecord::READY;
+
+ activeStatus = Sysfile::NS_NotDefined;
+ recNODE_FAILREP = ZFALSE;
+ nodeGroup = ZNIL;
+ dbtcFailCompleted = ZTRUE;
+ dbdictFailCompleted = ZTRUE;
+ dbdihFailCompleted = ZTRUE;
+ dblqhFailCompleted = ZTRUE;
+ noOfStartedChkpt = 0;
+ noOfQueuedChkpt = 0;
+ lcpStateAtTakeOver = (MasterLCPConf::State)255;
+
+ activeTabptr = RNIL;
+ nodeStatus = NodeRecord::NOT_IN_CLUSTER;
+ useInTransactions = false;
+ copyCompleted = false;
+ allowNodeStart = true;
+}
diff --git a/ndb/src/kernel/blocks/dbdih/LCP.txt b/storage/ndb/src/kernel/blocks/dbdih/LCP.txt
index 500c82f6baf..500c82f6baf 100644
--- a/ndb/src/kernel/blocks/dbdih/LCP.txt
+++ b/storage/ndb/src/kernel/blocks/dbdih/LCP.txt
diff --git a/storage/ndb/src/kernel/blocks/dbdih/Makefile.am b/storage/ndb/src/kernel/blocks/dbdih/Makefile.am
new file mode 100644
index 00000000000..0b35884a586
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdih/Makefile.am
@@ -0,0 +1,23 @@
+noinst_LIBRARIES = libdbdih.a
+
+libdbdih_a_SOURCES = DbdihInit.cpp DbdihMain.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdbdih.dsp
+
+libdbdih.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libdbdih_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbdih/Sysfile.hpp b/storage/ndb/src/kernel/blocks/dbdih/Sysfile.hpp
index 3e2f3b0dd48..3e2f3b0dd48 100644
--- a/ndb/src/kernel/blocks/dbdih/Sysfile.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/Sysfile.hpp
diff --git a/ndb/src/kernel/blocks/dbdih/printSysfile/Makefile b/storage/ndb/src/kernel/blocks/dbdih/printSysfile/Makefile
index 4c4b1026aff..4c4b1026aff 100644
--- a/ndb/src/kernel/blocks/dbdih/printSysfile/Makefile
+++ b/storage/ndb/src/kernel/blocks/dbdih/printSysfile/Makefile
diff --git a/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp b/storage/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp
index efa4b9c92c5..efa4b9c92c5 100644
--- a/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp
diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
new file mode 100644
index 00000000000..5328f42ba83
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -0,0 +1,2956 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBLQH_H
+#define DBLQH_H
+
+#include <pc.hpp>
+#include <ndb_limits.h>
+#include <SimulatedBlock.hpp>
+#include <DLList.hpp>
+#include <DLFifoList.hpp>
+#include <DLHashTable.hpp>
+
+#include <NodeBitmask.hpp>
+#include <signaldata/LCP.hpp>
+#include <signaldata/LqhTransConf.hpp>
+#include <signaldata/LqhFrag.hpp>
+
+// primary key is stored in TUP
+#include <../dbtup/Dbtup.hpp>
+
+#ifdef DBLQH_C
+// Constants
+/* ------------------------------------------------------------------------- */
+/* CONSTANTS USED WHEN MASTER REQUESTS STATE OF COPY FRAGMENTS. */
+/* ------------------------------------------------------------------------- */
+#define ZCOPY_CLOSING 0
+#define ZCOPY_ONGOING 1
+#define ZCOPY_ACTIVATION 2
+/* ------------------------------------------------------------------------- */
+/* STATES FOR THE VARIABLE GCP_LOG_PART_STATE */
+/* ------------------------------------------------------------------------- */
+#define ZIDLE 0
+#define ZWAIT_DISK 1
+#define ZON_DISK 2
+#define ZACTIVE 1
+/* ------------------------------------------------------------------------- */
+/* STATES FOR THE VARIABLE CSR_PHASES_STARTED */
+/* ------------------------------------------------------------------------- */
+#define ZSR_NO_PHASE_STARTED 0
+#define ZSR_PHASE1_COMPLETED 1
+#define ZSR_PHASE2_COMPLETED 2
+#define ZSR_BOTH_PHASES_STARTED 3
+/* ------------------------------------------------------------------------- */
+/* THE NUMBER OF PAGES IN A MBYTE, THE TWO LOGARITHM OF THIS. */
+/* THE NUMBER OF MBYTES IN A LOG FILE. */
+/* THE MAX NUMBER OF PAGES READ/WRITTEN FROM/TO DISK DURING */
+/* A WRITE OR READ. */
+/* ------------------------------------------------------------------------- */
+#define ZNOT_DIRTY 0
+#define ZDIRTY 1
+#define ZREAD_AHEAD_SIZE 8
+/* ------------------------------------------------------------------------- */
+/* CONSTANTS OF THE LOG PAGES */
+/* ------------------------------------------------------------------------- */
+#define ZPAGE_HEADER_SIZE 32
+#define ZNO_MBYTES_IN_FILE 16
+#define ZPAGE_SIZE 8192
+#define ZPAGES_IN_MBYTE 32
+#define ZTWOLOG_NO_PAGES_IN_MBYTE 5
+#define ZTWOLOG_PAGE_SIZE 13
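+// Consistency check of the sizes above: a log page is ZPAGE_SIZE = 8192
+// words (32 KB with 4-byte words), so ZPAGES_IN_MBYTE = 1 MB / 32 KB = 32,
+// ZTWOLOG_NO_PAGES_IN_MBYTE = log2(32) = 5 and ZTWOLOG_PAGE_SIZE =
+// log2(8192) = 13.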
+#define ZMAX_MM_BUFFER_SIZE 32 // Main memory window during log execution
+
+#define ZMAX_PAGES_WRITTEN 8 // Max pages before writing to disk (=> config)
+#define ZMIN_READ_BUFFER_SIZE 2 // Minimum number of pages to execute log
+#define ZMIN_LOG_PAGES_OPERATION 10 // Minimum no of pages before stopping
+
+#define ZPOS_CHECKSUM 0
+#define ZPOS_LOG_LAP 1
+#define ZPOS_MAX_GCI_COMPLETED 2
+#define ZPOS_MAX_GCI_STARTED 3
+#define ZNEXT_PAGE 4
+#define ZPREV_PAGE 5
+#define ZPOS_VERSION 6
+#define ZPOS_NO_LOG_FILES 7
+#define ZCURR_PAGE_INDEX 8
+#define ZLAST_LOG_PREP_REF 10
+#define ZPOS_DIRTY 11
+/* ------------------------------------------------------------------------- */
+/* CONSTANTS FOR THE VARIOUS REPLICA AND NODE TYPES. */
+/* ------------------------------------------------------------------------- */
+#define ZPRIMARY_NODE 0
+#define ZBACKUP_NODE 1
+#define ZSTANDBY_NODE 2
+#define ZTC_NODE 3
+#define ZLOG_NODE 3
+/* ------------------------------------------------------------------------- */
+/* VARIOUS CONSTANTS USED AS FLAGS TO THE FILE MANAGER. */
+/* ------------------------------------------------------------------------- */
+#define ZOPEN_READ 0
+#define ZOPEN_WRITE 1
+#define ZOPEN_READ_WRITE 2
+#define ZVAR_NO_LOG_PAGE_WORD 1
+#define ZLIST_OF_PAIRS 0
+#define ZLIST_OF_PAIRS_SYNCH 16
+#define ZARRAY_OF_PAGES 1
+#define ZLIST_OF_MEM_PAGES 2
+#define ZLIST_OF_MEM_PAGES_SYNCH 18
+#define ZCLOSE_NO_DELETE 0
+#define ZCLOSE_DELETE 1
+#define ZPAGE_ZERO 0
+/* ------------------------------------------------------------------------- */
+/* THE FOLLOWING CONSTANTS ARE USED TO DESCRIBE THE TYPES OF */
+/* LOG RECORDS, THE SIZE OF THE VARIOUS LOG RECORD TYPES AND */
+/* THE POSITIONS WITHIN THOSE LOG RECORDS. */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* THESE CONSTANTS DESCRIBE THE SIZES OF VARIOUS TYPES OF LOG RECORDS. */
+/* NEXT_LOG_SIZE IS ACTUALLY ONE. THE REASON WE SET IT TO 2 IS TO */
+/* SIMPLIFY THE CODE SINCE OTHERWISE WE WOULD HAVE TO USE A SPECIAL VERSION */
+/* OF READ_LOGWORD WHEN READING LOG RECORD TYPE */
+/* SINCE NEXT MBYTE TYPE COULD BE THE VERY LAST WORD IN THE MBYTE. */
+/* BY SETTING IT TO 2 WE ENSURE IT IS NEVER THE VERY LAST WORD */
+/* IN THE MBYTE. */
+/* ------------------------------------------------------------------------- */
+#define ZFD_HEADER_SIZE 3
+#define ZFD_PART_SIZE 48
+#define ZLOG_HEAD_SIZE 6
+#define ZNEXT_LOG_SIZE 2
+#define ZABORT_LOG_SIZE 3
+#define ZCOMMIT_LOG_SIZE 9
+#define ZCOMPLETED_GCI_LOG_SIZE 2
+/* ------------------------------------------------------------------------- */
+/* THESE CONSTANTS DESCRIBE THE TYPE OF A LOG RECORD. */
+/* THIS IS THE FIRST WORD OF A LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+#define ZNEW_PREP_OP_TYPE 0
+#define ZPREP_OP_TYPE 1
+#define ZCOMMIT_TYPE 2
+#define ZABORT_TYPE 3
+#define ZFD_TYPE 4
+#define ZFRAG_SPLIT_TYPE 5
+#define ZNEXT_LOG_RECORD_TYPE 6
+#define ZNEXT_MBYTE_TYPE 7
+#define ZCOMPLETED_GCI_TYPE 8
+#define ZINVALID_COMMIT_TYPE 9
+/* ------------------------------------------------------------------------- */
+/* THE POSITIONS OF LOGGED DATA IN A FILE DESCRIPTOR LOG RECORD HEADER.*/
+/* ALSO THE MAXIMUM NUMBER OF FILE DESCRIPTORS IN A LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+#define ZPOS_LOG_TYPE 0
+#define ZPOS_NO_FD 1
+#define ZPOS_FILE_NO 2
+#define ZMAX_LOG_FILES_IN_PAGE_ZERO 40
+/* ------------------------------------------------------------------------- */
+/* THE POSITIONS WITHIN A PREPARE LOG RECORD AND A NEW PREPARE */
+/* LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+#define ZPOS_HASH_VALUE 2
+#define ZPOS_SCHEMA_VERSION 3
+#define ZPOS_TRANS_TICKET 4
+#define ZPOS_OP_TYPE 5
+#define ZPOS_NO_ATTRINFO 6
+#define ZPOS_NO_KEYINFO 7
+/* ------------------------------------------------------------------------- */
+/* THE POSITIONS WITHIN A COMMIT LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+#define ZPOS_COMMIT_TRANSID1 1
+#define ZPOS_COMMIT_TRANSID2 2
+#define ZPOS_COMMIT_GCI 3
+#define ZPOS_COMMIT_TABLE_REF 4
+#define ZPOS_COMMIT_FRAGID 5
+#define ZPOS_COMMIT_FILE_NO 6
+#define ZPOS_COMMIT_START_PAGE_NO 7
+#define ZPOS_COMMIT_START_PAGE_INDEX 8
+#define ZPOS_COMMIT_STOP_PAGE_NO 9
+/* ------------------------------------------------------------------------- */
+/* THE POSITIONS WITHIN AN ABORT LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+#define ZPOS_ABORT_TRANSID1 1
+#define ZPOS_ABORT_TRANSID2 2
+/* ------------------------------------------------------------------------- */
+/* THE POSITION WITHIN A COMPLETED GCI LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+#define ZPOS_COMPLETED_GCI 1
+/* ------------------------------------------------------------------------- */
+/* THE POSITIONS WITHIN A NEW PREPARE LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+#define ZPOS_NEW_PREP_FILE_NO 8
+#define ZPOS_NEW_PREP_PAGE_REF 9
+
+#define ZLAST_WRITE_IN_FILE 1
+#define ZENFORCE_WRITE 2
+/* ------------------------------------------------------------------------- */
+/* CONSTANTS USED AS INPUT TO SUBROUTINE WRITE_LOG_PAGES AMONG OTHERS. */
+/* ------------------------------------------------------------------------- */
+#define ZNORMAL 0
+#define ZINIT 1
+/* ------------------------------------------------------------------------- */
+/* CONSTANTS USED BY CONTINUEB TO DEDUCE WHICH CONTINUE SIGNAL IS TO */
+/* BE EXECUTED AS A RESULT OF THIS CONTINUEB SIGNAL. */
+/* ------------------------------------------------------------------------- */
+#define ZLOG_LQHKEYREQ 0
+#define ZPACK_LQHKEYREQ 1
+#define ZSEND_ATTRINFO 2
+#define ZSR_GCI_LIMITS 3
+#define ZSR_LOG_LIMITS 4
+#define ZSEND_EXEC_CONF 5
+#define ZEXEC_SR 6
+#define ZSR_FOURTH_COMP 7
+#define ZINIT_FOURTH 8
+#define ZTIME_SUPERVISION 9
+#define ZSR_PHASE3_START 10
+#define ZLQH_TRANS_NEXT 11
+#define ZLQH_RELEASE_AT_NODE_FAILURE 12
+#define ZSCAN_TC_CONNECT 13
+#define ZINITIALISE_RECORDS 14
+#define ZINIT_GCP_REC 15
+#define ZRESTART_OPERATIONS_AFTER_STOP 16
+#define ZCHECK_LCP_STOP_BLOCKED 17
+#define ZSCAN_MARKERS 18
+#define ZOPERATION_EVENT_REP 19
+#define ZPREP_DROP_TABLE 20
+
+/* ------------------------------------------------------------------------- */
+/* NODE STATE DURING SYSTEM RESTART, VARIABLES CNODES_SR_STATE */
+/* AND CNODES_EXEC_SR_STATE. */
+/* ------------------------------------------------------------------------- */
+#define ZSTART_SR 1
+#define ZEXEC_SR_COMPLETED 2
+/* ------------------------------------------------------------------------- */
+/* CONSTANTS USED BY NODE STATUS TO DEDUCE THE STATUS OF A NODE. */
+/* ------------------------------------------------------------------------- */
+#define ZNODE_UP 0
+#define ZNODE_DOWN 1
+/* ------------------------------------------------------------------------- */
+/* START PHASES */
+/* ------------------------------------------------------------------------- */
+#define ZLAST_START_PHASE 255
+#define ZSTART_PHASE1 1
+#define ZSTART_PHASE2 2
+#define ZSTART_PHASE3 3
+#define ZSTART_PHASE4 4
+#define ZSTART_PHASE6 6
+/* ------------------------------------------------------------------------- */
+/* CONSTANTS USED BY SCAN AND COPY FRAGMENT PROCEDURES */
+/* ------------------------------------------------------------------------- */
+#define ZSTORED_PROC_SCAN 0
+#define ZSTORED_PROC_COPY 2
+#define ZDELETE_STORED_PROC_ID 3
+//#define ZSCAN_NEXT 1
+//#define ZSCAN_NEXT_COMMIT 2
+//#define ZSCAN_NEXT_ABORT 12
+#define ZCOPY_COMMIT 3
+#define ZCOPY_REPEAT 4
+#define ZCOPY_ABORT 5
+#define ZCOPY_CLOSE 6
+//#define ZSCAN_CLOSE 6
+//#define ZEMPTY_FRAGMENT 0
+#define ZWRITE_LOCK 1
+#define ZSCAN_FRAG_CLOSED 2
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES ADDED IN VERSION 0.1 AND 0.2 */
+/* ------------------------------------------------------------------------- */
+#define ZNOT_FOUND 1 // Not an error code, a return value
+#define ZNO_FREE_LQH_CONNECTION 414
+#define ZGET_DATAREC_ERROR 418
+#define ZGET_ATTRINBUF_ERROR 419
+#define ZNO_FREE_FRAGMENTREC 460 // Insert new fragment error code
+#define ZTAB_FILE_SIZE 464 // Insert new fragment error code + Start kernel
+#define ZNO_ADD_FRAGREC 465 // Insert new fragment error code
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES ADDED IN VERSION 0.3 */
+/* ------------------------------------------------------------------------- */
+#define ZTAIL_PROBLEM_IN_LOG_ERROR 410
+#define ZGCI_TOO_LOW_ERROR 429 // GCP_SAVEREF error code
+#define ZTAB_STATE_ERROR 474 // Insert new fragment error code
+#define ZTOO_NEW_GCI_ERROR 479 // LCP Start error
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES ADDED IN VERSION 0.4 */
+/* ------------------------------------------------------------------------- */
+
+#define ZNO_FREE_FRAG_SCAN_REC_ERROR 490 // SCAN_FRAGREF error code
+#define ZCOPY_NO_FRAGMENT_ERROR 491 // COPY_FRAGREF error code
+#define ZTAKE_OVER_ERROR 499
+#define ZCOPY_NODE_ERROR 1204
+#define ZTOO_MANY_COPY_ACTIVE_ERROR 1208 // COPY_FRAG and COPY_ACTIVEREF code
+#define ZCOPY_ACTIVE_ERROR 1210 // COPY_ACTIVEREF error code
+#define ZNO_TC_CONNECT_ERROR 1217 // Simple Read + SCAN
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES ADDED IN VERSION 1.X */
+/* ------------------------------------------------------------------------- */
+//#define ZSCAN_BOOK_ACC_OP_ERROR 1219 // SCAN_FRAGREF error code
+#define ZFILE_CHANGE_PROBLEM_IN_LOG_ERROR 1220
+#define ZTEMPORARY_REDO_LOG_FAILURE 1221
+#define ZNO_FREE_MARKER_RECORDS_ERROR 1222
+#define ZNODE_SHUTDOWN_IN_PROGESS 1223
+#define ZTOO_MANY_FRAGMENTS 1224
+#define ZTABLE_NOT_DEFINED 1225
+#define ZDROP_TABLE_IN_PROGRESS 1226
+#define ZINVALID_SCHEMA_VERSION 1227
+
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES ADDED IN VERSION 2.X */
+/* ------------------------------------------------------------------------- */
+#define ZNODE_FAILURE_ERROR 400
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES FROM ACC */
+/* ------------------------------------------------------------------------- */
+#define ZNO_TUPLE_FOUND 626
+#define ZTUPLE_ALREADY_EXIST 630
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES FROM TUP */
+/* ------------------------------------------------------------------------- */
+#define ZSEARCH_CONDITION_FALSE 899
+#define ZUSER_ERROR_CODE_LIMIT 6000
+#endif
+
+/**
+ * @class dblqh
+ *
+ * @section secIntro Introduction
+ *
+ * Dblqh is the coordinator of the LDM. Dblqh is responsible for
+ * performing operations on tuples. It does this job with help of
+ * Dbacc block (that manages the index structures) and Dbtup
+ * (that manages the tuples).
+ *
+ * Dblqh also keeps track of the participants and acts as a coordinator of
+ * 2-phase commits. Logical redo logging is also handled by the Dblqh
+ * block.
+ *
+ * @section secModules Modules
+ *
+ * The code is partitioned into the following modules:
+ * - START / RESTART
+ * - Start phase 1: Load our block reference and our processor id
+ * - Start phase 2: Initiate all records within the block
+ * Connect LQH with ACC and TUP.
+ * - Start phase 4: Connect LQH with LQH. Connect every LQH with
+ * every LQH in the database system.
+ * If initial start, then create the fragment log files.
+ * If system restart or node restart,
+ * then open the fragment log files and
+ * find the end of the log files.
+ * - ADD / DELETE FRAGMENT<br>
+ * Used by dictionary to create new fragments and delete old fragments.
+ * - EXECUTION<br>
+ * handles the reception of lqhkeyreq and all processing
+ * of operations on behalf of this request.
+ *   This also involves reception of various types of attrinfo
+ *   and keyinfo, as well as communication with ACC and TUP.
+ * - LOG<br>
+ * The log module handles the reading and writing of the log.
+ * It is also responsible for handling system restart.
+ * It controls the system restart in TUP and ACC as well.
+ * - TRANSACTION<br>
+ * This module handles the commit and the complete phases.
+ * - MODULE TO HANDLE TC FAILURE<br>
+ * - SCAN<br>
+ * This module contains the code that handles a scan of a particular
+ * fragment.
+ * It operates under the control of TC and orders ACC to
+ * perform a scan of all tuples in the fragment.
+ *   TUP evaluates the necessary search conditions
+ * to ensure that only valid tuples are returned to the application.
+ * - NODE RECOVERY<br>
+ * Used when a node has failed.
+ * It performs a copy of a fragment to a new replica of the fragment.
+ *   It also shuts down all connections to the failed node.
+ * - LOCAL CHECKPOINT<br>
+ *   Handles execution and control of LCPs.
+ * It controls the LCPs in TUP and ACC.
+ * It also interacts with DIH to control which GCPs are recoverable.
+ * - GLOBAL CHECKPOINT<br>
+ * Helps DIH in discovering when GCPs are recoverable.
+ * It handles the request gcp_savereq that requests LQH to
+ * save a particular GCP to disk and respond when completed.
+ * - FILE HANDLING<br>
+ * With submodules:
+ * - SIGNAL RECEPTION
+ * - NORMAL OPERATION
+ * - FILE CHANGE
+ * - INITIAL START
+ * - SYSTEM RESTART PHASE ONE
+ * - SYSTEM RESTART PHASE TWO,
+ * - SYSTEM RESTART PHASE THREE
+ * - SYSTEM RESTART PHASE FOUR
+ * - ERROR
+ * - TEST
+ * - LOG
+ */
+class Dblqh: public SimulatedBlock {
+public:
+ enum LcpCloseState {
+ LCP_IDLE = 0,
+ LCP_RUNNING = 1, // LCP is running
+ LCP_CLOSE_STARTED = 2, // Completion(closing of files) has started
+ ACC_LCP_CLOSE_COMPLETED = 3,
+ TUP_LCP_CLOSE_COMPLETED = 4
+ };
+
+ enum ExecUndoLogState {
+ EULS_IDLE = 0,
+ EULS_STARTED = 1,
+ EULS_COMPLETED = 2,
+ EULS_ACC_COMPLETED = 3,
+ EULS_TUP_COMPLETED = 4
+ };
+
+ struct AddFragRecord {
+ enum AddFragStatus {
+ FREE = 0,
+ ACC_ADDFRAG = 1,
+ WAIT_TWO_TUP = 2,
+ WAIT_ONE_TUP = 3,
+ WAIT_TWO_TUX = 4,
+ WAIT_ONE_TUX = 5,
+ WAIT_ADD_ATTR = 6,
+ TUP_ATTR_WAIT1 = 7,
+ TUP_ATTR_WAIT2 = 8,
+ TUX_ATTR_WAIT1 = 9,
+ TUX_ATTR_WAIT2 = 10
+ };
+ LqhAddAttrReq::Entry attributes[LqhAddAttrReq::MAX_ATTRIBUTES];
+ UintR accConnectptr;
+ AddFragStatus addfragStatus;
+ UintR dictConnectptr;
+ UintR fragmentPtr;
+ UintR nextAddfragrec;
+ UintR noOfAllocPages;
+ UintR schemaVer;
+ UintR tup1Connectptr;
+ UintR tup2Connectptr;
+ UintR tux1Connectptr;
+ UintR tux2Connectptr;
+ UintR checksumIndicator;
+ UintR GCPIndicator;
+ BlockReference dictBlockref;
+ Uint32 m_senderAttrPtr;
+ Uint16 addfragErrorCode;
+ Uint16 attrSentToTup;
+ Uint16 attrReceived;
+ Uint16 addFragid;
+ Uint16 fragid1;
+ Uint16 fragid2;
+ Uint16 noOfAttr;
+ Uint16 noOfNull;
+ Uint16 tabId;
+ Uint16 totalAttrReceived;
+ Uint16 fragCopyCreation;
+ Uint16 noOfKeyAttr;
+ Uint32 noOfNewAttr; // noOfCharsets in upper half
+ Uint16 noOfAttributeGroups;
+ Uint16 lh3DistrBits;
+ Uint16 tableType;
+ Uint16 primaryTableId;
+ };// Size 108 bytes
+ typedef Ptr<AddFragRecord> AddFragRecordPtr;
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ ATTRIBUTE INFORMATION RECORD $$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * Can contain one (1) attrinfo signal.
+ * One signal contains 24 attr. info words.
+ * But 32 elements are used to make plex happy.
+   * Some of the elements are used for the following purposes:
+ * - Data length in this record is stored in the
+ * element indexed by ZINBUF_DATA_LEN.
+ * - Next attrinbuf is pointed out by the element
+ * indexed by ZINBUF_NEXT.
+ */
+ struct Attrbuf {
+ UintR attrbuf[32];
+ }; // Size 128 bytes
+ typedef Ptr<Attrbuf> AttrbufPtr;
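+
+  /*
+    Editorial sketch (assumptions: the ZINBUF_DATA_LEN and ZINBUF_NEXT
+    indices referred to in the comment above, and RNIL as the end-of-list
+    marker).  Walking a chain of attrinbufs could look roughly like this;
+    the helper is illustrative only and not part of the block interface:
+
+      Uint32 countAttrWords(const Attrbuf* pool, Uint32 first)
+      {
+        Uint32 total = 0;
+        for (Uint32 i = first; i != RNIL; i = pool[i].attrbuf[ZINBUF_NEXT])
+          total += pool[i].attrbuf[ZINBUF_DATA_LEN]; // words used in buffer i
+        return total;
+      }
+  */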
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ DATA BUFFER $$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * This buffer is used as a general data storage.
+ */
+ struct Databuf {
+ UintR data[4];
+ UintR nextDatabuf;
+ }; // size 20 bytes
+ typedef Ptr<Databuf> DatabufPtr;
+
+ struct ScanRecord {
+ enum ScanState {
+ SCAN_FREE = 0,
+ WAIT_STORED_PROC_COPY = 1,
+ WAIT_STORED_PROC_SCAN = 2,
+ WAIT_NEXT_SCAN_COPY = 3,
+ WAIT_NEXT_SCAN = 4,
+ WAIT_DELETE_STORED_PROC_ID_SCAN = 5,
+ WAIT_DELETE_STORED_PROC_ID_COPY = 6,
+ WAIT_ACC_COPY = 7,
+ WAIT_ACC_SCAN = 8,
+ WAIT_SCAN_NEXTREQ = 10,
+ WAIT_CLOSE_SCAN = 12,
+ WAIT_CLOSE_COPY = 13,
+ WAIT_RELEASE_LOCK = 14,
+ WAIT_TUPKEY_COPY = 15,
+ WAIT_LQHKEY_COPY = 16,
+ IN_QUEUE = 17
+ };
+ enum ScanType {
+ ST_IDLE = 0,
+ SCAN = 1,
+ COPY = 2
+ };
+
+ UintR scan_acc_op_ptr[32];
+ Uint32 scan_acc_index;
+ Uint32 scan_acc_attr_recs;
+ UintR scanApiOpPtr;
+ UintR scanLocalref[2];
+
+ Uint32 m_max_batch_size_rows;
+ Uint32 m_max_batch_size_bytes;
+
+ Uint32 m_curr_batch_size_rows;
+ Uint32 m_curr_batch_size_bytes;
+
+ bool check_scan_batch_completed() const;
+
+ UintR copyPtr;
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ Uint32 nextHash;
+ Uint32 prevHash;
+ bool equal(const ScanRecord & key) const {
+ return scanNumber == key.scanNumber && fragPtrI == key.fragPtrI;
+ }
+ Uint32 hashValue() const {
+ return fragPtrI ^ scanNumber;
+ }
+
+ UintR scanAccPtr;
+ UintR scanAiLength;
+ UintR scanErrorCounter;
+ UintR scanLocalFragid;
+ UintR scanSchemaVersion;
+
+ /**
+ * This is _always_ main table, even in range scan
+ * in which case scanTcrec->fragmentptr is different
+ */
+ Uint32 fragPtrI;
+ UintR scanStoredProcId;
+ ScanState scanState;
+ UintR scanTcrec;
+ ScanType scanType;
+ BlockReference scanApiBlockref;
+ NodeId scanNodeId;
+ Uint16 scanReleaseCounter;
+ Uint16 scanNumber;
+
+ // scan source block ACC TUX TUP
+ BlockReference scanBlockref;
+
+ Uint8 scanCompletedStatus;
+ Uint8 scanFlag;
+ Uint8 scanLockHold;
+ Uint8 scanLockMode;
+ Uint8 readCommitted;
+ Uint8 rangeScan;
+ Uint8 descending;
+ Uint8 tupScan;
+ Uint8 scanTcWaiting;
+ Uint8 scanKeyinfoFlag;
+ Uint8 m_last_row;
+ }; // Size 272 bytes
+ typedef Ptr<ScanRecord> ScanRecordPtr;
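+
+  /*
+    Editorial sketch: the m_max_batch_size_* / m_curr_batch_size_* fields
+    above suggest that check_scan_batch_completed() reports when either
+    configured limit has been reached.  A sketch of that reading only; the
+    real definition lives with the block implementation and may differ:
+
+      bool Dblqh::ScanRecord::check_scan_batch_completed() const
+      {
+        return (m_max_batch_size_rows > 0 &&
+                m_curr_batch_size_rows >= m_max_batch_size_rows) ||
+               (m_max_batch_size_bytes > 0 &&
+                m_curr_batch_size_bytes >= m_max_batch_size_bytes);
+      }
+  */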
+
+ struct Fragrecord {
+ enum ExecSrStatus {
+ IDLE = 0,
+ ACTIVE_REMOVE_AFTER = 1,
+ ACTIVE = 2
+ };
+ /**
+ * Possible state transitions are:
+ * - FREE -> DEFINED Fragment record is allocated
+ * - DEFINED -> ACTIVE Add fragment is completed and
+ * fragment is ready to
+ * receive operations.
+ * - DEFINED -> ACTIVE_CREATION Add fragment is completed and
+ * fragment is ready to
+ * receive operations in parallel
+ * with a copy fragment
+ * which is performed from the
+ * primary replica
+ * - DEFINED -> CRASH_RECOVERING A fragment is ready to be
+ * recovered from a local
+ * checkpoint on disk
+ * - ACTIVE -> BLOCKED A local checkpoint is to be
+ * started. No more operations
+ * are allowed to be started until
+ * the local checkpoint
+ * has been started.
+ * - ACTIVE -> REMOVING A fragment is removed from the node
+ * - BLOCKED -> ACTIVE Operations are allowed again in
+ * the fragment.
+     *  - CRASH_RECOVERING -> ACTIVE   A fragment has been recovered and
+     *                                 is now ready for
+     *                                 operations again.
+     *  - CRASH_RECOVERING -> REMOVING Fragment recovery failed or
+     *                                 was cancelled.
+     *  - ACTIVE_CREATION -> ACTIVE    A fragment has now been copied and
+     *                                 is now a normal fragment
+ * - ACTIVE_CREATION -> REMOVING Copying of the fragment failed
+ * - REMOVING -> FREE Removing of the fragment is
+ * completed and the fragment
+ * is now free again.
+ */
+ enum FragStatus {
+ FREE = 0, ///< Fragment record is currently not in use
+ FSACTIVE = 1, ///< Fragment is defined and usable for operations
+ DEFINED = 2, ///< Fragment is defined but not yet usable by
+ ///< operations
+ BLOCKED = 3, ///< LQH is waiting for all active operations to
+ ///< complete the current phase so that the
+ ///< local checkpoint can be started.
+ ACTIVE_CREATION = 4, ///< Fragment is defined and active but is under
+ ///< creation by the primary LQH.
+ CRASH_RECOVERING = 5, ///< Fragment is recovering after a crash by
+ ///< executing the fragment log and so forth.
+ ///< Will need further breakdown.
+ REMOVING = 6 ///< The fragment is currently removed.
+ ///< Operations are not allowed.
+ };
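+
+    /*
+      Editorial sketch of the state machine documented above (note that the
+      transition list says ACTIVE where the enum spells it FSACTIVE).  A
+      hypothetical helper, not part of the block, encoding the listed
+      transitions:
+
+        static bool legalFragTransition(FragStatus from, FragStatus to)
+        {
+          switch (from) {
+          case FREE:             return to == DEFINED;
+          case DEFINED:          return to == FSACTIVE ||
+                                        to == ACTIVE_CREATION ||
+                                        to == CRASH_RECOVERING;
+          case FSACTIVE:         return to == BLOCKED || to == REMOVING;
+          case BLOCKED:          return to == FSACTIVE;
+          case CRASH_RECOVERING: return to == FSACTIVE || to == REMOVING;
+          case ACTIVE_CREATION:  return to == FSACTIVE || to == REMOVING;
+          case REMOVING:         return to == FREE;
+          }
+          return false;
+        }
+    */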
+ enum LogFlag {
+ STATE_TRUE = 0,
+ STATE_FALSE = 1
+ };
+ enum SrStatus {
+ SS_IDLE = 0,
+ SS_STARTED = 1,
+ SS_COMPLETED = 2
+ };
+ enum LcpFlag {
+ LCP_STATE_TRUE = 0,
+ LCP_STATE_FALSE = 1
+ };
+ /**
+ * Last GCI for executing the fragment log in this phase.
+ */
+ UintR execSrLastGci[4];
+ /**
+ * Start GCI for executing the fragment log in this phase.
+ */
+ UintR execSrStartGci[4];
+ /**
+ * Requesting user pointer for executing the fragment log in
+ * this phase
+ */
+ UintR execSrUserptr[4];
+ /**
+     * The LCP identifiers of the LCPs.
+ * =0 means that the LCP number has not been stored.
+ * The LCP identifier is supplied by DIH when starting the LCP.
+ */
+ UintR lcpId[MAX_LCP_STORED];
+ UintR maxGciInLcp;
+ /**
+ * This variable contains the maximum global checkpoint
+ * identifier that exists in a certain local checkpoint.
+ * Maximum 4 local checkpoints is possible in this release.
+ */
+ UintR maxGciCompletedInLcp;
+ UintR srLastGci[4];
+ UintR srStartGci[4];
+ /**
+ * The fragment pointers in ACC
+ */
+ UintR accFragptr[2];
+ /**
+     * The EXEC_SR variables are used to keep track of which fragments
+     * are interested in being executed as part of executing the
+     * fragment log.
+     * They are initialised for every phase of executing the
+     * fragment log (the fragment log can be executed up to four times).
+ *
+ * Each execution is capable of executing the log records on four
+ * fragment replicas.
+ */
+ /**
+ * Requesting block reference for executing the fragment log
+ * in this phase.
+ */
+ BlockReference execSrBlockref[4];
+ /**
+ * This variable contains references to active scan and copy
+ * fragment operations on the fragment.
+     * A maximum of four concurrently active operations is allowed.
+ */
+ typedef Bitmask<4> ScanNumberMask;
+ ScanNumberMask m_scanNumberMask;
+ DLList<ScanRecord>::Head m_activeScans;
+ DLFifoList<ScanRecord>::Head m_queuedScans;
+
+ Uint16 srLqhLognode[4];
+ /**
+ * The fragment pointers in TUP and TUX
+ */
+ UintR tupFragptr[2];
+ UintR tuxFragptr[2];
+ /**
+ * This queue is where operations are put when blocked in ACC
+ * during start of a local chkp.
+ */
+ UintR accBlockedList;
+ /**
+     * This is the queue where all operations that are active on the
+     * fragment are put.
+     * It is used to deduce when the fragment no longer contains
+     * any active operations.
+     * This is needed when starting a local checkpoint.
+ */
+ UintR activeList;
+ /**
+     * This variable keeps track of how many active operations have
+     * skipped writing the log but have not yet committed or aborted.
+     * This is used during start of the fragment.
+ */
+ UintR activeTcCounter;
+ /**
+ * This status specifies whether this fragment is actively
+ * engaged in executing the fragment log.
+ */
+ ExecSrStatus execSrStatus;
+ /**
+ * The fragment id of this fragment.
+ */
+ UintR fragId;
+ /**
+ * Status of fragment
+ */
+ FragStatus fragStatus;
+ /**
+ * Indicates a local checkpoint is active and thus can generate
+ * UNDO log records.
+ */
+ UintR fragActiveStatus;
+ /**
+ * Reference to current LCP record.
+ * If no LCP is ongoing on the fragment then the value is RNIL.
+     * If LCP_REF != RNIL then a local checkpoint is ongoing in the
+ * fragment.
+ * LCP_STATE in LCP_RECORD specifies the state of the
+ * local checkpoint.
+ */
+ UintR lcpRef;
+ /**
+ * This flag indicates whether logging is currently activated at
+ * the fragment.
+ * During a system restart it is temporarily shut off.
+ * Some fragments have it permanently shut off.
+ */
+ LogFlag logFlag;
+ UintR masterPtr;
+ /**
+ * This variable contains the maximum global checkpoint identifier
+ * which was completed when the local checkpoint was started.
+ */
+ /**
+ * Reference to the next fragment record in a free list of fragment
+ * records.
+ */
+ UintR nextFrag;
+ /**
+ * The newest GCI that has been committed on fragment
+ */
+ UintR newestGci;
+ SrStatus srStatus;
+ UintR srUserptr;
+ /**
+ * The starting global checkpoint of this fragment.
+ */
+ UintR startGci;
+ /**
+ * A reference to the table owning this fragment.
+ */
+ UintR tabRef;
+ /**
+ * This is the queue to put operations that have been blocked
+ * during start of a local chkp.
+ */
+ UintR firstWaitQueue;
+ UintR lastWaitQueue;
+ /**
+ * The block reference to ACC on the fragment makes it
+ * possible to have different ACC blocks for different
+ * fragments in the future.
+ */
+ BlockReference accBlockref;
+ /**
+ * Ordered index block.
+ */
+ BlockReference tuxBlockref;
+ /**
+ * The master block reference as sent in COPY_ACTIVEREQ.
+ */
+ BlockReference masterBlockref;
+ /**
+ * These variables are used during system restart to recall
+ * from which node to execute the fragment log and which GCI's
+ * this node should start and stop from. Also to remember who
+ * to send the response to when system restart is completed.
+ */
+ BlockReference srBlockref;
+ /**
+ * The block reference to TUP on the fragment makes it
+ * possible to have different TUP blocks for different
+ * fragments in the future.
+ */
+ BlockReference tupBlockref;
+ /**
+ * This state indicates if the fragment will participate in a
+ * checkpoint.
+ * Temporary tables with Fragrecord::logFlag permanently off
+ * will also have Fragrecord::lcpFlag off.
+ */
+ LcpFlag lcpFlag;
+ /**
+ * Used to ensure that updates started with old
+ * configuration do not arrive here after the copy fragment
+ * has started.
+     * If they were allowed to arrive after that, they
+     * could update a record that has already been replicated to
+ * the new node. This type of arrival should be extremely
+ * rare but we must anyway ensure that no harm is done.
+ */
+ Uint16 copyNode;
+ /**
+ * This variable ensures that only one copy fragment is
+ * active at a time on the fragment.
+ */
+ Uint8 copyFragState;
+ /**
+ * The number of fragment replicas that will execute the log
+ * records in this round of executing the fragment
+ * log. Maximum four is possible.
+ */
+ Uint8 execSrNoReplicas;
+ /**
+ * This variable contains what type of replica this fragment
+ * is. Two types are possible:
+ * - Primary/Backup replica = 0
+ * - Stand-by replica = 1
+ *
+ * It is not possible to distinguish between primary and
+ * backup on a fragment.
+ * This can only be done per transaction.
+ * DIH can change from primary to backup without informing
+ * the various replicas about this change.
+ */
+ Uint8 fragCopy;
+ /**
+ * This is the last fragment distribution key that we have
+ * heard of.
+ */
+ Uint8 fragDistributionKey;
+ /**
+ * The identity of the next local checkpoint this fragment
+ * should perform.
+ */
+ Uint8 nextLcp;
+ /**
+     * How many local checkpoints the fragment contains.
+ */
+ Uint8 srChkpnr;
+ Uint8 srNoLognodes;
+ /**
+ * Table type.
+ */
+ Uint8 tableType;
+ /**
+ * For ordered index fragment, i-value of corresponding
+ * fragment in primary table.
+ */
+ UintR tableFragptr;
+ };
+ typedef Ptr<Fragrecord> FragrecordPtr;
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ GLOBAL CHECKPOINT RECORD $$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * This record describes a global checkpoint that is
+ * completed. It waits for all log records belonging to this
+ * global checkpoint to be saved on disk.
+ */
+ struct GcpRecord {
+ /**
+ * The file number within each log part where the log was
+ * located when gcp_savereq was received. The last record
+ * belonging to this global checkpoint is certainly before
+ * this place in the log. We could come even closer but it
+ * would cost performance and doesn't seem like a good
+ * idea. This is simple and it works.
+ */
+ Uint16 gcpFilePtr[4];
+ /**
+ * The page number within the file for each log part.
+ */
+ Uint16 gcpPageNo[4];
+ /**
+ * The word number within the last page that was written for
+ * each log part.
+ */
+ Uint16 gcpWordNo[4];
+ /**
+ * The identity of this global checkpoint.
+ */
+ UintR gcpId;
+ /**
+ * The state of this global checkpoint, one for each log part.
+ */
+ Uint8 gcpLogPartState[4];
+ /**
+ * The sync state of this global checkpoint, one for each
+ * log part.
+ */
+ Uint8 gcpSyncReady[4];
+ /**
+ * User pointer of the sender of gcp_savereq (= master DIH).
+ */
+ UintR gcpUserptr;
+ /**
+ * Block reference of the sender of gcp_savereq
+ * (= master DIH).
+ */
+ BlockReference gcpBlockref;
+ }; // Size 44 bytes
+ typedef Ptr<GcpRecord> GcpRecordPtr;
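+
+  /*
+    Editorial sketch: gcpSyncReady[] above holds one flag per log part, so
+    the natural completion test for a gcp_savereq is "all four log parts
+    have synced".  Illustration only; the exact bookkeeping and the reply
+    handling are assumptions here:
+
+      bool allLogPartsSynced(const GcpRecord& gcp)
+      {
+        for (Uint32 part = 0; part < 4; part++)
+          if (gcp.gcpSyncReady[part] == 0)  // one sync flag per log part
+            return false;
+        return true;                        // safe to confirm the GCP save
+      }
+  */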
+
+ struct HostRecord {
+ bool inPackedList;
+ UintR noOfPackedWordsLqh;
+ UintR packedWordsLqh[30];
+ UintR noOfPackedWordsTc;
+ UintR packedWordsTc[29];
+ BlockReference hostLqhBlockRef;
+ BlockReference hostTcBlockRef;
+ };// Size 128 bytes
+ typedef Ptr<HostRecord> HostRecordPtr;
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ LOCAL CHECKPOINT RECORD $$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * This record contains the information about a local
+ * checkpoint that is ongoing. This record is also used as a
+ * system restart record.
+ */
+ struct LcpRecord {
+ LcpRecord() { m_EMPTY_LCP_REQ.clear(); }
+
+ enum LcpState {
+ LCP_IDLE = 0,
+ LCP_COMPLETED = 2,
+ LCP_WAIT_FRAGID = 3,
+ LCP_WAIT_TUP_PREPLCP = 4,
+ LCP_WAIT_HOLDOPS = 5,
+ LCP_WAIT_ACTIVE_FINISH = 6,
+ LCP_START_CHKP = 7,
+ LCP_BLOCKED_COMP = 8,
+ LCP_SR_WAIT_FRAGID = 9,
+ LCP_SR_STARTED = 10,
+ LCP_SR_COMPLETED = 11
+ };
+ Uint32 firstLcpLocAcc;
+ Uint32 firstLcpLocTup;
+ Uint32 lcpAccptr;
+
+ LcpState lcpState;
+ bool lastFragmentFlag;
+
+ struct FragOrd {
+ Uint32 fragPtrI;
+ LcpFragOrd lcpFragOrd;
+ };
+ FragOrd currentFragment;
+
+ bool lcpQueued;
+ FragOrd queuedFragment;
+
+ bool reportEmpty;
+ NdbNodeBitmask m_EMPTY_LCP_REQ;
+ }; // Size 76 bytes
+ typedef Ptr<LcpRecord> LcpRecordPtr;
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$ LOCAL CHECKPOINT SUPPORT RECORD $$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * This record contains the information about an outstanding
+ * request to TUP or ACC. Used for both local checkpoints and
+ * system restart.
+ */
+ struct LcpLocRecord {
+ enum LcpLocstate {
+ IDLE = 0,
+ WAIT_TUP_PREPLCP = 1,
+ WAIT_LCPHOLDOP = 2,
+ HOLDOP_READY = 3,
+ ACC_WAIT_STARTED = 4,
+ ACC_STARTED = 5,
+ ACC_COMPLETED = 6,
+ TUP_WAIT_STARTED = 7,
+ TUP_STARTED = 8,
+ TUP_COMPLETED = 9,
+ SR_ACC_STARTED = 10,
+ SR_TUP_STARTED = 11,
+ SR_ACC_COMPLETED = 12,
+ SR_TUP_COMPLETED = 13
+ };
+ enum WaitingBlock {
+ ACC = 0,
+ TUP = 1,
+ NONE = 2
+ };
+
+ LcpLocstate lcpLocstate;
+ UintR locFragid;
+ UintR masterLcpRec;
+ UintR nextLcpLoc;
+ UintR tupRef;
+ WaitingBlock waitingBlock;
+ Uint32 accContCounter;
+ }; // 28 bytes
+ typedef Ptr<LcpLocRecord> LcpLocRecordPtr;
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* */
+ /* THE RECORDS THAT START BY LOG_ ARE A PART OF THE LOG MANAGER. */
+ /* THESE RECORDS ARE USED TO HANDLE THE FRAGMENT LOG. */
+ /* */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ LOG RECORD $$$$$$$ */
+ /* */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* THIS RECORD IS ALIGNED TO BE 256 BYTES. */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * This record describes the current state of a log.
+ * A log consists of a number of log files.
+ * These log files are described by the log file record.
+ *
+ * There will be 4 sets of log files.
+ * Different tables will use different log files dependent
+ * on the table id.
+ * This ensures that more than one outstanding request can
+ * be sent to the file system.
+ * The log file to use is found by performing a very simple hash
+ * function.
+ */
+ struct LogPartRecord {
+ enum LogPartState {
+ IDLE = 0, ///< Nothing happens at the moment
+ ACTIVE = 1, ///< An operation is active logging
+ SR_FIRST_PHASE = 2, ///< Finding the end of the log and
+ ///< the information about global
+ ///< checkpoints in the log is ongoing.
+ SR_FIRST_PHASE_COMPLETED = 3, ///< First phase completed
+ SR_THIRD_PHASE_STARTED = 4, ///< Executing fragment log is in 3rd ph
+ SR_THIRD_PHASE_COMPLETED = 5,
+ SR_FOURTH_PHASE_STARTED = 6, ///< Finding the log tail and head
+ ///< is the fourth phase.
+ SR_FOURTH_PHASE_COMPLETED = 7,
+      FILE_CHANGE_PROBLEM = 8,      ///< For some reason the write to
+                                    ///< page zero in file zero has not
+                                    ///< finished after 15 mbytes of
+                                    ///< log data have been written
+ TAIL_PROBLEM = 9 ///< Only 1 mbyte of log left.
+ ///< No operations allowed to enter the
+ ///< log. Only special log records
+ ///< are allowed
+ };
+ enum WaitWriteGciLog {
+ WWGL_TRUE = 0,
+ WWGL_FALSE = 1
+ };
+ enum LogExecState {
+ LES_IDLE = 0,
+ LES_SEARCH_STOP = 1,
+ LES_SEARCH_START = 2,
+ LES_EXEC_LOG = 3,
+ LES_EXEC_LOG_NEW_MBYTE = 4,
+ LES_EXEC_LOG_NEW_FILE = 5,
+ LES_EXEC_LOGREC_FROM_FILE = 6,
+ LES_EXEC_LOG_COMPLETED = 7,
+ LES_WAIT_READ_EXEC_SR_NEW_MBYTE = 8,
+ LES_WAIT_READ_EXEC_SR = 9,
+ LES_EXEC_LOG_INVALIDATE = 10
+ };
+
+ /**
+     * Indicates whether a CONTINUEB(ZLOG_LQHKEYREQ) signal has been sent
+     * and is outstanding. We do not want several instances of this
+     * signal in flight since that would create multiple
+     * writers of the list.
+ */
+ UintR LogLqhKeyReqSent;
+ /**
+ * Contains the current log file where log records are
+ * written. During system restart it is used to indicate the
+ * last log file.
+ */
+ UintR currentLogfile;
+ /**
+ * The log file used to execute log records from far behind.
+ */
+ UintR execSrExecLogFile;
+ /**
+ * The currently executing prepare record starts in this log
+ * page. This variable is used to enable that a log record is
+ * executed multiple times in execution of the log.
+ */
+ UintR execSrLogPage;
+ /**
+     * This variable keeps track of the lfo record used for the pages
+     * that were read from disk when an operation's log record was
+     * not found in the main memory buffer for log pages.
+ */
+ UintR execSrLfoRec;
+ /**
+ * The starting page number when reading log from far behind.
+ */
+ UintR execSrStartPageNo;
+ /**
+ * The last page number when reading log from far behind.
+ */
+ UintR execSrStopPageNo;
+ /**
+ * Contains a reference to the first log file, file number 0.
+ */
+ UintR firstLogfile;
+ /**
+ * The head of the operations queued for logging.
+ */
+ UintR firstLogQueue;
+ /**
+ * This variable contains the oldest operation in this log
+     * part which has not been committed yet.
+ */
+ UintR firstLogTcrec;
+ /**
+ * The first reference to a set of 8 pages. These are used
+ * during execution of the log to keep track of which pages
+ * are in memory and which are not.
+ */
+ UintR firstPageRef;
+ /**
+ * This variable contains the global checkpoint record
+ * waiting for disk writes to complete.
+ */
+ UintR gcprec;
+ /**
+ * The last reference to a set of 8 pages. These are used
+ * during execution of the log to keep track of which pages
+ * are in memory and which are not.
+ */
+ UintR lastPageRef;
+ /**
+ * The tail of the operations queued for logging.
+ */
+ UintR lastLogQueue;
+ /**
+ * This variable contains the newest operation in this log
+     * part which has not been committed yet.
+ */
+ UintR lastLogTcrec;
+ /**
+     * This variable indicates which was the last log file that was
+     * written before the system crashed. Discovered during
+ * system restart.
+ */
+ UintR lastLogfile;
+ /**
+ * This variable is used to keep track of the state during
+ * the third phase of the system restart, i.e. when
+ * LogPartRecord::logPartState ==
+ * LogPartRecord::SR_THIRD_PHASE_STARTED.
+ */
+ LogExecState logExecState;
+ /**
+ * This variable contains the lap number of this log part.
+ */
+ UintR logLap;
+ /**
+ * This variable contains the place to stop executing the log
+ * in this phase.
+ */
+ UintR logLastGci;
+ /**
+ * This variable contains the place to start executing the
+ * log in this phase.
+ */
+ UintR logStartGci;
+ /**
+ * The latest GCI completed in this log part.
+ */
+ UintR logPartNewestCompletedGCI;
+ /**
+ * The current state of this log part.
+ */
+ LogPartState logPartState;
+ /**
+ * A timer that is set every time a log page is sent to disk.
+ * Ensures that log pages are not kept in main memory for
+ * more than a certain time.
+ */
+ UintR logPartTimer;
+ /**
+ * The current timer which is set by the periodic signal
+ * received by LQH
+ */
+ UintR logTimer;
+ /**
+ * Contains the number of the log tail file and the mbyte
+ * reference within that file. This information ensures that
+ * the tail is not overwritten when writing new log records.
+ */
+ UintR logTailFileNo;
+ /**
+ * The TcConnectionrec used during execution of this log part.
+ */
+ UintR logTcConrec;
+ /**
+     * The number of pages that currently reside in the main
+     * memory buffer. It does not refer to pages that are currently
+     * being read from the log files, only to pages already read
+     * from the log file.
+ */
+ UintR mmBufferSize;
+ /**
+ * Contains the current number of log files in this log part.
+ */
+ UintR noLogFiles;
+ /**
+ * This variable is used only during execution of a log
+     * record. It keeps track of the page record in which a log
+     * record was started. It is then used to deduce which
+     * pages are dirty after the log records on the
+     * page have been executed.
+ *
+ * It is also used to find out where to write the invalidate
+ * command when that is needed.
+ */
+ UintR prevLogpage;
+ /**
+ * The number of files remaining to gather GCI information
+ * for during system restart. Only used if number of files
+ * is larger than 60.
+ */
+ UintR srRemainingFiles;
+ /**
+ * The log file where to start executing the log during
+ * system restart.
+ */
+ UintR startLogfile;
+ /**
+ * The last log file in which to execute the log during system
+ * restart.
+ */
+ UintR stopLogfile;
+ /**
+ * This variable keeps track of when we want to write a complete
+ * gci log record but have been blocked by an ongoing log operation.
+ */
+ WaitWriteGciLog waitWriteGciLog;
+ /**
+ * The currently executing prepare record starts in this index
+ * in the log page.
+ */
+ Uint16 execSrLogPageIndex;
+ /**
+ * Which of the four exec_sr's in the fragment is currently executing
+ */
+ Uint16 execSrExecuteIndex;
+ /**
+ * The number of pages executed in the current mbyte.
+ */
+ Uint16 execSrPagesExecuted;
+ /**
+ * The number of pages read from disk that have arrived and are
+ * currently awaiting execution of the log.
+ */
+ Uint16 execSrPagesRead;
+ /**
+     * The number of pages read from disk that have not yet arrived
+     * at the block.
+ */
+ Uint16 execSrPagesReading;
+ /**
+ * This variable refers to the new header file where we will
+     * start writing the log after a system restart has been completed.
+ */
+ Uint16 headFileNo;
+ /**
+ * This variable refers to the page number within the header file.
+ */
+ Uint16 headPageNo;
+ /**
+ * This variable refers to the index within the new header
+ * page.
+ */
+ Uint16 headPageIndex;
+ /**
+     * This variable indicates which was the last mbyte in the last
+ * logfile before a system crash. Discovered during system restart.
+ */
+ Uint16 lastMbyte;
+ /**
+ * This variable is used only during execution of a log
+     * record. It keeps track of the file page in which a log
+     * record was started. It is used when it is necessary to write a
+ * dirty page to disk during log execution (this happens when
+ * commit records are invalidated).
+ */
+ Uint16 prevFilepage;
+ /**
+ * This is used to save where we were in the execution of log
+ * records when we find a commit record that needs to be
+ * executed.
+ *
+ * This variable is also used to remember the index where the
+ * log type was in the log record. It is only used in this
+ * role when finding a commit record that needs to be
+ * invalidated.
+ */
+ Uint16 savePageIndex;
+ Uint8 logTailMbyte;
+ /**
+ * The mbyte within the starting log file where to start
+ * executing the log.
+ */
+ Uint8 startMbyte;
+ /**
+ * The last mbyte in which to execute the log during system
+ * restart.
+ */
+ Uint8 stopMbyte;
+ /**
+ * This variable refers to the file where invalidation is
+     * occurring during system/node restart.
+ */
+ Uint16 invalidateFileNo;
+ /**
+ * This variable refers to the page where invalidation is
+     * occurring during system/node restart.
+ */
+ Uint16 invalidatePageNo;
+ }; // Size 164 Bytes
+ typedef Ptr<LogPartRecord> LogPartRecordPtr;
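+
+  /*
+    Editorial sketch: the LogPartRecord description above says there are 4
+    sets of log files and that the log part to use is found by a very simple
+    hash of the table id.  Under that assumption only (the real selection
+    may differ):
+
+      Uint32 logPartNo(Uint32 tableId)
+      {
+        return tableId & 3;   // 4 log parts -> low two bits of the table id
+      }
+  */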
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ LOG FILE RECORD $$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* THIS RECORD IS ALIGNED TO BE 288 (256 + 32) BYTES. */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * This record contains information about a log file.
+ * A log file contains log records from several tables and
+ * fragments of a table. LQH can contain more than
+ * one log file to ensure faster log processing.
+ *
+ * The number of pages to write to disk at a time is
+ * configurable.
+ */
+ struct LogFileRecord {
+ enum FileChangeState {
+ NOT_ONGOING = 0,
+ BOTH_WRITES_ONGOING = 1,
+ LAST_WRITE_ONGOING = 2,
+ FIRST_WRITE_ONGOING = 3,
+ WRITE_PAGE_ZERO_ONGOING = 4
+ };
+ enum LogFileStatus {
+ LFS_IDLE = 0, ///< Log file record not in use
+ CLOSED = 1, ///< Log file closed
+ OPENING_INIT = 2,
+ OPEN_SR_FRONTPAGE = 3, ///< Log file opened as part of system
+ ///< restart. Open file 0 to find
+ ///< the front page of the log part.
+ OPEN_SR_LAST_FILE = 4, ///< Open last log file that was written
+ ///< before the system restart.
+ OPEN_SR_NEXT_FILE = 5, ///< Open a log file which is 16 files
+ ///< backwards to find the next
+ ///< information about GCPs.
+ OPEN_EXEC_SR_START = 6, ///< Log file opened as part of
+ ///< executing
+ ///< log during system restart.
+ OPEN_EXEC_SR_NEW_MBYTE = 7,
+ OPEN_SR_FOURTH_PHASE = 8,
+ OPEN_SR_FOURTH_NEXT = 9,
+ OPEN_SR_FOURTH_ZERO = 10,
+ OPENING_WRITE_LOG = 11, ///< Log file opened as part of writing
+ ///< log during normal operation.
+ OPEN_EXEC_LOG = 12,
+ CLOSING_INIT = 13,
+ CLOSING_SR = 14, ///< Log file closed as part of system
+ ///< restart. Currently trying to
+ ///< find where to start executing the
+ ///< log
+ CLOSING_EXEC_SR = 15, ///< Log file closed as part of
+ ///< executing log during system restart
+ CLOSING_EXEC_SR_COMPLETED = 16,
+ CLOSING_WRITE_LOG = 17, ///< Log file closed as part of writing
+ ///< log during normal operation.
+ CLOSING_EXEC_LOG = 18,
+ OPEN_INIT = 19,
+ OPEN = 20, ///< Log file open
+ OPEN_SR_INVALIDATE_PAGES = 21,
+ CLOSE_SR_INVALIDATE_PAGES = 22
+ };
+
+ /**
+ * When a new mbyte is started in the log we have to find out
+ * how far back in the log we still have prepared operations
+     * which have been neither committed nor aborted. This variable
+     * keeps track of this value for each of the mbytes in this
+     * log file. These values are written down in the
+ * header of each log file. That information is used during
+ * system restart to find the tail of the log.
+ */
+ UintR logLastPrepRef[16];
+ /**
+ * The max global checkpoint completed before the mbyte in the
+ * log file was started. One variable per mbyte.
+ */
+ UintR logMaxGciCompleted[16];
+ /**
+ * The max global checkpoint started before the mbyte in the log
+ * file was started. One variable per mbyte.
+ */
+ UintR logMaxGciStarted[16];
+ /**
+ * This variable contains the file name as needed by the file
+ * system when opening the file.
+ */
+ UintR fileName[4];
+ /**
+ * This variable has a reference to the log page which is
+ * currently in use by the log.
+ */
+ UintR currentLogpage;
+ /**
+ * The number of the current mbyte in the log file.
+ */
+ UintR currentMbyte;
+ /**
+     * This variable is used when changing files. It is used to find
+     * out when both the last write in the previous file and the
+     * first write in this file have been completed. After these
+ * writes have completed the variable keeps track of when the
+ * write to page zero in file zero is completed.
+ */
+ FileChangeState fileChangeState;
+ /**
+ * The number of the file within this log part.
+ */
+ UintR fileNo;
+ /**
+ * This variable shows where to read/write the next pages into
+ * the log. Used when writing the log during normal operation
+ * and when reading the log during system restart. It
+ * specifies the page position where each page is 8 kbyte.
+ */
+ UintR filePosition;
+ /**
+ * This contains the file pointer needed by the file system
+ * when reading/writing/closing and synching.
+ */
+ UintR fileRef;
+ /**
+ * The head of the pages waiting for shipment to disk.
+ * They are filled with log info.
+ */
+ UintR firstFilledPage;
+ /**
+ * A list of active read/write operations on the log file.
+     * Operations are always appended at the end and the first one
+     * should always complete first.
+ */
+ UintR firstLfo;
+ UintR lastLfo;
+ /**
+ * The tail of the pages waiting for shipment to disk.
+ * They are filled with log info.
+ */
+ UintR lastFilledPage;
+ /**
+ * This variable keeps track of the last written page in the
+ * file while writing page zero in file zero when changing log
+ * file.
+ */
+ UintR lastPageWritten;
+ /**
+ * This variable keeps track of the last written word in the
+ * last page written in the file while writing page zero in
+ * file zero when changing log file.
+ */
+ UintR lastWordWritten;
+    /**
+     * This variable keeps track of the number of pages written since
+     * the last synch on this log file.
+     */
+    UintR logFilePagesToDiskWithoutSynch;
+    /**
+     * This variable contains the current status of the log file.
+     */
+    LogFileStatus logFileStatus;
+ /**
+ * A reference to page zero in this file.
+ * This page is written before the file is closed.
+ */
+ UintR logPageZero;
+ /**
+ * This variable contains a reference to the record describing
+ * this log part. One of four records (0,1,2 or 3).
+ */
+ UintR logPartRec;
+ /**
+ * Next free log file record or next log file in this log.
+ */
+ UintR nextLogFile;
+ /**
+ * The previous log file.
+ */
+ UintR prevLogFile;
+ /**
+ * The number of remaining words in this mbyte of the log file.
+ */
+ UintR remainingWordsInMbyte;
+ /**
+ * The current file page within the current log file. This is
+ * a reference within the file and not a reference to a log
+ * page record. It is used to deduce where log records are
+ * written. Particularly completed gcp records and prepare log
+ * records.
+ */
+ Uint16 currentFilepage;
+ /**
+ * The number of pages in the list referenced by
+ * LOG_PAGE_BUFFER.
+ */
+ Uint16 noLogpagesInBuffer;
+ }; // Size 288 bytes
+ typedef Ptr<LogFileRecord> LogFileRecordPtr;
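+
+  /*
+    Editorial sketch: logMaxGciCompleted[] / logMaxGciStarted[] above keep
+    one entry per mbyte of a 16 mbyte log file.  A purely illustrative
+    lookup of the entry covering a given file page, assuming the 8 kbyte
+    page size mentioned in the filePosition comment (128 pages per mbyte):
+
+      Uint32 maxGciCompletedAt(const LogFileRecord& lf, Uint32 filePageNo)
+      {
+        Uint32 mbyte = filePageNo / 128;    // 1 mbyte / 8 kbyte pages
+        return lf.logMaxGciCompleted[mbyte];
+      }
+  */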
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ LOG OPERATION RECORD $$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * This record contains a currently active file operation
+ * that has started by the log module.
+ */
+ struct LogFileOperationRecord {
+ enum LfoState {
+ IDLE = 0, ///< Operation is not used at the moment
+ INIT_WRITE_AT_END = 1, ///< Write in file so that it grows to
+ ///< 16 Mbyte
+ INIT_FIRST_PAGE = 2, ///< Initialise the first page in a file
+ WRITE_GCI_ZERO = 3,
+ WRITE_INIT_MBYTE = 4,
+ WRITE_DIRTY = 5,
+ READ_SR_FRONTPAGE = 6, ///< Read page zero in file zero during
+ ///< system restart
+ READ_SR_LAST_FILE = 7, ///< Read page zero in last file open
+ ///< before system crash
+ READ_SR_NEXT_FILE = 8, ///< Read 60 files backwards to find
+ ///< further information GCPs in page
+ ///< zero
+ READ_SR_LAST_MBYTE = 9,
+ READ_EXEC_SR = 10,
+ READ_EXEC_LOG = 11,
+ READ_SR_FOURTH_PHASE = 12,
+ READ_SR_FOURTH_ZERO = 13,
+ FIRST_PAGE_WRITE_IN_LOGFILE = 14,
+ LAST_WRITE_IN_FILE = 15,
+ WRITE_PAGE_ZERO = 16,
+ ACTIVE_WRITE_LOG = 17, ///< A write operation during
+ ///< writing of log
+ READ_SR_INVALIDATE_PAGES = 18,
+ WRITE_SR_INVALIDATE_PAGES = 19
+ };
+ /**
+ * We have to remember the log pages read.
+ * Otherwise we cannot build the linked list after the pages have
+ * arrived to main memory.
+ */
+ UintR logPageArray[16];
+ /**
+ * A list of the pages that are part of this active operation.
+ */
+ UintR firstLfoPage;
+ /**
+ * A timer to ensure that records are not lost.
+ */
+ UintR lfoTimer;
+ /**
+     * The word number of the last written word in the last page
+     * written during a file write.
+ */
+ UintR lfoWordWritten;
+ /**
+ * This variable contains the state of the log file operation.
+ */
+ LfoState lfoState;
+ /**
+ * The log file that the file operation affects.
+ */
+ UintR logFileRec;
+ /**
+ * The log file operations on a file are kept in a linked list.
+ */
+ UintR nextLfo;
+ /**
+ * The page number of the first read/written page during a file
+ * read/write.
+ */
+ Uint16 lfoPageNo;
+ /**
+ * The number of pages written or read during an operation to
+ * the log file.
+ */
+ Uint16 noPagesRw;
+ }; // 92 bytes
+ typedef Ptr<LogFileOperationRecord> LogFileOperationRecordPtr;
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ LOG PAGE RECORD $$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * These are the 8 k pages used to store log records before storing
+ * them in the file system.
+ * Since 64 kbyte is sent to disk at a time it is necessary to have
+ * at least 4*64 kbytes of log pages.
+ * To handle multiple outstanding requests we need some additional pages.
+ * Thus we allocate 1 mbyte to ensure that we do not get problems with
+ * insufficient number of pages.
+ */
+ struct LogPageRecord {
+ /**
+ * This variable contains the pages that are sent to disk.
+ *
+ * All pages contain a header of 12 words:
+ * - WORD 0: CHECKSUM Calculated before storing on disk and
+ * checked when read from disk.
+ * - WORD 1: LAP How many wraparounds have the log
+ * experienced since initial start of the
+ * system.
+     *   - WORD 2: MAX_GCI_COMPLETED The maximum gci that has
+     *                            completed before this page. This
+     *                            gci will not be found in this
+     *                            page nor later in the log.
+     *   - WORD 3: MAX_GCI_STARTED The maximum gci that has started
+     *                            before this page.
+ * - WORD 4: NEXT_PAGE Pointer to the next page.
+ * Only used in main memory
+ * - WORD 5: PREVIOUS_PAGE Pointer to the previous page.
+ * Currently not used.
+ * - WORD 6: VERSION NDB version that wrote the page.
+ * - WORD 7: NO_LOG_FILES Number of log files in this log part.
+ * - WORD 8: CURRENT PAGE INDEX This keeps track of where we are in the
+ * page.
+     *                            This is only used when the page is
+     *                            in memory.
+ * - WORD 9: OLD PREPARE FILE NO This keeps track of the oldest prepare
+ * operation still alive (not committed
+ * or aborted) when this mbyte started.
+ * - WORD 10: OLD PREPARE PAGE REF File page reference within this file
+ * number.
+ * Page no + Page index.
+ * If no prepare was alive then these
+ * values points this mbyte.
+ * - WORD 11: DIRTY FLAG = 0 means not dirty and
+ * = 1 means the page is dirty.
+ * Is used when executing log when
+ * a need to write invalid commit
+ * records arise.
+ *
+ * The remaining 2036 words are used for log information, i.e.
+ * log records.
+ *
+ * A log record on this page has the following layout:
+ * - WORD 0: LOG RECORD TYPE
+ * The following types are supported:
+ * - PREPARE OPERATION An operation not yet committed.
+ * - NEW PREPARE OPERATION A prepared operation already
+ * logged is inserted
+ * into the log again so that the
+ * log tail can be advanced.
+     *                                 This can happen when a transaction
+     *                                 stays uncommitted for a long time.
+ * - ABORT TRANSACTION A previously prepared transaction
+ * was aborted.
+ * - COMMIT TRANSACTION A previously prepared transaction
+ * was committed.
+ * - INVALID COMMIT A previous commit record was
+ * invalidated by a
+ * subsequent system restart.
+ * A log record must be invalidated
+ * in a system restart if it belongs
+ * to a global checkpoint id which
+ * is not included in the system
+ * restart.
+ * Otherwise it will be included in
+ * a subsequent system restart since
+ * it will then most likely belong
+ * to a global checkpoint id which
+ * is part of that system
+ * restart.
+ * This is not a correct behaviour
+ * since this operation is lost in a
+ * system restart and should not
+ * reappear at a later system
+ * restart.
+ * - COMPLETED GCI A GCI has now been completed.
+ * - FRAGMENT SPLIT A fragment has been split
+ * (not implemented yet)
+ * - FILE DESCRIPTOR This is always the first log record
+ * in a file.
+ * It is always placed on page 0 after
+ * the header.
+ * It is written when the file is
+ * opened and when the file is closed.
+ * - NEXT LOG RECORD This log record only records where
+ * the next log record starts.
+ * - NEXT MBYTE RECORD This log record specifies that there
+ * are no more log records in this mbyte.
+ *
+ *
+ * A FILE DESCRIPTOR log record continues as follows:
+ * - WORD 1: NO_LOG_DESCRIPTORS This defines the number of
+ * descriptors of log files that
+ * will follow hereafter (max 32).
+     *                            Each log file descriptor describes
+     *                            max_gci_completed, max_gci_started
+     *                            and log_lap at every 1 mbyte of the
+     *                            log file. Since a log file is always
+     *                            16 mbyte, 16 entries are needed for
+     *                            each of these, i.e. 48 words per log
+     *                            file descriptor (max 32*48 = 1536
+     *                            words, which always fits in page 0).
+ * - WORD 2: LAST LOG FILE The number of the log file currently
+ * open. This is only valid in file 0.
+ * - WORD 3 - WORD 18: MAX_GCI_COMPLETED for every 1 mbyte
+ * in this log file.
+ * - WORD 19 - WORD 34: MAX_GCI_STARTED for every 1 mbyte
+ * in this log file.
+ *
+ * Then it continues for NO_LOG_DESCRIPTORS until all subsequent
+ * log files (max 32) have been properly described.
+ *
+ *
+ * A PREPARE OPERATION log record continues as follows:
+ * - WORD 1: LOG RECORD SIZE
+ * - WORD 2: HASH VALUE
+ * - WORD 3: SCHEMA VERSION
+ * - WORD 4: OPERATION TYPE
+ * = 0 READ,
+ * = 1 UPDATE,
+ * = 2 INSERT,
+ * = 3 DELETE
+ * - WORD 5: NUMBER OF WORDS IN ATTRINFO PART
+ * - WORD 6: KEY LENGTH IN WORDS
+ * - WORD 7 - (WORD 7 + KEY_LENGTH - 1) The tuple key
+ * - (WORD 7 + KEY_LENGTH) -
+ * (WORD 7 + KEY_LENGTH + ATTRINFO_LENGTH - 1) The attrinfo
+ *
+ * A log record can be spread in several pages in some cases.
+ * The next log record always starts immediately after this log record.
+ * A log record does however never traverse a 1 mbyte boundary.
+ * This is used to ensure that we can always come back if something
+ * strange occurs in the log file.
+ * To ensure this we also have log records which only records
+ * the next log record.
+ *
+ *
+ * A COMMIT TRANSACTION log record continues as follows:
+ * - WORD 1: TRANSACTION ID PART 1
+ * - WORD 2: TRANSACTION ID PART 2
+ * - WORD 3: FRAGMENT ID OF THE OPERATION
+ * - WORD 4: TABLE ID OF THE OPERATION
+ * - WORD 5: THE FILE NUMBER OF THE PREPARE RECORD
+ * - WORD 6: THE STARTING PAGE NUMBER OF THE PREPARE RECORD
+ * - WORD 7: THE STARTING PAGE INDEX OF THE PREPARE RECORD
+ * - WORD 8: THE STOP PAGE NUMBER OF THE PREPARE RECORD
+ * - WORD 9: GLOBAL CHECKPOINT OF THE TRANSACTION
+ *
+ *
+ * An ABORT TRANSACTION log record continues as follows:
+ * - WORD 1: TRANSACTION ID PART 1
+ * - WORD 2: TRANSACTION ID PART 2
+ *
+ *
+     * A COMPLETED GCI log record continues as follows:
+ * - WORD 1: THE COMPLETED GCI
+ *
+ *
+ * A NEXT LOG RECORD log record continues as follows:
+ * - There is no more information needed.
+ * The next log record will always refer to the start of the next page.
+ *
+ * A NEXT MBYTE RECORD log record continues as follows:
+ * - There is no more information needed.
+ * The next mbyte will always refer to the start of the next mbyte.
+ */
+ UintR logPageWord[8192]; // Size 32 kbytes
+ };
+ typedef Ptr<LogPageRecord> LogPageRecordPtr;
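+
+  /*
+    Editorial sketch of the page header layout documented above: word 2
+    holds MAX_GCI_COMPLETED and word 3 holds MAX_GCI_STARTED.  The helpers
+    below are illustrative only and use the word positions straight from
+    that comment:
+
+      Uint32 pageMaxGciCompleted(const LogPageRecord& page)
+      {
+        return page.logPageWord[2];   // WORD 2: MAX_GCI_COMPLETED
+      }
+      Uint32 pageMaxGciStarted(const LogPageRecord& page)
+      {
+        return page.logPageWord[3];   // WORD 3: MAX_GCI_STARTED
+      }
+  */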
+
+ struct PageRefRecord {
+ UintR pageRef[8];
+ UintR prNext;
+ UintR prPrev;
+ Uint16 prFileNo;
+ Uint16 prPageNo;
+ }; // size 44 bytes
+ typedef Ptr<PageRefRecord> PageRefRecordPtr;
+
+ struct Tablerec {
+ enum TableStatus {
+ TABLE_DEFINED = 0,
+ NOT_DEFINED = 1,
+ ADD_TABLE_ONGOING = 2,
+ PREP_DROP_TABLE_ONGOING = 3,
+ PREP_DROP_TABLE_DONE = 4
+ };
+
+ UintR fragrec[MAX_FRAG_PER_NODE];
+ Uint16 fragid[MAX_FRAG_PER_NODE];
+ /**
+ * Status of the table
+ */
+ TableStatus tableStatus;
+ /**
+ * Table type and target table of index.
+ */
+ Uint16 tableType;
+ Uint16 primaryTableId;
+ Uint32 schemaVersion;
+
+ Uint32 usageCount;
+ NdbNodeBitmask waitingTC;
+ NdbNodeBitmask waitingDIH;
+ }; // Size 100 bytes
+ typedef Ptr<Tablerec> TablerecPtr;
+
+ struct TcConnectionrec {
+ enum ListState {
+ NOT_IN_LIST = 0,
+ IN_ACTIVE_LIST = 1,
+ ACC_BLOCK_LIST = 2,
+ WAIT_QUEUE_LIST = 3
+ };
+ enum LogWriteState {
+ NOT_STARTED = 0,
+ NOT_WRITTEN = 1,
+ NOT_WRITTEN_WAIT = 2,
+ WRITTEN = 3
+ };
+ enum AbortState {
+ ABORT_IDLE = 0,
+ ABORT_ACTIVE = 1,
+ NEW_FROM_TC = 2,
+ REQ_FROM_TC = 3,
+ ABORT_FROM_TC = 4,
+ ABORT_FROM_LQH = 5
+ };
+ enum TransactionState {
+ IDLE = 0,
+
+ /* -------------------------------------------------------------------- */
+ // Transaction in progress states
+ /* -------------------------------------------------------------------- */
+ WAIT_ACC = 1,
+ WAIT_TUPKEYINFO = 2,
+ WAIT_ATTR = 3,
+ WAIT_TUP = 4,
+ STOPPED = 5,
+ LOG_QUEUED = 6,
+ PREPARED = 7,
+ LOG_COMMIT_WRITTEN_WAIT_SIGNAL = 8,
+ LOG_COMMIT_QUEUED_WAIT_SIGNAL = 9,
+
+ /* -------------------------------------------------------------------- */
+ // Commit in progress states
+ /* -------------------------------------------------------------------- */
+ COMMIT_STOPPED = 10,
+ LOG_COMMIT_QUEUED = 11,
+ COMMIT_QUEUED = 12,
+ COMMITTED = 13,
+
+ /* -------------------------------------------------------------------- */
+ // Abort in progress states
+ /* -------------------------------------------------------------------- */
+ WAIT_ACC_ABORT = 14,
+ ABORT_QUEUED = 15,
+ ABORT_STOPPED = 16,
+ WAIT_AI_AFTER_ABORT = 17,
+ LOG_ABORT_QUEUED = 18,
+ WAIT_TUP_TO_ABORT = 19,
+
+ /* -------------------------------------------------------------------- */
+ // Scan in progress states
+ /* -------------------------------------------------------------------- */
+ WAIT_SCAN_AI = 20,
+ SCAN_STATE_USED = 21,
+ SCAN_FIRST_STOPPED = 22,
+ SCAN_CHECK_STOPPED = 23,
+ SCAN_STOPPED = 24,
+ SCAN_RELEASE_STOPPED = 25,
+ SCAN_CLOSE_STOPPED = 26,
+ COPY_CLOSE_STOPPED = 27,
+ COPY_FIRST_STOPPED = 28,
+ COPY_STOPPED = 29,
+ SCAN_TUPKEY = 30,
+ COPY_TUPKEY = 31,
+
+ TC_NOT_CONNECTED = 32,
+ PREPARED_RECEIVED_COMMIT = 33, // Temporary state in write commit log
+ LOG_COMMIT_WRITTEN = 34 // Temporary state in write commit log
+ };
+ enum ConnectState {
+ DISCONNECTED = 0,
+ CONNECTED = 1,
+ COPY_CONNECTED = 2,
+ LOG_CONNECTED = 3
+ };
+ ConnectState connectState;
+ UintR copyCountWords;
+ UintR firstAttrinfo[5];
+ UintR tupkeyData[4];
+ UintR transid[2];
+ AbortState abortState;
+ UintR accConnectrec;
+ UintR applOprec;
+ UintR clientConnectrec;
+ UintR tcTimer;
+ UintR currReclenAi;
+ UintR currTupAiLen;
+ UintR firstAttrinbuf;
+ UintR firstTupkeybuf;
+ UintR fragmentid;
+ UintR fragmentptr;
+ UintR gci;
+ UintR hashValue;
+ UintR lastTupkeybuf;
+ UintR lastAttrinbuf;
+ /**
+ * Each operation (TcConnectrec) can be stored in max one out of many
+ * lists.
+ * This variable keeps track of which list it is in.
+ */
+ ListState listState;
+
+ UintR logStartFileNo;
+ LogWriteState logWriteState;
+ UintR nextHashRec;
+ UintR nextLogTcrec;
+ UintR nextTcLogQueue;
+ UintR nextTc;
+ UintR nextTcConnectrec;
+ UintR prevHashRec;
+ UintR prevLogTcrec;
+ UintR prevTc;
+ UintR readlenAi;
+ UintR reqRef;
+ UintR reqinfo;
+ UintR schemaVersion;
+ UintR storedProcId;
+ UintR simpleTcConnect;
+ UintR tableref;
+ UintR tcOprec;
+ UintR tcScanInfo;
+ UintR tcScanRec;
+ UintR totReclenAi;
+ UintR totSendlenAi;
+ UintR tupConnectrec;
+ UintR savePointId;
+ TransactionState transactionState;
+ BlockReference applRef;
+ BlockReference clientBlockref;
+
+ BlockReference reqBlockref;
+ BlockReference tcBlockref;
+ BlockReference tcAccBlockref;
+ BlockReference tcTuxBlockref;
+ BlockReference tcTupBlockref;
+ Uint32 commitAckMarker;
+ union {
+ Uint32 m_scan_curr_range_no;
+ UintR noFiredTriggers;
+ };
+ Uint16 errorCode;
+ Uint16 logStartPageIndex;
+ Uint16 logStartPageNo;
+ Uint16 logStopPageNo;
+ Uint16 nextReplica;
+ Uint16 primKeyLen;
+ Uint16 save1;
+ Uint16 nodeAfterNext[3];
+
+ Uint8 activeCreat;
+ Uint8 apiVersionNo;
+ Uint8 dirtyOp;
+ Uint8 indTakeOver;
+ Uint8 lastReplicaNo;
+ Uint8 localFragptr;
+ Uint8 lockType;
+ Uint8 nextSeqNoReplica;
+ Uint8 opSimple;
+ Uint8 opExec;
+ Uint8 operation;
+ Uint8 reclenAiLqhkey;
+ Uint8 m_offset_current_keybuf;
+ Uint8 replicaType;
+ Uint8 simpleRead;
+ Uint8 seqNoReplica;
+ Uint8 tcNodeFailrec;
+ }; /* p2c: size = 280 bytes */
+
+ typedef Ptr<TcConnectionrec> TcConnectionrecPtr;
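+
+  /*
+    Editorial sketch: the TransactionState values above are grouped into
+    transaction-in-progress, commit, abort and scan phases.  A hypothetical
+    classification helper, relying only on the enum values listed above and
+    not part of the block:
+
+      bool inCommitPhase(TcConnectionrec::TransactionState s)
+      {
+        return s >= TcConnectionrec::COMMIT_STOPPED &&   // = 10
+               s <= TcConnectionrec::COMMITTED;          // = 13
+      }
+  */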
+
+ struct TcNodeFailRecord {
+ enum TcFailStatus {
+ TC_STATE_TRUE = 0,
+ TC_STATE_FALSE = 1,
+ TC_STATE_BREAK = 2
+ };
+ UintR lastNewTcRef;
+ UintR newTcRef;
+ TcFailStatus tcFailStatus;
+ UintR tcRecNow;
+ BlockReference lastNewTcBlockref;
+ BlockReference newTcBlockref;
+ Uint16 oldNodeId;
+ }; // Size 28 bytes
+ typedef Ptr<TcNodeFailRecord> TcNodeFailRecordPtr;
+
+ struct CommitLogRecord {
+ Uint32 startPageNo;
+ Uint32 startPageIndex;
+ Uint32 stopPageNo;
+ Uint32 fileNo;
+ };
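+
+  /*
+    Editorial sketch: the COMMIT TRANSACTION record layout documented in the
+    LogPageRecord comment carries the position of the prepare record in
+    words 5..8, which is exactly what CommitLogRecord holds.  A purely
+    illustrative unpacking (word offsets taken from that comment; the real
+    reader in the block implementation may differ):
+
+      void unpackCommitRecord(const Uint32* rec, CommitLogRecord& out)
+      {
+        out.fileNo         = rec[5];  // WORD 5: file no of the prepare record
+        out.startPageNo    = rec[6];  // WORD 6: starting page number
+        out.startPageIndex = rec[7];  // WORD 7: starting page index
+        out.stopPageNo     = rec[8];  // WORD 8: stop page number
+      }
+  */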
+
+public:
+ Dblqh(const class Configuration &);
+ virtual ~Dblqh();
+
+private:
+ BLOCK_DEFINES(Dblqh);
+
+ void execPACKED_SIGNAL(Signal* signal);
+ void execDEBUG_SIG(Signal* signal);
+ void execATTRINFO(Signal* signal);
+ void execKEYINFO(Signal* signal);
+ void execLQHKEYREQ(Signal* signal);
+ void execLQHKEYREF(Signal* signal);
+ void execCOMMIT(Signal* signal);
+ void execCOMPLETE(Signal* signal);
+ void execLQHKEYCONF(Signal* signal);
+ void execTESTSIG(Signal* signal);
+ void execLQH_RESTART_OP(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+ void execSTART_RECREQ(Signal* signal);
+ void execSTART_RECCONF(Signal* signal);
+ void execEXEC_FRAGREQ(Signal* signal);
+ void execEXEC_FRAGCONF(Signal* signal);
+ void execEXEC_FRAGREF(Signal* signal);
+ void execSTART_EXEC_SR(Signal* signal);
+ void execEXEC_SRREQ(Signal* signal);
+ void execEXEC_SRCONF(Signal* signal);
+ void execREAD_PSEUDO_REQ(Signal* signal);
+
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execACC_COM_BLOCK(Signal* signal);
+ void execACC_COM_UNBLOCK(Signal* signal);
+ void execTUP_COM_BLOCK(Signal* signal);
+ void execTUP_COM_UNBLOCK(Signal* signal);
+ void execACC_ABORTCONF(Signal* signal);
+ void execNODE_FAILREP(Signal* signal);
+ void execCHECK_LCP_STOP(Signal* signal);
+ void execSEND_PACKED(Signal* signal);
+ void execTUP_ATTRINFO(Signal* signal);
+ void execREAD_CONFIG_REQ(Signal* signal);
+ void execLQHFRAGREQ(Signal* signal);
+ void execLQHADDATTREQ(Signal* signal);
+ void execTUP_ADD_ATTCONF(Signal* signal);
+ void execTUP_ADD_ATTRREF(Signal* signal);
+ void execACCFRAGCONF(Signal* signal);
+ void execACCFRAGREF(Signal* signal);
+ void execTUPFRAGCONF(Signal* signal);
+ void execTUPFRAGREF(Signal* signal);
+ void execTAB_COMMITREQ(Signal* signal);
+ void execACCSEIZECONF(Signal* signal);
+ void execACCSEIZEREF(Signal* signal);
+ void execREAD_NODESCONF(Signal* signal);
+ void execREAD_NODESREF(Signal* signal);
+ void execSTTOR(Signal* signal);
+ void execNDB_STTOR(Signal* signal);
+ void execTUPSEIZECONF(Signal* signal);
+ void execTUPSEIZEREF(Signal* signal);
+ void execACCKEYCONF(Signal* signal);
+ void execACCKEYREF(Signal* signal);
+ void execTUPKEYCONF(Signal* signal);
+ void execTUPKEYREF(Signal* signal);
+ void execABORT(Signal* signal);
+ void execABORTREQ(Signal* signal);
+ void execCOMMITREQ(Signal* signal);
+ void execCOMPLETEREQ(Signal* signal);
+ void execMEMCHECKREQ(Signal* signal);
+ void execSCAN_FRAGREQ(Signal* signal);
+ void execSCAN_NEXTREQ(Signal* signal);
+ void execACC_SCANCONF(Signal* signal);
+ void execACC_SCANREF(Signal* signal);
+ void execNEXT_SCANCONF(Signal* signal);
+ void execNEXT_SCANREF(Signal* signal);
+ void execACC_TO_REF(Signal* signal);
+ void execSTORED_PROCCONF(Signal* signal);
+ void execSTORED_PROCREF(Signal* signal);
+ void execCOPY_FRAGREQ(Signal* signal);
+ void execCOPY_ACTIVEREQ(Signal* signal);
+ void execCOPY_STATEREQ(Signal* signal);
+ void execLQH_TRANSREQ(Signal* signal);
+ void execTRANSID_AI(Signal* signal);
+ void execINCL_NODEREQ(Signal* signal);
+ void execACC_LCPCONF(Signal* signal);
+ void execACC_LCPREF(Signal* signal);
+ void execACC_LCPSTARTED(Signal* signal);
+ void execACC_CONTOPCONF(Signal* signal);
+ void execLCP_FRAGIDCONF(Signal* signal);
+ void execLCP_FRAGIDREF(Signal* signal);
+ void execLCP_HOLDOPCONF(Signal* signal);
+ void execLCP_HOLDOPREF(Signal* signal);
+ void execTUP_PREPLCPCONF(Signal* signal);
+ void execTUP_PREPLCPREF(Signal* signal);
+ void execTUP_LCPCONF(Signal* signal);
+ void execTUP_LCPREF(Signal* signal);
+ void execTUP_LCPSTARTED(Signal* signal);
+ void execEND_LCPCONF(Signal* signal);
+
+ void execLCP_FRAG_ORD(Signal* signal);
+ void execEMPTY_LCP_REQ(Signal* signal);
+
+ void execSTART_FRAGREQ(Signal* signal);
+ void execSTART_RECREF(Signal* signal);
+ void execSR_FRAGIDCONF(Signal* signal);
+ void execSR_FRAGIDREF(Signal* signal);
+ void execACC_SRCONF(Signal* signal);
+ void execACC_SRREF(Signal* signal);
+ void execTUP_SRCONF(Signal* signal);
+ void execTUP_SRREF(Signal* signal);
+ void execGCP_SAVEREQ(Signal* signal);
+ void execFSOPENCONF(Signal* signal);
+ void execFSOPENREF(Signal* signal);
+ void execFSCLOSECONF(Signal* signal);
+ void execFSCLOSEREF(Signal* signal);
+ void execFSWRITECONF(Signal* signal);
+ void execFSWRITEREF(Signal* signal);
+ void execFSREADCONF(Signal* signal);
+ void execFSREADREF(Signal* signal);
+ void execSCAN_HBREP(Signal* signal);
+ void execSET_VAR_REQ(Signal* signal);
+ void execTIME_SIGNAL(Signal* signal);
+ void execFSSYNCCONF(Signal* signal);
+ void execFSSYNCREF(Signal* signal);
+
+ void execALTER_TAB_REQ(Signal* signal);
+ void execALTER_TAB_CONF(Signal* signal);
+
+ void execCREATE_TRIG_CONF(Signal* signal);
+ void execCREATE_TRIG_REF(Signal* signal);
+ void execCREATE_TRIG_REQ(Signal* signal);
+
+ void execDROP_TRIG_CONF(Signal* signal);
+ void execDROP_TRIG_REF(Signal* signal);
+ void execDROP_TRIG_REQ(Signal* signal);
+
+ void execPREP_DROP_TAB_REQ(Signal* signal);
+ void execWAIT_DROP_TAB_REQ(Signal* signal);
+ void execDROP_TAB_REQ(Signal* signal);
+
+ void execLQH_ALLOCREQ(Signal* signal);
+ void execLQH_WRITELOG_REQ(Signal* signal);
+
+ void execTUXFRAGCONF(Signal* signal);
+ void execTUXFRAGREF(Signal* signal);
+ void execTUX_ADD_ATTRCONF(Signal* signal);
+ void execTUX_ADD_ATTRREF(Signal* signal);
+
+ // Statement blocks
+
+ void init_acc_ptr_list(ScanRecord*);
+ bool seize_acc_ptr_list(ScanRecord*, Uint32);
+ void release_acc_ptr_list(ScanRecord*);
+ Uint32 get_acc_ptr_from_scan_record(ScanRecord*, Uint32, bool);
+ void set_acc_ptr_in_scan_record(ScanRecord*, Uint32, Uint32);
+ void i_get_acc_ptr(ScanRecord*, Uint32*&, Uint32);
+
+ void removeTable(Uint32 tableId);
+ void sendLCP_COMPLETE_REP(Signal* signal, Uint32 lcpId);
+ void sendEMPTY_LCP_CONF(Signal* signal, bool idle);
+ void sendLCP_FRAGIDREQ(Signal* signal);
+ void sendLCP_FRAG_REP(Signal * signal, const LcpRecord::FragOrd &) const;
+
+ void updatePackedList(Signal* signal, HostRecord * ahostptr, Uint16 hostId);
+ void LQHKEY_abort(Signal* signal, int errortype);
+ void LQHKEY_error(Signal* signal, int errortype);
+ void nextRecordCopy(Signal* signal);
+ void calculateHash(Signal* signal);
+ void continueAfterCheckLcpStopBlocked(Signal* signal);
+ void checkLcpStopBlockedLab(Signal* signal);
+ void sendCommittedTc(Signal* signal, BlockReference atcBlockref);
+ void sendCompletedTc(Signal* signal, BlockReference atcBlockref);
+ void sendLqhkeyconfTc(Signal* signal, BlockReference atcBlockref);
+ void sendCommitLqh(Signal* signal, BlockReference alqhBlockref);
+ void sendCompleteLqh(Signal* signal, BlockReference alqhBlockref);
+ void sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr);
+ void sendPackedSignalTc(Signal* signal, HostRecord * ahostptr);
+ Uint32 handleLongTupKey(Signal* signal,
+ Uint32 lenSofar,
+ Uint32 primKeyLen,
+ Uint32* dataPtr);
+ void cleanUp(Signal* signal);
+ void sendAttrinfoLoop(Signal* signal);
+ void sendAttrinfoSignal(Signal* signal);
+ void sendLqhAttrinfoSignal(Signal* signal);
+ void sendKeyinfoAcc(Signal* signal, Uint32 pos);
+ Uint32 initScanrec(const class ScanFragReq *);
+ void initScanTc(Signal* signal,
+ Uint32 transid1,
+ Uint32 transid2,
+ Uint32 fragId,
+ Uint32 nodeId);
+ void finishScanrec(Signal* signal);
+ void releaseScanrec(Signal* signal);
+ void seizeScanrec(Signal* signal);
+ Uint32 sendKeyinfo20(Signal* signal, ScanRecord *, TcConnectionrec *);
+ void sendScanFragConf(Signal* signal, Uint32 scanCompleted);
+ void initCopyrec(Signal* signal);
+ void initCopyTc(Signal* signal);
+ void sendCopyActiveConf(Signal* signal,Uint32 tableId);
+ void checkLcpCompleted(Signal* signal);
+ void checkLcpHoldop(Signal* signal);
+ bool checkLcpStarted(Signal* signal);
+ void checkLcpTupprep(Signal* signal);
+ void getNextFragForLcp(Signal* signal);
+ void initLcpLocAcc(Signal* signal, Uint32 fragId);
+ void initLcpLocTup(Signal* signal, Uint32 fragId);
+ void moveAccActiveFrag(Signal* signal);
+ void moveActiveToAcc(Signal* signal);
+ void releaseLocalLcps(Signal* signal);
+ void seizeLcpLoc(Signal* signal);
+ void sendAccContOp(Signal* signal);
+ void sendStartLcp(Signal* signal);
+ void setLogTail(Signal* signal, Uint32 keepGci);
+ Uint32 remainingLogSize(const LogFileRecordPtr &sltCurrLogFilePtr,
+ const LogPartRecordPtr &sltLogPartPtr);
+ void checkGcpCompleted(Signal* signal, Uint32 pageWritten, Uint32 wordWritten);
+ void initFsopenconf(Signal* signal);
+ void initFsrwconf(Signal* signal);
+ void initLfo(Signal* signal);
+ void initLogfile(Signal* signal, Uint32 fileNo);
+ void initLogpage(Signal* signal);
+ void openFileRw(Signal* signal, LogFileRecordPtr olfLogFilePtr);
+ void openLogfileInit(Signal* signal);
+ void openNextLogfile(Signal* signal);
+ void releaseLfo(Signal* signal);
+ void releaseLfoPages(Signal* signal);
+ void releaseLogpage(Signal* signal);
+ void seizeLfo(Signal* signal);
+ void seizeLogfile(Signal* signal);
+ void seizeLogpage(Signal* signal);
+ void writeFileDescriptor(Signal* signal);
+ void writeFileHeaderOpen(Signal* signal, Uint32 type);
+ void writeInitMbyte(Signal* signal);
+ void writeSinglePage(Signal* signal, Uint32 pageNo, Uint32 wordWritten);
+ void buildLinkedLogPageList(Signal* signal);
+ void changeMbyte(Signal* signal);
+ Uint32 checkIfExecLog(Signal* signal);
+ void checkNewMbyte(Signal* signal);
+ void checkReadExecSr(Signal* signal);
+ void checkScanTcCompleted(Signal* signal);
+ void checkSrCompleted(Signal* signal);
+ void closeFile(Signal* signal, LogFileRecordPtr logFilePtr);
+ void completedLogPage(Signal* signal, Uint32 clpType);
+ void deleteFragrec(Uint32 fragId);
+ void deleteTransidHash(Signal* signal);
+ void findLogfile(Signal* signal,
+ Uint32 fileNo,
+ LogPartRecordPtr flfLogPartPtr,
+ LogFileRecordPtr* parLogFilePtr);
+ void findPageRef(Signal* signal, CommitLogRecord* commitLogRecord);
+ int findTransaction(UintR Transid1, UintR Transid2, UintR TcOprec);
+ void getFirstInLogQueue(Signal* signal);
+ bool getFragmentrec(Signal* signal, Uint32 fragId);
+ void initialiseAddfragrec(Signal* signal);
+ void initialiseAttrbuf(Signal* signal);
+ void initialiseDatabuf(Signal* signal);
+ void initialiseFragrec(Signal* signal);
+ void initialiseGcprec(Signal* signal);
+ void initialiseLcpRec(Signal* signal);
+ void initialiseLcpLocrec(Signal* signal);
+ void initialiseLfo(Signal* signal);
+ void initialiseLogFile(Signal* signal);
+ void initialiseLogPage(Signal* signal);
+ void initialiseLogPart(Signal* signal);
+ void initialisePageRef(Signal* signal);
+ void initialiseScanrec(Signal* signal);
+ void initialiseTabrec(Signal* signal);
+ void initialiseTcrec(Signal* signal);
+ void initialiseTcNodeFailRec(Signal* signal);
+ void initFragrec(Signal* signal,
+ Uint32 tableId,
+ Uint32 fragId,
+ Uint32 copyType);
+ void initFragrecSr(Signal* signal);
+ void initGciInLogFileRec(Signal* signal, Uint32 noFdDesc);
+ void initLcpSr(Signal* signal,
+ Uint32 lcpNo,
+ Uint32 lcpId,
+ Uint32 tableId,
+ Uint32 fragId,
+ Uint32 fragPtr);
+ void initLogpart(Signal* signal);
+ void initLogPointers(Signal* signal);
+ void initReqinfoExecSr(Signal* signal);
+ bool insertFragrec(Signal* signal, Uint32 fragId);
+ void linkActiveFrag(Signal* signal);
+ void linkFragQueue(Signal* signal);
+ void linkWaitLog(Signal* signal, LogPartRecordPtr regLogPartPtr);
+ void logNextStart(Signal* signal);
+ void moveToPageRef(Signal* signal);
+ void readAttrinfo(Signal* signal);
+ void readCommitLog(Signal* signal, CommitLogRecord* commitLogRecord);
+ void readExecLog(Signal* signal);
+ void readExecSrNewMbyte(Signal* signal);
+ void readExecSr(Signal* signal);
+ void readKey(Signal* signal);
+ void readLogData(Signal* signal, Uint32 noOfWords, Uint32* dataPtr);
+ void readLogHeader(Signal* signal);
+ Uint32 readLogword(Signal* signal);
+ Uint32 readLogwordExec(Signal* signal);
+ void readSinglePage(Signal* signal, Uint32 pageNo);
+ void releaseAccList(Signal* signal);
+ void releaseActiveCopy(Signal* signal);
+ void releaseActiveFrag(Signal* signal);
+ void releaseActiveList(Signal* signal);
+ void releaseAddfragrec(Signal* signal);
+ void releaseFragrec();
+ void releaseLcpLoc(Signal* signal);
+ void releaseOprec(Signal* signal);
+ void releasePageRef(Signal* signal);
+ void releaseMmPages(Signal* signal);
+ void releasePrPages(Signal* signal);
+ void releaseTcrec(Signal* signal, TcConnectionrecPtr tcConnectptr);
+ void releaseTcrecLog(Signal* signal, TcConnectionrecPtr tcConnectptr);
+ void releaseWaitQueue(Signal* signal);
+ void removeLogTcrec(Signal* signal);
+ void removePageRef(Signal* signal);
+ Uint32 returnExecLog(Signal* signal);
+ int saveTupattrbuf(Signal* signal, Uint32* dataPtr, Uint32 length);
+ void seizeAddfragrec(Signal* signal);
+ void seizeAttrinbuf(Signal* signal);
+ Uint32 seize_attrinbuf();
+ Uint32 release_attrinbuf(Uint32);
+ Uint32 copy_bounds(Uint32 * dst, TcConnectionrec*);
+
+ void seizeFragmentrec(Signal* signal);
+ void seizePageRef(Signal* signal);
+ void seizeTcrec();
+ void seizeTupkeybuf(Signal* signal);
+ void sendAborted(Signal* signal);
+ void sendLqhTransconf(Signal* signal, LqhTransConf::OperationStatus);
+ void sendTupkey(Signal* signal);
+ void startExecSr(Signal* signal);
+ void startNextExecSr(Signal* signal);
+ void startTimeSupervision(Signal* signal);
+ void stepAhead(Signal* signal, Uint32 stepAheadWords);
+ void systemError(Signal* signal);
+ void writeAbortLog(Signal* signal);
+ void writeCommitLog(Signal* signal, LogPartRecordPtr regLogPartPtr);
+ void writeCompletedGciLog(Signal* signal);
+ void writeDirty(Signal* signal);
+ void writeKey(Signal* signal);
+ void writeLogHeader(Signal* signal);
+ void writeLogWord(Signal* signal, Uint32 data);
+ void writeNextLog(Signal* signal);
+ void errorReport(Signal* signal, int place);
+ void warningReport(Signal* signal, int place);
+ void invalidateLogAfterLastGCI(Signal *signal);
+ void readFileInInvalidate(Signal *signal);
+ void exitFromInvalidate(Signal* signal);
+ Uint32 calcPageCheckSum(LogPageRecordPtr logP);
+
+ // Generated statement blocks
+ void systemErrorLab(Signal* signal);
+ void initFourth(Signal* signal);
+ void packLqhkeyreqLab(Signal* signal);
+ void sendNdbSttorryLab(Signal* signal);
+ void execSrCompletedLab(Signal* signal);
+ void execLogRecord(Signal* signal);
+ void srPhase3Comp(Signal* signal);
+ void srLogLimits(Signal* signal);
+ void srGciLimits(Signal* signal);
+ void srPhase3Start(Signal* signal);
+ void warningHandlerLab(Signal* signal);
+ void checkStartCompletedLab(Signal* signal);
+ void continueAbortLab(Signal* signal);
+ void abortContinueAfterBlockedLab(Signal* signal, bool canBlock);
+ void abortCommonLab(Signal* signal);
+ void localCommitLab(Signal* signal);
+ void abortErrorLab(Signal* signal);
+ void continueAfterReceivingAllAiLab(Signal* signal);
+ void abortStateHandlerLab(Signal* signal);
+ void writeAttrinfoLab(Signal* signal);
+ void scanAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length);
+ void abort_scan(Signal* signal, Uint32 scan_ptr_i, Uint32 errcode);
+ void localAbortStateHandlerLab(Signal* signal);
+ void logLqhkeyreqLab(Signal* signal);
+ void lqhAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length);
+ void rwConcludedAiLab(Signal* signal);
+ void aiStateErrorCheckLab(Signal* signal, Uint32* dataPtr, Uint32 length);
+ void takeOverErrorLab(Signal* signal);
+ void endgettupkeyLab(Signal* signal);
+ void noFreeRecordLab(Signal* signal,
+ const class LqhKeyReq * lqhKeyReq,
+ Uint32 errorCode);
+ void logLqhkeyrefLab(Signal* signal);
+ void closeCopyLab(Signal* signal);
+ void commitReplyLab(Signal* signal);
+ void completeUnusualLab(Signal* signal);
+ void completeTransNotLastLab(Signal* signal);
+ void completedLab(Signal* signal);
+ void copyCompletedLab(Signal* signal);
+ void completeLcpRoundLab(Signal* signal);
+ void continueAfterLogAbortWriteLab(Signal* signal);
+ void sendAttrinfoLab(Signal* signal);
+ void sendExecConf(Signal* signal);
+ void execSr(Signal* signal);
+ void srFourthComp(Signal* signal);
+ void timeSup(Signal* signal);
+ void closeCopyRequestLab(Signal* signal);
+ void closeScanRequestLab(Signal* signal);
+ void scanTcConnectLab(Signal* signal, Uint32 startTcCon, Uint32 fragId);
+ void initGcpRecLab(Signal* signal);
+ void prepareContinueAfterBlockedLab(Signal* signal);
+ void commitContinueAfterBlockedLab(Signal* signal);
+ void continueCopyAfterBlockedLab(Signal* signal);
+ void continueFirstCopyAfterBlockedLab(Signal* signal);
+ void continueFirstScanAfterBlockedLab(Signal* signal);
+ void continueScanAfterBlockedLab(Signal* signal);
+ void continueScanReleaseAfterBlockedLab(Signal* signal);
+ void continueCloseScanAfterBlockedLab(Signal* signal);
+ void continueCloseCopyAfterBlockedLab(Signal* signal);
+ void sendExecFragRefLab(Signal* signal);
+ void fragrefLab(Signal* signal, BlockReference retRef,
+ Uint32 retPtr, Uint32 errorCode);
+ void abortAddFragOps(Signal* signal);
+ void rwConcludedLab(Signal* signal);
+ void sendsttorryLab(Signal* signal);
+ void initialiseRecordsLab(Signal* signal, Uint32 data, Uint32, Uint32);
+ void startphase2Lab(Signal* signal, Uint32 config);
+ void startphase3Lab(Signal* signal);
+ void startphase4Lab(Signal* signal);
+ void startphase6Lab(Signal* signal);
+ void moreconnectionsLab(Signal* signal);
+ void scanReleaseLocksLab(Signal* signal);
+ void closeScanLab(Signal* signal);
+ void nextScanConfLoopLab(Signal* signal);
+ void scanNextLoopLab(Signal* signal);
+ void commitReqLab(Signal* signal, Uint32 gci);
+ void completeTransLastLab(Signal* signal);
+ void tupScanCloseConfLab(Signal* signal);
+ void tupCopyCloseConfLab(Signal* signal);
+ void accScanCloseConfLab(Signal* signal);
+ void accCopyCloseConfLab(Signal* signal);
+ void nextScanConfScanLab(Signal* signal);
+ void nextScanConfCopyLab(Signal* signal);
+ void continueScanNextReqLab(Signal* signal);
+ void keyinfoLab(const Uint32 * src, const Uint32 * end);
+ void copySendTupkeyReqLab(Signal* signal);
+ void storedProcConfScanLab(Signal* signal);
+ void storedProcConfCopyLab(Signal* signal);
+ void copyStateFinishedLab(Signal* signal);
+ void lcpCompletedLab(Signal* signal);
+ void lcpStartedLab(Signal* signal);
+ void contChkpNextFragLab(Signal* signal);
+ void startLcpRoundLab(Signal* signal);
+ void startFragRefLab(Signal* signal);
+ void srCompletedLab(Signal* signal);
+ void openFileInitLab(Signal* signal);
+ void openSrFrontpageLab(Signal* signal);
+ void openSrLastFileLab(Signal* signal);
+ void openSrNextFileLab(Signal* signal);
+ void openExecSrStartLab(Signal* signal);
+ void openExecSrNewMbyteLab(Signal* signal);
+ void openSrFourthPhaseLab(Signal* signal);
+ void openSrFourthZeroSkipInitLab(Signal* signal);
+ void openSrFourthZeroLab(Signal* signal);
+ void openExecLogLab(Signal* signal);
+ void checkInitCompletedLab(Signal* signal);
+ void closingSrLab(Signal* signal);
+ void closeExecSrLab(Signal* signal);
+ void execLogComp(Signal* signal);
+ void closeWriteLogLab(Signal* signal);
+ void closeExecLogLab(Signal* signal);
+ void writePageZeroLab(Signal* signal);
+ void lastWriteInFileLab(Signal* signal);
+ void initWriteEndLab(Signal* signal);
+ void initFirstPageLab(Signal* signal);
+ void writeGciZeroLab(Signal* signal);
+ void writeDirtyLab(Signal* signal);
+ void writeInitMbyteLab(Signal* signal);
+ void writeLogfileLab(Signal* signal);
+ void firstPageWriteLab(Signal* signal);
+ void readSrLastMbyteLab(Signal* signal);
+ void readSrLastFileLab(Signal* signal);
+ void readSrNextFileLab(Signal* signal);
+ void readExecSrLab(Signal* signal);
+ void readExecLogLab(Signal* signal);
+ void readSrFourthPhaseLab(Signal* signal);
+ void readSrFourthZeroLab(Signal* signal);
+ void copyLqhKeyRefLab(Signal* signal);
+ void restartOperationsLab(Signal* signal);
+ void lqhTransNextLab(Signal* signal);
+ void restartOperationsAfterStopLab(Signal* signal);
+ void sttorStartphase1Lab(Signal* signal);
+ void startphase1Lab(Signal* signal, Uint32 config, Uint32 nodeId);
+ void tupkeyConfLab(Signal* signal);
+ void copyTupkeyConfLab(Signal* signal);
+ void scanTupkeyConfLab(Signal* signal);
+ void scanTupkeyRefLab(Signal* signal);
+ void accScanConfScanLab(Signal* signal);
+ void accScanConfCopyLab(Signal* signal);
+ void scanLockReleasedLab(Signal* signal);
+ void openSrFourthNextLab(Signal* signal);
+ void closingInitLab(Signal* signal);
+ void closeExecSrCompletedLab(Signal* signal);
+ void readSrFrontpageLab(Signal* signal);
+
+ void sendAddFragReq(Signal* signal);
+ void sendAddAttrReq(Signal* signal);
+ void checkDropTab(Signal*);
+ Uint32 checkDropTabState(Tablerec::TableStatus, Uint32) const;
+
+ // Initialisation
+ void initData();
+ void initRecords();
+
+ Dbtup* c_tup;
+ Uint32 readPrimaryKeys(ScanRecord*, TcConnectionrec*, Uint32 * dst);
+// ----------------------------------------------------------------
+// These are the variables handling the records. For most record types
+// there is one pointer to the array of structs, one pointer-struct, a
+// file size and a first-free-record variable. The pointer-structs are
+// temporary variables kept on the class object, since a great number
+// of them often exist simultaneously and no perfect way of handling
+// them is currently available.
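+// As an illustration, the AddFragRecord below follows this pattern:
+// addFragRecord (the array), addfragptr (the pointer-struct),
+// cfirstfreeAddfragrec (first free record) and caddfragrecFileSize (size).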
+// ----------------------------------------------------------------
+/* ------------------------------------------------------------------------- */
+/* POSITIONS WITHIN THE ATTRINBUF AND THE MAX SIZE OF DATA WITHIN AN */
+/* ATTRINBUF. */
+/* ------------------------------------------------------------------------- */
+
+
+#define ZADDFRAGREC_FILE_SIZE 1
+ AddFragRecord *addFragRecord;
+ AddFragRecordPtr addfragptr;
+ UintR cfirstfreeAddfragrec;
+ UintR caddfragrecFileSize;
+
+#define ZATTRINBUF_FILE_SIZE 12288 // 1.5 MByte
+#define ZINBUF_DATA_LEN 24      /* POSITION OF 'DATA LENGTH'-VARIABLE. */
+#define ZINBUF_NEXT 25 /* POSITION OF 'NEXT'-VARIABLE. */
+ Attrbuf *attrbuf;
+ AttrbufPtr attrinbufptr;
+ UintR cfirstfreeAttrinbuf;
+ UintR cattrinbufFileSize;
+ Uint32 c_no_attrinbuf_recs;
+
+#define ZDATABUF_FILE_SIZE 10000 // 200 kByte
+ Databuf *databuf;
+ DatabufPtr databufptr;
+ UintR cfirstfreeDatabuf;
+ UintR cdatabufFileSize;
+
+// Configurable
+ Fragrecord *fragrecord;
+ FragrecordPtr fragptr;
+ UintR cfirstfreeFragrec;
+ UintR cfragrecFileSize;
+
+#define ZGCPREC_FILE_SIZE 1
+ GcpRecord *gcpRecord;
+ GcpRecordPtr gcpPtr;
+ UintR cgcprecFileSize;
+
+// MAX_NDB_NODES is the size of this array
+ HostRecord *hostRecord;
+ UintR chostFileSize;
+
+#define ZNO_CONCURRENT_LCP 1
+ LcpRecord *lcpRecord;
+ LcpRecordPtr lcpPtr;
+ UintR cfirstfreeLcpLoc;
+ UintR clcpFileSize;
+
+#define ZLCP_LOCREC_FILE_SIZE 4
+ LcpLocRecord *lcpLocRecord;
+ LcpLocRecordPtr lcpLocptr;
+ UintR clcpLocrecFileSize;
+
+#define ZLOG_PART_FILE_SIZE 4
+ LogPartRecord *logPartRecord;
+ LogPartRecordPtr logPartPtr;
+ UintR clogPartFileSize;
+
+// Configurable
+ LogFileRecord *logFileRecord;
+ LogFileRecordPtr logFilePtr;
+ UintR cfirstfreeLogFile;
+ UintR clogFileFileSize;
+
+#define ZLFO_FILE_SIZE 256 /* MAX 256 OUTSTANDING FILE OPERATIONS */
+ LogFileOperationRecord *logFileOperationRecord;
+ LogFileOperationRecordPtr lfoPtr;
+ UintR cfirstfreeLfo;
+ UintR clfoFileSize;
+
+ LogPageRecord *logPageRecord;
+ LogPageRecordPtr logPagePtr;
+ UintR cfirstfreeLogPage;
+ UintR clogPageFileSize;
+
+#define ZPAGE_REF_FILE_SIZE 20
+ PageRefRecord *pageRefRecord;
+ PageRefRecordPtr pageRefPtr;
+ UintR cfirstfreePageRef;
+ UintR cpageRefFileSize;
+
+#define ZSCANREC_FILE_SIZE 100
+ ArrayPool<ScanRecord> c_scanRecordPool;
+ ScanRecordPtr scanptr;
+ UintR cscanNoFreeRec;
+ Uint32 cscanrecFileSize;
+
+// Configurable
+ Tablerec *tablerec;
+ TablerecPtr tabptr;
+ UintR ctabrecFileSize;
+
+// Configurable
+ TcConnectionrec *tcConnectionrec;
+ TcConnectionrecPtr tcConnectptr;
+ UintR cfirstfreeTcConrec;
+ UintR ctcConnectrecFileSize;
+
+// MAX_NDB_NODES is the size of this array
+ TcNodeFailRecord *tcNodeFailRecord;
+ TcNodeFailRecordPtr tcNodeFailptr;
+ UintR ctcNodeFailrecFileSize;
+
+ Uint16 terrorCode;
+
+ Uint32 c_firstInNodeGroup;
+
+// ------------------------------------------------------------------------
+// These variables are used to store block state that does not need arrays
+// of structs.
+// ------------------------------------------------------------------------
+ Uint32 c_lcpId;
+ Uint32 cnoOfFragsCheckpointed;
+
+/* ------------------------------------------------------------------------- */
+// cmaxWordsAtNodeRec keeps track of how many words are currently
+// outstanding in a node recovery situation.
+// cbookedAccOps keeps track of how many operation records have been
+// booked in ACC for the scan processes.
+// cmaxAccOps contains the maximum number of operation records that can be
+// allocated for scan purposes in ACC.
+/* ------------------------------------------------------------------------- */
+ UintR cmaxWordsAtNodeRec;
+ UintR cbookedAccOps;
+ UintR cmaxAccOps;
+/* ------------------------------------------------------------------------- */
+/*THIS STATE VARIABLE IS ZTRUE IF AN ADD NODE IS ONGOING. ADD NODE MEANS     */
+/*THAT CONNECTIONS ARE SET UP TO THE NEW NODE.                               */
+/* ------------------------------------------------------------------------- */
+ Uint8 caddNodeState;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE SPECIFIES WHICH TYPE OF RESTART IS ONGOING.                  */
+/* ------------------------------------------------------------------------- */
+ Uint16 cstartType;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE INDICATES WHETHER AN INITIAL RESTART IS ONGOING OR NOT. */
+/* ------------------------------------------------------------------------- */
+ Uint8 cinitialStartOngoing;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE KEEPS TRACK OF WHEN TUP AND ACC HAVE COMPLETED EXECUTING */
+/*THEIR UNDO LOG. */
+/* ------------------------------------------------------------------------- */
+ ExecUndoLogState csrExecUndoLogState;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE KEEPS TRACK OF WHEN TUP AND ACC HAVE CONFIRMED COMPLETION */
+/*OF A LOCAL CHECKPOINT ROUND. */
+/* ------------------------------------------------------------------------- */
+ LcpCloseState clcpCompletedState;
+/* ------------------------------------------------------------------------- */
+/*DURING CONNECTION PROCESSES IN SYSTEM RESTART THESE VARIABLES KEEP TRACK   */
+/*OF HOW MANY CONNECTIONS AND RELEASES ARE TO BE PERFORMED.                  */
+/* ------------------------------------------------------------------------- */
+/***************************************************************************>*/
+/*THESE VARIABLES CONTAIN INFORMATION USED DURING SYSTEM RESTART. */
+/***************************************************************************>*/
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE IS ZTRUE IF THE SIGNAL START_REC_REQ HAS BEEN RECEIVED.      */
+/*RECEPTION OF THIS SIGNAL INDICATES THAT ALL FRAGMENTS THAT THIS NODE */
+/*SHOULD START HAVE BEEN RECEIVED. */
+/* ------------------------------------------------------------------------- */
+ Uint8 cstartRecReq;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE KEEPS TRACK OF HOW MANY FRAGMENTS PARTICIPATE IN             */
+/*EXECUTING THE LOG. IF ZERO WE DON'T NEED TO EXECUTE THE LOG AT ALL.        */
+/* ------------------------------------------------------------------------- */
+ UintR cnoFragmentsExecSr;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE KEEPS TRACK OF WHICH OF THE FIRST TWO RESTART PHASES         */
+/*HAVE COMPLETED.                                                            */
+/* ------------------------------------------------------------------------- */
+ Uint8 csrPhaseStarted;
+/* ------------------------------------------------------------------------- */
+/*NUMBER OF PHASES COMPLETED OF EXECUTING THE FRAGMENT LOG. */
+/* ------------------------------------------------------------------------- */
+ Uint8 csrPhasesCompleted;
+/* ------------------------------------------------------------------------- */
+/*THE BLOCK REFERENCE OF THE MASTER DIH DURING SYSTEM RESTART. */
+/* ------------------------------------------------------------------------- */
+ BlockReference cmasterDihBlockref;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE IS THE HEAD OF A LINKED LIST OF FRAGMENTS WAITING TO BE */
+/*RESTORED FROM DISK. */
+/* ------------------------------------------------------------------------- */
+ UintR cfirstWaitFragSr;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE IS THE HEAD OF A LINKED LIST OF FRAGMENTS THAT HAVE BEEN     */
+/*RESTORED FROM DISK AND AWAIT EXECUTION OF THE FRAGMENT LOG.                */
+/* ------------------------------------------------------------------------- */
+ UintR cfirstCompletedFragSr;
+
+ /**
+   * List of fragments for which log execution has completed
+ */
+ Uint32 c_redo_log_complete_frags;
+
+/* ------------------------------------------------------------------------- */
+/*USED DURING SYSTEM RESTART, INDICATES THE OLDEST GCI THAT CAN BE RESTARTED */
+/*FROM AFTER THIS SYSTEM RESTART. USED TO FIND THE LOG TAIL. */
+/* ------------------------------------------------------------------------- */
+ UintR crestartOldestGci;
+/* ------------------------------------------------------------------------- */
+/*USED DURING SYSTEM RESTART, INDICATES THE NEWEST GCI THAT CAN BE RESTARTED */
+/*AFTER THIS SYSTEM RESTART. USED TO FIND THE LOG HEAD. */
+/* ------------------------------------------------------------------------- */
+ UintR crestartNewestGci;
+/* ------------------------------------------------------------------------- */
+/*THE NUMBER OF LOG FILES. SET AS A PARAMETER WHEN NDB IS STARTED. */
+/* ------------------------------------------------------------------------- */
+ UintR cnoLogFiles;
+/* ------------------------------------------------------------------------- */
+/*THESE TWO VARIABLES CONTAIN THE NEWEST GCI RECEIVED IN THE BLOCK AND THE */
+/*NEWEST COMPLETED GCI IN THE BLOCK. */
+/* ------------------------------------------------------------------------- */
+ UintR cnewestGci;
+ UintR cnewestCompletedGci;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE ONLY PASSES INFORMATION FROM STTOR TO STTORRY (TEMPORARY).   */
+/* ------------------------------------------------------------------------- */
+ Uint16 csignalKey;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE CONTAINS THE CURRENT START PHASE IN THE BLOCK. IT IS ZNIL IF */
+/*NO SYSTEM RESTART IS ONGOING.                                              */
+/* ------------------------------------------------------------------------- */
+ Uint16 cstartPhase;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE CONTAINS THE CURRENT GLOBAL CHECKPOINT RECORD. IT IS RNIL IF */
+/*NO GCP SAVE IS ONGOING.                                                    */
+/* ------------------------------------------------------------------------- */
+ UintR ccurrentGcprec;
+/* ------------------------------------------------------------------------- */
+/*THESE VARIABLES ARE USED TO KEEP TRACK OF ALL ACTIVE COPY FRAGMENTS IN LQH.*/
+/* ------------------------------------------------------------------------- */
+ Uint8 cnoActiveCopy;
+ UintR cactiveCopy[4];
+
+/* ------------------------------------------------------------------------- */
+/*THESE VARIABLES CONTAIN THE BLOCK REFERENCES OF THE OTHER NDB BLOCKS. */
+/*ALSO THE BLOCK REFERENCE OF MY OWN BLOCK = LQH */
+/* ------------------------------------------------------------------------- */
+ BlockReference caccBlockref;
+ BlockReference ctupBlockref;
+ BlockReference ctuxBlockref;
+ BlockReference cownref;
+ UintR cLqhTimeOutCount;
+ UintR cLqhTimeOutCheckCount;
+ UintR cnoOfLogPages;
+ bool caccCommitBlocked;
+ bool ctupCommitBlocked;
+ bool cCommitBlocked;
+ UintR cCounterAccCommitBlocked;
+ UintR cCounterTupCommitBlocked;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE CONTAINS MY OWN PROCESSOR ID. */
+/* ------------------------------------------------------------------------- */
+ NodeId cownNodeid;
+
+/* ------------------------------------------------------------------------- */
+/*THESE VARIABLES CONTAIN INFORMATION ABOUT THE OTHER NODES IN THE SYSTEM.   */
+/*THESE VARIABLES ARE MOSTLY USED AT SYSTEM RESTART AND ADD NODE TO SET UP   */
+/*AND RELEASE CONNECTIONS TO OTHER NODES IN THE CLUSTER.                     */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/*THIS ARRAY CONTAINS THE PROCESSOR ID'S OF THE NODES THAT ARE ALIVE. */
+/*CNO_OF_NODES SPECIFIES HOW MANY NODES ARE CURRENTLY ALIVE.                 */
+/*CNODE_VERSION SPECIFIES THE NDB VERSION EXECUTING ON THE NODE. */
+/* ------------------------------------------------------------------------- */
+ UintR cpackedListIndex;
+ Uint16 cpackedList[MAX_NDB_NODES];
+ UintR cnodeData[MAX_NDB_NODES];
+ UintR cnodeStatus[MAX_NDB_NODES];
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE INDICATES WHETHER A CERTAIN NODE HAS SENT ALL FRAGMENTS THAT */
+/*NEED TO HAVE THE LOG EXECUTED. */
+/* ------------------------------------------------------------------------- */
+ Uint8 cnodeSrState[MAX_NDB_NODES];
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE INDICATES WHETHER A CERTAIN NODE HAS EXECUTED THE LOG        */
+/* ------------------------------------------------------------------------- */
+ Uint8 cnodeExecSrState[MAX_NDB_NODES];
+ UintR cnoOfNodes;
+
+/* ------------------------------------------------------------------------- */
+/* THIS VARIABLE CONTAINS THE DIRECTORY OF A HASH TABLE OF ALL ACTIVE        */
+/* OPERATIONS IN THE BLOCK. IT IS USED TO BE ABLE TO QUICKLY ABORT AN        */
+/* OPERATION WHOSE CONNECTION WAS LOST DUE TO NODE FAILURES. IT IS           */
+/* ACTUALLY USED FOR ALL ABORTS COMMANDED BY TC.                             */
+/* ------------------------------------------------------------------------- */
+ UintR preComputedRequestInfoMask;
+ UintR ctransidHash[1024];
+
+ Uint32 c_diskless;
+
+public:
+  /**
+   * A commit ack marker identifies a transaction (transid1, transid2)
+   * together with its API connection (apiRef, apiOprec) and TC node.
+   * Markers are kept in m_commitAckMarkerHash, hashed on transid1
+   * (see execREMOVE_MARKER_ORD and scanMarkers below).
+   */
+ struct CommitAckMarker {
+ Uint32 transid1;
+ Uint32 transid2;
+
+ Uint32 apiRef; // Api block ref
+ Uint32 apiOprec; // Connection Object in NDB API
+ Uint32 tcNodeId;
+ union { Uint32 nextPool; Uint32 nextHash; };
+ Uint32 prevHash;
+
+ inline bool equal(const CommitAckMarker & p) const {
+ return ((p.transid1 == transid1) && (p.transid2 == transid2));
+ }
+
+ inline Uint32 hashValue() const {
+ return transid1;
+ }
+ };
+
+ typedef Ptr<CommitAckMarker> CommitAckMarkerPtr;
+ ArrayPool<CommitAckMarker> m_commitAckMarkerPool;
+ DLHashTable<CommitAckMarker> m_commitAckMarkerHash;
+ typedef DLHashTable<CommitAckMarker>::Iterator CommitAckMarkerIterator;
+ void execREMOVE_MARKER_ORD(Signal* signal);
+ void scanMarkers(Signal* signal, Uint32 tcNodeFail, Uint32 bucket, Uint32 i);
+
+ struct Counters {
+ Uint32 operations;
+
+ inline void clear(){
+ operations = 0;
+ }
+ };
+
+ Counters c_Counters;
+
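+  // Reads are allowed until the node reaches stop level SL_STOPPING_3 of
+  // the shutdown sequence.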
+ inline bool getAllowRead() const {
+ return getNodeState().startLevel < NodeState::SL_STOPPING_3;
+ }
+
+ DLHashTable<ScanRecord> c_scanTakeOverHash;
+};
+
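+/**
+ * A scan batch is complete when either the configured maximum number of
+ * rows or the maximum number of bytes has been reached. A maximum of zero
+ * means that the corresponding limit is not used.
+ */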
+inline
+bool
+Dblqh::ScanRecord::check_scan_batch_completed() const
+{
+ Uint32 max_rows = m_max_batch_size_rows;
+ Uint32 max_bytes = m_max_batch_size_bytes;
+
+ return (max_rows > 0 && (m_curr_batch_size_rows >= max_rows)) ||
+ (max_bytes > 0 && (m_curr_batch_size_bytes >= max_bytes));
+}
+
+inline
+void
+Dblqh::i_get_acc_ptr(ScanRecord* scanP, Uint32* &acc_ptr, Uint32 index)
+{
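+  // Return (via acc_ptr) the address of the index:th ACC operation pointer
+  // kept for this scan. Slot 0 is stored directly in scan_acc_op_ptr[0];
+  // higher indexes live in linked attribute buffers, 32 entries per buffer,
+  // whose record numbers are kept in scan_acc_op_ptr[1..].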
+ if (index == 0) {
+ acc_ptr= (Uint32*)&scanP->scan_acc_op_ptr[0];
+ } else {
+ Uint32 attr_buf_index, attr_buf_rec;
+
+ AttrbufPtr regAttrPtr;
+ jam();
+ attr_buf_rec= (index + 31) / 32;
+ attr_buf_index= (index - 1) & 31;
+ regAttrPtr.i= scanP->scan_acc_op_ptr[attr_buf_rec];
+ ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
+ acc_ptr= (Uint32*)&regAttrPtr.p->attrbuf[attr_buf_index];
+ }
+}
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
new file mode 100644
index 00000000000..0ef72bd35ad
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
@@ -0,0 +1,455 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#include <pc.hpp>
+#define DBLQH_C
+#include "Dblqh.hpp"
+#include <ndb_limits.h>
+
+#define DEBUG(x) { ndbout << "LQH::" << x << endl; }
+
+void Dblqh::initData()
+{
+ caddfragrecFileSize = ZADDFRAGREC_FILE_SIZE;
+ cattrinbufFileSize = ZATTRINBUF_FILE_SIZE;
+ c_no_attrinbuf_recs= ZATTRINBUF_FILE_SIZE;
+ cdatabufFileSize = ZDATABUF_FILE_SIZE;
+ cfragrecFileSize = 0;
+ cgcprecFileSize = ZGCPREC_FILE_SIZE;
+ chostFileSize = MAX_NDB_NODES;
+ clcpFileSize = ZNO_CONCURRENT_LCP;
+ clcpLocrecFileSize = ZLCP_LOCREC_FILE_SIZE;
+ clfoFileSize = ZLFO_FILE_SIZE;
+ clogFileFileSize = 0;
+ clogPartFileSize = ZLOG_PART_FILE_SIZE;
+ cpageRefFileSize = ZPAGE_REF_FILE_SIZE;
+ cscanrecFileSize = ZSCANREC_FILE_SIZE;
+ ctabrecFileSize = 0;
+ ctcConnectrecFileSize = 0;
+ ctcNodeFailrecFileSize = MAX_NDB_NODES;
+
+ addFragRecord = 0;
+ attrbuf = 0;
+ databuf = 0;
+ fragrecord = 0;
+ gcpRecord = 0;
+ hostRecord = 0;
+ lcpRecord = 0;
+ lcpLocRecord = 0;
+ logPartRecord = 0;
+ logFileRecord = 0;
+ logFileOperationRecord = 0;
+ logPageRecord = 0;
+ pageRefRecord = 0;
+ tablerec = 0;
+ tcConnectionrec = 0;
+ tcNodeFailRecord = 0;
+
+ // Records with constant sizes
+
+ cLqhTimeOutCount = 0;
+ cLqhTimeOutCheckCount = 0;
+ cbookedAccOps = 0;
+ c_redo_log_complete_frags = RNIL;
+}//Dblqh::initData()
+
+void Dblqh::initRecords()
+{
+ // Records with dynamic sizes
+ addFragRecord = (AddFragRecord*)allocRecord("AddFragRecord",
+ sizeof(AddFragRecord),
+ caddfragrecFileSize);
+ attrbuf = (Attrbuf*)allocRecord("Attrbuf",
+ sizeof(Attrbuf),
+ cattrinbufFileSize);
+
+ databuf = (Databuf*)allocRecord("Databuf",
+ sizeof(Databuf),
+ cdatabufFileSize);
+
+ fragrecord = (Fragrecord*)allocRecord("Fragrecord",
+ sizeof(Fragrecord),
+ cfragrecFileSize);
+
+ gcpRecord = (GcpRecord*)allocRecord("GcpRecord",
+ sizeof(GcpRecord),
+ cgcprecFileSize);
+
+ hostRecord = (HostRecord*)allocRecord("HostRecord",
+ sizeof(HostRecord),
+ chostFileSize);
+
+ lcpRecord = (LcpRecord*)allocRecord("LcpRecord",
+ sizeof(LcpRecord),
+ clcpFileSize);
+
+ for(Uint32 i = 0; i<clcpFileSize; i++){
+ new (&lcpRecord[i])LcpRecord();
+ }
+
+ lcpLocRecord = (LcpLocRecord*)allocRecord("LcpLocRecord",
+ sizeof(LcpLocRecord),
+ clcpLocrecFileSize);
+
+ logPartRecord = (LogPartRecord*)allocRecord("LogPartRecord",
+ sizeof(LogPartRecord),
+ clogPartFileSize);
+
+ logFileRecord = (LogFileRecord*)allocRecord("LogFileRecord",
+ sizeof(LogFileRecord),
+ clogFileFileSize);
+
+ logFileOperationRecord = (LogFileOperationRecord*)
+ allocRecord("LogFileOperationRecord",
+ sizeof(LogFileOperationRecord),
+ clfoFileSize);
+
+ logPageRecord = (LogPageRecord*)allocRecord("LogPageRecord",
+ sizeof(LogPageRecord),
+ clogPageFileSize,
+ false);
+
+ pageRefRecord = (PageRefRecord*)allocRecord("PageRefRecord",
+ sizeof(PageRefRecord),
+ cpageRefFileSize);
+
+ cscanNoFreeRec = cscanrecFileSize;
+ c_scanRecordPool.setSize(cscanrecFileSize);
+ c_scanTakeOverHash.setSize(64);
+
+ tablerec = (Tablerec*)allocRecord("Tablerec",
+ sizeof(Tablerec),
+ ctabrecFileSize);
+
+ tcConnectionrec = (TcConnectionrec*)allocRecord("TcConnectionrec",
+ sizeof(TcConnectionrec),
+ ctcConnectrecFileSize);
+
+ m_commitAckMarkerPool.setSize(ctcConnectrecFileSize);
+ m_commitAckMarkerHash.setSize(1024);
+
+ tcNodeFailRecord = (TcNodeFailRecord*)allocRecord("TcNodeFailRecord",
+ sizeof(TcNodeFailRecord),
+ ctcNodeFailrecFileSize);
+
+ /*
+ ndbout << "FRAGREC SIZE = " << sizeof(Fragrecord) << endl;
+ ndbout << "TAB SIZE = " << sizeof(Tablerec) << endl;
+ ndbout << "GCP SIZE = " << sizeof(GcpRecord) << endl;
+ ndbout << "LCP SIZE = " << sizeof(LcpRecord) << endl;
+ ndbout << "LCPLOC SIZE = " << sizeof(LcpLocRecord) << endl;
+ ndbout << "LOGPART SIZE = " << sizeof(LogPartRecord) << endl;
+ ndbout << "LOGFILE SIZE = " << sizeof(LogFileRecord) << endl;
+ ndbout << "TC SIZE = " << sizeof(TcConnectionrec) << endl;
+ ndbout << "HOST SIZE = " << sizeof(HostRecord) << endl;
+ ndbout << "LFO SIZE = " << sizeof(LogFileOperationRecord) << endl;
+ ndbout << "PR SIZE = " << sizeof(PageRefRecord) << endl;
+ ndbout << "SCAN SIZE = " << sizeof(ScanRecord) << endl;
+*/
+
+ // Initialize BAT for interface to file system
+ NewVARIABLE* bat = allocateBat(2);
+ bat[1].WA = &logPageRecord->logPageWord[0];
+ bat[1].nrr = clogPageFileSize;
+ bat[1].ClusterSize = sizeof(LogPageRecord);
+ bat[1].bits.q = ZTWOLOG_PAGE_SIZE;
+ bat[1].bits.v = 5;
+}//Dblqh::initRecords()
+
+Dblqh::Dblqh(const class Configuration & conf):
+ SimulatedBlock(DBLQH, conf),
+ m_commitAckMarkerHash(m_commitAckMarkerPool),
+ c_scanTakeOverHash(c_scanRecordPool)
+{
+ Uint32 log_page_size= 0;
+ BLOCK_CONSTRUCTOR(Dblqh);
+
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_REDO_BUFFER,
+ &log_page_size);
+
+  /**
+   * Always set the redo log page buffer size in whole half-MBytes
+   */
+ clogPageFileSize= (log_page_size / sizeof(LogPageRecord));
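+  // Round the number of log pages up to a multiple of 16 so that the
+  // buffer corresponds to a whole number of half-MBytes.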
+ Uint32 mega_byte_part= clogPageFileSize & 15;
+ if (mega_byte_part != 0) {
+ jam();
+ clogPageFileSize+= (16 - mega_byte_part);
+ }
+
+ addRecSignal(GSN_PACKED_SIGNAL, &Dblqh::execPACKED_SIGNAL);
+ addRecSignal(GSN_DEBUG_SIG, &Dblqh::execDEBUG_SIG);
+ addRecSignal(GSN_ATTRINFO, &Dblqh::execATTRINFO);
+ addRecSignal(GSN_KEYINFO, &Dblqh::execKEYINFO);
+ addRecSignal(GSN_LQHKEYREQ, &Dblqh::execLQHKEYREQ);
+ addRecSignal(GSN_LQHKEYREF, &Dblqh::execLQHKEYREF);
+ addRecSignal(GSN_COMMIT, &Dblqh::execCOMMIT);
+ addRecSignal(GSN_COMPLETE, &Dblqh::execCOMPLETE);
+ addRecSignal(GSN_LQHKEYCONF, &Dblqh::execLQHKEYCONF);
+#ifdef VM_TRACE
+ addRecSignal(GSN_TESTSIG, &Dblqh::execTESTSIG);
+#endif
+ addRecSignal(GSN_LQH_RESTART_OP, &Dblqh::execLQH_RESTART_OP);
+ addRecSignal(GSN_CONTINUEB, &Dblqh::execCONTINUEB);
+ addRecSignal(GSN_START_RECREQ, &Dblqh::execSTART_RECREQ);
+ addRecSignal(GSN_START_RECCONF, &Dblqh::execSTART_RECCONF);
+ addRecSignal(GSN_EXEC_FRAGREQ, &Dblqh::execEXEC_FRAGREQ);
+ addRecSignal(GSN_EXEC_FRAGCONF, &Dblqh::execEXEC_FRAGCONF);
+ addRecSignal(GSN_EXEC_FRAGREF, &Dblqh::execEXEC_FRAGREF);
+ addRecSignal(GSN_START_EXEC_SR, &Dblqh::execSTART_EXEC_SR);
+ addRecSignal(GSN_EXEC_SRREQ, &Dblqh::execEXEC_SRREQ);
+ addRecSignal(GSN_EXEC_SRCONF, &Dblqh::execEXEC_SRCONF);
+ addRecSignal(GSN_SCAN_HBREP, &Dblqh::execSCAN_HBREP);
+
+ addRecSignal(GSN_ALTER_TAB_REQ, &Dblqh::execALTER_TAB_REQ);
+
+ // Trigger signals, transit to from TUP
+ addRecSignal(GSN_CREATE_TRIG_REQ, &Dblqh::execCREATE_TRIG_REQ);
+ addRecSignal(GSN_CREATE_TRIG_CONF, &Dblqh::execCREATE_TRIG_CONF);
+ addRecSignal(GSN_CREATE_TRIG_REF, &Dblqh::execCREATE_TRIG_REF);
+
+ addRecSignal(GSN_DROP_TRIG_REQ, &Dblqh::execDROP_TRIG_REQ);
+ addRecSignal(GSN_DROP_TRIG_CONF, &Dblqh::execDROP_TRIG_CONF);
+ addRecSignal(GSN_DROP_TRIG_REF, &Dblqh::execDROP_TRIG_REF);
+
+ addRecSignal(GSN_DUMP_STATE_ORD, &Dblqh::execDUMP_STATE_ORD);
+ addRecSignal(GSN_ACC_COM_BLOCK, &Dblqh::execACC_COM_BLOCK);
+ addRecSignal(GSN_ACC_COM_UNBLOCK, &Dblqh::execACC_COM_UNBLOCK);
+ addRecSignal(GSN_TUP_COM_BLOCK, &Dblqh::execTUP_COM_BLOCK);
+ addRecSignal(GSN_TUP_COM_UNBLOCK, &Dblqh::execTUP_COM_UNBLOCK);
+ addRecSignal(GSN_NODE_FAILREP, &Dblqh::execNODE_FAILREP);
+ addRecSignal(GSN_CHECK_LCP_STOP, &Dblqh::execCHECK_LCP_STOP);
+ addRecSignal(GSN_SEND_PACKED, &Dblqh::execSEND_PACKED);
+ addRecSignal(GSN_TUP_ATTRINFO, &Dblqh::execTUP_ATTRINFO);
+ addRecSignal(GSN_READ_CONFIG_REQ, &Dblqh::execREAD_CONFIG_REQ, true);
+ addRecSignal(GSN_LQHFRAGREQ, &Dblqh::execLQHFRAGREQ);
+ addRecSignal(GSN_LQHADDATTREQ, &Dblqh::execLQHADDATTREQ);
+ addRecSignal(GSN_TUP_ADD_ATTCONF, &Dblqh::execTUP_ADD_ATTCONF);
+ addRecSignal(GSN_TUP_ADD_ATTRREF, &Dblqh::execTUP_ADD_ATTRREF);
+ addRecSignal(GSN_ACCFRAGCONF, &Dblqh::execACCFRAGCONF);
+ addRecSignal(GSN_ACCFRAGREF, &Dblqh::execACCFRAGREF);
+ addRecSignal(GSN_TUPFRAGCONF, &Dblqh::execTUPFRAGCONF);
+ addRecSignal(GSN_TUPFRAGREF, &Dblqh::execTUPFRAGREF);
+ addRecSignal(GSN_TAB_COMMITREQ, &Dblqh::execTAB_COMMITREQ);
+ addRecSignal(GSN_ACCSEIZECONF, &Dblqh::execACCSEIZECONF);
+ addRecSignal(GSN_ACCSEIZEREF, &Dblqh::execACCSEIZEREF);
+ addRecSignal(GSN_READ_NODESCONF, &Dblqh::execREAD_NODESCONF);
+ addRecSignal(GSN_READ_NODESREF, &Dblqh::execREAD_NODESREF);
+ addRecSignal(GSN_STTOR, &Dblqh::execSTTOR);
+ addRecSignal(GSN_NDB_STTOR, &Dblqh::execNDB_STTOR);
+ addRecSignal(GSN_TUPSEIZECONF, &Dblqh::execTUPSEIZECONF);
+ addRecSignal(GSN_TUPSEIZEREF, &Dblqh::execTUPSEIZEREF);
+ addRecSignal(GSN_ACCKEYCONF, &Dblqh::execACCKEYCONF);
+ addRecSignal(GSN_ACCKEYREF, &Dblqh::execACCKEYREF);
+ addRecSignal(GSN_TUPKEYCONF, &Dblqh::execTUPKEYCONF);
+ addRecSignal(GSN_TUPKEYREF, &Dblqh::execTUPKEYREF);
+ addRecSignal(GSN_ABORT, &Dblqh::execABORT);
+ addRecSignal(GSN_ABORTREQ, &Dblqh::execABORTREQ);
+ addRecSignal(GSN_COMMITREQ, &Dblqh::execCOMMITREQ);
+ addRecSignal(GSN_COMPLETEREQ, &Dblqh::execCOMPLETEREQ);
+#ifdef VM_TRACE
+ addRecSignal(GSN_MEMCHECKREQ, &Dblqh::execMEMCHECKREQ);
+#endif
+ addRecSignal(GSN_SCAN_FRAGREQ, &Dblqh::execSCAN_FRAGREQ);
+ addRecSignal(GSN_SCAN_NEXTREQ, &Dblqh::execSCAN_NEXTREQ);
+ addRecSignal(GSN_ACC_SCANCONF, &Dblqh::execACC_SCANCONF);
+ addRecSignal(GSN_ACC_SCANREF, &Dblqh::execACC_SCANREF);
+ addRecSignal(GSN_NEXT_SCANCONF, &Dblqh::execNEXT_SCANCONF);
+ addRecSignal(GSN_NEXT_SCANREF, &Dblqh::execNEXT_SCANREF);
+ addRecSignal(GSN_STORED_PROCCONF, &Dblqh::execSTORED_PROCCONF);
+ addRecSignal(GSN_STORED_PROCREF, &Dblqh::execSTORED_PROCREF);
+ addRecSignal(GSN_COPY_FRAGREQ, &Dblqh::execCOPY_FRAGREQ);
+ addRecSignal(GSN_COPY_ACTIVEREQ, &Dblqh::execCOPY_ACTIVEREQ);
+ addRecSignal(GSN_COPY_STATEREQ, &Dblqh::execCOPY_STATEREQ);
+ addRecSignal(GSN_LQH_TRANSREQ, &Dblqh::execLQH_TRANSREQ);
+ addRecSignal(GSN_TRANSID_AI, &Dblqh::execTRANSID_AI);
+ addRecSignal(GSN_INCL_NODEREQ, &Dblqh::execINCL_NODEREQ);
+ addRecSignal(GSN_ACC_LCPCONF, &Dblqh::execACC_LCPCONF);
+ addRecSignal(GSN_ACC_LCPREF, &Dblqh::execACC_LCPREF);
+ addRecSignal(GSN_ACC_LCPSTARTED, &Dblqh::execACC_LCPSTARTED);
+ addRecSignal(GSN_ACC_CONTOPCONF, &Dblqh::execACC_CONTOPCONF);
+ addRecSignal(GSN_LCP_FRAGIDCONF, &Dblqh::execLCP_FRAGIDCONF);
+ addRecSignal(GSN_LCP_FRAGIDREF, &Dblqh::execLCP_FRAGIDREF);
+ addRecSignal(GSN_LCP_HOLDOPCONF, &Dblqh::execLCP_HOLDOPCONF);
+ addRecSignal(GSN_LCP_HOLDOPREF, &Dblqh::execLCP_HOLDOPREF);
+ addRecSignal(GSN_TUP_PREPLCPCONF, &Dblqh::execTUP_PREPLCPCONF);
+ addRecSignal(GSN_TUP_PREPLCPREF, &Dblqh::execTUP_PREPLCPREF);
+ addRecSignal(GSN_TUP_LCPCONF, &Dblqh::execTUP_LCPCONF);
+ addRecSignal(GSN_TUP_LCPREF, &Dblqh::execTUP_LCPREF);
+ addRecSignal(GSN_TUP_LCPSTARTED, &Dblqh::execTUP_LCPSTARTED);
+ addRecSignal(GSN_END_LCPCONF, &Dblqh::execEND_LCPCONF);
+
+ addRecSignal(GSN_EMPTY_LCP_REQ, &Dblqh::execEMPTY_LCP_REQ);
+ addRecSignal(GSN_LCP_FRAG_ORD, &Dblqh::execLCP_FRAG_ORD);
+
+ addRecSignal(GSN_START_FRAGREQ, &Dblqh::execSTART_FRAGREQ);
+ addRecSignal(GSN_START_RECREF, &Dblqh::execSTART_RECREF);
+ addRecSignal(GSN_SR_FRAGIDCONF, &Dblqh::execSR_FRAGIDCONF);
+ addRecSignal(GSN_SR_FRAGIDREF, &Dblqh::execSR_FRAGIDREF);
+ addRecSignal(GSN_ACC_SRCONF, &Dblqh::execACC_SRCONF);
+ addRecSignal(GSN_ACC_SRREF, &Dblqh::execACC_SRREF);
+ addRecSignal(GSN_TUP_SRCONF, &Dblqh::execTUP_SRCONF);
+ addRecSignal(GSN_TUP_SRREF, &Dblqh::execTUP_SRREF);
+ addRecSignal(GSN_GCP_SAVEREQ, &Dblqh::execGCP_SAVEREQ);
+ addRecSignal(GSN_FSOPENCONF, &Dblqh::execFSOPENCONF);
+ addRecSignal(GSN_FSOPENREF, &Dblqh::execFSOPENREF);
+ addRecSignal(GSN_FSCLOSECONF, &Dblqh::execFSCLOSECONF);
+ addRecSignal(GSN_FSCLOSEREF, &Dblqh::execFSCLOSEREF);
+ addRecSignal(GSN_FSWRITECONF, &Dblqh::execFSWRITECONF);
+ addRecSignal(GSN_FSWRITEREF, &Dblqh::execFSWRITEREF);
+ addRecSignal(GSN_FSREADCONF, &Dblqh::execFSREADCONF);
+ addRecSignal(GSN_FSREADREF, &Dblqh::execFSREADREF);
+ addRecSignal(GSN_ACC_ABORTCONF, &Dblqh::execACC_ABORTCONF);
+ addRecSignal(GSN_SET_VAR_REQ, &Dblqh::execSET_VAR_REQ);
+ addRecSignal(GSN_TIME_SIGNAL, &Dblqh::execTIME_SIGNAL);
+ addRecSignal(GSN_FSSYNCCONF, &Dblqh::execFSSYNCCONF);
+ addRecSignal(GSN_FSSYNCREF, &Dblqh::execFSSYNCREF);
+ addRecSignal(GSN_REMOVE_MARKER_ORD, &Dblqh::execREMOVE_MARKER_ORD);
+
+ //addRecSignal(GSN_DROP_TAB_REQ, &Dblqh::execDROP_TAB_REQ);
+ addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dblqh::execPREP_DROP_TAB_REQ);
+ addRecSignal(GSN_WAIT_DROP_TAB_REQ, &Dblqh::execWAIT_DROP_TAB_REQ);
+ addRecSignal(GSN_DROP_TAB_REQ, &Dblqh::execDROP_TAB_REQ);
+
+ addRecSignal(GSN_LQH_ALLOCREQ, &Dblqh::execLQH_ALLOCREQ);
+ addRecSignal(GSN_LQH_WRITELOG_REQ, &Dblqh::execLQH_WRITELOG_REQ);
+
+ // TUX
+ addRecSignal(GSN_TUXFRAGCONF, &Dblqh::execTUXFRAGCONF);
+ addRecSignal(GSN_TUXFRAGREF, &Dblqh::execTUXFRAGREF);
+ addRecSignal(GSN_TUX_ADD_ATTRCONF, &Dblqh::execTUX_ADD_ATTRCONF);
+ addRecSignal(GSN_TUX_ADD_ATTRREF, &Dblqh::execTUX_ADD_ATTRREF);
+
+ addRecSignal(GSN_READ_PSEUDO_REQ, &Dblqh::execREAD_PSEUDO_REQ);
+
+ initData();
+
+#ifdef VM_TRACE
+ {
+ void* tmp[] = {
+ &addfragptr,
+ &attrinbufptr,
+ &databufptr,
+ &fragptr,
+ &gcpPtr,
+ &lcpPtr,
+ &lcpLocptr,
+ &logPartPtr,
+ &logFilePtr,
+ &lfoPtr,
+ &logPagePtr,
+ &pageRefPtr,
+ &scanptr,
+ &tabptr,
+ &tcConnectptr,
+ &tcNodeFailptr,
+ };
+ init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0]));
+ }
+#endif
+
+}//Dblqh::Dblqh()
+
+Dblqh::~Dblqh()
+{
+ // Records with dynamic sizes
+ deallocRecord((void **)&addFragRecord, "AddFragRecord",
+ sizeof(AddFragRecord),
+ caddfragrecFileSize);
+
+ deallocRecord((void**)&attrbuf,
+ "Attrbuf",
+ sizeof(Attrbuf),
+ cattrinbufFileSize);
+
+ deallocRecord((void**)&databuf,
+ "Databuf",
+ sizeof(Databuf),
+ cdatabufFileSize);
+
+ deallocRecord((void**)&fragrecord,
+ "Fragrecord",
+ sizeof(Fragrecord),
+ cfragrecFileSize);
+
+ deallocRecord((void**)&gcpRecord,
+ "GcpRecord",
+ sizeof(GcpRecord),
+ cgcprecFileSize);
+
+ deallocRecord((void**)&hostRecord,
+ "HostRecord",
+ sizeof(HostRecord),
+ chostFileSize);
+
+ deallocRecord((void**)&lcpRecord,
+ "LcpRecord",
+ sizeof(LcpRecord),
+ clcpFileSize);
+
+ deallocRecord((void**)&lcpLocRecord,
+ "LcpLocRecord",
+ sizeof(LcpLocRecord),
+ clcpLocrecFileSize);
+
+ deallocRecord((void**)&logPartRecord,
+ "LogPartRecord",
+ sizeof(LogPartRecord),
+ clogPartFileSize);
+
+ deallocRecord((void**)&logFileRecord,
+ "LogFileRecord",
+ sizeof(LogFileRecord),
+ clogFileFileSize);
+
+ deallocRecord((void**)&logFileOperationRecord,
+ "LogFileOperationRecord",
+ sizeof(LogFileOperationRecord),
+ clfoFileSize);
+
+ deallocRecord((void**)&logPageRecord,
+ "LogPageRecord",
+ sizeof(LogPageRecord),
+ clogPageFileSize);
+
+ deallocRecord((void**)&pageRefRecord,
+ "PageRefRecord",
+ sizeof(PageRefRecord),
+ cpageRefFileSize);
+
+
+ deallocRecord((void**)&tablerec,
+ "Tablerec",
+ sizeof(Tablerec),
+ ctabrecFileSize);
+
+ deallocRecord((void**)&tcConnectionrec,
+ "TcConnectionrec",
+ sizeof(TcConnectionrec),
+ ctcConnectrecFileSize);
+
+ deallocRecord((void**)&tcNodeFailRecord,
+ "TcNodeFailRecord",
+ sizeof(TcNodeFailRecord),
+ ctcNodeFailrecFileSize);
+}//Dblqh::~Dblqh()
+
+BLOCK_FUNCTIONS(Dblqh)
+
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
new file mode 100644
index 00000000000..be3d259986d
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -0,0 +1,18635 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBLQH_C
+#include "Dblqh.hpp"
+#include <ndb_limits.h>
+#include <md5_hash.hpp>
+
+#include <ndb_version.h>
+#include <signaldata/TuxBound.hpp>
+#include <signaldata/AccScan.hpp>
+#include <signaldata/CopyActive.hpp>
+#include <signaldata/CopyFrag.hpp>
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/DropTrig.hpp>
+#include <signaldata/EmptyLcp.hpp>
+#include <signaldata/EventReport.hpp>
+#include <signaldata/ExecFragReq.hpp>
+#include <signaldata/GCPSave.hpp>
+#include <signaldata/TcKeyRef.hpp>
+#include <signaldata/LqhKey.hpp>
+#include <signaldata/NextScan.hpp>
+#include <signaldata/NFCompleteRep.hpp>
+#include <signaldata/NodeFailRep.hpp>
+#include <signaldata/ReadNodesConf.hpp>
+#include <signaldata/RelTabMem.hpp>
+#include <signaldata/ScanFrag.hpp>
+#include <signaldata/SrFragidConf.hpp>
+#include <signaldata/StartFragReq.hpp>
+#include <signaldata/StartRec.hpp>
+#include <signaldata/TupKey.hpp>
+#include <signaldata/TupCommit.hpp>
+#include <signaldata/LqhFrag.hpp>
+#include <signaldata/AccFrag.hpp>
+#include <signaldata/TupFrag.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+#include <signaldata/PackedSignal.hpp>
+
+#include <signaldata/PrepDropTab.hpp>
+#include <signaldata/DropTab.hpp>
+
+#include <signaldata/AlterTab.hpp>
+
+#include <signaldata/LCP.hpp>
+
+// Use DEBUG to print messages that should be
+// seen only when we debug the product
+#ifdef VM_TRACE
+#define DEBUG(x) ndbout << "DBLQH: "<< x << endl;
+NdbOut &
+operator<<(NdbOut& out, Dblqh::TcConnectionrec::TransactionState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::TcConnectionrec::LogWriteState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::TcConnectionrec::ListState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::TcConnectionrec::AbortState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::ScanRecord::ScanState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::LogFileOperationRecord::LfoState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::ScanRecord::ScanType state){
+ out << (int)state;
+ return out;
+}
+
+#else
+#define DEBUG(x)
+#endif
+
+//#define MARKER_TRACE 1
+//#define TRACE_SCAN_TAKEOVER 1
+
+const Uint32 NR_ScanNo = 0;
+
+void Dblqh::execACC_COM_BLOCK(Signal* signal)
+{
+ jamEntry();
+/* ------------------------------------------------------------------------- */
+// The undo log buffer in ACC is critically close to being full.
+/* ------------------------------------------------------------------------- */
+ cCounterAccCommitBlocked++;
+ caccCommitBlocked = true;
+ cCommitBlocked = true;
+ return;
+}//Dblqh::execACC_COM_BLOCK()
+
+void Dblqh::execACC_COM_UNBLOCK(Signal* signal)
+{
+ jamEntry();
+/* ------------------------------------------------------------------------- */
+// Undo log buffer in ACC ok again.
+/* ------------------------------------------------------------------------- */
+ caccCommitBlocked = false;
+ if (ctupCommitBlocked == false) {
+ jam();
+ cCommitBlocked = false;
+ }//if
+ return;
+}//Dblqh::execACC_COM_UNBLOCK()
+
+void Dblqh::execTUP_COM_BLOCK(Signal* signal)
+{
+ jamEntry();
+/* ------------------------------------------------------------------------- */
+// The undo log buffer in TUP is critically close to being full.
+/* ------------------------------------------------------------------------- */
+ cCounterTupCommitBlocked++;
+ ctupCommitBlocked = true;
+ cCommitBlocked = true;
+ return;
+}//Dblqh::execTUP_COM_BLOCK()
+
+void Dblqh::execTUP_COM_UNBLOCK(Signal* signal)
+{
+ jamEntry();
+/* ------------------------------------------------------------------------- */
+// Undo log buffer in TUP ok again.
+/* ------------------------------------------------------------------------- */
+ ctupCommitBlocked = false;
+ if (caccCommitBlocked == false) {
+ jam();
+ cCommitBlocked = false;
+ }//if
+ return;
+}//Dblqh::execTUP_COM_UNBLOCK()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEND SYSTEM ERROR ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::systemError(Signal* signal)
+{
+ progError(0, 0);
+}//Dblqh::systemError()
+
+/* *************** */
+/* ACCSEIZEREF > */
+/* *************** */
+void Dblqh::execACCSEIZEREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execACCSEIZEREF()
+
+/* ******************************************************>> */
+/* THIS SIGNAL IS USED TO HANDLE REAL-TIME */
+/* BREAKS THAT ARE NECESSARY TO ENSURE REAL-TIME */
+/* OPERATION OF LQH. */
+/* This signal is also used for signal loops, for example */
+/* the timeout handling for writing logs every second. */
+/* ******************************************************>> */
+void Dblqh::execCONTINUEB(Signal* signal)
+{
+ jamEntry();
+ Uint32 tcase = signal->theData[0];
+ Uint32 data0 = signal->theData[1];
+ Uint32 data1 = signal->theData[2];
+ Uint32 data2 = signal->theData[3];
+#if 0
+ if (tcase == RNIL) {
+ tcConnectptr.i = data0;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ ndbout << "State = " << tcConnectptr.p->transactionState;
+ ndbout << " seqNoReplica = " << tcConnectptr.p->seqNoReplica;
+ ndbout << " tcNodeFailrec = " << tcConnectptr.p->tcNodeFailrec;
+ ndbout << " activeCreat = " << tcConnectptr.p->activeCreat;
+ ndbout << endl;
+ ndbout << "tupkeyData0 = " << tcConnectptr.p->tupkeyData[0];
+ ndbout << "tupkeyData1 = " << tcConnectptr.p->tupkeyData[1];
+ ndbout << "tupkeyData2 = " << tcConnectptr.p->tupkeyData[2];
+ ndbout << "tupkeyData3 = " << tcConnectptr.p->tupkeyData[3];
+ ndbout << endl;
+ ndbout << "abortState = " << tcConnectptr.p->abortState;
+ ndbout << "listState = " << tcConnectptr.p->listState;
+ ndbout << endl;
+ return;
+ }//if
+#endif
+ switch (tcase) {
+ case ZLOG_LQHKEYREQ:
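+    // An operation is queued waiting for the REDO log. If no log pages are
+    // free, or commits are blocked while the fragment is active, the signal
+    // is resent with a 10 ms delay; otherwise the first queued operation is
+    // taken from the log part and handled according to its transaction state.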
+ if (cnoOfLogPages == 0) {
+ jam();
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
+ return;
+ }//if
+ logPartPtr.i = data0;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+
+ tcConnectptr.i = logPartPtr.p->firstLogQueue;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if ((cCommitBlocked == true) &&
+ (fragptr.p->fragActiveStatus == ZTRUE)) {
+ jam();
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
+ return;
+ }//if
+ logPartPtr.p->LogLqhKeyReqSent = ZFALSE;
+ getFirstInLogQueue(signal);
+
+ switch (tcConnectptr.p->transactionState) {
+ case TcConnectionrec::LOG_QUEUED:
+ if (tcConnectptr.p->abortState != TcConnectionrec::ABORT_IDLE) {
+ jam();
+ logNextStart(signal);
+ abortCommonLab(signal);
+ return;
+ } else {
+ jam();
+/*------------------------------------------------------------*/
+/* WE MUST SET THE STATE OF THE LOG PART TO IDLE TO */
+/* ENSURE THAT WE ARE NOT QUEUED AGAIN ON THE LOG PART */
+/* WE WILL SET THE LOG PART STATE TO ACTIVE IMMEDIATELY */
+/* SO NO OTHER PROCESS WILL SEE THIS STATE. IT IS MERELY*/
+/* USED TO ENABLE REUSE OF CODE. */
+/*------------------------------------------------------------*/
+ if (logPartPtr.p->logPartState == LogPartRecord::ACTIVE) {
+ jam();
+ logPartPtr.p->logPartState = LogPartRecord::IDLE;
+ }//if
+ logLqhkeyreqLab(signal);
+ return;
+ }//if
+ break;
+ case TcConnectionrec::LOG_ABORT_QUEUED:
+ jam();
+ writeAbortLog(signal);
+ removeLogTcrec(signal);
+ logNextStart(signal);
+ continueAfterLogAbortWriteLab(signal);
+ return;
+ break;
+ case TcConnectionrec::LOG_COMMIT_QUEUED:
+ case TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL:
+ jam();
+ writeCommitLog(signal, logPartPtr);
+ logNextStart(signal);
+ if (tcConnectptr.p->transactionState == TcConnectionrec::LOG_COMMIT_QUEUED) {
+ if (tcConnectptr.p->seqNoReplica != 0) {
+ jam();
+ commitReplyLab(signal);
+ } else {
+ jam();
+ localCommitLab(signal);
+ }//if
+ return;
+ } else {
+ jam();
+ tcConnectptr.p->transactionState = TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL;
+ return;
+ }//if
+ break;
+ case TcConnectionrec::COMMIT_QUEUED:
+ jam();
+ logNextStart(signal);
+ localCommitLab(signal);
+ break;
+ case TcConnectionrec::ABORT_QUEUED:
+ jam();
+ logNextStart(signal);
+ abortCommonLab(signal);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+ break;
+ case ZSR_GCI_LIMITS:
+ jam();
+ signal->theData[0] = data0;
+ srGciLimits(signal);
+ return;
+ break;
+ case ZSR_LOG_LIMITS:
+ jam();
+ signal->theData[0] = data0;
+ signal->theData[1] = data1;
+ signal->theData[2] = data2;
+ srLogLimits(signal);
+ return;
+ break;
+ case ZSEND_EXEC_CONF:
+ jam();
+ signal->theData[0] = data0;
+ sendExecConf(signal);
+ return;
+ break;
+ case ZEXEC_SR:
+ jam();
+ signal->theData[0] = data0;
+ execSr(signal);
+ return;
+ break;
+ case ZSR_FOURTH_COMP:
+ jam();
+ signal->theData[0] = data0;
+ srFourthComp(signal);
+ return;
+ break;
+ case ZINIT_FOURTH:
+ jam();
+ signal->theData[0] = data0;
+ initFourth(signal);
+ return;
+ break;
+ case ZTIME_SUPERVISION:
+ jam();
+ signal->theData[0] = data0;
+ timeSup(signal);
+ return;
+ break;
+ case ZSR_PHASE3_START:
+ jam();
+ signal->theData[0] = data0;
+ srPhase3Start(signal);
+ return;
+ break;
+ case ZLQH_TRANS_NEXT:
+ jam();
+ tcNodeFailptr.i = data0;
+ ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
+ lqhTransNextLab(signal);
+ return;
+ break;
+ case ZSCAN_TC_CONNECT:
+ jam();
+ tabptr.i = data1;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ scanTcConnectLab(signal, data0, data2);
+ return;
+ break;
+ case ZINITIALISE_RECORDS:
+ jam();
+ initialiseRecordsLab(signal, data0, data2, signal->theData[4]);
+ return;
+ break;
+ case ZINIT_GCP_REC:
+ jam();
+ gcpPtr.i = 0;
+ ptrAss(gcpPtr, gcpRecord);
+ initGcpRecLab(signal);
+ return;
+ break;
+ case ZRESTART_OPERATIONS_AFTER_STOP:
+ jam();
+ tcConnectptr.i = data0;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ if (tcConnectptr.p->listState != TcConnectionrec::WAIT_QUEUE_LIST) {
+ jam();
+ return;
+ }//if
+ releaseWaitQueue(signal);
+ linkActiveFrag(signal);
+ restartOperationsAfterStopLab(signal);
+ return;
+ break;
+ case ZCHECK_LCP_STOP_BLOCKED:
+ jam();
+ c_scanRecordPool.getPtr(scanptr, data0);
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ checkLcpStopBlockedLab(signal);
+ return;
+ case ZSCAN_MARKERS:
+ jam();
+ scanMarkers(signal, data0, data1, data2);
+ return;
+ break;
+
+ case ZOPERATION_EVENT_REP:
+ jam();
+ /* --------------------------------------------------------------------- */
+ // Report information about transaction activity once every 5 seconds.
+ /* --------------------------------------------------------------------- */
+ if (signal->theData[1] == 0) {
+ signal->theData[0] = NDB_LE_OperationReportCounters;
+ signal->theData[1] = c_Counters.operations;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+ }//if
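+ // theData[1] is non-zero only for the very first loop scheduled in
+ // execNDB_STTOR start phase 1, which suppresses an empty initial report;
+ // later rounds arrive with theData[1] == 0 and send the counters.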
+ c_Counters.clear();
+ signal->theData[0] = ZOPERATION_EVENT_REP;
+ signal->theData[1] = 0;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 2);
+ break;
+ case ZPREP_DROP_TABLE:
+ jam();
+ checkDropTab(signal);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dblqh::execCONTINUEB()
+
+/* *********************************************************> */
+/* Request from DBDIH to include a new node in the node list */
+/* and mark it as up. */
+/* *********************************************************> */
+void Dblqh::execINCL_NODEREQ(Signal* signal)
+{
+ jamEntry();
+ BlockReference retRef = signal->theData[0];
+ Uint32 nodeId = signal->theData[1];
+ cnewestGci = signal->theData[2];
+ cnewestCompletedGci = signal->theData[2] - 1;
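+ // The GCI supplied by DIH becomes the newest GCI seen; the newest completed
+ // GCI is taken to be one less, presumably because the supplied GCI is still open.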
+ ndbrequire(cnoOfNodes < MAX_NDB_NODES);
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ if (cnodeData[i] == nodeId) {
+ jam();
+ cnodeStatus[i] = ZNODE_UP;
+ }//if
+ }//for
+ signal->theData[0] = cownref;
+ sendSignal(retRef, GSN_INCL_NODECONF, signal, 1, JBB);
+ return;
+}//Dblqh::execINCL_NODEREQ()
+
+void Dblqh::execTUPSEIZEREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execTUPSEIZEREF()
+
+/* ########################################################################## */
+/* ####### START / RESTART MODULE ####### */
+/* ########################################################################## */
+/* ************************************************************************>> */
+/* This is the first signal that arrives in a start / restart. Sender is NDBCNTR_REF. */
+/* ************************************************************************>> */
+void Dblqh::execSTTOR(Signal* signal)
+{
+ UintR tstartPhase;
+
+ jamEntry();
+ /* START PHASE */
+ tstartPhase = signal->theData[1];
+ /* SIGNAL KEY */
+ csignalKey = signal->theData[6];
+ switch (tstartPhase) {
+ case ZSTART_PHASE1:
+ jam();
+ cstartPhase = tstartPhase;
+ sttorStartphase1Lab(signal);
+ c_tup = (Dbtup*)globalData.getBlock(DBTUP);
+ ndbrequire(c_tup != 0);
+ return;
+ break;
+ default:
+ jam();
+ /*empty*/;
+ sendsttorryLab(signal);
+ return;
+ break;
+ }//switch
+}//Dblqh::execSTTOR()
+
+/* ***************************************> */
+/* Restart phases 1 - 6, sender is Ndbcntr */
+/* ***************************************> */
+void Dblqh::execNDB_STTOR(Signal* signal)
+{
+ jamEntry();
+ Uint32 ownNodeId = signal->theData[1]; /* MY NODE ID */
+ cstartPhase = signal->theData[2]; /* START PHASE */
+ cstartType = signal->theData[3]; /* START TYPE */
+
+ switch (cstartPhase) {
+ case ZSTART_PHASE1:
+ jam();
+ preComputedRequestInfoMask = 0;
+ LqhKeyReq::setKeyLen(preComputedRequestInfoMask, RI_KEYLEN_MASK);
+ LqhKeyReq::setLastReplicaNo(preComputedRequestInfoMask, RI_LAST_REPL_MASK);
+ LqhKeyReq::setLockType(preComputedRequestInfoMask, RI_LOCK_TYPE_MASK);
+ // Dont LqhKeyReq::setApplicationAddressFlag
+ LqhKeyReq::setDirtyFlag(preComputedRequestInfoMask, 1);
+ // Dont LqhKeyReq::setInterpretedFlag
+ LqhKeyReq::setSimpleFlag(preComputedRequestInfoMask, 1);
+ LqhKeyReq::setOperation(preComputedRequestInfoMask, RI_OPERATION_MASK);
+ // Dont setAIInLqhKeyReq
+ // Dont setSeqNoReplica
+ // Dont setSameClientAndTcFlag
+ // Dont setReturnedReadLenAIFlag
+ // Dont setAPIVersion
+ LqhKeyReq::setMarkerFlag(preComputedRequestInfoMask, 1);
+ //preComputedRequestInfoMask = 0x003d7fff;
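+ // Assumption: preComputedRequestInfoMask collects the LQHKEYREQ request-info
+ // fields that are preserved when the request info is passed on; the fields
+ // marked "Dont" above are regenerated per request rather than copied.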
+ startphase1Lab(signal, /* dummy */ ~0, ownNodeId);
+
+ signal->theData[0] = ZOPERATION_EVENT_REP;
+ signal->theData[1] = 1;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
+ return;
+ break;
+ case ZSTART_PHASE2:
+ jam();
+ startphase2Lab(signal, /* dummy */ ~0);
+ return;
+ break;
+ case ZSTART_PHASE3:
+ jam();
+ startphase3Lab(signal);
+ return;
+ break;
+ case ZSTART_PHASE4:
+ jam();
+ startphase4Lab(signal);
+ return;
+ break;
+ case ZSTART_PHASE6:
+ jam();
+ startphase6Lab(signal);
+ return;
+ break;
+ default:
+ jam();
+ /*empty*/;
+ sendNdbSttorryLab(signal);
+ return;
+ break;
+ }//switch
+}//Dblqh::execNDB_STTOR()
+
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+/* +++++++ START PHASE 1 +++++++ */
+/* LOAD OUR BLOCK REFERENCE AND OUR PROCESSOR ID */
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+void Dblqh::sttorStartphase1Lab(Signal* signal)
+{
+ sendsttorryLab(signal);
+ return;
+}//Dblqh::sttorStartphase1Lab()
+
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+/* +++++++ START PHASE 2 +++++++ */
+/* */
+/* INITIATE ALL RECORDS WITHIN THE BLOCK */
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+void Dblqh::startphase1Lab(Signal* signal, Uint32 _dummy, Uint32 ownNodeId)
+{
+ UintR Ti;
+ HostRecordPtr ThostPtr;
+
+/* ------- INITIATE ALL RECORDS ------- */
+ cownNodeid = ownNodeId;
+ caccBlockref = calcAccBlockRef (cownNodeid);
+ ctupBlockref = calcTupBlockRef (cownNodeid);
+ ctuxBlockref = calcTuxBlockRef (cownNodeid);
+ cownref = calcLqhBlockRef (cownNodeid);
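+ // Prepare one host record per possible node: precompute the remote LQH and
+ // TC block references and reset the packed-signal buffers used later when
+ // batching signals to those nodes.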
+ for (Ti = 0; Ti < chostFileSize; Ti++) {
+ ThostPtr.i = Ti;
+ ptrCheckGuard(ThostPtr, chostFileSize, hostRecord);
+ ThostPtr.p->hostLqhBlockRef = calcLqhBlockRef(ThostPtr.i);
+ ThostPtr.p->hostTcBlockRef = calcTcBlockRef(ThostPtr.i);
+ ThostPtr.p->inPackedList = false;
+ ThostPtr.p->noOfPackedWordsLqh = 0;
+ ThostPtr.p->noOfPackedWordsTc = 0;
+ }//for
+ cpackedListIndex = 0;
+ sendNdbSttorryLab(signal);
+ return;
+}//Dblqh::startphase1Lab()
+
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+/* +++++++ START PHASE 2 +++++++ */
+/* */
+/* CONNECT LQH WITH ACC AND TUP. */
+/* EVERY CONNECTION RECORD IN LQH IS ASSIGNED TO ONE ACC CONNECTION RECORD */
+/* AND ONE TUP CONNECTION RECORD. */
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+void Dblqh::startphase2Lab(Signal* signal, Uint32 _dummy)
+{
+ cmaxWordsAtNodeRec = MAX_NO_WORDS_OUTSTANDING_COPY_FRAGMENT;
+/* -- ACC AND TUP CONNECTION PROCESS -- */
+ tcConnectptr.i = 0;
+ ptrAss(tcConnectptr, tcConnectionrec);
+ moreconnectionsLab(signal);
+ return;
+}//Dblqh::startphase2Lab()
+
+void Dblqh::moreconnectionsLab(Signal* signal)
+{
+ tcConnectptr.p->tcAccBlockref = caccBlockref;
+ // set TUX block here (no operation is seized in TUX)
+ tcConnectptr.p->tcTuxBlockref = ctuxBlockref;
+/* NO STATE CHECKING IS PERFORMED, ASSUMED TO WORK */
+/* *************** */
+/* ACCSEIZEREQ < */
+/* *************** */
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ sendSignal(caccBlockref, GSN_ACCSEIZEREQ, signal, 2, JBB);
+ return;
+}//Dblqh::moreconnectionsLab()
+
+/* ***************> */
+/* ACCSEIZECONF > */
+/* ***************> */
+void Dblqh::execACCSEIZECONF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ tcConnectptr.p->accConnectrec = signal->theData[1];
+/* *************** */
+/* TUPSEIZEREQ < */
+/* *************** */
+ tcConnectptr.p->tcTupBlockref = ctupBlockref;
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ sendSignal(ctupBlockref, GSN_TUPSEIZEREQ, signal, 2, JBB);
+ return;
+}//Dblqh::execACCSEIZECONF()
+
+/* ***************> */
+/* TUPSEIZECONF > */
+/* ***************> */
+void Dblqh::execTUPSEIZECONF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ tcConnectptr.p->tupConnectrec = signal->theData[1];
+/* ------- CHECK IF THERE ARE MORE CONNECTIONS TO BE CONNECTED ------- */
+ tcConnectptr.i = tcConnectptr.p->nextTcConnectrec;
+ if (tcConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ moreconnectionsLab(signal);
+ return;
+ }//if
+/* ALL LQH_CONNECT RECORDS ARE CONNECTED TO ACC AND TUP ---- */
+ sendNdbSttorryLab(signal);
+ return;
+}//Dblqh::execTUPSEIZECONF()
+
+/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+/* +++++++ START PHASE 4 +++++++ */
+/* */
+/* CONNECT LQH WITH LQH. */
+/* CONNECT EACH LQH WITH EVERY LQH IN THE DATABASE SYSTEM. */
+/* IF INITIAL START THEN CREATE THE FRAGMENT LOG FILES */
+/* IF SYSTEM RESTART OR NODE RESTART THEN OPEN THE FRAGMENT LOG FILES AND */
+/* FIND THE END OF THE LOG FILES. */
+/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+/* WAIT UNTIL ADD NODE PROCESSES ARE COMPLETED */
+/* IF INITIAL START, ALSO WAIT FOR LOG FILES TO BE INITIALISED. */
+/* START TIME SUPERVISION OF LOG FILES. WE HAVE TO WRITE LOG PAGES TO DISK */
+/* EVEN IF THE PAGES ARE NOT FULL TO ENSURE THAT THEY REACH DISK ASAP. */
+/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+void Dblqh::startphase3Lab(Signal* signal)
+{
+ LogFileRecordPtr prevLogFilePtr;
+ LogFileRecordPtr zeroLogFilePtr;
+
+ caddNodeState = ZTRUE;
+/* ***************<< */
+/* READ_NODESREQ < */
+/* ***************<< */
+ cinitialStartOngoing = ZTRUE;
+ ndbrequire(cnoLogFiles != 0);
+
+ for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+ initLogpart(signal);
+ for (Uint32 fileNo = 0; fileNo < cnoLogFiles; fileNo++) {
+ seizeLogfile(signal);
+ if (fileNo != 0) {
+ jam();
+ prevLogFilePtr.p->nextLogFile = logFilePtr.i;
+ logFilePtr.p->prevLogFile = prevLogFilePtr.i;
+ } else {
+ jam();
+ logPartPtr.p->firstLogfile = logFilePtr.i;
+ logPartPtr.p->currentLogfile = logFilePtr.i;
+ zeroLogFilePtr.i = logFilePtr.i;
+ zeroLogFilePtr.p = logFilePtr.p;
+ }//if
+ prevLogFilePtr.i = logFilePtr.i;
+ prevLogFilePtr.p = logFilePtr.p;
+ initLogfile(signal, fileNo);
+ if ((cstartType == NodeState::ST_INITIAL_START) ||
+ (cstartType == NodeState::ST_INITIAL_NODE_RESTART)) {
+ if (logFilePtr.i == zeroLogFilePtr.i) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*IN AN INITIAL START WE START BY CREATING ALL LOG FILES AND SETTING THEIR */
+/*PROPER SIZE AND INITIALISING PAGE ZERO IN ALL FILES. */
+/*WE START BY CREATING FILE ZERO IN EACH LOG PART AND THEN PROCEED */
+/*SEQUENTIALLY THROUGH ALL LOG FILES IN THE LOG PART. */
+/* ------------------------------------------------------------------------- */
+ openLogfileInit(signal);
+ }//if
+ }//if
+ }//for
+ zeroLogFilePtr.p->prevLogFile = logFilePtr.i;
+ logFilePtr.p->nextLogFile = zeroLogFilePtr.i;
+ }//for
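+ // At this point each of the four log parts owns a circular, doubly linked
+ // list of cnoLogFiles log file records: file zero's prevLogFile points to
+ // the last file and the last file's nextLogFile points back to file zero.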
+ if (cstartType != NodeState::ST_INITIAL_START &&
+ cstartType != NodeState::ST_INITIAL_NODE_RESTART) {
+ jam();
+ ndbrequire(cstartType == NodeState::ST_NODE_RESTART ||
+ cstartType == NodeState::ST_SYSTEM_RESTART);
+ /** --------------------------------------------------------------------
+ * THIS CODE KICKS OFF THE SYSTEM RESTART AND NODE RESTART. IT STARTS UP
+ * THE RESTART BY FINDING THE END OF THE LOG AND FROM THERE FINDING THE
+ * INFO ABOUT THE GLOBAL CHECKPOINTS IN THE FRAGMENT LOG.
+ --------------------------------------------------------------------- */
+ for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
+ jam();
+ LogFileRecordPtr locLogFilePtr;
+ ptrAss(logPartPtr, logPartRecord);
+ locLogFilePtr.i = logPartPtr.p->firstLogfile;
+ ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FRONTPAGE;
+ openFileRw(signal, locLogFilePtr);
+ }//for
+ }//if
+
+ signal->theData[0] = cownref;
+ sendSignal(NDBCNTR_REF, GSN_READ_NODESREQ, signal, 1, JBB);
+ return;
+}//Dblqh::startphase3Lab()
+
+/* ****************** */
+/* READ_NODESCONF > */
+/* ****************** */
+void Dblqh::execREAD_NODESCONF(Signal* signal)
+{
+ jamEntry();
+
+ ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
+ cnoOfNodes = readNodes->noOfNodes;
+
+ unsigned ind = 0;
+ unsigned i = 0;
+ for (i = 1; i < MAX_NDB_NODES; i++) {
+ jam();
+ if (NodeBitmask::get(readNodes->allNodes, i)) {
+ jam();
+ cnodeData[ind] = i;
+ cnodeStatus[ind] = NodeBitmask::get(readNodes->inactiveNodes, i);
+ //readNodes->getVersionId(i, readNodes->theVersionIds) not used
+ ind++;
+ }//if
+ }//for
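+ // cnodeData now lists the node ids present in the configuration and
+ // cnodeStatus records, per node, whether READ_NODESCONF marked it inactive.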
+ ndbrequire(ind == cnoOfNodes);
+ ndbrequire(cnoOfNodes >= 1 && cnoOfNodes < MAX_NDB_NODES);
+ ndbrequire(!(cnoOfNodes == 1 && cstartType == NodeState::ST_NODE_RESTART));
+
+ caddNodeState = ZFALSE;
+ if (cstartType == NodeState::ST_SYSTEM_RESTART) {
+ jam();
+ sendNdbSttorryLab(signal);
+ return;
+ }//if
+ checkStartCompletedLab(signal);
+ return;
+}//Dblqh::execREAD_NODESCONF()
+
+void Dblqh::checkStartCompletedLab(Signal* signal)
+{
+ if (caddNodeState == ZFALSE) {
+ if (cinitialStartOngoing == ZFALSE) {
+ jam();
+ sendNdbSttorryLab(signal);
+ return;
+ }//if
+ }//if
+ return;
+}//Dblqh::checkStartCompletedLab()
+
+void Dblqh::startphase4Lab(Signal* signal)
+{
+ sendNdbSttorryLab(signal);
+ return;
+}//Dblqh::startphase4Lab()
+
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+/* SET CONCURRENCY OF LOCAL CHECKPOINTS TO BE USED AFTER SYSTEM RESTART. */
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+void Dblqh::startphase6Lab(Signal* signal)
+{
+ cstartPhase = ZNIL;
+ cstartType = ZNIL;
+ sendNdbSttorryLab(signal);
+ return;
+}//Dblqh::startphase6Lab()
+
+void Dblqh::sendNdbSttorryLab(Signal* signal)
+{
+ signal->theData[0] = cownref;
+ sendSignal(NDBCNTR_REF, GSN_NDB_STTORRY, signal, 1, JBB);
+ return;
+}//Dblqh::sendNdbSttorryLab()
+
+void Dblqh::sendsttorryLab(Signal* signal)
+{
+/* *********<< */
+/* STTORRY < */
+/* *********<< */
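+/* STTORRY layout as used here: [0] signal key, [1] block category, */
+/* [2] signal version, [3..] the start phases this block wants STTOR */
+/* for, terminated by 255. */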
+ signal->theData[0] = csignalKey; /* SIGNAL KEY */
+ signal->theData[1] = 3; /* BLOCK CATEGORY */
+ signal->theData[2] = 2; /* SIGNAL VERSION NUMBER */
+ signal->theData[3] = ZSTART_PHASE1;
+ signal->theData[4] = 255;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
+ return;
+}//Dblqh::sendsttorryLab()
+
+/* ***************>> */
+/* READ_NODESREF > */
+/* ***************>> */
+void Dblqh::execREAD_NODESREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execREAD_NODESREF()
+
+/* *************** */
+/* READ_CONFIG_REQ > */
+/* *************** */
+void Dblqh::execREAD_CONFIG_REQ(Signal* signal)
+{
+ const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
+ Uint32 ref = req->senderRef;
+ Uint32 senderData = req->senderData;
+ ndbrequire(req->noOfParameters == 0);
+
+ jamEntry();
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ cnoLogFiles = 8;
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_REDOLOG_FILES,
+ &cnoLogFiles));
+ ndbrequire(cnoLogFiles > 0);
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_FRAG, &cfragrecFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TABLE, &ctabrecFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TC_CONNECT,
+ &ctcConnectrecFileSize));
+ clogFileFileSize = 4 * cnoLogFiles;
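+ // Four log parts, each with cnoLogFiles redo log files, hence 4 * cnoLogFiles
+ // log file records in total.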
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_SCAN, &cscanrecFileSize));
+ cmaxAccOps = cscanrecFileSize * MAX_PARALLEL_OP_PER_SCAN;
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &c_diskless));
+
+ initRecords();
+ initialiseRecordsLab(signal, 0, ref, senderData);
+
+ return;
+}//Dblqh::execREAD_CONFIG_REQ()
+
+/* ########################################################################## */
+/* ####### ADD/DELETE FRAGMENT MODULE ####### */
+/* THIS MODULE IS USED BY DICTIONARY TO CREATE NEW FRAGMENTS AND DELETE */
+/* OLD FRAGMENTS. */
+/* */
+/* ########################################################################## */
+/* -------------------------------------------------------------- */
+/* FRAG REQ */
+/* -------------------------------------------------------------- */
+/* *********************************************************> */
+/* LQHFRAGREQ: Create new fragments for a table. Sender DICT */
+/* *********************************************************> */
+
+// this unbelievable mess could be replaced by one signal to LQH
+// and an execute-direct to the local DICT to get everything at once
+
+void Dblqh::execLQHFRAGREQ(Signal* signal)
+{
+ jamEntry();
+ LqhFragReq * req = (LqhFragReq*)signal->getDataPtr();
+
+ Uint32 retPtr = req->senderData;
+ BlockReference retRef = req->senderRef;
+ Uint32 fragId = req->fragmentId;
+ Uint32 reqinfo = req->requestInfo;
+ tabptr.i = req->tableId;
+ Uint16 tlocalKeylen = req->localKeyLength;
+ Uint32 tmaxLoadFactor = req->maxLoadFactor;
+ Uint32 tminLoadFactor = req->minLoadFactor;
+ Uint8 tk = req->kValue;
+ Uint8 tlhstar = req->lh3DistrBits;
+ Uint8 tlh = req->lh3PageBits;
+ Uint32 tnoOfAttr = req->noOfAttributes;
+ Uint32 tnoOfNull = req->noOfNullAttributes;
+ Uint32 noOfAlloc = req->noOfPagesToPreAllocate;
+ Uint32 tschemaVersion = req->schemaVersion;
+ Uint32 ttupKeyLength = req->keyLength;
+ Uint32 nextLcp = req->nextLCP;
+ Uint32 noOfKeyAttr = req->noOfKeyAttr;
+ Uint32 noOfNewAttr = req->noOfNewAttr;
+ Uint32 checksumIndicator = req->checksumIndicator;
+ Uint32 noOfAttributeGroups = req->noOfAttributeGroups;
+ Uint32 gcpIndicator = req->GCPIndicator;
+ Uint32 startGci = req->startGci;
+ Uint32 tableType = req->tableType;
+ Uint32 primaryTableId = req->primaryTableId;
+
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ bool tempTable = ((reqinfo & LqhFragReq::TemporaryTable) != 0);
+
+ /* Temporary tables set to defined in system restart */
+ if (tabptr.p->tableStatus == Tablerec::NOT_DEFINED){
+ tabptr.p->tableStatus = Tablerec::ADD_TABLE_ONGOING;
+ tabptr.p->tableType = tableType;
+ tabptr.p->primaryTableId = primaryTableId;
+ tabptr.p->schemaVersion = tschemaVersion;
+ }//if
+
+ if (tabptr.p->tableStatus != Tablerec::ADD_TABLE_ONGOING){
+ jam();
+ fragrefLab(signal, retRef, retPtr, ZTAB_STATE_ERROR);
+ return;
+ }//if
+ //--------------------------------------------------------------------
+ // We could arrive here if we create the fragment as part of a take-over
+ // by a hot spare node. The table is then already created and bit 31 is
+ // set, thus indicating that we are creating a fragment by copy creation.
+ // Also, since the node has already been started, we know that no node
+ // restart is ongoing.
+ //--------------------------------------------------------------------
+
+ if (getFragmentrec(signal, fragId)) {
+ jam();
+ fragrefLab(signal, retRef, retPtr, terrorCode);
+ return;
+ }//if
+ if (!insertFragrec(signal, fragId)) {
+ jam();
+ fragrefLab(signal, retRef, retPtr, terrorCode);
+ return;
+ }//if
+ Uint32 copyType = reqinfo & 3;
+ initFragrec(signal, tabptr.i, fragId, copyType);
+ fragptr.p->startGci = startGci;
+ fragptr.p->newestGci = startGci;
+ fragptr.p->tableType = tableType;
+
+ if (DictTabInfo::isOrderedIndex(tableType)) {
+ jam();
+ // find corresponding primary table fragment
+ TablerecPtr tTablePtr;
+ tTablePtr.i = primaryTableId;
+ ptrCheckGuard(tTablePtr, ctabrecFileSize, tablerec);
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = RNIL;
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+ if (tTablePtr.p->fragid[i] == fragptr.p->fragId) {
+ jam();
+ tFragPtr.i = tTablePtr.p->fragrec[i];
+ break;
+ }
+ }
+ ndbrequire(tFragPtr.i != RNIL);
+ // store it
+ fragptr.p->tableFragptr = tFragPtr.i;
+ } else {
+ fragptr.p->tableFragptr = fragptr.i;
+ }
+
+ if (tempTable) {
+//--------------------------------------------
+// reqinfo bit 3-4 = 2 means temporary table
+// without logging or checkpointing.
+//--------------------------------------------
+ jam();
+ fragptr.p->logFlag = Fragrecord::STATE_FALSE;
+ fragptr.p->lcpFlag = Fragrecord::LCP_STATE_FALSE;
+ }//if
+
+ fragptr.p->nextLcp = nextLcp;
+//----------------------------------------------
+// For node restarts it is not necessarily zero
+//----------------------------------------------
+ if (cfirstfreeAddfragrec == RNIL) {
+ jam();
+ deleteFragrec(fragId);
+ fragrefLab(signal, retRef, retPtr, ZNO_ADD_FRAGREC);
+ return;
+ }//if
+ seizeAddfragrec(signal);
+ addfragptr.p->addFragid = fragId;
+ addfragptr.p->fragmentPtr = fragptr.i;
+ addfragptr.p->dictBlockref = retRef;
+ addfragptr.p->dictConnectptr = retPtr;
+ addfragptr.p->m_senderAttrPtr = RNIL;
+ addfragptr.p->noOfAttr = tnoOfAttr;
+ addfragptr.p->noOfNull = tnoOfNull;
+ addfragptr.p->noOfAllocPages = noOfAlloc;
+ addfragptr.p->tabId = tabptr.i;
+ addfragptr.p->totalAttrReceived = 0;
+ addfragptr.p->attrSentToTup = ZNIL;/* TO FIND PROGRAMMING ERRORS QUICKLY */
+ addfragptr.p->schemaVer = tschemaVersion;
+ Uint32 tmp = (reqinfo & LqhFragReq::CreateInRunning);
+ addfragptr.p->fragCopyCreation = (tmp == 0 ? 0 : 1);
+ addfragptr.p->addfragErrorCode = 0;
+ addfragptr.p->noOfKeyAttr = noOfKeyAttr;
+ addfragptr.p->noOfNewAttr = noOfNewAttr;
+ addfragptr.p->checksumIndicator = checksumIndicator;
+ addfragptr.p->noOfAttributeGroups = noOfAttributeGroups;
+ addfragptr.p->GCPIndicator = gcpIndicator;
+ addfragptr.p->lh3DistrBits = tlhstar;
+ addfragptr.p->tableType = tableType;
+ addfragptr.p->primaryTableId = primaryTableId;
+ //
+ addfragptr.p->tup1Connectptr = RNIL;
+ addfragptr.p->tup2Connectptr = RNIL;
+ addfragptr.p->tux1Connectptr = RNIL;
+ addfragptr.p->tux2Connectptr = RNIL;
+
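+ // Add-fragment signal flow from here: tables and hash indexes first send
+ // ACCFRAGREQ and, on its CONF, two TUPFRAGREQ; ordered indexes skip ACC and
+ // send two TUPFRAGREQ followed by two TUXFRAGREQ (see sendAddFragReq).
+ // Attributes are then added via LQHADDATTREQ batches.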
+ if (DictTabInfo::isTable(tableType) ||
+ DictTabInfo::isHashIndex(tableType)) {
+ jam();
+ AccFragReq* const accreq = (AccFragReq*)signal->getDataPtrSend();
+ accreq->userPtr = addfragptr.i;
+ accreq->userRef = cownref;
+ accreq->tableId = tabptr.i;
+ accreq->reqInfo = copyType << 4;
+ accreq->fragId = fragId;
+ accreq->localKeyLen = tlocalKeylen;
+ accreq->maxLoadFactor = tmaxLoadFactor;
+ accreq->minLoadFactor = tminLoadFactor;
+ accreq->kValue = tk;
+ accreq->lhFragBits = tlhstar;
+ accreq->lhDirBits = tlh;
+ accreq->keyLength = ttupKeyLength;
+ /* ----------------------------------------------------------------------- */
+ /* Send ACCFRAGREQ, when confirmation is received send 2 * TUPFRAGREQ to */
+ /* create 2 tuple fragments on this node. */
+ /* ----------------------------------------------------------------------- */
+ addfragptr.p->addfragStatus = AddFragRecord::ACC_ADDFRAG;
+ sendSignal(fragptr.p->accBlockref, GSN_ACCFRAGREQ,
+ signal, AccFragReq::SignalLength, JBB);
+ return;
+ }
+ if (DictTabInfo::isOrderedIndex(tableType)) {
+ jam();
+ // NOTE: next 2 lines stolen from ACC
+ addfragptr.p->fragid1 = (fragId << 1) | 0;
+ addfragptr.p->fragid2 = (fragId << 1) | 1;
+ addfragptr.p->addfragStatus = AddFragRecord::WAIT_TWO_TUP;
+ sendAddFragReq(signal);
+ return;
+ }
+ ndbrequire(false);
+}//Dblqh::execLQHFRAGREQ()
+
+/* *************** */
+/* ACCFRAGCONF > */
+/* *************** */
+void Dblqh::execACCFRAGCONF(Signal* signal)
+{
+ jamEntry();
+ addfragptr.i = signal->theData[0];
+ Uint32 taccConnectptr = signal->theData[1];
+ Uint32 fragId1 = signal->theData[2];
+ Uint32 fragId2 = signal->theData[3];
+ Uint32 accFragPtr1 = signal->theData[4];
+ Uint32 accFragPtr2 = signal->theData[5];
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ ndbrequire(addfragptr.p->addfragStatus == AddFragRecord::ACC_ADDFRAG);
+
+ addfragptr.p->accConnectptr = taccConnectptr;
+ addfragptr.p->fragid1 = fragId1;
+ addfragptr.p->fragid2 = fragId2;
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.p->accFragptr[0] = accFragPtr1;
+ fragptr.p->accFragptr[1] = accFragPtr2;
+
+ addfragptr.p->addfragStatus = AddFragRecord::WAIT_TWO_TUP;
+ sendAddFragReq(signal);
+}//Dblqh::execACCFRAGCONF()
+
+/* *************** */
+/* TUPFRAGCONF > */
+/* *************** */
+void Dblqh::execTUPFRAGCONF(Signal* signal)
+{
+ jamEntry();
+ addfragptr.i = signal->theData[0];
+ Uint32 tupConnectptr = signal->theData[1];
+ Uint32 tupFragPtr = signal->theData[2]; /* TUP FRAGMENT POINTER */
+ Uint32 localFragId = signal->theData[3]; /* LOCAL FRAGMENT ID */
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (localFragId == addfragptr.p->fragid1) {
+ jam();
+ fragptr.p->tupFragptr[0] = tupFragPtr;
+ } else if (localFragId == addfragptr.p->fragid2) {
+ jam();
+ fragptr.p->tupFragptr[1] = tupFragPtr;
+ } else {
+ ndbrequire(false);
+ return;
+ }//if
+ switch (addfragptr.p->addfragStatus) {
+ case AddFragRecord::WAIT_TWO_TUP:
+ jam();
+ fragptr.p->tupFragptr[0] = tupFragPtr;
+ addfragptr.p->tup1Connectptr = tupConnectptr;
+ addfragptr.p->addfragStatus = AddFragRecord::WAIT_ONE_TUP;
+ sendAddFragReq(signal);
+ break;
+ case AddFragRecord::WAIT_ONE_TUP:
+ jam();
+ fragptr.p->tupFragptr[1] = tupFragPtr;
+ addfragptr.p->tup2Connectptr = tupConnectptr;
+ if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
+ addfragptr.p->addfragStatus = AddFragRecord::WAIT_TWO_TUX;
+ sendAddFragReq(signal);
+ break;
+ }
+ goto done_with_frag;
+ break;
+ case AddFragRecord::WAIT_TWO_TUX:
+ jam();
+ fragptr.p->tuxFragptr[0] = tupFragPtr;
+ addfragptr.p->tux1Connectptr = tupConnectptr;
+ addfragptr.p->addfragStatus = AddFragRecord::WAIT_ONE_TUX;
+ sendAddFragReq(signal);
+ break;
+ case AddFragRecord::WAIT_ONE_TUX:
+ jam();
+ fragptr.p->tuxFragptr[1] = tupFragPtr;
+ addfragptr.p->tux2Connectptr = tupConnectptr;
+ goto done_with_frag;
+ break;
+ done_with_frag:
+ /* ---------------------------------------------------------------- */
+ /* Finished create of fragments. Now ready for creating attributes. */
+ /* ---------------------------------------------------------------- */
+ addfragptr.p->addfragStatus = AddFragRecord::WAIT_ADD_ATTR;
+ {
+ LqhFragConf* conf = (LqhFragConf*)signal->getDataPtrSend();
+ conf->senderData = addfragptr.p->dictConnectptr;
+ conf->lqhFragPtr = addfragptr.i;
+ sendSignal(addfragptr.p->dictBlockref, GSN_LQHFRAGCONF,
+ signal, LqhFragConf::SignalLength, JBB);
+ }
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+}//Dblqh::execTUPFRAGCONF()
+
+/* *************** */
+/* TUXFRAGCONF > */
+/* *************** */
+void Dblqh::execTUXFRAGCONF(Signal* signal)
+{
+ jamEntry();
+ execTUPFRAGCONF(signal);
+}//Dblqh::execTUXFRAGCONF
+
+/*
+ * Add fragment in TUP or TUX. Called up to 4 times.
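+ * WAIT_TWO_TUP and WAIT_ONE_TUP send TUPFRAGREQ for fragid1 and fragid2;
+ * WAIT_TWO_TUX and WAIT_ONE_TUX send TUXFRAGREQ for the same fragment ids
+ * (ordered indexes only).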
+ */
+void
+Dblqh::sendAddFragReq(Signal* signal)
+{
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP ||
+ addfragptr.p->addfragStatus == AddFragRecord::WAIT_ONE_TUP) {
+ if (DictTabInfo::isTable(addfragptr.p->tableType) ||
+ DictTabInfo::isHashIndex(addfragptr.p->tableType)) {
+ jam();
+ signal->theData[0] = addfragptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = 0; /* ADD TABLE */
+ signal->theData[3] = addfragptr.p->tabId;
+ signal->theData[4] = addfragptr.p->noOfAttr;
+ signal->theData[5] =
+ addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP
+ ? addfragptr.p->fragid1 : addfragptr.p->fragid2;
+ signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1;
+ signal->theData[7] = addfragptr.p->noOfNull;
+ signal->theData[8] = addfragptr.p->schemaVer;
+ signal->theData[9] = addfragptr.p->noOfKeyAttr;
+ signal->theData[10] = addfragptr.p->noOfNewAttr;
+ signal->theData[11] = addfragptr.p->checksumIndicator;
+ signal->theData[12] = addfragptr.p->noOfAttributeGroups;
+ signal->theData[13] = addfragptr.p->GCPIndicator;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ,
+ signal, TupFragReq::SignalLength, JBB);
+ return;
+ }
+ if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
+ jam();
+ signal->theData[0] = addfragptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = 0; /* ADD TABLE */
+ signal->theData[3] = addfragptr.p->tabId;
+ signal->theData[4] = 1; /* ordered index: one array attr */
+ signal->theData[5] =
+ addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP
+ ? addfragptr.p->fragid1 : addfragptr.p->fragid2;
+ signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1;
+ signal->theData[7] = 0; /* ordered index: no nullable */
+ signal->theData[8] = addfragptr.p->schemaVer;
+ signal->theData[9] = 1; /* ordered index: one key */
+ signal->theData[10] = addfragptr.p->noOfNewAttr;
+ signal->theData[11] = addfragptr.p->checksumIndicator;
+ signal->theData[12] = addfragptr.p->noOfAttributeGroups;
+ signal->theData[13] = addfragptr.p->GCPIndicator;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ,
+ signal, TupFragReq::SignalLength, JBB);
+ return;
+ }
+ }
+ if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUX ||
+ addfragptr.p->addfragStatus == AddFragRecord::WAIT_ONE_TUX) {
+ if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
+ jam();
+ TuxFragReq* const tuxreq = (TuxFragReq*)signal->getDataPtrSend();
+ tuxreq->userPtr = addfragptr.i;
+ tuxreq->userRef = cownref;
+ tuxreq->reqInfo = 0; /* ADD TABLE */
+ tuxreq->tableId = addfragptr.p->tabId;
+ ndbrequire(addfragptr.p->noOfAttr >= 2);
+ tuxreq->noOfAttr = addfragptr.p->noOfAttr - 1; /* skip NDB$TNODE */
+ tuxreq->fragId =
+ addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUX
+ ? addfragptr.p->fragid1: addfragptr.p->fragid2;
+ tuxreq->fragOff = addfragptr.p->lh3DistrBits;
+ tuxreq->tableType = addfragptr.p->tableType;
+ tuxreq->primaryTableId = addfragptr.p->primaryTableId;
+ // pointer to index fragment in TUP
+ tuxreq->tupIndexFragPtrI =
+ addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUX ?
+ fragptr.p->tupFragptr[0] : fragptr.p->tupFragptr[1];
+ // pointers to table fragments in TUP and ACC
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = fragptr.p->tableFragptr;
+ ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
+ tuxreq->tupTableFragPtrI[0] = tFragPtr.p->tupFragptr[0];
+ tuxreq->tupTableFragPtrI[1] = tFragPtr.p->tupFragptr[1];
+ tuxreq->accTableFragPtrI[0] = tFragPtr.p->accFragptr[0];
+ tuxreq->accTableFragPtrI[1] = tFragPtr.p->accFragptr[1];
+ sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ,
+ signal, TuxFragReq::SignalLength, JBB);
+ return;
+ }
+ }
+ ndbrequire(false);
+}//Dblqh::sendAddFragReq
+
+/* ************************************************************************> */
+/* LQHADDATTREQ: Request from DICT to create attributes for the new table. */
+/* ************************************************************************> */
+void Dblqh::execLQHADDATTREQ(Signal* signal)
+{
+ jamEntry();
+ LqhAddAttrReq * const req = (LqhAddAttrReq*)signal->getDataPtr();
+
+ addfragptr.i = req->lqhFragPtr;
+ const Uint32 tnoOfAttr = req->noOfAttributes;
+ const Uint32 senderData = req->senderData;
+ const Uint32 senderAttrPtr = req->senderAttrPtr;
+
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ ndbrequire(addfragptr.p->addfragStatus == AddFragRecord::WAIT_ADD_ATTR);
+ ndbrequire((tnoOfAttr != 0) && (tnoOfAttr <= LqhAddAttrReq::MAX_ATTRIBUTES));
+ addfragptr.p->totalAttrReceived += tnoOfAttr;
+ ndbrequire(addfragptr.p->totalAttrReceived <= addfragptr.p->noOfAttr);
+
+ addfragptr.p->attrReceived = tnoOfAttr;
+ for (Uint32 i = 0; i < tnoOfAttr; i++) {
+ addfragptr.p->attributes[i] = req->attributes[i];
+ }//for
+ addfragptr.p->attrSentToTup = 0;
+ ndbrequire(addfragptr.p->dictConnectptr == senderData);
+ addfragptr.p->m_senderAttrPtr = senderAttrPtr;
+ addfragptr.p->addfragStatus = AddFragRecord::TUP_ATTR_WAIT1;
+ sendAddAttrReq(signal);
+}//Dblqh::execLQHADDATTREQ()
+
+/* *********************>> */
+/* TUP_ADD_ATTCONF > */
+/* *********************>> */
+void Dblqh::execTUP_ADD_ATTCONF(Signal* signal)
+{
+ jamEntry();
+ addfragptr.i = signal->theData[0];
+ // lastAttr set implies that the operation was released on the other side
+ const bool lastAttr = signal->theData[1];
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ switch (addfragptr.p->addfragStatus) {
+ case AddFragRecord::TUP_ATTR_WAIT1:
+ jam();
+ if (lastAttr)
+ addfragptr.p->tup1Connectptr = RNIL;
+ addfragptr.p->addfragStatus = AddFragRecord::TUP_ATTR_WAIT2;
+ sendAddAttrReq(signal);
+ break;
+ case AddFragRecord::TUP_ATTR_WAIT2:
+ jam();
+ if (lastAttr)
+ addfragptr.p->tup2Connectptr = RNIL;
+ if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
+ addfragptr.p->addfragStatus = AddFragRecord::TUX_ATTR_WAIT1;
+ sendAddAttrReq(signal);
+ break;
+ }
+ goto done_with_attr;
+ break;
+ case AddFragRecord::TUX_ATTR_WAIT1:
+ jam();
+ if (lastAttr)
+ addfragptr.p->tux1Connectptr = RNIL;
+ addfragptr.p->addfragStatus = AddFragRecord::TUX_ATTR_WAIT2;
+ sendAddAttrReq(signal);
+ break;
+ case AddFragRecord::TUX_ATTR_WAIT2:
+ jam();
+ if (lastAttr)
+ addfragptr.p->tux2Connectptr = RNIL;
+ goto done_with_attr;
+ break;
+ done_with_attr:
+ addfragptr.p->attrSentToTup = addfragptr.p->attrSentToTup + 1;
+ ndbrequire(addfragptr.p->attrSentToTup <= addfragptr.p->attrReceived);
+ ndbrequire(addfragptr.p->totalAttrReceived <= addfragptr.p->noOfAttr);
+ if (addfragptr.p->attrSentToTup < addfragptr.p->attrReceived) {
+ // more in this batch
+ jam();
+ addfragptr.p->addfragStatus = AddFragRecord::TUP_ATTR_WAIT1;
+ sendAddAttrReq(signal);
+ } else if (addfragptr.p->totalAttrReceived < addfragptr.p->noOfAttr) {
+ // more batches to receive
+ jam();
+ addfragptr.p->addfragStatus = AddFragRecord::WAIT_ADD_ATTR;
+ LqhAddAttrConf *const conf = (LqhAddAttrConf*)signal->getDataPtrSend();
+ conf->senderData = addfragptr.p->dictConnectptr;
+ conf->senderAttrPtr = addfragptr.p->m_senderAttrPtr;
+ conf->fragId = addfragptr.p->addFragid;
+ sendSignal(addfragptr.p->dictBlockref, GSN_LQHADDATTCONF,
+ signal, LqhAddAttrConf::SignalLength, JBB);
+ } else {
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ /* ------------------------------------------------------------------
+ * WE HAVE NOW COMPLETED ADDING THIS FRAGMENT. WE NOW NEED TO SET THE
+ * PROPER STATE IN FRAG_STATUS DEPENDENT ON IF WE ARE CREATING A NEW
+ * REPLICA OR IF WE ARE CREATING A TABLE. FOR FRAGMENTS IN COPY
+ * PROCESS WE DO NOT WANT LOGGING ACTIVATED.
+ * ----------------------------------------------------------------- */
+ if (addfragptr.p->fragCopyCreation == 1) {
+ jam();
+ if (! DictTabInfo::isOrderedIndex(addfragptr.p->tableType))
+ fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION;
+ else
+ fragptr.p->fragStatus = Fragrecord::FSACTIVE;
+ fragptr.p->logFlag = Fragrecord::STATE_FALSE;
+ } else {
+ jam();
+ fragptr.p->fragStatus = Fragrecord::FSACTIVE;
+ }//if
+ LqhAddAttrConf *const conf = (LqhAddAttrConf*)signal->getDataPtrSend();
+ conf->senderData = addfragptr.p->dictConnectptr;
+ conf->senderAttrPtr = addfragptr.p->m_senderAttrPtr;
+ conf->fragId = addfragptr.p->addFragid;
+ sendSignal(addfragptr.p->dictBlockref, GSN_LQHADDATTCONF, signal,
+ LqhAddAttrConf::SignalLength, JBB);
+ releaseAddfragrec(signal);
+ }//if
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+}
+
+/* **********************>> */
+/* TUX_ADD_ATTRCONF > */
+/* **********************>> */
+void Dblqh::execTUX_ADD_ATTRCONF(Signal* signal)
+{
+ jamEntry();
+ execTUP_ADD_ATTCONF(signal);
+}//Dblqh::execTUX_ADD_ATTRCONF
+
+/*
+ * Add attribute in TUP or TUX. Called up to 4 times.
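+ * TUP_ATTR_WAIT1/2 address the two TUP fragments and TUX_ATTR_WAIT1/2 the
+ * two TUX fragments. An attribute that does not belong to the addressed
+ * block is acknowledged locally with a self-sent CONF so that the state
+ * machine still advances.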
+ */
+void
+Dblqh::sendAddAttrReq(Signal* signal)
+{
+ arrGuard(addfragptr.p->attrSentToTup, LqhAddAttrReq::MAX_ATTRIBUTES);
+ LqhAddAttrReq::Entry& entry =
+ addfragptr.p->attributes[addfragptr.p->attrSentToTup];
+ const Uint32 attrId = entry.attrId & 0xffff;
+ const Uint32 primaryAttrId = entry.attrId >> 16;
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (addfragptr.p->addfragStatus == AddFragRecord::TUP_ATTR_WAIT1 ||
+ addfragptr.p->addfragStatus == AddFragRecord::TUP_ATTR_WAIT2) {
+ if (DictTabInfo::isTable(addfragptr.p->tableType) ||
+ DictTabInfo::isHashIndex(addfragptr.p->tableType) ||
+ (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) &&
+ primaryAttrId == ZNIL)) {
+ jam();
+ TupAddAttrReq* const tupreq = (TupAddAttrReq*)signal->getDataPtrSend();
+ tupreq->tupConnectPtr =
+ addfragptr.p->addfragStatus == AddFragRecord::TUP_ATTR_WAIT1
+ ? addfragptr.p->tup1Connectptr : addfragptr.p->tup2Connectptr;
+ tupreq->notused1 = 0;
+ tupreq->attrId = attrId;
+ tupreq->attrDescriptor = entry.attrDescriptor;
+ tupreq->extTypeInfo = entry.extTypeInfo;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUP_ADD_ATTRREQ,
+ signal, TupAddAttrReq::SignalLength, JBB);
+ return;
+ }
+ if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) &&
+ primaryAttrId != ZNIL) {
+ // this attribute is not for TUP
+ jam();
+ TupAddAttrConf* tupconf = (TupAddAttrConf*)signal->getDataPtrSend();
+ tupconf->userPtr = addfragptr.i;
+ tupconf->lastAttr = false;
+ sendSignal(reference(), GSN_TUP_ADD_ATTCONF,
+ signal, TupAddAttrConf::SignalLength, JBB);
+ return;
+ }
+ }
+ if (addfragptr.p->addfragStatus == AddFragRecord::TUX_ATTR_WAIT1 ||
+ addfragptr.p->addfragStatus == AddFragRecord::TUX_ATTR_WAIT2) {
+ jam();
+ if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) &&
+ primaryAttrId != ZNIL) {
+ jam();
+ TuxAddAttrReq* const tuxreq = (TuxAddAttrReq*)signal->getDataPtrSend();
+ tuxreq->tuxConnectPtr =
+ addfragptr.p->addfragStatus == AddFragRecord::TUX_ATTR_WAIT1
+ ? addfragptr.p->tux1Connectptr : addfragptr.p->tux2Connectptr;
+ tuxreq->notused1 = 0;
+ tuxreq->attrId = attrId;
+ tuxreq->attrDescriptor = entry.attrDescriptor;
+ tuxreq->extTypeInfo = entry.extTypeInfo;
+ tuxreq->primaryAttrId = primaryAttrId;
+ sendSignal(fragptr.p->tuxBlockref, GSN_TUX_ADD_ATTRREQ,
+ signal, TuxAddAttrReq::SignalLength, JBB);
+ return;
+ }
+ if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) &&
+ primaryAttrId == ZNIL) {
+ // this attribute is not for TUX
+ jam();
+ TuxAddAttrConf* tuxconf = (TuxAddAttrConf*)signal->getDataPtrSend();
+ tuxconf->userPtr = addfragptr.i;
+ tuxconf->lastAttr = false;
+ sendSignal(reference(), GSN_TUX_ADD_ATTRCONF,
+ signal, TuxAddAttrConf::SignalLength, JBB);
+ return;
+ }
+ }
+ ndbrequire(false);
+}//Dblqh::sendAddAttrReq
+
+/* ************************************************************************>> */
+/* TAB_COMMITREQ: Commit the new table for use in transactions. Sender DICT. */
+/* ************************************************************************>> */
+void Dblqh::execTAB_COMMITREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 dihPtr = signal->theData[0];
+ BlockReference dihBlockref = signal->theData[1];
+ tabptr.i = signal->theData[2];
+
+ if (tabptr.i >= ctabrecFileSize) {
+ jam();
+ terrorCode = ZTAB_FILE_SIZE;
+ signal->theData[0] = dihPtr;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = tabptr.i;
+ signal->theData[3] = terrorCode;
+ sendSignal(dihBlockref, GSN_TAB_COMMITREF, signal, 4, JBB);
+ return;
+ }//if
+ ptrAss(tabptr, tablerec);
+ if (tabptr.p->tableStatus != Tablerec::ADD_TABLE_ONGOING) {
+ jam();
+ terrorCode = ZTAB_STATE_ERROR;
+ signal->theData[0] = dihPtr;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = tabptr.i;
+ signal->theData[3] = terrorCode;
+ signal->theData[4] = tabptr.p->tableStatus;
+ sendSignal(dihBlockref, GSN_TAB_COMMITREF, signal, 5, JBB);
+ ndbrequire(false);
+ return;
+ }//if
+ tabptr.p->usageCount = 0;
+ tabptr.p->tableStatus = Tablerec::TABLE_DEFINED;
+ signal->theData[0] = dihPtr;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = tabptr.i;
+ sendSignal(dihBlockref, GSN_TAB_COMMITCONF, signal, 3, JBB);
+ return;
+}//Dblqh::execTAB_COMMITREQ()
+
+
+void Dblqh::fragrefLab(Signal* signal,
+ BlockReference fragBlockRef,
+ Uint32 fragConPtr,
+ Uint32 errorCode)
+{
+ LqhFragRef * ref = (LqhFragRef*)signal->getDataPtrSend();
+ ref->senderData = fragConPtr;
+ ref->errorCode = errorCode;
+ sendSignal(fragBlockRef, GSN_LQHFRAGREF, signal,
+ LqhFragRef::SignalLength, JBB);
+ return;
+}//Dblqh::fragrefLab()
+
+/*
+ * Abort on-going ops.
+ */
+void Dblqh::abortAddFragOps(Signal* signal)
+{
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ signal->theData[0] = (Uint32)-1;
+ if (addfragptr.p->tup1Connectptr != RNIL) {
+ jam();
+ signal->theData[1] = addfragptr.p->tup1Connectptr;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
+ addfragptr.p->tup1Connectptr = RNIL;
+ }
+ if (addfragptr.p->tup2Connectptr != RNIL) {
+ jam();
+ signal->theData[1] = addfragptr.p->tup2Connectptr;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
+ addfragptr.p->tup2Connectptr = RNIL;
+ }
+ if (addfragptr.p->tux1Connectptr != RNIL) {
+ jam();
+ signal->theData[1] = addfragptr.p->tux1Connectptr;
+ sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
+ addfragptr.p->tux1Connectptr = RNIL;
+ }
+ if (addfragptr.p->tux2Connectptr != RNIL) {
+ jam();
+ signal->theData[1] = addfragptr.p->tux2Connectptr;
+ sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
+ addfragptr.p->tux2Connectptr = RNIL;
+ }
+}
+
+/* ************>> */
+/* ACCFRAGREF > */
+/* ************>> */
+void Dblqh::execACCFRAGREF(Signal* signal)
+{
+ jamEntry();
+ addfragptr.i = signal->theData[0];
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ terrorCode = signal->theData[1];
+ ndbrequire(addfragptr.p->addfragStatus == AddFragRecord::ACC_ADDFRAG);
+ addfragptr.p->addfragErrorCode = terrorCode;
+
+ const Uint32 ref = addfragptr.p->dictBlockref;
+ const Uint32 senderData = addfragptr.p->dictConnectptr;
+ const Uint32 errorCode = addfragptr.p->addfragErrorCode;
+ releaseAddfragrec(signal);
+ fragrefLab(signal, ref, senderData, errorCode);
+
+ return;
+}//Dblqh::execACCFRAGREF()
+
+/* ************>> */
+/* TUPFRAGREF > */
+/* ************>> */
+void Dblqh::execTUPFRAGREF(Signal* signal)
+{
+ jamEntry();
+ addfragptr.i = signal->theData[0];
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ terrorCode = signal->theData[1];
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ addfragptr.p->addfragErrorCode = terrorCode;
+
+ // no operation to release, just add some jams
+ switch (addfragptr.p->addfragStatus) {
+ case AddFragRecord::WAIT_TWO_TUP:
+ jam();
+ break;
+ case AddFragRecord::WAIT_ONE_TUP:
+ jam();
+ break;
+ case AddFragRecord::WAIT_TWO_TUX:
+ jam();
+ break;
+ case AddFragRecord::WAIT_ONE_TUX:
+ jam();
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+ abortAddFragOps(signal);
+
+ const Uint32 ref = addfragptr.p->dictBlockref;
+ const Uint32 senderData = addfragptr.p->dictConnectptr;
+ const Uint32 errorCode = addfragptr.p->addfragErrorCode;
+ releaseAddfragrec(signal);
+ fragrefLab(signal, ref, senderData, errorCode);
+
+}//Dblqh::execTUPFRAGREF()
+
+/* ************>> */
+/* TUXFRAGREF > */
+/* ************>> */
+void Dblqh::execTUXFRAGREF(Signal* signal)
+{
+ jamEntry();
+ execTUPFRAGREF(signal);
+}//Dblqh::execTUXFRAGREF
+
+/* *********************> */
+/* TUP_ADD_ATTREF > */
+/* *********************> */
+void Dblqh::execTUP_ADD_ATTRREF(Signal* signal)
+{
+ jamEntry();
+ addfragptr.i = signal->theData[0];
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ terrorCode = signal->theData[1];
+ addfragptr.p->addfragErrorCode = terrorCode;
+
+ // operation was released on the other side
+ switch (addfragptr.p->addfragStatus) {
+ case AddFragRecord::TUP_ATTR_WAIT1:
+ jam();
+ ndbrequire(addfragptr.p->tup1Connectptr != RNIL);
+ addfragptr.p->tup1Connectptr = RNIL;
+ break;
+ case AddFragRecord::TUP_ATTR_WAIT2:
+ jam();
+ ndbrequire(addfragptr.p->tup2Connectptr != RNIL);
+ addfragptr.p->tup2Connectptr = RNIL;
+ break;
+ case AddFragRecord::TUX_ATTR_WAIT1:
+ jam();
+ ndbrequire(addfragptr.p->tux1Connectptr != RNIL);
+ addfragptr.p->tux1Connectptr = RNIL;
+ break;
+ case AddFragRecord::TUX_ATTR_WAIT2:
+ jam();
+ ndbrequire(addfragptr.p->tux2Connectptr != RNIL);
+ addfragptr.p->tux2Connectptr = RNIL;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+ abortAddFragOps(signal);
+
+ const Uint32 Ref = addfragptr.p->dictBlockref;
+ const Uint32 senderData = addfragptr.p->dictConnectptr;
+ const Uint32 errorCode = addfragptr.p->addfragErrorCode;
+ releaseAddfragrec(signal);
+
+ LqhAddAttrRef *const ref = (LqhAddAttrRef*)signal->getDataPtrSend();
+ ref->senderData = senderData;
+ ref->errorCode = errorCode;
+ sendSignal(Ref, GSN_LQHADDATTREF, signal,
+ LqhAddAttrRef::SignalLength, JBB);
+
+}//Dblqh::execTUP_ADD_ATTRREF()
+
+/* **********************> */
+/* TUX_ADD_ATTRREF > */
+/* **********************> */
+void Dblqh::execTUX_ADD_ATTRREF(Signal* signal)
+{
+ jamEntry();
+ execTUP_ADD_ATTRREF(signal);
+}//Dblqh::execTUX_ADD_ATTRREF
+
+void
+Dblqh::execPREP_DROP_TAB_REQ(Signal* signal){
+ jamEntry();
+
+ PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
+
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+
+ TablerecPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
+
+ Uint32 errCode = 0;
+ errCode = checkDropTabState(tabPtr.p->tableStatus, GSN_PREP_DROP_TAB_REQ);
+ if(errCode != 0){
+ jam();
+
+ PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->tableId = tabPtr.i;
+ ref->errorCode = errCode;
+ sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal,
+ PrepDropTabRef::SignalLength, JBB);
+ return;
+ }
+
+ tabPtr.p->tableStatus = Tablerec::PREP_DROP_TABLE_ONGOING;
+ tabPtr.p->waitingTC.clear();
+ tabPtr.p->waitingDIH.clear();
+
+ PrepDropTabConf * conf = (PrepDropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ sendSignal(senderRef, GSN_PREP_DROP_TAB_CONF, signal,
+ PrepDropTabConf::SignalLength, JBB);
+
+ signal->theData[0] = ZPREP_DROP_TABLE;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = senderRef;
+ signal->theData[3] = senderData;
+ checkDropTab(signal);
+}
+
+void
+Dblqh::checkDropTab(Signal* signal){
+
+ TablerecPtr tabPtr;
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
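+ // Drop preparation polls itself via CONTINUEB(ZPREP_DROP_TABLE) every
+ // 100 ms until no transaction uses the table and no local checkpoint,
+ // current or queued, refers to it; only then are the waiting TC and DIH
+ // blocks sent WAIT_DROP_TAB_CONF.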
+
+ ndbrequire(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING);
+
+ if(tabPtr.p->usageCount > 0){
+ jam();
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 4);
+ return;
+ }
+
+ bool lcpDone = true;
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+ if(lcpPtr.p->lcpState != LcpRecord::LCP_IDLE){
+ jam();
+
+ if(lcpPtr.p->currentFragment.lcpFragOrd.tableId == tabPtr.i){
+ jam();
+ lcpDone = false;
+ }
+
+ if(lcpPtr.p->lcpQueued &&
+ lcpPtr.p->queuedFragment.lcpFragOrd.tableId == tabPtr.i){
+ jam();
+ lcpDone = false;
+ }
+ }
+
+ if(!lcpDone){
+ jam();
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 4);
+ return;
+ }
+
+ tabPtr.p->tableStatus = Tablerec::PREP_DROP_TABLE_DONE;
+
+ WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ for(Uint32 i = 1; i<MAX_NDB_NODES; i++){
+ if(tabPtr.p->waitingTC.get(i)){
+ tabPtr.p->waitingTC.clear(i);
+ sendSignal(calcTcBlockRef(i), GSN_WAIT_DROP_TAB_CONF, signal,
+ WaitDropTabConf::SignalLength, JBB);
+ }
+ if(tabPtr.p->waitingDIH.get(i)){
+ tabPtr.p->waitingDIH.clear(i);
+ sendSignal(calcDihBlockRef(i), GSN_WAIT_DROP_TAB_CONF, signal,
+ WaitDropTabConf::SignalLength, JBB);
+ }
+ }
+}
+
+void
+Dblqh::execWAIT_DROP_TAB_REQ(Signal* signal){
+ jamEntry();
+ WaitDropTabReq * req = (WaitDropTabReq*)signal->getDataPtr();
+
+ TablerecPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
+
+ Uint32 senderRef = req->senderRef;
+ Uint32 nodeId = refToNode(senderRef);
+ Uint32 blockNo = refToBlock(senderRef);
+
+ if(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING){
+ jam();
+ switch(blockNo){
+ case DBTC:
+ tabPtr.p->waitingTC.set(nodeId);
+ break;
+ case DBDIH:
+ tabPtr.p->waitingDIH.set(nodeId);
+ break;
+ default:
+ ndbrequire(false);
+ }
+ return;
+ }
+
+ if(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
+ jam();
+ WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ sendSignal(senderRef, GSN_WAIT_DROP_TAB_CONF, signal,
+ WaitDropTabConf::SignalLength, JBB);
+ return;
+ }
+
+ WaitDropTabRef * ref = (WaitDropTabRef*)signal->getDataPtrSend();
+ ref->tableId = tabPtr.i;
+ ref->senderRef = reference();
+
+ bool ok = false;
+ switch(tabPtr.p->tableStatus){
+ case Tablerec::TABLE_DEFINED:
+ ok = true;
+ ref->errorCode = WaitDropTabRef::IllegalTableState;
+ break;
+ case Tablerec::NOT_DEFINED:
+ ok = true;
+ ref->errorCode = WaitDropTabRef::NoSuchTable;
+ break;
+ case Tablerec::ADD_TABLE_ONGOING:
+ ok = true;
+ ref->errorCode = WaitDropTabRef::IllegalTableState;
+ break;
+ case Tablerec::PREP_DROP_TABLE_ONGOING:
+ case Tablerec::PREP_DROP_TABLE_DONE:
+ // Should have been taken care of above
+ ndbrequire(false);
+ }
+ ndbrequire(ok);
+ ref->tableStatus = tabPtr.p->tableStatus;
+ sendSignal(senderRef, GSN_WAIT_DROP_TAB_REF, signal,
+ WaitDropTabRef::SignalLength, JBB);
+ return;
+}
+
+void
+Dblqh::execDROP_TAB_REQ(Signal* signal){
+ jamEntry();
+
+ DropTabReq* req = (DropTabReq*)signal->getDataPtr();
+
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+
+ TablerecPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
+
+ do {
+ if(req->requestType == DropTabReq::RestartDropTab){
+ jam();
+ break;
+ }
+
+ if(req->requestType == DropTabReq::OnlineDropTab){
+ jam();
+ Uint32 errCode = 0;
+ errCode = checkDropTabState(tabPtr.p->tableStatus, GSN_DROP_TAB_REQ);
+ if(errCode != 0){
+ jam();
+
+ DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->tableId = tabPtr.i;
+ ref->errorCode = errCode;
+ sendSignal(senderRef, GSN_DROP_TAB_REF, signal,
+ DropTabRef::SignalLength, JBB);
+ return;
+ }
+ }
+
+ removeTable(tabPtr.i);
+
+ } while(false);
+
+ ndbrequire(tabPtr.p->usageCount == 0);
+ tabPtr.p->tableStatus = Tablerec::NOT_DEFINED;
+
+ DropTabConf * const dropConf = (DropTabConf *)signal->getDataPtrSend();
+ dropConf->senderRef = reference();
+ dropConf->senderData = senderData;
+ dropConf->tableId = tabPtr.i;
+ sendSignal(senderRef, GSN_DROP_TAB_CONF,
+ signal, DropTabConf::SignalLength, JBB);
+}
+
+Uint32
+Dblqh::checkDropTabState(Tablerec::TableStatus status, Uint32 gsn) const{
+
+ if(gsn == GSN_PREP_DROP_TAB_REQ){
+ switch(status){
+ case Tablerec::NOT_DEFINED:
+ jam();
+ // Fall through
+ case Tablerec::ADD_TABLE_ONGOING:
+ jam();
+ return PrepDropTabRef::NoSuchTable;
+ break;
+ case Tablerec::PREP_DROP_TABLE_ONGOING:
+ jam();
+ return PrepDropTabRef::PrepDropInProgress;
+ break;
+ case Tablerec::PREP_DROP_TABLE_DONE:
+ jam();
+ return PrepDropTabRef::DropInProgress;
+ break;
+ case Tablerec::TABLE_DEFINED:
+ jam();
+ return 0;
+ break;
+ }
+ ndbrequire(0);
+ }
+
+ if(gsn == GSN_DROP_TAB_REQ){
+ switch(status){
+ case Tablerec::NOT_DEFINED:
+ jam();
+ // Fall through
+ case Tablerec::ADD_TABLE_ONGOING:
+ jam();
+ return DropTabRef::NoSuchTable;
+ break;
+ case Tablerec::PREP_DROP_TABLE_ONGOING:
+ jam();
+ return DropTabRef::PrepDropInProgress;
+ break;
+ case Tablerec::PREP_DROP_TABLE_DONE:
+ jam();
+ return 0;
+ break;
+ case Tablerec::TABLE_DEFINED:
+ jam();
+ return DropTabRef::DropWoPrep;
+ }
+ ndbrequire(0);
+ }
+ ndbrequire(0);
+ return RNIL;
+}
+
+void Dblqh::removeTable(Uint32 tableId)
+{
+ tabptr.i = tableId;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+
+ for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
+ jam();
+ if (tabptr.p->fragid[i] != ZNIL) {
+ jam();
+ deleteFragrec(tabptr.p->fragid[i]);
+ }//if
+ }//for
+}//Dblqh::removeTable()
+
+void
+Dblqh::execALTER_TAB_REQ(Signal* signal)
+{
+ jamEntry();
+ AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr();
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+ const Uint32 changeMask = req->changeMask;
+ const Uint32 tableId = req->tableId;
+ const Uint32 tableVersion = req->tableVersion;
+ const Uint32 gci = req->gci;
+ AlterTabReq::RequestType requestType =
+ (AlterTabReq::RequestType) req->requestType;
+
+ TablerecPtr tablePtr;
+ tablePtr.i = tableId;
+ ptrCheckGuard(tablePtr, ctabrecFileSize, tablerec);
+ tablePtr.p->schemaVersion = tableVersion;
+
+ // Request handled successfully
+ AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->changeMask = changeMask;
+ conf->tableId = tableId;
+ conf->tableVersion = tableVersion;
+ conf->gci = gci;
+ conf->requestType = requestType;
+ sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal,
+ AlterTabConf::SignalLength, JBB);
+}
+
+/* ************************************************************************>>
+ * TIME_SIGNAL: Handles time-out of local operations. This is a clean-up
+ * handler. If no other measure has succeeded in cleaning up after a time-out,
+ * this routine removes the transaction after 120 seconds of inactivity. The
+ * check is performed once every 10 seconds. Sender is QMGR.
+ * ************************************************************************>> */
+void Dblqh::execTIME_SIGNAL(Signal* signal)
+{
+ jamEntry();
+ cLqhTimeOutCount++;
+ cLqhTimeOutCheckCount++;
+ if ((cCounterAccCommitBlocked > 0) ||
+ (cCounterTupCommitBlocked > 0)) {
+ jam();
+ signal->theData[0] = NDB_LE_UndoLogBlocked;
+ signal->theData[1] = cCounterTupCommitBlocked;
+ signal->theData[2] = cCounterAccCommitBlocked;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+
+ cCounterTupCommitBlocked = 0;
+ cCounterAccCommitBlocked = 0;
+ }//if
+ if (cLqhTimeOutCheckCount < 10) {
+ jam();
+ return;
+ }//if
+ cLqhTimeOutCheckCount = 0;
+#ifdef VM_TRACE
+ TcConnectionrecPtr tTcConptr;
+
+ for (tTcConptr.i = 0; tTcConptr.i < ctcConnectrecFileSize;
+ tTcConptr.i++) {
+ jam();
+ ptrAss(tTcConptr, tcConnectionrec);
+ if ((tTcConptr.p->tcTimer != 0) &&
+ ((tTcConptr.p->tcTimer + 120) < cLqhTimeOutCount)) {
+ ndbout << "Dblqh::execTIME_SIGNAL"<<endl
+ << "Timeout found in tcConnectRecord " <<tTcConptr.i<<endl
+ << " cLqhTimeOutCount = " << cLqhTimeOutCount << endl
+ << " tcTimer="<<tTcConptr.p->tcTimer<<endl
+ << " tcTimer+120="<<tTcConptr.p->tcTimer + 120<<endl;
+
+ ndbout << " transactionState = " << tTcConptr.p->transactionState<<endl;
+ ndbout << " operation = " << tTcConptr.p->operation<<endl;
+ ndbout << " tcNodeFailrec = " << tTcConptr.p->tcNodeFailrec
+ << " seqNoReplica = " << tTcConptr.p->seqNoReplica
+ << " simpleRead = " << tTcConptr.p->simpleRead
+ << endl;
+ ndbout << " replicaType = " << tTcConptr.p->replicaType
+ << " reclenAiLqhkey = " << tTcConptr.p->reclenAiLqhkey
+ << " opExec = " << tTcConptr.p->opExec
+ << endl;
+ ndbout << " opSimple = " << tTcConptr.p->opSimple
+ << " nextSeqNoReplica = " << tTcConptr.p->nextSeqNoReplica
+ << " lockType = " << tTcConptr.p->lockType
+ << " localFragptr = " << tTcConptr.p->localFragptr
+ << endl;
+ ndbout << " lastReplicaNo = " << tTcConptr.p->lastReplicaNo
+ << " indTakeOver = " << tTcConptr.p->indTakeOver
+ << " dirtyOp = " << tTcConptr.p->dirtyOp
+ << endl;
+ ndbout << " activeCreat = " << tTcConptr.p->activeCreat
+ << " tcBlockref = " << hex << tTcConptr.p->tcBlockref
+ << " reqBlockref = " << hex << tTcConptr.p->reqBlockref
+ << " primKeyLen = " << tTcConptr.p->primKeyLen
+ << endl;
+ ndbout << " nextReplica = " << tTcConptr.p->nextReplica
+ << " tcBlockref = " << hex << tTcConptr.p->tcBlockref
+ << " reqBlockref = " << hex << tTcConptr.p->reqBlockref
+ << " primKeyLen = " << tTcConptr.p->primKeyLen
+ << endl;
+ ndbout << " logStopPageNo = " << tTcConptr.p->logStopPageNo
+ << " logStartPageNo = " << tTcConptr.p->logStartPageNo
+ << " logStartPageIndex = " << tTcConptr.p->logStartPageIndex
+ << endl;
+ ndbout << " errorCode = " << tTcConptr.p->errorCode
+ << " clientBlockref = " << hex << tTcConptr.p->clientBlockref
+ << " applRef = " << hex << tTcConptr.p->applRef
+ << " totSendlenAi = " << tTcConptr.p->totSendlenAi
+ << endl;
+ ndbout << " totReclenAi = " << tTcConptr.p->totReclenAi
+ << " tcScanRec = " << tTcConptr.p->tcScanRec
+ << " tcScanInfo = " << tTcConptr.p->tcScanInfo
+ << " tcOprec = " << hex << tTcConptr.p->tcOprec
+ << endl;
+ ndbout << " tableref = " << tTcConptr.p->tableref
+ << " simpleTcConnect = " << tTcConptr.p->simpleTcConnect
+ << " storedProcId = " << tTcConptr.p->storedProcId
+ << " schemaVersion = " << tTcConptr.p->schemaVersion
+ << endl;
+ ndbout << " reqinfo = " << tTcConptr.p->reqinfo
+ << " reqRef = " << tTcConptr.p->reqRef
+ << " readlenAi = " << tTcConptr.p->readlenAi
+ << " prevTc = " << tTcConptr.p->prevTc
+ << endl;
+ ndbout << " prevLogTcrec = " << tTcConptr.p->prevLogTcrec
+ << " prevHashRec = " << tTcConptr.p->prevHashRec
+ << " nodeAfterNext0 = " << tTcConptr.p->nodeAfterNext[0]
+ << " nodeAfterNext1 = " << tTcConptr.p->nodeAfterNext[1]
+ << endl;
+ ndbout << " nextTcConnectrec = " << tTcConptr.p->nextTcConnectrec
+ << " nextTc = " << tTcConptr.p->nextTc
+ << " nextTcLogQueue = " << tTcConptr.p->nextTcLogQueue
+ << " nextLogTcrec = " << tTcConptr.p->nextLogTcrec
+ << endl;
+ ndbout << " nextHashRec = " << tTcConptr.p->nextHashRec
+ << " logWriteState = " << tTcConptr.p->logWriteState
+ << " logStartFileNo = " << tTcConptr.p->logStartFileNo
+ << " listState = " << tTcConptr.p->listState
+ << endl;
+ ndbout << " lastAttrinbuf = " << tTcConptr.p->lastAttrinbuf
+ << " lastTupkeybuf = " << tTcConptr.p->lastTupkeybuf
+ << " hashValue = " << tTcConptr.p->hashValue
+ << endl;
+ ndbout << " gci = " << tTcConptr.p->gci
+ << " fragmentptr = " << tTcConptr.p->fragmentptr
+ << " fragmentid = " << tTcConptr.p->fragmentid
+ << " firstTupkeybuf = " << tTcConptr.p->firstTupkeybuf
+ << endl;
+ ndbout << " firstAttrinbuf = " << tTcConptr.p->firstAttrinbuf
+ << " currTupAiLen = " << tTcConptr.p->currTupAiLen
+ << " currReclenAi = " << tTcConptr.p->currReclenAi
+ << endl;
+ ndbout << " tcTimer = " << tTcConptr.p->tcTimer
+ << " clientConnectrec = " << tTcConptr.p->clientConnectrec
+ << " applOprec = " << hex << tTcConptr.p->applOprec
+ << " abortState = " << tTcConptr.p->abortState
+ << endl;
+ ndbout << " transid0 = " << hex << tTcConptr.p->transid[0]
+ << " transid1 = " << hex << tTcConptr.p->transid[1]
+ << " tupkeyData0 = " << tTcConptr.p->tupkeyData[0]
+ << " tupkeyData1 = " << tTcConptr.p->tupkeyData[1]
+ << endl;
+ ndbout << " tupkeyData2 = " << tTcConptr.p->tupkeyData[2]
+ << " tupkeyData3 = " << tTcConptr.p->tupkeyData[3]
+ << endl;
+ switch (tTcConptr.p->transactionState) {
+
+ case TcConnectionrec::SCAN_STATE_USED:
+ if (tTcConptr.p->tcScanRec < cscanrecFileSize){
+ ScanRecordPtr TscanPtr;
+ c_scanRecordPool.getPtr(TscanPtr, tTcConptr.p->tcScanRec);
+ ndbout << " scanState = " << TscanPtr.p->scanState << endl;
+ //TscanPtr.p->scanLocalref[2];
+ ndbout << " copyPtr="<<TscanPtr.p->copyPtr
+ << " scanAccPtr="<<TscanPtr.p->scanAccPtr
+ << " scanAiLength="<<TscanPtr.p->scanAiLength
+ << endl;
+ ndbout << " m_curr_batch_size_rows="<<
+ TscanPtr.p->m_curr_batch_size_rows
+ << " m_max_batch_size_rows="<<
+ TscanPtr.p->m_max_batch_size_rows
+ << " scanErrorCounter="<<TscanPtr.p->scanErrorCounter
+ << " scanLocalFragid="<<TscanPtr.p->scanLocalFragid
+ << endl;
+ ndbout << " scanSchemaVersion="<<TscanPtr.p->scanSchemaVersion
+ << " scanStoredProcId="<<TscanPtr.p->scanStoredProcId
+ << " scanTcrec="<<TscanPtr.p->scanTcrec
+ << endl;
+ ndbout << " scanType="<<TscanPtr.p->scanType
+ << " scanApiBlockref="<<TscanPtr.p->scanApiBlockref
+ << " scanNodeId="<<TscanPtr.p->scanNodeId
+ << " scanCompletedStatus="<<TscanPtr.p->scanCompletedStatus
+ << endl;
+ ndbout << " scanFlag="<<TscanPtr.p->scanFlag
+ << " scanLockHold="<<TscanPtr.p->scanLockHold
+ << " scanLockMode="<<TscanPtr.p->scanLockMode
+ << " scanNumber="<<TscanPtr.p->scanNumber
+ << endl;
+ ndbout << " scanReleaseCounter="<<TscanPtr.p->scanReleaseCounter
+ << " scanTcWaiting="<<TscanPtr.p->scanTcWaiting
+ << " scanKeyinfoFlag="<<TscanPtr.p->scanKeyinfoFlag
+ << endl;
+ }else{
+ ndbout << "No connected scan record found" << endl;
+ }
+ break;
+ default:
+ break;
+ }//switch
+
+ // Reset the timer
+ tTcConptr.p->tcTimer = 0;
+ }//if
+ }//for
+#endif
+#ifdef VM_TRACE
+ for (lfoPtr.i = 0; lfoPtr.i < clfoFileSize; lfoPtr.i++) {
+ ptrAss(lfoPtr, logFileOperationRecord);
+ if ((lfoPtr.p->lfoTimer != 0) &&
+ ((lfoPtr.p->lfoTimer + 120) < cLqhTimeOutCount)) {
+ ndbout << "We have lost LFO record" << endl;
+ ndbout << "index = " << lfoPtr.i;
+ ndbout << "State = " << lfoPtr.p->lfoState;
+ ndbout << " Page No = " << lfoPtr.p->lfoPageNo;
+ ndbout << " noPagesRw = " << lfoPtr.p->noPagesRw;
+ ndbout << "lfoWordWritten = " << lfoPtr.p->lfoWordWritten << endl;
+ lfoPtr.p->lfoTimer = cLqhTimeOutCount;
+ }//if
+ }//for
+
+#endif
+
+#if 0
+ LcpRecordPtr TlcpPtr;
+ // Print information about the current local checkpoint
+ TlcpPtr.i = 0;
+ ptrAss(TlcpPtr, lcpRecord);
+ ndbout << "Information about LCP in this LQH" << endl
+ << " lcpState="<<TlcpPtr.p->lcpState<<endl
+ << " firstLcpLocAcc="<<TlcpPtr.p->firstLcpLocAcc<<endl
+ << " firstLcpLocTup="<<TlcpPtr.p->firstLcpLocTup<<endl
+ << " lcpAccptr="<<TlcpPtr.p->lcpAccptr<<endl
+ << " lastFragmentFlag="<<TlcpPtr.p->lastFragmentFlag<<endl
+ << " lcpQueued="<<TlcpPtr.p->lcpQueued<<endl
+ << " reportEmptyref="<< TlcpPtr.p->reportEmptyRef<<endl
+ << " reportEmpty="<<TlcpPtr.p->reportEmpty<<endl;
+#endif
+}//Dblqh::execTIME_SIGNAL()
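+
+/* A minimal stand-alone sketch (illustration only) of the timer arithmetic
+ * used in execTIME_SIGNAL() above: the block-wide tick counter advances on
+ * every TIME_SIGNAL, the full scan of the operation records only runs every
+ * 10th tick, and an operation counts as timed out once its start tick lies
+ * more than 120 ticks (nominally seconds) in the past. Names below are
+ * local to the sketch. */
+#if 0
+static bool sketch_isTimedOut(Uint32 opStartTick, Uint32 currentTick)
+{
+  /* tcTimer == 0 means no active operation, which never times out */
+  return (opStartTick != 0) && ((opStartTick + 120) < currentTick);
+}
+
+static bool sketch_timeForCheck(Uint32 & checkCounter)
+{
+  /* Matches the cLqhTimeOutCheckCount handling: only every 10th call
+   * performs the full scan. */
+  checkCounter++;
+  if (checkCounter < 10)
+    return false;
+  checkCounter = 0;
+  return true;
+}
+#endif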
+
+/* ######################################################################### */
+/* ####### EXECUTION MODULE ####### */
+/* THIS MODULE HANDLES THE RECEPTION OF LQHKEYREQ AND ALL PROCESSING */
+/* OF OPERATIONS ON BEHALF OF THIS REQUEST. THIS DOES ALSO INVOLVE */
+/* RECEPTION OF VARIOUS TYPES OF ATTRINFO AND KEYINFO. IT DOES ALSO */
+/* INVOLVE COMMUNICATION WITH ACC AND TUP. */
+/* ######################################################################### */
+
+void Dblqh::noFreeRecordLab(Signal* signal,
+ const LqhKeyReq * lqhKeyReq,
+ Uint32 errCode)
+{
+ jamEntry();
+ const Uint32 transid1 = lqhKeyReq->transId1;
+ const Uint32 transid2 = lqhKeyReq->transId2;
+ const Uint32 reqInfo = lqhKeyReq->requestInfo;
+
+ if(errCode == ZNO_FREE_MARKER_RECORDS_ERROR ||
+ errCode == ZNODE_SHUTDOWN_IN_PROGESS){
+ releaseTcrec(signal, tcConnectptr);
+ }
+
+ if (LqhKeyReq::getSimpleFlag(reqInfo) &&
+ LqhKeyReq::getOperation(reqInfo) == ZREAD){
+ jam();
+ ndbrequire(LqhKeyReq::getApplicationAddressFlag(reqInfo));
+ const Uint32 apiRef = lqhKeyReq->variableData[0];
+ const Uint32 apiOpRec = lqhKeyReq->variableData[1];
+
+ TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend();
+
+ tcKeyRef->connectPtr = apiOpRec;
+ tcKeyRef->transId[0] = transid1;
+ tcKeyRef->transId[1] = transid2;
+ tcKeyRef->errorCode = errCode;
+ sendSignal(apiRef, GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB);
+ } else {
+ jam();
+
+ const Uint32 clientPtr = lqhKeyReq->clientConnectPtr;
+ Uint32 TcOprec = clientPtr;
+ if(LqhKeyReq::getSameClientAndTcFlag(reqInfo) == 1){
+ if(LqhKeyReq::getApplicationAddressFlag(reqInfo))
+ TcOprec = lqhKeyReq->variableData[2];
+ else
+ TcOprec = lqhKeyReq->variableData[0];
+ }
+
+ LqhKeyRef * const ref = (LqhKeyRef*)signal->getDataPtrSend();
+ ref->userRef = clientPtr;
+ ref->connectPtr = TcOprec;
+ ref->errorCode = errCode;
+ ref->transId1 = transid1;
+ ref->transId2 = transid2;
+ sendSignal(signal->senderBlockRef(), GSN_LQHKEYREF, signal,
+ LqhKeyRef::SignalLength, JBB);
+ }//if
+ return;
+}//Dblqh::noFreeRecordLab()
+
+void Dblqh::LQHKEY_abort(Signal* signal, int errortype)
+{
+ switch (errortype) {
+ case 0:
+ jam();
+ terrorCode = ZCOPY_NODE_ERROR;
+ break;
+ case 1:
+ jam();
+ terrorCode = ZNO_FREE_LQH_CONNECTION;
+ break;
+ case 2:
+ jam();
+ terrorCode = signal->theData[1];
+ break;
+ case 3:
+ jam();
+ ndbrequire((tcConnectptr.p->transactionState == TcConnectionrec::WAIT_ACC_ABORT) ||
+ (tcConnectptr.p->transactionState == TcConnectionrec::ABORT_STOPPED) ||
+ (tcConnectptr.p->transactionState == TcConnectionrec::ABORT_QUEUED));
+ return;
+ break;
+ case 4:
+ jam();
+ if(tabptr.p->tableStatus == Tablerec::NOT_DEFINED){
+ jam();
+ terrorCode = ZTABLE_NOT_DEFINED;
+ } else if (tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING ||
+ tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
+ jam();
+ terrorCode = ZDROP_TABLE_IN_PROGRESS;
+ } else {
+ ndbrequire(0);
+ }
+ break;
+ case 5:
+ jam();
+ terrorCode = ZINVALID_SCHEMA_VERSION;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ abortErrorLab(signal);
+}//Dblqh::LQHKEY_abort()
+
+void Dblqh::LQHKEY_error(Signal* signal, int errortype)
+{
+ switch (errortype) {
+ case 0:
+ jam();
+ break;
+ case 1:
+ jam();
+ break;
+ case 2:
+ jam();
+ break;
+ case 3:
+ jam();
+ break;
+ case 4:
+ jam();
+ break;
+ case 5:
+ jam();
+ break;
+ case 6:
+ jam();
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ ndbrequire(false);
+}//Dblqh::LQHKEY_error()
+
+void Dblqh::execLQHKEYREF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ terrorCode = signal->theData[2];
+ Uint32 transid1 = signal->theData[3];
+ Uint32 transid2 = signal->theData[4];
+ if (tcConnectptr.i >= ctcConnectrecFileSize) {
+ errorReport(signal, 3);
+ return;
+ }//if
+/*------------------------------------------------------------------*/
+/* WE HAVE TO CHECK THAT THE SIGNAL DOES NOT BELONG TO SOMETHING */
+/* REMOVED DUE TO A TIME-OUT. */
+/*------------------------------------------------------------------*/
+ ptrAss(tcConnectptr, tcConnectionrec);
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ switch (regTcPtr->connectState) {
+ case TcConnectionrec::CONNECTED:
+ jam();
+ if ((regTcPtr->transid[0] != transid1) ||
+ (regTcPtr->transid[1] != transid2)) {
+ warningReport(signal, 14);
+ return;
+ }//if
+ if (regTcPtr->abortState != TcConnectionrec::ABORT_IDLE) {
+ warningReport(signal, 15);
+ return;
+ }//if
+ abortErrorLab(signal);
+ return;
+ break;
+ case TcConnectionrec::LOG_CONNECTED:
+ jam();
+ logLqhkeyrefLab(signal);
+ return;
+ break;
+ case TcConnectionrec::COPY_CONNECTED:
+ jam();
+ copyLqhKeyRefLab(signal);
+ return;
+ break;
+ default:
+ warningReport(signal, 16);
+ return;
+ break;
+ }//switch
+}//Dblqh::execLQHKEYREF()
+
+/* -------------------------------------------------------------------------- */
+/* ------- ENTER PACKED_SIGNAL ------- */
+/* Execution of packed signal. The packed signal can contain COMMIT, COMPLETE */
+/* or LQHKEYCONF signals. These signals will be executed by their resp. exec */
+/* functions. */
+/* -------------------------------------------------------------------------- */
+void Dblqh::execPACKED_SIGNAL(Signal* signal)
+{
+ Uint32 Tstep = 0;
+ Uint32 Tlength;
+ Uint32 TpackedData[28];
+ Uint32 sig0, sig1, sig2, sig3 ,sig4, sig5, sig6;
+
+ jamEntry();
+ Tlength = signal->length();
+ ndbrequire(Tlength <= 25);
+ MEMCOPY_NO_WORDS(&TpackedData[0], &signal->theData[0], Tlength);
+ while (Tlength > Tstep) {
+ switch (TpackedData[Tstep] >> 28) {
+ case ZCOMMIT:
+ jam();
+ sig0 = TpackedData[Tstep + 0] & 0x0FFFFFFF;
+ sig1 = TpackedData[Tstep + 1];
+ sig2 = TpackedData[Tstep + 2];
+ sig3 = TpackedData[Tstep + 3];
+ signal->theData[0] = sig0;
+ signal->theData[1] = sig1;
+ signal->theData[2] = sig2;
+ signal->theData[3] = sig3;
+ signal->header.theLength = 4;
+ execCOMMIT(signal);
+ Tstep += 4;
+ break;
+ case ZCOMPLETE:
+ jam();
+ sig0 = TpackedData[Tstep + 0] & 0x0FFFFFFF;
+ sig1 = TpackedData[Tstep + 1];
+ sig2 = TpackedData[Tstep + 2];
+ signal->theData[0] = sig0;
+ signal->theData[1] = sig1;
+ signal->theData[2] = sig2;
+ signal->header.theLength = 3;
+ execCOMPLETE(signal);
+ Tstep += 3;
+ break;
+ case ZLQHKEYCONF: {
+ jam();
+ LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+
+ sig0 = TpackedData[Tstep + 0] & 0x0FFFFFFF;
+ sig1 = TpackedData[Tstep + 1];
+ sig2 = TpackedData[Tstep + 2];
+ sig3 = TpackedData[Tstep + 3];
+ sig4 = TpackedData[Tstep + 4];
+ sig5 = TpackedData[Tstep + 5];
+ sig6 = TpackedData[Tstep + 6];
+ lqhKeyConf->connectPtr = sig0;
+ lqhKeyConf->opPtr = sig1;
+ lqhKeyConf->userRef = sig2;
+ lqhKeyConf->readLen = sig3;
+ lqhKeyConf->transId1 = sig4;
+ lqhKeyConf->transId2 = sig5;
+ lqhKeyConf->noFiredTriggers = sig6;
+ execLQHKEYCONF(signal);
+ Tstep += LqhKeyConf::SignalLength;
+ break;
+ }
+ case ZREMOVE_MARKER:
+ jam();
+ sig0 = TpackedData[Tstep + 1];
+ sig1 = TpackedData[Tstep + 2];
+ signal->theData[0] = sig0;
+ signal->theData[1] = sig1;
+ signal->header.theLength = 2;
+ execREMOVE_MARKER_ORD(signal);
+ Tstep += 3;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ }//switch
+ }//while
+ ndbrequire(Tlength == Tstep);
+ return;
+}//Dblqh::execPACKED_SIGNAL()
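+
+/* A minimal sketch (illustration only) of the word layout decoded by
+ * execPACKED_SIGNAL() above and built by sendCommitLqh() and friends further
+ * down: the top 4 bits of an entry's first word carry the packed signal kind
+ * (ZCOMMIT, ZCOMPLETE, ZLQHKEYCONF, ...) and the low 28 bits carry the
+ * receiver's record pointer. Helper names are local to the sketch. */
+#if 0
+static Uint32 sketch_packFirstWord(Uint32 kind, Uint32 recordPtr)
+{
+  /* recordPtr must fit in 28 bits, kind in 4 bits */
+  return (kind << 28) | (recordPtr & 0x0FFFFFFF);
+}
+
+static void sketch_unpackFirstWord(Uint32 word, Uint32 & kind, Uint32 & recordPtr)
+{
+  kind      = word >> 28;          /* switch (TpackedData[Tstep] >> 28)          */
+  recordPtr = word & 0x0FFFFFFF;   /* sig0 = TpackedData[Tstep] & 0x0FFFFFFF     */
+}
+#endif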
+
+void
+Dblqh::execREMOVE_MARKER_ORD(Signal* signal)
+{
+ CommitAckMarker key;
+ key.transid1 = signal->theData[0];
+ key.transid2 = signal->theData[1];
+ jamEntry();
+
+ CommitAckMarkerPtr removedPtr;
+ m_commitAckMarkerHash.release(removedPtr, key);
+ ndbrequire(removedPtr.i != RNIL);
+#ifdef MARKER_TRACE
+ ndbout_c("Rem marker[%.8x %.8x]", key.transid1, key.transid2);
+#endif
+}
+
+
+/* -------------------------------------------------------------------------- */
+/* ------- ENTER SEND_PACKED ------- */
+/* Used to force a packed signal to be sent if the local signal buffer is not */
+/* empty. */
+/* -------------------------------------------------------------------------- */
+void Dblqh::execSEND_PACKED(Signal* signal)
+{
+ HostRecordPtr Thostptr;
+ UintR i;
+ UintR TpackedListIndex = cpackedListIndex;
+ jamEntry();
+ for (i = 0; i < TpackedListIndex; i++) {
+ Thostptr.i = cpackedList[i];
+ ptrAss(Thostptr, hostRecord);
+ jam();
+ ndbrequire(Thostptr.i - 1 < MAX_NDB_NODES - 1);
+ if (Thostptr.p->noOfPackedWordsLqh > 0) {
+ jam();
+ sendPackedSignalLqh(signal, Thostptr.p);
+ }//if
+ if (Thostptr.p->noOfPackedWordsTc > 0) {
+ jam();
+ sendPackedSignalTc(signal, Thostptr.p);
+ }//if
+ Thostptr.p->inPackedList = false;
+ }//for
+ cpackedListIndex = 0;
+ return;
+}//Dblqh::execSEND_PACKED()
+
+void
+Dblqh::updatePackedList(Signal* signal, HostRecord * ahostptr, Uint16 hostId)
+{
+ Uint32 TpackedListIndex = cpackedListIndex;
+ if (ahostptr->inPackedList == false) {
+ jam();
+ ahostptr->inPackedList = true;
+ cpackedList[TpackedListIndex] = hostId;
+ cpackedListIndex = TpackedListIndex + 1;
+ }//if
+}//Dblqh::updatePackedList()
+
+void
+Dblqh::execREAD_PSEUDO_REQ(Signal* signal){
+ jamEntry();
+ TcConnectionrecPtr regTcPtr;
+ regTcPtr.i = signal->theData[0];
+ ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec);
+
+ if(signal->theData[1] != AttributeHeader::RANGE_NO)
+ {
+ jam();
+ FragrecordPtr regFragptr;
+ regFragptr.i = regTcPtr.p->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+
+ signal->theData[0] = regFragptr.p->accFragptr[regTcPtr.p->localFragptr];
+ EXECUTE_DIRECT(DBACC, GSN_READ_PSEUDO_REQ, signal, 2);
+ }
+ else
+ {
+ signal->theData[0] = regTcPtr.p->m_scan_curr_range_no;
+ }
+}
+
+/* ************>> */
+/* TUPKEYCONF > */
+/* ************>> */
+void Dblqh::execTUPKEYCONF(Signal* signal)
+{
+ TcConnectionrec *regTcConnectionrec = tcConnectionrec;
+ Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
+ const TupKeyConf * const tupKeyConf = (TupKeyConf *)signal->getDataPtr();
+ Uint32 tcIndex = tupKeyConf->userPtr;
+ jamEntry();
+ tcConnectptr.i = tcIndex;
+ ptrCheckGuard(tcConnectptr, ttcConnectrecFileSize, regTcConnectionrec);
+ switch (tcConnectptr.p->transactionState) {
+ case TcConnectionrec::WAIT_TUP:
+ jam();
+ if (tcConnectptr.p->seqNoReplica == 0) // Primary replica
+ tcConnectptr.p->noFiredTriggers = tupKeyConf->noFiredTriggers;
+ tupkeyConfLab(signal);
+ break;
+ case TcConnectionrec::COPY_TUPKEY:
+ jam();
+ copyTupkeyConfLab(signal);
+ break;
+ case TcConnectionrec::SCAN_TUPKEY:
+ jam();
+ scanTupkeyConfLab(signal);
+ break;
+ case TcConnectionrec::WAIT_TUP_TO_ABORT:
+ jam();
+/* ------------------------------------------------------------------------- */
+// Abort was not ready to start until this signal came back. Now we are ready
+// to start the abort.
+/* ------------------------------------------------------------------------- */
+ releaseActiveFrag(signal);
+ abortCommonLab(signal);
+ break;
+ case TcConnectionrec::WAIT_ACC_ABORT:
+ case TcConnectionrec::ABORT_QUEUED:
+ jam();
+/* -------------------------------------------------------------------------- */
+/* IGNORE SINCE ABORT OF THIS OPERATION IS ONGOING ALREADY. */
+/* -------------------------------------------------------------------------- */
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dblqh::execTUPKEYCONF()
+
+/* ************> */
+/* TUPKEYREF > */
+/* ************> */
+void Dblqh::execTUPKEYREF(Signal* signal)
+{
+ const TupKeyRef * const tupKeyRef = (TupKeyRef *)signal->getDataPtr();
+
+ jamEntry();
+ tcConnectptr.i = tupKeyRef->userRef;
+ terrorCode = tupKeyRef->errorCode;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ switch (tcConnectptr.p->transactionState) {
+ case TcConnectionrec::WAIT_TUP:
+ jam();
+ releaseActiveFrag(signal);
+ abortErrorLab(signal);
+ break;
+ case TcConnectionrec::COPY_TUPKEY:
+ ndbrequire(false);
+ break;
+ case TcConnectionrec::SCAN_TUPKEY:
+ jam();
+ scanTupkeyRefLab(signal);
+ break;
+ case TcConnectionrec::WAIT_TUP_TO_ABORT:
+ jam();
+/* ------------------------------------------------------------------------- */
+// Abort was not ready to start until this signal came back. Now we are ready
+// to start the abort.
+/* ------------------------------------------------------------------------- */
+ releaseActiveFrag(signal);
+ abortCommonLab(signal);
+ break;
+ case TcConnectionrec::WAIT_ACC_ABORT:
+ case TcConnectionrec::ABORT_QUEUED:
+ jam();
+/* ------------------------------------------------------------------------- */
+/* IGNORE SINCE ABORT OF THIS OPERATION IS ONGOING ALREADY. */
+/* ------------------------------------------------------------------------- */
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dblqh::execTUPKEYREF()
+
+void Dblqh::sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr)
+{
+ Uint32 noOfWords = ahostptr->noOfPackedWordsLqh;
+ BlockReference hostRef = ahostptr->hostLqhBlockRef;
+ MEMCOPY_NO_WORDS(&signal->theData[0],
+ &ahostptr->packedWordsLqh[0],
+ noOfWords);
+ sendSignal(hostRef, GSN_PACKED_SIGNAL, signal, noOfWords, JBB);
+ ahostptr->noOfPackedWordsLqh = 0;
+}//Dblqh::sendPackedSignalLqh()
+
+void Dblqh::sendPackedSignalTc(Signal* signal, HostRecord * ahostptr)
+{
+ Uint32 noOfWords = ahostptr->noOfPackedWordsTc;
+ BlockReference hostRef = ahostptr->hostTcBlockRef;
+ MEMCOPY_NO_WORDS(&signal->theData[0],
+ &ahostptr->packedWordsTc[0],
+ noOfWords);
+ sendSignal(hostRef, GSN_PACKED_SIGNAL, signal, noOfWords, JBB);
+ ahostptr->noOfPackedWordsTc = 0;
+}//Dblqh::sendPackedSignalTc()
+
+void Dblqh::sendCommitLqh(Signal* signal, BlockReference alqhBlockref)
+{
+ HostRecordPtr Thostptr;
+ Thostptr.i = refToNode(alqhBlockref);
+ ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
+ if (Thostptr.p->noOfPackedWordsLqh > 21) {
+ jam();
+ sendPackedSignalLqh(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+ Uint32 pos = Thostptr.p->noOfPackedWordsLqh;
+ Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMMIT << 28);
+ Uint32 gci = tcConnectptr.p->gci;
+ Uint32 transid1 = tcConnectptr.p->transid[0];
+ Uint32 transid2 = tcConnectptr.p->transid[1];
+ Thostptr.p->packedWordsLqh[pos] = ptrAndType;
+ Thostptr.p->packedWordsLqh[pos + 1] = gci;
+ Thostptr.p->packedWordsLqh[pos + 2] = transid1;
+ Thostptr.p->packedWordsLqh[pos + 3] = transid2;
+ Thostptr.p->noOfPackedWordsLqh = pos + 4;
+}//Dblqh::sendCommitLqh()
+
+void Dblqh::sendCompleteLqh(Signal* signal, BlockReference alqhBlockref)
+{
+ HostRecordPtr Thostptr;
+ Thostptr.i = refToNode(alqhBlockref);
+ ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
+ if (Thostptr.p->noOfPackedWordsLqh > 22) {
+ jam();
+ sendPackedSignalLqh(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+ Uint32 pos = Thostptr.p->noOfPackedWordsLqh;
+ Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMPLETE << 28);
+ Uint32 transid1 = tcConnectptr.p->transid[0];
+ Uint32 transid2 = tcConnectptr.p->transid[1];
+ Thostptr.p->packedWordsLqh[pos] = ptrAndType;
+ Thostptr.p->packedWordsLqh[pos + 1] = transid1;
+ Thostptr.p->packedWordsLqh[pos + 2] = transid2;
+ Thostptr.p->noOfPackedWordsLqh = pos + 3;
+}//Dblqh::sendCompleteLqh()
+
+void Dblqh::sendCommittedTc(Signal* signal, BlockReference atcBlockref)
+{
+ HostRecordPtr Thostptr;
+ Thostptr.i = refToNode(atcBlockref);
+ ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
+ if (Thostptr.p->noOfPackedWordsTc > 22) {
+ jam();
+ sendPackedSignalTc(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+ Uint32 pos = Thostptr.p->noOfPackedWordsTc;
+ Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMMITTED << 28);
+ Uint32 transid1 = tcConnectptr.p->transid[0];
+ Uint32 transid2 = tcConnectptr.p->transid[1];
+ Thostptr.p->packedWordsTc[pos] = ptrAndType;
+ Thostptr.p->packedWordsTc[pos + 1] = transid1;
+ Thostptr.p->packedWordsTc[pos + 2] = transid2;
+ Thostptr.p->noOfPackedWordsTc = pos + 3;
+}//Dblqh::sendCommittedTc()
+
+void Dblqh::sendCompletedTc(Signal* signal, BlockReference atcBlockref)
+{
+ HostRecordPtr Thostptr;
+ Thostptr.i = refToNode(atcBlockref);
+ ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
+ if (Thostptr.p->noOfPackedWordsTc > 22) {
+ jam();
+ sendPackedSignalTc(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+ Uint32 pos = Thostptr.p->noOfPackedWordsTc;
+ Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMPLETED << 28);
+ Uint32 transid1 = tcConnectptr.p->transid[0];
+ Uint32 transid2 = tcConnectptr.p->transid[1];
+ Thostptr.p->packedWordsTc[pos] = ptrAndType;
+ Thostptr.p->packedWordsTc[pos + 1] = transid1;
+ Thostptr.p->packedWordsTc[pos + 2] = transid2;
+ Thostptr.p->noOfPackedWordsTc = pos + 3;
+}//Dblqh::sendCompletedTc()
+
+void Dblqh::sendLqhkeyconfTc(Signal* signal, BlockReference atcBlockref)
+{
+ LqhKeyConf* lqhKeyConf;
+ HostRecordPtr Thostptr;
+
+ Thostptr.i = refToNode(atcBlockref);
+ ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
+ if (refToBlock(atcBlockref) == DBTC) {
+ jam();
+/*******************************************************************
+// This signal was intended for DBTC as part of the normal transaction
+// execution.
+********************************************************************/
+ if (Thostptr.p->noOfPackedWordsTc > (25 - LqhKeyConf::SignalLength)) {
+ jam();
+ sendPackedSignalTc(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+ lqhKeyConf = (LqhKeyConf *)
+ &Thostptr.p->packedWordsTc[Thostptr.p->noOfPackedWordsTc];
+ Thostptr.p->noOfPackedWordsTc += LqhKeyConf::SignalLength;
+ } else {
+ jam();
+/*******************************************************************
+// This signal was intended for DBLQH as part of log execution or
+// node recovery.
+********************************************************************/
+ if (Thostptr.p->noOfPackedWordsLqh > (25 - LqhKeyConf::SignalLength)) {
+ jam();
+ sendPackedSignalLqh(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+ lqhKeyConf = (LqhKeyConf *)
+ &Thostptr.p->packedWordsLqh[Thostptr.p->noOfPackedWordsLqh];
+ Thostptr.p->noOfPackedWordsLqh += LqhKeyConf::SignalLength;
+ }//if
+ Uint32 ptrAndType = tcConnectptr.i | (ZLQHKEYCONF << 28);
+ Uint32 tcOprec = tcConnectptr.p->tcOprec;
+ Uint32 ownRef = cownref;
+ Uint32 readlenAi = tcConnectptr.p->readlenAi;
+ Uint32 transid1 = tcConnectptr.p->transid[0];
+ Uint32 transid2 = tcConnectptr.p->transid[1];
+ Uint32 noFiredTriggers = tcConnectptr.p->noFiredTriggers;
+ lqhKeyConf->connectPtr = ptrAndType;
+ lqhKeyConf->opPtr = tcOprec;
+ lqhKeyConf->userRef = ownRef;
+ lqhKeyConf->readLen = readlenAi;
+ lqhKeyConf->transId1 = transid1;
+ lqhKeyConf->transId2 = transid2;
+ lqhKeyConf->noFiredTriggers = noFiredTriggers;
+}//Dblqh::sendLqhkeyconfTc()
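+
+/* A minimal sketch (illustration only) of the buffering rule shared by
+ * sendCommitLqh(), sendCompleteLqh(), sendCommittedTc(), sendCompletedTc()
+ * and sendLqhkeyconfTc() above: a packed signal carries at most 25 words, so
+ * the pending buffer is flushed first whenever appending the next entry
+ * would overflow it; otherwise the host is merely registered for the next
+ * SEND_PACKED round. Names are local to the sketch. */
+#if 0
+static const Uint32 SKETCH_MAX_PACKED_WORDS = 25;
+
+static Uint32 sketch_reserve(Uint32 & wordsInBuffer, Uint32 entryLength,
+                             bool & mustFlushFirst)
+{
+  mustFlushFirst = (wordsInBuffer + entryLength > SKETCH_MAX_PACKED_WORDS);
+  if (mustFlushFirst)
+    wordsInBuffer = 0;              /* caller sends the buffered words first */
+  const Uint32 writePos = wordsInBuffer;
+  wordsInBuffer += entryLength;
+  return writePos;                  /* the new entry is written at this position */
+}
+#endif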
+
+/* ************************************************************************>>
+ * KEYINFO: Key data for a tuple request from DBTC. When all key and attribute
+ * info has been received the next step is to contact DBACC to locate the
+ * tuple; otherwise we wait for further KEYINFO/ATTRINFO signals.
+ * ************************************************************************>> */
+void Dblqh::execKEYINFO(Signal* signal)
+{
+ Uint32 tcOprec = signal->theData[0];
+ Uint32 transid1 = signal->theData[1];
+ Uint32 transid2 = signal->theData[2];
+ jamEntry();
+ if (findTransaction(transid1, transid2, tcOprec) != ZOK) {
+ jam();
+ return;
+ }//if
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ TcConnectionrec::TransactionState state = regTcPtr->transactionState;
+ if (state != TcConnectionrec::WAIT_TUPKEYINFO &&
+ state != TcConnectionrec::WAIT_SCAN_AI)
+ {
+ jam();
+/*****************************************************************************/
+/* TRANSACTION WAS ABORTED, THIS IS MOST LIKELY A SIGNAL BELONGING TO THE */
+/* ABORTED TRANSACTION. THUS IGNORE THE SIGNAL. */
+/*****************************************************************************/
+ return;
+ }//if
+ Uint32 errorCode = handleLongTupKey(signal,
+ (Uint32)regTcPtr->save1,
+ (Uint32)regTcPtr->primKeyLen,
+ &signal->theData[3]);
+ if (errorCode != 0) {
+ if (errorCode == 1) {
+ jam();
+ return;
+ }//if
+ jam();
+ terrorCode = errorCode;
+ if(state == TcConnectionrec::WAIT_TUPKEYINFO)
+ abortErrorLab(signal);
+ else
+ abort_scan(signal, regTcPtr->tcScanRec, errorCode);
+ return;
+ }//if
+ if(state == TcConnectionrec::WAIT_TUPKEYINFO)
+ {
+ FragrecordPtr regFragptr;
+ regFragptr.i = regTcPtr->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+ fragptr = regFragptr;
+ endgettupkeyLab(signal);
+ }
+ return;
+}//Dblqh::execKEYINFO()
+
+/* ------------------------------------------------------------------------- */
+/* FILL IN KEY DATA INTO DATA BUFFERS. */
+/* ------------------------------------------------------------------------- */
+Uint32 Dblqh::handleLongTupKey(Signal* signal,
+ Uint32 keyLength,
+ Uint32 primKeyLength,
+ Uint32* dataPtr)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Uint32 dataPos = 0;
+ while (true) {
+ keyLength += 4;
+ if (cfirstfreeDatabuf == RNIL) {
+ jam();
+ return ZGET_DATAREC_ERROR;
+ }//if
+ seizeTupkeybuf(signal);
+ Databuf * const regDataPtr = databufptr.p;
+ Uint32 data0 = dataPtr[dataPos];
+ Uint32 data1 = dataPtr[dataPos + 1];
+ Uint32 data2 = dataPtr[dataPos + 2];
+ Uint32 data3 = dataPtr[dataPos + 3];
+ regDataPtr->data[0] = data0;
+ regDataPtr->data[1] = data1;
+ regDataPtr->data[2] = data2;
+ regDataPtr->data[3] = data3;
+ dataPos += 4;
+ if (keyLength < primKeyLength) {
+ if (dataPos > 16) {
+ jam();
+/* SAVE STATE AND WAIT FOR KEYINFO */
+ regTcPtr->save1 = keyLength;
+ return 1;
+ }//if
+ } else {
+ jam();
+ return 0;
+ }//if
+ }//while
+}//Dblqh::handleLongTupKey()
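+
+/* A minimal sketch (illustration only) of the chunking performed by
+ * handleLongTupKey() above: the first 4 key words travel in the LQHKEYREQ
+ * signal itself (save1 starts at 4), the remaining words arrive in KEYINFO
+ * signals of at most 20 key words each (the dataPos > 16 check above), and
+ * each signal's words are copied into fixed 4-word data buffers. Names are
+ * local to the sketch. */
+#if 0
+static const Uint32 SKETCH_WORDS_PER_BUF    = 4;
+static const Uint32 SKETCH_WORDS_PER_SIGNAL = 20;
+
+/* Number of 4-word data buffers needed for the key words beyond the first 4 */
+static Uint32 sketch_buffersForKey(Uint32 primKeyLen)
+{
+  if (primKeyLen <= 4)
+    return 0;
+  const Uint32 remaining = primKeyLen - 4;
+  return (remaining + SKETCH_WORDS_PER_BUF - 1) / SKETCH_WORDS_PER_BUF;
+}
+
+/* Number of KEYINFO signals needed for the key words beyond the first 4 */
+static Uint32 sketch_keyinfoSignalsForKey(Uint32 primKeyLen)
+{
+  if (primKeyLen <= 4)
+    return 0;
+  const Uint32 remaining = primKeyLen - 4;
+  return (remaining + SKETCH_WORDS_PER_SIGNAL - 1) / SKETCH_WORDS_PER_SIGNAL;
+}
+#endif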
+
+/* ------------------------------------------------------------------------- */
+/* ------- HANDLE ATTRINFO SIGNALS ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+/* ************************************************************************>> */
+/* ATTRINFO: Continuation of KEYINFO signal (except for scans that do not use*/
+/* any KEYINFO). When all key and attribute info is received we contact DBACC*/
+/* for index handling. */
+/* ************************************************************************>> */
+void Dblqh::execATTRINFO(Signal* signal)
+{
+ Uint32 tcOprec = signal->theData[0];
+ Uint32 transid1 = signal->theData[1];
+ Uint32 transid2 = signal->theData[2];
+ jamEntry();
+ if (findTransaction(transid1,
+ transid2,
+ tcOprec) != ZOK) {
+ jam();
+ return;
+ }//if
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Uint32 length = signal->length() - 3;
+ Uint32 totReclenAi = regTcPtr->totReclenAi;
+ Uint32 currReclenAi = regTcPtr->currReclenAi + length;
+ Uint32* dataPtr = &signal->theData[3];
+ regTcPtr->currReclenAi = currReclenAi;
+ if (totReclenAi == currReclenAi) {
+ switch (regTcPtr->transactionState) {
+ case TcConnectionrec::WAIT_ATTR:
+ {
+ Fragrecord *regFragrecord = fragrecord;
+ Uint32 fragIndex = regTcPtr->fragmentptr;
+ Uint32 tfragrecFileSize = cfragrecFileSize;
+ jam();
+ fragptr.i = fragIndex;
+ ptrCheckGuard(fragptr, tfragrecFileSize, regFragrecord);
+ lqhAttrinfoLab(signal, dataPtr, length);
+ endgettupkeyLab(signal);
+ return;
+ break;
+ }
+ case TcConnectionrec::WAIT_SCAN_AI:
+ jam();
+ scanAttrinfoLab(signal, dataPtr, length);
+ return;
+ break;
+ case TcConnectionrec::WAIT_TUP_TO_ABORT:
+ case TcConnectionrec::LOG_ABORT_QUEUED:
+ case TcConnectionrec::ABORT_QUEUED:
+ case TcConnectionrec::ABORT_STOPPED:
+ case TcConnectionrec::WAIT_ACC_ABORT:
+ case TcConnectionrec::WAIT_AI_AFTER_ABORT:
+ jam();
+ aiStateErrorCheckLab(signal, dataPtr,length);
+ return;
+ break;
+ default:
+ jam();
+ ndbrequire(regTcPtr->abortState != TcConnectionrec::ABORT_IDLE);
+ break;
+ }//switch
+ } else if (currReclenAi < totReclenAi) {
+ jam();
+ switch (regTcPtr->transactionState) {
+ case TcConnectionrec::WAIT_ATTR:
+ jam();
+ lqhAttrinfoLab(signal, dataPtr, length);
+ return;
+ break;
+ case TcConnectionrec::WAIT_SCAN_AI:
+ jam();
+ scanAttrinfoLab(signal, dataPtr, length);
+ return;
+ break;
+ case TcConnectionrec::WAIT_TUP_TO_ABORT:
+ case TcConnectionrec::LOG_ABORT_QUEUED:
+ case TcConnectionrec::ABORT_QUEUED:
+ case TcConnectionrec::ABORT_STOPPED:
+ case TcConnectionrec::WAIT_ACC_ABORT:
+ case TcConnectionrec::WAIT_AI_AFTER_ABORT:
+ jam();
+ aiStateErrorCheckLab(signal, dataPtr, length);
+ return;
+ break;
+ default:
+ jam();
+ ndbrequire(regTcPtr->abortState != TcConnectionrec::ABORT_IDLE);
+ break;
+ }//switch
+ } else {
+ switch (regTcPtr->transactionState) {
+ case TcConnectionrec::WAIT_SCAN_AI:
+ jam();
+ scanAttrinfoLab(signal, dataPtr, length);
+ return;
+ break;
+ default:
+ ndbout_c("%d", regTcPtr->transactionState);
+ ndbrequire(false);
+ break;
+ }//switch
+ }//if
+ return;
+}//Dblqh::execATTRINFO()
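+
+/* A minimal sketch (illustration only) of the length bookkeeping in
+ * execATTRINFO() above: each ATTRINFO signal carries a 3-word header followed
+ * by attribute data, and reception is complete exactly when the accumulated
+ * data length reaches the total announced in the LQHKEYREQ. Names are local
+ * to the sketch. */
+#if 0
+enum SketchAiStatus {
+  SKETCH_AI_MORE_EXPECTED,   /* currReclenAi < totReclenAi              */
+  SKETCH_AI_COMPLETE,        /* currReclenAi == totReclenAi             */
+  SKETCH_AI_OVERFLOW         /* only tolerated for scan operations      */
+};
+
+static SketchAiStatus sketch_accumulateAi(Uint32 & currReclenAi,
+                                          Uint32 totReclenAi,
+                                          Uint32 signalLength)
+{
+  const Uint32 dataWords = signalLength - 3;   /* strip the 3-word header */
+  currReclenAi += dataWords;
+  if (currReclenAi == totReclenAi)
+    return SKETCH_AI_COMPLETE;
+  if (currReclenAi < totReclenAi)
+    return SKETCH_AI_MORE_EXPECTED;
+  return SKETCH_AI_OVERFLOW;
+}
+#endif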
+
+/* ************************************************************************>> */
+/* TUP_ATTRINFO: Interpreted execution in DBTUP generates redo-log info */
+/* which is sent back to DBLQH for logging. This is because the decision */
+/* to execute or not is made in DBTUP and thus we cannot start logging until */
+/* DBTUP part has been run. */
+/* ************************************************************************>> */
+void Dblqh::execTUP_ATTRINFO(Signal* signal)
+{
+ TcConnectionrec *regTcConnectionrec = tcConnectionrec;
+ Uint32 length = signal->length() - 3;
+ Uint32 tcIndex = signal->theData[0];
+ Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
+ jamEntry();
+ tcConnectptr.i = tcIndex;
+ ptrCheckGuard(tcConnectptr, ttcConnectrecFileSize, regTcConnectionrec);
+ ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::WAIT_TUP);
+ if (saveTupattrbuf(signal, &signal->theData[3], length) == ZOK) {
+ return;
+ } else {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* WE ARE WAITING FOR RESPONSE FROM TUP HERE. THUS WE NEED TO */
+/* GO THROUGH THE STATE MACHINE FOR THE OPERATION. */
+/* ------------------------------------------------------------------------- */
+ localAbortStateHandlerLab(signal);
+ }//if
+}//Dblqh::execTUP_ATTRINFO()
+
+/* ------------------------------------------------------------------------- */
+/* ------- HANDLE ATTRINFO FROM LQH ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::lqhAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->operation != ZREAD) {
+ if (regTcPtr->opExec != 1) {
+ if (saveTupattrbuf(signal, dataPtr, length) == ZOK) {
+ ;
+ } else {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* WE MIGHT BE WAITING FOR RESPONSE FROM SOME BLOCK HERE. THUS WE NEED TO */
+/* GO THROUGH THE STATE MACHINE FOR THE OPERATION. */
+/* ------------------------------------------------------------------------- */
+ localAbortStateHandlerLab(signal);
+ return;
+ }//if
+ }//if
+ }//if
+ Uint32 sig0 = regTcPtr->tupConnectrec;
+ Uint32 blockNo = refToBlock(regTcPtr->tcTupBlockref);
+ signal->theData[0] = sig0;
+ EXECUTE_DIRECT(blockNo, GSN_ATTRINFO, signal, length + 3);
+ jamEntry();
+}//Dblqh::lqhAttrinfoLab()
+
+/* ------------------------------------------------------------------------- */
+/* ------ FIND TRANSACTION BY USING HASH TABLE ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+int Dblqh::findTransaction(UintR Transid1, UintR Transid2, UintR TcOprec)
+{
+ TcConnectionrec *regTcConnectionrec = tcConnectionrec;
+ Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
+ TcConnectionrecPtr locTcConnectptr;
+
+ Uint32 ThashIndex = (Transid1 ^ TcOprec) & 1023;
+ locTcConnectptr.i = ctransidHash[ThashIndex];
+ while (locTcConnectptr.i != RNIL) {
+ ptrCheckGuard(locTcConnectptr, ttcConnectrecFileSize, regTcConnectionrec);
+ if ((locTcConnectptr.p->transid[0] == Transid1) &&
+ (locTcConnectptr.p->transid[1] == Transid2) &&
+ (locTcConnectptr.p->tcOprec == TcOprec)) {
+/* FIRST PART OF TRANSACTION CORRECT */
+/* SECOND PART ALSO CORRECT */
+/* THE OPERATION RECORD POINTER IN TC WAS ALSO CORRECT */
+ jam();
+ tcConnectptr.i = locTcConnectptr.i;
+ tcConnectptr.p = locTcConnectptr.p;
+ return (int)ZOK;
+ }//if
+ jam();
+/* THIS WAS NOT THE TRANSACTION WHICH WAS SOUGHT */
+ locTcConnectptr.i = locTcConnectptr.p->nextHashRec;
+ }//while
+/* WE DID NOT FIND THE TRANSACTION, REPORT NOT FOUND */
+ return (int)ZNOT_FOUND;
+}//Dblqh::findTransaction()
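+
+/* A minimal sketch (illustration only) of the lookup scheme used by
+ * findTransaction() above: the bucket is chosen by XOR-ing the first
+ * transaction id word with the TC operation record pointer, masked to a
+ * 1024-entry hash table, and the chain hanging off the bucket is walked until
+ * all three key fields match. Types and names are local to the sketch; the
+ * real code works on record indexes, not raw pointers. */
+#if 0
+struct SketchOp {
+  Uint32 transid[2];
+  Uint32 tcOprec;
+  SketchOp * nextHash;
+};
+
+static SketchOp * sketch_find(SketchOp * buckets[1024],
+                              Uint32 transid1, Uint32 transid2, Uint32 tcOprec)
+{
+  const Uint32 bucket = (transid1 ^ tcOprec) & 1023;
+  for (SketchOp * op = buckets[bucket]; op != 0; op = op->nextHash) {
+    if (op->transid[0] == transid1 &&
+        op->transid[1] == transid2 &&
+        op->tcOprec == tcOprec)
+      return op;
+  }
+  return 0;   /* ZNOT_FOUND in the real code */
+}
+#endif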
+
+/* ------------------------------------------------------------------------- */
+/* ------- SAVE ATTRINFO FROM TUP IN ATTRINBUF ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+int Dblqh::saveTupattrbuf(Signal* signal, Uint32* dataPtr, Uint32 length)
+{
+ Uint32 tfirstfreeAttrinbuf = cfirstfreeAttrinbuf;
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Uint32 currTupAiLen = regTcPtr->currTupAiLen;
+ if (tfirstfreeAttrinbuf == RNIL) {
+ jam();
+ terrorCode = ZGET_ATTRINBUF_ERROR;
+ return ZGET_ATTRINBUF_ERROR;
+ }//if
+ seizeAttrinbuf(signal);
+ Attrbuf * const regAttrPtr = attrinbufptr.p;
+ MEMCOPY_NO_WORDS(&regAttrPtr->attrbuf[0], dataPtr, length);
+ regTcPtr->currTupAiLen = currTupAiLen + length;
+ regAttrPtr->attrbuf[ZINBUF_DATA_LEN] = length;
+ return ZOK;
+}//Dblqh::saveTupattrbuf()
+
+/* ==========================================================================
+ * ======= SEIZE ATTRIBUTE IN BUFFER =======
+ *
+ * GET A NEW ATTRINBUF AND SETS ATTRINBUFPTR.
+ * ========================================================================= */
+void Dblqh::seizeAttrinbuf(Signal* signal)
+{
+ AttrbufPtr tmpAttrinbufptr;
+ AttrbufPtr regAttrinbufptr;
+ Attrbuf *regAttrbuf = attrbuf;
+ Uint32 tattrinbufFileSize = cattrinbufFileSize;
+
+ regAttrinbufptr.i = seize_attrinbuf();
+ tmpAttrinbufptr.i = tcConnectptr.p->lastAttrinbuf;
+ ptrCheckGuard(regAttrinbufptr, tattrinbufFileSize, regAttrbuf);
+ tcConnectptr.p->lastAttrinbuf = regAttrinbufptr.i;
+ regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN] = 0;
+ if (tmpAttrinbufptr.i == RNIL) {
+ jam();
+ tcConnectptr.p->firstAttrinbuf = regAttrinbufptr.i;
+ } else {
+ jam();
+ ptrCheckGuard(tmpAttrinbufptr, tattrinbufFileSize, regAttrbuf);
+ tmpAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = regAttrinbufptr.i;
+ }//if
+ regAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = RNIL;
+ attrinbufptr = regAttrinbufptr;
+}//Dblqh::seizeAttrinbuf()
+
+/* ==========================================================================
+ * ======= SEIZE TC CONNECT RECORD =======
+ *
+ * GETS A NEW TC CONNECT RECORD FROM FREELIST.
+ * ========================================================================= */
+void Dblqh::seizeTcrec()
+{
+ TcConnectionrecPtr locTcConnectptr;
+
+ locTcConnectptr.i = cfirstfreeTcConrec;
+ ptrCheckGuard(locTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ Uint32 nextTc = locTcConnectptr.p->nextTcConnectrec;
+ locTcConnectptr.p->nextTcConnectrec = RNIL;
+ locTcConnectptr.p->clientConnectrec = RNIL;
+ locTcConnectptr.p->clientBlockref = RNIL;
+ locTcConnectptr.p->abortState = TcConnectionrec::ABORT_IDLE;
+ locTcConnectptr.p->tcTimer = cLqhTimeOutCount;
+ locTcConnectptr.p->tableref = RNIL;
+ locTcConnectptr.p->savePointId = 0;
+ cfirstfreeTcConrec = nextTc;
+ tcConnectptr = locTcConnectptr;
+ locTcConnectptr.p->connectState = TcConnectionrec::CONNECTED;
+}//Dblqh::seizeTcrec()
+
+/* ==========================================================================
+ * ======= SEIZE DATA BUFFER =======
+ * ========================================================================= */
+void Dblqh::seizeTupkeybuf(Signal* signal)
+{
+ Databuf *regDatabuf = databuf;
+ DatabufPtr tmpDatabufptr;
+ DatabufPtr regDatabufptr;
+ Uint32 tdatabufFileSize = cdatabufFileSize;
+
+/* ------- GET A DATABUF. ------- */
+ regDatabufptr.i = cfirstfreeDatabuf;
+ tmpDatabufptr.i = tcConnectptr.p->lastTupkeybuf;
+ ptrCheckGuard(regDatabufptr, tdatabufFileSize, regDatabuf);
+ Uint32 nextFirst = regDatabufptr.p->nextDatabuf;
+ tcConnectptr.p->lastTupkeybuf = regDatabufptr.i;
+ if (tmpDatabufptr.i == RNIL) {
+ jam();
+ tcConnectptr.p->firstTupkeybuf = regDatabufptr.i;
+ } else {
+ jam();
+ ptrCheckGuard(tmpDatabufptr, tdatabufFileSize, regDatabuf);
+ tmpDatabufptr.p->nextDatabuf = regDatabufptr.i;
+ }//if
+ cfirstfreeDatabuf = nextFirst;
+ regDatabufptr.p->nextDatabuf = RNIL;
+ databufptr = regDatabufptr;
+}//Dblqh::seizeTupkeybuf()
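+
+/* A minimal sketch (illustration only) of the seize-and-append pattern shared
+ * by seizeAttrinbuf() and seizeTupkeybuf() above: a record is unlinked from
+ * the head of a global free list and appended to the operation's chain via
+ * its first/last pointers. Types and names are local to the sketch; the real
+ * code works on record indexes, not raw pointers. */
+#if 0
+struct SketchBuf {
+  SketchBuf * next;
+};
+
+struct SketchChain {
+  SketchBuf * first;
+  SketchBuf * last;
+};
+
+static SketchBuf * sketch_seizeAndAppend(SketchBuf *& freeListHead,
+                                         SketchChain & chain)
+{
+  if (freeListHead == 0)
+    return 0;                       /* ZGET_DATAREC_ERROR in the real code */
+  SketchBuf * const buf = freeListHead;
+  freeListHead = buf->next;         /* unlink from the free list           */
+  buf->next = 0;
+  if (chain.first == 0)
+    chain.first = buf;              /* first buffer of this operation      */
+  else
+    chain.last->next = buf;         /* append after the current last       */
+  chain.last = buf;
+  return buf;
+}
+#endif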
+
+/* ------------------------------------------------------------------------- */
+/* ------- TAKE CARE OF LQHKEYREQ ------- */
+/* LQHKEYREQ IS THE SIGNAL THAT STARTS ALL OPERATIONS IN THE LQH BLOCK */
+/* THIS SIGNAL CONTAINS A LOT OF INFORMATION ABOUT WHAT TYPE OF OPERATION, */
+/* KEY INFORMATION, ATTRIBUTE INFORMATION, NODE INFORMATION AND A LOT MORE */
+/* ------------------------------------------------------------------------- */
+void Dblqh::execLQHKEYREQ(Signal* signal)
+{
+ UintR sig0, sig1, sig2, sig3, sig4, sig5;
+ Uint8 tfragDistKey;
+
+ const LqhKeyReq * const lqhKeyReq = (LqhKeyReq *)signal->getDataPtr();
+
+ sig0 = lqhKeyReq->clientConnectPtr;
+ if (cfirstfreeTcConrec != RNIL && !ERROR_INSERTED(5031)) {
+ jamEntry();
+ seizeTcrec();
+ } else {
+/* ------------------------------------------------------------------------- */
+/* NO FREE TC RECORD AVAILABLE, THUS WE CANNOT HANDLE THE REQUEST. */
+/* ------------------------------------------------------------------------- */
+ if (ERROR_INSERTED(5031)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ }
+ noFreeRecordLab(signal, lqhKeyReq, ZNO_TC_CONNECT_ERROR);
+ return;
+ }//if
+
+ if(ERROR_INSERTED(5038) &&
+ refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
+ jam();
+ SET_ERROR_INSERT_VALUE(5039);
+ return;
+ }
+
+ c_Counters.operations++;
+
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ regTcPtr->clientBlockref = signal->senderBlockRef();
+ regTcPtr->clientConnectrec = sig0;
+ regTcPtr->tcOprec = sig0;
+ regTcPtr->storedProcId = ZNIL;
+
+ UintR TtotReclenAi = lqhKeyReq->attrLen;
+ sig1 = lqhKeyReq->savePointId;
+ sig2 = lqhKeyReq->hashValue;
+ UintR Treqinfo = lqhKeyReq->requestInfo;
+ sig4 = lqhKeyReq->tableSchemaVersion;
+ sig5 = lqhKeyReq->tcBlockref;
+
+ regTcPtr->savePointId = sig1;
+ regTcPtr->hashValue = sig2;
+ const Uint32 schemaVersion = regTcPtr->schemaVersion = LqhKeyReq::getSchemaVersion(sig4);
+ tabptr.i = LqhKeyReq::getTableId(sig4);
+ regTcPtr->tcBlockref = sig5;
+
+ const Uint8 op = LqhKeyReq::getOperation(Treqinfo);
+ if (op == ZREAD && !getAllowRead()){
+ noFreeRecordLab(signal, lqhKeyReq, ZNODE_SHUTDOWN_IN_PROGESS);
+ return;
+ }
+
+ regTcPtr->totReclenAi = LqhKeyReq::getAttrLen(TtotReclenAi);
+ regTcPtr->tcScanInfo = lqhKeyReq->scanInfo;
+ regTcPtr->indTakeOver = LqhKeyReq::getScanTakeOverFlag(TtotReclenAi);
+
+ regTcPtr->readlenAi = 0;
+ regTcPtr->currTupAiLen = 0;
+ regTcPtr->listState = TcConnectionrec::NOT_IN_LIST;
+ regTcPtr->logWriteState = TcConnectionrec::NOT_STARTED;
+ regTcPtr->fragmentptr = RNIL;
+
+ sig0 = lqhKeyReq->fragmentData;
+ sig1 = lqhKeyReq->transId1;
+ sig2 = lqhKeyReq->transId2;
+ sig3 = lqhKeyReq->variableData[0];
+ sig4 = lqhKeyReq->variableData[1];
+
+ regTcPtr->fragmentid = LqhKeyReq::getFragmentId(sig0);
+ regTcPtr->nextReplica = LqhKeyReq::getNextReplicaNodeId(sig0);
+ regTcPtr->transid[0] = sig1;
+ regTcPtr->transid[1] = sig2;
+ regTcPtr->applRef = sig3;
+ regTcPtr->applOprec = sig4;
+
+ regTcPtr->commitAckMarker = RNIL;
+ if(LqhKeyReq::getMarkerFlag(Treqinfo)){
+ jam();
+
+ CommitAckMarkerPtr markerPtr;
+ m_commitAckMarkerHash.seize(markerPtr);
+ if(markerPtr.i == RNIL){
+ noFreeRecordLab(signal, lqhKeyReq, ZNO_FREE_MARKER_RECORDS_ERROR);
+ return;
+ }
+ markerPtr.p->transid1 = sig1;
+ markerPtr.p->transid2 = sig2;
+ markerPtr.p->apiRef = sig3;
+ markerPtr.p->apiOprec = sig4;
+ const NodeId tcNodeId = refToNode(sig5);
+ markerPtr.p->tcNodeId = tcNodeId;
+
+ CommitAckMarkerPtr tmp;
+#ifdef VM_TRACE
+#ifdef MARKER_TRACE
+ ndbout_c("Add marker[%.8x %.8x]", markerPtr.p->transid1, markerPtr.p->transid2);
+#endif
+ ndbrequire(!m_commitAckMarkerHash.find(tmp, * markerPtr.p));
+#endif
+ m_commitAckMarkerHash.add(markerPtr);
+ regTcPtr->commitAckMarker = markerPtr.i;
+ }
+
+ regTcPtr->reqinfo = Treqinfo;
+ regTcPtr->lastReplicaNo = LqhKeyReq::getLastReplicaNo(Treqinfo);
+ regTcPtr->lockType = LqhKeyReq::getLockType(Treqinfo);
+ regTcPtr->dirtyOp = LqhKeyReq::getDirtyFlag(Treqinfo);
+ regTcPtr->opExec = LqhKeyReq::getInterpretedFlag(Treqinfo);
+ regTcPtr->opSimple = LqhKeyReq::getSimpleFlag(Treqinfo);
+ regTcPtr->operation = LqhKeyReq::getOperation(Treqinfo);
+ regTcPtr->simpleRead = regTcPtr->operation == ZREAD && regTcPtr->opSimple;
+ regTcPtr->seqNoReplica = LqhKeyReq::getSeqNoReplica(Treqinfo);
+ UintR TreclenAiLqhkey = LqhKeyReq::getAIInLqhKeyReq(Treqinfo);
+ regTcPtr->apiVersionNo = 0;
+
+ CRASH_INSERTION2(5041, regTcPtr->simpleRead &&
+ refToNode(signal->senderBlockRef()) != cownNodeid);
+
+ regTcPtr->reclenAiLqhkey = TreclenAiLqhkey;
+ regTcPtr->currReclenAi = TreclenAiLqhkey;
+ UintR TitcKeyLen = LqhKeyReq::getKeyLen(Treqinfo);
+ regTcPtr->primKeyLen = TitcKeyLen;
+ regTcPtr->noFiredTriggers = lqhKeyReq->noFiredTriggers;
+
+ UintR TapplAddressInd = LqhKeyReq::getApplicationAddressFlag(Treqinfo);
+ UintR nextPos = (TapplAddressInd << 1);
+ UintR TsameClientAndTcOprec = LqhKeyReq::getSameClientAndTcFlag(Treqinfo);
+ if (TsameClientAndTcOprec == 1) {
+ regTcPtr->tcOprec = lqhKeyReq->variableData[nextPos];
+ nextPos++;
+ }//if
+ UintR TnextReplicasIndicator = regTcPtr->lastReplicaNo -
+ regTcPtr->seqNoReplica;
+ if (TnextReplicasIndicator > 1) {
+ regTcPtr->nodeAfterNext[0] = lqhKeyReq->variableData[nextPos] & 0xFFFF;
+ regTcPtr->nodeAfterNext[1] = lqhKeyReq->variableData[nextPos] >> 16;
+ nextPos++;
+ }//if
+ UintR TstoredProcIndicator = LqhKeyReq::getStoredProcFlag(TtotReclenAi);
+ if (TstoredProcIndicator == 1) {
+ regTcPtr->storedProcId = lqhKeyReq->variableData[nextPos] & ZNIL;
+ nextPos++;
+ }//if
+ UintR TreadLenAiIndicator = LqhKeyReq::getReturnedReadLenAIFlag(Treqinfo);
+ if (TreadLenAiIndicator == 1) {
+ regTcPtr->readlenAi = lqhKeyReq->variableData[nextPos] & ZNIL;
+ nextPos++;
+ }//if
+ sig0 = lqhKeyReq->variableData[nextPos + 0];
+ sig1 = lqhKeyReq->variableData[nextPos + 1];
+ sig2 = lqhKeyReq->variableData[nextPos + 2];
+ sig3 = lqhKeyReq->variableData[nextPos + 3];
+
+ regTcPtr->tupkeyData[0] = sig0;
+ regTcPtr->tupkeyData[1] = sig1;
+ regTcPtr->tupkeyData[2] = sig2;
+ regTcPtr->tupkeyData[3] = sig3;
+
+ if (TitcKeyLen > 0) {
+ if (TitcKeyLen < 4) {
+ nextPos += TitcKeyLen;
+ } else {
+ nextPos += 4;
+ }//if
+ } else {
+ LQHKEY_error(signal, 3);
+ return;
+ }//if
+
+ if ((LqhKeyReq::FixedSignalLength + nextPos + TreclenAiLqhkey) !=
+ signal->length()) {
+ LQHKEY_error(signal, 2);
+ return;
+ }//if
+ UintR TseqNoReplica = regTcPtr->seqNoReplica;
+ UintR TlastReplicaNo = regTcPtr->lastReplicaNo;
+ if (TseqNoReplica == TlastReplicaNo) {
+ jam();
+ regTcPtr->nextReplica = ZNIL;
+ } else {
+ if (TseqNoReplica < TlastReplicaNo) {
+ jam();
+ regTcPtr->nextSeqNoReplica = TseqNoReplica + 1;
+ if ((regTcPtr->nextReplica == 0) ||
+ (regTcPtr->nextReplica == cownNodeid)) {
+ LQHKEY_error(signal, 0);
+ }//if
+ } else {
+ LQHKEY_error(signal, 4);
+ return;
+ }//if
+ }//if
+ TcConnectionrecPtr localNextTcConnectptr;
+ Uint32 hashIndex = (regTcPtr->transid[0] ^ regTcPtr->tcOprec) & 1023;
+ localNextTcConnectptr.i = ctransidHash[hashIndex];
+ ctransidHash[hashIndex] = tcConnectptr.i;
+ regTcPtr->prevHashRec = RNIL;
+ regTcPtr->nextHashRec = localNextTcConnectptr.i;
+ if (localNextTcConnectptr.i != RNIL) {
+/* -------------------------------------------------------------------------- */
+/* ENSURE THAT THE NEXT RECORD HAS SET PREVIOUS TO OUR RECORD IF IT EXISTS */
+/* -------------------------------------------------------------------------- */
+ ptrCheckGuard(localNextTcConnectptr,
+ ctcConnectrecFileSize, tcConnectionrec);
+ jam();
+ localNextTcConnectptr.p->prevHashRec = tcConnectptr.i;
+ }//if
+ if (tabptr.i >= ctabrecFileSize) {
+ LQHKEY_error(signal, 5);
+ return;
+ }//if
+ ptrAss(tabptr, tablerec);
+ if(tabptr.p->tableStatus != Tablerec::TABLE_DEFINED){
+ LQHKEY_abort(signal, 4);
+ return;
+ }
+ if(tabptr.p->schemaVersion != schemaVersion){
+ LQHKEY_abort(signal, 5);
+ return;
+ }
+
+ regTcPtr->tableref = tabptr.i;
+ tabptr.p->usageCount++;
+
+ if (!getFragmentrec(signal, regTcPtr->fragmentid)) {
+ LQHKEY_error(signal, 6);
+ return;
+ }//if
+ regTcPtr->localFragptr = regTcPtr->hashValue & 1;
+ Uint8 TcopyType = fragptr.p->fragCopy;
+ tfragDistKey = fragptr.p->fragDistributionKey;
+ if (fragptr.p->fragStatus == Fragrecord::ACTIVE_CREATION) {
+ jam();
+ regTcPtr->activeCreat = ZTRUE;
+ CRASH_INSERTION(5002);
+ } else {
+ regTcPtr->activeCreat = ZFALSE;
+ }//if
+ regTcPtr->replicaType = TcopyType;
+ regTcPtr->fragmentptr = fragptr.i;
+ Uint8 TdistKey = LqhKeyReq::getDistributionKey(TtotReclenAi);
+ if ((tfragDistKey != TdistKey) &&
+ (regTcPtr->seqNoReplica == 0) &&
+ (regTcPtr->dirtyOp == ZFALSE) &&
+ (regTcPtr->simpleRead == ZFALSE)) {
+ /* ----------------------------------------------------------------------
+ * WE HAVE A DIFFERENT OPINION FROM THE DIH THAT STARTED THE TRANSACTION.
+ * THE REASON COULD BE THAT THIS IS AN OLD DISTRIBUTION WHICH IS NO LONGER
+ * VALID TO USE. THIS MUST BE CHECKED.
+ * ONE IS ADDED TO THE DISTRIBUTION KEY EVERY TIME WE ADD A NEW REPLICA.
+ * FAILED REPLICAS DO NOT AFFECT THE DISTRIBUTION KEY. THIS MEANS THAT THE
+ * MAXIMUM DEVIATION CAN BE ONE BETWEEN THOSE TWO VALUES.
+ * --------------------------------------------------------------------- */
+ Int32 tmp = TdistKey - tfragDistKey;
+ tmp = (tmp < 0 ? - tmp : tmp);
+ if ((tmp <= 1) || (tfragDistKey == 0)) {
+ LQHKEY_abort(signal, 0);
+ return;
+ }//if
+ LQHKEY_error(signal, 1);
+ }//if
+ if (TreclenAiLqhkey != 0) {
+ if (regTcPtr->operation != ZREAD) {
+ if (regTcPtr->operation != ZDELETE) {
+ if (regTcPtr->opExec != 1) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* */
+/* UPDATES, WRITES AND INSERTS THAT ARE NOT INTERPRETED WILL USE THE */
+/* SAME ATTRINFO IN ALL REPLICAS. THUS WE SAVE THE ATTRINFO ALREADY */
+/* TO SAVE A SIGNAL FROM TUP TO LQH. INTERPRETED EXECUTION IN TUP */
+/* WILL CREATE NEW ATTRINFO FOR THE OTHER REPLICAS AND IT IS THUS NOT */
+/* A GOOD IDEA TO SAVE THE INFORMATION HERE. READS WILL ALSO BE */
+/* UNNECESSARY TO SAVE SINCE THAT ATTRINFO WILL NEVER BE SENT TO ANY */
+/* MORE REPLICAS. */
+/*---------------------------------------------------------------------------*/
+/* READS AND DELETES CAN ONLY HAVE INFORMATION ABOUT WHAT IS TO BE READ. */
+/* NO INFORMATION THAT NEEDS LOGGING. */
+/*---------------------------------------------------------------------------*/
+ sig0 = lqhKeyReq->variableData[nextPos + 0];
+ sig1 = lqhKeyReq->variableData[nextPos + 1];
+ sig2 = lqhKeyReq->variableData[nextPos + 2];
+ sig3 = lqhKeyReq->variableData[nextPos + 3];
+ sig4 = lqhKeyReq->variableData[nextPos + 4];
+
+ regTcPtr->firstAttrinfo[0] = sig0;
+ regTcPtr->firstAttrinfo[1] = sig1;
+ regTcPtr->firstAttrinfo[2] = sig2;
+ regTcPtr->firstAttrinfo[3] = sig3;
+ regTcPtr->firstAttrinfo[4] = sig4;
+ regTcPtr->currTupAiLen = TreclenAiLqhkey;
+ } else {
+ jam();
+ regTcPtr->reclenAiLqhkey = 0;
+ }//if
+ } else {
+ jam();
+ regTcPtr->reclenAiLqhkey = 0;
+ }//if
+ }//if
+ sig0 = lqhKeyReq->variableData[nextPos + 0];
+ sig1 = lqhKeyReq->variableData[nextPos + 1];
+ sig2 = lqhKeyReq->variableData[nextPos + 2];
+ sig3 = lqhKeyReq->variableData[nextPos + 3];
+ sig4 = lqhKeyReq->variableData[nextPos + 4];
+
+ signal->theData[0] = regTcPtr->tupConnectrec;
+ signal->theData[3] = sig0;
+ signal->theData[4] = sig1;
+ signal->theData[5] = sig2;
+ signal->theData[6] = sig3;
+ signal->theData[7] = sig4;
+ EXECUTE_DIRECT(refToBlock(regTcPtr->tcTupBlockref), GSN_ATTRINFO,
+ signal, TreclenAiLqhkey + 3);
+ jamEntry();
+ if (signal->theData[0] == (UintR)-1) {
+ LQHKEY_abort(signal, 2);
+ return;
+ }//if
+ }//if
+/* ------- TAKE CARE OF PRIM KEY DATA ------- */
+ if (regTcPtr->primKeyLen <= 4) {
+ endgettupkeyLab(signal);
+ return;
+ } else {
+ jam();
+/*--------------------------------------------------------------------*/
+/* KEY LENGTH WAS MORE THAN 4 WORDS (ONE WORD = 4 BYTES). THUS WE */
+/* HAVE TO ALLOCATE A DATA BUFFER TO STORE THE KEY DATA AND */
+/* WAIT FOR THE KEYINFO SIGNAL. */
+/*--------------------------------------------------------------------*/
+ regTcPtr->save1 = 4;
+ regTcPtr->transactionState = TcConnectionrec::WAIT_TUPKEYINFO;
+ return;
+ }//if
+ return;
+}//Dblqh::execLQHKEYREQ()
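+
+/* A minimal sketch (illustration only) of the distribution-key check near the
+ * end of execLQHKEYREQ() above (applied only on the primary replica for
+ * non-dirty, non-simple-read operations): the key from DIH and the key stored
+ * in the fragment record may legally differ by at most one, since one is
+ * added per added replica. A deviation of one is a retryable "old
+ * distribution" abort; anything larger indicates an internal error. Names
+ * are local to the sketch. */
+#if 0
+enum SketchDistKeyVerdict {
+  SKETCH_DISTKEY_OK,         /* keys agree, operation proceeds         */
+  SKETCH_DISTKEY_RETRY,      /* stale distribution, abort and retry    */
+  SKETCH_DISTKEY_INTERNAL    /* deviation > 1, treated as a fatal bug  */
+};
+
+static SketchDistKeyVerdict
+sketch_checkDistKey(Uint32 keyFromDih, Uint32 keyInFragment)
+{
+  if (keyFromDih == keyInFragment)
+    return SKETCH_DISTKEY_OK;
+  const Int32 diff = (Int32)keyFromDih - (Int32)keyInFragment;
+  const Int32 absDiff = (diff < 0) ? -diff : diff;
+  if (absDiff <= 1 || keyInFragment == 0)
+    return SKETCH_DISTKEY_RETRY;
+  return SKETCH_DISTKEY_INTERNAL;
+}
+#endif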
+
+void Dblqh::endgettupkeyLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->totReclenAi == regTcPtr->currReclenAi) {
+ ;
+ } else {
+ jam();
+ ndbrequire(regTcPtr->currReclenAi < regTcPtr->totReclenAi);
+ regTcPtr->transactionState = TcConnectionrec::WAIT_ATTR;
+ return;
+ }//if
+/* ---------------------------------------------------------------------- */
+/* NOW THAT RECEPTION OF LQHKEYREQ IS COMPLETED, THE NEXT STEP IS TO START */
+/* PROCESSING THE MESSAGE. IF THE MESSAGE IS TO A STAND-BY NODE */
+/* WITHOUT NETWORK REDUNDANCY OR PREPARE-TO-COMMIT ACTIVATED THE */
+/* PREPARATION TO SEND TO THE NEXT NODE WILL START IMMEDIATELY. */
+/* */
+/* OTHERWISE THE PROCESSING WILL START AFTER SETTING THE PROPER */
+/* STATE. HOWEVER BEFORE PROCESSING THE MESSAGE */
+/* IT IS NECESSARY TO CHECK THAT THE FRAGMENT IS NOT PERFORMING */
+/* A CHECKPOINT. THE OPERATION SHALL ALSO BE LINKED INTO THE */
+/* FRAGMENT QUEUE OR LIST OF ACTIVE OPERATIONS. */
+/* */
+/* THE FIRST STEP IN PROCESSING THE MESSAGE IS TO CONTACT DBACC. */
+/*------------------------------------------------------------------------*/
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ case Fragrecord::CRASH_RECOVERING:
+ case Fragrecord::ACTIVE_CREATION:
+ linkActiveFrag(signal);
+ prepareContinueAfterBlockedLab(signal);
+ return;
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ regTcPtr->transactionState = TcConnectionrec::STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dblqh::endgettupkeyLab()
+
+void Dblqh::prepareContinueAfterBlockedLab(Signal* signal)
+{
+ UintR ttcScanOp;
+ UintR taccreq;
+
+/* -------------------------------------------------------------------------- */
+/* INPUT: TC_CONNECTPTR ACTIVE CONNECTION RECORD */
+/* FRAGPTR FRAGMENT RECORD */
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+/* CONTINUE HERE AFTER BEING BLOCKED FOR A WHILE DURING LOCAL CHECKPOINT. */
+/* -------------------------------------------------------------------------- */
+/* ALSO AFTER NORMAL PROCEDURE WE CONTINUE HERE */
+/* -------------------------------------------------------------------------- */
+ Uint32 tc_ptr_i = tcConnectptr.i;
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->indTakeOver == ZTRUE) {
+ jam();
+ ttcScanOp = KeyInfo20::getScanOp(regTcPtr->tcScanInfo);
+ scanptr.i = RNIL;
+ {
+ ScanRecord key;
+ key.scanNumber = KeyInfo20::getScanNo(regTcPtr->tcScanInfo);
+ key.fragPtrI = fragptr.i;
+ c_scanTakeOverHash.find(scanptr, key);
+#ifdef TRACE_SCAN_TAKEOVER
+ if(scanptr.i == RNIL)
+ ndbout_c("not finding (%d %d)", key.scanNumber, key.fragPtrI);
+#endif
+ }
+ if (scanptr.i == RNIL) {
+ jam();
+ releaseActiveFrag(signal);
+ takeOverErrorLab(signal);
+ return;
+ }//if
+ Uint32 accOpPtr= get_acc_ptr_from_scan_record(scanptr.p,
+ ttcScanOp,
+ true);
+ if (accOpPtr == RNIL) {
+ jam();
+ releaseActiveFrag(signal);
+ takeOverErrorLab(signal);
+ return;
+ }//if
+ signal->theData[1] = accOpPtr;
+ signal->theData[2] = regTcPtr->transid[0];
+ signal->theData[3] = regTcPtr->transid[1];
+ EXECUTE_DIRECT(refToBlock(regTcPtr->tcAccBlockref), GSN_ACC_TO_REQ,
+ signal, 4);
+ if (signal->theData[0] == (UintR)-1) {
+ execACC_TO_REF(signal);
+ return;
+ }//if
+ jamEntry();
+ }//if
+/*-------------------------------------------------------------------*/
+/* IT IS NOW TIME TO CONTACT ACC. THE TUPLE KEY WILL BE SENT */
+/* AND THIS WILL BE TRANSLATED INTO A LOCAL KEY BY USING THE */
+/* LOCAL PART OF THE LH3-ALGORITHM. ALSO PROPER LOCKS ON THE */
+/* TUPLE WILL BE SET. FOR INSERTS AND DELETES THE MESSAGE WILL */
+/* START AN INSERT/DELETE INTO THE HASH TABLE. */
+/* */
+/* BEFORE SENDING THE MESSAGE THE REQUEST INFORMATION IS SET */
+/* PROPERLY. */
+/* ----------------------------------------------------------------- */
+#if 0
+ if (regTcPtr->tableref != 0) {
+ switch (regTcPtr->operation) {
+ case ZREAD: ndbout << "Read "; break;
+ case ZUPDATE: ndbout << " Update "; break;
+ case ZWRITE: ndbout << "Write "; break;
+ case ZINSERT: ndbout << "Insert "; break;
+ case ZDELETE: ndbout << "Delete "; break;
+ default: ndbout << "????"; break;
+ }
+ ndbout << "with key = " << regTcPtr->tupkeyData[0] << endl;
+ }
+#endif
+
+ regTcPtr->transactionState = TcConnectionrec::WAIT_ACC;
+ taccreq = regTcPtr->operation;
+ taccreq = taccreq + (regTcPtr->opSimple << 3);
+ taccreq = taccreq + (regTcPtr->lockType << 4);
+ taccreq = taccreq + (regTcPtr->dirtyOp << 6);
+ taccreq = taccreq + (regTcPtr->replicaType << 7);
+ taccreq = taccreq + (regTcPtr->apiVersionNo << 9);
+/* ************ */
+/* ACCKEYREQ < */
+/* ************ */
+ ndbrequire(regTcPtr->localFragptr < 2);
+ Uint32 sig0, sig1, sig2, sig3, sig4;
+ sig0 = regTcPtr->accConnectrec;
+ sig1 = fragptr.p->accFragptr[regTcPtr->localFragptr];
+ sig2 = regTcPtr->hashValue;
+ sig3 = regTcPtr->primKeyLen;
+ sig4 = regTcPtr->transid[0];
+ signal->theData[0] = sig0;
+ signal->theData[1] = sig1;
+ signal->theData[2] = taccreq;
+ signal->theData[3] = sig2;
+ signal->theData[4] = sig3;
+ signal->theData[5] = sig4;
+
+ sig0 = regTcPtr->transid[1];
+ sig1 = regTcPtr->tupkeyData[0];
+ sig2 = regTcPtr->tupkeyData[1];
+ sig3 = regTcPtr->tupkeyData[2];
+ sig4 = regTcPtr->tupkeyData[3];
+ signal->theData[6] = sig0;
+ signal->theData[7] = sig1;
+ signal->theData[8] = sig2;
+ signal->theData[9] = sig3;
+ signal->theData[10] = sig4;
+ if (regTcPtr->primKeyLen > 4) {
+ sendKeyinfoAcc(signal, 11);
+ }//if
+ EXECUTE_DIRECT(refToBlock(regTcPtr->tcAccBlockref), GSN_ACCKEYREQ,
+ signal, 7 + regTcPtr->primKeyLen);
+ if (signal->theData[0] < RNIL) {
+ signal->theData[0] = tc_ptr_i;
+ execACCKEYCONF(signal);
+ return;
+ } else if (signal->theData[0] == RNIL) {
+ ;
+ } else {
+ ndbrequire(signal->theData[0] == (UintR)-1);
+ signal->theData[0] = tc_ptr_i;
+ execACCKEYREF(signal);
+ }//if
+ return;
+}//Dblqh::prepareContinueAfterBlockedLab()
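+
+/* --------------------------------------------------------------------------
+ * Editorial sketch (illustration only, not part of the original change):
+ * how the ACCKEYREQ request word built above is packed. The field positions
+ * follow directly from the shift distances used in
+ * prepareContinueAfterBlockedLab(); the helper name is hypothetical.
+ * ------------------------------------------------------------------------- */
+#if 0
+static Uint32
+packAccKeyReqInfo(Uint32 operation,    // bits 0-2
+                  Uint32 opSimple,     // bit  3
+                  Uint32 lockType,     // bits 4-5
+                  Uint32 dirtyOp,      // bit  6
+                  Uint32 replicaType,  // bits 7-8
+                  Uint32 apiVersionNo) // bits 9 and up
+{
+  return operation +
+         (opSimple     << 3) +
+         (lockType     << 4) +
+         (dirtyOp      << 6) +
+         (replicaType  << 7) +
+         (apiVersionNo << 9);
+}
+#endif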
+
+/* ========================================================================== */
+/* ======= SEND KEYINFO TO ACC ======= */
+/* */
+/* ========================================================================== */
+void Dblqh::sendKeyinfoAcc(Signal* signal, Uint32 Ti)
+{
+ DatabufPtr regDatabufptr;
+ regDatabufptr.i = tcConnectptr.p->firstTupkeybuf;
+
+ do {
+ jam();
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ Uint32 sig0 = regDatabufptr.p->data[0];
+ Uint32 sig1 = regDatabufptr.p->data[1];
+ Uint32 sig2 = regDatabufptr.p->data[2];
+ Uint32 sig3 = regDatabufptr.p->data[3];
+ signal->theData[Ti] = sig0;
+ signal->theData[Ti + 1] = sig1;
+ signal->theData[Ti + 2] = sig2;
+ signal->theData[Ti + 3] = sig3;
+ regDatabufptr.i = regDatabufptr.p->nextDatabuf;
+ Ti += 4;
+ } while (regDatabufptr.i != RNIL);
+}//Dblqh::sendKeyinfoAcc()
+
+void Dblqh::execLQH_ALLOCREQ(Signal* signal)
+{
+ TcConnectionrecPtr regTcPtr;
+ FragrecordPtr regFragptr;
+
+ jamEntry();
+ regTcPtr.i = signal->theData[0];
+ ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec);
+
+ regFragptr.i = regTcPtr.p->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+
+ ndbrequire(regTcPtr.p->localFragptr < 2);
+ signal->theData[0] = regTcPtr.p->tupConnectrec;
+ signal->theData[1] = regFragptr.p->tupFragptr[regTcPtr.p->localFragptr];
+ signal->theData[2] = regTcPtr.p->tableref;
+ Uint32 tup = refToBlock(regTcPtr.p->tcTupBlockref);
+ EXECUTE_DIRECT(tup, GSN_TUP_ALLOCREQ, signal, 3);
+}//Dblqh::execLQH_ALLOCREQ()
+
+/* ************>> */
+/* ACCKEYCONF > */
+/* ************>> */
+void Dblqh::execACCKEYCONF(Signal* signal)
+{
+ TcConnectionrec *regTcConnectionrec = tcConnectionrec;
+ Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
+ Uint32 tcIndex = signal->theData[0];
+ Uint32 Tfragid = signal->theData[2];
+ Uint32 localKey1 = signal->theData[3];
+ Uint32 localKey2 = signal->theData[4];
+ Uint32 localKeyFlag = signal->theData[5];
+ jamEntry();
+ tcConnectptr.i = tcIndex;
+ ptrCheckGuard(tcConnectptr, ttcConnectrecFileSize, regTcConnectionrec);
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->transactionState != TcConnectionrec::WAIT_ACC) {
+ LQHKEY_abort(signal, 3);
+ return;
+ }//if
+ /* ------------------------------------------------------------------------
+ * Set transaction state and also reset the activeCreat since that is only
+ * valid in cases where the record was not present.
+ * ------------------------------------------------------------------------ */
+ regTcPtr->transactionState = TcConnectionrec::WAIT_TUP;
+ regTcPtr->activeCreat = ZFALSE;
+ /* ------------------------------------------------------------------------
+ * IT IS NOW TIME TO CONTACT THE TUPLE MANAGER. THE TUPLE MANAGER NEEDS THE
+ * INFORMATION ON WHICH TABLE AND FRAGMENT, THE LOCAL KEY AND IT NEEDS TO
+ * KNOW THE TYPE OF OPERATION TO PERFORM. TUP CAN SEND THE ATTRINFO DATA
+ * EITHER TO THE TC BLOCK OR DIRECTLY TO THE APPLICATION. THE SCHEMA VERSION
+ * IS NEEDED SINCE TWO SCHEMA VERSIONS CAN BE ACTIVE SIMULTANEOUSLY ON A
+ * TABLE.
+ * ----------------------------------------------------------------------- */
+ if (regTcPtr->operation == ZWRITE)
+ {
+ Uint32 op= signal->theData[1];
+ if(likely(op == ZINSERT || op == ZUPDATE))
+ {
+ regTcPtr->operation = op;
+ }
+ else
+ {
+ warningEvent("Convering %d to ZUPDATE", op);
+ regTcPtr->operation = ZUPDATE;
+ }
+ }//if
+
+ ndbrequire(localKeyFlag == 1);
+ localKey2 = localKey1 & MAX_TUPLES_PER_PAGE;
+ localKey1 = localKey1 >> MAX_TUPLES_BITS;
+ Uint32 Ttupreq = regTcPtr->dirtyOp;
+ Ttupreq = Ttupreq + (regTcPtr->opSimple << 1);
+ Ttupreq = Ttupreq + (regTcPtr->operation << 6);
+ Ttupreq = Ttupreq + (regTcPtr->opExec << 10);
+ Ttupreq = Ttupreq + (regTcPtr->apiVersionNo << 11);
+
+ /* ---------------------------------------------------------------------
+ * Clear interpreted mode bit since we do not want the next replica to
+ * use interpreted mode. The next replica will receive a normal write.
+ * --------------------------------------------------------------------- */
+ regTcPtr->opExec = 0;
+ /* ************< */
+ /* TUPKEYREQ < */
+ /* ************< */
+ TupKeyReq * const tupKeyReq = (TupKeyReq *)signal->getDataPtrSend();
+ Uint32 sig0, sig1, sig2, sig3;
+
+ sig0 = regTcPtr->tupConnectrec;
+ sig1 = regTcPtr->tableref;
+ tupKeyReq->connectPtr = sig0;
+ tupKeyReq->request = Ttupreq;
+ tupKeyReq->tableRef = sig1;
+ tupKeyReq->fragId = Tfragid;
+ tupKeyReq->keyRef1 = localKey1;
+ tupKeyReq->keyRef2 = localKey2;
+
+ sig0 = regTcPtr->totReclenAi;
+ sig1 = regTcPtr->applOprec;
+ sig2 = regTcPtr->applRef;
+ sig3 = regTcPtr->schemaVersion;
+ FragrecordPtr regFragptr;
+ regFragptr.i = regTcPtr->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+ tupKeyReq->attrBufLen = sig0;
+ tupKeyReq->opRef = sig1;
+ tupKeyReq->applRef = sig2;
+ tupKeyReq->schemaVersion = sig3;
+
+ ndbrequire(regTcPtr->localFragptr < 2);
+ sig0 = regTcPtr->storedProcId;
+ sig1 = regTcPtr->transid[0];
+ sig2 = regTcPtr->transid[1];
+ sig3 = regFragptr.p->tupFragptr[regTcPtr->localFragptr];
+ Uint32 tup = refToBlock(regTcPtr->tcTupBlockref);
+
+ tupKeyReq->storedProcedure = sig0;
+ tupKeyReq->transId1 = sig1;
+ tupKeyReq->transId2 = sig2;
+ tupKeyReq->fragPtr = sig3;
+ tupKeyReq->primaryReplica = (tcConnectptr.p->seqNoReplica == 0)?true:false;
+ tupKeyReq->coordinatorTC = tcConnectptr.p->tcBlockref;
+ tupKeyReq->tcOpIndex = tcConnectptr.p->tcOprec;
+ tupKeyReq->savePointId = tcConnectptr.p->savePointId;
+
+ EXECUTE_DIRECT(tup, GSN_TUPKEYREQ, signal, TupKeyReq::SignalLength);
+}//Dblqh::execACCKEYCONF()
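+
+/* --------------------------------------------------------------------------
+ * Editorial sketch (illustration only, not part of the original change):
+ * execACCKEYCONF() above splits the local key returned by ACC into a page
+ * part (keyRef1) and an in-page index (keyRef2) before calling TUP. This
+ * assumes MAX_TUPLES_PER_PAGE is the all-ones mask of the low
+ * MAX_TUPLES_BITS bits; the helper name is hypothetical.
+ * ------------------------------------------------------------------------- */
+#if 0
+static void
+splitLocalKey(Uint32 localKey, Uint32 & pageRef, Uint32 & pageIndex)
+{
+  pageIndex = localKey & MAX_TUPLES_PER_PAGE;  // low bits: tuple within page
+  pageRef   = localKey >> MAX_TUPLES_BITS;     // high bits: page reference
+}
+#endif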
+
+/* --------------------------------------------------------------------------
+ * ------- ENTER TUP... -------
+ * ENTER TUPKEYCONF WITH
+ * TC_CONNECTPTR,
+ * TDATA2, LOCAL KEY REFERENCE 1, ONLY INTERESTING AFTER INSERT
+ * TDATA3, LOCAL KEY REFERENCE 2, ONLY INTERESTING AFTER INSERT
+ * TDATA4, TOTAL LENGTH OF READ DATA SENT TO TC/APPLICATION
+ * TDATA5 TOTAL LENGTH OF UPDATE DATA SENT TO/FROM TUP
+ * GOTO TUPKEY_CONF
+ *
+ * TAKE CARE OF RESPONSES FROM TUPLE MANAGER.
+ * -------------------------------------------------------------------------- */
+void Dblqh::tupkeyConfLab(Signal* signal)
+{
+/* ---- GET OPERATION TYPE AND CHECK WHAT KIND OF OPERATION IS REQUESTED ---- */
+ const TupKeyConf * const tupKeyConf = (TupKeyConf *)&signal->theData[0];
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->simpleRead) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THE OPERATION IS A SIMPLE READ. WE WILL IMMEDIATELY COMMIT THE OPERATION.
+ * SINCE WE HAVE NOT RELEASED THE FRAGMENT LOCK (FOR LOCAL CHECKPOINTS) YET
+ * WE CAN GO IMMEDIATELY TO COMMIT_CONTINUE_AFTER_BLOCKED.
+ * WE HAVE ALREADY SENT THE RESPONSE SO WE ARE NOT INTERESTED IN READ LENGTH
+ * ---------------------------------------------------------------------- */
+ regTcPtr->gci = cnewestGci;
+ releaseActiveFrag(signal);
+ commitContinueAfterBlockedLab(signal);
+ return;
+ }//if
+ if (tupKeyConf->readLength != 0) {
+ jam();
+
+ /* SET BIT 15 IN REQINFO */
+ LqhKeyReq::setApplicationAddressFlag(regTcPtr->reqinfo, 1);
+
+ regTcPtr->readlenAi = tupKeyConf->readLength;
+ }//if
+ regTcPtr->totSendlenAi = tupKeyConf->writeLength;
+ ndbrequire(regTcPtr->totSendlenAi == regTcPtr->currTupAiLen);
+ rwConcludedLab(signal);
+ return;
+}//Dblqh::tupkeyConfLab()
+
+/* --------------------------------------------------------------------------
+ * THE CODE IS FOUND IN THE SIGNAL RECEPTION PART OF LQH
+ * -------------------------------------------------------------------------- */
+void Dblqh::rwConcludedLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ /* ------------------------------------------------------------------------
+ * WE HAVE NOW CONCLUDED READING/WRITING IN ACC AND TUP FOR THIS OPERATION.
+ * IT IS NOW TIME TO LOG THE OPERATION, SEND REQUEST TO NEXT NODE OR TC AND
+ * FOR SOME TYPES OF OPERATIONS IT IS EVEN TIME TO COMMIT THE OPERATION.
+ * ------------------------------------------------------------------------ */
+ if (regTcPtr->operation == ZREAD) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * A NORMAL READ OPERATION IS NOT LOGGED BUT IS NOT COMMITTED UNTIL THE
+ * COMMIT SIGNAL ARRIVES. THUS WE CONTINUE PACKING THE RESPONSE.
+ * ---------------------------------------------------------------------- */
+ releaseActiveFrag(signal);
+ packLqhkeyreqLab(signal);
+ return;
+ } else {
+ FragrecordPtr regFragptr;
+ regFragptr.i = regTcPtr->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+ if (regFragptr.p->logFlag == Fragrecord::STATE_FALSE){
+ if (regTcPtr->dirtyOp == ZTRUE) {
+ jam();
+ /* ------------------------------------------------------------------
+ * THIS OPERATION WAS A WRITE OPERATION THAT DOES NOT NEED LOGGING AND
+ * THAT CAN BE COMMITTED IMMEDIATELY.
+ * ------------------------------------------------------------------ */
+ regTcPtr->gci = cnewestGci;
+ releaseActiveFrag(signal);
+ commitContinueAfterBlockedLab(signal);
+ return;
+ } else {
+ jam();
+ /* ------------------------------------------------------------------
+ * A NORMAL WRITE OPERATION ON A FRAGMENT WHICH DOES NOT NEED LOGGING.
+ * WE WILL PACK THE REQUEST/RESPONSE TO THE NEXT NODE/TO TC.
+ * ------------------------------------------------------------------ */
+ regTcPtr->logWriteState = TcConnectionrec::NOT_WRITTEN;
+ releaseActiveFrag(signal);
+ packLqhkeyreqLab(signal);
+ return;
+ }//if
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+ * A DIRTY OPERATION WHICH NEEDS LOGGING. WE START BY LOGGING THE
+ * REQUEST. IN THIS CASE WE WILL RELEASE THE FRAGMENT LOCK FIRST.
+ * --------------------------------------------------------------------
+ * A NORMAL WRITE OPERATION THAT NEEDS LOGGING AND WILL NOT BE
+ * PREMATURELY COMMITTED.
+ * -------------------------------------------------------------------- */
+ releaseActiveFrag(signal);
+ logLqhkeyreqLab(signal);
+ return;
+ }//if
+ }//if
+}//Dblqh::rwConcludedLab()
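+
+/* --------------------------------------------------------------------------
+ * Editorial sketch (illustration only, not part of the original change):
+ * the branch structure of rwConcludedLab() above condensed into a
+ * hypothetical decision helper. It restates which path each operation takes;
+ * it adds no new behaviour.
+ * ------------------------------------------------------------------------- */
+#if 0
+enum RwConcludedAction { PACK_RESPONSE, COMMIT_NOW, WRITE_REDO_LOG };
+
+static RwConcludedAction
+rwConcludedAction(bool isRead, bool fragmentLogged, bool dirtyOp)
+{
+  if (isRead)
+    return PACK_RESPONSE;           // reads are never logged, commit on COMMIT
+  if (!fragmentLogged)
+    return dirtyOp ? COMMIT_NOW     // no logging + dirty: commit immediately
+                   : PACK_RESPONSE; // no logging + normal: mark NOT_WRITTEN, pack
+  return WRITE_REDO_LOG;            // logged fragment: write the log first
+}
+#endif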
+
+void Dblqh::rwConcludedAiLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ fragptr.i = regTcPtr->fragmentptr;
+ /* ------------------------------------------------------------------------
+ * WE HAVE NOW CONCLUDED READING/WRITING IN ACC AND TUP FOR THIS OPERATION.
+ * IT IS NOW TIME TO LOG THE OPERATION, SEND REQUEST TO NEXT NODE OR TC AND
+ * FOR SOME TYPES OF OPERATIONS IT IS EVEN TIME TO COMMIT THE OPERATION.
+ * IN THIS CASE WE HAVE ALREADY RELEASED THE FRAGMENT LOCK.
+ * ERROR CASES AT FRAGMENT CREATION AND STAND-BY NODES ARE THE REASONS FOR
+ * COMING HERE.
+ * ------------------------------------------------------------------------ */
+ if (regTcPtr->operation == ZREAD) {
+ if (regTcPtr->opSimple == 1) {
+ jam();
+ /* --------------------------------------------------------------------
+ * THE OPERATION IS A SIMPLE READ. WE WILL IMMEDIATELY COMMIT THE
+ * OPERATION.
+ * -------------------------------------------------------------------- */
+ regTcPtr->gci = cnewestGci;
+ localCommitLab(signal);
+ return;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+ * A NORMAL READ OPERATION IS NOT LOGGED BUT IS NOT COMMITTED UNTIL
+ * THE COMMIT SIGNAL ARRIVES. THUS WE CONTINUE PACKING THE RESPONSE.
+ * -------------------------------------------------------------------- */
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ packLqhkeyreqLab(signal);
+ return;
+ }//if
+ } else {
+ jam();
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (fragptr.p->logFlag == Fragrecord::STATE_FALSE) {
+ if (regTcPtr->dirtyOp == ZTRUE) {
+ /* ------------------------------------------------------------------
+ * THIS OPERATION WAS A WRITE OPERATION THAT DOES NOT NEED LOGGING AND
+ * THAT CAN BE COMMITTED IMMEDIATELY.
+ * ------------------------------------------------------------------ */
+ jam();
+ /* ----------------------------------------------------------------
+ * IT MUST BE ACTIVE CREATION OF A FRAGMENT.
+ * ---------------------------------------------------------------- */
+ regTcPtr->gci = cnewestGci;
+ localCommitLab(signal);
+ return;
+ } else {
+ /* ------------------------------------------------------------------
+ * A NORMAL WRITE OPERATION ON A FRAGMENT WHICH DOES NOT NEED LOGGING.
+ * WE WILL PACK THE REQUEST/RESPONSE TO THE NEXT NODE/TO TC.
+ * ------------------------------------------------------------------ */
+ jam();
+ /* ---------------------------------------------------------------
+ * IT MUST BE ACTIVE CREATION OF A FRAGMENT.
+ * NOT A DIRTY OPERATION THUS PACK REQUEST/RESPONSE.
+ * ---------------------------------------------------------------- */
+ regTcPtr->logWriteState = TcConnectionrec::NOT_WRITTEN;
+ packLqhkeyreqLab(signal);
+ return;
+ }//if
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+ * A DIRTY OPERATION WHICH NEEDS LOGGING. WE START BY LOGGING THE
+ * REQUEST. IN THIS CASE WE WILL RELEASE THE FRAGMENT LOCK FIRST.
+ * -------------------------------------------------------------------- */
+ /* A NORMAL WRITE OPERATION THAT NEEDS LOGGING AND WILL NOT BE
+ * PREMATURELY COMMITTED.
+ * -------------------------------------------------------------------- */
+ logLqhkeyreqLab(signal);
+ return;
+ }//if
+ }//if
+}//Dblqh::rwConcludedAiLab()
+
+/* ##########################################################################
+ * ####### LOG MODULE #######
+ *
+ * ##########################################################################
+ * --------------------------------------------------------------------------
+ * THE LOG MODULE HANDLES THE READING AND WRITING OF THE LOG
+ * IT IS ALSO RESPONSIBLE FOR HANDLING THE SYSTEM RESTART.
+ * IT CONTROLS THE SYSTEM RESTART IN TUP AND ACC AS WELL.
+ * -------------------------------------------------------------------------- */
+void Dblqh::logLqhkeyreqLab(Signal* signal)
+{
+ UintR tcurrentFilepage;
+ TcConnectionrecPtr tmpTcConnectptr;
+
+ if (cnoOfLogPages < ZMIN_LOG_PAGES_OPERATION || ERROR_INSERTED(5032)) {
+ jam();
+ if(ERROR_INSERTED(5032)){
+ CLEAR_ERROR_INSERT_VALUE;
+ }
+/*---------------------------------------------------------------------------*/
+// The log disk is having trouble keeping up with the speed of execution.
+// We must delay writing the log for this operation to ensure we do not
+// overload the log.
+/*---------------------------------------------------------------------------*/
+ terrorCode = ZTEMPORARY_REDO_LOG_FAILURE;
+ abortErrorLab(signal);
+ return;
+ }//if
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ logPartPtr.i = regTcPtr->hashValue & 3;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+/* -------------------------------------------------- */
+/* THIS PART IS USED TO WRITE THE LOG */
+/* -------------------------------------------------- */
+/* -------------------------------------------------- */
+/* CHECK IF A LOG OPERATION IS ONGOING ALREADY. */
+/* IF SO THEN QUEUE THE OPERATION FOR LATER */
+/* RESTART WHEN THE LOG PART IS FREE AGAIN. */
+/* -------------------------------------------------- */
+ LogPartRecord * const regLogPartPtr = logPartPtr.p;
+
+ if(ERROR_INSERTED(5033)){
+ jam();
+ CLEAR_ERROR_INSERT_VALUE;
+
+ if ((regLogPartPtr->firstLogQueue != RNIL) &&
+ (regLogPartPtr->LogLqhKeyReqSent == ZFALSE)) {
+ /* -------------------------------------------------- */
+ /* WE HAVE A PROBLEM IN THAT THE LOG HAS NO */
+ /* ROOM FOR ADDITIONAL OPERATIONS AT THE MOMENT.*/
+ /* -------------------------------------------------- */
+ /* -------------------------------------------------- */
+ /* WE MUST STILL RESTART QUEUED OPERATIONS SO */
+ /* THEY ALSO CAN BE ABORTED. */
+ /* -------------------------------------------------- */
+ regLogPartPtr->LogLqhKeyReqSent = ZTRUE;
+ signal->theData[0] = ZLOG_LQHKEYREQ;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+
+ terrorCode = ZTAIL_PROBLEM_IN_LOG_ERROR;
+ abortErrorLab(signal);
+ return;
+ }
+
+ if (regLogPartPtr->logPartState == LogPartRecord::IDLE) {
+ ;
+ } else if (regLogPartPtr->logPartState == LogPartRecord::ACTIVE) {
+ jam();
+ linkWaitLog(signal, logPartPtr);
+ regTcPtr->transactionState = TcConnectionrec::LOG_QUEUED;
+ return;
+ } else {
+ if ((regLogPartPtr->firstLogQueue != RNIL) &&
+ (regLogPartPtr->LogLqhKeyReqSent == ZFALSE)) {
+/* -------------------------------------------------- */
+/* WE HAVE A PROBLEM IN THAT THE LOG HAS NO */
+/* ROOM FOR ADDITIONAL OPERATIONS AT THE MOMENT.*/
+/* -------------------------------------------------- */
+/* -------------------------------------------------- */
+/* WE MUST STILL RESTART QUEUED OPERATIONS SO */
+/* THEY ALSO CAN BE ABORTED. */
+/* -------------------------------------------------- */
+ regLogPartPtr->LogLqhKeyReqSent = ZTRUE;
+ signal->theData[0] = ZLOG_LQHKEYREQ;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+ if (regLogPartPtr->logPartState == LogPartRecord::TAIL_PROBLEM) {
+ jam();
+ terrorCode = ZTAIL_PROBLEM_IN_LOG_ERROR;
+ } else {
+ ndbrequire(regLogPartPtr->logPartState == LogPartRecord::FILE_CHANGE_PROBLEM);
+ jam();
+ terrorCode = ZFILE_CHANGE_PROBLEM_IN_LOG_ERROR;
+ }//if
+ abortErrorLab(signal);
+ return;
+ }//if
+ regLogPartPtr->logPartState = LogPartRecord::ACTIVE;
+ logFilePtr.i = regLogPartPtr->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+/* -------------------------------------------------- */
+/* CHECK IF A NEW MBYTE IS TO BE STARTED. IF */
+/* SO INSERT A NEXT LOG RECORD, WRITE THE LOG */
+/* AND PLACE THE LOG POINTER ON THE NEW POSITION*/
+/* IF A NEW FILE IS TO BE USED, CHANGE FILE AND */
+/* ALSO START OPENING THE NEXT LOG FILE. IF A */
+/* LAP HAS BEEN COMPLETED THEN ADD ONE TO LAP */
+/* COUNTER. */
+/* -------------------------------------------------- */
+ checkNewMbyte(signal);
+/* -------------------------------------------------- */
+/* INSERT THE OPERATION RECORD LAST IN THE LIST */
+/* OF NOT COMPLETED OPERATIONS. ALSO RECORD THE */
+/* FILE NO, PAGE NO AND PAGE INDEX OF THE START */
+/* OF THIS LOG RECORD. */
+/* IT IS NOT ALLOWED TO INSERT IT INTO THE LIST */
+/* BEFORE CHECKING THE NEW MBYTE SINCE THAT WILL*/
+/* CAUSE THE OLD VALUES OF TC_CONNECTPTR TO BE */
+/* USED IN WRITE_FILE_DESCRIPTOR. */
+/* -------------------------------------------------- */
+ Uint32 tcIndex = tcConnectptr.i;
+ tmpTcConnectptr.i = regLogPartPtr->lastLogTcrec;
+ regLogPartPtr->lastLogTcrec = tcIndex;
+ if (tmpTcConnectptr.i == RNIL) {
+ jam();
+ regLogPartPtr->firstLogTcrec = tcIndex;
+ } else {
+ ptrCheckGuard(tmpTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ tmpTcConnectptr.p->nextLogTcrec = tcIndex;
+ }//if
+ Uint32 fileNo = logFilePtr.p->fileNo;
+ tcurrentFilepage = logFilePtr.p->currentFilepage;
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ regTcPtr->nextLogTcrec = RNIL;
+ regTcPtr->prevLogTcrec = tmpTcConnectptr.i;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ Uint32 pageIndex = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ regTcPtr->logStartFileNo = fileNo;
+ regTcPtr->logStartPageNo = tcurrentFilepage;
+ regTcPtr->logStartPageIndex = pageIndex;
+/* -------------------------------------------------- */
+/* WRITE THE LOG HEADER OF THIS OPERATION. */
+/* -------------------------------------------------- */
+ writeLogHeader(signal);
+/* -------------------------------------------------- */
+/* WRITE THE TUPLE KEY OF THIS OPERATION. */
+/* -------------------------------------------------- */
+ writeKey(signal);
+/* -------------------------------------------------- */
+/* WRITE THE ATTRIBUTE INFO OF THIS OPERATION. */
+/* -------------------------------------------------- */
+ writeAttrinfoLab(signal);
+
+ logNextStart(signal);
+/* -------------------------------------------------- */
+/* RESET THE STATE OF THE LOG PART. IF ANY */
+/* OPERATIONS HAVE QUEUED THEN START THE FIRST */
+/* OF THESE. */
+/* -------------------------------------------------- */
+/* -------------------------------------------------- */
+/* CONTINUE WITH PACKING OF LQHKEYREQ */
+/* -------------------------------------------------- */
+ tcurrentFilepage = logFilePtr.p->currentFilepage;
+ if (logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] == ZPAGE_HEADER_SIZE) {
+ jam();
+ tcurrentFilepage--;
+ }//if
+ regTcPtr->logStopPageNo = tcurrentFilepage;
+ regTcPtr->logWriteState = TcConnectionrec::WRITTEN;
+ if (regTcPtr->abortState != TcConnectionrec::ABORT_IDLE) {
+/* -------------------------------------------------- */
+/* AN ABORT HAS BEEN ORDERED. THE ABORT WAITED */
+/* FOR THE LOG WRITE TO BE COMPLETED. NOW WE */
+/* CAN PROCEED WITH THE NORMAL ABORT HANDLING. */
+/* -------------------------------------------------- */
+ abortCommonLab(signal);
+ return;
+ }//if
+ if (regTcPtr->dirtyOp != ZTRUE) {
+ packLqhkeyreqLab(signal);
+ } else {
+ /* ----------------------------------------------------------------------
+ * I NEED TO INSERT A COMMIT LOG RECORD SINCE WE ARE WRITING LOG IN THIS
+ * TRANSACTION. SINCE WE RELEASED THE LOG LOCK JUST NOW NO ONE ELSE CAN BE
+ * ACTIVE IN WRITING THE LOG. WE THUS WRITE THE LOG WITHOUT GETTING A LOCK
+ * SINCE WE ARE ONLY WRITING A COMMIT LOG RECORD.
+ * ---------------------------------------------------------------------- */
+ writeCommitLog(signal, logPartPtr);
+ /* ----------------------------------------------------------------------
+ * DIRTY OPERATIONS SHOULD COMMIT BEFORE THEY PACK THE REQUEST/RESPONSE.
+ * ---------------------------------------------------------------------- */
+ regTcPtr->gci = cnewestGci;
+ localCommitLab(signal);
+ }//if
+}//Dblqh::logLqhkeyreqLab()
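+
+/* --------------------------------------------------------------------------
+ * Editorial sketch (illustration only, not part of the original change):
+ * logLqhkeyreqLab() above first selects a REDO log part with
+ * "hashValue & 3", i.e. operations are spread over four log parts by the two
+ * low bits of the hash value, and then either writes directly (IDLE), queues
+ * the operation (ACTIVE) or aborts (TAIL_PROBLEM / FILE_CHANGE_PROBLEM).
+ * The helper name is hypothetical.
+ * ------------------------------------------------------------------------- */
+#if 0
+static Uint32
+logPartOf(Uint32 hashValue)
+{
+  return hashValue & 3;   // four log parts, selected by the two low bits
+}
+#endif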
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEND LQHKEYREQ */
+/* */
+/* NO STATE CHECKING SINCE THE SIGNAL IS A LOCAL SIGNAL. THE EXECUTION OF */
+/* THE OPERATION IS COMPLETED. IT IS NOW TIME TO SEND THE OPERATION TO THE */
+/* NEXT REPLICA OR TO TC. */
+/* ------------------------------------------------------------------------- */
+void Dblqh::packLqhkeyreqLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->nextReplica == ZNIL) {
+/* ------------------------------------------------------------------------- */
+/* ------- SEND LQHKEYCONF ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+ sendLqhkeyconfTc(signal, regTcPtr->tcBlockref);
+ if (regTcPtr->dirtyOp != ZTRUE) {
+ jam();
+ regTcPtr->transactionState = TcConnectionrec::PREPARED;
+ releaseOprec(signal);
+ } else {
+ jam();
+/*************************************************************>*/
+/* DIRTY WRITES ARE USED IN TWO SITUATIONS. THE FIRST */
+/* SITUATION IS WHEN THEY ARE USED TO UPDATE COUNTERS AND*/
+/* OTHER ATTRIBUTES WHICH ARE NOT SENSITIVE TO CONSISTE- */
+/* NCY. THE SECOND SITUATION IS BY OPERATIONS THAT ARE */
+/* SENT AS PART OF A COPY FRAGMENT PROCESS. */
+/* */
+/* DURING A COPY FRAGMENT PROCESS THERE IS NO LOGGING */
+/* ONGOING SINCE THE FRAGMENT IS NOT COMPLETE YET. THE */
+/* LOGGING STARTS AFTER COMPLETING THE LAST COPY TUPLE */
+/* OPERATION. THE EXECUTION OF THE LAST COPY TUPLE DOES */
+/* ALSO START A LOCAL CHECKPOINT SO THAT THE FRAGMENT */
+/* REPLICA IS RECOVERABLE. THUS GLOBAL CHECKPOINT ID FOR */
+/* THOSE OPERATIONS ARE NOT INTERESTING. */
+/* */
+/* A DIRTY WRITE IS BY DEFINITION NOT CONSISTENT. THUS */
+/* IT CAN USE ANY GLOBAL CHECKPOINT. THE IDEA HERE IS TO */
+/* ALWAYS USE THE LATEST DEFINED GLOBAL CHECKPOINT ID IN */
+/* THIS NODE. */
+/*************************************************************>*/
+ cleanUp(signal);
+ }//if
+ return;
+ }//if
+/* ------------------------------------------------------------------------- */
+/* ------- SEND LQHKEYREQ ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* THERE ARE MORE REPLICAS TO SEND THE OPERATION TO. A NEW LQHKEYREQ WILL BE */
+/* PREPARED FOR THE NEXT REPLICA. */
+/* ------------------------------------------------------------------------- */
+/* CLEAR REPLICA TYPE, ATTRINFO INDICATOR (IN LQHKEYREQ), */
+/* INTERPRETED EXECUTION, SEQUENTIAL NUMBER OF REPLICA. */
+// Set bit indicating Client and TC record not the same.
+// Set readlenAi indicator if readlenAi != 0
+// Stored Procedure Indicator not set.
+/* ------------------------------------------------------------------------- */
+ LqhKeyReq * const lqhKeyReq = (LqhKeyReq *)&signal->theData[0];
+
+ UintR Treqinfo;
+ UintR sig0, sig1, sig2, sig3, sig4, sig5, sig6;
+ Treqinfo = preComputedRequestInfoMask & regTcPtr->reqinfo;
+
+ UintR TapplAddressIndicator = (regTcPtr->nextSeqNoReplica == 0 ? 0 : 1);
+ LqhKeyReq::setApplicationAddressFlag(Treqinfo, TapplAddressIndicator);
+ LqhKeyReq::setInterpretedFlag(Treqinfo, regTcPtr->opExec);
+ LqhKeyReq::setSeqNoReplica(Treqinfo, regTcPtr->nextSeqNoReplica);
+ LqhKeyReq::setAIInLqhKeyReq(Treqinfo, regTcPtr->reclenAiLqhkey);
+ UintR TreadLenAiInd = (regTcPtr->readlenAi == 0 ? 0 : 1);
+ UintR TsameLqhAndClient = (tcConnectptr.i ==
+ regTcPtr->tcOprec ? 0 : 1);
+ LqhKeyReq::setSameClientAndTcFlag(Treqinfo, TsameLqhAndClient);
+ LqhKeyReq::setReturnedReadLenAIFlag(Treqinfo, TreadLenAiInd);
+
+ UintR TotReclenAi = regTcPtr->totSendlenAi;
+/* ------------------------------------------------------------------------- */
+/* WE ARE NOW PREPARED TO SEND THE LQHKEYREQ. WE HAVE TO DECIDE IF ATTRINFO */
+/* IS INCLUDED IN THE LQHKEYREQ SIGNAL AND THEN SEND IT. */
+/* TAKE OVER SCAN OPERATION IS NEVER USED ON BACKUPS, LOG RECORDS AND START-UP*/
+/* OF NEW REPLICA AND THUS ONLY TOT_SENDLEN_AI IS USED; THE UPPER 16 BITS ARE */
+/* ZERO. */
+/* ------------------------------------------------------------------------- */
+ sig0 = tcConnectptr.i;
+ sig1 = regTcPtr->savePointId;
+ sig2 = regTcPtr->hashValue;
+ sig4 = regTcPtr->tcBlockref;
+
+ lqhKeyReq->clientConnectPtr = sig0;
+ lqhKeyReq->attrLen = TotReclenAi;
+ lqhKeyReq->savePointId = sig1;
+ lqhKeyReq->hashValue = sig2;
+ lqhKeyReq->requestInfo = Treqinfo;
+ lqhKeyReq->tcBlockref = sig4;
+
+ sig0 = regTcPtr->tableref + (regTcPtr->schemaVersion << 16);
+ sig1 = regTcPtr->fragmentid + (regTcPtr->nodeAfterNext[0] << 16);
+ sig2 = regTcPtr->transid[0];
+ sig3 = regTcPtr->transid[1];
+ sig4 = regTcPtr->applRef;
+ sig5 = regTcPtr->applOprec;
+ sig6 = regTcPtr->tcOprec;
+ UintR nextPos = (TapplAddressIndicator << 1);
+
+ lqhKeyReq->tableSchemaVersion = sig0;
+ lqhKeyReq->fragmentData = sig1;
+ lqhKeyReq->transId1 = sig2;
+ lqhKeyReq->transId2 = sig3;
+ lqhKeyReq->noFiredTriggers = regTcPtr->noFiredTriggers;
+ lqhKeyReq->variableData[0] = sig4;
+ lqhKeyReq->variableData[1] = sig5;
+ lqhKeyReq->variableData[2] = sig6;
+
+ nextPos += TsameLqhAndClient;
+
+ if ((regTcPtr->lastReplicaNo - regTcPtr->nextSeqNoReplica) > 1) {
+ sig0 = (UintR)regTcPtr->nodeAfterNext[1] +
+ (UintR)(regTcPtr->nodeAfterNext[2] << 16);
+ lqhKeyReq->variableData[nextPos] = sig0;
+ nextPos++;
+ }//if
+ sig0 = regTcPtr->readlenAi;
+ sig1 = regTcPtr->tupkeyData[0];
+ sig2 = regTcPtr->tupkeyData[1];
+ sig3 = regTcPtr->tupkeyData[2];
+ sig4 = regTcPtr->tupkeyData[3];
+
+ lqhKeyReq->variableData[nextPos] = sig0;
+ nextPos += TreadLenAiInd;
+ lqhKeyReq->variableData[nextPos] = sig1;
+ lqhKeyReq->variableData[nextPos + 1] = sig2;
+ lqhKeyReq->variableData[nextPos + 2] = sig3;
+ lqhKeyReq->variableData[nextPos + 3] = sig4;
+ UintR TkeyLen = LqhKeyReq::getKeyLen(Treqinfo);
+ if (TkeyLen < 4) {
+ nextPos += TkeyLen;
+ } else {
+ nextPos += 4;
+ }//if
+
+ sig0 = regTcPtr->firstAttrinfo[0];
+ sig1 = regTcPtr->firstAttrinfo[1];
+ sig2 = regTcPtr->firstAttrinfo[2];
+ sig3 = regTcPtr->firstAttrinfo[3];
+ sig4 = regTcPtr->firstAttrinfo[4];
+ UintR TAiLen = regTcPtr->reclenAiLqhkey;
+ BlockReference lqhRef = calcLqhBlockRef(regTcPtr->nextReplica);
+
+ lqhKeyReq->variableData[nextPos] = sig0;
+ lqhKeyReq->variableData[nextPos + 1] = sig1;
+ lqhKeyReq->variableData[nextPos + 2] = sig2;
+ lqhKeyReq->variableData[nextPos + 3] = sig3;
+ lqhKeyReq->variableData[nextPos + 4] = sig4;
+
+ nextPos += TAiLen;
+
+ sendSignal(lqhRef, GSN_LQHKEYREQ, signal,
+ nextPos + LqhKeyReq::FixedSignalLength, JBB);
+ if (regTcPtr->primKeyLen > 4) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* MORE THAN 4 WORDS OF KEY DATA IS IN THE OPERATION. THEREFORE WE NEED TO */
+/* PREPARE A KEYINFO SIGNAL. MORE THAN ONE KEYINFO SIGNAL CAN BE SENT. */
+/* ------------------------------------------------------------------------- */
+ sendTupkey(signal);
+ }//if
+/* ------------------------------------------------------------------------- */
+/* NOW I AM PREPARED TO SEND ALL THE ATTRINFO SIGNALS. AT THE MOMENT A LOOP */
+/* SENDS ALL AT ONCE. LATER WE HAVE TO ADDRESS THE PROBLEM THAT THESE COULD */
+/* LEAD TO BUFFER EXPLOSION => NODE CRASH. */
+/* ------------------------------------------------------------------------- */
+/* NEW CODE TO SEND ATTRINFO IN PACK_LQHKEYREQ */
+/* THIS CODE USES A REAL-TIME BREAK AFTER */
+/* SENDING 16 SIGNALS. */
+/* -------------------------------------------------- */
+ sig0 = regTcPtr->tcOprec;
+ sig1 = regTcPtr->transid[0];
+ sig2 = regTcPtr->transid[1];
+ signal->theData[0] = sig0;
+ signal->theData[1] = sig1;
+ signal->theData[2] = sig2;
+ AttrbufPtr regAttrinbufptr;
+ regAttrinbufptr.i = regTcPtr->firstAttrinbuf;
+ while (regAttrinbufptr.i != RNIL) {
+ ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
+ jam();
+ Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN];
+ ndbrequire(dataLen != 0);
+ MEMCOPY_NO_WORDS(&signal->theData[3], &regAttrinbufptr.p->attrbuf[0], dataLen);
+ regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT];
+ sendSignal(lqhRef, GSN_ATTRINFO, signal, dataLen + 3, JBB);
+ }//while
+ regTcPtr->transactionState = TcConnectionrec::PREPARED;
+ if (regTcPtr->dirtyOp == ZTRUE) {
+ jam();
+/*************************************************************>*/
+/* DIRTY WRITES ARE USED IN TWO SITUATIONS. THE FIRST */
+/* SITUATION IS WHEN THEY ARE USED TO UPDATE COUNTERS AND*/
+/* OTHER ATTRIBUTES WHICH ARE NOT SENSITIVE TO CONSISTE- */
+/* NCY. THE SECOND SITUATION IS BY OPERATIONS THAT ARE */
+/* SENT AS PART OF A COPY FRAGMENT PROCESS. */
+/* */
+/* DURING A COPY FRAGMENT PROCESS THERE IS NO LOGGING */
+/* ONGOING SINCE THE FRAGMENT IS NOT COMPLETE YET. THE */
+/* LOGGING STARTS AFTER COMPLETING THE LAST COPY TUPLE */
+/* OPERATION. THE EXECUTION OF THE LAST COPY TUPLE DOES */
+/* ALSO START A LOCAL CHECKPOINT SO THAT THE FRAGMENT */
+/* REPLICA IS RECOVERABLE. THUS GLOBAL CHECKPOINT ID FOR */
+/* THOSE OPERATIONS ARE NOT INTERESTING. */
+/* */
+/* A DIRTY WRITE IS BY DEFINITION NOT CONSISTENT. THUS */
+/* IT CAN USE ANY GLOBAL CHECKPOINT. THE IDEA HERE IS TO */
+/* ALWAYS USE THE LATEST DEFINED GLOBAL CHECKPOINT ID IN */
+/* THIS NODE. */
+/*************************************************************>*/
+ cleanUp(signal);
+ return;
+ }//if
+ /* ------------------------------------------------------------------------
+ * ALL INFORMATION NEEDED BY THE COMMIT PHASE AND COMPLETE PHASE IS
+ * KEPT IN THE TC_CONNECT RECORD. TO ENSURE PROPER USE OF MEMORY
+ * RESOURCES WE DEALLOCATE THE ATTRINFO RECORD AND KEY RECORDS
+ * AS SOON AS POSSIBLE.
+ * ------------------------------------------------------------------------ */
+ releaseOprec(signal);
+}//Dblqh::packLqhkeyreqLab()
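+
+/* --------------------------------------------------------------------------
+ * Editorial sketch (illustration only, not part of the original change):
+ * how packLqhkeyreqLab() above splits the operation data when forwarding to
+ * the next replica. Up to 4 key words (tupkeyData[0..3]) and up to
+ * reclenAiLqhkey (at most 5) attrinfo words ride inside LQHKEYREQ itself;
+ * further key data follows in KEYINFO signals (sendTupkey) and further
+ * attrinfo in ATTRINFO signals carrying a 3-word header. The constant names
+ * below are hypothetical.
+ * ------------------------------------------------------------------------- */
+#if 0
+static const Uint32 LQHKEYREQ_INLINE_KEY_WORDS  = 4;  // rest via GSN_KEYINFO
+static const Uint32 LQHKEYREQ_INLINE_ATTR_WORDS = 5;  // rest via GSN_ATTRINFO
+static const Uint32 ATTRINFO_HEADER_WORDS       = 3;  // tcOprec + transid[0..1]
+#endif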
+
+/* ========================================================================= */
+/* ==== CHECK IF THE LOG RECORD FITS INTO THE CURRENT MBYTE, ======= */
+/* OTHERWISE SWITCH TO NEXT MBYTE. */
+/* */
+/* ========================================================================= */
+void Dblqh::checkNewMbyte(Signal* signal)
+{
+ UintR tcnmTmp;
+ UintR ttotalLogSize;
+
+/* -------------------------------------------------- */
+/* CHECK IF A NEW MBYTE OF LOG RECORD IS TO BE */
+/* OPENED BEFORE WRITING THE LOG RECORD. NO LOG */
+/* RECORDS ARE ALLOWED TO SPAN A MBYTE BOUNDARY */
+/* */
+/* INPUT: TC_CONNECTPTR THE OPERATION */
+/* LOG_FILE_PTR THE LOG FILE */
+/* OUTPUT: LOG_FILE_PTR THE NEW LOG FILE */
+/* -------------------------------------------------- */
+ ttotalLogSize = ZLOG_HEAD_SIZE + tcConnectptr.p->currTupAiLen;
+ ttotalLogSize = ttotalLogSize + tcConnectptr.p->primKeyLen;
+ tcnmTmp = logFilePtr.p->remainingWordsInMbyte;
+ if ((ttotalLogSize + ZNEXT_LOG_SIZE) <= tcnmTmp) {
+ ndbrequire(tcnmTmp >= ttotalLogSize);
+ logFilePtr.p->remainingWordsInMbyte = tcnmTmp - ttotalLogSize;
+ return;
+ } else {
+ jam();
+/* -------------------------------------------------- */
+/* IT WAS NOT ENOUGH SPACE IN THIS MBYTE FOR */
+/* THIS LOG RECORD. MOVE TO NEXT MBYTE */
+/* THIS MIGHT INCLUDE CHANGING LOG FILE */
+/* -------------------------------------------------- */
+/* WE HAVE TO INSERT A NEXT LOG RECORD FIRST */
+/* -------------------------------------------------- */
+/* THEN CONTINUE BY WRITING THE FILE DESCRIPTORS*/
+/* -------------------------------------------------- */
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ changeMbyte(signal);
+ tcnmTmp = logFilePtr.p->remainingWordsInMbyte;
+ }//if
+ ndbrequire(tcnmTmp >= ttotalLogSize);
+ logFilePtr.p->remainingWordsInMbyte = tcnmTmp - ttotalLogSize;
+}//Dblqh::checkNewMbyte()
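+
+/* --------------------------------------------------------------------------
+ * Editorial sketch (illustration only, not part of the original change):
+ * the fit test applied by checkNewMbyte() above. A log record (header + key
+ * + attrinfo) may not span an mbyte boundary, and room must be left for a
+ * NEXT log record of ZNEXT_LOG_SIZE words. Sizes are in 32-bit words; the
+ * helper name is hypothetical.
+ * ------------------------------------------------------------------------- */
+#if 0
+static bool
+fitsInCurrentMbyte(Uint32 remainingWordsInMbyte, Uint32 keyLen, Uint32 attrLen)
+{
+  const Uint32 totalLogSize = ZLOG_HEAD_SIZE + keyLen + attrLen;
+  return (totalLogSize + ZNEXT_LOG_SIZE) <= remainingWordsInMbyte;
+}
+#endif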
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE OPERATION HEADER TO LOG -------
+ *
+ * SUBROUTINE SHORT NAME: WLH
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeLogHeader(Signal* signal)
+{
+ Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ Uint32 hashValue = tcConnectptr.p->hashValue;
+ Uint32 operation = tcConnectptr.p->operation;
+ Uint32 keyLen = tcConnectptr.p->primKeyLen;
+ Uint32 aiLen = tcConnectptr.p->currTupAiLen;
+ Uint32 totLogLen = aiLen + keyLen + ZLOG_HEAD_SIZE;
+ if ((logPos + ZLOG_HEAD_SIZE) < ZPAGE_SIZE) {
+ Uint32* dataPtr = &logPagePtr.p->logPageWord[logPos];
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + ZLOG_HEAD_SIZE;
+ dataPtr[0] = ZPREP_OP_TYPE;
+ dataPtr[1] = totLogLen;
+ dataPtr[2] = hashValue;
+ dataPtr[3] = operation;
+ dataPtr[4] = aiLen;
+ dataPtr[5] = keyLen;
+ } else {
+ writeLogWord(signal, ZPREP_OP_TYPE);
+ writeLogWord(signal, totLogLen);
+ writeLogWord(signal, hashValue);
+ writeLogWord(signal, operation);
+ writeLogWord(signal, aiLen);
+ writeLogWord(signal, keyLen);
+ }//if
+}//Dblqh::writeLogHeader()
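+
+/* --------------------------------------------------------------------------
+ * Editorial sketch (illustration only, not part of the original change):
+ * layout of the prepare-record header written by writeLogHeader() above.
+ * Both branches emit exactly six words, which suggests ZLOG_HEAD_SIZE == 6,
+ * but that constant is defined elsewhere. The struct name is hypothetical.
+ * ------------------------------------------------------------------------- */
+#if 0
+struct PrepLogHeader {
+  Uint32 recordType;  // ZPREP_OP_TYPE
+  Uint32 totLogLen;   // header + key + attrinfo, in words
+  Uint32 hashValue;
+  Uint32 operation;
+  Uint32 aiLen;       // attrinfo length in words
+  Uint32 keyLen;      // primary key length in words
+};
+#endif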
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE TUPLE KEY TO LOG -------
+ *
+ * SUBROUTINE SHORT NAME: WK
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeKey(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Uint32 logPos, endPos, dataLen;
+ Int32 remainingLen;
+ logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ remainingLen = regTcPtr->primKeyLen;
+ dataLen = remainingLen;
+ if (remainingLen > 4)
+ dataLen = 4;
+ remainingLen -= dataLen;
+ endPos = logPos + dataLen;
+ if (endPos < ZPAGE_SIZE) {
+ MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos],
+ &regTcPtr->tupkeyData[0],
+ dataLen);
+ } else {
+ jam();
+ for (Uint32 i = 0; i < dataLen; i++)
+ writeLogWord(signal, regTcPtr->tupkeyData[i]);
+ endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ }//if
+ DatabufPtr regDatabufptr;
+ regDatabufptr.i = regTcPtr->firstTupkeybuf;
+ while (remainingLen > 0) {
+ logPos = endPos;
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ dataLen = remainingLen;
+ if (remainingLen > 4)
+ dataLen = 4;
+ remainingLen -= dataLen;
+ endPos += dataLen;
+ if (endPos < ZPAGE_SIZE) {
+ MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos],
+ &regDatabufptr.p->data[0],
+ dataLen);
+ } else {
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos;
+ for (Uint32 i = 0; i < dataLen; i++)
+ writeLogWord(signal, regDatabufptr.p->data[i]);
+ endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ }//if
+ regDatabufptr.i = regDatabufptr.p->nextDatabuf;
+ }//while
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = endPos;
+ ndbrequire(regDatabufptr.i == RNIL);
+}//Dblqh::writeKey()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE ATTRINFO TO LOG -------
+ *
+ * SUBROUTINE SHORT NAME: WA
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeAttrinfoLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Uint32 totLen = regTcPtr->currTupAiLen;
+ if (totLen == 0)
+ return;
+ Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ Uint32 lqhLen = regTcPtr->reclenAiLqhkey;
+ ndbrequire(totLen >= lqhLen);
+ Uint32 endPos = logPos + lqhLen;
+ totLen -= lqhLen;
+ if (endPos < ZPAGE_SIZE) {
+ MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos],
+ &regTcPtr->firstAttrinfo[0],
+ lqhLen);
+ } else {
+ for (Uint32 i = 0; i < lqhLen; i++)
+ writeLogWord(signal, regTcPtr->firstAttrinfo[i]);
+ endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ }//if
+ AttrbufPtr regAttrinbufptr;
+ regAttrinbufptr.i = regTcPtr->firstAttrinbuf;
+ while (totLen > 0) {
+ logPos = endPos;
+ ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
+ Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN];
+ ndbrequire(totLen >= dataLen);
+ ndbrequire(dataLen > 0);
+ totLen -= dataLen;
+ endPos += dataLen;
+ if (endPos < ZPAGE_SIZE) {
+ MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos],
+ &regAttrinbufptr.p->attrbuf[0],
+ dataLen);
+ } else {
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos;
+ for (Uint32 i = 0; i < dataLen; i++)
+ writeLogWord(signal, regAttrinbufptr.p->attrbuf[i]);
+ endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ }//if
+ regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT];
+ }//while
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = endPos;
+ ndbrequire(regAttrinbufptr.i == RNIL);
+}//Dblqh::writeAttrinfoLab()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEND TUPLE KEY IN KEYINFO SIGNAL(S) ------- */
+/* */
+/* SUBROUTINE SHORT NAME: STU */
+/* ------------------------------------------------------------------------- */
+void Dblqh::sendTupkey(Signal* signal)
+{
+ UintR TdataPos = 3;
+ BlockReference lqhRef = calcLqhBlockRef(tcConnectptr.p->nextReplica);
+ signal->theData[0] = tcConnectptr.p->tcOprec;
+ signal->theData[1] = tcConnectptr.p->transid[0];
+ signal->theData[2] = tcConnectptr.p->transid[1];
+ databufptr.i = tcConnectptr.p->firstTupkeybuf;
+ do {
+ ptrCheckGuard(databufptr, cdatabufFileSize, databuf);
+ signal->theData[TdataPos] = databufptr.p->data[0];
+ signal->theData[TdataPos + 1] = databufptr.p->data[1];
+ signal->theData[TdataPos + 2] = databufptr.p->data[2];
+ signal->theData[TdataPos + 3] = databufptr.p->data[3];
+
+ databufptr.i = databufptr.p->nextDatabuf;
+ TdataPos += 4;
+ if (databufptr.i == RNIL) {
+ jam();
+ sendSignal(lqhRef, GSN_KEYINFO, signal, TdataPos, JBB);
+ return;
+ } else if (TdataPos == 23) {
+ jam();
+ sendSignal(lqhRef, GSN_KEYINFO, signal, 23, JBB);
+ TdataPos = 3;
+ }
+ } while (1);
+}//Dblqh::sendTupkey()
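+
+/* --------------------------------------------------------------------------
+ * Editorial sketch (illustration only, not part of the original change):
+ * the KEYINFO batching used by sendTupkey() above. Words 0-2 carry tcOprec
+ * and the transaction id; key data is packed from word 3 and the signal is
+ * flushed when it reaches 23 words, i.e. at most 20 key words per full
+ * KEYINFO signal (the last signal may be shorter). The constant names are
+ * hypothetical.
+ * ------------------------------------------------------------------------- */
+#if 0
+static const Uint32 KEYINFO_HEADER_WORDS = 3;
+static const Uint32 KEYINFO_SIGNAL_WORDS = 23;
+static const Uint32 KEYINFO_DATA_WORDS_PER_SIGNAL =
+  KEYINFO_SIGNAL_WORDS - KEYINFO_HEADER_WORDS;   // 20 key words
+#endif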
+
+void Dblqh::cleanUp(Signal* signal)
+{
+ releaseOprec(signal);
+ deleteTransidHash(signal);
+ releaseTcrec(signal, tcConnectptr);
+}//Dblqh::cleanUp()
+
+/* --------------------------------------------------------------------------
+ * ---- RELEASE ALL RECORDS CONNECTED TO THE OPERATION RECORD AND THE ----
+ * OPERATION RECORD ITSELF
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseOprec(Signal* signal)
+{
+ UintR Tmpbuf;
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+/* ---- RELEASE DATA BUFFERS ------------------- */
+ DatabufPtr regDatabufptr;
+ regDatabufptr.i = regTcPtr->firstTupkeybuf;
+/* --------------------------------------------------------------------------
+ * ------- RELEASE DATA BUFFERS -------
+ *
+ * ------------------------------------------------------------------------- */
+
+ while (regDatabufptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ Tmpbuf = regDatabufptr.p->nextDatabuf;
+ regDatabufptr.p->nextDatabuf = cfirstfreeDatabuf;
+ cfirstfreeDatabuf = regDatabufptr.i;
+ regDatabufptr.i = Tmpbuf;
+ }//while
+/* ---- RELEASE ATTRINFO BUFFERS ------------------- */
+ AttrbufPtr regAttrinbufptr;
+ regAttrinbufptr.i = regTcPtr->firstAttrinbuf;
+ /* ########################################################################
+ * ####### RELEASE_ATTRINBUF #######
+ *
+ * ####################################################################### */
+ while (regAttrinbufptr.i != RNIL) {
+ jam();
+ regAttrinbufptr.i= release_attrinbuf(regAttrinbufptr.i);
+ }//while
+ regTcPtr->firstAttrinbuf = RNIL;
+ regTcPtr->lastAttrinbuf = RNIL;
+ regTcPtr->firstTupkeybuf = RNIL;
+ regTcPtr->lastTupkeybuf = RNIL;
+}//Dblqh::releaseOprec()
+
+/* ------------------------------------------------------------------------- */
+/* ------ DELETE TRANSACTION ID FROM HASH TABLE ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::deleteTransidHash(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ TcConnectionrecPtr prevHashptr;
+ TcConnectionrecPtr nextHashptr;
+
+ prevHashptr.i = regTcPtr->prevHashRec;
+ nextHashptr.i = regTcPtr->nextHashRec;
+ if (prevHashptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(prevHashptr, ctcConnectrecFileSize, tcConnectionrec);
+ prevHashptr.p->nextHashRec = nextHashptr.i;
+ } else {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* THE OPERATION WAS PLACED FIRST IN THE LIST OF THE HASH TABLE. NEED TO SET */
+/* A NEW LEADER OF THE LIST. */
+/* ------------------------------------------------------------------------- */
+ Uint32 hashIndex = (regTcPtr->transid[0] ^ regTcPtr->tcOprec) & 1023;
+ ctransidHash[hashIndex] = nextHashptr.i;
+ }//if
+ if (nextHashptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(nextHashptr, ctcConnectrecFileSize, tcConnectionrec);
+ nextHashptr.p->prevHashRec = prevHashptr.i;
+ }//if
+}//Dblqh::deleteTransidHash()
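+
+/* --------------------------------------------------------------------------
+ * Editorial sketch (illustration only, not part of the original change):
+ * the bucket index used by deleteTransidHash() above, presumably the same
+ * index used when the record was linked into ctransidHash: 1024 buckets,
+ * addressed by the low ten bits of transid[0] XOR tcOprec. The helper name
+ * is hypothetical.
+ * ------------------------------------------------------------------------- */
+#if 0
+static Uint32
+transidHashIndex(Uint32 transid0, Uint32 tcOprec)
+{
+  return (transid0 ^ tcOprec) & 1023;   // 1024-entry hash table
+}
+#endif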
+
+/* --------------------------------------------------------------------------
+ * ------- LINK OPERATION IN ACTIVE LIST ON FRAGMENT -------
+ *
+ * SUBROUTINE SHORT NAME: LAF
+// Input Pointers:
+// tcConnectptr
+// fragptr
+ * ------------------------------------------------------------------------- */
+void Dblqh::linkActiveFrag(Signal* signal)
+{
+ TcConnectionrecPtr lafTcConnectptr;
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Fragrecord * const regFragPtr = fragptr.p;
+ Uint32 tcIndex = tcConnectptr.i;
+ lafTcConnectptr.i = regFragPtr->activeList;
+ regTcPtr->prevTc = RNIL;
+ regFragPtr->activeList = tcIndex;
+ ndbrequire(regTcPtr->listState == TcConnectionrec::NOT_IN_LIST);
+ regTcPtr->nextTc = lafTcConnectptr.i;
+ regTcPtr->listState = TcConnectionrec::IN_ACTIVE_LIST;
+ if (lafTcConnectptr.i == RNIL) {
+ return;
+ } else {
+ jam();
+ ptrCheckGuard(lafTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ lafTcConnectptr.p->prevTc = tcIndex;
+ }//if
+ return;
+}//Dblqh::linkActiveFrag()
+
+/* -------------------------------------------------------------------------
+ * ------- RELEASE OPERATION FROM ACTIVE LIST ON FRAGMENT -------
+ *
+ * SUBROUTINE SHORT NAME = RAF
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseActiveFrag(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ TcConnectionrecPtr ralTcNextConnectptr;
+ TcConnectionrecPtr ralTcPrevConnectptr;
+ fragptr.i = regTcPtr->fragmentptr;
+ ralTcPrevConnectptr.i = regTcPtr->prevTc;
+ ralTcNextConnectptr.i = regTcPtr->nextTc;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ Fragrecord * const regFragPtr = fragptr.p;
+ ndbrequire(regTcPtr->listState == TcConnectionrec::IN_ACTIVE_LIST);
+ regTcPtr->listState = TcConnectionrec::NOT_IN_LIST;
+
+ if (ralTcNextConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(ralTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ ralTcNextConnectptr.p->prevTc = ralTcPrevConnectptr.i;
+ }//if
+ if (ralTcPrevConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(ralTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ ralTcPrevConnectptr.p->nextTc = regTcPtr->nextTc;
+ } else {
+ jam();
+ /* ----------------------------------------------------------------------
+ * OPERATION RECORD IS FIRST IN ACTIVE LIST
+ * THIS MEANS THAT THERE EXISTS NO PREVIOUS TC THAT NEEDS TO BE UPDATED.
+ * --------------------------------------------------------------------- */
+ regFragPtr->activeList = ralTcNextConnectptr.i;
+ }//if
+ if (regFragPtr->lcpRef != RNIL) {
+ jam();
+ lcpPtr.i = regFragPtr->lcpRef;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_WAIT_ACTIVE_FINISH);
+
+ /* --------------------------------------------------------------------
+ * IF A FRAGMENT IS CURRENTLY STARTING A LOCAL CHECKPOINT AND IT
+ * IS WAITING FOR ACTIVE OPERATIONS TO BE COMPLETED WITH THE
+ * CURRENT PHASE, THEN IT IS CHECKED WHETHER THE
+ * LAST ACTIVE OPERATION WAS NOW COMPLETED.
+ * ------------------------------------------------------------------- */
+ if (regFragPtr->activeList == RNIL) {
+ jam();
+ /* ------------------------------------------------------------------
+ * ACTIVE LIST ON FRAGMENT IS EMPTY AND WE ARE WAITING FOR
+ * THIS TO HAPPEN.
+ * WE WILL NOW START THE CHECKPOINT IN TUP AND ACC.
+ * ----------------------------------------------------------------- */
+ /* SEND START LOCAL CHECKPOINT TO ACC AND TUP */
+ /* ----------------------------------------------------------------- */
+ fragptr.p->lcpRef = RNIL;
+ lcpPtr.p->lcpState = LcpRecord::LCP_START_CHKP;
+ sendStartLcp(signal);
+ }//if
+ }//if
+}//Dblqh::releaseActiveFrag()
+
+/* ######################################################################### */
+/* ####### TRANSACTION MODULE ####### */
+/* THIS MODULE HANDLES THE COMMIT AND THE COMPLETE PHASE. */
+/* ######################################################################### */
+void Dblqh::warningReport(Signal* signal, int place)
+{
+ switch (place) {
+ case 0:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMMIT in wrong state in Dblqh" << endl;
+#endif
+ break;
+ case 1:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMMIT with wrong transid in Dblqh" << endl;
+#endif
+ break;
+ case 2:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMPLETE in wrong state in Dblqh" << endl;
+#endif
+ break;
+ case 3:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMPLETE with wrong transid in Dblqh" << endl;
+#endif
+ break;
+ case 4:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMMITREQ in wrong state in Dblqh" << endl;
+#endif
+ break;
+ case 5:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMMITREQ with wrong transid in Dblqh" << endl;
+#endif
+ break;
+ case 6:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMPLETEREQ in wrong state in Dblqh" << endl;
+#endif
+ break;
+ case 7:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMPLETEREQ with wrong transid in Dblqh" << endl;
+#endif
+ break;
+ case 8:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received ABORT with non-existing transid in Dblqh" << endl;
+#endif
+ break;
+ case 9:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received ABORTREQ with non-existing transid in Dblqh" << endl;
+#endif
+ break;
+ case 10:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received ABORTREQ in wrong state in Dblqh" << endl;
+#endif
+ break;
+ case 11:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMMIT when tc-rec released in Dblqh" << endl;
+#endif
+ break;
+ case 12:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMPLETE when tc-rec released in Dblqh" << endl;
+#endif
+ break;
+ case 13:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received LQHKEYREF when tc-rec released in Dblqh" << endl;
+#endif
+ break;
+ case 14:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received LQHKEYREF with wrong transid in Dblqh" << endl;
+#endif
+ break;
+ case 15:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received LQHKEYREF when already aborting in Dblqh" << endl;
+#endif
+ break;
+ case 16:
+ jam();
+ ndbrequire(cstartPhase == ZNIL);
+#ifdef ABORT_TRACE
+ ndbout << "W: Received LQHKEYREF in wrong state in Dblqh" << endl;
+#endif
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ return;
+}//Dblqh::warningReport()
+
+void Dblqh::errorReport(Signal* signal, int place)
+{
+ switch (place) {
+ case 0:
+ jam();
+ break;
+ case 1:
+ jam();
+ break;
+ case 2:
+ jam();
+ break;
+ case 3:
+ jam();
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ systemErrorLab(signal);
+ return;
+}//Dblqh::errorReport()
+
+/* ************************************************************************>>
+ * COMMIT: Start commit request from TC. This signal is originally sent as a
+ * packed signal and this function is called from execPACKED_SIGNAL.
+ * This is the normal commit protocol where TC first sends this signal to the
+ * backup node, which then sends COMMIT to the primary node. If
+ * everything is ok the primary node sends COMMITTED back to TC.
+ * ************************************************************************>> */
+void Dblqh::execCOMMIT(Signal* signal)
+{
+ TcConnectionrec *regTcConnectionrec = tcConnectionrec;
+ Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
+ Uint32 tcIndex = signal->theData[0];
+ Uint32 gci = signal->theData[1];
+ Uint32 transid1 = signal->theData[2];
+ Uint32 transid2 = signal->theData[3];
+ jamEntry();
+ if (tcIndex >= ttcConnectrecFileSize) {
+ errorReport(signal, 0);
+ return;
+ }//if
+ if (ERROR_INSERTED(5011)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMMIT, signal, 2000, 4);
+ return;
+ }//if
+ if (ERROR_INSERTED(5012)) {
+ SET_ERROR_INSERT_VALUE(5017);
+ sendSignalWithDelay(cownref, GSN_COMMIT, signal, 2000, 4);
+ return;
+ }//if
+ tcConnectptr.i = tcIndex;
+ ptrAss(tcConnectptr, regTcConnectionrec);
+ if ((tcConnectptr.p->transid[0] == transid1) &&
+ (tcConnectptr.p->transid[1] == transid2)) {
+ commitReqLab(signal, gci);
+ return;
+ }//if
+ warningReport(signal, 1);
+ return;
+}//Dblqh::execCOMMIT()
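+
+/* --------------------------------------------------------------------------
+ * Editorial sketch (illustration only, not part of the original change):
+ * the four-word COMMIT signal decoded by execCOMMIT() above; it normally
+ * arrives inside a PACKED_SIGNAL. The struct name is hypothetical.
+ * ------------------------------------------------------------------------- */
+#if 0
+struct CommitSignal {
+  Uint32 tcConnectPtrI;  // theData[0]: LQH tc-connect record index
+  Uint32 gci;            // theData[1]: global checkpoint id
+  Uint32 transId1;       // theData[2]
+  Uint32 transId2;       // theData[3]
+};
+#endif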
+
+/* ************************************************************************>>
+ * COMMITREQ: Commit request from TC. This is the commit protocol used if
+ * one of the nodes is not behaving correctly. TC explicitly sends COMMITREQ
+ * to both the backup and primary node and gets a COMMITCONF back if the
+ * COMMIT was ok.
+ * ************************************************************************>> */
+void Dblqh::execCOMMITREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 reqPtr = signal->theData[0];
+ BlockReference reqBlockref = signal->theData[1];
+ Uint32 gci = signal->theData[2];
+ Uint32 transid1 = signal->theData[3];
+ Uint32 transid2 = signal->theData[4];
+ Uint32 tcOprec = signal->theData[6];
+ if (ERROR_INSERTED(5004)) {
+ systemErrorLab(signal);
+ }
+ if (ERROR_INSERTED(5017)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMMITREQ, signal, 2000, 7);
+ return;
+ }//if
+ if (findTransaction(transid1,
+ transid2,
+ tcOprec) != ZOK) {
+ warningReport(signal, 5);
+ return;
+ }//if
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ switch (regTcPtr->transactionState) {
+ case TcConnectionrec::PREPARED:
+ case TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL:
+ case TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL:
+ jam();
+/*-------------------------------------------------------*/
+/* THE NORMAL CASE. */
+/*-------------------------------------------------------*/
+ regTcPtr->reqBlockref = reqBlockref;
+ regTcPtr->reqRef = reqPtr;
+ regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
+ commitReqLab(signal, gci);
+ return;
+ break;
+ case TcConnectionrec::COMMITTED:
+ jam();
+/*---------------------------------------------------------*/
+/* FOR SOME REASON THE COMMIT PHASE HAS BEEN */
+/* FINISHED AFTER A TIME OUT. WE NEED ONLY SEND A */
+/* COMMITCONF SIGNAL. */
+/*---------------------------------------------------------*/
+ regTcPtr->reqBlockref = reqBlockref;
+ regTcPtr->reqRef = reqPtr;
+ regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
+ signal->theData[0] = regTcPtr->reqRef;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = regTcPtr->transid[0];
+ signal->theData[3] = regTcPtr->transid[1];
+ sendSignal(regTcPtr->reqBlockref, GSN_COMMITCONF, signal, 4, JBB);
+ break;
+ case TcConnectionrec::COMMIT_STOPPED:
+ jam();
+ regTcPtr->reqBlockref = reqBlockref;
+ regTcPtr->reqRef = reqPtr;
+ regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
+ /*empty*/;
+ break;
+ default:
+ jam();
+ warningReport(signal, 4);
+ return;
+ break;
+ }//switch
+ return;
+}//Dblqh::execCOMMITREQ()
+
+/* ************************************************************************>>
+ * COMPLETE : Complete the transaction. Sent as a packed signal from TC.
+ * Works the same way as COMMIT protocol. This is the normal case with both
+ * primary and backup working (See COMMIT).
+ * ************************************************************************>> */
+void Dblqh::execCOMPLETE(Signal* signal)
+{
+ TcConnectionrec *regTcConnectionrec = tcConnectionrec;
+ Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
+ Uint32 tcIndex = signal->theData[0];
+ Uint32 transid1 = signal->theData[1];
+ Uint32 transid2 = signal->theData[2];
+ jamEntry();
+ if (tcIndex >= ttcConnectrecFileSize) {
+ errorReport(signal, 1);
+ return;
+ }//if
+ if (ERROR_INSERTED(5013)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMPLETE, signal, 2000, 3);
+ return;
+ }//if
+ if (ERROR_INSERTED(5014)) {
+ SET_ERROR_INSERT_VALUE(5018);
+ sendSignalWithDelay(cownref, GSN_COMPLETE, signal, 2000, 3);
+ return;
+ }//if
+ tcConnectptr.i = tcIndex;
+ ptrAss(tcConnectptr, regTcConnectionrec);
+ if ((tcConnectptr.p->transactionState == TcConnectionrec::COMMITTED) &&
+ (tcConnectptr.p->transid[0] == transid1) &&
+ (tcConnectptr.p->transid[1] == transid2)) {
+ if (tcConnectptr.p->seqNoReplica != 0) {
+ jam();
+ localCommitLab(signal);
+ return;
+ } else {
+ jam();
+ completeTransLastLab(signal);
+ return;
+ }//if
+ }//if
+ if (tcConnectptr.p->transactionState != TcConnectionrec::COMMITTED) {
+ warningReport(signal, 2);
+ } else {
+ warningReport(signal, 3);
+ }//if
+}//Dblqh::execCOMPLETE()
+
+/* ************************************************************************>>
+ * COMPLETEREQ: Complete request from TC. Same as COMPLETE but used if one
+ * node is not working ok (See COMMIT).
+ * ************************************************************************>> */
+void Dblqh::execCOMPLETEREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 reqPtr = signal->theData[0];
+ BlockReference reqBlockref = signal->theData[1];
+ Uint32 transid1 = signal->theData[2];
+ Uint32 transid2 = signal->theData[3];
+ Uint32 tcOprec = signal->theData[5];
+ if (ERROR_INSERTED(5005)) {
+ systemErrorLab(signal);
+ }
+ if (ERROR_INSERTED(5018)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMPLETEREQ, signal, 2000, 6);
+ return;
+ }//if
+ if (findTransaction(transid1,
+ transid2,
+ tcOprec) != ZOK) {
+ jam();
+/*---------------------------------------------------------*/
+/* FOR SOME REASON THE COMPLETE PHASE STARTED AFTER */
+/* A TIME OUT. THE TRANSACTION IS GONE. WE NEED TO */
+/* REPORT COMPLETION ANYWAY. */
+/*---------------------------------------------------------*/
+ signal->theData[0] = reqPtr;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = transid1;
+ signal->theData[3] = transid2;
+ sendSignal(reqBlockref, GSN_COMPLETECONF, signal, 4, JBB);
+ warningReport(signal, 7);
+ return;
+ }//if
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ switch (regTcPtr->transactionState) {
+ case TcConnectionrec::COMMITTED:
+ jam();
+ regTcPtr->reqBlockref = reqBlockref;
+ regTcPtr->reqRef = reqPtr;
+ regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
+ /*empty*/;
+ break;
+/*---------------------------------------------------------*/
+/* THE NORMAL CASE. */
+/*---------------------------------------------------------*/
+ case TcConnectionrec::COMMIT_STOPPED:
+ jam();
+/*---------------------------------------------------------*/
+/* FOR SOME REASON THE COMPLETE PHASE STARTED AFTER */
+/* A TIME OUT. WE HAVE SET THE PROPER VARIABLES SUCH */
+/* THAT A COMPLETECONF WILL BE SENT WHEN COMPLETE IS */
+/* FINISHED. */
+/*---------------------------------------------------------*/
+ regTcPtr->reqBlockref = reqBlockref;
+ regTcPtr->reqRef = reqPtr;
+ regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
+ return;
+ break;
+ default:
+ jam();
+ warningReport(signal, 6);
+ return;
+ break;
+ }//switch
+ if (regTcPtr->seqNoReplica != 0) {
+ jam();
+ localCommitLab(signal);
+ return;
+ } else {
+ jam();
+ completeTransLastLab(signal);
+ return;
+ }//if
+}//Dblqh::execCOMPLETEREQ()
+
+/* ************> */
+/* COMPLETED > */
+/* ************> */
+void Dblqh::execLQHKEYCONF(Signal* signal)
+{
+ LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+ Uint32 tcIndex = lqhKeyConf->opPtr;
+ Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
+ TcConnectionrec *regTcConnectionrec = tcConnectionrec;
+ jamEntry();
+ if (tcIndex >= ttcConnectrecFileSize) {
+ errorReport(signal, 2);
+ return;
+ }//if
+ tcConnectptr.i = tcIndex;
+ ptrAss(tcConnectptr, regTcConnectionrec);
+ switch (tcConnectptr.p->connectState) {
+ case TcConnectionrec::LOG_CONNECTED:
+ jam();
+ completedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::COPY_CONNECTED:
+ jam();
+ copyCompletedLab(signal);
+ return;
+ break;
+ default:
+ jam();
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dblqh::execLQHKEYCONF()
+
+/* ------------------------------------------------------------------------- */
+/* ------- COMMIT PHASE ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::commitReqLab(Signal* signal, Uint32 gci)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ TcConnectionrec::LogWriteState logWriteState = regTcPtr->logWriteState;
+ TcConnectionrec::TransactionState transState = regTcPtr->transactionState;
+ regTcPtr->gci = gci;
+ if (transState == TcConnectionrec::PREPARED) {
+ if (logWriteState == TcConnectionrec::WRITTEN) {
+ jam();
+ regTcPtr->transactionState = TcConnectionrec::PREPARED_RECEIVED_COMMIT;
+ TcConnectionrecPtr saveTcPtr = tcConnectptr;
+ Uint32 blockNo = refToBlock(regTcPtr->tcTupBlockref);
+ signal->theData[0] = regTcPtr->tupConnectrec;
+ signal->theData[1] = gci;
+ EXECUTE_DIRECT(blockNo, GSN_TUP_WRITELOG_REQ, signal, 2);
+ jamEntry();
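+ // The direct execution above can already have advanced the transaction
+ // state (presumably via LQH_WRITELOG_REQ): either queued behind a busy
+ // log part or written to the log, which the checks below distinguish.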
+ if (regTcPtr->transactionState == TcConnectionrec::LOG_COMMIT_QUEUED) {
+ jam();
+ return;
+ }//if
+ ndbrequire(regTcPtr->transactionState == TcConnectionrec::LOG_COMMIT_WRITTEN);
+ tcConnectptr = saveTcPtr;
+ } else if (logWriteState == TcConnectionrec::NOT_STARTED) {
+ jam();
+ } else if (logWriteState == TcConnectionrec::NOT_WRITTEN) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* IT IS A READ OPERATION OR OTHER OPERATION THAT DOES NOT USE THE LOG. */
+/*---------------------------------------------------------------------------*/
+/*---------------------------------------------------------------------------*/
+/* THE LOG HAS NOT BEEN WRITTEN SINCE THE LOG FLAG WAS FALSE. THIS CAN OCCUR */
+/* WHEN WE ARE STARTING A NEW FRAGMENT. */
+/*---------------------------------------------------------------------------*/
+ regTcPtr->logWriteState = TcConnectionrec::NOT_STARTED;
+ } else {
+ ndbrequire(logWriteState == TcConnectionrec::NOT_WRITTEN_WAIT);
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE STATE WAS SET TO NOT_WRITTEN BY THE OPERATION BUT LATER A SCAN OF ALL */
+/* OPERATION RECORDS CHANGED IT INTO NOT_WRITTEN_WAIT. THIS INDICATES THAT WE */
+/* ARE WAITING FOR THIS OPERATION TO COMMIT OR ABORT SO THAT WE CAN FIND THE */
+/* STARTING GLOBAL CHECKPOINT OF THIS NEW FRAGMENT. */
+/*---------------------------------------------------------------------------*/
+ checkScanTcCompleted(signal);
+ }//if
+ } else if (transState == TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL) {
+ jam();
+ regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_QUEUED;
+ return;
+ } else if (transState == TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL) {
+ jam();
+ } else {
+ warningReport(signal, 0);
+ return;
+ }//if
+ if (regTcPtr->seqNoReplica != 0) {
+ jam();
+ commitReplyLab(signal);
+ return;
+ }//if
+ localCommitLab(signal);
+ return;
+}//Dblqh::commitReqLab()
+
+void Dblqh::execLQH_WRITELOG_REQ(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Uint32 gci = signal->theData[1];
+ Uint32 newestGci = cnewestGci;
+ TcConnectionrec::LogWriteState logWriteState = regTcPtr->logWriteState;
+ TcConnectionrec::TransactionState transState = regTcPtr->transactionState;
+ regTcPtr->gci = gci;
+ if (gci > newestGci) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* KEEP TRACK OF NEWEST GLOBAL CHECKPOINT THAT LQH HAS HEARD OF. */
+/* ------------------------------------------------------------------------- */
+ cnewestGci = gci;
+ }//if
+ if (logWriteState == TcConnectionrec::WRITTEN) {
+/*---------------------------------------------------------------------------*/
+/* I NEED TO INSERT A COMMIT LOG RECORD SINCE WE ARE WRITING LOG IN THIS */
+/* TRANSACTION. */
+/*---------------------------------------------------------------------------*/
+ jam();
+ LogPartRecordPtr regLogPartPtr;
+ Uint32 noOfLogPages = cnoOfLogPages;
+ jam();
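+ // Pick one of the four log parts based on the operation's hash value.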
+ regLogPartPtr.i = regTcPtr->hashValue & 3;
+ ptrCheckGuard(regLogPartPtr, clogPartFileSize, logPartRecord);
+ if ((regLogPartPtr.p->logPartState == LogPartRecord::ACTIVE) ||
+ (noOfLogPages == 0)) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS LOG PART WAS CURRENTLY ACTIVE WRITING ANOTHER LOG RECORD. WE MUST */
+/* WAIT UNTIL THIS PART HAS COMPLETED ITS OPERATION. */
+/*---------------------------------------------------------------------------*/
+// We must delay the write of commit info to the log to safe-guard against
+// a crash due to lack of log pages. We temporarily stop all log writes to this
+// log part to ensure that we don't get a buffer explosion in the delayed
+// signal buffer instead.
+/*---------------------------------------------------------------------------*/
+ linkWaitLog(signal, regLogPartPtr);
+ if (transState == TcConnectionrec::PREPARED) {
+ jam();
+ regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL;
+ } else {
+ jam();
+ ndbrequire(transState == TcConnectionrec::PREPARED_RECEIVED_COMMIT);
+ regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_QUEUED;
+ }//if
+ if (regLogPartPtr.p->logPartState == LogPartRecord::IDLE) {
+ jam();
+ regLogPartPtr.p->logPartState = LogPartRecord::ACTIVE;
+ }//if
+ return;
+ }//if
+ writeCommitLog(signal, regLogPartPtr);
+ if (transState == TcConnectionrec::PREPARED) {
+ jam();
+ regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL;
+ } else {
+ jam();
+ ndbrequire(transState == TcConnectionrec::PREPARED_RECEIVED_COMMIT);
+ regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_WRITTEN;
+ }//if
+ }//if
+}//Dblqh::execLQH_WRITELOG_REQ()
+
+void Dblqh::localCommitLab(Signal* signal)
+{
+ FragrecordPtr regFragptr;
+ regFragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+ Fragrecord::FragStatus status = regFragptr.p->fragStatus;
+ fragptr = regFragptr;
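+ // The fragment status decides whether the commit continues immediately or
+ // is queued while the fragment is blocked, e.g. by a local checkpoint.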
+ switch (status) {
+ case Fragrecord::FSACTIVE:
+ case Fragrecord::CRASH_RECOVERING:
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ commitContinueAfterBlockedLab(signal);
+ return;
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::COMMIT_STOPPED;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dblqh::localCommitLab()
+
+void Dblqh::commitContinueAfterBlockedLab(Signal* signal)
+{
+/* ------------------------------------------------------------------------- */
+/*INPUT: TC_CONNECTPTR ACTIVE OPERATION RECORD */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/*CONTINUE HERE AFTER BEING BLOCKED FOR A WHILE DURING LOCAL CHECKPOINT. */
+/*The operation is already removed from the active list since there is no */
+/*chance for any real-time breaks before we need to release it. */
+/* ------------------------------------------------------------------------- */
+/*ALSO AFTER NORMAL PROCEDURE WE CONTINUE */
+/*WE MUST COMMIT TUP BEFORE ACC TO ENSURE THAT NO ONE RACES IN AND SEES A */
+/*DIRTY STATE IN TUP. */
+/* ------------------------------------------------------------------------- */
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Fragrecord * const regFragptr = fragptr.p;
+ Uint32 operation = regTcPtr->operation;
+ Uint32 simpleRead = regTcPtr->simpleRead;
+ Uint32 dirtyOp = regTcPtr->dirtyOp;
+ if (regTcPtr->activeCreat == ZFALSE) {
+ if ((cCommitBlocked == true) &&
+ (regFragptr->fragActiveStatus == ZTRUE)) {
+ jam();
+/* ------------------------------------------------------------------------- */
+// TUP and/or ACC have problems in writing the undo log to disk fast enough.
+// We must avoid the commit at this time and try later instead. The fragment
+// is also active with a local checkpoint and this commit can generate UNDO
+// log records that overflow the UNDO log buffer.
+/* ------------------------------------------------------------------------- */
+/*---------------------------------------------------------------------------*/
+// We must delay the write of commit info to the log to safe-guard against
+// a crash due to lack of log pages. We temporarily stop all log writes to this
+// log part to ensure that we don't get a buffer explosion in the delayed
+// signal buffer instead.
+/*---------------------------------------------------------------------------*/
+ logPartPtr.i = regTcPtr->hashValue & 3;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ linkWaitLog(signal, logPartPtr);
+ regTcPtr->transactionState = TcConnectionrec::COMMIT_QUEUED;
+ if (logPartPtr.p->logPartState == LogPartRecord::IDLE) {
+ jam();
+ logPartPtr.p->logPartState = LogPartRecord::ACTIVE;
+ }//if
+ return;
+ }//if
+ if (operation != ZREAD) {
+ TupCommitReq * const tupCommitReq =
+ (TupCommitReq *)signal->getDataPtrSend();
+ Uint32 sig0 = regTcPtr->tupConnectrec;
+ Uint32 tup = refToBlock(regTcPtr->tcTupBlockref);
+ jam();
+ tupCommitReq->opPtr = sig0;
+ tupCommitReq->gci = regTcPtr->gci;
+ tupCommitReq->hashValue = regTcPtr->hashValue;
+ EXECUTE_DIRECT(tup, GSN_TUP_COMMITREQ, signal,
+ TupCommitReq::SignalLength);
+ Uint32 acc = refToBlock(regTcPtr->tcAccBlockref);
+ signal->theData[0] = regTcPtr->accConnectrec;
+ EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1);
+ } else {
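+ // Reads have nothing to commit in TUP; only a non-dirty read goes to ACC,
+ // presumably to release the lock taken during the prepare phase.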
+ if(!dirtyOp){
+ Uint32 acc = refToBlock(regTcPtr->tcAccBlockref);
+ signal->theData[0] = regTcPtr->accConnectrec;
+ EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1);
+ }
+ }
+ jamEntry();
+ if (simpleRead) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*THE OPERATION WAS A SIMPLE READ THUS THE COMMIT PHASE IS ONLY NEEDED TO */
+/*RELEASE THE LOCKS. AT THIS POINT IN THE CODE THE LOCKS ARE RELEASED AND WE */
+/*ARE IN A POSITION TO SEND LQHKEYCONF TO TC. WE WILL ALSO RELEASE ALL */
+/*RESOURCES BELONGING TO THIS OPERATION SINCE NO MORE WORK WILL BE */
+/*PERFORMED. */
+/* ------------------------------------------------------------------------- */
+ cleanUp(signal);
+ return;
+ }//if
+ }//if
+ Uint32 seqNoReplica = regTcPtr->seqNoReplica;
+ if (regTcPtr->gci > regFragptr->newestGci) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*IT IS THE FIRST TIME THIS GLOBAL CHECKPOINT IS INVOLVED IN UPDATING THIS */
+/*FRAGMENT. UPDATE THE VARIABLE THAT KEEPS TRACK OF NEWEST GCI IN FRAGMENT */
+/* ------------------------------------------------------------------------- */
+ regFragptr->newestGci = regTcPtr->gci;
+ }//if
+ if (dirtyOp != ZTRUE) {
+ if (seqNoReplica != 0) {
+ jam();
+ completeTransNotLastLab(signal);
+ return;
+ }//if
+ commitReplyLab(signal);
+ return;
+ } else {
+/* ------------------------------------------------------------------------- */
+/*WE MUST HANDLE DIRTY WRITES IN A SPECIAL WAY. THESE OPERATIONS WILL NOT */
+/*SEND ANY COMMIT OR COMPLETE MESSAGES TO OTHER NODES. THEY WILL MERELY SEND */
+/*THOSE SIGNALS INTERNALLY. */
+/* ------------------------------------------------------------------------- */
+ if (regTcPtr->abortState == TcConnectionrec::ABORT_IDLE) {
+ jam();
+ packLqhkeyreqLab(signal);
+ } else {
+ ndbrequire(regTcPtr->abortState != TcConnectionrec::NEW_FROM_TC);
+ jam();
+ sendLqhTransconf(signal, LqhTransConf::Committed);
+ cleanUp(signal);
+ }//if
+ }//if
+}//Dblqh::commitContinueAfterBlockedLab()
+
+void Dblqh::commitReplyLab(Signal* signal)
+{
+/* -------------------------------------------------------------- */
+/* BACKUP AND STAND-BY REPLICAS ONLY UPDATE THE TRANSACTION STATE */
+/* -------------------------------------------------------------- */
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ TcConnectionrec::AbortState abortState = regTcPtr->abortState;
+ regTcPtr->transactionState = TcConnectionrec::COMMITTED;
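+ // In the normal case the reply depends on the replica's position in the
+ // chain: seqNoReplica == 0 reports COMMITTED back to TC, other replicas
+ // pass the commit on to the next LQH via the client block reference.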
+ if (abortState == TcConnectionrec::ABORT_IDLE) {
+ Uint32 clientBlockref = regTcPtr->clientBlockref;
+ if (regTcPtr->seqNoReplica == 0) {
+ jam();
+ sendCommittedTc(signal, clientBlockref);
+ return;
+ } else {
+ jam();
+ sendCommitLqh(signal, clientBlockref);
+ return;
+ }//if
+ } else if (regTcPtr->abortState == TcConnectionrec::REQ_FROM_TC) {
+ jam();
+ signal->theData[0] = regTcPtr->reqRef;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = regTcPtr->transid[0];
+ signal->theData[3] = regTcPtr->transid[1];
+ sendSignal(tcConnectptr.p->reqBlockref, GSN_COMMITCONF, signal, 4, JBB);
+ } else {
+ ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC);
+ jam();
+ sendLqhTransconf(signal, LqhTransConf::Committed);
+ }//if
+ return;
+}//Dblqh::commitReplyLab()
+
+/* ------------------------------------------------------------------------- */
+/* ------- COMPLETE PHASE ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::completeTransNotLastLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->abortState == TcConnectionrec::ABORT_IDLE) {
+ Uint32 clientBlockref = regTcPtr->clientBlockref;
+ jam();
+ sendCompleteLqh(signal, clientBlockref);
+ cleanUp(signal);
+ return;
+ } else {
+ jam();
+ completeUnusualLab(signal);
+ return;
+ }//if
+}//Dblqh::completeTransNotLastLab()
+
+void Dblqh::completeTransLastLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->abortState == TcConnectionrec::ABORT_IDLE) {
+ Uint32 clientBlockref = regTcPtr->clientBlockref;
+ jam();
+/* ------------------------------------------------------------------------- */
+/*DIRTY WRITES WHICH ARE LAST IN THE CHAIN OF REPLICAS WILL SEND COMPLETED */
+/*INSTEAD OF SENDING PREPARED TO THE TC (OR OTHER INITIATOR OF OPERATION). */
+/* ------------------------------------------------------------------------- */
+ sendCompletedTc(signal, clientBlockref);
+ cleanUp(signal);
+ return;
+ } else {
+ jam();
+ completeUnusualLab(signal);
+ return;
+ }//if
+}//Dblqh::completeTransLastLab()
+
+void Dblqh::completeUnusualLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->abortState == TcConnectionrec::ABORT_FROM_TC) {
+ jam();
+ sendAborted(signal);
+ } else if (regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC) {
+ jam();
+ sendLqhTransconf(signal, LqhTransConf::Committed);
+ } else {
+ ndbrequire(regTcPtr->abortState == TcConnectionrec::REQ_FROM_TC);
+ jam();
+ signal->theData[0] = regTcPtr->reqRef;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = regTcPtr->transid[0];
+ signal->theData[3] = regTcPtr->transid[1];
+ sendSignal(regTcPtr->reqBlockref,
+ GSN_COMPLETECONF, signal, 4, JBB);
+ }//if
+ cleanUp(signal);
+ return;
+}//Dblqh::completeUnusualLab()
+
+/* ========================================================================= */
+/* ======= RELEASE TC CONNECT RECORD ======= */
+/* */
+/* RELEASE A TC CONNECT RECORD TO THE FREELIST. */
+/* ========================================================================= */
+void Dblqh::releaseTcrec(Signal* signal, TcConnectionrecPtr locTcConnectptr)
+{
+ jam();
+ locTcConnectptr.p->tcTimer = 0;
+ locTcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED;
+ locTcConnectptr.p->nextTcConnectrec = cfirstfreeTcConrec;
+ cfirstfreeTcConrec = locTcConnectptr.i;
+
+ TablerecPtr tabPtr;
+ tabPtr.i = locTcConnectptr.p->tableref;
+ if(tabPtr.i == RNIL)
+ return;
+
+ ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
+
+ /**
+ * Normal case
+ */
+ ndbrequire(tabPtr.p->usageCount > 0);
+ tabPtr.p->usageCount--;
+}//Dblqh::releaseTcrec()
+
+void Dblqh::releaseTcrecLog(Signal* signal, TcConnectionrecPtr locTcConnectptr)
+{
+ jam();
+ locTcConnectptr.p->tcTimer = 0;
+ locTcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED;
+ locTcConnectptr.p->nextTcConnectrec = cfirstfreeTcConrec;
+ cfirstfreeTcConrec = locTcConnectptr.i;
+
+ TablerecPtr tabPtr;
+ tabPtr.i = locTcConnectptr.p->tableref;
+ if(tabPtr.i == RNIL)
+ return;
+
+}//Dblqh::releaseTcrecLog()
+
+/* ------------------------------------------------------------------------- */
+/* ------- ABORT PHASE ------- */
+/* */
+/*THIS PART IS USED AT ERRORS THAT CAUSE ABORT OF TRANSACTION. */
+/* ------------------------------------------------------------------------- */
+/* ***************************************************>> */
+/* ABORT: Abort transaction in connection. Sender TC. */
+/* This is the normal protocol (See COMMIT) */
+/* ***************************************************>> */
+void Dblqh::execABORT(Signal* signal)
+{
+ jamEntry();
+ Uint32 tcOprec = signal->theData[0];
+ BlockReference tcBlockref = signal->theData[1];
+ Uint32 transid1 = signal->theData[2];
+ Uint32 transid2 = signal->theData[3];
+ CRASH_INSERTION(5003);
+ if (ERROR_INSERTED(5015)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_ABORT, signal, 2000, 4);
+ return;
+ }//if
+ if (findTransaction(transid1,
+ transid2,
+ tcOprec) != ZOK) {
+ jam();
+
+ if(ERROR_INSERTED(5039) &&
+ refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
+ jam();
+ SET_ERROR_INSERT_VALUE(5040);
+ return;
+ }
+
+ if(ERROR_INSERTED(5040) &&
+ refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
+ jam();
+ SET_ERROR_INSERT_VALUE(5003);
+ return;
+ }
+
+/* ------------------------------------------------------------------------- */
+// SEND ABORTED EVEN IF NOT FOUND.
+//THE TRANSACTION MIGHT NEVER HAVE ARRIVED HERE.
+/* ------------------------------------------------------------------------- */
+ signal->theData[0] = tcOprec;
+ signal->theData[1] = transid1;
+ signal->theData[2] = transid2;
+ signal->theData[3] = cownNodeid;
+ signal->theData[4] = ZTRUE;
+ sendSignal(tcBlockref, GSN_ABORTED, signal, 5, JBB);
+ warningReport(signal, 8);
+ return;
+ }//if
+/* ------------------------------------------------------------------------- */
+/*A GUIDING DESIGN PRINCIPLE IN HANDLING THESE ERROR SITUATIONS HAS BEEN TO */
+/*KEEP IT SIMPLE. THUS WE RATHER INSERT A WAIT AND SET THE ABORT_STATE TO */
+/*ACTIVE THAN WRITE NEW CODE TO HANDLE EVERY SPECIAL SITUATION. */
+/* ------------------------------------------------------------------------- */
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->nextReplica != ZNIL) {
+/* ------------------------------------------------------------------------- */
+// We will immediately send the ABORT message also to the next LQH node in line.
+/* ------------------------------------------------------------------------- */
+ BlockReference TLqhRef = calcLqhBlockRef(regTcPtr->nextReplica);
+ signal->theData[0] = regTcPtr->tcOprec;
+ signal->theData[1] = regTcPtr->tcBlockref;
+ signal->theData[2] = regTcPtr->transid[0];
+ signal->theData[3] = regTcPtr->transid[1];
+ sendSignal(TLqhRef, GSN_ABORT, signal, 4, JBB);
+ }//if
+ regTcPtr->abortState = TcConnectionrec::ABORT_FROM_TC;
+ regTcPtr->activeCreat = ZFALSE;
+
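+ // Any commit-ack marker held for this operation is no longer needed once
+ // the transaction aborts, so release it back to the marker hash.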
+ const Uint32 commitAckMarker = regTcPtr->commitAckMarker;
+ if(commitAckMarker != RNIL){
+ jam();
+#ifdef MARKER_TRACE
+ {
+ CommitAckMarkerPtr tmp;
+ m_commitAckMarkerHash.getPtr(tmp, commitAckMarker);
+ ndbout_c("Ab2 marker[%.8x %.8x]", tmp.p->transid1, tmp.p->transid2);
+ }
+#endif
+ m_commitAckMarkerHash.release(commitAckMarker);
+ regTcPtr->commitAckMarker = RNIL;
+ }
+
+ abortStateHandlerLab(signal);
+
+ return;
+}//Dblqh::execABORT()
+
+/* ************************************************************************>>
+ * ABORTREQ: Same as ABORT but used in case one node isn't working ok.
+ * (See COMMITREQ)
+ * ************************************************************************>> */
+void Dblqh::execABORTREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 reqPtr = signal->theData[0];
+ BlockReference reqBlockref = signal->theData[1];
+ Uint32 transid1 = signal->theData[2];
+ Uint32 transid2 = signal->theData[3];
+ Uint32 tcOprec = signal->theData[5];
+ if (ERROR_INSERTED(5006)) {
+ systemErrorLab(signal);
+ }
+ if (ERROR_INSERTED(5016)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_ABORTREQ, signal, 2000, 6);
+ return;
+ }//if
+ if (findTransaction(transid1,
+ transid2,
+ tcOprec) != ZOK) {
+ signal->theData[0] = reqPtr;
+ signal->theData[2] = cownNodeid;
+ signal->theData[3] = transid1;
+ signal->theData[4] = transid2;
+ sendSignal(reqBlockref, GSN_ABORTCONF, signal, 5, JBB);
+ warningReport(signal, 9);
+ return;
+ }//if
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->transactionState != TcConnectionrec::PREPARED) {
+ warningReport(signal, 10);
+ return;
+ }//if
+ regTcPtr->reqBlockref = reqBlockref;
+ regTcPtr->reqRef = reqPtr;
+ regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
+ regTcPtr->activeCreat = ZFALSE;
+ abortCommonLab(signal);
+ return;
+}//Dblqh::execABORTREQ()
+
+/* ************>> */
+/* ACC_TO_REF > */
+/* ************>> */
+void Dblqh::execACC_TO_REF(Signal* signal)
+{
+ jamEntry();
+ terrorCode = signal->theData[1];
+ releaseActiveFrag(signal);
+ abortErrorLab(signal);
+ return;
+}//Dblqh::execACC_TO_REF()
+
+/* ************> */
+/* ACCKEYREF > */
+/* ************> */
+void Dblqh::execACCKEYREF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ terrorCode = signal->theData[1];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ TcConnectionrec * const tcPtr = tcConnectptr.p;
+ switch (tcPtr->transactionState) {
+ case TcConnectionrec::WAIT_ACC:
+ jam();
+ releaseActiveFrag(signal);
+ break;
+ case TcConnectionrec::WAIT_ACC_ABORT:
+ case TcConnectionrec::ABORT_STOPPED:
+ case TcConnectionrec::ABORT_QUEUED:
+ jam();
+/* ------------------------------------------------------------------------- */
+/*IGNORE SINCE ABORT OF THIS OPERATION IS ONGOING ALREADY. */
+/* ------------------------------------------------------------------------- */
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ const Uint32 errCode = terrorCode;
+ tcPtr->errorCode = errCode;
+/* ------------------------------------------------------------------------- */
+/*WHEN AN ABORT FROM TC ARRIVES IT COULD ACTUALLY BE CORRECT BEHAVIOUR */
+/*SINCE THE TUPLE MIGHT NOT HAVE ARRIVED YET OR ALREADY HAVE BEEN INSERTED. */
+/* ------------------------------------------------------------------------- */
+ if (tcPtr->activeCreat == ZTRUE) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*THIS IS A NORMAL EVENT DURING CREATION OF A FRAGMENT. PERFORM ABORT IN */
+/*TUP AND ACC AND THEN CONTINUE WITH NORMAL COMMIT PROCESSING. IF THE ERROR */
+/*HAPPENS TO BE A SERIOUS ERROR THEN PERFORM ABORT PROCESSING AS NORMAL. */
+/* ------------------------------------------------------------------------- */
+ switch (tcPtr->operation) {
+ case ZUPDATE:
+ case ZDELETE:
+ jam();
+ if (errCode != ZNO_TUPLE_FOUND) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*A NORMAL ERROR WILL BE TREATED AS A NORMAL ABORT AND WILL ABORT THE */
+/*TRANSACTION. NO SPECIAL HANDLING IS NEEDED. */
+/* ------------------------------------------------------------------------- */
+ tcPtr->activeCreat = ZFALSE;
+ }//if
+ break;
+ case ZINSERT:
+ jam();
+ if (errCode != ZTUPLE_ALREADY_EXIST) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*A NORMAL ERROR WILL BE TREATED AS A NORMAL ABORT AND WILL ABORT THE */
+/*TRANSACTION. NO SPECIAL HANDLING IS NEEDED. */
+/* ------------------------------------------------------------------------- */
+ tcPtr->activeCreat = ZFALSE;
+ }//if
+ break;
+ default:
+ jam();
+/* ------------------------------------------------------------------------- */
+/*A NORMAL ERROR WILL BE TREATED AS A NORMAL ABORT AND WILL ABORT THE */
+/*TRANSACTION. NO SPECIAL HANDLING IS NEEDED. */
+/* ------------------------------------------------------------------------- */
+ tcPtr->activeCreat = ZFALSE;
+ break;
+ }//switch
+ } else {
+ /**
+ * Only primary replica can get ZTUPLE_ALREADY_EXIST || ZNO_TUPLE_FOUND
+ *
+ * Unless it's a simple or dirty read
+ *
+ * NOT TRUE!
+ * 1) op1 - primary insert ok
+ * 2) op1 - backup insert fail (log full or whatever)
+ * 3) op1 - delete ok @ primary
+ * 4) op1 - delete fail @ backup
+ *
+ * -> ZNO_TUPLE_FOUND is possible
+ */
+ ndbrequire
+ (tcPtr->seqNoReplica == 0 ||
+ errCode != ZTUPLE_ALREADY_EXIST ||
+ (tcPtr->operation == ZREAD && (tcPtr->dirtyOp || tcPtr->opSimple)));
+ }
+ tcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH;
+ abortCommonLab(signal);
+ return;
+}//Dblqh::execACCKEYREF()
+
+void Dblqh::localAbortStateHandlerLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->abortState != TcConnectionrec::ABORT_IDLE) {
+ jam();
+ return;
+ }//if
+ regTcPtr->activeCreat = ZFALSE;
+ regTcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH;
+ regTcPtr->errorCode = terrorCode;
+ abortStateHandlerLab(signal);
+ return;
+}//Dblqh::localAbortStateHandlerLab()
+
+void Dblqh::abortStateHandlerLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ switch (regTcPtr->transactionState) {
+ case TcConnectionrec::PREPARED:
+ jam();
+/* ------------------------------------------------------------------------- */
+/*THE OPERATION IS ALREADY PREPARED AND SENT TO THE NEXT LQH OR BACK TO TC. */
+/*WE CAN SIMPLY CONTINUE WITH THE ABORT PROCESS. */
+/*IF IT WAS A CHECK FOR TRANSACTION STATUS THEN WE REPORT THE STATUS TO THE */
+/*NEW TC AND CONTINUE WITH THE NEXT OPERATION IN LQH. */
+/* ------------------------------------------------------------------------- */
+ if (regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC) {
+ jam();
+ sendLqhTransconf(signal, LqhTransConf::Prepared);
+ return;
+ }//if
+ break;
+ case TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL:
+ case TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL:
+ jam();
+/* ------------------------------------------------------------------------- */
+// We can only reach these states for multi-updates on a record in a transaction.
+// We know that at least one of those has received the COMMIT signal, thus we
+// declare ourselves only prepared since we will then receive the expected COMMIT signal.
+/* ------------------------------------------------------------------------- */
+ ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC);
+ sendLqhTransconf(signal, LqhTransConf::Prepared);
+ break;
+ case TcConnectionrec::WAIT_TUPKEYINFO:
+ case TcConnectionrec::WAIT_ATTR:
+ jam();
+/* ------------------------------------------------------------------------- */
+/* WE ARE CURRENTLY WAITING FOR MORE INFORMATION. WE CAN START THE ABORT */
+/* PROCESS IMMEDIATELY. THE KEYINFO AND ATTRINFO SIGNALS WILL BE DROPPED */
+/* SINCE THE ABORT STATE WILL BE SET. */
+/* ------------------------------------------------------------------------- */
+ break;
+ case TcConnectionrec::WAIT_TUP:
+ jam();
+/* ------------------------------------------------------------------------- */
+// TUP is currently active. We have to wait for the TUPKEYREF or TUPKEYCONF
+// to arrive since we might otherwise jeopardise the local checkpoint
+// consistency in overload situations.
+/* ------------------------------------------------------------------------- */
+ regTcPtr->transactionState = TcConnectionrec::WAIT_TUP_TO_ABORT;
+ return;
+ case TcConnectionrec::WAIT_ACC:
+ jam();
+ if (regTcPtr->listState == TcConnectionrec::ACC_BLOCK_LIST) {
+ jam();
+/* ------------------------------------------------------------------------- */
+// If the operation is in the ACC Blocked list the operation is not allowed
+// to start yet. We release it from the ACC Blocked list and will go through
+// the gate in abortCommonLab(..) where it will be blocked.
+/* ------------------------------------------------------------------------- */
+ fragptr.i = regTcPtr->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ releaseAccList(signal);
+ } else {
+ jam();
+/* ------------------------------------------------------------------------- */
+// We start the abort immediately since the operation is still in the active
+// list and the fragment cannot have been frozen yet. By sending LCP_HOLDOPCONF
+// as direct signals we avoid the problem that we might find the operation
+// in an unexpected list in ACC.
+// We cannot accept being blocked before aborting ACC here since that would
+// lead to seriously complex issues.
+/* ------------------------------------------------------------------------- */
+ abortContinueAfterBlockedLab(signal, false);
+ return;
+ }//if
+ break;
+ case TcConnectionrec::LOG_QUEUED:
+ jam();
+/* ------------------------------------------------------------------------- */
+/*CURRENTLY QUEUED FOR LOGGING. WAIT UNTIL THE LOG RECORD HAS BEEN INSERTED */
+/*AND THEN CONTINUE THE ABORT PROCESS. */
+//Could also be waiting for an overloaded log disk. In this case it is easy
+//to abort when CONTINUEB arrives.
+/* ------------------------------------------------------------------------- */
+ return;
+ break;
+ case TcConnectionrec::STOPPED:
+ jam();
+ /* ---------------------------------------------------------------------
+ * WE ARE CURRENTLY QUEUED FOR ACCESS TO THE FRAGMENT BY AN LCP.
+ * Since nothing has been done, just release the operation,
+ * i.e. no prepare log record has been written
+ * so no abort log record needs to be written.
+ */
+ releaseWaitQueue(signal);
+ continueAfterLogAbortWriteLab(signal);
+ return;
+ break;
+ case TcConnectionrec::WAIT_AI_AFTER_ABORT:
+ jam();
+/* ------------------------------------------------------------------------- */
+/* ABORT OF ACC AND TUP ALREADY COMPLETED. THIS STATE IS ONLY USED WHEN */
+/* CREATING A NEW FRAGMENT. */
+/* ------------------------------------------------------------------------- */
+ continueAbortLab(signal);
+ return;
+ break;
+ case TcConnectionrec::WAIT_TUP_TO_ABORT:
+ case TcConnectionrec::ABORT_STOPPED:
+ case TcConnectionrec::LOG_ABORT_QUEUED:
+ case TcConnectionrec::WAIT_ACC_ABORT:
+ case TcConnectionrec::ABORT_QUEUED:
+ jam();
+/* ------------------------------------------------------------------------- */
+/*ABORT IS ALREADY ONGOING DUE TO SOME ERROR. WE HAVE ALREADY SET THE STATE */
+/*OF THE ABORT SO THAT WE KNOW THAT TC EXPECTS A REPORT. WE CAN THUS SIMPLY */
+/*EXIT. */
+/* ------------------------------------------------------------------------- */
+ return;
+ break;
+ case TcConnectionrec::COMMIT_STOPPED:
+ case TcConnectionrec::LOG_COMMIT_QUEUED:
+ case TcConnectionrec::COMMIT_QUEUED:
+ jam();
+/* ------------------------------------------------------------------------- */
+/*THIS IS ONLY AN ALLOWED STATE IF A DIRTY WRITE OR SIMPLE READ IS PERFORMED.*/
+/*IF WE ARE MERELY CHECKING THE TRANSACTION STATE IT IS ALSO AN ALLOWED STATE*/
+/* ------------------------------------------------------------------------- */
+ if (regTcPtr->dirtyOp == ZTRUE) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*COMPLETE THE DIRTY WRITE AND THEN REPORT COMPLETED BACK TO TC. SINCE IT IS */
+/*A DIRTY WRITE IT IS ALLOWED TO COMMIT EVEN IF THE TRANSACTION ABORTS. */
+/* ------------------------------------------------------------------------- */
+ return;
+ }//if
+ if (regTcPtr->simpleRead) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*A SIMPLE READ IS CURRENTLY RELEASING THE LOCKS OR WAITING FOR ACCESS TO */
+/*ACC TO CLEAR THE LOCKS. COMPLETE THIS PROCESS AND THEN RETURN AS NORMAL. */
+/*NO DATA HAS CHANGED DUE TO THIS SIMPLE READ ANYWAY. */
+/* ------------------------------------------------------------------------- */
+ return;
+ }//if
+ ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC);
+ jam();
+/* ------------------------------------------------------------------------- */
+/*WE ARE ONLY CHECKING THE STATUS OF THE TRANSACTION. IT IS COMMITTING. */
+/*COMPLETE THE COMMIT LOCALLY AND THEN SEND REPORT OF COMMITTED TO THE NEW TC*/
+/* ------------------------------------------------------------------------- */
+ return;
+ break;
+ case TcConnectionrec::COMMITTED:
+ jam();
+ ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC);
+/* ------------------------------------------------------------------------- */
+/*WE ARE CHECKING TRANSACTION STATUS. REPORT COMMITTED AND CONTINUE WITH THE */
+/*NEXT OPERATION. */
+/* ------------------------------------------------------------------------- */
+ sendLqhTransconf(signal, LqhTransConf::Committed);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+/* ------------------------------------------------------------------------- */
+/*THE STATE WAS NOT AN ALLOWED STATE ON A NORMAL OPERATION. SCANS AND COPY */
+/*FRAGMENT OPERATIONS SHOULD HAVE EXECUTED IN ANOTHER PATH. */
+/* ------------------------------------------------------------------------- */
+ break;
+ }//switch
+ abortCommonLab(signal);
+ return;
+}//Dblqh::abortStateHandlerLab()
+
+void Dblqh::abortErrorLab(Signal* signal)
+{
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->abortState == TcConnectionrec::ABORT_IDLE) {
+ jam();
+ regTcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH;
+ regTcPtr->errorCode = terrorCode;
+ }//if
+ /* -----------------------------------------------------------------------
+ * ACTIVE CREATION IS RESET FOR ALL ERRORS WHICH SHOULD BE HANDLED
+ * WITH NORMAL ABORT HANDLING.
+ * ----------------------------------------------------------------------- */
+ regTcPtr->activeCreat = ZFALSE;
+ abortCommonLab(signal);
+ return;
+}//Dblqh::abortErrorLab()
+
+void Dblqh::abortCommonLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ const Uint32 commitAckMarker = regTcPtr->commitAckMarker;
+ if(regTcPtr->activeCreat != ZTRUE && commitAckMarker != RNIL){
+ /**
+ * There is no NR ongoing and we have a marker
+ */
+ jam();
+#ifdef MARKER_TRACE
+ {
+ CommitAckMarkerPtr tmp;
+ m_commitAckMarkerHash.getPtr(tmp, commitAckMarker);
+ ndbout_c("Abo marker[%.8x %.8x]", tmp.p->transid1, tmp.p->transid2);
+ }
+#endif
+ m_commitAckMarkerHash.release(commitAckMarker);
+ regTcPtr->commitAckMarker = RNIL;
+ }
+
+ fragptr.i = regTcPtr->fragmentptr;
+ if (fragptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ case Fragrecord::CRASH_RECOVERING:
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ linkActiveFrag(signal);
+ abortContinueAfterBlockedLab(signal, true);
+ return;
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ regTcPtr->transactionState = TcConnectionrec::ABORT_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ } else {
+ jam();
+ continueAbortLab(signal);
+ }//if
+}//Dblqh::abortCommonLab()
+
+void Dblqh::abortContinueAfterBlockedLab(Signal* signal, bool canBlock)
+{
+ /* ------------------------------------------------------------------------
+ * INPUT: TC_CONNECTPTR ACTIVE OPERATION RECORD
+ * ------------------------------------------------------------------------
+ * ------------------------------------------------------------------------
+ * CAN COME HERE AS RESTART AFTER BEING BLOCKED BY A LOCAL CHECKPOINT.
+ * ------------------------------------------------------------------------
+ * ALSO AS PART OF A NORMAL ABORT WITHOUT BLOCKING.
+ * WE MUST ABORT TUP BEFORE ACC TO ENSURE THAT NO ONE RACES IN
+ * AND SEES A DIRTY STATE IN TUP.
+ * ------------------------------------------------------------------------ */
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ fragptr.i = regTcPtr->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if ((cCommitBlocked == true) &&
+ (fragptr.p->fragActiveStatus == ZTRUE) &&
+ (canBlock == true) &&
+ (regTcPtr->operation != ZREAD)) {
+ jam();
+/* ------------------------------------------------------------------------- */
+// TUP and/or ACC have problems in writing the undo log to disk fast enough.
+// We must avoid the abort at this time and try later instead. The fragment
+// is also active with a local checkpoint and this abort can generate UNDO
+// log records that overflow the UNDO log buffer.
+//
+// In certain situations it is simply too complex to insert a wait state here
+// since ACC is active and we cannot release the operation from the active
+// list without causing great complexity.
+/* ------------------------------------------------------------------------- */
+/*---------------------------------------------------------------------------*/
+// We must delay the write of abort info to the log to safe-guard against
+// a crash due to lack of log pages. We temporarily stop all log writes to this
+// log part to ensure that we don't get a buffer explosion in the delayed
+// signal buffer instead.
+/*---------------------------------------------------------------------------*/
+ releaseActiveFrag(signal);
+ logPartPtr.i = regTcPtr->hashValue & 3;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ linkWaitLog(signal, logPartPtr);
+ regTcPtr->transactionState = TcConnectionrec::ABORT_QUEUED;
+ if (logPartPtr.p->logPartState == LogPartRecord::IDLE) {
+ jam();
+ logPartPtr.p->logPartState = LogPartRecord::ACTIVE;
+ }//if
+ return;
+ }//if
+ signal->theData[0] = regTcPtr->tupConnectrec;
+ EXECUTE_DIRECT(DBTUP, GSN_TUP_ABORTREQ, signal, 1);
+ regTcPtr->transactionState = TcConnectionrec::WAIT_ACC_ABORT;
+ signal->theData[0] = regTcPtr->accConnectrec;
+ EXECUTE_DIRECT(DBACC, GSN_ACC_ABORTREQ, signal, 1);
+ /* ------------------------------------------------------------------------
+ * We need to insert a real-time break by sending ACC_ABORTCONF through the
+ * job buffer to ensure that we catch any ACCKEYCONF or TUPKEYCONF or
+ * TUPKEYREF that are in the job buffer but not yet processed. Doing
+ * everything without that would race and create a state error when they
+ * are executed.
+ * ----------------------------------------------------------------------- */
+ return;
+}//Dblqh::abortContinueAfterBlockedLab()
+
+/* ******************>> */
+/* ACC_ABORTCONF > */
+/* ******************>> */
+void Dblqh::execACC_ABORTCONF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ ndbrequire(regTcPtr->transactionState == TcConnectionrec::WAIT_ACC_ABORT);
+ if (regTcPtr->activeCreat == ZTRUE) {
+ /* ----------------------------------------------------------------------
+ * A NORMAL EVENT DURING CREATION OF A FRAGMENT. WE NOW NEED TO CONTINUE
+ * WITH NORMAL COMMIT PROCESSING.
+ * ---------------------------------------------------------------------- */
+ if (regTcPtr->currTupAiLen == regTcPtr->totReclenAi) {
+ jam();
+ regTcPtr->abortState = TcConnectionrec::ABORT_IDLE;
+ rwConcludedLab(signal);
+ return;
+ } else {
+ ndbrequire(regTcPtr->currTupAiLen < regTcPtr->totReclenAi);
+ jam();
+ releaseActiveFrag(signal);
+ regTcPtr->transactionState = TcConnectionrec::WAIT_AI_AFTER_ABORT;
+ return;
+ }//if
+ }//if
+ releaseActiveFrag(signal);
+ continueAbortLab(signal);
+ return;
+}//Dblqh::execACC_ABORTCONF()
+
+void Dblqh::continueAbortLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ /* ------------------------------------------------------------------------
+ * AN ERROR OCCURRED IN THE ACTIVE CREATION AFTER THE ABORT PHASE.
+ * WE NEED TO CONTINUE WITH A NORMAL ABORT.
+ * ------------------------------------------------------------------------
+ * ALSO USED FOR NORMAL CLEAN UP AFTER A NORMAL ABORT.
+ * ------------------------------------------------------------------------
+ * ALSO USED WHEN NO FRAGMENT WAS SET UP ON OPERATION.
+ * ------------------------------------------------------------------------ */
+ if (regTcPtr->logWriteState == TcConnectionrec::WRITTEN) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * I NEED TO INSERT A ABORT LOG RECORD SINCE WE ARE WRITING LOG IN THIS
+ * TRANSACTION.
+ * ---------------------------------------------------------------------- */
+ initLogPointers(signal);
+ if (logPartPtr.p->logPartState == LogPartRecord::ACTIVE) {
+ jam();
+ /* --------------------------------------------------------------------
+ * A PREPARE OPERATION IS CURRENTLY WRITING IN THE LOG.
+ * WE MUST WAIT ON OUR TURN TO WRITE THE LOG.
+ * IT IS NECESSARY TO WRITE ONE LOG RECORD COMPLETELY
+ * AT A TIME OTHERWISE WE WILL SCRAMBLE THE LOG.
+ * -------------------------------------------------------------------- */
+ linkWaitLog(signal, logPartPtr);
+ regTcPtr->transactionState = TcConnectionrec::LOG_ABORT_QUEUED;
+ return;
+ }//if
+ if (cnoOfLogPages == 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+// We must delay the write of commit info to the log to safe-guard against
+// a crash due to lack of log pages. We temporarily stop all log writes to this
+// log part to ensure that we don't get a buffer explosion in the delayed
+// signal buffer instead.
+/*---------------------------------------------------------------------------*/
+ linkWaitLog(signal, logPartPtr);
+ regTcPtr->transactionState = TcConnectionrec::LOG_ABORT_QUEUED;
+ if (logPartPtr.p->logPartState == LogPartRecord::IDLE) {
+ jam();
+ logPartPtr.p->logPartState = LogPartRecord::ACTIVE;
+ }//if
+ return;
+ }//if
+ writeAbortLog(signal);
+ removeLogTcrec(signal);
+ } else if (regTcPtr->logWriteState == TcConnectionrec::NOT_STARTED) {
+ jam();
+ } else if (regTcPtr->logWriteState == TcConnectionrec::NOT_WRITTEN) {
+ jam();
+ /* ------------------------------------------------------------------
+ * IT IS A READ OPERATION OR OTHER OPERATION THAT DOES NOT USE THE LOG.
+ * ------------------------------------------------------------------ */
+ /* ------------------------------------------------------------------
+ * THE LOG HAS NOT BEEN WRITTEN SINCE THE LOG FLAG WAS FALSE.
+ * THIS CAN OCCUR WHEN WE ARE STARTING A NEW FRAGMENT.
+ * ------------------------------------------------------------------ */
+ regTcPtr->logWriteState = TcConnectionrec::NOT_STARTED;
+ } else {
+ ndbrequire(regTcPtr->logWriteState == TcConnectionrec::NOT_WRITTEN_WAIT);
+ jam();
+ /* ----------------------------------------------------------------
+ * THE STATE WAS SET TO NOT_WRITTEN BY THE OPERATION BUT LATER
+ * A SCAN OF ALL OPERATION RECORDS CHANGED IT INTO NOT_WRITTEN_WAIT.
+ * THIS INDICATES THAT WE ARE WAITING FOR THIS OPERATION TO COMMIT
+ * OR ABORT SO THAT WE CAN FIND THE
+ * STARTING GLOBAL CHECKPOINT OF THIS NEW FRAGMENT.
+ * ---------------------------------------------------------------- */
+ checkScanTcCompleted(signal);
+ }//if
+ continueAfterLogAbortWriteLab(signal);
+ return;
+}//Dblqh::continueAbortLab()
+
+void Dblqh::continueAfterLogAbortWriteLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
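+ // The abort log record (if any) is handled; reply according to who drives
+ // the abort: TCKEYREF to the API for a simple read, LQHKEYREF when LQH
+ // itself aborted, ABORTED when TC sent ABORT, LQH_TRANSCONF during TC
+ // take-over, and ABORTCONF when TC sent an explicit ABORTREQ.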
+ if (regTcPtr->simpleRead) {
+ jam();
+ TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend();
+
+ tcKeyRef->connectPtr = regTcPtr->applOprec;
+ tcKeyRef->transId[0] = regTcPtr->transid[0];
+ tcKeyRef->transId[1] = regTcPtr->transid[1];
+ tcKeyRef->errorCode = regTcPtr->errorCode;
+ sendSignal(regTcPtr->applRef,
+ GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB);
+ cleanUp(signal);
+ return;
+ }//if
+ if (regTcPtr->abortState == TcConnectionrec::ABORT_FROM_LQH) {
+ LqhKeyRef * const lqhKeyRef = (LqhKeyRef *)signal->getDataPtrSend();
+
+ jam();
+ lqhKeyRef->userRef = regTcPtr->clientConnectrec;
+ lqhKeyRef->connectPtr = regTcPtr->tcOprec;
+ lqhKeyRef->errorCode = regTcPtr->errorCode;
+ lqhKeyRef->transId1 = regTcPtr->transid[0];
+ lqhKeyRef->transId2 = regTcPtr->transid[1];
+ sendSignal(regTcPtr->clientBlockref, GSN_LQHKEYREF, signal,
+ LqhKeyRef::SignalLength, JBB);
+ } else if (regTcPtr->abortState == TcConnectionrec::ABORT_FROM_TC) {
+ jam();
+ sendAborted(signal);
+ } else if (regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC) {
+ jam();
+ sendLqhTransconf(signal, LqhTransConf::Aborted);
+ } else {
+ ndbrequire(regTcPtr->abortState == TcConnectionrec::REQ_FROM_TC);
+ jam();
+ signal->theData[0] = regTcPtr->reqRef;
+ signal->theData[1] = tcConnectptr.i;
+ signal->theData[2] = cownNodeid;
+ signal->theData[3] = regTcPtr->transid[0];
+ signal->theData[4] = regTcPtr->transid[1];
+ sendSignal(regTcPtr->reqBlockref, GSN_ABORTCONF,
+ signal, 5, JBB);
+ }//if
+ cleanUp(signal);
+}//Dblqh::continueAfterLogAbortWriteLab()
+
+/* ##########################################################################
+ * ####### MODULE TO HANDLE TC FAILURE #######
+ *
+ * ########################################################################## */
+
+/* ************************************************************************>>
+ * NODE_FAILREP: Node failure report. Sender Ndbcntr. Set status of failed
+ * node to down and reply with NF_COMPLETEREP to DIH which will report that
+ * LQH has completed failure handling.
+ * ************************************************************************>> */
+void Dblqh::execNODE_FAILREP(Signal* signal)
+{
+ UintR TfoundNodes = 0;
+ UintR TnoOfNodes;
+ UintR Tdata[MAX_NDB_NODES];
+ Uint32 i;
+
+ NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
+
+ TnoOfNodes = nodeFail->noOfNodes;
+ UintR index = 0;
+ for (i = 1; i < MAX_NDB_NODES; i++) {
+ jam();
+ if(NodeBitmask::get(nodeFail->theNodes, i)){
+ jam();
+ Tdata[index] = i;
+ index++;
+ }//if
+ }//for
+
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+
+ ndbrequire(index == TnoOfNodes);
+ ndbrequire(cnoOfNodes - 1 < MAX_NDB_NODES);
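+ // For each failed node: mark it as down in the local node table and confirm
+ // immediately to DIH that LQH has completed failure handling for that node.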
+ for (i = 0; i < TnoOfNodes; i++) {
+ const Uint32 nodeId = Tdata[i];
+ lcpPtr.p->m_EMPTY_LCP_REQ.clear(nodeId);
+
+ for (Uint32 j = 0; j < cnoOfNodes; j++) {
+ jam();
+ if (cnodeData[j] == nodeId){
+ jam();
+ cnodeStatus[j] = ZNODE_DOWN;
+
+ TfoundNodes++;
+ }//if
+ }//for
+ NFCompleteRep * const nfCompRep = (NFCompleteRep *)&signal->theData[0];
+ nfCompRep->blockNo = DBLQH;
+ nfCompRep->nodeId = cownNodeid;
+ nfCompRep->failedNodeId = Tdata[i];
+ sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+ }//for
+ ndbrequire(TnoOfNodes == TfoundNodes);
+}//Dblqh::execNODE_FAILREP()
+
+/* ************************************************************************>>
+ * LQH_TRANSREQ: Report the status of all transactions that were coordinated
+ * by the failed TC.
+ * ************************************************************************>> */
+/* ************************************************************************>>
+ * THIS SIGNAL IS RECEIVED AFTER A NODE CRASH.
+ * THE NODE HAD A TC AND COORDINATED A NUMBER OF TRANSACTIONS.
+ * NOW THE MASTER NODE IS PICKING UP THOSE TRANSACTIONS
+ * TO COMPLETE THEM. EITHER ABORT THEM OR COMMIT THEM.
+ * ************************************************************************>> */
+void Dblqh::execLQH_TRANSREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 newTcPtr = signal->theData[0];
+ BlockReference newTcBlockref = signal->theData[1];
+ Uint32 oldNodeId = signal->theData[2];
+ tcNodeFailptr.i = oldNodeId;
+ ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
+ if ((tcNodeFailptr.p->tcFailStatus == TcNodeFailRecord::TC_STATE_TRUE) ||
+ (tcNodeFailptr.p->tcFailStatus == TcNodeFailRecord::TC_STATE_BREAK)) {
+ jam();
+ tcNodeFailptr.p->lastNewTcBlockref = newTcBlockref;
+ /* ------------------------------------------------------------------------
+ * WE HAVE RECEIVED A SIGNAL SPECIFYING THAT WE NEED TO HANDLE THE FAILURE
+ * OF A TC. NOW WE RECEIVE ANOTHER SIGNAL WITH THE SAME ORDER. THIS CAN
+ * OCCUR IF THE NEW TC FAILS. WE MUST BE CAREFUL IN THIS CASE SO THAT WE DO
+ * NOT START PARALLEL ACTIVITIES TRYING TO DO THE SAME THING. WE SAVE THE
+ * NEW BLOCK REFERENCE TO THE LAST NEW TC IN A VARIABLE AND ASSIGN IT TO
+ * NEW_TC_BLOCKREF WHEN THE OLD PROCESS RETURNS TO LQH_TRANS_NEXT. IT IS
+ * CERTAIN TO COME THERE SINCE THIS IS THE ONLY PATH TO TAKE CARE OF THE
+ * NEXT TC CONNECT RECORD. WE SET THE STATUS TO BREAK TO INDICATE TO THE OLD
+ * PROCESS WHAT IS HAPPENING.
+ * ------------------------------------------------------------------------ */
+ tcNodeFailptr.p->lastNewTcRef = newTcPtr;
+ tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_BREAK;
+ return;
+ }//if
+ tcNodeFailptr.p->oldNodeId = oldNodeId;
+ tcNodeFailptr.p->newTcBlockref = newTcBlockref;
+ tcNodeFailptr.p->newTcRef = newTcPtr;
+ tcNodeFailptr.p->tcRecNow = 0;
+ tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_TRUE;
+ signal->theData[0] = ZLQH_TRANS_NEXT;
+ signal->theData[1] = tcNodeFailptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}//Dblqh::execLQH_TRANSREQ()
+
+void Dblqh::lqhTransNextLab(Signal* signal)
+{
+ UintR tend;
+ UintR tstart;
+ UintR guard0;
+
+ if (tcNodeFailptr.p->tcFailStatus == TcNodeFailRecord::TC_STATE_BREAK) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * AN INTERRUPTION TO THIS NODE FAIL HANDLING WAS RECEIVED AND A NEW
+ * TC HAS BEEN ASSIGNED TO TAKE OVER THE FAILED TC. PROBABLY THE OLD
+ * NEW TC HAS FAILED.
+ * ---------------------------------------------------------------------- */
+ tcNodeFailptr.p->newTcBlockref = tcNodeFailptr.p->lastNewTcBlockref;
+ tcNodeFailptr.p->newTcRef = tcNodeFailptr.p->lastNewTcRef;
+ tcNodeFailptr.p->tcRecNow = 0;
+ tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_TRUE;
+ }//if
+ tstart = tcNodeFailptr.p->tcRecNow;
+ tend = tstart + 200;
+ guard0 = tend;
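+ // Scan at most 200 tc connect records per round; the CONTINUEB sent at the
+ // end of this routine gives a real-time break before the next batch.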
+ for (tcConnectptr.i = tstart; tcConnectptr.i <= guard0; tcConnectptr.i++) {
+ jam();
+ if (tcConnectptr.i >= ctcConnectrecFileSize) {
+ jam();
+ /**
+ * Finished with scanning operation record
+ *
+ * now scan markers
+ */
+ scanMarkers(signal, tcNodeFailptr.i, 0, RNIL);
+ return;
+ }//if
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ if (tcConnectptr.p->transactionState != TcConnectionrec::IDLE) {
+ if (tcConnectptr.p->transactionState != TcConnectionrec::TC_NOT_CONNECTED) {
+ if (tcConnectptr.p->tcScanRec == RNIL) {
+ if (refToNode(tcConnectptr.p->tcBlockref) == tcNodeFailptr.p->oldNodeId) {
+ if (tcConnectptr.p->operation != ZREAD) {
+ jam();
+ tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i;
+ tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC;
+ abortStateHandlerLab(signal);
+ return;
+ } else {
+ jam();
+ if (tcConnectptr.p->opSimple != ZTRUE) {
+ jam();
+ tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i;
+ tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC;
+ abortStateHandlerLab(signal);
+ return;
+ }//if
+ }//if
+ }//if
+ } else {
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ if (scanptr.p->scanType == ScanRecord::COPY) {
+ jam();
+ if (scanptr.p->scanNodeId == tcNodeFailptr.p->oldNodeId) {
+ jam();
+ /* ------------------------------------------------------------
+ * THE RECEIVER OF THE COPY HAS FAILED.
+ * WE HAVE TO CLOSE THE COPY PROCESS.
+ * ------------------------------------------------------------ */
+ tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i;
+ tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC;
+ closeCopyRequestLab(signal);
+ return;
+ }//if
+ } else {
+ if (scanptr.p->scanType == ScanRecord::SCAN) {
+ jam();
+ if (refToNode(tcConnectptr.p->tcBlockref) ==
+ tcNodeFailptr.p->oldNodeId) {
+ jam();
+ tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i;
+ tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC;
+ closeScanRequestLab(signal);
+ return;
+ }//if
+ } else {
+ jam();
+ /* ------------------------------------------------------------
+ * THIS IS AN ERROR THAT SHOULD NOT OCCUR. WE CRASH THE SYSTEM.
+ * ------------------------------------------------------------ */
+ systemErrorLab(signal);
+ return;
+ }//if
+ }//if
+ }//if
+ }//if
+ }//if
+ }//for
+ tcNodeFailptr.p->tcRecNow = tend + 1;
+ signal->theData[0] = ZLQH_TRANS_NEXT;
+ signal->theData[1] = tcNodeFailptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}//Dblqh::lqhTransNextLab()
+
+void
+Dblqh::scanMarkers(Signal* signal,
+ Uint32 tcNodeFail,
+ Uint32 startBucket,
+ Uint32 i){
+
+ jam();
+
+ TcNodeFailRecordPtr tcNodeFailPtr;
+ tcNodeFailPtr.i = tcNodeFail;
+ ptrCheckGuard(tcNodeFailPtr, ctcNodeFailrecFileSize, tcNodeFailRecord);
+ const Uint32 crashedTcNodeId = tcNodeFailPtr.p->oldNodeId;
+
+ CommitAckMarkerIterator iter;
+ if(i == RNIL){
+ m_commitAckMarkerHash.next(startBucket, iter);
+ } else {
+ jam();
+ iter.curr.i = i;
+ iter.bucket = startBucket;
+ m_commitAckMarkerHash.getPtr(iter.curr);
+ m_commitAckMarkerHash.next(iter);
+ }
+
+ const Uint32 RT_BREAK = 256;
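+ /**
+ * Iterate over the commit ack marker hash: always finish the starting
+ * bucket and otherwise handle at most RT_BREAK entries per round.
+ * A marker owned by the failed TC is reported to the new TC with
+ * LqhTransConf::Marker and the iteration is rescheduled via
+ * CONTINUEB(ZSCAN_MARKERS); when the iteration is done LastTransConf
+ * is sent.
+ */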
+ for(i = 0; i<RT_BREAK || iter.bucket == startBucket; i++){
+ jam();
+
+ if(iter.curr.i == RNIL){
+ /**
+ * Done with iteration
+ */
+ jam();
+
+ tcNodeFailPtr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_FALSE;
+ signal->theData[0] = tcNodeFailPtr.p->newTcRef;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = LqhTransConf::LastTransConf;
+ sendSignal(tcNodeFailPtr.p->newTcBlockref, GSN_LQH_TRANSCONF,
+ signal, 3, JBB);
+ return;
+ }
+
+ if(iter.curr.p->tcNodeId == crashedTcNodeId){
+ jam();
+
+ /**
+ * Found marker belonging to crashed node
+ */
+ LqhTransConf * const lqhTransConf = (LqhTransConf *)&signal->theData[0];
+ lqhTransConf->tcRef = tcNodeFailPtr.p->newTcRef;
+ lqhTransConf->lqhNodeId = cownNodeid;
+ lqhTransConf->operationStatus = LqhTransConf::Marker;
+ lqhTransConf->transId1 = iter.curr.p->transid1;
+ lqhTransConf->transId2 = iter.curr.p->transid2;
+ lqhTransConf->apiRef = iter.curr.p->apiRef;
+ lqhTransConf->apiOpRec = iter.curr.p->apiOprec;
+ sendSignal(tcNodeFailPtr.p->newTcBlockref, GSN_LQH_TRANSCONF,
+ signal, 7, JBB);
+
+ signal->theData[0] = ZSCAN_MARKERS;
+ signal->theData[1] = tcNodeFailPtr.i;
+ signal->theData[2] = iter.bucket;
+ signal->theData[3] = iter.curr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+ return;
+ }
+
+ m_commitAckMarkerHash.next(iter);
+ }
+
+ signal->theData[0] = ZSCAN_MARKERS;
+ signal->theData[1] = tcNodeFailPtr.i;
+ signal->theData[2] = iter.bucket;
+ signal->theData[3] = RNIL;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+}
+
+/* #########################################################################
+ * ####### SCAN MODULE #######
+ *
+ * #########################################################################
+ * -------------------------------------------------------------------------
+ * THIS MODULE CONTAINS THE CODE THAT HANDLES A SCAN OF A PARTICULAR FRAGMENT
+ * IT OPERATES UNDER THE CONTROL OF TC AND ORDERS ACC TO PERFORM A SCAN OF
+ * ALL TUPLES IN THE FRAGMENT. TUP EVALUATES THE NECESSARY SEARCH CONDITIONS
+ * TO ENSURE THAT ONLY MATCHING TUPLES ARE RETURNED TO THE APPLICATION.
+ * ------------------------------------------------------------------------- */
+/* *************** */
+/* ACC_SCANCONF > */
+/* *************** */
+void Dblqh::execACC_SCANCONF(Signal* signal)
+{
+ AccScanConf * const accScanConf = (AccScanConf *)&signal->theData[0];
+ jamEntry();
+ scanptr.i = accScanConf->scanPtr;
+ c_scanRecordPool.getPtr(scanptr);
+ if (scanptr.p->scanState == ScanRecord::WAIT_ACC_SCAN) {
+ accScanConfScanLab(signal);
+ } else {
+ ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_ACC_COPY);
+ accScanConfCopyLab(signal);
+ }//if
+}//Dblqh::execACC_SCANCONF()
+
+/* ************>> */
+/* ACC_SCANREF > */
+/* ************>> */
+void Dblqh::execACC_SCANREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execACC_SCANREF()
+
+/* ***************>> */
+/* NEXT_SCANCONF > */
+/* ***************>> */
+void Dblqh::execNEXT_SCANCONF(Signal* signal)
+{
+ NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0];
+ jamEntry();
+ scanptr.i = nextScanConf->scanPtr;
+ c_scanRecordPool.getPtr(scanptr);
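+ // Unpack a one-word local key from ACC into [page reference, page index]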
+ if (nextScanConf->localKeyLength == 1) {
+ jam();
+ nextScanConf->localKey[1] =
+ nextScanConf->localKey[0] & MAX_TUPLES_PER_PAGE;
+ nextScanConf->localKey[0] = nextScanConf->localKey[0] >> MAX_TUPLES_BITS;
+ }//if
+ switch (scanptr.p->scanState) {
+ case ScanRecord::WAIT_CLOSE_SCAN:
+ jam();
+ accScanCloseConfLab(signal);
+ break;
+ case ScanRecord::WAIT_CLOSE_COPY:
+ jam();
+ accCopyCloseConfLab(signal);
+ break;
+ case ScanRecord::WAIT_NEXT_SCAN:
+ jam();
+ nextScanConfScanLab(signal);
+ break;
+ case ScanRecord::WAIT_NEXT_SCAN_COPY:
+ jam();
+ nextScanConfCopyLab(signal);
+ break;
+ case ScanRecord::WAIT_RELEASE_LOCK:
+ jam();
+ ndbrequire(signal->length() == 1);
+ scanLockReleasedLab(signal);
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+}//Dblqh::execNEXT_SCANCONF()
+
+/* ***************> */
+/* NEXT_SCANREF > */
+/* ***************> */
+void Dblqh::execNEXT_SCANREF(Signal* signal)
+{
+ jamEntry();
+ systemErrorLab(signal);
+ return;
+}//Dblqh::execNEXT_SCANREF()
+
+/* ******************> */
+/* STORED_PROCCONF > */
+/* ******************> */
+void Dblqh::execSTORED_PROCCONF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ Uint32 storedProcId = signal->theData[1];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ switch (scanptr.p->scanState) {
+ case ScanRecord::WAIT_STORED_PROC_SCAN:
+ jam();
+ scanptr.p->scanStoredProcId = storedProcId;
+ storedProcConfScanLab(signal);
+ break;
+ case ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN:
+ jam();
+ releaseActiveFrag(signal);
+ tupScanCloseConfLab(signal);
+ break;
+ case ScanRecord::WAIT_STORED_PROC_COPY:
+ jam();
+ scanptr.p->scanStoredProcId = storedProcId;
+ storedProcConfCopyLab(signal);
+ break;
+ case ScanRecord::WAIT_DELETE_STORED_PROC_ID_COPY:
+ jam();
+ releaseActiveFrag(signal);
+ tupCopyCloseConfLab(signal);
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+}//Dblqh::execSTORED_PROCCONF()
+
+/* ****************** */
+/* STORED_PROCREF > */
+/* ****************** */
+void Dblqh::execSTORED_PROCREF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ Uint32 errorCode = signal->theData[1];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ switch (scanptr.p->scanState) {
+ case ScanRecord::WAIT_STORED_PROC_SCAN:
+ jam();
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ scanptr.p->scanStoredProcId = signal->theData[2];
+ tcConnectptr.p->errorCode = errorCode;
+ closeScanLab(signal);
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+}//Dblqh::execSTORED_PROCREF()
+
+/* --------------------------------------------------------------------------
+ * ENTER SCAN_NEXTREQ
+ * --------------------------------------------------------------------------
+ * PRECONDITION:
+ * TRANSACTION_STATE = SCAN_STATE
+ * SCAN_STATE = WAIT_SCAN_NEXTREQ
+ *
+ * Case scanLockHold: ZTRUE = Unlock previous round of
+ * scanned row(s) and fetch next set of rows.
+ * ZFALSE = Fetch new set of rows.
+ * The number of rows to read depends on the parallelism and on how many
+ * rows are left to scan in the fragment. SCAN_NEXTREQ can also be sent with
+ * closeFlag == ZTRUE to close the scan.
+ * ------------------------------------------------------------------------- */
+void Dblqh::execSCAN_NEXTREQ(Signal* signal)
+{
+ jamEntry();
+ const ScanFragNextReq * const nextReq =
+ (ScanFragNextReq*)&signal->theData[0];
+ const Uint32 transid1 = nextReq->transId1;
+ const Uint32 transid2 = nextReq->transId2;
+ const Uint32 senderData = nextReq->senderData;
+
+ if (findTransaction(transid1, transid2, senderData) != ZOK){
+ jam();
+ DEBUG(senderData <<
+ " Received SCAN_NEXTREQ in LQH with close flag when closed");
+ ndbrequire(nextReq->closeFlag == ZTRUE);
+ return;
+ }
+
+ // Crash node if signal sender is same node
+ CRASH_INSERTION2(5021, refToNode(signal->senderBlockRef()) == cownNodeid);
+ // Crash node if signal sender is NOT same node
+ CRASH_INSERTION2(5022, refToNode(signal->senderBlockRef()) != cownNodeid);
+
+ if (ERROR_INSERTED(5023)){
+ // Drop signal if sender is same node
+ if (refToNode(signal->senderBlockRef()) == cownNodeid) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
+ }//if
+ if (ERROR_INSERTED(5024)){
+ // Drop signal if sender is NOT same node
+ if (refToNode(signal->senderBlockRef()) != cownNodeid) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
+ }//if
+ if (ERROR_INSERTED(5025)){
+ // Delay signal if sender is NOT same node
+ if (refToNode(signal->senderBlockRef()) != cownNodeid) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_SCAN_NEXTREQ, signal, 1000,
+ signal->length());
+ return;
+ }
+ }//if
+ if (ERROR_INSERTED(5030)){
+ ndbout << "ERROR 5030" << endl;
+ CLEAR_ERROR_INSERT_VALUE;
+ // Drop signal
+ return;
+ }//if
+
+ if(ERROR_INSERTED(5036)){
+ return;
+ }
+
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ ndbrequire(scanptr.i != RNIL);
+ c_scanRecordPool.getPtr(scanptr);
+ scanptr.p->scanTcWaiting = ZTRUE;
+
+ /* ------------------------------------------------------------------
+ * If close flag is set this scan should be closed
+ * If we are waiting for SCAN_NEXTREQ, set the flag to stop scanning and
+ * continue execution; otherwise set the flags and wait until the scan
+ * completes by itself.
+ * ------------------------------------------------------------------ */
+ if (nextReq->closeFlag == ZTRUE){
+ jam();
+ if(ERROR_INSERTED(5034)){
+ CLEAR_ERROR_INSERT_VALUE;
+ }
+ if(ERROR_INSERTED(5036)){
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
+ closeScanRequestLab(signal);
+ return;
+ }//if
+
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+
+ /**
+ * Change parameters while running
+ * (is currently not supported)
+ */
+ const Uint32 max_rows = nextReq->batch_size_rows;
+ const Uint32 max_bytes = nextReq->batch_size_bytes;
+ ndbrequire(scanptr.p->m_max_batch_size_rows == max_rows);
+ ndbrequire(scanptr.p->m_max_batch_size_bytes == max_bytes);
+
+ /* --------------------------------------------------------------------
+ * If scanLockHold = TRUE we need to unlock previous round of
+ * scanned records.
+ * scanReleaseLocks will set states for this and send a NEXT_SCANREQ.
+ * When confirm signal NEXT_SCANCONF arrives we call
+ * continueScanNextReqLab to continue scanning new rows and
+ * acquiring new locks.
+ * -------------------------------------------------------------------- */
+ if ((scanptr.p->scanLockHold == ZTRUE) &&
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+
+ /* -----------------------------------------------------------------------
+ * We end up here when scanLockHold = FALSE or no rows were locked in the
+ * previous round.
+ * Simply continue scanning.
+ * ----------------------------------------------------------------------- */
+ continueScanNextReqLab(signal);
+}//Dblqh::execSCAN_NEXTREQ()
+
+void Dblqh::continueScanNextReqLab(Signal* signal)
+{
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+ closeScanLab(signal);
+ return;
+ }//if
+
+ if(scanptr.p->m_last_row){
+ jam();
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
+ sendScanFragConf(signal, ZFALSE);
+ return;
+ }
+
+ // Update timer on tcConnectRecord
+ tcConnectptr.p->tcTimer = cLqhTimeOutCount;
+ init_acc_ptr_list(scanptr.p);
+ scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT;
+ scanNextLoopLab(signal);
+}//Dblqh::continueScanNextReqLab()
+
+/* -------------------------------------------------------------------------
+ * WE NEED TO RELEASE LOCKS BEFORE CONTINUING
+ * ------------------------------------------------------------------------- */
+void Dblqh::scanReleaseLocksLab(Signal* signal)
+{
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_RELEASE_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ }//switch
+ continueScanReleaseAfterBlockedLab(signal);
+}//Dblqh::scanReleaseLocksLab()
+
+void Dblqh::continueScanReleaseAfterBlockedLab(Signal* signal)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ scanptr.p->scanState = ScanRecord::WAIT_RELEASE_LOCK;
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1]=
+ get_acc_ptr_from_scan_record(scanptr.p,
+ scanptr.p->scanReleaseCounter -1,
+ false);
+ signal->theData[2] = NextScanReq::ZSCAN_COMMIT;
+ sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+}//Dblqh::continueScanReleaseAfterBlockedLab()
+
+/* -------------------------------------------------------------------------
+ * ENTER SCAN_NEXTREQ
+ * -------------------------------------------------------------------------
+ * A SCAN_NEXTREQ SIGNAL ARRIVED IN THE MIDDLE OF EXECUTION OF THE SCAN.
+ * IT WAS A REQUEST TO CLOSE THE SCAN. WE WILL CLOSE THE SCAN IN A
+ * CAREFUL MANNER TO ENSURE THAT NO ERROR OCCURS.
+ * -------------------------------------------------------------------------
+ * PRECONDITION:
+ * TRANSACTION_STATE = SCAN_STATE_USED
+ * TSCAN_COMPLETED = ZTRUE
+ * -------------------------------------------------------------------------
+ * WE CAN ALSO ARRIVE AT THIS LABEL AFTER A NODE CRASH OF THE SCAN
+ * COORDINATOR.
+ * ------------------------------------------------------------------------- */
+void Dblqh::closeScanRequestLab(Signal* signal)
+{
+ DEBUG("transactionState = " << tcConnectptr.p->transactionState);
+ switch (tcConnectptr.p->transactionState) {
+ case TcConnectionrec::SCAN_STATE_USED:
+ DEBUG("scanState = " << scanptr.p->scanState);
+ switch (scanptr.p->scanState) {
+ case ScanRecord::IN_QUEUE:
+ jam();
+ tupScanCloseConfLab(signal);
+ break;
+ case ScanRecord::WAIT_NEXT_SCAN:
+ jam();
+ /* -------------------------------------------------------------------
+ * SET COMPLETION STATUS AND WAIT FOR OPPORTUNITY TO STOP THE SCAN.
+ * ------------------------------------------------------------------- */
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ break;
+ case ScanRecord::WAIT_ACC_SCAN:
+ case ScanRecord::WAIT_STORED_PROC_SCAN:
+ jam();
+ /* -------------------------------------------------------------------
+ * WE ARE CURRENTLY STARTING UP THE SCAN. SET COMPLETED STATUS
+ * AND WAIT FOR COMPLETION OF STARTUP.
+ * ------------------------------------------------------------------- */
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ break;
+ case ScanRecord::WAIT_CLOSE_SCAN:
+ case ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN:
+ jam();
+ /*empty*/;
+ break;
+ /* -------------------------------------------------------------------
+ * CLOSE IS ALREADY ONGOING. WE NEED NOT DO ANYTHING.
+ * ------------------------------------------------------------------- */
+ case ScanRecord::WAIT_RELEASE_LOCK:
+ jam();
+ /* -------------------------------------------------------------------
+ * WE ARE CURRENTLY RELEASING RECORD LOCKS. AFTER COMPLETING THIS
+ * WE WILL START TO CLOSE THE SCAN.
+ * ------------------------------------------------------------------- */
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ break;
+ case ScanRecord::WAIT_SCAN_NEXTREQ:
+ jam();
+ /* -------------------------------------------------------------------
+ * WE ARE WAITING FOR A SCAN_NEXTREQ FROM THE SCAN COORDINATOR (TC),
+ * WHICH HAS CRASHED. CLOSE THE SCAN.
+ * ------------------------------------------------------------------- */
+ scanptr.p->scanCompletedStatus = ZTRUE;
+
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+
+ if (scanptr.p->scanLockHold == ZTRUE) {
+ if (scanptr.p->m_curr_batch_size_rows > 0) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+ }//if
+ closeScanLab(signal);
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+ break;
+ case TcConnectionrec::WAIT_SCAN_AI:
+ jam();
+ /* ---------------------------------------------------------------------
+ * WE ARE STILL WAITING FOR THE ATTRIBUTE INFORMATION THAT
+ * OBVIOUSLY WILL NOT ARRIVE. WE CAN QUIT IMMEDIATELY HERE.
+ * --------------------------------------------------------------------- */
+ //XXX jonas this have to be wrong...
+ releaseOprec(signal);
+ if (tcConnectptr.p->abortState == TcConnectionrec::NEW_FROM_TC) {
+ jam();
+ tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec;
+ ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
+ tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1;
+ signal->theData[0] = ZLQH_TRANS_NEXT;
+ signal->theData[1] = tcNodeFailptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }//if
+ tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE;
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes= 0;
+ sendScanFragConf(signal, ZTRUE);
+ abort_scan(signal, scanptr.i, 0);
+ return;
+ break;
+ case TcConnectionrec::SCAN_TUPKEY:
+ case TcConnectionrec::SCAN_FIRST_STOPPED:
+ case TcConnectionrec::SCAN_CHECK_STOPPED:
+ case TcConnectionrec::SCAN_STOPPED:
+ jam();
+ /* ---------------------------------------------------------------------
+ * SET COMPLETION STATUS AND WAIT FOR OPPORTUNITY TO STOP THE SCAN.
+ * --------------------------------------------------------------------- */
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ break;
+ case TcConnectionrec::SCAN_RELEASE_STOPPED:
+ jam();
+ /* ---------------------------------------------------------------------
+ * WE ARE CURRENTLY RELEASING RECORD LOCKS. AFTER COMPLETING
+ * THIS WE WILL START TO CLOSE THE SCAN.
+ * --------------------------------------------------------------------- */
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ break;
+ case TcConnectionrec::SCAN_CLOSE_STOPPED:
+ jam();
+ /* ---------------------------------------------------------------------
+ * CLOSE IS ALREADY ONGOING. WE NEED NOT DO ANYTHING.
+ * --------------------------------------------------------------------- */
+ /*empty*/;
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+}//Dblqh::closeScanRequestLab()
+
+/* -------------------------------------------------------------------------
+ * ENTER NEXT_SCANCONF
+ * -------------------------------------------------------------------------
+ * PRECONDITION: SCAN_STATE = WAIT_RELEASE_LOCK
+ * ------------------------------------------------------------------------- */
+void Dblqh::scanLockReleasedLab(Signal* signal)
+{
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ releaseActiveFrag(signal);
+
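+ /**
+ * Three cases: all rows of the current batch have been unlocked
+ * (decide whether to close, report to TC, or continue), more rows
+ * remain to unlock, or we just released a row that was never
+ * delivered (release counter beyond the batch size).
+ */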
+ if (scanptr.p->scanReleaseCounter == scanptr.p->m_curr_batch_size_rows) {
+ if ((scanptr.p->scanErrorCounter > 0) ||
+ (scanptr.p->scanCompletedStatus == ZTRUE)) {
+ jam();
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes = 0;
+ closeScanLab(signal);
+ } else if (scanptr.p->check_scan_batch_completed() &&
+ scanptr.p->scanLockHold != ZTRUE) {
+ jam();
+ scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
+ sendScanFragConf(signal, ZFALSE);
+ } else if (scanptr.p->m_last_row && !scanptr.p->scanLockHold) {
+ jam();
+ closeScanLab(signal);
+ return;
+ } else {
+ jam();
+ /*
+ * We come here after releasing locks following a
+ * SCAN_NEXTREQ from TC. We only get here
+ * when scanLockHold == ZTRUE.
+ */
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes = 0;
+ continueScanNextReqLab(signal);
+ }//if
+ } else if (scanptr.p->scanReleaseCounter < scanptr.p->m_curr_batch_size_rows) {
+ jam();
+ scanptr.p->scanReleaseCounter++;
+ scanReleaseLocksLab(signal);
+ } else {
+ jam();
+ /*
+ We come here when we have been scanning for a long time without being able
+ to find m_max_batch_size_rows records to return. We have released the
+ record we did not want, and now we return all records found so far to
+ the API.
+ */
+ scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
+ sendScanFragConf(signal, ZFALSE);
+ }//if
+}//Dblqh::scanLockReleasedLab()
+
+bool
+Dblqh::seize_acc_ptr_list(ScanRecord* scanP, Uint32 batch_size)
+{
+ Uint32 i;
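+ /**
+ * attr_buf_recs == ceil((batch_size - 1) / 32): the first operation
+ * pointer is kept directly in the scan record and each attrinbuf
+ * seized below holds up to 32 further pointers.
+ */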
+ Uint32 attr_buf_recs= (batch_size + 30) / 32;
+
+ if (batch_size > 1) {
+ if (c_no_attrinbuf_recs < attr_buf_recs) {
+ jam();
+ return false;
+ }
+ for (i= 1; i <= attr_buf_recs; i++) {
+ scanP->scan_acc_op_ptr[i]= seize_attrinbuf();
+ }
+ }
+ scanP->scan_acc_attr_recs= attr_buf_recs;
+ scanP->scan_acc_index = 0;
+ return true;
+}
+
+void
+Dblqh::release_acc_ptr_list(ScanRecord* scanP)
+{
+ Uint32 i, attr_buf_recs;
+ attr_buf_recs= scanP->scan_acc_attr_recs;
+
+ for (i= 1; i <= attr_buf_recs; i++) {
+ release_attrinbuf(scanP->scan_acc_op_ptr[i]);
+ }
+ scanP->scan_acc_attr_recs= 0;
+ scanP->scan_acc_index = 0;
+}
+
+Uint32
+Dblqh::seize_attrinbuf()
+{
+ AttrbufPtr regAttrPtr;
+ Uint32 ret_attr_buf;
+ ndbrequire(c_no_attrinbuf_recs > 0);
+ c_no_attrinbuf_recs--;
+ ret_attr_buf= cfirstfreeAttrinbuf;
+ regAttrPtr.i= ret_attr_buf;
+ ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
+ cfirstfreeAttrinbuf= regAttrPtr.p->attrbuf[ZINBUF_NEXT];
+ return ret_attr_buf;
+}
+
+Uint32
+Dblqh::release_attrinbuf(Uint32 attr_buf_i)
+{
+ Uint32 next_buf;
+ AttrbufPtr regAttrPtr;
+ c_no_attrinbuf_recs++;
+ regAttrPtr.i= attr_buf_i;
+ ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
+ next_buf= regAttrPtr.p->attrbuf[ZINBUF_NEXT];
+ regAttrPtr.p->attrbuf[ZINBUF_NEXT]= cfirstfreeAttrinbuf;
+ cfirstfreeAttrinbuf= regAttrPtr.i;
+ return next_buf;
+}
+
+void
+Dblqh::init_acc_ptr_list(ScanRecord* scanP)
+{
+ scanP->scan_acc_index = 0;
+}
+
+Uint32
+Dblqh::get_acc_ptr_from_scan_record(ScanRecord* scanP,
+ Uint32 index,
+ bool crash_flag)
+{
+ Uint32* acc_ptr;
+ Uint32 attr_buf_rec, attr_buf_index;
+ if (!((index < MAX_PARALLEL_OP_PER_SCAN) &&
+ index < scanP->scan_acc_index)) {
+ ndbrequire(crash_flag);
+ return RNIL;
+ }
+ i_get_acc_ptr(scanP, acc_ptr, index);
+ return *acc_ptr;
+}
+
+void
+Dblqh::set_acc_ptr_in_scan_record(ScanRecord* scanP,
+ Uint32 index, Uint32 acc)
+{
+ Uint32 *acc_ptr;
+ ndbrequire((index == 0 || scanP->scan_acc_index == index) &&
+ (index < MAX_PARALLEL_OP_PER_SCAN));
+ scanP->scan_acc_index= index + 1;
+ i_get_acc_ptr(scanP, acc_ptr, index);
+ *acc_ptr= acc;
+}
+
+/* -------------------------------------------------------------------------
+ * SCAN_FRAGREQ: Request to start scanning the specified fragment of a table.
+ * ------------------------------------------------------------------------- */
+void Dblqh::execSCAN_FRAGREQ(Signal* signal)
+{
+ ScanFragReq * const scanFragReq = (ScanFragReq *)&signal->theData[0];
+ ScanFragRef * ref;
+ const Uint32 transid1 = scanFragReq->transId1;
+ const Uint32 transid2 = scanFragReq->transId2;
+ Uint32 errorCode= 0;
+ Uint32 senderData;
+ Uint32 hashIndex;
+ TcConnectionrecPtr nextHashptr;
+
+ jamEntry();
+ const Uint32 reqinfo = scanFragReq->requestInfo;
+ const Uint32 fragId = (scanFragReq->fragmentNoKeyLen & 0xFFFF);
+ const Uint32 keyLen = (scanFragReq->fragmentNoKeyLen >> 16);
+ tabptr.i = scanFragReq->tableId;
+ const Uint32 max_rows = scanFragReq->batch_size_rows;
+ const Uint32 scanLockMode = ScanFragReq::getLockMode(reqinfo);
+ const Uint8 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo);
+ const Uint8 rangeScan = ScanFragReq::getRangeScanFlag(reqinfo);
+ const Uint8 tupScan = ScanFragReq::getTupScanFlag(reqinfo);
+
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ if(tabptr.p->tableStatus != Tablerec::TABLE_DEFINED){
+ senderData = scanFragReq->senderData;
+ goto error_handler_early_1;
+ }
+
+ if (cfirstfreeTcConrec != RNIL) {
+ seizeTcrec();
+ tcConnectptr.p->clientConnectrec = scanFragReq->senderData;
+ tcConnectptr.p->clientBlockref = signal->senderBlockRef();
+ tcConnectptr.p->savePointId = scanFragReq->savePointId;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+ * NO FREE TC RECORD AVAILABLE, THUS WE CANNOT HANDLE THE REQUEST.
+ * -------------------------------------------------------------------- */
+ errorCode = ZNO_TC_CONNECT_ERROR;
+ senderData = scanFragReq->senderData;
+ goto error_handler_early;
+ }//if
+ /**
+ * A write always has to get keyinfo.
+ */
+ ndbrequire(scanLockMode == 0 || keyinfo);
+
+ ndbrequire(max_rows > 0 && max_rows <= MAX_PARALLEL_OP_PER_SCAN);
+ if (!getFragmentrec(signal, fragId)) {
+ errorCode = 1231;
+ goto error_handler;
+ }//if
+
+ // Verify scan type vs table type (both sides are boolean)
+ if (rangeScan != DictTabInfo::isOrderedIndex(fragptr.p->tableType)) {
+ errorCode = 1232;
+ goto error_handler;
+ }//if
+
+ // 1 scan record is reserved for node recovery
+ if (cscanNoFreeRec < 2) {
+ jam();
+ errorCode = ScanFragRef::ZNO_FREE_SCANREC_ERROR;
+ goto error_handler;
+ }
+
+ // XXX adjust cmaxAccOps for range scans and remove this comment
+ if ((cbookedAccOps + max_rows) > cmaxAccOps) {
+ jam();
+ errorCode = ScanFragRef::ZSCAN_BOOK_ACC_OP_ERROR;
+ goto error_handler;
+ }//if
+
+ ndbrequire(c_scanRecordPool.seize(scanptr));
+ initScanTc(signal,
+ transid1,
+ transid2,
+ fragId,
+ ZNIL);
+ tcConnectptr.p->save1 = 4;
+ tcConnectptr.p->primKeyLen = keyLen + 4; // hard coded in execKEYINFO
+ errorCode = initScanrec(scanFragReq);
+ if (errorCode != ZOK) {
+ jam();
+ goto error_handler2;
+ }//if
+ cscanNoFreeRec--;
+ cbookedAccOps += max_rows;
+
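+ /**
+ * Insert the operation first into the transaction id hash so that
+ * findTransaction() can locate it for SCAN_NEXTREQ and for node
+ * failure handling.
+ */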
+ hashIndex = (tcConnectptr.p->transid[0] ^ tcConnectptr.p->tcOprec) & 1023;
+ nextHashptr.i = ctransidHash[hashIndex];
+ ctransidHash[hashIndex] = tcConnectptr.i;
+ tcConnectptr.p->prevHashRec = RNIL;
+ tcConnectptr.p->nextHashRec = nextHashptr.i;
+ if (nextHashptr.i != RNIL) {
+ jam();
+ /* ---------------------------------------------------------------------
+ * ENSURE THAT THE NEXT RECORD HAS SET PREVIOUS TO OUR RECORD
+ * IF IT EXISTS
+ * --------------------------------------------------------------------- */
+ ptrCheckGuard(nextHashptr, ctcConnectrecFileSize, tcConnectionrec);
+ nextHashptr.p->prevHashRec = tcConnectptr.i;
+ }//if
+ if (scanptr.p->scanAiLength > 0) {
+ jam();
+ tcConnectptr.p->transactionState = TcConnectionrec::WAIT_SCAN_AI;
+ return;
+ }//if
+ continueAfterReceivingAllAiLab(signal);
+ return;
+
+error_handler2:
+ // no scan number allocated
+ c_scanRecordPool.release(scanptr);
+error_handler:
+ ref = (ScanFragRef*)&signal->theData[0];
+ tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE;
+ ref->senderData = tcConnectptr.p->clientConnectrec;
+ ref->transId1 = transid1;
+ ref->transId2 = transid2;
+ ref->errorCode = errorCode;
+ sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGREF, signal,
+ ScanFragRef::SignalLength, JBB);
+ releaseOprec(signal);
+ releaseTcrec(signal, tcConnectptr);
+ return;
+
+ error_handler_early_1:
+ if(tabptr.p->tableStatus == Tablerec::NOT_DEFINED){
+ jam();
+ errorCode = ZTABLE_NOT_DEFINED;
+ } else if (tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING ||
+ tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
+ jam();
+ errorCode = ZDROP_TABLE_IN_PROGRESS;
+ } else {
+ ndbrequire(0);
+ }
+ error_handler_early:
+ ref = (ScanFragRef*)&signal->theData[0];
+ ref->senderData = senderData;
+ ref->transId1 = transid1;
+ ref->transId2 = transid2;
+ ref->errorCode = errorCode;
+ sendSignal(signal->senderBlockRef(), GSN_SCAN_FRAGREF, signal,
+ ScanFragRef::SignalLength, JBB);
+}//Dblqh::execSCAN_FRAGREQ()
+
+void Dblqh::continueAfterReceivingAllAiLab(Signal* signal)
+{
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+
+ if(scanptr.p->scanState == ScanRecord::IN_QUEUE){
+ jam();
+ return;
+ }
+
+ scanptr.p->scanState = ScanRecord::WAIT_ACC_SCAN;
+ AccScanReq * req = (AccScanReq*)&signal->theData[0];
+ req->senderData = scanptr.i;
+ req->senderRef = cownref;
+ req->tableId = tcConnectptr.p->tableref;
+ req->fragmentNo = tcConnectptr.p->fragmentid;
+ req->requestInfo = 0;
+ AccScanReq::setLockMode(req->requestInfo, scanptr.p->scanLockMode);
+ AccScanReq::setReadCommittedFlag(req->requestInfo, scanptr.p->readCommitted);
+ AccScanReq::setDescendingFlag(req->requestInfo, scanptr.p->descending);
+ req->transId1 = tcConnectptr.p->transid[0];
+ req->transId2 = tcConnectptr.p->transid[1];
+ req->savePointId = tcConnectptr.p->savePointId;
+ sendSignal(scanptr.p->scanBlockref, GSN_ACC_SCANREQ, signal,
+ AccScanReq::SignalLength, JBB);
+}//Dblqh::continueAfterReceivingAllAiLab()
+
+void Dblqh::scanAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ if (saveTupattrbuf(signal, dataPtr, length) == ZOK) {
+ if (tcConnectptr.p->currTupAiLen < scanptr.p->scanAiLength) {
+ jam();
+ } else {
+ jam();
+ ndbrequire(tcConnectptr.p->currTupAiLen == scanptr.p->scanAiLength);
+ continueAfterReceivingAllAiLab(signal);
+ }//if
+ return;
+ }//if
+ abort_scan(signal, scanptr.i, ZGET_ATTRINBUF_ERROR);
+}
+
+void Dblqh::abort_scan(Signal* signal, Uint32 scan_ptr_i, Uint32 errcode){
+ jam();
+ scanptr.i = scan_ptr_i;
+ c_scanRecordPool.getPtr(scanptr);
+
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ finishScanrec(signal);
+ releaseScanrec(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::IDLE;
+ tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE;
+
+ if(errcode)
+ {
+ jam();
+ ScanFragRef * ref = (ScanFragRef*)&signal->theData[0];
+ ref->senderData = tcConnectptr.p->clientConnectrec;
+ ref->transId1 = tcConnectptr.p->transid[0];
+ ref->transId2 = tcConnectptr.p->transid[1];
+ ref->errorCode = errcode;
+ sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGREF, signal,
+ ScanFragRef::SignalLength, JBB);
+ }
+ deleteTransidHash(signal);
+ releaseOprec(signal);
+ releaseTcrec(signal, tcConnectptr);
+}
+
+/*---------------------------------------------------------------------*/
+/* Send this 'I am alive' signal to TC when it is received from ACC */
+/* We include the scanPtr.i that comes from ACC in signalData[1], this */
+/* tells TC which fragment record to check for a timeout. */
+/*---------------------------------------------------------------------*/
+void Dblqh::execSCAN_HBREP(Signal* signal)
+{
+ jamEntry();
+ scanptr.i = signal->theData[0];
+ c_scanRecordPool.getPtr(scanptr);
+ switch(scanptr.p->scanType){
+ case ScanRecord::SCAN:
+ if (scanptr.p->scanTcWaiting == ZTRUE) {
+ jam();
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+
+ const Uint32 transid1 = signal->theData[1];
+ const Uint32 transid2 = signal->theData[2];
+ ndbrequire(transid1 == tcConnectptr.p->transid[0] &&
+ transid2 == tcConnectptr.p->transid[1]);
+
+ // Update counter on tcConnectPtr
+ if (tcConnectptr.p->tcTimer != 0){
+ tcConnectptr.p->tcTimer = cLqhTimeOutCount;
+ } else {
+ jam();
+ //ndbout << "SCAN_HBREP when tcTimer was off" << endl;
+ }
+
+ signal->theData[0] = tcConnectptr.p->clientConnectrec;
+ signal->theData[1] = tcConnectptr.p->transid[0];
+ signal->theData[2] = tcConnectptr.p->transid[1];
+ sendSignal(tcConnectptr.p->clientBlockref,
+ GSN_SCAN_HBREP, signal, 3, JBB);
+ }//if
+ break;
+ case ScanRecord::COPY:
+ // ndbout << "Dblqh::execSCAN_HBREP Dropping SCAN_HBREP" << endl;
+ break;
+ default:
+ ndbrequire(false);
+ }
+}
+
+void Dblqh::accScanConfScanLab(Signal* signal)
+{
+ AccScanConf * const accScanConf = (AccScanConf *)&signal->theData[0];
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ /* -----------------------------------------------------------------------
+ * PRECONDITION: SCAN_STATE = WAIT_ACC_SCAN
+ * ----------------------------------------------------------------------- */
+ if (accScanConf->flag == AccScanConf::ZEMPTY_FRAGMENT) {
+ jam();
+ /* ---------------------------------------------------------------------
+ * THE FRAGMENT WAS EMPTY.
+ * REPORT SUCCESSFUL COPYING.
+ * --------------------------------------------------------------------- */
+ tupScanCloseConfLab(signal);
+ return;
+ }//if
+ scanptr.p->scanAccPtr = accScanConf->accPtr;
+ if (scanptr.p->rangeScan) {
+ jam();
+ TuxBoundInfo* req = (TuxBoundInfo*)signal->getDataPtrSend();
+ req->errorCode = RNIL;
+ req->tuxScanPtrI = scanptr.p->scanAccPtr;
+ Uint32 len = req->boundAiLength = copy_bounds(req->data, tcConnectptr.p);
+ EXECUTE_DIRECT(DBTUX, GSN_TUX_BOUND_INFO, signal,
+ TuxBoundInfo::SignalLength + len);
+
+ jamEntry();
+ if (req->errorCode != 0) {
+ jam();
+ /*
+ * Cannot use STORED_PROCREF to abort since even the REF
+ * returns a stored proc id. So record the error and continue.
+ * The scan is already invalid in TUX and will return an empty set.
+ */
+ tcConnectptr.p->errorCode = req->errorCode;
+ }
+ }
+
+ scanptr.p->scanState = ScanRecord::WAIT_STORED_PROC_SCAN;
+ if(scanptr.p->scanStoredProcId == RNIL)
+ {
+ jam();
+ signal->theData[0] = tcConnectptr.p->tupConnectrec;
+ signal->theData[1] = tcConnectptr.p->tableref;
+ signal->theData[2] = scanptr.p->scanSchemaVersion;
+ signal->theData[3] = ZSTORED_PROC_SCAN;
+
+ signal->theData[4] = scanptr.p->scanAiLength;
+ sendSignal(tcConnectptr.p->tcTupBlockref,
+ GSN_STORED_PROCREQ, signal, 5, JBB);
+
+ signal->theData[0] = tcConnectptr.p->tupConnectrec;
+ AttrbufPtr regAttrinbufptr;
+ Uint32 firstAttr = regAttrinbufptr.i = tcConnectptr.p->firstAttrinbuf;
+ while (regAttrinbufptr.i != RNIL) {
+ ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
+ jam();
+ Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN];
+ ndbrequire(dataLen != 0);
+ // first 3 words already set in STORED_PROCREQ
+ MEMCOPY_NO_WORDS(&signal->theData[3],
+ &regAttrinbufptr.p->attrbuf[0],
+ dataLen);
+ sendSignal(tcConnectptr.p->tcTupBlockref,
+ GSN_ATTRINFO, signal, dataLen + 3, JBB);
+ regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT];
+ c_no_attrinbuf_recs++;
+ }//while
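+ /**
+ * The buffers just forwarded are counted as free already here;
+ * the whole chain is linked back onto cfirstfreeAttrinbuf below.
+ */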
+
+ /**
+ * Release attr info
+ */
+ if(firstAttr != RNIL)
+ {
+ regAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = cfirstfreeAttrinbuf;
+ cfirstfreeAttrinbuf = firstAttr;
+ tcConnectptr.p->firstAttrinbuf = tcConnectptr.p->lastAttrinbuf = RNIL;
+ }
+ }
+ else
+ {
+ jam();
+ storedProcConfScanLab(signal);
+ }
+}//Dblqh::accScanConfScanLab()
+
+#define print_buf(s,idx,len) {\
+ printf(s); Uint32 t2=len; DatabufPtr t3; t3.i = idx; \
+ while(t3.i != RNIL && t2-- > 0){\
+ ptrCheckGuard(t3, cdatabufFileSize, databuf);\
+ printf("%d ", t3.i); t3.i= t3.p->nextDatabuf;\
+ } printf("\n"); }
+
+Uint32
+Dblqh::copy_bounds(Uint32 * dst, TcConnectionrec* tcPtrP)
+{
+ /**
+ * copy_bounds handles multiple bounds: the upper 16 bits of the first
+ * word of each bound (the word that also carries the bound type) hold
+ * the length of that particular bound.
+ *
+ */
+
+ DatabufPtr regDatabufptr;
+ Uint32 left = 4 - tcPtrP->m_offset_current_keybuf; // left in buf
+ Uint32 totalLen = tcPtrP->primKeyLen - 4;
+ regDatabufptr.i = tcPtrP->firstTupkeybuf;
+
+ ndbassert(tcPtrP->primKeyLen >= 4);
+ ndbassert(tcPtrP->m_offset_current_keybuf < 4);
+ ndbassert(!(totalLen == 0 && regDatabufptr.i != RNIL));
+ ndbassert(!(totalLen != 0 && regDatabufptr.i == RNIL));
+
+ if(totalLen)
+ {
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ Uint32 sig0 = regDatabufptr.p->data[0];
+ Uint32 sig1 = regDatabufptr.p->data[1];
+ Uint32 sig2 = regDatabufptr.p->data[2];
+ Uint32 sig3 = regDatabufptr.p->data[3];
+
+ switch(left){
+ case 4:
+ * dst++ = sig0;
+ case 3:
+ * dst++ = sig1;
+ case 2:
+ * dst++ = sig2;
+ case 1:
+ * dst++ = sig3;
+ }
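+ // Intentional fall-through above: copy the remaining 'left' words
+ // of the current 4-word key buffer.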
+
+ Uint32 first = (* (dst - left)); // First word in range
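+ /**
+ * Layout of 'first': bits 0-3 bound type, bits 4-15 range number,
+ * bits 16-31 length of this range (0 means the range covers the
+ * whole remaining key length).
+ */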
+
+ // Length of this range
+ Uint8 offset;
+ const Uint32 len = (first >> 16) ? (first >> 16) : totalLen;
+ tcPtrP->m_scan_curr_range_no = (first & 0xFFF0) >> 4;
+ (* (dst - left)) = (first & 0xF); // Remove length & range no
+
+ if(len < left)
+ {
+ offset = len;
+ }
+ else
+ {
+ Databuf * lastP;
+ left = (len - left);
+ regDatabufptr.i = regDatabufptr.p->nextDatabuf;
+
+ while(left >= 4)
+ {
+ left -= 4;
+ lastP = regDatabufptr.p;
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ sig0 = regDatabufptr.p->data[0];
+ sig1 = regDatabufptr.p->data[1];
+ sig2 = regDatabufptr.p->data[2];
+ sig3 = regDatabufptr.p->data[3];
+ regDatabufptr.i = regDatabufptr.p->nextDatabuf;
+
+ * dst++ = sig0;
+ * dst++ = sig1;
+ * dst++ = sig2;
+ * dst++ = sig3;
+ }
+
+ if(left > 0)
+ {
+ lastP = regDatabufptr.p;
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ sig0 = regDatabufptr.p->data[0];
+ sig1 = regDatabufptr.p->data[1];
+ sig2 = regDatabufptr.p->data[2];
+ sig3 = regDatabufptr.p->data[3];
+ * dst++ = sig0;
+ * dst++ = sig1;
+ * dst++ = sig2;
+ * dst++ = sig3;
+ }
+ else
+ {
+ lastP = regDatabufptr.p;
+ }
+ offset = left & 3;
+ lastP->nextDatabuf = cfirstfreeDatabuf;
+ cfirstfreeDatabuf = tcPtrP->firstTupkeybuf;
+ ndbassert(cfirstfreeDatabuf != RNIL);
+ }
+
+ if(len == totalLen && regDatabufptr.i != RNIL)
+ {
+ regDatabufptr.p->nextDatabuf = cfirstfreeDatabuf;
+ cfirstfreeDatabuf = regDatabufptr.i;
+ tcPtrP->lastTupkeybuf = regDatabufptr.i = RNIL;
+ ndbassert(cfirstfreeDatabuf != RNIL);
+ }
+
+ tcPtrP->m_offset_current_keybuf = offset;
+ tcPtrP->firstTupkeybuf = regDatabufptr.i;
+ tcPtrP->primKeyLen = 4 + totalLen - len;
+
+ return len;
+ }
+ return totalLen;
+}
+
+/* -------------------------------------------------------------------------
+ * ENTER STORED_PROCCONF WITH
+ * TC_CONNECTPTR,
+ * TSTORED_PROC_ID
+ * -------------------------------------------------------------------------
+ * PRECONDITION: SCAN_STATE = WAIT_STORED_PROC_SCAN
+ * ------------------------------------------------------------------------- */
+void Dblqh::storedProcConfScanLab(Signal* signal)
+{
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+ // STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
+ closeScanLab(signal);
+ return;
+ }//if
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_FIRST_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ continueFirstScanAfterBlockedLab(signal);
+}//Dblqh::storedProcConfScanLab()
+
+void Dblqh::continueFirstScanAfterBlockedLab(Signal* signal)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN;
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = RNIL;
+ signal->theData[2] = NextScanReq::ZSCAN_NEXT;
+ sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ return;
+}//Dblqh::continueFirstScanAfterBlockedLab()
+
+/* -------------------------------------------------------------------------
+ * When executing a scan we must come up to the surface at times to make
+ * sure we can quickly start local checkpoints.
+ * ------------------------------------------------------------------------- */
+void Dblqh::execCHECK_LCP_STOP(Signal* signal)
+{
+ jamEntry();
+ scanptr.i = signal->theData[0];
+ c_scanRecordPool.getPtr(scanptr);
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (signal->theData[1] == ZTRUE) {
+ jam();
+ releaseActiveFrag(signal);
+ signal->theData[0] = ZCHECK_LCP_STOP_BLOCKED;
+ signal->theData[1] = scanptr.i;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
+ signal->theData[0] = RNIL;
+ return;
+ }//if
+ if (fragptr.p->fragStatus != Fragrecord::FSACTIVE) {
+ ndbrequire(fragptr.p->fragStatus == Fragrecord::BLOCKED);
+ releaseActiveFrag(signal);
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_CHECK_STOPPED;
+ signal->theData[0] = RNIL;
+ }//if
+}//Dblqh::execCHECK_LCP_STOP()
+
+void Dblqh::checkLcpStopBlockedLab(Signal* signal)
+{
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ continueAfterCheckLcpStopBlocked(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_CHECK_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ }//switch
+}//Dblqh::checkLcpStopBlockedLab()
+
+void Dblqh::continueAfterCheckLcpStopBlocked(Signal* signal)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = AccCheckScan::ZNOT_CHECK_LCP_STOP;
+ EXECUTE_DIRECT(refToBlock(scanptr.p->scanBlockref), GSN_ACC_CHECK_SCAN,
+ signal, 2);
+}//Dblqh::continueAfterCheckLcpStopBlocked()
+
+/* -------------------------------------------------------------------------
+ * ENTER NEXT_SCANCONF
+ * -------------------------------------------------------------------------
+ * PRECONDITION: SCAN_STATE = WAIT_NEXT_SCAN
+ * ------------------------------------------------------------------------- */
+void Dblqh::nextScanConfScanLab(Signal* signal)
+{
+ NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0];
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ if (nextScanConf->fragId == RNIL) {
+ jam();
+ /* ---------------------------------------------------------------------
+ * THERE ARE NO MORE TUPLES TO FETCH. IF WE HAVE ANY
+ * OPERATIONS STILL NEEDING A LOCK WE REPORT TO THE
+ * APPLICATION AND CLOSE THE SCAN WHEN THE NEXT SCAN
+ * REQUEST IS RECEIVED. IF WE DO NOT HAVE ANY NEED FOR
+ * LOCKS WE CAN CLOSE THE SCAN IMMEDIATELY.
+ * --------------------------------------------------------------------- */
+ releaseActiveFrag(signal);
+ /*************************************************************
+ * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
+ ************************************************************ */
+ if (!scanptr.p->scanLockHold)
+ {
+ jam();
+ closeScanLab(signal);
+ return;
+ }
+
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ if ((scanptr.p->scanLockHold == ZTRUE) &&
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+ jam();
+ closeScanLab(signal);
+ return;
+ }//if
+
+ if (scanptr.p->m_curr_batch_size_rows > 0) {
+ jam();
+
+ if((tcConnectptr.p->primKeyLen - 4) == 0)
+ scanptr.p->scanCompletedStatus = ZTRUE;
+
+ scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
+ sendScanFragConf(signal, ZFALSE);
+ return;
+ }//if
+ closeScanLab(signal);
+ return;
+ }//if
+
+ // If accOperationPtr == RNIL no record was returned by ACC
+ if (nextScanConf->accOperationPtr == RNIL) {
+ jam();
+ /*************************************************************
+ * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
+ ************************************************************ */
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ releaseActiveFrag(signal);
+ if ((scanptr.p->scanLockHold == ZTRUE) &&
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+ jam();
+ closeScanLab(signal);
+ return;
+ }//if
+
+ if (scanptr.p->m_curr_batch_size_rows > 0) {
+ jam();
+ releaseActiveFrag(signal);
+ scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
+ sendScanFragConf(signal, ZFALSE);
+ return;
+ }//if
+
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ sendSignal(scanptr.p->scanBlockref,
+ GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ return;
+ }//if
+ jam();
+ set_acc_ptr_in_scan_record(scanptr.p,
+ scanptr.p->m_curr_batch_size_rows,
+ nextScanConf->accOperationPtr);
+ jam();
+ scanptr.p->scanLocalref[0] = nextScanConf->localKey[0];
+ scanptr.p->scanLocalref[1] = nextScanConf->localKey[1];
+ scanptr.p->scanLocalFragid = nextScanConf->fragId;
+ nextScanConfLoopLab(signal);
+}//Dblqh::nextScanConfScanLab()
+
+void Dblqh::nextScanConfLoopLab(Signal* signal)
+{
+ /* ----------------------------------------------------------------------
+ * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
+ * ---------------------------------------------------------------------- */
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+ releaseActiveFrag(signal);
+ if ((scanptr.p->scanLockHold == ZTRUE) &&
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+ closeScanLab(signal);
+ return;
+ }//if
+ jam();
+ Uint32 tableRef;
+ Uint32 tupFragPtr;
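+ /**
+ * Pack the TUPKEYREQ request word: bit 0 is set when the scan does
+ * not hold locks, the operation type starts at bit 6 and the
+ * interpreted execution flag at bit 10.
+ */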
+ Uint32 reqinfo = (scanptr.p->scanLockHold == ZFALSE);
+ reqinfo = reqinfo + (tcConnectptr.p->operation << 6);
+ reqinfo = reqinfo + (tcConnectptr.p->opExec << 10);
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_TUPKEY;
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (! scanptr.p->rangeScan) {
+ tableRef = tcConnectptr.p->tableref;
+ tupFragPtr = fragptr.p->tupFragptr[scanptr.p->scanLocalFragid & 1];
+ } else {
+ jam();
+ // for ordered index use primary table
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = fragptr.p->tableFragptr;
+ ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
+ tableRef = tFragPtr.p->tabRef;
+ tupFragPtr = tFragPtr.p->tupFragptr[scanptr.p->scanLocalFragid & 1];
+ }
+ {
+ jam();
+ TupKeyReq * const tupKeyReq = (TupKeyReq *)signal->getDataPtrSend();
+
+ tupKeyReq->connectPtr = tcConnectptr.p->tupConnectrec;
+ tupKeyReq->request = reqinfo;
+ tupKeyReq->tableRef = tableRef;
+ tupKeyReq->fragId = scanptr.p->scanLocalFragid;
+ tupKeyReq->keyRef1 = scanptr.p->scanLocalref[0];
+ tupKeyReq->keyRef2 = scanptr.p->scanLocalref[1];
+ tupKeyReq->attrBufLen = 0;
+ tupKeyReq->opRef = scanptr.p->scanApiOpPtr;
+ tupKeyReq->applRef = scanptr.p->scanApiBlockref;
+ tupKeyReq->schemaVersion = scanptr.p->scanSchemaVersion;
+ tupKeyReq->storedProcedure = scanptr.p->scanStoredProcId;
+ tupKeyReq->transId1 = tcConnectptr.p->transid[0];
+ tupKeyReq->transId2 = tcConnectptr.p->transid[1];
+ tupKeyReq->fragPtr = tupFragPtr;
+ tupKeyReq->primaryReplica = (tcConnectptr.p->seqNoReplica == 0)?true:false;
+ tupKeyReq->coordinatorTC = tcConnectptr.p->tcBlockref;
+ tupKeyReq->tcOpIndex = tcConnectptr.p->tcOprec;
+ tupKeyReq->savePointId = tcConnectptr.p->savePointId;
+ Uint32 blockNo = refToBlock(tcConnectptr.p->tcTupBlockref);
+ EXECUTE_DIRECT(blockNo, GSN_TUPKEYREQ, signal,
+ TupKeyReq::SignalLength);
+ }
+}
+
+/* -------------------------------------------------------------------------
+ * RECEPTION OF FURTHER KEY INFORMATION WHEN KEY SIZE > 16 BYTES.
+ * -------------------------------------------------------------------------
+ * PRECONDITION: SCAN_STATE = WAIT_SCAN_KEYINFO
+ * ------------------------------------------------------------------------- */
+void
+Dblqh::keyinfoLab(const Uint32 * src, const Uint32 * end)
+{
+ do {
+ jam();
+ seizeTupkeybuf(0);
+ databufptr.p->data[0] = * src ++;
+ databufptr.p->data[1] = * src ++;
+ databufptr.p->data[2] = * src ++;
+ databufptr.p->data[3] = * src ++;
+ } while (src < end);
+}//Dblqh::keyinfoLab()
+
+Uint32
+Dblqh::readPrimaryKeys(ScanRecord *scanP, TcConnectionrec *tcConP, Uint32 *dst)
+{
+ Uint32 tableId = tcConP->tableref;
+ Uint32 fragId = scanP->scanLocalFragid;
+ Uint32 fragPageId = scanP->scanLocalref[0];
+ Uint32 pageIndex = scanP->scanLocalref[1];
+
+ if(scanP->rangeScan)
+ {
+ jam();
+ // for ordered index use primary table
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = fragptr.p->tableFragptr;
+ ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
+ tableId = tFragPtr.p->tabRef;
+ }
+
+ int ret = c_tup->accReadPk(tableId, fragId, fragPageId, pageIndex, dst, false);
+ if(0)
+ ndbout_c("readPrimaryKeys(table: %d fragment: %d [ %d %d ] -> %d",
+ tableId, fragId, fragPageId, pageIndex, ret);
+ ndbassert(ret > 0);
+
+ return ret;
+}
+
+/* -------------------------------------------------------------------------
+ * ENTER TUPKEYCONF
+ * -------------------------------------------------------------------------
+ * PRECONDITION: TRANSACTION_STATE = SCAN_TUPKEY
+ * ------------------------------------------------------------------------- */
+void Dblqh::scanTupkeyConfLab(Signal* signal)
+{
+ const TupKeyConf * conf = (TupKeyConf *)signal->getDataPtr();
+ UintR tdata4 = conf->readLength;
+ UintR tdata5 = conf->lastRow;
+
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ releaseActiveFrag(signal);
+ c_scanRecordPool.getPtr(scanptr);
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ /* ---------------------------------------------------------------------
+ * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
+ * --------------------------------------------------------------------- */
+ if ((scanptr.p->scanLockHold == ZTRUE) &&
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+ jam();
+ closeScanLab(signal);
+ return;
+ }//if
+ if (scanptr.p->scanKeyinfoFlag) {
+ jam();
+ // Inform API about the keyinfo length as well
+ tdata4 += sendKeyinfo20(signal, scanptr.p, tcConnectptr.p);
+ }//if
+ ndbrequire(scanptr.p->m_curr_batch_size_rows < MAX_PARALLEL_OP_PER_SCAN);
+ scanptr.p->m_curr_batch_size_bytes+= tdata4;
+ scanptr.p->m_curr_batch_size_rows++;
+ scanptr.p->m_last_row = tdata5;
+ if (scanptr.p->check_scan_batch_completed() | tdata5){
+ if (scanptr.p->scanLockHold == ZTRUE) {
+ jam();
+ scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
+ sendScanFragConf(signal, ZFALSE);
+ return;
+ } else {
+ jam();
+ scanptr.p->scanReleaseCounter = scanptr.p->m_curr_batch_size_rows;
+ scanReleaseLocksLab(signal);
+ return;
+ }
+ } else {
+ if (scanptr.p->scanLockHold == ZTRUE) {
+ jam();
+ scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT;
+ } else {
+ jam();
+ scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_COMMIT;
+ }
+ }
+ scanNextLoopLab(signal);
+}//Dblqh::scanTupkeyConfLab()
+
+void Dblqh::scanNextLoopLab(Signal* signal)
+{
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ }//switch
+ continueScanAfterBlockedLab(signal);
+}//Dblqh::scanNextLoopLab()
+
+void Dblqh::continueScanAfterBlockedLab(Signal* signal)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ Uint32 accOpPtr;
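+ /**
+ * ZSCAN_NEXT_ABORT: the row fetched last was not accepted
+ * (see scanTupkeyRefLab). The rejected row's ACC operation is
+ * released via ZSCAN_NEXT_COMMIT and its slot in the acc pointer
+ * list is reused for the next row.
+ */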
+ if (scanptr.p->scanFlag == NextScanReq::ZSCAN_NEXT_ABORT) {
+ jam();
+ scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_COMMIT;
+ accOpPtr= get_acc_ptr_from_scan_record(scanptr.p,
+ scanptr.p->m_curr_batch_size_rows,
+ false);
+ scanptr.p->scan_acc_index--;
+ } else if (scanptr.p->scanFlag == NextScanReq::ZSCAN_NEXT_COMMIT) {
+ jam();
+ accOpPtr= get_acc_ptr_from_scan_record(scanptr.p,
+ scanptr.p->m_curr_batch_size_rows-1,
+ false);
+ } else {
+ jam();
+ accOpPtr = RNIL; // The value is not used in ACC
+ }//if
+ scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN;
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = accOpPtr;
+ signal->theData[2] = scanptr.p->scanFlag;
+ sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+}//Dblqh::continueScanAfterBlockedLab()
+
+/* -------------------------------------------------------------------------
+ * ENTER TUPKEYREF WITH
+ * TC_CONNECTPTR,
+ * TERROR_CODE
+ * -------------------------------------------------------------------------
+ * PRECONDITION: TRANSACTION_STATE = SCAN_TUPKEY
+ * ------------------------------------------------------------------------- */
+void Dblqh::scanTupkeyRefLab(Signal* signal)
+{
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ releaseActiveFrag(signal);
+ c_scanRecordPool.getPtr(scanptr);
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ /* ---------------------------------------------------------------------
+ * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
+ * --------------------------------------------------------------------- */
+ if ((scanptr.p->scanLockHold == ZTRUE) &&
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+ jam();
+ closeScanLab(signal);
+ return;
+ }//if
+ if ((terrorCode != ZSEARCH_CONDITION_FALSE) &&
+ (terrorCode != ZNO_TUPLE_FOUND) &&
+ (terrorCode >= ZUSER_ERROR_CODE_LIMIT)) {
+ scanptr.p->scanErrorCounter++;
+ tcConnectptr.p->errorCode = terrorCode;
+
+ if (scanptr.p->scanLockHold == ZTRUE) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ } else {
+ jam();
+ scanptr.p->m_curr_batch_size_rows++;
+ scanptr.p->scanReleaseCounter = scanptr.p->m_curr_batch_size_rows;
+ }//if
+ /* --------------------------------------------------------------------
+ * WE NEED TO RELEASE ALL LOCKS CURRENTLY
+ * HELD BY THIS SCAN.
+ * -------------------------------------------------------------------- */
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+ Uint32 time_passed= tcConnectptr.p->tcTimer - cLqhTimeOutCount;
+ if (scanptr.p->m_curr_batch_size_rows > 0) {
+ if (time_passed > 1) {
+ /* -----------------------------------------------------------------------
+ * WE NEED TO ENSURE THAT WE DO NOT SEARCH FOR THE NEXT TUPLE FOR A
+ * LONG TIME WHILE WE KEEP A LOCK ON A FOUND TUPLE. WE WOULD RATHER REPORT
+ * THE FOUND TUPLE IF FOUND TUPLES ARE RARE. If more than 10 ms have passed
+ * we send the found tuples to the API.
+ * ----------------------------------------------------------------------- */
+ scanptr.p->scanReleaseCounter = scanptr.p->m_curr_batch_size_rows + 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }
+ } else {
+ if (time_passed > 10) {
+ jam();
+ signal->theData[0]= scanptr.i;
+ signal->theData[1]= tcConnectptr.p->transid[0];
+ signal->theData[2]= tcConnectptr.p->transid[1];
+ execSCAN_HBREP(signal);
+ }
+ }
+ scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_ABORT;
+ scanNextLoopLab(signal);
+}//Dblqh::scanTupkeyRefLab()
+
+/* -------------------------------------------------------------------------
+ * THE SCAN HAS BEEN COMPLETED. EITHER BY REACHING THE END OR BY COMMAND
+ * FROM THE APPLICATION OR BY SOME SORT OF ERROR CONDITION.
+ * ------------------------------------------------------------------------- */
+void Dblqh::closeScanLab(Signal* signal)
+{
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_CLOSE_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ }//switch
+ continueCloseScanAfterBlockedLab(signal);
+}//Dblqh::closeScanLab()
+
+void Dblqh::continueCloseScanAfterBlockedLab(Signal* signal)
+{
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ scanptr.p->scanState = ScanRecord::WAIT_CLOSE_SCAN;
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = RNIL;
+ signal->theData[2] = NextScanReq::ZSCAN_CLOSE;
+ sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+}//Dblqh::continueCloseScanAfterBlockedLab()
+
+/* -------------------------------------------------------------------------
+ * ENTER NEXT_SCANCONF
+ * -------------------------------------------------------------------------
+ * PRECONDITION: SCAN_STATE = WAIT_CLOSE_SCAN
+ * ------------------------------------------------------------------------- */
+void Dblqh::accScanCloseConfLab(Signal* signal)
+{
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+
+ if((tcConnectptr.p->primKeyLen - 4) > 0 &&
+ scanptr.p->scanCompletedStatus != ZTRUE)
+ {
+ jam();
+ releaseActiveFrag(signal);
+ continueAfterReceivingAllAiLab(signal);
+ return;
+ }
+
+ scanptr.p->scanState = ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN;
+ signal->theData[0] = tcConnectptr.p->tupConnectrec;
+ signal->theData[1] = tcConnectptr.p->tableref;
+ signal->theData[2] = scanptr.p->scanSchemaVersion;
+ signal->theData[3] = ZDELETE_STORED_PROC_ID;
+ signal->theData[4] = scanptr.p->scanStoredProcId;
+ sendSignal(tcConnectptr.p->tcTupBlockref,
+ GSN_STORED_PROCREQ, signal, 5, JBB);
+}//Dblqh::accScanCloseConfLab()
+
+/* -------------------------------------------------------------------------
+ * ENTER STORED_PROCCONF WITH
+ * -------------------------------------------------------------------------
+ * PRECONDITION: SCAN_STATE = WAIT_DELETE_STORED_PROC_ID_SCAN
+ * ------------------------------------------------------------------------- */
+void Dblqh::tupScanCloseConfLab(Signal* signal)
+{
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (tcConnectptr.p->abortState == TcConnectionrec::NEW_FROM_TC) {
+ jam();
+ tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec;
+ ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
+ tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1;
+ signal->theData[0] = ZLQH_TRANS_NEXT;
+ signal->theData[1] = tcNodeFailptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ } else if (tcConnectptr.p->errorCode != 0) {
+ jam();
+ ScanFragRef * ref = (ScanFragRef*)&signal->theData[0];
+ ref->senderData = tcConnectptr.p->clientConnectrec;
+ ref->transId1 = tcConnectptr.p->transid[0];
+ ref->transId2 = tcConnectptr.p->transid[1];
+ ref->errorCode = tcConnectptr.p->errorCode;
+ sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGREF, signal,
+ ScanFragRef::SignalLength, JBB);
+ } else {
+ jam();
+ sendScanFragConf(signal, ZSCAN_FRAG_CLOSED);
+ }//if
+ finishScanrec(signal);
+ releaseScanrec(signal);
+ tcConnectptr.p->tcScanRec = RNIL;
+ deleteTransidHash(signal);
+ releaseOprec(signal);
+ releaseTcrec(signal, tcConnectptr);
+}//Dblqh::tupScanCloseConfLab()
+
+/* =========================================================================
+ * ======= INITIATE SCAN RECORD =======
+ *
+ * SUBROUTINE SHORT NAME = ISC
+ * ========================================================================= */
+Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
+{
+ const Uint32 reqinfo = scanFragReq->requestInfo;
+ const Uint32 max_rows = scanFragReq->batch_size_rows;
+ const Uint32 max_bytes = scanFragReq->batch_size_bytes;
+ const Uint32 scanLockMode = ScanFragReq::getLockMode(reqinfo);
+ const Uint32 scanLockHold = ScanFragReq::getHoldLockFlag(reqinfo);
+ const Uint32 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo);
+ const Uint32 readCommitted = ScanFragReq::getReadCommittedFlag(reqinfo);
+ const Uint32 rangeScan = ScanFragReq::getRangeScanFlag(reqinfo);
+ const Uint32 descending = ScanFragReq::getDescendingFlag(reqinfo);
+ const Uint32 tupScan = ScanFragReq::getTupScanFlag(reqinfo);
+ const Uint32 attrLen = ScanFragReq::getAttrLen(reqinfo);
+ const Uint32 scanPrio = ScanFragReq::getScanPrio(reqinfo);
+
+ scanptr.p->scanKeyinfoFlag = keyinfo;
+ scanptr.p->scanLockHold = scanLockHold;
+ scanptr.p->scanCompletedStatus = ZFALSE;
+ scanptr.p->scanType = ScanRecord::SCAN;
+ scanptr.p->scanApiBlockref = scanFragReq->resultRef;
+ scanptr.p->scanAiLength = attrLen;
+ scanptr.p->scanTcrec = tcConnectptr.i;
+ scanptr.p->scanSchemaVersion = scanFragReq->schemaVersion;
+
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes= 0;
+ scanptr.p->m_max_batch_size_rows = max_rows;
+ scanptr.p->m_max_batch_size_bytes = max_bytes;
+
+ if (! rangeScan && ! tupScan)
+ scanptr.p->scanBlockref = tcConnectptr.p->tcAccBlockref;
+ else if (! tupScan)
+ scanptr.p->scanBlockref = tcConnectptr.p->tcTuxBlockref;
+ else
+ scanptr.p->scanBlockref = tcConnectptr.p->tcTupBlockref;
+
+ scanptr.p->scanErrorCounter = 0;
+ scanptr.p->scanLockMode = scanLockMode;
+ scanptr.p->readCommitted = readCommitted;
+ scanptr.p->rangeScan = rangeScan;
+ scanptr.p->descending = descending;
+ scanptr.p->tupScan = tupScan;
+ scanptr.p->scanState = ScanRecord::SCAN_FREE;
+ scanptr.p->scanFlag = ZFALSE;
+ scanptr.p->scanLocalref[0] = 0;
+ scanptr.p->scanLocalref[1] = 0;
+ scanptr.p->scanLocalFragid = 0;
+ scanptr.p->scanTcWaiting = ZTRUE;
+ scanptr.p->scanNumber = ~0;
+ scanptr.p->scanApiOpPtr = scanFragReq->clientOpPtr;
+ scanptr.p->m_last_row = 0;
+ scanptr.p->scanStoredProcId = RNIL;
+
+ if (max_rows == 0 || (max_bytes > 0 && max_rows > max_bytes)){
+ jam();
+ return ScanFragRef::ZWRONG_BATCH_SIZE;
+ }
+ if (!seize_acc_ptr_list(scanptr.p, max_rows)){
+ jam();
+ return ScanFragRef::ZTOO_MANY_ACTIVE_SCAN_ERROR;
+ }
+ /**
+ * Used for scan take over
+ */
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = fragptr.p->tableFragptr;
+ ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
+ scanptr.p->fragPtrI = fragptr.p->tableFragptr;
+
+ /**
+ * !idx uses 1 .. (MAX_PARALLEL_SCANS_PER_FRAG - 1) = 1-11
+ * idx uses MAX_PARALLEL_SCANS_PER_FRAG .. MAX = 12-42
+ */
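+ /* Example, assuming MAX_PARALLEL_SCANS_PER_FRAG == 12 as the ranges above
+ * suggest: an ACC scan looks for a free scan number in 1-11, while a range
+ * or tup scan looks in 12-42. */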
+ Uint32 start = (rangeScan || tupScan ? MAX_PARALLEL_SCANS_PER_FRAG : 1 );
+ Uint32 stop = (rangeScan || tupScan ? MAX_PARALLEL_INDEX_SCANS_PER_FRAG : MAX_PARALLEL_SCANS_PER_FRAG - 1);
+ stop += start;
+ Uint32 free = tFragPtr.p->m_scanNumberMask.find(start);
+
+ if(free == Fragrecord::ScanNumberMask::NotFound || free >= stop){
+ jam();
+
+ if(scanPrio == 0){
+ jam();
+ return ScanFragRef::ZTOO_MANY_ACTIVE_SCAN_ERROR;
+ }
+
+ /**
+ * Put on queue
+ */
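+ /* Queued scans are picked up again in finishScanrec when an active scan
+ * releases its scan number. */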
+ scanptr.p->scanState = ScanRecord::IN_QUEUE;
+ LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
+ fragptr.p->m_queuedScans);
+ queue.add(scanptr);
+ return ZOK;
+ }
+
+ scanptr.p->scanNumber = free;
+ tFragPtr.p->m_scanNumberMask.clear(free);// Update mask
+
+ LocalDLList<ScanRecord> active(c_scanRecordPool, fragptr.p->m_activeScans);
+ active.add(scanptr);
+ if(scanptr.p->scanKeyinfoFlag){
+ jam();
+#ifdef VM_TRACE
+ ScanRecordPtr tmp;
+ ndbrequire(!c_scanTakeOverHash.find(tmp, * scanptr.p));
+#endif
+#ifdef TRACE_SCAN_TAKEOVER
+ ndbout_c("adding (%d %d) table: %d fragId: %d frag.i: %d tableFragptr: %d",
+ scanptr.p->scanNumber, scanptr.p->fragPtrI,
+ tabptr.i, scanFragReq->fragmentNoKeyLen & 0xFFFF,
+ fragptr.i, fragptr.p->tableFragptr);
+#endif
+ c_scanTakeOverHash.add(scanptr);
+ }
+ init_acc_ptr_list(scanptr.p);
+ return ZOK;
+}
+
+/* =========================================================================
+ * ======= INITIATE TC RECORD AT SCAN =======
+ *
+ * SUBROUTINE SHORT NAME = IST
+ * ========================================================================= */
+void Dblqh::initScanTc(Signal* signal,
+ Uint32 transid1,
+ Uint32 transid2,
+ Uint32 fragId,
+ Uint32 nodeId)
+{
+ tcConnectptr.p->transid[0] = transid1;
+ tcConnectptr.p->transid[1] = transid2;
+ tcConnectptr.p->tcScanRec = scanptr.i;
+ tcConnectptr.p->tableref = tabptr.i;
+ tcConnectptr.p->fragmentid = fragId;
+ tcConnectptr.p->fragmentptr = fragptr.i;
+ tcConnectptr.p->tcOprec = tcConnectptr.p->clientConnectrec;
+ tcConnectptr.p->tcBlockref = tcConnectptr.p->clientBlockref;
+ tcConnectptr.p->errorCode = 0;
+ tcConnectptr.p->reclenAiLqhkey = 0;
+ tcConnectptr.p->abortState = TcConnectionrec::ABORT_IDLE;
+ tcConnectptr.p->nextReplica = nodeId;
+ tcConnectptr.p->currTupAiLen = 0;
+ tcConnectptr.p->opExec = 1;
+ tcConnectptr.p->operation = ZREAD;
+ tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
+ tcConnectptr.p->commitAckMarker = RNIL;
+ tcConnectptr.p->m_offset_current_keybuf = 0;
+ tcConnectptr.p->m_scan_curr_range_no = 0;
+
+ tabptr.p->usageCount++;
+}//Dblqh::initScanTc()
+
+/* =========================================================================
+ * ======= FINISH SCAN RECORD =======
+ *
+ * REMOVE SCAN RECORD FROM PER FRAGMENT LIST.
+ * ========================================================================= */
+void Dblqh::finishScanrec(Signal* signal)
+{
+ release_acc_ptr_list(scanptr.p);
+
+ LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
+ fragptr.p->m_queuedScans);
+
+ if(scanptr.p->scanState == ScanRecord::IN_QUEUE){
+ jam();
+ queue.release(scanptr);
+ return;
+ }
+
+ if(scanptr.p->scanKeyinfoFlag){
+ jam();
+ ScanRecordPtr tmp;
+#ifdef TRACE_SCAN_TAKEOVER
+ ndbout_c("removing (%d %d)", scanptr.p->scanNumber, scanptr.p->fragPtrI);
+#endif
+ c_scanTakeOverHash.remove(tmp, * scanptr.p);
+ ndbrequire(tmp.p == scanptr.p);
+ }
+
+ LocalDLList<ScanRecord> scans(c_scanRecordPool, fragptr.p->m_activeScans);
+ scans.release(scanptr);
+
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = scanptr.p->fragPtrI;
+ ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
+
+ const Uint32 scanNumber = scanptr.p->scanNumber;
+ ndbrequire(!tFragPtr.p->m_scanNumberMask.get(scanNumber));
+ ScanRecordPtr restart;
+
+ /**
+ * Start one of the queued scans
+ */
+ if(scanNumber == NR_ScanNo || !queue.first(restart)){
+ jam();
+ tFragPtr.p->m_scanNumberMask.set(scanNumber);
+ return;
+ }
+
+ if(ERROR_INSERTED(5034)){
+ jam();
+ tFragPtr.p->m_scanNumberMask.set(scanNumber);
+ return;
+ }
+
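+ /**
+ * The freed scan number is handed over to the first queued scan, which is
+ * moved from the queue to the active list and restarted below.
+ */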
+ ndbrequire(restart.p->scanState == ScanRecord::IN_QUEUE);
+
+ ScanRecordPtr tmpScan = scanptr;
+ TcConnectionrecPtr tmpTc = tcConnectptr;
+
+ tcConnectptr.i = restart.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ restart.p->scanNumber = scanNumber;
+
+ queue.remove(restart);
+ scans.add(restart);
+ if(restart.p->scanKeyinfoFlag){
+ jam();
+#ifdef VM_TRACE
+ ScanRecordPtr tmp;
+ ndbrequire(!c_scanTakeOverHash.find(tmp, * restart.p));
+#endif
+ c_scanTakeOverHash.add(restart);
+#ifdef TRACE_SCAN_TAKEOVER
+ ndbout_c("adding-r (%d %d)", restart.p->scanNumber, restart.p->fragPtrI);
+#endif
+ }
+
+ restart.p->scanState = ScanRecord::SCAN_FREE; // set in initScanRec
+ if(tcConnectptr.p->transactionState == TcConnectionrec::SCAN_STATE_USED)
+ {
+ jam();
+ scanptr = restart;
+ continueAfterReceivingAllAiLab(signal);
+ }
+ else
+ {
+ ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::WAIT_SCAN_AI);
+ }
+ scanptr = tmpScan;
+ tcConnectptr = tmpTc;
+}//Dblqh::finishScanrec()
+
+/* =========================================================================
+ * ======= RELEASE SCAN RECORD =======
+ *
+ * RELEASE A SCAN RECORD TO THE FREELIST.
+ * ========================================================================= */
+void Dblqh::releaseScanrec(Signal* signal)
+{
+ scanptr.p->scanState = ScanRecord::SCAN_FREE;
+ scanptr.p->scanType = ScanRecord::ST_IDLE;
+ scanptr.p->scanTcWaiting = ZFALSE;
+ cbookedAccOps -= scanptr.p->m_max_batch_size_rows;
+ cscanNoFreeRec++;
+}//Dblqh::releaseScanrec()
+
+/* ------------------------------------------------------------------------
+ * ------- SEND KEYINFO20 TO API -------
+ *
+ * ------------------------------------------------------------------------ */
+Uint32 Dblqh::sendKeyinfo20(Signal* signal,
+ ScanRecord * scanP,
+ TcConnectionrec * tcConP)
+{
+ ndbrequire(scanP->m_curr_batch_size_rows < MAX_PARALLEL_OP_PER_SCAN);
+ KeyInfo20 * keyInfo = (KeyInfo20 *)&signal->theData[0];
+
+ /**
+ * Note that this code requires signal->theData to be big enough for
+ * an entire key
+ */
+ const BlockReference ref = scanP->scanApiBlockref;
+ const Uint32 scanOp = scanP->m_curr_batch_size_rows;
+ const Uint32 nodeId = refToNode(ref);
+ const bool connectedToNode = getNodeInfo(nodeId).m_connected;
+ const Uint32 type = getNodeInfo(nodeId).m_type;
+ const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+ const bool old_dest = (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0));
+ const bool longable = true; // TODO is_api && !old_dest;
+
+ Uint32 * dst = keyInfo->keyData;
+ dst += nodeId == getOwnNodeId() ? 0 : KeyInfo20::DataLength;
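+ /* For a remote receiver the key is first assembled after the fixed
+ * KEYINFO20 data area (at theData + 25, which is where src points below)
+ * and then copied into the signal payload in DataLength sized chunks;
+ * for a local receiver it is written directly into keyData. */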
+
+ Uint32 keyLen = readPrimaryKeys(scanP, tcConP, dst);
+ Uint32 fragId = tcConP->fragmentid;
+ keyInfo->clientOpPtr = scanP->scanApiOpPtr;
+ keyInfo->keyLen = keyLen;
+ keyInfo->scanInfo_Node =
+ KeyInfo20::setScanInfo(scanOp, scanP->scanNumber) + (fragId << 20);
+ keyInfo->transId1 = tcConP->transid[0];
+ keyInfo->transId2 = tcConP->transid[1];
+
+ Uint32 * src = signal->theData+25;
+ if(connectedToNode){
+ jam();
+
+ if(nodeId != getOwnNodeId()){
+ jam();
+
+ if(keyLen <= KeyInfo20::DataLength || !longable) {
+ while(keyLen > KeyInfo20::DataLength){
+ jam();
+ MEMCOPY_NO_WORDS(keyInfo->keyData, src, KeyInfo20::DataLength);
+ sendSignal(ref, GSN_KEYINFO20, signal, 25, JBB);
+ src += KeyInfo20::DataLength;
+ keyLen -= KeyInfo20::DataLength;
+ }
+
+ MEMCOPY_NO_WORDS(keyInfo->keyData, src, keyLen);
+ sendSignal(ref, GSN_KEYINFO20, signal,
+ KeyInfo20::HeaderLength+keyLen, JBB);
+ return keyLen;
+ }
+
+ LinearSectionPtr ptr[3];
+ ptr[0].p = src;
+ ptr[0].sz = keyLen;
+ sendSignal(ref, GSN_KEYINFO20, signal, KeyInfo20::HeaderLength,
+ JBB, ptr, 1);
+ return keyLen;
+ }
+
+ EXECUTE_DIRECT(refToBlock(ref), GSN_KEYINFO20, signal,
+ KeyInfo20::HeaderLength + keyLen);
+ jamEntry();
+ return keyLen;
+ }
+
+ /**
+ * If this node does not have a direct connection
+ * to the receiving node we want to send the signals
+ * routed via the node that controls this read
+ */
+ Uint32 routeBlockref = tcConP->clientBlockref;
+
+ if(keyLen < KeyInfo20::DataLength || !longable){
+ jam();
+
+ while (keyLen > (KeyInfo20::DataLength - 1)) {
+ jam();
+ MEMCOPY_NO_WORDS(keyInfo->keyData, src, KeyInfo20::DataLength - 1);
+ keyInfo->keyData[KeyInfo20::DataLength-1] = ref;
+ sendSignal(routeBlockref, GSN_KEYINFO20_R, signal, 25, JBB);
+ src += KeyInfo20::DataLength - 1;
+ keyLen -= KeyInfo20::DataLength - 1;
+ }
+
+ MEMCOPY_NO_WORDS(keyInfo->keyData, src, keyLen);
+ keyInfo->keyData[keyLen] = ref;
+ sendSignal(routeBlockref, GSN_KEYINFO20_R, signal,
+ KeyInfo20::HeaderLength+keyLen+1, JBB);
+ return keyLen;
+ }
+
+ keyInfo->keyData[0] = ref;
+ LinearSectionPtr ptr[3];
+ ptr[0].p = src;
+ ptr[0].sz = keyLen;
+ sendSignal(routeBlockref, GSN_KEYINFO20_R, signal,
+ KeyInfo20::HeaderLength+1, JBB, ptr, 1);
+ return keyLen;
+}
+
+/* ------------------------------------------------------------------------
+ * ------- SEND SCAN_FRAGCONF TO TC THAT CONTROLS THE SCAN -------
+ *
+ * ------------------------------------------------------------------------ */
+void Dblqh::sendScanFragConf(Signal* signal, Uint32 scanCompleted)
+{
+ Uint32 completed_ops= scanptr.p->m_curr_batch_size_rows;
+ Uint32 total_len= scanptr.p->m_curr_batch_size_bytes;
+ scanptr.p->scanTcWaiting = ZFALSE;
+
+ if(ERROR_INSERTED(5037)){
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
+ ScanFragConf * conf = (ScanFragConf*)&signal->theData[0];
+ NodeId tc_node_id= refToNode(tcConnectptr.p->clientBlockref);
+ Uint32 trans_id1= tcConnectptr.p->transid[0];
+ Uint32 trans_id2= tcConnectptr.p->transid[1];
+
+ conf->senderData = tcConnectptr.p->clientConnectrec;
+ conf->completedOps = completed_ops;
+ conf->fragmentCompleted = scanCompleted;
+ conf->transId1 = trans_id1;
+ conf->transId2 = trans_id2;
+ conf->total_len= total_len;
+ sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGCONF,
+ signal, ScanFragConf::SignalLength, JBB);
+
+ if(!scanptr.p->scanLockHold)
+ {
+ jam();
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes= 0;
+ }
+}//Dblqh::sendScanFragConf()
+
+/* ######################################################################### */
+/* ####### NODE RECOVERY MODULE ####### */
+/* */
+/* ######################################################################### */
+/*---------------------------------------------------------------------------*/
+/* */
+/* THIS MODULE IS USED WHEN A NODE HAS FAILED. IT PERFORMS A COPY OF A */
+/* FRAGMENT TO A NEW REPLICA OF THE FRAGMENT. IT ALSO SHUTS DOWN ALL         */
+/* CONNECTIONS TO THE FAILED NODE. */
+/*---------------------------------------------------------------------------*/
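+/*---------------------------------------------------------------------------*/
+/* calculateHash: COLLECTS THE PRIMARY KEY WORDS FROM tupkeyData AND THE      */
+/* DATABUF CHAIN INTO ONE CONTIGUOUS BUFFER AND COMPUTES THE md5 BASED HASH   */
+/* VALUE STORED IN hashValue.                                                 */
+/*---------------------------------------------------------------------------*/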
+void Dblqh::calculateHash(Signal* signal)
+{
+ DatabufPtr locDatabufptr;
+ UintR Ti;
+ UintR Tdata0;
+ UintR Tdata1;
+ UintR Tdata2;
+ UintR Tdata3;
+ UintR* Tdata32;
+ Uint64 Tdata[512];
+
+ Tdata32 = (UintR*)&Tdata[0];
+
+ Tdata0 = tcConnectptr.p->tupkeyData[0];
+ Tdata1 = tcConnectptr.p->tupkeyData[1];
+ Tdata2 = tcConnectptr.p->tupkeyData[2];
+ Tdata3 = tcConnectptr.p->tupkeyData[3];
+ Tdata32[0] = Tdata0;
+ Tdata32[1] = Tdata1;
+ Tdata32[2] = Tdata2;
+ Tdata32[3] = Tdata3;
+ locDatabufptr.i = tcConnectptr.p->firstTupkeybuf;
+ Ti = 4;
+ while (locDatabufptr.i != RNIL) {
+ ptrCheckGuard(locDatabufptr, cdatabufFileSize, databuf);
+ Tdata0 = locDatabufptr.p->data[0];
+ Tdata1 = locDatabufptr.p->data[1];
+ Tdata2 = locDatabufptr.p->data[2];
+ Tdata3 = locDatabufptr.p->data[3];
+ Tdata32[Ti ] = Tdata0;
+ Tdata32[Ti + 1] = Tdata1;
+ Tdata32[Ti + 2] = Tdata2;
+ Tdata32[Ti + 3] = Tdata3;
+ locDatabufptr.i = locDatabufptr.p->nextDatabuf;
+ Ti += 4;
+ }//while
+ tcConnectptr.p->hashValue =
+ md5_hash((Uint64*)&Tdata32[0], (UintR)tcConnectptr.p->primKeyLen);
+}//Dblqh::calculateHash()
+
+/* *************************************** */
+/* COPY_FRAGREQ: Start copying a fragment */
+/* *************************************** */
+void Dblqh::execCOPY_FRAGREQ(Signal* signal)
+{
+ jamEntry();
+ const CopyFragReq * const copyFragReq = (CopyFragReq *)&signal->theData[0];
+ tabptr.i = copyFragReq->tableId;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ const Uint32 fragId = copyFragReq->fragId;
+ const Uint32 copyPtr = copyFragReq->userPtr;
+ const Uint32 userRef = copyFragReq->userRef;
+ const Uint32 nodeId = copyFragReq->nodeId;
+
+ ndbrequire(cnoActiveCopy < 3);
+ ndbrequire(getFragmentrec(signal, fragId));
+ ndbrequire(fragptr.p->copyFragState == ZIDLE);
+ ndbrequire(cfirstfreeTcConrec != RNIL);
+ ndbrequire(fragptr.p->m_scanNumberMask.get(NR_ScanNo));
+
+ fragptr.p->fragDistributionKey = copyFragReq->distributionKey;
+
+ if (DictTabInfo::isOrderedIndex(tabptr.p->tableType)) {
+ jam();
+ /**
+ * Ordered index doesn't need to be copied
+ */
+ CopyFragConf * const conf = (CopyFragConf *)&signal->theData[0];
+ conf->userPtr = copyPtr;
+ conf->sendingNodeId = cownNodeid;
+ conf->startingNodeId = nodeId;
+ conf->tableId = tabptr.i;
+ conf->fragId = fragId;
+ sendSignal(userRef, GSN_COPY_FRAGCONF, signal,
+ CopyFragConf::SignalLength, JBB);
+ return;
+ }//if
+
+ LocalDLList<ScanRecord> scans(c_scanRecordPool, fragptr.p->m_activeScans);
+ ndbrequire(scans.seize(scanptr));
+/* ------------------------------------------------------------------------- */
+// We keep track of how many operation records have been booked in ACC.
+// Copy fragment always has its records booked and thus need not book any.
+// The maximum number of operations used in parallel is m_max_batch_size_rows.
+// This variable has to be set up here since it is used by releaseScanrec
+// to unbook operation records in ACC.
+/* ------------------------------------------------------------------------- */
+ scanptr.p->m_max_batch_size_rows = 0;
+ scanptr.p->rangeScan = 0;
+ scanptr.p->tupScan = 0;
+ seizeTcrec();
+
+ /**
+ * Remove implicit cast/usage of CopyFragReq
+ */
+ //initCopyrec(signal);
+ scanptr.p->copyPtr = copyPtr;
+ scanptr.p->scanType = ScanRecord::COPY;
+ scanptr.p->scanApiBlockref = userRef;
+ scanptr.p->scanNodeId = nodeId;
+ scanptr.p->scanTcrec = tcConnectptr.i;
+ scanptr.p->scanSchemaVersion = copyFragReq->schemaVersion;
+ scanptr.p->scanCompletedStatus = ZFALSE;
+ scanptr.p->scanErrorCounter = 0;
+ scanptr.p->scanNumber = NR_ScanNo;
+ scanptr.p->scanKeyinfoFlag = 0; // Don't put into hash
+ scanptr.p->fragPtrI = fragptr.i;
+ fragptr.p->m_scanNumberMask.clear(NR_ScanNo);
+ scanptr.p->scanBlockref = DBACC_REF;
+
+ initScanTc(signal,
+ 0,
+ (DBLQH << 20) + (cownNodeid << 8),
+ fragId,
+ copyFragReq->nodeId);
+ cactiveCopy[cnoActiveCopy] = fragptr.i;
+ cnoActiveCopy++;
+
+ tcConnectptr.p->copyCountWords = 0;
+ tcConnectptr.p->tcOprec = tcConnectptr.i;
+ tcConnectptr.p->schemaVersion = scanptr.p->scanSchemaVersion;
+ scanptr.p->scanState = ScanRecord::WAIT_ACC_COPY;
+ AccScanReq * req = (AccScanReq*)&signal->theData[0];
+ req->senderData = scanptr.i;
+ req->senderRef = cownref;
+ req->tableId = tabptr.i;
+ req->fragmentNo = fragId;
+ req->requestInfo = 0;
+ AccScanReq::setLockMode(req->requestInfo, 0);
+ AccScanReq::setReadCommittedFlag(req->requestInfo, 0);
+ req->transId1 = tcConnectptr.p->transid[0];
+ req->transId2 = tcConnectptr.p->transid[1];
+ req->savePointId = tcConnectptr.p->savePointId;
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_ACC_SCANREQ, signal,
+ AccScanReq::SignalLength, JBB);
+ return;
+}//Dblqh::execCOPY_FRAGREQ()
+
+void Dblqh::accScanConfCopyLab(Signal* signal)
+{
+ AccScanConf * const accScanConf = (AccScanConf *)&signal->theData[0];
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+/*--------------------------------------------------------------------------*/
+/* PRECONDITION: SCAN_STATE = WAIT_ACC_COPY */
+/*--------------------------------------------------------------------------*/
+ if (accScanConf->flag == AccScanConf::ZEMPTY_FRAGMENT) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE FRAGMENT WAS EMPTY. */
+/* REPORT SUCCESSFUL COPYING. */
+/*---------------------------------------------------------------------------*/
+ tupCopyCloseConfLab(signal);
+ return;
+ }//if
+ scanptr.p->scanAccPtr = accScanConf->accPtr;
+ scanptr.p->scanState = ScanRecord::WAIT_STORED_PROC_COPY;
+ signal->theData[0] = tcConnectptr.p->tupConnectrec;
+ signal->theData[1] = tcConnectptr.p->tableref;
+ signal->theData[2] = scanptr.p->scanSchemaVersion;
+ signal->theData[3] = ZSTORED_PROC_COPY;
+// theData[4] is not used in TUP with ZSTORED_PROC_COPY
+ sendSignal(tcConnectptr.p->tcTupBlockref, GSN_STORED_PROCREQ, signal, 5, JBB);
+ return;
+}//Dblqh::accScanConfCopyLab()
+
+/*---------------------------------------------------------------------------*/
+/* ENTER STORED_PROCCONF WITH */
+/* TC_CONNECTPTR, */
+/* TSTORED_PROC_ID */
+/*---------------------------------------------------------------------------*/
+void Dblqh::storedProcConfCopyLab(Signal* signal)
+{
+/*---------------------------------------------------------------------------*/
+/* PRECONDITION: SCAN_STATE = WAIT_STORED_PROC_COPY */
+/*---------------------------------------------------------------------------*/
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE COPY PROCESS HAS BEEN COMPLETED, MOST LIKELY DUE TO A NODE FAILURE.  */
+/*---------------------------------------------------------------------------*/
+ closeCopyLab(signal);
+ return;
+ }//if
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN_COPY;
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::COPY_FIRST_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ continueFirstCopyAfterBlockedLab(signal);
+ return;
+}//Dblqh::storedProcConfCopyLab()
+
+void Dblqh::continueFirstCopyAfterBlockedLab(Signal* signal)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = RNIL;
+ signal->theData[2] = NextScanReq::ZSCAN_NEXT;
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ return;
+}//Dblqh::continueFirstCopyAfterBlockedLab()
+
+/*---------------------------------------------------------------------------*/
+/* ENTER NEXT_SCANCONF WITH */
+/* SCANPTR, */
+/* TFRAGID, */
+/* TACC_OPPTR, */
+/* TLOCAL_KEY1, */
+/* TLOCAL_KEY2, */
+/* TKEY_LENGTH, */
+/* TKEY1, */
+/* TKEY2, */
+/* TKEY3, */
+/* TKEY4 */
+/*---------------------------------------------------------------------------*/
+/* PRECONDITION: SCAN_STATE = WAIT_NEXT_SCAN_COPY */
+/*---------------------------------------------------------------------------*/
+void Dblqh::nextScanConfCopyLab(Signal* signal)
+{
+ NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0];
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ if (nextScanConf->fragId == RNIL) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THERE ARE NO MORE TUPLES TO FETCH. WE NEED TO CLOSE */
+/* THE COPY IN ACC AND DELETE THE STORED PROCEDURE IN TUP */
+/*---------------------------------------------------------------------------*/
+ releaseActiveFrag(signal);
+ if (tcConnectptr.p->copyCountWords == 0) {
+ closeCopyLab(signal);
+ return;
+ }//if
+/*---------------------------------------------------------------------------*/
+// Wait until copying is completed also at the starting node before reporting
+// completion. Signal completion through scanCompletedStatus-flag.
+/*---------------------------------------------------------------------------*/
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ return;
+ }//if
+
+ // If accOperationPtr == RNIL no record was returned by ACC
+ if (nextScanConf->accOperationPtr == RNIL) {
+ jam();
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ return;
+ }
+
+ set_acc_ptr_in_scan_record(scanptr.p, 0, nextScanConf->accOperationPtr);
+ initCopyTc(signal);
+ copySendTupkeyReqLab(signal);
+ return;
+}//Dblqh::nextScanConfCopyLab()
+
+void Dblqh::copySendTupkeyReqLab(Signal* signal)
+{
+ Uint32 reqinfo = 0;
+ Uint32 tupFragPtr;
+
+ reqinfo = reqinfo + (tcConnectptr.p->operation << 6);
+ reqinfo = reqinfo + (tcConnectptr.p->opExec << 10);
+ tcConnectptr.p->transactionState = TcConnectionrec::COPY_TUPKEY;
+ scanptr.p->scanState = ScanRecord::WAIT_TUPKEY_COPY;
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ tupFragPtr = fragptr.p->tupFragptr[scanptr.p->scanLocalFragid & 1];
+ {
+ TupKeyReq * const tupKeyReq = (TupKeyReq *)signal->getDataPtrSend();
+
+ tupKeyReq->connectPtr = tcConnectptr.p->tupConnectrec;
+ tupKeyReq->request = reqinfo;
+ tupKeyReq->tableRef = tcConnectptr.p->tableref;
+ tupKeyReq->fragId = scanptr.p->scanLocalFragid;
+ tupKeyReq->keyRef1 = scanptr.p->scanLocalref[0];
+ tupKeyReq->keyRef2 = scanptr.p->scanLocalref[1];
+ tupKeyReq->attrBufLen = 0;
+ tupKeyReq->opRef = tcConnectptr.i;
+ tupKeyReq->applRef = cownref;
+ tupKeyReq->schemaVersion = scanptr.p->scanSchemaVersion;
+ tupKeyReq->storedProcedure = scanptr.p->scanStoredProcId;
+ tupKeyReq->transId1 = tcConnectptr.p->transid[0];
+ tupKeyReq->transId2 = tcConnectptr.p->transid[1];
+ tupKeyReq->fragPtr = tupFragPtr;
+ tupKeyReq->primaryReplica = (tcConnectptr.p->seqNoReplica == 0)?true:false;
+ tupKeyReq->coordinatorTC = tcConnectptr.p->tcBlockref;
+ tupKeyReq->tcOpIndex = tcConnectptr.p->tcOprec;
+ tupKeyReq->savePointId = tcConnectptr.p->savePointId;
+ Uint32 blockNo = refToBlock(tcConnectptr.p->tcTupBlockref);
+ EXECUTE_DIRECT(blockNo, GSN_TUPKEYREQ, signal,
+ TupKeyReq::SignalLength);
+ }
+}//Dblqh::copySendTupkeyReqLab()
+
+/*---------------------------------------------------------------------------*/
+/* USED IN COPYING OPERATION TO RECEIVE ATTRINFO FROM TUP. */
+/*---------------------------------------------------------------------------*/
+/* ************>> */
+/* TRANSID_AI > */
+/* ************>> */
+void Dblqh::execTRANSID_AI(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ Uint32 length = signal->length() - 3;
+ ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::COPY_TUPKEY);
+ Uint32 * src = &signal->theData[3];
+ while(length > 22){
+ if (saveTupattrbuf(signal, src, 22) == ZOK) {
+ ;
+ } else {
+ jam();
+ tcConnectptr.p->errorCode = ZGET_ATTRINBUF_ERROR;
+ return;
+ }//if
+ src += 22;
+ length -= 22;
+ }
+ if (saveTupattrbuf(signal, src, length) == ZOK) {
+ return;
+ }
+ jam();
+ tcConnectptr.p->errorCode = ZGET_ATTRINBUF_ERROR;
+}//Dblqh::execTRANSID_AI()
+
+/*--------------------------------------------------------------------------*/
+/* ENTER TUPKEYCONF WITH */
+/* TC_CONNECTPTR, */
+/* TDATA2, */
+/* TDATA3, */
+/* TDATA4, */
+/* TDATA5 */
+/*--------------------------------------------------------------------------*/
+/* PRECONDITION: TRANSACTION_STATE = COPY_TUPKEY */
+/*--------------------------------------------------------------------------*/
+void Dblqh::copyTupkeyConfLab(Signal* signal)
+{
+ const TupKeyConf * const tupKeyConf = (TupKeyConf *)signal->getDataPtr();
+
+ UintR readLength = tupKeyConf->readLength;
+
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ ScanRecord* scanP = scanptr.p;
+ releaseActiveFrag(signal);
+ if (tcConnectptr.p->errorCode != 0) {
+ jam();
+ closeCopyLab(signal);
+ return;
+ }//if
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE COPY PROCESS HAS BEEN CLOSED, MOST LIKELY DUE TO A NODE FAILURE.     */
+/*---------------------------------------------------------------------------*/
+ closeCopyLab(signal);
+ return;
+ }//if
+ TcConnectionrec * tcConP = tcConnectptr.p;
+ tcConnectptr.p->totSendlenAi = readLength;
+ tcConnectptr.p->connectState = TcConnectionrec::COPY_CONNECTED;
+
+ // Read primary keys (used to get here via scan keyinfo)
+ Uint32* tmp = signal->getDataPtrSend()+24;
+ Uint32 len= tcConnectptr.p->primKeyLen = readPrimaryKeys(scanP, tcConP, tmp);
+
+ // Calculate hash (no need to linearize the key)
+ tcConnectptr.p->hashValue = md5_hash((Uint64*)tmp, len);
+
+ // Move into databuffer to make packLqhkeyreqLab happy
+ memcpy(tcConP->tupkeyData, tmp, 4*4);
+ if(len > 4)
+ keyinfoLab(tmp+4, tmp + len);
+ LqhKeyReq::setKeyLen(tcConP->reqinfo, len);
+
+/*---------------------------------------------------------------------------*/
+// To avoid using up too many operation records in ACC we will increase the
+// constant to ensure that we never send more than 40 records at a time.
+// This is where the constant 56 comes from. For long records this constant
+// will not matter that much. The current maximum is 6000 words outstanding
+// (including a number of those 56 words not really sent). We also have to
+// limit the simultaneous usage of these operation records so that node
+// recovery does not fail because of simultaneous scanning.
+/*---------------------------------------------------------------------------*/
+ UintR TnoOfWords = readLength + len;
+ TnoOfWords = TnoOfWords + MAGIC_CONSTANT;
+ TnoOfWords = TnoOfWords + (TnoOfWords >> 2);
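+ /* For example, assuming MAGIC_CONSTANT is the 56 words mentioned above:
+ * readLength = 100 and a 4 word key give 100 + 4 + 56 = 160 words, plus a
+ * quarter of that = 200 words accounted against cmaxWordsAtNodeRec. */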
+
+ /*-----------------------------------------------------------------
+ * NOTE for transid1!
+ * Transid1 in the tcConnection record is used to load-regulate the
+ * copy (node recovery) process.
+ * The number of outstanding words is written into the transid1
+ * variable. This is sent to the starting node in the
+ * LQHKEYREQ signal, and when the answer is returned in the LQHKEYCONF
+ * we can reduce the number of outstanding words and check whether
+ * more LQHKEYREQ signals should be sent.
+ *
+ * However efficient, this method is rather unsafe in that it
+ * overwrites the original transid1 data.
+ *
+ * Also see TR 587.
+ *----------------------------------------------------------------*/
+ tcConnectptr.p->transid[0] = TnoOfWords; // Data overload, see note!
+ packLqhkeyreqLab(signal);
+ tcConnectptr.p->copyCountWords += TnoOfWords;
+ scanptr.p->scanState = ScanRecord::WAIT_LQHKEY_COPY;
+ if (tcConnectptr.p->copyCountWords < cmaxWordsAtNodeRec) {
+ nextRecordCopy(signal);
+ return;
+ }//if
+ return;
+}//Dblqh::copyTupkeyConfLab()
+
+/*---------------------------------------------------------------------------*/
+/* ENTER LQHKEYCONF */
+/*---------------------------------------------------------------------------*/
+/* PRECONDITION: CONNECT_STATE = COPY_CONNECTED */
+/*---------------------------------------------------------------------------*/
+void Dblqh::copyCompletedLab(Signal* signal)
+{
+ const LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+
+ ndbrequire(tcConnectptr.p->transid[1] == lqhKeyConf->transId2);
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ if (tcConnectptr.p->copyCountWords >= cmaxWordsAtNodeRec) {
+ tcConnectptr.p->copyCountWords -= lqhKeyConf->transId1; // Data overload, see note!
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+/*---------------------------------------------------------------------------*/
+// Copy to complete, we will not start any new copying.
+/*---------------------------------------------------------------------------*/
+ closeCopyLab(signal);
+ return;
+ }//if
+ if (tcConnectptr.p->copyCountWords < cmaxWordsAtNodeRec) {
+ jam();
+ nextRecordCopy(signal);
+ }//if
+ return;
+ }//if
+ tcConnectptr.p->copyCountWords -= lqhKeyConf->transId1; // Data overload, see note!
+ ndbrequire(tcConnectptr.p->copyCountWords <= cmaxWordsAtNodeRec);
+ if (tcConnectptr.p->copyCountWords > 0) {
+ jam();
+ return;
+ }//if
+/*---------------------------------------------------------------------------*/
+// No more outstanding copies. We will only start new ones from here if it was
+// stopped before and this only happens when copyCountWords is bigger than the
+// threshold value. Since this did not occur we must be waiting for completion.
+// Check that this is so. If not we crash to find out what is going on.
+/*---------------------------------------------------------------------------*/
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+ closeCopyLab(signal);
+ return;
+ }//if
+ if (scanptr.p->scanState == ScanRecord::WAIT_LQHKEY_COPY) {
+ jam();
+/*---------------------------------------------------------------------------*/
+// Make sure that something is in progress. Otherwise we will simply stop
+// and nothing more will happen.
+/*---------------------------------------------------------------------------*/
+ systemErrorLab(signal);
+ return;
+ }//if
+ return;
+}//Dblqh::copyCompletedLab()
+
+void Dblqh::nextRecordCopy(Signal* signal)
+{
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ if (scanptr.p->scanState != ScanRecord::WAIT_LQHKEY_COPY) {
+ jam();
+/*---------------------------------------------------------------------------*/
+// Make sure that nothing is in progress. Otherwise we would have two
+// simultaneous scans on the same record and this would certainly lead to
+// unexpected behaviour.
+/*---------------------------------------------------------------------------*/
+ systemErrorLab(signal);
+ return;
+ }//if
+ scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN_COPY;
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::COPY_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ continueCopyAfterBlockedLab(signal);
+ return;
+}//Dblqh::nextRecordCopy()
+
+void Dblqh::continueCopyAfterBlockedLab(Signal* signal)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ tcConnectptr.p->errorCode = 0;
+ Uint32 acc_op_ptr= get_acc_ptr_from_scan_record(scanptr.p, 0, false);
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = acc_op_ptr;
+ signal->theData[2] = NextScanReq::ZSCAN_NEXT_COMMIT;
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ return;
+}//Dblqh::continueCopyAfterBlockedLab()
+
+void Dblqh::copyLqhKeyRefLab(Signal* signal)
+{
+ ndbrequire(tcConnectptr.p->transid[1] == signal->theData[4]);
+ tcConnectptr.p->copyCountWords -= signal->theData[3];
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ scanptr.p->scanErrorCounter++;
+ tcConnectptr.p->errorCode = terrorCode;
+ closeCopyLab(signal);
+ return;
+}//Dblqh::copyLqhKeyRefLab()
+
+void Dblqh::closeCopyLab(Signal* signal)
+{
+ if (tcConnectptr.p->copyCountWords > 0) {
+/*---------------------------------------------------------------------------*/
+// We are still waiting for responses from the starting node.
+// Wait until all of those have arrived before we start the
+// close process.
+/*---------------------------------------------------------------------------*/
+ jam();
+ return;
+ }//if
+ tcConnectptr.p->transid[0] = 0;
+ tcConnectptr.p->transid[1] = 0;
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ scanptr.p->scanState = ScanRecord::WAIT_CLOSE_COPY;
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::COPY_CLOSE_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ continueCloseCopyAfterBlockedLab(signal);
+ return;
+}//Dblqh::closeCopyLab()
+
+void Dblqh::continueCloseCopyAfterBlockedLab(Signal* signal)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = RNIL;
+ signal->theData[2] = ZCOPY_CLOSE;
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ return;
+}//Dblqh::continueCloseCopyAfterBlockedLab()
+
+/*---------------------------------------------------------------------------*/
+/* ENTER NEXT_SCANCONF WITH */
+/* SCANPTR, */
+/* TFRAGID, */
+/* TACC_OPPTR, */
+/* TLOCAL_KEY1, */
+/* TLOCAL_KEY2, */
+/* TKEY_LENGTH, */
+/* TKEY1, */
+/* TKEY2, */
+/* TKEY3, */
+/* TKEY4 */
+/*---------------------------------------------------------------------------*/
+/* PRECONDITION: SCAN_STATE = WAIT_CLOSE_COPY */
+/*---------------------------------------------------------------------------*/
+void Dblqh::accCopyCloseConfLab(Signal* signal)
+{
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ scanptr.p->scanState = ScanRecord::WAIT_DELETE_STORED_PROC_ID_COPY;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ signal->theData[0] = tcConnectptr.p->tupConnectrec;
+ signal->theData[1] = tcConnectptr.p->tableref;
+ signal->theData[2] = scanptr.p->scanSchemaVersion;
+ signal->theData[3] = ZDELETE_STORED_PROC_ID;
+ signal->theData[4] = scanptr.p->scanStoredProcId;
+ sendSignal(tcConnectptr.p->tcTupBlockref, GSN_STORED_PROCREQ, signal, 5, JBB);
+ return;
+}//Dblqh::accCopyCloseConfLab()
+
+/*---------------------------------------------------------------------------*/
+/* ENTER STORED_PROCCONF WITH */
+/* TC_CONNECTPTR, */
+/* TSTORED_PROC_ID */
+/*---------------------------------------------------------------------------*/
+/* PRECONDITION: SCAN_STATE = WAIT_DELETE_STORED_PROC_ID_COPY */
+/*---------------------------------------------------------------------------*/
+void Dblqh::tupCopyCloseConfLab(Signal* signal)
+{
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.p->copyFragState = ZIDLE;
+
+ if (tcConnectptr.p->abortState == TcConnectionrec::NEW_FROM_TC) {
+ jam();
+ tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec;
+ ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
+ tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1;
+ signal->theData[0] = ZLQH_TRANS_NEXT;
+ signal->theData[1] = tcNodeFailptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+
+ CopyFragRef * const ref = (CopyFragRef *)&signal->theData[0];
+ ref->userPtr = scanptr.p->copyPtr;
+ ref->sendingNodeId = cownNodeid;
+ ref->startingNodeId = scanptr.p->scanNodeId;
+ ref->tableId = fragptr.p->tabRef;
+ ref->fragId = fragptr.p->fragId;
+ ref->errorCode = ZNODE_FAILURE_ERROR;
+ sendSignal(scanptr.p->scanApiBlockref, GSN_COPY_FRAGREF, signal,
+ CopyFragRef::SignalLength, JBB);
+ } else {
+ if (scanptr.p->scanErrorCounter > 0) {
+ jam();
+ CopyFragRef * const ref = (CopyFragRef *)&signal->theData[0];
+ ref->userPtr = scanptr.p->copyPtr;
+ ref->sendingNodeId = cownNodeid;
+ ref->startingNodeId = scanptr.p->scanNodeId;
+ ref->tableId = fragptr.p->tabRef;
+ ref->fragId = fragptr.p->fragId;
+ ref->errorCode = tcConnectptr.p->errorCode;
+ sendSignal(scanptr.p->scanApiBlockref, GSN_COPY_FRAGREF, signal,
+ CopyFragRef::SignalLength, JBB);
+ } else {
+ jam();
+ CopyFragConf * const conf = (CopyFragConf *)&signal->theData[0];
+ conf->userPtr = scanptr.p->copyPtr;
+ conf->sendingNodeId = cownNodeid;
+ conf->startingNodeId = scanptr.p->scanNodeId;
+ conf->tableId = tcConnectptr.p->tableref;
+ conf->fragId = tcConnectptr.p->fragmentid;
+ sendSignal(scanptr.p->scanApiBlockref, GSN_COPY_FRAGCONF, signal,
+ CopyFragConf::SignalLength, JBB);
+ }//if
+ }//if
+ releaseActiveCopy(signal);
+ tcConnectptr.p->tcScanRec = RNIL;
+ finishScanrec(signal);
+ releaseOprec(signal);
+ releaseTcrec(signal, tcConnectptr);
+ releaseScanrec(signal);
+}//Dblqh::tupCopyCloseConfLab()
+
+/*---------------------------------------------------------------------------*/
+/* A NODE FAILURE OCCURRED DURING THE COPY PROCESS. WE NEED TO CLOSE THE */
+/* COPY PROCESS SINCE A NODE FAILURE DURING THE COPY PROCESS WILL ALSO */
+/* FAIL THE NODE THAT IS TRYING TO START-UP. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::closeCopyRequestLab(Signal* signal)
+{
+ scanptr.p->scanErrorCounter++;
+ switch (scanptr.p->scanState) {
+ case ScanRecord::WAIT_TUPKEY_COPY:
+ case ScanRecord::WAIT_NEXT_SCAN_COPY:
+ jam();
+/*---------------------------------------------------------------------------*/
+/* SET COMPLETION STATUS AND WAIT FOR OPPORTUNITY TO STOP THE SCAN. */
+// ALSO SET NO OF WORDS OUTSTANDING TO ZERO TO AVOID ETERNAL WAIT.
+/*---------------------------------------------------------------------------*/
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ tcConnectptr.p->copyCountWords = 0;
+ break;
+ case ScanRecord::WAIT_ACC_COPY:
+ case ScanRecord::WAIT_STORED_PROC_COPY:
+ jam();
+/*---------------------------------------------------------------------------*/
+/* WE ARE CURRENTLY STARTING UP THE SCAN. SET COMPLETED STATUS AND WAIT FOR*/
+/* COMPLETION OF STARTUP. */
+/*---------------------------------------------------------------------------*/
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ break;
+ case ScanRecord::WAIT_CLOSE_COPY:
+ case ScanRecord::WAIT_DELETE_STORED_PROC_ID_COPY:
+ jam();
+/*---------------------------------------------------------------------------*/
+/* CLOSE IS ALREADY ONGOING. WE NEED NOT DO ANYTHING. */
+/*---------------------------------------------------------------------------*/
+ break;
+ case ScanRecord::WAIT_LQHKEY_COPY:
+ jam();
+/*---------------------------------------------------------------------------*/
+/* WE ARE WAITING FOR THE FAILED NODE. THE NODE WILL NEVER COME BACK. */
+// WE NEED TO START THE FAILURE HANDLING IMMEDIATELY.
+// ALSO SET NO OF WORDS OUTSTANDING TO ZERO TO AVOID ETERNAL WAIT.
+/*---------------------------------------------------------------------------*/
+ tcConnectptr.p->copyCountWords = 0;
+ closeCopyLab(signal);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dblqh::closeCopyRequestLab()
+
+/* ****************************************************** */
+/* COPY_ACTIVEREQ: Change state of a fragment to ACTIVE. */
+/* ****************************************************** */
+void Dblqh::execCOPY_ACTIVEREQ(Signal* signal)
+{
+ CRASH_INSERTION(5026);
+
+ const CopyActiveReq * const req = (CopyActiveReq *)&signal->theData[0];
+ jamEntry();
+ Uint32 masterPtr = req->userPtr;
+ BlockReference masterRef = req->userRef;
+ tabptr.i = req->tableId;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ Uint32 fragId = req->fragId;
+ ndbrequire(getFragmentrec(signal, fragId));
+
+ fragptr.p->fragDistributionKey = req->distributionKey;
+
+ ndbrequire(cnoActiveCopy < 3);
+ cactiveCopy[cnoActiveCopy] = fragptr.i;
+ cnoActiveCopy++;
+ fragptr.p->masterBlockref = masterRef;
+ fragptr.p->masterPtr = masterPtr;
+ if (fragptr.p->fragStatus == Fragrecord::FSACTIVE) {
+ jam();
+/*------------------------------------------------------*/
+/* PROCESS HAS ALREADY BEEN STARTED BY A PREVIOUS*/
+/* MASTER. WE HAVE ALREADY SET THE PROPER MASTER */
+/* BLOCK REFERENCE. */
+/*------------------------------------------------------*/
+ if (fragptr.p->activeTcCounter == 0) {
+ jam();
+/*------------------------------------------------------*/
+/* PROCESS WAS ALREADY COMPLETED.               */
+/*------------------------------------------------------*/
+ sendCopyActiveConf(signal, tabptr.i);
+ }//if
+ return;
+ }//if
+ fragptr.p->fragStatus = Fragrecord::FSACTIVE;
+ if (fragptr.p->lcpFlag == Fragrecord::LCP_STATE_TRUE) {
+ jam();
+ fragptr.p->logFlag = Fragrecord::STATE_TRUE;
+ }//if
+ fragptr.p->activeTcCounter = 1;
+/*------------------------------------------------------*/
+/* SET IT TO ONE TO ENSURE THAT IT IS NOT POSSIBLE*/
+/* TO DECREASE IT TO ZERO UNTIL WE HAVE COMPLETED */
+/* THE SCAN. */
+/*------------------------------------------------------*/
+ signal->theData[0] = ZSCAN_TC_CONNECT;
+ signal->theData[1] = 0;
+ signal->theData[2] = tabptr.i;
+ signal->theData[3] = fragId;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+ return;
+}//Dblqh::execCOPY_ACTIVEREQ()
+
+void Dblqh::scanTcConnectLab(Signal* signal, Uint32 tstartTcConnect, Uint32 fragId)
+{
+ Uint32 tendTcConnect;
+
+ ndbrequire(getFragmentrec(signal, fragId));
+ if ((tstartTcConnect + 200) >= ctcConnectrecFileSize) {
+ jam();
+ tendTcConnect = ctcConnectrecFileSize - 1;
+ } else {
+ jam();
+ tendTcConnect = tstartTcConnect + 200;
+ }//if
+ for (tcConnectptr.i = tstartTcConnect;
+ tcConnectptr.i <= tendTcConnect;
+ tcConnectptr.i++) {
+ jam();
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ if (tcConnectptr.p->transactionState != TcConnectionrec::IDLE) {
+ switch (tcConnectptr.p->logWriteState) {
+ case TcConnectionrec::NOT_WRITTEN:
+ jam();
+ if (fragptr.i == tcConnectptr.p->fragmentptr) {
+ jam();
+ fragptr.p->activeTcCounter = fragptr.p->activeTcCounter + 1;
+ tcConnectptr.p->logWriteState = TcConnectionrec::NOT_WRITTEN_WAIT;
+ }//if
+ break;
+ default:
+ jam();
+ /*empty*/;
+ break;
+ }//switch
+ }//if
+ }//for
+ if (tendTcConnect < (ctcConnectrecFileSize - 1)) {
+ jam();
+ signal->theData[0] = ZSCAN_TC_CONNECT;
+ signal->theData[1] = tendTcConnect + 1;
+ signal->theData[2] = tabptr.i;
+ signal->theData[3] = fragId;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+ } else {
+ jam();
+/*------------------------------------------------------*/
+/* THE SCAN HAS BEEN COMPLETED. WE CHECK IF ALL */
+/* OPERATIONS HAVE ALREADY BEEN COMPLETED. */
+/*------------------------------------------------------*/
+ ndbrequire(fragptr.p->activeTcCounter > 0);
+ fragptr.p->activeTcCounter--;
+ if (fragptr.p->activeTcCounter == 0) {
+ jam();
+/*------------------------------------------------------*/
+/* SET START GLOBAL CHECKPOINT TO THE NEXT */
+/* CHECKPOINT WE HAVE NOT YET HEARD ANYTHING ABOUT.*/
+/* THIS GCP WILL BE COMPLETELY COVERED BY THE LOG.*/
+/*------------------------------------------------------*/
+ fragptr.p->startGci = cnewestGci + 1;
+ sendCopyActiveConf(signal, tabptr.i);
+ }//if
+ }//if
+ return;
+}//Dblqh::scanTcConnectLab()
+
+/*---------------------------------------------------------------------------*/
+/* A NEW MASTER IS REQUESTING THE STATE IN LQH OF THE COPY FRAGMENT PARTS. */
+/*---------------------------------------------------------------------------*/
+/* ***************>> */
+/* COPY_STATEREQ > */
+/* ***************>> */
+void Dblqh::execCOPY_STATEREQ(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(0);
+#if 0
+ Uint32* dataPtr = &signal->theData[2];
+ BlockReference tmasterBlockref = signal->theData[0];
+ Uint32 tnoCopy = 0;
+ do {
+ jam();
+ arrGuard(tnoCopy, 4);
+ fragptr.i = cactiveCopy[tnoCopy];
+ if (fragptr.i == RNIL) {
+ jam();
+ break;
+ }//if
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (fragptr.p->copyFragState != ZIDLE) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS FRAGMENT IS CURRENTLY ACTIVE IN COPYING THE FRAGMENT. */
+/*---------------------------------------------------------------------------*/
+ scanptr.i = fragptr.p->fragScanRec[NR_ScanNo];
+ c_scanRecordPool.getPtr(scanptr);
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+ dataPtr[3 + (tnoCopy << 2)] = ZCOPY_CLOSING;
+ } else {
+ jam();
+ dataPtr[3 + (tnoCopy << 2)] = ZCOPY_ONGOING;
+ }//if
+ dataPtr[2 + (tnoCopy << 2)] = scanptr.p->scanSchemaVersion;
+ scanptr.p->scanApiBlockref = tmasterBlockref;
+ } else {
+ ndbrequire(fragptr.p->activeTcCounter != 0);
+/*---------------------------------------------------------------------------*/
+/* COPY FRAGMENT IS COMPLETED AND WE ARE CURRENTLY GETTING THE STARTING */
+/* GCI OF THE NEW REPLICA OF THIS FRAGMENT. */
+/*---------------------------------------------------------------------------*/
+ fragptr.p->masterBlockref = tmasterBlockref;
+ dataPtr[3 + (tnoCopy << 2)] = ZCOPY_ACTIVATION;
+ }//if
+ dataPtr[tnoCopy << 2] = fragptr.p->tabRef;
+ dataPtr[1 + (tnoCopy << 2)] = fragptr.p->fragId;
+ tnoCopy++;
+ } while (tnoCopy < cnoActiveCopy);
+ signal->theData[0] = cownNodeid;
+ signal->theData[1] = tnoCopy;
+ sendSignal(tmasterBlockref, GSN_COPY_STATECONF, signal, 18, JBB);
+#endif
+ return;
+}//Dblqh::execCOPY_STATEREQ()
+
+/* ========================================================================= */
+/* ======= INITIATE TC RECORD AT COPY FRAGMENT ======= */
+/* */
+/* SUBROUTINE SHORT NAME = ICT */
+/* ========================================================================= */
+void Dblqh::initCopyTc(Signal* signal)
+{
+ const NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0];
+ scanptr.p->scanLocalref[0] = nextScanConf->localKey[0];
+ scanptr.p->scanLocalref[1] = nextScanConf->localKey[1];
+ scanptr.p->scanLocalFragid = nextScanConf->fragId;
+ tcConnectptr.p->operation = ZREAD;
+ tcConnectptr.p->apiVersionNo = 0;
+ tcConnectptr.p->opExec = 0; /* NOT INTERPRETED MODE */
+ tcConnectptr.p->schemaVersion = scanptr.p->scanSchemaVersion;
+ Uint32 reqinfo = 0;
+ LqhKeyReq::setLockType(reqinfo, ZINSERT);
+ LqhKeyReq::setDirtyFlag(reqinfo, 1);
+ LqhKeyReq::setSimpleFlag(reqinfo, 1);
+ LqhKeyReq::setOperation(reqinfo, ZWRITE);
+ /* AILen in LQHKEYREQ IS ZERO */
+ tcConnectptr.p->reqinfo = reqinfo;
+/* ------------------------------------------------------------------------ */
+/* THE RECEIVING NODE WILL EXPECT THAT IT IS THE LAST NODE AND WILL */
+/* SEND COMPLETED AS THE RESPONSE SIGNAL SINCE DIRTY_OP BIT IS SET. */
+/* ------------------------------------------------------------------------ */
+ tcConnectptr.p->nodeAfterNext[0] = ZNIL;
+ tcConnectptr.p->nodeAfterNext[1] = ZNIL;
+ tcConnectptr.p->tcBlockref = cownref;
+ tcConnectptr.p->readlenAi = 0;
+ tcConnectptr.p->storedProcId = ZNIL;
+ tcConnectptr.p->opExec = 0;
+ tcConnectptr.p->nextSeqNoReplica = 0;
+ tcConnectptr.p->dirtyOp = ZFALSE;
+ tcConnectptr.p->lastReplicaNo = 0;
+ tcConnectptr.p->currTupAiLen = 0;
+ tcConnectptr.p->tcTimer = cLqhTimeOutCount;
+}//Dblqh::initCopyTc()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEND COPY_ACTIVECONF TO MASTER DIH ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::sendCopyActiveConf(Signal* signal, Uint32 tableId)
+{
+ releaseActiveCopy(signal);
+ CopyActiveConf * const conf = (CopyActiveConf *)&signal->theData[0];
+ conf->userPtr = fragptr.p->masterPtr;
+ conf->tableId = tableId;
+ conf->fragId = fragptr.p->fragId;
+ conf->startingNodeId = cownNodeid;
+ conf->startGci = fragptr.p->startGci;
+ sendSignal(fragptr.p->masterBlockref, GSN_COPY_ACTIVECONF, signal,
+ CopyActiveConf::SignalLength, JBB);
+}//Dblqh::sendCopyActiveConf()
+
+/* ##########################################################################
+ * ####### LOCAL CHECKPOINT MODULE #######
+ *
+ * ##########################################################################
+ * --------------------------------------------------------------------------
+ * THIS MODULE HANDLES THE EXECUTION AND CONTROL OF LOCAL CHECKPOINTS
+ * IT CONTROLS THE LOCAL CHECKPOINTS IN TUP AND ACC. IT ALSO INTERACTS
+ * WITH DIH TO CONTROL WHICH GLOBAL CHECKPOINTS ARE RECOVERABLE.
+ * ------------------------------------------------------------------------- */
+void Dblqh::execEMPTY_LCP_REQ(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(5008);
+ EmptyLcpReq * const emptyLcpOrd = (EmptyLcpReq*)&signal->theData[0];
+
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+
+ Uint32 nodeId = refToNode(emptyLcpOrd->senderRef);
+
+ lcpPtr.p->m_EMPTY_LCP_REQ.set(nodeId);
+ lcpPtr.p->reportEmpty = true;
+
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_IDLE){
+ jam();
+ bool ok = false;
+ switch(clcpCompletedState){
+ case LCP_IDLE:
+ ok = true;
+ sendEMPTY_LCP_CONF(signal, true);
+ break;
+ case LCP_RUNNING:
+ ok = true;
+ sendEMPTY_LCP_CONF(signal, false);
+ break;
+ case LCP_CLOSE_STARTED:
+ jam();
+ case ACC_LCP_CLOSE_COMPLETED:
+ jam();
+ case TUP_LCP_CLOSE_COMPLETED:
+ jam();
+ ok = true;
+ break;
+ }
+ ndbrequire(ok);
+
+ }//if
+
+ return;
+}//Dblqh::execEMPTY_LCP_REQ()
+
+void Dblqh::execLCP_FRAG_ORD(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(5010);
+ LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
+ Uint32 lcpId = lcpFragOrd->lcpId;
+
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+
+ lcpPtr.p->lastFragmentFlag = lcpFragOrd->lastFragmentFlag;
+ if (lcpFragOrd->lastFragmentFlag) {
+ jam();
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_IDLE) {
+ jam();
+ /* ----------------------------------------------------------
+ * NOW THE COMPLETE LOCAL CHECKPOINT ROUND IS COMPLETED.
+ * -------------------------------------------------------- */
+ if (cnoOfFragsCheckpointed > 0) {
+ jam();
+ completeLcpRoundLab(signal);
+ } else {
+ jam();
+ sendLCP_COMPLETE_REP(signal, lcpId);
+ }//if
+ }
+ return;
+ }//if
+ tabptr.i = lcpFragOrd->tableId;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+
+ ndbrequire(tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING ||
+ tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE ||
+ tabptr.p->tableStatus == Tablerec::TABLE_DEFINED);
+
+ ndbrequire(getFragmentrec(signal, lcpFragOrd->fragmentId));
+
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+ ndbrequire(!lcpPtr.p->lcpQueued);
+ if (c_lcpId < lcpFragOrd->lcpId) {
+ jam();
+ /**
+ * A new LCP
+ */
+ c_lcpId = lcpFragOrd->lcpId;
+ ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_IDLE);
+ setLogTail(signal, lcpFragOrd->keepGci);
+ ndbrequire(clcpCompletedState == LCP_IDLE);
+ clcpCompletedState = LCP_RUNNING;
+ }//if
+ cnoOfFragsCheckpointed++;
+
+ if(tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
+ jam();
+ LcpRecord::FragOrd fragOrd;
+ fragOrd.fragPtrI = fragptr.i;
+ fragOrd.lcpFragOrd = * lcpFragOrd;
+ sendLCP_FRAG_REP(signal, fragOrd);
+ return;
+ }
+
+ if (lcpPtr.p->lcpState != LcpRecord::LCP_IDLE) {
+ ndbrequire(lcpPtr.p->lcpQueued == false);
+ lcpPtr.p->lcpQueued = true;
+ lcpPtr.p->queuedFragment.fragPtrI = fragptr.i;
+ lcpPtr.p->queuedFragment.lcpFragOrd = * lcpFragOrd;
+ return;
+ }//if
+
+ lcpPtr.p->currentFragment.fragPtrI = fragptr.i;
+ lcpPtr.p->currentFragment.lcpFragOrd = * lcpFragOrd;
+
+ sendLCP_FRAGIDREQ(signal);
+}//Dblqh::execLCP_FRAG_ORD()
+
+/* --------------------------------------------------------------------------
+ * PRECONDITION: LCP_PTR:LCP_STATE = WAIT_FRAGID
+ * --------------------------------------------------------------------------
+ * WE NOW HAVE THE LOCAL FRAGMENTS THAT THE LOCAL CHECKPOINT WILL USE.
+ * -------------------------------------------------------------------------- */
+void Dblqh::execLCP_FRAGIDCONF(Signal* signal)
+{
+ UintR Tfragid[4];
+
+ jamEntry();
+
+ lcpPtr.i = signal->theData[0];
+
+ Uint32 TaccPtr = signal->theData[1];
+ Uint32 noLocfrag = signal->theData[2];
+ Tfragid[0] = signal->theData[3];
+ Tfragid[1] = signal->theData[4];
+
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_WAIT_FRAGID);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECKING OF THE TNO_LOCFRAG VALUE. AN OUT-OF-BOUND VALUE IMPLIES
+ * AN INDEX OUT OF RANGE, WHICH WILL CAUSE A SYSTEM RESTART. THIS IS DESIRED.
+ * ------------------------------------------------------------------------ */
+ lcpPtr.p->lcpAccptr = TaccPtr;
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ ndbrequire(noLocfrag - 1 < 2);
+ for (Uint32 Tindex = 0; Tindex < noLocfrag; Tindex++) {
+ jam();
+ Uint32 fragId = Tfragid[Tindex];
+ /* ----------------------------------------------------------------------
+ * THERE IS NO ERROR CHECKING ON PURPOSE. IT IS POSSIBLE TO CALCULATE HOW
+ * MANY LOCAL LCP RECORDS THERE SHOULD BE. IT SHOULD NEVER HAPPEN THAT
+ * NONE IS FREE. IF NONE IS FREE IT WILL ALSO BE A POINTER OUT OF RANGE,
+ * WHICH IS AN ERROR IN ITSELF. THIS REUSES THE ERROR HANDLING IN THE
+ * AXE VM.
+ * ---------------------------------------------------------------------- */
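+ /* ----------------------------------------------------------------------
+ * TWO LCP_LOC RECORDS ARE SEIZED PER LOCAL FRAGMENT: THE FIRST TRACKS THE
+ * CHECKPOINT IN ACC, THE SECOND THE ONE IN TUP. LCP_LOCPTR IS LEFT
+ * POINTING AT THE TUP RECORD, WHOSE INDEX IS CARRIED IN TUP_PREPLCPREQ.
+ * ---------------------------------------------------------------------- */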
+ seizeLcpLoc(signal);
+ initLcpLocAcc(signal, fragId);
+ seizeLcpLoc(signal);
+ initLcpLocTup(signal, fragId);
+ signal->theData[0] = lcpLocptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
+ signal->theData[3] = lcpLocptr.p->locFragid;
+ signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
+ signal->theData[5] = lcpPtr.p->currentFragment.lcpFragOrd.lcpId % MAX_LCP_STORED;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUP_PREPLCPREQ, signal, 6, JBB);
+ }//for
+ lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_TUP_PREPLCP;
+ return;
+}//Dblqh::execLCP_FRAGIDCONF()
+
+/* --------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_STATE = WAIT_TUPPREPLCP
+ * --------------------------------------------------------------------------
+ * WE HAVE NOW PREPARED A LOCAL FRAGMENT IN TUP FOR LCP EXECUTION.
+ * -------------------------------------------------------------------------- */
+void Dblqh::execTUP_PREPLCPCONF(Signal* signal)
+{
+ UintR ttupPtr;
+
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ttupPtr = signal->theData[1];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::WAIT_TUP_PREPLCP);
+
+ lcpLocptr.p->tupRef = ttupPtr;
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::IDLE;
+ checkLcpTupprep(signal);
+ if (lcpPtr.p->lcpState != LcpRecord::LCP_WAIT_HOLDOPS) {
+ jam();
+ return;
+ }//if
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ lcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ do {
+ jam();
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::WAIT_LCPHOLDOP;
+ signal->theData[0] = lcpPtr.p->lcpAccptr;
+ signal->theData[1] = lcpLocptr.p->locFragid;
+ signal->theData[2] = 0;
+ signal->theData[3] = lcpLocptr.i;
+ sendSignal(fragptr.p->accBlockref, GSN_LCP_HOLDOPREQ, signal, 4, JBA);
+ lcpLocptr.i = lcpLocptr.p->nextLcpLoc;
+ } while (lcpLocptr.i != RNIL);
+ /* ------------------------------------------------------------------------
+ * SET STATE ON FRAGMENT TO BLOCKED TO ENSURE THAT NO MORE OPERATIONS ARE
+ * STARTED FROM LQH IN TUP AND ACC UNTIL THE START CHECKPOINT HAS BEEN
+ * COMPLETED. ALSO SET THE LOCAL CHECKPOINT STATE TO WAIT FOR
+ * LCP_HOLDOPCONF
+ * ----------------------------------------------------------------------- */
+ fragptr.p->fragStatus = Fragrecord::BLOCKED;
+ fragptr.p->fragActiveStatus = ZTRUE;
+ lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_HOLDOPS;
+ return;
+}//Dblqh::execTUP_PREPLCPCONF()
+
+void Dblqh::execTUP_PREPLCPREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execTUP_PREPLCPREF()
+
+void Dblqh::execLCP_FRAGIDREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execLCP_FRAGIDREF()
+
+/* --------------------------------------------------------------------------
+ * A NUMBER OF OPERATIONS HAVE BEEN SET ON HOLD IN ACC. MOVE THOSE TO THE
+ * LIST OF BLOCKED ACC OPERATIONS. IF MORE OPERATIONS ARE BLOCKED, GET THOSE;
+ * OTHERWISE CONTINUE THE LOCAL CHECKPOINT BY REQUESTING TUP AND ACC TO
+ * WRITE THEIR START CHECKPOINT.
+ * --------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = WAIT_LCPHOLDOP
+ * ------------------------------------------------------------------------- */
+/* ***************>> */
+/* LCP_HOLDOPCONF > */
+/* ***************>> */
+void Dblqh::execLCP_HOLDOPCONF(Signal* signal)
+{
+ UintR tnoHoldops;
+ Uint32 Tdata[23];
+ Uint32 Tlength;
+
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ Tlength = signal->theData[1];
+ for (Uint32 i = 0; i < 23; i++)
+ Tdata[i] = signal->theData[i + 2];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::WAIT_LCPHOLDOP);
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS
+ * REFERENCE WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
+ * ----------------------------------------------------------------------- */
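+ /* ------------------------------------------------------------------------
+ * THE LENGTH WORD PACKS TWO FIELDS: THE LOW 16 BITS GIVE THE NUMBER OF
+ * HELD OPERATIONS RETURNED IN THIS SIGNAL AND THE HIGH 16 BITS FLAG
+ * WHETHER ACC HAS MORE HELD OPERATIONS TO REPORT (CHECKED FURTHER DOWN).
+ * ----------------------------------------------------------------------- */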
+ tnoHoldops = Tlength & 65535;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ ndbrequire(tnoHoldops <= 23);
+ for (Uint32 Tindex = 0; Tindex < tnoHoldops; Tindex++) {
+ jam();
+ tcConnectptr.i = Tdata[Tindex];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ moveActiveToAcc(signal);
+ }//for
+ if ((Tlength >> 16) == 1) {
+ jam();
+ /* MORE HOLDOPS NEEDED */
+ signal->theData[0] = lcpPtr.p->lcpAccptr;
+ signal->theData[1] = lcpLocptr.p->locFragid;
+ signal->theData[2] = 1;
+ signal->theData[3] = lcpLocptr.i;
+ sendSignal(fragptr.p->accBlockref, GSN_LCP_HOLDOPREQ, signal, 4, JBA);
+ return;
+ } else {
+ jam();
+
+ /* NO MORE HOLDOPS NEEDED */
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::HOLDOP_READY;
+ checkLcpHoldop(signal);
+
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_WAIT_ACTIVE_FINISH) {
+ if (fragptr.p->activeList == RNIL) {
+ jam();
+ /* ------------------------------------------------------------------
+ * THERE ARE NO MORE ACTIVE OPERATIONS. IT IS NOW OK TO START THE
+ * LOCAL CHECKPOINT IN BOTH TUP AND ACC.
+ * ----------------------------------------------------------------- */
+ sendStartLcp(signal);
+ lcpPtr.p->lcpState = LcpRecord::LCP_START_CHKP;
+ } else {
+ jam();
+ // Set this to signal releaseActiveFrag
+ // that it should check to see if it's time to call sendStartLcp
+ fragptr.p->lcpRef = lcpPtr.i;
+ }//if
+ }//if
+ }//if
+
+ /* ----------------------- */
+ /* ELSE */
+ /* ------------------------------------------------------------------------
+ * THERE ARE STILL MORE ACTIVE OPERATIONS. WAIT UNTIL THEY ARE FINISHED.
+ * THIS IS DISCOVERED WHEN RELEASE_ACTIVE_FRAG IS EXECUTED.
+ * ------------------------------------------------------------------------
+ * DO NOTHING, EXIT IS EXECUTED BELOW
+ * ----------------------------------------------------------------------- */
+ return;
+}//Dblqh::execLCP_HOLDOPCONF()
+
+/* ***************> */
+/* LCP_HOLDOPREF > */
+/* ***************> */
+void Dblqh::execLCP_HOLDOPREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execLCP_HOLDOPREF()
+
+/* ************************************************************************>>
+ * ACC_LCPSTARTED: Confirm that ACC started local checkpoint and undo
+ * logging is on.
+ * ************************************************************************>>
+ * --------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = ACC_WAIT_STARTED
+ * ------------------------------------------------------------------------- */
+void Dblqh::execACC_LCPSTARTED(Signal* signal)
+{
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED);
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS
+ * REFERENCE WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
+ * ----------------------------------------------------------------------- */
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::ACC_STARTED;
+ lcpStartedLab(signal);
+ return;
+}//Dblqh::execACC_LCPSTARTED()
+
+/* ******************************************> */
+/* TUP_LCPSTARTED: Same as above but for TUP. */
+/* ******************************************> */
+/* --------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = TUP_WAIT_STARTED
+ * ------------------------------------------------------------------------- */
+void Dblqh::execTUP_LCPSTARTED(Signal* signal)
+{
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_WAIT_STARTED);
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS REFERENCE
+ * WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
+ * ----------------------------------------------------------------------- */
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::TUP_STARTED;
+ lcpStartedLab(signal);
+ return;
+}//Dblqh::execTUP_LCPSTARTED()
+
+void Dblqh::lcpStartedLab(Signal* signal)
+{
+ if (checkLcpStarted(signal))
+ {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THE LOCAL CHECKPOINT HAS BEEN STARTED. IT IS NOW TIME TO
+ * RESTART THE TRANSACTIONS WHICH HAVE BEEN BLOCKED.
+ * --------------------------------------------------------------------- */
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ /* ----------------------------------------------------------------------
+ * UPDATE THE MAX_GCI_IN_LCP AND MAX_GCI_COMPLETED_IN_LCP NOW BEFORE
+ * ACTIVATING THE FRAGMENT AGAIN.
+ * --------------------------------------------------------------------- */
+ ndbrequire(lcpPtr.p->currentFragment.lcpFragOrd.lcpNo < MAX_LCP_STORED);
+ fragptr.p->maxGciInLcp = fragptr.p->newestGci;
+ fragptr.p->maxGciCompletedInLcp = cnewestCompletedGci;
+ sendAccContOp(signal); /* START OPERATIONS IN ACC */
+ moveAccActiveFrag(signal); /* MOVE FROM ACC BLOCKED LIST TO ACTIVE LIST
+ ON FRAGMENT */
+ }
+ /*---------------*/
+ /* ELSE */
+ /*-------------------------------------------------------------------------*/
+ /* THE LOCAL CHECKPOINT HAS NOT BEEN STARTED. EXIT AND WAIT FOR
+ * MORE SIGNALS */
+ /*-------------------------------------------------------------------------*/
+ /* DO NOTHING, EXIT IS EXECUTED BELOW */
+ /*-------------------------------------------------------------------------*/
+ return;
+}//Dblqh::lcpStartedLab()
+
+/*---------------------------------------------------------------------------
+ * ACC HAS RESTARTED THE BLOCKED OPERATIONS AGAIN IN ONE FRAGMENT PART.
+ * IT IS NOW OUR TURN TO RESTART ALL OPERATIONS QUEUED IN LQH IF ALL
+ * FRAGMENT PARTS ARE COMPLETED.
+ *-------------------------------------------------------------------------- */
+void Dblqh::execACC_CONTOPCONF(Signal* signal)
+{
+ if(ERROR_INSERTED(5035) && signal->getSendersBlockRef() != reference()){
+ sendSignalWithDelay(reference(), GSN_ACC_CONTOPCONF, signal, 1000,
+ signal->length());
+ return;
+ }
+
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ lcpLocptr.p->accContCounter = 1;
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
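+ /* ------------------------------------------------------------------------
+ * ONLY WHEN EVERY ACC LCP_LOC RECORD HAS RECEIVED ITS ACC_CONTOPCONF DO WE
+ * RESTART THE OPERATIONS QUEUED ON THE FRAGMENT. OTHERWISE WE SIMPLY WAIT
+ * FOR THE REMAINING CONFIRMATIONS.
+ * ----------------------------------------------------------------------- */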
+ lcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ do {
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (lcpLocptr.p->accContCounter == 0) {
+ jam();
+ return;
+ }//if
+ lcpLocptr.i = lcpLocptr.p->nextLcpLoc;
+ } while (lcpLocptr.i != RNIL);
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ restartOperationsLab(signal);
+ return;
+}//Dblqh::execACC_CONTOPCONF()
+
+/* ********************************************************* */
+/* LQH_RESTART_OP: Restart operations after being blocked.  */
+/* ********************************************************* */
+/*---------------------------------------------------------------------------*/
+/* PRECONDITION: FRAG_STATUS = BLOCKED AND LCP_STATE = STARTED */
+/*---------------------------------------------------------------------------*/
+void Dblqh::execLQH_RESTART_OP(Signal* signal)
+{
+ jamEntry();
+ fragptr.i = signal->theData[0];
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+
+ lcpPtr.i = signal->theData[1];
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ ndbrequire(fragptr.p->fragStatus == Fragrecord::BLOCKED);
+ restartOperationsLab(signal);
+}//Dblqh::execLQH_RESTART_OP()
+
+void Dblqh::restartOperationsLab(Signal* signal)
+{
+ Uint32 loopCount = 0;
+ tcConnectptr.i = fragptr.p->firstWaitQueue;
+ do {
+ if (tcConnectptr.i != RNIL) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* START UP THE TRANSACTION AGAIN. WE START IT AS A SEPARATE SIGNAL. */
+/*---------------------------------------------------------------------------*/
+ signal->theData[0] = ZRESTART_OPERATIONS_AFTER_STOP;
+ signal->theData[1] = tcConnectptr.i;
+ signal->theData[2] = fragptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ tcConnectptr.i = tcConnectptr.p->nextTc;
+ } else {
+ jam();
+/*--------------------------------------------------------------------------*/
+/* NO MORE OPERATIONS TO RESTART. WE CAN NOW RESET THE STATE TO ACTIVE AND */
+/* RESTART NORMAL ACTIVITIES ON THE FRAGMENT WHILE THE FUZZY PART OF THE */
+/* LOCAL CHECKPOINT IS COMPLETING. */
+/* IF THE CHECKPOINT WAS COMPLETED ALREADY ON THIS FRAGMENT WE PROCEED WITH */
+/* THE NEXT FRAGMENT NOW THAT WE HAVE COMPLETED THIS CHECKPOINT. */
+/*--------------------------------------------------------------------------*/
+ fragptr.p->fragStatus = Fragrecord::FSACTIVE;
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_BLOCKED_COMP) {
+ jam();
+ contChkpNextFragLab(signal);
+ return;
+ }//if
+ return;
+ }//if
+ loopCount++;
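+/*---------------------------------------------------------------------------*/
+/* REAL-TIME BREAK: AFTER 16 OPERATIONS THE REMAINDER OF THE WAIT QUEUE IS   */
+/* HANDED OVER TO A LQH_RESTART_OP SIGNAL TO OURSELVES SO THAT OTHER SIGNALS */
+/* GET A CHANCE TO EXECUTE IN BETWEEN.                                       */
+/*---------------------------------------------------------------------------*/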
+ if (loopCount > 16) {
+ jam();
+ signal->theData[0] = fragptr.i;
+ signal->theData[1] = lcpPtr.i;
+ sendSignal(cownref, GSN_LQH_RESTART_OP, signal, 2, JBB);
+ return;
+ }//if
+ } while (1);
+}//Dblqh::restartOperationsLab()
+
+void Dblqh::restartOperationsAfterStopLab(Signal* signal)
+{
+ /*-------------------------------------------------------------------------
+ * WHEN ARRIVING HERE THE OPERATION IS ALREADY SET IN THE ACTIVE LIST.
+ * THUS WE CAN IMMEDIATELY CALL THE METHODS THAT EXECUTE FROM WHERE
+ * THE OPERATION WAS STOPPED.
+ *------------------------------------------------------------------------ */
+ switch (tcConnectptr.p->transactionState) {
+ case TcConnectionrec::STOPPED:
+ jam();
+ /*-----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND ACCKEYREQ
+ *---------------------------------------------------------------------- */
+ prepareContinueAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::COMMIT_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND ACC_COMMITREQ
+ * --------------------------------------------------------------------- */
+ releaseActiveFrag(signal);
+ commitContinueAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::ABORT_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND ACC_ABORTREQ
+ * --------------------------------------------------------------------- */
+ abortContinueAfterBlockedLab(signal, true);
+ return;
+ break;
+ case TcConnectionrec::COPY_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING COPY FRAGMENT
+ * --------------------------------------------------------------------- */
+ continueCopyAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::COPY_FIRST_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING COPY FRAGMENT
+ * --------------------------------------------------------------------- */
+ continueFirstCopyAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::SCAN_FIRST_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN
+ * --------------------------------------------------------------------- */
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+ continueFirstScanAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::SCAN_CHECK_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN
+ * --------------------------------------------------------------------- */
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+ continueAfterCheckLcpStopBlocked(signal);
+ return;
+ break;
+ case TcConnectionrec::SCAN_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN
+ * --------------------------------------------------------------------- */
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+ continueScanAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::SCAN_RELEASE_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING RELEASE
+ * LOCKS IN SCAN
+ * --------------------------------------------------------------------- */
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+ continueScanReleaseAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::SCAN_CLOSE_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING CLOSE OF SCAN
+ * --------------------------------------------------------------------- */
+ continueCloseScanAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::COPY_CLOSE_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING CLOSE OF COPY
+ * --------------------------------------------------------------------- */
+ continueCloseCopyAfterBlockedLab(signal);
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+}//Dblqh::restartOperationsAfterStopLab()
+
+/* *************** */
+/* ACC_LCPCONF > */
+/* *************** */
+/*---------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = ACC_STARTED
+ *-------------------------------------------------------------------------- */
+void Dblqh::execACC_LCPCONF(Signal* signal)
+{
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_STARTED);
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN
+ * THIS REFERENCE WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A
+ * SYSTEM RESTART.
+ * ----------------------------------------------------------------------- */
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::ACC_COMPLETED;
+ lcpCompletedLab(signal);
+ return;
+}//Dblqh::execACC_LCPCONF()
+
+/* *************** */
+/* TUP_LCPCONF > */
+/* *************** */
+/* --------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = TUP_STARTED
+ * ------------------------------------------------------------------------- */
+void Dblqh::execTUP_LCPCONF(Signal* signal)
+{
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_STARTED);
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS
+ * REFERENCE WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
+ * ----------------------------------------------------------------------- */
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::TUP_COMPLETED;
+ lcpCompletedLab(signal);
+ return;
+}//Dblqh::execTUP_LCPCONF()
+
+void Dblqh::lcpCompletedLab(Signal* signal)
+{
+ checkLcpCompleted(signal);
+ if (lcpPtr.p->lcpState != LcpRecord::LCP_COMPLETED) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THE LOCAL CHECKPOINT HAS NOT BEEN COMPLETED, EXIT & WAIT
+ * FOR MORE SIGNALS
+ * --------------------------------------------------------------------- */
+ return;
+ }//if
+ /* ------------------------------------------------------------------------
+ * THE LOCAL CHECKPOINT HAS BEEN COMPLETED. IT IS NOW TIME TO START
+ * A LOCAL CHECKPOINT ON THE NEXT FRAGMENT OR COMPLETE THIS LCP ROUND.
+ * ------------------------------------------------------------------------
+ * WE START BY SENDING LCP_REPORT TO DIH TO REPORT THE COMPLETED LCP.
+ * TO CATER FOR NODE CRASHES WE SEND IT IN PARALLEL TO ALL NODES.
+ * ----------------------------------------------------------------------- */
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.p->fragActiveStatus = ZFALSE;
+
+ contChkpNextFragLab(signal);
+ return;
+}//Dblqh::lcpCompletedLab()
+
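+/* --------------------------------------------------------------------------
+ * REPORT A COMPLETED FRAGMENT LCP TO DIH ON ALL NODES THAT ARE UP, SO THAT
+ * THE INFORMATION SURVIVES NODE CRASHES.
+ * ------------------------------------------------------------------------- */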
+void
+Dblqh::sendLCP_FRAG_REP(Signal * signal,
+ const LcpRecord::FragOrd & fragOrd) const {
+
+ FragrecordPtr fragPtr;
+ fragPtr.i = fragOrd.fragPtrI;
+ ptrCheckGuard(fragPtr, cfragrecFileSize, fragrecord);
+
+ ndbrequire(fragOrd.lcpFragOrd.lcpNo < MAX_LCP_STORED);
+ LcpFragRep * const lcpReport = (LcpFragRep *)&signal->theData[0];
+ lcpReport->nodeId = cownNodeid;
+ lcpReport->lcpId = fragOrd.lcpFragOrd.lcpId;
+ lcpReport->lcpNo = fragOrd.lcpFragOrd.lcpNo;
+ lcpReport->tableId = fragOrd.lcpFragOrd.tableId;
+ lcpReport->fragId = fragOrd.lcpFragOrd.fragmentId;
+ lcpReport->maxGciCompleted = fragPtr.p->maxGciCompletedInLcp;
+ lcpReport->maxGciStarted = fragPtr.p->maxGciInLcp;
+
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ Uint32 nodeId = cnodeData[i];
+ if(cnodeStatus[i] == ZNODE_UP){
+ jam();
+ BlockReference Tblockref = calcDihBlockRef(nodeId);
+ sendSignal(Tblockref, GSN_LCP_FRAG_REP, signal,
+ LcpFragRep::SignalLength, JBB);
+ }//if
+ }//for
+}
+
+void Dblqh::contChkpNextFragLab(Signal* signal)
+{
+ /* ------------------------------------------------------------------------
+ * UPDATE THE LATEST LOCAL CHECKPOINT COMPLETED ON FRAGMENT.
+ * UPDATE THE LCP_ID OF THIS CHECKPOINT.
+ * REMOVE THE LINK BETWEEN THE FRAGMENT RECORD AND THE LCP RECORD.
+ * ----------------------------------------------------------------------- */
+ if (fragptr.p->fragStatus == Fragrecord::BLOCKED) {
+ jam();
+ /**
+ * LCP of fragment complete
+ * but restarting of operations isn't
+ */
+ lcpPtr.p->lcpState = LcpRecord::LCP_BLOCKED_COMP;
+ //restartOperationsLab(signal);
+ return;
+ }//if
+
+ /**
+ * Send rep when fragment is done + unblocked
+ */
+ sendLCP_FRAG_REP(signal, lcpPtr.p->currentFragment);
+
+ /* ------------------------------------------------------------------------
+ * WE ALSO RELEASE THE LOCAL LCP RECORDS.
+ * ----------------------------------------------------------------------- */
+ releaseLocalLcps(signal);
+ if (lcpPtr.p->lcpQueued) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * Transfer the state from the queued to the active LCP.
+ * --------------------------------------------------------------------- */
+ lcpPtr.p->lcpQueued = false;
+ lcpPtr.p->currentFragment = lcpPtr.p->queuedFragment;
+
+ /* ----------------------------------------------------------------------
+ * START THE QUEUED LOCAL CHECKPOINT.
+ * --------------------------------------------------------------------- */
+ sendLCP_FRAGIDREQ(signal);
+ return;
+ }//if
+
+ lcpPtr.p->lcpState = LcpRecord::LCP_IDLE;
+ if (lcpPtr.p->lastFragmentFlag){
+ jam();
+ /* ----------------------------------------------------------------------
+ * NOW THE COMPLETE LOCAL CHECKPOINT ROUND IS COMPLETED.
+ * --------------------------------------------------------------------- */
+ completeLcpRoundLab(signal);
+ return;
+ }//if
+
+ if (lcpPtr.p->reportEmpty) {
+ jam();
+ sendEMPTY_LCP_CONF(signal, false);
+ }//if
+ return;
+}//Dblqh::contChkpNextFragLab()
+
+void Dblqh::sendLCP_FRAGIDREQ(Signal* signal)
+{
+ ndbrequire(lcpPtr.p->firstLcpLocTup == RNIL);
+ ndbrequire(lcpPtr.p->firstLcpLocAcc == RNIL);
+
+ TablerecPtr tabPtr;
+ tabPtr.i = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
+ ptrAss(tabPtr, tablerec);
+ if(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING ||
+ tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
+ jam();
+ /**
+ * Fake that the fragment is done
+ */
+ lcpCompletedLab(signal);
+ return;
+ }
+
+ ndbrequire(tabPtr.p->tableStatus == Tablerec::TABLE_DEFINED);
+
+ lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_FRAGID;
+ signal->theData[0] = lcpPtr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
+ signal->theData[3] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
+ signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.fragmentId;
+ signal->theData[5] = lcpPtr.p->currentFragment.lcpFragOrd.lcpId % MAX_LCP_STORED;
+ sendSignal(fragptr.p->accBlockref, GSN_LCP_FRAGIDREQ, signal, 6, JBB);
+}//Dblqh::sendLCP_FRAGIDREQ()
+
+void Dblqh::sendEMPTY_LCP_CONF(Signal* signal, bool idle)
+{
+
+ EmptyLcpConf * const rep = (EmptyLcpConf*)&signal->theData[0];
+ /* ----------------------------------------------------------------------
+ * We have been requested to report when there are no more local
+ * checkpoints waiting to be started or ongoing. In this signal we also
+ * report the state of the last completed fragment.
+ * ---------------------------------------------------------------------- */
+ rep->senderNodeId = getOwnNodeId();
+ if(!idle){
+ jam();
+ rep->idle = 0 ;
+ rep->tableId = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
+ rep->fragmentId = lcpPtr.p->currentFragment.lcpFragOrd.fragmentId;
+ rep->lcpNo = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
+ rep->lcpId = lcpPtr.p->currentFragment.lcpFragOrd.lcpId;
+ } else {
+ jam();
+ rep->idle = 1;
+ rep->tableId = ~0;
+ rep->fragmentId = ~0;
+ rep->lcpNo = ~0;
+ rep->lcpId = c_lcpId;
+ }
+
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ Uint32 nodeId = cnodeData[i];
+ if (lcpPtr.p->m_EMPTY_LCP_REQ.get(nodeId)) {
+ jam();
+
+ BlockReference blockref = calcDihBlockRef(nodeId);
+ sendSignal(blockref, GSN_EMPTY_LCP_CONF, signal,
+ EmptyLcpConf::SignalLength, JBB);
+ }//if
+ }//for
+
+ lcpPtr.p->reportEmpty = false;
+ lcpPtr.p->m_EMPTY_LCP_REQ.clear();
+}//Dblqh::sendEMPTY_LCP_CONF()
+
+void Dblqh::execACC_LCPREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execACC_LCPREF()
+
+void Dblqh::execTUP_LCPREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execTUP_LCPREF()
+
+/* --------------------------------------------------------------------------
+ * THE LOCAL CHECKPOINT ROUND IS NOW COMPLETED. SEND COMPLETED MESSAGE
+ * TO THE MASTER DIH.
+ * ------------------------------------------------------------------------- */
+void Dblqh::completeLcpRoundLab(Signal* signal)
+{
+ clcpCompletedState = LCP_CLOSE_STARTED;
+ signal->theData[0] = caccBlockref;
+ signal->theData[1] = cownref;
+ sendSignal(caccBlockref, GSN_END_LCPREQ, signal, 2, JBB);
+ signal->theData[0] = ctupBlockref;
+ signal->theData[1] = cownref;
+ sendSignal(ctupBlockref, GSN_END_LCPREQ, signal, 2, JBB);
+ return;
+}//Dblqh::completeLcpRoundLab()
+
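+/* --------------------------------------------------------------------------
+ * END_LCPCONF ARRIVES ONCE FROM ACC AND ONCE FROM TUP. THE FIRST REPLY MOVES
+ * CLCP_COMPLETED_STATE FROM LCP_CLOSE_STARTED TO THE CORRESPONDING
+ * *_LCP_CLOSE_COMPLETED STATE; THE SECOND REPLY MOVES IT TO LCP_IDLE, AFTER
+ * WHICH THE COMPLETED LCP IS REPORTED TO DIH ON ALL NODES.
+ * ------------------------------------------------------------------------- */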
+void Dblqh::execEND_LCPCONF(Signal* signal)
+{
+ jamEntry();
+ BlockReference userpointer = signal->theData[0];
+ if (userpointer == caccBlockref) {
+ if (clcpCompletedState == LCP_CLOSE_STARTED) {
+ jam();
+ clcpCompletedState = ACC_LCP_CLOSE_COMPLETED;
+ return;
+ } else {
+ jam();
+ ndbrequire(clcpCompletedState == TUP_LCP_CLOSE_COMPLETED);
+ clcpCompletedState = LCP_IDLE;
+ }//if
+ } else {
+ ndbrequire(userpointer == ctupBlockref);
+ if (clcpCompletedState == LCP_CLOSE_STARTED) {
+ jam();
+ clcpCompletedState = TUP_LCP_CLOSE_COMPLETED;
+ return;
+ } else {
+ jam();
+ ndbrequire(clcpCompletedState == ACC_LCP_CLOSE_COMPLETED);
+ clcpCompletedState = LCP_IDLE;
+ }//if
+ }//if
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+ sendLCP_COMPLETE_REP(signal, lcpPtr.p->currentFragment.lcpFragOrd.lcpId);
+}//Dblqh::execEND_LCPCONF()
+
+void Dblqh::sendLCP_COMPLETE_REP(Signal* signal, Uint32 lcpId)
+{
+ cnoOfFragsCheckpointed = 0;
+ ndbrequire((cnoOfNodes - 1) < (MAX_NDB_NODES - 1));
+ /* ------------------------------------------------------------------------
+ * WE SEND LCP_COMPLETE_REP TO ALL NODES TO PREPARE FOR NODE CRASHES.
+ * ----------------------------------------------------------------------- */
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+ lcpPtr.p->lastFragmentFlag = false;
+
+ LcpCompleteRep* rep = (LcpCompleteRep*)signal->getDataPtrSend();
+ rep->nodeId = getOwnNodeId();
+ rep->lcpId = lcpId;
+ rep->blockNo = DBLQH;
+
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ Uint32 nodeId = cnodeData[i];
+ if(cnodeStatus[i] == ZNODE_UP){
+ jam();
+
+ BlockReference blockref = calcDihBlockRef(nodeId);
+ sendSignal(blockref, GSN_LCP_COMPLETE_REP, signal,
+ LcpCompleteRep::SignalLength, JBB);
+ }//if
+ }//for
+
+ if(lcpPtr.p->reportEmpty){
+ jam();
+ sendEMPTY_LCP_CONF(signal, true);
+ }
+ return;
+}//Dblqh::sendLCP_COMPLETE_REP()
+
+/* ==========================================================================
+ * ======= CHECK IF ALL PARTS OF A LOCAL CHECKPOINT ARE COMPLETED =======
+ *
+ * SUBROUTINE SHORT NAME = CLC
+ * ========================================================================= */
+void Dblqh::checkLcpCompleted(Signal* signal)
+{
+ LcpLocRecordPtr clcLcpLocptr;
+
+ clcLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ while (clcLcpLocptr.i != RNIL) {
+ ptrCheckGuard(clcLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (clcLcpLocptr.p->lcpLocstate != LcpLocRecord::ACC_COMPLETED) {
+ jam();
+ ndbrequire((clcLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED) ||
+ (clcLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_STARTED));
+ return;
+ }//if
+ clcLcpLocptr.i = clcLcpLocptr.p->nextLcpLoc;
+ }
+
+ clcLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
+ while (clcLcpLocptr.i != RNIL){
+ ptrCheckGuard(clcLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (clcLcpLocptr.p->lcpLocstate != LcpLocRecord::TUP_COMPLETED) {
+ jam();
+ ndbrequire((clcLcpLocptr.p->lcpLocstate==LcpLocRecord::TUP_WAIT_STARTED)
+ ||(clcLcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_STARTED));
+ return;
+ }//if
+ clcLcpLocptr.i = clcLcpLocptr.p->nextLcpLoc;
+ }
+
+ lcpPtr.p->lcpState = LcpRecord::LCP_COMPLETED;
+}//Dblqh::checkLcpCompleted()
+
+/* ==========================================================================
+ * ======= CHECK IF ALL HOLD OPERATIONS ARE COMPLETED =======
+ *
+ * SUBROUTINE SHORT NAME = CHO
+ * ========================================================================= */
+void Dblqh::checkLcpHoldop(Signal* signal)
+{
+ LcpLocRecordPtr choLcpLocptr;
+
+ choLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ do {
+ ptrCheckGuard(choLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (choLcpLocptr.p->lcpLocstate != LcpLocRecord::HOLDOP_READY) {
+ ndbrequire(choLcpLocptr.p->lcpLocstate == LcpLocRecord::WAIT_LCPHOLDOP);
+ return;
+ }//if
+ choLcpLocptr.i = choLcpLocptr.p->nextLcpLoc;
+ } while (choLcpLocptr.i != RNIL);
+ lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_ACTIVE_FINISH;
+}//Dblqh::checkLcpHoldop()
+
+/* ==========================================================================
+ * ======= CHECK IF ALL PARTS OF A LOCAL CHECKPOINT ARE STARTED =======
+ *
+ * SUBROUTINE SHORT NAME = CLS
+ * ========================================================================== */
+bool
+Dblqh::checkLcpStarted(Signal* signal)
+{
+ LcpLocRecordPtr clsLcpLocptr;
+
+ terrorCode = ZOK;
+ clsLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ int i = 0;
+ do {
+ ptrCheckGuard(clsLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED){
+ return false;
+ }//if
+ clsLcpLocptr.i = clsLcpLocptr.p->nextLcpLoc;
+ i++;
+ } while (clsLcpLocptr.i != RNIL);
+
+ i = 0;
+ clsLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
+ do {
+ ptrCheckGuard(clsLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_WAIT_STARTED){
+ return false;
+ }//if
+ clsLcpLocptr.i = clsLcpLocptr.p->nextLcpLoc;
+ i++;
+ } while (clsLcpLocptr.i != RNIL);
+
+ return true;
+}//Dblqh::checkLcpStarted()
+
+/* ==========================================================================
+ * ======= CHECK IF ALL PREPARE TUP OPERATIONS ARE COMPLETED =======
+ *
+ * SUBROUTINE SHORT NAME = CLT
+ * ========================================================================== */
+void Dblqh::checkLcpTupprep(Signal* signal)
+{
+ LcpLocRecordPtr cltLcpLocptr;
+ cltLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
+ do {
+ ptrCheckGuard(cltLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (cltLcpLocptr.p->lcpLocstate != LcpLocRecord::IDLE) {
+ ndbrequire(cltLcpLocptr.p->lcpLocstate == LcpLocRecord::WAIT_TUP_PREPLCP);
+ return;
+ }//if
+ cltLcpLocptr.i = cltLcpLocptr.p->nextLcpLoc;
+ } while (cltLcpLocptr.i != RNIL);
+ lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_HOLDOPS;
+}//Dblqh::checkLcpTupprep()
+
+/* ==========================================================================
+ * ======= INITIATE LCP LOCAL RECORD USED TOWARDS ACC =======
+ *
+ * ========================================================================== */
+void Dblqh::initLcpLocAcc(Signal* signal, Uint32 fragId)
+{
+ lcpLocptr.p->nextLcpLoc = lcpPtr.p->firstLcpLocAcc;
+ lcpPtr.p->firstLcpLocAcc = lcpLocptr.i;
+ lcpLocptr.p->locFragid = fragId;
+ lcpLocptr.p->waitingBlock = LcpLocRecord::ACC;
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::IDLE;
+ lcpLocptr.p->masterLcpRec = lcpPtr.i;
+ lcpLocptr.p->tupRef = RNIL;
+}//Dblqh::initLcpLocAcc()
+
+/* ==========================================================================
+ * ======= INITIATE LCP LOCAL RECORD USED TOWARDS TUP =======
+ *
+ * ========================================================================== */
+void Dblqh::initLcpLocTup(Signal* signal, Uint32 fragId)
+{
+ lcpLocptr.p->nextLcpLoc = lcpPtr.p->firstLcpLocTup;
+ lcpPtr.p->firstLcpLocTup = lcpLocptr.i;
+ lcpLocptr.p->locFragid = fragId;
+ lcpLocptr.p->waitingBlock = LcpLocRecord::TUP;
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::WAIT_TUP_PREPLCP;
+ lcpLocptr.p->masterLcpRec = lcpPtr.i;
+ lcpLocptr.p->tupRef = RNIL;
+}//Dblqh::initLcpLocTup()
+
+/* --------------------------------------------------------------------------
+ * ------- MOVE OPERATION FROM ACC WAITING LIST ON FRAGMENT -------
+ * ------- TO ACTIVE LIST ON FRAGMENT -------
+ *
+ * SUBROUTINE SHORT NAME = MAA
+ * -------------------------------------------------------------------------- */
+void Dblqh::moveAccActiveFrag(Signal* signal)
+{
+ UintR maaTcNextConnectptr;
+
+ tcConnectptr.i = fragptr.p->accBlockedList;
+ fragptr.p->accBlockedList = RNIL;
+ /* ------------------------------------------------------------------------
+ * WE WILL MOVE ALL RECORDS FROM THE ACC BLOCKED LIST AT ONCE.
+ * ------------------------------------------------------------------------ */
+ while (tcConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ maaTcNextConnectptr = tcConnectptr.p->nextTc;
+ ndbrequire(tcConnectptr.p->listState == TcConnectionrec::ACC_BLOCK_LIST);
+ tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
+ linkActiveFrag(signal);
+ tcConnectptr.i = maaTcNextConnectptr;
+ }//while
+}//Dblqh::moveAccActiveFrag()
+
+/* --------------------------------------------------------------------------
+ * ------- MOVE OPERATION FROM ACTIVE LIST ON FRAGMENT -------
+ * ------- TO ACC BLOCKED LIST ON FRAGMENT -------
+ *
+ * SUBROUTINE SHORT NAME = MAT
+ * -------------------------------------------------------------------------- */
+void Dblqh::moveActiveToAcc(Signal* signal)
+{
+ TcConnectionrecPtr matTcNextConnectptr;
+
+ releaseActiveList(signal);
+ /* ------------------------------------------------------------------------
+ * PUT OPERATION RECORD FIRST IN ACC BLOCKED LIST.
+ * ------------------------------------------------------------------------ */
+ matTcNextConnectptr.i = fragptr.p->accBlockedList;
+ tcConnectptr.p->nextTc = matTcNextConnectptr.i;
+ tcConnectptr.p->prevTc = RNIL;
+ tcConnectptr.p->listState = TcConnectionrec::ACC_BLOCK_LIST;
+ fragptr.p->accBlockedList = tcConnectptr.i;
+ if (matTcNextConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(matTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ matTcNextConnectptr.p->prevTc = tcConnectptr.i;
+ }//if
+}//Dblqh::moveActiveToAcc()
+
+/* ------------------------------------------------------------------------- */
+/* ---- RELEASE LOCAL LCP RECORDS AFTER COMPLETION OF A LOCAL CHECKPOINT---- */
+/* */
+/* SUBROUTINE SHORT NAME = RLL */
+/* ------------------------------------------------------------------------- */
+void Dblqh::releaseLocalLcps(Signal* signal)
+{
+ lcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ while (lcpLocptr.i != RNIL){
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ Uint32 tmp = lcpLocptr.p->nextLcpLoc;
+ releaseLcpLoc(signal);
+ lcpLocptr.i = tmp;
+ }
+ lcpPtr.p->firstLcpLocAcc = RNIL;
+
+ lcpLocptr.i = lcpPtr.p->firstLcpLocTup;
+ while (lcpLocptr.i != RNIL){
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ Uint32 tmp = lcpLocptr.p->nextLcpLoc;
+ releaseLcpLoc(signal);
+ lcpLocptr.i = tmp;
+ }
+ lcpPtr.p->firstLcpLocTup = RNIL;
+
+}//Dblqh::releaseLocalLcps()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEIZE LCP LOCAL RECORD ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::seizeLcpLoc(Signal* signal)
+{
+ lcpLocptr.i = cfirstfreeLcpLoc;
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ cfirstfreeLcpLoc = lcpLocptr.p->nextLcpLoc;
+ lcpLocptr.p->nextLcpLoc = RNIL;
+}//Dblqh::seizeLcpLoc()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEND ACC_CONT_OP ------- */
+/* */
+/* INPUT: LCP_PTR LOCAL CHECKPOINT RECORD */
+/* FRAGPTR FRAGMENT RECORD */
+/* */
+/* SUBROUTINE SHORT NAME = SAC */
+/* ------------------------------------------------------------------------- */
+void Dblqh::sendAccContOp(Signal* signal)
+{
+ LcpLocRecordPtr sacLcpLocptr;
+
+ int count = 0;
+ sacLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ do {
+ ptrCheckGuard(sacLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ sacLcpLocptr.p->accContCounter = 0;
+ /* ------------------------------------------------------------------- */
+ /*SEND START OPERATIONS TO ACC AGAIN */
+ /* ------------------------------------------------------------------- */
+ signal->theData[0] = lcpPtr.p->lcpAccptr;
+ signal->theData[1] = sacLcpLocptr.p->locFragid;
+ sendSignal(fragptr.p->accBlockref, GSN_ACC_CONTOPREQ, signal, 2, JBA);
+ sacLcpLocptr.i = sacLcpLocptr.p->nextLcpLoc;
+ } while (sacLcpLocptr.i != RNIL);
+
+}//Dblqh::sendAccContOp()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEND ACC_LCPREQ AND TUP_LCPREQ ------- */
+/* */
+/* INPUT: LCP_PTR LOCAL CHECKPOINT RECORD */
+/* FRAGPTR FRAGMENT RECORD */
+/* SUBROUTINE SHORT NAME = STL */
+/* ------------------------------------------------------------------------- */
+void Dblqh::sendStartLcp(Signal* signal)
+{
+ LcpLocRecordPtr stlLcpLocptr;
+ stlLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ do {
+ jam();
+ ptrCheckGuard(stlLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ stlLcpLocptr.p->lcpLocstate = LcpLocRecord::ACC_WAIT_STARTED;
+ signal->theData[0] = lcpPtr.p->lcpAccptr;
+ signal->theData[1] = stlLcpLocptr.i;
+ signal->theData[2] = stlLcpLocptr.p->locFragid;
+ sendSignal(fragptr.p->accBlockref, GSN_ACC_LCPREQ, signal, 3, JBA);
+ stlLcpLocptr.i = stlLcpLocptr.p->nextLcpLoc;
+ } while (stlLcpLocptr.i != RNIL);
+
+ stlLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
+ do {
+ jam();
+ ptrCheckGuard(stlLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ stlLcpLocptr.p->lcpLocstate = LcpLocRecord::TUP_WAIT_STARTED;
+ signal->theData[0] = stlLcpLocptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = stlLcpLocptr.p->tupRef;
+ if(ERROR_INSERTED(5077))
+ sendSignalWithDelay(fragptr.p->tupBlockref, GSN_TUP_LCPREQ,
+ signal, 5000, 3);
+ else
+ sendSignal(fragptr.p->tupBlockref, GSN_TUP_LCPREQ, signal, 3, JBA);
+ stlLcpLocptr.i = stlLcpLocptr.p->nextLcpLoc;
+ } while (stlLcpLocptr.i != RNIL);
+
+ if(ERROR_INSERTED(5077))
+ {
+ ndbout_c("Delayed TUP_LCPREQ with 5 sec");
+ }
+}//Dblqh::sendStartLcp()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SET THE LOG TAIL IN THE LOG FILES ------- */
+/* */
+/*THIS SUBROUTINE HAS BEEN BUGGY AND IS RATHER COMPLEX. IT IS IMPORTANT TO  */
+/*REMEMBER THAT WE SEARCH FROM THE TAIL UNTIL WE REACH THE HEAD (CURRENT). */
+/*THE TAIL AND HEAD CAN BE ON THE SAME MBYTE. WE SEARCH UNTIL WE FIND A MBYTE*/
+/*THAT WE NEED TO KEEP. WE THEN SET THE TAIL TO BE THE PREVIOUS. IF WE DO */
+/*NOT FIND A MBYTE THAT WE NEED TO KEEP UNTIL WE REACH THE HEAD THEN WE USE */
+/*THE HEAD AS TAIL. FINALLY WE HAVE TO MOVE BACK THE TAIL TO ALSO INCLUDE */
+/*ALL PREPARE RECORDS. THIS MEANS THAT LONG-LIVED TRANSACTIONS ARE DANGEROUS */
+/*FOR SHORT LOGS. */
+/* ------------------------------------------------------------------------- */
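+/* IN OUTLINE, FOR EACH OF THE FOUR LOG PARTS: START AT THE CURRENT TAIL     */
+/* MBYTE AND SCAN FORWARD. STOP AT THE FIRST MBYTE WHOSE LOG_MAX_GCI_STARTED */
+/* IS >= KEEP_GCI (THAT MBYTE MUST BE KEPT, SO THE NEW TAIL IS THE MBYTE     */
+/* BEFORE IT) OR AT THE HEAD (THE CURRENT MBYTE OF THE CURRENT FILE).        */
+/* FINALLY STEP THE TAIL BACK TO LOG_LAST_PREP_REF OF THAT MBYTE SO THAT ALL */
+/* PREPARE RECORDS BELONGING TO KEPT COMMIT RECORDS REMAIN IN THE LOG.       */
+/* ------------------------------------------------------------------------- */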
+
+// this function has not been verified yet
+Uint32 Dblqh::remainingLogSize(const LogFileRecordPtr &sltCurrLogFilePtr,
+ const LogPartRecordPtr &sltLogPartPtr)
+{
+ Uint32 hf = sltCurrLogFilePtr.p->fileNo*ZNO_MBYTES_IN_FILE+sltCurrLogFilePtr.p->currentMbyte;
+ Uint32 tf = sltLogPartPtr.p->logTailFileNo*ZNO_MBYTES_IN_FILE+sltLogPartPtr.p->logTailMbyte;
+ Uint32 sz = sltLogPartPtr.p->noLogFiles*ZNO_MBYTES_IN_FILE;
+ if (tf > hf) hf += sz;
+ return sz-(hf-tf);
+}
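+// Worked example (illustrative only; assumes ZNO_MBYTES_IN_FILE = 16 and a
+// log part with noLogFiles = 4, i.e. sz = 64 mbytes): with the head in file 1
+// at mbyte 2 (hf = 18) and the tail in file 3 at mbyte 10 (tf = 58), tf > hf
+// so hf becomes 82 and the remaining size is 64 - (82 - 58) = 40 mbytes.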
+
+void Dblqh::setLogTail(Signal* signal, Uint32 keepGci)
+{
+ LogPartRecordPtr sltLogPartPtr;
+ LogFileRecordPtr sltLogFilePtr;
+#if 0
+ LogFileRecordPtr sltCurrLogFilePtr;
+#endif
+ UintR tsltMbyte;
+ UintR tsltStartMbyte;
+ UintR tsltIndex;
+ UintR tsltFlag;
+
+ for (sltLogPartPtr.i = 0; sltLogPartPtr.i < 4; sltLogPartPtr.i++) {
+ jam();
+ ptrAss(sltLogPartPtr, logPartRecord);
+ findLogfile(signal, sltLogPartPtr.p->logTailFileNo,
+ sltLogPartPtr, &sltLogFilePtr);
+
+#if 0
+ sltCurrLogFilePtr.i = sltLogPartPtr.p->currentLogfile;
+ ptrCheckGuard(sltCurrLogFilePtr, clogFileFileSize, logFileRecord);
+ infoEvent("setLogTail: Available log file %d size = %d[mbytes]+%d[words]", sltLogPartPtr.i,
+ remainingLogSize(sltCurrLogFilePtr, sltLogPartPtr), sltCurrLogFilePtr.p->remainingWordsInMbyte);
+#endif
+
+ tsltMbyte = sltLogPartPtr.p->logTailMbyte;
+ tsltStartMbyte = tsltMbyte;
+ tsltFlag = ZFALSE;
+ if (sltLogFilePtr.i == sltLogPartPtr.p->currentLogfile) {
+/* ------------------------------------------------------------------------- */
+/*THE LOG AND THE TAIL IS ALREADY IN THE SAME FILE. */
+/* ------------------------------------------------------------------------- */
+ if (sltLogFilePtr.p->currentMbyte >= sltLogPartPtr.p->logTailMbyte) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*THE CURRENT MBYTE IS AHEAD OF OR AT THE TAIL. THUS WE WILL ONLY LOOK FOR */
+/*THE TAIL UNTIL WE REACH THE CURRENT MBYTE WHICH IS IN THIS LOG FILE. */
+/*IF THE LOG TAIL IS AHEAD OF THE CURRENT MBYTE BUT IN THE SAME LOG FILE */
+/*THEN WE HAVE TO SEARCH THROUGH ALL FILES BEFORE WE COME TO THE CURRENT */
+/*MBYTE. WE ALWAYS STOP WHEN WE COME TO THE CURRENT MBYTE SINCE THE TAIL */
+/*CAN NEVER BE BEFORE THE HEAD. */
+/* ------------------------------------------------------------------------- */
+ tsltFlag = ZTRUE;
+ }//if
+ }//if
+
+/* ------------------------------------------------------------------------- */
+/*NOW START SEARCHING FOR THE NEW TAIL, STARTING AT THE CURRENT TAIL AND */
+/*PROCEEDING UNTIL WE FIND A MBYTE WHICH IS NEEDED TO KEEP OR UNTIL WE REACH */
+/*CURRENT MBYTE (THE HEAD). */
+/* ------------------------------------------------------------------------- */
+ SLT_LOOP:
+ for (tsltIndex = tsltStartMbyte;
+ tsltIndex <= ZNO_MBYTES_IN_FILE - 1;
+ tsltIndex++) {
+ if (sltLogFilePtr.p->logMaxGciStarted[tsltIndex] >= keepGci) {
+/* ------------------------------------------------------------------------- */
+/*WE ARE NOT ALLOWED TO STEP THE LOG ANY FURTHER AHEAD */
+/*SET THE NEW LOG TAIL AND CONTINUE WITH NEXT LOG PART. */
+/*THIS MBYTE IS NOT TO BE INCLUDED SO WE NEED TO STEP BACK ONE MBYTE. */
+/* ------------------------------------------------------------------------- */
+ if (tsltIndex != 0) {
+ jam();
+ tsltMbyte = tsltIndex - 1;
+ } else {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*STEPPING BACK INCLUDES ALSO STEPPING BACK TO THE PREVIOUS LOG FILE. */
+/* ------------------------------------------------------------------------- */
+ tsltMbyte = ZNO_MBYTES_IN_FILE - 1;
+ sltLogFilePtr.i = sltLogFilePtr.p->prevLogFile;
+ ptrCheckGuard(sltLogFilePtr, clogFileFileSize, logFileRecord);
+ }//if
+ goto SLT_BREAK;
+ } else {
+ jam();
+ if (tsltFlag == ZTRUE) {
+/* ------------------------------------------------------------------------- */
+/*WE ARE IN THE SAME FILE AS THE CURRENT MBYTE AND WE CAN REACH THE CURRENT */
+/*MBYTE BEFORE WE REACH A NEW TAIL. */
+/* ------------------------------------------------------------------------- */
+ if (tsltIndex == sltLogFilePtr.p->currentMbyte) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*THE TAIL OF THE LOG IS ACTUALLY WITHIN THE CURRENT MBYTE. THUS WE SET THE */
+/*LOG TAIL TO BE THE CURRENT MBYTE. */
+/* ------------------------------------------------------------------------- */
+ tsltMbyte = sltLogFilePtr.p->currentMbyte;
+ goto SLT_BREAK;
+ }//if
+ }//if
+ }//if
+ }//for
+ sltLogFilePtr.i = sltLogFilePtr.p->nextLogFile;
+ ptrCheckGuard(sltLogFilePtr, clogFileFileSize, logFileRecord);
+ if (sltLogFilePtr.i == sltLogPartPtr.p->currentLogfile) {
+ jam();
+ tsltFlag = ZTRUE;
+ }//if
+ tsltStartMbyte = 0;
+ goto SLT_LOOP;
+ SLT_BREAK:
+ jam();
+ {
+ UintR ToldTailFileNo = sltLogPartPtr.p->logTailFileNo;
+ UintR ToldTailMByte = sltLogPartPtr.p->logTailMbyte;
+
+ arrGuard(tsltMbyte, 16);
+ sltLogPartPtr.p->logTailFileNo =
+ sltLogFilePtr.p->logLastPrepRef[tsltMbyte] >> 16;
+/* ------------------------------------------------------------------------- */
+/*SINCE LOG_MAX_GCI_STARTED ONLY KEEPS TRACK OF COMMIT LOG RECORDS WE ALSO  */
+/*HAVE TO STEP BACK THE TAIL SO THAT WE INCLUDE ALL PREPARE RECORDS */
+/*NEEDED FOR THOSE COMMIT RECORDS IN THIS MBYTE. THIS IS A RATHER */
+/*CONSERVATIVE APPROACH BUT IT WORKS. */
+/* ------------------------------------------------------------------------- */
+ sltLogPartPtr.p->logTailMbyte =
+ sltLogFilePtr.p->logLastPrepRef[tsltMbyte] & 65535;
+ if ((ToldTailFileNo != sltLogPartPtr.p->logTailFileNo) ||
+ (ToldTailMByte != sltLogPartPtr.p->logTailMbyte)) {
+ jam();
+ if (sltLogPartPtr.p->logPartState == LogPartRecord::TAIL_PROBLEM) {
+ if (sltLogPartPtr.p->firstLogQueue == RNIL) {
+ jam();
+ sltLogPartPtr.p->logPartState = LogPartRecord::IDLE;
+ } else {
+ jam();
+ sltLogPartPtr.p->logPartState = LogPartRecord::ACTIVE;
+ }//if
+ }//if
+ }//if
+ }
+#if 0
+ infoEvent("setLogTail: Available log file %d size = %d[mbytes]+%d[words]", sltLogPartPtr.i,
+ remainingLogSize(sltCurrLogFilePtr, sltLogPartPtr), sltCurrLogFilePtr.p->remainingWordsInMbyte);
+#endif
+ }//for
+
+}//Dblqh::setLogTail()
+
+/* ######################################################################### */
+/* ####### GLOBAL CHECKPOINT MODULE ####### */
+/* */
+/* ######################################################################### */
+/*---------------------------------------------------------------------------*/
+/* THIS MODULE HELPS DIH IN DISCOVERING WHEN GLOBAL CHECKPOINTS ARE */
+/* RECOVERABLE. IT HANDLES THE REQUEST GCP_SAVEREQ THAT REQUESTS LQH TO */
+/* SAVE A PARTICULAR GLOBAL CHECKPOINT TO DISK AND RESPOND WHEN COMPLETED. */
+/*---------------------------------------------------------------------------*/
+/* *************** */
+/* GCP_SAVEREQ > */
+/* *************** */
+void Dblqh::execGCP_SAVEREQ(Signal* signal)
+{
+ jamEntry();
+ const GCPSaveReq * const saveReq = (GCPSaveReq *)&signal->theData[0];
+
+ if (ERROR_INSERTED(5000)) {
+ systemErrorLab(signal);
+ }
+
+ if (ERROR_INSERTED(5007)){
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_GCP_SAVEREQ, signal, 10000,
+ signal->length());
+ return;
+ }
+
+ const Uint32 dihBlockRef = saveReq->dihBlockRef;
+ const Uint32 dihPtr = saveReq->dihPtr;
+ const Uint32 gci = saveReq->gci;
+
+ ndbrequire(gci >= cnewestCompletedGci);
+
+ if (gci == cnewestCompletedGci) {
+/*---------------------------------------------------------------------------*/
+/* GLOBAL CHECKPOINT HAS ALREADY BEEN HANDLED. REQUEST MUST HAVE BEEN SENT  */
+/* FROM NEW MASTER DIH. */
+/*---------------------------------------------------------------------------*/
+ if (ccurrentGcprec == RNIL) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS INDICATES THAT WE HAVE ALREADY SENT GCP_SAVECONF TO PREVIOUS MASTER. */
+/* WE SIMPLY SEND IT ALSO TO THE NEW MASTER. */
+/*---------------------------------------------------------------------------*/
+ GCPSaveConf * const saveConf = (GCPSaveConf*)&signal->theData[0];
+ saveConf->dihPtr = dihPtr;
+ saveConf->nodeId = getOwnNodeId();
+ saveConf->gci = cnewestCompletedGci;
+ sendSignal(dihBlockRef, GSN_GCP_SAVECONF, signal,
+ GCPSaveConf::SignalLength, JBA);
+ return;
+ }
+ jam();
+/*---------------------------------------------------------------------------*/
+/* WE HAVE NOT YET SENT THE RESPONSE TO THE OLD MASTER. WE WILL SET THE NEW */
+/* RECEIVER OF THE RESPONSE AND THEN EXIT SINCE THE PROCESS IS ALREADY */
+/* STARTED. */
+/*---------------------------------------------------------------------------*/
+ gcpPtr.i = ccurrentGcprec;
+ ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
+ gcpPtr.p->gcpUserptr = dihPtr;
+ gcpPtr.p->gcpBlockref = dihBlockRef;
+ return;
+ }//if
+
+ ndbrequire(ccurrentGcprec == RNIL);
+
+
+ if(getNodeState().startLevel >= NodeState::SL_STOPPING_4){
+ GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
+ saveRef->dihPtr = dihPtr;
+ saveRef->nodeId = getOwnNodeId();
+ saveRef->gci = gci;
+ saveRef->errorCode = GCPSaveRef::NodeShutdownInProgress;
+ sendSignal(dihBlockRef, GSN_GCP_SAVEREF, signal,
+ GCPSaveRef::SignalLength, JBB);
+ return;
+ }
+
+ if(getNodeState().getNodeRestartInProgress()){
+ GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
+ saveRef->dihPtr = dihPtr;
+ saveRef->nodeId = getOwnNodeId();
+ saveRef->gci = gci;
+ saveRef->errorCode = GCPSaveRef::NodeRestartInProgress;
+ sendSignal(dihBlockRef, GSN_GCP_SAVEREF, signal,
+ GCPSaveRef::SignalLength, JBB);
+ return;
+ }
+
+ ccurrentGcprec = 0;
+ gcpPtr.i = ccurrentGcprec;
+ ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
+
+ cnewestCompletedGci = gci;
+ if (gci > cnewestGci) {
+ jam();
+ cnewestGci = gci;
+ }//if
+
+ gcpPtr.p->gcpBlockref = dihBlockRef;
+ gcpPtr.p->gcpUserptr = dihPtr;
+ gcpPtr.p->gcpId = gci;
+ bool tlogActive = false;
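+/*---------------------------------------------------------------------------*/
+/* FOR EACH OF THE FOUR LOG PARTS: IF THE PART IS CURRENTLY ACTIVE IT IS     */
+/* ASKED TO WRITE THE COMPLETED GCI LOG RECORD WHEN IT GETS THE CHANCE;      */
+/* OTHERWISE THE RECORD IS WRITTEN IMMEDIATELY. ONLY IF NO PART HAD TO DEFER */
+/* DO WE CONTINUE DIRECTLY WITH INIT_GCP_REC AND TIME SUPERVISION BELOW.     */
+/*---------------------------------------------------------------------------*/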
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ ptrAss(logPartPtr, logPartRecord);
+ if (logPartPtr.p->logPartState == LogPartRecord::ACTIVE) {
+ jam();
+ logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_TRUE;
+ tlogActive = true;
+ } else {
+ jam();
+ logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_FALSE;
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ writeCompletedGciLog(signal);
+ }//if
+ }//for
+ if (tlogActive == true) {
+ jam();
+ return;
+ }//if
+ initGcpRecLab(signal);
+ startTimeSupervision(signal);
+ return;
+}//Dblqh::execGCP_SAVEREQ()
+
+/* ------------------------------------------------------------------------- */
+/* START TIME SUPERVISION OF THE LOG PARTS. */
+/* ------------------------------------------------------------------------- */
+void Dblqh::startTimeSupervision(Signal* signal)
+{
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+/* WE HAVE TO START CHECKING IF THE LOG IS TO BE WRITTEN EVEN IF PAGES ARE */
+/* FULL. INITIALISE THE VALUES OF WHERE WE ARE IN THE LOG CURRENTLY. */
+/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+ logPartPtr.p->logPartTimer = 0;
+ logPartPtr.p->logTimer = 1;
+ signal->theData[0] = ZTIME_SUPERVISION;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//for
+}//Dblqh::startTimeSupervision()
+
+/*---------------------------------------------------------------------------*/
+/* WE SET THE GLOBAL CHECKPOINT VARIABLES AFTER WRITING THE COMPLETED GCI LOG*/
+/* RECORD. THIS ENSURES THAT WE WILL ENCOUNTER THE COMPLETED GCI RECORD WHEN */
+/* WE EXECUTE THE FRAGMENT LOG. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::initGcpRecLab(Signal* signal)
+{
+/* ======================================================================== */
+/* ======= INITIATE GCP RECORD ======= */
+/* */
+/* SUBROUTINE SHORT NAME = IGR */
+/* ======================================================================== */
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+/*--------------------------------------------------*/
+/* BY SETTING THE GCPREC = 0 WE START THE */
+/* CHECKING BY CHECK_GCP_COMPLETED. THIS */
+/* CHECKING MUST NOT BE STARTED UNTIL WE HAVE */
+/* INSERTED ALL COMPLETE GCI LOG RECORDS IN */
+/* ALL LOG PARTS. */
+/*--------------------------------------------------*/
+ logPartPtr.p->gcprec = 0;
+ gcpPtr.p->gcpLogPartState[logPartPtr.i] = ZWAIT_DISK;
+ gcpPtr.p->gcpSyncReady[logPartPtr.i] = ZFALSE;
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ gcpPtr.p->gcpFilePtr[logPartPtr.i] = logFilePtr.i;
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ if (logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] == ZPAGE_HEADER_SIZE) {
+ jam();
+/*--------------------------------------------------*/
+/* SINCE THE CURRENT FILEPAGE POINTS AT THE */
+/* NEXT WORD TO BE WRITTEN WE HAVE TO ADJUST */
+/* FOR THIS BY DECREASING THE FILE PAGE BY ONE*/
+/* IF NO WORD HAS BEEN WRITTEN ON THE CURRENT */
+/* FILEPAGE. */
+/*--------------------------------------------------*/
+ gcpPtr.p->gcpPageNo[logPartPtr.i] = logFilePtr.p->currentFilepage - 1;
+ gcpPtr.p->gcpWordNo[logPartPtr.i] = ZPAGE_SIZE - 1;
+ } else {
+ jam();
+ gcpPtr.p->gcpPageNo[logPartPtr.i] = logFilePtr.p->currentFilepage;
+ gcpPtr.p->gcpWordNo[logPartPtr.i] =
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] - 1;
+ }//if
+ }//for
+ return;
+}//Dblqh::initGcpRecLab()
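+/* --------------------------------------------------------------------------
+ * Editorial note (illustration only, not part of this change): the branch
+ * above records the log position of the COMPLETED GCI record as a
+ * (file page, word-in-page) pair. ZCURR_PAGE_INDEX points at the NEXT word
+ * to write, so the last written word is one less; if the current page holds
+ * only its header, the position wraps back to the last word of the previous
+ * page. A compact model with local names:
+ *
+ *   void lastWrittenPos(Uint32 currFilePage, Uint32 currPageIndex,
+ *                       Uint32& pageNo, Uint32& wordNo)
+ *   {
+ *     if (currPageIndex == ZPAGE_HEADER_SIZE) {
+ *       pageNo = currFilePage - 1;      // nothing on this page yet
+ *       wordNo = ZPAGE_SIZE - 1;        // last word of the previous page
+ *     } else {
+ *       pageNo = currFilePage;
+ *       wordNo = currPageIndex - 1;     // last word actually written
+ *     }
+ *   }
+ * -------------------------------------------------------------------------- */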
+
+/* ========================================================================= */
+/* ==== CHECK IF ANY GLOBAL CHECKPOINTS ARE COMPLETED AFTER A COMPLETED ==== */
+/* DISK WRITE. */
+/* */
+/* SUBROUTINE SHORT NAME = CGC */
+/* ========================================================================= */
+void Dblqh::checkGcpCompleted(Signal* signal,
+ Uint32 tcgcPageWritten,
+ Uint32 tcgcWordWritten)
+{
+ UintR tcgcFlag;
+ UintR tcgcJ;
+
+ gcpPtr.i = logPartPtr.p->gcprec;
+ if (gcpPtr.i != RNIL) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* IF THE GLOBAL CHECKPOINT IS NOT WAITING FOR COMPLETION THEN WE CAN QUIT */
+/* THE SEARCH IMMEDIATELY. */
+/* ------------------------------------------------------------------------- */
+ ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
+ if (gcpPtr.p->gcpFilePtr[logPartPtr.i] == logFilePtr.i) {
+/* ------------------------------------------------------------------------- */
+/* IF THE COMPLETED DISK OPERATION WAS ON ANOTHER FILE THAN THE ONE WE ARE */
+/* WAITING FOR, THEN WE CAN ALSO QUIT THE SEARCH IMMEDIATELY. */
+/* ------------------------------------------------------------------------- */
+ if (tcgcPageWritten < gcpPtr.p->gcpPageNo[logPartPtr.i]) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* THIS LOG PART HAS NOT YET WRITTEN THE GLOBAL CHECKPOINT TO DISK. */
+/* ------------------------------------------------------------------------- */
+ return;
+ } else {
+ if (tcgcPageWritten == gcpPtr.p->gcpPageNo[logPartPtr.i]) {
+ if (tcgcWordWritten < gcpPtr.p->gcpWordNo[logPartPtr.i]) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* THIS LOG PART HAS NOT YET WRITTEN THE GLOBAL CHECKPOINT TO DISK. */
+/* ------------------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ }//if
+/* ------------------------------------------------------------------------- */
+/* THIS LOG PART HAS WRITTEN THE GLOBAL CHECKPOINT TO DISK. */
+/* ------------------------------------------------------------------------- */
+ logPartPtr.p->gcprec = RNIL;
+ gcpPtr.p->gcpLogPartState[logPartPtr.i] = ZON_DISK;
+ tcgcFlag = ZTRUE;
+ for (tcgcJ = 0; tcgcJ <= 3; tcgcJ++) {
+ jam();
+ if (gcpPtr.p->gcpLogPartState[tcgcJ] != ZON_DISK) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*NOT ALL LOG PARTS HAVE SAVED THIS GLOBAL CHECKPOINT TO DISK YET. WAIT FOR */
+/*THEM TO COMPLETE. */
+/* ------------------------------------------------------------------------- */
+ tcgcFlag = ZFALSE;
+ }//if
+ }//for
+ if (tcgcFlag == ZTRUE) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*WE HAVE FOUND A COMPLETED GLOBAL CHECKPOINT OPERATION. WE NOW NEED TO SEND */
+/*GCP_SAVECONF, REMOVE THE GCP RECORD FROM THE LIST OF WAITING GCP RECORDS */
+/*ON THIS LOG PART AND RELEASE THE GCP RECORD. */
+// After changing the log implementation we need to perform a FSSYNCREQ on all
+// log files where the last log word resided first before proceeding.
+/* ------------------------------------------------------------------------- */
+ UintR Ti;
+ for (Ti = 0; Ti < 4; Ti++) {
+ LogFileRecordPtr loopLogFilePtr;
+ loopLogFilePtr.i = gcpPtr.p->gcpFilePtr[Ti];
+ ptrCheckGuard(loopLogFilePtr, clogFileFileSize, logFileRecord);
+ if (loopLogFilePtr.p->logFileStatus == LogFileRecord::OPEN) {
+ jam();
+ signal->theData[0] = loopLogFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = gcpPtr.p->gcpFilePtr[Ti];
+ sendSignal(NDBFS_REF, GSN_FSSYNCREQ, signal, 3, JBA);
+ } else {
+ ndbrequire((loopLogFilePtr.p->logFileStatus ==
+ LogFileRecord::CLOSED) ||
+ (loopLogFilePtr.p->logFileStatus ==
+ LogFileRecord::CLOSING_WRITE_LOG) ||
+ (loopLogFilePtr.p->logFileStatus ==
+ LogFileRecord::OPENING_WRITE_LOG));
+ signal->theData[0] = loopLogFilePtr.i;
+ execFSSYNCCONF(signal);
+ }//if
+ }//for
+ return;
+ }//if
+ }//if
+ }//if
+}//Dblqh::checkGcpCompleted()
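+/* --------------------------------------------------------------------------
+ * Editorial note (illustration only, not part of this change): the nested
+ * page/word comparison above is a lexicographic "has the write front reached
+ * the recorded GCP position?" test. Equivalent standalone form, local names:
+ *
+ *   bool gcpPositionOnDisk(Uint32 pageWritten, Uint32 wordWritten,
+ *                          Uint32 gcpPageNo, Uint32 gcpWordNo)
+ *   {
+ *     if (pageWritten < gcpPageNo) return false;
+ *     if (pageWritten == gcpPageNo && wordWritten < gcpWordNo) return false;
+ *     return true;   // (pageWritten, wordWritten) >= (gcpPageNo, gcpWordNo)
+ *   }
+ *
+ * Only when every one of the four log parts reports ZON_DISK does the code
+ * go on to sync the files and (via execFSSYNCCONF) send GCP_SAVECONF.
+ * -------------------------------------------------------------------------- */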
+
+void
+Dblqh::execFSSYNCCONF(Signal* signal)
+{
+ GcpRecordPtr localGcpPtr;
+ LogFileRecordPtr localLogFilePtr;
+ LogPartRecordPtr localLogPartPtr;
+ localLogFilePtr.i = signal->theData[0];
+ ptrCheckGuard(localLogFilePtr, clogFileFileSize, logFileRecord);
+ localLogPartPtr.i = localLogFilePtr.p->logPartRec;
+ localGcpPtr.i = ccurrentGcprec;
+ ptrCheckGuard(localGcpPtr, cgcprecFileSize, gcpRecord);
+ localGcpPtr.p->gcpSyncReady[localLogPartPtr.i] = ZTRUE;
+ UintR Ti;
+ for (Ti = 0; Ti < 4; Ti++) {
+ jam();
+ if (localGcpPtr.p->gcpSyncReady[Ti] == ZFALSE) {
+ jam();
+ return;
+ }//if
+ }//for
+ GCPSaveConf * const saveConf = (GCPSaveConf *)&signal->theData[0];
+ saveConf->dihPtr = localGcpPtr.p->gcpUserptr;
+ saveConf->nodeId = getOwnNodeId();
+ saveConf->gci = localGcpPtr.p->gcpId;
+ sendSignal(localGcpPtr.p->gcpBlockref, GSN_GCP_SAVECONF, signal,
+ GCPSaveConf::SignalLength, JBA);
+ ccurrentGcprec = RNIL;
+}//Dblqh::execFSSYNCCONF()
+
+void
+Dblqh::execFSSYNCREF(Signal* signal)
+{
+ jamEntry();
+ systemErrorLab(signal);
+ return;
+}//Dblqh::execFSSYNCREF()
+
+
+/* ######################################################################### */
+/* ####### FILE HANDLING MODULE ####### */
+/* */
+/* ######################################################################### */
+/* THIS MODULE HANDLES RESPONSE MESSAGES FROM THE FILE SYSTEM */
+/* ######################################################################### */
+/* ######################################################################### */
+/* SIGNAL RECEPTION MODULE */
+/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
+/* */
+/* THIS MODULE CHECKS THE STATE AND JUMPS TO THE PROPER PART OF THE FILE */
+/* HANDLING MODULE. */
+/* ######################################################################### */
+/* *************** */
+/* FSCLOSECONF > */
+/* *************** */
+void Dblqh::execFSCLOSECONF(Signal* signal)
+{
+ jamEntry();
+ logFilePtr.i = signal->theData[0];
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ switch (logFilePtr.p->logFileStatus) {
+ case LogFileRecord::CLOSE_SR_INVALIDATE_PAGES:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ // Set the prev file to check if we shall close it.
+ logFilePtr.i = logFilePtr.p->prevLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ exitFromInvalidate(signal);
+ return;
+ break;
+ case LogFileRecord::CLOSING_INIT:
+ jam();
+ closingInitLab(signal);
+ return;
+ break;
+ case LogFileRecord::CLOSING_SR:
+ jam();
+ closingSrLab(signal);
+ return;
+ break;
+ case LogFileRecord::CLOSING_EXEC_SR:
+ jam();
+ closeExecSrLab(signal);
+ return;
+ break;
+ case LogFileRecord::CLOSING_EXEC_SR_COMPLETED:
+ jam();
+ closeExecSrCompletedLab(signal);
+ return;
+ break;
+ case LogFileRecord::CLOSING_WRITE_LOG:
+ jam();
+ closeWriteLogLab(signal);
+ return;
+ break;
+ case LogFileRecord::CLOSING_EXEC_LOG:
+ jam();
+ closeExecLogLab(signal);
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+}//Dblqh::execFSCLOSECONF()
+
+/* ************>> */
+/* FSCLOSEREF > */
+/* ************>> */
+void Dblqh::execFSCLOSEREF(Signal* signal)
+{
+ jamEntry();
+ terrorCode = signal->theData[1];
+ systemErrorLab(signal);
+ return;
+}//Dblqh::execFSCLOSEREF()
+
+/* ************>> */
+/* FSOPENCONF > */
+/* ************>> */
+void Dblqh::execFSOPENCONF(Signal* signal)
+{
+ jamEntry();
+ initFsopenconf(signal);
+ switch (logFilePtr.p->logFileStatus) {
+ case LogFileRecord::OPEN_SR_INVALIDATE_PAGES:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ readFileInInvalidate(signal);
+ return;
+ break;
+ case LogFileRecord::OPENING_INIT:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openFileInitLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_SR_FRONTPAGE:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openSrFrontpageLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_SR_LAST_FILE:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openSrLastFileLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_SR_NEXT_FILE:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openSrNextFileLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_EXEC_SR_START:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openExecSrStartLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_EXEC_SR_NEW_MBYTE:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openExecSrNewMbyteLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_SR_FOURTH_PHASE:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openSrFourthPhaseLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_SR_FOURTH_NEXT:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openSrFourthNextLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_SR_FOURTH_ZERO:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openSrFourthZeroLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPENING_WRITE_LOG:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ return;
+ break;
+ case LogFileRecord::OPEN_EXEC_LOG:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openExecLogLab(signal);
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+}//Dblqh::execFSOPENCONF()
+
+/* ************> */
+/* FSOPENREF > */
+/* ************> */
+void Dblqh::execFSOPENREF(Signal* signal)
+{
+ jamEntry();
+ terrorCode = signal->theData[1];
+ systemErrorLab(signal);
+ return;
+}//Dblqh::execFSOPENREF()
+
+/* ************>> */
+/* FSREADCONF > */
+/* ************>> */
+void Dblqh::execFSREADCONF(Signal* signal)
+{
+ jamEntry();
+ initFsrwconf(signal);
+
+ switch (lfoPtr.p->lfoState) {
+ case LogFileOperationRecord::READ_SR_LAST_MBYTE:
+ jam();
+ releaseLfo(signal);
+ readSrLastMbyteLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_FRONTPAGE:
+ jam();
+ releaseLfo(signal);
+ readSrFrontpageLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_LAST_FILE:
+ jam();
+ releaseLfo(signal);
+ readSrLastFileLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_NEXT_FILE:
+ jam();
+ releaseLfo(signal);
+ readSrNextFileLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_EXEC_SR:
+ jam();
+ readExecSrLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_EXEC_LOG:
+ jam();
+ readExecLogLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_INVALIDATE_PAGES:
+ jam();
+ invalidateLogAfterLastGCI(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_FOURTH_PHASE:
+ jam();
+ releaseLfo(signal);
+ readSrFourthPhaseLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_FOURTH_ZERO:
+ jam();
+ releaseLfo(signal);
+ readSrFourthZeroLab(signal);
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+}//Dblqh::execFSREADCONF()
+
+/* ************>> */
+/* FSREADREF > */
+/* ************>> */
+void Dblqh::execFSREADREF(Signal* signal)
+{
+ jamEntry();
+ lfoPtr.i = signal->theData[0];
+ ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
+ terrorCode = signal->theData[1];
+ switch (lfoPtr.p->lfoState) {
+ case LogFileOperationRecord::READ_SR_LAST_MBYTE:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_FRONTPAGE:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_LAST_FILE:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_NEXT_FILE:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_EXEC_SR:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_EXEC_LOG:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_FOURTH_PHASE:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_FOURTH_ZERO:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_INVALIDATE_PAGES:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ return;
+}//Dblqh::execFSREADREF()
+
+/* *************** */
+/* FSWRITECONF > */
+/* *************** */
+void Dblqh::execFSWRITECONF(Signal* signal)
+{
+ jamEntry();
+ initFsrwconf(signal);
+ switch (lfoPtr.p->lfoState) {
+ case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
+ jam();
+ invalidateLogAfterLastGCI(signal);
+ return;
+ break;
+ case LogFileOperationRecord::WRITE_PAGE_ZERO:
+ jam();
+ writePageZeroLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::LAST_WRITE_IN_FILE:
+ jam();
+ lastWriteInFileLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::INIT_WRITE_AT_END:
+ jam();
+ initWriteEndLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::INIT_FIRST_PAGE:
+ jam();
+ initFirstPageLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::WRITE_GCI_ZERO:
+ jam();
+ writeGciZeroLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::WRITE_DIRTY:
+ jam();
+ writeDirtyLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::WRITE_INIT_MBYTE:
+ jam();
+ writeInitMbyteLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::ACTIVE_WRITE_LOG:
+ jam();
+ writeLogfileLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::FIRST_PAGE_WRITE_IN_LOGFILE:
+ jam();
+ firstPageWriteLab(signal);
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+}//Dblqh::execFSWRITECONF()
+
+/* ************>> */
+/* FSWRITEREF > */
+/* ************>> */
+void Dblqh::execFSWRITEREF(Signal* signal)
+{
+ jamEntry();
+ lfoPtr.i = signal->theData[0];
+ ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
+ terrorCode = signal->theData[1];
+ switch (lfoPtr.p->lfoState) {
+ case LogFileOperationRecord::WRITE_PAGE_ZERO:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::LAST_WRITE_IN_FILE:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::INIT_WRITE_AT_END:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::INIT_FIRST_PAGE:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::WRITE_GCI_ZERO:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::WRITE_DIRTY:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::WRITE_INIT_MBYTE:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::ACTIVE_WRITE_LOG:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::FIRST_PAGE_WRITE_IN_LOGFILE:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
+ jam();
+ systemErrorLab(signal);
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ break;
+ }//switch
+}//Dblqh::execFSWRITEREF()
+
+
+/* ========================================================================= */
+/* ======= INITIATE WHEN RECEIVING FSOPENCONF ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initFsopenconf(Signal* signal)
+{
+ logFilePtr.i = signal->theData[0];
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logFilePtr.p->fileRef = signal->theData[1];
+ logPartPtr.i = logFilePtr.p->logPartRec;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logFilePtr.p->currentMbyte = 0;
+ logFilePtr.p->filePosition = 0;
+ logFilePtr.p->logFilePagesToDiskWithoutSynch = 0;
+}//Dblqh::initFsopenconf()
+
+/* ========================================================================= */
+/* ======= INITIATE WHEN RECEIVING FSREADCONF AND FSWRITECONF ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initFsrwconf(Signal* signal)
+{
+ lfoPtr.i = signal->theData[0];
+ ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
+ logFilePtr.i = lfoPtr.p->logFileRec;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPartPtr.i = logFilePtr.p->logPartRec;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logPagePtr.i = lfoPtr.p->firstLfoPage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+}//Dblqh::initFsrwconf()
+
+/* ######################################################################### */
+/* NORMAL OPERATION MODULE */
+/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
+/* */
+/* THIS PART HANDLES THE NORMAL OPENING, CLOSING AND WRITING OF LOG FILES */
+/* DURING NORMAL OPERATION. */
+/* ######################################################################### */
+/*---------------------------------------------------------------------------*/
+/* THIS SIGNAL IS USED TO SUPERVISE THAT THE LOG RECORDS ARE NOT KEPT IN MAIN*/
+/* MEMORY FOR MORE THAN 1 SECOND TO ACHIEVE THE PROPER RELIABILITY. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::timeSup(Signal* signal)
+{
+ LogPageRecordPtr origLogPagePtr;
+ Uint32 wordWritten;
+
+ jamEntry();
+ logPartPtr.i = signal->theData[0];
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ if (logPartPtr.p->logPartTimer != logPartPtr.p->logTimer) {
+ jam();
+/*--------------------------------------------------------------------------*/
+/* THIS LOG PART HAS NOT WRITTEN TO DISK DURING THE LAST SECOND. */
+/*--------------------------------------------------------------------------*/
+ switch (logPartPtr.p->logPartState) {
+ case LogPartRecord::FILE_CHANGE_PROBLEM:
+ jam();
+/*--------------------------------------------------------------------------*/
+/* THIS LOG PART HAS PROBLEMS IN CHANGING FILES MAKING IT IMPOSSIBLE */
+// TO WRITE TO THE FILE CURRENTLY. WE WILL COME BACK LATER AND SEE IF
+// THE PROBLEM HAS BEEN FIXED.
+/*--------------------------------------------------------------------------*/
+ case LogPartRecord::ACTIVE:
+ jam();
+/*---------------------------------------------------------------------------*/
+/* AN OPERATION IS CURRENTLY ACTIVE IN WRITING THIS LOG PART. WE THUS CANNOT */
+/* WRITE ANYTHING TO DISK AT THIS MOMENT. WE WILL SEND A SIGNAL DELAYED FOR */
+/* 10 MS AND THEN TRY AGAIN. POSSIBLY THE LOG PART WILL HAVE BEEN WRITTEN */
+/* BY THEN OR ELSE IT SHOULD BE FREE TO WRITE AGAIN. */
+/*---------------------------------------------------------------------------*/
+ signal->theData[0] = ZTIME_SUPERVISION;
+ signal->theData[1] = logPartPtr.i;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
+ return;
+ break;
+ case LogPartRecord::IDLE:
+ case LogPartRecord::TAIL_PROBLEM:
+ jam();
+/*---------------------------------------------------------------------------*/
+/* IDLE AND NOT WRITTEN TO DISK IN A SECOND. ALSO WHEN WE HAVE A TAIL PROBLEM*/
+/* WE HAVE TO WRITE TO DISK AT TIMES. WE WILL FIRST CHECK WHETHER ANYTHING */
+/* AT ALL HAS BEEN WRITTEN TO THE PAGES BEFORE WRITING TO DISK. */
+/*---------------------------------------------------------------------------*/
+/* WE HAVE TO WRITE TO DISK IN ALL CASES SINCE THERE COULD BE INFORMATION */
+/* STILL IN THE LOG THAT WAS GENERATED BEFORE THE PREVIOUS TIME SUPERVISION */
+/* BUT AFTER THE LAST DISK WRITE. THIS PREVIOUSLY STOPPED ALL DISK WRITES */
+/* WHEN NO MORE LOG WRITES WERE PERFORMED (THIS HAPPENED WHEN LOG GOT FULL */
+/* AND AFTER LOADING THE INITIAL RECORDS IN INITIAL START). */
+/*---------------------------------------------------------------------------*/
+ if (((logFilePtr.p->currentFilepage + 1) & (ZPAGES_IN_MBYTE -1)) == 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS IS THE LAST PAGE IN THIS MBYTE. WRITE NEXT LOG AND SWITCH TO NEXT */
+/* MBYTE. */
+/*---------------------------------------------------------------------------*/
+ changeMbyte(signal);
+ } else {
+/*---------------------------------------------------------------------------*/
+/* WRITE THE LOG PAGE TO DISK EVEN IF IT IS NOT FULL. KEEP PAGE AND WRITE A */
+/* COPY. THE ORIGINAL PAGE WILL BE WRITTEN AGAIN LATER ON. */
+/*---------------------------------------------------------------------------*/
+ wordWritten = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] - 1;
+ origLogPagePtr.i = logPagePtr.i;
+ origLogPagePtr.p = logPagePtr.p;
+ seizeLogpage(signal);
+ MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[0],
+ &origLogPagePtr.p->logPageWord[0],
+ wordWritten + 1);
+ ndbrequire(wordWritten < ZPAGE_SIZE);
+ if (logFilePtr.p->noLogpagesInBuffer > 0) {
+ jam();
+ completedLogPage(signal, ZENFORCE_WRITE);
+/*---------------------------------------------------------------------------*/
+/*SINCE WE ARE ONLY WRITING PART OF THE LAST PAGE WE HAVE TO UPDATE THE WORD */
+/*WRITTEN TO REFLECT THE REAL LAST WORD WRITTEN. WE ALSO HAVE TO MOVE THE */
+/*FILE POSITION ONE STEP BACKWARDS SINCE WE ARE NOT WRITING THE LAST PAGE */
+/*COMPLETELY. IT WILL BE WRITTEN AGAIN. */
+/*---------------------------------------------------------------------------*/
+ lfoPtr.p->lfoWordWritten = wordWritten;
+ logFilePtr.p->filePosition = logFilePtr.p->filePosition - 1;
+ } else {
+ if (wordWritten == (ZPAGE_HEADER_SIZE - 1)) {
+/*---------------------------------------------------------------------------*/
+/*THIS IS POSSIBLE BUT VERY UNLIKELY. IF THE PAGE WAS COMPLETED AFTER THE LAST*/
+/*WRITE TO DISK THEN NO_LOG_PAGES_IN_BUFFER > 0 AND IF NOT WRITTEN SINCE LAST*/
+/*WRITE TO DISK THEN THE PREVIOUS PAGE MUST HAVE BEEN WRITTEN BY SOME */
+/*OPERATION AND THAT BECAME COMPLETELY FULL. IN ANY CASE WE NEED NOT WRITE AN*/
+/*EMPTY PAGE TO DISK. */
+/*---------------------------------------------------------------------------*/
+ jam();
+ releaseLogpage(signal);
+ } else {
+ jam();
+ writeSinglePage(signal, logFilePtr.p->currentFilepage, wordWritten);
+ lfoPtr.p->lfoState = LogFileOperationRecord::ACTIVE_WRITE_LOG;
+ }//if
+ }//if
+ }//if
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ }//if
+ logPartPtr.p->logTimer++;
+ return;
+}//Dblqh::timeSup()
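+/* --------------------------------------------------------------------------
+ * Editorial note (illustration only, not part of this change): the IDLE /
+ * TAIL_PROBLEM branch above flushes a partially filled page by seizing a new
+ * page, copying the words written so far into it, and writing the COPY,
+ * while the original page stays current and keeps accumulating log records;
+ * it is written again later when it fills up (see the comments above about
+ * adjusting lfoWordWritten and filePosition). Sketch of the copy step, local
+ * names:
+ *
+ *   void copyPartialPage(const Uint32* origPage, Uint32 wordWritten,
+ *                        Uint32* copyPage)
+ *   {
+ *     // MEMCOPY_NO_WORDS above copies wordWritten + 1 words.
+ *     memcpy(copyPage, origPage, (wordWritten + 1) * sizeof(Uint32));
+ *   }
+ * -------------------------------------------------------------------------- */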
+
+void Dblqh::writeLogfileLab(Signal* signal)
+{
+/*---------------------------------------------------------------------------*/
+/* CHECK IF ANY GLOBAL CHECKPOINTS ARE COMPLETED DUE TO THIS COMPLETED DISK */
+/* WRITE. */
+/*---------------------------------------------------------------------------*/
+ switch (logFilePtr.p->fileChangeState) {
+ case LogFileRecord::NOT_ONGOING:
+ jam();
+ checkGcpCompleted(signal,
+ ((lfoPtr.p->lfoPageNo + lfoPtr.p->noPagesRw) - 1),
+ lfoPtr.p->lfoWordWritten);
+ break;
+#if 0
+ case LogFileRecord::BOTH_WRITES_ONGOING:
+ jam();
+ ndbout_c("not crashing!!");
+ // Fall-through
+#endif
+ case LogFileRecord::WRITE_PAGE_ZERO_ONGOING:
+ case LogFileRecord::LAST_WRITE_ONGOING:
+ jam();
+ logFilePtr.p->lastPageWritten = (lfoPtr.p->lfoPageNo + lfoPtr.p->noPagesRw) - 1;
+ logFilePtr.p->lastWordWritten = lfoPtr.p->lfoWordWritten;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ releaseLfoPages(signal);
+ releaseLfo(signal);
+ return;
+}//Dblqh::writeLogfileLab()
+
+void Dblqh::closeWriteLogLab(Signal* signal)
+{
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ return;
+}//Dblqh::closeWriteLogLab()
+
+/* ######################################################################### */
+/* FILE CHANGE MODULE */
+/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
+/* */
+/*THIS PART OF THE FILE MODULE HANDLES WHEN WE ARE CHANGING LOG FILE DURING */
+/*NORMAL OPERATION. WE HAVE TO BE CAREFUL WHEN WE ARE CHANGING LOG FILE SO */
+/*THAT WE DO NOT COMPLICATE THE SYSTEM RESTART PROCESS TOO MUCH. */
+/*THE IDEA IS THAT WE START BY WRITING THE LAST WRITE IN THE OLD FILE AND WE */
+/*ALSO WRITE THE FIRST PAGE OF THE NEW FILE CONCURRENT WITH THAT. THIS FIRST */
+/*PAGE IN THE NEW FILE DOES NOT CONTAIN ANY LOG RECORDS OTHER THAN A DESCRIPTOR*/
+/*CONTAINING INFORMATION ABOUT GCI'S NEEDED AT SYSTEM RESTART AND A NEXT LOG */
+/*RECORD. */
+/* */
+/*WHEN BOTH OF THOSE WRITES HAVE COMPLETED WE ALSO WRITE PAGE ZERO IN FILE */
+/*ZERO. THE ONLY INFORMATION WHICH IS INTERESTING HERE IS THE NEW FILE NUMBER*/
+/* */
+/*IF OPTIMISATIONS ARE NEEDED OF THE LOG HANDLING THEN IT IS POSSIBLE TO */
+/*AVOID WRITING THE FIRST PAGE OF THE NEW FILE IMMEDIATELY. THIS COMPLICATES */
+/*THE SYSTEM RESTART AND ONE HAS TO TAKE SPECIAL CARE WITH FILE ZERO. IT IS */
+/*HOWEVER NO LARGE PROBLEM TO CHANGE INTO THIS SCENARIO. AVOIDING THE WRITE */
+/*OF PAGE ZERO IS ALSO POSSIBLE BUT COMPLICATES THE DESIGN EVEN */
+/*FURTHER. IT GETS FAIRLY COMPLEX TO FIND THE END OF THE LOG. SOME SORT OF */
+/*BINARY SEARCH IS HOWEVER MOST LIKELY A GOOD METHODOLOGY FOR THIS. */
+/* ######################################################################### */
+void Dblqh::firstPageWriteLab(Signal* signal)
+{
+ releaseLfo(signal);
+/*---------------------------------------------------------------------------*/
+/* RELEASE PAGE ZERO IF THE FILE IS NOT FILE 0. */
+/*---------------------------------------------------------------------------*/
+ Uint32 fileNo = logFilePtr.p->fileNo;
+ if (fileNo != 0) {
+ jam();
+ releaseLogpage(signal);
+ }//if
+/*---------------------------------------------------------------------------*/
+/* IF A NEW FILE HAS BEEN OPENED WE SHALL ALWAYS ALSO WRITE TO PAGE 0 IN */
+/* FILE 0. THE AIM IS TO MAKE RESTARTS EASIER BY SPECIFYING WHICH IS THE */
+/* LAST FILE WHERE LOGGING HAS STARTED. */
+/*---------------------------------------------------------------------------*/
+/* FIRST CHECK WHETHER THE LAST WRITE IN THE PREVIOUS FILE HAS COMPLETED */
+/*---------------------------------------------------------------------------*/
+ if (logFilePtr.p->fileChangeState == LogFileRecord::BOTH_WRITES_ONGOING) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE LAST WRITE WAS STILL ONGOING. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->fileChangeState = LogFileRecord::LAST_WRITE_ONGOING;
+ return;
+ } else {
+ jam();
+ ndbrequire(logFilePtr.p->fileChangeState == LogFileRecord::FIRST_WRITE_ONGOING);
+/*---------------------------------------------------------------------------*/
+/* WRITE TO PAGE 0 IN FILE 0 NOW. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->fileChangeState = LogFileRecord::WRITE_PAGE_ZERO_ONGOING;
+ if (fileNo == 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* IF THE NEW FILE WAS 0 THEN WE HAVE ALREADY WRITTEN PAGE ZERO IN FILE 0. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING;
+ return;
+ } else {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* WRITE PAGE ZERO IN FILE ZERO. LOG_FILE_REC WILL REFER TO THE LOG FILE WE */
+/* HAVE JUST WRITTEN PAGE ZERO IN TO GET HOLD OF LOG_FILE_PTR FOR THIS */
+/* RECORD QUICKLY. THIS IS NEEDED TO GET HOLD OF THE FILE_CHANGE_STATE. */
+/* THE ONLY INFORMATION WE WANT TO CHANGE IS THE LAST FILE NUMBER IN THE */
+/* FILE DESCRIPTOR. THIS IS USED AT SYSTEM RESTART TO FIND THE END OF THE */
+/* LOG PART. */
+/*---------------------------------------------------------------------------*/
+ Uint32 currLogFile = logFilePtr.i;
+ logFilePtr.i = logPartPtr.p->firstLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logFilePtr.p->logPageZero;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] = fileNo;
+ writeSinglePage(signal, 0, ZPAGE_SIZE - 1);
+ lfoPtr.p->logFileRec = currLogFile;
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_PAGE_ZERO;
+ return;
+ }//if
+ }//if
+}//Dblqh::firstPageWriteLab()
+
+void Dblqh::lastWriteInFileLab(Signal* signal)
+{
+ LogFileRecordPtr locLogFilePtr;
+/*---------------------------------------------------------------------------*/
+/* CHECK IF ANY GLOBAL CHECKPOINTS ARE COMPLETED DUE TO THIS COMPLETED DISK */
+/* WRITE. */
+/*---------------------------------------------------------------------------*/
+ checkGcpCompleted(signal,
+ ((lfoPtr.p->lfoPageNo + lfoPtr.p->noPagesRw) - 1),
+ (ZPAGE_SIZE - 1));
+ releaseLfoPages(signal);
+ releaseLfo(signal);
+/*---------------------------------------------------------------------------*/
+/* IF THE FILE IS NOT IN USE OR THE NEXT FILE TO BE USED WE WILL CLOSE IT. */
+/*---------------------------------------------------------------------------*/
+ locLogFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
+ if (logFilePtr.i != locLogFilePtr.i) {
+ if (logFilePtr.i != locLogFilePtr.p->nextLogFile) {
+ if (logFilePtr.p->fileNo != 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE FILE IS NOT FILE ZERO EITHER. WE WILL NOT CLOSE FILE ZERO SINCE WE */
+/* USE IT TO KEEP TRACK OF THE CURRENT LOG FILE BY WRITING PAGE ZERO IN */
+/* FILE ZERO. */
+/*---------------------------------------------------------------------------*/
+/* WE WILL CLOSE THE FILE. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_WRITE_LOG;
+ closeFile(signal, logFilePtr);
+ }//if
+ }//if
+ }//if
+/*---------------------------------------------------------------------------*/
+/* IF A NEW FILE HAS BEEN OPENED WE SHALL ALWAYS ALSO WRITE TO PAGE 0 IN */
+/* FILE 0. THE AIM IS TO MAKE RESTARTS EASIER BY SPECIFYING WHICH IS THE */
+/* LAST FILE WHERE LOGGING HAS STARTED. */
+/*---------------------------------------------------------------------------*/
+/* FIRST CHECK WHETHER THE FIRST WRITE IN THE NEW FILE HAS COMPLETED */
+/* THIS STATE INFORMATION IS IN THE NEW LOG FILE AND THUS WE HAVE TO MOVE */
+/* THE LOG FILE POINTER TO THIS LOG FILE. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ if (logFilePtr.p->fileChangeState == LogFileRecord::BOTH_WRITES_ONGOING) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE FIRST WRITE WAS STILL ONGOING. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->fileChangeState = LogFileRecord::FIRST_WRITE_ONGOING;
+ return;
+ } else {
+ ndbrequire(logFilePtr.p->fileChangeState == LogFileRecord::LAST_WRITE_ONGOING);
+/*---------------------------------------------------------------------------*/
+/* WRITE TO PAGE 0 IN FILE 0 NOW. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->fileChangeState = LogFileRecord::WRITE_PAGE_ZERO_ONGOING;
+ Uint32 fileNo = logFilePtr.p->fileNo;
+ if (fileNo == 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* IF THE NEW FILE WAS 0 THEN WE HAVE ALREADY WRITTEN PAGE ZERO IN FILE 0. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING;
+ return;
+ } else {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* WRITE PAGE ZERO IN FILE ZERO. LOG_FILE_REC WILL REFER TO THE LOG FILE WE */
+/* HAVE JUST WRITTEN PAGE ZERO IN TO GET HOLD OF LOG_FILE_PTR FOR THIS */
+/* RECORD QUICKLY. THIS IS NEEDED TO GET HOLD OF THE FILE_CHANGE_STATE. */
+/* THE ONLY INFORMATION WE WANT TO CHANGE IS THE LAST FILE NUMBER IN THE */
+/* FILE DESCRIPTOR. THIS IS USED AT SYSTEM RESTART TO FIND THE END OF THE */
+/* LOG PART. */
+/*---------------------------------------------------------------------------*/
+ Uint32 currLogFile = logFilePtr.i;
+ logFilePtr.i = logPartPtr.p->firstLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logFilePtr.p->logPageZero;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] = fileNo;
+ writeSinglePage(signal, 0, ZPAGE_SIZE - 1);
+ lfoPtr.p->logFileRec = currLogFile;
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_PAGE_ZERO;
+ return;
+ }//if
+ }//if
+}//Dblqh::lastWriteInFileLab()
+
+void Dblqh::writePageZeroLab(Signal* signal)
+{
+ logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING;
+/*---------------------------------------------------------------------------*/
+/* PAGE WRITES TO THE CURRENT FILE COULD HAVE ARRIVED WHILE WE WERE WAITING */
+/* FOR THIS DISK WRITE TO COMPLETE. THEY COULD NOT CHECK FOR */
+/* COMPLETED GLOBAL CHECKPOINTS. THUS WE SHOULD DO THAT NOW INSTEAD. */
+/*---------------------------------------------------------------------------*/
+ checkGcpCompleted(signal,
+ logFilePtr.p->lastPageWritten,
+ logFilePtr.p->lastWordWritten);
+ releaseLfo(signal);
+ return;
+}//Dblqh::writePageZeroLab()
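+/* --------------------------------------------------------------------------
+ * Editorial note (illustration only, not part of this change): taken
+ * together, firstPageWriteLab(), lastWriteInFileLab() and writePageZeroLab()
+ * drive fileChangeState roughly as follows (the two initial writes may
+ * complete in either order):
+ *
+ *   BOTH_WRITES_ONGOING
+ *     -> LAST_WRITE_ONGOING   (first page of new file done, old file pending)
+ *     -> FIRST_WRITE_ONGOING  (old file's last write done, new page pending)
+ *   then, when the remaining write completes:
+ *     -> WRITE_PAGE_ZERO_ONGOING  (rewrite page 0 of file 0 with the new
+ *                                  file number; skipped if the new file IS 0)
+ *     -> NOT_ONGOING
+ *
+ * writePageZeroLab() also re-runs checkGcpCompleted() for writes that
+ * completed while page 0 was outstanding and could not check it themselves.
+ * -------------------------------------------------------------------------- */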
+
+/* ######################################################################### */
+/* INITIAL START MODULE */
+/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
+/* */
+/*THIS MODULE INITIALISES ALL THE LOG FILES THAT ARE NEEDED AT A SYSTEM */
+/*RESTART AND WHICH ARE USED DURING NORMAL OPERATIONS. IT CREATES THE FILES */
+/*AND SETS A PROPER SIZE OF THEM AND INITIALISES THE FIRST PAGE IN EACH FILE */
+/* ######################################################################### */
+void Dblqh::openFileInitLab(Signal* signal)
+{
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN_INIT;
+ seizeLogpage(signal);
+ writeSinglePage(signal, (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE) - 1, ZPAGE_SIZE - 1);
+ lfoPtr.p->lfoState = LogFileOperationRecord::INIT_WRITE_AT_END;
+ return;
+}//Dblqh::openFileInitLab()
+
+void Dblqh::initWriteEndLab(Signal* signal)
+{
+ releaseLfo(signal);
+ initLogpage(signal);
+ if (logFilePtr.p->fileNo == 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* PAGE ZERO IN FILE ZERO MUST SET LOG LAP TO ONE SINCE IT HAS STARTED */
+/* WRITING TO THE LOG, ALSO GLOBAL CHECKPOINTS ARE SET TO ZERO. */
+/*---------------------------------------------------------------------------*/
+ logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 1;
+ logPagePtr.p->logPageWord[ZPOS_MAX_GCI_STARTED] = 0;
+ logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED] = 0;
+ logFilePtr.p->logMaxGciStarted[0] = 0;
+ logFilePtr.p->logMaxGciCompleted[0] = 0;
+ }//if
+/*---------------------------------------------------------------------------*/
+/* REUSE CODE FOR INITIALISATION OF FIRST PAGE IN ALL LOG FILES. */
+/*---------------------------------------------------------------------------*/
+ writeFileHeaderOpen(signal, ZINIT);
+ return;
+}//Dblqh::initWriteEndLab()
+
+void Dblqh::initFirstPageLab(Signal* signal)
+{
+ releaseLfo(signal);
+ if (logFilePtr.p->fileNo == 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* IN FILE ZERO WE WILL ALSO WRITE PAGE ONE, IN WHICH WE INSERT A COMPLETED */
+/* GCI RECORD FOR GCI = 0. */
+/*---------------------------------------------------------------------------*/
+ initLogpage(signal);
+ logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 1;
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE] = ZCOMPLETED_GCI_TYPE;
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + 1] = 1;
+ writeSinglePage(signal, 1, ZPAGE_SIZE - 1);
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_GCI_ZERO;
+ return;
+ }//if
+ logFilePtr.p->currentMbyte = 1;
+ writeInitMbyte(signal);
+ return;
+}//Dblqh::initFirstPageLab()
+
+void Dblqh::writeGciZeroLab(Signal* signal)
+{
+ releaseLfo(signal);
+ logFilePtr.p->currentMbyte = 1;
+ writeInitMbyte(signal);
+ return;
+}//Dblqh::writeGciZeroLab()
+
+void Dblqh::writeInitMbyteLab(Signal* signal)
+{
+ releaseLfo(signal);
+ logFilePtr.p->currentMbyte = logFilePtr.p->currentMbyte + 1;
+ if (logFilePtr.p->currentMbyte == ZNO_MBYTES_IN_FILE) {
+ jam();
+ releaseLogpage(signal);
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_INIT;
+ closeFile(signal, logFilePtr);
+ return;
+ }//if
+ writeInitMbyte(signal);
+ return;
+}//Dblqh::writeInitMbyteLab()
+
+void Dblqh::closingInitLab(Signal* signal)
+{
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ logPartPtr.i = logFilePtr.p->logPartRec;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ if (logFilePtr.p->nextLogFile == logPartPtr.p->firstLogfile) {
+ jam();
+ checkInitCompletedLab(signal);
+ return;
+ } else {
+ jam();
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ openLogfileInit(signal);
+ }//if
+ return;
+}//Dblqh::closingInitLab()
+
+void Dblqh::checkInitCompletedLab(Signal* signal)
+{
+ logPartPtr.p->logPartState = LogPartRecord::SR_FIRST_PHASE_COMPLETED;
+/*---------------------------------------------------------------------------*/
+/* WE HAVE NOW INITIALISED ALL FILES IN THIS LOG PART. WE CAN NOW SET THE */
+/* LOG LAP TO ONE SINCE WE WILL START WITH LOG LAP ONE. LOG LAP = ZERO */
+/* MEANS THIS PART OF THE LOG IS NOT WRITTEN YET. */
+/*---------------------------------------------------------------------------*/
+ logPartPtr.p->logLap = 1;
+ logPartPtr.i = 0;
+CHECK_LOG_PARTS_LOOP:
+ ptrAss(logPartPtr, logPartRecord);
+ if (logPartPtr.p->logPartState != LogPartRecord::SR_FIRST_PHASE_COMPLETED) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS PART HAS STILL NOT COMPLETED. WAIT FOR THIS TO OCCUR. */
+/*---------------------------------------------------------------------------*/
+ return;
+ }//if
+ if (logPartPtr.i == 3) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* ALL LOG PARTS ARE COMPLETED. NOW WE CAN CONTINUE WITH THE RESTART */
+/* PROCESSING. THE NEXT STEP IS TO PREPARE FOR EXECUTING OPERATIONS. THUS WE */
+/* NEED TO INITIALISE ALL NEEDED DATA AND TO OPEN FILE ZERO AND THE NEXT AND */
+/* TO SET THE CURRENT LOG PAGE TO BE PAGE 1 IN FILE ZERO. */
+/*---------------------------------------------------------------------------*/
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ ptrAss(logPartPtr, logPartRecord);
+ signal->theData[0] = ZINIT_FOURTH;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//for
+ return;
+ } else {
+ jam();
+ logPartPtr.i = logPartPtr.i + 1;
+ goto CHECK_LOG_PARTS_LOOP;
+ }//if
+}//Dblqh::checkInitCompletedLab()
+
+/* ========================================================================= */
+/* ======= INITIATE LOG FILE OPERATION RECORD WHEN ALLOCATED ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initLfo(Signal* signal)
+{
+ lfoPtr.p->firstLfoPage = RNIL;
+ lfoPtr.p->lfoState = LogFileOperationRecord::IDLE;
+ lfoPtr.p->logFileRec = logFilePtr.i;
+ lfoPtr.p->noPagesRw = 0;
+ lfoPtr.p->lfoPageNo = ZNIL;
+}//Dblqh::initLfo()
+
+/* ========================================================================= */
+/* ======= INITIATE LOG FILE WHEN ALLOCATED ======= */
+/* */
+/* INPUT: TFILE_NO NUMBER OF THE FILE INITIATED */
+/* LOG_PART_PTR NUMBER OF LOG PART */
+/* SUBROUTINE SHORT NAME = IL */
+/* ========================================================================= */
+void Dblqh::initLogfile(Signal* signal, Uint32 fileNo)
+{
+ UintR tilTmp;
+ UintR tilIndex;
+
+ logFilePtr.p->currentFilepage = 0;
+ logFilePtr.p->currentLogpage = RNIL;
+ logFilePtr.p->fileName[0] = (UintR)-1;
+ logFilePtr.p->fileName[1] = (UintR)-1; /* = H'FFFFFFFF = -1 */
+ logFilePtr.p->fileName[2] = fileNo; /* Sfile_no */
+ tilTmp = 1; /* VERSION 1 OF FILE NAME */
+ tilTmp = (tilTmp << 8) + 1; /* FRAGMENT LOG => .FRAGLOG AS EXTENSION */
+ tilTmp = (tilTmp << 8) + (8 + logPartPtr.i); /* DIRECTORY = D(8+Part)/DBLQH */
+ tilTmp = (tilTmp << 8) + 255; /* IGNORE Pxx PART OF FILE NAME */
+ logFilePtr.p->fileName[3] = tilTmp;
+/* ========================================================================= */
+/* FILE NAME BECOMES /D2/DBLQH/Tpart_no/Sfile_no.FRAGLOG */
+/* ========================================================================= */
+ logFilePtr.p->fileNo = fileNo;
+ logFilePtr.p->filePosition = 0;
+ logFilePtr.p->firstLfo = RNIL;
+ logFilePtr.p->lastLfo = RNIL;
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ logFilePtr.p->logPartRec = logPartPtr.i;
+ logFilePtr.p->noLogpagesInBuffer = 0;
+ logFilePtr.p->firstFilledPage = RNIL;
+ logFilePtr.p->lastFilledPage = RNIL;
+ logFilePtr.p->lastPageWritten = 0;
+ logFilePtr.p->logPageZero = RNIL;
+ logFilePtr.p->currentMbyte = 0;
+ for (tilIndex = 0; tilIndex <= 15; tilIndex++) {
+ logFilePtr.p->logMaxGciCompleted[tilIndex] = (UintR)-1;
+ logFilePtr.p->logMaxGciStarted[tilIndex] = (UintR)-1;
+ logFilePtr.p->logLastPrepRef[tilIndex] = 0;
+ }//for
+}//Dblqh::initLogfile()
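+/* --------------------------------------------------------------------------
+ * Editorial note (illustration only, not part of this change): fileName[3]
+ * above is a packed word read by the file system layer; from the shifts at
+ * this call site it holds, from most to least significant byte: version (1),
+ * file type (1 = .FRAGLOG), directory (8 + log part number) and 255 meaning
+ * "no Pxx part in the name". A decoding sketch, local names:
+ *
+ *   void decodeName3(Uint32 w, Uint32& ver, Uint32& type,
+ *                    Uint32& dir, Uint32& pPart)
+ *   {
+ *     ver   = (w >> 24) & 0xFF;
+ *     type  = (w >> 16) & 0xFF;
+ *     dir   = (w >>  8) & 0xFF;
+ *     pPart =  w        & 0xFF;
+ *   }
+ * -------------------------------------------------------------------------- */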
+
+/* ========================================================================= */
+/* ======= INITIATE LOG PAGE WHEN ALLOCATED ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initLogpage(Signal* signal)
+{
+ TcConnectionrecPtr ilpTcConnectptr;
+
+ logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = logPartPtr.p->logLap;
+ logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED] =
+ logPartPtr.p->logPartNewestCompletedGCI;
+ logPagePtr.p->logPageWord[ZPOS_MAX_GCI_STARTED] = cnewestGci;
+ logPagePtr.p->logPageWord[ZPOS_VERSION] = NDB_VERSION;
+ logPagePtr.p->logPageWord[ZPOS_NO_LOG_FILES] = logPartPtr.p->noLogFiles;
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
+ ilpTcConnectptr.i = logPartPtr.p->firstLogTcrec;
+ if (ilpTcConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(ilpTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF] =
+ (ilpTcConnectptr.p->logStartFileNo << 16) +
+ (ilpTcConnectptr.p->logStartPageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE);
+ } else {
+ jam();
+ logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF] =
+ (logFilePtr.p->fileNo << 16) +
+ (logFilePtr.p->currentFilepage >> ZTWOLOG_NO_PAGES_IN_MBYTE);
+ }//if
+}//Dblqh::initLogpage()
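+/* --------------------------------------------------------------------------
+ * Editorial note (illustration only, not part of this change): the
+ * ZLAST_LOG_PREP_REF word set above packs a log position as
+ * (file number << 16) | mbyte number, where the mbyte number is the page
+ * number shifted down by ZTWOLOG_NO_PAGES_IN_MBYTE. writeFileDescriptor()
+ * uses the same packing for logLastPrepRef. Pack/unpack sketch, local names:
+ *
+ *   Uint32 packPrepRef(Uint32 fileNo, Uint32 pageNo)
+ *   {
+ *     return (fileNo << 16) + (pageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE);
+ *   }
+ *   Uint32 prepRefFileNo(Uint32 ref) { return ref >> 16; }
+ *   Uint32 prepRefMbyte(Uint32 ref)  { return ref & 0xFFFF; }
+ * -------------------------------------------------------------------------- */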
+
+/* ------------------------------------------------------------------------- */
+/* ------- OPEN LOG FILE FOR READ AND WRITE ------- */
+/* */
+/* SUBROUTINE SHORT NAME = OFR */
+/* ------------------------------------------------------------------------- */
+void Dblqh::openFileRw(Signal* signal, LogFileRecordPtr olfLogFilePtr)
+{
+ signal->theData[0] = cownref;
+ signal->theData[1] = olfLogFilePtr.i;
+ signal->theData[2] = olfLogFilePtr.p->fileName[0];
+ signal->theData[3] = olfLogFilePtr.p->fileName[1];
+ signal->theData[4] = olfLogFilePtr.p->fileName[2];
+ signal->theData[5] = olfLogFilePtr.p->fileName[3];
+ signal->theData[6] = ZOPEN_READ_WRITE;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+}//Dblqh::openFileRw()
+
+/* ------------------------------------------------------------------------- */
+/* ------- OPEN LOG FILE DURING INITIAL START ------- */
+/* */
+/* SUBROUTINE SHORT NAME = OLI */
+/* ------------------------------------------------------------------------- */
+void Dblqh::openLogfileInit(Signal* signal)
+{
+ logFilePtr.p->logFileStatus = LogFileRecord::OPENING_INIT;
+ signal->theData[0] = cownref;
+ signal->theData[1] = logFilePtr.i;
+ signal->theData[2] = logFilePtr.p->fileName[0];
+ signal->theData[3] = logFilePtr.p->fileName[1];
+ signal->theData[4] = logFilePtr.p->fileName[2];
+ signal->theData[5] = logFilePtr.p->fileName[3];
+ signal->theData[6] = 0x302;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+}//Dblqh::openLogfileInit()
+
+/* OPEN FOR READ/WRITE, DO CREATE AND DO TRUNCATE FILE */
+/* ------------------------------------------------------------------------- */
+/* ------- OPEN NEXT LOG FILE ------- */
+/* */
+/* SUBROUTINE SHORT NAME = ONL */
+/* ------------------------------------------------------------------------- */
+void Dblqh::openNextLogfile(Signal* signal)
+{
+ LogFileRecordPtr onlLogFilePtr;
+
+ if (logPartPtr.p->noLogFiles > 2) {
+ jam();
+/* -------------------------------------------------- */
+/* IF ONLY 1 OR 2 LOG FILES EXIST THEN THEY ARE */
+/* ALWAYS OPEN AND THUS IT IS NOT NECESSARY TO */
+/* OPEN THEM NOW. */
+/* -------------------------------------------------- */
+ onlLogFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(onlLogFilePtr, clogFileFileSize, logFileRecord);
+ if (onlLogFilePtr.p->logFileStatus != LogFileRecord::CLOSED) {
+ ndbrequire(onlLogFilePtr.p->fileNo == 0);
+ return;
+ }//if
+ onlLogFilePtr.p->logFileStatus = LogFileRecord::OPENING_WRITE_LOG;
+ signal->theData[0] = cownref;
+ signal->theData[1] = onlLogFilePtr.i;
+ signal->theData[2] = onlLogFilePtr.p->fileName[0];
+ signal->theData[3] = onlLogFilePtr.p->fileName[1];
+ signal->theData[4] = onlLogFilePtr.p->fileName[2];
+ signal->theData[5] = onlLogFilePtr.p->fileName[3];
+ signal->theData[6] = 2;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ }//if
+}//Dblqh::openNextLogfile()
+
+ /* OPEN FOR READ/WRITE, DON'T CREATE AND DON'T TRUNCATE FILE */
+/* ------------------------------------------------------------------------- */
+/* ------- RELEASE LFO RECORD ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::releaseLfo(Signal* signal)
+{
+#ifdef VM_TRACE
+ // Check that lfo record isn't already in free list
+ LogFileOperationRecordPtr TlfoPtr;
+ TlfoPtr.i = cfirstfreeLfo;
+ while (TlfoPtr.i != RNIL){
+ ptrCheckGuard(TlfoPtr, clfoFileSize, logFileOperationRecord);
+ ndbrequire(TlfoPtr.i != lfoPtr.i);
+ TlfoPtr.i = TlfoPtr.p->nextLfo;
+ }
+#endif
+ lfoPtr.p->nextLfo = cfirstfreeLfo;
+ lfoPtr.p->lfoTimer = 0;
+ cfirstfreeLfo = lfoPtr.i;
+ lfoPtr.p->lfoState = LogFileOperationRecord::IDLE;
+}//Dblqh::releaseLfo()
+
+/* ------------------------------------------------------------------------- */
+/* ------- RELEASE ALL LOG PAGES CONNECTED TO A LFO RECORD ------- */
+/* */
+/* SUBROUTINE SHORT NAME = RLP */
+/* ------------------------------------------------------------------------- */
+void Dblqh::releaseLfoPages(Signal* signal)
+{
+ LogPageRecordPtr rlpLogPagePtr;
+
+ logPagePtr.i = lfoPtr.p->firstLfoPage;
+RLP_LOOP:
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ rlpLogPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE];
+ releaseLogpage(signal);
+ if (rlpLogPagePtr.i != RNIL) {
+ jam();
+ logPagePtr.i = rlpLogPagePtr.i;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ goto RLP_LOOP;
+ }//if
+ lfoPtr.p->firstLfoPage = RNIL;
+}//Dblqh::releaseLfoPages()
+
+/* ------------------------------------------------------------------------- */
+/* ------- RELEASE LOG PAGE ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::releaseLogpage(Signal* signal)
+{
+#ifdef VM_TRACE
+ // Check that log page isn't already in free list
+ LogPageRecordPtr TlogPagePtr;
+ TlogPagePtr.i = cfirstfreeLogPage;
+ while (TlogPagePtr.i != RNIL){
+ ptrCheckGuard(TlogPagePtr, clogPageFileSize, logPageRecord);
+ ndbrequire(TlogPagePtr.i != logPagePtr.i);
+ TlogPagePtr.i = TlogPagePtr.p->logPageWord[ZNEXT_PAGE];
+ }
+#endif
+
+ cnoOfLogPages++;
+ logPagePtr.p->logPageWord[ZNEXT_PAGE] = cfirstfreeLogPage;
+ cfirstfreeLogPage = logPagePtr.i;
+}//Dblqh::releaseLogpage()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEIZE LFO RECORD ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::seizeLfo(Signal* signal)
+{
+ lfoPtr.i = cfirstfreeLfo;
+ ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
+ cfirstfreeLfo = lfoPtr.p->nextLfo;
+ lfoPtr.p->nextLfo = RNIL;
+ lfoPtr.p->lfoTimer = cLqhTimeOutCount;
+}//Dblqh::seizeLfo()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEIZE LOG FILE RECORD ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::seizeLogfile(Signal* signal)
+{
+ logFilePtr.i = cfirstfreeLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+/* ------------------------------------------------------------------------- */
+/*IF LIST IS EMPTY THEN A SYSTEM CRASH IS INVOKED SINCE LOG_FILE_PTR = RNIL */
+/* ------------------------------------------------------------------------- */
+ cfirstfreeLogFile = logFilePtr.p->nextLogFile;
+ logFilePtr.p->nextLogFile = RNIL;
+}//Dblqh::seizeLogfile()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEIZE LOG PAGE RECORD ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::seizeLogpage(Signal* signal)
+{
+ cnoOfLogPages--;
+ logPagePtr.i = cfirstfreeLogPage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+/* ------------------------------------------------------------------------- */
+/*IF LIST IS EMPTY THEN A SYSTEM CRASH IS INVOKED SINCE LOG_PAGE_PTR = RNIL */
+/* ------------------------------------------------------------------------- */
+ cfirstfreeLogPage = logPagePtr.p->logPageWord[ZNEXT_PAGE];
+ logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
+}//Dblqh::seizeLogpage()
+
+/* ------------------------------------------------------------------------- */
+/* ------- WRITE FILE DESCRIPTOR INFORMATION ------- */
+/* */
+/* SUBROUTINE SHORT NAME: WFD */
+// Pointer handling:
+// logFilePtr in
+// logPartPtr in
+/* ------------------------------------------------------------------------- */
+void Dblqh::writeFileDescriptor(Signal* signal)
+{
+ TcConnectionrecPtr wfdTcConnectptr;
+ UintR twfdFileNo;
+ UintR twfdMbyte;
+
+/* -------------------------------------------------- */
+/* START BY WRITING TO LOG FILE RECORD */
+/* -------------------------------------------------- */
+ arrGuard(logFilePtr.p->currentMbyte, 16);
+ logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] =
+ logPartPtr.p->logPartNewestCompletedGCI;
+ logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] = cnewestGci;
+ wfdTcConnectptr.i = logPartPtr.p->firstLogTcrec;
+ if (wfdTcConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(wfdTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ twfdFileNo = wfdTcConnectptr.p->logStartFileNo;
+ twfdMbyte = wfdTcConnectptr.p->logStartPageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE;
+ logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] =
+ (twfdFileNo << 16) + twfdMbyte;
+ } else {
+ jam();
+ logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] =
+ (logFilePtr.p->fileNo << 16) + logFilePtr.p->currentMbyte;
+ }//if
+}//Dblqh::writeFileDescriptor()
+
+/* ------------------------------------------------------------------------- */
+/* ------- WRITE THE HEADER PAGE OF A NEW FILE ------- */
+/* */
+/* SUBROUTINE SHORT NAME: WMO */
+/* ------------------------------------------------------------------------- */
+void Dblqh::writeFileHeaderOpen(Signal* signal, Uint32 wmoType)
+{
+ LogFileRecordPtr wmoLogFilePtr;
+ UintR twmoNoLogDescriptors;
+ UintR twmoLoop;
+ UintR twmoIndex;
+
+/* -------------------------------------------------- */
+/* WRITE HEADER INFORMATION IN THE NEW FILE. */
+/* -------------------------------------------------- */
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_LOG_TYPE] = ZFD_TYPE;
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] =
+ logFilePtr.p->fileNo;
+ if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ jam();
+ twmoNoLogDescriptors = ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ } else {
+ jam();
+ twmoNoLogDescriptors = logPartPtr.p->noLogFiles;
+ }//if
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD] =
+ twmoNoLogDescriptors;
+ wmoLogFilePtr.i = logFilePtr.i;
+ twmoLoop = 0;
+WMO_LOOP:
+ jam();
+ if (twmoLoop < twmoNoLogDescriptors) {
+ jam();
+ ptrCheckGuard(wmoLogFilePtr, clogFileFileSize, logFileRecord);
+ for (twmoIndex = 0; twmoIndex <= ZNO_MBYTES_IN_FILE - 1; twmoIndex++) {
+ jam();
+ arrGuard(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (twmoLoop * ZFD_PART_SIZE)) + twmoIndex, ZPAGE_SIZE);
+ logPagePtr.p->logPageWord[((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (twmoLoop * ZFD_PART_SIZE)) + twmoIndex] =
+ wmoLogFilePtr.p->logMaxGciCompleted[twmoIndex];
+ arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (twmoLoop * ZFD_PART_SIZE)) + ZNO_MBYTES_IN_FILE) +
+ twmoIndex, ZPAGE_SIZE);
+ logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (twmoLoop * ZFD_PART_SIZE)) + ZNO_MBYTES_IN_FILE) + twmoIndex] =
+ wmoLogFilePtr.p->logMaxGciStarted[twmoIndex];
+ arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (twmoLoop * ZFD_PART_SIZE)) + (2 * ZNO_MBYTES_IN_FILE)) +
+ twmoIndex, ZPAGE_SIZE);
+ logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (twmoLoop * ZFD_PART_SIZE)) + (2 * ZNO_MBYTES_IN_FILE)) + twmoIndex] =
+ wmoLogFilePtr.p->logLastPrepRef[twmoIndex];
+ }//for
+ wmoLogFilePtr.i = wmoLogFilePtr.p->prevLogFile;
+ twmoLoop = twmoLoop + 1;
+ goto WMO_LOOP;
+ }//if
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
+ (ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (ZFD_PART_SIZE * twmoNoLogDescriptors);
+ arrGuard(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX], ZPAGE_SIZE);
+ logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] =
+ ZNEXT_LOG_RECORD_TYPE;
+/* ------------------------------------------------------- */
+/* THIS IS A SPECIAL WRITE OF THE FIRST PAGE IN THE */
+/* LOG FILE. THIS HAS SPECIAL SIGNIFICANCE TO FIND */
+/* THE END OF THE LOG AT SYSTEM RESTART. */
+/* ------------------------------------------------------- */
+ writeSinglePage(signal, 0, ZPAGE_SIZE - 1);
+ if (wmoType == ZINIT) {
+ jam();
+ lfoPtr.p->lfoState = LogFileOperationRecord::INIT_FIRST_PAGE;
+ } else {
+ jam();
+ lfoPtr.p->lfoState = LogFileOperationRecord::FIRST_PAGE_WRITE_IN_LOGFILE;
+ }//if
+ logFilePtr.p->filePosition = 1;
+ if (wmoType == ZNORMAL) {
+ jam();
+/* -------------------------------------------------- */
+/* ALLOCATE A NEW PAGE SINCE THE CURRENT IS */
+/* WRITTEN. */
+/* -------------------------------------------------- */
+ seizeLogpage(signal);
+ initLogpage(signal);
+ logFilePtr.p->currentLogpage = logPagePtr.i;
+ logFilePtr.p->currentFilepage = logFilePtr.p->currentFilepage + 1;
+ }//if
+}//Dblqh::writeFileHeaderOpen()
+
+/* -------------------------------------------------- */
+/* THE NEW FILE POSITION WILL ALWAYS BE 1 SINCE */
+/* WE JUST WROTE THE FIRST PAGE IN THE LOG FILE */
+/* -------------------------------------------------- */
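+/* --------------------------------------------------------------------------
+ * Editorial note (illustration only, not part of this change): the index
+ * arithmetic in writeFileHeaderOpen() lays the header page out as
+ *
+ *   [ page header | FD header | descriptor 0 | descriptor 1 | ... | ZNEXT ]
+ *
+ * where each descriptor occupies ZFD_PART_SIZE words holding, in order,
+ * ZNO_MBYTES_IN_FILE words of logMaxGciCompleted, then logMaxGciStarted,
+ * then logLastPrepRef, for this file and its predecessors (newest first, at
+ * most ZMAX_LOG_FILES_IN_PAGE_ZERO of them). The word offset of entry
+ * "mbyte" in descriptor d, array a (0..2), is therefore
+ *
+ *   ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE + d * ZFD_PART_SIZE
+ *     + a * ZNO_MBYTES_IN_FILE + mbyte
+ *
+ * and ZCURR_PAGE_INDEX is left pointing just past the last descriptor, where
+ * the ZNEXT_LOG_RECORD_TYPE marker is stored.
+ * -------------------------------------------------------------------------- */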
+/* ------------------------------------------------------------------------- */
+/* ------- WRITE A MBYTE HEADER DURING INITIAL START ------- */
+/* */
+/* SUBROUTINE SHORT NAME: WIM */
+/* ------------------------------------------------------------------------- */
+void Dblqh::writeInitMbyte(Signal* signal)
+{
+ initLogpage(signal);
+ writeSinglePage(signal, logFilePtr.p->currentMbyte * ZPAGES_IN_MBYTE, ZPAGE_SIZE - 1);
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_INIT_MBYTE;
+}//Dblqh::writeInitMbyte()
+
+/* ------------------------------------------------------------------------- */
+/* ------- WRITE A SINGLE PAGE INTO A FILE ------- */
+/* */
+/* INPUT: TWSP_PAGE_NO THE PAGE NUMBER WRITTEN */
+/* SUBROUTINE SHORT NAME: WSP */
+/* ------------------------------------------------------------------------- */
+void Dblqh::writeSinglePage(Signal* signal, Uint32 pageNo, Uint32 wordWritten)
+{
+ seizeLfo(signal);
+ initLfo(signal);
+ lfoPtr.p->firstLfoPage = logPagePtr.i;
+ logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
+
+ // Calculate checksum for page
+ logPagePtr.p->logPageWord[ZPOS_CHECKSUM] = calcPageCheckSum(logPagePtr);
+
+ lfoPtr.p->lfoPageNo = pageNo;
+ lfoPtr.p->lfoWordWritten = wordWritten;
+ lfoPtr.p->noPagesRw = 1;
+/* -------------------------------------------------- */
+/* SET TIMER ON THIS LOG PART TO SIGNIFY THAT A */
+/* LOG RECORD HAS BEEN SENT AT THIS TIME. */
+/* -------------------------------------------------- */
+ logPartPtr.p->logPartTimer = logPartPtr.p->logTimer;
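+/* -------------------------------------------------- */
+/*       SEND FSWRITEREQ TO NDBFS. THE LFO RECORD      */
+/*       SEIZED ABOVE IDENTIFIES THIS WRITE            */
+/*       OPERATION WHEN THE FILE SYSTEM REPLIES.       */
+/* -------------------------------------------------- */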
+ signal->theData[0] = logFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lfoPtr.i;
+ signal->theData[3] = ZLIST_OF_PAIRS_SYNCH;
+ signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
+ signal->theData[5] = 1; /* ONE PAGE WRITTEN */
+ signal->theData[6] = logPagePtr.i;
+ signal->theData[7] = pageNo;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+}//Dblqh::writeSinglePage()
+
+/* ##########################################################################
+ * SYSTEM RESTART PHASE ONE MODULE
+ * THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING.
+ *
+ * THIS MODULE CONTAINS THE CODE FOR THE FIRST PHASE OF THE SYSTEM RESTART.
+ * THE AIM OF THIS PHASE IS TO FIND THE END OF THE LOG AND TO FIND
+ * INFORMATION ABOUT WHERE GLOBAL CHECKPOINTS ARE COMPLETED AND STARTED
+ * IN THE LOG. THIS INFORMATION IS NEEDED TO START PHASE THREE OF
+ * THE SYSTEM RESTART.
+ * ########################################################################## */
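+/* --------------------------------------------------------------------------
+ * IN OUTLINE THE FLOW OF THIS MODULE IS: OPEN FILE 0, READ ITS PAGE 0 TO
+ * FIND THE LAST OPEN FILE, READ PAGE 0 OF THAT FILE, READ THE FIRST PAGE OF
+ * EACH MBYTE IN THAT FILE TO FIND THE LAST MBYTE WRITTEN, POSSIBLY STEP
+ * BACKWARDS THROUGH MORE FILES TO COLLECT GCI INFORMATION AND FINALLY CLOSE
+ * ALL FILES. WHEN ALL LOG PARTS ARE DONE PHASE THREE IS SIGNALLED.
+ * -------------------------------------------------------------------------- */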
+/* --------------------------------------------------------------------------
+ * A SYSTEM RESTART OR NODE RESTART IS ONGOING. WE HAVE NOW OPENED FILE 0
+ * NOW WE NEED TO READ PAGE 0 TO FIND WHICH LOG FILE WAS OPEN AT
+ * CRASH TIME.
+ * -------------------------------------------------------------------------- */
+void Dblqh::openSrFrontpageLab(Signal* signal)
+{
+ readSinglePage(signal, 0);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_FRONTPAGE;
+ return;
+}//Dblqh::openSrFrontpageLab()
+
+/* -------------------------------------------------------------------------
+ * WE HAVE NOW READ PAGE 0 IN FILE 0. CHECK THE LAST OPEN FILE. ACTUALLY THE
+ * LAST OPEN FILE COULD BE THE NEXT ONE AFTER THAT, SO CHECK THAT FIRST. ONCE
+ * THE LAST FILE IS FOUND WE HAVE ALL THE INFORMATION NEEDED TO DECIDE WHERE
+ * TO START AND STOP READING THE LOG.
+ * -------------------------------------------------------------------------- */
+void Dblqh::readSrFrontpageLab(Signal* signal)
+{
+ Uint32 fileNo = logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO];
+ if (fileNo == 0) {
+ jam();
+ /* ----------------------------------------------------------------------
+     * FILE 0 WAS ALSO THE LAST FILE, SO WE DO NOT NEED TO READ IT AGAIN.
+ * ---------------------------------------------------------------------- */
+ readSrLastFileLab(signal);
+ return;
+ }//if
+ /* ------------------------------------------------------------------------
+ * CLOSE FILE 0 SO THAT WE HAVE CLOSED ALL FILES WHEN STARTING TO READ
+ * THE FRAGMENT LOG. ALSO RELEASE PAGE ZERO.
+ * ------------------------------------------------------------------------ */
+ releaseLogpage(signal);
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR;
+ closeFile(signal, logFilePtr);
+ LogFileRecordPtr locLogFilePtr;
+ findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_LAST_FILE;
+ openFileRw(signal, locLogFilePtr);
+ return;
+}//Dblqh::readSrFrontpageLab()
+
+void Dblqh::openSrLastFileLab(Signal* signal)
+{
+ readSinglePage(signal, 0);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_LAST_FILE;
+ return;
+}//Dblqh::openSrLastFileLab()
+
+void Dblqh::readSrLastFileLab(Signal* signal)
+{
+ logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP];
+ if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ jam();
+ initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO);
+ } else {
+ jam();
+ initGciInLogFileRec(signal, logPartPtr.p->noLogFiles);
+ }//if
+ releaseLogpage(signal);
+ /* ------------------------------------------------------------------------
+   * NOW WE HAVE FOUND THE LAST LOG FILE. WE ALSO NEED TO FIND THE LAST
+   * MBYTE THAT WAS WRITTEN BEFORE THE SYSTEM CRASH.
+ * ------------------------------------------------------------------------ */
+ logPartPtr.p->lastLogfile = logFilePtr.i;
+ readSinglePage(signal, 0);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_LAST_MBYTE;
+ logFilePtr.p->currentMbyte = 0;
+ return;
+}//Dblqh::readSrLastFileLab()
+
+void Dblqh::readSrLastMbyteLab(Signal* signal)
+{
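+  /* ------------------------------------------------------------------------
+   * THE FIRST MBYTE WHOSE FIRST PAGE CARRIES AN OLDER LOG LAP THAN THE LAP
+   * READ FROM PAGE ZERO OF THE LAST FILE MARKS THE END OF THE LOG. THE
+   * PREVIOUS MBYTE IS THEN RECORDED AS THE LAST MBYTE WRITTEN BEFORE THE
+   * CRASH.
+   * ------------------------------------------------------------------------ */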
+ if (logPartPtr.p->lastMbyte == ZNIL) {
+ if (logPagePtr.p->logPageWord[ZPOS_LOG_LAP] < logPartPtr.p->logLap) {
+ jam();
+ logPartPtr.p->lastMbyte = logFilePtr.p->currentMbyte - 1;
+ }//if
+ }//if
+ arrGuard(logFilePtr.p->currentMbyte, 16);
+ logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] =
+ logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED];
+ logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] =
+ logPagePtr.p->logPageWord[ZPOS_MAX_GCI_STARTED];
+ logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] =
+ logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF];
+ releaseLogpage(signal);
+ if (logFilePtr.p->currentMbyte < (ZNO_MBYTES_IN_FILE - 1)) {
+ jam();
+ logFilePtr.p->currentMbyte++;
+ readSinglePage(signal, ZPAGES_IN_MBYTE * logFilePtr.p->currentMbyte);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_LAST_MBYTE;
+ return;
+ } else {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THE LOG WAS IN THE LAST MBYTE WHEN THE CRASH OCCURRED SINCE ALL
+ * LOG LAPS ARE EQUAL TO THE CURRENT LOG LAP.
+ * ---------------------------------------------------------------------- */
+ if (logPartPtr.p->lastMbyte == ZNIL) {
+ jam();
+ logPartPtr.p->lastMbyte = ZNO_MBYTES_IN_FILE - 1;
+ }//if
+ }//if
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR;
+ closeFile(signal, logFilePtr);
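+  /* ------------------------------------------------------------------------
+   * PAGE ZERO OF A LOG FILE ONLY DESCRIBES UP TO ZMAX_LOG_FILES_IN_PAGE_ZERO
+   * LOG FILES (SEE initGciInLogFileRec ABOVE). WHEN THERE ARE MORE LOG FILES
+   * THAN THAT WE STEP BACKWARDS THAT MANY FILES, WRAPPING AROUND THE FILE
+   * NUMBERS, AND READ PAGE ZERO THERE AS WELL. FILE 0 IS AVOIDED SINCE IT IS
+   * PROBABLY BEING CLOSED AT THE MOMENT.
+   * ------------------------------------------------------------------------ */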
+ if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ Uint32 fileNo;
+ if (logFilePtr.p->fileNo >= ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ jam();
+ fileNo = logFilePtr.p->fileNo - ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ } else {
+ jam();
+ fileNo =
+ (logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) -
+ ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ }//if
+ if (fileNo == 0) {
+ jam();
+ /* --------------------------------------------------------------------
+ * AVOID USING FILE 0 AGAIN SINCE THAT IS PROBABLY CLOSING AT THE
+ * MOMENT.
+ * -------------------------------------------------------------------- */
+ fileNo = 1;
+ logPartPtr.p->srRemainingFiles =
+ logPartPtr.p->noLogFiles - (ZMAX_LOG_FILES_IN_PAGE_ZERO - 1);
+ } else {
+ jam();
+ logPartPtr.p->srRemainingFiles =
+ logPartPtr.p->noLogFiles - ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ }//if
+ LogFileRecordPtr locLogFilePtr;
+ findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_NEXT_FILE;
+ openFileRw(signal, locLogFilePtr);
+ return;
+ }//if
+ /* ------------------------------------------------------------------------
+   * THERE WAS NO NEED TO READ PAGE ZERO IN ANY MORE FILES.
+   * WE NOW HAVE ALL THE INFORMATION WE NEED ABOUT THE GCI'S.
+ * NOW JUST WAIT FOR CLOSE OPERATIONS TO COMPLETE.
+ * ------------------------------------------------------------------------ */
+ return;
+}//Dblqh::readSrLastMbyteLab()
+
+void Dblqh::openSrNextFileLab(Signal* signal)
+{
+ readSinglePage(signal, 0);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_NEXT_FILE;
+ return;
+}//Dblqh::openSrNextFileLab()
+
+void Dblqh::readSrNextFileLab(Signal* signal)
+{
+ if (logPartPtr.p->srRemainingFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ jam();
+ initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO);
+ } else {
+ jam();
+ initGciInLogFileRec(signal, logPartPtr.p->srRemainingFiles);
+ }//if
+ releaseLogpage(signal);
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR;
+ closeFile(signal, logFilePtr);
+ if (logPartPtr.p->srRemainingFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ Uint32 fileNo;
+ if (logFilePtr.p->fileNo >= ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ jam();
+ fileNo = logFilePtr.p->fileNo - ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ } else {
+ jam();
+ fileNo =
+ (logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) -
+ ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ }//if
+ if (fileNo == 0) {
+ jam();
+ /* --------------------------------------------------------------------
+ * AVOID USING FILE 0 AGAIN SINCE THAT IS PROBABLY CLOSING AT THE MOMENT.
+ * -------------------------------------------------------------------- */
+ fileNo = 1;
+ logPartPtr.p->srRemainingFiles =
+ logPartPtr.p->srRemainingFiles - (ZMAX_LOG_FILES_IN_PAGE_ZERO - 1);
+ } else {
+ jam();
+ logPartPtr.p->srRemainingFiles =
+ logPartPtr.p->srRemainingFiles - ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ }//if
+ LogFileRecordPtr locLogFilePtr;
+ findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_NEXT_FILE;
+ openFileRw(signal, locLogFilePtr);
+ }//if
+ /* ------------------------------------------------------------------------
+   * THERE WAS NO NEED TO READ PAGE ZERO IN ANY MORE FILES.
+   * WE NOW HAVE ALL THE INFORMATION WE NEED ABOUT THE GCI'S.
+ * NOW JUST WAIT FOR CLOSE OPERATIONS TO COMPLETE.
+ * ------------------------------------------------------------------------ */
+ return;
+}//Dblqh::readSrNextFileLab()
+
+void Dblqh::closingSrLab(Signal* signal)
+{
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ logPartPtr.i = logFilePtr.p->logPartRec;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logFilePtr.i = logPartPtr.p->firstLogfile;
+ do {
+ jam();
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ if (logFilePtr.p->logFileStatus != LogFileRecord::CLOSED) {
+ jam();
+ /* --------------------------------------------------------------------
+ * EXIT AND WAIT FOR REMAINING LOG FILES TO COMPLETE THEIR WORK.
+ * -------------------------------------------------------------------- */
+ return;
+ }//if
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ } while (logFilePtr.i != logPartPtr.p->firstLogfile);
+ /* ------------------------------------------------------------------------
+ * ALL FILES IN THIS PART HAVE BEEN CLOSED. THIS INDICATES THAT THE FIRST
+   * PHASE OF THE SYSTEM RESTART HAS BEEN CONCLUDED FOR THIS LOG PART.
+ * CHECK IF ALL OTHER LOG PARTS ARE ALSO COMPLETED.
+ * ------------------------------------------------------------------------ */
+ logPartPtr.p->logPartState = LogPartRecord::SR_FIRST_PHASE_COMPLETED;
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+ if (logPartPtr.p->logPartState != LogPartRecord::SR_FIRST_PHASE_COMPLETED) {
+ jam();
+ /* --------------------------------------------------------------------
+ * EXIT AND WAIT FOR THE REST OF THE LOG PARTS TO COMPLETE.
+ * -------------------------------------------------------------------- */
+ return;
+ }//if
+ }//for
+ /* ------------------------------------------------------------------------
+   * THE FIRST PHASE HAS BEEN COMPLETED.
+ * ------------------------------------------------------------------------ */
+ signal->theData[0] = ZSR_PHASE3_START;
+ signal->theData[1] = ZSR_PHASE1_COMPLETED;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}//Dblqh::closingSrLab()
+
+/* ##########################################################################
+ * ####### SYSTEM RESTART PHASE TWO MODULE #######
+ *
+ * THIS MODULE HANDLES THE SYSTEM RESTART WHERE LQH CONTROLS TUP AND ACC TO
+ * ENSURE THAT THEY HAVE KNOWLEDGE OF ALL FRAGMENTS AND HAVE DONE THE NEEDED
+ * READING OF DATA FROM FILE AND EXECUTION OF LOCAL LOGS. THIS PROCESS
+ * EXECUTES CONCURRENTLY WITH PHASE ONE OF THE SYSTEM RESTART. THIS PHASE
+ * FINDS THE INFORMATION ABOUT THE FRAGMENT LOG NEEDED TO EXECUTE THE FRAGMENT
+ * LOG.
+ * WHEN TUP AND ACC HAVE PREPARED ALL FRAGMENTS THEN LQH ORDERS THOSE LQH'S
+ * THAT ARE RESPONSIBLE TO EXECUTE THE FRAGMENT LOGS TO DO SO. IT IS POSSIBLE
+ * THAT ANOTHER NODE EXECUTES THE LOG FOR A FRAGMENT RESIDING AT THIS NODE.
+ * ########################################################################## */
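+/* --------------------------------------------------------------------------
+ * IN OUTLINE: START_FRAGREQ ARRIVES FROM DIH FOR EACH FRAGMENT. FRAGMENTS
+ * WITH A LOCAL CHECKPOINT ARE RESTORED ONE AT A TIME VIA SR_FRAGIDREQ TO ACC
+ * FOLLOWED BY ACC_SRREQ AND TUP_SRREQ. A COMPLETED FRAGMENT IS PUT ON THE
+ * LIST OF COMPLETED FRAGMENTS AND THE NEXT WAITING FRAGMENT IS STARTED.
+ * WHEN START_RECREQ HAS BEEN RECEIVED AND NO FRAGMENT RESTORE IS IN PROGRESS
+ * ACC AND TUP ARE ORDERED TO EXECUTE THEIR UNDO LOGS, AND WHEN BOTH HAVE
+ * CONFIRMED, EXECUTION OF THE FRAGMENT LOGS IS STARTED.
+ * -------------------------------------------------------------------------- */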
+/* ***************>> */
+/* START_FRAGREQ > */
+/* ***************>> */
+void Dblqh::execSTART_FRAGREQ(Signal* signal)
+{
+ const StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0];
+ jamEntry();
+
+ tabptr.i = startFragReq->tableId;
+ Uint32 fragId = startFragReq->fragId;
+
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ if (!getFragmentrec(signal, fragId)) {
+ startFragRefLab(signal);
+ return;
+ }//if
+ tabptr.p->tableStatus = Tablerec::TABLE_DEFINED;
+
+ initFragrecSr(signal);
+ if (startFragReq->lcpNo == ZNIL) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THERE WAS NO LOCAL CHECKPOINT AVAILABLE FOR THIS FRAGMENT. WE DO
+ * NOT NEED TO READ IN THE LOCAL FRAGMENT. WE HAVE ALREADY ADDED THE
+ * FRAGMENT AS AN EMPTY FRAGMENT AT THIS POINT. THUS WE CAN SIMPLY
+ * EXIT AND THE FRAGMENT WILL PARTICIPATE IN THE EXECUTION OF THE LOG.
+ * PUT FRAGMENT ON LIST OF COMPLETED FRAGMENTS FOR EXECUTION OF LOG.
+ * ---------------------------------------------------------------------- */
+ fragptr.p->nextFrag = cfirstCompletedFragSr;
+ cfirstCompletedFragSr = fragptr.i;
+ return;
+ }//if
+ if (cfirstWaitFragSr == RNIL) {
+ jam();
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_IDLE) {
+ jam();
+ initLcpSr(signal, startFragReq->lcpNo,
+ startFragReq->lcpId, tabptr.i,
+ fragId, fragptr.i);
+ signal->theData[0] = lcpPtr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
+ signal->theData[3] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
+ signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.fragmentId;
+ sendSignal(fragptr.p->accBlockref, GSN_SR_FRAGIDREQ, signal, 5, JBB);
+ return;
+ }//if
+ }//if
+ fragptr.p->nextFrag = cfirstWaitFragSr;
+ cfirstWaitFragSr = fragptr.i;
+}//Dblqh::execSTART_FRAGREQ()
+
+void Dblqh::startFragRefLab(Signal* signal)
+{
+ const StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0];
+ BlockReference userRef = startFragReq->userRef;
+ Uint32 userPtr = startFragReq->userPtr;
+ signal->theData[0] = userPtr;
+ signal->theData[1] = terrorCode;
+ signal->theData[2] = cownNodeid;
+ sendSignal(userRef, GSN_START_FRAGREF, signal, 3, JBB);
+ return;
+}//Dblqh::startFragRefLab()
+
+/* ***************>> */
+/* SR_FRAGIDCONF > */
+/* ***************>> */
+/* --------------------------------------------------------------------------
+ * PRECONDITION: LCP_PTR:LCP_STATE = SR_WAIT_FRAGID
+ * -------------------------------------------------------------------------- */
+void Dblqh::execSR_FRAGIDCONF(Signal* signal)
+{
+ SrFragidConf * const srFragidConf = (SrFragidConf *)&signal->theData[0];
+ jamEntry();
+
+ lcpPtr.i = srFragidConf->lcpPtr;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_SR_WAIT_FRAGID);
+ /* ------------------------------------------------------------------------
+   * NO ERROR CHECKING OF THE TNO_LOCFRAG VALUE. AN OUT-OF-BOUNDS VALUE WILL
+   * LEAD TO AN INDEX OUT OF RANGE, WHICH CAUSES A SYSTEM RESTART AS DESIRED.
+ * ------------------------------------------------------------------------ */
+ lcpPtr.p->lcpAccptr = srFragidConf->accPtr;
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.p->accFragptr[0] = srFragidConf->fragPtr[0];
+ fragptr.p->accFragptr[1] = srFragidConf->fragPtr[1];
+ Uint32 noLocFrag = srFragidConf->noLocFrag;
+ ndbrequire(noLocFrag == 2);
+ Uint32 fragid[2];
+ Uint32 i;
+ for (i = 0; i < noLocFrag; i++) {
+ fragid[i] = srFragidConf->fragId[i];
+ }//for
+
+ for (i = 0; i < noLocFrag; i++) {
+ jam();
+ Uint32 fragId = fragid[i];
+ /* ----------------------------------------------------------------------
+     * THERE IS NO ERROR CHECKING ON PURPOSE. IT IS POSSIBLE TO CALCULATE HOW
+     * MANY LOCAL LCP RECORDS THERE SHOULD BE, AND IT SHOULD NEVER HAPPEN THAT
+     * NONE IS FREE. IF NONE IS FREE THE RESULT IS A POINTER OUT OF RANGE,
+     * WHICH IS AN ERROR IN ITSELF. THIS REUSES THE ERROR HANDLING IN THE
+     * AXE VM.
+ * ---------------------------------------------------------------------- */
+ seizeLcpLoc(signal);
+ initLcpLocAcc(signal, fragId);
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::SR_ACC_STARTED;
+ signal->theData[0] = lcpPtr.p->lcpAccptr;
+ signal->theData[1] = lcpLocptr.i;
+ signal->theData[2] = lcpLocptr.p->locFragid;
+ signal->theData[3] = lcpPtr.p->currentFragment.lcpFragOrd.lcpId % MAX_LCP_STORED;
+ sendSignal(fragptr.p->accBlockref, GSN_ACC_SRREQ, signal, 4, JBB);
+ seizeLcpLoc(signal);
+ initLcpLocTup(signal, fragId);
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::SR_TUP_STARTED;
+ signal->theData[0] = lcpLocptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
+ signal->theData[3] = lcpLocptr.p->locFragid;
+ signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUP_SRREQ, signal, 5, JBB);
+ }//for
+ lcpPtr.p->lcpState = LcpRecord::LCP_SR_STARTED;
+ return;
+}//Dblqh::execSR_FRAGIDCONF()
+
+/* ***************> */
+/* SR_FRAGIDREF > */
+/* ***************> */
+void Dblqh::execSR_FRAGIDREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execSR_FRAGIDREF()
+
+/* ************>> */
+/* ACC_SRCONF > */
+/* ************>> */
+/* --------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = SR_ACC_STARTED
+ * -------------------------------------------------------------------------- */
+void Dblqh::execACC_SRCONF(Signal* signal)
+{
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (lcpLocptr.p->lcpLocstate != LcpLocRecord::SR_ACC_STARTED) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS REFERENCE
+ * WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
+ * ------------------------------------------------------------------------ */
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::SR_ACC_COMPLETED;
+ srCompletedLab(signal);
+ return;
+}//Dblqh::execACC_SRCONF()
+
+/* ************> */
+/* ACC_SRREF > */
+/* ************> */
+void Dblqh::execACC_SRREF(Signal* signal)
+{
+ jamEntry();
+ terrorCode = signal->theData[1];
+ systemErrorLab(signal);
+ return;
+}//Dblqh::execACC_SRREF()
+
+/* ************>> */
+/* TUP_SRCONF > */
+/* ************>> */
+/* --------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = SR_TUP_STARTED
+ * -------------------------------------------------------------------------- */
+void Dblqh::execTUP_SRCONF(Signal* signal)
+{
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ Uint32 tupFragPtr = signal->theData[1];
+ ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::SR_TUP_STARTED);
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS REFERENCE
+ * WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
+ * ------------------------------------------------------------------------ */
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::SR_TUP_COMPLETED;
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (lcpLocptr.i == lcpPtr.p->firstLcpLocTup) {
+ jam();
+ fragptr.p->tupFragptr[1] = tupFragPtr;
+ } else {
+ jam();
+ fragptr.p->tupFragptr[0] = tupFragPtr;
+ }//if
+ srCompletedLab(signal);
+ return;
+}//Dblqh::execTUP_SRCONF()
+
+void Dblqh::srCompletedLab(Signal* signal)
+{
+ checkSrCompleted(signal);
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_SR_COMPLETED) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THE SYSTEM RESTART OF THIS FRAGMENT HAS BEEN COMPLETED. IT IS NOW
+ * TIME TO START A SYSTEM RESTART ON THE NEXT FRAGMENT OR CONTINUE
+ * WITH THE NEXT STEP OF THE SYSTEM RESTART. THIS STEP IS TO EXECUTE
+ * THE FRAGMENT LOGS.
+ * ----------------------------------------------------------------------
+ * WE RELEASE THE LOCAL LCP RECORDS.
+ * --------------------------------------------------------------------- */
+ releaseLocalLcps(signal);
+ /* ----------------------------------------------------------------------
+ * PUT FRAGMENT ON LIST OF FRAGMENTS WHICH HAVE BEEN STARTED AS PART OF
+ * THE SYSTEM RESTART. THEY ARE NOW WAITING TO EXECUTE THE FRAGMENT LOG.
+ * --------------------------------------------------------------------- */
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.p->nextFrag = cfirstCompletedFragSr;
+ cfirstCompletedFragSr = fragptr.i;
+ if (cfirstWaitFragSr != RNIL) {
+ jam();
+ /* --------------------------------------------------------------------
+ * ANOTHER FRAGMENT IS WAITING FOR SYSTEM RESTART. RESTART THIS
+ * FRAGMENT AS WELL.
+ * -------------------------------------------------------------------- */
+ fragptr.i = cfirstWaitFragSr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ cfirstWaitFragSr = fragptr.p->nextFrag;
+ /* --------------------------------------------------------------------
+ * RETRIEVE DATA FROM THE FRAGMENT RECORD.
+ * -------------------------------------------------------------------- */
+ ndbrequire(fragptr.p->srChkpnr < MAX_LCP_STORED);
+ initLcpSr(signal,
+ fragptr.p->srChkpnr,
+ fragptr.p->lcpId[fragptr.p->srChkpnr],
+ fragptr.p->tabRef,
+ fragptr.p->fragId,
+ fragptr.i);
+ signal->theData[0] = lcpPtr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
+ signal->theData[3] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
+ signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.fragmentId;
+ sendSignal(fragptr.p->accBlockref, GSN_SR_FRAGIDREQ, signal, 5, JBB);
+ return;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+ * NO MORE FRAGMENTS ARE WAITING FOR SYSTEM RESTART.
+ * -------------------------------------------------------------------- */
+ lcpPtr.p->lcpState = LcpRecord::LCP_IDLE;
+ if (cstartRecReq == ZTRUE) {
+ jam();
+ /* ----------------------------------------------------------------
+ * WE HAVE ALSO RECEIVED AN INDICATION THAT NO MORE FRAGMENTS
+           * NEED RESTART.
+ * NOW IT IS TIME TO START EXECUTING THE UNDO LOG.
+ * ----------------------------------------------------------------
+ * WE ARE NOW IN A POSITION TO ORDER TUP AND ACC TO START
+ * EXECUTING THEIR UNDO LOGS. THIS MUST BE DONE BEFORE THE
+ * FRAGMENT LOGS CAN BE EXECUTED.
+ * ---------------------------------------------------------------- */
+ csrExecUndoLogState = EULS_STARTED;
+ signal->theData[0] = caccBlockref;
+ signal->theData[1] = cownref;
+ sendSignal(caccBlockref, GSN_START_RECREQ, signal, 2, JBB);
+ signal->theData[0] = ctupBlockref;
+ signal->theData[1] = cownref;
+ sendSignal(ctupBlockref, GSN_START_RECREQ, signal, 2, JBB);
+ return;
+ } else {
+ jam();
+ /* ----------------------------------------------------------------
+           * WE HAVE NOT RECEIVED ALL FRAGMENTS YET, OR AT LEAST WE HAVE
+           * NOT RECEIVED THE START_RECREQ SIGNAL. EXIT AND WAIT
+ * FOR MORE.
+ * ---------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ }//if
+ /*---------------*/
+ /* ELSE */
+ /*-------------------------------------------------------------------------
+ * THE SYSTEM RESTART ON THIS FRAGMENT HAS NOT BEEN COMPLETED,
+ * EXIT AND WAIT FOR MORE SIGNALS
+ *-------------------------------------------------------------------------
+ * DO NOTHING, EXIT IS EXECUTED BELOW
+ *------------------------------------------------------------------------- */
+ return;
+}//Dblqh::srCompletedLab()
+
+/* ************> */
+/* TUP_SRREF > */
+/* ************> */
+void Dblqh::execTUP_SRREF(Signal* signal)
+{
+ jamEntry();
+ terrorCode = signal->theData[1];
+ systemErrorLab(signal);
+ return;
+}//Dblqh::execTUP_SRREF()
+
+/* ***************> */
+/* START_RECREQ > */
+/* ***************> */
+void Dblqh::execSTART_RECREQ(Signal* signal)
+{
+ CRASH_INSERTION(5027);
+
+ jamEntry();
+ StartRecReq * const req = (StartRecReq*)&signal->theData[0];
+ cmasterDihBlockref = req->senderRef;
+
+ crestartOldestGci = req->keepGci;
+ crestartNewestGci = req->lastCompletedGci;
+ cnewestGci = req->newestGci;
+
+ ndbrequire(req->receivingNodeId == cownNodeid);
+
+ cnewestCompletedGci = cnewestGci;
+ cstartRecReq = ZTRUE;
+ for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
+ ptrAss(logPartPtr, logPartRecord);
+ logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci;
+ }//for
+ /* ------------------------------------------------------------------------
+ * WE HAVE TO SET THE OLDEST AND THE NEWEST GLOBAL CHECKPOINT IDENTITY
+ * THAT WILL SURVIVE THIS SYSTEM RESTART. THIS IS NEEDED SO THAT WE CAN
+ * SET THE LOG HEAD AND LOG TAIL PROPERLY BEFORE STARTING THE SYSTEM AGAIN.
+ * WE ALSO NEED TO SET CNEWEST_GCI TO ENSURE THAT LOG RECORDS ARE EXECUTED
+ * WITH A PROPER GCI.
+ *------------------------------------------------------------------------ */
+ if (cstartType == NodeState::ST_NODE_RESTART) {
+ jam();
+ signal->theData[0] = ZSR_PHASE3_START;
+ signal->theData[1] = ZSR_PHASE2_COMPLETED;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }//if
+ if(cstartType == NodeState::ST_INITIAL_NODE_RESTART){
+ jam();
+ StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
+ conf->startingNodeId = getOwnNodeId();
+ sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
+ StartRecConf::SignalLength, JBB);
+ return;
+ }//if
+ if (cfirstWaitFragSr == RNIL) {
+ /* ----------------------------------------------------------------------
+ * THERE ARE NO FRAGMENTS WAITING TO BE RESTARTED.
+ * --------------------------------------------------------------------- */
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_IDLE) {
+ jam();
+ /* --------------------------------------------------------------------
+ * THERE ARE NO FRAGMENTS THAT ARE CURRENTLY PERFORMING THEIR
+ * SYSTEM RESTART.
+ * --------------------------------------------------------------------
+ * WE ARE NOW IN A POSITION TO ORDER TUP AND ACC TO START EXECUTING
+ * THEIR UNDO LOGS. THIS MUST BE DONE BEFORE THE FRAGMENT LOGS
+ * CAN BE EXECUTED.
+ * ------------------------------------------------------------------- */
+ csrExecUndoLogState = EULS_STARTED;
+ signal->theData[0] = caccBlockref;
+ signal->theData[1] = cownref;
+ sendSignal(caccBlockref, GSN_START_RECREQ, signal, 2, JBB);
+ signal->theData[0] = ctupBlockref;
+ signal->theData[1] = cownref;
+ sendSignal(ctupBlockref, GSN_START_RECREQ, signal, 2, JBB);
+ }//if
+ }//if
+ /* -----------------------------------------------------------------------
+ * EXIT AND WAIT FOR COMPLETION OF ALL FRAGMENTS.
+ * ----------------------------------------------------------------------- */
+ return;
+}//Dblqh::execSTART_RECREQ()
+
+/* ***************>> */
+/* START_RECCONF > */
+/* ***************>> */
+void Dblqh::execSTART_RECCONF(Signal* signal)
+{
+ jamEntry();
+ BlockReference userRef = signal->theData[0];
+ if (userRef == caccBlockref) {
+ if (csrExecUndoLogState == EULS_STARTED) {
+ jam();
+ csrExecUndoLogState = EULS_ACC_COMPLETED;
+ } else {
+ ndbrequire(csrExecUndoLogState == EULS_TUP_COMPLETED);
+ jam();
+ csrExecUndoLogState = EULS_COMPLETED;
+ /* --------------------------------------------------------------------
+ * START THE FIRST PHASE OF EXECUTION OF THE LOG.
+ * ------------------------------------------------------------------- */
+ startExecSr(signal);
+ }//if
+ } else {
+ ndbrequire(userRef == ctupBlockref);
+ if (csrExecUndoLogState == EULS_STARTED) {
+ jam();
+ csrExecUndoLogState = EULS_TUP_COMPLETED;
+ } else {
+ ndbrequire(csrExecUndoLogState == EULS_ACC_COMPLETED);
+ jam();
+ csrExecUndoLogState = EULS_COMPLETED;
+ /* --------------------------------------------------------------------
+ * START THE FIRST PHASE OF EXECUTION OF THE LOG.
+ * ------------------------------------------------------------------- */
+ startExecSr(signal);
+ }//if
+ }//if
+ return;
+}//Dblqh::execSTART_RECCONF()
+
+/* ***************> */
+/* START_RECREF > */
+/* ***************> */
+void Dblqh::execSTART_RECREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execSTART_RECREF()
+
+/* ***************>> */
+/* START_EXEC_SR > */
+/* ***************>> */
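+/* --------------------------------------------------------------------------
+ * THIS SIGNAL IS SENT TO OURSELVES TO WALK THE LIST OF COMPLETED FRAGMENTS,
+ * ONE FRAGMENT PER SIGNAL. A FRAGMENT THAT STILL HAS LOG NODES LEFT IN THIS
+ * PHASE IS SENT AN EXEC_FRAGREQ TO THE RESPONSIBLE LQH. A FRAGMENT THAT IS
+ * FINISHED IS REMOVED FROM THE LIST, PUT ON THE LIST OF FRAGMENTS WITH
+ * COMPLETED REDO LOG AND REPORTED TO DIH WITH START_FRAGCONF. WHEN THE END
+ * OF THE LIST IS REACHED EXEC_SRREQ IS SENT TO THE LQH OF ALL NODES THAT
+ * ARE UP.
+ * -------------------------------------------------------------------------- */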
+void Dblqh::execSTART_EXEC_SR(Signal* signal)
+{
+ FragrecordPtr prevFragptr;
+ jamEntry();
+ fragptr.i = signal->theData[0];
+ prevFragptr.i = signal->theData[1];
+ if (fragptr.i == RNIL) {
+ jam();
+ ndbrequire(cnoOfNodes < MAX_NDB_NODES);
+ /* ----------------------------------------------------------------------
+ * NO MORE FRAGMENTS TO START EXECUTING THE LOG ON.
+ * SEND EXEC_SRREQ TO ALL LQH TO INDICATE THAT THIS NODE WILL
+ * NOT REQUEST ANY MORE FRAGMENTS TO EXECUTE THE FRAGMENT LOG ON.
+ * ----------------------------------------------------------------------
+ * WE NEED TO SEND THOSE SIGNALS EVEN IF WE HAVE NOT REQUESTED
+     * THAT ANY FRAGMENTS PARTICIPATE IN THIS PHASE.
+ * --------------------------------------------------------------------- */
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ if (cnodeStatus[i] == ZNODE_UP) {
+ jam();
+ ndbrequire(cnodeData[i] < MAX_NDB_NODES);
+ BlockReference ref = calcLqhBlockRef(cnodeData[i]);
+ signal->theData[0] = cownNodeid;
+ sendSignal(ref, GSN_EXEC_SRREQ, signal, 1, JBB);
+ }//if
+ }//for
+ } else {
+ jam();
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (fragptr.p->srNoLognodes > csrPhasesCompleted) {
+ jam();
+ Uint32 index = csrPhasesCompleted;
+ arrGuard(index, 4);
+ BlockReference ref = calcLqhBlockRef(fragptr.p->srLqhLognode[index]);
+ fragptr.p->srStatus = Fragrecord::SS_STARTED;
+ /* --------------------------------------------------------------------
+ * SINCE WE CAN HAVE SEVERAL LQH NODES PER FRAGMENT WE CALCULATE
+ * THE LQH POINTER IN SUCH A WAY THAT WE CAN DEDUCE WHICH OF THE
+       * LQH NODES HAS RESPONDED WHEN EXEC_FRAGCONF IS RECEIVED.
+ * ------------------------------------------------------------------- */
+ ExecFragReq * const execFragReq = (ExecFragReq *)&signal->theData[0];
+ execFragReq->userPtr = fragptr.i;
+ execFragReq->userRef = cownref;
+ execFragReq->tableId = fragptr.p->tabRef;
+ execFragReq->fragId = fragptr.p->fragId;
+ execFragReq->startGci = fragptr.p->srStartGci[index];
+ execFragReq->lastGci = fragptr.p->srLastGci[index];
+ sendSignal(ref, GSN_EXEC_FRAGREQ, signal, ExecFragReq::SignalLength, JBB);
+ prevFragptr.i = fragptr.i;
+ fragptr.i = fragptr.p->nextFrag;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+ * THIS FRAGMENT IS NOW FINISHED WITH THE SYSTEM RESTART. IT DOES
+ * NOT NEED TO PARTICIPATE IN ANY MORE PHASES. REMOVE IT FROM THE
+ * LIST OF COMPLETED FRAGMENTS TO EXECUTE THE LOG ON.
+ * ALSO SEND START_FRAGCONF TO DIH AND SET THE STATE TO ACTIVE ON THE
+ * FRAGMENT.
+ * ------------------------------------------------------------------- */
+ Uint32 next = fragptr.p->nextFrag;
+ if (prevFragptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(prevFragptr, cfragrecFileSize, fragrecord);
+ prevFragptr.p->nextFrag = next;
+ } else {
+ jam();
+ cfirstCompletedFragSr = next;
+ }//if
+
+ /**
+ * Put fragment on list which has completed REDO log
+ */
+ fragptr.p->nextFrag = c_redo_log_complete_frags;
+ c_redo_log_complete_frags = fragptr.i;
+
+ fragptr.p->fragStatus = Fragrecord::FSACTIVE;
+ fragptr.p->logFlag = Fragrecord::STATE_TRUE;
+ signal->theData[0] = fragptr.p->srUserptr;
+ signal->theData[1] = cownNodeid;
+ sendSignal(fragptr.p->srBlockref, GSN_START_FRAGCONF, signal, 2, JBB);
+ /* --------------------------------------------------------------------
+ * WE HAVE TO ENSURE THAT THIS FRAGMENT IS NOT PUT BACK ON THE LIST BY
+       * MISTAKE. WE DO THIS BY ALSO REMOVING IT AS PREVIOUS IN START_EXEC_SR.
+       * THIS IS PERFORMED BY KEEPING PREV_FRAGPTR UNCHANGED WHILE MOVING
+       * FRAGPTR TO THE NEXT FRAGMENT IN THE LIST.
+ * ------------------------------------------------------------------- */
+ fragptr.i = next;
+ }//if
+ signal->theData[0] = fragptr.i;
+ signal->theData[1] = prevFragptr.i;
+ sendSignal(cownref, GSN_START_EXEC_SR, signal, 2, JBB);
+ }//if
+ return;
+}//Dblqh::execSTART_EXEC_SR()
+
+/* ***************> */
+/* EXEC_FRAGREQ > */
+/* ***************> */
+/* --------------------------------------------------------------------------
+ * THIS SIGNAL IS USED TO REQUEST THAT A FRAGMENT PARTICIPATES IN EXECUTING
+ * THE LOG IN THIS NODE.
+ * ------------------------------------------------------------------------- */
+void Dblqh::execEXEC_FRAGREQ(Signal* signal)
+{
+ ExecFragReq * const execFragReq = (ExecFragReq *)&signal->theData[0];
+ jamEntry();
+ tabptr.i = execFragReq->tableId;
+ Uint32 fragId = execFragReq->fragId;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ if (!getFragmentrec(signal, fragId)) {
+ jam();
+ if (!insertFragrec(signal, fragId)) {
+ jam();
+ sendExecFragRefLab(signal);
+ return;
+ }//if
+ initFragrec(signal, tabptr.i, fragId, ZLOG_NODE);
+ fragptr.p->execSrStatus = Fragrecord::ACTIVE_REMOVE_AFTER;
+ } else {
+ jam();
+ if (fragptr.p->execSrStatus == Fragrecord::ACTIVE_REMOVE_AFTER) {
+ jam();
+ fragptr.p->execSrStatus = Fragrecord::ACTIVE_REMOVE_AFTER;
+ } else {
+ jam();
+ }//if
+ }//if
+ ndbrequire(fragptr.p->execSrNoReplicas < 4);
+ fragptr.p->execSrBlockref[fragptr.p->execSrNoReplicas] = execFragReq->userRef;
+ fragptr.p->execSrUserptr[fragptr.p->execSrNoReplicas] = execFragReq->userPtr;
+ fragptr.p->execSrStartGci[fragptr.p->execSrNoReplicas] = execFragReq->startGci;
+ fragptr.p->execSrLastGci[fragptr.p->execSrNoReplicas] = execFragReq->lastGci;
+ fragptr.p->execSrStatus = Fragrecord::ACTIVE;
+ fragptr.p->execSrNoReplicas++;
+ cnoFragmentsExecSr++;
+ return;
+}//Dblqh::execEXEC_FRAGREQ()
+
+void Dblqh::sendExecFragRefLab(Signal* signal)
+{
+ ExecFragReq * const execFragReq = (ExecFragReq *)&signal->theData[0];
+ BlockReference retRef = execFragReq->userRef;
+ Uint32 retPtr = execFragReq->userPtr;
+
+ signal->theData[0] = retPtr;
+ signal->theData[1] = terrorCode;
+ sendSignal(retRef, GSN_EXEC_FRAGREF, signal, 2, JBB);
+ return;
+}//Dblqh::sendExecFragRefLab()
+
+/* ***************>> */
+/* EXEC_FRAGCONF > */
+/* ***************>> */
+void Dblqh::execEXEC_FRAGCONF(Signal* signal)
+{
+ jamEntry();
+ fragptr.i = signal->theData[0];
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.p->srStatus = Fragrecord::SS_COMPLETED;
+ return;
+}//Dblqh::execEXEC_FRAGCONF()
+
+/* ***************> */
+/* EXEC_FRAGREF > */
+/* ***************> */
+void Dblqh::execEXEC_FRAGREF(Signal* signal)
+{
+ jamEntry();
+ terrorCode = signal->theData[1];
+ systemErrorLab(signal);
+ return;
+}//Dblqh::execEXEC_FRAGREF()
+
+/* *************** */
+/* EXEC_SRCONF > */
+/* *************** */
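+/* --------------------------------------------------------------------------
+ * A NODE REPORTS THAT IT HAS COMPLETED EXECUTION OF THE FRAGMENT LOGS FOR
+ * THIS PHASE. WHEN ALL NODES THAT ARE UP HAVE REPORTED, THE NODE STATES ARE
+ * CLEARED, THE COMPLETED FRAGMENTS ARE VERIFIED TO HAVE FINISHED AND THE
+ * NEXT PHASE IS STARTED.
+ * -------------------------------------------------------------------------- */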
+void Dblqh::execEXEC_SRCONF(Signal* signal)
+{
+ jamEntry();
+ Uint32 nodeId = signal->theData[0];
+ arrGuard(nodeId, MAX_NDB_NODES);
+ cnodeExecSrState[nodeId] = ZEXEC_SR_COMPLETED;
+ ndbrequire(cnoOfNodes < MAX_NDB_NODES);
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ if (cnodeStatus[i] == ZNODE_UP) {
+ jam();
+ nodeId = cnodeData[i];
+ arrGuard(nodeId, MAX_NDB_NODES);
+ if (cnodeExecSrState[nodeId] != ZEXEC_SR_COMPLETED) {
+ jam();
+ /* ------------------------------------------------------------------
+ * ALL NODES HAVE NOT REPORTED COMPLETION OF EXECUTING FRAGMENT
+ * LOGS YET.
+ * ----------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ }//for
+ /* ------------------------------------------------------------------------
+ * CLEAR NODE SYSTEM RESTART EXECUTION STATE TO PREPARE FOR NEXT PHASE OF
+ * LOG EXECUTION.
+ * ----------------------------------------------------------------------- */
+ for (nodeId = 0; nodeId < MAX_NDB_NODES; nodeId++) {
+ cnodeExecSrState[nodeId] = ZSTART_SR;
+ }//for
+ /* ------------------------------------------------------------------------
+ * NOW CHECK IF ALL FRAGMENTS IN THIS PHASE HAVE COMPLETED. IF SO START THE
+ * NEXT PHASE.
+ * ----------------------------------------------------------------------- */
+ fragptr.i = cfirstCompletedFragSr;
+ if (fragptr.i == RNIL) {
+ jam();
+ execSrCompletedLab(signal);
+ return;
+ }//if
+ do {
+ jam();
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ ndbrequire(fragptr.p->srStatus == Fragrecord::SS_COMPLETED);
+ fragptr.i = fragptr.p->nextFrag;
+ } while (fragptr.i != RNIL);
+ execSrCompletedLab(signal);
+ return;
+}//Dblqh::execEXEC_SRCONF()
+
+void Dblqh::execSrCompletedLab(Signal* signal)
+{
+ csrPhasesCompleted++;
+ /* ------------------------------------------------------------------------
+ * ALL FRAGMENTS WERE COMPLETED. THIS PHASE IS COMPLETED. IT IS NOW TIME TO
+ * START THE NEXT PHASE.
+ * ----------------------------------------------------------------------- */
+ if (csrPhasesCompleted >= 4) {
+ jam();
+ /* ----------------------------------------------------------------------
+     * THIS WAS THE LAST PHASE. WE HAVE NOW COMPLETED THE EXECUTION OF THE
+ * FRAGMENT LOGS IN ALL NODES. BEFORE WE SEND START_RECCONF TO THE
+ * MASTER DIH TO INDICATE A COMPLETED SYSTEM RESTART IT IS NECESSARY
+ * TO FIND THE HEAD AND THE TAIL OF THE LOG WHEN NEW OPERATIONS START
+ * TO COME AGAIN.
+ *
+ * THE FIRST STEP IS TO FIND THE HEAD AND TAIL MBYTE OF EACH LOG PART.
+ * TO DO THIS WE REUSE THE CONTINUEB SIGNAL SR_LOG_LIMITS. THEN WE
+ * HAVE TO FIND THE ACTUAL PAGE NUMBER AND PAGE INDEX WHERE TO
+ * CONTINUE WRITING THE LOG AFTER THE SYSTEM RESTART.
+ * --------------------------------------------------------------------- */
+ for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+ logPartPtr.p->logPartState = LogPartRecord::SR_FOURTH_PHASE_STARTED;
+ logPartPtr.p->logLastGci = crestartNewestGci;
+ logPartPtr.p->logStartGci = crestartOldestGci;
+ logPartPtr.p->logExecState = LogPartRecord::LES_SEARCH_STOP;
+ if (logPartPtr.p->headFileNo == ZNIL) {
+ jam();
+ /* -----------------------------------------------------------------
+           * IF WE HAVEN'T FOUND ANY HEAD OF THE LOG THEN WE HAVE A SERIOUS
+ * PROBLEM. THIS SHOULD NOT OCCUR. IF IT OCCURS ANYWAY THEN WE
+ * HAVE TO FIND A CURE FOR THIS PROBLEM.
+ * ----------------------------------------------------------------- */
+ systemErrorLab(signal);
+ return;
+ }//if
+ signal->theData[0] = ZSR_LOG_LIMITS;
+ signal->theData[1] = logPartPtr.i;
+ signal->theData[2] = logPartPtr.p->lastLogfile;
+ signal->theData[3] = logPartPtr.p->lastMbyte;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+ }//for
+ return;
+ } else {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THERE ARE YET MORE PHASES TO RESTART.
+     * WE MUST INITIALISE DATA FOR THE NEXT PHASE AND SEND THE START SIGNAL.
+ * --------------------------------------------------------------------- */
+ startExecSr(signal);
+ }//if
+ return;
+}//Dblqh::execSrCompletedLab()
+
+/* ************>> */
+/* EXEC_SRREQ > */
+/* ************>> */
+void Dblqh::execEXEC_SRREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 nodeId = signal->theData[0];
+ ndbrequire(nodeId < MAX_NDB_NODES);
+ cnodeSrState[nodeId] = ZEXEC_SR_COMPLETED;
+ ndbrequire(cnoOfNodes < MAX_NDB_NODES);
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ if (cnodeStatus[i] == ZNODE_UP) {
+ jam();
+ nodeId = cnodeData[i];
+ if (cnodeSrState[nodeId] != ZEXEC_SR_COMPLETED) {
+ jam();
+ /* ------------------------------------------------------------------
+ * ALL NODES HAVE NOT REPORTED COMPLETION OF SENDING EXEC_FRAGREQ YET.
+ * ----------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ }//for
+ /* ------------------------------------------------------------------------
+ * CLEAR NODE SYSTEM RESTART STATE TO PREPARE FOR NEXT PHASE OF LOG
+ * EXECUTION
+ * ----------------------------------------------------------------------- */
+ for (nodeId = 0; nodeId < MAX_NDB_NODES; nodeId++) {
+ cnodeSrState[nodeId] = ZSTART_SR;
+ }//for
+ if (csrPhasesCompleted != 0) {
+ /* ----------------------------------------------------------------------
+ * THE FIRST PHASE MUST ALWAYS EXECUTE THE LOG.
+ * --------------------------------------------------------------------- */
+ if (cnoFragmentsExecSr == 0) {
+ jam();
+ /* --------------------------------------------------------------------
+ * THERE WERE NO FRAGMENTS THAT NEEDED TO EXECUTE THE LOG IN THIS PHASE.
+ * ------------------------------------------------------------------- */
+ srPhase3Comp(signal);
+ return;
+ }//if
+ }//if
+ /* ------------------------------------------------------------------------
+ * NOW ALL NODES HAVE SENT ALL EXEC_FRAGREQ. NOW WE CAN START EXECUTING THE
+ * LOG FROM THE MINIMUM GCI NEEDED UNTIL THE MAXIMUM GCI NEEDED.
+ *
+ * WE MUST FIRST CHECK IF THE FIRST PHASE OF THE SYSTEM RESTART HAS BEEN
+ * COMPLETED. THIS HANDLING IS PERFORMED IN THE FILE SYSTEM MODULE
+ * ----------------------------------------------------------------------- */
+ signal->theData[0] = ZSR_PHASE3_START;
+ signal->theData[1] = ZSR_PHASE2_COMPLETED;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}//Dblqh::execEXEC_SRREQ()
+
+/* ######################################################################### */
+/* SYSTEM RESTART PHASE THREE MODULE */
+/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
+/* */
+/* THIS MODULE IS CONCERNED WITH EXECUTING THE FRAGMENT LOG. IT DOES ALSO */
+/* CONTAIN SIGNAL RECEPTIONS LQHKEYCONF AND LQHKEYREF SINCE LQHKEYREQ IS USED*/
+/* TO EXECUTE THE LOG RECORDS. */
+/* */
+/* BEFORE IT STARTS IT HAS BEEN DECIDED WHERE TO START AND WHERE TO STOP */
+/* READING THE FRAGMENT LOG BY USING THE INFORMATION ABOUT GCI DISCOVERED IN */
+/* PHASE ONE OF THE SYSTEM RESTART. */
+/* ######################################################################### */
+/*---------------------------------------------------------------------------*/
+/* PHASE THREE OF THE SYSTEM RESTART CAN NOW START. ONE OF THE PHASES HAS   */
+/* COMPLETED. */
+/*---------------------------------------------------------------------------*/
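+/*---------------------------------------------------------------------------*/
+/* SR_PHASE3_START IS RECEIVED TWICE, ONCE WHEN PHASE ONE COMPLETES AND ONCE */
+/* WHEN PHASE TWO COMPLETES. THE FIRST INVOCATION ONLY RECORDS WHICH PHASE   */
+/* COMPLETED; THE SECOND ONE STARTS THE COMPUTATION OF THE GCI LIMITS FOR    */
+/* EACH LOG PART.                                                            */
+/*---------------------------------------------------------------------------*/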
+void Dblqh::srPhase3Start(Signal* signal)
+{
+ UintR tsrPhaseStarted;
+
+ jamEntry();
+ tsrPhaseStarted = signal->theData[0];
+ if (csrPhaseStarted == ZSR_NO_PHASE_STARTED) {
+ jam();
+ csrPhaseStarted = tsrPhaseStarted;
+ if (cstartType == NodeState::ST_NODE_RESTART) {
+ ndbrequire(cinitialStartOngoing == ZTRUE);
+ cinitialStartOngoing = ZFALSE;
+ checkStartCompletedLab(signal);
+ }//if
+ return;
+ }//if
+ ndbrequire(csrPhaseStarted != tsrPhaseStarted);
+ ndbrequire(csrPhaseStarted != ZSR_BOTH_PHASES_STARTED);
+
+ csrPhaseStarted = ZSR_BOTH_PHASES_STARTED;
+ for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+ logPartPtr.p->logPartState = LogPartRecord::SR_THIRD_PHASE_STARTED;
+ logPartPtr.p->logStartGci = (UintR)-1;
+ if (csrPhasesCompleted == 0) {
+ jam();
+ /* --------------------------------------------------------------------
+       * IN THE FIRST PHASE WE MUST ENSURE THAT IT REACHES THE END OF THE LOG.
+ * ------------------------------------------------------------------- */
+ logPartPtr.p->logLastGci = crestartNewestGci;
+ } else {
+ jam();
+ logPartPtr.p->logLastGci = 2;
+ }//if
+ }//for
+ if (cstartType == NodeState::ST_NODE_RESTART) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * FOR A NODE RESTART WE HAVE NO FRAGMENTS DEFINED YET.
+ * THUS WE CAN SKIP THAT PART
+ * --------------------------------------------------------------------- */
+ signal->theData[0] = ZSR_GCI_LIMITS;
+ signal->theData[1] = RNIL;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ } else {
+ jam();
+ signal->theData[0] = ZSR_GCI_LIMITS;
+ signal->theData[1] = 0;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+ return;
+}//Dblqh::srPhase3Start()
+
+/* --------------------------------------------------------------------------
+ * WE NOW NEED TO FIND THE LIMITS WITHIN WHICH TO EXECUTE
+ * THE FRAGMENT LOG
+ * ------------------------------------------------------------------------- */
+void Dblqh::srGciLimits(Signal* signal)
+{
+ LogPartRecordPtr tmpLogPartPtr;
+
+ jamEntry();
+ fragptr.i = signal->theData[0];
+ Uint32 loopCount = 0;
+ logPartPtr.i = 0;
+ ptrAss(logPartPtr, logPartRecord);
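+  /* ------------------------------------------------------------------------
+   * THE LIMITS ARE ACCUMULATED IN LOG PART 0 WHILE SCANNING ALL FRAGMENTS
+   * AND ARE COPIED TO THE OTHER LOG PARTS WHEN THE SCAN IS COMPLETE.
+   * ----------------------------------------------------------------------- */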
+ while (fragptr.i < cfragrecFileSize) {
+ jam();
+ ptrAss(fragptr, fragrecord);
+ if (fragptr.p->execSrStatus != Fragrecord::IDLE) {
+ jam();
+ ndbrequire(fragptr.p->execSrNoReplicas - 1 < 4);
+ for (Uint32 i = 0; i < fragptr.p->execSrNoReplicas; i++) {
+ jam();
+ if (fragptr.p->execSrStartGci[i] < logPartPtr.p->logStartGci) {
+ jam();
+ logPartPtr.p->logStartGci = fragptr.p->execSrStartGci[i];
+ }//if
+ if (fragptr.p->execSrLastGci[i] > logPartPtr.p->logLastGci) {
+ jam();
+ logPartPtr.p->logLastGci = fragptr.p->execSrLastGci[i];
+ }//if
+ }//for
+ }//if
+ loopCount++;
+ if (loopCount > 20) {
+ jam();
+ signal->theData[0] = ZSR_GCI_LIMITS;
+ signal->theData[1] = fragptr.i + 1;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ } else {
+ jam();
+ fragptr.i++;
+ }//if
+ }//while
+ if (logPartPtr.p->logStartGci == (UintR)-1) {
+ jam();
+ /* --------------------------------------------------------------------
+     * THERE WERE NO FRAGMENTS TO INSTALL, SO WE WILL EXECUTE AS LITTLE OF
+     * THE LOG AS POSSIBLE, JUST ENOUGH TO REACH THE END OF THE LOG. WE DO
+     * THIS BY STARTING AT THE STOP GCI.
+ * ------------------------------------------------------------------- */
+ logPartPtr.p->logStartGci = logPartPtr.p->logLastGci;
+ }//if
+ for (tmpLogPartPtr.i = 1; tmpLogPartPtr.i < 4; tmpLogPartPtr.i++) {
+ ptrAss(tmpLogPartPtr, logPartRecord);
+ tmpLogPartPtr.p->logStartGci = logPartPtr.p->logStartGci;
+ tmpLogPartPtr.p->logLastGci = logPartPtr.p->logLastGci;
+ }//for
+ for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+ logPartPtr.p->logExecState = LogPartRecord::LES_SEARCH_STOP;
+ signal->theData[0] = ZSR_LOG_LIMITS;
+ signal->theData[1] = logPartPtr.i;
+ signal->theData[2] = logPartPtr.p->lastLogfile;
+ signal->theData[3] = logPartPtr.p->lastMbyte;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+ }//for
+}//Dblqh::srGciLimits()
+
+/* --------------------------------------------------------------------------
+ * IT IS NOW TIME TO FIND WHERE TO START EXECUTING THE LOG.
+ * THIS SIGNAL IS SENT FOR EACH LOG PART AND STARTS THE EXECUTION
+ * OF THE LOG FOR THIS PART.
+ *-------------------------------------------------------------------------- */
+void Dblqh::srLogLimits(Signal* signal)
+{
+ Uint32 tlastPrepRef;
+ Uint32 tmbyte;
+
+ jamEntry();
+ logPartPtr.i = signal->theData[0];
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logFilePtr.i = signal->theData[1];
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ tmbyte = signal->theData[2];
+ Uint32 loopCount = 0;
+ /* ------------------------------------------------------------------------
+ * WE ARE SEARCHING FOR THE START AND STOP MBYTE OF THE LOG THAT IS TO BE
+ * EXECUTED.
+ * ----------------------------------------------------------------------- */
+ while(true) {
+ ndbrequire(tmbyte < 16);
+ if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_STOP) {
+ if (logFilePtr.p->logMaxGciCompleted[tmbyte] < logPartPtr.p->logLastGci) {
+ jam();
+ /* --------------------------------------------------------------------
+ * WE ARE STEPPING BACKWARDS FROM MBYTE TO MBYTE. THIS IS THE FIRST
+ * MBYTE WHICH IS TO BE INCLUDED IN THE LOG EXECUTION. THE STOP GCI
+ * HAS NOT BEEN COMPLETED BEFORE THIS MBYTE. THUS THIS MBYTE HAVE
+         * HAS NOT BEEN COMPLETED BEFORE THIS MBYTE. THUS THIS MBYTE HAS
+ * ------------------------------------------------------------------- */
+ logPartPtr.p->stopLogfile = logFilePtr.i;
+ logPartPtr.p->stopMbyte = tmbyte;
+ logPartPtr.p->logExecState = LogPartRecord::LES_SEARCH_START;
+ }//if
+ }//if
+ /* ------------------------------------------------------------------------
+ * WHEN WE HAVEN'T FOUND THE STOP MBYTE IT IS NOT NECESSARY TO LOOK FOR THE
+ * START MBYTE. THE REASON IS THE FOLLOWING LOGIC CHAIN:
+ * MAX_GCI_STARTED >= MAX_GCI_COMPLETED >= LAST_GCI >= START_GCI
+   * THUS MAX_GCI_STARTED >= START_GCI, SO THE START CONDITION
+   * (MAX_GCI_STARTED < START_GCI) CANNOT YET BE TRUE AND NEED NOT BE CHECKED.
+ * ----------------------------------------------------------------------- */
+ if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_START) {
+ if (logFilePtr.p->logMaxGciStarted[tmbyte] < logPartPtr.p->logStartGci) {
+ jam();
+ /* --------------------------------------------------------------------
+ * WE HAVE NOW FOUND THE START OF THE EXECUTION OF THE LOG.
+ * WE STILL HAVE TO MOVE IT BACKWARDS TO ALSO INCLUDE THE
+ * PREPARE RECORDS WHICH WERE STARTED IN A PREVIOUS MBYTE.
+ * ------------------------------------------------------------------- */
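+        /* --------------------------------------------------------------------
+         * LOG_LAST_PREP_REF PACKS A LOG FILE NUMBER IN THE UPPER 16 BITS AND
+         * AN MBYTE NUMBER IN THE LOWER 16 BITS.
+         * ------------------------------------------------------------------- */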
+ tlastPrepRef = logFilePtr.p->logLastPrepRef[tmbyte];
+ logPartPtr.p->startMbyte = tlastPrepRef & 65535;
+ LogFileRecordPtr locLogFilePtr;
+ findLogfile(signal, tlastPrepRef >> 16, logPartPtr, &locLogFilePtr);
+ logPartPtr.p->startLogfile = locLogFilePtr.i;
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG;
+ }//if
+ }//if
+ if (logPartPtr.p->logExecState != LogPartRecord::LES_EXEC_LOG) {
+ if (tmbyte == 0) {
+ jam();
+ tmbyte = ZNO_MBYTES_IN_FILE - 1;
+ logFilePtr.i = logFilePtr.p->prevLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ } else {
+ jam();
+ tmbyte--;
+ }//if
+ if (logPartPtr.p->lastLogfile == logFilePtr.i) {
+ ndbrequire(logPartPtr.p->lastMbyte != tmbyte);
+ }//if
+ if (loopCount > 20) {
+ jam();
+ signal->theData[0] = ZSR_LOG_LIMITS;
+ signal->theData[1] = logPartPtr.i;
+ signal->theData[2] = logFilePtr.i;
+ signal->theData[3] = tmbyte;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+ return;
+ }//if
+ loopCount++;
+ } else {
+ jam();
+ break;
+ }//if
+ }//while
+ /* ------------------------------------------------------------------------
+ * WE HAVE NOW FOUND BOTH THE START AND THE STOP OF THE LOG. NOW START
+ * EXECUTING THE LOG. THE FIRST ACTION IS TO OPEN THE LOG FILE WHERE TO
+ * START EXECUTING THE LOG.
+ * ----------------------------------------------------------------------- */
+ if (logPartPtr.p->logPartState == LogPartRecord::SR_THIRD_PHASE_STARTED) {
+ jam();
+ logFilePtr.i = logPartPtr.p->startLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN_EXEC_SR_START;
+ openFileRw(signal, logFilePtr);
+ } else {
+ jam();
+ ndbrequire(logPartPtr.p->logPartState == LogPartRecord::SR_FOURTH_PHASE_STARTED);
+ /* --------------------------------------------------------------------
+ * WE HAVE NOW FOUND THE TAIL MBYTE IN THE TAIL FILE.
+ * SET THOSE PARAMETERS IN THE LOG PART.
+ * WE HAVE ALSO FOUND THE HEAD MBYTE. WE STILL HAVE TO SEARCH
+ * FOR THE PAGE NUMBER AND PAGE INDEX WHERE TO SET THE HEAD.
+ * ------------------------------------------------------------------- */
+ logFilePtr.i = logPartPtr.p->startLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPartPtr.p->logTailFileNo = logFilePtr.p->fileNo;
+ logPartPtr.p->logTailMbyte = logPartPtr.p->startMbyte;
+ /* --------------------------------------------------------------------
+     * THE HEAD WAS ACTUALLY FOUND DURING EXECUTION OF THE LOG, SO WE USE
+     * THAT INFO HERE RATHER THAN THE MBYTE WE FOUND TO BE THE HEAD.
+ * ------------------------------------------------------------------- */
+ LogFileRecordPtr locLogFilePtr;
+ findLogfile(signal, logPartPtr.p->headFileNo, logPartPtr, &locLogFilePtr);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_PHASE;
+ openFileRw(signal, locLogFilePtr);
+ }//if
+ return;
+}//Dblqh::srLogLimits()
+
+void Dblqh::openExecSrStartLab(Signal* signal)
+{
+ logPartPtr.p->currentLogfile = logFilePtr.i;
+ logFilePtr.p->currentMbyte = logPartPtr.p->startMbyte;
+ /* ------------------------------------------------------------------------
+ * WE NEED A TC CONNECT RECORD TO HANDLE EXECUTION OF LOG RECORDS.
+ * ------------------------------------------------------------------------ */
+ seizeTcrec();
+ logPartPtr.p->logTcConrec = tcConnectptr.i;
+ /* ------------------------------------------------------------------------
+ * THE FIRST LOG RECORD TO EXECUTE IS ALWAYS AT A NEW MBYTE.
+ * SET THE NUMBER OF PAGES IN THE MAIN MEMORY BUFFER TO ZERO AS AN INITIAL
+ * VALUE. THIS VALUE WILL BE UPDATED AND ENSURED THAT IT RELEASES PAGES IN
+ * THE SUBROUTINE READ_EXEC_SR.
+ * ----------------------------------------------------------------------- */
+ logPartPtr.p->mmBufferSize = 0;
+ readExecSrNewMbyte(signal);
+ return;
+}//Dblqh::openExecSrStartLab()
+
+/* ---------------------------------------------------------------------------
+ * WE WILL ALWAYS ENSURE THAT WE HAVE AT LEAST 16 KBYTE OF LOG PAGES WHEN WE
+ * START READING A LOG RECORD. THE ONLY EXCEPTION IS WHEN WE COME CLOSE TO A
+ * MBYTE BOUNDARY. SINCE WE KNOW THAT LOG RECORDS ARE NEVER WRITTEN ACROSS A
+ * MBYTE BOUNDARY THIS IS NOT A PROBLEM.
+ *
+ * WE START BY READING 64 KBYTE BEFORE STARTING TO EXECUTE THE LOG RECORDS.
+ * WHEN WE COME BELOW 64 KBYTE WE READ ANOTHER SET OF LOG PAGES. WHEN WE
+ * GO BELOW 16 KBYTE WE WAIT UNTIL THE READ PAGES HAVE ENTERED THE BLOCK.
+ * ------------------------------------------------------------------------- */
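+/* --------------------------------------------------------------------------
+ * THE 16 KBYTE LIMIT MENTIONED ABOVE IS CHECKED AS EXEC_SR_PAGES_READ BEING
+ * BELOW ZMIN_READ_BUFFER_SIZE IN EXEC_SR BELOW.
+ * ------------------------------------------------------------------------- */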
+/* --------------------------------------------------------------------------
+ * NEW PAGES FROM LOG FILE DURING EXECUTION OF LOG HAS ARRIVED.
+ * ------------------------------------------------------------------------- */
+void Dblqh::readExecSrLab(Signal* signal)
+{
+ buildLinkedLogPageList(signal);
+ /* ------------------------------------------------------------------------
+ * WE NEED TO SET THE CURRENT PAGE INDEX OF THE FIRST PAGE SINCE IT CAN BE
+ * USED IMMEDIATELY WITHOUT ANY OTHER INITIALISATION. THE REST OF THE PAGES
+ * WILL BE INITIALISED BY READ_LOGWORD.
+ * ----------------------------------------------------------------------- */
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
+ if (logPartPtr.p->logExecState ==
+ LogPartRecord::LES_WAIT_READ_EXEC_SR_NEW_MBYTE) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THIS IS THE FIRST READ DURING THE EXECUTION OF THIS MBYTE. SET THE
+ * NEW CURRENT LOG PAGE TO THE FIRST OF THESE PAGES. CHANGE
+ * LOG_EXEC_STATE TO ENSURE THAT WE START EXECUTION OF THE LOG.
+ * --------------------------------------------------------------------- */
+ logFilePtr.p->currentFilepage = logFilePtr.p->currentMbyte *
+ ZPAGES_IN_MBYTE;
+ logPartPtr.p->prevFilepage = logFilePtr.p->currentFilepage;
+ logFilePtr.p->currentLogpage = lfoPtr.p->firstLfoPage;
+ logPartPtr.p->prevLogpage = logFilePtr.p->currentLogpage;
+ }//if
+ moveToPageRef(signal);
+ releaseLfo(signal);
+ /* ------------------------------------------------------------------------
+ * NOW WE HAVE COMPLETED THE RECEPTION OF THESE PAGES.
+ * NOW CHECK IF WE NEED TO READ MORE PAGES.
+ * ----------------------------------------------------------------------- */
+ checkReadExecSr(signal);
+ if (logPartPtr.p->logExecState == LogPartRecord::LES_EXEC_LOG) {
+ jam();
+ signal->theData[0] = ZEXEC_SR;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }//if
+ return;
+}//Dblqh::readExecSrLab()
+
+void Dblqh::openExecSrNewMbyteLab(Signal* signal)
+{
+ readExecSrNewMbyte(signal);
+ return;
+}//Dblqh::openExecSrNewMbyteLab()
+
+void Dblqh::closeExecSrLab(Signal* signal)
+{
+ LogFileRecordPtr locLogFilePtr;
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ logPartPtr.i = logFilePtr.p->logPartRec;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ locLogFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_EXEC_SR_NEW_MBYTE;
+ openFileRw(signal, locLogFilePtr);
+ return;
+}//Dblqh::closeExecSrLab()
+
+void Dblqh::writeDirtyLab(Signal* signal)
+{
+ releaseLfo(signal);
+ signal->theData[0] = logPartPtr.i;
+ execSr(signal);
+ return;
+}//Dblqh::writeDirtyLab()
+
+/* --------------------------------------------------------------------------
+ * EXECUTE A LOG RECORD WITHIN THE CURRENT MBYTE.
+ * ------------------------------------------------------------------------- */
+void Dblqh::execSr(Signal* signal)
+{
+ LogFileRecordPtr nextLogFilePtr;
+ LogPageRecordPtr tmpLogPagePtr;
+ Uint32 logWord;
+
+ jamEntry();
+ logPartPtr.i = signal->theData[0];
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+
+ do {
+ jam();
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logPartPtr.p->prevLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ if (logPagePtr.p->logPageWord[ZPOS_DIRTY] == ZDIRTY) {
+ jam();
+ switch (logPartPtr.p->logExecState) {
+ case LogPartRecord::LES_EXEC_LOG_COMPLETED:
+ case LogPartRecord::LES_EXEC_LOG_NEW_FILE:
+ case LogPartRecord::LES_EXEC_LOG_NEW_MBYTE:
+ jam();
+ /* ------------------------------------------------------------------
+       * IN THIS CASE WE HAVE COMPLETED EXECUTION OF THE CURRENT LOG PAGE
+ * AND CAN WRITE IT TO DISK SINCE IT IS DIRTY.
+ * ----------------------------------------------------------------- */
+ writeDirty(signal);
+ return;
+ break;
+ case LogPartRecord::LES_EXEC_LOG:
+ jam();
+ /* --------------------------------------------------------------------
+ * IN THIS CASE WE ONLY WRITE THE PAGE TO DISK IF WE HAVE COMPLETED
+ * EXECUTION OF LOG RECORDS BELONGING TO THIS LOG PAGE.
+ * ------------------------------------------------------------------- */
+ if (logFilePtr.p->currentLogpage != logPartPtr.p->prevLogpage) {
+ jam();
+ writeDirty(signal);
+ return;
+ }//if
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ }//if
+ if (logFilePtr.p->currentLogpage != logPartPtr.p->prevLogpage) {
+ jam();
+ logPartPtr.p->prevLogpage = logPagePtr.p->logPageWord[ZNEXT_PAGE];
+ logPartPtr.p->prevFilepage++;
+ continue;
+ }//if
+ switch (logPartPtr.p->logExecState) {
+ case LogPartRecord::LES_EXEC_LOG_COMPLETED:
+ jam();
+ releaseMmPages(signal);
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_EXEC_SR_COMPLETED;
+ closeFile(signal, logFilePtr);
+ return;
+ break;
+ case LogPartRecord::LES_EXEC_LOG_NEW_MBYTE:
+ jam();
+ logFilePtr.p->currentMbyte++;
+ readExecSrNewMbyte(signal);
+ return;
+ break;
+ case LogPartRecord::LES_EXEC_LOG_NEW_FILE:
+ jam();
+ nextLogFilePtr.i = logFilePtr.p->nextLogFile;
+ logPartPtr.p->currentLogfile = nextLogFilePtr.i;
+ ptrCheckGuard(nextLogFilePtr, clogFileFileSize, logFileRecord);
+ nextLogFilePtr.p->currentMbyte = 0;
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_EXEC_SR;
+ closeFile(signal, logFilePtr);
+ return;
+ break;
+ case LogPartRecord::LES_EXEC_LOG:
+ jam();
+ /*empty*/;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPartPtr.p->savePageIndex = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ if (logPartPtr.p->execSrPagesRead < ZMIN_READ_BUFFER_SIZE) {
+ /* --------------------------------------------------------------------
+ * THERE WERE LESS THAN 16 KBYTE OF LOG PAGES REMAINING. WE WAIT UNTIL
+     * THE NEXT 64 KBYTE HAS ARRIVED BEFORE WE CONTINUE.
+ * ------------------------------------------------------------------- */
+ if ((logPartPtr.p->execSrPagesRead +
+ logPartPtr.p->execSrPagesExecuted) < ZPAGES_IN_MBYTE) {
+ jam();
+ /* ------------------------------------------------------------------
+       * WE ONLY STOP AND WAIT IF THERE ARE MORE PAGES TO READ. IF THERE ARE
+       * NOT, WE ARE AT THE END OF THE MBYTE AND WE CONTINUE. THERE IS NO
+       * RISK THAT A LOG RECORD WE FIND HAS NOT BEEN READ AT THIS TIME,
+       * SINCE LOG RECORDS NEVER SPAN OVER A MBYTE BOUNDARY.
+ * ----------------------------------------------------------------- */
+ readExecSr(signal);
+ logPartPtr.p->logExecState = LogPartRecord::LES_WAIT_READ_EXEC_SR;
+ return;
+ }//if
+ }//if
+ logWord = readLogword(signal);
+ switch (logWord) {
+/* ========================================================================= */
+/* ========================================================================= */
+ case ZPREP_OP_TYPE:
+ {
+ logWord = readLogword(signal);
+ stepAhead(signal, logWord - 2);
+ break;
+ }
+/* ========================================================================= */
+/* ========================================================================= */
+ case ZINVALID_COMMIT_TYPE:
+ jam();
+ stepAhead(signal, ZCOMMIT_LOG_SIZE - 1);
+ break;
+/* ========================================================================= */
+/* ========================================================================= */
+ case ZCOMMIT_TYPE:
+ {
+ CommitLogRecord commitLogRecord;
+ jam();
+ tcConnectptr.i = logPartPtr.p->logTcConrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ readCommitLog(signal, &commitLogRecord);
+ if (tcConnectptr.p->gci > crestartNewestGci) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS LOG RECORD MUST BE IGNORED. IT IS PART OF A GLOBAL CHECKPOINT WHICH */
+/* WILL BE INVALIDATED BY THE SYSTEM RESTART. IF NOT INVALIDATED IT MIGHT BE */
+/* EXECUTED IN A FUTURE SYSTEM RESTART. */
+/*---------------------------------------------------------------------------*/
+ tmpLogPagePtr.i = logPartPtr.p->prevLogpage;
+ ptrCheckGuard(tmpLogPagePtr, clogPageFileSize, logPageRecord);
+ arrGuard(logPartPtr.p->savePageIndex, ZPAGE_SIZE);
+ tmpLogPagePtr.p->logPageWord[logPartPtr.p->savePageIndex] =
+ ZINVALID_COMMIT_TYPE;
+ tmpLogPagePtr.p->logPageWord[ZPOS_DIRTY] = ZDIRTY;
+ } else {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* CHECK IF I AM SUPPOSED TO EXECUTE THIS LOG RECORD. IF I AM THEN SAVE PAGE */
+/* INDEX IN CURRENT LOG PAGE SINCE IT WILL BE OVERWRITTEN WHEN EXECUTING THE */
+/* LOG RECORD. */
+/*---------------------------------------------------------------------------*/
+ logPartPtr.p->execSrExecuteIndex = 0;
+ Uint32 result = checkIfExecLog(signal);
+ if (result == ZOK) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* IN A NODE RESTART WE WILL NEVER END UP HERE SINCE NO FRAGMENTS HAVE BEEN */
+/* DEFINED YET. THUS NO EXTRA CHECKING FOR NODE RESTART IS NECESSARY. */
+/*---------------------------------------------------------------------------*/
+ logPartPtr.p->savePageIndex =
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ tcConnectptr.p->fragmentptr = fragptr.i;
+ findPageRef(signal, &commitLogRecord);
+ logPartPtr.p->execSrLogPageIndex = commitLogRecord.startPageIndex;
+ if (logPagePtr.i != RNIL) {
+ jam();
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = commitLogRecord.startPageIndex;
+ logPartPtr.p->execSrLogPage = logPagePtr.i;
+ execLogRecord(signal);
+ return;
+ }//if
+ logPartPtr.p->execSrStartPageNo = commitLogRecord.startPageNo;
+ logPartPtr.p->execSrStopPageNo = commitLogRecord.stopPageNo;
+ findLogfile(signal, commitLogRecord.fileNo, logPartPtr, &logFilePtr);
+ logPartPtr.p->execSrExecLogFile = logFilePtr.i;
+ if (logFilePtr.i == logPartPtr.p->currentLogfile) {
+ jam();
+ readExecLog(signal);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_EXEC_LOG;
+ return;
+ } else {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE FILE IS CURRENTLY NOT OPEN. WE MUST OPEN IT BEFORE WE CAN READ FROM */
+/* THE FILE. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN_EXEC_LOG;
+ openFileRw(signal, logFilePtr);
+ return;
+ }//if
+ }//if
+ }//if
+ break;
+ }
+/* ========================================================================= */
+/* ========================================================================= */
+ case ZABORT_TYPE:
+ jam();
+ stepAhead(signal, ZABORT_LOG_SIZE - 1);
+ break;
+/* ========================================================================= */
+/* ========================================================================= */
+ case ZFD_TYPE:
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS IS THE FIRST ITEM WE ENCOUNTER IN A NEW FILE. AT THIS MOMENT WE SHALL*/
+/* SIMPLY BYPASS IT. IT HAS NO SIGNIFICANCE WHEN EXECUTING THE LOG. IT HAS   */
+/* ITS SIGNIFICANCE WHEN FINDING THE START AND THE END OF THE LOG.           */
+/* WE HARDCODE THE PAGE INDEX SINCE THIS SHOULD NEVER BE FOUND AT ANY OTHER */
+/* PLACE THAN IN THE FIRST PAGE OF A NEW FILE IN THE FIRST POSITION AFTER THE*/
+/* HEADER. */
+/*---------------------------------------------------------------------------*/
+ ndbrequire(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] ==
+ (ZPAGE_HEADER_SIZE + ZPOS_NO_FD));
+ {
+ Uint32 noFdDescriptors =
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD];
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
+ (ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (noFdDescriptors * ZFD_PART_SIZE);
+ }
+ break;
+/* ========================================================================= */
+/* ========================================================================= */
+ case ZNEXT_LOG_RECORD_TYPE:
+ jam();
+ stepAhead(signal, ZPAGE_SIZE - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]);
+ break;
+/* ========================================================================= */
+/* ========================================================================= */
+ case ZNEXT_MBYTE_TYPE:
+/*---------------------------------------------------------------------------*/
+/* WE WILL SKIP A PART OF THE LOG FILE. ACTUALLY THE NEXT POINTER IS TO */
+/* A NEW MBYTE. THEREFORE WE WILL START UP A NEW MBYTE. THIS NEW MBYTE IS */
+/* HOWEVER ONLY STARTED IF IT IS NOT AFTER THE STOP MBYTE. */
+/* IF WE HAVE REACHED THE END OF THE STOP MBYTE THEN THE EXECUTION OF THE LOG*/
+/* IS COMPLETED. */
+/*---------------------------------------------------------------------------*/
+ if (logPartPtr.p->currentLogfile == logPartPtr.p->stopLogfile) {
+ if (logFilePtr.p->currentMbyte == logPartPtr.p->stopMbyte) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS WAS THE LAST MBYTE TO EXECUTE IN THIS LOG PART. WE SHOULD HAVE FOUND */
+/* A COMPLETED GCI RECORD OF THE LAST GCI BEFORE THIS. FOR SOME REASON THIS */
+/* RECORD WAS NOT AVAILABLE ON THE LOG. CRASH THE SYSTEM, A VERY SERIOUS */
+/* ERROR WHICH WE MUST REALLY WORK HARD TO AVOID. */
+/*---------------------------------------------------------------------------*/
+/*---------------------------------------------------------------------------*/
+/* SEND A SIGNAL TO THE SIGNAL LOG AND THEN CRASH THE SYSTEM. */
+/*---------------------------------------------------------------------------*/
+ signal->theData[0] = RNIL;
+ signal->theData[1] = logPartPtr.i;
+ Uint32 tmp = logFilePtr.p->fileName[3];
+ tmp = (tmp >> 8) & 0xff;// To get the Directory, DXX.
+ signal->theData[2] = tmp;
+ signal->theData[3] = logFilePtr.p->fileNo;
+        signal->theData[4] = logFilePtr.p->currentMbyte;
+        signal->theData[5] = logFilePtr.p->currentFilepage;
+ signal->theData[6] = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ sendSignal(cownref, GSN_DEBUG_SIG, signal, 7, JBA);
+ return;
+ }//if
+ }//if
+/*---------------------------------------------------------------------------*/
+/* START EXECUTION OF A NEW MBYTE IN THE LOG. */
+/*---------------------------------------------------------------------------*/
+ if (logFilePtr.p->currentMbyte < (ZNO_MBYTES_IN_FILE - 1)) {
+ jam();
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_NEW_MBYTE;
+ } else {
+ ndbrequire(logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1));
+ jam();
+/*---------------------------------------------------------------------------*/
+/* WE HAVE TO CHANGE FILE. CLOSE THIS ONE AND THEN OPEN THE NEXT. */
+/*---------------------------------------------------------------------------*/
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_NEW_FILE;
+ }//if
+ break;
+/* ========================================================================= */
+/* ========================================================================= */
+ case ZCOMPLETED_GCI_TYPE:
+ jam();
+ logWord = readLogword(signal);
+ if (logWord == logPartPtr.p->logLastGci) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* IF IT IS THE LAST GCI TO LIVE AFTER SYSTEM RESTART THEN WE RECORD THE NEXT*/
+/* WORD AS THE NEW HEADER OF THE LOG FILE. OTHERWISE WE SIMPLY IGNORE THIS */
+/* LOG RECORD. */
+/*---------------------------------------------------------------------------*/
+ if (csrPhasesCompleted == 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/*WE ONLY RECORD THE HEAD OF THE LOG IN THE FIRST LOG ROUND OF LOG EXECUTION.*/
+/*---------------------------------------------------------------------------*/
+ logPartPtr.p->headFileNo = logFilePtr.p->fileNo;
+ logPartPtr.p->headPageNo = logFilePtr.p->currentFilepage;
+ logPartPtr.p->headPageIndex =
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ }//if
+/*---------------------------------------------------------------------------*/
+/* THERE IS NO NEED TO EXECUTE PAST THIS LINE SINCE THERE WILL ONLY BE LOG   */
+/* RECORDS THAT WILL BE OF NO INTEREST. THUS CLOSE THE FILE AND START THE */
+/* NEXT PHASE OF THE SYSTEM RESTART. */
+/*---------------------------------------------------------------------------*/
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_COMPLETED;
+ }//if
+ break;
+ default:
+ jam();
+/* ========================================================================= */
+/* ========================================================================= */
+/*---------------------------------------------------------------------------*/
+/* SEND A SIGNAL TO THE SIGNAL LOG AND THEN CRASH THE SYSTEM. */
+/*---------------------------------------------------------------------------*/
+ signal->theData[0] = RNIL;
+ signal->theData[1] = logPartPtr.i;
+ Uint32 tmp = logFilePtr.p->fileName[3];
+ tmp = (tmp >> 8) & 0xff;// To get the Directory, DXX.
+ signal->theData[2] = tmp;
+ signal->theData[3] = logFilePtr.p->fileNo;
+ signal->theData[4] = logFilePtr.p->currentMbyte;
+ signal->theData[5] = logFilePtr.p->currentFilepage;
+ signal->theData[6] = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ signal->theData[7] = logWord;
+ sendSignal(cownref, GSN_DEBUG_SIG, signal, 8, JBA);
+ return;
+ break;
+ }//switch
+/*---------------------------------------------------------------------------*/
+// We continue to execute log records until we find a proper one to execute or
+// until we reach a new page.
+/*---------------------------------------------------------------------------*/
+ } while (1);
+}//Dblqh::execSr()
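+
+/* A compressed sketch of the scanning loop in execSr() above: every log record
+ * starts with a type word, and records that are not executed are skipped by a
+ * type-dependent length. The type tags and sizes below are illustrative only;
+ * the real ones (ZCOMMIT_TYPE, ZABORT_LOG_SIZE, ...) are defined by the block.
+ * ------------------------------------------------------------------------- */
+namespace logscan_sketch {
+  enum RecType { REC_PREPARE = 1, REC_COMMIT = 2, REC_ABORT = 3, REC_COMPLETED_GCI = 4 };
+
+  const unsigned COMMIT_SIZE = 8;   // assumed fixed record sizes, in words,
+  const unsigned ABORT_SIZE  = 3;   // including the type word itself
+
+  // Scan a buffer of log words and count the commit records encountered.
+  unsigned countCommits(const unsigned* words, unsigned nWords)
+  {
+    unsigned pos = 0, commits = 0;
+    while (pos < nWords) {
+      switch (words[pos]) {
+      case REC_PREPARE: {
+        if (pos + 1 >= nWords) return commits;
+        unsigned len = words[pos + 1]; // prepare records store their total length in the next word
+        if (len == 0) return commits;  // malformed record: stop rather than loop forever
+        pos += len;
+        break;
+      }
+      case REC_COMMIT:
+        commits++;                     // the real loop decides here whether to re-execute the operation
+        pos += COMMIT_SIZE;
+        break;
+      case REC_ABORT:
+        pos += ABORT_SIZE;
+        break;
+      case REC_COMPLETED_GCI:
+        return commits;                // nothing after this is of interest in the sketch
+      default:
+        return commits;                // unknown type: stop (the block reports an error instead)
+      }
+    }
+    return commits;
+  }
+}//namespace logscan_sketch
+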
+
+/*---------------------------------------------------------------------------*/
+/* THIS SIGNAL IS ONLY RECEIVED TO BE CAPTURED IN THE SIGNAL LOG. IT IS */
+/* ALSO USED TO CRASH THE SYSTEM AFTER SENDING A SIGNAL TO THE LOG. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::execDEBUG_SIG(Signal* signal)
+{
+/*
+2.5 TEMPORARY VARIABLES
+-----------------------
+*/
+ UintR tdebug;
+
+ jamEntry();
+ logPagePtr.i = signal->theData[0];
+ tdebug = logPagePtr.p->logPageWord[0];
+
+ char buf[100];
+ BaseString::snprintf(buf, 100,
+ "Error while reading REDO log.\n"
+ "D=%d, F=%d Mb=%d FP=%d W1=%d W2=%d",
+ signal->theData[2], signal->theData[3], signal->theData[4],
+ signal->theData[5], signal->theData[6], signal->theData[7]);
+
+ progError(__LINE__, ERR_SR_REDOLOG, buf);
+
+ return;
+}//Dblqh::execDEBUG_SIG()
+
+/*---------------------------------------------------------------------------*/
+/*---------------------------------------------------------------------------*/
+void Dblqh::closeExecLogLab(Signal* signal)
+{
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ signal->theData[0] = ZEXEC_SR;
+ signal->theData[1] = logFilePtr.p->logPartRec;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}//Dblqh::closeExecLogLab()
+
+void Dblqh::openExecLogLab(Signal* signal)
+{
+ readExecLog(signal);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_EXEC_LOG;
+ return;
+}//Dblqh::openExecLogLab()
+
+void Dblqh::readExecLogLab(Signal* signal)
+{
+ buildLinkedLogPageList(signal);
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOGREC_FROM_FILE;
+ logPartPtr.p->execSrLfoRec = lfoPtr.i;
+ logPartPtr.p->execSrLogPage = logPagePtr.i;
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
+ logPartPtr.p->execSrLogPageIndex;
+ execLogRecord(signal);
+ return;
+}//Dblqh::readExecLogLab()
+
+/*---------------------------------------------------------------------------*/
+/* THIS CODE IS USED TO EXECUTE A LOG RECORD WHEN ITS DATA HAS BEEN LOCATED  */
+/* AND TRANSFERRED INTO MEMORY. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::execLogRecord(Signal* signal)
+{
+ jamEntry();
+
+ tcConnectptr.i = logPartPtr.p->logTcConrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ // Read a log record and prepare it for execution
+ readLogHeader(signal);
+ readKey(signal);
+ readAttrinfo(signal);
+ initReqinfoExecSr(signal);
+ arrGuard(logPartPtr.p->execSrExecuteIndex, 4);
+ BlockReference ref = fragptr.p->execSrBlockref[logPartPtr.p->execSrExecuteIndex];
+ tcConnectptr.p->nextReplica = refToNode(ref);
+ tcConnectptr.p->connectState = TcConnectionrec::LOG_CONNECTED;
+ tcConnectptr.p->tcOprec = tcConnectptr.i;
+ packLqhkeyreqLab(signal);
+ return;
+}//Dblqh::execLogRecord()
+
+//----------------------------------------------------------------------------
+// This function invalidates log pages after the last GCI record in a
+// system/node restart. This is to ensure that the end of the log is
+// consistent. This function is executed last in start phase 3.
+// RT 450. EDTJAMO.
+//----------------------------------------------------------------------------
+void Dblqh::invalidateLogAfterLastGCI(Signal* signal) {
+
+ jam();
+ if (logPartPtr.p->logExecState != LogPartRecord::LES_EXEC_LOG_INVALIDATE) {
+ jam();
+ systemError(signal);
+ }
+
+ if (logFilePtr.p->fileNo != logPartPtr.p->invalidateFileNo) {
+ jam();
+ systemError(signal);
+ }
+
+ switch (lfoPtr.p->lfoState) {
+ case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
+ jam();
+ releaseLfo(signal);
+ releaseLogpage(signal);
+ if (logPartPtr.p->invalidatePageNo < (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE - 1)) {
+ // We continue in this file.
+ logPartPtr.p->invalidatePageNo++;
+ } else {
+ // We continue in the next file.
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPartPtr.p->invalidateFileNo = logFilePtr.p->fileNo;
+ // Page 0 is used for file descriptors.
+ logPartPtr.p->invalidatePageNo = 1;
+ if (logFilePtr.p->logFileStatus != LogFileRecord::OPEN) {
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_INVALIDATE_PAGES;
+ openFileRw(signal, logFilePtr);
+ return;
+ break;
+ }
+ }
+ // Read a page from the log file.
+ readFileInInvalidate(signal);
+ return;
+ break;
+
+ case LogFileOperationRecord::READ_SR_INVALIDATE_PAGES:
+ jam();
+ releaseLfo(signal);
+ // Check if this page must be invalidated.
+ // If the log lap number on a page after the head of the tail is the same
+ // as the actual log lap number we must invalidate this page. Otherwise it
+ // could be impossible to find the end of the log in a later system/node
+ // restart.
+ if (logPagePtr.p->logPageWord[ZPOS_LOG_LAP] == logPartPtr.p->logLap) {
+ // This page must be invalidated.
+ logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 0;
+ // Contact NDBFS. Real time break.
+ writeSinglePage(signal, logPartPtr.p->invalidatePageNo, ZPAGE_SIZE - 1);
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES;
+ } else {
+ // We are done with invalidating. Finish start phase 3.4.
+ exitFromInvalidate(signal);
+ }
+ return;
+ break;
+
+ default:
+ jam();
+ systemError(signal);
+ return;
+ break;
+ }
+
+ return;
+}//Dblqh::invalidateLogAfterLastGCI
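+
+/* A minimal sketch of the invalidation sweep driven above: starting at the
+ * page after the log head, every page that still carries the current log lap
+ * number is rewritten with lap 0, and the sweep stops at the first page with
+ * an older lap. File switching, the page-0 descriptor and the real-time
+ * breaks are left out of the sketch.
+ * ------------------------------------------------------------------------- */
+namespace invalidate_sketch {
+  struct Page { unsigned logLap; };
+
+  // Returns the number of pages invalidated.
+  unsigned invalidateAfterHead(Page* pages, unsigned nPages,
+                               unsigned headPage, unsigned currentLap)
+  {
+    unsigned count = 0;
+    for (unsigned p = headPage; p < nPages; p++) {
+      if (pages[p].logLap != currentLap)
+        break;                     // an older lap: the rest cannot be mistaken for valid log
+      pages[p].logLap = 0;         // the block writes the page back to disk at this point
+      count++;
+    }
+    return count;
+  }
+}//namespace invalidate_sketch
+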
+
+void Dblqh::readFileInInvalidate(Signal* signal) {
+ jam();
+ // Contact NDBFS. Real time break.
+ readSinglePage(signal, logPartPtr.p->invalidatePageNo);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_INVALIDATE_PAGES;
+}
+
+void Dblqh::exitFromInvalidate(Signal* signal) {
+ jam();
+ // Close files if necessary. Current file and the next file should be
+ // left open.
+ if (logFilePtr.i != logPartPtr.p->currentLogfile) {
+ LogFileRecordPtr currentLogFilePtr;
+ LogFileRecordPtr nextAfterCurrentLogFilePtr;
+
+ currentLogFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(currentLogFilePtr, clogFileFileSize, logFileRecord);
+
+ nextAfterCurrentLogFilePtr.i = currentLogFilePtr.p->nextLogFile;
+
+ if (logFilePtr.i != nextAfterCurrentLogFilePtr.i) {
+ // This file should be closed.
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSE_SR_INVALIDATE_PAGES;
+ closeFile(signal, logFilePtr);
+ // Return from this function and wait for close confirm. Then come back
+ // and test the previous file for closing.
+ return;
+ }
+ }
+
+ // We are done with closing files, send completed signal and exit this phase.
+ signal->theData[0] = ZSR_FOURTH_COMP;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}
+
+
+/*---------------------------------------------------------------------------*/
+/* THE EXECUTION OF A LOG RECORD IS COMPLETED. RELEASE PAGES IF THEY WERE */
+/* READ FROM DISK FOR THIS PARTICULAR OPERATION. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::completedLab(Signal* signal)
+{
+ Uint32 result = returnExecLog(signal);
+/*---------------------------------------------------------------------------*/
+/* ENTER COMPLETED WITH */
+/* LQH_CONNECTPTR */
+/*---------------------------------------------------------------------------*/
+ if (result == ZOK) {
+ jam();
+ execLogRecord(signal);
+ return;
+ } else if (result == ZNOT_OK) {
+ jam();
+ signal->theData[0] = ZEXEC_SR;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ } else {
+ jam();
+ /*empty*/;
+ }//if
+/*---------------------------------------------------------------------------*/
+/* WE HAVE TO WAIT FOR CLOSING OF THE EXECUTED LOG FILE BEFORE PROCEEDING IN */
+/* RARE CASES. */
+/*---------------------------------------------------------------------------*/
+ return;
+}//Dblqh::completedLab()
+
+/*---------------------------------------------------------------------------*/
+/* EXECUTION OF LOG RECORD WAS NOT SUCCESSFUL. CHECK IF IT IS OK ANYWAY, */
+/* THEN EXECUTE THE NEXT LOG RECORD. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::logLqhkeyrefLab(Signal* signal)
+{
+ Uint32 result = returnExecLog(signal);
+ switch (tcConnectptr.p->operation) {
+ case ZUPDATE:
+ case ZDELETE:
+ jam();
+ ndbrequire(terrorCode == ZNO_TUPLE_FOUND);
+ break;
+ case ZINSERT:
+ jam();
+ ndbrequire(terrorCode == ZTUPLE_ALREADY_EXIST);
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ if (result == ZOK) {
+ jam();
+ execLogRecord(signal);
+ return;
+ } else if (result == ZNOT_OK) {
+ jam();
+ signal->theData[0] = ZEXEC_SR;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ } else {
+ jam();
+ /*empty*/;
+ }//if
+ /* ------------------------------------------------------------------------
+ * WE HAVE TO WAIT FOR CLOSING OF THE EXECUTED LOG FILE BEFORE
+ * PROCEEDING IN RARE CASES.
+ * ----------------------------------------------------------------------- */
+ return;
+}//Dblqh::logLqhkeyrefLab()
+
+void Dblqh::closeExecSrCompletedLab(Signal* signal)
+{
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ signal->theData[0] = logFilePtr.p->logPartRec;
+ execLogComp(signal);
+ return;
+}//Dblqh::closeExecSrCompletedLab()
+
+/* --------------------------------------------------------------------------
+ * ONE OF THE LOG PARTS HAS COMPLETED EXECUTING THE LOG. CHECK IF ALL LOG
+ * PARTS ARE COMPLETED. IF SO START SENDING EXEC_FRAGCONF AND EXEC_SRCONF.
+ * ------------------------------------------------------------------------- */
+void Dblqh::execLogComp(Signal* signal)
+{
+ logPartPtr.i = signal->theData[0];
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logPartPtr.p->logPartState = LogPartRecord::SR_THIRD_PHASE_COMPLETED;
+ /* ------------------------------------------------------------------------
+ * WE MUST RELEASE THE TC CONNECT RECORD HERE SO THAT IT CAN BE REUSED.
+ * ----------------------------------------------------------------------- */
+ tcConnectptr.i = logPartPtr.p->logTcConrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ releaseTcrecLog(signal, tcConnectptr);
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+ if (logPartPtr.p->logPartState != LogPartRecord::SR_THIRD_PHASE_COMPLETED) {
+ if (logPartPtr.p->logPartState != LogPartRecord::SR_THIRD_PHASE_STARTED) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ } else {
+ jam();
+ /* ------------------------------------------------------------------
+ * THIS LOG PART WAS NOT COMPLETED YET. EXIT AND WAIT FOR IT
+ * TO COMPLETE
+ * ----------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ }//for
+ /* ------------------------------------------------------------------------
+ * ALL LOG PARTS HAVE COMPLETED THE EXECUTION OF THE LOG. WE CAN NOW START
+ * SENDING THE EXEC_FRAGCONF SIGNALS TO ALL INVOLVED FRAGMENTS.
+ * ----------------------------------------------------------------------- */
+ if (cstartType != NodeState::ST_NODE_RESTART) {
+ jam();
+ signal->theData[0] = ZSEND_EXEC_CONF;
+ signal->theData[1] = 0;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ } else {
+ jam();
+ /* ----------------------------------------------------------------------
+ * FOR NODE RESTART WE CAN SKIP A NUMBER OF STEPS SINCE WE HAVE NO
+ * FRAGMENTS DEFINED AT THIS POINT. OBVIOUSLY WE WILL NOT NEED TO
+ * EXECUTE ANY MORE LOG STEPS EITHER AND THUS WE CAN IMMEDIATELY
+ * START FINDING THE END AND THE START OF THE LOG.
+ * --------------------------------------------------------------------- */
+ csrPhasesCompleted = 3;
+ execSrCompletedLab(signal);
+ return;
+ }//if
+ return;
+}//Dblqh::execLogComp()
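+
+/* The loop above is an instance of a simple barrier: nothing proceeds until
+ * every log part has reported the same completion state. A minimal sketch of
+ * that check, with an illustrative state enum:
+ * ------------------------------------------------------------------------- */
+namespace logpart_barrier_sketch {
+  enum PartState { PART_STARTED, PART_COMPLETED };
+
+  // True only when every log part has completed the current phase.
+  bool allPartsCompleted(const PartState* parts, unsigned nParts)
+  {
+    for (unsigned i = 0; i < nParts; i++) {
+      if (parts[i] != PART_COMPLETED)
+        return false;
+    }
+    return true;
+  }
+}//namespace logpart_barrier_sketch
+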
+
+/* --------------------------------------------------------------------------
+ * GO THROUGH THE FRAGMENT RECORDS TO DEDUCE WHICH OF THEM SHALL BE SENT
+ * EXEC_FRAGCONF AFTER COMPLETING THE EXECUTION OF THE LOG.
+ * ------------------------------------------------------------------------- */
+void Dblqh::sendExecConf(Signal* signal)
+{
+ jamEntry();
+ fragptr.i = signal->theData[0];
+ Uint32 loopCount = 0;
+ while (fragptr.i < cfragrecFileSize) {
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (fragptr.p->execSrStatus != Fragrecord::IDLE) {
+ jam();
+ ndbrequire(fragptr.p->execSrNoReplicas - 1 < 4);
+ for (Uint32 i = 0; i < fragptr.p->execSrNoReplicas; i++) {
+ jam();
+ signal->theData[0] = fragptr.p->execSrUserptr[i];
+ sendSignal(fragptr.p->execSrBlockref[i], GSN_EXEC_FRAGCONF,
+ signal, 1, JBB);
+ }//for
+ if (fragptr.p->execSrStatus == Fragrecord::ACTIVE) {
+ jam();
+ fragptr.p->execSrStatus = Fragrecord::IDLE;
+ } else {
+ ndbrequire(fragptr.p->execSrStatus == Fragrecord::ACTIVE_REMOVE_AFTER);
+ jam();
+ Uint32 fragId = fragptr.p->fragId;
+ tabptr.i = fragptr.p->tabRef;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ deleteFragrec(fragId);
+ }//if
+ fragptr.p->execSrNoReplicas = 0;
+ }//if
+ loopCount++;
+ if (loopCount > 20) {
+ jam();
+ signal->theData[0] = ZSEND_EXEC_CONF;
+ signal->theData[1] = fragptr.i + 1;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ } else {
+ jam();
+ fragptr.i++;
+ }//if
+ }//while
+ /* ----------------------------------------------------------------------
+ * WE HAVE NOW SENT ALL EXEC_FRAGCONF. NOW IT IS TIME TO SEND
+ * EXEC_SRCONF TO ALL NODES.
+ * --------------------------------------------------------------------- */
+ srPhase3Comp(signal);
+}//Dblqh::sendExecConf()
+
+/* --------------------------------------------------------------------------
+ * PHASE 3 HAS NOW COMPLETED. INFORM ALL OTHER NODES OF THIS EVENT.
+ * ------------------------------------------------------------------------- */
+void Dblqh::srPhase3Comp(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(cnoOfNodes < MAX_NDB_NODES);
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ if (cnodeStatus[i] == ZNODE_UP) {
+ jam();
+ ndbrequire(cnodeData[i] < MAX_NDB_NODES);
+ BlockReference ref = calcLqhBlockRef(cnodeData[i]);
+ signal->theData[0] = cownNodeid;
+ sendSignal(ref, GSN_EXEC_SRCONF, signal, 1, JBB);
+ }//if
+ }//for
+ return;
+}//Dblqh::srPhase3Comp()
+
+/* ##########################################################################
+ * SYSTEM RESTART PHASE FOUR MODULE
+ * THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING.
+ *
+ * THIS MODULE SETS UP THE HEAD AND TAIL POINTERS OF THE LOG PARTS IN THE
+ * FRAGMENT LOG. WHEN IT IS COMPLETED IT REPORTS TO THE MASTER DIH THAT
+ * IT HAS COMPLETED THE PART OF THE SYSTEM RESTART WHERE THE DATABASE IS
+ * LOADED.
+ * IT ALSO OPENS THE CURRENT LOG FILE AND THE NEXT AND SETS UP THE FIRST
+ * LOG PAGE WHERE NEW LOG DATA IS TO BE INSERTED WHEN THE SYSTEM STARTS
+ * AGAIN.
+ *
+ * THIS PART IS ACTUALLY EXECUTED FOR ALL RESTART TYPES.
+ * ######################################################################### */
+void Dblqh::initFourth(Signal* signal)
+{
+ LogFileRecordPtr locLogFilePtr;
+ jamEntry();
+ logPartPtr.i = signal->theData[0];
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ crestartNewestGci = 1;
+ crestartOldestGci = 1;
+ /* ------------------------------------------------------------------------
+ * INITIALISE LOG PART AND LOG FILES AS NEEDED.
+ * ----------------------------------------------------------------------- */
+ logPartPtr.p->headFileNo = 0;
+ logPartPtr.p->headPageNo = 1;
+ logPartPtr.p->headPageIndex = ZPAGE_HEADER_SIZE + 2;
+ logPartPtr.p->logPartState = LogPartRecord::SR_FOURTH_PHASE_STARTED;
+ logPartPtr.p->logTailFileNo = 0;
+ logPartPtr.p->logTailMbyte = 0;
+ locLogFilePtr.i = logPartPtr.p->firstLogfile;
+ ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_PHASE;
+ openFileRw(signal, locLogFilePtr);
+ return;
+}//Dblqh::initFourth()
+
+void Dblqh::openSrFourthPhaseLab(Signal* signal)
+{
+ /* ------------------------------------------------------------------------
+   * WE HAVE NOW OPENED THE HEAD LOG FILE. WE WILL NOW START READING IT
+ * FROM THE HEAD MBYTE TO FIND THE NEW HEAD OF THE LOG.
+ * ----------------------------------------------------------------------- */
+ readSinglePage(signal, logPartPtr.p->headPageNo);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_FOURTH_PHASE;
+ return;
+}//Dblqh::openSrFourthPhaseLab()
+
+void Dblqh::readSrFourthPhaseLab(Signal* signal)
+{
+ if(c_diskless){
+ jam();
+ logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 1;
+ }
+
+ /* ------------------------------------------------------------------------
+ * INITIALISE ALL LOG PART INFO AND LOG FILE INFO THAT IS NEEDED TO
+ * START UP THE SYSTEM.
+ * ------------------------------------------------------------------------
+ * INITIALISE THE NEWEST GLOBAL CHECKPOINT IDENTITY AND THE NEWEST
+   * COMPLETED GLOBAL CHECKPOINT IDENTITY AS THE NEWEST THAT WAS RESTARTED.
+ * ------------------------------------------------------------------------
+ * INITIALISE THE HEAD PAGE INDEX IN THIS PAGE.
+ * ASSIGN IT AS THE CURRENT LOGPAGE.
+ * ASSIGN THE FILE AS THE CURRENT LOG FILE.
+ * ASSIGN THE CURRENT FILE NUMBER FROM THE CURRENT LOG FILE AND THE NEXT
+ * FILE NUMBER FROM THE NEXT LOG FILE.
+ * ASSIGN THE CURRENT FILEPAGE FROM HEAD PAGE NUMBER.
+ * ASSIGN THE CURRENT MBYTE BY DIVIDING PAGE NUMBER BY 128.
+ * INITIALISE LOG LAP TO BE THE LOG LAP AS FOUND IN THE HEAD PAGE.
+ * WE HAVE TO CALCULATE THE NUMBER OF REMAINING WORDS IN THIS MBYTE.
+ * ----------------------------------------------------------------------- */
+ cnewestGci = crestartNewestGci;
+ cnewestCompletedGci = crestartNewestGci;
+ logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci;
+ logPartPtr.p->currentLogfile = logFilePtr.i;
+ logFilePtr.p->filePosition = logPartPtr.p->headPageNo;
+ logFilePtr.p->currentMbyte =
+ logPartPtr.p->headPageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE;
+ logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING;
+ logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP];
+ logFilePtr.p->currentFilepage = logPartPtr.p->headPageNo;
+ logFilePtr.p->currentLogpage = logPagePtr.i;
+
+ initLogpage(signal);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPartPtr.p->headPageIndex;
+ logFilePtr.p->remainingWordsInMbyte =
+ ((
+ ((logFilePtr.p->currentMbyte + 1) * ZPAGES_IN_MBYTE) -
+ logFilePtr.p->currentFilepage) *
+ (ZPAGE_SIZE - ZPAGE_HEADER_SIZE)) -
+ (logPartPtr.p->headPageIndex - ZPAGE_HEADER_SIZE);
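+  /* Worked example of the formula above, using purely illustrative numbers
+   * (the real constants live in Dblqh.hpp): with ZPAGES_IN_MBYTE = 32,
+   * ZPAGE_SIZE = 8192 and ZPAGE_HEADER_SIZE = 25 words, a head located at
+   * currentMbyte = 2, currentFilepage = 70 and headPageIndex = 1000 gives
+   * ((3 * 32) - 70) * (8192 - 25) - (1000 - 25)
+   *   = 26 * 8167 - 975 = 211367 words left before the next MByte starts. */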
+ /* ------------------------------------------------------------------------
+ * THE NEXT STEP IS TO OPEN THE NEXT LOG FILE (IF THERE IS ONE).
+ * ----------------------------------------------------------------------- */
+ if (logFilePtr.p->nextLogFile != logFilePtr.i) {
+ LogFileRecordPtr locLogFilePtr;
+ jam();
+ locLogFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_NEXT;
+ openFileRw(signal, locLogFilePtr);
+ } else {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THIS CAN ONLY OCCUR IF WE HAVE ONLY ONE LOG FILE. THIS LOG FILE MUST
+ * BE LOG FILE ZERO AND THAT IS THE FILE WE CURRENTLY HAVE READ.
+ * THUS WE CAN CONTINUE IMMEDIATELY TO READ PAGE ZERO IN FILE ZERO.
+ * --------------------------------------------------------------------- */
+ openSrFourthZeroSkipInitLab(signal);
+ return;
+ }//if
+ return;
+}//Dblqh::readSrFourthPhaseLab()
+
+void Dblqh::openSrFourthNextLab(Signal* signal)
+{
+ /* ------------------------------------------------------------------------
+ * WE MUST ALSO HAVE FILE 0 OPEN ALL THE TIME.
+ * ----------------------------------------------------------------------- */
+ logFilePtr.i = logPartPtr.p->firstLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ if (logFilePtr.p->logFileStatus == LogFileRecord::OPEN) {
+ jam();
+ openSrFourthZeroSkipInitLab(signal);
+ return;
+ } else {
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_ZERO;
+ openFileRw(signal, logFilePtr);
+ }//if
+ return;
+}//Dblqh::openSrFourthNextLab()
+
+void Dblqh::openSrFourthZeroLab(Signal* signal)
+{
+ openSrFourthZeroSkipInitLab(signal);
+ return;
+}//Dblqh::openSrFourthZeroLab()
+
+void Dblqh::openSrFourthZeroSkipInitLab(Signal* signal)
+{
+ if (logFilePtr.i == logPartPtr.p->currentLogfile) {
+ if (logFilePtr.p->currentFilepage == 0) {
+ jam();
+ /* -------------------------------------------------------------------
+ * THE HEADER PAGE IN THE LOG IS PAGE ZERO IN FILE ZERO.
+ * THIS SHOULD NEVER OCCUR.
+ * ------------------------------------------------------------------- */
+ systemErrorLab(signal);
+ return;
+ }//if
+ }//if
+ readSinglePage(signal, 0);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_FOURTH_ZERO;
+ return;
+}//Dblqh::openSrFourthZeroSkipInitLab()
+
+void Dblqh::readSrFourthZeroLab(Signal* signal)
+{
+ logFilePtr.p->logPageZero = logPagePtr.i;
+ // --------------------------------------------------------------------
+ // This is moved to invalidateLogAfterLastGCI(), RT453.
+ // signal->theData[0] = ZSR_FOURTH_COMP;
+ // signal->theData[1] = logPartPtr.i;
+ // sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ // --------------------------------------------------------------------
+
+ // Need to invalidate log pages after the head of the log. RT 453. EDTJAMO.
+ // Set the start of the invalidation.
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPartPtr.p->invalidateFileNo = logPartPtr.p->headFileNo;
+ logPartPtr.p->invalidatePageNo = logPartPtr.p->headPageNo;
+
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_INVALIDATE;
+ seizeLfo(signal);
+ initLfo(signal);
+ // The state here is a little confusing, but simulates that we return
+ // to invalidateLogAfterLastGCI() from an invalidate write and are ready
+ // to read a page from file.
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES;
+
+ invalidateLogAfterLastGCI(signal);
+ return;
+}//Dblqh::readSrFourthZeroLab()
+
+/* --------------------------------------------------------------------------
+ * ONE OF THE LOG PARTS HAS COMPLETED PHASE FOUR OF THE SYSTEM RESTART.
+ * CHECK IF ALL LOG PARTS ARE COMPLETED. IF SO SEND START_RECCONF
+ * ------------------------------------------------------------------------- */
+void Dblqh::srFourthComp(Signal* signal)
+{
+ jamEntry();
+ logPartPtr.i = signal->theData[0];
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logPartPtr.p->logPartState = LogPartRecord::SR_FOURTH_PHASE_COMPLETED;
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+ if (logPartPtr.p->logPartState != LogPartRecord::SR_FOURTH_PHASE_COMPLETED) {
+ if (logPartPtr.p->logPartState != LogPartRecord::SR_FOURTH_PHASE_STARTED) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ } else {
+ jam();
+ /* ------------------------------------------------------------------
+ * THIS LOG PART WAS NOT COMPLETED YET.
+ * EXIT AND WAIT FOR IT TO COMPLETE
+ * ----------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ }//for
+ /* ------------------------------------------------------------------------
+ * ALL LOG PARTS HAVE COMPLETED PHASE FOUR OF THE SYSTEM RESTART.
+ * WE CAN NOW SEND START_RECCONF TO THE MASTER DIH IF IT WAS A
+ * SYSTEM RESTART. OTHERWISE WE WILL CONTINUE WITH AN INITIAL START.
+ * SET LOG PART STATE TO IDLE TO
+ * INDICATE THAT NOTHING IS GOING ON IN THE LOG PART.
+ * ----------------------------------------------------------------------- */
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ ptrAss(logPartPtr, logPartRecord);
+ logPartPtr.p->logPartState = LogPartRecord::IDLE;
+ }//for
+
+ if ((cstartType == NodeState::ST_INITIAL_START) ||
+ (cstartType == NodeState::ST_INITIAL_NODE_RESTART)) {
+ jam();
+
+ ndbrequire(cinitialStartOngoing == ZTRUE);
+ cinitialStartOngoing = ZFALSE;
+
+ checkStartCompletedLab(signal);
+ return;
+ } else if ((cstartType == NodeState::ST_NODE_RESTART) ||
+ (cstartType == NodeState::ST_SYSTEM_RESTART)) {
+ jam();
+ StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
+ conf->startingNodeId = getOwnNodeId();
+ sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
+ StartRecConf::SignalLength, JBB);
+
+ if(cstartType == NodeState::ST_SYSTEM_RESTART){
+ fragptr.i = c_redo_log_complete_frags;
+ while(fragptr.i != RNIL){
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ signal->theData[0] = fragptr.p->tabRef;
+ signal->theData[1] = fragptr.p->fragId;
+ sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB);
+ fragptr.i = fragptr.p->nextFrag;
+ }
+ }
+ } else {
+ ndbrequire(false);
+ }//if
+ return;
+}//Dblqh::srFourthComp()
+
+/* ######################################################################### */
+/* ####### ERROR MODULE ####### */
+/* */
+/* ######################################################################### */
+void Dblqh::warningHandlerLab(Signal* signal)
+{
+ systemErrorLab(signal);
+ return;
+}//Dblqh::warningHandlerLab()
+
+/*---------------------------------------------------------------------------*/
+/* AN ERROR OCCURRED THAT WE WILL NOT TREAT AS SYSTEM ERROR. MOST OFTEN THIS */
+/* WAS CAUSED BY AN ERRONEOUS SIGNAL SENT BY ANOTHER NODE. WE DO NOT WISH TO*/
+/* CRASH BECAUSE OF FAULTS IN OTHER NODES. THUS WE ONLY REPORT A WARNING. */
+/* THIS IS CURRENTLY NOT IMPLEMENTED AND FOR THE MOMENT WE GENERATE A SYSTEM */
+/* ERROR SINCE WE WANT TO FIND FAULTS AS QUICKLY AS POSSIBLE IN A TEST PHASE.*/
+/* IN A LATER PHASE WE WILL CHANGE THIS TO BE A WARNING MESSAGE INSTEAD. */
+/*---------------------------------------------------------------------------*/
+/*---------------------------------------------------------------------------*/
+/* THIS TYPE OF ERROR SHOULD NOT GENERATE A SYSTEM ERROR IN A PRODUCT */
+/* RELEASE. THIS IS A TEMPORARY SOLUTION DURING TEST PHASE TO QUICKLY */
+/* FIND ERRORS. NORMALLY THIS SHOULD GENERATE A WARNING MESSAGE ONTO */
+/* SOME ERROR LOGGER. THIS WILL LATER BE IMPLEMENTED BY SOME SIGNAL. */
+/*---------------------------------------------------------------------------*/
+/* ------ SYSTEM ERROR SITUATIONS ------- */
+/* IN SITUATIONS WHERE THE STATE IS ERRONEOUS OR IF THE ERROR OCCURS IN */
+/* THE COMMIT, COMPLETE OR ABORT PHASE, WE PERFORM A CRASH OF THE AXE VM*/
+/*---------------------------------------------------------------------------*/
+
+void Dblqh::systemErrorLab(Signal* signal)
+{
+ progError(0, 0);
+/*************************************************************************>*/
+/* WE WANT TO INVOKE AN IMMEDIATE ERROR HERE SO WE GET THAT BY */
+/* INSERTING A CERTAIN POINTER OUT OF RANGE. */
+/*************************************************************************>*/
+}//Dblqh::systemErrorLab()
+
+/* ------- ERROR SITUATIONS ------- */
+
+void Dblqh::aiStateErrorCheckLab(Signal* signal, Uint32* dataPtr, Uint32 length)
+{
+ ndbrequire(tcConnectptr.p->abortState != TcConnectionrec::ABORT_IDLE);
+ if (tcConnectptr.p->transactionState != TcConnectionrec::IDLE) {
+ jam();
+/*************************************************************************>*/
+/* TRANSACTION ABORT IS ONGOING. IT CAN STILL BE A PART OF AN */
+/* OPERATION THAT SHOULD CONTINUE SINCE THE TUPLE HAS NOT ARRIVED */
+/* YET. THIS IS POSSIBLE IF ACTIVE CREATION OF THE FRAGMENT IS */
+/* ONGOING. */
+/*************************************************************************>*/
+ if (tcConnectptr.p->activeCreat == ZTRUE) {
+ jam();
+/*************************************************************************>*/
+/* ONGOING ABORTS DURING ACTIVE CREATION MUST SAVE THE ATTRIBUTE INFO*/
+/* SO THAT IT CAN BE SENT TO THE NEXT NODE IN THE COMMIT CHAIN. THIS */
+/* IS NEEDED SINCE ALL ABORTS DURING CREATION OF A FRAGMENT ARE NOT */
+/* REALLY ERRORS. A MISSING TUPLE TO BE UPDATED SIMPLY MEANS THAT */
+/* IT HASN'T BEEN TRANSFERRED TO THE NEW REPLICA YET. */
+/*************************************************************************>*/
+/*************************************************************************>*/
+/* AFTER THIS ERROR THE ABORT MUST BE COMPLETED. TO ENSURE THIS SET */
+/* ACTIVE CREATION TO FALSE. THIS WILL ENSURE THAT THE ABORT IS */
+/* COMPLETED. */
+/*************************************************************************>*/
+ if (saveTupattrbuf(signal, dataPtr, length) == ZOK) {
+ jam();
+ if (tcConnectptr.p->transactionState ==
+ TcConnectionrec::WAIT_AI_AFTER_ABORT) {
+ if (tcConnectptr.p->currTupAiLen == tcConnectptr.p->totReclenAi) {
+ jam();
+/*************************************************************************>*/
+/* WE WERE WAITING FOR MORE ATTRIBUTE INFO AFTER A SUCCESSFUL ABORT */
+/* IN ACTIVE CREATION STATE. THE TRANSACTION SHOULD CONTINUE AS IF */
+/* IT WAS COMMITTED. NOW ALL INFO HAS ARRIVED AND WE CAN CONTINUE */
+/* WITH NORMAL PROCESSING AS IF THE TRANSACTION WAS PREPARED. */
+/* SINCE THE FRAGMENT IS UNDER CREATION WE KNOW THAT LOGGING IS */
+/* DISABLED. WE STILL HAVE TO CATER FOR DIRTY OPERATION OR NOT. */
+/*************************************************************************>*/
+ tcConnectptr.p->abortState = TcConnectionrec::ABORT_IDLE;
+ rwConcludedAiLab(signal);
+ return;
+ } else {
+ ndbrequire(tcConnectptr.p->currTupAiLen < tcConnectptr.p->totReclenAi);
+ jam();
+ return; /* STILL WAITING FOR MORE ATTRIBUTE INFO */
+ }//if
+ }//if
+ } else {
+ jam();
+/*************************************************************************>*/
+/* AFTER THIS ERROR THE ABORT MUST BE COMPLETED. TO ENSURE THIS SET */
+/* ACTIVE CREATION TO ABORT. THIS WILL ENSURE THAT THE ABORT IS */
+/* COMPLETED AND THAT THE ERROR CODE IS PROPERLY SET */
+/*************************************************************************>*/
+ tcConnectptr.p->errorCode = terrorCode;
+ tcConnectptr.p->activeCreat = ZFALSE;
+ if (tcConnectptr.p->transactionState ==
+ TcConnectionrec::WAIT_AI_AFTER_ABORT) {
+ jam();
+/*************************************************************************>*/
+/* ABORT IS ALREADY COMPLETED. WE NEED TO RESTART IT FROM WHERE IT */
+/* WAS INTERRUPTED. */
+/*************************************************************************>*/
+ continueAbortLab(signal);
+ return;
+ } else {
+ jam();
+ return;
+/*************************************************************************>*/
+// Abort is ongoing. It will complete since we set the activeCreat = ZFALSE
+/*************************************************************************>*/
+ }//if
+ }//if
+ }//if
+ }//if
+/*************************************************************************>*/
+/* THE TRANSACTION HAS BEEN ABORTED. THUS IGNORE ALL SIGNALS BELONGING TO IT.*/
+/*************************************************************************>*/
+ return;
+}//Dblqh::aiStateErrorCheckLab()
+
+void Dblqh::takeOverErrorLab(Signal* signal)
+{
+ terrorCode = ZTAKE_OVER_ERROR;
+ abortErrorLab(signal);
+ return;
+}//Dblqh::takeOverErrorLab()
+
+/* ##########################################################################
+ * TEST MODULE
+ * ######################################################################### */
+#ifdef VM_TRACE
+void Dblqh::execTESTSIG(Signal* signal)
+{
+ jamEntry();
+ Uint32 userpointer = signal->theData[0];
+ BlockReference userblockref = signal->theData[1];
+ Uint32 testcase = signal->theData[2];
+
+ signal->theData[0] = userpointer;
+ signal->theData[1] = cownref;
+ signal->theData[2] = testcase;
+ sendSignal(userblockref, GSN_TESTSIG, signal, 25, JBB);
+ return;
+}//Dblqh::execTESTSIG()
+
+/* *************** */
+/* MEMCHECKREQ > */
+/* *************** */
+/* ************************************************************************>>
+ * THIS SIGNAL IS PURELY FOR TESTING PURPOSES. IT CHECKS THE FREE LIST
+ * AND REPORTS THE NUMBER OF FREE RECORDS.
+ * THIS CAN BE DONE TO ENSURE THAT NO RECORDS HAVE BEEN LOST
+ * ************************************************************************> */
+void Dblqh::execMEMCHECKREQ(Signal* signal)
+{
+ Uint32* dataPtr = &signal->theData[0];
+ jamEntry();
+ BlockReference userblockref = signal->theData[0];
+ Uint32 index = 0;
+ for (Uint32 i = 0; i < 7; i++)
+ dataPtr[i] = 0;
+ addfragptr.i = cfirstfreeAddfragrec;
+ while (addfragptr.i != RNIL) {
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ addfragptr.i = addfragptr.p->nextAddfragrec;
+ dataPtr[index]++;
+ }//while
+ index++;
+ attrinbufptr.i = cfirstfreeAttrinbuf;
+ while (attrinbufptr.i != RNIL) {
+ ptrCheckGuard(attrinbufptr, cattrinbufFileSize, attrbuf);
+ attrinbufptr.i = attrinbufptr.p->attrbuf[ZINBUF_NEXT];
+ dataPtr[index]++;
+ }//while
+ index++;
+ databufptr.i = cfirstfreeDatabuf;
+ while (databufptr.i != RNIL) {
+ ptrCheckGuard(databufptr, cdatabufFileSize, databuf);
+ databufptr.i = databufptr.p->nextDatabuf;
+ dataPtr[index]++;
+ }//while
+ index++;
+ fragptr.i = cfirstfreeFragrec;
+ while (fragptr.i != RNIL) {
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.i = fragptr.p->nextFrag;
+ dataPtr[index]++;
+ }//while
+ index++;
+ for (tabptr.i = 0;
+ tabptr.i < ctabrecFileSize;
+ tabptr.i++) {
+ ptrAss(tabptr, tablerec);
+ if (tabptr.p->tableStatus == Tablerec::NOT_DEFINED) {
+ dataPtr[index]++;
+ }//if
+ }//for
+ index++;
+ tcConnectptr.i = cfirstfreeTcConrec;
+ while (tcConnectptr.i != RNIL) {
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ tcConnectptr.i = tcConnectptr.p->nextTcConnectrec;
+ dataPtr[index]++;
+ }//while
+ sendSignal(userblockref, GSN_MEMCHECKCONF, signal, 10, JBB);
+ return;
+}//Dblqh::execMEMCHECKREQ()
+
+#endif
+
+/* ************************************************************************* */
+/* ************************* STATEMENT BLOCKS ****************************** */
+/* ************************************************************************* */
+/* ========================================================================= */
+/* ====== BUILD LINKED LIST OF LOG PAGES AFTER RECEIVING FSREADCONF ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::buildLinkedLogPageList(Signal* signal)
+{
+ LogPageRecordPtr bllLogPagePtr;
+
+ arrGuard(lfoPtr.p->noPagesRw - 1, 16);
+ arrGuard(lfoPtr.p->noPagesRw, 16);
+ for (UintR tbllIndex = 0; tbllIndex < lfoPtr.p->noPagesRw; tbllIndex++) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * BUILD LINKED LIST BUT ALSO ENSURE THAT PAGE IS NOT SEEN AS DIRTY
+ * INITIALLY.
+ * --------------------------------------------------------------------- */
+ bllLogPagePtr.i = lfoPtr.p->logPageArray[tbllIndex];
+ ptrCheckGuard(bllLogPagePtr, clogPageFileSize, logPageRecord);
+
+// #if VM_TRACE
+// // Check logPage checksum before modifying it
+// Uint32 calcCheckSum = calcPageCheckSum(bllLogPagePtr);
+// Uint32 checkSum = bllLogPagePtr.p->logPageWord[ZPOS_CHECKSUM];
+// if (checkSum != calcCheckSum) {
+// ndbout << "Redolog: Checksum failure." << endl;
+// progError(__LINE__, ERR_NDBREQUIRE, "Redolog: Checksum failure.");
+// }
+// #endif
+
+ bllLogPagePtr.p->logPageWord[ZNEXT_PAGE] =
+ lfoPtr.p->logPageArray[tbllIndex + 1];
+ bllLogPagePtr.p->logPageWord[ZPOS_DIRTY] = ZNOT_DIRTY;
+ }//for
+ bllLogPagePtr.i = lfoPtr.p->logPageArray[lfoPtr.p->noPagesRw - 1];
+ ptrCheckGuard(bllLogPagePtr, clogPageFileSize, logPageRecord);
+ bllLogPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
+}//Dblqh::buildLinkedLogPageList()
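+
+/* The commented-out block above verifies a per-page checksum. One simple way
+ * such a page checksum could be computed is sketched below; the block's real
+ * calcPageCheckSum() may use a different scheme, and the constants here are
+ * illustrative only.
+ * ------------------------------------------------------------------------- */
+namespace checksum_sketch {
+  const unsigned PAGE_WORDS   = 8192;  // assumed page size in 32-bit words
+  const unsigned CHECKSUM_POS = 0;     // assumed position of the checksum word
+
+  // XOR of every word on the page except the checksum slot itself.
+  unsigned calcChecksum(const unsigned* page)
+  {
+    unsigned sum = 0;
+    for (unsigned i = 0; i < PAGE_WORDS; i++) {
+      if (i != CHECKSUM_POS)
+        sum ^= page[i];
+    }
+    return sum;
+  }
+}//namespace checksum_sketch
+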
+
+/* =========================================================================
+ * ======= CHANGE TO NEXT MBYTE IN LOG =======
+ *
+ * ========================================================================= */
+void Dblqh::changeMbyte(Signal* signal)
+{
+ writeNextLog(signal);
+ writeFileDescriptor(signal);
+}//Dblqh::changeMbyte()
+
+/* ========================================================================= */
+/* ====== CHECK IF THIS COMMIT LOG RECORD IS TO BE EXECUTED ======= */
+/* */
+/* SUBROUTINE SHORT NAME = CEL */
+/* ========================================================================= */
+Uint32 Dblqh::checkIfExecLog(Signal* signal)
+{
+ tabptr.i = tcConnectptr.p->tableref;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ if (getFragmentrec(signal, tcConnectptr.p->fragmentid) &&
+ (tabptr.p->schemaVersion == tcConnectptr.p->schemaVersion)) {
+ if (fragptr.p->execSrStatus != Fragrecord::IDLE) {
+ if (fragptr.p->execSrNoReplicas > logPartPtr.p->execSrExecuteIndex) {
+ ndbrequire((fragptr.p->execSrNoReplicas - 1) < 4);
+ for (Uint32 i = logPartPtr.p->execSrExecuteIndex;
+ i < fragptr.p->execSrNoReplicas;
+ i++) {
+ jam();
+ if (tcConnectptr.p->gci >= fragptr.p->execSrStartGci[i]) {
+ if (tcConnectptr.p->gci <= fragptr.p->execSrLastGci[i]) {
+ jam();
+ logPartPtr.p->execSrExecuteIndex = i;
+ return ZOK;
+ }//if
+ }//if
+ }//for
+ }//if
+ }//if
+ }//if
+ return ZNOT_OK;
+}//Dblqh::checkIfExecLog()
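+
+/* A minimal sketch of the interval test performed above: a commit record is
+ * re-executed only if its GCI falls inside the [startGci, lastGci] range of
+ * one of the replicas this node is restoring, scanning from the current
+ * execute index. Types and names below are illustrative.
+ * ------------------------------------------------------------------------- */
+namespace execlog_check_sketch {
+  struct ReplicaRange { unsigned startGci; unsigned lastGci; };
+
+  // Returns the index of the first replica whose range contains 'gci',
+  // starting at 'fromIndex', or 'nReplicas' if there is none.
+  unsigned findReplicaForGci(const ReplicaRange* replicas, unsigned nReplicas,
+                             unsigned fromIndex, unsigned gci)
+  {
+    for (unsigned i = fromIndex; i < nReplicas; i++) {
+      if (gci >= replicas[i].startGci && gci <= replicas[i].lastGci)
+        return i;
+    }
+    return nReplicas;
+  }
+}//namespace execlog_check_sketch
+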
+
+/* ========================================================================= */
+/* == CHECK IF THERE IS LESS THAN 192 KBYTE IN THE BUFFER PLUS INCOMING === */
+/* READS ALREADY STARTED. IF THAT IS THE CASE THEN START ANOTHER READ IF */
+/* THERE ARE MORE PAGES IN THIS MBYTE. */
+/* */
+/* ========================================================================= */
+void Dblqh::checkReadExecSr(Signal* signal)
+{
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG;
+ logPartPtr.p->execSrPagesRead = logPartPtr.p->execSrPagesRead + 8;
+ logPartPtr.p->execSrPagesReading = logPartPtr.p->execSrPagesReading - 8;
+ if ((logPartPtr.p->execSrPagesRead + logPartPtr.p->execSrPagesReading) <
+ ZREAD_AHEAD_SIZE) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * WE HAVE LESS THAN 64 KBYTE OF LOG PAGES REMAINING IN MEMORY OR ON
+ * ITS WAY TO MAIN MEMORY. READ IN 8 MORE PAGES.
+ * --------------------------------------------------------------------- */
+ if ((logPartPtr.p->execSrPagesRead + logPartPtr.p->execSrPagesExecuted) <
+ ZPAGES_IN_MBYTE) {
+ jam();
+ /* --------------------------------------------------------------------
+ * THERE ARE MORE PAGES TO READ IN THIS MBYTE. READ THOSE FIRST
+ * IF >= ZPAGES_IN_MBYTE THEN THERE ARE NO MORE PAGES TO READ. THUS
+ * WE PROCEED WITH EXECUTION OF THE LOG.
+ * ------------------------------------------------------------------- */
+ readExecSr(signal);
+ logPartPtr.p->logExecState = LogPartRecord::LES_WAIT_READ_EXEC_SR;
+ }//if
+ }//if
+}//Dblqh::checkReadExecSr()
+
+/* ========================================================================= */
+/* ==== CHECK IF START OF NEW FRAGMENT IS COMPLETED AND WE CAN ======= */
+/* ==== GET THE START GCI ======= */
+/* */
+/* SUBROUTINE SHORT NAME = CTC */
+/* ========================================================================= */
+void Dblqh::checkScanTcCompleted(Signal* signal)
+{
+ tcConnectptr.p->logWriteState = TcConnectionrec::NOT_STARTED;
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.p->activeTcCounter = fragptr.p->activeTcCounter - 1;
+ if (fragptr.p->activeTcCounter == 0) {
+ jam();
+ fragptr.p->startGci = cnewestGci + 1;
+ tabptr.i = tcConnectptr.p->tableref;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ sendCopyActiveConf(signal, tcConnectptr.p->tableref);
+ }//if
+}//Dblqh::checkScanTcCompleted()
+
+/* ==========================================================================
+ * === CHECK IF ALL PARTS OF A SYSTEM RESTART ON A FRAGMENT ARE COMPLETED ===
+ *
+ * SUBROUTINE SHORT NAME = CSC
+ * ========================================================================= */
+void Dblqh::checkSrCompleted(Signal* signal)
+{
+ LcpLocRecordPtr cscLcpLocptr;
+
+ terrorCode = ZOK;
+ ptrGuard(lcpPtr);
+ cscLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+CSC_ACC_DOWHILE:
+ ptrCheckGuard(cscLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (cscLcpLocptr.p->lcpLocstate != LcpLocRecord::SR_ACC_COMPLETED) {
+ jam();
+ if (cscLcpLocptr.p->lcpLocstate != LcpLocRecord::SR_ACC_STARTED) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+ return;
+ }//if
+ cscLcpLocptr.i = cscLcpLocptr.p->nextLcpLoc;
+ if (cscLcpLocptr.i != RNIL) {
+ jam();
+ goto CSC_ACC_DOWHILE;
+ }//if
+ cscLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
+CSC_TUP_DOWHILE:
+ ptrCheckGuard(cscLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (cscLcpLocptr.p->lcpLocstate != LcpLocRecord::SR_TUP_COMPLETED) {
+ jam();
+ if (cscLcpLocptr.p->lcpLocstate != LcpLocRecord::SR_TUP_STARTED) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+ return;
+ }//if
+ cscLcpLocptr.i = cscLcpLocptr.p->nextLcpLoc;
+ if (cscLcpLocptr.i != RNIL) {
+ jam();
+ goto CSC_TUP_DOWHILE;
+ }//if
+ lcpPtr.p->lcpState = LcpRecord::LCP_SR_COMPLETED;
+}//Dblqh::checkSrCompleted()
+
+/* ------------------------------------------------------------------------- */
+/* ------ CLOSE A FILE DURING EXECUTION OF FRAGMENT LOG ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::closeFile(Signal* signal, LogFileRecordPtr clfLogFilePtr)
+{
+ signal->theData[0] = clfLogFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = clfLogFilePtr.i;
+ signal->theData[3] = ZCLOSE_NO_DELETE;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+}//Dblqh::closeFile()
+
+
+/* ---------------------------------------------------------------- */
+/* ---------------- A LOG PAGE HAS BEEN COMPLETED  ---------------- */
+/* */
+/* SUBROUTINE SHORT NAME = CLP */
+// Input Pointers:
+// logFilePtr
+// logPagePtr
+// logPartPtr
+// Defines lfoPtr
+/* ---------------------------------------------------------------- */
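+// The completed page is chained onto the log file record; buffered pages are
+// only written to disk once ZMAX_PAGES_WRITTEN pages have been collected,
+// unless this is the last write in the file or an enforced write.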
+void Dblqh::completedLogPage(Signal* signal, Uint32 clpType)
+{
+ LogPageRecordPtr clpLogPagePtr;
+ LogPageRecordPtr wlpLogPagePtr;
+ UintR twlpNoPages;
+ UintR twlpType;
+
+ if (logFilePtr.p->firstFilledPage == RNIL) {
+ jam();
+ logFilePtr.p->firstFilledPage = logPagePtr.i;
+ } else {
+ jam();
+ clpLogPagePtr.i = logFilePtr.p->lastFilledPage;
+ ptrCheckGuard(clpLogPagePtr, clogPageFileSize, logPageRecord);
+ clpLogPagePtr.p->logPageWord[ZNEXT_PAGE] = logPagePtr.i;
+ }//if
+ logFilePtr.p->lastFilledPage = logPagePtr.i;
+ logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
+ logFilePtr.p->noLogpagesInBuffer = logFilePtr.p->noLogpagesInBuffer + 1;
+ if (logFilePtr.p->noLogpagesInBuffer != ZMAX_PAGES_WRITTEN) {
+ if (clpType != ZLAST_WRITE_IN_FILE) {
+ if (clpType != ZENFORCE_WRITE) {
+ jam();
+ return;
+ }//if
+ }//if
+ }//if
+ twlpType = clpType;
+/* ------------------------------------------------------------------------- */
+/* ------ WRITE A SET OF LOG PAGES TO DISK ------- */
+/* */
+/* SUBROUTINE SHORT NAME: WLP */
+/* ------------------------------------------------------------------------- */
+ seizeLfo(signal);
+ initLfo(signal);
+ Uint32* dataPtr = &signal->theData[6];
+ twlpNoPages = 0;
+ wlpLogPagePtr.i = logFilePtr.p->firstFilledPage;
+ do {
+ dataPtr[twlpNoPages] = wlpLogPagePtr.i;
+ twlpNoPages++;
+ ptrCheckGuard(wlpLogPagePtr, clogPageFileSize, logPageRecord);
+
+ // Calculate checksum for page
+ wlpLogPagePtr.p->logPageWord[ZPOS_CHECKSUM] = calcPageCheckSum(wlpLogPagePtr);
+ wlpLogPagePtr.i = wlpLogPagePtr.p->logPageWord[ZNEXT_PAGE];
+ } while (wlpLogPagePtr.i != RNIL);
+ ndbrequire(twlpNoPages < 9);
+ dataPtr[twlpNoPages] = logFilePtr.p->filePosition;
+/* -------------------------------------------------- */
+/* SET TIMER ON THIS LOG PART TO SIGNIFY THAT A */
+/* LOG RECORD HAS BEEN SENT AT THIS TIME. */
+/* -------------------------------------------------- */
+ logPartPtr.p->logPartTimer = logPartPtr.p->logTimer;
+ signal->theData[0] = logFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lfoPtr.i;
+ logFilePtr.p->logFilePagesToDiskWithoutSynch += twlpNoPages;
+ if (twlpType == ZLAST_WRITE_IN_FILE) {
+ jam();
+ logFilePtr.p->logFilePagesToDiskWithoutSynch = 0;
+ signal->theData[3] = ZLIST_OF_MEM_PAGES_SYNCH;
+ } else if (logFilePtr.p->logFilePagesToDiskWithoutSynch >
+ MAX_REDO_PAGES_WITHOUT_SYNCH) {
+ jam();
+ logFilePtr.p->logFilePagesToDiskWithoutSynch = 0;
+ signal->theData[3] = ZLIST_OF_MEM_PAGES_SYNCH;
+ } else {
+ jam();
+ signal->theData[3] = ZLIST_OF_MEM_PAGES;
+ }//if
+ signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
+ signal->theData[5] = twlpNoPages;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 15, JBA);
+ if (twlpType == ZNORMAL) {
+ jam();
+ lfoPtr.p->lfoState = LogFileOperationRecord::ACTIVE_WRITE_LOG;
+ } else if (twlpType == ZLAST_WRITE_IN_FILE) {
+ jam();
+ lfoPtr.p->lfoState = LogFileOperationRecord::LAST_WRITE_IN_FILE;
+ } else {
+ ndbrequire(twlpType == ZENFORCE_WRITE);
+ jam();
+ lfoPtr.p->lfoState = LogFileOperationRecord::ACTIVE_WRITE_LOG;
+ }//if
+ /* ----------------------------------------------------------------------- */
+ /* ------ MOVE PAGES FROM LOG FILE TO LFO RECORD ------- */
+ /* */
+ /* ----------------------------------------------------------------------- */
+ /* -------------------------------------------------- */
+ /* MOVE PAGES TO LFO RECORD AND REMOVE THEM */
+ /* FROM LOG FILE RECORD. */
+ /* -------------------------------------------------- */
+ lfoPtr.p->firstLfoPage = logFilePtr.p->firstFilledPage;
+ logFilePtr.p->firstFilledPage = RNIL;
+ logFilePtr.p->lastFilledPage = RNIL;
+ logFilePtr.p->noLogpagesInBuffer = 0;
+
+ lfoPtr.p->noPagesRw = twlpNoPages;
+ lfoPtr.p->lfoPageNo = logFilePtr.p->filePosition;
+ lfoPtr.p->lfoWordWritten = ZPAGE_SIZE - 1;
+ logFilePtr.p->filePosition += twlpNoPages;
+}//Dblqh::completedLogPage()
+
+/* ---------------------------------------------------------------- */
+/* ---------------- DELETE FRAGMENT RECORD ------------------------ */
+/* */
+/* SUBROUTINE SHORT NAME = DFR */
+/* ---------------------------------------------------------------- */
+void Dblqh::deleteFragrec(Uint32 fragId)
+{
+ Uint32 indexFound= RNIL;
+ fragptr.i = RNIL;
+ for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
+ jam();
+ if (tabptr.p->fragid[i] == fragId) {
+ fragptr.i = tabptr.p->fragrec[i];
+ indexFound = i;
+ break;
+ }//if
+ }//for
+ if (fragptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ tabptr.p->fragid[indexFound] = ZNIL;
+ tabptr.p->fragrec[indexFound] = RNIL;
+ releaseFragrec();
+ }//if
+}//Dblqh::deleteFragrec()
+
+/* ------------------------------------------------------------------------- */
+/* ------- FIND LOG FILE RECORD GIVEN FILE NUMBER ------- */
+/* */
+/* INPUT: TFLF_FILE_NO THE FILE NUMBER */
+/* FLF_LOG_PART_PTR THE LOG PART RECORD */
+/* OUTPUT: FLF_LOG_FILE_PTR THE FOUND LOG FILE RECORD */
+/* SUBROUTINE SHORT NAME = FLF */
+/* ------------------------------------------------------------------------- */
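+// The log files of a part are linked in file number order starting at
+// firstLogfile; the ndbrequire checks below verify that the file is found at
+// list position fileNo and within noLogFiles steps.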
+void Dblqh::findLogfile(Signal* signal,
+ Uint32 fileNo,
+ LogPartRecordPtr flfLogPartPtr,
+ LogFileRecordPtr* parLogFilePtr)
+{
+ LogFileRecordPtr locLogFilePtr;
+ locLogFilePtr.i = flfLogPartPtr.p->firstLogfile;
+ Uint32 loopCount = 0;
+ while (true) {
+ ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
+ if (locLogFilePtr.p->fileNo == fileNo) {
+ jam();
+ ndbrequire(loopCount == fileNo);
+ parLogFilePtr->i = locLogFilePtr.i;
+ parLogFilePtr->p = locLogFilePtr.p;
+ return;
+ }//if
+ locLogFilePtr.i = locLogFilePtr.p->nextLogFile;
+ loopCount++;
+ ndbrequire(loopCount < flfLogPartPtr.p->noLogFiles);
+ }//while
+}//Dblqh::findLogfile()
+
+/* ------------------------------------------------------------------------- */
+/* ------ FIND PAGE REFERENCE IN MEMORY BUFFER AT LOG EXECUTION ------- */
+/* */
+/* ------------------------------------------------------------------------- */
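+// Walks the page reference list backwards from lastPageRef. Each page
+// reference record covers 8 consecutive pages of one file (see
+// moveToPageRef), so a hit requires the same file number and a start page
+// within 8 pages of prPageNo.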
+void Dblqh::findPageRef(Signal* signal, CommitLogRecord* commitLogRecord)
+{
+ UintR tfprIndex;
+
+ logPagePtr.i = RNIL;
+ if (ERROR_INSERTED(5020)) {
+ // Force system to read page from disk
+ return;
+ }
+ pageRefPtr.i = logPartPtr.p->lastPageRef;
+ do {
+ ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
+ if (commitLogRecord->fileNo == pageRefPtr.p->prFileNo) {
+ if (commitLogRecord->startPageNo >= pageRefPtr.p->prPageNo) {
+ if (commitLogRecord->startPageNo < (Uint16) (pageRefPtr.p->prPageNo + 8)) {
+ jam();
+ tfprIndex = commitLogRecord->startPageNo - pageRefPtr.p->prPageNo;
+ logPagePtr.i = pageRefPtr.p->pageRef[tfprIndex];
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ return;
+ }//if
+ }//if
+ }//if
+ pageRefPtr.i = pageRefPtr.p->prPrev;
+ } while (pageRefPtr.i != RNIL);
+}//Dblqh::findPageRef()
+
+/* ------------------------------------------------------------------------- */
+/* ------ GET FIRST OPERATION QUEUED FOR LOGGING ------- */
+/* */
+/* SUBROUTINE SHORT NAME = GFL */
+/* ------------------------------------------------------------------------- */
+void Dblqh::getFirstInLogQueue(Signal* signal)
+{
+ TcConnectionrecPtr gflTcConnectptr;
+/* -------------------------------------------------- */
+/* GET THE FIRST FROM THE LOG QUEUE AND REMOVE */
+/* IT FROM THE QUEUE. */
+/* -------------------------------------------------- */
+ gflTcConnectptr.i = logPartPtr.p->firstLogQueue;
+ ptrCheckGuard(gflTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ logPartPtr.p->firstLogQueue = gflTcConnectptr.p->nextTcLogQueue;
+ if (logPartPtr.p->firstLogQueue == RNIL) {
+ jam();
+ logPartPtr.p->lastLogQueue = RNIL;
+ }//if
+}//Dblqh::getFirstInLogQueue()
+
+/* ---------------------------------------------------------------- */
+/* ---------------- GET FRAGMENT RECORD --------------------------- */
+/* INPUT: TFRAGID FRAGMENT ID LOOKING FOR */
+/* TABPTR TABLE ID */
+/* SUBROUTINE SHORT NAME = GFR */
+/* ---------------------------------------------------------------- */
+bool Dblqh::getFragmentrec(Signal* signal, Uint32 fragId)
+{
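+ // Search from MAX_FRAG_PER_NODE - 1 down to 0; the loop terminates when i
+ // wraps around and (UintR)~i becomes zero.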
+ for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (UintR)~i; i--) {
+ jam();
+ if (tabptr.p->fragid[i] == fragId) {
+ fragptr.i = tabptr.p->fragrec[i];
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ return true;
+ }//if
+ }//for
+ return false;
+}//Dblqh::getFragmentrec()
+
+/* ========================================================================= */
+/* ======              INITIATE ADD FRAGMENT RECORD                 ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseAddfragrec(Signal* signal)
+{
+ if (caddfragrecFileSize != 0) {
+ for (addfragptr.i = 0; addfragptr.i < caddfragrecFileSize; addfragptr.i++) {
+ ptrAss(addfragptr, addFragRecord);
+ addfragptr.p->addfragStatus = AddFragRecord::FREE;
+ addfragptr.p->nextAddfragrec = addfragptr.i + 1;
+ }//for
+ addfragptr.i = caddfragrecFileSize - 1;
+ ptrAss(addfragptr, addFragRecord);
+ addfragptr.p->nextAddfragrec = RNIL;
+ cfirstfreeAddfragrec = 0;
+ } else {
+ jam();
+ cfirstfreeAddfragrec = RNIL;
+ }//if
+}//Dblqh::initialiseAddfragrec()
+
+/* ========================================================================= */
+/* ====== INITIATE ATTRIBUTE IN AND OUT DATA BUFFER ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseAttrbuf(Signal* signal)
+{
+ if (cattrinbufFileSize != 0) {
+ for (attrinbufptr.i = 0;
+ attrinbufptr.i < cattrinbufFileSize;
+ attrinbufptr.i++) {
+ refresh_watch_dog();
+ ptrAss(attrinbufptr, attrbuf);
+ attrinbufptr.p->attrbuf[ZINBUF_NEXT] = attrinbufptr.i + 1;
+ }//for
+ /* NEXT ATTRINBUF */
+ attrinbufptr.i = cattrinbufFileSize - 1;
+ ptrAss(attrinbufptr, attrbuf);
+ attrinbufptr.p->attrbuf[ZINBUF_NEXT] = RNIL; /* NEXT ATTRINBUF */
+ cfirstfreeAttrinbuf = 0;
+ } else {
+ jam();
+ cfirstfreeAttrinbuf = RNIL;
+ }//if
+}//Dblqh::initialiseAttrbuf()
+
+/* ========================================================================= */
+/* ====== INITIATE DATA BUFFER ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseDatabuf(Signal* signal)
+{
+ if (cdatabufFileSize != 0) {
+ for (databufptr.i = 0; databufptr.i < cdatabufFileSize; databufptr.i++) {
+ refresh_watch_dog();
+ ptrAss(databufptr, databuf);
+ databufptr.p->nextDatabuf = databufptr.i + 1;
+ }//for
+ databufptr.i = cdatabufFileSize - 1;
+ ptrAss(databufptr, databuf);
+ databufptr.p->nextDatabuf = RNIL;
+ cfirstfreeDatabuf = 0;
+ } else {
+ jam();
+ cfirstfreeDatabuf = RNIL;
+ }//if
+}//Dblqh::initialiseDatabuf()
+
+/* ========================================================================= */
+/* ====== INITIATE FRAGMENT RECORD ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseFragrec(Signal* signal)
+{
+ if (cfragrecFileSize != 0) {
+ for (fragptr.i = 0; fragptr.i < cfragrecFileSize; fragptr.i++) {
+ refresh_watch_dog();
+ ptrAss(fragptr, fragrecord);
+ fragptr.p->fragStatus = Fragrecord::FREE;
+ fragptr.p->fragActiveStatus = ZFALSE;
+ fragptr.p->execSrStatus = Fragrecord::IDLE;
+ fragptr.p->srStatus = Fragrecord::SS_IDLE;
+ fragptr.p->nextFrag = fragptr.i + 1;
+ }//for
+ fragptr.i = cfragrecFileSize - 1;
+ ptrAss(fragptr, fragrecord);
+ fragptr.p->nextFrag = RNIL;
+ cfirstfreeFragrec = 0;
+ } else {
+ jam();
+ cfirstfreeFragrec = RNIL;
+ }//if
+}//Dblqh::initialiseFragrec()
+
+/* ========================================================================= */
+/* ======            INITIATE GLOBAL CHECKPOINT RECORD              ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseGcprec(Signal* signal)
+{
+ UintR tigpIndex;
+
+ if (cgcprecFileSize != 0) {
+ for (gcpPtr.i = 0; gcpPtr.i < cgcprecFileSize; gcpPtr.i++) {
+ ptrAss(gcpPtr, gcpRecord);
+ for (tigpIndex = 0; tigpIndex <= 3; tigpIndex++) {
+ gcpPtr.p->gcpLogPartState[tigpIndex] = ZIDLE;
+ gcpPtr.p->gcpSyncReady[tigpIndex] = ZFALSE;
+ }//for
+ }//for
+ }//if
+}//Dblqh::initialiseGcprec()
+
+/* ========================================================================= */
+/* ====== INITIATE LCP RECORD ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseLcpRec(Signal* signal)
+{
+ if (clcpFileSize != 0) {
+ for (lcpPtr.i = 0; lcpPtr.i < clcpFileSize; lcpPtr.i++) {
+ ptrAss(lcpPtr, lcpRecord);
+ lcpPtr.p->lcpState = LcpRecord::LCP_IDLE;
+ lcpPtr.p->lcpQueued = false;
+ lcpPtr.p->firstLcpLocAcc = RNIL;
+ lcpPtr.p->firstLcpLocTup = RNIL;
+ lcpPtr.p->reportEmpty = false;
+ lcpPtr.p->lastFragmentFlag = false;
+ }//for
+ }//if
+}//Dblqh::initialiseLcpRec()
+
+/* ========================================================================= */
+/* ====== INITIATE LCP LOCAL RECORD ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseLcpLocrec(Signal* signal)
+{
+ if (clcpLocrecFileSize != 0) {
+ for (lcpLocptr.i = 0; lcpLocptr.i < clcpLocrecFileSize; lcpLocptr.i++) {
+ ptrAss(lcpLocptr, lcpLocRecord);
+ lcpLocptr.p->nextLcpLoc = lcpLocptr.i + 1;
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::IDLE;
+ lcpLocptr.p->masterLcpRec = RNIL;
+ lcpLocptr.p->waitingBlock = LcpLocRecord::NONE;
+ }//for
+ lcpLocptr.i = clcpLocrecFileSize - 1;
+ ptrAss(lcpLocptr, lcpLocRecord);
+ lcpLocptr.p->nextLcpLoc = RNIL;
+ cfirstfreeLcpLoc = 0;
+ } else {
+ jam();
+ cfirstfreeLcpLoc = RNIL;
+ }//if
+}//Dblqh::initialiseLcpLocrec()
+
+/* ========================================================================= */
+/* ====== INITIATE LOG FILE OPERATION RECORD ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseLfo(Signal* signal)
+{
+ if (clfoFileSize != 0) {
+ for (lfoPtr.i = 0; lfoPtr.i < clfoFileSize; lfoPtr.i++) {
+ ptrAss(lfoPtr, logFileOperationRecord);
+ lfoPtr.p->lfoState = LogFileOperationRecord::IDLE;
+ lfoPtr.p->lfoTimer = 0;
+ lfoPtr.p->nextLfo = lfoPtr.i + 1;
+ }//for
+ lfoPtr.i = clfoFileSize - 1;
+ ptrAss(lfoPtr, logFileOperationRecord);
+ lfoPtr.p->nextLfo = RNIL;
+ cfirstfreeLfo = 0;
+ } else {
+ jam();
+ cfirstfreeLfo = RNIL;
+ }//if
+}//Dblqh::initialiseLfo()
+
+/* ========================================================================= */
+/* ====== INITIATE LOG FILE RECORD ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseLogFile(Signal* signal)
+{
+ if (clogFileFileSize != 0) {
+ for (logFilePtr.i = 0; logFilePtr.i < clogFileFileSize; logFilePtr.i++) {
+ ptrAss(logFilePtr, logFileRecord);
+ logFilePtr.p->nextLogFile = logFilePtr.i + 1;
+ logFilePtr.p->logFileStatus = LogFileRecord::LFS_IDLE;
+ }//for
+ logFilePtr.i = clogFileFileSize - 1;
+ ptrAss(logFilePtr, logFileRecord);
+ logFilePtr.p->nextLogFile = RNIL;
+ cfirstfreeLogFile = 0;
+ } else {
+ jam();
+ cfirstfreeLogFile = RNIL;
+ }//if
+}//Dblqh::initialiseLogFile()
+
+/* ========================================================================= */
+/* ====== INITIATE LOG PAGES ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseLogPage(Signal* signal)
+{
+ if (clogPageFileSize != 0) {
+ for (logPagePtr.i = 0; logPagePtr.i < clogPageFileSize; logPagePtr.i++) {
+ refresh_watch_dog();
+ ptrAss(logPagePtr, logPageRecord);
+ logPagePtr.p->logPageWord[ZNEXT_PAGE] = logPagePtr.i + 1;
+ }//for
+ logPagePtr.i = clogPageFileSize - 1;
+ ptrAss(logPagePtr, logPageRecord);
+ logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
+ cfirstfreeLogPage = 0;
+ } else {
+ jam();
+ cfirstfreeLogPage = RNIL;
+ }//if
+ cnoOfLogPages = clogPageFileSize;
+}//Dblqh::initialiseLogPage()
+
+/* =========================================================================
+ * ====== INITIATE LOG PART RECORD =======
+ *
+ * ========================================================================= */
+void Dblqh::initialiseLogPart(Signal* signal)
+{
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ ptrAss(logPartPtr, logPartRecord);
+ logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_FALSE;
+ logPartPtr.p->LogLqhKeyReqSent = ZFALSE;
+ logPartPtr.p->logPartNewestCompletedGCI = (UintR)-1;
+ }//for
+}//Dblqh::initialiseLogPart()
+
+void Dblqh::initialisePageRef(Signal* signal)
+{
+ if (cpageRefFileSize != 0) {
+ for (pageRefPtr.i = 0;
+ pageRefPtr.i < cpageRefFileSize;
+ pageRefPtr.i++) {
+ ptrAss(pageRefPtr, pageRefRecord);
+ pageRefPtr.p->prNext = pageRefPtr.i + 1;
+ }//for
+ pageRefPtr.i = cpageRefFileSize - 1;
+ ptrAss(pageRefPtr, pageRefRecord);
+ pageRefPtr.p->prNext = RNIL;
+ cfirstfreePageRef = 0;
+ } else {
+ jam();
+ cfirstfreePageRef = RNIL;
+ }//if
+}//Dblqh::initialisePageRef()
+
+/* ==========================================================================
+ * ======= INITIATE RECORDS =======
+ *
+ * TAKES CARE OF INITIATION OF ALL RECORDS IN THIS BLOCK.
+ * ========================================================================= */
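+// Each step initialises one group of records and then re-schedules itself
+// with a CONTINUEB (ZINITIALISE_RECORDS) signal carrying data + 1; the final
+// step instead replies to the requester with READ_CONFIG_CONF.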
+void Dblqh::initialiseRecordsLab(Signal* signal, Uint32 data,
+ Uint32 retRef, Uint32 retData)
+{
+ Uint32 i;
+ switch (data) {
+ case 0:
+ jam();
+ for (i = 0; i < MAX_NDB_NODES; i++) {
+ cnodeSrState[i] = ZSTART_SR;
+ cnodeExecSrState[i] = ZSTART_SR;
+ }//for
+ for (i = 0; i < 1024; i++) {
+ ctransidHash[i] = RNIL;
+ }//for
+ for (i = 0; i < 4; i++) {
+ cactiveCopy[i] = RNIL;
+ }//for
+ cnoActiveCopy = 0;
+ cCounterAccCommitBlocked = 0;
+ cCounterTupCommitBlocked = 0;
+ caccCommitBlocked = false;
+ ctupCommitBlocked = false;
+ cCommitBlocked = false;
+ ccurrentGcprec = RNIL;
+ caddNodeState = ZFALSE;
+ cstartRecReq = ZFALSE;
+ cnewestGci = (UintR)-1;
+ cnewestCompletedGci = (UintR)-1;
+ crestartOldestGci = 0;
+ crestartNewestGci = 0;
+ cfirstWaitFragSr = RNIL;
+ cfirstCompletedFragSr = RNIL;
+ csrPhaseStarted = ZSR_NO_PHASE_STARTED;
+ csrPhasesCompleted = 0;
+ cmasterDihBlockref = 0;
+ cnoFragmentsExecSr = 0;
+ clcpCompletedState = LCP_IDLE;
+ csrExecUndoLogState = EULS_IDLE;
+ c_lcpId = 0;
+ cnoOfFragsCheckpointed = 0;
+ break;
+ case 1:
+ jam();
+ initialiseAddfragrec(signal);
+ break;
+ case 2:
+ jam();
+ initialiseAttrbuf(signal);
+ break;
+ case 3:
+ jam();
+ initialiseDatabuf(signal);
+ break;
+ case 4:
+ jam();
+ initialiseFragrec(signal);
+ break;
+ case 5:
+ jam();
+ initialiseGcprec(signal);
+ initialiseLcpRec(signal);
+ initialiseLcpLocrec(signal);
+ break;
+ case 6:
+ jam();
+ initialiseLogPage(signal);
+ break;
+ case 7:
+ jam();
+ initialiseLfo(signal);
+ break;
+ case 8:
+ jam();
+ initialiseLogFile(signal);
+ initialiseLogPart(signal);
+ break;
+ case 9:
+ jam();
+ initialisePageRef(signal);
+ break;
+ case 10:
+ jam();
+ initialiseScanrec(signal);
+ break;
+ case 11:
+ jam();
+ initialiseTabrec(signal);
+ break;
+ case 12:
+ jam();
+ initialiseTcNodeFailRec(signal);
+ initialiseTcrec(signal);
+ {
+ ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = retData;
+ sendSignal(retRef, GSN_READ_CONFIG_CONF, signal,
+ ReadConfigConf::SignalLength, JBB);
+ }
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+
+ signal->theData[0] = ZINITIALISE_RECORDS;
+ signal->theData[1] = data + 1;
+ signal->theData[2] = 0;
+ signal->theData[3] = retRef;
+ signal->theData[4] = retData;
+ sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 5, JBB);
+
+ return;
+}//Dblqh::initialiseRecordsLab()
+
+/* ==========================================================================
+ * =======                   INITIATE SCAN RECORDS                   =======
+ *
+ * ========================================================================= */
+void Dblqh::initialiseScanrec(Signal* signal)
+{
+ ndbrequire(cscanrecFileSize > 1);
+ DLList<ScanRecord> tmp(c_scanRecordPool);
+ while (tmp.seize(scanptr)){
+ //new (scanptr.p) ScanRecord();
+ refresh_watch_dog();
+ scanptr.p->scanType = ScanRecord::ST_IDLE;
+ scanptr.p->scanState = ScanRecord::SCAN_FREE;
+ scanptr.p->scanTcWaiting = ZFALSE;
+ scanptr.p->nextHash = RNIL;
+ scanptr.p->prevHash = RNIL;
+ scanptr.p->scan_acc_index= 0;
+ scanptr.p->scan_acc_attr_recs= 0;
+ }
+ tmp.release();
+}//Dblqh::initialiseScanrec()
+
+/* ==========================================================================
+ * ======= INITIATE TABLE RECORD =======
+ *
+ * ========================================================================= */
+void Dblqh::initialiseTabrec(Signal* signal)
+{
+ if (ctabrecFileSize != 0) {
+ for (tabptr.i = 0; tabptr.i < ctabrecFileSize; tabptr.i++) {
+ refresh_watch_dog();
+ ptrAss(tabptr, tablerec);
+ tabptr.p->tableStatus = Tablerec::NOT_DEFINED;
+ tabptr.p->usageCount = 0;
+ for (Uint32 i = 0; i <= (MAX_FRAG_PER_NODE - 1); i++) {
+ tabptr.p->fragid[i] = ZNIL;
+ tabptr.p->fragrec[i] = RNIL;
+ }//for
+ }//for
+ }//if
+}//Dblqh::initialiseTabrec()
+
+/* ==========================================================================
+ * ======= INITIATE TC CONNECTION RECORD =======
+ *
+ * ========================================================================= */
+void Dblqh::initialiseTcrec(Signal* signal)
+{
+ if (ctcConnectrecFileSize != 0) {
+ for (tcConnectptr.i = 0;
+ tcConnectptr.i < ctcConnectrecFileSize;
+ tcConnectptr.i++) {
+ refresh_watch_dog();
+ ptrAss(tcConnectptr, tcConnectionrec);
+ tcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED;
+ tcConnectptr.p->tcScanRec = RNIL;
+ tcConnectptr.p->logWriteState = TcConnectionrec::NOT_STARTED;
+ tcConnectptr.p->firstAttrinbuf = RNIL;
+ tcConnectptr.p->lastAttrinbuf = RNIL;
+ tcConnectptr.p->firstTupkeybuf = RNIL;
+ tcConnectptr.p->lastTupkeybuf = RNIL;
+ tcConnectptr.p->tcTimer = 0;
+ tcConnectptr.p->nextTcConnectrec = tcConnectptr.i + 1;
+ }//for
+ tcConnectptr.i = ctcConnectrecFileSize - 1;
+ ptrAss(tcConnectptr, tcConnectionrec);
+ tcConnectptr.p->nextTcConnectrec = RNIL;
+ cfirstfreeTcConrec = 0;
+ } else {
+ jam();
+ cfirstfreeTcConrec = RNIL;
+ }//if
+}//Dblqh::initialiseTcrec()
+
+/* ==========================================================================
+ * =======               INITIATE TC NODE FAIL RECORD                =======
+ *
+ * ========================================================================= */
+void Dblqh::initialiseTcNodeFailRec(Signal* signal)
+{
+ if (ctcNodeFailrecFileSize != 0) {
+ for (tcNodeFailptr.i = 0;
+ tcNodeFailptr.i < ctcNodeFailrecFileSize;
+ tcNodeFailptr.i++) {
+ ptrAss(tcNodeFailptr, tcNodeFailRecord);
+ tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_FALSE;
+ }//for
+ }//if
+}//Dblqh::initialiseTcNodeFailRec()
+
+/* ==========================================================================
+ * ======= INITIATE FRAGMENT RECORD =======
+ *
+ * SUBROUTINE SHORT NAME = IF
+ * ========================================================================= */
+void Dblqh::initFragrec(Signal* signal,
+ Uint32 tableId,
+ Uint32 fragId,
+ Uint32 copyType)
+{
+ new (fragptr.p) Fragrecord();
+ fragptr.p->m_scanNumberMask.set(); // All is free
+ fragptr.p->accBlockref = caccBlockref;
+ fragptr.p->accBlockedList = RNIL;
+ fragptr.p->activeList = RNIL;
+ fragptr.p->firstWaitQueue = RNIL;
+ fragptr.p->lastWaitQueue = RNIL;
+ fragptr.p->fragStatus = Fragrecord::DEFINED;
+ fragptr.p->fragCopy = copyType;
+ fragptr.p->tupBlockref = ctupBlockref;
+ fragptr.p->tuxBlockref = ctuxBlockref;
+ fragptr.p->lcpRef = RNIL;
+ fragptr.p->logFlag = Fragrecord::STATE_TRUE;
+ fragptr.p->lcpFlag = Fragrecord::LCP_STATE_TRUE;
+ for (Uint32 i = 0; i < MAX_LCP_STORED; i++) {
+ fragptr.p->lcpId[i] = 0;
+ }//for
+ fragptr.p->maxGciCompletedInLcp = 0;
+ fragptr.p->maxGciInLcp = 0;
+ fragptr.p->copyFragState = ZIDLE;
+ fragptr.p->nextFrag = RNIL;
+ fragptr.p->newestGci = cnewestGci;
+ fragptr.p->nextLcp = 0;
+ fragptr.p->tabRef = tableId;
+ fragptr.p->fragId = fragId;
+ fragptr.p->srStatus = Fragrecord::SS_IDLE;
+ fragptr.p->execSrStatus = Fragrecord::IDLE;
+ fragptr.p->execSrNoReplicas = 0;
+ fragptr.p->fragDistributionKey = 0;
+ fragptr.p->activeTcCounter = 0;
+ fragptr.p->tableFragptr = RNIL;
+}//Dblqh::initFragrec()
+
+/* ==========================================================================
+ * ======= INITIATE FRAGMENT RECORD FOR SYSTEM RESTART =======
+ *
+ * SUBROUTINE SHORT NAME = IFS
+ * ========================================================================= */
+void Dblqh::initFragrecSr(Signal* signal)
+{
+ const StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0];
+ Uint32 lcpNo = startFragReq->lcpNo;
+ Uint32 noOfLogNodes = startFragReq->noOfLogNodes;
+ ndbrequire(noOfLogNodes <= 4);
+ fragptr.p->fragStatus = Fragrecord::CRASH_RECOVERING;
+ fragptr.p->srBlockref = startFragReq->userRef;
+ fragptr.p->srUserptr = startFragReq->userPtr;
+ fragptr.p->srChkpnr = lcpNo;
+ if (lcpNo == (MAX_LCP_STORED - 1)) {
+ jam();
+ fragptr.p->lcpId[lcpNo] = startFragReq->lcpId;
+ fragptr.p->nextLcp = 0;
+ } else if (lcpNo < (MAX_LCP_STORED - 1)) {
+ jam();
+ fragptr.p->lcpId[lcpNo] = startFragReq->lcpId;
+ fragptr.p->nextLcp = lcpNo + 1;
+ } else {
+ ndbrequire(lcpNo == ZNIL);
+ jam();
+ fragptr.p->nextLcp = 0;
+ }//if
+ fragptr.p->srNoLognodes = noOfLogNodes;
+ fragptr.p->logFlag = Fragrecord::STATE_FALSE;
+ fragptr.p->srStatus = Fragrecord::SS_IDLE;
+ if (noOfLogNodes > 0) {
+ jam();
+ for (Uint32 i = 0; i < noOfLogNodes; i++) {
+ jam();
+ fragptr.p->srStartGci[i] = startFragReq->startGci[i];
+ fragptr.p->srLastGci[i] = startFragReq->lastGci[i];
+ fragptr.p->srLqhLognode[i] = startFragReq->lqhLogNode[i];
+ }//for
+ fragptr.p->newestGci = startFragReq->lastGci[noOfLogNodes - 1];
+ } else {
+ fragptr.p->newestGci = cnewestGci;
+ }//if
+}//Dblqh::initFragrecSr()
+
+/* ==========================================================================
+ * ======= INITIATE INFORMATION ABOUT GLOBAL CHECKPOINTS =======
+ * IN LOG FILE RECORDS
+ *
+ * INPUT: LOG_FILE_PTR CURRENT LOG FILE
+ * TNO_FD_DESCRIPTORS THE NUMBER OF FILE DESCRIPTORS
+ * TO READ FROM THE LOG PAGE
+ * LOG_PAGE_PTR PAGE ZERO IN LOG FILE
+ * SUBROUTINE SHORT NAME = IGL
+ * ========================================================================= */
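+// Page zero of the file holds, for each file descriptor, three arrays of
+// ZNO_MBYTES_IN_FILE words each: max GCI completed, max GCI started and
+// last prepare reference. Each descriptor occupies ZFD_PART_SIZE words after
+// the page and FD headers, and the loop below steps backwards through the
+// previous log files, one file per descriptor.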
+void Dblqh::initGciInLogFileRec(Signal* signal, Uint32 noFdDescriptors)
+{
+ LogFileRecordPtr iglLogFilePtr;
+ UintR tiglLoop;
+ UintR tiglIndex;
+
+ tiglLoop = 0;
+ iglLogFilePtr.i = logFilePtr.i;
+ iglLogFilePtr.p = logFilePtr.p;
+IGL_LOOP:
+ for (tiglIndex = 0; tiglIndex <= ZNO_MBYTES_IN_FILE - 1; tiglIndex++) {
+ arrGuard(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (tiglLoop * ZFD_PART_SIZE)) + tiglIndex, ZPAGE_SIZE);
+ iglLogFilePtr.p->logMaxGciCompleted[tiglIndex] =
+ logPagePtr.p->logPageWord[((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
+ arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + ZNO_MBYTES_IN_FILE) +
+ (tiglLoop * ZFD_PART_SIZE)) + tiglIndex, ZPAGE_SIZE);
+ iglLogFilePtr.p->logMaxGciStarted[tiglIndex] =
+ logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ ZNO_MBYTES_IN_FILE) +
+ (tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
+ arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (2 * ZNO_MBYTES_IN_FILE)) + (tiglLoop * ZFD_PART_SIZE)) +
+ tiglIndex, ZPAGE_SIZE);
+ iglLogFilePtr.p->logLastPrepRef[tiglIndex] =
+ logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (2 * ZNO_MBYTES_IN_FILE)) +
+ (tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
+ }//for
+ tiglLoop = tiglLoop + 1;
+ if (tiglLoop < noFdDescriptors) {
+ jam();
+ iglLogFilePtr.i = iglLogFilePtr.p->prevLogFile;
+ ptrCheckGuard(iglLogFilePtr, clogFileFileSize, logFileRecord);
+ goto IGL_LOOP;
+ }//if
+}//Dblqh::initGciInLogFileRec()
+
+/* ==========================================================================
+ * ======= INITIATE LCP RECORD WHEN USED FOR SYSTEM RESTART =======
+ *
+ * SUBROUTINE SHORT NAME = ILS
+ * ========================================================================= */
+void Dblqh::initLcpSr(Signal* signal,
+ Uint32 lcpNo,
+ Uint32 lcpId,
+ Uint32 tableId,
+ Uint32 fragId,
+ Uint32 fragPtr)
+{
+ lcpPtr.p->lcpQueued = false;
+ lcpPtr.p->currentFragment.fragPtrI = fragPtr;
+ lcpPtr.p->currentFragment.lcpFragOrd.lcpNo = lcpNo;
+ lcpPtr.p->currentFragment.lcpFragOrd.lcpId = lcpId;
+ lcpPtr.p->currentFragment.lcpFragOrd.tableId = tableId;
+ lcpPtr.p->currentFragment.lcpFragOrd.fragmentId = fragId;
+ lcpPtr.p->lcpState = LcpRecord::LCP_SR_WAIT_FRAGID;
+ lcpPtr.p->firstLcpLocAcc = RNIL;
+ lcpPtr.p->firstLcpLocTup = RNIL;
+ lcpPtr.p->lcpAccptr = RNIL;
+}//Dblqh::initLcpSr()
+
+/* ==========================================================================
+ * ======= INITIATE LOG PART =======
+ *
+ * ========================================================================= */
+void Dblqh::initLogpart(Signal* signal)
+{
+ logPartPtr.p->execSrLogPage = RNIL;
+ logPartPtr.p->execSrLogPageIndex = ZNIL;
+ logPartPtr.p->execSrExecuteIndex = 0;
+ logPartPtr.p->noLogFiles = cnoLogFiles;
+ logPartPtr.p->logLap = 0;
+ logPartPtr.p->logTailFileNo = 0;
+ logPartPtr.p->logTailMbyte = 0;
+ logPartPtr.p->lastMbyte = ZNIL;
+ logPartPtr.p->logPartState = LogPartRecord::SR_FIRST_PHASE;
+ logPartPtr.p->logExecState = LogPartRecord::LES_IDLE;
+ logPartPtr.p->firstLogTcrec = RNIL;
+ logPartPtr.p->lastLogTcrec = RNIL;
+ logPartPtr.p->firstLogQueue = RNIL;
+ logPartPtr.p->lastLogQueue = RNIL;
+ logPartPtr.p->gcprec = RNIL;
+ logPartPtr.p->firstPageRef = RNIL;
+ logPartPtr.p->lastPageRef = RNIL;
+ logPartPtr.p->headFileNo = ZNIL;
+ logPartPtr.p->headPageNo = ZNIL;
+ logPartPtr.p->headPageIndex = ZNIL;
+}//Dblqh::initLogpart()
+
+/* ==========================================================================
+ * ======= INITIATE LOG POINTERS =======
+ *
+ * ========================================================================= */
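+// The log part is selected from the two low bits of the operation's hash
+// value (there are four log parts); the current log file and current log
+// page are then picked up from the part and file records.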
+void Dblqh::initLogPointers(Signal* signal)
+{
+ logPartPtr.i = tcConnectptr.p->hashValue & 3;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+}//Dblqh::initLogPointers()
+
+/* ------------------------------------------------------------------------- */
+/* ------- INIT REQUEST INFO BEFORE EXECUTING A LOG RECORD ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::initReqinfoExecSr(Signal* signal)
+{
+ UintR Treqinfo = 0;
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ LqhKeyReq::setKeyLen(Treqinfo, regTcPtr->primKeyLen);
+/* ------------------------------------------------------------------------- */
+/* NUMBER OF BACKUPS AND STANDBYS ARE ZERO AND NEED NOT BE SET. */
+/* REPLICA TYPE IS CLEARED BY SEND_LQHKEYREQ. */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* SET LAST REPLICA NUMBER TO ZERO (BIT 10-11) */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* SET DIRTY FLAG */
+/* ------------------------------------------------------------------------- */
+ LqhKeyReq::setDirtyFlag(Treqinfo, 1);
+/* ------------------------------------------------------------------------- */
+/* SET SIMPLE TRANSACTION */
+/* ------------------------------------------------------------------------- */
+ LqhKeyReq::setSimpleFlag(Treqinfo, 1);
+/* ------------------------------------------------------------------------- */
+/* SET OPERATION TYPE AND LOCK MODE (NEVER READ OPERATION OR SCAN IN LOG) */
+/* ------------------------------------------------------------------------- */
+ LqhKeyReq::setLockType(Treqinfo, regTcPtr->operation);
+ LqhKeyReq::setOperation(Treqinfo, regTcPtr->operation);
+ regTcPtr->reqinfo = Treqinfo;
+/* ------------------------------------------------------------------------ */
+/* THE NUMBER OF BACKUPS IS SET TO ONE AND THE NUMBER OF STANDBYS TO ZERO. */
+/* THUS THE RECEIVING NODE WILL EXPECT THAT IT IS THE LAST NODE AND WILL   */
+/* SEND COMPLETED AS THE RESPONSE SIGNAL SINCE THE DIRTY_OP BIT IS SET.    */
+/* ------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------- */
+/* SET REPLICA TYPE TO PRIMARY AND NUMBER OF REPLICA TO ONE */
+/* ------------------------------------------------------------------------- */
+ regTcPtr->lastReplicaNo = 0;
+ regTcPtr->apiVersionNo = 0;
+ regTcPtr->nextSeqNoReplica = 0;
+ regTcPtr->opExec = 0;
+ regTcPtr->storedProcId = ZNIL;
+ regTcPtr->readlenAi = 0;
+ regTcPtr->nodeAfterNext[0] = ZNIL;
+ regTcPtr->nodeAfterNext[1] = ZNIL;
+ regTcPtr->dirtyOp = ZFALSE;
+ regTcPtr->tcBlockref = cownref;
+}//Dblqh::initReqinfoExecSr()
+
+/* --------------------------------------------------------------------------
+ * ------- INSERT FRAGMENT -------
+ *
+ * ------------------------------------------------------------------------- */
+bool Dblqh::insertFragrec(Signal* signal, Uint32 fragId)
+{
+ terrorCode = ZOK;
+ if (cfirstfreeFragrec == RNIL) {
+ jam();
+ terrorCode = ZNO_FREE_FRAGMENTREC;
+ return false;
+ }//if
+ seizeFragmentrec(signal);
+ for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
+ jam();
+ if (tabptr.p->fragid[i] == ZNIL) {
+ jam();
+ tabptr.p->fragid[i] = fragId;
+ tabptr.p->fragrec[i] = fragptr.i;
+ return true;
+ }//if
+ }//for
+ terrorCode = ZTOO_MANY_FRAGMENTS;
+ return false;
+}//Dblqh::insertFragrec()
+
+/* --------------------------------------------------------------------------
+ * -------        LINK OPERATION INTO WAIT QUEUE ON FRAGMENT        -------
+ *
+ * SUBROUTINE SHORT NAME: LFQ
+// Input Pointers:
+// tcConnectptr
+// fragptr
+ * ------------------------------------------------------------------------- */
+void Dblqh::linkFragQueue(Signal* signal)
+{
+ TcConnectionrecPtr lfqTcConnectptr;
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Fragrecord * const regFragPtr = fragptr.p;
+ Uint32 tcIndex = tcConnectptr.i;
+
+ lfqTcConnectptr.i = regFragPtr->lastWaitQueue;
+ regTcPtr->nextTc = RNIL;
+ regFragPtr->lastWaitQueue = tcIndex;
+ regTcPtr->prevTc = lfqTcConnectptr.i;
+ ndbrequire(regTcPtr->listState == TcConnectionrec::NOT_IN_LIST);
+ regTcPtr->listState = TcConnectionrec::WAIT_QUEUE_LIST;
+ if (lfqTcConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(lfqTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ lfqTcConnectptr.p->nextTc = tcIndex;
+ } else {
+ regFragPtr->firstWaitQueue = tcIndex;
+ }//if
+ return;
+}//Dblqh::linkFragQueue()
+
+/* -------------------------------------------------------------------------
+ * ------- LINK OPERATION INTO WAITING FOR LOGGING -------
+ *
+ * SUBROUTINE SHORT NAME = LWL
+// Input Pointers:
+// tcConnectptr
+// logPartPtr
+ * ------------------------------------------------------------------------- */
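+// If no ZLOG_LQHKEYREQ CONTINUEB signal is outstanding for this log part,
+// one is sent below so that the queued operation will be serviced.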
+void Dblqh::linkWaitLog(Signal* signal, LogPartRecordPtr regLogPartPtr)
+{
+ TcConnectionrecPtr lwlTcConnectptr;
+
+/* -------------------------------------------------- */
+/* LINK ACTIVE OPERATION INTO QUEUE WAITING FOR */
+/* ACCESS TO THE LOG PART. */
+/* -------------------------------------------------- */
+ lwlTcConnectptr.i = regLogPartPtr.p->lastLogQueue;
+ if (lwlTcConnectptr.i == RNIL) {
+ jam();
+ regLogPartPtr.p->firstLogQueue = tcConnectptr.i;
+ } else {
+ jam();
+ ptrCheckGuard(lwlTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ lwlTcConnectptr.p->nextTcLogQueue = tcConnectptr.i;
+ }//if
+ regLogPartPtr.p->lastLogQueue = tcConnectptr.i;
+ tcConnectptr.p->nextTcLogQueue = RNIL;
+ if (regLogPartPtr.p->LogLqhKeyReqSent == ZFALSE) {
+ jam();
+ regLogPartPtr.p->LogLqhKeyReqSent = ZTRUE;
+ signal->theData[0] = ZLOG_LQHKEYREQ;
+ signal->theData[1] = regLogPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+}//Dblqh::linkWaitLog()
+
+/* --------------------------------------------------------------------------
+ * ------- START THE NEXT OPERATION ON THIS LOG PART IF ANY -------
+ * ------- OPERATIONS ARE QUEUED. -------
+ *
+ * SUBROUTINE SHORT NAME = LNS
+// Input Pointers:
+// tcConnectptr
+// logPartPtr
+ * ------------------------------------------------------------------------- */
+void Dblqh::logNextStart(Signal* signal)
+{
+ LogPartRecordPtr lnsLogPartPtr;
+ UintR tlnsStillWaiting;
+ LogPartRecord * const regLogPartPtr = logPartPtr.p;
+
+ if ((regLogPartPtr->firstLogQueue == RNIL) &&
+ (regLogPartPtr->logPartState == LogPartRecord::ACTIVE) &&
+ (regLogPartPtr->waitWriteGciLog != LogPartRecord::WWGL_TRUE)) {
+// --------------------------------------------------------------------------
+// Optimised route for the common case
+// --------------------------------------------------------------------------
+ regLogPartPtr->logPartState = LogPartRecord::IDLE;
+ return;
+ }//if
+ if (regLogPartPtr->firstLogQueue != RNIL) {
+ jam();
+ if (regLogPartPtr->LogLqhKeyReqSent == ZFALSE) {
+ jam();
+ regLogPartPtr->LogLqhKeyReqSent = ZTRUE;
+ signal->theData[0] = ZLOG_LQHKEYREQ;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+ } else {
+ if (regLogPartPtr->logPartState == LogPartRecord::ACTIVE) {
+ jam();
+ regLogPartPtr->logPartState = LogPartRecord::IDLE;
+ } else {
+ jam();
+ }//if
+ }//if
+ if (regLogPartPtr->waitWriteGciLog != LogPartRecord::WWGL_TRUE) {
+ jam();
+ return;
+ } else {
+ jam();
+/* --------------------------------------------------------------------------
+ * A COMPLETE GCI LOG RECORD IS WAITING TO BE WRITTEN. WE GIVE THIS HIGHEST
+ * PRIORITY AND WRITE IT IMMEDIATELY. AFTER WRITING IT WE CHECK IF ANY MORE
+ * LOG PARTS ARE WAITING. IF NOT WE SEND A SIGNAL THAT INITIALISES THE GCP
+ * RECORD TO WAIT UNTIL ALL COMPLETE GCI LOG RECORDS HAVE REACHED THE DISK.
+ * -------------------------------------------------------------------------- */
+ writeCompletedGciLog(signal);
+ logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_FALSE;
+ tlnsStillWaiting = ZFALSE;
+ for (lnsLogPartPtr.i = 0; lnsLogPartPtr.i < 4; lnsLogPartPtr.i++) {
+ jam();
+ ptrAss(lnsLogPartPtr, logPartRecord);
+ if (lnsLogPartPtr.p->waitWriteGciLog == LogPartRecord::WWGL_TRUE) {
+ jam();
+ tlnsStillWaiting = ZTRUE;
+ }//if
+ }//for
+ if (tlnsStillWaiting == ZFALSE) {
+ jam();
+ signal->theData[0] = ZINIT_GCP_REC;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 1, JBB);
+ }//if
+ }//if
+}//Dblqh::logNextStart()
+
+/* --------------------------------------------------------------------------
+ * ------- MOVE PAGES FROM LFO RECORD TO PAGE REFERENCE RECORD -------
+ * WILL ALWAYS MOVE 8 PAGES TO A PAGE REFERENCE RECORD.
+ *
+ * SUBROUTINE SHORT NAME = MPR
+ * ------------------------------------------------------------------------- */
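+// When adding another 8 pages would reach ZMAX_MM_BUFFER_SIZE, the oldest
+// page reference record and its pages are released first. The new record is
+// linked last on the log part and its first page is chained after the last
+// page of the previous record.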
+void Dblqh::moveToPageRef(Signal* signal)
+{
+ LogPageRecordPtr mprLogPagePtr;
+ PageRefRecordPtr mprPageRefPtr;
+ UintR tmprIndex;
+
+/* --------------------------------------------------------------------------
+ * ------- INSERT PAGE REFERENCE RECORD -------
+ *
+ * INPUT: LFO_PTR LOG FILE OPERATION RECORD
+ * LOG_PART_PTR LOG PART RECORD
+ * PAGE_REF_PTR THE PAGE REFERENCE RECORD TO BE INSERTED.
+ * ------------------------------------------------------------------------- */
+ PageRefRecordPtr iprPageRefPtr;
+
+ if ((logPartPtr.p->mmBufferSize + 8) >= ZMAX_MM_BUFFER_SIZE) {
+ jam();
+ pageRefPtr.i = logPartPtr.p->firstPageRef;
+ ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
+ releasePrPages(signal);
+ removePageRef(signal);
+ } else {
+ jam();
+ logPartPtr.p->mmBufferSize = logPartPtr.p->mmBufferSize + 8;
+ }//if
+ seizePageRef(signal);
+ if (logPartPtr.p->firstPageRef == RNIL) {
+ jam();
+ logPartPtr.p->firstPageRef = pageRefPtr.i;
+ } else {
+ jam();
+ iprPageRefPtr.i = logPartPtr.p->lastPageRef;
+ ptrCheckGuard(iprPageRefPtr, cpageRefFileSize, pageRefRecord);
+ iprPageRefPtr.p->prNext = pageRefPtr.i;
+ }//if
+ pageRefPtr.p->prPrev = logPartPtr.p->lastPageRef;
+ logPartPtr.p->lastPageRef = pageRefPtr.i;
+
+ pageRefPtr.p->prFileNo = logFilePtr.p->fileNo;
+ pageRefPtr.p->prPageNo = lfoPtr.p->lfoPageNo;
+ tmprIndex = 0;
+ mprLogPagePtr.i = lfoPtr.p->firstLfoPage;
+MPR_LOOP:
+ arrGuard(tmprIndex, 8);
+ pageRefPtr.p->pageRef[tmprIndex] = mprLogPagePtr.i;
+ tmprIndex = tmprIndex + 1;
+ ptrCheckGuard(mprLogPagePtr, clogPageFileSize, logPageRecord);
+ mprLogPagePtr.i = mprLogPagePtr.p->logPageWord[ZNEXT_PAGE];
+ if (mprLogPagePtr.i != RNIL) {
+ jam();
+ goto MPR_LOOP;
+ }//if
+ mprPageRefPtr.i = pageRefPtr.p->prPrev;
+ if (mprPageRefPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(mprPageRefPtr, cpageRefFileSize, pageRefRecord);
+ mprLogPagePtr.i = mprPageRefPtr.p->pageRef[7];
+ ptrCheckGuard(mprLogPagePtr, clogPageFileSize, logPageRecord);
+ mprLogPagePtr.p->logPageWord[ZNEXT_PAGE] = pageRefPtr.p->pageRef[0];
+ }//if
+}//Dblqh::moveToPageRef()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ THE ATTRINFO FROM THE LOG ------- */
+/* */
+/* SUBROUTINE SHORT NAME = RA */
+/* ------------------------------------------------------------------------- */
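+// The first five words of attrinfo are stored directly in the TC connect
+// record (reclenAiLqhkey); any remaining words are read into seized
+// attrinbuf records holding at most 22 data words each.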
+void Dblqh::readAttrinfo(Signal* signal)
+{
+ Uint32 remainingLen = tcConnectptr.p->totSendlenAi;
+ if (remainingLen == 0) {
+ jam();
+ tcConnectptr.p->reclenAiLqhkey = 0;
+ return;
+ }//if
+ Uint32 dataLen = remainingLen;
+ if (remainingLen > 5)
+ dataLen = 5;
+ readLogData(signal, dataLen, &tcConnectptr.p->firstAttrinfo[0]);
+ tcConnectptr.p->reclenAiLqhkey = dataLen;
+ remainingLen -= dataLen;
+ while (remainingLen > 0) {
+ jam();
+ dataLen = remainingLen;
+ if (remainingLen > 22)
+ dataLen = 22;
+ seizeAttrinbuf(signal);
+ readLogData(signal, dataLen, &attrinbufptr.p->attrbuf[0]);
+ attrinbufptr.p->attrbuf[ZINBUF_DATA_LEN] = dataLen;
+ remainingLen -= dataLen;
+ }//while
+}//Dblqh::readAttrinfo()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ COMMIT LOG ------- */
+/* */
+/* SUBROUTINE SHORT NAME = RCL */
+/* ------------------------------------------------------------------------- */
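+// If the whole commit log record fits within the current page its fields are
+// copied directly and the page index is advanced past it; otherwise the
+// words are read one by one with readLogword, which handles the crossing of
+// the page boundary.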
+void Dblqh::readCommitLog(Signal* signal, CommitLogRecord* commitLogRecord)
+{
+ Uint32 trclPageIndex = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ if ((trclPageIndex + (ZCOMMIT_LOG_SIZE - 1)) < ZPAGE_SIZE) {
+ jam();
+ tcConnectptr.p->tableref = logPagePtr.p->logPageWord[trclPageIndex + 0];
+ tcConnectptr.p->schemaVersion = logPagePtr.p->logPageWord[trclPageIndex + 1];
+ tcConnectptr.p->fragmentid = logPagePtr.p->logPageWord[trclPageIndex + 2];
+ commitLogRecord->fileNo = logPagePtr.p->logPageWord[trclPageIndex + 3];
+ commitLogRecord->startPageNo = logPagePtr.p->logPageWord[trclPageIndex + 4];
+ commitLogRecord->startPageIndex = logPagePtr.p->logPageWord[trclPageIndex + 5];
+ commitLogRecord->stopPageNo = logPagePtr.p->logPageWord[trclPageIndex + 6];
+ tcConnectptr.p->gci = logPagePtr.p->logPageWord[trclPageIndex + 7];
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
+ (trclPageIndex + ZCOMMIT_LOG_SIZE) - 1;
+ } else {
+ jam();
+ tcConnectptr.p->tableref = readLogword(signal);
+ tcConnectptr.p->schemaVersion = readLogword(signal);
+ tcConnectptr.p->fragmentid = readLogword(signal);
+ commitLogRecord->fileNo = readLogword(signal);
+ commitLogRecord->startPageNo = readLogword(signal);
+ commitLogRecord->startPageIndex = readLogword(signal);
+ commitLogRecord->stopPageNo = readLogword(signal);
+ tcConnectptr.p->gci = readLogword(signal);
+ }//if
+ tcConnectptr.p->transid[0] = logPartPtr.i + 65536;
+ tcConnectptr.p->transid[1] = (DBLQH << 20) + (cownNodeid << 8);
+}//Dblqh::readCommitLog()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ LOG PAGES FROM DISK IN ORDER TO EXECUTE A LOG ------- */
+/* RECORD WHICH WAS NOT FOUND IN MAIN MEMORY. */
+/* */
+/* SUBROUTINE SHORT NAME = REL */
+/* ------------------------------------------------------------------------- */
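+// The pages are seized backwards so that logPageArray[0] holds the first
+// page to read; the start page number is stored in the array entry following
+// the last page before the FSREADREQ is sent.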
+void Dblqh::readExecLog(Signal* signal)
+{
+ UintR trelIndex;
+ UintR trelI;
+
+ seizeLfo(signal);
+ initLfo(signal);
+ trelI = logPartPtr.p->execSrStopPageNo - logPartPtr.p->execSrStartPageNo;
+ arrGuard(trelI + 1, 16);
+ lfoPtr.p->logPageArray[trelI + 1] = logPartPtr.p->execSrStartPageNo;
+ for (trelIndex = logPartPtr.p->execSrStopPageNo; (trelIndex >= logPartPtr.p->execSrStartPageNo) &&
+ (UintR)~trelIndex; trelIndex--) {
+ jam();
+ seizeLogpage(signal);
+ arrGuard(trelI, 16);
+ lfoPtr.p->logPageArray[trelI] = logPagePtr.i;
+ trelI--;
+ }//for
+ lfoPtr.p->lfoPageNo = logPartPtr.p->execSrStartPageNo;
+ lfoPtr.p->noPagesRw = (logPartPtr.p->execSrStopPageNo -
+ logPartPtr.p->execSrStartPageNo) + 1;
+ lfoPtr.p->firstLfoPage = lfoPtr.p->logPageArray[0];
+ signal->theData[0] = logFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lfoPtr.i;
+ signal->theData[3] = ZLIST_OF_MEM_PAGES; // edtjamo TR509 //ZLIST_OF_PAIRS;
+ signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
+ signal->theData[5] = lfoPtr.p->noPagesRw;
+ signal->theData[6] = lfoPtr.p->logPageArray[0];
+ signal->theData[7] = lfoPtr.p->logPageArray[1];
+ signal->theData[8] = lfoPtr.p->logPageArray[2];
+ signal->theData[9] = lfoPtr.p->logPageArray[3];
+ signal->theData[10] = lfoPtr.p->logPageArray[4];
+ signal->theData[11] = lfoPtr.p->logPageArray[5];
+ signal->theData[12] = lfoPtr.p->logPageArray[6];
+ signal->theData[13] = lfoPtr.p->logPageArray[7];
+ signal->theData[14] = lfoPtr.p->logPageArray[8];
+ signal->theData[15] = lfoPtr.p->logPageArray[9];
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 16, JBA);
+}//Dblqh::readExecLog()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ 64 KBYTES WHEN EXECUTING THE FRAGMENT LOG ------- */
+/* */
+/* SUBROUTINE SHORT NAME = RES */
+/* ------------------------------------------------------------------------- */
+void Dblqh::readExecSrNewMbyte(Signal* signal)
+{
+ logFilePtr.p->currentFilepage = logFilePtr.p->currentMbyte * ZPAGES_IN_MBYTE;
+ logFilePtr.p->filePosition = logFilePtr.p->currentMbyte * ZPAGES_IN_MBYTE;
+ logPartPtr.p->execSrPagesRead = 0;
+ logPartPtr.p->execSrPagesReading = 0;
+ logPartPtr.p->execSrPagesExecuted = 0;
+ readExecSr(signal);
+ logPartPtr.p->logExecState = LogPartRecord::LES_WAIT_READ_EXEC_SR_NEW_MBYTE;
+}//Dblqh::readExecSrNewMbyte()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ 64 KBYTES WHEN EXECUTING THE FRAGMENT LOG ------- */
+/* */
+/* SUBROUTINE SHORT NAME = RES */
+/* ------------------------------------------------------------------------- */
+void Dblqh::readExecSr(Signal* signal)
+{
+ UintR tresPageid;
+ UintR tresIndex;
+
+ tresPageid = logFilePtr.p->filePosition;
+ seizeLfo(signal);
+ initLfo(signal);
+ for (tresIndex = 7; (UintR)~tresIndex; tresIndex--) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* GO BACKWARDS SINCE WE INSERT AT THE BEGINNING AND WE WANT THE FIRST PAGE */
+/* TO BE FIRST AND THE LAST PAGE TO BE LAST.                                */
+/* ------------------------------------------------------------------------- */
+ seizeLogpage(signal);
+ lfoPtr.p->logPageArray[tresIndex] = logPagePtr.i;
+ }//for
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_EXEC_SR;
+ lfoPtr.p->lfoPageNo = tresPageid;
+ logFilePtr.p->filePosition = logFilePtr.p->filePosition + 8;
+ logPartPtr.p->execSrPagesReading = logPartPtr.p->execSrPagesReading + 8;
+ lfoPtr.p->noPagesRw = 8;
+ lfoPtr.p->firstLfoPage = lfoPtr.p->logPageArray[0];
+ signal->theData[0] = logFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lfoPtr.i;
+ signal->theData[3] = ZLIST_OF_MEM_PAGES;
+ signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
+ signal->theData[5] = 8;
+ signal->theData[6] = lfoPtr.p->logPageArray[0];
+ signal->theData[7] = lfoPtr.p->logPageArray[1];
+ signal->theData[8] = lfoPtr.p->logPageArray[2];
+ signal->theData[9] = lfoPtr.p->logPageArray[3];
+ signal->theData[10] = lfoPtr.p->logPageArray[4];
+ signal->theData[11] = lfoPtr.p->logPageArray[5];
+ signal->theData[12] = lfoPtr.p->logPageArray[6];
+ signal->theData[13] = lfoPtr.p->logPageArray[7];
+ signal->theData[14] = tresPageid;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 15, JBA);
+}//Dblqh::readExecSr()
+
+/* ------------------------------------------------------------------------- */
+/* ------------ READ THE PRIMARY KEY FROM THE LOG ---------------- */
+/* */
+/* SUBROUTINE SHORT NAME = RK */
+/* --------------------------------------------------------------------------*/
+void Dblqh::readKey(Signal* signal)
+{
+ Uint32 remainingLen = tcConnectptr.p->primKeyLen;
+ ndbrequire(remainingLen != 0);
+ Uint32 dataLen = remainingLen;
+ if (remainingLen > 4)
+ dataLen = 4;
+ readLogData(signal, dataLen, &tcConnectptr.p->tupkeyData[0]);
+ remainingLen -= dataLen;
+ while (remainingLen > 0) {
+ jam();
+ seizeTupkeybuf(signal);
+ dataLen = remainingLen;
+ if (dataLen > 4)
+ dataLen = 4;
+ readLogData(signal, dataLen, &databufptr.p->data[0]);
+ remainingLen -= dataLen;
+ }//while
+}//Dblqh::readKey()
+
+/* ------------------------------------------------------------------------- */
+/* ------------ READ A NUMBER OF WORDS FROM THE LOG INTO A BUFFER ---------- */
+/* */
+/* SUBROUTINE SHORT NAME = RLD */
+/* --------------------------------------------------------------------------*/
+void Dblqh::readLogData(Signal* signal, Uint32 noOfWords, Uint32* dataPtr)
+{
+ ndbrequire(noOfWords < 32);
+ Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ if ((logPos + noOfWords) >= ZPAGE_SIZE) {
+ for (Uint32 i = 0; i < noOfWords; i++)
+ dataPtr[i] = readLogwordExec(signal);
+ } else {
+ MEMCOPY_NO_WORDS(dataPtr, &logPagePtr.p->logPageWord[logPos], noOfWords);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + noOfWords;
+ }//if
+}//Dblqh::readLogData()
+
+/* ------------------------------------------------------------------------- */
+/* ------------ READ THE LOG HEADER OF A PREPARE LOG HEADER ---------------- */
+/* */
+/* SUBROUTINE SHORT NAME = RLH */
+/* --------------------------------------------------------------------------*/
+void Dblqh::readLogHeader(Signal* signal)
+{
+ Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ if ((logPos + ZLOG_HEAD_SIZE) < ZPAGE_SIZE) {
+ jam();
+ tcConnectptr.p->hashValue = logPagePtr.p->logPageWord[logPos + 2];
+ tcConnectptr.p->operation = logPagePtr.p->logPageWord[logPos + 3];
+ tcConnectptr.p->totSendlenAi = logPagePtr.p->logPageWord[logPos + 4];
+ tcConnectptr.p->primKeyLen = logPagePtr.p->logPageWord[logPos + 5];
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + ZLOG_HEAD_SIZE;
+ } else {
+ jam();
+ readLogwordExec(signal); /* IGNORE PREPARE LOG RECORD TYPE */
+ readLogwordExec(signal); /* IGNORE LOG RECORD SIZE */
+ tcConnectptr.p->hashValue = readLogwordExec(signal);
+ tcConnectptr.p->operation = readLogwordExec(signal);
+ tcConnectptr.p->totSendlenAi = readLogwordExec(signal);
+ tcConnectptr.p->primKeyLen = readLogwordExec(signal);
+ }//if
+}//Dblqh::readLogHeader()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ A WORD FROM THE LOG ------- */
+/* */
+/* OUTPUT: TLOG_WORD */
+/* SUBROUTINE SHORT NAME = RLW */
+/* ------------------------------------------------------------------------- */
+Uint32 Dblqh::readLogword(Signal* signal)
+{
+ Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ ndbrequire(logPos < ZPAGE_SIZE);
+ Uint32 logWord = logPagePtr.p->logPageWord[logPos];
+ logPos++;
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos;
+ if (logPos >= ZPAGE_SIZE) {
+ jam();
+ logPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE];
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
+ logFilePtr.p->currentLogpage = logPagePtr.i;
+ logFilePtr.p->currentFilepage++;
+ logPartPtr.p->execSrPagesRead--;
+ logPartPtr.p->execSrPagesExecuted++;
+ }//if
+ return logWord;
+}//Dblqh::readLogword()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ A WORD FROM THE LOG WHEN EXECUTING A LOG RECORD ------- */
+/* */
+/* OUTPUT: TLOG_WORD */
+/* SUBROUTINE SHORT NAME = RWE */
+/* ------------------------------------------------------------------------- */
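+// Unlike readLogword this routine does not update the file page and
+// execSrPages counters, and when the last word of the last page has been
+// read it stays on that page instead of stepping to a next one.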
+Uint32 Dblqh::readLogwordExec(Signal* signal)
+{
+ Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ ndbrequire(logPos < ZPAGE_SIZE);
+ Uint32 logWord = logPagePtr.p->logPageWord[logPos];
+ logPos++;
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos;
+ if (logPos >= ZPAGE_SIZE) {
+ jam();
+ logPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE];
+ if (logPagePtr.i != RNIL){
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
+ } else {
+ // Reading word at the last pos in the last page
+ // Don't step forward to next page!
+ jam();
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]++;
+ }
+ }//if
+ return logWord;
+}//Dblqh::readLogwordExec()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ A SINGLE PAGE FROM THE LOG ------- */
+/* */
+/* INPUT: TRSP_PAGE_NO */
+/* SUBROUTINE SHORT NAME = RSP */
+/* ------------------------------------------------------------------------- */
+void Dblqh::readSinglePage(Signal* signal, Uint32 pageNo)
+{
+ seizeLfo(signal);
+ initLfo(signal);
+ seizeLogpage(signal);
+ lfoPtr.p->firstLfoPage = logPagePtr.i;
+ lfoPtr.p->lfoPageNo = pageNo;
+ lfoPtr.p->noPagesRw = 1;
+ signal->theData[0] = logFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lfoPtr.i;
+ signal->theData[3] = ZLIST_OF_PAIRS;
+ signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
+ signal->theData[5] = 1;
+ signal->theData[6] = logPagePtr.i;
+ signal->theData[7] = pageNo;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
+}//Dblqh::readSinglePage()
+
+/* --------------------------------------------------------------------------
+ * -------    RELEASE OPERATION FROM ACC BLOCKED LIST ON FRAGMENT    -------
+ *
+ * SUBROUTINE SHORT NAME = RAC
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseAccList(Signal* signal)
+{
+ TcConnectionrecPtr racTcNextConnectptr;
+ TcConnectionrecPtr racTcPrevConnectptr;
+
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ racTcPrevConnectptr.i = tcConnectptr.p->prevTc;
+ racTcNextConnectptr.i = tcConnectptr.p->nextTc;
+ if (tcConnectptr.p->listState != TcConnectionrec::ACC_BLOCK_LIST) {
+ jam();
+ systemError(signal);
+ }//if
+ tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
+ if (racTcNextConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(racTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ racTcNextConnectptr.p->prevTc = racTcPrevConnectptr.i;
+ }//if
+ if (racTcPrevConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(racTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ racTcPrevConnectptr.p->nextTc = tcConnectptr.p->nextTc;
+ } else {
+ jam();
+ /* ---------------------------------------------------------------------
+ * OPERATION RECORD IS FIRST IN THE ACC BLOCKED LIST
+ * THIS MEANS THAT THERE EXISTS NO PREVIOUS TC THAT NEEDS TO BE UPDATED.
+ * --------------------------------------------------------------------- */
+ fragptr.p->accBlockedList = racTcNextConnectptr.i;
+ }//if
+}//Dblqh::releaseAccList()
+
+/* --------------------------------------------------------------------------
+ * ------- REMOVE COPY FRAGMENT FROM ACTIVE COPY LIST -------
+ *
+ * ------------------------------------------------------------------------- */
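+// Searches the four-slot cactiveCopy array for fragptr.i; the slots after
+// the match are moved one step towards the front (the last slot is simply
+// cleared) and cnoActiveCopy is decremented.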
+void Dblqh::releaseActiveCopy(Signal* signal)
+{
+ /* MUST BE 8 BIT */
+ UintR tracFlag;
+ UintR tracIndex;
+
+ tracFlag = ZFALSE;
+ for (tracIndex = 0; tracIndex < 4; tracIndex++) {
+ if (tracFlag == ZFALSE) {
+ jam();
+ if (cactiveCopy[tracIndex] == fragptr.i) {
+ jam();
+ tracFlag = ZTRUE;
+ }//if
+ } else {
+ if (tracIndex < 3) {
+ jam();
+ cactiveCopy[tracIndex - 1] = cactiveCopy[tracIndex];
+ } else {
+ jam();
+ cactiveCopy[3] = RNIL;
+ }//if
+ }//if
+ }//for
+ ndbrequire(tracFlag == ZTRUE);
+ cnoActiveCopy--;
+}//Dblqh::releaseActiveCopy()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE OPERATION FROM ACTIVE LIST ON FRAGMENT -------
+ *
+ * SUBROUTINE SHORT NAME = RAL
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseActiveList(Signal* signal)
+{
+ TcConnectionrecPtr ralTcNextConnectptr;
+ TcConnectionrecPtr ralTcPrevConnectptr;
+ ralTcPrevConnectptr.i = tcConnectptr.p->prevTc;
+ ralTcNextConnectptr.i = tcConnectptr.p->nextTc;
+ ndbrequire(tcConnectptr.p->listState == TcConnectionrec::IN_ACTIVE_LIST);
+ tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
+ if (ralTcNextConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(ralTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ ralTcNextConnectptr.p->prevTc = ralTcPrevConnectptr.i;
+ }//if
+ if (ralTcPrevConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(ralTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ ralTcPrevConnectptr.p->nextTc = tcConnectptr.p->nextTc;
+ } else {
+ jam();
+ /* ----------------------------------------------------------------------
+ * OPERATION RECORD IS FIRST IN ACTIVE LIST
+ * THIS MEANS THAT THERE EXISTS NO PREVIOUS TC THAT NEEDS TO BE UPDATED.
+ * --------------------------------------------------------------------- */
+ fragptr.p->activeList = ralTcNextConnectptr.i;
+ }//if
+}//Dblqh::releaseActiveList()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE ADD FRAGMENT RECORD -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseAddfragrec(Signal* signal)
+{
+ addfragptr.p->addfragStatus = AddFragRecord::FREE;
+ addfragptr.p->nextAddfragrec = cfirstfreeAddfragrec;
+ cfirstfreeAddfragrec = addfragptr.i;
+}//Dblqh::releaseAddfragrec()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE FRAGMENT RECORD -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseFragrec()
+{
+ fragptr.p->fragStatus = Fragrecord::FREE;
+ fragptr.p->nextFrag = cfirstfreeFragrec;
+ cfirstfreeFragrec = fragptr.i;
+}//Dblqh::releaseFragrec()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE LCP LOCAL RECORD -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseLcpLoc(Signal* signal)
+{
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::IDLE;
+ lcpLocptr.p->nextLcpLoc = cfirstfreeLcpLoc;
+ cfirstfreeLcpLoc = lcpLocptr.i;
+}//Dblqh::releaseLcpLoc()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE A PAGE REFERENCE RECORD. -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::releasePageRef(Signal* signal)
+{
+ pageRefPtr.p->prNext = cfirstfreePageRef;
+ cfirstfreePageRef = pageRefPtr.i;
+}//Dblqh::releasePageRef()
+
+/* --------------------------------------------------------------------------
+ * --- RELEASE ALL PAGES IN THE MM BUFFER AFTER EXECUTING THE LOG ON IT. ----
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseMmPages(Signal* signal)
+{
+RMP_LOOP:
+ jam();
+ pageRefPtr.i = logPartPtr.p->firstPageRef;
+ if (pageRefPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
+ releasePrPages(signal);
+ removePageRef(signal);
+ goto RMP_LOOP;
+ }//if
+}//Dblqh::releaseMmPages()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE A SET OF PAGES AFTER EXECUTING THE LOG ON IT. -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::releasePrPages(Signal* signal)
+{
+ UintR trppIndex;
+
+ for (trppIndex = 0; trppIndex <= 7; trppIndex++) {
+ jam();
+ logPagePtr.i = pageRefPtr.p->pageRef[trppIndex];
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ releaseLogpage(signal);
+ }//for
+}//Dblqh::releasePrPages()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE OPERATION FROM WAIT QUEUE LIST ON FRAGMENT -------
+ *
+ * SUBROUTINE SHORT NAME : RWA
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseWaitQueue(Signal* signal)
+{
+ TcConnectionrecPtr rwaTcNextConnectptr;
+ TcConnectionrecPtr rwaTcPrevConnectptr;
+
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ rwaTcPrevConnectptr.i = tcConnectptr.p->prevTc;
+ rwaTcNextConnectptr.i = tcConnectptr.p->nextTc;
+ if (tcConnectptr.p->listState != TcConnectionrec::WAIT_QUEUE_LIST) {
+ jam();
+ systemError(signal);
+ }//if
+ tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
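+ /* Unlink the operation from the fragment's wait queue; both the first
+  * and last pointers of the queue are maintained on the fragment record. */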
+ if (rwaTcNextConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(rwaTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ rwaTcNextConnectptr.p->prevTc = rwaTcPrevConnectptr.i;
+ } else {
+ jam();
+ fragptr.p->lastWaitQueue = rwaTcPrevConnectptr.i;
+ }//if
+ if (rwaTcPrevConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(rwaTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ rwaTcPrevConnectptr.p->nextTc = rwaTcNextConnectptr.i;
+ } else {
+ jam();
+ fragptr.p->firstWaitQueue = rwaTcNextConnectptr.i;
+ }//if
+}//Dblqh::releaseWaitQueue()
+
+/* --------------------------------------------------------------------------
+ * ------- REMOVE OPERATION RECORD FROM THE LOG PART'S LIST OF -------
+ * NOT YET COMPLETED OPERATIONS IN THE LOG.
+ *
+ * SUBROUTINE SHORT NAME = RLO
+ * ------------------------------------------------------------------------- */
+void Dblqh::removeLogTcrec(Signal* signal)
+{
+ TcConnectionrecPtr rloTcNextConnectptr;
+ TcConnectionrecPtr rloTcPrevConnectptr;
+ rloTcPrevConnectptr.i = tcConnectptr.p->prevLogTcrec;
+ rloTcNextConnectptr.i = tcConnectptr.p->nextLogTcrec;
+ if (rloTcNextConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(rloTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ rloTcNextConnectptr.p->prevLogTcrec = rloTcPrevConnectptr.i;
+ } else {
+ jam();
+ logPartPtr.p->lastLogTcrec = rloTcPrevConnectptr.i;
+ }//if
+ if (rloTcPrevConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(rloTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ rloTcPrevConnectptr.p->nextLogTcrec = rloTcNextConnectptr.i;
+ } else {
+ jam();
+ logPartPtr.p->firstLogTcrec = rloTcNextConnectptr.i;
+ }//if
+}//Dblqh::removeLogTcrec()
+
+/* --------------------------------------------------------------------------
+ * ------- REMOVE PAGE REFERENCE RECORD FROM LIST IN THIS LOG PART -------
+ *
+ * SUBROUTINE SHORT NAME = RPR
+ * ------------------------------------------------------------------------- */
+void Dblqh::removePageRef(Signal* signal)
+{
+ PageRefRecordPtr rprPageRefPtr;
+
+ pageRefPtr.i = logPartPtr.p->firstPageRef;
+ if (pageRefPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
+ if (pageRefPtr.p->prNext == RNIL) {
+ jam();
+ logPartPtr.p->lastPageRef = RNIL;
+ logPartPtr.p->firstPageRef = RNIL;
+ } else {
+ jam();
+ logPartPtr.p->firstPageRef = pageRefPtr.p->prNext;
+ rprPageRefPtr.i = pageRefPtr.p->prNext;
+ ptrCheckGuard(rprPageRefPtr, cpageRefFileSize, pageRefRecord);
+ rprPageRefPtr.p->prPrev = RNIL;
+ }//if
+ releasePageRef(signal);
+ }//if
+}//Dblqh::removePageRef()
+
+/* ------------------------------------------------------------------------- */
+/* ------- RETURN FROM EXECUTION OF LOG ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+Uint32 Dblqh::returnExecLog(Signal* signal)
+{
+ tcConnectptr.p->connectState = TcConnectionrec::CONNECTED;
+ initLogPointers(signal);
+ logPartPtr.p->execSrExecuteIndex++;
+ Uint32 result = checkIfExecLog(signal);
+ if (result == ZOK) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* THIS LOG RECORD WILL BE EXECUTED AGAIN TOWARDS ANOTHER NODE. */
+/* ------------------------------------------------------------------------- */
+ logPagePtr.i = logPartPtr.p->execSrLogPage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
+ logPartPtr.p->execSrLogPageIndex;
+ } else {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* NO MORE EXECUTION OF THIS LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+ if (logPartPtr.p->logExecState ==
+ LogPartRecord::LES_EXEC_LOGREC_FROM_FILE) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* THE LOG RECORD WAS READ FROM DISK. RELEASE ITS PAGES IMMEDIATELY. */
+/* ------------------------------------------------------------------------- */
+ lfoPtr.i = logPartPtr.p->execSrLfoRec;
+ ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
+ releaseLfoPages(signal);
+ releaseLfo(signal);
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG;
+ if (logPartPtr.p->execSrExecLogFile != logPartPtr.p->currentLogfile) {
+ jam();
+ LogFileRecordPtr clfLogFilePtr;
+ clfLogFilePtr.i = logPartPtr.p->execSrExecLogFile;
+ ptrCheckGuard(clfLogFilePtr, clogFileFileSize, logFileRecord);
+ clfLogFilePtr.p->logFileStatus = LogFileRecord::CLOSING_EXEC_LOG;
+ closeFile(signal, clfLogFilePtr);
+ result = ZCLOSE_FILE;
+ }//if
+ }//if
+ logPartPtr.p->execSrExecuteIndex = 0;
+ logPartPtr.p->execSrLogPage = RNIL;
+ logPartPtr.p->execSrLogPageIndex = ZNIL;
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPartPtr.p->savePageIndex;
+ }//if
+ return result;
+}//Dblqh::returnExecLog()
+
+/* --------------------------------------------------------------------------
+ * ------- SEIZE ADD FRAGMENT RECORD ------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::seizeAddfragrec(Signal* signal)
+{
+ addfragptr.i = cfirstfreeAddfragrec;
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ cfirstfreeAddfragrec = addfragptr.p->nextAddfragrec;
+}//Dblqh::seizeAddfragrec()
+
+/* --------------------------------------------------------------------------
+ * ------- SEIZE FRAGMENT RECORD -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::seizeFragmentrec(Signal* signal)
+{
+ fragptr.i = cfirstfreeFragrec;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ cfirstfreeFragrec = fragptr.p->nextFrag;
+ fragptr.p->nextFrag = RNIL;
+}//Dblqh::seizeFragmentrec()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEIZE A PAGE REFERENCE RECORD. ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::seizePageRef(Signal* signal)
+{
+ pageRefPtr.i = cfirstfreePageRef;
+ ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
+ cfirstfreePageRef = pageRefPtr.p->prNext;
+ pageRefPtr.p->prNext = RNIL;
+}//Dblqh::seizePageRef()
+
+/* --------------------------------------------------------------------------
+ * ------- SEND ABORTED -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::sendAborted(Signal* signal)
+{
+ UintR TlastInd;
+ if (tcConnectptr.p->nextReplica == ZNIL) {
+ TlastInd = ZTRUE;
+ } else {
+ TlastInd = ZFALSE;
+ }//if
+ signal->theData[0] = tcConnectptr.p->tcOprec;
+ signal->theData[1] = tcConnectptr.p->transid[0];
+ signal->theData[2] = tcConnectptr.p->transid[1];
+ signal->theData[3] = cownNodeid;
+ signal->theData[4] = TlastInd;
+ sendSignal(tcConnectptr.p->tcBlockref, GSN_ABORTED, signal, 5, JBB);
+ return;
+}//Dblqh::sendAborted()
+
+/* --------------------------------------------------------------------------
+ * ------- SEND LQH_TRANSCONF -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::sendLqhTransconf(Signal* signal, LqhTransConf::OperationStatus stat)
+{
+ tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec;
+ ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
+
+ Uint32 reqInfo = 0;
+ LqhTransConf::setReplicaType(reqInfo, tcConnectptr.p->replicaType);
+ LqhTransConf::setReplicaNo(reqInfo, tcConnectptr.p->seqNoReplica);
+ LqhTransConf::setLastReplicaNo(reqInfo, tcConnectptr.p->lastReplicaNo);
+ LqhTransConf::setSimpleFlag(reqInfo, tcConnectptr.p->opSimple);
+ LqhTransConf::setDirtyFlag(reqInfo, tcConnectptr.p->dirtyOp);
+ LqhTransConf::setOperation(reqInfo, tcConnectptr.p->operation);
+
+ LqhTransConf * const lqhTransConf = (LqhTransConf *)&signal->theData[0];
+ lqhTransConf->tcRef = tcNodeFailptr.p->newTcRef;
+ lqhTransConf->lqhNodeId = cownNodeid;
+ lqhTransConf->operationStatus = stat;
+ lqhTransConf->lqhConnectPtr = tcConnectptr.i;
+ lqhTransConf->transId1 = tcConnectptr.p->transid[0];
+ lqhTransConf->transId2 = tcConnectptr.p->transid[1];
+ lqhTransConf->oldTcOpRec = tcConnectptr.p->tcOprec;
+ lqhTransConf->requestInfo = reqInfo;
+ lqhTransConf->gci = tcConnectptr.p->gci;
+ lqhTransConf->nextNodeId1 = tcConnectptr.p->nextReplica;
+ lqhTransConf->nextNodeId2 = tcConnectptr.p->nodeAfterNext[0];
+ lqhTransConf->nextNodeId3 = tcConnectptr.p->nodeAfterNext[1];
+ lqhTransConf->apiRef = tcConnectptr.p->applRef;
+ lqhTransConf->apiOpRec = tcConnectptr.p->applOprec;
+ lqhTransConf->tableId = tcConnectptr.p->tableref;
+ sendSignal(tcNodeFailptr.p->newTcBlockref, GSN_LQH_TRANSCONF,
+ signal, LqhTransConf::SignalLength, JBB);
+ tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1;
+ signal->theData[0] = ZLQH_TRANS_NEXT;
+ signal->theData[1] = tcNodeFailptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+}//Dblqh::sendLqhTransconf()
+
+/* --------------------------------------------------------------------------
+ * ------- START ANOTHER PHASE OF LOG EXECUTION -------
+ * RESET THE VARIABLES NEEDED BY THIS PROCESS AND SEND THE START SIGNAL
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::startExecSr(Signal* signal)
+{
+ cnoFragmentsExecSr = 0;
+ signal->theData[0] = cfirstCompletedFragSr;
+ signal->theData[1] = RNIL;
+ sendSignal(cownref, GSN_START_EXEC_SR, signal, 2, JBB);
+}//Dblqh::startExecSr()
+
+/* ¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤
+ * ¤¤¤¤¤¤¤ LOG MODULE ¤¤¤¤¤¤¤
+ * ¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤ */
+/* --------------------------------------------------------------------------
+ * ------- STEP FORWARD IN FRAGMENT LOG DURING LOG EXECUTION -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::stepAhead(Signal* signal, Uint32 stepAheadWords)
+{
+ UintR tsaPos;
+
+ tsaPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
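+ /* As long as the step crosses a page boundary: consume the rest of the
+  * current page, advance to the next page in the chain, continue just
+  * after the page header, and update the read/executed page counters
+  * used during log execution. */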
+ while ((stepAheadWords + tsaPos) >= ZPAGE_SIZE) {
+ jam();
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_SIZE;
+ stepAheadWords = stepAheadWords - (ZPAGE_SIZE - tsaPos);
+ logFilePtr.p->currentLogpage = logPagePtr.p->logPageWord[ZNEXT_PAGE];
+ logPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE];
+ logFilePtr.p->currentFilepage++;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
+ logPartPtr.p->execSrPagesRead--;
+ logPartPtr.p->execSrPagesExecuted++;
+ tsaPos = ZPAGE_HEADER_SIZE;
+ }//while
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = stepAheadWords + tsaPos;
+}//Dblqh::stepAhead()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE AN ABORT LOG RECORD -------
+ *
+ * SUBROUTINE SHORT NAME: WAL
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeAbortLog(Signal* signal)
+{
+ if ((ZABORT_LOG_SIZE + ZNEXT_LOG_SIZE) >
+ logFilePtr.p->remainingWordsInMbyte) {
+ jam();
+ changeMbyte(signal);
+ }//if
+ logFilePtr.p->remainingWordsInMbyte =
+ logFilePtr.p->remainingWordsInMbyte - ZABORT_LOG_SIZE;
+ writeLogWord(signal, ZABORT_TYPE);
+ writeLogWord(signal, tcConnectptr.p->transid[0]);
+ writeLogWord(signal, tcConnectptr.p->transid[1]);
+}//Dblqh::writeAbortLog()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE A COMMIT LOG RECORD -------
+ *
+ * SUBROUTINE SHORT NAME: WCL
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeCommitLog(Signal* signal, LogPartRecordPtr regLogPartPtr)
+{
+ LogFileRecordPtr regLogFilePtr;
+ LogPageRecordPtr regLogPagePtr;
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ regLogFilePtr.i = regLogPartPtr.p->currentLogfile;
+ ptrCheckGuard(regLogFilePtr, clogFileFileSize, logFileRecord);
+ regLogPagePtr.i = regLogFilePtr.p->currentLogpage;
+ Uint32 twclTmp = regLogFilePtr.p->remainingWordsInMbyte;
+ ptrCheckGuard(regLogPagePtr, clogPageFileSize, logPageRecord);
+ logPartPtr = regLogPartPtr;
+ logFilePtr = regLogFilePtr;
+ logPagePtr = regLogPagePtr;
+ if ((ZCOMMIT_LOG_SIZE + ZNEXT_LOG_SIZE) > twclTmp) {
+ jam();
+ changeMbyte(signal);
+ twclTmp = logFilePtr.p->remainingWordsInMbyte;
+ }//if
+
+ Uint32 twclLogPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ Uint32 tableId = regTcPtr->tableref;
+ Uint32 schemaVersion = regTcPtr->schemaVersion;
+ Uint32 fragId = regTcPtr->fragmentid;
+ Uint32 fileNo = regTcPtr->logStartFileNo;
+ Uint32 startPageNo = regTcPtr->logStartPageNo;
+ Uint32 pageIndex = regTcPtr->logStartPageIndex;
+ Uint32 stopPageNo = regTcPtr->logStopPageNo;
+ Uint32 gci = regTcPtr->gci;
+ logFilePtr.p->remainingWordsInMbyte = twclTmp - ZCOMMIT_LOG_SIZE;
+
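+ /* If the commit record would cross a page boundary, write it word by
+  * word through writeLogWord(), which handles the page switch; otherwise
+  * write the nine words of the record directly into the current page. */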
+ if ((twclLogPos + ZCOMMIT_LOG_SIZE) >= ZPAGE_SIZE) {
+ writeLogWord(signal, ZCOMMIT_TYPE);
+ writeLogWord(signal, tableId);
+ writeLogWord(signal, schemaVersion);
+ writeLogWord(signal, fragId);
+ writeLogWord(signal, fileNo);
+ writeLogWord(signal, startPageNo);
+ writeLogWord(signal, pageIndex);
+ writeLogWord(signal, stopPageNo);
+ writeLogWord(signal, gci);
+ } else {
+ Uint32* dataPtr = &logPagePtr.p->logPageWord[twclLogPos];
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = twclLogPos + ZCOMMIT_LOG_SIZE;
+ dataPtr[0] = ZCOMMIT_TYPE;
+ dataPtr[1] = tableId;
+ dataPtr[2] = schemaVersion;
+ dataPtr[3] = fragId;
+ dataPtr[4] = fileNo;
+ dataPtr[5] = startPageNo;
+ dataPtr[6] = pageIndex;
+ dataPtr[7] = stopPageNo;
+ dataPtr[8] = gci;
+ }//if
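+ /* Finally remove the operation from the log part's list of operations
+  * not yet completed in the log (the same unlinking as removeLogTcrec,
+  * but using the register pointer copies). */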
+ TcConnectionrecPtr rloTcNextConnectptr;
+ TcConnectionrecPtr rloTcPrevConnectptr;
+ rloTcPrevConnectptr.i = regTcPtr->prevLogTcrec;
+ rloTcNextConnectptr.i = regTcPtr->nextLogTcrec;
+ if (rloTcNextConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(rloTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ rloTcNextConnectptr.p->prevLogTcrec = rloTcPrevConnectptr.i;
+ } else {
+ regLogPartPtr.p->lastLogTcrec = rloTcPrevConnectptr.i;
+ }//if
+ if (rloTcPrevConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(rloTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ rloTcPrevConnectptr.p->nextLogTcrec = rloTcNextConnectptr.i;
+ } else {
+ regLogPartPtr.p->firstLogTcrec = rloTcNextConnectptr.i;
+ }//if
+}//Dblqh::writeCommitLog()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE A COMPLETED GCI LOG RECORD -------
+ *
+ * SUBROUTINE SHORT NAME: WCG
+ * Input Pointers:
+ *   logFilePtr
+ *   logPartPtr
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeCompletedGciLog(Signal* signal)
+{
+ if ((ZCOMPLETED_GCI_LOG_SIZE + ZNEXT_LOG_SIZE) >
+ logFilePtr.p->remainingWordsInMbyte) {
+ jam();
+ changeMbyte(signal);
+ }//if
+ logFilePtr.p->remainingWordsInMbyte =
+ logFilePtr.p->remainingWordsInMbyte - ZCOMPLETED_GCI_LOG_SIZE;
+ writeLogWord(signal, ZCOMPLETED_GCI_TYPE);
+ writeLogWord(signal, cnewestCompletedGci);
+ logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci;
+}//Dblqh::writeCompletedGciLog()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE A DIRTY PAGE DURING LOG EXECUTION -------
+ *
+ * SUBROUTINE SHORT NAME: WD
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeDirty(Signal* signal)
+{
+ logPagePtr.p->logPageWord[ZPOS_DIRTY] = ZNOT_DIRTY;
+
+ // Calculate checksum for page
+ logPagePtr.p->logPageWord[ZPOS_CHECKSUM] = calcPageCheckSum(logPagePtr);
+
+ seizeLfo(signal);
+ initLfo(signal);
+ lfoPtr.p->lfoPageNo = logPartPtr.p->prevFilepage;
+ lfoPtr.p->noPagesRw = 1;
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_DIRTY;
+ lfoPtr.p->firstLfoPage = logPagePtr.i;
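+ /* Build a synchronous FSWRITEREQ for the single dirty page: file pointer,
+  * our block reference, the LFO record as user pointer, the synchronous
+  * 'list of pairs' format, one page and its file page number (roles
+  * inferred from the values placed in the signal). */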
+ signal->theData[0] = logFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lfoPtr.i;
+ signal->theData[3] = ZLIST_OF_PAIRS_SYNCH;
+ signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
+ signal->theData[5] = 1;
+ signal->theData[6] = logPagePtr.i;
+ signal->theData[7] = logPartPtr.p->prevFilepage;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+}//Dblqh::writeDirty()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE A WORD INTO THE LOG, CHECK FOR NEW PAGE -------
+ *
+ * SUBROUTINE SHORT NAME: WLW
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeLogWord(Signal* signal, Uint32 data)
+{
+ Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ ndbrequire(logPos < ZPAGE_SIZE);
+ logPagePtr.p->logPageWord[logPos] = data;
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + 1;
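+ /* If the page just became full, complete it (sending it towards the file
+  * system) and make a freshly seized and initialised page the current
+  * log page. */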
+ if ((logPos + 1) == ZPAGE_SIZE) {
+ jam();
+ completedLogPage(signal, ZNORMAL);
+ seizeLogpage(signal);
+ initLogpage(signal);
+ logFilePtr.p->currentLogpage = logPagePtr.i;
+ logFilePtr.p->currentFilepage++;
+ }//if
+}//Dblqh::writeLogWord()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE A NEXT LOG RECORD AND CHANGE TO NEXT MBYTE -------
+ *
+ * SUBROUTINE SHORT NAME: WNL
+ * Input Pointers:
+ *   logFilePtr (redefines)
+ *   logPagePtr (redefines)
+ *   logPartPtr
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeNextLog(Signal* signal)
+{
+ LogFileRecordPtr wnlNextLogFilePtr;
+ UintR twnlNextFileNo;
+ UintR twnlNewMbyte;
+ UintR twnlRemWords;
+ UintR twnlNextMbyte;
+
+/* -------------------------------------------------- */
+/* CALCULATE THE NEW NUMBER OF REMAINING WORDS */
+/* AS ZPAGES_IN_MBYTE * (ZPAGE_SIZE - ZPAGE_HEADER_SIZE), */
+/* I.E. THE NUMBER OF PAGES IN AN MBYTE TIMES THE */
+/* NUMBER OF WORDS IN A PAGE THAT ARE AVAILABLE */
+/* FOR LOG INFORMATION. */
+/* -------------------------------------------------- */
+ twnlRemWords = ZPAGE_SIZE - ZPAGE_HEADER_SIZE;
+ twnlRemWords = twnlRemWords * ZPAGES_IN_MBYTE;
+ wnlNextLogFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(wnlNextLogFilePtr, clogFileFileSize, logFileRecord);
+/* -------------------------------------------------- */
+/* WRITE THE NEXT LOG RECORD. */
+/* -------------------------------------------------- */
+ ndbrequire(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] < ZPAGE_SIZE);
+ logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] =
+ ZNEXT_MBYTE_TYPE;
+ if (logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)) {
+ jam();
+/* -------------------------------------------------- */
+/* CALCULATE THE NEW REMAINING WORDS WHEN */
+/* CHANGING LOG FILE IS PERFORMED */
+/* -------------------------------------------------- */
+ twnlRemWords = twnlRemWords - (ZPAGE_SIZE - ZPAGE_HEADER_SIZE);
+/* -------------------------------------------------- */
+/* ENSURE THAT THE LOG PAGES ARE WRITTEN AFTER */
+/* WE HAVE CHANGED MBYTE. */
+/* -------------------------------------------------- */
+/* ENSURE LAST PAGE IN PREVIOUS MBYTE IS */
+/* WRITTEN AND THAT THE STATE OF THE WRITE IS */
+/* PROPERLY SET. */
+/* -------------------------------------------------- */
+/* WE HAVE TO CHANGE LOG FILE */
+/* -------------------------------------------------- */
+ completedLogPage(signal, ZLAST_WRITE_IN_FILE);
+ if (wnlNextLogFilePtr.p->fileNo == 0) {
+ jam();
+/* -------------------------------------------------- */
+/* WE HAVE FINALISED A LOG LAP, START FROM LOG */
+/* FILE 0 AGAIN */
+/* -------------------------------------------------- */
+ logPartPtr.p->logLap++;
+ }//if
+ logPartPtr.p->currentLogfile = wnlNextLogFilePtr.i;
+ logFilePtr.i = wnlNextLogFilePtr.i;
+ logFilePtr.p = wnlNextLogFilePtr.p;
+ twnlNewMbyte = 0;
+ } else {
+ jam();
+/* -------------------------------------------------- */
+/* INCREMENT THE CURRENT MBYTE */
+/* SET PAGE INDEX TO PAGE HEADER SIZE */
+/* -------------------------------------------------- */
+ completedLogPage(signal, ZENFORCE_WRITE);
+ twnlNewMbyte = logFilePtr.p->currentMbyte + 1;
+ }//if
+/* -------------------------------------------------- */
+/* CHANGE TO NEW LOG FILE IF NECESSARY */
+/* UPDATE THE FILE POSITION TO THE NEW MBYTE */
+/* FOUND IN PAGE PART OF TNEXT_LOG_PTR */
+/* ALLOCATE AND INITIATE A NEW PAGE SINCE WE */
+/* HAVE SENT THE PREVIOUS PAGE TO DISK. */
+/* SET THE NEW NUMBER OF REMAINING WORDS IN THE */
+/* NEW MBYTE ALLOCATED. */
+/* -------------------------------------------------- */
+ logFilePtr.p->currentMbyte = twnlNewMbyte;
+ logFilePtr.p->filePosition = twnlNewMbyte * ZPAGES_IN_MBYTE;
+ logFilePtr.p->currentFilepage = twnlNewMbyte * ZPAGES_IN_MBYTE;
+ logFilePtr.p->remainingWordsInMbyte = twnlRemWords;
+ seizeLogpage(signal);
+ if (logFilePtr.p->currentMbyte == 0) {
+ jam();
+ logFilePtr.p->lastPageWritten = 0;
+ if (logFilePtr.p->fileNo == 0) {
+ jam();
+ releaseLogpage(signal);
+ logPagePtr.i = logFilePtr.p->logPageZero;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ }//if
+ }//if
+ initLogpage(signal);
+ logFilePtr.p->currentLogpage = logPagePtr.i;
+ if (logFilePtr.p->currentMbyte == 0) {
+ jam();
+/* -------------------------------------------------- */
+/* THIS IS A NEW FILE, WRITE THE FILE DESCRIPTOR*/
+/* ALSO OPEN THE NEXT LOG FILE TO ENSURE THAT */
+/* THIS FILE IS OPEN WHEN ITS TURN COMES. */
+/* -------------------------------------------------- */
+ writeFileHeaderOpen(signal, ZNORMAL);
+ openNextLogfile(signal);
+ logFilePtr.p->fileChangeState = LogFileRecord::BOTH_WRITES_ONGOING;
+ }//if
+ if (logFilePtr.p->fileNo == logPartPtr.p->logTailFileNo) {
+ if (logFilePtr.p->currentMbyte == logPartPtr.p->logTailMbyte) {
+ jam();
+/* -------------------------------------------------- */
+/* THE HEAD AND TAIL HAVE MET. THIS SHOULD NEVER */
+/* OCCUR. IT CAN HAPPEN IF THE LOCAL CHECKPOINTS */
+/* TAKE FAR TOO LONG; ONLY VERY SERIOUS TIMING */
+/* PROBLEMS CAN CAUSE THIS SYSTEM CRASH. */
+/* -------------------------------------------------- */
+ systemError(signal);
+ }//if
+ }//if
+ if (logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)) {
+ jam();
+ twnlNextMbyte = 0;
+ if (logFilePtr.p->fileChangeState != LogFileRecord::NOT_ONGOING) {
+ jam();
+ logPartPtr.p->logPartState = LogPartRecord::FILE_CHANGE_PROBLEM;
+ }//if
+ twnlNextFileNo = wnlNextLogFilePtr.p->fileNo;
+ } else {
+ jam();
+ twnlNextMbyte = logFilePtr.p->currentMbyte + 1;
+ twnlNextFileNo = logFilePtr.p->fileNo;
+ }//if
+ if (twnlNextFileNo == logPartPtr.p->logTailFileNo) {
+ if (logPartPtr.p->logTailMbyte == twnlNextMbyte) {
+ jam();
+/* -------------------------------------------------- */
+/* THE NEXT MBYTE WILL BE THE TAIL. WE MUST */
+/* STOP LOGGING NEW OPERATIONS. THIS OPERATION */
+/* IS ALLOWED TO PASS. COMMIT, NEXT, COMPLETED */
+/* GCI, ABORT AND FRAGMENT SPLIT RECORDS ARE */
+/* ALSO ALLOWED. */
+/* OPERATIONS ARE ALLOWED AGAIN WHEN THE TAIL */
+/* IS MOVED FORWARD AS A RESULT OF A START_LCP */
+/* _ROUND SIGNAL ARRIVING FROM DBDIH. */
+/* -------------------------------------------------- */
+ logPartPtr.p->logPartState = LogPartRecord::TAIL_PROBLEM;
+ }//if
+ }//if
+}//Dblqh::writeNextLog()
+
+void
+Dblqh::execDUMP_STATE_ORD(Signal* signal)
+{
+ DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0];
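+ /* Debugging aid: each DumpStateOrd argument selects a dump of some part
+  * of the internal LQH state, reported through infoEvent(). */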
+ if(dumpState->args[0] == DumpStateOrd::CommitAckMarkersSize){
+ infoEvent("LQH: m_commitAckMarkerPool: %d free size: %d",
+ m_commitAckMarkerPool.getNoOfFree(),
+ m_commitAckMarkerPool.getSize());
+ }
+ if(dumpState->args[0] == DumpStateOrd::CommitAckMarkersDump){
+ infoEvent("LQH: m_commitAckMarkerPool: %d free size: %d",
+ m_commitAckMarkerPool.getNoOfFree(),
+ m_commitAckMarkerPool.getSize());
+
+ CommitAckMarkerIterator iter;
+ for(m_commitAckMarkerHash.first(iter); iter.curr.i != RNIL;
+ m_commitAckMarkerHash.next(iter)){
+ infoEvent("CommitAckMarker: i = %d (0x%x, 0x%x)"
+ " ApiRef: 0x%x apiOprec: 0x%x TcNodeId: %d",
+ iter.curr.i,
+ iter.curr.p->transid1,
+ iter.curr.p->transid2,
+ iter.curr.p->apiRef,
+ iter.curr.p->apiOprec,
+ iter.curr.p->tcNodeId);
+ }
+ }
+
+ // Dump info about number of log pages
+ if(dumpState->args[0] == DumpStateOrd::LqhDumpNoLogPages){
+ infoEvent("LQH: Log pages : %d Free: %d",
+ clogPageFileSize,
+ cnoOfLogPages);
+ }
+
+ // Dump all defined tables that LQH knowns about
+ if(dumpState->args[0] == DumpStateOrd::LqhDumpAllDefinedTabs){
+ for(Uint32 i = 0; i<ctabrecFileSize; i++){
+ TablerecPtr tabPtr;
+ tabPtr.i = i;
+ ptrAss(tabPtr, tablerec);
+ if(tabPtr.p->tableStatus != Tablerec::NOT_DEFINED){
+ infoEvent("Table %d Status: %d Usage: %d",
+ i, tabPtr.p->tableStatus, tabPtr.p->usageCount);
+ }
+ }
+ return;
+ }
+
+ // Dump all ScanRecords
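+ // The records are dumped one per signal: this signal dumps record
+ // <recordNo> and then re-sends DUMP_STATE_ORD to ourselves for the next
+ // record, so a large dump does not block the thread.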
+ if (dumpState->args[0] == DumpStateOrd::LqhDumpAllScanRec){
+ Uint32 recordNo = 0;
+ if (signal->length() == 1)
+ infoEvent("LQH: Dump all ScanRecords - size: %d",
+ cscanrecFileSize);
+ else if (signal->length() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ dumpState->args[0] = DumpStateOrd::LqhDumpOneScanRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+
+ if (recordNo < cscanrecFileSize-1){
+ dumpState->args[0] = DumpStateOrd::LqhDumpAllScanRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ return;
+ }
+
+ // Dump all active ScanRecords
+ if (dumpState->args[0] == DumpStateOrd::LqhDumpAllActiveScanRec){
+ Uint32 recordNo = 0;
+ if (signal->length() == 1)
+ infoEvent("LQH: Dump active ScanRecord - size: %d",
+ cscanrecFileSize);
+ else if (signal->length() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ ScanRecordPtr sp;
+ sp.i = recordNo;
+ c_scanRecordPool.getPtr(sp);
+ if (sp.p->scanState != ScanRecord::SCAN_FREE){
+ dumpState->args[0] = DumpStateOrd::LqhDumpOneScanRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+ }
+
+ if (recordNo < cscanrecFileSize-1){
+ dumpState->args[0] = DumpStateOrd::LqhDumpAllActiveScanRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ return;
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::LqhDumpOneScanRec){
+ Uint32 recordNo = RNIL;
+ if (signal->length() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ if (recordNo >= cscanrecFileSize)
+ return;
+
+ ScanRecordPtr sp;
+ sp.i = recordNo;
+ c_scanRecordPool.getPtr(sp);
+ infoEvent("Dblqh::ScanRecord[%d]: state=%d, type=%d, "
+ "complStatus=%d, scanNodeId=%d",
+ sp.i,
+ sp.p->scanState,
+ sp.p->scanType,
+ sp.p->scanCompletedStatus,
+ sp.p->scanNodeId);
+ infoEvent(" apiBref=0x%x, scanAccPtr=%d",
+ sp.p->scanApiBlockref,
+ sp.p->scanAccPtr);
+ infoEvent(" copyptr=%d, ailen=%d, complOps=%d, concurrOps=%d",
+ sp.p->copyPtr,
+ sp.p->scanAiLength,
+ sp.p->m_curr_batch_size_rows,
+ sp.p->m_max_batch_size_rows);
+ infoEvent(" errCnt=%d, localFid=%d, schV=%d",
+ sp.p->scanErrorCounter,
+ sp.p->scanLocalFragid,
+ sp.p->scanSchemaVersion);
+ infoEvent(" stpid=%d, flag=%d, lhold=%d, lmode=%d, num=%d",
+ sp.p->scanStoredProcId,
+ sp.p->scanFlag,
+ sp.p->scanLockHold,
+ sp.p->scanLockMode,
+ sp.p->scanNumber);
+ infoEvent(" relCount=%d, TCwait=%d, TCRec=%d, KIflag=%d",
+ sp.p->scanReleaseCounter,
+ sp.p->scanTcWaiting,
+ sp.p->scanTcrec,
+ sp.p->scanKeyinfoFlag);
+ return;
+ }
+ if(dumpState->args[0] == DumpStateOrd::LqhDumpLcpState){
+
+ infoEvent("== LQH LCP STATE ==");
+ infoEvent(" clcpCompletedState=%d, c_lcpId=%d, cnoOfFragsCheckpointed=%d",
+ clcpCompletedState,
+ c_lcpId,
+ cnoOfFragsCheckpointed);
+
+ LcpRecordPtr TlcpPtr;
+ // Print information about the current local checkpoint
+ TlcpPtr.i = 0;
+ ptrAss(TlcpPtr, lcpRecord);
+ infoEvent(" lcpState=%d firstLcpLocTup=%d firstLcpLocAcc=%d",
+ TlcpPtr.p->lcpState,
+ TlcpPtr.p->firstLcpLocTup,
+ TlcpPtr.p->firstLcpLocAcc);
+ infoEvent(" lcpAccptr=%d lastFragmentFlag=%d",
+ TlcpPtr.p->lcpAccptr,
+ TlcpPtr.p->lastFragmentFlag);
+ infoEvent("currentFragment.fragPtrI=%d",
+ TlcpPtr.p->currentFragment.fragPtrI);
+ infoEvent("currentFragment.lcpFragOrd.tableId=%d",
+ TlcpPtr.p->currentFragment.lcpFragOrd.tableId);
+ infoEvent(" lcpQueued=%d reportEmpty=%d",
+ TlcpPtr.p->lcpQueued,
+ TlcpPtr.p->reportEmpty);
+ char buf[8*_NDB_NODE_BITMASK_SIZE+1];
+ infoEvent(" m_EMPTY_LCP_REQ=%d",
+ TlcpPtr.p->m_EMPTY_LCP_REQ.getText(buf));
+
+ return;
+ }
+
+}//Dblqh::execDUMP_STATE_ORD()
+
+void Dblqh::execSET_VAR_REQ(Signal* signal)
+{
+#if 0
+ SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
+ ConfigParamId var = setVarReq->variable();
+
+ switch (var) {
+
+ case NoOfConcurrentCheckpointsAfterRestart:
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ case NoOfConcurrentCheckpointsDuringRestart:
+ // Valid only during start so value not set.
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ default:
+ sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
+ } // switch
+#endif
+}//execSET_VAR_REQ()
+
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* ---------------------- TRIGGER HANDLING ------------------------ */
+/* ---------------------------------------------------------------- */
+/* */
+/* All trigger signals from TRIX are forwarded to TUP */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+// Trigger signals
+void
+Dblqh::execCREATE_TRIG_REQ(Signal* signal)
+{
+ jamEntry();
+ NodeId myNodeId = getOwnNodeId();
+ BlockReference tupref = calcTupBlockRef(myNodeId);
+
+ sendSignal(tupref, GSN_CREATE_TRIG_REQ, signal, CreateTrigReq::SignalLength, JBB);
+}
+
+void
+Dblqh::execCREATE_TRIG_CONF(Signal* signal)
+{
+ jamEntry();
+ NodeId myNodeId = getOwnNodeId();
+ BlockReference dictref = calcDictBlockRef(myNodeId);
+
+ sendSignal(dictref, GSN_CREATE_TRIG_CONF, signal, CreateTrigConf::SignalLength, JBB);
+}
+
+void
+Dblqh::execCREATE_TRIG_REF(Signal* signal)
+{
+ jamEntry();
+ NodeId myNodeId = getOwnNodeId();
+ BlockReference dictref = calcDictBlockRef(myNodeId);
+
+ sendSignal(dictref, GSN_CREATE_TRIG_REF, signal, CreateTrigRef::SignalLength, JBB);
+}
+
+void
+Dblqh::execDROP_TRIG_REQ(Signal* signal)
+{
+ jamEntry();
+ NodeId myNodeId = getOwnNodeId();
+ BlockReference tupref = calcTupBlockRef(myNodeId);
+
+ sendSignal(tupref, GSN_DROP_TRIG_REQ, signal, DropTrigReq::SignalLength, JBB);
+}
+
+void
+Dblqh::execDROP_TRIG_CONF(Signal* signal)
+{
+ jamEntry();
+ NodeId myNodeId = getOwnNodeId();
+ BlockReference dictref = calcDictBlockRef(myNodeId);
+
+ sendSignal(dictref, GSN_DROP_TRIG_CONF, signal, DropTrigConf::SignalLength, JBB);
+}
+
+void
+Dblqh::execDROP_TRIG_REF(Signal* signal)
+{
+ jamEntry();
+ NodeId myNodeId = getOwnNodeId();
+ BlockReference dictref = calcDictBlockRef(myNodeId);
+
+ sendSignal(dictref, GSN_DROP_TRIG_REF, signal, DropTrigRef::SignalLength, JBB);
+}
+
+Uint32 Dblqh::calcPageCheckSum(LogPageRecordPtr logP){
+ Uint32 checkSum = 37;
+#ifdef VM_TRACE
+ for (Uint32 i = (ZPOS_CHECKSUM+1); i<ZPAGE_SIZE; i++)
+ checkSum = logP.p->logPageWord[i] ^ checkSum;
+#endif
+ return checkSum;
+}
+
diff --git a/storage/ndb/src/kernel/blocks/dblqh/Makefile.am b/storage/ndb/src/kernel/blocks/dblqh/Makefile.am
new file mode 100644
index 00000000000..4807a8ec5d0
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dblqh/Makefile.am
@@ -0,0 +1,25 @@
+#SUBDIRS = redoLogReader
+
+noinst_LIBRARIES = libdblqh.a
+
+libdblqh_a_SOURCES = DblqhInit.cpp DblqhMain.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdblqh.dsp
+
+libdblqh.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libdblqh_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dblqh/redoLogReader/Makefile b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/Makefile
index a89b648de77..a89b648de77 100644
--- a/ndb/src/kernel/blocks/dblqh/redoLogReader/Makefile
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/Makefile
diff --git a/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp
index 092b7840c20..092b7840c20 100644
--- a/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp
diff --git a/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp
index e73986e4d73..e73986e4d73 100644
--- a/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp
diff --git a/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp
index 540df7b507e..540df7b507e 100644
--- a/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp
diff --git a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
new file mode 100644
index 00000000000..79b6cec6d44
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -0,0 +1,1955 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBTC_H
+#define DBTC_H
+
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <SimulatedBlock.hpp>
+#include <DLHashTable.hpp>
+#include <SLList.hpp>
+#include <DLList.hpp>
+#include <DLFifoList.hpp>
+#include <DataBuffer.hpp>
+#include <Bitmask.hpp>
+#include <AttributeList.hpp>
+#include <signaldata/AttrInfo.hpp>
+#include <signaldata/LqhTransConf.hpp>
+#include <signaldata/LqhKey.hpp>
+#include <signaldata/TrigAttrInfo.hpp>
+#include <signaldata/TcIndx.hpp>
+#include <signaldata/TransIdAI.hpp>
+#include <signaldata/EventReport.hpp>
+#include <trigger_definitions.h>
+#include <SignalCounter.hpp>
+
+#ifdef DBTC_C
+/*
+ * 2.2 LOCAL SYMBOLS
+ * -----------------
+ */
+#define Z8NIL 255
+#define ZAPI_CONNECT_FILESIZE 20
+#define ZATTRBUF_FILESIZE 4000
+#define ZCLOSED 2
+#define ZCOMMITING 0 /* VALUE FOR TRANSTATUS */
+#define ZCOMMIT_SETUP 2
+#define ZCONTINUE_ABORT_080 4
+#define ZDATABUF_FILESIZE 4000
+#define ZGCP_FILESIZE 10
+#define ZINBUF_DATA_LEN 24 /* POSITION OF 'DATA LENGTH'-VARIABLE. */
+#define ZINBUF_NEXT 27 /* POSITION OF 'NEXT'-VARIABLE. */
+#define ZINBUF_PREV 26 /* POSITION OF 'PREVIOUS'-VARIABLE. */
+#define ZINTSPH1 1
+#define ZINTSPH2 2
+#define ZINTSPH3 3
+#define ZINTSPH6 6
+#define ZLASTPHASE 255
+#define ZMAX_DATA_IN_LQHKEYREQ 12
+#define ZNODEBUF_FILESIZE 2000
+#define ZNR_OF_SEIZE 10
+#define ZSCANREC_FILE_SIZE 100
+#define ZSCAN_FRAGREC_FILE_SIZE 400
+#define ZSCAN_OPREC_FILE_SIZE 400
+#define ZSEND_ATTRINFO 0
+#define ZSPH1 1
+#define ZTABREC_FILESIZE 16
+#define ZTAKE_OVER_ACTIVE 1
+#define ZTAKE_OVER_IDLE 0
+#define ZTC_CONNECT_FILESIZE 200
+#define ZTCOPCONF_SIZE 6
+
+// ----------------------------------------
+// Error Codes for Scan
+// ----------------------------------------
+#define ZNO_CONCURRENCY_ERROR 242
+#define ZTOO_HIGH_CONCURRENCY_ERROR 244
+#define ZNO_SCANREC_ERROR 245
+#define ZNO_FRAGMENT_ERROR 246
+#define ZSCAN_AI_LEN_ERROR 269
+#define ZSCAN_LQH_ERROR 270
+#define ZSCAN_FRAG_LQH_ERROR 274
+
+#define ZSCANTIME_OUT_ERROR 296
+#define ZSCANTIME_OUT_ERROR2 297
+
+// ----------------------------------------
+// Error Codes for transactions
+// ----------------------------------------
+#define ZSTATE_ERROR 202
+#define ZLENGTH_ERROR 207 // Also Scan
+#define ZERO_KEYLEN_ERROR 208
+#define ZSIGNAL_ERROR 209
+#define ZGET_ATTRBUF_ERROR 217 // Also Scan
+#define ZGET_DATAREC_ERROR 218
+#define ZMORE_AI_IN_TCKEYREQ_ERROR 220
+#define ZCOMMITINPROGRESS 230
+#define ZROLLBACKNOTALLOWED 232
+#define ZNO_FREE_TC_CONNECTION 233 // Also Scan
+#define ZABORTINPROGRESS 237
+#define ZPREPAREINPROGRESS 238
+#define ZWRONG_SCHEMA_VERSION_ERROR 241 // Also Scan
+#define ZSCAN_NODE_ERROR 250
+#define ZTRANS_STATUS_ERROR 253
+#define ZTIME_OUT_ERROR 266
+#define ZSIMPLE_READ_WITHOUT_AI 271
+#define ZNO_AI_WITH_UPDATE 272
+#define ZSEIZE_API_COPY_ERROR 275
+#define ZSCANINPROGRESS 276
+#define ZABORT_ERROR 277
+#define ZCOMMIT_TYPE_ERROR 278
+
+#define ZNO_FREE_TC_MARKER 279
+#define ZNODE_SHUTDOWN_IN_PROGRESS 280
+#define ZCLUSTER_SHUTDOWN_IN_PROGRESS 281
+#define ZWRONG_STATE 282
+#define ZCLUSTER_IN_SINGLEUSER_MODE 299
+
+#define ZDROP_TABLE_IN_PROGRESS 283
+#define ZNO_SUCH_TABLE 284
+#define ZUNKNOWN_TABLE_ERROR 285
+#define ZNODEFAIL_BEFORE_COMMIT 286
+#define ZINDEX_CORRUPT_ERROR 287
+
+// ----------------------------------------
+// Seize error
+// ----------------------------------------
+#define ZNO_FREE_API_CONNECTION 219
+#define ZSYSTEM_NOT_STARTED_ERROR 203
+
+// ----------------------------------------
+// Release errors
+// ----------------------------------------
+#define ZINVALID_CONNECTION 229
+
+
+#define ZNOT_FOUND 626
+#define ZALREADYEXIST 630
+#define ZINCONSISTENTHASHINDEX 892
+#define ZNOTUNIQUE 893
+#endif
+
+class Dbtc: public SimulatedBlock {
+public:
+ enum ConnectionState {
+ CS_CONNECTED = 0,
+ CS_DISCONNECTED = 1,
+ CS_STARTED = 2,
+ CS_RECEIVING = 3,
+ CS_PREPARED = 4,
+ CS_START_PREPARING = 5,
+ CS_REC_PREPARING = 6,
+ CS_RESTART = 7,
+ CS_ABORTING = 8,
+ CS_COMPLETING = 9,
+ CS_COMPLETE_SENT = 10,
+ CS_PREPARE_TO_COMMIT = 11,
+ CS_COMMIT_SENT = 12,
+ CS_START_COMMITTING = 13,
+ CS_COMMITTING = 14,
+ CS_REC_COMMITTING = 15,
+ CS_WAIT_ABORT_CONF = 16,
+ CS_WAIT_COMPLETE_CONF = 17,
+ CS_WAIT_COMMIT_CONF = 18,
+ CS_FAIL_ABORTING = 19,
+ CS_FAIL_ABORTED = 20,
+ CS_FAIL_PREPARED = 21,
+ CS_FAIL_COMMITTING = 22,
+ CS_FAIL_COMMITTED = 23,
+ CS_FAIL_COMPLETED = 24,
+ CS_START_SCAN = 25
+ };
+
+ enum OperationState {
+ OS_CONNECTING_DICT = 0,
+ OS_CONNECTED = 1,
+ OS_OPERATING = 2,
+ OS_PREPARED = 3,
+ OS_COMMITTING = 4,
+ OS_COMMITTED = 5,
+ OS_COMPLETING = 6,
+ OS_COMPLETED = 7,
+ OS_RESTART = 8,
+ OS_ABORTING = 9,
+ OS_ABORT_SENT = 10,
+ OS_TAKE_OVER = 11,
+ OS_WAIT_DIH = 12,
+ OS_WAIT_KEYINFO = 13,
+ OS_WAIT_ATTR = 14,
+ OS_WAIT_COMMIT_CONF = 15,
+ OS_WAIT_ABORT_CONF = 16,
+ OS_WAIT_COMPLETE_CONF = 17,
+ OS_WAIT_SCAN = 18
+ };
+
+ enum AbortState {
+ AS_IDLE = 0,
+ AS_ACTIVE = 1
+ };
+
+ enum HostState {
+ HS_ALIVE = 0,
+ HS_DEAD = 1
+ };
+
+ enum LqhTransState {
+ LTS_IDLE = 0,
+ LTS_ACTIVE = 1
+ };
+
+ enum TakeOverState {
+ TOS_NOT_DEFINED = 0,
+ TOS_IDLE = 1,
+ TOS_ACTIVE = 2,
+ TOS_COMPLETED = 3,
+ TOS_NODE_FAILED = 4
+ };
+
+ enum FailState {
+ FS_IDLE = 0,
+ FS_LISTENING = 1,
+ FS_COMPLETING = 2
+ };
+
+ enum SystemStartState {
+ SSS_TRUE = 0,
+ SSS_FALSE = 1
+ };
+
+ enum TimeOutCheckState {
+ TOCS_TRUE = 0,
+ TOCS_FALSE = 1
+ };
+
+ enum ReturnSignal {
+ RS_NO_RETURN = 0,
+ RS_TCKEYCONF = 1,
+ RS_TC_COMMITCONF = 3,
+ RS_TCROLLBACKCONF = 4,
+ RS_TCROLLBACKREP = 5
+ };
+
+ enum IndexOperationState {
+ IOS_NOOP = 0,
+ IOS_INDEX_ACCESS = 1,
+ IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF = 2,
+ IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI = 3,
+ IOS_INDEX_OPERATION = 4
+ };
+
+ enum IndexState {
+ IS_BUILDING = 0, // build in progress, start state at create
+ IS_ONLINE = 1 // ready to use
+ };
+
+
+ /**--------------------------------------------------------------------------
+ * LOCAL SYMBOLS PER 'SYMBOL-VALUED' VARIABLE
+ *
+ *
+ * NSYMB ZAPI_CONNECT_FILESIZE = 20
+ * NSYMB ZTC_CONNECT_FILESIZE = 200
+ * NSYMB ZHOST_FILESIZE = 16
+ * NSYMB ZDATABUF_FILESIZE = 4000
+ * NSYMB ZATTRBUF_FILESIZE = 4000
+ * NSYMB ZGCP_FILESIZE = 10
+ *
+ *
+ * ABORTED CODES
+ * TPHASE NSYMB ZSPH1 = 1
+ * NSYMB ZLASTPHASE = 255
+ *
+ *
+ * LQH_TRANS
+ * NSYMB ZTRANS_ABORTED = 1
+ * NSYMB ZTRANS_PREPARED = 2
+ * NSYMB ZTRANS_COMMITTED = 3
+ * NSYMB ZCOMPLETED_LQH_TRANS = 4
+ * NSYMB ZTRANS_COMPLETED = 5
+ *
+ *
+ * TAKE OVER
+ * NSYMB ZTAKE_OVER_IDLE = 0
+ * NSYMB ZTAKE_OVER_ACTIVE = 1
+ *
+ * ATTRBUF (ATTRBUF_RECORD)
+ * NSYMB ZINBUF_DATA_LEN = 24
+ * NSYMB ZINBUF_NEXTFREE = 25 (NOT USED )
+ * NSYMB ZINBUF_PREV = 26
+ * NSYMB ZINBUF_NEXT = 27
+ -------------------------------------------------------------------------*/
+ /*
+ 2.3 RECORDS AND FILESIZES
+ -------------------------
+ */
+ /* **************************************************************** */
+ /* ---------------------------------------------------------------- */
+ /* ------------------- TRIGGER AND INDEX DATA --------------------- */
+ /* ---------------------------------------------------------------- */
+ /* **************************************************************** */
+ /* ********* DEFINED TRIGGER DATA ********* */
+ /* THIS RECORD FORMS LISTS OF ACTIVE */
+ /* TRIGGERS FOR EACH TABLE. */
+ /* THE RECORDS ARE MANAGED BY A TRIGGER */
+ /* POOL WHERE A TRIGGER RECORD IS SEIZED */
+ /* WHEN A TRIGGER IS ACTIVATED AND RELEASED */
+ /* WHEN THE TRIGGER IS DEACTIVATED. */
+ /* **************************************** */
+ struct TcDefinedTriggerData {
+ /**
+ * Trigger id, used to identify the trigger
+ */
+ UintR triggerId;
+
+ /**
+ * Trigger type, defines what the trigger is used for
+ */
+ TriggerType::Value triggerType;
+
+ /**
+ * Trigger event, defines on what kind of operation the trigger fires
+ */
+ TriggerEvent::Value triggerEvent;
+
+ /**
+ * Attribute mask, defines what attributes are to be monitored
+ * Can be seen as a compact representation of SQL column name list
+ */
+ Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask;
+
+ /**
+ * Next ptr (used in pool/list)
+ */
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+
+ /**
+ * Index id, only used by secondary_index triggers. This is the same as
+ * the index table id in DICT.
+ **/
+ Uint32 indexId;
+
+ /**
+ * Prev pointer (used in list)
+ */
+ Uint32 prevList;
+
+ inline void print(NdbOut & s) const {
+ s << "[DefinedTriggerData = " << triggerId << "]";
+ }
+ };
+ typedef Ptr<TcDefinedTriggerData> DefinedTriggerPtr;
+
+ /**
+ * Pool of trigger data record
+ */
+ ArrayPool<TcDefinedTriggerData> c_theDefinedTriggerPool;
+
+ /**
+ * The list of active triggers
+ */
+ DLList<TcDefinedTriggerData> c_theDefinedTriggers;
+
+ typedef DataBuffer<11> AttributeBuffer;
+
+ AttributeBuffer::DataBufferPool c_theAttributeBufferPool;
+
+ UintR c_transactionBufferSpace;
+
+
+ /* ********** FIRED TRIGGER DATA ********** */
+ /* THIS RECORD FORMS LISTS OF FIRED */
+ /* TRIGGERS FOR A TRANSACTION. */
+ /* THE RECORDS ARE MANAGED BY A TRIGGER */
+ /* POOL WHERE A TRIGGER RECORD IS SEIZED */
+ /* WHEN A TRIGGER FIRES AND RELEASED */
+ /* WHEN THE TRANSACTION COMPLETES. */
+ /* **************************************** */
+ struct TcFiredTriggerData {
+ TcFiredTriggerData() {}
+
+ /**
+ * Trigger id, used to identify the trigger
+ **/
+ Uint32 triggerId;
+
+ /**
+ * The operation that fired the trigger
+ */
+ Uint32 fireingOperation;
+
+ /**
+ * The fragment id of the firing operation. This will be appended
+ * to the Primary Key such that the record can be found even in the
+ * case of user defined partitioning.
+ */
+ Uint32 fragId;
+
+ /**
+ * Used for scrapping in case of node failure
+ */
+ Uint32 nodeId;
+
+ /**
+ * Trigger attribute info, primary key value(s)
+ */
+ AttributeBuffer::Head keyValues;
+
+ /**
+ * Trigger attribute info, attribute value(s) before operation
+ */
+ AttributeBuffer::Head beforeValues;
+
+ /**
+ * Trigger attribute info, attribute value(s) after operation
+ */
+ AttributeBuffer::Head afterValues;
+
+ /**
+ * Next ptr (used in pool/list)
+ */
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ Uint32 nextHash;
+ };
+
+ /**
+ * Prev pointer (used in list)
+ */
+ union {
+ Uint32 prevList;
+ Uint32 prevHash;
+ };
+
+ inline void print(NdbOut & s) const {
+ s << "[FiredTriggerData = " << triggerId << "]";
+ }
+
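+ /**
+  * Hash on the firing operation (with the node id mixed in); used by the
+  * fired trigger hash (c_firedTriggerHash) to find the triggers fired by
+  * a given operation.
+  */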
+ inline Uint32 hashValue() const {
+ return fireingOperation ^ nodeId;
+ }
+
+ inline bool equal(const TcFiredTriggerData & rec) const {
+ return fireingOperation == rec.fireingOperation && nodeId == rec.nodeId;
+ }
+ };
+ typedef Ptr<TcFiredTriggerData> FiredTriggerPtr;
+
+ /**
+ * Pool of trigger data record
+ */
+ ArrayPool<TcFiredTriggerData> c_theFiredTriggerPool;
+ DLHashTable<TcFiredTriggerData> c_firedTriggerHash;
+ AttributeBuffer::DataBufferPool c_theTriggerAttrInfoPool;
+
+ Uint32 c_maxNumberOfDefinedTriggers;
+ Uint32 c_maxNumberOfFiredTriggers;
+
+ struct AttrInfoRecord {
+ /**
+ * Pre-allocated AttrInfo signal
+ */
+ AttrInfo attrInfo;
+
+ /**
+ * Next ptr (used in pool/list)
+ */
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ /**
+ * Prev pointer (used in list)
+ */
+ Uint32 prevList;
+ };
+
+
+ /* ************* INDEX DATA *************** */
+ /* THIS RECORD FORMS LISTS OF ACTIVE */
+ /* INDEX FOR EACH TABLE. */
+ /* THE RECORDS ARE MANAGED BY A INDEX */
+ /* POOL WHERE AN INDEX RECORD IS SEIZED */
+ /* WHEN AN INDEX IS CREATED AND RELEASED */
+ /* WHEN THE INDEX IS DROPPED. */
+ /* **************************************** */
+ struct TcIndexData {
+ /**
+ * IndexState
+ */
+ IndexState indexState;
+
+ /**
+ * Index id, same as index table id in DICT
+ */
+ Uint32 indexId;
+
+ /**
+ * Index attribute list. Only the length is used in v21x.
+ */
+ AttributeList attributeList;
+
+ /**
+ * Primary table id, the primary table to be indexed
+ */
+ Uint32 primaryTableId;
+
+ /**
+ * Primary key position in secondary table
+ */
+ Uint32 primaryKeyPos;
+
+ /**
+ * Next ptr (used in pool/list)
+ */
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ /**
+ * Prev pointer (used in list)
+ */
+ Uint32 prevList;
+ };
+
+ typedef Ptr<TcIndexData> TcIndexDataPtr;
+
+ /**
+ * Pool of index data record
+ */
+ ArrayPool<TcIndexData> c_theIndexPool;
+
+ /**
+ * The list of defined indexes
+ */
+ ArrayList<TcIndexData> c_theIndexes;
+ UintR c_maxNumberOfIndexes;
+
+ struct TcIndexOperation {
+ TcIndexOperation(AttributeBuffer::DataBufferPool & abp) :
+ indexOpState(IOS_NOOP),
+ expectedKeyInfo(0),
+ keyInfo(abp),
+ expectedAttrInfo(0),
+ attrInfo(abp),
+ expectedTransIdAI(0),
+ transIdAI(abp),
+ indexReadTcConnect(RNIL)
+ {}
+
+ ~TcIndexOperation()
+ {
+ }
+
+ // Index data
+ Uint32 indexOpId;
+ IndexOperationState indexOpState; // Used to mark on-going TcKeyReq
+ Uint32 expectedKeyInfo;
+ AttributeBuffer keyInfo; // For accumulating IndxKeyInfo
+ Uint32 expectedAttrInfo;
+ AttributeBuffer attrInfo; // For accumulating IndxAttrInfo
+ Uint32 expectedTransIdAI;
+ AttributeBuffer transIdAI; // For accumulating TransId_AI
+
+ TcKeyReq tcIndxReq;
+ UintR connectionIndex;
+ UintR indexReadTcConnect; //
+
+ /**
+ * Next ptr (used in pool/list)
+ */
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ /**
+ * Prev pointer (used in list)
+ */
+ Uint32 prevList;
+ };
+
+ typedef Ptr<TcIndexOperation> TcIndexOperationPtr;
+
+ /**
+ * Pool of index data record
+ */
+ ArrayPool<TcIndexOperation> c_theIndexOperationPool;
+
+ UintR c_maxNumberOfIndexOperations;
+
+ /************************** API CONNECT RECORD ***********************
+ * The API connect record contains the connection record to which the
+ * application connects.
+ *
+ * The application can send one operation at a time. It can send a
+ * new operation immediately after sending the previous operation.
+ * Thereby several operations can be active in one transaction within TC.
+ * This is achieved by using the API connect record.
+ * Each active operation is handled by the TC connect record.
+ * As soon as the TC connect record has sent the
+ * request to the LQH it is ready to receive new operations.
+ * The LQH connect record takes care of waiting for an operation to
+ * complete.
+ * When an operation has completed on the LQH connect record,
+ * a new operation can be started on this LQH connect record.
+ *******************************************************************
+ *
+ * API CONNECT RECORD ALIGNED TO BE 256 BYTES
+ ********************************************************************/
+
+ /*******************************************************************>*/
+ // We break out the API Timer for optimisation on scanning rather than
+ // on fast access.
+ /*******************************************************************>*/
+ inline void setApiConTimer(Uint32 apiConPtrI, Uint32 value, Uint32 line){
+ c_apiConTimer[apiConPtrI] = value;
+ c_apiConTimer_line[apiConPtrI] = line;
+ }
+
+ inline Uint32 getApiConTimer(Uint32 apiConPtrI) const {
+ return c_apiConTimer[apiConPtrI];
+ }
+ UintR* c_apiConTimer;
+ UintR* c_apiConTimer_line;
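+ // c_apiConTimer_line records the source line that last updated the
+ // timer, which helps when debugging transaction timeouts.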
+
+ struct ApiConnectRecord {
+ ApiConnectRecord(ArrayPool<TcFiredTriggerData> & firedTriggerPool,
+ ArrayPool<TcIndexOperation> & seizedIndexOpPool):
+ theFiredTriggers(firedTriggerPool),
+ isIndexOp(false),
+ theSeizedIndexOperations(seizedIndexOpPool)
+ {}
+
+ //---------------------------------------------------
+ // First 16 byte cache line. Hot variables.
+ //---------------------------------------------------
+ ConnectionState apiConnectstate;
+ UintR transid[2];
+ UintR firstTcConnect;
+
+ //---------------------------------------------------
+ // Second 16 byte cache line. Hot variables.
+ //---------------------------------------------------
+ UintR lqhkeyconfrec;
+ UintR cachePtr;
+ UintR currSavePointId;
+ UintR counter;
+
+ //---------------------------------------------------
+ // Third 16 byte cache line. First and second cache
+ // line plus this will be enough for copy API records.
+ // Variables used in late phases.
+ //---------------------------------------------------
+ UintR nextGcpConnect;
+ UintR prevGcpConnect;
+ UintR gcpPointer;
+ UintR ndbapiConnect;
+
+ //---------------------------------------------------
+ // Fourth 16 byte cache line. Only used in late phases.
+ // Plus 4 bytes of error handling.
+ //---------------------------------------------------
+ UintR nextApiConnect;
+ BlockReference ndbapiBlockref;
+ UintR apiCopyRecord;
+ UintR globalcheckpointid;
+
+ //---------------------------------------------------
+ // Second 64 byte cache line starts. First 16 byte
+ // cache line in this one. Variables primarily used
+ // in early phase.
+ //---------------------------------------------------
+ UintR lastTcConnect;
+ UintR lqhkeyreqrec;
+ AbortState abortState;
+ Uint32 buddyPtr;
+ Uint8 m_exec_flag;
+ Uint8 unused2;
+ Uint8 takeOverRec;
+ Uint8 currentReplicaNo;
+
+ //---------------------------------------------------
+ // Error Handling variables. If cache line 32 bytes
+ // ensures that cache line is still only read in
+ // early phases.
+ //---------------------------------------------------
+ union {
+ UintR apiScanRec;
+ UintR commitAckMarker;
+ };
+ UintR currentTcConnect;
+ BlockReference tcBlockref;
+ Uint16 returncode;
+ Uint16 takeOverInd;
+
+ //---------------------------------------------------
+ // Second 64 byte cache line. Third 16 byte cache line
+ // in this one. Variables primarily used in early phase
+ // and checked in late phase.
+ // Fourth cache line is the tcSendArray that is used
+ // when two and three operations are responded to in
+ // parallel. The first two entries in tcSendArray is
+ // part of the third cache line.
+ //---------------------------------------------------
+ //---------------------------------------------------
+ // timeOutCounter is used waiting for ABORTCONF, COMMITCONF
+ // and COMPLETECONF
+ //---------------------------------------------------
+ UintR failureNr;
+ Uint8 tckeyrec; // Changed from R
+ Uint8 tcindxrec;
+ Uint8 apiFailState; // Changed from R
+ ReturnSignal returnsignal;
+ Uint8 timeOutCounter;
+
+ UintR tcSendArray[6];
+
+ // Trigger data
+
+ /**
+ * The list of fired triggers
+ */
+ DLFifoList<TcFiredTriggerData> theFiredTriggers;
+
+ bool triggerPending; // Used to mark waiting for a CONTINUEB
+
+ // Index data
+
+ bool isIndexOp; // Used to mark on-going TcKeyReq as index table access
+ bool indexOpReturn;
+ UintR noIndexOp; // Number of outstanding index ops
+
+ // Index op return context
+ UintR indexOp;
+ UintR clientData;
+ UintR attrInfoLen;
+
+ UintR accumulatingIndexOp;
+ UintR executingIndexOp;
+ UintR tcIndxSendArray[6];
+ ArrayList<TcIndexOperation> theSeizedIndexOperations;
+ };
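+
+ // Editor's note, not part of the original file: the cache-line comments in
+ // this struct document an intended layout rather than something the
+ // compiler enforces. A hypothetical compile-time style check of the
+ // "first 16 byte cache line" claim (apiConnectstate + transid[2] +
+ // firstTcConnect = 4 words) could look like
+ //
+ // assert(offsetof(ApiConnectRecord, lqhkeyconfrec) == 16);
+ //
+ // though offsetof is only illustrative here, since the struct is not a
+ // plain aggregate.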
+
+ typedef Ptr<ApiConnectRecord> ApiConnectRecordPtr;
+
+
+ /************************** TC CONNECT RECORD ************************/
+ /* *******************************************************************/
+ /* TC CONNECT RECORD KEEPS ALL INFORMATION TO CARRY OUT A TRANSACTION*/
+ /* THE TRANSACTION CONTROLLER ESTABLISHES CONNECTIONS TO DIFFERENT */
+ /* BLOCKS TO CARRY OUT THE TRANSACTION. THERE CAN BE SEVERAL RECORDS */
+ /* PER ACTIVE TRANSACTION. THE TC CONNECT RECORD COOPERATES WITH THE */
+ /* API CONNECT RECORD FOR COMMUNICATION WITH THE API AND WITH THE */
+ /* LQH CONNECT RECORD FOR COMMUNICATION WITH THE LQH'S INVOLVED IN */
+ /* THE TRANSACTION. TC CONNECT RECORD IS PERMANENTLY CONNECTED TO A */
+ /* RECORD IN DICT AND ONE IN DIH. IT CONTAINS A LIST OF ACTIVE LQH */
+ /* CONNECT RECORDS AND A LIST OF STARTED BUT NOT ACTIVE LQH CONNECT */
+ /* RECORDS. IT DOES ALSO CONTAIN A LIST OF ALL OPERATIONS THAT ARE */
+ /* EXECUTED WITH THE TC CONNECT RECORD. */
+ /*******************************************************************>*/
+ /* TC_CONNECT RECORD ALIGNED TO BE 128 BYTES */
+ /*******************************************************************>*/
+ struct TcConnectRecord {
+ //---------------------------------------------------
+ // First 16 byte cache line. Those variables are only
+ // used in error cases.
+ //---------------------------------------------------
+ UintR tcOprec; /* TC OPREC of operation being taken over */
+ Uint16 failData[4]; /* Failed nodes when taking over an operation */
+ UintR nextTcFailHash;
+
+ //---------------------------------------------------
+ // Second 16 byte cache line. Those variables are used
+ // from LQHKEYCONF to sending COMMIT and COMPLETED.
+ //---------------------------------------------------
+ UintR lastLqhCon; /* Connect record in last replica's Lqh record */
+ Uint16 lastLqhNodeId; /* Node id of last replica's Lqh */
+ Uint16 m_execAbortOption;/* TcKeyReq::ExecuteAbortOption */
+ UintR commitAckMarker; /* CommitMarker I value */
+
+ //---------------------------------------------------
+ // Third 16 byte cache line. The hottest variables.
+ //---------------------------------------------------
+ OperationState tcConnectstate; /* THE STATE OF THE CONNECT*/
+ UintR apiConnect; /* POINTER TO API CONNECT RECORD */
+ UintR nextTcConnect; /* NEXT TC RECORD*/
+ Uint8 dirtyOp;
+ Uint8 lastReplicaNo; /* NUMBER OF THE LAST REPLICA IN THE OPERATION */
+ Uint8 noOfNodes; /* TOTAL NUMBER OF NODES IN OPERATION */
+ Uint8 operation; /* OPERATION TYPE */
+ /* 0 = READ REQUEST */
+ /* 1 = UPDATE REQUEST */
+ /* 2 = INSERT REQUEST */
+ /* 3 = DELETE REQUEST */
+
+ //---------------------------------------------------
+ // Fourth 16 byte cache line. The mildly hot variables.
+ // tcNodedata expands 4 bytes into the next cache line;
+ // the higher indexes are almost never used.
+ //---------------------------------------------------
+ UintR clientData; /* SENDERS OPERATION POINTER */
+ UintR dihConnectptr; /* CONNECTION TO DIH BLOCK ON THIS NODE */
+ UintR prevTcConnect; /* DOUBLY LINKED LIST OF TC CONNECT RECORDS*/
+ UintR savePointId;
+
+ Uint16 tcNodedata[4];
+
+ // Trigger data
+ FiredTriggerPtr accumulatingTriggerData;
+ UintR noFiredTriggers;
+ UintR noReceivedTriggers;
+ UintR triggerExecutionCount;
+ UintR triggeringOperation;
+ UintR savedState[LqhKeyConf::SignalLength];
+
+ // Index data
+ bool isIndexOp; // Used to mark on-going TcKeyReq as index table access
+ UintR indexOp;
+ UintR currentIndexId;
+ UintR attrInfoLen;
+ };
+
+ friend struct TcConnectRecord;
+
+ typedef Ptr<TcConnectRecord> TcConnectRecordPtr;
+
+ // ********************** CACHE RECORD **************************************
+ //---------------------------------------------------------------------------
+ // This record is used between reception of TCKEYREQ and sending of LQHKEYREQ
+ // It is separated so as to improve the cache hit rate and also to minimise
+ // the necessary memory storage in NDB Cluster.
+ //---------------------------------------------------------------------------
+
+ struct CacheRecord {
+ //---------------------------------------------------
+ // First 16 byte cache line. Variables used by
+ // ATTRINFO processing.
+ //---------------------------------------------------
+ UintR firstAttrbuf; /* POINTER TO LINKED LIST OF ATTRIBUTE BUFFERS */
+ UintR lastAttrbuf; /* POINTER TO LINKED LIST OF ATTRIBUTE BUFFERS */
+ UintR currReclenAi;
+ Uint16 attrlength; /* ATTRIBUTE INFORMATION LENGTH */
+ Uint16 save1;
+
+ //---------------------------------------------------
+ // Second 16 byte cache line. Variables initiated by
+ // TCKEYREQ and used in LQHKEYREQ.
+ //---------------------------------------------------
+ UintR attrinfo15[4];
+
+ //---------------------------------------------------
+ // Third 16 byte cache line. Variables initiated by
+ // TCKEYREQ and used in LQHKEYREQ.
+ //---------------------------------------------------
+ UintR attrinfo0;
+ UintR schemaVersion;/* SCHEMA VERSION USED IN TRANSACTION */
+ UintR tableref; /* POINTER TO THE TABLE IN WHICH THE FRAGMENT EXISTS*/
+ Uint16 apiVersionNo;
+ Uint16 keylen; /* KEY LENGTH SENT BY REQUEST SIGNAL */
+
+ //---------------------------------------------------
+ // Fourth 16 byte cache line. Variables initiated by
+ // TCKEYREQ and used in LQHKEYREQ.
+ //---------------------------------------------------
+ UintR keydata[4]; /* RECEIVES FIRST 16 BYTES OF TUPLE KEY */
+
+ //---------------------------------------------------
+ // First 16 byte cache line in second 64 byte cache
+ // line. Diverse use.
+ //---------------------------------------------------
+ UintR fragmentid; /* THE COMPUTED FRAGMENT ID */
+ UintR hashValue; /* THE HASH VALUE USED TO LOCATE FRAGMENT */
+
+ Uint8 distributionKeyIndicator;
+ Uint8 m_special_hash; // collation or distribution key
+ Uint8 unused2;
+ Uint8 lenAiInTckeyreq; /* LENGTH OF ATTRIBUTE INFORMATION IN TCKEYREQ */
+
+ Uint8 fragmentDistributionKey; /* DIH generation no */
+
+ /**
+ * EXECUTION MODE OF OPERATION
+ * 0 = NORMAL EXECUTION, 1 = INTERPRETED EXECUTION
+ */
+ Uint8 opExec;
+
+ /**
+ * LOCK TYPE OF OPERATION IF READ OPERATION
+ * 0 = READ LOCK, 1 = WRITE LOCK
+ */
+ Uint8 opLock;
+
+ /**
+ * IS THE OPERATION A SIMPLE TRANSACTION
+ * 0 = NO, 1 = YES
+ */
+ Uint8 opSimple;
+
+ //---------------------------------------------------
+ // Second 16 byte cache line in second 64 byte cache
+ // line. Diverse use.
+ //---------------------------------------------------
+ UintR distributionKey;
+ UintR nextCacheRec;
+ UintR unused3;
+ Uint32 scanInfo;
+
+ //---------------------------------------------------
+ // Third 16 byte cache line in second 64
+ // byte cache line. Diverse use.
+ //---------------------------------------------------
+ Uint32 unused4;
+ Uint32 scanTakeOverInd;
+ UintR firstKeybuf; /* POINTER TO THE LINKED LIST OF KEY BUFFERS */
+ UintR lastKeybuf; /* VARIABLE POINTING TO THE LAST KEY BUFFER */
+
+ //---------------------------------------------------
+ // Fourth 16 byte cache line in second 64
+ // byte cache line. Not used currently.
+ //---------------------------------------------------
+ UintR packedCacheVar[4];
+ };
+
+ typedef Ptr<CacheRecord> CacheRecordPtr;
+
+ /* ************************ HOST RECORD ********************************** */
+ /********************************************************/
+ /* THIS RECORD CONTAINS ALIVE-STATUS ON ALL NODES IN THE*/
+ /* SYSTEM */
+ /********************************************************/
+ /* THIS RECORD IS ALIGNED TO BE 128 BYTES. */
+ /********************************************************/
+ struct HostRecord {
+ HostState hostStatus;
+ LqhTransState lqhTransStatus;
+ TakeOverState takeOverStatus;
+ bool inPackedList;
+ UintR noOfPackedWordsLqh;
+ UintR packedWordsLqh[26];
+ UintR noOfWordsTCKEYCONF;
+ UintR packedWordsTCKEYCONF[30];
+ UintR noOfWordsTCINDXCONF;
+ UintR packedWordsTCINDXCONF[30];
+ BlockReference hostLqhBlockRef;
+ }; /* p2c: size = 128 bytes */
+
+ typedef Ptr<HostRecord> HostRecordPtr;
+
+ /* *********** TABLE RECORD ********************************************* */
+
+ /********************************************************/
+ /* THIS RECORD CONTAINS THE CURRENT SCHEMA VERSION OF */
+ /* ALL TABLES IN THE SYSTEM. */
+ /********************************************************/
+ struct TableRecord {
+ Uint32 currentSchemaVersion;
+ Uint8 enabled;
+ Uint8 dropping;
+ Uint8 tableType;
+ Uint8 storedTable;
+
+ Uint8 noOfKeyAttr;
+ Uint8 hasCharAttr;
+ Uint8 noOfDistrKeys;
+
+ struct KeyAttr {
+ Uint32 attributeDescriptor;
+ CHARSET_INFO* charsetInfo;
+ } keyAttr[MAX_ATTRIBUTES_IN_INDEX];
+
+ bool checkTable(Uint32 schemaVersion) const {
+ return enabled && !dropping && (schemaVersion == currentSchemaVersion);
+ }
+
+ Uint32 getErrorCode(Uint32 schemaVersion) const;
+
+ struct DropTable {
+ Uint32 senderRef;
+ Uint32 senderData;
+ SignalCounter waitDropTabCount;
+ } dropTable;
+ };
+ typedef Ptr<TableRecord> TableRecordPtr;
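+
+ /**
+ * Editor's sketch, not part of the original source: checkTable() is the
+ * schema-version guard a request handler would typically apply before
+ * touching a table, e.g.
+ *
+ * if (!tabptr.p->checkTable(reqSchemaVersion))
+ * terrorCode = tabptr.p->getErrorCode(reqSchemaVersion);
+ *
+ * where reqSchemaVersion is a placeholder name and tabptr / terrorCode
+ * are members declared further down in this class.
+ */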
+
+ /**
+ * There are at most 16 ScanFragRec's for
+ * each scan started in TC. Each ScanFragRec is used by
+ * a scan fragment "process" that scans one fragment at a time.
+ * It will receive at most 16 tuples in each request.
+ */
+ struct ScanFragRec {
+ ScanFragRec(){
+ stopFragTimer();
+ lqhBlockref = 0;
+ scanFragState = IDLE;
+ scanRec = RNIL;
+ }
+ /**
+ * ScanFragState
+ * WAIT_GET_PRIMCONF : Waiting for DIGETPRIMCONF when starting a new
+ * fragment scan
+ * LQH_ACTIVE : The scan process has sent a command to LQH and is
+ * waiting for the response
+ * LQH_ACTIVE_CLOSE : The scan process has sent close to LQH and is
+ * waiting for the response
+ * DELIVERED : The result has been delivered; this scan frag process
+ * is waiting for a SCAN_NEXTREQ to tell us to continue scanning
+ * RETURNING_FROM_DELIVERY : SCAN_NEXTREQ received and continuing scan
+ * soon
+ * QUEUED_FOR_DELIVERY : Result queued in TC and waiting for delivery
+ * to API
+ * COMPLETED : The fragment scan process has completed and finally
+ * sent a SCAN_PROCCONF
+ */
+ enum ScanFragState {
+ IDLE = 0,
+ WAIT_GET_PRIMCONF = 1,
+ LQH_ACTIVE = 2,
+ DELIVERED = 4,
+ QUEUED_FOR_DELIVERY = 6,
+ COMPLETED = 7
+ };
+ // Timer for checking timeout of this fragment scan
+ Uint32 scanFragTimer;
+
+ // Id of the current scanned fragment
+ Uint32 scanFragId;
+
+ // Blockreference of LQH
+ BlockReference lqhBlockref;
+
+ // getNodeInfo.m_connectCount, set at seize; used so that
+ // we don't accidentally kill a starting node
+ Uint32 m_connectCount;
+
+ // State of this fragment scan
+ ScanFragState scanFragState;
+
+ // Id of the ScanRecord this fragment scan belongs to
+ Uint32 scanRec;
+
+ // The value of fragmentCompleted in the last received SCAN_FRAGCONF
+ Uint8 m_scan_frag_conf_status;
+
+ inline void startFragTimer(Uint32 timeVal){
+ scanFragTimer = timeVal;
+ }
+ inline void stopFragTimer(void){
+ scanFragTimer = 0;
+ }
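+
+ // --- Editor's illustrative sketch; not part of the original source. ---
+ // startFragTimer() records the current TC timer value and stopFragTimer()
+ // clears it, so a timeout check only needs the stored value and the
+ // current time. A minimal helper in that spirit (the name and the limit
+ // argument are assumptions):
+ inline bool fragTimedOut(Uint32 now, Uint32 limit) const {
+ return scanFragTimer != 0 && (now - scanFragTimer) > limit;
+ }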
+
+ Uint32 m_ops;
+ Uint32 m_chksum;
+ Uint32 m_apiPtr;
+ Uint32 m_totalLen;
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ };
+
+ typedef Ptr<ScanFragRec> ScanFragRecPtr;
+ typedef LocalDLList<ScanFragRec> ScanFragList;
+
+ /**
+ * Each scan allocates one ScanRecord to store information
+ * about the current scan
+ *
+ */
+ struct ScanRecord {
+ ScanRecord() {}
+ /** NOTE! This is the old comment for ScanState. - MASV
+ * STATE TRANSITIONS OF SCAN_STATE. SCAN_STATE IS THE STATE
+ * VARIABLE OF THE RECEIVE AND DELIVERY PROCESS.
+ * THE PROCESS HAS THREE STEPS IT GOES THROUGH.
+ * 1) THE INITIAL STATES WHEN RECEIVING DATA FOR THE SCAN.
+ * - WAIT_SCAN_TAB_INFO
+ * - WAIT_AI
+ * - WAIT_FRAGMENT_COUNT
+ * 2) THE EXECUTION STATES WHEN THE SCAN IS PERFORMED.
+ * - SCAN_NEXT_ORDERED
+ * - DELIVERED
+ * - QUEUED_DELIVERED
+ * 3) THE CLOSING STATE WHEN THE SCAN PROCESS IS CLOSING UP
+ * EVERYTHING.
+ * - CLOSING_SCAN
+ * INITIAL START WHEN SCAN_TABREQ RECEIVED
+ * -> WAIT_SCAN_TAB_INFO (IF ANY SCAN_TABINFO TO BE RECEIVED)
+ * -> WAIT_AI (IF NO SCAN_TAB_INFO BUT ATTRINFO IS RECEIVED)
+ * -> WAIT_FRAGMENT_COUNT (IF NEITHER SCAN_TABINFO NOR ATTRINFO
+ * RECEIVED)
+ *
+ * WAIT_SCAN_TAB_INFO TRANSITIONS:
+ * -> WAIT_SCAN_TABINFO (WHEN MORE SCAN_TABINFO RECEIVED)
+ * -> WAIT_AI (WHEN ATTRINFO RECEIVED AFTER RECEIVING ALL
+ * SCAN_TABINFO)
+ * -> WAIT_FRAGMENT_COUNT (WHEN NO ATTRINFO RECEIVED AFTER
+ * RECEIVING ALL SCAN_TABINFO )
+ * WAIT_AI TRANSITIONS:
+ * -> WAIT_AI (WHEN MORE ATTRINFO RECEIVED)
+ * -> WAIT_FRAGMENT_COUNT (WHEN ALL ATTRINFO RECEIVED)
+ *
+ * WAIT_FRAGMENT_COUNT TRANSITIONS:
+ * -> SCAN_NEXT_ORDERED
+ *
+ * SCAN_NEXT_ORDERED TRANSITIONS:
+ * -> DELIVERED (WHEN FIRST SCAN_FRAGCONF ARRIVES WITH OPERATIONS
+ * TO REPORT IN IT)
+ * -> CLOSING_SCAN (WHEN SCAN IS CLOSED BY SCAN_NEXTREQ OR BY SOME
+ * ERROR)
+ *
+ * DELIVERED TRANSITIONS:
+ * -> SCAN_NEXT_ORDERED (IF SCAN_NEXTREQ ARRIVES BEFORE ANY NEW
+ * OPERATIONS TO REPORT ARRIVES)
+ * -> QUEUED_DELIVERED (IF NEW OPERATION TO REPORT ARRIVES BEFORE
+ * SCAN_NEXTREQ)
+ * -> CLOSING_SCAN (WHEN SCAN IS CLOSED BY SCAN_NEXTREQ OR BY SOME
+ * ERROR)
+ *
+ * QUEUED_DELIVERED TRANSITIONS:
+ * -> DELIVERED (WHEN SCAN_NEXTREQ ARRIVES AND QUEUED OPERATIONS
+ * TO REPORT ARE SENT TO THE APPLICATION)
+ * -> CLOSING_SCAN (WHEN SCAN IS CLOSED BY SCAN_NEXTREQ OR BY
+ * SOME ERROR)
+ */
+ enum ScanState {
+ IDLE = 0,
+ WAIT_SCAN_TAB_INFO = 1,
+ WAIT_AI = 2,
+ WAIT_FRAGMENT_COUNT = 3,
+ RUNNING = 4,
+ CLOSING_SCAN = 5
+ };
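+
+ // --- Editor's illustrative sketch; not part of the original source. ---
+ // The old comment above describes ScanState as a receive / execute /
+ // close state machine, but it predates this enum (it still refers to
+ // states such as SCAN_NEXT_ORDERED and DELIVERED). A purely hypothetical
+ // transition check over the current enum values, only to illustrate the
+ // intended phases:
+ static bool plausibleScanTransition(ScanState from, ScanState to) {
+ switch (from) {
+ case IDLE:
+ return to == WAIT_SCAN_TAB_INFO || to == WAIT_AI ||
+ to == WAIT_FRAGMENT_COUNT;
+ case WAIT_SCAN_TAB_INFO:
+ return to == WAIT_AI || to == WAIT_FRAGMENT_COUNT || to == CLOSING_SCAN;
+ case WAIT_AI:
+ return to == WAIT_FRAGMENT_COUNT || to == CLOSING_SCAN;
+ case WAIT_FRAGMENT_COUNT:
+ return to == RUNNING || to == CLOSING_SCAN;
+ case RUNNING:
+ return to == CLOSING_SCAN;
+ case CLOSING_SCAN:
+ return to == IDLE;
+ }
+ return false;
+ }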
+
+ // State of this scan
+ ScanState scanState;
+
+ DLList<ScanFragRec>::Head m_running_scan_frags; // Currently in LQH
+ union { Uint32 m_queued_count; Uint32 scanReceivedOperations; };
+ DLList<ScanFragRec>::Head m_queued_scan_frags; // In TC !sent to API
+ DLList<ScanFragRec>::Head m_delivered_scan_frags;// Delivered to API
+
+ // Id of the next fragment to be scanned. Used by scan fragment
+ // processes when they are ready for the next fragment
+ Uint32 scanNextFragId;
+
+ // Total number of fragments in the table we are scanning
+ Uint32 scanNoFrag;
+
+ // Index of next ScanRecord when in free list
+ Uint32 nextScan;
+
+ // Length of expected attribute information
+ union { Uint32 scanAiLength; Uint32 m_booked_fragments_count; };
+
+ Uint32 scanKeyLen;
+
+ // Reference to ApiConnectRecord
+ Uint32 scanApiRec;
+
+ // Reference to TcConnectRecord
+ Uint32 scanTcrec;
+
+ // Number of scan frag processes that belong to this scan
+ Uint32 scanParallel;
+
+ // Schema version used by this scan
+ Uint32 scanSchemaVersion;
+
+ // Index of stored procedure belonging to this scan
+ Uint32 scanStoredProcId;
+
+ // The index of table that is scanned
+ Uint32 scanTableref;
+
+ // Number of operation records per scanned fragment
+ // Number of operations in first batch
+ // Max number of bytes per batch
+ union {
+ Uint16 first_batch_size_rows;
+ Uint16 batch_size_rows;
+ };
+ Uint32 batch_byte_size;
+
+ Uint32 scanRequestInfo; // ScanFrag format
+
+ // Close is ordered
+ bool m_close_scan_req;
+ };
+ typedef Ptr<ScanRecord> ScanRecordPtr;
+
+ /* **********************************************************************$ */
+ /* ******$ DATA BUFFER ******$ */
+ /* */
+ /* THIS BUFFER IS USED AS A GENERAL DATA STORAGE. */
+ /* **********************************************************************$ */
+ struct DatabufRecord {
+ UintR data[4];
+ /* 4 * 1 WORD = 4 WORD */
+ UintR nextDatabuf;
+ }; /* p2c: size = 20 bytes */
+
+ typedef Ptr<DatabufRecord> DatabufRecordPtr;
+
+ /* **********************************************************************$ */
+ /* ******$ ATTRIBUTE INFORMATION RECORD ******$ */
+ /*
+ * CAN CONTAIN ONE (1) ATTRINFO SIGNAL. ONE SIGNAL CONTAINS 24 ATTR.
+ * INFO WORDS. BUT 32 ELEMENTS ARE USED TO MAKE PLEX HAPPY.
+ * SOME OF THE ELEMENTS ARE USED FOR THE FOLLOWING THINGS:
+ * DATA LENGTH IN THIS RECORD IS STORED IN THE ELEMENT INDEXED BY
+ * ZINBUF_DATA_LEN.
+ * NEXT FREE ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY
+ * PREVIOUS ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_PREV
+ * (NOT USED YET).
+ * NEXT ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_NEXT. */
+ /* ******************************************************************** */
+ struct AttrbufRecord {
+ UintR attrbuf[32];
+ }; /* p2c: size = 128 bytes */
+
+ typedef Ptr<AttrbufRecord> AttrbufRecordPtr;
+
+ /*************************************************************************>*/
+ /* GLOBAL CHECKPOINT INFORMATION RECORD */
+ /* */
+ /* THIS RECORD IS USED TO STORE THE GLOBAL CHECKPOINT NUMBER AND A
+ * COUNTER DURING THE COMPLETION PHASE OF THE TRANSACTION */
+ /*************************************************************************>*/
+ /* */
+ /* GCP RECORD ALIGNED TO BE 32 BYTES */
+ /*************************************************************************>*/
+ struct GcpRecord {
+ UintR gcpUnused1[2]; /* p2c: Not used */
+ UintR firstApiConnect;
+ UintR lastApiConnect;
+ UintR gcpId;
+ UintR nextGcp;
+ UintR gcpUnused2; /* p2c: Not used */
+ Uint16 gcpNomoretransRec;
+ }; /* p2c: size = 32 bytes */
+
+ typedef Ptr<GcpRecord> GcpRecordPtr;
+
+ /*************************************************************************>*/
+ /* TC_FAIL_RECORD */
+ /* THIS RECORD IS USED WHEN HANDLING TAKE OVER OF ANOTHER FAILED
+ * TC NODE. */
+ /*************************************************************************>*/
+ struct TcFailRecord {
+ Uint16 queueList[MAX_NDB_NODES];
+ Uint8 takeOverProcState[MAX_NDB_NODES];
+ UintR completedTakeOver;
+ UintR currentHashIndexTakeOver;
+ FailState failStatus;
+ Uint16 queueIndex;
+ Uint16 takeOverNode;
+ }; /* p2c: size = 64 bytes */
+
+ typedef Ptr<TcFailRecord> TcFailRecordPtr;
+
+public:
+ Dbtc(const class Configuration &);
+ virtual ~Dbtc();
+
+private:
+ BLOCK_DEFINES(Dbtc);
+
+ // Transit signals
+ void execPACKED_SIGNAL(Signal* signal);
+ void execABORTED(Signal* signal);
+ void execATTRINFO(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+ void execKEYINFO(Signal* signal);
+ void execSCAN_NEXTREQ(Signal* signal);
+ void execSCAN_PROCREQ(Signal* signal);
+ void execSCAN_PROCCONF(Signal* signal);
+ void execTAKE_OVERTCREQ(Signal* signal);
+ void execTAKE_OVERTCCONF(Signal* signal);
+ void execLQHKEYREF(Signal* signal);
+ void execTRANSID_AI_R(Signal* signal);
+ void execKEYINFO20_R(Signal* signal);
+
+ // Received signals
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execSEND_PACKED(Signal* signal);
+ void execCOMPLETED(Signal* signal);
+ void execCOMMITTED(Signal* signal);
+ void execDIGETNODESREF(Signal* signal);
+ void execDIGETPRIMCONF(Signal* signal);
+ void execDIGETPRIMREF(Signal* signal);
+ void execDISEIZECONF(Signal* signal);
+ void execDIVERIFYCONF(Signal* signal);
+ void execDI_FCOUNTCONF(Signal* signal);
+ void execDI_FCOUNTREF(Signal* signal);
+ void execGCP_NOMORETRANS(Signal* signal);
+ void execLQHKEYCONF(Signal* signal);
+ void execNDB_STTOR(Signal* signal);
+ void execREAD_NODESCONF(Signal* signal);
+ void execREAD_NODESREF(Signal* signal);
+ void execSTTOR(Signal* signal);
+ void execTC_COMMITREQ(Signal* signal);
+ void execTC_CLOPSIZEREQ(Signal* signal);
+ void execTCGETOPSIZEREQ(Signal* signal);
+ void execTCKEYREQ(Signal* signal);
+ void execTCRELEASEREQ(Signal* signal);
+ void execTCSEIZEREQ(Signal* signal);
+ void execTCROLLBACKREQ(Signal* signal);
+ void execTC_HBREP(Signal* signal);
+ void execTC_SCHVERREQ(Signal* signal);
+ void execSCAN_TABREQ(Signal* signal);
+ void execSCAN_TABINFO(Signal* signal);
+ void execSCAN_FRAGCONF(Signal* signal);
+ void execSCAN_FRAGREF(Signal* signal);
+ void execREAD_CONFIG_REQ(Signal* signal);
+ void execLQH_TRANSCONF(Signal* signal);
+ void execCOMPLETECONF(Signal* signal);
+ void execCOMMITCONF(Signal* signal);
+ void execABORTCONF(Signal* signal);
+ void execNODE_FAILREP(Signal* signal);
+ void execINCL_NODEREQ(Signal* signal);
+ void execTIME_SIGNAL(Signal* signal);
+ void execAPI_FAILREQ(Signal* signal);
+ void execSCAN_HBREP(Signal* signal);
+ void execSET_VAR_REQ(Signal* signal);
+
+ void execABORT_ALL_REQ(Signal* signal);
+
+ void execCREATE_TRIG_REQ(Signal* signal);
+ void execDROP_TRIG_REQ(Signal* signal);
+ void execFIRE_TRIG_ORD(Signal* signal);
+ void execTRIG_ATTRINFO(Signal* signal);
+ void execCREATE_INDX_REQ(Signal* signal);
+ void execDROP_INDX_REQ(Signal* signal);
+ void execTCINDXREQ(Signal* signal);
+ void execINDXKEYINFO(Signal* signal);
+ void execINDXATTRINFO(Signal* signal);
+ void execALTER_INDX_REQ(Signal* signal);
+
+ // Index table lookup
+ void execTCKEYCONF(Signal* signal);
+ void execTCKEYREF(Signal* signal);
+ void execTRANSID_AI(Signal* signal);
+ void execTCROLLBACKREP(Signal* signal);
+
+ void execCREATE_TAB_REQ(Signal* signal);
+ void execPREP_DROP_TAB_REQ(Signal* signal);
+ void execDROP_TAB_REQ(Signal* signal);
+ void execWAIT_DROP_TAB_REF(Signal* signal);
+ void execWAIT_DROP_TAB_CONF(Signal* signal);
+ void checkWaitDropTabFailedLqh(Signal*, Uint32 nodeId, Uint32 tableId);
+ void execALTER_TAB_REQ(Signal* signal);
+ void set_timeout_value(Uint32 timeOut);
+ void set_appl_timeout_value(Uint32 timeOut);
+ void set_no_parallel_takeover(Uint32);
+ void updateBuddyTimer(ApiConnectRecordPtr);
+
+ // Statement blocks
+ void updatePackedList(Signal* signal, HostRecord* ahostptr,
+ Uint16 ahostIndex);
+ void clearTcNodeData(Signal* signal,
+ UintR TLastLqhIndicator,
+ UintR Tstart);
+ void errorReport(Signal* signal, int place);
+ void warningReport(Signal* signal, int place);
+ void printState(Signal* signal, int place);
+ int seizeTcRecord(Signal* signal);
+ int seizeCacheRecord(Signal* signal);
+ void TCKEY_abort(Signal* signal, int place);
+ void copyFromToLen(UintR* sourceBuffer, UintR* destBuffer, UintR copyLen);
+ void reportNodeFailed(Signal* signal, Uint32 nodeId);
+ void sendPackedTCKEYCONF(Signal* signal,
+ HostRecord * ahostptr,
+ UintR hostId);
+ void sendPackedTCINDXCONF(Signal* signal,
+ HostRecord * ahostptr,
+ UintR hostId);
+ void sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr);
+ void sendCommitLqh(Signal* signal,
+ TcConnectRecord * const regTcPtr);
+ void sendCompleteLqh(Signal* signal,
+ TcConnectRecord * const regTcPtr);
+ void sendTCKEY_FAILREF(Signal* signal, const ApiConnectRecord *);
+ void sendTCKEY_FAILCONF(Signal* signal, ApiConnectRecord *);
+ void checkStartTimeout(Signal* signal);
+ void checkStartFragTimeout(Signal* signal);
+ void timeOutFoundFragLab(Signal* signal, Uint32 TscanConPtr);
+ void timeOutLoopStartFragLab(Signal* signal, Uint32 TscanConPtr);
+ int releaseAndAbort(Signal* signal);
+ void findApiConnectFail(Signal* signal);
+ void findTcConnectFail(Signal* signal);
+ void initApiConnectFail(Signal* signal);
+ void initTcConnectFail(Signal* signal);
+ void initTcFail(Signal* signal);
+ void releaseTakeOver(Signal* signal);
+ void setupFailData(Signal* signal);
+ void updateApiStateFail(Signal* signal);
+ void updateTcStateFail(Signal* signal);
+ void handleApiFailState(Signal* signal, UintR anApiConnectptr);
+ void handleFailedApiNode(Signal* signal,
+ UintR aFailedNode,
+ UintR anApiConnectPtr);
+ void handleScanStop(Signal* signal, UintR aFailedNode);
+ void initScanTcrec(Signal* signal);
+ void initScanrec(ScanRecordPtr, const class ScanTabReq*,
+ const UintR scanParallel,
+ const UintR noOprecPerFrag);
+ void initScanfragrec(Signal* signal);
+ void releaseScanResources(ScanRecordPtr);
+ ScanRecordPtr seizeScanrec(Signal* signal);
+ void sendScanFragReq(Signal*, ScanRecord*, ScanFragRec*);
+ void sendScanTabConf(Signal* signal, ScanRecordPtr);
+ void close_scan_req(Signal*, ScanRecordPtr, bool received_req);
+ void close_scan_req_send_conf(Signal*, ScanRecordPtr);
+
+ void checkGcp(Signal* signal);
+ void commitGciHandling(Signal* signal, UintR Tgci);
+ void copyApi(Signal* signal);
+ void DIVER_node_fail_handling(Signal* signal, UintR Tgci);
+ void gcpTcfinished(Signal* signal);
+ void handleGcp(Signal* signal);
+ void hash(Signal* signal);
+ bool handle_special_hash(Uint32 dstHash[4],
+ Uint32* src, Uint32 srcLen,
+ Uint32 tabPtrI, bool distr);
+
+ void initApiConnect(Signal* signal);
+ void initApiConnectRec(Signal* signal,
+ ApiConnectRecord * const regApiPtr,
+ bool releaseIndexOperations = false);
+ void initattrbuf(Signal* signal);
+ void initdatabuf(Signal* signal);
+ void initgcp(Signal* signal);
+ void inithost(Signal* signal);
+ void initialiseScanrec(Signal* signal);
+ void initialiseScanFragrec(Signal* signal);
+ void initialiseScanOprec(Signal* signal);
+ void initTable(Signal* signal);
+ void initialiseTcConnect(Signal* signal);
+ void linkApiToGcp(Signal* signal);
+ void linkGciInGcilist(Signal* signal);
+ void linkKeybuf(Signal* signal);
+ void linkTcInConnectionlist(Signal* signal);
+ void releaseAbortResources(Signal* signal);
+ void releaseApiCon(Signal* signal, UintR aApiConnectPtr);
+ void releaseApiConCopy(Signal* signal);
+ void releaseApiConnectFail(Signal* signal);
+ void releaseAttrinfo();
+ void releaseGcp(Signal* signal);
+ void releaseKeys();
+ void releaseSimpleRead(Signal*, ApiConnectRecordPtr, TcConnectRecord*);
+ void releaseDirtyWrite(Signal* signal);
+ void releaseTcCon();
+ void releaseTcConnectFail(Signal* signal);
+ void releaseTransResources(Signal* signal);
+ void saveAttrbuf(Signal* signal);
+ void seizeApiConnect(Signal* signal);
+ void seizeApiConnectCopy(Signal* signal);
+ void seizeApiConnectFail(Signal* signal);
+ void seizeDatabuf(Signal* signal);
+ void seizeGcp(Signal* signal);
+ void seizeTcConnect(Signal* signal);
+ void seizeTcConnectFail(Signal* signal);
+ void sendApiCommit(Signal* signal);
+ void sendAttrinfo(Signal* signal,
+ UintR TattrinfoPtr,
+ AttrbufRecord * const regAttrPtr,
+ UintR TBref);
+ void sendContinueTimeOutControl(Signal* signal, Uint32 TapiConPtr);
+ void sendKeyinfo(Signal* signal, BlockReference TBRef, Uint32 len);
+ void sendlqhkeyreq(Signal* signal, BlockReference TBRef);
+ void sendSystemError(Signal* signal);
+ void sendtckeyconf(Signal* signal, UintR TcommitFlag);
+ void sendTcIndxConf(Signal* signal, UintR TcommitFlag);
+ void unlinkApiConnect(Signal* signal);
+ void unlinkGcp(Signal* signal);
+ void unlinkReadyTcCon(Signal* signal);
+ void handleFailedOperation(Signal* signal,
+ const LqhKeyRef * const lqhKeyRef,
+ bool gotLqhKeyRef);
+ void markOperationAborted(ApiConnectRecord * const regApiPtr,
+ TcConnectRecord * const regTcPtr);
+ void clearCommitAckMarker(ApiConnectRecord * const regApiPtr,
+ TcConnectRecord * const regTcPtr);
+ // Trigger and index handling
+ bool saveINDXKEYINFO(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len);
+ bool receivedAllINDXKEYINFO(TcIndexOperation* indexOp);
+ bool saveINDXATTRINFO(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len);
+ bool receivedAllINDXATTRINFO(TcIndexOperation* indexOp);
+ bool saveTRANSID_AI(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len);
+ bool receivedAllTRANSID_AI(TcIndexOperation* indexOp);
+ void readIndexTable(Signal* signal,
+ ApiConnectRecord* regApiPtr,
+ TcIndexOperation* indexOp);
+ void executeIndexOperation(Signal* signal,
+ ApiConnectRecord* regApiPtr,
+ TcIndexOperation* indexOp);
+ bool seizeIndexOperation(ApiConnectRecord* regApiPtr,
+ TcIndexOperationPtr& indexOpPtr);
+ void releaseIndexOperation(ApiConnectRecord* regApiPtr,
+ TcIndexOperation* indexOp);
+ void releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr);
+ void setupIndexOpReturn(ApiConnectRecord* regApiPtr,
+ TcConnectRecord* regTcPtr);
+
+ void saveTriggeringOpState(Signal* signal,
+ TcConnectRecord* trigOp);
+ void restoreTriggeringOpState(Signal* signal,
+ TcConnectRecord* trigOp);
+ void continueTriggeringOp(Signal* signal,
+ TcConnectRecord* trigOp);
+
+ void scheduleFiredTrigger(ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr);
+ void executeTriggers(Signal* signal, ApiConnectRecordPtr* transPtr);
+ void executeTrigger(Signal* signal,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr);
+ void executeIndexTrigger(Signal* signal,
+ TcDefinedTriggerData* definedTriggerData,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr);
+ void insertIntoIndexTable(Signal* signal,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr,
+ TcIndexData* indexData,
+ bool holdOperation = false);
+ void deleteFromIndexTable(Signal* signal,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr,
+ TcIndexData* indexData,
+ bool holdOperation = false);
+ void releaseFiredTriggerData(DLFifoList<TcFiredTriggerData>* triggers);
+ // Generated statement blocks
+ void warningHandlerLab(Signal* signal);
+ void systemErrorLab(Signal* signal);
+ void sendSignalErrorRefuseLab(Signal* signal);
+ void scanTabRefLab(Signal* signal, Uint32 errCode);
+ void diFcountReqLab(Signal* signal, ScanRecordPtr);
+ void signalErrorRefuseLab(Signal* signal);
+ void abort080Lab(Signal* signal);
+ void packKeyData000Lab(Signal* signal, BlockReference TBRef, Uint32 len);
+ void abortScanLab(Signal* signal, ScanRecordPtr, Uint32 errCode);
+ void sendAbortedAfterTimeout(Signal* signal, int Tcheck);
+ void abort010Lab(Signal* signal);
+ void abort015Lab(Signal* signal);
+ void packLqhkeyreq(Signal* signal, BlockReference TBRef);
+ void packLqhkeyreq040Lab(Signal* signal,
+ UintR anAttrBufIndex,
+ BlockReference TBRef);
+ void packLqhkeyreq040Lab(Signal* signal);
+ void returnFromQueuedDeliveryLab(Signal* signal);
+ void startTakeOverLab(Signal* signal);
+ void toCompleteHandlingLab(Signal* signal);
+ void toCommitHandlingLab(Signal* signal);
+ void toAbortHandlingLab(Signal* signal);
+ void abortErrorLab(Signal* signal);
+ void nodeTakeOverCompletedLab(Signal* signal);
+ void ndbsttorry010Lab(Signal* signal);
+ void commit020Lab(Signal* signal);
+ void complete010Lab(Signal* signal);
+ void releaseAtErrorLab(Signal* signal);
+ void seizeDatabuferrorLab(Signal* signal);
+ void scanAttrinfoLab(Signal* signal, UintR Tlen);
+ void seizeAttrbuferrorLab(Signal* signal);
+ void attrinfoDihReceivedLab(Signal* signal);
+ void aiErrorLab(Signal* signal);
+ void attrinfo020Lab(Signal* signal);
+ void scanReleaseResourcesLab(Signal* signal);
+ void scanCompletedLab(Signal* signal);
+ void scanError(Signal* signal, ScanRecordPtr, Uint32 errorCode);
+ void diverify010Lab(Signal* signal);
+ void intstartphase2x010Lab(Signal* signal);
+ void intstartphase3x010Lab(Signal* signal);
+ void sttorryLab(Signal* signal);
+ void abortBeginErrorLab(Signal* signal);
+ void tabStateErrorLab(Signal* signal);
+ void wrongSchemaVersionErrorLab(Signal* signal);
+ void noFreeConnectionErrorLab(Signal* signal);
+ void tckeyreq050Lab(Signal* signal);
+ void timeOutFoundLab(Signal* signal, UintR anAdd);
+ void completeTransAtTakeOverLab(Signal* signal, UintR TtakeOverInd);
+ void completeTransAtTakeOverDoLast(Signal* signal, UintR TtakeOverInd);
+ void completeTransAtTakeOverDoOne(Signal* signal, UintR TtakeOverInd);
+ void timeOutLoopStartLab(Signal* signal, Uint32 apiConnectPtr);
+ void initialiseRecordsLab(Signal* signal, UintR Tdata0, Uint32, Uint32);
+ void tckeyreq020Lab(Signal* signal);
+ void intstartphase2x020Lab(Signal* signal);
+ void intstartphase1x010Lab(Signal* signal);
+ void startphase1x010Lab(Signal* signal);
+
+ void lqhKeyConf_checkTransactionState(Signal * signal,
+ ApiConnectRecord * const regApiPtr);
+
+ void checkDropTab(Signal* signal);
+
+ void checkScanActiveInFailedLqh(Signal* signal,
+ Uint32 scanPtrI,
+ Uint32 failedNodeId);
+ void checkScanFragList(Signal*, Uint32 failedNodeId, ScanRecord * scanP,
+ LocalDLList<ScanFragRec>::Head&);
+
+ // Initialisation
+ void initData();
+ void initRecords();
+
+ // Transit signals
+
+
+ ApiConnectRecord *apiConnectRecord;
+ ApiConnectRecordPtr apiConnectptr;
+ UintR capiConnectFilesize;
+
+ TcConnectRecord *tcConnectRecord;
+ TcConnectRecordPtr tcConnectptr;
+ UintR ctcConnectFilesize;
+
+ CacheRecord *cacheRecord;
+ CacheRecordPtr cachePtr;
+ UintR ccacheFilesize;
+
+ AttrbufRecord *attrbufRecord;
+ AttrbufRecordPtr attrbufptr;
+ UintR cattrbufFilesize;
+
+ HostRecord *hostRecord;
+ HostRecordPtr hostptr;
+ UintR chostFilesize;
+
+ GcpRecord *gcpRecord;
+ GcpRecordPtr gcpPtr;
+ UintR cgcpFilesize;
+
+ TableRecord *tableRecord;
+ UintR ctabrecFilesize;
+
+ UintR thashValue;
+ UintR tdistrHashValue;
+
+ UintR ttransid_ptr;
+ UintR cfailure_nr;
+ UintR coperationsize;
+ UintR ctcTimer;
+
+ ApiConnectRecordPtr tmpApiConnectptr;
+ UintR tcheckGcpId;
+
+ struct TransCounters {
+ enum { Off, Timer, Started } c_trans_status;
+ UintR cattrinfoCount;
+ UintR ctransCount;
+ UintR ccommitCount;
+ UintR creadCount;
+ UintR csimpleReadCount;
+ UintR cwriteCount;
+ UintR cabortCount;
+ UintR cconcurrentOp;
+ Uint32 c_scan_count;
+ Uint32 c_range_scan_count;
+ void reset () {
+ cattrinfoCount = ctransCount = ccommitCount = creadCount =
+ csimpleReadCount = cwriteCount = cabortCount =
+ c_scan_count = c_range_scan_count = 0;
+ }
+ Uint32 report(Signal* signal){
+ signal->theData[0] = NDB_LE_TransReportCounters;
+ signal->theData[1] = ctransCount;
+ signal->theData[2] = ccommitCount;
+ signal->theData[3] = creadCount;
+ signal->theData[4] = csimpleReadCount;
+ signal->theData[5] = cwriteCount;
+ signal->theData[6] = cattrinfoCount;
+ signal->theData[7] = cconcurrentOp;
+ signal->theData[8] = cabortCount;
+ signal->theData[9] = c_scan_count;
+ signal->theData[10] = c_range_scan_count;
+ return 11;
+ }
+ } c_counters;
+
+ Uint16 cownNodeid;
+ Uint16 terrorCode;
+
+ UintR cfirstfreeAttrbuf;
+ UintR cfirstfreeTcConnect;
+ UintR cfirstfreeApiConnectCopy;
+ UintR cfirstfreeCacheRec;
+
+ UintR cfirstgcp;
+ UintR clastgcp;
+ UintR cfirstfreeGcp;
+ UintR cfirstfreeScanrec;
+
+ TableRecordPtr tabptr;
+ UintR cfirstfreeApiConnectFail;
+ UintR cfirstfreeApiConnect;
+
+ UintR cfirstfreeDatabuf;
+ BlockReference cdihblockref;
+ BlockReference cownref; /* OWN BLOCK REFERENCE */
+
+ ApiConnectRecordPtr timeOutptr;
+
+ ScanRecord *scanRecord;
+ UintR cscanrecFileSize;
+
+ UnsafeArrayPool<ScanFragRec> c_scan_frag_pool;
+ ScanFragRecPtr scanFragptr;
+
+ UintR cscanFragrecFileSize;
+ UintR cdatabufFilesize;
+
+ BlockReference cdictblockref;
+ BlockReference cerrorBlockref;
+ BlockReference clqhblockref;
+ BlockReference cndbcntrblockref;
+
+ Uint16 csignalKey;
+ Uint16 csystemnodes;
+ Uint16 cnodes[4];
+ NodeId cmasterNodeId;
+ UintR cnoParallelTakeOver;
+ TimeOutCheckState ctimeOutCheckFragActive;
+
+ UintR ctimeOutCheckFragCounter;
+ UintR ctimeOutCheckCounter;
+ UintR ctimeOutValue;
+ UintR ctimeOutCheckDelay;
+ Uint32 ctimeOutCheckHeartbeat;
+ Uint32 ctimeOutCheckLastHeartbeat;
+ Uint32 ctimeOutMissedHeartbeats;
+ Uint32 c_appl_timeout_value;
+
+ SystemStartState csystemStart;
+ TimeOutCheckState ctimeOutCheckActive;
+
+ BlockReference capiFailRef;
+ UintR cpackedListIndex;
+ Uint16 cpackedList[MAX_NODES];
+ UintR capiConnectClosing[MAX_NODES];
+ UintR con_lineNodes;
+
+ DatabufRecord *databufRecord;
+ DatabufRecordPtr databufptr;
+ DatabufRecordPtr tmpDatabufptr;
+
+ UintR treqinfo;
+ UintR ttransid1;
+ UintR ttransid2;
+
+ UintR tabortInd;
+
+ NodeId tnodeid;
+ BlockReference tblockref;
+
+ LqhTransConf::OperationStatus ttransStatus;
+ UintR ttcOprec;
+ NodeId tfailedNodeId;
+ Uint8 tcurrentReplicaNo;
+ Uint8 tpad1;
+
+ UintR tgci;
+ UintR tapplRef;
+ UintR tapplOprec;
+
+ UintR tindex;
+ UintR tmaxData;
+ UintR tmp;
+
+ UintR tnodes;
+ BlockReference tusersblkref;
+ UintR tuserpointer;
+ UintR tloadCode;
+
+ UintR tconfig1;
+ UintR tconfig2;
+
+ UintR cdata[32];
+ UintR ctransidFailHash[512];
+ UintR ctcConnectFailHash[1024];
+
+ /**
+ * Commit Ack handling
+ */
+public:
+ struct CommitAckMarker {
+ Uint32 transid1;
+ Uint32 transid2;
+ union { Uint32 nextPool; Uint32 nextHash; };
+ Uint32 prevHash;
+ Uint32 apiConnectPtr;
+ Uint16 apiNodeId;
+ Uint16 noOfLqhs;
+ Uint16 lqhNodeId[MAX_REPLICAS];
+
+ inline bool equal(const CommitAckMarker & p) const {
+ return ((p.transid1 == transid1) && (p.transid2 == transid2));
+ }
+
+ inline Uint32 hashValue() const {
+ return transid1;
+ }
+ };
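+
+ /**
+ * Editor's note, not part of the original source: equal() and hashValue()
+ * are the hooks the marker hash table below can rely on -- two markers
+ * carrying the same transaction id compare equal and hash to the same
+ * bucket:
+ *
+ * CommitAckMarker a, b;
+ * a.transid1 = b.transid1 = 0x11; a.transid2 = b.transid2 = 0x22;
+ * // a.equal(b) is true and a.hashValue() == b.hashValue()
+ */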
+private:
+ typedef Ptr<CommitAckMarker> CommitAckMarkerPtr;
+ typedef DLHashTable<CommitAckMarker>::Iterator CommitAckMarkerIterator;
+
+ ArrayPool<CommitAckMarker> m_commitAckMarkerPool;
+ DLHashTable<CommitAckMarker> m_commitAckMarkerHash;
+
+ void execTC_COMMIT_ACK(Signal* signal);
+ void sendRemoveMarkers(Signal*, const CommitAckMarker *);
+ void sendRemoveMarker(Signal* signal,
+ NodeId nodeId,
+ Uint32 transid1,
+ Uint32 transid2);
+ void removeMarkerForFailedAPI(Signal* signal, Uint32 nodeId, Uint32 bucket);
+
+ bool getAllowStartTransaction() const {
+ if(getNodeState().getSingleUserMode())
+ return true;
+ return getNodeState().startLevel < NodeState::SL_STOPPING_2;
+ }
+
+ void checkAbortAllTimeout(Signal* signal, Uint32 sleepTime);
+ struct AbortAllRecord {
+ AbortAllRecord(){ clientRef = 0; }
+ Uint32 clientData;
+ BlockReference clientRef;
+
+ Uint32 oldTimeOutValue;
+ };
+ AbortAllRecord c_abortRec;
+
+ /************************** API CONNECT RECORD ***********************/
+ /* *******************************************************************/
+ /* THE API CONNECT RECORD CONTAINS THE CONNECTION RECORD TO WHICH THE*/
+ /* APPLICATION CONNECTS. THE APPLICATION CAN SEND ONE OPERATION AT A */
+ /* TIME. IT CAN SEND A NEW OPERATION IMMEDIATELY AFTER SENDING THE */
+ /* PREVIOUS OPERATION. THEREBY SEVERAL OPERATIONS CAN BE ACTIVE IN */
+ /* ONE TRANSACTION WITHIN TC. THIS IS ACHIEVED BY USING THE API */
+ /* CONNECT RECORD. EACH ACTIVE OPERATION IS HANDLED BY THE TC */
+ /* CONNECT RECORD. AS SOON AS THE TC CONNECT RECORD HAS SENT THE */
+ /* REQUEST TO THE LQH IT IS READY TO RECEIVE NEW OPERATIONS. THE */
+ /* LQH CONNECT RECORD TAKES CARE OF WAITING FOR AN OPERATION TO */
+ /* COMPLETE. WHEN AN OPERATION HAS COMPLETED ON THE LQH CONNECT */
+ /* RECORD A NEW OPERATION CAN BE STARTED ON THIS LQH CONNECT RECORD. */
+ /*******************************************************************>*/
+ /* */
+ /* API CONNECT RECORD ALIGNED TO BE 256 BYTES */
+ /*******************************************************************>*/
+ /************************** TC CONNECT RECORD ************************/
+ /* *******************************************************************/
+ /* TC CONNECT RECORD KEEPS ALL INFORMATION TO CARRY OUT A TRANSACTION*/
+ /* THE TRANSACTION CONTROLLER ESTABLISHES CONNECTIONS TO DIFFERENT */
+ /* BLOCKS TO CARRY OUT THE TRANSACTION. THERE CAN BE SEVERAL RECORDS */
+ /* PER ACTIVE TRANSACTION. THE TC CONNECT RECORD COOPERATES WITH THE */
+ /* API CONNECT RECORD FOR COMMUNICATION WITH THE API AND WITH THE */
+ /* LQH CONNECT RECORD FOR COMMUNICATION WITH THE LQH'S INVOLVED IN */
+ /* THE TRANSACTION. TC CONNECT RECORD IS PERMANENTLY CONNECTED TO A */
+ /* RECORD IN DICT AND ONE IN DIH. IT CONTAINS A LIST OF ACTIVE LQH */
+ /* CONNECT RECORDS AND A LIST OF STARTED BUT NOT ACTIVE LQH CONNECT */
+ /* RECORDS. IT DOES ALSO CONTAIN A LIST OF ALL OPERATIONS THAT ARE */
+ /* EXECUTED WITH THE TC CONNECT RECORD. */
+ /*******************************************************************>*/
+ /* TC_CONNECT RECORD ALIGNED TO BE 128 BYTES */
+ /*******************************************************************>*/
+ UintR cfirstfreeTcConnectFail;
+
+ /* POINTER FOR THE LQH RECORD*/
+ /* ************************ HOST RECORD ********************************* */
+ /********************************************************/
+ /* THIS RECORD CONTAINS ALIVE-STATUS ON ALL NODES IN THE*/
+ /* SYSTEM */
+ /********************************************************/
+ /* THIS RECORD IS ALIGNED TO BE 8 BYTES. */
+ /********************************************************/
+ /* ************************ TABLE RECORD ******************************** */
+ /********************************************************/
+ /* THIS RECORD CONTAINS THE CURRENT SCHEMA VERSION OF */
+ /* ALL TABLES IN THE SYSTEM. */
+ /********************************************************/
+ /*-------------------------------------------------------------------------*/
+ /* THE TC CONNECTION USED BY THIS SCAN. */
+ /*-------------------------------------------------------------------------*/
+ /*-------------------------------------------------------------------------*/
+ /* LENGTH READ FOR A PARTICULAR SCANNED OPERATION. */
+ /*-------------------------------------------------------------------------*/
+ /*-------------------------------------------------------------------------*/
+ /* REFERENCE TO THE SCAN RECORD FOR THIS SCAN PROCESS. */
+ /*-------------------------------------------------------------------------*/
+ /* *********************************************************************** */
+ /* ******$ DATA BUFFER ******$ */
+ /* */
+ /* THIS BUFFER IS USED AS A GENERAL DATA STORAGE. */
+ /* *********************************************************************** */
+ /* *********************************************************************** */
+ /* ******$ ATTRIBUTE INFORMATION RECORD ******$ */
+ /*
+ CAN CONTAIN ONE (1) ATTRINFO SIGNAL. ONE SIGNAL CONTAINS 24 ATTR.
+ INFO WORDS. BUT 32 ELEMENTS ARE USED TO MAKE PLEX HAPPY.
+ SOME OF THE ELEMENTS ARE USED FOR THE FOLLOWING THINGS:
+ DATA LENGTH IN THIS RECORD IS STORED IN THE ELEMENT INDEXED BY
+ ZINBUF_DATA_LEN.
+ NEXT FREE ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY
+ PREVIOUS ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_PREV
+ (NOT USED YET).
+ NEXT ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_NEXT.
+ */
+ /* ********************************************************************** */
+ /**************************************************************************/
+ /* GLOBAL CHECKPOINT INFORMATION RECORD */
+ /* */
+ /* THIS RECORD IS USED TO STORE THE GCP NUMBER AND A COUNTER */
+ /* DURING THE COMPLETION PHASE OF THE TRANSACTION */
+ /**************************************************************************/
+ /* */
+ /* GCP RECORD ALIGNED TO BE 32 BYTES */
+ /**************************************************************************/
+ /**************************************************************************/
+ /* TC_FAIL_RECORD */
+ /* THIS RECORD IS USED WHEN HANDLING TAKE OVER OF ANOTHER FAILED TC NODE.*/
+ /**************************************************************************/
+ TcFailRecord *tcFailRecord;
+ TcFailRecordPtr tcNodeFailptr;
+ /**************************************************************************/
+ // Temporary variables that must not be used for storage between signals.
+ // They can only be used within a signal to transfer values between
+ // subroutines. In the long run these variables should be removed and
+ // replaced by stack-variable communication.
+ /**************************************************************************/
+};
+#endif
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
index f99b4bf15af..f99b4bf15af 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
new file mode 100644
index 00000000000..717aa9688c4
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -0,0 +1,13140 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTC_C
+
+#include "Dbtc.hpp"
+#include "md5_hash.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <my_sys.h>
+
+#include <signaldata/EventReport.hpp>
+#include <signaldata/TcKeyReq.hpp>
+#include <signaldata/TcKeyConf.hpp>
+#include <signaldata/TcKeyRef.hpp>
+#include <signaldata/KeyInfo.hpp>
+#include <signaldata/AttrInfo.hpp>
+#include <signaldata/TransIdAI.hpp>
+#include <signaldata/TcRollbackRep.hpp>
+#include <signaldata/NodeFailRep.hpp>
+#include <signaldata/ReadNodesConf.hpp>
+#include <signaldata/NFCompleteRep.hpp>
+#include <signaldata/LqhKey.hpp>
+#include <signaldata/TcCommit.hpp>
+#include <signaldata/TcContinueB.hpp>
+#include <signaldata/TcKeyFailConf.hpp>
+#include <signaldata/AbortAll.hpp>
+#include <signaldata/ScanFrag.hpp>
+#include <signaldata/ScanTab.hpp>
+#include <signaldata/PrepDropTab.hpp>
+#include <signaldata/DropTab.hpp>
+#include <signaldata/AlterTab.hpp>
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/DropTrig.hpp>
+#include <signaldata/FireTrigOrd.hpp>
+#include <signaldata/TrigAttrInfo.hpp>
+#include <signaldata/CreateIndx.hpp>
+#include <signaldata/DropIndx.hpp>
+#include <signaldata/AlterIndx.hpp>
+#include <signaldata/ScanTab.hpp>
+#include <signaldata/SystemError.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+#include <signaldata/DisconnectRep.hpp>
+#include <signaldata/TcHbRep.hpp>
+
+#include <signaldata/PrepDropTab.hpp>
+#include <signaldata/DropTab.hpp>
+#include <signaldata/TcIndx.hpp>
+#include <signaldata/IndxKeyInfo.hpp>
+#include <signaldata/IndxAttrInfo.hpp>
+#include <signaldata/PackedSignal.hpp>
+#include <AttributeHeader.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <AttributeDescriptor.hpp>
+#include <SectionReader.hpp>
+
+#include <NdbOut.hpp>
+#include <DebuggerNames.hpp>
+
+// Use DEBUG to print messages that should be
+// seen only when we debug the product
+#ifdef VM_TRACE
+#define DEBUG(x) ndbout << "DBTC: "<< x << endl;
+#else
+#define DEBUG(x)
+#endif
+
+#define INTERNAL_TRIGGER_TCKEYREQ_JBA 0
+
+#ifdef VM_TRACE
+NdbOut &
+operator<<(NdbOut& out, Dbtc::ConnectionState state){
+ switch(state){
+ case Dbtc::CS_CONNECTED: out << "CS_CONNECTED"; break;
+ case Dbtc::CS_DISCONNECTED: out << "CS_DISCONNECTED"; break;
+ case Dbtc::CS_STARTED: out << "CS_STARTED"; break;
+ case Dbtc::CS_RECEIVING: out << "CS_RECEIVING"; break;
+ case Dbtc::CS_PREPARED: out << "CS_PREPARED"; break;
+ case Dbtc::CS_START_PREPARING: out << "CS_START_PREPARING"; break;
+ case Dbtc::CS_REC_PREPARING: out << "CS_REC_PREPARING"; break;
+ case Dbtc::CS_RESTART: out << "CS_RESTART"; break;
+ case Dbtc::CS_ABORTING: out << "CS_ABORTING"; break;
+ case Dbtc::CS_COMPLETING: out << "CS_COMPLETING"; break;
+ case Dbtc::CS_COMPLETE_SENT: out << "CS_COMPLETE_SENT"; break;
+ case Dbtc::CS_PREPARE_TO_COMMIT: out << "CS_PREPARE_TO_COMMIT"; break;
+ case Dbtc::CS_COMMIT_SENT: out << "CS_COMMIT_SENT"; break;
+ case Dbtc::CS_START_COMMITTING: out << "CS_START_COMMITTING"; break;
+ case Dbtc::CS_COMMITTING: out << "CS_COMMITTING"; break;
+ case Dbtc::CS_REC_COMMITTING: out << "CS_REC_COMMITTING"; break;
+ case Dbtc::CS_WAIT_ABORT_CONF: out << "CS_WAIT_ABORT_CONF"; break;
+ case Dbtc::CS_WAIT_COMPLETE_CONF: out << "CS_WAIT_COMPLETE_CONF"; break;
+ case Dbtc::CS_WAIT_COMMIT_CONF: out << "CS_WAIT_COMMIT_CONF"; break;
+ case Dbtc::CS_FAIL_ABORTING: out << "CS_FAIL_ABORTING"; break;
+ case Dbtc::CS_FAIL_ABORTED: out << "CS_FAIL_ABORTED"; break;
+ case Dbtc::CS_FAIL_PREPARED: out << "CS_FAIL_PREPARED"; break;
+ case Dbtc::CS_FAIL_COMMITTING: out << "CS_FAIL_COMMITTING"; break;
+ case Dbtc::CS_FAIL_COMMITTED: out << "CS_FAIL_COMMITTED"; break;
+ case Dbtc::CS_FAIL_COMPLETED: out << "CS_FAIL_COMPLETED"; break;
+ case Dbtc::CS_START_SCAN: out << "CS_START_SCAN"; break;
+ default:
+ out << "Unknown: " << (int)state; break;
+ }
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::OperationState state){
+ out << (int)state;
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::AbortState state){
+ out << (int)state;
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::ReturnSignal state){
+ out << (int)state;
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::ScanRecord::ScanState state){
+ out << (int)state;
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::ScanFragRec::ScanFragState state){
+ out << (int)state;
+ return out;
+}
+#endif
+
+void
+Dbtc::updateBuddyTimer(ApiConnectRecordPtr apiPtr)
+{
+ if (apiPtr.p->buddyPtr != RNIL) {
+ jam();
+ ApiConnectRecordPtr buddyApiPtr;
+ buddyApiPtr.i = apiPtr.p->buddyPtr;
+ ptrCheckGuard(buddyApiPtr, capiConnectFilesize, apiConnectRecord);
+ if (getApiConTimer(buddyApiPtr.i) != 0) {
+ if ((apiPtr.p->transid[0] == buddyApiPtr.p->transid[0]) &&
+ (apiPtr.p->transid[1] == buddyApiPtr.p->transid[1])) {
+ jam();
+ setApiConTimer(buddyApiPtr.i, ctcTimer, __LINE__);
+ } else {
+ jam();
+ // Not a buddy anymore since not the same transid
+ apiPtr.p->buddyPtr = RNIL;
+ }//if
+ }//if
+ }//if
+}
+
+void Dbtc::execCONTINUEB(Signal* signal)
+{
+ UintR tcase;
+
+ jamEntry();
+ tcase = signal->theData[0];
+ UintR Tdata0 = signal->theData[1];
+ UintR Tdata1 = signal->theData[2];
+ UintR Tdata2 = signal->theData[3];
+ switch (tcase) {
+ case TcContinueB::ZRETURN_FROM_QUEUED_DELIVERY:
+ jam();
+ ndbrequire(false);
+ return;
+ case TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER:
+ jam();
+ tcNodeFailptr.i = Tdata0;
+ ptrCheckGuard(tcNodeFailptr, 1, tcFailRecord);
+ completeTransAtTakeOverLab(signal, Tdata1);
+ return;
+ case TcContinueB::ZCONTINUE_TIME_OUT_CONTROL:
+ jam();
+ timeOutLoopStartLab(signal, Tdata0);
+ return;
+ case TcContinueB::ZNODE_TAKE_OVER_COMPLETED:
+ jam();
+ tnodeid = Tdata0;
+ tcNodeFailptr.i = 0;
+ ptrAss(tcNodeFailptr, tcFailRecord);
+ nodeTakeOverCompletedLab(signal);
+ return;
+ case TcContinueB::ZINITIALISE_RECORDS:
+ jam();
+ initialiseRecordsLab(signal, Tdata0, Tdata2, signal->theData[4]);
+ return;
+ case TcContinueB::ZSEND_COMMIT_LOOP:
+ jam();
+ apiConnectptr.i = Tdata0;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ tcConnectptr.i = Tdata1;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ commit020Lab(signal);
+ return;
+ case TcContinueB::ZSEND_COMPLETE_LOOP:
+ jam();
+ apiConnectptr.i = Tdata0;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ tcConnectptr.i = Tdata1;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ complete010Lab(signal);
+ return;
+ case TcContinueB::ZHANDLE_FAILED_API_NODE:
+ jam();
+ handleFailedApiNode(signal, Tdata0, Tdata1);
+ return;
+ case TcContinueB::ZTRANS_EVENT_REP:
+ jam();
+ /* -------------------------------------------------------------------- */
+ // Report information about transaction activity at regular intervals.
+ /* -------------------------------------------------------------------- */
+ if (c_counters.c_trans_status == TransCounters::Timer){
+ Uint32 len = c_counters.report(signal);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, len, JBB);
+
+ c_counters.reset();
+ signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 1);
+ }
+ return;
+ case TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL:
+ jam();
+ timeOutLoopStartFragLab(signal, Tdata0);
+ return;
+ case TcContinueB::ZABORT_BREAK:
+ jam();
+ tcConnectptr.i = Tdata0;
+ apiConnectptr.i = Tdata1;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ apiConnectptr.p->counter--;
+ abort015Lab(signal);
+ return;
+ case TcContinueB::ZABORT_TIMEOUT_BREAK:
+ jam();
+ tcConnectptr.i = Tdata0;
+ apiConnectptr.i = Tdata1;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ apiConnectptr.p->counter--;
+ sendAbortedAfterTimeout(signal, 1);
+ return;
+ case TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS:
+ jam();
+ removeMarkerForFailedAPI(signal, Tdata0, Tdata1);
+ return;
+ case TcContinueB::ZWAIT_ABORT_ALL:
+ jam();
+ checkAbortAllTimeout(signal, Tdata0);
+ return;
+ case TcContinueB::ZCHECK_SCAN_ACTIVE_FAILED_LQH:
+ jam();
+ checkScanActiveInFailedLqh(signal, Tdata0, Tdata1);
+ return;
+ case TcContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH:
+ jam();
+ checkWaitDropTabFailedLqh(signal, Tdata0, Tdata1);
+ return;
+ case TcContinueB::TRIGGER_PENDING:
+ jam();
+ ApiConnectRecordPtr transPtr;
+ transPtr.i = Tdata0;
+ ptrCheckGuard(transPtr, capiConnectFilesize, apiConnectRecord);
+ transPtr.p->triggerPending = false;
+ executeTriggers(signal, &transPtr);
+ return;
+ case TcContinueB::DelayTCKEYCONF:
+ jam();
+ apiConnectptr.i = Tdata0;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ sendtckeyconf(signal, Tdata1);
+ return;
+ default:
+ ndbrequire(false);
+ }//switch
+}
+
+void Dbtc::execDIGETNODESREF(Signal* signal)
+{
+ jamEntry();
+ terrorCode = signal->theData[1];
+ releaseAtErrorLab(signal);
+}
+
+void Dbtc::execINCL_NODEREQ(Signal* signal)
+{
+ jamEntry();
+ tblockref = signal->theData[0];
+ hostptr.i = signal->theData[1];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ hostptr.p->hostStatus = HS_ALIVE;
+ hostptr.p->takeOverStatus = TOS_IDLE;
+ signal->theData[0] = cownref;
+ sendSignal(tblockref, GSN_INCL_NODECONF, signal, 1, JBB);
+}
+
+void Dbtc::execREAD_NODESREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}
+
+void Dbtc::execTC_SCHVERREQ(Signal* signal)
+{
+ jamEntry();
+ if (! assembleFragments(signal)) {
+ jam();
+ return;
+ }
+ tabptr.i = signal->theData[0];
+ ptrCheckGuard(tabptr, ctabrecFilesize, tableRecord);
+ tabptr.p->currentSchemaVersion = signal->theData[1];
+ tabptr.p->storedTable = (bool)signal->theData[2];
+ BlockReference retRef = signal->theData[3];
+ tabptr.p->tableType = (Uint8)signal->theData[4];
+ BlockReference retPtr = signal->theData[5];
+ Uint32 noOfKeyAttr = signal->theData[6];
+ ndbrequire(noOfKeyAttr <= MAX_ATTRIBUTES_IN_INDEX);
+ Uint32 hasCharAttr = 0;
+ Uint32 noOfDistrKeys = 0;
+ SegmentedSectionPtr s0Ptr;
+ signal->getSection(s0Ptr, 0);
+ SectionReader r0(s0Ptr, getSectionSegmentPool());
+ Uint32 i = 0;
+ while (i < noOfKeyAttr) {
+ jam();
+ Uint32 attributeDescriptor = ~0;
+ Uint32 csNumber = ~0;
+ if (! r0.getWord(&attributeDescriptor) ||
+ ! r0.getWord(&csNumber)) {
+ jam();
+ break;
+ }
+ CHARSET_INFO* cs = 0;
+ if (csNumber != 0) {
+ cs = all_charsets[csNumber];
+ ndbrequire(cs != 0);
+ hasCharAttr = 1;
+ }
+
+ noOfDistrKeys += AttributeDescriptor::getDKey(attributeDescriptor);
+ tabptr.p->keyAttr[i].attributeDescriptor = attributeDescriptor;
+ tabptr.p->keyAttr[i].charsetInfo = cs;
+ i++;
+ }
+ ndbrequire(i == noOfKeyAttr);
+ releaseSections(signal);
+
+ ndbrequire(tabptr.p->enabled == false);
+ tabptr.p->enabled = true;
+ tabptr.p->dropping = false;
+ tabptr.p->noOfKeyAttr = noOfKeyAttr;
+ tabptr.p->hasCharAttr = hasCharAttr;
+ tabptr.p->noOfDistrKeys = noOfDistrKeys;
+
+ signal->theData[0] = tabptr.i;
+ signal->theData[1] = retPtr;
+ sendSignal(retRef, GSN_TC_SCHVERCONF, signal, 2, JBB);
+}//Dbtc::execTC_SCHVERREQ()
+
+void
+Dbtc::execPREP_DROP_TAB_REQ(Signal* signal)
+{
+ jamEntry();
+
+ PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+
+ if(!tabPtr.p->enabled){
+ jam();
+ PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->tableId = tabPtr.i;
+ ref->errorCode = PrepDropTabRef::NoSuchTable;
+ sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal,
+ PrepDropTabRef::SignalLength, JBB);
+ return;
+ }
+
+ if(tabPtr.p->dropping){
+ jam();
+ PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->tableId = tabPtr.i;
+ ref->errorCode = PrepDropTabRef::DropInProgress;
+ sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal,
+ PrepDropTabRef::SignalLength, JBB);
+ return;
+ }
+
+ tabPtr.p->dropping = true;
+ tabPtr.p->dropTable.senderRef = senderRef;
+ tabPtr.p->dropTable.senderData = senderData;
+
+ {
+ WaitDropTabReq * req = (WaitDropTabReq*)signal->getDataPtrSend();
+ req->tableId = tabPtr.i;
+ req->senderRef = reference();
+
+ HostRecordPtr hostPtr;
+ tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor();
+ for (hostPtr.i = 1; hostPtr.i < MAX_NDB_NODES; hostPtr.i++) {
+ jam();
+ ptrAss(hostPtr, hostRecord);
+ if (hostPtr.p->hostStatus == HS_ALIVE) {
+ jam();
+ tabPtr.p->dropTable.waitDropTabCount.setWaitingFor(hostPtr.i);
+ sendSignal(calcLqhBlockRef(hostPtr.i), GSN_WAIT_DROP_TAB_REQ,
+ signal, WaitDropTabReq::SignalLength, JBB);
+      }//if
+    }//for
+
+ ndbrequire(tabPtr.p->dropTable.waitDropTabCount.done() != true);
+ }
+}
+
+void
+Dbtc::execWAIT_DROP_TAB_CONF(Signal* signal)
+{
+ jamEntry();
+ WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr();
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = conf->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+
+ ndbrequire(tabPtr.p->dropping == true);
+ Uint32 nodeId = refToNode(conf->senderRef);
+ tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId);
+
+ if(!tabPtr.p->dropTable.waitDropTabCount.done()){
+ jam();
+ return;
+ }
+
+ {
+ PrepDropTabConf* conf = (PrepDropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ conf->senderData = tabPtr.p->dropTable.senderData;
+ sendSignal(tabPtr.p->dropTable.senderRef, GSN_PREP_DROP_TAB_CONF, signal,
+ PrepDropTabConf::SignalLength, JBB);
+ tabPtr.p->dropTable.senderRef = 0;
+ }
+}
+
+void
+Dbtc::execWAIT_DROP_TAB_REF(Signal* signal)
+{
+ jamEntry();
+ WaitDropTabRef * ref = (WaitDropTabRef*)signal->getDataPtr();
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = ref->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+
+ ndbrequire(tabPtr.p->dropping == true);
+ Uint32 nodeId = refToNode(ref->senderRef);
+ tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId);
+
+ ndbrequire(ref->errorCode == WaitDropTabRef::NoSuchTable ||
+ ref->errorCode == WaitDropTabRef::NF_FakeErrorREF);
+
+ if(!tabPtr.p->dropTable.waitDropTabCount.done()){
+ jam();
+ return;
+ }
+
+ {
+ PrepDropTabConf* conf = (PrepDropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ conf->senderData = tabPtr.p->dropTable.senderData;
+ sendSignal(tabPtr.p->dropTable.senderRef, GSN_PREP_DROP_TAB_CONF, signal,
+ PrepDropTabConf::SignalLength, JBB);
+ tabPtr.p->dropTable.senderRef = 0;
+ }
+}
+
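+/**
+ * An LQH node failed while we were waiting for WAIT_DROP_TAB_CONF from it.
+ * Scan the table records, RT_BREAK tables per real-time break, and fake a
+ * WAIT_DROP_TAB_CONF on behalf of the failed node for every table still
+ * waiting for it, resuming via CONTINUEB until all tables have been checked.
+ */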
+void
+Dbtc::checkWaitDropTabFailedLqh(Signal* signal, Uint32 nodeId, Uint32 tableId)
+{
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = tableId;
+
+ WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr();
+ conf->tableId = tableId;
+
+ const Uint32 RT_BREAK = 16;
+ for(Uint32 i = 0; i<RT_BREAK && tabPtr.i < ctabrecFilesize; i++, tabPtr.i++){
+ jam();
+ ptrAss(tabPtr, tableRecord);
+ if(tabPtr.p->enabled && tabPtr.p->dropping){
+ if(tabPtr.p->dropTable.waitDropTabCount.isWaitingFor(nodeId)){
+ jam();
+ conf->senderRef = calcLqhBlockRef(nodeId);
+ execWAIT_DROP_TAB_CONF(signal);
+ tabPtr.i++;
+ break;
+ }
+ }
+ }
+
+ if(tabPtr.i == ctabrecFilesize){
+ /**
+ * Finished
+ */
+ jam();
+ return;
+ }
+
+ signal->theData[0] = TcContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+}
+
+void
+Dbtc::execDROP_TAB_REQ(Signal* signal)
+{
+ jamEntry();
+
+ DropTabReq* req = (DropTabReq*)signal->getDataPtr();
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+ DropTabReq::RequestType rt = (DropTabReq::RequestType)req->requestType;
+
+ if(!tabPtr.p->enabled && rt == DropTabReq::OnlineDropTab){
+ jam();
+ DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->tableId = tabPtr.i;
+ ref->errorCode = DropTabRef::NoSuchTable;
+ sendSignal(senderRef, GSN_DROP_TAB_REF, signal,
+ DropTabRef::SignalLength, JBB);
+ return;
+ }
+
+ if(!tabPtr.p->dropping && rt == DropTabReq::OnlineDropTab){
+ jam();
+ DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->tableId = tabPtr.i;
+ ref->errorCode = DropTabRef::DropWoPrep;
+ sendSignal(senderRef, GSN_DROP_TAB_REF, signal,
+ DropTabRef::SignalLength, JBB);
+ return;
+ }
+
+ tabPtr.p->enabled = false;
+ tabPtr.p->dropping = false;
+
+ DropTabConf * conf = (DropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ sendSignal(senderRef, GSN_DROP_TAB_CONF, signal,
+	     DropTabConf::SignalLength, JBB);
+}
+
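+/**
+ * For ALTER TABLE the only action needed in TC is to record the new schema
+ * version; the request is then confirmed back to the sender unchanged.
+ */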
+void Dbtc::execALTER_TAB_REQ(Signal * signal)
+{
+ AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr();
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+ const Uint32 changeMask = req->changeMask;
+ const Uint32 tableId = req->tableId;
+ const Uint32 tableVersion = req->tableVersion;
+ const Uint32 gci = req->gci;
+ AlterTabReq::RequestType requestType =
+ (AlterTabReq::RequestType) req->requestType;
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+ tabPtr.p->currentSchemaVersion = tableVersion;
+
+ // Request handled successfully
+ AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->changeMask = changeMask;
+ conf->tableId = tableId;
+ conf->tableVersion = tableVersion;
+ conf->gci = gci;
+ conf->requestType = requestType;
+ sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal,
+ AlterTabConf::SignalLength, JBB);
+}
+
+/* ***************************************************************************/
+/* START / RESTART */
+/* ***************************************************************************/
+void Dbtc::execREAD_CONFIG_REQ(Signal* signal)
+{
+ const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
+ Uint32 ref = req->senderRef;
+ Uint32 senderData = req->senderData;
+ ndbrequire(req->noOfParameters == 0);
+
+ jamEntry();
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ UintR apiConnect;
+ UintR tcConnect;
+ UintR tables;
+ UintR localScan;
+ UintR tcScan;
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_API_CONNECT, &apiConnect));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_TC_CONNECT, &tcConnect));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_TABLE, &tables));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_LOCAL_SCAN, &localScan));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_SCAN, &tcScan));
+
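+  /* Dimension the record pools from the configuration.  A cache record is
+   * only held while an operation is being received, so the cache pool is
+   * sized to one third of the API connect records plus one. */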
+ ccacheFilesize = (apiConnect/3) + 1;
+ capiConnectFilesize = apiConnect;
+ ctcConnectFilesize = tcConnect;
+ ctabrecFilesize = tables;
+ cscanrecFileSize = tcScan;
+ cscanFragrecFileSize = localScan;
+
+ initRecords();
+ initialiseRecordsLab(signal, 0, ref, senderData);
+
+ Uint32 val = 3000;
+ ndb_mgm_get_int_parameter(p, CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT, &val);
+ set_timeout_value(val);
+
+ val = 3000;
+ ndb_mgm_get_int_parameter(p, CFG_DB_TRANSACTION_INACTIVE_TIMEOUT, &val);
+ set_appl_timeout_value(val);
+
+ val = 1;
+ //ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_TRANSACTION_TAKEOVER, &val);
+ set_no_parallel_takeover(val);
+
+ ctimeOutCheckDelay = 50; // 500ms
+}//Dbtc::execREAD_CONFIG_REQ()
+
+void Dbtc::execSTTOR(Signal* signal)
+{
+ Uint16 tphase;
+
+ jamEntry();
+ /* START CASE */
+ tphase = signal->theData[1];
+ csignalKey = signal->theData[6];
+ switch (tphase) {
+ case ZSPH1:
+ jam();
+ startphase1x010Lab(signal);
+ return;
+ default:
+ jam();
+ sttorryLab(signal); /* START PHASE 255 */
+ return;
+ }//switch
+}//Dbtc::execSTTOR()
+
+void Dbtc::sttorryLab(Signal* signal)
+{
+ signal->theData[0] = csignalKey;
+ signal->theData[1] = 3; /* BLOCK CATEGORY */
+ signal->theData[2] = 2; /* SIGNAL VERSION NUMBER */
+ signal->theData[3] = ZSPH1;
+ signal->theData[4] = 255;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
+}//Dbtc::sttorryLab()
+
+/* ***************************************************************************/
+/* INTERNAL START / RESTART */
+/*****************************************************************************/
+void Dbtc::execNDB_STTOR(Signal* signal)
+{
+ Uint16 tndbstartphase;
+ Uint16 tstarttype;
+
+ jamEntry();
+ tusersblkref = signal->theData[0];
+ tnodeid = signal->theData[1];
+ tndbstartphase = signal->theData[2]; /* START PHASE */
+ tstarttype = signal->theData[3]; /* START TYPE */
+ switch (tndbstartphase) {
+ case ZINTSPH1:
+ jam();
+ intstartphase1x010Lab(signal);
+ return;
+ case ZINTSPH2:
+ jam();
+ intstartphase2x010Lab(signal);
+ return;
+ case ZINTSPH3:
+ jam();
+ intstartphase3x010Lab(signal); /* SEIZE CONNECT RECORD IN EACH LQH*/
+// Start transaction event reporting.
+ c_counters.c_trans_status = TransCounters::Timer;
+ c_counters.reset();
+ signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 1);
+ return;
+ case ZINTSPH6:
+ jam();
+ csystemStart = SSS_TRUE;
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ ndbsttorry010Lab(signal);
+ return;
+}//Dbtc::execNDB_STTOR()
+
+void Dbtc::ndbsttorry010Lab(Signal* signal)
+{
+ signal->theData[0] = cownref;
+ sendSignal(cndbcntrblockref, GSN_NDB_STTORRY, signal, 1, JBB);
+}//Dbtc::ndbsttorry010Lab()
+
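+/**
+ * The deadlock timeout is configured in milliseconds but the TC timers tick
+ * every 10 ms, so the value is converted to ticks here.  Values below 20 ms
+ * fall back to 100 ticks (one second).
+ */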
+void
+Dbtc::set_timeout_value(Uint32 timeOut)
+{
+ timeOut = timeOut / 10;
+ if (timeOut < 2) {
+ jam();
+ timeOut = 100;
+ }//if
+ ctimeOutValue = timeOut;
+}
+
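+/**
+ * The application inactivity timeout uses the same 10 ms tick.  A value of
+ * zero is passed through unchanged; otherwise the value is converted to
+ * ticks and is not allowed to be shorter than the deadlock timeout.
+ */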
+void
+Dbtc::set_appl_timeout_value(Uint32 timeOut)
+{
+ if (timeOut)
+ {
+ timeOut /= 10;
+ if (timeOut < ctimeOutValue) {
+ jam();
+      timeOut = ctimeOutValue;
+ }//if
+ }
+ c_appl_timeout_value = timeOut;
+}
+
+void
+Dbtc::set_no_parallel_takeover(Uint32 noParallelTakeOver)
+{
+ if (noParallelTakeOver == 0) {
+ jam();
+ noParallelTakeOver = 1;
+ } else if (noParallelTakeOver > MAX_NDB_NODES) {
+ jam();
+ noParallelTakeOver = MAX_NDB_NODES;
+ }//if
+ cnoParallelTakeOver = noParallelTakeOver;
+}
+
+/* ***************************************************************************/
+/* S T A R T P H A S E 1 X */
+/* INITIALISE BLOCKREF AND BLOCKNUMBERS */
+/* ***************************************************************************/
+void Dbtc::startphase1x010Lab(Signal* signal)
+{
+ csystemStart = SSS_FALSE;
+ ctimeOutCheckCounter = 0;
+ ctimeOutCheckFragCounter = 0;
+ ctimeOutMissedHeartbeats = 0;
+ ctimeOutCheckHeartbeat = 0;
+ ctimeOutCheckLastHeartbeat = 0;
+ ctimeOutCheckActive = TOCS_FALSE;
+ ctimeOutCheckFragActive = TOCS_FALSE;
+ sttorryLab(signal);
+}//Dbtc::startphase1x010Lab()
+
+/*****************************************************************************/
+/* I N T S T A R T P H A S E 1 X */
+/* INITIALISE ALL RECORDS. */
+/*****************************************************************************/
+void Dbtc::intstartphase1x010Lab(Signal* signal)
+{
+ cownNodeid = tnodeid;
+ cownref = calcTcBlockRef(cownNodeid);
+ clqhblockref = calcLqhBlockRef(cownNodeid);
+ cdihblockref = calcDihBlockRef(cownNodeid);
+ cdictblockref = calcDictBlockRef(cownNodeid);
+ cndbcntrblockref = calcNdbCntrBlockRef(cownNodeid);
+ cerrorBlockref = calcNdbCntrBlockRef(cownNodeid);
+ coperationsize = 0;
+ cfailure_nr = 0;
+ ndbsttorry010Lab(signal);
+}//Dbtc::intstartphase1x010Lab()
+
+/*****************************************************************************/
+/* I N T S T A R T P H A S E 2 X */
+/* SET-UP LOCAL CONNECTIONS. */
+/*****************************************************************************/
+void Dbtc::intstartphase2x010Lab(Signal* signal)
+{
+ tcConnectptr.i = cfirstfreeTcConnect;
+ intstartphase2x020Lab(signal);
+}//Dbtc::intstartphase2x010Lab()
+
+void Dbtc::intstartphase2x020Lab(Signal* signal)
+{
+ if (tcConnectptr.i == RNIL) {
+ jam();
+ ndbsttorry010Lab(signal);
+ return;
+ }//if
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ tcConnectptr.p->tcConnectstate = OS_CONNECTING_DICT;
+/* ****************** */
+/* DISEIZEREQ < */
+/* ****************** */
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ sendSignal(cdihblockref, GSN_DISEIZEREQ, signal, 2, JBB);
+}//Dbtc::intstartphase2x020Lab()
+
+void Dbtc::execDISEIZECONF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ tcConnectptr.p->dihConnectptr = signal->theData[1];
+ tcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ intstartphase2x020Lab(signal);
+}//Dbtc::execDISEIZECONF()
+
+/*****************************************************************************/
+/* I N T S T A R T P H A S E 3 X */
+/* PREPARE DISTRIBUTED CONNECTIONS */
+/*****************************************************************************/
+void Dbtc::intstartphase3x010Lab(Signal* signal)
+{
+ signal->theData[0] = cownref;
+ sendSignal(cndbcntrblockref, GSN_READ_NODESREQ, signal, 1, JBB);
+}//Dbtc::intstartphase3x010Lab()
+
+void Dbtc::execREAD_NODESCONF(Signal* signal)
+{
+ UintR guard0;
+
+ jamEntry();
+
+ ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
+
+ csystemnodes = readNodes->noOfNodes;
+ cmasterNodeId = readNodes->masterNodeId;
+
+ con_lineNodes = 0;
+ arrGuard(csystemnodes, MAX_NDB_NODES);
+ guard0 = csystemnodes - 1;
+ arrGuard(guard0, MAX_NDB_NODES); // Check not zero nodes
+
+ for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
+ jam();
+ if (NodeBitmask::get(readNodes->allNodes, i)) {
+ hostptr.i = i;
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+
+ hostptr.p->takeOverStatus = TOS_IDLE;
+
+ if (NodeBitmask::get(readNodes->inactiveNodes, i)) {
+ jam();
+ hostptr.p->hostStatus = HS_DEAD;
+ } else {
+ jam();
+ con_lineNodes++;
+ hostptr.p->hostStatus = HS_ALIVE;
+ }//if
+ }//if
+ }//for
+ ndbsttorry010Lab(signal);
+}//Dbtc::execREAD_NODESCONF()
+
+/*****************************************************************************/
+/* A P I _ F A I L R E Q */
+// An API node has failed for some reason. We need to disconnect all API
+// connections to the API node. This also includes aborting or finishing
+// off any transactions that the failed API node still had outstanding.
+/*****************************************************************************/
+void Dbtc::execAPI_FAILREQ(Signal* signal)
+{
+ /***************************************************************************
+ * Set the block reference to return API_FAILCONF to. Set the number of api
+ * connects currently closing to one to indicate that we are still in the
+ * process of going through the api connect records. Thus checking for zero
+ * can only be true after all api connect records have been checked.
+ **************************************************************************/
+ jamEntry();
+ capiFailRef = signal->theData[1];
+ arrGuard(signal->theData[0], MAX_NODES);
+ capiConnectClosing[signal->theData[0]] = 1;
+ handleFailedApiNode(signal, signal->theData[0], (UintR)0);
+}
+
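+/**
+ * Walk the API connect records and close every connection that belongs to
+ * the failed API node.  Ongoing commits and aborts are flagged with
+ * apiFailState and counted in capiConnectClosing; idle connections are
+ * released immediately.  At most 256 records are inspected per invocation,
+ * after which a CONTINUEB signal resumes the scan at the next record.
+ */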
+void
+Dbtc::handleFailedApiNode(Signal* signal,
+ UintR TapiFailedNode,
+ UintR TapiConnectPtr)
+{
+ UintR TloopCount = 0;
+ arrGuard(TapiFailedNode, MAX_NODES);
+ apiConnectptr.i = TapiConnectPtr;
+ do {
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ const UintR TapiNode = refToNode(apiConnectptr.p->ndbapiBlockref);
+ if (TapiNode == TapiFailedNode) {
+#ifdef VM_TRACE
+ if (apiConnectptr.p->apiFailState != ZFALSE) {
+ ndbout << "Error in previous API fail handling discovered" << endl
+ << " apiConnectptr.i = " << apiConnectptr.i << endl
+ << " apiConnectstate = " << apiConnectptr.p->apiConnectstate
+ << endl
+ << " ndbapiBlockref = " << hex
+ << apiConnectptr.p->ndbapiBlockref << endl
+ << " apiNode = " << refToNode(apiConnectptr.p->ndbapiBlockref)
+ << endl;
+ if (apiConnectptr.p->lastTcConnect != RNIL){
+ jam();
+ tcConnectptr.i = apiConnectptr.p->lastTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ ndbout << " tcConnectptr.i = " << tcConnectptr.i << endl
+ << " tcConnectstate = " << tcConnectptr.p->tcConnectstate
+ << endl;
+ }
+ }//if
+#endif
+
+ apiConnectptr.p->returnsignal = RS_NO_RETURN;
+ /***********************************************************************/
+ // The connected node is the failed node.
+ /**********************************************************************/
+ switch(apiConnectptr.p->apiConnectstate) {
+ case CS_DISCONNECTED:
+ /*********************************************************************/
+ // These states do not need any special handling.
+ // Simply continue with the next.
+ /*********************************************************************/
+ jam();
+ break;
+ case CS_ABORTING:
+ /*********************************************************************/
+ // This could actually mean that the API connection is already
+ // ready to release if the abortState is IDLE.
+ /*********************************************************************/
+ if (apiConnectptr.p->abortState == AS_IDLE) {
+ jam();
+ releaseApiCon(signal, apiConnectptr.i);
+ } else {
+ jam();
+ capiConnectClosing[TapiFailedNode]++;
+ apiConnectptr.p->apiFailState = ZTRUE;
+ }//if
+ break;
+ case CS_WAIT_ABORT_CONF:
+ case CS_WAIT_COMMIT_CONF:
+ case CS_START_COMMITTING:
+ case CS_PREPARE_TO_COMMIT:
+ case CS_COMMITTING:
+ case CS_COMMIT_SENT:
+ /*********************************************************************/
+ // These states indicate that an abort process or commit process is
+ // already ongoing. We will set a state in the api record indicating
+ // that the API node has failed.
+ // Also we will increase the number of outstanding api records to
+ // wait for before we can respond with API_FAILCONF.
+ /*********************************************************************/
+ jam();
+ capiConnectClosing[TapiFailedNode]++;
+ apiConnectptr.p->apiFailState = ZTRUE;
+ break;
+ case CS_START_SCAN:
+ /*********************************************************************/
+ // The api record was performing a scan operation. We need to check
+ // on the scan state. Since completing a scan process might involve
+ // sending several signals we will increase the loop count by 64.
+ /*********************************************************************/
+ jam();
+
+ apiConnectptr.p->apiFailState = ZTRUE;
+ capiConnectClosing[TapiFailedNode]++;
+
+ ScanRecordPtr scanPtr;
+ scanPtr.i = apiConnectptr.p->apiScanRec;
+ ptrCheckGuard(scanPtr, cscanrecFileSize, scanRecord);
+ close_scan_req(signal, scanPtr, true);
+
+ TloopCount += 64;
+ break;
+ case CS_CONNECTED:
+ /*********************************************************************/
+ // The api record is connected to failed node. We need to release the
+ // connection and set it in a disconnected state.
+ /*********************************************************************/
+ jam();
+ releaseApiCon(signal, apiConnectptr.i);
+ break;
+ case CS_REC_COMMITTING:
+ case CS_RECEIVING:
+ case CS_STARTED:
+ /*********************************************************************/
+ // The api record was in the process of performing a transaction but
+ // had not yet sent all information.
+ // We need to initiate an ABORT since the API will not provide any
+ // more information.
+ // Since the abort can send many signals we will insert a real-time
+ // break after checking this record.
+ /*********************************************************************/
+ jam();
+ apiConnectptr.p->apiFailState = ZTRUE;
+ capiConnectClosing[TapiFailedNode]++;
+ abort010Lab(signal);
+ TloopCount = 256;
+ break;
+ case CS_PREPARED:
+ jam();
+ case CS_REC_PREPARING:
+ jam();
+ case CS_START_PREPARING:
+ jam();
+ /*********************************************************************/
+ // Not implemented yet.
+ /*********************************************************************/
+ systemErrorLab(signal);
+ break;
+ case CS_RESTART:
+ jam();
+ case CS_COMPLETING:
+ jam();
+ case CS_COMPLETE_SENT:
+ jam();
+ case CS_WAIT_COMPLETE_CONF:
+ jam();
+ case CS_FAIL_ABORTING:
+ jam();
+ case CS_FAIL_ABORTED:
+ jam();
+ case CS_FAIL_PREPARED:
+ jam();
+ case CS_FAIL_COMMITTING:
+ jam();
+ case CS_FAIL_COMMITTED:
+ /*********************************************************************/
+ // These states are only valid on copy and fail API connections.
+ /*********************************************************************/
+ default:
+ jam();
+ systemErrorLab(signal);
+ break;
+ }//switch
+ } else {
+ jam();
+ }//if
+ apiConnectptr.i++;
+ if (apiConnectptr.i > ((capiConnectFilesize / 3) - 1)) {
+ jam();
+ /**
+       * Finished scanning the API connect records
+ *
+ * Now scan markers
+ */
+ removeMarkerForFailedAPI(signal, TapiFailedNode, 0);
+ return;
+ }//if
+ } while (TloopCount++ < 256);
+ signal->theData[0] = TcContinueB::ZHANDLE_FAILED_API_NODE;
+ signal->theData[1] = TapiFailedNode;
+ signal->theData[2] = apiConnectptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+}//Dbtc::handleFailedApiNode()
+
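+/**
+ * After all API connect records have been scanned, the commit-ack markers
+ * owned by the failed node are removed, a few hash buckets at a time with
+ * CONTINUEB in between.  When the iteration completes and no connections
+ * remain to be closed, API_FAILCONF is finally sent.
+ */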
+void
+Dbtc::removeMarkerForFailedAPI(Signal* signal,
+ Uint32 nodeId,
+ Uint32 startBucket)
+{
+ TcFailRecordPtr node_fail_ptr;
+ node_fail_ptr.i = 0;
+ ptrAss(node_fail_ptr, tcFailRecord);
+ if(node_fail_ptr.p->failStatus != FS_IDLE) {
+ jam();
+ DEBUG("Restarting removeMarkerForFailedAPI");
+ /**
+ * TC take-over in progress
+ * needs to restart as this
+ * creates new markers
+ */
+ signal->theData[0] = TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = 0;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 500, 3);
+ return;
+ }
+
+ CommitAckMarkerIterator iter;
+ m_commitAckMarkerHash.next(startBucket, iter);
+
+ const Uint32 RT_BREAK = 256;
+ for(Uint32 i = 0; i<RT_BREAK || iter.bucket == startBucket; i++){
+ jam();
+
+ if(iter.curr.i == RNIL){
+ jam();
+ /**
+ * Done with iteration
+ */
+ capiConnectClosing[nodeId]--;
+ if (capiConnectClosing[nodeId] == 0) {
+ jam();
+ /********************************************************************/
+ // No outstanding ABORT or COMMIT's of this failed API node.
+ // We can respond with API_FAILCONF
+ /********************************************************************/
+ signal->theData[0] = nodeId;
+ signal->theData[1] = cownref;
+ sendSignal(capiFailRef, GSN_API_FAILCONF, signal, 2, JBB);
+ }
+ return;
+ }
+
+ if(iter.curr.p->apiNodeId == nodeId){
+ jam();
+
+ /**
+ * Check so that the record is not still in use
+ *
+ */
+ ApiConnectRecordPtr apiConnectPtr;
+ apiConnectPtr.i = iter.curr.p->apiConnectPtr;
+ ptrCheckGuard(apiConnectPtr, capiConnectFilesize, apiConnectRecord);
+ if(apiConnectPtr.p->commitAckMarker == iter.curr.i){
+ jam();
+ /**
+ * The record is still active
+ *
+ * Don't remove it, but continueb instead
+ */
+ break;
+ }
+ sendRemoveMarkers(signal, iter.curr.p);
+ m_commitAckMarkerHash.release(iter.curr);
+
+ break;
+ }
+ m_commitAckMarkerHash.next(iter);
+ }
+
+ signal->theData[0] = TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = iter.bucket;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+}
+
+void Dbtc::handleApiFailState(Signal* signal, UintR TapiConnectptr)
+{
+ ApiConnectRecordPtr TlocalApiConnectptr;
+ UintR TfailedApiNode;
+
+ TlocalApiConnectptr.i = TapiConnectptr;
+ ptrCheckGuard(TlocalApiConnectptr, capiConnectFilesize, apiConnectRecord);
+ TfailedApiNode = refToNode(TlocalApiConnectptr.p->ndbapiBlockref);
+ arrGuard(TfailedApiNode, MAX_NODES);
+ capiConnectClosing[TfailedApiNode]--;
+ releaseApiCon(signal, TapiConnectptr);
+ TlocalApiConnectptr.p->apiFailState = ZFALSE;
+ if (capiConnectClosing[TfailedApiNode] == 0) {
+ jam();
+ signal->theData[0] = TfailedApiNode;
+ signal->theData[1] = cownref;
+ sendSignal(capiFailRef, GSN_API_FAILCONF, signal, 2, JBB);
+ }//if
+}//Dbtc::handleApiFailState()
+
+/****************************************************************************
+ * T C S E I Z E R E Q
+ * THE APPLICATION SENDS A REQUEST TO SEIZE A CONNECT RECORD TO CARRY OUT A
+ * TRANSACTION
+ * TC BLOCK TAKE OUT A CONNECT RECORD FROM THE FREE LIST AND ESTABLISHES ALL
+ * NECESSARY CONNECTION BEFORE REPLYING TO THE APPLICATION BLOCK
+ ****************************************************************************/
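+/**
+ * A seize request is refused unless the node is fully started, the node is
+ * starting and the requester is local, the cluster is in single user mode,
+ * or the requester is the designated single-user API node.
+ */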
+void Dbtc::execTCSEIZEREQ(Signal* signal)
+{
+ UintR tapiPointer;
+ BlockReference tapiBlockref; /* SENDER BLOCK REFERENCE*/
+
+ jamEntry();
+ tapiPointer = signal->theData[0]; /* REQUEST SENDERS CONNECT RECORD POINTER*/
+ tapiBlockref = signal->theData[1]; /* SENDERS BLOCK REFERENCE*/
+
+ const NodeState::StartLevel sl =
+ (NodeState::StartLevel)getNodeState().startLevel;
+
+ const NodeId senderNodeId = refToNode(tapiBlockref);
+ const bool local = senderNodeId == getOwnNodeId() || senderNodeId == 0;
+
+ if(!(senderNodeId == getNodeState().getSingleUserApi()) &&
+ !getNodeState().getSingleUserMode()) {
+ if(!(sl==NodeState::SL_SINGLEUSER &&
+ senderNodeId == getNodeState().getSingleUserApi())) {
+ if (!(sl == NodeState::SL_STARTED ||
+ (sl == NodeState::SL_STARTING && local == true))) {
+ jam();
+
+ Uint32 errCode;
+ if(!(sl == NodeState::SL_SINGLEUSER && local))
+ {
+ switch(sl){
+ case NodeState::SL_STARTING:
+ errCode = ZSYSTEM_NOT_STARTED_ERROR;
+ break;
+ case NodeState::SL_STOPPING_1:
+ case NodeState::SL_STOPPING_2:
+ case NodeState::SL_STOPPING_3:
+ case NodeState::SL_STOPPING_4:
+ if(getNodeState().stopping.systemShutdown)
+ errCode = ZCLUSTER_SHUTDOWN_IN_PROGRESS;
+ else
+ errCode = ZNODE_SHUTDOWN_IN_PROGRESS;
+ break;
+ case NodeState::SL_SINGLEUSER:
+ errCode = ZCLUSTER_IN_SINGLEUSER_MODE;
+ break;
+ default:
+ errCode = ZWRONG_STATE;
+ break;
+ }
+ signal->theData[0] = tapiPointer;
+ signal->theData[1] = errCode;
+ sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB);
+ return;
+ }//if (!(sl == SL_SINGLEUSER))
+ } //if
+ }
+ }
+
+ seizeApiConnect(signal);
+ if (terrorCode == ZOK) {
+ jam();
+ apiConnectptr.p->ndbapiConnect = tapiPointer;
+ apiConnectptr.p->ndbapiBlockref = tapiBlockref;
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = apiConnectptr.i;
+ sendSignal(tapiBlockref, GSN_TCSEIZECONF, signal, 2, JBB);
+ return;
+ }
+
+ signal->theData[0] = tapiPointer;
+ signal->theData[1] = terrorCode;
+ sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB);
+}//Dbtc::execTCSEIZEREQ()
+
+/****************************************************************************/
+/*                       T C R E L E A S E R E Q                            */
+/* REQUEST TO RELEASE A CONNECT RECORD */
+/****************************************************************************/
+void Dbtc::execTCRELEASEREQ(Signal* signal)
+{
+ UintR tapiPointer;
+ BlockReference tapiBlockref; /* SENDER BLOCK REFERENCE*/
+
+ jamEntry();
+ tapiPointer = signal->theData[0]; /* REQUEST SENDERS CONNECT RECORD POINTER*/
+ tapiBlockref = signal->theData[1];/* SENDERS BLOCK REFERENCE*/
+ tuserpointer = signal->theData[2];
+ if (tapiPointer >= capiConnectFilesize) {
+ jam();
+ signal->theData[0] = tuserpointer;
+ signal->theData[1] = ZINVALID_CONNECTION;
+ signal->theData[2] = __LINE__;
+ sendSignal(tapiBlockref, GSN_TCRELEASEREF, signal, 3, JBB);
+ return;
+ } else {
+ jam();
+ apiConnectptr.i = tapiPointer;
+ }//if
+ ptrAss(apiConnectptr, apiConnectRecord);
+ if (apiConnectptr.p->apiConnectstate == CS_DISCONNECTED) {
+ jam();
+ signal->theData[0] = tuserpointer;
+ sendSignal(tapiBlockref, GSN_TCRELEASECONF, signal, 1, JBB);
+ } else {
+ if (tapiBlockref == apiConnectptr.p->ndbapiBlockref) {
+ if (apiConnectptr.p->apiConnectstate == CS_CONNECTED ||
+ (apiConnectptr.p->apiConnectstate == CS_ABORTING &&
+ apiConnectptr.p->abortState == AS_IDLE) ||
+ (apiConnectptr.p->apiConnectstate == CS_STARTED &&
+ apiConnectptr.p->firstTcConnect == RNIL))
+ {
+ jam(); /* JUST REPLY OK */
+ releaseApiCon(signal, apiConnectptr.i);
+ signal->theData[0] = tuserpointer;
+ sendSignal(tapiBlockref,
+ GSN_TCRELEASECONF, signal, 1, JBB);
+ } else {
+ jam();
+ signal->theData[0] = tuserpointer;
+ signal->theData[1] = ZINVALID_CONNECTION;
+ signal->theData[2] = __LINE__;
+ signal->theData[3] = apiConnectptr.p->apiConnectstate;
+ sendSignal(tapiBlockref,
+ GSN_TCRELEASEREF, signal, 4, JBB);
+ }
+ } else {
+ jam();
+ signal->theData[0] = tuserpointer;
+ signal->theData[1] = ZINVALID_CONNECTION;
+ signal->theData[2] = __LINE__;
+ signal->theData[3] = tapiBlockref;
+ signal->theData[4] = apiConnectptr.p->ndbapiBlockref;
+ sendSignal(tapiBlockref, GSN_TCRELEASEREF, signal, 5, JBB);
+ }//if
+ }//if
+}//Dbtc::execTCRELEASEREQ()
+
+/****************************************************************************/
+// Error Handling for TCKEYREQ messages
+/****************************************************************************/
+void Dbtc::signalErrorRefuseLab(Signal* signal)
+{
+ ptrGuard(apiConnectptr);
+ if (apiConnectptr.p->apiConnectstate != CS_DISCONNECTED) {
+ jam();
+ apiConnectptr.p->abortState = AS_IDLE;
+ apiConnectptr.p->apiConnectstate = CS_ABORTING;
+ }//if
+ sendSignalErrorRefuseLab(signal);
+}//Dbtc::signalErrorRefuseLab()
+
+void Dbtc::sendSignalErrorRefuseLab(Signal* signal)
+{
+ ndbassert(false);
+ ptrGuard(apiConnectptr);
+ if (apiConnectptr.p->apiConnectstate != CS_DISCONNECTED) {
+ jam();
+ ndbrequire(false);
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = signal->theData[ttransid_ptr];
+ signal->theData[2] = signal->theData[ttransid_ptr + 1];
+ signal->theData[3] = ZSIGNAL_ERROR;
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREP,
+ signal, 4, JBB);
+ }
+}//Dbtc::sendSignalErrorRefuseLab()
+
+void Dbtc::abortBeginErrorLab(Signal* signal)
+{
+ apiConnectptr.p->transid[0] = signal->theData[ttransid_ptr];
+ apiConnectptr.p->transid[1] = signal->theData[ttransid_ptr + 1];
+ abortErrorLab(signal);
+}//Dbtc::abortBeginErrorLab()
+
+void Dbtc::printState(Signal* signal, int place)
+{
+#ifdef VM_TRACE // Change to if 0 to disable these printouts
+ ndbout << "-- Dbtc::printState -- " << endl;
+ ndbout << "Received from place = " << place
+ << " apiConnectptr.i = " << apiConnectptr.i
+ << " apiConnectstate = " << apiConnectptr.p->apiConnectstate << endl;
+ ndbout << "ctcTimer = " << ctcTimer
+ << " ndbapiBlockref = " << hex <<apiConnectptr.p->ndbapiBlockref
+ << " Transid = " << apiConnectptr.p->transid[0]
+ << " " << apiConnectptr.p->transid[1] << endl;
+ ndbout << " apiTimer = " << getApiConTimer(apiConnectptr.i)
+ << " counter = " << apiConnectptr.p->counter
+ << " lqhkeyconfrec = " << apiConnectptr.p->lqhkeyconfrec
+ << " lqhkeyreqrec = " << apiConnectptr.p->lqhkeyreqrec << endl;
+ ndbout << "abortState = " << apiConnectptr.p->abortState
+ << " apiScanRec = " << apiConnectptr.p->apiScanRec
+ << " returncode = " << apiConnectptr.p->returncode << endl;
+ ndbout << "tckeyrec = " << apiConnectptr.p->tckeyrec
+ << " returnsignal = " << apiConnectptr.p->returnsignal
+ << " apiFailState = " << apiConnectptr.p->apiFailState << endl;
+ if (apiConnectptr.p->cachePtr != RNIL) {
+ jam();
+ CacheRecord *localCacheRecord = cacheRecord;
+ UintR TcacheFilesize = ccacheFilesize;
+ UintR TcachePtr = apiConnectptr.p->cachePtr;
+ if (TcachePtr < TcacheFilesize) {
+ jam();
+ CacheRecord * const regCachePtr = &localCacheRecord[TcachePtr];
+ ndbout << "currReclenAi = " << regCachePtr->currReclenAi
+ << " attrlength = " << regCachePtr->attrlength
+ << " tableref = " << regCachePtr->tableref
+ << " keylen = " << regCachePtr->keylen << endl;
+ } else {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ }//if
+#endif
+ return;
+}//Dbtc::printState()
+
+void
+Dbtc::TCKEY_abort(Signal* signal, int place)
+{
+ switch (place) {
+ case 0:
+ jam();
+ terrorCode = ZSTATE_ERROR;
+ apiConnectptr.p->firstTcConnect = RNIL;
+ printState(signal, 4);
+ abortBeginErrorLab(signal);
+ return;
+ case 1:
+ jam();
+ printState(signal, 3);
+ sendSignalErrorRefuseLab(signal);
+ return;
+ case 2:{
+ printState(signal, 6);
+ const TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0];
+ const Uint32 t1 = tcKeyReq->transId1;
+ const Uint32 t2 = tcKeyReq->transId2;
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = t1;
+ signal->theData[2] = t2;
+ signal->theData[3] = ZABORT_ERROR;
+ ndbrequire(false);
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREP,
+ signal, 4, JBB);
+ return;
+ }
+ case 3:
+ jam();
+ printState(signal, 7);
+ noFreeConnectionErrorLab(signal);
+ return;
+ case 4:
+ jam();
+ terrorCode = ZERO_KEYLEN_ERROR;
+ releaseAtErrorLab(signal);
+ return;
+ case 5:
+ jam();
+ terrorCode = ZNO_AI_WITH_UPDATE;
+ releaseAtErrorLab(signal);
+ return;
+ case 6:
+ jam();
+ warningHandlerLab(signal);
+ return;
+
+ case 7:
+ jam();
+ tabStateErrorLab(signal);
+ return;
+
+ case 8:
+ jam();
+ wrongSchemaVersionErrorLab(signal);
+ return;
+
+ case 9:
+ jam();
+ terrorCode = ZSTATE_ERROR;
+ releaseAtErrorLab(signal);
+ return;
+
+ case 10:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 11:
+ jam();
+ terrorCode = ZMORE_AI_IN_TCKEYREQ_ERROR;
+ releaseAtErrorLab(signal);
+ return;
+
+ case 12:
+ jam();
+ terrorCode = ZSIMPLE_READ_WITHOUT_AI;
+ releaseAtErrorLab(signal);
+ return;
+
+ case 13:
+ jam();
+ switch (tcConnectptr.p->tcConnectstate) {
+ case OS_WAIT_KEYINFO:
+ jam();
+ printState(signal, 8);
+ terrorCode = ZSTATE_ERROR;
+ abortErrorLab(signal);
+ return;
+ default:
+ jam();
+ /********************************************************************/
+ /* MISMATCH BETWEEN STATE ON API CONNECTION AND THIS */
+ /* PARTICULAR TC CONNECT RECORD. THIS MUST BE CAUSED BY NDB */
+ /* INTERNAL ERROR. */
+ /********************************************************************/
+ systemErrorLab(signal);
+ return;
+ }//switch
+ return;
+
+ case 15:
+ jam();
+ terrorCode = ZSCAN_NODE_ERROR;
+ releaseAtErrorLab(signal);
+ return;
+
+ case 16:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 17:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 18:
+ jam();
+ warningHandlerLab(signal);
+ return;
+
+ case 19:
+ jam();
+ return;
+
+ case 20:
+ jam();
+ warningHandlerLab(signal);
+ return;
+
+ case 21:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 22:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 23:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 24:
+ jam();
+ seizeAttrbuferrorLab(signal);
+ return;
+
+ case 25:
+ jam();
+ warningHandlerLab(signal);
+ return;
+
+ case 26:
+ jam();
+ return;
+
+ case 27:
+ systemErrorLab(signal);
+ jam();
+ return;
+
+ case 28:
+ jam();
+ // NOT USED
+ return;
+
+ case 29:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 30:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 31:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 32:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 33:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 34:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 35:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 36:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 37:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 38:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 39:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 40:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 41:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 42:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 43:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 44:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 45:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 46:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 47:
+ jam();
+ terrorCode = apiConnectptr.p->returncode;
+ releaseAtErrorLab(signal);
+ return;
+
+ case 48:
+ jam();
+ terrorCode = ZCOMMIT_TYPE_ERROR;
+ releaseAtErrorLab(signal);
+ return;
+
+ case 49:
+ jam();
+ abortErrorLab(signal);
+ return;
+
+ case 50:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 51:
+ jam();
+ abortErrorLab(signal);
+ return;
+
+ case 52:
+ jam();
+ abortErrorLab(signal);
+ return;
+
+ case 53:
+ jam();
+ abortErrorLab(signal);
+ return;
+
+ case 54:
+ jam();
+ abortErrorLab(signal);
+ return;
+
+ case 55:
+ jam();
+ printState(signal, 5);
+ sendSignalErrorRefuseLab(signal);
+ return;
+
+ case 56:{
+ jam();
+ terrorCode = ZNO_FREE_TC_MARKER;
+ abortErrorLab(signal);
+ return;
+ }
+ case 57:{
+ jam();
+ /**
+ * Initialize object before starting error handling
+ */
+ initApiConnectRec(signal, apiConnectptr.p, true);
+ switch(getNodeState().startLevel){
+ case NodeState::SL_STOPPING_2:
+ case NodeState::SL_STOPPING_3:
+ case NodeState::SL_STOPPING_4:
+ if(getNodeState().stopping.systemShutdown)
+ terrorCode = ZCLUSTER_SHUTDOWN_IN_PROGRESS;
+ else
+ terrorCode = ZNODE_SHUTDOWN_IN_PROGRESS;
+ break;
+ case NodeState::SL_SINGLEUSER:
+ terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE;
+ break;
+ default:
+ terrorCode = ZWRONG_STATE;
+ break;
+ }
+ abortErrorLab(signal);
+ return;
+ }
+
+ case 58:{
+ jam();
+ releaseAtErrorLab(signal);
+ return;
+ }
+
+ case 59:{
+ jam();
+ terrorCode = ZABORTINPROGRESS;
+ abortErrorLab(signal);
+ return;
+ }
+
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//switch
+}
+
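+/**
+ * KEYINFO carries the key words that did not fit in the TCKEYREQ signal.
+ * They are buffered four words at a time in databuf records until the full
+ * key (keylen words) has been received, both for key operations waiting in
+ * OS_WAIT_KEYINFO and for scans waiting in OS_WAIT_SCAN.
+ */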
+void Dbtc::execKEYINFO(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+ jamEntry();
+ apiConnectptr.i = signal->theData[0];
+ tmaxData = 20;
+ if (apiConnectptr.i >= capiConnectFilesize) {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//if
+ ptrAss(apiConnectptr, apiConnectRecord);
+ ttransid_ptr = 1;
+ compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
+ compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ jam();
+ printState(signal, 10);
+ sendSignalErrorRefuseLab(signal);
+ return;
+ }//if
+ switch (apiConnectptr.p->apiConnectstate) {
+ case CS_RECEIVING:
+ case CS_REC_COMMITTING:
+ case CS_START_SCAN:
+ jam();
+ /*empty*/;
+ break;
+ /* OK */
+ case CS_ABORTING:
+ jam();
+ return; /* IGNORE */
+ case CS_CONNECTED:
+ jam();
+ /****************************************************************>*/
+ /* MOST LIKELY CAUSED BY A MISSED SIGNAL. SEND REFUSE AND */
+ /* SET STATE TO ABORTING. */
+ /****************************************************************>*/
+ printState(signal, 11);
+ signalErrorRefuseLab(signal);
+ return;
+ case CS_STARTED:
+ jam();
+ /****************************************************************>*/
+ /* MOST LIKELY CAUSED BY A MISSED SIGNAL. SEND REFUSE AND */
+ /* SET STATE TO ABORTING. SINCE A TRANSACTION WAS STARTED */
+ /* WE ALSO NEED TO ABORT THIS TRANSACTION. */
+ /****************************************************************>*/
+ terrorCode = ZSIGNAL_ERROR;
+ printState(signal, 2);
+ abortErrorLab(signal);
+ return;
+ default:
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//switch
+
+ CacheRecord *localCacheRecord = cacheRecord;
+ UintR TcacheFilesize = ccacheFilesize;
+ UintR TcachePtr = apiConnectptr.p->cachePtr;
+ UintR TtcTimer = ctcTimer;
+ CacheRecord * const regCachePtr = &localCacheRecord[TcachePtr];
+ if (TcachePtr >= TcacheFilesize) {
+ TCKEY_abort(signal, 42);
+ return;
+ }//if
+ setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
+ cachePtr.i = TcachePtr;
+ cachePtr.p = regCachePtr;
+
+ tcConnectptr.i = apiConnectptr.p->lastTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ switch (tcConnectptr.p->tcConnectstate) {
+ case OS_WAIT_KEYINFO:
+ jam();
+ tckeyreq020Lab(signal);
+ return;
+ case OS_WAIT_SCAN:
+ break;
+ default:
+ jam();
+ terrorCode = ZSTATE_ERROR;
+ abortErrorLab(signal);
+ return;
+ }//switch
+
+ UintR TdataPos = 0;
+ UintR TkeyLen = regCachePtr->keylen;
+ UintR Tlen = regCachePtr->save1;
+
+ do {
+ if (cfirstfreeDatabuf == RNIL) {
+ jam();
+ seizeDatabuferrorLab(signal);
+ return;
+ }//if
+ linkKeybuf(signal);
+ arrGuard(TdataPos, 19);
+ databufptr.p->data[0] = signal->theData[TdataPos + 3];
+ databufptr.p->data[1] = signal->theData[TdataPos + 4];
+ databufptr.p->data[2] = signal->theData[TdataPos + 5];
+ databufptr.p->data[3] = signal->theData[TdataPos + 6];
+ Tlen = Tlen + 4;
+ TdataPos = TdataPos + 4;
+ if (Tlen < TkeyLen) {
+ jam();
+ if (TdataPos >= tmaxData) {
+ jam();
+ /*----------------------------------------------------*/
+ /** EXIT AND WAIT FOR SIGNAL KEYINFO OR KEYINFO9 **/
+ /** WHEN EITHER OF THE SIGNALS IS RECEIVED A JUMP **/
+ /** TO LABEL "KEYINFO_LABEL" IS DONE. THEN THE **/
+ /** PROGRAM RETURNS TO LABEL TCKEYREQ020 **/
+ /*----------------------------------------------------*/
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ regCachePtr->save1 = Tlen;
+ return;
+ }//if
+ } else {
+ jam();
+ return;
+ }//if
+ } while (1);
+ return;
+}//Dbtc::execKEYINFO()
+
+/*---------------------------------------------------------------------------*/
+/* */
+/* MORE THAN FOUR WORDS OF KEY DATA. WE NEED TO PACK THIS IN KEYINFO SIGNALS.*/
+/* WE WILL ALWAYS PACK 4 WORDS AT A TIME. */
+/*---------------------------------------------------------------------------*/
+void Dbtc::packKeyData000Lab(Signal* signal,
+ BlockReference TBRef,
+ Uint32 totalLen)
+{
+ CacheRecord * const regCachePtr = cachePtr.p;
+
+ jam();
+ Uint32 len = 0;
+ databufptr.i = regCachePtr->firstKeybuf;
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ Uint32 * dst = signal->theData+3;
+ ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
+
+ do {
+ jam();
+ databufptr.i = databufptr.p->nextDatabuf;
+ dst[len + 0] = databufptr.p->data[0];
+ dst[len + 1] = databufptr.p->data[1];
+ dst[len + 2] = databufptr.p->data[2];
+ dst[len + 3] = databufptr.p->data[3];
+ len += 4;
+ if (totalLen <= 4) {
+ jam();
+ /*---------------------------------------------------------------------*/
+      /*       THE LAST PACK OF KEY DATA IS BEING SENT.                       */
+      /*---------------------------------------------------------------------*/
+      /*       THERE WAS UNSENT INFORMATION, SEND IT.                         */
+ /*---------------------------------------------------------------------*/
+ sendSignal(TBRef, GSN_KEYINFO, signal, 3 + len, JBB);
+ return;
+ } else if(len == KeyInfo::DataLength){
+ jam();
+ len = 0;
+ sendSignal(TBRef, GSN_KEYINFO, signal, 3 + KeyInfo::DataLength, JBB);
+ }
+ totalLen -= 4;
+ ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
+ } while (1);
+}//Dbtc::packKeyData000Lab()
+
+void Dbtc::tckeyreq020Lab(Signal* signal)
+{
+ CacheRecord * const regCachePtr = cachePtr.p;
+ UintR TdataPos = 0;
+ UintR TkeyLen = regCachePtr->keylen;
+ UintR Tlen = regCachePtr->save1;
+
+ do {
+ if (cfirstfreeDatabuf == RNIL) {
+ jam();
+ seizeDatabuferrorLab(signal);
+ return;
+ }//if
+ linkKeybuf(signal);
+ arrGuard(TdataPos, 19);
+ databufptr.p->data[0] = signal->theData[TdataPos + 3];
+ databufptr.p->data[1] = signal->theData[TdataPos + 4];
+ databufptr.p->data[2] = signal->theData[TdataPos + 5];
+ databufptr.p->data[3] = signal->theData[TdataPos + 6];
+ Tlen = Tlen + 4;
+ TdataPos = TdataPos + 4;
+ if (Tlen < TkeyLen) {
+ jam();
+ if (TdataPos >= tmaxData) {
+ jam();
+ /*----------------------------------------------------*/
+ /** EXIT AND WAIT FOR SIGNAL KEYINFO OR KEYINFO9 **/
+ /** WHEN EITHER OF THE SIGNALS IS RECEIVED A JUMP **/
+ /** TO LABEL "KEYINFO_LABEL" IS DONE. THEN THE **/
+ /** PROGRAM RETURNS TO LABEL TCKEYREQ020 **/
+ /*----------------------------------------------------*/
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ regCachePtr->save1 = Tlen;
+ tcConnectptr.p->tcConnectstate = OS_WAIT_KEYINFO;
+ return;
+ }//if
+ } else {
+ jam();
+ tckeyreq050Lab(signal);
+ return;
+ }//if
+ } while (1);
+ return;
+}//Dbtc::tckeyreq020Lab()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SAVE ATTRIBUTE INFORMATION IN OPERATION RECORD ------- */
+/* ------------------------------------------------------------------------- */
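+/**
+ * Take one attrbuf from the free list, link it last into the cache record's
+ * attrinfo list and copy the data words of the signal (its length minus the
+ * three header words, at most 22 words) into it.
+ */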
+void Dbtc::saveAttrbuf(Signal* signal)
+{
+ CacheRecord * const regCachePtr = cachePtr.p;
+ UintR TfirstfreeAttrbuf = cfirstfreeAttrbuf;
+ UintR TattrbufFilesize = cattrbufFilesize;
+ UintR TTcfirstAttrbuf = regCachePtr->firstAttrbuf;
+ UintR Tlen = signal->length() - 3;
+ AttrbufRecord *localAttrbufRecord = attrbufRecord;
+
+ AttrbufRecord * const regAttrPtr = &localAttrbufRecord[TfirstfreeAttrbuf];
+ if (TfirstfreeAttrbuf >= TattrbufFilesize) {
+ TCKEY_abort(signal, 21);
+ return;
+ }//if
+ UintR Tnext = regAttrPtr->attrbuf[ZINBUF_NEXT];
+ if (TTcfirstAttrbuf == RNIL) {
+ jam();
+ regCachePtr->firstAttrbuf = TfirstfreeAttrbuf;
+ } else {
+ AttrbufRecordPtr saAttrbufptr;
+
+ saAttrbufptr.i = regCachePtr->lastAttrbuf;
+ jam();
+ if (saAttrbufptr.i >= TattrbufFilesize) {
+ TCKEY_abort(signal, 22);
+ return;
+ }//if
+ saAttrbufptr.p = &localAttrbufRecord[saAttrbufptr.i];
+ saAttrbufptr.p->attrbuf[ZINBUF_NEXT] = TfirstfreeAttrbuf;
+ }//if
+
+ cfirstfreeAttrbuf = Tnext;
+ regAttrPtr->attrbuf[ZINBUF_NEXT] = RNIL;
+ regCachePtr->lastAttrbuf = TfirstfreeAttrbuf;
+ regAttrPtr->attrbuf[ZINBUF_DATA_LEN] = Tlen;
+
+ UintR Tdata1 = signal->theData[3];
+ UintR Tdata2 = signal->theData[4];
+ UintR Tdata3 = signal->theData[5];
+ UintR Tdata4 = signal->theData[6];
+ UintR Tdata5 = signal->theData[7];
+ UintR Tdata6 = signal->theData[8];
+ UintR Tdata7 = signal->theData[9];
+ UintR Tdata8 = signal->theData[10];
+
+ regAttrPtr->attrbuf[0] = Tdata1;
+ regAttrPtr->attrbuf[1] = Tdata2;
+ regAttrPtr->attrbuf[2] = Tdata3;
+ regAttrPtr->attrbuf[3] = Tdata4;
+ regAttrPtr->attrbuf[4] = Tdata5;
+ regAttrPtr->attrbuf[5] = Tdata6;
+ regAttrPtr->attrbuf[6] = Tdata7;
+ regAttrPtr->attrbuf[7] = Tdata8;
+
+ if (Tlen > 8) {
+
+ Tdata1 = signal->theData[11];
+ Tdata2 = signal->theData[12];
+ Tdata3 = signal->theData[13];
+ Tdata4 = signal->theData[14];
+ Tdata5 = signal->theData[15];
+ Tdata6 = signal->theData[16];
+ Tdata7 = signal->theData[17];
+
+ regAttrPtr->attrbuf[8] = Tdata1;
+ regAttrPtr->attrbuf[9] = Tdata2;
+ regAttrPtr->attrbuf[10] = Tdata3;
+ regAttrPtr->attrbuf[11] = Tdata4;
+ regAttrPtr->attrbuf[12] = Tdata5;
+ regAttrPtr->attrbuf[13] = Tdata6;
+ regAttrPtr->attrbuf[14] = Tdata7;
+ jam();
+ if (Tlen > 15) {
+
+ Tdata1 = signal->theData[18];
+ Tdata2 = signal->theData[19];
+ Tdata3 = signal->theData[20];
+ Tdata4 = signal->theData[21];
+ Tdata5 = signal->theData[22];
+ Tdata6 = signal->theData[23];
+ Tdata7 = signal->theData[24];
+
+ jam();
+ regAttrPtr->attrbuf[15] = Tdata1;
+ regAttrPtr->attrbuf[16] = Tdata2;
+ regAttrPtr->attrbuf[17] = Tdata3;
+ regAttrPtr->attrbuf[18] = Tdata4;
+ regAttrPtr->attrbuf[19] = Tdata5;
+ regAttrPtr->attrbuf[20] = Tdata6;
+ regAttrPtr->attrbuf[21] = Tdata7;
+ }//if
+ }//if
+}//Dbtc::saveAttrbuf()
+
+void Dbtc::execATTRINFO(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+ UintR Tdata1 = signal->theData[0];
+ UintR Tlength = signal->length();
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+ jamEntry();
+ apiConnectptr.i = Tdata1;
+ ttransid_ptr = 1;
+ if (Tdata1 >= TapiConnectFilesize) {
+ DEBUG("Drop ATTRINFO, wrong apiConnectptr");
+ TCKEY_abort(signal, 18);
+ return;
+ }//if
+
+ UintR Tdata2 = signal->theData[1];
+ UintR Tdata3 = signal->theData[2];
+ ApiConnectRecord * const regApiPtr = &localApiConnectRecord[Tdata1];
+ compare_transid1 = regApiPtr->transid[0] ^ Tdata2;
+ compare_transid2 = regApiPtr->transid[1] ^ Tdata3;
+ apiConnectptr.p = regApiPtr;
+ compare_transid1 = compare_transid1 | compare_transid2;
+
+ if (compare_transid1 != 0) {
+ DEBUG("Drop ATTRINFO, wrong transid, lenght="<<Tlength
+ << " transid("<<hex<<Tdata2<<", "<<Tdata3);
+ TCKEY_abort(signal, 19);
+ return;
+ }//if
+ if (Tlength < 4) {
+ DEBUG("Drop ATTRINFO, wrong length = " << Tlength);
+ TCKEY_abort(signal, 20);
+ return;
+ }
+ Tlength -= 3;
+ UintR TcompREC_COMMIT = (regApiPtr->apiConnectstate == CS_REC_COMMITTING);
+ UintR TcompRECEIVING = (regApiPtr->apiConnectstate == CS_RECEIVING);
+ UintR TcompBOTH = TcompREC_COMMIT | TcompRECEIVING;
+
+ if (TcompBOTH) {
+ jam();
+ if (ERROR_INSERTED(8015)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ if (ERROR_INSERTED(8016)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ CacheRecord *localCacheRecord = cacheRecord;
+ UintR TcacheFilesize = ccacheFilesize;
+ UintR TcachePtr = regApiPtr->cachePtr;
+ UintR TtcTimer = ctcTimer;
+ CacheRecord * const regCachePtr = &localCacheRecord[TcachePtr];
+ if (TcachePtr >= TcacheFilesize) {
+ TCKEY_abort(signal, 43);
+ return;
+ }//if
+ UintR TfirstfreeAttrbuf = cfirstfreeAttrbuf;
+ UintR TcurrReclenAi = regCachePtr->currReclenAi;
+ UintR TattrLen = regCachePtr->attrlength;
+
+ setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
+ cachePtr.i = TcachePtr;
+ cachePtr.p = regCachePtr;
+ TcurrReclenAi = TcurrReclenAi + Tlength;
+ regCachePtr->currReclenAi = TcurrReclenAi;
+ int TattrlengthRemain = TattrLen - TcurrReclenAi;
+
+ if (TfirstfreeAttrbuf == RNIL) {
+ DEBUG("No more attrinfo buffers");
+ TCKEY_abort(signal, 24);
+ return;
+ }//if
+ saveAttrbuf(signal);
+ if (TattrlengthRemain == 0) {
+ /****************************************************************>*/
+      /* HERE WE HAVE FOUND THAT THE LAST SIGNAL BELONGING TO THIS       */
+      /* OPERATION HAS BEEN RECEIVED. THIS MEANS THAT THE API CONNECT    */
+      /* RECORD CAN NOW ACCEPT FURTHER OPERATIONS. HOWEVER, IF PREPARE   */
+      /* OR COMMIT HAS BEEN RECEIVED, NO FURTHER OPERATIONS ARE ALLOWED. */
+ /****************************************************************>*/
+ UintR TlastConnect = regApiPtr->lastTcConnect;
+ if (TcompRECEIVING) {
+ jam();
+ regApiPtr->apiConnectstate = CS_STARTED;
+ } else {
+ jam();
+ regApiPtr->apiConnectstate = CS_START_COMMITTING;
+ }//if
+ tcConnectptr.i = TlastConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ attrinfoDihReceivedLab(signal);
+ } else if (TattrlengthRemain < 0) {
+ jam();
+ DEBUG("ATTRINFO wrong total length="<<Tlength
+ <<", TattrlengthRemain="<<TattrlengthRemain
+ <<", TattrLen="<<TattrLen
+ <<", TcurrReclenAi="<<TcurrReclenAi);
+ tcConnectptr.i = regApiPtr->lastTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ aiErrorLab(signal);
+ }//if
+ return;
+ } else if (regApiPtr->apiConnectstate == CS_START_SCAN) {
+ jam();
+ scanAttrinfoLab(signal, Tlength);
+ return;
+ } else {
+ switch (regApiPtr->apiConnectstate) {
+ case CS_ABORTING:
+ jam();
+ /* JUST IGNORE THE SIGNAL*/
+ // DEBUG("Drop ATTRINFO, CS_ABORTING");
+ return;
+ case CS_CONNECTED:
+ jam();
+ /* MOST LIKELY CAUSED BY A MISSED SIGNAL.*/
+ // DEBUG("Drop ATTRINFO, CS_CONNECTED");
+ return;
+ case CS_STARTED:
+ jam();
+ /****************************************************************>*/
+ /* MOST LIKELY CAUSED BY A MISSED SIGNAL. SEND REFUSE AND */
+ /* SET STATE TO ABORTING. SINCE A TRANSACTION WAS STARTED */
+ /* WE ALSO NEED TO ABORT THIS TRANSACTION. */
+ /****************************************************************>*/
+ terrorCode = ZSIGNAL_ERROR;
+ printState(signal, 1);
+ abortErrorLab(signal);
+ return;
+ default:
+ jam();
+ /****************************************************************>*/
+ /* SIGNAL RECEIVED IN AN UNEXPECTED STATE. WE IGNORE SIGNAL */
+ /* SINCE WE DO NOT REALLY KNOW WHERE THE ERROR OCCURRED. */
+ /****************************************************************>*/
+ DEBUG("Drop ATTRINFO, illegal state="<<regApiPtr->apiConnectstate);
+ printState(signal, 9);
+ return;
+ }//switch
+ }//if
+}//Dbtc::execATTRINFO()
+
+/* *********************************************************************>> */
+/* */
+/* MODULE: HASH MODULE */
+/* DESCRIPTION: CONTAINS THE HASH VALUE CALCULATION */
+/* *********************************************************************> */
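+/**
+ * The hash is an MD5 hash over the (possibly charset-normalised) key.  The
+ * first word of the result becomes thashValue; the second becomes
+ * tdistrHashValue, unless the request supplied an explicit distribution
+ * key, in which case that value is used for distribution instead.
+ */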
+void Dbtc::hash(Signal* signal)
+{
+ DatabufRecordPtr locDatabufptr;
+ UintR ti;
+ UintR Tdata0;
+ UintR Tdata1;
+ UintR Tdata2;
+ UintR Tdata3;
+ UintR* Tdata32;
+
+ CacheRecord * const regCachePtr = cachePtr.p;
+ Tdata32 = signal->theData;
+
+ Tdata0 = regCachePtr->keydata[0];
+ Tdata1 = regCachePtr->keydata[1];
+ Tdata2 = regCachePtr->keydata[2];
+ Tdata3 = regCachePtr->keydata[3];
+ Tdata32[0] = Tdata0;
+ Tdata32[1] = Tdata1;
+ Tdata32[2] = Tdata2;
+ Tdata32[3] = Tdata3;
+ if (regCachePtr->keylen > 4) {
+ locDatabufptr.i = regCachePtr->firstKeybuf;
+ ti = 4;
+ while (locDatabufptr.i != RNIL) {
+ ptrCheckGuard(locDatabufptr, cdatabufFilesize, databufRecord);
+ Tdata0 = locDatabufptr.p->data[0];
+ Tdata1 = locDatabufptr.p->data[1];
+ Tdata2 = locDatabufptr.p->data[2];
+ Tdata3 = locDatabufptr.p->data[3];
+ Tdata32[ti ] = Tdata0;
+ Tdata32[ti + 1] = Tdata1;
+ Tdata32[ti + 2] = Tdata2;
+ Tdata32[ti + 3] = Tdata3;
+ locDatabufptr.i = locDatabufptr.p->nextDatabuf;
+ ti += 4;
+ }//while
+ }//if
+
+ UintR keylen = (UintR)regCachePtr->keylen;
+ Uint32 distKey = regCachePtr->distributionKeyIndicator;
+
+ Uint32 tmp[4];
+ if(!regCachePtr->m_special_hash)
+ {
+ md5_hash(tmp, (Uint64*)&Tdata32[0], keylen);
+ }
+ else
+ {
+ handle_special_hash(tmp, Tdata32, keylen, regCachePtr->tableref, !distKey);
+ }
+
+ thashValue = tmp[0];
+ if (distKey){
+ jam();
+ tdistrHashValue = regCachePtr->distributionKey;
+ } else {
+ jam();
+ tdistrHashValue = tmp[1];
+ }//if
+}//Dbtc::hash()
+
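+/**
+ * When the key contains character attributes, each such key part is first
+ * normalised with strnxfrm for its charset so that keys that compare equal
+ * also hash equally.  If the table has distribution keys, a second hash is
+ * computed over the distribution-key parts only and replaces dstHash[1].
+ */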
+bool
+Dbtc::handle_special_hash(Uint32 dstHash[4], Uint32* src, Uint32 srcLen,
+ Uint32 tabPtrI,
+ bool distr)
+{
+ Uint64 Tmp[MAX_KEY_SIZE_IN_WORDS * 4 * MAX_XFRM_MULTIPLY];
+ const Uint32 dstSize = sizeof(Tmp) / 4;
+ const TableRecord* tabPtrP = &tableRecord[tabPtrI];
+ const Uint32 noOfKeyAttr = tabPtrP->noOfKeyAttr;
+ Uint32 noOfDistrKeys = tabPtrP->noOfDistrKeys;
+ const bool hasCharAttr = tabPtrP->hasCharAttr;
+
+ Uint32 *dst = (Uint32*)Tmp;
+ Uint32 dstPos = 0;
+ Uint32 srcPos = 0;
+ Uint32 keyPartLen[MAX_ATTRIBUTES_IN_INDEX];
+ if(hasCharAttr){
+ Uint32 i = 0;
+ while (i < noOfKeyAttr) {
+ const TableRecord::KeyAttr& keyAttr = tabPtrP->keyAttr[i];
+
+ Uint32 srcBytes =
+ AttributeDescriptor::getSizeInBytes(keyAttr.attributeDescriptor);
+ Uint32 srcWords = (srcBytes + 3) / 4;
+ Uint32 dstWords = ~0;
+ uchar* dstPtr = (uchar*)&dst[dstPos];
+ const uchar* srcPtr = (const uchar*)&src[srcPos];
+ CHARSET_INFO* cs = keyAttr.charsetInfo;
+
+ if (cs == NULL) {
+ jam();
+ memcpy(dstPtr, srcPtr, srcWords << 2);
+ dstWords = srcWords;
+ } else {
+ jam();
+ Uint32 typeId =
+ AttributeDescriptor::getType(keyAttr.attributeDescriptor);
+ Uint32 lb, len;
+ bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
+ ndbrequire(ok);
+ Uint32 xmul = cs->strxfrm_multiply;
+ if (xmul == 0)
+ xmul = 1;
+ /*
+ * Varchar is really Char. End spaces do not matter. To get
+ * same hash we blank-pad to maximum length via strnxfrm.
+ * TODO use MySQL charset-aware hash function instead
+ */
+ Uint32 dstLen = xmul * (srcBytes - lb);
+ ndbrequire(dstLen <= ((dstSize - dstPos) << 2));
+ int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
+ ndbrequire(n != -1);
+ while ((n & 3) != 0) {
+ dstPtr[n++] = 0;
+ }
+ dstWords = (n >> 2);
+ }
+ dstPos += dstWords;
+ srcPos += srcWords;
+ keyPartLen[i++] = dstWords;
+ }
+ }
+ else
+ {
+ dst = src;
+ dstPos = srcLen;
+ }
+
+ md5_hash(dstHash, (Uint64*)dst, dstPos);
+
+ if(distr && noOfDistrKeys)
+ {
+ jam();
+ src = dst;
+ dstPos = 0;
+ Uint32 i = 0;
+ if(hasCharAttr)
+ {
+ while (i < noOfKeyAttr && noOfDistrKeys)
+ {
+ const TableRecord::KeyAttr& keyAttr = tabPtrP->keyAttr[i];
+ Uint32 len = keyPartLen[i];
+ if(AttributeDescriptor::getDKey(keyAttr.attributeDescriptor))
+ {
+ noOfDistrKeys--;
+ memmove(dst+dstPos, src, len << 2);
+ dstPos += len;
+ }
+ src += len;
+ i++;
+ }
+ }
+ else
+ {
+ while (i < noOfKeyAttr && noOfDistrKeys)
+ {
+ const TableRecord::KeyAttr& keyAttr = tabPtrP->keyAttr[i];
+ Uint32 len =
+ AttributeDescriptor::getSizeInBytes(keyAttr.attributeDescriptor);
+ len = (len + 3) / 4;
+ if(AttributeDescriptor::getDKey(keyAttr.attributeDescriptor))
+ {
+ noOfDistrKeys--;
+ memmove(dst+dstPos, src, len << 2);
+ dstPos += len;
+ }
+ src += len;
+ i++;
+ }
+ }
+ Uint32 tmp[4];
+ md5_hash(tmp, (Uint64*)dst, dstPos);
+ dstHash[1] = tmp[1];
+ }
+ return true; // success
+}
+
+/*
+INIT_API_CONNECT_REC
+---------------------------
+*/
+/* ========================================================================= */
+/* ======= INIT_API_CONNECT_REC ======= */
+/* */
+/* ========================================================================= */
+void Dbtc::initApiConnectRec(Signal* signal,
+ ApiConnectRecord * const regApiPtr,
+ bool releaseIndexOperations)
+{
+ const TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0];
+ UintR TfailureNr = cfailure_nr;
+ UintR TtransCount = c_counters.ctransCount;
+ UintR Ttransid0 = tcKeyReq->transId1;
+ UintR Ttransid1 = tcKeyReq->transId2;
+
+ regApiPtr->m_exec_flag = 0;
+ regApiPtr->returncode = 0;
+ regApiPtr->returnsignal = RS_TCKEYCONF;
+ ndbassert(regApiPtr->firstTcConnect == RNIL);
+ regApiPtr->firstTcConnect = RNIL;
+ regApiPtr->lastTcConnect = RNIL;
+ regApiPtr->globalcheckpointid = 0;
+ regApiPtr->lqhkeyconfrec = 0;
+ regApiPtr->lqhkeyreqrec = 0;
+ regApiPtr->tckeyrec = 0;
+ regApiPtr->tcindxrec = 0;
+ regApiPtr->failureNr = TfailureNr;
+ regApiPtr->transid[0] = Ttransid0;
+ regApiPtr->transid[1] = Ttransid1;
+ regApiPtr->commitAckMarker = RNIL;
+ regApiPtr->buddyPtr = RNIL;
+ regApiPtr->currSavePointId = 0;
+ // Trigger data
+ releaseFiredTriggerData(&regApiPtr->theFiredTriggers);
+ // Index data
+ regApiPtr->indexOpReturn = false;
+ regApiPtr->noIndexOp = 0;
+ if(releaseIndexOperations)
+ releaseAllSeizedIndexOperations(regApiPtr);
+
+ c_counters.ctransCount = TtransCount + 1;
+}//Dbtc::initApiConnectRec()
+
+int
+Dbtc::seizeTcRecord(Signal* signal)
+{
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+ UintR TfirstfreeTcConnect = cfirstfreeTcConnect;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ tcConnectptr.i = TfirstfreeTcConnect;
+ if (TfirstfreeTcConnect >= TtcConnectFilesize) {
+ int place = 3;
+ if (TfirstfreeTcConnect != RNIL) {
+ place = 10;
+ }//if
+ TCKEY_abort(signal, place);
+ return 1;
+ }//if
+ //--------------------------------------------------------------------------
+ // Optimised version of ptrAss(tcConnectptr, tcConnectRecord)
+ //--------------------------------------------------------------------------
+ TcConnectRecord * const regTcPtr =
+ &localTcConnectRecord[TfirstfreeTcConnect];
+
+ UintR TconcurrentOp = c_counters.cconcurrentOp;
+ UintR TlastTcConnect = regApiPtr->lastTcConnect;
+ UintR TtcConnectptrIndex = tcConnectptr.i;
+ TcConnectRecordPtr tmpTcConnectptr;
+
+ cfirstfreeTcConnect = regTcPtr->nextTcConnect;
+ tcConnectptr.p = regTcPtr;
+
+ c_counters.cconcurrentOp = TconcurrentOp + 1;
+ regTcPtr->prevTcConnect = TlastTcConnect;
+ regTcPtr->nextTcConnect = RNIL;
+ regTcPtr->accumulatingTriggerData.i = RNIL;
+ regTcPtr->accumulatingTriggerData.p = NULL;
+ regTcPtr->noFiredTriggers = 0;
+ regTcPtr->noReceivedTriggers = 0;
+ regTcPtr->triggerExecutionCount = 0;
+ regTcPtr->triggeringOperation = RNIL;
+ regTcPtr->isIndexOp = false;
+ regTcPtr->indexOp = RNIL;
+ regTcPtr->currentIndexId = RNIL;
+
+ regApiPtr->lastTcConnect = TtcConnectptrIndex;
+
+ if (TlastTcConnect == RNIL) {
+ jam();
+ regApiPtr->firstTcConnect = TtcConnectptrIndex;
+ } else {
+ tmpTcConnectptr.i = TlastTcConnect;
+ jam();
+ ptrCheckGuard(tmpTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+ tmpTcConnectptr.p->nextTcConnect = TtcConnectptrIndex;
+ }//if
+ return 0;
+}//Dbtc::seizeTcRecord()
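+
+/*
+  Note on the seize pattern above (editorial, descriptive only): free
+  TC connect records form a singly linked free list headed by
+  cfirstfreeTcConnect; seizing pops the head via regTcPtr->nextTcConnect
+  and appends the record to the API connect record's doubly linked
+  operation list, either as firstTcConnect when the list is empty or
+  behind the previous lastTcConnect otherwise.
+*/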
+
+int
+Dbtc::seizeCacheRecord(Signal* signal)
+{
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ UintR TfirstfreeCacheRec = cfirstfreeCacheRec;
+ UintR TcacheFilesize = ccacheFilesize;
+ CacheRecord *localCacheRecord = cacheRecord;
+ if (TfirstfreeCacheRec >= TcacheFilesize) {
+ TCKEY_abort(signal, 41);
+ return 1;
+ }//if
+ CacheRecord * const regCachePtr = &localCacheRecord[TfirstfreeCacheRec];
+
+ regApiPtr->cachePtr = TfirstfreeCacheRec;
+ cfirstfreeCacheRec = regCachePtr->nextCacheRec;
+ cachePtr.i = TfirstfreeCacheRec;
+ cachePtr.p = regCachePtr;
+
+#ifdef VM_TRACE
+ // This is a good place to check that resources have
+ // been properly released from CacheRecord
+ ndbrequire(regCachePtr->firstKeybuf == RNIL);
+ ndbrequire(regCachePtr->lastKeybuf == RNIL);
+#endif
+ regCachePtr->firstKeybuf = RNIL;
+ regCachePtr->lastKeybuf = RNIL;
+ regCachePtr->firstAttrbuf = RNIL;
+ regCachePtr->lastAttrbuf = RNIL;
+ regCachePtr->currReclenAi = 0;
+ return 0;
+}//Dbtc::seizeCacheRecord()
+
+/*****************************************************************************/
+/* T C K E Y R E Q */
+/* AFTER HAVING ESTABLISHED THE CONNECT, THE APPLICATION BLOCK SENDS AN */
+/* OPERATION REQUEST TO TC. ALL NECESSARY INFORMATION TO CARRY OUT REQUEST */
+/* IS FURNISHED IN PARAMETERS. TC STORES THIS INFORMATION AND ENQUIRES */
+/* FROM DIH ABOUT THE NODES WHICH MAY HAVE THE REQUESTED DATA */
+/*****************************************************************************/
+void Dbtc::execTCKEYREQ(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+ UintR titcLenAiInTckeyreq;
+ UintR TkeyLength;
+ const TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtr();
+ UintR Treqinfo;
+
+ jamEntry();
+ /*-------------------------------------------------------------------------
+ * Common error routines are used for several signals; they need to know
+ * where to find the transaction identifier in the signal.
+ *-------------------------------------------------------------------------*/
+ const UintR TapiIndex = tcKeyReq->apiConnectPtr;
+ const UintR TapiMaxIndex = capiConnectFilesize;
+ const UintR TtabIndex = tcKeyReq->tableId;
+ const UintR TtabMaxIndex = ctabrecFilesize;
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+ ttransid_ptr = 6;
+ apiConnectptr.i = TapiIndex;
+ if (TapiIndex >= TapiMaxIndex) {
+ TCKEY_abort(signal, 6);
+ return;
+ }//if
+ if (TtabIndex >= TtabMaxIndex) {
+ TCKEY_abort(signal, 7);
+ return;
+ }//if
+
+ Treqinfo = tcKeyReq->requestInfo;
+ //--------------------------------------------------------------------------
+ // Optimised version of ptrAss(tabptr, tableRecord)
+ // Optimised version of ptrAss(apiConnectptr, apiConnectRecord)
+ //--------------------------------------------------------------------------
+ ApiConnectRecord * const regApiPtr = &localApiConnectRecord[TapiIndex];
+ apiConnectptr.p = regApiPtr;
+
+ Uint32 TstartFlag = tcKeyReq->getStartFlag(Treqinfo);
+ Uint32 TexecFlag = TcKeyReq::getExecuteFlag(Treqinfo);
+
+ bool isIndexOp = regApiPtr->isIndexOp;
+ bool isIndexOpReturn = regApiPtr->indexOpReturn;
+ regApiPtr->isIndexOp = false; // Reset marker
+ regApiPtr->m_exec_flag |= TexecFlag;
+ switch (regApiPtr->apiConnectstate) {
+ case CS_CONNECTED:{
+ if (TstartFlag == 1 && getAllowStartTransaction() == true){
+ //---------------------------------------------------------------------
+ // Initialise API connect record if transaction is started.
+ //---------------------------------------------------------------------
+ jam();
+ initApiConnectRec(signal, regApiPtr);
+ regApiPtr->m_exec_flag = TexecFlag;
+ } else {
+ if(getAllowStartTransaction() == true){
+ /*------------------------------------------------------------------
+ * WE EXPECTED A START TRANSACTION. SINCE NO OPERATIONS HAVE BEEN
+ * RECEIVED WE INDICATE THIS BY SETTING FIRST_TC_CONNECT TO RNIL TO
+ * ENSURE PROPER OPERATION OF THE COMMON ABORT HANDLING.
+ *-----------------------------------------------------------------*/
+ TCKEY_abort(signal, 0);
+ return;
+ } else {
+ /**
+ * getAllowStartTransaction() == false
+ */
+ TCKEY_abort(signal, 57);
+ return;
+ }//if
+ }
+ }
+ break;
+ case CS_STARTED:
+ if(TstartFlag == 1 && regApiPtr->firstTcConnect == RNIL)
+ {
+ /**
+ * If the last operation in the last transaction was a simple/dirty read
+ * it does not have to be committed or rolled back; hence
+ * the state will be CS_STARTED
+ */
+ jam();
+ initApiConnectRec(signal, regApiPtr);
+ regApiPtr->m_exec_flag = TexecFlag;
+ } else {
+ //----------------------------------------------------------------------
+ // Transaction is started already.
+ // Check that the operation is on the same transaction.
+ //-----------------------------------------------------------------------
+ compare_transid1 = regApiPtr->transid[0] ^ tcKeyReq->transId1;
+ compare_transid2 = regApiPtr->transid[1] ^ tcKeyReq->transId2;
+ jam();
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ TCKEY_abort(signal, 1);
+ return;
+ }//if
+ }
+ break;
+ case CS_ABORTING:
+ if (regApiPtr->abortState == AS_IDLE) {
+ if (TstartFlag == 1) {
+ //--------------------------------------------------------------------
+ // Previous transaction had been aborted and the abort was completed.
+ // It is then OK to start a new transaction again.
+ //--------------------------------------------------------------------
+ jam();
+ initApiConnectRec(signal, regApiPtr);
+ regApiPtr->m_exec_flag = TexecFlag;
+ } else if(TexecFlag) {
+ TCKEY_abort(signal, 59);
+ return;
+ } else {
+ //--------------------------------------------------------------------
+ // The current transaction was aborted successfully.
+ // We will not do anything before we receive an operation
+ // with a start indicator. We will ignore this signal.
+ //--------------------------------------------------------------------
+ jam();
+ DEBUG("Drop TCKEYREQ - apiConnectState=CS_ABORTING, ==AS_IDLE");
+ return;
+ }//if
+ } else {
+ //----------------------------------------------------------------------
+ // Previous transaction is still aborting
+ //----------------------------------------------------------------------
+ jam();
+ if (TstartFlag == 1) {
+ //--------------------------------------------------------------------
+ // If a new transaction tries to start while the old is
+ // still aborting, we will report this to the starting API.
+ //--------------------------------------------------------------------
+ TCKEY_abort(signal, 2);
+ return;
+ } else if(TexecFlag) {
+ TCKEY_abort(signal, 59);
+ return;
+ }
+ //----------------------------------------------------------------------
+ // Ignore signals without start indicator set when aborting transaction.
+ //----------------------------------------------------------------------
+ DEBUG("Drop TCKEYREQ - apiConnectState=CS_ABORTING, !=AS_IDLE");
+ return;
+ }//if
+ break;
+ case CS_START_COMMITTING:
+ jam();
+ if(isIndexOpReturn || TcKeyReq::getExecutingTrigger(Treqinfo)){
+ break;
+ }
+ default:
+ jam();
+ /*----------------------------------------------------------------------
+ * IN THIS CASE THE NDBAPI IS AN UNTRUSTED ENTITY THAT HAS SENT A SIGNAL
+ * WHEN IT WAS NOT EXPECTED TO.
+ * WE MIGHT BE IN A PROCESS TO RECEIVE, PREPARE,
+ * COMMIT OR COMPLETE AND OBVIOUSLY THIS IS NOT A DESIRED EVENT.
+ * WE WILL ALWAYS COMPLETE THE ABORT HANDLING BEFORE WE ALLOW
+ * ANYTHING TO HAPPEN ON THIS CONNECTION AGAIN.
+ * THUS THERE IS NO ACTION FROM THE API THAT CAN SPEED UP THIS PROCESS.
+ *---------------------------------------------------------------------*/
+ TCKEY_abort(signal, 55);
+ return;
+ }//switch
+
+ TableRecordPtr localTabptr;
+ localTabptr.i = TtabIndex;
+ localTabptr.p = &tableRecord[TtabIndex];
+ if (localTabptr.p->checkTable(tcKeyReq->tableSchemaVersion)) {
+ ;
+ } else {
+ /*-----------------------------------------------------------------------*/
+ /* THE API IS WORKING WITH AN OLD SCHEMA VERSION. IT NEEDS REPLACEMENT. */
+ /* COULD ALSO BE THAT THE TABLE IS NOT DEFINED. */
+ /*-----------------------------------------------------------------------*/
+ TCKEY_abort(signal, 8);
+ return;
+ }//if
+
+ //-------------------------------------------------------------------------
+ // Error Insertion for testing purposes. Test to see what happens when no
+ // more TC records are available.
+ //-------------------------------------------------------------------------
+ if (ERROR_INSERTED(8032)) {
+ TCKEY_abort(signal, 3);
+ return;
+ }//if
+
+ if (seizeTcRecord(signal) != 0) {
+ return;
+ }//if
+
+ if (seizeCacheRecord(signal) != 0) {
+ return;
+ }//if
+
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ CacheRecord * const regCachePtr = cachePtr.p;
+
+ /*
+ INIT_TC_CONNECT_REC
+ -------------------------
+ */
+ /* ---------------------------------------------------------------------- */
+ /* ------- INIT OPERATION RECORD WITH SIGNAL DATA AND RNILS ------- */
+ /* */
+ /* ---------------------------------------------------------------------- */
+
+ UintR TapiVersionNo = tcKeyReq->getAPIVersion(tcKeyReq->attrLen);
+ UintR Tlqhkeyreqrec = regApiPtr->lqhkeyreqrec;
+ regApiPtr->lqhkeyreqrec = Tlqhkeyreqrec + 1;
+ regCachePtr->apiVersionNo = TapiVersionNo;
+
+ UintR TapiConnectptrIndex = apiConnectptr.i;
+ UintR TsenderData = tcKeyReq->senderData;
+ UintR TattrLen = tcKeyReq->getAttrinfoLen(tcKeyReq->attrLen);
+ UintR TattrinfoCount = c_counters.cattrinfoCount;
+
+ regTcPtr->apiConnect = TapiConnectptrIndex;
+ regTcPtr->clientData = TsenderData;
+ regTcPtr->commitAckMarker = RNIL;
+ regTcPtr->isIndexOp = isIndexOp;
+ regTcPtr->indexOp = regApiPtr->executingIndexOp;
+ regTcPtr->savePointId = regApiPtr->currSavePointId;
+ regApiPtr->executingIndexOp = RNIL;
+
+ if (TcKeyReq::getExecutingTrigger(Treqinfo)) {
+ // Save the TcOperationPtr for the firing operation
+ regTcPtr->triggeringOperation = TsenderData;
+ }
+
+ if (TexecFlag){
+ Uint32 currSPId = regApiPtr->currSavePointId;
+ regApiPtr->currSavePointId = ++currSPId;
+ }
+
+ regCachePtr->attrlength = TattrLen;
+ c_counters.cattrinfoCount = TattrinfoCount + TattrLen;
+
+ UintR TtabptrIndex = localTabptr.i;
+ UintR TtableSchemaVersion = tcKeyReq->tableSchemaVersion;
+ Uint8 TOperationType = tcKeyReq->getOperationType(Treqinfo);
+ regCachePtr->tableref = TtabptrIndex;
+ regCachePtr->schemaVersion = TtableSchemaVersion;
+ regTcPtr->operation = TOperationType;
+
+ Uint8 TSimpleFlag = tcKeyReq->getSimpleFlag(Treqinfo);
+ Uint8 TDirtyFlag = tcKeyReq->getDirtyFlag(Treqinfo);
+ Uint8 TInterpretedFlag = tcKeyReq->getInterpretedFlag(Treqinfo);
+ Uint8 TDistrKeyFlag = tcKeyReq->getDistributionKeyFlag(Treqinfo);
+ Uint8 TexecuteFlag = TexecFlag;
+
+ regCachePtr->opSimple = TSimpleFlag;
+ regCachePtr->opExec = TInterpretedFlag;
+ regTcPtr->dirtyOp = TDirtyFlag;
+ regCachePtr->distributionKeyIndicator = TDistrKeyFlag;
+
+ //-------------------------------------------------------------
+ // The next step is to read the up to three conditional words.
+ //-------------------------------------------------------------
+ Uint32 TkeyIndex;
+ Uint32* TOptionalDataPtr = (Uint32*)&tcKeyReq->scanInfo;
+ {
+ Uint32 TDistrGHIndex = tcKeyReq->getScanIndFlag(Treqinfo);
+ Uint32 TDistrKeyIndex = TDistrGHIndex;
+
+ Uint32 TscanInfo = tcKeyReq->getTakeOverScanInfo(TOptionalDataPtr[0]);
+
+ regCachePtr->scanTakeOverInd = TDistrGHIndex;
+ regCachePtr->scanInfo = TscanInfo;
+
+ regCachePtr->distributionKey = TOptionalDataPtr[TDistrKeyIndex];
+
+ TkeyIndex = TDistrKeyIndex + TDistrKeyFlag;
+ }
+ Uint32* TkeyDataPtr = &TOptionalDataPtr[TkeyIndex];
+
+ UintR Tdata1 = TkeyDataPtr[0];
+ UintR Tdata2 = TkeyDataPtr[1];
+ UintR Tdata3 = TkeyDataPtr[2];
+ UintR Tdata4 = TkeyDataPtr[3];
+ UintR Tdata5;
+
+ regCachePtr->keydata[0] = Tdata1;
+ regCachePtr->keydata[1] = Tdata2;
+ regCachePtr->keydata[2] = Tdata3;
+ regCachePtr->keydata[3] = Tdata4;
+
+ TkeyLength = tcKeyReq->getKeyLength(Treqinfo);
+ Uint32 TAIDataIndex;
+ if (TkeyLength > 8) {
+ TAIDataIndex = TkeyIndex + 8;
+ } else {
+ if (TkeyLength == 0) {
+ TCKEY_abort(signal, 4);
+ return;
+ }//if
+ TAIDataIndex = TkeyIndex + TkeyLength;
+ }//if
+ Uint32* TAIDataPtr = &TOptionalDataPtr[TAIDataIndex];
+
+ titcLenAiInTckeyreq = tcKeyReq->getAIInTcKeyReq(Treqinfo);
+ regCachePtr->keylen = TkeyLength;
+ regCachePtr->lenAiInTckeyreq = titcLenAiInTckeyreq;
+ regCachePtr->currReclenAi = titcLenAiInTckeyreq;
+ regCachePtr->m_special_hash =
+ localTabptr.p->hasCharAttr | (localTabptr.p->noOfDistrKeys > 0);
+ Tdata1 = TAIDataPtr[0];
+ Tdata2 = TAIDataPtr[1];
+ Tdata3 = TAIDataPtr[2];
+ Tdata4 = TAIDataPtr[3];
+ Tdata5 = TAIDataPtr[4];
+
+ regCachePtr->attrinfo0 = Tdata1;
+ regCachePtr->attrinfo15[0] = Tdata2;
+ regCachePtr->attrinfo15[1] = Tdata3;
+ regCachePtr->attrinfo15[2] = Tdata4;
+ regCachePtr->attrinfo15[3] = Tdata5;
+
+ if (TOperationType == ZREAD) {
+ Uint32 TreadCount = c_counters.creadCount;
+ jam();
+ regCachePtr->opLock = 0;
+ c_counters.creadCount = TreadCount + 1;
+ } else if(TOperationType == ZREAD_EX){
+ Uint32 TreadCount = c_counters.creadCount;
+ jam();
+ TOperationType = ZREAD;
+ regTcPtr->operation = ZREAD;
+ regCachePtr->opLock = ZUPDATE;
+ c_counters.creadCount = TreadCount + 1;
+ } else {
+ if(regApiPtr->commitAckMarker == RNIL){
+ jam();
+ CommitAckMarkerPtr tmp;
+ if(!m_commitAckMarkerHash.seize(tmp)){
+ TCKEY_abort(signal, 56);
+ return;
+ } else {
+ regTcPtr->commitAckMarker = tmp.i;
+ regApiPtr->commitAckMarker = tmp.i;
+ tmp.p->transid1 = tcKeyReq->transId1;
+ tmp.p->transid2 = tcKeyReq->transId2;
+ tmp.p->apiNodeId = refToNode(regApiPtr->ndbapiBlockref);
+ tmp.p->apiConnectPtr = TapiIndex;
+ tmp.p->noOfLqhs = 0;
+ m_commitAckMarkerHash.add(tmp);
+ }
+ }
+
+ UintR TwriteCount = c_counters.cwriteCount;
+ UintR Toperationsize = coperationsize;
+ /* --------------------------------------------------------------------
+ * IF THIS IS A TEMPORARY TABLE, DON'T UPDATE coperationsize.
+ * THIS VARIABLE CONTROLS THE INTERVAL BETWEEN LCP'S AND
+ * TEMP TABLES DON'T PARTICIPATE.
+ * -------------------------------------------------------------------- */
+ if (localTabptr.p->storedTable) {
+ coperationsize = ((Toperationsize + TattrLen) + TkeyLength) + 17;
+ }
+ c_counters.cwriteCount = TwriteCount + 1;
+ switch (TOperationType) {
+ case ZUPDATE:
+ jam();
+ if (TattrLen == 0) {
+ //TCKEY_abort(signal, 5);
+ //return;
+ }//if
+ /*---------------------------------------------------------------------*/
+ // The missing break is intentional since we want to set the opLock
+ // variable for updates as well
+ /*---------------------------------------------------------------------*/
+ case ZINSERT:
+ case ZDELETE:
+ jam();
+ regCachePtr->opLock = TOperationType;
+ break;
+ case ZWRITE:
+ jam();
+ // A write operation is originally an insert operation.
+ regCachePtr->opLock = ZINSERT;
+ break;
+ default:
+ TCKEY_abort(signal, 9);
+ return;
+ }//switch
+ }//if
+
+ Uint32 TabortOption = tcKeyReq->getAbortOption(Treqinfo);
+ regTcPtr->m_execAbortOption = TabortOption;
+
+ /*-------------------------------------------------------------------------
+ * Check error handling per operation
+ * If CommitFlag is set state accordingly and check for early abort
+ *------------------------------------------------------------------------*/
+ if (tcKeyReq->getCommitFlag(Treqinfo) == 1) {
+ ndbrequire(TexecuteFlag);
+ regApiPtr->apiConnectstate = CS_REC_COMMITTING;
+ } else {
+ /* ---------------------------------------------------------------------
+ * PREPARE TRANSACTION IS NOT IMPLEMENTED YET.
+ * ---------------------------------------------------------------------
+ * ELSIF (TREQINFO => 3) (*) 1 = 1 THEN
+ * IF PREPARE TRANSACTION THEN
+ * API_CONNECTPTR:API_CONNECTSTATE = REC_PREPARING
+ * SET STATE TO PREPARING
+ * --------------------------------------------------------------------- */
+ if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
+ jam();
+ // Trigger execution at commit
+ regApiPtr->apiConnectstate = CS_REC_COMMITTING;
+ } else {
+ jam();
+ regApiPtr->apiConnectstate = CS_RECEIVING;
+ }//if
+ }//if
+ if (TkeyLength <= 4) {
+ tckeyreq050Lab(signal);
+ return;
+ } else {
+ if (cfirstfreeDatabuf != RNIL) {
+ jam();
+ linkKeybuf(signal);
+ Tdata1 = TkeyDataPtr[4];
+ Tdata2 = TkeyDataPtr[5];
+ Tdata3 = TkeyDataPtr[6];
+ Tdata4 = TkeyDataPtr[7];
+
+ DatabufRecord * const regDataPtr = databufptr.p;
+ regDataPtr->data[0] = Tdata1;
+ regDataPtr->data[1] = Tdata2;
+ regDataPtr->data[2] = Tdata3;
+ regDataPtr->data[3] = Tdata4;
+ } else {
+ jam();
+ seizeDatabuferrorLab(signal);
+ return;
+ }//if
+ if (TkeyLength <= 8) {
+ jam();
+ tckeyreq050Lab(signal);
+ return;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+ * THE TCKEYREQ DIDN'T CONTAIN ALL KEY DATA,
+ * SAVE STATE AND WAIT FOR KEYINFO
+ * --------------------------------------------------------------------*/
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ regCachePtr->save1 = 8;
+ regTcPtr->tcConnectstate = OS_WAIT_KEYINFO;
+ return;
+ }//if
+ }//if
+ return;
+}//Dbtc::execTCKEYREQ()
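+
+/*
+  Worked example of the optional-data indexing above (illustrative
+  values, not taken from this patch): with scanIndFlag = 0,
+  distributionKeyFlag = 1, keyLength = 3 and 2 inline ATTRINFO words,
+
+    TDistrKeyIndex = 0      -> distribution key in optional word 0
+    TkeyIndex      = 0 + 1  -> key data in optional words 1..3
+    TAIDataIndex   = 1 + 3  -> inline ATTRINFO in optional words 4..5
+
+  Key data beyond 8 words arrives later in KEYINFO (OS_WAIT_KEYINFO)
+  and attribute data beyond the inline words in ATTRINFO (OS_WAIT_ATTR).
+*/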
+
+void Dbtc::tckeyreq050Lab(Signal* signal)
+{
+ UintR tnoOfBackup;
+ UintR tnoOfStandby;
+ UintR tnodeinfo;
+
+ hash(signal); /* NOW IT IS TIME TO CALCULATE THE HASH VALUE*/
+
+ CacheRecord * const regCachePtr = cachePtr.p;
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+
+ UintR TtcTimer = ctcTimer;
+ UintR ThashValue = thashValue;
+ UintR TdistrHashValue = tdistrHashValue;
+ UintR TdihConnectptr = regTcPtr->dihConnectptr;
+ UintR Ttableref = regCachePtr->tableref;
+
+ TableRecordPtr localTabptr;
+ localTabptr.i = Ttableref;
+ localTabptr.p = &tableRecord[localTabptr.i];
+ Uint32 schemaVersion = regCachePtr->schemaVersion;
+ if(localTabptr.p->checkTable(schemaVersion)){
+ ;
+ } else {
+ terrorCode = localTabptr.p->getErrorCode(schemaVersion);
+ TCKEY_abort(signal, 58);
+ return;
+ }
+
+ setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
+ regCachePtr->hashValue = ThashValue;
+
+ signal->theData[0] = TdihConnectptr;
+ signal->theData[1] = Ttableref;
+ signal->theData[2] = TdistrHashValue;
+
+ /*-------------------------------------------------------------*/
+ /* FOR EFFICIENCY REASONS WE AVOID THE SIGNAL SENDING HERE AND */
+ /* PROCEED IMMEDIATELY TO DIH. IN MULTI-THREADED VERSIONS WE */
+ /* HAVE TO INSERT A MUTEX ON DIH TO ENSURE PROPER OPERATION. */
+ /* SINCE THIS SIGNAL AND DIVERIFYREQ ARE THE ONLY SIGNALS SENT */
+ /* TO DIH IN TRAFFIC IT SHOULD BE OK (3% OF THE EXECUTION TIME */
+ /* IS SPENT IN DIH AND EVEN LESS IN REPLICATED NDB). */
+ /*-------------------------------------------------------------*/
+ EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal, 3);
+ UintR TerrorIndicator = signal->theData[0];
+ jamEntry();
+ if (TerrorIndicator != 0) {
+ execDIGETNODESREF(signal);
+ return;
+ }
+
+ if(ERROR_INSERTED(8050) && signal->theData[3] != getOwnNodeId())
+ {
+ ndbassert(false);
+ signal->theData[1] = 626;
+ execDIGETNODESREF(signal);
+ return;
+ }
+
+ /****************>>*/
+ /* DIGETNODESCONF >*/
+ /* ***************>*/
+
+ UintR Tdata1 = signal->theData[1];
+ UintR Tdata2 = signal->theData[2];
+ UintR Tdata3 = signal->theData[3];
+ UintR Tdata4 = signal->theData[4];
+ UintR Tdata5 = signal->theData[5];
+ UintR Tdata6 = signal->theData[6];
+
+ regCachePtr->fragmentid = Tdata1;
+ tnodeinfo = Tdata2;
+
+ regTcPtr->tcNodedata[0] = Tdata3;
+ regTcPtr->tcNodedata[1] = Tdata4;
+ regTcPtr->tcNodedata[2] = Tdata5;
+ regTcPtr->tcNodedata[3] = Tdata6;
+
+ Uint8 Toperation = regTcPtr->operation;
+ Uint8 Tdirty = regTcPtr->dirtyOp;
+ tnoOfBackup = tnodeinfo & 3;
+ tnoOfStandby = (tnodeinfo >> 8) & 3;
+
+ regCachePtr->fragmentDistributionKey = (tnodeinfo >> 16) & 255;
+ if (Toperation == ZREAD) {
+ if (Tdirty == 1) {
+ jam();
+ /*-------------------------------------------------------------*/
+ /* A SIMPLE READ CAN SELECT ANY OF THE PRIMARY AND */
+ /* BACKUP NODES TO READ. WE WILL TRY TO SELECT THIS */
+ /* NODE IF POSSIBLE TO AVOID UNNECESSARY COMMUNICATION */
+ /* WITH SIMPLE READS. */
+ /*-------------------------------------------------------------*/
+ arrGuard(tnoOfBackup, 4);
+ UintR Tindex;
+ UintR TownNode = cownNodeid;
+ for (Tindex = 1; Tindex <= tnoOfBackup; Tindex++) {
+ UintR Tnode = regTcPtr->tcNodedata[Tindex];
+ jam();
+ if (Tnode == TownNode) {
+ jam();
+ regTcPtr->tcNodedata[0] = Tnode;
+ }//if
+ }//for
+ if(ERROR_INSERTED(8048) || ERROR_INSERTED(8049))
+ {
+ for (Tindex = 0; Tindex <= tnoOfBackup; Tindex++)
+ {
+ UintR Tnode = regTcPtr->tcNodedata[Tindex];
+ jam();
+ if (Tnode != TownNode) {
+ jam();
+ regTcPtr->tcNodedata[0] = Tnode;
+ ndbout_c("Choosing %d", Tnode);
+ }//if
+ }//for
+ }
+ }//if
+ jam();
+ regTcPtr->lastReplicaNo = 0;
+ regTcPtr->noOfNodes = 1;
+ } else {
+ UintR TlastReplicaNo;
+ jam();
+ TlastReplicaNo = tnoOfBackup + tnoOfStandby;
+ regTcPtr->lastReplicaNo = (Uint8)TlastReplicaNo;
+ regTcPtr->noOfNodes = (Uint8)(TlastReplicaNo + 1);
+ }//if
+ if (regCachePtr->lenAiInTckeyreq == regCachePtr->attrlength) {
+ /****************************************************************>*/
+ /* HERE WE HAVE FOUND THAT THE LAST SIGNAL BELONGING TO THIS */
+ /* OPERATION HAS BEEN RECEIVED. THIS MEANS THAT WE CAN NOW REUSE  */
+ /* THE API CONNECT RECORD. HOWEVER IF PREPARE OR COMMIT HAS BEEN  */
+ /* RECEIVED THEN IT IS NOT ALLOWED TO RECEIVE ANY FURTHER */
+ /* OPERATIONS. WE KNOW THAT WE WILL WAIT FOR DICT NEXT. IT IS NOT */
+ /* POSSIBLE FOR THE TC CONNECTION TO BE READY YET. */
+ /****************************************************************>*/
+ switch (regApiPtr->apiConnectstate) {
+ case CS_RECEIVING:
+ jam();
+ regApiPtr->apiConnectstate = CS_STARTED;
+ break;
+ case CS_REC_COMMITTING:
+ jam();
+ regApiPtr->apiConnectstate = CS_START_COMMITTING;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//switch
+ attrinfoDihReceivedLab(signal);
+ return;
+ } else {
+ if (regCachePtr->lenAiInTckeyreq < regCachePtr->attrlength) {
+ TtcTimer = ctcTimer;
+ jam();
+ setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
+ regTcPtr->tcConnectstate = OS_WAIT_ATTR;
+ return;
+ } else {
+ TCKEY_abort(signal, 11);
+ return;
+ }//if
+ }//if
+ return;
+}//Dbtc::tckeyreq050Lab()
+
+void Dbtc::attrinfoDihReceivedLab(Signal* signal)
+{
+ CacheRecord * const regCachePtr = cachePtr.p;
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ Uint16 Tnode = regTcPtr->tcNodedata[0];
+
+ TableRecordPtr localTabptr;
+ localTabptr.i = regCachePtr->tableref;
+ localTabptr.p = &tableRecord[localTabptr.i];
+
+ if(localTabptr.p->checkTable(regCachePtr->schemaVersion)){
+ ;
+ } else {
+ terrorCode = localTabptr.p->getErrorCode(regCachePtr->schemaVersion);
+ TCKEY_abort(signal, 58);
+ return;
+ }
+ arrGuard(Tnode, MAX_NDB_NODES);
+ packLqhkeyreq(signal, calcLqhBlockRef(Tnode));
+}//Dbtc::attrinfoDihReceivedLab()
+
+void Dbtc::packLqhkeyreq(Signal* signal,
+ BlockReference TBRef)
+{
+ CacheRecord * const regCachePtr = cachePtr.p;
+ UintR Tkeylen = regCachePtr->keylen;
+ UintR TfirstAttrbuf = regCachePtr->firstAttrbuf;
+ sendlqhkeyreq(signal, TBRef);
+ if (Tkeylen > 4) {
+ packKeyData000Lab(signal, TBRef, Tkeylen - 4);
+ releaseKeys();
+ }//if
+ packLqhkeyreq040Lab(signal,
+ TfirstAttrbuf,
+ TBRef);
+}//Dbtc::packLqhkeyreq()
+
+void Dbtc::sendlqhkeyreq(Signal* signal,
+ BlockReference TBRef)
+{
+ UintR tslrAttrLen;
+ UintR Tdata10;
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ CacheRecord * const regCachePtr = cachePtr.p;
+#ifdef ERROR_INSERT
+ if (ERROR_INSERTED(8002)) {
+ systemErrorLab(signal);
+ }//if
+ if (ERROR_INSERTED(8007)) {
+ if (apiConnectptr.p->apiConnectstate == CS_STARTED) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8008)) {
+ if (apiConnectptr.p->apiConnectstate == CS_START_COMMITTING) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8009)) {
+ if (apiConnectptr.p->apiConnectstate == CS_STARTED) {
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8010)) {
+ if (apiConnectptr.p->apiConnectstate == CS_START_COMMITTING) {
+ return;
+ }//if
+ }//if
+#endif
+
+ tslrAttrLen = 0;
+ LqhKeyReq::setAttrLen(tslrAttrLen, regCachePtr->attrlength);
+ /* ---------------------------------------------------------------------- */
+ // Bit16 == 0 since StoredProcedures are not yet supported.
+ /* ---------------------------------------------------------------------- */
+ LqhKeyReq::setDistributionKey(tslrAttrLen, regCachePtr->fragmentDistributionKey);
+ LqhKeyReq::setScanTakeOverFlag(tslrAttrLen, regCachePtr->scanTakeOverInd);
+
+ Tdata10 = 0;
+ LqhKeyReq::setKeyLen(Tdata10, regCachePtr->keylen);
+ LqhKeyReq::setLastReplicaNo(Tdata10, regTcPtr->lastReplicaNo);
+ LqhKeyReq::setLockType(Tdata10, regCachePtr->opLock);
+ /* ---------------------------------------------------------------------- */
+ // Indicate Application Reference is present in bit 15
+ /* ---------------------------------------------------------------------- */
+ LqhKeyReq::setApplicationAddressFlag(Tdata10, 1);
+ LqhKeyReq::setDirtyFlag(Tdata10, regTcPtr->dirtyOp);
+ LqhKeyReq::setInterpretedFlag(Tdata10, regCachePtr->opExec);
+ LqhKeyReq::setSimpleFlag(Tdata10, regCachePtr->opSimple);
+ LqhKeyReq::setOperation(Tdata10, regTcPtr->operation);
+ /* -----------------------------------------------------------------------
+ * Sequential Number of first LQH = 0, bit 22-23
+ * IF ATTRIBUTE INFORMATION IS SENT IN TCKEYREQ,
+ * IT IS ALSO SENT IN LQHKEYREQ
+ * ----------------------------------------------------------------------- */
+ LqhKeyReq::setAIInLqhKeyReq(Tdata10, regCachePtr->lenAiInTckeyreq);
+ /* -----------------------------------------------------------------------
+ * Bit 27 == 0 since TC record is the same as the client record.
+ * Bit 28 == 0 since readLenAi can only be set after reading in LQH.
+ * ----------------------------------------------------------------------- */
+ //LqhKeyReq::setAPIVersion(Tdata10, regCachePtr->apiVersionNo);
+ Uint32 commitAckMarker = regTcPtr->commitAckMarker;
+ if(commitAckMarker != RNIL){
+ jam();
+
+ LqhKeyReq::setMarkerFlag(Tdata10, 1);
+
+ CommitAckMarker * tmp;
+ tmp = m_commitAckMarkerHash.getPtr(commitAckMarker);
+
+ /**
+ * Populate LQH array
+ */
+ const Uint32 noOfLqhs = regTcPtr->noOfNodes;
+ tmp->noOfLqhs = noOfLqhs;
+ for(Uint32 i = 0; i<noOfLqhs; i++){
+ tmp->lqhNodeId[i] = regTcPtr->tcNodedata[i];
+ }
+ }
+
+ /* ************************************************************> */
+ /* NO READ LENGTH SENT FROM TC. SEQUENTIAL NUMBER IS 1 AND IT */
+ /* IS SENT TO A PRIMARY NODE. */
+ /* ************************************************************> */
+ UintR sig0, sig1, sig2, sig3, sig4, sig5, sig6;
+
+ LqhKeyReq * const lqhKeyReq = (LqhKeyReq *)signal->getDataPtrSend();
+
+ sig0 = tcConnectptr.i;
+ sig2 = regCachePtr->hashValue;
+ sig4 = cownref;
+ sig5 = regTcPtr->savePointId;
+
+ lqhKeyReq->clientConnectPtr = sig0;
+ lqhKeyReq->attrLen = tslrAttrLen;
+ lqhKeyReq->hashValue = sig2;
+ lqhKeyReq->requestInfo = Tdata10;
+ lqhKeyReq->tcBlockref = sig4;
+ lqhKeyReq->savePointId = sig5;
+
+ sig0 = regCachePtr->tableref + (regCachePtr->schemaVersion << 16);
+ sig1 = regCachePtr->fragmentid + (regTcPtr->tcNodedata[1] << 16);
+ sig2 = regApiPtr->transid[0];
+ sig3 = regApiPtr->transid[1];
+ sig4 = regApiPtr->ndbapiBlockref;
+ sig5 = regTcPtr->clientData;
+ sig6 = regCachePtr->scanInfo;
+
+ lqhKeyReq->tableSchemaVersion = sig0;
+ lqhKeyReq->fragmentData = sig1;
+ lqhKeyReq->transId1 = sig2;
+ lqhKeyReq->transId2 = sig3;
+ lqhKeyReq->scanInfo = sig6;
+
+ lqhKeyReq->variableData[0] = sig4;
+ lqhKeyReq->variableData[1] = sig5;
+
+ UintR nextPos = 2;
+
+ if (regTcPtr->lastReplicaNo > 1) {
+ sig0 = (UintR)regTcPtr->tcNodedata[2] +
+ (UintR)(regTcPtr->tcNodedata[3] << 16);
+ lqhKeyReq->variableData[nextPos] = sig0;
+ nextPos++;
+ }//if
+
+ sig0 = regCachePtr->keydata[0];
+ sig1 = regCachePtr->keydata[1];
+ sig2 = regCachePtr->keydata[2];
+ sig3 = regCachePtr->keydata[3];
+ UintR Tkeylen = regCachePtr->keylen;
+
+ lqhKeyReq->variableData[nextPos + 0] = sig0;
+ lqhKeyReq->variableData[nextPos + 1] = sig1;
+ lqhKeyReq->variableData[nextPos + 2] = sig2;
+ lqhKeyReq->variableData[nextPos + 3] = sig3;
+
+ if (Tkeylen < 4) {
+ nextPos += Tkeylen;
+ } else {
+ nextPos += 4;
+ }//if
+
+ sig0 = regCachePtr->attrinfo0;
+ sig1 = regCachePtr->attrinfo15[0];
+ sig2 = regCachePtr->attrinfo15[1];
+ sig3 = regCachePtr->attrinfo15[2];
+ sig4 = regCachePtr->attrinfo15[3];
+ UintR TlenAi = regCachePtr->lenAiInTckeyreq;
+
+ lqhKeyReq->variableData[nextPos + 0] = sig0;
+ lqhKeyReq->variableData[nextPos + 1] = sig1;
+ lqhKeyReq->variableData[nextPos + 2] = sig2;
+ lqhKeyReq->variableData[nextPos + 3] = sig3;
+ lqhKeyReq->variableData[nextPos + 4] = sig4;
+
+ nextPos += TlenAi;
+
+ // Reset trigger count
+ regTcPtr->accumulatingTriggerData.i = RNIL;
+ regTcPtr->accumulatingTriggerData.p = NULL;
+ regTcPtr->noFiredTriggers = 0;
+ regTcPtr->triggerExecutionCount = 0;
+
+ sendSignal(TBRef, GSN_LQHKEYREQ, signal,
+ nextPos + LqhKeyReq::FixedSignalLength, JBB);
+}//Dbtc::sendlqhkeyreq()
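+
+/*
+  Sketch of the LQHKEYREQ variable part built above (illustrative
+  values, not from this patch): with lastReplicaNo = 1, keylen = 3 and
+  2 inline ATTRINFO words,
+
+    variableData[0..1] = application block ref, client data
+    variableData[2..4] = 3 key words        (nextPos: 2 -> 5)
+    variableData[5..6] = 2 inline AI words  (nextPos: 5 -> 7)
+
+  giving a send length of 7 + LqhKeyReq::FixedSignalLength. The extra
+  node-pair word is only included when lastReplicaNo > 1.
+*/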
+
+void Dbtc::packLqhkeyreq040Lab(Signal* signal,
+ UintR anAttrBufIndex,
+ BlockReference TBRef)
+{
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ CacheRecord * const regCachePtr = cachePtr.p;
+#ifdef ERROR_INSERT
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ if (ERROR_INSERTED(8009)) {
+ if (regApiPtr->apiConnectstate == CS_STARTED) {
+ attrbufptr.i = RNIL;
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8010)) {
+ if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
+ attrbufptr.i = RNIL;
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+#endif
+
+ UintR TattrbufFilesize = cattrbufFilesize;
+ AttrbufRecord *localAttrbufRecord = attrbufRecord;
+ while (1) {
+ if (anAttrBufIndex == RNIL) {
+ UintR TtcTimer = ctcTimer;
+ UintR Tread = (regTcPtr->operation == ZREAD);
+ UintR Tsimple = (regCachePtr->opSimple == ZTRUE);
+ UintR Tboth = Tread & Tsimple;
+ setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
+ jam();
+ /*--------------------------------------------------------------------
+ * WE HAVE SENT ALL THE SIGNALS OF THIS OPERATION. SET STATE AND EXIT.
+ *---------------------------------------------------------------------*/
+ releaseAttrinfo();
+ if (Tboth) {
+ jam();
+ releaseSimpleRead(signal, apiConnectptr, tcConnectptr.p);
+ return;
+ }//if
+ regTcPtr->tcConnectstate = OS_OPERATING;
+ return;
+ }//if
+ if (anAttrBufIndex < TattrbufFilesize) {
+ AttrbufRecord * const regAttrPtr = &localAttrbufRecord[anAttrBufIndex];
+ anAttrBufIndex = regAttrPtr->attrbuf[ZINBUF_NEXT];
+ sendAttrinfo(signal,
+ tcConnectptr.i,
+ regAttrPtr,
+ TBRef);
+ } else {
+ TCKEY_abort(signal, 17);
+ return;
+ }//if
+ }//while
+}//Dbtc::packLqhkeyreq040Lab()
+
+/* ========================================================================= */
+/* ------- RELEASE ALL ATTRINFO RECORDS IN AN OPERATION RECORD ------- */
+/* ========================================================================= */
+void Dbtc::releaseAttrinfo()
+{
+ UintR Tmp;
+ AttrbufRecordPtr Tattrbufptr;
+ CacheRecord * const regCachePtr = cachePtr.p;
+ UintR TattrbufFilesize = cattrbufFilesize;
+ UintR TfirstfreeAttrbuf = cfirstfreeAttrbuf;
+ Tattrbufptr.i = regCachePtr->firstAttrbuf;
+ AttrbufRecord *localAttrbufRecord = attrbufRecord;
+
+ while (Tattrbufptr.i < TattrbufFilesize) {
+ Tattrbufptr.p = &localAttrbufRecord[Tattrbufptr.i];
+ Tmp = Tattrbufptr.p->attrbuf[ZINBUF_NEXT];
+ Tattrbufptr.p->attrbuf[ZINBUF_NEXT] = TfirstfreeAttrbuf;
+ TfirstfreeAttrbuf = Tattrbufptr.i;
+ Tattrbufptr.i = Tmp;
+ jam();
+ }//while
+ if (Tattrbufptr.i == RNIL) {
+//---------------------------------------------------
+// Now we will release the cache record at the same
+// time as releasing the attrinfo records.
+//---------------------------------------------------
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ UintR TfirstfreeCacheRec = cfirstfreeCacheRec;
+ UintR TCacheIndex = cachePtr.i;
+ cfirstfreeAttrbuf = TfirstfreeAttrbuf;
+ regCachePtr->nextCacheRec = TfirstfreeCacheRec;
+ cfirstfreeCacheRec = TCacheIndex;
+ regApiPtr->cachePtr = RNIL;
+ return;
+ }//if
+ systemErrorLab(0);
+ return;
+}//Dbtc::releaseAttrinfo()
+
+/* ========================================================================= */
+/* ------- RELEASE ALL RECORDS CONNECTED TO A SIMPLE OPERATION ------- */
+/* ========================================================================= */
+void Dbtc::releaseSimpleRead(Signal* signal,
+ ApiConnectRecordPtr regApiPtr,
+ TcConnectRecord* regTcPtr)
+{
+ Uint32 Ttckeyrec = regApiPtr.p->tckeyrec;
+ Uint32 TclientData = regTcPtr->clientData;
+ Uint32 Tnode = regTcPtr->tcNodedata[0];
+ Uint32 Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec;
+ Uint32 TsimpleReadCount = c_counters.csimpleReadCount;
+ ConnectionState state = regApiPtr.p->apiConnectstate;
+
+ regApiPtr.p->tcSendArray[Ttckeyrec] = TclientData;
+ regApiPtr.p->tcSendArray[Ttckeyrec + 1] = TcKeyConf::SimpleReadBit | Tnode;
+ regApiPtr.p->tckeyrec = Ttckeyrec + 2;
+
+ unlinkReadyTcCon(signal);
+ releaseTcCon();
+
+ /**
+ * No LQHKEYCONF in Simple/Dirty read.
+ * Therefore decrease the number of LQHKEYCONF(REF) signals we are waiting for
+ */
+ c_counters.csimpleReadCount = TsimpleReadCount + 1;
+ regApiPtr.p->lqhkeyreqrec = --Tlqhkeyreqrec;
+
+ if(Tlqhkeyreqrec == 0)
+ {
+ /**
+ * Special case of lqhKeyConf_checkTransactionState:
+ * - commit with zero operations: handle only for simple read
+ */
+ sendtckeyconf(signal, state == CS_START_COMMITTING);
+ regApiPtr.p->apiConnectstate =
+ (state == CS_START_COMMITTING ? CS_CONNECTED : state);
+ setApiConTimer(regApiPtr.i, 0, __LINE__);
+
+ return;
+ }
+
+ /**
+ * Emulate LQHKEYCONF
+ */
+ lqhKeyConf_checkTransactionState(signal, regApiPtr.p);
+}//Dbtc::releaseSimpleRead()
+
+/* ------------------------------------------------------------------------- */
+/* ------- CHECK IF ALL TC CONNECTIONS ARE COMPLETED ------- */
+/* ------------------------------------------------------------------------- */
+void Dbtc::unlinkReadyTcCon(Signal* signal)
+{
+ TcConnectRecordPtr urtTcConnectptr;
+
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ if (regTcPtr->prevTcConnect != RNIL) {
+ jam();
+ urtTcConnectptr.i = regTcPtr->prevTcConnect;
+ ptrCheckGuard(urtTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+ urtTcConnectptr.p->nextTcConnect = regTcPtr->nextTcConnect;
+ } else {
+ jam();
+ regApiPtr->firstTcConnect = regTcPtr->nextTcConnect;
+ }//if
+ if (regTcPtr->nextTcConnect != RNIL) {
+ jam();
+ urtTcConnectptr.i = regTcPtr->nextTcConnect;
+ ptrCheckGuard(urtTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+ urtTcConnectptr.p->prevTcConnect = regTcPtr->prevTcConnect;
+ } else {
+ jam();
+ regApiPtr->lastTcConnect = tcConnectptr.p->prevTcConnect;
+ }//if
+}//Dbtc::unlinkReadyTcCon()
+
+void Dbtc::releaseTcCon()
+{
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ UintR TfirstfreeTcConnect = cfirstfreeTcConnect;
+ UintR TconcurrentOp = c_counters.cconcurrentOp;
+ UintR TtcConnectptrIndex = tcConnectptr.i;
+
+ regTcPtr->tcConnectstate = OS_CONNECTED;
+ regTcPtr->nextTcConnect = TfirstfreeTcConnect;
+ regTcPtr->apiConnect = RNIL;
+ regTcPtr->isIndexOp = false;
+ regTcPtr->indexOp = RNIL;
+ cfirstfreeTcConnect = TtcConnectptrIndex;
+ c_counters.cconcurrentOp = TconcurrentOp - 1;
+}//Dbtc::releaseTcCon()
+
+void Dbtc::execPACKED_SIGNAL(Signal* signal)
+{
+ LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+
+ UintR Ti;
+ UintR Tstep = 0;
+ UintR Tlength;
+ UintR TpackedData[28];
+ UintR Tdata1, Tdata2, Tdata3, Tdata4;
+
+ jamEntry();
+ Tlength = signal->length();
+ if (Tlength > 25) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+ Uint32* TpackDataPtr;
+ for (Ti = 0; Ti < Tlength; Ti += 4) {
+ Uint32* TsigDataPtr = &signal->theData[Ti];
+ Tdata1 = TsigDataPtr[0];
+ Tdata2 = TsigDataPtr[1];
+ Tdata3 = TsigDataPtr[2];
+ Tdata4 = TsigDataPtr[3];
+
+ TpackDataPtr = &TpackedData[Ti];
+ TpackDataPtr[0] = Tdata1;
+ TpackDataPtr[1] = Tdata2;
+ TpackDataPtr[2] = Tdata3;
+ TpackDataPtr[3] = Tdata4;
+ }//for
+ while (Tlength > Tstep) {
+
+ TpackDataPtr = &TpackedData[Tstep];
+ Tdata1 = TpackDataPtr[0];
+ Tdata2 = TpackDataPtr[1];
+ Tdata3 = TpackDataPtr[2];
+
+ lqhKeyConf->connectPtr = Tdata1 & 0x0FFFFFFF;
+ lqhKeyConf->opPtr = Tdata2;
+ lqhKeyConf->userRef = Tdata3;
+
+ switch (Tdata1 >> 28) {
+ case ZCOMMITTED:
+ signal->header.theLength = 3;
+ execCOMMITTED(signal);
+ Tstep += 3;
+ break;
+ case ZCOMPLETED:
+ signal->header.theLength = 3;
+ execCOMPLETED(signal);
+ Tstep += 3;
+ break;
+ case ZLQHKEYCONF:
+ jam();
+ Tdata1 = TpackDataPtr[3];
+ Tdata2 = TpackDataPtr[4];
+ Tdata3 = TpackDataPtr[5];
+ Tdata4 = TpackDataPtr[6];
+
+ lqhKeyConf->readLen = Tdata1;
+ lqhKeyConf->transId1 = Tdata2;
+ lqhKeyConf->transId2 = Tdata3;
+ lqhKeyConf->noFiredTriggers = Tdata4;
+ signal->header.theLength = LqhKeyConf::SignalLength;
+ execLQHKEYCONF(signal);
+ Tstep += LqhKeyConf::SignalLength;
+ break;
+ default:
+ systemErrorLab(signal);
+ return;
+ }//switch
+ }//while
+ return;
+}//Dbtc::execPACKED_SIGNAL()
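+
+/*
+  Decoding example for the packed-signal loop above (illustrative, not
+  part of this patch): the top 4 bits of each entry's first word select
+  the signal type and the low 28 bits carry the TC connect pointer.
+  A buffer holding one ZLQHKEYCONF entry followed by one ZCOMMITTED
+  entry is consumed as
+
+    Tstep 0..6  ZLQHKEYCONF  (LqhKeyConf::SignalLength words,
+                              the seven fields filled in above)
+    Tstep 7..9  ZCOMMITTED   (3 words)
+
+  leaving Tstep == Tlength == 10 when the while loop exits.
+*/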
+
+void Dbtc::execLQHKEYCONF(Signal* signal)
+{
+ const LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+ UintR compare_transid1, compare_transid2;
+ BlockReference tlastLqhBlockref;
+ UintR tlastLqhConnect;
+ UintR treadlenAi;
+ UintR TtcConnectptrIndex;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+
+ tlastLqhConnect = lqhKeyConf->connectPtr;
+ TtcConnectptrIndex = lqhKeyConf->opPtr;
+ tlastLqhBlockref = lqhKeyConf->userRef;
+ treadlenAi = lqhKeyConf->readLen;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+
+ /*------------------------------------------------------------------------
+ * NUMBER OF EXTERNAL TRIGGERS FIRED IN DATA[6]
+ * OPERATION IS NOW COMPLETED. CHECK FOR CORRECT OPERATION POINTER
+ * TO ENSURE NO CRASHES BECAUSE OF ERRONEOUS NODES. CHECK STATE OF
+ * OPERATION. THEN SET OPERATION STATE AND RETRIEVE ALL POINTERS
+ * OF THIS OPERATION. PUT COMPLETED OPERATION IN LIST OF COMPLETED
+ * OPERATIONS ON THE LQH CONNECT RECORD.
+ *------------------------------------------------------------------------
+ * THIS SIGNAL ALWAYS ARRIVES BEFORE THE ABORTED SIGNAL SINCE IT USES
+ * THE SAME PATH BACK TO TC AS THE ABORTED SIGNAL DOES. WE DO HOWEVER HAVE A
+ * PROBLEM WHEN WE ENCOUNTER A TIME-OUT WAITING FOR THE ABORTED SIGNAL.
+ * THEN THIS SIGNAL MIGHT ARRIVE WHEN THE TC CONNECT RECORD HAS BEEN REUSED
+ * BY ANOTHER TRANSACTION; THUS WE CHECK THE TRANSACTION ID OF THE SIGNAL
+ * BEFORE ACCEPTING THIS SIGNAL.
+ * Due to packing of LQHKEYCONF the ABORTED signal can now arrive before
+ * this.
+ * This is more reason to ignore the signal if not all states are correct.
+ *------------------------------------------------------------------------*/
+ if (TtcConnectptrIndex >= TtcConnectFilesize) {
+ TCKEY_abort(signal, 25);
+ return;
+ }//if
+ TcConnectRecord* const regTcPtr = &localTcConnectRecord[TtcConnectptrIndex];
+ OperationState TtcConnectstate = regTcPtr->tcConnectstate;
+ tcConnectptr.i = TtcConnectptrIndex;
+ tcConnectptr.p = regTcPtr;
+ if (TtcConnectstate != OS_OPERATING) {
+ warningReport(signal, 23);
+ return;
+ }//if
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+ UintR TapiConnectptrIndex = regTcPtr->apiConnect;
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ UintR Ttrans1 = lqhKeyConf->transId1;
+ UintR Ttrans2 = lqhKeyConf->transId2;
+ Uint32 noFired = lqhKeyConf->noFiredTriggers;
+
+ if (TapiConnectptrIndex >= TapiConnectFilesize) {
+ TCKEY_abort(signal, 29);
+ return;
+ }//if
+ ApiConnectRecord * const regApiPtr =
+ &localApiConnectRecord[TapiConnectptrIndex];
+ apiConnectptr.i = TapiConnectptrIndex;
+ apiConnectptr.p = regApiPtr;
+ compare_transid1 = regApiPtr->transid[0] ^ Ttrans1;
+ compare_transid2 = regApiPtr->transid[1] ^ Ttrans2;
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ warningReport(signal, 24);
+ return;
+ }//if
+
+#ifdef ERROR_INSERT
+ if (ERROR_INSERTED(8029)) {
+ systemErrorLab(signal);
+ }//if
+ if (ERROR_INSERTED(8003)) {
+ if (regApiPtr->apiConnectstate == CS_STARTED) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8004)) {
+ if (regApiPtr->apiConnectstate == CS_RECEIVING) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8005)) {
+ if (regApiPtr->apiConnectstate == CS_REC_COMMITTING) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8006)) {
+ if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8023)) {
+ SET_ERROR_INSERT_VALUE(8024);
+ return;
+ }//if
+#endif
+ UintR TtcTimer = ctcTimer;
+ regTcPtr->lastLqhCon = tlastLqhConnect;
+ regTcPtr->lastLqhNodeId = refToNode(tlastLqhBlockref);
+ regTcPtr->noFiredTriggers = noFired;
+
+ UintR Ttckeyrec = (UintR)regApiPtr->tckeyrec;
+ UintR TclientData = regTcPtr->clientData;
+ UintR TdirtyOp = regTcPtr->dirtyOp;
+ ConnectionState TapiConnectstate = regApiPtr->apiConnectstate;
+ if (Ttckeyrec > (ZTCOPCONF_SIZE - 2)) {
+ TCKEY_abort(signal, 30);
+ return;
+ }
+ if (TapiConnectstate == CS_ABORTING) {
+ warningReport(signal, 27);
+ return;
+ }//if
+
+ setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
+
+ if (regTcPtr->isIndexOp) {
+ jam();
+ // This was an internal TCKEYREQ;
+ // it will be returned unpacked
+ regTcPtr->attrInfoLen = treadlenAi;
+ } else {
+ if (noFired == 0 && regTcPtr->triggeringOperation == RNIL) {
+ jam();
+ /*
+ * Skip counting triggering operations the first round
+ * since they will enter execLQHKEYCONF a second time
+ * Skip counting internally generated TcKeyReq
+ */
+ regApiPtr->tcSendArray[Ttckeyrec] = TclientData;
+ regApiPtr->tcSendArray[Ttckeyrec + 1] = treadlenAi;
+ regApiPtr->tckeyrec = Ttckeyrec + 2;
+ }//if
+ }//if
+ if (TdirtyOp == ZTRUE) {
+ UintR Tlqhkeyreqrec = regApiPtr->lqhkeyreqrec;
+ jam();
+ releaseDirtyWrite(signal);
+ regApiPtr->lqhkeyreqrec = Tlqhkeyreqrec - 1;
+ } else {
+ jam();
+ if (noFired == 0) {
+ jam();
+ // No triggers to execute
+ UintR Tlqhkeyconfrec = regApiPtr->lqhkeyconfrec;
+ regApiPtr->lqhkeyconfrec = Tlqhkeyconfrec + 1;
+ regTcPtr->tcConnectstate = OS_PREPARED;
+ }
+ }//if
+
+ /**
+ * And now decide what to do next
+ */
+ if (regTcPtr->triggeringOperation != RNIL) {
+ jam();
+ // This operation was created by a trigger-executing operation.
+ // Restart it if we have executed all of its triggers
+ TcConnectRecordPtr opPtr;
+
+ opPtr.i = regTcPtr->triggeringOperation;
+ ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
+ opPtr.p->triggerExecutionCount--;
+ if (opPtr.p->triggerExecutionCount == 0) {
+ /*
+ We have completed current trigger execution
+ Continue triggering operation
+ */
+ jam();
+ continueTriggeringOp(signal, opPtr.p);
+ }
+ } else if (noFired == 0) {
+ // This operation did not fire any triggers, finish operation
+ jam();
+ if (regTcPtr->isIndexOp) {
+ jam();
+ setupIndexOpReturn(regApiPtr, regTcPtr);
+ }
+ lqhKeyConf_checkTransactionState(signal, regApiPtr);
+ } else {
+ // We have fired triggers
+ jam();
+ saveTriggeringOpState(signal, regTcPtr);
+ if (regTcPtr->noReceivedTriggers == noFired) {
+ ApiConnectRecordPtr transPtr;
+
+ // We have received all data
+ jam();
+ transPtr.i = TapiConnectptrIndex;
+ transPtr.p = regApiPtr;
+ executeTriggers(signal, &transPtr);
+ }
+ // else wait for more trigger data
+ }
+}//Dbtc::execLQHKEYCONF()
+
+void Dbtc::setupIndexOpReturn(ApiConnectRecord* regApiPtr,
+ TcConnectRecord* regTcPtr)
+{
+ regApiPtr->indexOpReturn = true;
+ regApiPtr->indexOp = regTcPtr->indexOp;
+ regApiPtr->clientData = regTcPtr->clientData;
+ regApiPtr->attrInfoLen = regTcPtr->attrInfoLen;
+}
+
+/**
+ * lqhKeyConf_checkTransactionState
+ *
+ * This function checks state variables and
+ * decides if it should wait for more LQHKEYCONF signals
+ * or if it should start committing
+ */
+void
+Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
+ ApiConnectRecord * const apiConnectPtrP)
+{
+/*---------------------------------------------------------------*/
+/* IF THE COMMIT FLAG IS SET IN SIGNAL TCKEYREQ THEN DBTC HAS TO */
+/* SEND TCKEYCONF FOR ALL OPERATIONS EXCEPT THE LAST ONE. WHEN */
+/* THE TRANSACTION THEN IS COMMITTED TCKEYCONF IS SENT FOR THE */
+/* WHOLE TRANSACTION */
+/* IF THE COMMIT FLAG IS NOT RECEIVED DBTC WILL SEND TCKEYCONF  */
+/* FOR ALL OPERATIONS, AND THEN WAIT FOR THE API TO CONCLUDE THE */
+/* TRANSACTION */
+/*---------------------------------------------------------------*/
+ ConnectionState TapiConnectstate = apiConnectPtrP->apiConnectstate;
+ UintR Tlqhkeyconfrec = apiConnectPtrP->lqhkeyconfrec;
+ UintR Tlqhkeyreqrec = apiConnectPtrP->lqhkeyreqrec;
+ int TnoOfOutStanding = Tlqhkeyreqrec - Tlqhkeyconfrec;
+
+ switch (TapiConnectstate) {
+ case CS_START_COMMITTING:
+ if (TnoOfOutStanding == 0) {
+ jam();
+ diverify010Lab(signal);
+ return;
+ } else if (TnoOfOutStanding > 0) {
+ if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
+ jam();
+ sendtckeyconf(signal, 0);
+ return;
+ } else if (apiConnectPtrP->indexOpReturn) {
+ jam();
+ sendtckeyconf(signal, 0);
+ return;
+ }//if
+ jam();
+ return;
+ } else {
+ TCKEY_abort(signal, 44);
+ return;
+ }//if
+ return;
+ case CS_STARTED:
+ case CS_RECEIVING:
+ if (TnoOfOutStanding == 0) {
+ jam();
+ sendtckeyconf(signal, 2);
+ return;
+ } else {
+ if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
+ jam();
+ sendtckeyconf(signal, 0);
+ return;
+ } else if (apiConnectPtrP->indexOpReturn) {
+ jam();
+ sendtckeyconf(signal, 0);
+ return;
+ }//if
+ jam();
+ }//if
+ return;
+ case CS_REC_COMMITTING:
+ if (TnoOfOutStanding > 0) {
+ if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
+ jam();
+ sendtckeyconf(signal, 0);
+ return;
+ } else if (apiConnectPtrP->indexOpReturn) {
+ jam();
+ sendtckeyconf(signal, 0);
+ return;
+ }//if
+ jam();
+ return;
+ }//if
+ TCKEY_abort(signal, 45);
+ return;
+ case CS_CONNECTED:
+ jam();
+/*---------------------------------------------------------------*/
+/* WE HAVE CONCLUDED THE TRANSACTION SINCE IT CONSISTED ONLY   */
+/* OF DIRTY WRITES AND ALL OF THOSE WERE                       */
+/* COMPLETED. ENSURE TCKEYREC IS ZERO TO PREVENT ERRORS. */
+/*---------------------------------------------------------------*/
+ apiConnectPtrP->tckeyrec = 0;
+ return;
+ default:
+ TCKEY_abort(signal, 46);
+ return;
+ }//switch
+}//Dbtc::lqhKeyConf_checkTransactionState()
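+
+/*
+  Example of the bookkeeping above (illustrative, not part of this
+  patch): with lqhkeyreqrec = 3 and lqhkeyconfrec = 3 there is no
+  outstanding LQHKEYCONF (TnoOfOutStanding == 0), so in
+  CS_START_COMMITTING the commit phase starts via diverify010Lab(),
+  while in CS_STARTED / CS_RECEIVING the queued results are flushed
+  with sendtckeyconf(signal, 2).
+*/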
+
+void Dbtc::sendtckeyconf(Signal* signal, UintR TcommitFlag)
+{
+ if(ERROR_INSERTED(8049)){
+ CLEAR_ERROR_INSERT_VALUE;
+ signal->theData[0] = TcContinueB::DelayTCKEYCONF;
+ signal->theData[1] = apiConnectptr.i;
+ signal->theData[2] = TcommitFlag;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 3000, 3);
+ return;
+ }
+
+ HostRecordPtr localHostptr;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ const UintR TopWords = (UintR)regApiPtr->tckeyrec;
+ localHostptr.i = refToNode(regApiPtr->ndbapiBlockref);
+ const Uint32 type = getNodeInfo(localHostptr.i).m_type;
+ const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+ const BlockNumber TblockNum = refToBlock(regApiPtr->ndbapiBlockref);
+ const Uint32 Tmarker = (regApiPtr->commitAckMarker == RNIL) ? 0 : 1;
+ ptrAss(localHostptr, hostRecord);
+ UintR TcurrLen = localHostptr.p->noOfWordsTCKEYCONF;
+ UintR confInfo = 0;
+ TcKeyConf::setCommitFlag(confInfo, TcommitFlag == 1);
+ TcKeyConf::setMarkerFlag(confInfo, Tmarker);
+ const UintR TpacketLen = 6 + TopWords;
+ regApiPtr->tckeyrec = 0;
+
+ if (regApiPtr->indexOpReturn) {
+ jam();
+ // Return internally generated TCKEY
+ TcKeyConf * const tcKeyConf = (TcKeyConf *)signal->getDataPtrSend();
+ TcKeyConf::setNoOfOperations(confInfo, 1);
+ tcKeyConf->apiConnectPtr = regApiPtr->indexOp;
+ tcKeyConf->gci = regApiPtr->globalcheckpointid;
+ tcKeyConf->confInfo = confInfo;
+ tcKeyConf->transId1 = regApiPtr->transid[0];
+ tcKeyConf->transId2 = regApiPtr->transid[1];
+ tcKeyConf->operations[0].apiOperationPtr = regApiPtr->clientData;
+ tcKeyConf->operations[0].attrInfoLen = regApiPtr->attrInfoLen;
+ Uint32 sigLen = TcKeyConf::StaticLength + TcKeyConf::OperationLength;
+ EXECUTE_DIRECT(DBTC, GSN_TCKEYCONF, signal, sigLen);
+ regApiPtr->indexOpReturn = false;
+ if (TopWords == 0) {
+ jam();
+ return; // No queued TcKeyConf
+ }//if
+ }//if
+ if(TcommitFlag){
+ jam();
+ regApiPtr->m_exec_flag = 0;
+ }
+ TcKeyConf::setNoOfOperations(confInfo, (TopWords >> 1));
+ if ((TpacketLen > 25) || !is_api){
+ TcKeyConf * const tcKeyConf = (TcKeyConf *)signal->getDataPtrSend();
+
+ jam();
+ tcKeyConf->apiConnectPtr = regApiPtr->ndbapiConnect;
+ tcKeyConf->gci = regApiPtr->globalcheckpointid;
+ tcKeyConf->confInfo = confInfo;
+ tcKeyConf->transId1 = regApiPtr->transid[0];
+ tcKeyConf->transId2 = regApiPtr->transid[1];
+ copyFromToLen(&regApiPtr->tcSendArray[0],
+ (UintR*)&tcKeyConf->operations,
+ (UintR)ZTCOPCONF_SIZE);
+ sendSignal(regApiPtr->ndbapiBlockref,
+ GSN_TCKEYCONF, signal, (TpacketLen - 1), JBB);
+ return;
+ } else if (((TcurrLen + TpacketLen) > 25) && (TcurrLen > 0)) {
+ jam();
+ sendPackedTCKEYCONF(signal, localHostptr.p, localHostptr.i);
+ TcurrLen = 0;
+ } else {
+ jam();
+ updatePackedList(signal, localHostptr.p, localHostptr.i);
+ }//if
+ // -------------------------------------------------------------------------
+ // The header contains the block reference of the receiver plus the real
+ // signal length minus 3. Since we store the real signal length plus one
+ // additional word for the header, we have to subtract 4 here.
+ // -------------------------------------------------------------------------
+ UintR Tpack0 = (TblockNum << 16) + (TpacketLen - 4);
+ UintR Tpack1 = regApiPtr->ndbapiConnect;
+ UintR Tpack2 = regApiPtr->globalcheckpointid;
+ UintR Tpack3 = confInfo;
+ UintR Tpack4 = regApiPtr->transid[0];
+ UintR Tpack5 = regApiPtr->transid[1];
+
+ localHostptr.p->noOfWordsTCKEYCONF = TcurrLen + TpacketLen;
+
+ localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 0] = Tpack0;
+ localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 1] = Tpack1;
+ localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 2] = Tpack2;
+ localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 3] = Tpack3;
+ localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 4] = Tpack4;
+ localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 5] = Tpack5;
+
+ UintR Ti;
+ for (Ti = 6; Ti < TpacketLen; Ti++) {
+ localHostptr.p->packedWordsTCKEYCONF[TcurrLen + Ti] =
+ regApiPtr->tcSendArray[Ti - 6];
+ }//for
+}//Dbtc::sendtckeyconf()
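+
+/*
+  Packing example for the header word computed above (illustrative
+  values, not from this patch): with two completed operations
+  TopWords = 4, so TpacketLen = 6 + 4 = 10 and
+
+    Tpack0 = (TblockNum << 16) + (TpacketLen - 4)
+           = (receiver block number << 16) + 6
+
+  i.e. the low half-word holds the real TCKEYCONF length minus 3, and
+  the remaining 9 words of the packet follow in packedWordsTCKEYCONF.
+*/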
+
+void Dbtc::copyFromToLen(UintR* sourceBuffer, UintR* destBuffer, UintR Tlen)
+{
+ UintR Tindex = 0;
+ UintR Ti;
+ while (Tlen >= 4) {
+ UintR Tdata0 = sourceBuffer[Tindex + 0];
+ UintR Tdata1 = sourceBuffer[Tindex + 1];
+ UintR Tdata2 = sourceBuffer[Tindex + 2];
+ UintR Tdata3 = sourceBuffer[Tindex + 3];
+ Tlen -= 4;
+ destBuffer[Tindex + 0] = Tdata0;
+ destBuffer[Tindex + 1] = Tdata1;
+ destBuffer[Tindex + 2] = Tdata2;
+ destBuffer[Tindex + 3] = Tdata3;
+ Tindex += 4;
+ }//while
+ for (Ti = 0; Ti < Tlen; Ti++, Tindex++) {
+ destBuffer[Tindex] = sourceBuffer[Tindex];
+ }//for
+}//Dbtc::copyFromToLen()
+
+void Dbtc::execSEND_PACKED(Signal* signal)
+{
+ HostRecordPtr Thostptr;
+ HostRecord *localHostRecord = hostRecord;
+ UintR i;
+ UintR TpackedListIndex = cpackedListIndex;
+ jamEntry();
+ for (i = 0; i < TpackedListIndex; i++) {
+ Thostptr.i = cpackedList[i];
+ ptrAss(Thostptr, localHostRecord);
+ arrGuard(Thostptr.i - 1, MAX_NODES - 1);
+ UintR TnoOfPackedWordsLqh = Thostptr.p->noOfPackedWordsLqh;
+ UintR TnoOfWordsTCKEYCONF = Thostptr.p->noOfWordsTCKEYCONF;
+ UintR TnoOfWordsTCINDXCONF = Thostptr.p->noOfWordsTCINDXCONF;
+ jam();
+ if (TnoOfPackedWordsLqh > 0) {
+ jam();
+ sendPackedSignalLqh(signal, Thostptr.p);
+ }//if
+ if (TnoOfWordsTCKEYCONF > 0) {
+ jam();
+ sendPackedTCKEYCONF(signal, Thostptr.p, (Uint32)Thostptr.i);
+ }//if
+ if (TnoOfWordsTCINDXCONF > 0) {
+ jam();
+ sendPackedTCINDXCONF(signal, Thostptr.p, (Uint32)Thostptr.i);
+ }//if
+ Thostptr.p->inPackedList = false;
+ }//for
+ cpackedListIndex = 0;
+ return;
+}//Dbtc::execSEND_PACKED()
+
+void
+Dbtc::updatePackedList(Signal* signal, HostRecord* ahostptr, Uint16 ahostIndex)
+{
+ if (ahostptr->inPackedList == false) {
+ UintR TpackedListIndex = cpackedListIndex;
+ jam();
+ ahostptr->inPackedList = true;
+ cpackedList[TpackedListIndex] = ahostIndex;
+ cpackedListIndex = TpackedListIndex + 1;
+ }//if
+}//Dbtc::updatePackedList()
+
+void Dbtc::sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr)
+{
+ UintR Tj;
+ UintR TnoOfWords = ahostptr->noOfPackedWordsLqh;
+ for (Tj = 0; Tj < TnoOfWords; Tj += 4) {
+ UintR sig0 = ahostptr->packedWordsLqh[Tj + 0];
+ UintR sig1 = ahostptr->packedWordsLqh[Tj + 1];
+ UintR sig2 = ahostptr->packedWordsLqh[Tj + 2];
+ UintR sig3 = ahostptr->packedWordsLqh[Tj + 3];
+ signal->theData[Tj + 0] = sig0;
+ signal->theData[Tj + 1] = sig1;
+ signal->theData[Tj + 2] = sig2;
+ signal->theData[Tj + 3] = sig3;
+ }//for
+ ahostptr->noOfPackedWordsLqh = 0;
+ sendSignal(ahostptr->hostLqhBlockRef,
+ GSN_PACKED_SIGNAL,
+ signal,
+ TnoOfWords,
+ JBB);
+}//Dbtc::sendPackedSignalLqh()
+
+void Dbtc::sendPackedTCKEYCONF(Signal* signal,
+ HostRecord * ahostptr,
+ UintR hostId)
+{
+ UintR Tj;
+ UintR TnoOfWords = ahostptr->noOfWordsTCKEYCONF;
+ BlockReference TBref = numberToRef(API_PACKED, hostId);
+ for (Tj = 0; Tj < ahostptr->noOfWordsTCKEYCONF; Tj += 4) {
+ UintR sig0 = ahostptr->packedWordsTCKEYCONF[Tj + 0];
+ UintR sig1 = ahostptr->packedWordsTCKEYCONF[Tj + 1];
+ UintR sig2 = ahostptr->packedWordsTCKEYCONF[Tj + 2];
+ UintR sig3 = ahostptr->packedWordsTCKEYCONF[Tj + 3];
+ signal->theData[Tj + 0] = sig0;
+ signal->theData[Tj + 1] = sig1;
+ signal->theData[Tj + 2] = sig2;
+ signal->theData[Tj + 3] = sig3;
+ }//for
+ ahostptr->noOfWordsTCKEYCONF = 0;
+ sendSignal(TBref, GSN_TCKEYCONF, signal, TnoOfWords, JBB);
+}//Dbtc::sendPackedTCKEYCONF()
+
+void Dbtc::sendPackedTCINDXCONF(Signal* signal,
+ HostRecord * ahostptr,
+ UintR hostId)
+{
+ UintR Tj;
+ UintR TnoOfWords = ahostptr->noOfWordsTCINDXCONF;
+ BlockReference TBref = numberToRef(API_PACKED, hostId);
+ for (Tj = 0; Tj < ahostptr->noOfWordsTCINDXCONF; Tj += 4) {
+ UintR sig0 = ahostptr->packedWordsTCINDXCONF[Tj + 0];
+ UintR sig1 = ahostptr->packedWordsTCINDXCONF[Tj + 1];
+ UintR sig2 = ahostptr->packedWordsTCINDXCONF[Tj + 2];
+ UintR sig3 = ahostptr->packedWordsTCINDXCONF[Tj + 3];
+ signal->theData[Tj + 0] = sig0;
+ signal->theData[Tj + 1] = sig1;
+ signal->theData[Tj + 2] = sig2;
+ signal->theData[Tj + 3] = sig3;
+ }//for
+ ahostptr->noOfWordsTCINDXCONF = 0;
+ sendSignal(TBref, GSN_TCINDXCONF, signal, TnoOfWords, JBB);
+}//Dbtc::sendPackedTCINDXCONF()
+
+/*
+4.3.11 DIVERIFY
+---------------
+*/
+/*****************************************************************************/
+/* D I V E R I F Y */
+/* */
+/*****************************************************************************/
+void Dbtc::diverify010Lab(Signal* signal)
+{
+ UintR TfirstfreeApiConnectCopy = cfirstfreeApiConnectCopy;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ signal->theData[0] = apiConnectptr.i;
+ if (ERROR_INSERTED(8022)) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ if (TfirstfreeApiConnectCopy != RNIL) {
+ seizeApiConnectCopy(signal);
+ regApiPtr->apiConnectstate = CS_PREPARE_TO_COMMIT;
+ /*-----------------------------------------------------------------------
+ * WE COME HERE ONLY IF THE TRANSACTION IS PREPARED ON ALL TC CONNECTIONS.
+ * THUS WE CAN START THE COMMIT PHASE BY SENDING DIVERIFY ON ALL TC
+ * CONNECTIONS AND THEN WHEN ALL DIVERIFYCONF HAVE BEEN RECEIVED THE
+ * COMMIT MESSAGE CAN BE SENT TO ALL INVOLVED PARTS.
+ *-----------------------------------------------------------------------*/
+ EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, signal, 1);
+ if (signal->theData[2] == 0) {
+ execDIVERIFYCONF(signal);
+ }
+ return;
+ } else {
+ /*-----------------------------------------------------------------------
+     * There were no free copy connections available. We must abort the
+     * transaction, since otherwise we would have a problem reporting the
+     * outcome to the application.
+     * This should rarely, if ever, happen, but if it does we do not want to
+     * crash. Nor do we want to add dedicated error-handling code for it,
+     * since such code would be hard to test and complex to maintain for a
+     * situation that should not occur.
+ *-----------------------------------------------------------------------*/
+ terrorCode = ZSEIZE_API_COPY_ERROR;
+ abortErrorLab(signal);
+ return;
+ }//if
+}//Dbtc::diverify010Lab()
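+
+/* Illustrative sketch of the pattern used above: DIVERIFYREQ is executed
+ * directly in DBDIH and, when the reply appears to be immediately available
+ * (signal->theData[2] == 0), the confirmation handler is invoked inline
+ * instead of waiting for a DIVERIFYCONF signal.  All names below are
+ * hypothetical simplifications, not actual DBTC or DBDIH symbols.
+ */
+namespace dbtc_sketch {
+struct VerifyReply { bool deferred; unsigned gci; };
+
+// Stand-in for EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, ...).
+static VerifyReply verifyDirect(unsigned /*apiConnect*/)
+{
+  VerifyReply r = { false, 0 };
+  return r;
+}
+
+// Stand-in for execDIVERIFYCONF().
+static void onVerifyConf(const VerifyReply& r) { (void)r.gci; }
+
+static void startVerify(unsigned apiConnect)
+{
+  VerifyReply r = verifyDirect(apiConnect);
+  if (!r.deferred)
+    onVerifyConf(r);   // handled inline: no extra signal round trip
+  // otherwise the confirmation arrives later as a normal DIVERIFYCONF signal
+}
+}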
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEIZE_API_CONNECT ------- */
+/* SEIZE CONNECT RECORD FOR A REQUEST */
+/* ------------------------------------------------------------------------- */
+void Dbtc::seizeApiConnectCopy(Signal* signal)
+{
+ ApiConnectRecordPtr locApiConnectptr;
+
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+
+ locApiConnectptr.i = cfirstfreeApiConnectCopy;
+ ptrCheckGuard(locApiConnectptr, TapiConnectFilesize, localApiConnectRecord);
+ cfirstfreeApiConnectCopy = locApiConnectptr.p->nextApiConnect;
+ locApiConnectptr.p->nextApiConnect = RNIL;
+ regApiPtr->apiCopyRecord = locApiConnectptr.i;
+ regApiPtr->triggerPending = false;
+ regApiPtr->isIndexOp = false;
+}//Dbtc::seizeApiConnectCopy()
+
+void Dbtc::execDIVERIFYCONF(Signal* signal)
+{
+ UintR TapiConnectptrIndex = signal->theData[0];
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ UintR Tgci = signal->theData[1];
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+ jamEntry();
+ if (ERROR_INSERTED(8017)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ if (TapiConnectptrIndex >= TapiConnectFilesize) {
+ TCKEY_abort(signal, 31);
+ return;
+ }//if
+ ApiConnectRecord * const regApiPtr =
+ &localApiConnectRecord[TapiConnectptrIndex];
+ ConnectionState TapiConnectstate = regApiPtr->apiConnectstate;
+ UintR TApifailureNr = regApiPtr->failureNr;
+ UintR Tfailure_nr = cfailure_nr;
+ apiConnectptr.i = TapiConnectptrIndex;
+ apiConnectptr.p = regApiPtr;
+ if (TapiConnectstate != CS_PREPARE_TO_COMMIT) {
+ TCKEY_abort(signal, 32);
+ return;
+ }//if
+ /*--------------------------------------------------------------------------
+ * THIS IS THE COMMIT POINT. IF WE ARRIVE HERE THE TRANSACTION IS COMMITTED
+ * UNLESS EVERYTHING CRASHES BEFORE WE HAVE BEEN ABLE TO REPORT THE COMMIT
+ * DECISION. THERE IS NO TURNING BACK FROM THIS DECISION FROM HERE ON.
+ * WE WILL INSERT THE TRANSACTION INTO ITS PROPER QUEUE OF
+ * TRANSACTIONS FOR ITS GLOBAL CHECKPOINT.
+ *-------------------------------------------------------------------------*/
+ if (TApifailureNr != Tfailure_nr) {
+ DIVER_node_fail_handling(signal, Tgci);
+ return;
+ }//if
+ commitGciHandling(signal, Tgci);
+
+ /**************************************************************************
+ * C O M M I T
+   * THE TRANSACTION HAS NOW BEEN VERIFIED AND THE COMMIT PHASE CAN START
+ **************************************************************************/
+
+ UintR TtcConnectptrIndex = regApiPtr->firstTcConnect;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+
+ regApiPtr->counter = regApiPtr->lqhkeyconfrec;
+ regApiPtr->apiConnectstate = CS_COMMITTING;
+ if (TtcConnectptrIndex >= TtcConnectFilesize) {
+ TCKEY_abort(signal, 33);
+ return;
+ }//if
+ TcConnectRecord* const regTcPtr = &localTcConnectRecord[TtcConnectptrIndex];
+ tcConnectptr.i = TtcConnectptrIndex;
+ tcConnectptr.p = regTcPtr;
+ commit020Lab(signal);
+}//Dbtc::execDIVERIFYCONF()
+
+/*--------------------------------------------------------------------------*/
+/* COMMIT_GCI_HANDLING */
+/* SET UP GLOBAL CHECKPOINT DATA STRUCTURE AT THE COMMIT POINT. */
+/*--------------------------------------------------------------------------*/
+void Dbtc::commitGciHandling(Signal* signal, UintR Tgci)
+{
+ GcpRecordPtr localGcpPointer;
+
+ UintR TgcpFilesize = cgcpFilesize;
+ UintR Tfirstgcp = cfirstgcp;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ GcpRecord *localGcpRecord = gcpRecord;
+
+ regApiPtr->globalcheckpointid = Tgci;
+ if (Tfirstgcp != RNIL) {
+ /* IF THIS GLOBAL CHECKPOINT ALREADY EXISTS */
+ localGcpPointer.i = Tfirstgcp;
+ ptrCheckGuard(localGcpPointer, TgcpFilesize, localGcpRecord);
+ do {
+ if (regApiPtr->globalcheckpointid == localGcpPointer.p->gcpId) {
+ jam();
+ gcpPtr.i = localGcpPointer.i;
+ gcpPtr.p = localGcpPointer.p;
+ linkApiToGcp(signal);
+ return;
+ } else {
+ localGcpPointer.i = localGcpPointer.p->nextGcp;
+ jam();
+ if (localGcpPointer.i != RNIL) {
+ jam();
+ ptrCheckGuard(localGcpPointer, TgcpFilesize, localGcpRecord);
+ continue;
+ }//if
+ }//if
+ seizeGcp(signal);
+ linkApiToGcp(signal);
+ return;
+ } while (1);
+ } else {
+ jam();
+ seizeGcp(signal);
+ linkApiToGcp(signal);
+ }//if
+}//Dbtc::commitGciHandling()
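+
+/* Illustrative sketch of the find-or-create walk performed above: all
+ * transactions committing in the same global checkpoint share one GCP
+ * record, and a new record is seized only when no record with the wanted
+ * gcpId exists.  GcpNode and findOrCreateGcp below are hypothetical
+ * simplifications, not actual DBTC symbols.
+ */
+namespace dbtc_sketch {
+struct GcpNode { unsigned gcpId; GcpNode* next; };
+
+static GcpNode* findOrCreateGcp(GcpNode*& head, GcpNode*& tail,
+                                GcpNode* freeNode, unsigned wantedGci)
+{
+  for (GcpNode* p = head; p != 0; p = p->next) {
+    if (p->gcpId == wantedGci)
+      return p;                       // checkpoint record already exists
+  }
+  freeNode->gcpId = wantedGci;        // corresponds to seizeGcp()
+  freeNode->next = 0;
+  if (head == 0)
+    head = freeNode;
+  else
+    tail->next = freeNode;
+  tail = freeNode;
+  return freeNode;
+}
+}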
+
+/* --------------------------------------------------------------------------*/
+/* -LINK AN API CONNECT RECORD IN STATE PREPARED INTO THE LIST WITH GLOBAL - */
+/* CHECKPOINTS. WHEN THE TRANSACTION IS COMPLETED THE API CONNECT RECORD IS   */
+/* LINKED OUT OF THE LIST. */
+/*---------------------------------------------------------------------------*/
+void Dbtc::linkApiToGcp(Signal* signal)
+{
+ ApiConnectRecordPtr localApiConnectptr;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ GcpRecord * const regGcpPtr = gcpPtr.p;
+ UintR TapiConnectptrIndex = apiConnectptr.i;
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+ regApiPtr->nextGcpConnect = RNIL;
+ if (regGcpPtr->firstApiConnect == RNIL) {
+ regGcpPtr->firstApiConnect = TapiConnectptrIndex;
+ jam();
+ } else {
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ localApiConnectptr.i = regGcpPtr->lastApiConnect;
+ jam();
+ ptrCheckGuard(localApiConnectptr,
+ TapiConnectFilesize, localApiConnectRecord);
+ localApiConnectptr.p->nextGcpConnect = TapiConnectptrIndex;
+ }//if
+ UintR TlastApiConnect = regGcpPtr->lastApiConnect;
+ regApiPtr->gcpPointer = gcpPtr.i;
+ regApiPtr->prevGcpConnect = TlastApiConnect;
+ regGcpPtr->lastApiConnect = TapiConnectptrIndex;
+}//Dbtc::linkApiToGcp()
+
+void Dbtc::seizeGcp(Signal* signal)
+{
+ GcpRecordPtr tmpGcpPointer;
+ GcpRecordPtr localGcpPointer;
+
+ UintR Tfirstgcp = cfirstgcp;
+ UintR Tglobalcheckpointid = apiConnectptr.p->globalcheckpointid;
+ UintR TgcpFilesize = cgcpFilesize;
+ GcpRecord *localGcpRecord = gcpRecord;
+
+ localGcpPointer.i = cfirstfreeGcp;
+ ptrCheckGuard(localGcpPointer, TgcpFilesize, localGcpRecord);
+ UintR TfirstfreeGcp = localGcpPointer.p->nextGcp;
+ localGcpPointer.p->gcpId = Tglobalcheckpointid;
+ localGcpPointer.p->nextGcp = RNIL;
+ localGcpPointer.p->firstApiConnect = RNIL;
+ localGcpPointer.p->lastApiConnect = RNIL;
+ localGcpPointer.p->gcpNomoretransRec = ZFALSE;
+ cfirstfreeGcp = TfirstfreeGcp;
+
+ if (Tfirstgcp == RNIL) {
+ jam();
+ cfirstgcp = localGcpPointer.i;
+ } else {
+ tmpGcpPointer.i = clastgcp;
+ jam();
+ ptrCheckGuard(tmpGcpPointer, TgcpFilesize, localGcpRecord);
+ tmpGcpPointer.p->nextGcp = localGcpPointer.i;
+ }//if
+ clastgcp = localGcpPointer.i;
+ gcpPtr = localGcpPointer;
+}//Dbtc::seizeGcp()
+
+/*---------------------------------------------------------------------------*/
+// Send COMMIT messages to all LQH operations involved in the transaction.
+/*---------------------------------------------------------------------------*/
+void Dbtc::commit020Lab(Signal* signal)
+{
+ TcConnectRecordPtr localTcConnectptr;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+
+ localTcConnectptr.p = tcConnectptr.p;
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ UintR Tcount = 0;
+ do {
+ /*-----------------------------------------------------------------------
+ * WE ARE NOW READY TO RELEASE ALL OPERATIONS ON THE LQH
+ *-----------------------------------------------------------------------*/
+ /* *********< */
+ /* COMMIT < */
+ /* *********< */
+ localTcConnectptr.i = localTcConnectptr.p->nextTcConnect;
+ localTcConnectptr.p->tcConnectstate = OS_COMMITTING;
+ sendCommitLqh(signal, localTcConnectptr.p);
+
+ if (localTcConnectptr.i != RNIL) {
+ Tcount = Tcount + 1;
+ if (Tcount < 16) {
+ ptrCheckGuard(localTcConnectptr,
+ TtcConnectFilesize, localTcConnectRecord);
+ jam();
+ continue;
+ } else {
+ jam();
+ if (ERROR_INSERTED(8014)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ signal->theData[0] = TcContinueB::ZSEND_COMMIT_LOOP;
+ signal->theData[1] = apiConnectptr.i;
+ signal->theData[2] = localTcConnectptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }//if
+ } else {
+ jam();
+ regApiPtr->apiConnectstate = CS_COMMIT_SENT;
+ return;
+ }//if
+ } while (1);
+}//Dbtc::commit020Lab()
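+
+/* Illustrative sketch of the real-time-break pattern used above: at most 16
+ * COMMIT requests are sent per invocation and, if operations remain, a
+ * CONTINUEB signal to the block itself resumes the loop later, so one large
+ * transaction cannot monopolise the scheduler.  All names below are
+ * hypothetical simplifications, not actual DBTC symbols.
+ */
+namespace dbtc_sketch {
+static const unsigned MAX_OPS_PER_BREAK = 16;
+
+static void processOne(unsigned /*op*/) {}           // stand-in for sendCommitLqh()
+static void scheduleContinue(unsigned /*nextOp*/) {} // stand-in for CONTINUEB to self
+
+static void processBatch(const unsigned* ops, unsigned count, unsigned start)
+{
+  unsigned done = 0;
+  for (unsigned i = start; i < count; i++) {
+    processOne(ops[i]);
+    if (++done >= MAX_OPS_PER_BREAK && i + 1 < count) {
+      scheduleContinue(i + 1);   // resume later instead of hogging the thread
+      return;
+    }
+  }
+}
+}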
+
+void Dbtc::sendCommitLqh(Signal* signal,
+ TcConnectRecord * const regTcPtr)
+{
+ HostRecordPtr Thostptr;
+ UintR ThostFilesize = chostFilesize;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ Thostptr.i = regTcPtr->lastLqhNodeId;
+ ptrCheckGuard(Thostptr, ThostFilesize, hostRecord);
+ if (Thostptr.p->noOfPackedWordsLqh > 21) {
+ jam();
+ sendPackedSignalLqh(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+ UintR Tindex = Thostptr.p->noOfPackedWordsLqh;
+ UintR* TDataPtr = &Thostptr.p->packedWordsLqh[Tindex];
+ UintR Tdata1 = regTcPtr->lastLqhCon;
+ UintR Tdata2 = regApiPtr->globalcheckpointid;
+ UintR Tdata3 = regApiPtr->transid[0];
+ UintR Tdata4 = regApiPtr->transid[1];
+
+ TDataPtr[0] = Tdata1 | (ZCOMMIT << 28);
+ TDataPtr[1] = Tdata2;
+ TDataPtr[2] = Tdata3;
+ TDataPtr[3] = Tdata4;
+ Thostptr.p->noOfPackedWordsLqh = Tindex + 4;
+}//Dbtc::sendCommitLqh()
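+
+/* Illustrative sketch of how the first packed word above appears to be laid
+ * out: the packed-signal kind in the top four bits (ZCOMMIT << 28) and the
+ * LQH connection id in the low 28 bits, followed by gci, transid[0] and
+ * transid[1].  The helpers below are hypothetical; the explicit mask is
+ * added for clarity and is not taken from the code above.
+ */
+namespace dbtc_sketch {
+static const unsigned KIND_SHIFT = 28;
+static const unsigned CONN_MASK  = (1u << KIND_SHIFT) - 1;
+
+static unsigned packHeader(unsigned kind, unsigned connection)
+{
+  return (kind << KIND_SHIFT) | (connection & CONN_MASK);
+}
+static unsigned packedKind(unsigned word)       { return word >> KIND_SHIFT; }
+static unsigned packedConnection(unsigned word) { return word & CONN_MASK; }
+}
+/* For example, packHeader(kind, lastLqhCon) corresponds to the expression
+ * "Tdata1 | (ZCOMMIT << 28)" written into packedWordsLqh above.
+ */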
+
+void
+Dbtc::DIVER_node_fail_handling(Signal* signal, UintR Tgci)
+{
+ /*------------------------------------------------------------------------
+ * AT LEAST ONE NODE HAS FAILED DURING THE TRANSACTION. WE NEED TO CHECK IF
+ * THIS IS SO SERIOUS THAT WE NEED TO ABORT THE TRANSACTION. IN BOTH THE
+   * ABORT AND THE COMMIT CASES WE NEED TO SET UP THE DATA FOR THE
+   * ABORT/COMMIT/COMPLETE HANDLING THAT IS ALSO USED BY THE TAKE-OVER
+   * FUNCTIONALITY.
+ *------------------------------------------------------------------------*/
+ tabortInd = ZFALSE;
+ setupFailData(signal);
+ if (tabortInd == ZFALSE) {
+ jam();
+ commitGciHandling(signal, Tgci);
+ toCommitHandlingLab(signal);
+ } else {
+ jam();
+ apiConnectptr.p->returnsignal = RS_TCROLLBACKREP;
+ apiConnectptr.p->returncode = ZNODEFAIL_BEFORE_COMMIT;
+ toAbortHandlingLab(signal);
+ }//if
+ return;
+}//Dbtc::DIVER_node_fail_handling()
+
+
+/* ------------------------------------------------------------------------- */
+/* ------- ENTER COMMITTED ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dbtc::execCOMMITTED(Signal* signal)
+{
+ TcConnectRecordPtr localTcConnectptr;
+ ApiConnectRecordPtr localApiConnectptr;
+
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+#ifdef ERROR_INSERT
+ if (ERROR_INSERTED(8018)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ if (ERROR_INSERTED(8030)) {
+ systemErrorLab(signal);
+ }//if
+ if (ERROR_INSERTED(8025)) {
+ SET_ERROR_INSERT_VALUE(8026);
+ return;
+ }//if
+ if (ERROR_INSERTED(8041)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMMITTED, signal, 2000, 3);
+ return;
+ }//if
+ if (ERROR_INSERTED(8042)) {
+ SET_ERROR_INSERT_VALUE(8046);
+ sendSignalWithDelay(cownref, GSN_COMMITTED, signal, 2000, 4);
+ return;
+ }//if
+#endif
+ localTcConnectptr.i = signal->theData[0];
+ jamEntry();
+ ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+ localApiConnectptr.i = localTcConnectptr.p->apiConnect;
+ if (localTcConnectptr.p->tcConnectstate != OS_COMMITTING) {
+ warningReport(signal, 4);
+ return;
+ }//if
+ ptrCheckGuard(localApiConnectptr, TapiConnectFilesize,
+ localApiConnectRecord);
+ UintR Tcounter = localApiConnectptr.p->counter - 1;
+ ConnectionState TapiConnectstate = localApiConnectptr.p->apiConnectstate;
+ UintR Tdata1 = localApiConnectptr.p->transid[0] - signal->theData[1];
+ UintR Tdata2 = localApiConnectptr.p->transid[1] - signal->theData[2];
+ Tdata1 = Tdata1 | Tdata2;
+ bool TcheckCondition =
+ (TapiConnectstate != CS_COMMIT_SENT) || (Tcounter != 0);
+
+ setApiConTimer(localApiConnectptr.i, ctcTimer, __LINE__);
+ localApiConnectptr.p->counter = Tcounter;
+ localTcConnectptr.p->tcConnectstate = OS_COMMITTED;
+ if (Tdata1 != 0) {
+ warningReport(signal, 5);
+ return;
+ }//if
+ if (TcheckCondition) {
+ jam();
+ /*-------------------------------------------------------*/
+ // We have not sent all COMMIT requests yet. We could be
+ // in the state that all sent are COMMITTED but we are
+ // still waiting for a CONTINUEB to send the rest of the
+ // COMMIT requests.
+ /*-------------------------------------------------------*/
+ return;
+ }//if
+ if (ERROR_INSERTED(8020)) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ /*-------------------------------------------------------*/
+  /* THE ENTIRE TRANSACTION IS NOW COMMITTED              */
+ /* NOW WE NEED TO SEND THE RESPONSE TO THE APPLICATION. */
+ /* THE APPLICATION CAN THEN REUSE THE API CONNECTION AND */
+ /* THEREFORE WE NEED TO MOVE THE API CONNECTION TO A */
+ /* NEW API CONNECT RECORD. */
+ /*-------------------------------------------------------*/
+
+ apiConnectptr = localApiConnectptr;
+ sendApiCommit(signal);
+
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ localTcConnectptr.i = regApiPtr->firstTcConnect;
+ UintR Tlqhkeyconfrec = regApiPtr->lqhkeyconfrec;
+ ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+ regApiPtr->counter = Tlqhkeyconfrec;
+
+ tcConnectptr = localTcConnectptr;
+ complete010Lab(signal);
+ return;
+
+}//Dbtc::execCOMMITTED()
+
+/*-------------------------------------------------------*/
+/* SEND_API_COMMIT */
+/* SEND COMMIT DECISION TO THE API. */
+/*-------------------------------------------------------*/
+void Dbtc::sendApiCommit(Signal* signal)
+{
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+
+ if (regApiPtr->returnsignal == RS_TCKEYCONF) {
+ sendtckeyconf(signal, 1);
+ } else if (regApiPtr->returnsignal == RS_TC_COMMITCONF) {
+ jam();
+ TcCommitConf * const commitConf = (TcCommitConf *)&signal->theData[0];
+ if(regApiPtr->commitAckMarker == RNIL){
+ jam();
+ commitConf->apiConnectPtr = regApiPtr->ndbapiConnect;
+ } else {
+ jam();
+ commitConf->apiConnectPtr = regApiPtr->ndbapiConnect | 1;
+ }
+ commitConf->transId1 = regApiPtr->transid[0];
+ commitConf->transId2 = regApiPtr->transid[1];
+ commitConf->gci = regApiPtr->globalcheckpointid;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TC_COMMITCONF, signal,
+ TcCommitConf::SignalLength, JBB);
+ } else if (regApiPtr->returnsignal == RS_NO_RETURN) {
+ jam();
+ } else {
+ TCKEY_abort(signal, 37);
+ return;
+ }//if
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ UintR TcommitCount = c_counters.ccommitCount;
+ UintR TapiIndex = apiConnectptr.i;
+ UintR TnewApiIndex = regApiPtr->apiCopyRecord;
+ UintR TapiFailState = regApiPtr->apiFailState;
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+ tmpApiConnectptr.p = apiConnectptr.p;
+ tmpApiConnectptr.i = TapiIndex;
+ c_counters.ccommitCount = TcommitCount + 1;
+ apiConnectptr.i = TnewApiIndex;
+ ptrCheckGuard(apiConnectptr, TapiConnectFilesize, localApiConnectRecord);
+ copyApi(signal);
+ if (TapiFailState != ZTRUE) {
+ return;
+ } else {
+ jam();
+ handleApiFailState(signal, tmpApiConnectptr.i);
+ return;
+ }//if
+}//Dbtc::sendApiCommit()
+
+/* ========================================================================= */
+/* ======= COPY_API ======= */
+/* COPY API RECORD ALSO RESET THE OLD API RECORD SO THAT IT */
+/* IS PREPARED TO RECEIVE A NEW TRANSACTION. */
+/*===========================================================================*/
+void Dbtc::copyApi(Signal* signal)
+{
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ ApiConnectRecord * const regTmpApiPtr = tmpApiConnectptr.p;
+
+ UintR TndbapiConnect = regTmpApiPtr->ndbapiConnect;
+ UintR TfirstTcConnect = regTmpApiPtr->firstTcConnect;
+ UintR Ttransid1 = regTmpApiPtr->transid[0];
+ UintR Ttransid2 = regTmpApiPtr->transid[1];
+ UintR Tlqhkeyconfrec = regTmpApiPtr->lqhkeyconfrec;
+ UintR TgcpPointer = regTmpApiPtr->gcpPointer;
+ UintR TgcpFilesize = cgcpFilesize;
+ UintR TcommitAckMarker = regTmpApiPtr->commitAckMarker;
+ GcpRecord *localGcpRecord = gcpRecord;
+
+ regApiPtr->ndbapiBlockref = regTmpApiPtr->ndbapiBlockref;
+ regApiPtr->ndbapiConnect = TndbapiConnect;
+ regApiPtr->firstTcConnect = TfirstTcConnect;
+ regApiPtr->apiConnectstate = CS_COMPLETING;
+ regApiPtr->transid[0] = Ttransid1;
+ regApiPtr->transid[1] = Ttransid2;
+ regApiPtr->lqhkeyconfrec = Tlqhkeyconfrec;
+ regApiPtr->commitAckMarker = TcommitAckMarker;
+
+ gcpPtr.i = TgcpPointer;
+ ptrCheckGuard(gcpPtr, TgcpFilesize, localGcpRecord);
+ unlinkApiConnect(signal);
+ linkApiToGcp(signal);
+ setApiConTimer(tmpApiConnectptr.i, 0, __LINE__);
+ regTmpApiPtr->apiConnectstate = CS_CONNECTED;
+ regTmpApiPtr->commitAckMarker = RNIL;
+ regTmpApiPtr->firstTcConnect = RNIL;
+ regTmpApiPtr->lastTcConnect = RNIL;
+}//Dbtc::copyApi()
+
+void Dbtc::unlinkApiConnect(Signal* signal)
+{
+ ApiConnectRecordPtr localApiConnectptr;
+ ApiConnectRecord * const regTmpApiPtr = tmpApiConnectptr.p;
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ UintR TprevGcpConnect = regTmpApiPtr->prevGcpConnect;
+ UintR TnextGcpConnect = regTmpApiPtr->nextGcpConnect;
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+ if (TprevGcpConnect == RNIL) {
+ gcpPtr.p->firstApiConnect = TnextGcpConnect;
+ jam();
+ } else {
+ localApiConnectptr.i = TprevGcpConnect;
+ jam();
+ ptrCheckGuard(localApiConnectptr,
+ TapiConnectFilesize, localApiConnectRecord);
+ localApiConnectptr.p->nextGcpConnect = TnextGcpConnect;
+ }//if
+ if (TnextGcpConnect == RNIL) {
+ gcpPtr.p->lastApiConnect = TprevGcpConnect;
+ jam();
+ } else {
+ localApiConnectptr.i = TnextGcpConnect;
+ jam();
+ ptrCheckGuard(localApiConnectptr,
+ TapiConnectFilesize, localApiConnectRecord);
+ localApiConnectptr.p->prevGcpConnect = TprevGcpConnect;
+ }//if
+}//Dbtc::unlinkApiConnect()
+
+void Dbtc::complete010Lab(Signal* signal)
+{
+ TcConnectRecordPtr localTcConnectptr;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+
+ localTcConnectptr.p = tcConnectptr.p;
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ UintR TapiConnectptrIndex = apiConnectptr.i;
+ UintR Tcount = 0;
+ do {
+ localTcConnectptr.p->apiConnect = TapiConnectptrIndex;
+ localTcConnectptr.p->tcConnectstate = OS_COMPLETING;
+
+ /* ************ */
+ /* COMPLETE < */
+ /* ************ */
+ const Uint32 nextTcConnect = localTcConnectptr.p->nextTcConnect;
+ sendCompleteLqh(signal, localTcConnectptr.p);
+ localTcConnectptr.i = nextTcConnect;
+ if (localTcConnectptr.i != RNIL) {
+ Tcount++;
+ if (Tcount < 16) {
+ ptrCheckGuard(localTcConnectptr,
+ TtcConnectFilesize, localTcConnectRecord);
+ jam();
+ continue;
+ } else {
+ jam();
+ if (ERROR_INSERTED(8013)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ signal->theData[0] = TcContinueB::ZSEND_COMPLETE_LOOP;
+ signal->theData[1] = apiConnectptr.i;
+ signal->theData[2] = localTcConnectptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }//if
+ } else {
+ jam();
+ regApiPtr->apiConnectstate = CS_COMPLETE_SENT;
+ return;
+ }//if
+ } while (1);
+}//Dbtc::complete010Lab()
+
+void Dbtc::sendCompleteLqh(Signal* signal,
+ TcConnectRecord * const regTcPtr)
+{
+ HostRecordPtr Thostptr;
+ UintR ThostFilesize = chostFilesize;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ Thostptr.i = regTcPtr->lastLqhNodeId;
+ ptrCheckGuard(Thostptr, ThostFilesize, hostRecord);
+ if (Thostptr.p->noOfPackedWordsLqh > 22) {
+ jam();
+ sendPackedSignalLqh(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+
+ UintR Tindex = Thostptr.p->noOfPackedWordsLqh;
+ UintR* TDataPtr = &Thostptr.p->packedWordsLqh[Tindex];
+ UintR Tdata1 = regTcPtr->lastLqhCon | (ZCOMPLETE << 28);
+ UintR Tdata2 = regApiPtr->transid[0];
+ UintR Tdata3 = regApiPtr->transid[1];
+
+ TDataPtr[0] = Tdata1;
+ TDataPtr[1] = Tdata2;
+ TDataPtr[2] = Tdata3;
+ Thostptr.p->noOfPackedWordsLqh = Tindex + 3;
+}//Dbtc::sendCompleteLqh()
+
+void
+Dbtc::execTC_COMMIT_ACK(Signal* signal){
+ jamEntry();
+
+ CommitAckMarker key;
+ key.transid1 = signal->theData[0];
+ key.transid2 = signal->theData[1];
+
+ CommitAckMarkerPtr removedMarker;
+ m_commitAckMarkerHash.release(removedMarker, key);
+ if (removedMarker.i == RNIL) {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//if
+ sendRemoveMarkers(signal, removedMarker.p);
+}
+
+void
+Dbtc::sendRemoveMarkers(Signal* signal, const CommitAckMarker * marker){
+ jam();
+ const Uint32 noOfLqhs = marker->noOfLqhs;
+ const Uint32 transId1 = marker->transid1;
+ const Uint32 transId2 = marker->transid2;
+
+ for(Uint32 i = 0; i<noOfLqhs; i++){
+ jam();
+ const NodeId nodeId = marker->lqhNodeId[i];
+ sendRemoveMarker(signal, nodeId, transId1, transId2);
+ }
+}
+
+void
+Dbtc::sendRemoveMarker(Signal* signal,
+ NodeId nodeId,
+ Uint32 transid1,
+ Uint32 transid2){
+ /**
+ * Seize host ptr
+ */
+ HostRecordPtr hostPtr;
+ const UintR ThostFilesize = chostFilesize;
+ hostPtr.i = nodeId;
+ ptrCheckGuard(hostPtr, ThostFilesize, hostRecord);
+
+ if (hostPtr.p->noOfPackedWordsLqh > (25 - 3)){
+ jam();
+ sendPackedSignalLqh(signal, hostPtr.p);
+ } else {
+ jam();
+ updatePackedList(signal, hostPtr.p, hostPtr.i);
+ }//if
+
+ UintR numWord = hostPtr.p->noOfPackedWordsLqh;
+ UintR* dataPtr = &hostPtr.p->packedWordsLqh[numWord];
+
+ dataPtr[0] = (ZREMOVE_MARKER << 28);
+ dataPtr[1] = transid1;
+ dataPtr[2] = transid2;
+ hostPtr.p->noOfPackedWordsLqh = numWord + 3;
+}
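+
+/* Illustrative sketch of the flush rule shared by sendCommitLqh (4 words,
+ * flush above 21 used words), sendCompleteLqh (3 words, flush above 22) and
+ * sendRemoveMarker above: the checks are consistent with a per-host packed
+ * buffer of 25 words that is flushed whenever the next packed signal would
+ * not fit.  PackedBuffer, appendPacked and PACKED_CAPACITY are hypothetical
+ * simplifications, not actual DBTC symbols.
+ */
+namespace dbtc_sketch {
+static const unsigned PACKED_CAPACITY = 25;   // assumed per-host buffer size
+
+struct PackedBuffer { unsigned used; unsigned words[PACKED_CAPACITY]; };
+
+// Stand-in for sendPackedSignalLqh(): send the buffered words, then reset.
+static void flushBuffer(PackedBuffer& b) { b.used = 0; }
+
+static void appendPacked(PackedBuffer& b, const unsigned* sig, unsigned n)
+{
+  if (b.used + n > PACKED_CAPACITY)
+    flushBuffer(b);                  // same effect as the "> 21" / "> 22" checks
+  for (unsigned i = 0; i < n; i++)
+    b.words[b.used + i] = sig[i];
+  b.used += n;
+}
+}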
+
+void Dbtc::execCOMPLETED(Signal* signal)
+{
+ TcConnectRecordPtr localTcConnectptr;
+ ApiConnectRecordPtr localApiConnectptr;
+
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+#ifdef ERROR_INSERT
+ if (ERROR_INSERTED(8031)) {
+ systemErrorLab(signal);
+ }//if
+ if (ERROR_INSERTED(8019)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ if (ERROR_INSERTED(8027)) {
+ SET_ERROR_INSERT_VALUE(8028);
+ return;
+ }//if
+ if (ERROR_INSERTED(8043)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMPLETED, signal, 2000, 3);
+ return;
+ }//if
+ if (ERROR_INSERTED(8044)) {
+ SET_ERROR_INSERT_VALUE(8047);
+ sendSignalWithDelay(cownref, GSN_COMPLETED, signal, 2000, 3);
+ return;
+ }//if
+#endif
+ localTcConnectptr.i = signal->theData[0];
+ jamEntry();
+ ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+ bool Tcond1 = (localTcConnectptr.p->tcConnectstate != OS_COMPLETING);
+ localApiConnectptr.i = localTcConnectptr.p->apiConnect;
+ if (Tcond1) {
+ warningReport(signal, 6);
+ return;
+ }//if
+ ptrCheckGuard(localApiConnectptr, TapiConnectFilesize,
+ localApiConnectRecord);
+ UintR Tdata1 = localApiConnectptr.p->transid[0] - signal->theData[1];
+ UintR Tdata2 = localApiConnectptr.p->transid[1] - signal->theData[2];
+ UintR Tcounter = localApiConnectptr.p->counter - 1;
+ ConnectionState TapiConnectstate = localApiConnectptr.p->apiConnectstate;
+ Tdata1 = Tdata1 | Tdata2;
+ bool TcheckCondition =
+ (TapiConnectstate != CS_COMPLETE_SENT) || (Tcounter != 0);
+ if (Tdata1 != 0) {
+ warningReport(signal, 7);
+ return;
+ }//if
+ setApiConTimer(localApiConnectptr.i, ctcTimer, __LINE__);
+ localApiConnectptr.p->counter = Tcounter;
+ localTcConnectptr.p->tcConnectstate = OS_COMPLETED;
+ localTcConnectptr.p->noOfNodes = 0; // == releaseNodes(signal)
+ if (TcheckCondition) {
+ jam();
+ /*-------------------------------------------------------*/
+ // We have not sent all COMPLETE requests yet. We could be
+ // in the state that all sent are COMPLETED but we are
+ // still waiting for a CONTINUEB to send the rest of the
+ // COMPLETE requests.
+ /*-------------------------------------------------------*/
+ return;
+ }//if
+ if (ERROR_INSERTED(8021)) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ apiConnectptr = localApiConnectptr;
+ releaseTransResources(signal);
+}//Dbtc::execCOMPLETED()
+
+/*---------------------------------------------------------------------------*/
+/* RELEASE_TRANS_RESOURCES */
+/* RELEASE ALL RESOURCES THAT ARE CONNECTED TO THIS TRANSACTION. */
+/*---------------------------------------------------------------------------*/
+void Dbtc::releaseTransResources(Signal* signal)
+{
+ TcConnectRecordPtr localTcConnectptr;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+
+ localTcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ do {
+ jam();
+ ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+ UintR rtrTcConnectptrIndex = localTcConnectptr.p->nextTcConnect;
+ tcConnectptr.i = localTcConnectptr.i;
+ tcConnectptr.p = localTcConnectptr.p;
+ localTcConnectptr.i = rtrTcConnectptrIndex;
+ releaseTcCon();
+ } while (localTcConnectptr.i != RNIL);
+ handleGcp(signal);
+ releaseFiredTriggerData(&apiConnectptr.p->theFiredTriggers);
+ releaseAllSeizedIndexOperations(apiConnectptr.p);
+ releaseApiConCopy(signal);
+}//Dbtc::releaseTransResources()
+
+/* *********************************************************************>> */
+/* MODULE: HANDLE_GCP */
+/* DESCRIPTION: HANDLES GLOBAL CHECKPOINT HANDLING AT THE COMPLETION */
+/* OF THE COMMIT PHASE AND THE ABORT PHASE. WE MUST ENSURE THAT TC */
+/* SENDS GCP_TCFINISHED WHEN ALL TRANSACTIONS BELONGING TO A CERTAIN */
+/* GLOBAL CHECKPOINT HAVE COMPLETED. */
+/* *********************************************************************>> */
+void Dbtc::handleGcp(Signal* signal)
+{
+ GcpRecord *localGcpRecord = gcpRecord;
+ GcpRecordPtr localGcpPtr;
+ UintR TapiConnectptrIndex = apiConnectptr.i;
+ UintR TgcpFilesize = cgcpFilesize;
+ localGcpPtr.i = apiConnectptr.p->gcpPointer;
+ tmpApiConnectptr.i = TapiConnectptrIndex;
+ tmpApiConnectptr.p = apiConnectptr.p;
+ ptrCheckGuard(localGcpPtr, TgcpFilesize, localGcpRecord);
+ gcpPtr.i = localGcpPtr.i;
+ gcpPtr.p = localGcpPtr.p;
+ unlinkApiConnect(signal);
+ if (localGcpPtr.p->firstApiConnect == RNIL) {
+ if (localGcpPtr.p->gcpNomoretransRec == ZTRUE) {
+ jam();
+ tcheckGcpId = localGcpPtr.p->gcpId;
+ gcpTcfinished(signal);
+ unlinkGcp(signal);
+ }//if
+ }//if
+}//Dbtc::handleGcp()
+
+void Dbtc::releaseApiConCopy(Signal* signal)
+{
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ UintR TfirstfreeApiConnectCopyOld = cfirstfreeApiConnectCopy;
+ cfirstfreeApiConnectCopy = apiConnectptr.i;
+ regApiPtr->nextApiConnect = TfirstfreeApiConnectCopyOld;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ regApiPtr->apiConnectstate = CS_RESTART;
+}//Dbtc::releaseApiConCopy()
+
+/* ========================================================================= */
+/* ------- RELEASE ALL RECORDS CONNECTED TO A DIRTY WRITE OPERATION ------- */
+/* ========================================================================= */
+void Dbtc::releaseDirtyWrite(Signal* signal)
+{
+ unlinkReadyTcCon(signal);
+ releaseTcCon();
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
+ if (regApiPtr->firstTcConnect == RNIL) {
+ jam();
+ regApiPtr->apiConnectstate = CS_CONNECTED;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ sendtckeyconf(signal, 1);
+ }//if
+ }//if
+}//Dbtc::releaseDirtyWrite()
+
+/*****************************************************************************
+ * L Q H K E Y R E F
+ * WHEN LQHKEYREF IS RECEIVED DBTC WILL CHECK IF COMMIT FLAG WAS SENT FROM THE
+ * APPLICATION. IF SO, THE WHOLE TRANSACTION WILL BE ROLLED BACK AND SIGNAL
+ * TCROLLBACKREP WILL BE SENT TO THE API.
+ *
+ * OTHERWISE TC WILL CHECK THE ERRORCODE. IF THE ERRORCODE IS INDICATING THAT
+ * THE "ROW IS NOT FOUND" FOR UPDATE/READ/DELETE OPERATIONS AND "ROW ALREADY
+ * EXISTS" FOR INSERT OPERATIONS, DBTC WILL RELEASE THE OPERATION AND THEN
+ * SEND THE RETURN SIGNAL TCKEYREF TO THE USER. THE USER THEN HAS TO SEND
+ * SIGNAL TC_COMMITREQ OR TC_ROLLBACKREQ TO CONCLUDE THE TRANSACTION.
+ * IF ANY TCKEYREQ WITH COMMIT IS RECEIVED AND API_CONNECTSTATE EQUALS
+ * "REC_LQHREFUSE",
+ * THE OPERATION WILL BE TREATED AS AN OPERATION WITHOUT COMMIT. WHEN ANY
+ * OTHER FAULTCODE IS RECEIVED THE WHOLE TRANSACTION MUST BE ROLLED BACK
+ *****************************************************************************/
+void Dbtc::execLQHKEYREF(Signal* signal)
+{
+ const LqhKeyRef * const lqhKeyRef = (LqhKeyRef *)signal->getDataPtr();
+ jamEntry();
+
+ UintR compare_transid1, compare_transid2;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ /*-------------------------------------------------------------------------
+ *
+   * RELEASE NODE BUFFER(S) TO INDICATE THAT THIS OPERATION HAS NO
+   * TRANSACTION PARTS ACTIVE ANYMORE.
+   * LQHKEYREF HAS CLEARED ALL PARTS ON ITS PATH BACK TO TC.
+ *-------------------------------------------------------------------------*/
+ if (lqhKeyRef->connectPtr < TtcConnectFilesize) {
+ /*-----------------------------------------------------------------------
+ * WE HAVE TO CHECK THAT THE TRANSACTION IS STILL VALID. FIRST WE CHECK
+ * THAT THE LQH IS STILL CONNECTED TO A TC, IF THIS HOLDS TRUE THEN THE
+ * TC MUST BE CONNECTED TO AN API CONNECT RECORD.
+ * WE MUST ENSURE THAT THE TRANSACTION ID OF THIS API CONNECT
+ * RECORD IS STILL THE SAME AS THE ONE LQHKEYREF REFERS TO.
+ * IF NOT SIMPLY EXIT AND FORGET THE SIGNAL SINCE THE TRANSACTION IS
+ * ALREADY COMPLETED (ABORTED).
+ *-----------------------------------------------------------------------*/
+ tcConnectptr.i = lqhKeyRef->connectPtr;
+ Uint32 errCode = terrorCode = lqhKeyRef->errorCode;
+ ptrAss(tcConnectptr, tcConnectRecord);
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->tcConnectstate == OS_OPERATING) {
+ apiConnectptr.i = regTcPtr->apiConnect;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ compare_transid1 = regApiPtr->transid[0] ^ lqhKeyRef->transId1;
+ compare_transid2 = regApiPtr->transid[1] ^ lqhKeyRef->transId2;
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ warningReport(signal, 25);
+ return;
+ }//if
+
+ const ConnectionState state = regApiPtr->apiConnectstate;
+ const Uint32 triggeringOp = regTcPtr->triggeringOperation;
+ if (triggeringOp != RNIL) {
+ jam();
+      // This operation was created by a trigger executing operation
+ TcConnectRecordPtr opPtr;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+
+ const Uint32 currentIndexId = regTcPtr->currentIndexId;
+ ndbassert(currentIndexId != 0); // Only index triggers so far
+
+ opPtr.i = triggeringOp;
+ ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
+
+ // The operation executed an index trigger
+ const Uint32 opType = regTcPtr->operation;
+ if (errCode == ZALREADYEXIST)
+ errCode = terrorCode = ZNOTUNIQUE;
+ else if (!(opType == ZDELETE && errCode == ZNOT_FOUND)) {
+ jam();
+ /**
+ * "Normal path"
+ */
+ // fall-through
+ } else {
+ jam();
+ /** ZDELETE && NOT_FOUND */
+ TcIndexData* indexData = c_theIndexes.getPtr(currentIndexId);
+ if(indexData->indexState == IS_BUILDING && state != CS_ABORTING){
+ jam();
+ /**
+ * Ignore error
+ */
+ regApiPtr->lqhkeyconfrec++;
+
+ unlinkReadyTcCon(signal);
+ releaseTcCon();
+
+ opPtr.p->triggerExecutionCount--;
+ if (opPtr.p->triggerExecutionCount == 0) {
+ /**
+ * We have completed current trigger execution
+ * Continue triggering operation
+ */
+ jam();
+ continueTriggeringOp(signal, opPtr.p);
+ }
+ return;
+ }
+ }
+ }
+
+ Uint32 marker = regTcPtr->commitAckMarker;
+ markOperationAborted(regApiPtr, regTcPtr);
+
+ if(regApiPtr->apiConnectstate == CS_ABORTING){
+ /**
+         * We're already aborting, so don't send an "extra" TCKEYREF
+ */
+ jam();
+ return;
+ }
+
+ const Uint32 abort = regTcPtr->m_execAbortOption;
+ if (abort == TcKeyReq::AbortOnError || triggeringOp != RNIL) {
+ /**
+ * No error is allowed on this operation
+ */
+ TCKEY_abort(signal, 49);
+ return;
+ }//if
+
+ if (marker != RNIL){
+ /**
+ * This was an insert/update/delete/write which failed
+ * that contained the marker
+ * Currently unsupported to place new marker
+ */
+ TCKEY_abort(signal, 49);
+ return;
+ }
+
+ /* *************** */
+ /* TCKEYREF < */
+ /* *************** */
+ TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend();
+ tcKeyRef->transId[0] = regApiPtr->transid[0];
+ tcKeyRef->transId[1] = regApiPtr->transid[1];
+ tcKeyRef->errorCode = terrorCode;
+ bool isIndexOp = regTcPtr->isIndexOp;
+ Uint32 indexOp = tcConnectptr.p->indexOp;
+ Uint32 clientData = regTcPtr->clientData;
+ unlinkReadyTcCon(signal); /* LINK TC CONNECT RECORD OUT OF */
+ releaseTcCon(); /* RELEASE THE TC CONNECT RECORD */
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ if (isIndexOp) {
+ jam();
+ regApiPtr->lqhkeyreqrec--; // Compensate for extra during read
+ tcKeyRef->connectPtr = indexOp;
+ EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength);
+ apiConnectptr.i = regTcPtr->apiConnect;
+ apiConnectptr.p = regApiPtr;
+ } else {
+ jam();
+ tcKeyRef->connectPtr = clientData;
+ sendSignal(regApiPtr->ndbapiBlockref,
+ GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB);
+ }//if
+
+      /*---------------------------------------------------------------------
+       * SINCE WE ARE NOT ABORTING WE NEED TO UPDATE THE COUNT OF HOW MANY
+       * LQHKEYREQS HAVE RETURNED.
+       * IF THERE ARE NO MORE OUTSTANDING LQHKEYREQS THEN WE NEED TO SEND
+       * TCKEYCONF (IF THERE IS ANYTHING TO SEND).
+       *---------------------------------------------------------------------*/
+ regApiPtr->lqhkeyreqrec--;
+ if (regApiPtr->lqhkeyconfrec == regApiPtr->lqhkeyreqrec) {
+ if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
+ if(regApiPtr->lqhkeyconfrec) {
+ jam();
+ diverify010Lab(signal);
+ } else {
+ jam();
+ sendtckeyconf(signal, 1);
+ regApiPtr->apiConnectstate = CS_CONNECTED;
+ }
+ return;
+ } else if (regApiPtr->tckeyrec > 0 || regApiPtr->m_exec_flag) {
+ jam();
+ sendtckeyconf(signal, 2);
+ return;
+ }
+ }//if
+ return;
+
+ } else {
+ warningReport(signal, 26);
+ }//if
+ } else {
+ errorReport(signal, 6);
+ }//if
+ return;
+}//Dbtc::execLQHKEYREF()
+
+void Dbtc::clearCommitAckMarker(ApiConnectRecord * const regApiPtr,
+ TcConnectRecord * const regTcPtr)
+{
+ const Uint32 commitAckMarker = regTcPtr->commitAckMarker;
+ if (regApiPtr->commitAckMarker == RNIL)
+ ndbassert(commitAckMarker == RNIL);
+ if (commitAckMarker != RNIL)
+ ndbassert(regApiPtr->commitAckMarker != RNIL);
+ if(commitAckMarker != RNIL){
+ jam();
+ m_commitAckMarkerHash.release(commitAckMarker);
+ regTcPtr->commitAckMarker = RNIL;
+ regApiPtr->commitAckMarker = RNIL;
+ }
+}
+
+void Dbtc::markOperationAborted(ApiConnectRecord * const regApiPtr,
+ TcConnectRecord * const regTcPtr)
+{
+ /*------------------------------------------------------------------------
+ * RELEASE NODES TO INDICATE THAT THE OPERATION IS ALREADY ABORTED IN THE
+ * LQH'S ALSO SET STATE TO ABORTING TO INDICATE THE ABORT IS
+ * ALREADY COMPLETED.
+ *------------------------------------------------------------------------*/
+ regTcPtr->noOfNodes = 0; // == releaseNodes(signal)
+ regTcPtr->tcConnectstate = OS_ABORTING;
+ clearCommitAckMarker(regApiPtr, regTcPtr);
+}
+
+/*--------------------------------------*/
+/* EXIT AND WAIT FOR SIGNAL TC_COMMITREQ */
+/* OR TCROLLBACKREQ FROM THE USER TO */
+/* CONTINUE THE TRANSACTION */
+/*--------------------------------------*/
+void Dbtc::execTC_COMMITREQ(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+
+ jamEntry();
+ apiConnectptr.i = signal->theData[0];
+ if (apiConnectptr.i < capiConnectFilesize) {
+ ptrAss(apiConnectptr, apiConnectRecord);
+ compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
+ compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ jam();
+ return;
+ }//if
+
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+
+ const Uint32 apiConnectPtr = regApiPtr->ndbapiConnect;
+ const Uint32 apiBlockRef = regApiPtr->ndbapiBlockref;
+ const Uint32 transId1 = regApiPtr->transid[0];
+ const Uint32 transId2 = regApiPtr->transid[1];
+ Uint32 errorCode = 0;
+
+ regApiPtr->m_exec_flag = 1;
+ switch (regApiPtr->apiConnectstate) {
+ case CS_STARTED:
+ tcConnectptr.i = regApiPtr->firstTcConnect;
+ if (tcConnectptr.i != RNIL) {
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ if (regApiPtr->lqhkeyconfrec == regApiPtr->lqhkeyreqrec) {
+ jam();
+ /*******************************************************************/
+ // The proper case where the application is waiting for commit or
+ // abort order.
+ // Start the commit order.
+ /*******************************************************************/
+ regApiPtr->returnsignal = RS_TC_COMMITCONF;
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ diverify010Lab(signal);
+ return;
+ } else {
+ jam();
+ /*******************************************************************/
+ // The transaction is started but not all operations are completed.
+ // It is not possible to commit the transaction in this state.
+ // We will abort it instead.
+ /*******************************************************************/
+ regApiPtr->returnsignal = RS_NO_RETURN;
+ errorCode = ZTRANS_STATUS_ERROR;
+ abort010Lab(signal);
+ }//if
+ } else {
+ jam();
+ /**
+ * No operations, accept commit
+ */
+ TcCommitConf * const commitConf = (TcCommitConf *)&signal->theData[0];
+ commitConf->apiConnectPtr = apiConnectPtr;
+ commitConf->transId1 = transId1;
+ commitConf->transId2 = transId2;
+ commitConf->gci = 0;
+ sendSignal(apiBlockRef, GSN_TC_COMMITCONF, signal,
+ TcCommitConf::SignalLength, JBB);
+
+ regApiPtr->returnsignal = RS_NO_RETURN;
+ releaseAbortResources(signal);
+ return;
+ }//if
+ break;
+ case CS_RECEIVING:
+ jam();
+ /***********************************************************************/
+ // A transaction is still receiving data. We cannot commit an unfinished
+ // transaction. We will abort it instead.
+ /***********************************************************************/
+ regApiPtr->returnsignal = RS_NO_RETURN;
+ errorCode = ZPREPAREINPROGRESS;
+ abort010Lab(signal);
+ break;
+
+ case CS_START_COMMITTING:
+ case CS_COMMITTING:
+ case CS_COMMIT_SENT:
+ case CS_COMPLETING:
+ case CS_COMPLETE_SENT:
+ case CS_REC_COMMITTING:
+ case CS_PREPARE_TO_COMMIT:
+ jam();
+ /***********************************************************************/
+ // The transaction is already performing a commit but it is not concluded
+ // yet.
+ /***********************************************************************/
+ errorCode = ZCOMMITINPROGRESS;
+ break;
+ case CS_ABORTING:
+ jam();
+ errorCode = ZABORTINPROGRESS;
+ break;
+ case CS_START_SCAN:
+ jam();
+ /***********************************************************************/
+ // The transaction is a scan. Scans cannot commit
+ /***********************************************************************/
+ errorCode = ZSCANINPROGRESS;
+ break;
+ case CS_PREPARED:
+ jam();
+ return;
+ case CS_START_PREPARING:
+ jam();
+ return;
+ case CS_REC_PREPARING:
+ jam();
+ return;
+ break;
+ default:
+ warningHandlerLab(signal);
+ return;
+ }//switch
+ TcCommitRef * const commitRef = (TcCommitRef*)&signal->theData[0];
+ commitRef->apiConnectPtr = apiConnectPtr;
+ commitRef->transId1 = transId1;
+ commitRef->transId2 = transId2;
+ commitRef->errorCode = errorCode;
+ sendSignal(apiBlockRef, GSN_TC_COMMITREF, signal,
+ TcCommitRef::SignalLength, JBB);
+ return;
+  } else /** apiConnectptr.i >= capiConnectFilesize */ {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }
+}//Dbtc::execTC_COMMITREQ()
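+
+/* Illustrative sketch of the branch-free transaction-id check used above and
+ * in execTCROLLBACKREQ/execABORTED: XOR-ing each stored id word with the
+ * received word and OR-ing the results is zero exactly when both words
+ * match.  sameTransId is a hypothetical helper, not an actual DBTC symbol.
+ */
+namespace dbtc_sketch {
+static bool sameTransId(const unsigned stored[2], const unsigned received[2])
+{
+  unsigned diff = (stored[0] ^ received[0]) | (stored[1] ^ received[1]);
+  return diff == 0;   // non-zero means at least one 32-bit word differs
+}
+}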
+
+void Dbtc::execTCROLLBACKREQ(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+
+ jamEntry();
+ apiConnectptr.i = signal->theData[0];
+ if (apiConnectptr.i >= capiConnectFilesize) {
+ goto TC_ROLL_warning;
+ }//if
+ ptrAss(apiConnectptr, apiConnectRecord);
+ compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
+ compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ jam();
+ return;
+ }//if
+
+ apiConnectptr.p->m_exec_flag = 1;
+ switch (apiConnectptr.p->apiConnectstate) {
+ case CS_STARTED:
+ case CS_RECEIVING:
+ jam();
+ apiConnectptr.p->returnsignal = RS_TCROLLBACKCONF;
+ abort010Lab(signal);
+ return;
+ case CS_CONNECTED:
+ jam();
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKCONF,
+ signal, 3, JBB);
+ break;
+ case CS_START_SCAN:
+ case CS_PREPARE_TO_COMMIT:
+ case CS_COMMITTING:
+ case CS_COMMIT_SENT:
+ case CS_COMPLETING:
+ case CS_COMPLETE_SENT:
+ case CS_WAIT_COMMIT_CONF:
+ case CS_WAIT_COMPLETE_CONF:
+ case CS_RESTART:
+ case CS_DISCONNECTED:
+ case CS_START_COMMITTING:
+ case CS_REC_COMMITTING:
+ jam();
+ /* ***************< */
+ /* TC_ROLLBACKREF < */
+ /* ***************< */
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ signal->theData[3] = ZROLLBACKNOTALLOWED;
+ signal->theData[4] = apiConnectptr.p->apiConnectstate;
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREF,
+ signal, 5, JBB);
+ break;
+ /* SEND A REFUSAL SIGNAL*/
+ case CS_ABORTING:
+ jam();
+ if (apiConnectptr.p->abortState == AS_IDLE) {
+ jam();
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKCONF,
+ signal, 3, JBB);
+ } else {
+ jam();
+ apiConnectptr.p->returnsignal = RS_TCROLLBACKCONF;
+ }//if
+ break;
+ case CS_WAIT_ABORT_CONF:
+ jam();
+ apiConnectptr.p->returnsignal = RS_TCROLLBACKCONF;
+ break;
+ case CS_START_PREPARING:
+ jam();
+ case CS_PREPARED:
+ jam();
+ case CS_REC_PREPARING:
+ jam();
+ default:
+ goto TC_ROLL_system_error;
+ break;
+ }//switch
+ return;
+
+TC_ROLL_warning:
+ jam();
+ warningHandlerLab(signal);
+ return;
+
+TC_ROLL_system_error:
+ jam();
+ systemErrorLab(signal);
+ return;
+}//Dbtc::execTCROLLBACKREQ()
+
+void Dbtc::execTC_HBREP(Signal* signal)
+{
+ const TcHbRep * const tcHbRep =
+ (TcHbRep *)signal->getDataPtr();
+
+ jamEntry();
+ apiConnectptr.i = tcHbRep->apiConnectPtr;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+
+ if (apiConnectptr.p->transid[0] == tcHbRep->transId1 &&
+ apiConnectptr.p->transid[1] == tcHbRep->transId2){
+
+ if (getApiConTimer(apiConnectptr.i) != 0){
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ } else {
+ DEBUG("TCHBREP received when timer was off apiConnectptr.i="
+ << apiConnectptr.i);
+ }
+ }
+}//Dbtc::execTC_HBREP()
+
+/*
+4.3.15 ABORT
+------------
+*/
+/*****************************************************************************/
+/* A B O R T */
+/* */
+/*****************************************************************************/
+void Dbtc::warningReport(Signal* signal, int place)
+{
+ switch (place) {
+ case 0:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "ABORTED to not active TC record" << endl;
+#endif
+ break;
+ case 1:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "ABORTED to TC record active with new transaction" << endl;
+#endif
+ break;
+ case 2:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "ABORTED to active TC record not expecting ABORTED" << endl;
+#endif
+ break;
+ case 3:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "ABORTED to TC rec active with trans but wrong node" << endl;
+ ndbout << "This is ok when aborting in node failure situations" << endl;
+#endif
+ break;
+ case 4:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMMITTED in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 5:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMMITTED with wrong transid in Dbtc" << endl;
+#endif
+ break;
+ case 6:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMPLETED in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 7:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMPLETED with wrong transid in Dbtc" << endl;
+#endif
+ break;
+ case 8:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMMITCONF with tc-rec in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 9:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMMITCONF with api-rec in wrong state in Dbtc" <<endl;
+#endif
+ break;
+ case 10:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMMITCONF with wrong transid in Dbtc" << endl;
+#endif
+ break;
+ case 11:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMMITCONF from wrong nodeid in Dbtc" << endl;
+#endif
+ break;
+ case 12:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMPLETECONF, tc-rec in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 13:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMPLETECONF, api-rec in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 14:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMPLETECONF with wrong transid in Dbtc" << endl;
+#endif
+ break;
+ case 15:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMPLETECONF from wrong nodeid in Dbtc" << endl;
+#endif
+ break;
+ case 16:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received ABORTCONF, tc-rec in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 17:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received ABORTCONF, api-rec in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 18:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received ABORTCONF with wrong transid in Dbtc" << endl;
+#endif
+ break;
+ case 19:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received ABORTCONF from wrong nodeid in Dbtc" << endl;
+#endif
+ break;
+ case 20:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Time-out waiting for ABORTCONF in Dbtc" << endl;
+#endif
+ break;
+ case 21:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Time-out waiting for COMMITCONF in Dbtc" << endl;
+#endif
+ break;
+ case 22:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Time-out waiting for COMPLETECONF in Dbtc" << endl;
+#endif
+ break;
+ case 23:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received LQHKEYCONF in wrong tc-state in Dbtc" << endl;
+#endif
+ break;
+ case 24:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received LQHKEYREF to wrong transid in Dbtc" << endl;
+#endif
+ break;
+ case 25:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received LQHKEYREF in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 26:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received LQHKEYCONF to wrong transid in Dbtc" << endl;
+#endif
+ break;
+ case 27:
+ jam();
+ // printState(signal, 27);
+#ifdef ABORT_TRACE
+ ndbout << "Received LQHKEYCONF in wrong api-state in Dbtc" << endl;
+#endif
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ return;
+}//Dbtc::warningReport()
+
+void Dbtc::errorReport(Signal* signal, int place)
+{
+ switch (place) {
+ case 0:
+ jam();
+ break;
+ case 1:
+ jam();
+ break;
+ case 2:
+ jam();
+ break;
+ case 3:
+ jam();
+ break;
+ case 4:
+ jam();
+ break;
+ case 5:
+ jam();
+ break;
+ case 6:
+ jam();
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ systemErrorLab(signal);
+ return;
+}//Dbtc::errorReport()
+
+/* ------------------------------------------------------------------------- */
+/* ------- ENTER ABORTED ------- */
+/* */
+/*-------------------------------------------------------------------------- */
+void Dbtc::execABORTED(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ UintR Tnodeid = signal->theData[3];
+ UintR TlastLqhInd = signal->theData[4];
+
+ if (ERROR_INSERTED(8040)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_ABORTED, signal, 2000, 5);
+ return;
+ }//if
+ /*------------------------------------------------------------------------
+ * ONE PARTICIPANT IN THE TRANSACTION HAS REPORTED THAT IT IS ABORTED.
+ *------------------------------------------------------------------------*/
+ if (tcConnectptr.i >= ctcConnectFilesize) {
+ errorReport(signal, 0);
+ return;
+ }//if
+ /*-------------------------------------------------------------------------
+ * WE HAVE TO CHECK THAT THIS IS NOT AN OLD SIGNAL BELONGING TO A
+ * TRANSACTION ALREADY ABORTED. THIS CAN HAPPEN WHEN TIME-OUT OCCURS
+ * IN TC WAITING FOR ABORTED.
+ *-------------------------------------------------------------------------*/
+ ptrAss(tcConnectptr, tcConnectRecord);
+ if (tcConnectptr.p->tcConnectstate != OS_ABORT_SENT) {
+ warningReport(signal, 2);
+ return;
+ /*-----------------------------------------------------------------------*/
+ // ABORTED reported on an operation not expecting ABORT.
+ /*-----------------------------------------------------------------------*/
+ }//if
+ apiConnectptr.i = tcConnectptr.p->apiConnect;
+ if (apiConnectptr.i >= capiConnectFilesize) {
+ warningReport(signal, 0);
+ return;
+ }//if
+ ptrAss(apiConnectptr, apiConnectRecord);
+ compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
+ compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ warningReport(signal, 1);
+ return;
+ }//if
+ if (ERROR_INSERTED(8024)) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+
+ /**
+ * Release marker
+ */
+ clearCommitAckMarker(apiConnectptr.p, tcConnectptr.p);
+
+ Uint32 i;
+ Uint32 Tfound = 0;
+ for (i = 0; i < tcConnectptr.p->noOfNodes; i++) {
+ jam();
+ if (tcConnectptr.p->tcNodedata[i] == Tnodeid) {
+ /*---------------------------------------------------------------------
+ * We have received ABORTED from one of the participants in this
+ * operation in this aborted transaction.
+ * Record all nodes that have completed abort.
+       * If the last indicator is set it means that the remaining replicas
+       * never heard of the operation and are thus also considered aborted.
+ *---------------------------------------------------------------------*/
+ jam();
+ Tfound = 1;
+ clearTcNodeData(signal, TlastLqhInd, i);
+ }//if
+ }//for
+ if (Tfound == 0) {
+ warningReport(signal, 3);
+ return;
+ }
+ for (i = 0; i < tcConnectptr.p->noOfNodes; i++) {
+ if (tcConnectptr.p->tcNodedata[i] != 0) {
+ /*--------------------------------------------------------------------
+ * There are still outstanding ABORTED's to wait for.
+ *--------------------------------------------------------------------*/
+ jam();
+ return;
+ }//if
+ }//for
+ tcConnectptr.p->noOfNodes = 0;
+ tcConnectptr.p->tcConnectstate = OS_ABORTING;
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ apiConnectptr.p->counter--;
+ if (apiConnectptr.p->counter > 0) {
+ jam();
+ /*----------------------------------------------------------------------
+ * WE ARE STILL WAITING FOR MORE PARTICIPANTS TO SEND ABORTED.
+ *----------------------------------------------------------------------*/
+ return;
+ }//if
+ /*------------------------------------------------------------------------*/
+ /* */
+ /* WE HAVE NOW COMPLETED THE ABORT PROCESS. WE HAVE RECEIVED ABORTED */
+ /* FROM ALL PARTICIPANTS IN THE TRANSACTION. WE CAN NOW RELEASE ALL */
+ /* RESOURCES CONNECTED TO THE TRANSACTION AND SEND THE ABORT RESPONSE */
+ /*------------------------------------------------------------------------*/
+ releaseAbortResources(signal);
+}//Dbtc::execABORTED()
+
+void Dbtc::clearTcNodeData(Signal* signal,
+ UintR TLastLqhIndicator,
+ UintR Tstart)
+{
+ UintR Ti;
+ if (TLastLqhIndicator == ZTRUE) {
+ for (Ti = Tstart ; Ti < tcConnectptr.p->noOfNodes; Ti++) {
+ jam();
+ tcConnectptr.p->tcNodedata[Ti] = 0;
+ }//for
+ } else {
+ jam();
+ tcConnectptr.p->tcNodedata[Tstart] = 0;
+  }//if
+}//Dbtc::clearTcNodeData()
+
+void Dbtc::abortErrorLab(Signal* signal)
+{
+ ptrGuard(apiConnectptr);
+ ApiConnectRecord * transP = apiConnectptr.p;
+ if (transP->apiConnectstate == CS_ABORTING && transP->abortState != AS_IDLE){
+ jam();
+ return;
+ }
+ transP->returnsignal = RS_TCROLLBACKREP;
+ if(transP->returncode == 0){
+ jam();
+ transP->returncode = terrorCode;
+ }
+ abort010Lab(signal);
+}//Dbtc::abortErrorLab()
+
+void Dbtc::abort010Lab(Signal* signal)
+{
+ ApiConnectRecord * transP = apiConnectptr.p;
+ if (transP->apiConnectstate == CS_ABORTING && transP->abortState != AS_IDLE){
+ jam();
+ return;
+ }
+ transP->apiConnectstate = CS_ABORTING;
+ /*------------------------------------------------------------------------*/
+ /* AN ABORT DECISION HAS BEEN TAKEN FOR SOME REASON. WE NEED TO ABORT */
+ /* ALL PARTICIPANTS IN THE TRANSACTION. */
+ /*------------------------------------------------------------------------*/
+ transP->abortState = AS_ACTIVE;
+ transP->counter = 0;
+
+ if (transP->firstTcConnect == RNIL) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ /* WE HAVE NO PARTICIPANTS IN THE TRANSACTION. */
+ /*-----------------------------------------------------------------------*/
+ releaseAbortResources(signal);
+ return;
+ }//if
+ tcConnectptr.i = transP->firstTcConnect;
+ abort015Lab(signal);
+}//Dbtc::abort010Lab()
+
+/*--------------------------------------------------------------------------*/
+/* */
+/* WE WILL ABORT ONE NODE PER OPERATION AT A TIME. THIS IS TO KEEP */
+/* ERROR HANDLING OF THIS PROCESS FAIRLY SIMPLE AND TRACTABLE. */
+/* EVEN IF NO NODE OF THIS PARTICULAR NODE NUMBER NEEDS ABORTION WE */
+/* MUST ENSURE THAT ALL NODES ARE CHECKED. THUS A FAULTY NODE DOES */
+/*       NOT MEAN THAT ALL NODES IN AN OPERATION ARE ABORTED. FOR THIS REASON*/
+/* WE SET THE TCONTINUE_ABORT TO TRUE WHEN A FAULTY NODE IS DETECTED. */
+/*--------------------------------------------------------------------------*/
+void Dbtc::abort015Lab(Signal* signal)
+{
+ Uint32 TloopCount = 0;
+ABORT020:
+ jam();
+ TloopCount++;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ switch (tcConnectptr.p->tcConnectstate) {
+ case OS_WAIT_DIH:
+ case OS_WAIT_KEYINFO:
+ case OS_WAIT_ATTR:
+ jam();
+ /*----------------------------------------------------------------------*/
+ /* WE ARE STILL WAITING FOR MORE KEYINFO/ATTRINFO. WE HAVE NOT CONTACTED*/
+ /* ANY LQH YET AND SO WE CAN SIMPLY SET STATE TO ABORTING. */
+ /*----------------------------------------------------------------------*/
+ tcConnectptr.p->noOfNodes = 0; // == releaseAbort(signal)
+ tcConnectptr.p->tcConnectstate = OS_ABORTING;
+ break;
+ case OS_CONNECTED:
+ jam();
+ /*-----------------------------------------------------------------------
+ * WE ARE STILL IN THE INITIAL PHASE OF THIS OPERATION.
+ * NEED NOT BOTHER ABOUT ANY LQH ABORTS.
+ *-----------------------------------------------------------------------*/
+ tcConnectptr.p->noOfNodes = 0; // == releaseAbort(signal)
+ tcConnectptr.p->tcConnectstate = OS_ABORTING;
+ break;
+ case OS_PREPARED:
+ jam();
+ case OS_OPERATING:
+ jam();
+ /*----------------------------------------------------------------------
+ * WE HAVE SENT LQHKEYREQ AND ARE IN SOME STATE OF EITHER STILL
+ * SENDING THE OPERATION, WAITING FOR REPLIES, WAITING FOR MORE
+ * ATTRINFO OR OPERATION IS PREPARED. WE NEED TO ABORT ALL LQH'S.
+ *----------------------------------------------------------------------*/
+ releaseAndAbort(signal);
+ tcConnectptr.p->tcConnectstate = OS_ABORT_SENT;
+ TloopCount += 127;
+ break;
+ case OS_ABORTING:
+ jam();
+ break;
+ case OS_ABORT_SENT:
+ jam();
+ DEBUG("ABORT_SENT state in abort015Lab(), not expected");
+ systemErrorLab(signal);
+ return;
+ default:
+ jam();
+ DEBUG("tcConnectstate = " << tcConnectptr.p->tcConnectstate);
+ systemErrorLab(signal);
+ return;
+ }//switch
+
+ if (tcConnectptr.p->nextTcConnect != RNIL) {
+ jam();
+ tcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ if (TloopCount < 1024) {
+ goto ABORT020;
+ } else {
+ jam();
+ /*---------------------------------------------------------------------
+ * Reset timer to avoid time-out in real-time break.
+ * Increase counter to ensure that we don't think that all ABORTED have
+ * been received before all have been sent.
+ *---------------------------------------------------------------------*/
+ apiConnectptr.p->counter++;
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ signal->theData[0] = TcContinueB::ZABORT_BREAK;
+ signal->theData[1] = tcConnectptr.i;
+ signal->theData[2] = apiConnectptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }//if
+ }//if
+ if (apiConnectptr.p->counter > 0) {
+ jam();
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ return;
+ }//if
+ /*-----------------------------------------------------------------------
+ * WE HAVE NOW COMPLETED THE ABORT PROCESS. WE HAVE RECEIVED ABORTED
+ * FROM ALL PARTICIPANTS IN THE TRANSACTION. WE CAN NOW RELEASE ALL
+ * RESOURCES CONNECTED TO THE TRANSACTION AND SEND THE ABORT RESPONSE
+ *------------------------------------------------------------------------*/
+ releaseAbortResources(signal);
+}//Dbtc::abort015Lab()
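+
+/* Illustrative sketch (not part of the DBTC block): the real-time break
+ * pattern used above. A long linked list of operation records is walked
+ * with a loop-count budget; when the budget is exhausted the walk stops
+ * and the caller reschedules itself (DBTC does this with a CONTINUEB
+ * signal carrying the index of the next record). Names and the callback
+ * types are illustrative only.
+ */
+static void sketch_walk_with_breaks(Uint32 start,
+ Uint32 (*nextOf)(Uint32),
+ void (*work)(Uint32),
+ void (*reschedule)(Uint32))
+{
+ Uint32 loopCount = 0;
+ for (Uint32 rec = start; rec != RNIL; rec = nextOf(rec)) {
+ work(rec);
+ if (++loopCount >= 1024 && nextOf(rec) != RNIL) {
+ reschedule(nextOf(rec)); // real-time break, continue from the next record
+ return;
+ }
+ }
+}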
+
+/*--------------------------------------------------------------------------*/
+/* RELEASE KEY AND ATTRINFO OBJECTS AND SEND ABORT TO THE LQH BLOCK. */
+/*--------------------------------------------------------------------------*/
+int Dbtc::releaseAndAbort(Signal* signal)
+{
+ HostRecordPtr localHostptr;
+ UintR TnoLoops = tcConnectptr.p->noOfNodes;
+
+ apiConnectptr.p->counter++;
+ bool prevAlive = false;
+ for (Uint32 Ti = 0; Ti < TnoLoops ; Ti++) {
+ localHostptr.i = tcConnectptr.p->tcNodedata[Ti];
+ ptrCheckGuard(localHostptr, chostFilesize, hostRecord);
+ if (localHostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ if (prevAlive) {
+ // if previous is alive, its LQH forwards abort to this node
+ jam();
+ continue;
+ }
+ /* ************< */
+ /* ABORT < */
+ /* ************< */
+ tblockref = calcLqhBlockRef(localHostptr.i);
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = apiConnectptr.p->transid[0];
+ signal->theData[3] = apiConnectptr.p->transid[1];
+ sendSignal(tblockref, GSN_ABORT, signal, 4, JBB);
+ prevAlive = true;
+ } else {
+ jam();
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ signal->theData[3] = localHostptr.i;
+ signal->theData[4] = ZFALSE;
+ sendSignal(cownref, GSN_ABORTED, signal, 5, JBB);
+ prevAlive = false;
+ }//if
+ }//for
+ return 1;
+}//Dbtc::releaseAndAbort()
+
+/* ------------------------------------------------------------------------- */
+/* ------- ENTER TIME_SIGNAL ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dbtc::execTIME_SIGNAL(Signal* signal)
+{
+
+ jamEntry();
+ ctcTimer++;
+ if (csystemStart != SSS_TRUE) {
+ jam();
+ return;
+ }//if
+ checkStartTimeout(signal);
+ checkStartFragTimeout(signal);
+}//Dbtc::execTIME_SIGNAL()
+
+/*------------------------------------------------*/
+/* Start timeout handling if not already going on */
+/*------------------------------------------------*/
+void Dbtc::checkStartTimeout(Signal* signal)
+{
+ ctimeOutCheckCounter++;
+ if (ctimeOutCheckActive == TOCS_TRUE) {
+ jam();
+ // Check heartbeat of timeout loop
+ if(ctimeOutCheckHeartbeat > ctimeOutCheckLastHeartbeat){
+ jam();
+ ctimeOutMissedHeartbeats = 0;
+ }else{
+ jam();
+ ctimeOutMissedHeartbeats++;
+ if (ctimeOutMissedHeartbeats > 100){
+ jam();
+ systemErrorLab(signal);
+ }
+ }
+ ctimeOutCheckLastHeartbeat = ctimeOutCheckHeartbeat;
+ return;
+ }//if
+ if (ctimeOutCheckCounter < ctimeOutCheckDelay) {
+ jam();
+ /*------------------------------------------------------------------*/
+ /* */
+ /* NO TIME-OUT CHECKED THIS TIME. WAIT MORE. */
+ /*------------------------------------------------------------------*/
+ return;
+ }//if
+ ctimeOutCheckActive = TOCS_TRUE;
+ ctimeOutCheckCounter = 0;
+ timeOutLoopStartLab(signal, 0); // 0 is first api connect record
+ return;
+}//Dbtc::checkStartTimeout()
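+
+/* Illustrative sketch (not part of the DBTC block): the heartbeat check
+ * above, as a stand-alone helper. The time-out loop is considered stuck
+ * when its heartbeat counter has not advanced for more than 100
+ * consecutive checks; the caller then crashes the node. Names are
+ * illustrative only.
+ */
+static bool sketch_timeout_loop_is_stuck(Uint32 heartbeat,
+ Uint32 & lastHeartbeat,
+ Uint32 & missedHeartbeats)
+{
+ if (heartbeat > lastHeartbeat)
+ missedHeartbeats = 0; // the loop made progress since the last check
+ else
+ missedHeartbeats++; // no progress since the previous check
+ lastHeartbeat = heartbeat;
+ return missedHeartbeats > 100;
+}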
+
+/*----------------------------------------------------------------*/
+/* Start fragment (scan) timeout handling if not already going on */
+/*----------------------------------------------------------------*/
+void Dbtc::checkStartFragTimeout(Signal* signal)
+{
+ ctimeOutCheckFragCounter++;
+ if (ctimeOutCheckFragActive == TOCS_TRUE) {
+ jam();
+ return;
+ }//if
+ if (ctimeOutCheckFragCounter < ctimeOutCheckDelay) {
+ jam();
+ /*------------------------------------------------------------------*/
+ /* NO TIME-OUT CHECKED THIS TIME. WAIT MORE. */
+ /*------------------------------------------------------------------*/
+ return;
+ }//if
+
+ // Go through the fragment records and look for timeout in a scan.
+ ctimeOutCheckFragActive = TOCS_TRUE;
+ ctimeOutCheckFragCounter = 0;
+ timeOutLoopStartFragLab(signal, 0); // 0 means first scan record
+}//checkStartFragTimeout()
+
+/*------------------------------------------------------------------*/
+/* IT IS NOW TIME TO CHECK WHETHER ANY TRANSACTIONS HAVE */
+/* BEEN DELAYED FOR SO LONG THAT WE ARE FORCED TO PERFORM */
+/* SOME ACTION, EITHER ABORT OR RESEND OR REMOVE A NODE FROM */
+/* THE WAITING PART OF A PROTOCOL. */
+/*
+The algorithm used here is to check 1024 transactions at a time before
+doing a real-time break.
+To avoid aborting both transactions in a deadlock detected by time-out
+we insert a random extra time-out of up to 630 ms by using the lowest
+six bits of the api connect reference.
+The extra time-out is spread from 0 to 630 ms if the base time-out is
+larger than 3 seconds, from 0 to 70 ms if it is smaller than 300 ms,
+and from 0 to 310 ms otherwise (see the sketch after this routine).
+*/
+/*------------------------------------------------------------------*/
+void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr)
+{
+ Uint32 end_ptr, time_passed, time_out_value, mask_value;
+ const Uint32 api_con_sz= capiConnectFilesize;
+ const Uint32 tc_timer= ctcTimer;
+ const Uint32 time_out_param= ctimeOutValue;
+
+ ctimeOutCheckHeartbeat = tc_timer;
+
+ if (api_con_ptr + 1024 < api_con_sz) {
+ jam();
+ end_ptr= api_con_ptr + 1024;
+ } else {
+ jam();
+ end_ptr= api_con_sz;
+ }
+ if (time_out_param > 300) {
+ jam();
+ mask_value= 63;
+ } else if (time_out_param < 30) {
+ jam();
+ mask_value= 7;
+ } else {
+ jam();
+ mask_value= 31;
+ }
+ for ( ; api_con_ptr < end_ptr; api_con_ptr++) {
+ Uint32 api_timer= getApiConTimer(api_con_ptr);
+ jam();
+ if (api_timer != 0) {
+ time_out_value= time_out_param + (api_con_ptr & mask_value);
+ time_passed= tc_timer - api_timer;
+ if (time_passed > time_out_value) {
+ jam();
+ timeOutFoundLab(signal, api_con_ptr);
+ return;
+ }
+ }
+ }
+ if (api_con_ptr == api_con_sz) {
+ jam();
+ /*------------------------------------------------------------------*/
+ /* */
+ /* WE HAVE NOW CHECKED ALL TRANSACTIONS FOR TIME-OUT AND ALSO */
+ /* STARTED TIME-OUT HANDLING OF THOSE WE FOUND. WE ARE NOW */
+ /* READY AND CAN WAIT FOR THE NEXT TIME-OUT CHECK. */
+ /*------------------------------------------------------------------*/
+ ctimeOutCheckActive = TOCS_FALSE;
+ } else {
+ jam();
+ sendContinueTimeOutControl(signal, api_con_ptr);
+ }
+ return;
+}//Dbtc::timeOutLoopStartLab()
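+
+/* Illustrative sketch (not part of the DBTC block): the per-transaction
+ * time-out spread computed in the loop above, as a stand-alone helper.
+ * Timer values are assumed to be 10 ms ticks, so a mask of 63 adds
+ * 0-630 ms, 31 adds 0-310 ms and 7 adds 0-70 ms. The helper name and
+ * signature are illustrative only.
+ */
+static Uint32 sketch_spread_time_out(Uint32 time_out_param, Uint32 api_con_ptr)
+{
+ Uint32 mask_value;
+ if (time_out_param > 300) // base time-out larger than 3 seconds
+ mask_value = 63;
+ else if (time_out_param < 30) // base time-out smaller than 300 ms
+ mask_value = 7;
+ else
+ mask_value = 31;
+ // The low bits of the api connect record index act as a cheap random source.
+ return time_out_param + (api_con_ptr & mask_value);
+}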
+
+void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr)
+{
+ sendContinueTimeOutControl(signal, TapiConPtr + 1);
+
+ apiConnectptr.i = TapiConPtr;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ /*------------------------------------------------------------------*/
+ /* */
+ /* THIS TRANSACTION HAS EXPERIENCED A TIME-OUT AND WE NEED TO*/
+ /* FIND OUT WHAT WE NEED TO DO BASED ON THE STATE INFORMATION.*/
+ /*------------------------------------------------------------------*/
+ DEBUG("[ H'" << hex << apiConnectptr.p->transid[0]
+ << " H'" << apiConnectptr.p->transid[1] << "] " << dec
+ << "Time-out in state = " << apiConnectptr.p->apiConnectstate
+ << " apiConnectptr.i = " << apiConnectptr.i
+ << " - exec: " << apiConnectptr.p->m_exec_flag
+ << " - place: " << c_apiConTimer_line[apiConnectptr.i]);
+ switch (apiConnectptr.p->apiConnectstate) {
+ case CS_STARTED:
+ ndbrequire(c_apiConTimer_line[apiConnectptr.i] != 3615);
+ if(apiConnectptr.p->lqhkeyreqrec == apiConnectptr.p->lqhkeyconfrec){
+ jam();
+ /*
+ We are waiting for application to continue the transaction. In this
+ particular state we will use the application timeout parameter rather
+ than the shorter Deadlock detection timeout.
+ */
+ if (c_appl_timeout_value == 0 ||
+ (ctcTimer - getApiConTimer(apiConnectptr.i)) <= c_appl_timeout_value) {
+ jam();
+ return;
+ }//if
+ }
+ apiConnectptr.p->returnsignal = RS_TCROLLBACKREP;
+ apiConnectptr.p->returncode = ZTIME_OUT_ERROR;
+ abort010Lab(signal);
+ return;
+ case CS_RECEIVING:
+ case CS_REC_COMMITTING:
+ case CS_START_COMMITTING:
+ jam();
+ /*------------------------------------------------------------------*/
+ /* WE ARE STILL IN THE PREPARE PHASE AND THE TRANSACTION HAS */
+ /* NOT YET REACHED ITS COMMIT POINT. THUS IT IS NOW OK TO */
+ /* START ABORTING THE TRANSACTION. ALSO START CHECKING THE */
+ /* REMAINING TRANSACTIONS. */
+ /*------------------------------------------------------------------*/
+ terrorCode = ZTIME_OUT_ERROR;
+ abortErrorLab(signal);
+ return;
+ case CS_COMMITTING:
+ jam();
+ /*------------------------------------------------------------------*/
+ // We are simply waiting for a signal in the job buffer. Only extreme
+ // conditions should get us here. We ignore it.
+ /*------------------------------------------------------------------*/
+ case CS_COMPLETING:
+ jam();
+ /*------------------------------------------------------------------*/
+ // We are simply waiting for a signal in the job buffer. Only extreme
+ // conditions should get us here. We ignore it.
+ /*------------------------------------------------------------------*/
+ case CS_PREPARE_TO_COMMIT:
+ jam();
+ /*------------------------------------------------------------------*/
+ /* WE ARE WAITING FOR DIH TO COMMIT THE TRANSACTION. WE SIMPLY*/
+ /* KEEP WAITING SINCE THERE IS NO BETTER IDEA ON WHAT TO DO. */
+ /* IF IT IS BLOCKED THEN NO TRANSACTION WILL PASS THIS GATE. */
+ // To guard against strange bugs we crash the system if we have exceeded
+ // the time-out period by a factor of 10 and at least 5 seconds have passed
+ // (see the sketch after this routine).
+ /*------------------------------------------------------------------*/
+ if (((ctcTimer - getApiConTimer(apiConnectptr.i)) > (10 * ctimeOutValue)) &&
+ ((ctcTimer - getApiConTimer(apiConnectptr.i)) > 500)) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ break;
+ case CS_COMMIT_SENT:
+ jam();
+ /*------------------------------------------------------------------*/
+ /* WE HAVE SENT COMMIT TO A NUMBER OF NODES. WE ARE CURRENTLY */
+ /* WAITING FOR THEIR REPLY. WITH NODE RECOVERY SUPPORTED WE */
+ /* WILL CHECK FOR CRASHED NODES AND RESEND THE COMMIT SIGNAL */
+ /* TO THOSE NODES THAT HAVE MISSED THE COMMIT SIGNAL DUE TO */
+ /* A NODE FAILURE. */
+ /*------------------------------------------------------------------*/
+ tabortInd = ZCOMMIT_SETUP;
+ setupFailData(signal);
+ toCommitHandlingLab(signal);
+ return;
+ case CS_COMPLETE_SENT:
+ jam();
+ /*--------------------------------------------------------------------*/
+ /* WE HAVE SENT COMPLETE TO A NUMBER OF NODES. WE ARE CURRENTLY */
+ /* WAITING FOR THEIR REPLY. WITH NODE RECOVERY SUPPORTED WE */
+ /* WILL CHECK FOR CRASHED NODES AND RESEND THE COMPLETE SIGNAL */
+ /* TO THOSE NODES THAT HAVE MISSED THE COMPLETE SIGNAL DUE TO */
+ /* A NODE FAILURE. */
+ /*--------------------------------------------------------------------*/
+ tabortInd = ZCOMMIT_SETUP;
+ setupFailData(signal);
+ toCompleteHandlingLab(signal);
+ return;
+ case CS_ABORTING:
+ jam();
+ /*------------------------------------------------------------------*/
+ /* TIME-OUT DURING ABORT. WE NEED TO SEND ABORTED FOR ALL */
+ /* NODES THAT HAVE FAILED BEFORE SENDING ABORTED. */
+ /*------------------------------------------------------------------*/
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ sendAbortedAfterTimeout(signal, 0);
+ break;
+ case CS_START_SCAN:{
+ jam();
+ ScanRecordPtr scanPtr;
+ scanPtr.i = apiConnectptr.p->apiScanRec;
+ ptrCheckGuard(scanPtr, cscanrecFileSize, scanRecord);
+ scanError(signal, scanPtr, ZSCANTIME_OUT_ERROR);
+ break;
+ }
+ case CS_WAIT_ABORT_CONF:
+ jam();
+ tcConnectptr.i = apiConnectptr.p->currentTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ arrGuard(apiConnectptr.p->currentReplicaNo, 4);
+ hostptr.i = tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ /*------------------------------------------------------------------*/
+ // Time-out waiting for ABORTCONF. We will resend the ABORTREQ just in
+ // case.
+ /*------------------------------------------------------------------*/
+ warningReport(signal, 20);
+ apiConnectptr.p->timeOutCounter++;
+ if (apiConnectptr.p->timeOutCounter > 3) {
+ /*------------------------------------------------------------------*/
+ // More than three time-outs are not acceptable. We will shoot down
+ // the node not responding.
+ /*------------------------------------------------------------------*/
+ reportNodeFailed(signal, hostptr.i);
+ }//if
+ apiConnectptr.p->currentReplicaNo++;
+ }//if
+ tcurrentReplicaNo = (Uint8)Z8NIL;
+ toAbortHandlingLab(signal);
+ return;
+ case CS_WAIT_COMMIT_CONF:
+ jam();
+ tcConnectptr.i = apiConnectptr.p->currentTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ arrGuard(apiConnectptr.p->currentReplicaNo, 4);
+ hostptr.i = tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ /*------------------------------------------------------------------*/
+ // Time-out waiting for COMMITCONF. We will resend the COMMITREQ just in
+ // case.
+ /*------------------------------------------------------------------*/
+ warningReport(signal, 21);
+ apiConnectptr.p->timeOutCounter++;
+ if (apiConnectptr.p->timeOutCounter > 3) {
+ /*------------------------------------------------------------------*/
+ // More than three time-outs are not acceptable. We will shoot down
+ // the node not responding.
+ /*------------------------------------------------------------------*/
+ reportNodeFailed(signal, hostptr.i);
+ }//if
+ apiConnectptr.p->currentReplicaNo++;
+ }//if
+ tcurrentReplicaNo = (Uint8)Z8NIL;
+ toCommitHandlingLab(signal);
+ return;
+ case CS_WAIT_COMPLETE_CONF:
+ jam();
+ tcConnectptr.i = apiConnectptr.p->currentTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ arrGuard(apiConnectptr.p->currentReplicaNo, 4);
+ hostptr.i = tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ /*------------------------------------------------------------------*/
+ // Time-out waiting for COMPLETECONF. We will resend the COMPLETEREQ
+ // just in case.
+ /*------------------------------------------------------------------*/
+ warningReport(signal, 22);
+ apiConnectptr.p->timeOutCounter++;
+ if (apiConnectptr.p->timeOutCounter > 100) {
+ /*------------------------------------------------------------------*/
+ // 100 time-outs are not acceptable. We will shoot down the node
+ // not responding.
+ /*------------------------------------------------------------------*/
+ reportNodeFailed(signal, hostptr.i);
+ }//if
+ apiConnectptr.p->currentReplicaNo++;
+ }//if
+ tcurrentReplicaNo = (Uint8)Z8NIL;
+ toCompleteHandlingLab(signal);
+ return;
+ case CS_FAIL_PREPARED:
+ jam();
+ case CS_FAIL_COMMITTING:
+ jam();
+ case CS_FAIL_COMMITTED:
+ jam();
+ case CS_REC_PREPARING:
+ jam();
+ case CS_START_PREPARING:
+ jam();
+ case CS_PREPARED:
+ jam();
+ case CS_RESTART:
+ jam();
+ case CS_FAIL_ABORTED:
+ jam();
+ case CS_DISCONNECTED:
+ jam();
+ default:
+ jam();
+ /*------------------------------------------------------------------*/
+ /* AN IMPOSSIBLE STATE IS SET. CRASH THE SYSTEM. */
+ /*------------------------------------------------------------------*/
+ DEBUG("State = " << apiConnectptr.p->apiConnectstate);
+ systemErrorLab(signal);
+ return;
+ }//switch
+ return;
+}//Dbtc::timeOutFoundLab()
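+
+/* Illustrative sketch (not part of the DBTC block): the crash condition
+ * used above while waiting for DIH in CS_PREPARE_TO_COMMIT. With 10 ms
+ * timer ticks, 500 ticks correspond to the 5 second floor mentioned in
+ * the comment. Names are illustrative only.
+ */
+static bool sketch_should_crash_waiting_for_dih(Uint32 elapsedTicks,
+ Uint32 timeOutValue)
+{
+ return (elapsedTicks > 10 * timeOutValue) && (elapsedTicks > 500);
+}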
+
+void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
+{
+ ApiConnectRecord * transP = apiConnectptr.p;
+ if(transP->abortState == AS_IDLE){
+ jam();
+ warningEvent("TC: %d: %d state=%d abort==IDLE place: %d fop=%d t: %d",
+ __LINE__,
+ apiConnectptr.i,
+ transP->apiConnectstate,
+ c_apiConTimer_line[apiConnectptr.i],
+ transP->firstTcConnect,
+ c_apiConTimer[apiConnectptr.i]
+ );
+ ndbout_c("TC: %d: %d state=%d abort==IDLE place: %d fop=%d t: %d",
+ __LINE__,
+ apiConnectptr.i,
+ transP->apiConnectstate,
+ c_apiConTimer_line[apiConnectptr.i],
+ transP->firstTcConnect,
+ c_apiConTimer[apiConnectptr.i]
+ );
+ ndbrequire(false);
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ return;
+ }
+
+ OperationState tmp[16];
+
+ Uint32 TloopCount = 0;
+ do {
+ jam();
+ if (tcConnectptr.i == RNIL) {
+ jam();
+ if (Tcheck == 0) {
+ jam();
+ /*------------------------------------------------------------------
+ * All nodes have already reported ABORTED for all tcConnect records.
+ * Receiving a time-out in that situation is an error, so we crash.
+ *------------------------------------------------------------------*/
+ char buf[96]; buf[0] = 0;
+ char buf2[96];
+ BaseString::snprintf(buf, sizeof(buf), "TC %d: %d ops:",
+ __LINE__, apiConnectptr.i);
+ for(Uint32 i = 0; i<TloopCount; i++){
+ BaseString::snprintf(buf2, sizeof(buf2), "%s %d", buf, tmp[i]);
+ BaseString::snprintf(buf, sizeof(buf), "%s", buf2);
+ }
+ warningEvent("%s", buf);
+ ndbout_c("%s", buf);
+ ndbrequire(false);
+ }
+ releaseAbortResources(signal);
+ return;
+ }//if
+ TloopCount++;
+ if (TloopCount >= 1024) {
+ jam();
+ /*------------------------------------------------------------------*/
+ // Insert a real-time break for large transactions to avoid blowing
+ // away the job buffer.
+ /*------------------------------------------------------------------*/
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ apiConnectptr.p->counter++;
+ signal->theData[0] = TcContinueB::ZABORT_TIMEOUT_BREAK;
+ signal->theData[1] = tcConnectptr.i;
+ signal->theData[2] = apiConnectptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }//if
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ if(TloopCount < 16){
+ jam();
+ tmp[TloopCount-1] = tcConnectptr.p->tcConnectstate;
+ }
+
+ if (tcConnectptr.p->tcConnectstate == OS_ABORT_SENT) {
+ jam();
+ /*------------------------------------------------------------------*/
+ // We have sent an ABORT signal to this node but not yet received any
+ // reply. We have to send an ABORTED signal on our own in some cases.
+ // If the node is declared as up and running and still do not respond
+ // in time to the ABORT signal we will declare it as dead.
+ /*------------------------------------------------------------------*/
+ UintR Ti = 0;
+ arrGuard(tcConnectptr.p->noOfNodes, 4);
+ for (Ti = 0; Ti < tcConnectptr.p->noOfNodes; Ti++) {
+ jam();
+ if (tcConnectptr.p->tcNodedata[Ti] != 0) {
+ TloopCount += 31;
+ Tcheck = 1;
+ hostptr.i = tcConnectptr.p->tcNodedata[Ti];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ /*---------------------------------------------------------------
+ * A backup replica has not sent ABORTED.
+ * Could be that a node before it has crashed.
+ * Send an ABORT signal specifically to this node.
+ * We will not send to any more nodes after this
+ * to avoid race problems.
+ * To ensure that this message also serves as a heartbeat
+ * we will move this node to the primary replica seat.
+ * The primary replica and any failed node after it will
+ * be removed from the node list. Update also number of nodes.
+ * Finally break the loop to ensure we don't mess
+ * things up by executing another loop.
+ * We also update the timer to ensure we don't get time-out
+ * too early.
+ *--------------------------------------------------------------*/
+ BlockReference TBRef = calcLqhBlockRef(hostptr.i);
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = apiConnectptr.p->transid[0];
+ signal->theData[3] = apiConnectptr.p->transid[1];
+ sendSignal(TBRef, GSN_ABORT, signal, 4, JBB);
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ break;
+ } else {
+ jam();
+ /*--------------------------------------------------------------
+ * The node we are waiting for is dead. We will send ABORTED to
+ * ourselves on behalf of the failed node.
+ *--------------------------------------------------------------*/
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ signal->theData[3] = hostptr.i;
+ signal->theData[4] = ZFALSE;
+ sendSignal(cownref, GSN_ABORTED, signal, 5, JBB);
+ }//if
+ }//if
+ }//for
+ }//if
+ tcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ } while (1);
+}//Dbtc::sendAbortedAfterTimeout()
+
+void Dbtc::reportNodeFailed(Signal* signal, Uint32 nodeId)
+{
+ DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0];
+ rep->nodeId = nodeId;
+ rep->err = DisconnectRep::TcReportNodeFailed;
+ sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal,
+ DisconnectRep::SignalLength, JBB);
+}//Dbtc::reportNodeFailed()
+
+/*-------------------------------------------------*/
+/* Timeout-loop for scanned fragments. */
+/*-------------------------------------------------*/
+void Dbtc::timeOutLoopStartFragLab(Signal* signal, Uint32 TscanConPtr)
+{
+ ScanFragRecPtr timeOutPtr[8];
+ UintR tfragTimer[8];
+ UintR texpiredTime[8];
+ UintR TloopCount = 0;
+ Uint32 TtcTimer = ctcTimer;
+
+ while ((TscanConPtr + 8) < cscanFragrecFileSize) {
+ jam();
+ timeOutPtr[0].i = TscanConPtr + 0;
+ timeOutPtr[1].i = TscanConPtr + 1;
+ timeOutPtr[2].i = TscanConPtr + 2;
+ timeOutPtr[3].i = TscanConPtr + 3;
+ timeOutPtr[4].i = TscanConPtr + 4;
+ timeOutPtr[5].i = TscanConPtr + 5;
+ timeOutPtr[6].i = TscanConPtr + 6;
+ timeOutPtr[7].i = TscanConPtr + 7;
+
+ c_scan_frag_pool.getPtrForce(timeOutPtr[0]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[1]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[2]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[3]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[4]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[5]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[6]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[7]);
+
+ tfragTimer[0] = timeOutPtr[0].p->scanFragTimer;
+ tfragTimer[1] = timeOutPtr[1].p->scanFragTimer;
+ tfragTimer[2] = timeOutPtr[2].p->scanFragTimer;
+ tfragTimer[3] = timeOutPtr[3].p->scanFragTimer;
+ tfragTimer[4] = timeOutPtr[4].p->scanFragTimer;
+ tfragTimer[5] = timeOutPtr[5].p->scanFragTimer;
+ tfragTimer[6] = timeOutPtr[6].p->scanFragTimer;
+ tfragTimer[7] = timeOutPtr[7].p->scanFragTimer;
+
+ texpiredTime[0] = TtcTimer - tfragTimer[0];
+ texpiredTime[1] = TtcTimer - tfragTimer[1];
+ texpiredTime[2] = TtcTimer - tfragTimer[2];
+ texpiredTime[3] = TtcTimer - tfragTimer[3];
+ texpiredTime[4] = TtcTimer - tfragTimer[4];
+ texpiredTime[5] = TtcTimer - tfragTimer[5];
+ texpiredTime[6] = TtcTimer - tfragTimer[6];
+ texpiredTime[7] = TtcTimer - tfragTimer[7];
+
+ for (Uint32 Ti = 0; Ti < 8; Ti++) {
+ jam();
+ if (tfragTimer[Ti] != 0) {
+
+ if (texpiredTime[Ti] > ctimeOutValue) {
+ jam();
+ DEBUG("Fragment timeout found:"<<
+ " ctimeOutValue=" <<ctimeOutValue
+ <<", texpiredTime="<<texpiredTime[Ti]<<endl
+ <<" tfragTimer="<<tfragTimer[Ti]
+ <<", ctcTimer="<<ctcTimer);
+ timeOutFoundFragLab(signal, TscanConPtr + Ti);
+ return;
+ }//if
+ }//if
+ }//for
+ TscanConPtr += 8;
+ /*----------------------------------------------------------------*/
+ /* We split the process up, checking 1024 fragment records at a time */
+ /* to maintain real-time behaviour. */
+ /*----------------------------------------------------------------*/
+ if (TloopCount++ > 128 ) {
+ jam();
+ signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL;
+ signal->theData[1] = TscanConPtr;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }//if
+ }//while
+ for ( ; TscanConPtr < cscanFragrecFileSize; TscanConPtr++){
+ jam();
+ timeOutPtr[0].i = TscanConPtr;
+ c_scan_frag_pool.getPtrForce(timeOutPtr[0]);
+ if (timeOutPtr[0].p->scanFragTimer != 0) {
+ texpiredTime[0] = ctcTimer - timeOutPtr[0].p->scanFragTimer;
+ if (texpiredTime[0] > ctimeOutValue) {
+ jam();
+ DEBUG("Fragment timeout found:"<<
+ " ctimeOutValue=" <<ctimeOutValue
+ <<", texpiredTime="<<texpiredTime[0]<<endl
+ <<" tfragTimer="<<tfragTimer[0]
+ <<", ctcTimer="<<ctcTimer);
+ timeOutFoundFragLab(signal, TscanConPtr);
+ return;
+ }//if
+ }//if
+ }//for
+ ctimeOutCheckFragActive = TOCS_FALSE;
+
+ return;
+}//timeOutLoopStartFragLab()
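+
+/* Illustrative sketch (not part of the DBTC block): the per-fragment check
+ * performed by the unrolled loop above. A scan fragment has timed out when
+ * its timer is running (non-zero) and more than ctimeOutValue ticks have
+ * passed since the timer was last restarted. Names are illustrative only.
+ */
+static bool sketch_scan_frag_timed_out(Uint32 tcTimer,
+ Uint32 fragTimer,
+ Uint32 timeOutValue)
+{
+ if (fragTimer == 0)
+ return false; // timer is not running for this fragment
+ return (tcTimer - fragTimer) > timeOutValue;
+}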
+
+/*--------------------------------------------------------------------------*/
+/*Handle the heartbeat signal from LQH in a scan process */
+// (Set timer on fragrec.)
+/*--------------------------------------------------------------------------*/
+void Dbtc::execSCAN_HBREP(Signal* signal)
+{
+ jamEntry();
+
+ scanFragptr.i = signal->theData[0];
+ c_scan_frag_pool.getPtr(scanFragptr);
+ switch (scanFragptr.p->scanFragState){
+ case ScanFragRec::LQH_ACTIVE:
+ break;
+ default:
+ DEBUG("execSCAN_HBREP: scanFragState="<<scanFragptr.p->scanFragState);
+ systemErrorLab(signal);
+ break;
+ }
+
+ ScanRecordPtr scanptr;
+ scanptr.i = scanFragptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+
+ apiConnectptr.i = scanptr.p->scanApiRec;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+
+ if (!(apiConnectptr.p->transid[0] == signal->theData[1] &&
+ apiConnectptr.p->transid[1] == signal->theData[2])){
+ jam();
+ /**
+ * Send signal back to sender so that the crash occurs there
+ */
+ // Save original transid
+ signal->theData[3] = signal->theData[0];
+ signal->theData[4] = signal->theData[1];
+ // Set transid to illegal values
+ signal->theData[1] = RNIL;
+ signal->theData[2] = RNIL;
+
+ sendSignal(signal->senderBlockRef(), GSN_SCAN_HBREP, signal, 5, JBA);
+ DEBUG("SCAN_HBREP with wrong transid("
+ <<signal->theData[3]<<", "<<signal->theData[4]<<")");
+ return;
+ }//if
+
+ // Update timer on ScanFragRec
+ if (scanFragptr.p->scanFragTimer != 0){
+ updateBuddyTimer(apiConnectptr);
+ scanFragptr.p->startFragTimer(ctcTimer);
+ } else {
+ ndbassert(false);
+ DEBUG("SCAN_HBREP when scanFragTimer was turned off");
+ }
+}//execSCAN_HBREP()
+
+/*--------------------------------------------------------------------------*/
+/* A time-out has occurred on a fragment, which means a scan has timed out. */
+/* If this is true we have an error in LQH/ACC.                             */
+/*--------------------------------------------------------------------------*/
+void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr)
+{
+ ScanFragRecPtr ptr;
+ c_scan_frag_pool.getPtr(ptr, TscanConPtr);
+ DEBUG(TscanConPtr << " timeOutFoundFragLab: scanFragState = "<< ptr.p->scanFragState);
+
+ /*-------------------------------------------------------------------------*/
+ // The scan fragment has expired its timeout. Check its state to decide
+ // what to do.
+ /*-------------------------------------------------------------------------*/
+ switch (ptr.p->scanFragState) {
+ case ScanFragRec::WAIT_GET_PRIMCONF:
+ jam();
+ ndbrequire(false);
+ break;
+ case ScanFragRec::LQH_ACTIVE:{
+ jam();
+
+ /**
+ * The LQH time-out has expired, try to close the scan fragment
+ */
+ Uint32 nodeId = refToNode(ptr.p->lqhBlockref);
+ Uint32 connectCount = getNodeInfo(nodeId).m_connectCount;
+ ScanRecordPtr scanptr;
+ scanptr.i = ptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+
+ if(connectCount != ptr.p->m_connectCount){
+ jam();
+ /**
+ * The node has died
+ */
+ ptr.p->scanFragState = ScanFragRec::COMPLETED;
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+
+ run.release(ptr);
+ ptr.p->stopFragTimer();
+ }
+
+ scanError(signal, scanptr, ZSCAN_FRAG_LQH_ERROR);
+ break;
+ }
+ case ScanFragRec::DELIVERED:
+ jam();
+ case ScanFragRec::IDLE:
+ jam();
+ case ScanFragRec::QUEUED_FOR_DELIVERY:
+ jam();
+ /*-----------------------------------------------------------------------
+ * Should never occur. In a debug version we crash here; in a release
+ * version we simply stop the fragment timer and continue.
+ *-----------------------------------------------------------------------*/
+#ifdef VM_TRACE
+ systemErrorLab(signal);
+#endif
+ ptr.p->stopFragTimer(); // stop the timer of the timed-out fragment itself
+ break;
+ default:
+ jam();
+ /*-----------------------------------------------------------------------
+ * Non-existent state. Crash.
+ *-----------------------------------------------------------------------*/
+ systemErrorLab(signal);
+ break;
+ }//switch
+
+ signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL;
+ signal->theData[1] = TscanConPtr + 1;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}//timeOutFoundFragLab()
+
+
+/*
+ 4.3.16 GCP_NOMORETRANS
+ ----------------------
+*/
+/*****************************************************************************
+ * G C P _ N O M O R E T R A N S
+ *
+ * WHEN DBTC RECEIVES SIGNAL GCP_NOMORETRANS A CHECK IS DONE TO FIND OUT IF
+ * THERE ARE ANY GLOBAL CHECKPOINTS GOING ON - CFIRSTGCP /= RNIL. DBTC THEN
+ * SEARCHES THE GCP_RECORD FILE TO FIND OUT IF THERE ARE ANY TRANSACTIONS NOT
+ * CONCLUDED WITH THIS SPECIFIC CHECKPOINT - GCP_PTR:GCP_ID = TCHECK_GCP_ID.
+ * FOR EACH TRANSACTION WHERE API_CONNECTSTATE EQUALS PREPARED, COMMITTING,
+ * COMMITTED OR COMPLETING SIGNAL CONTINUEB IS SENT WITH A DELAY OF 100 MS,
+ * THE COUNTER GCP_PTR:OUTSTANDINGAPI IS INCREASED. WHEN CONTINUEB IS RECEIVED
+ * THE COUNTER IS DECREASED AND A CHECK IS DONE TO FIND OUT IF ALL
+ * TRANSACTIONS ARE CONCLUDED. IF SO, SIGNAL GCP_TCFINISHED IS SENT.
+ *****************************************************************************/
+void Dbtc::execGCP_NOMORETRANS(Signal* signal)
+{
+ jamEntry();
+ tcheckGcpId = signal->theData[1];
+ if (cfirstgcp != RNIL) {
+ jam();
+ /* A GLOBAL CHECKPOINT IS GOING ON */
+ gcpPtr.i = cfirstgcp; /* SET POINTER TO FIRST GCP IN QUEUE*/
+ ptrCheckGuard(gcpPtr, cgcpFilesize, gcpRecord);
+ if (gcpPtr.p->gcpId == tcheckGcpId) {
+ jam();
+ if (gcpPtr.p->firstApiConnect != RNIL) {
+ jam();
+ gcpPtr.p->gcpNomoretransRec = ZTRUE;
+ } else {
+ jam();
+ gcpTcfinished(signal);
+ unlinkGcp(signal);
+ }//if
+ } else {
+ jam();
+ /*------------------------------------------------------------*/
+ /* IF IT IS NOT THE FIRST THEN THERE SHOULD BE NO */
+ /* RECORD FOR THIS GLOBAL CHECKPOINT. WE ALWAYS REMOVE */
+ /* THE GLOBAL CHECKPOINTS IN ORDER. */
+ /*------------------------------------------------------------*/
+ gcpTcfinished(signal);
+ }//if
+ } else {
+ jam();
+ gcpTcfinished(signal);
+ }//if
+ return;
+}//Dbtc::execGCP_NOMORETRANS()
+
+/*****************************************************************************/
+/* */
+/* TAKE OVER MODULE */
+/* */
+/*****************************************************************************/
+/* */
+/* THIS PART OF TC TAKES OVER THE COMMIT/ABORT OF TRANSACTIONS WHERE THE */
+/* NODE ACTING AS TC HAS FAILED. IT STARTS BY QUERYING ALL NODES ABOUT    */
+/* ANY OPERATIONS PARTICIPATING IN A TRANSACTION WHERE THE TC NODE HAS    */
+/* FAILED. */
+/* */
+/* AFTER RECEIVING INFORMATION FROM ALL NODES ABOUT OPERATION STATUS THIS */
+/* CODE WILL ENSURE THAT ALL AFFECTED TRANSACTIONS ARE PROPERLY ABORTED OR*/
+/* COMMITTED. THE ORIGINATING APPLICATION NODE WILL ALSO BE CONTACTED. */
+/* IF THE ORIGINATING APPLICATION ALSO FAILED THEN THERE IS CURRENTLY NO */
+/* WAY TO FIND OUT WHETHER A TRANSACTION WAS PERFORMED OR NOT. */
+/*****************************************************************************/
+void Dbtc::execNODE_FAILREP(Signal* signal)
+{
+ HostRecordPtr tmpHostptr;
+ jamEntry();
+
+ NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
+
+ cfailure_nr = nodeFail->failNo;
+ const Uint32 tnoOfNodes = nodeFail->noOfNodes;
+ const Uint32 tnewMasterId = nodeFail->masterNodeId;
+
+ arrGuard(tnoOfNodes, MAX_NDB_NODES);
+ int index = 0;
+ for (unsigned i = 1; i< MAX_NDB_NODES; i++) {
+ if(NodeBitmask::get(nodeFail->theNodes, i)){
+ cdata[index] = i;
+ index++;
+ }//if
+ }//for
+
+ tcNodeFailptr.i = 0;
+ ptrAss(tcNodeFailptr, tcFailRecord);
+ Uint32 tindex;
+ for (tindex = 0; tindex < tnoOfNodes; tindex++) {
+ jam();
+ hostptr.i = cdata[tindex];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ /*------------------------------------------------------------*/
+ /* SET STATUS OF THE FAILED NODE TO DEAD SINCE IT HAS */
+ /* FAILED. */
+ /*------------------------------------------------------------*/
+ hostptr.p->hostStatus = HS_DEAD;
+
+ if (hostptr.p->takeOverStatus == TOS_COMPLETED) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* A VERY UNUSUAL SITUATION. THE TAKE OVER WAS COMPLETED*/
+ /* EVEN BEFORE WE HEARD ABOUT THE NODE FAILURE REPORT. */
+ /* HOWEVER UNUSUAL, THIS SITUATION IS POSSIBLE.        */
+ /*------------------------------------------------------------*/
+ /* RELEASE THE CURRENTLY UNUSED LQH CONNECTIONS. THE */
+ /* REMAINING WILL BE RELEASED WHEN THE TRANSACTION THAT */
+ /* USED THEM IS COMPLETED. */
+ /*------------------------------------------------------------*/
+ {
+ NFCompleteRep * const nfRep = (NFCompleteRep *)&signal->theData[0];
+ nfRep->blockNo = DBTC;
+ nfRep->nodeId = cownNodeid;
+ nfRep->failedNodeId = hostptr.i;
+ }
+ sendSignal(cdihblockref, GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+ } else {
+ ndbrequire(hostptr.p->takeOverStatus == TOS_IDLE);
+ hostptr.p->takeOverStatus = TOS_NODE_FAILED;
+ }//if
+
+ if (tcNodeFailptr.p->failStatus == FS_LISTENING) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* THE CURRENT TAKE OVER CAN BE AFFECTED BY THIS NODE */
+ /* FAILURE. */
+ /*------------------------------------------------------------*/
+ if (hostptr.p->lqhTransStatus == LTS_ACTIVE) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* WE WERE WAITING FOR THE FAILED NODE IN THE TAKE OVER */
+ /* PROTOCOL FOR TC. */
+ /*------------------------------------------------------------*/
+ signal->theData[0] = TcContinueB::ZNODE_TAKE_OVER_COMPLETED;
+ signal->theData[1] = hostptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+ }//if
+
+ }//for
+
+ const bool masterFailed = (cmasterNodeId != tnewMasterId);
+ cmasterNodeId = tnewMasterId;
+
+ if(getOwnNodeId() == cmasterNodeId && masterFailed){
+ /**
+ * Master has failed and I'm the new master
+ */
+ jam();
+
+ for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
+ jam();
+ ptrAss(hostptr, hostRecord);
+ if (hostptr.p->hostStatus != HS_ALIVE) {
+ jam();
+ if (hostptr.p->takeOverStatus == TOS_COMPLETED) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* SEND TAKE OVER CONFIRMATION TO ALL ALIVE NODES IF */
+ /* TAKE OVER IS COMPLETED. THIS IS PERFORMED TO ENSURE */
+ /* THAT ALL NODES AGREE ON THE IDLE STATE OF THE TAKE */
+ /* OVER. THIS MIGHT BE MISSED IN AN ERROR SITUATION IF */
+ /* THE MASTER FAILS AFTER SENDING CONFIRMATION TO THE */
+ /* NEW MASTER BUT BEFORE SENDING IT TO ANOTHER NODE   */
+ /* WHICH WAS NOT MASTER. IF THIS NODE LATER BECOMES   */
+ /* MASTER IT MIGHT START A NEW TAKE OVER EVEN AFTER   */
+ /* THE CRASHED NODE HAS ALREADY RECOVERED.            */
+ /*------------------------------------------------------------*/
+ for(tmpHostptr.i = 1; tmpHostptr.i < MAX_NDB_NODES;tmpHostptr.i++) {
+ jam();
+ ptrAss(tmpHostptr, hostRecord);
+ if (tmpHostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ tblockref = calcTcBlockRef(tmpHostptr.i);
+ signal->theData[0] = hostptr.i;
+ sendSignal(tblockref, GSN_TAKE_OVERTCCONF, signal, 1, JBB);
+ }//if
+ }//for
+ }//if
+ }//if
+ }//for
+ }
+
+ if(getOwnNodeId() == cmasterNodeId){
+ jam();
+ for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
+ jam();
+ ptrAss(hostptr, hostRecord);
+ if (hostptr.p->hostStatus != HS_ALIVE) {
+ jam();
+ if (hostptr.p->takeOverStatus == TOS_NODE_FAILED) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* CONCLUDE ALL ACTIVITIES THE FAILED TC DID CONTROL */
+ /* SINCE WE ARE THE MASTER. THIS COULD HAVE BEEN      */
+ /* STARTED BY A PREVIOUS MASTER BUT NOT YET CONCLUDED.*/
+ /*------------------------------------------------------------*/
+ hostptr.p->takeOverStatus = TOS_ACTIVE;
+ signal->theData[0] = hostptr.i;
+ sendSignal(cownref, GSN_TAKE_OVERTCREQ, signal, 1, JBB);
+ }//if
+ }//if
+ }//for
+ }//if
+ for (tindex = 0; tindex < tnoOfNodes; tindex++) {
+ jam();
+ hostptr.i = cdata[tindex];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ /*------------------------------------------------------------*/
+ /* LOOP THROUGH AND ABORT ALL SCANS THAT WERE         */
+ /* CONTROLLED BY THIS TC AND ACTIVE IN THE FAILED */
+ /* NODE'S LQH */
+ /*------------------------------------------------------------*/
+ checkScanActiveInFailedLqh(signal, 0, hostptr.i);
+ checkWaitDropTabFailedLqh(signal, hostptr.i, 0); // nodeid, tableid
+ }//for
+
+}//Dbtc::execNODE_FAILREP()
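+
+/* Illustrative sketch (not part of the DBTC block): how the failed-node
+ * bitmask carried in NODE_FAILREP is expanded into a plain list of node
+ * ids, as done at the start of execNODE_FAILREP(). Names are illustrative
+ * only.
+ */
+static Uint32 sketch_expand_node_bitmask(const Uint32 * theNodes,
+ Uint32 * nodeList)
+{
+ Uint32 count = 0;
+ for (Uint32 i = 1; i < MAX_NDB_NODES; i++) {
+ if (NodeBitmask::get(theNodes, i))
+ nodeList[count++] = i; // node i is reported as failed
+ }
+ return count;
+}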
+
+void Dbtc::checkScanActiveInFailedLqh(Signal* signal,
+ Uint32 scanPtrI,
+ Uint32 failedNodeId){
+
+ ScanRecordPtr scanptr;
+ for (scanptr.i = scanPtrI; scanptr.i < cscanrecFileSize; scanptr.i++) {
+ jam();
+ ptrAss(scanptr, scanRecord);
+ bool found = false;
+ if (scanptr.p->scanState != ScanRecord::IDLE){
+ jam();
+ ScanFragRecPtr ptr;
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+
+ for(run.first(ptr); !ptr.isNull(); ){
+ jam();
+ ScanFragRecPtr curr = ptr;
+ run.next(ptr);
+ if (curr.p->scanFragState == ScanFragRec::LQH_ACTIVE &&
+ refToNode(curr.p->lqhBlockref) == failedNodeId){
+ jam();
+
+ run.release(curr);
+ curr.p->scanFragState = ScanFragRec::COMPLETED;
+ curr.p->stopFragTimer();
+ found = true;
+ }
+ }
+ }
+ if(found){
+ jam();
+ scanError(signal, scanptr, ZSCAN_LQH_ERROR);
+ }
+
+ // Send CONTINUEB to continue later
+ signal->theData[0] = TcContinueB::ZCHECK_SCAN_ACTIVE_FAILED_LQH;
+ signal->theData[1] = scanptr.i + 1; // Check next scanptr
+ signal->theData[2] = failedNodeId;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }//for
+}
+
+void
+Dbtc::checkScanFragList(Signal* signal,
+ Uint32 failedNodeId,
+ ScanRecord * scanP,
+ ScanFragList::Head & head){
+
+ DEBUG("checkScanActiveInFailedLqh: scanFragError");
+}
+
+void Dbtc::execTAKE_OVERTCCONF(Signal* signal)
+{
+ jamEntry();
+ tfailedNodeId = signal->theData[0];
+ hostptr.i = tfailedNodeId;
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ switch (hostptr.p->takeOverStatus) {
+ case TOS_IDLE:
+ jam();
+ /*------------------------------------------------------------*/
+ /* THIS MESSAGE ARRIVED EVEN BEFORE THE NODE_FAILREP */
+ /* MESSAGE. THIS IS POSSIBLE IN EXTREME SITUATIONS. */
+ /* WE SET THE STATE TO TAKE_OVER_COMPLETED AND WAIT */
+ /* FOR THE NODE_FAILREP MESSAGE. */
+ /*------------------------------------------------------------*/
+ hostptr.p->takeOverStatus = TOS_COMPLETED;
+ break;
+ case TOS_NODE_FAILED:
+ case TOS_ACTIVE:
+ jam();
+ /*------------------------------------------------------------*/
+ /* EITHER WE ARE NOT MASTER AND THE TAKE OVER IS ACTIVE */
+ /* OR WE ARE MASTER AND THE TAKE OVER IS ACTIVE. IN BOTH*/
+ /* CASES WE SET THE STATE TO TAKE_OVER_COMPLETED.       */
+ /*------------------------------------------------------------*/
+ /* RELEASE THE CURRENTLY UNUSED LQH CONNECTIONS. THE */
+ /* REMAINING WILL BE RELEASED WHEN THE TRANSACTION THAT */
+ /* USED THEM IS COMPLETED. */
+ /*------------------------------------------------------------*/
+ hostptr.p->takeOverStatus = TOS_COMPLETED;
+ {
+ NFCompleteRep * const nfRep = (NFCompleteRep *)&signal->theData[0];
+ nfRep->blockNo = DBTC;
+ nfRep->nodeId = cownNodeid;
+ nfRep->failedNodeId = hostptr.i;
+ }
+ sendSignal(cdihblockref, GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+ break;
+ case TOS_COMPLETED:
+ jam();
+ /*------------------------------------------------------------*/
+ /* WE HAVE ALREADY RECEIVED THE CONF SIGNAL. IT IS MOST */
+ /* LIKELY SENT FROM A NEW MASTER WHICH WASN'T SURE IF */
+ /* THIS NODE HEARD THE CONF SIGNAL FROM THE OLD MASTER. */
+ /* WE SIMPLY IGNORE THE MESSAGE. */
+ /*------------------------------------------------------------*/
+ /*empty*/;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//switch
+}//Dbtc::execTAKE_OVERTCCONF()
+
+void Dbtc::execTAKE_OVERTCREQ(Signal* signal)
+{
+ jamEntry();
+ tfailedNodeId = signal->theData[0];
+ tcNodeFailptr.i = 0;
+ ptrAss(tcNodeFailptr, tcFailRecord);
+ if (tcNodeFailptr.p->failStatus != FS_IDLE) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* WE CAN CURRENTLY ONLY HANDLE ONE TAKE OVER AT A TIME */
+ /*------------------------------------------------------------*/
+ /* IF MORE THAN ONE TAKE OVER IS REQUESTED WE WILL */
+ /* QUEUE THE TAKE OVER AND START IT AS SOON AS THE */
+ /* PREVIOUS ARE COMPLETED. */
+ /*------------------------------------------------------------*/
+ arrGuard(tcNodeFailptr.p->queueIndex, MAX_NDB_NODES);
+ tcNodeFailptr.p->queueList[tcNodeFailptr.p->queueIndex] = tfailedNodeId;
+ tcNodeFailptr.p->queueIndex = tcNodeFailptr.p->queueIndex + 1;
+ return;
+ }//if
+ startTakeOverLab(signal);
+}//Dbtc::execTAKE_OVERTCREQ()
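+
+/* Illustrative sketch (not part of the DBTC block): only one TC take-over
+ * runs at a time, so additional failed nodes are queued in a fixed-size
+ * array as above and dequeued again in completeTransAtTakeOverDoLast().
+ * Names are illustrative only; the caller guards against overflow.
+ */
+static void sketch_queue_push(Uint32 * queue, Uint32 & len, Uint32 nodeId)
+{
+ queue[len++] = nodeId;
+}
+
+static Uint32 sketch_queue_pop_front(Uint32 * queue, Uint32 & len)
+{
+ const Uint32 nodeId = queue[0];
+ for (Uint32 i = 0; i + 1 < len; i++) // shift the remaining entries down
+ queue[i] = queue[i + 1];
+ len--;
+ return nodeId;
+}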
+
+/*------------------------------------------------------------*/
+/* INITIALISE THE HASH TABLES FOR STORING TRANSACTIONS */
+/* AND OPERATIONS DURING TC TAKE OVER. */
+/*------------------------------------------------------------*/
+void Dbtc::startTakeOverLab(Signal* signal)
+{
+ for (tindex = 0; tindex <= 511; tindex++) {
+ ctransidFailHash[tindex] = RNIL;
+ }//for
+ for (tindex = 0; tindex <= 1023; tindex++) {
+ ctcConnectFailHash[tindex] = RNIL;
+ }//for
+ tcNodeFailptr.p->failStatus = FS_LISTENING;
+ tcNodeFailptr.p->takeOverNode = tfailedNodeId;
+ for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
+ jam();
+ ptrAss(hostptr, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ tblockref = calcLqhBlockRef(hostptr.i);
+ hostptr.p->lqhTransStatus = LTS_ACTIVE;
+ signal->theData[0] = tcNodeFailptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = tfailedNodeId;
+ sendSignal(tblockref, GSN_LQH_TRANSREQ, signal, 3, JBB);
+ }//if
+ }//for
+}//Dbtc::startTakeOverLab()
+
+/*------------------------------------------------------------*/
+/* A REPORT OF AN OPERATION WHERE TC FAILED HAS ARRIVED.*/
+/*------------------------------------------------------------*/
+void Dbtc::execLQH_TRANSCONF(Signal* signal)
+{
+ jamEntry();
+ LqhTransConf * const lqhTransConf = (LqhTransConf *)&signal->theData[0];
+
+ tcNodeFailptr.i = lqhTransConf->tcRef;
+ ptrCheckGuard(tcNodeFailptr, 1, tcFailRecord);
+ tnodeid = lqhTransConf->lqhNodeId;
+ ttransStatus = (LqhTransConf::OperationStatus)lqhTransConf->operationStatus;
+ ttransid1 = lqhTransConf->transId1;
+ ttransid2 = lqhTransConf->transId2;
+ ttcOprec = lqhTransConf->oldTcOpRec;
+ treqinfo = lqhTransConf->requestInfo;
+ tgci = lqhTransConf->gci;
+ cnodes[0] = lqhTransConf->nextNodeId1;
+ cnodes[1] = lqhTransConf->nextNodeId2;
+ cnodes[2] = lqhTransConf->nextNodeId3;
+ const Uint32 ref = tapplRef = lqhTransConf->apiRef;
+ tapplOprec = lqhTransConf->apiOpRec;
+ const Uint32 tableId = lqhTransConf->tableId;
+
+ if (ttransStatus == LqhTransConf::LastTransConf){
+ jam();
+ /*------------------------------------------------------------*/
+ /* A NODE HAS REPORTED COMPLETION OF TAKE OVER REPORTING*/
+ /*------------------------------------------------------------*/
+ nodeTakeOverCompletedLab(signal);
+ return;
+ }//if
+ if (ttransStatus == LqhTransConf::Marker){
+ jam();
+ treqinfo = 0;
+ LqhTransConf::setMarkerFlag(treqinfo, 1);
+ } else {
+ TableRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+ switch((DictTabInfo::TableType)tabPtr.p->tableType){
+ case DictTabInfo::SystemTable:
+ case DictTabInfo::UserTable:
+ break;
+ default:
+ tapplRef = 0;
+ tapplOprec = 0;
+ }
+ }
+
+ findApiConnectFail(signal);
+
+ if(apiConnectptr.p->ndbapiBlockref == 0 && tapplRef != 0){
+ apiConnectptr.p->ndbapiBlockref = ref;
+ apiConnectptr.p->ndbapiConnect = tapplOprec;
+ }
+
+ if (ttransStatus != LqhTransConf::Marker){
+ jam();
+ findTcConnectFail(signal);
+ }
+}//Dbtc::execLQH_TRANSCONF()
+
+/*------------------------------------------------------------*/
+/* A NODE HAS REPORTED COMPLETION OF TAKE OVER REPORTING*/
+/*------------------------------------------------------------*/
+void Dbtc::nodeTakeOverCompletedLab(Signal* signal)
+{
+ Uint32 guard0;
+
+ hostptr.i = tnodeid;
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ hostptr.p->lqhTransStatus = LTS_IDLE;
+ for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
+ jam();
+ ptrAss(hostptr, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ if (hostptr.p->lqhTransStatus == LTS_ACTIVE) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* NOT ALL NODES ARE COMPLETED WITH REPORTING IN THE */
+ /* TAKE OVER. */
+ /*------------------------------------------------------------*/
+ return;
+ }//if
+ }//if
+ }//for
+ /*------------------------------------------------------------*/
+ /* ALL NODES HAVE REPORTED ON THE STATUS OF THE VARIOUS */
+ /* OPERATIONS THAT WERE CONTROLLED BY THE FAILED TC. WE */
+ /* ARE NOW IN A POSITION TO COMPLETE ALL OF THOSE */
+ /* TRANSACTIONS EITHER IN A SUCCESSFUL WAY OR IN AN */
+ /* UNSUCCESSFUL WAY. WE WILL ALSO REPORT THIS CONCLUSION*/
+ /* TO THE APPLICATION IF THAT IS STILL ALIVE. */
+ /*------------------------------------------------------------*/
+ tcNodeFailptr.p->currentHashIndexTakeOver = 0;
+ tcNodeFailptr.p->completedTakeOver = 0;
+ tcNodeFailptr.p->failStatus = FS_COMPLETING;
+ guard0 = cnoParallelTakeOver - 1;
+ /*------------------------------------------------------------*/
+ /* WE WILL COMPLETE THE TRANSACTIONS BY STARTING A */
+ /* NUMBER OF PARALLEL ACTIVITIES. EACH ACTIVITY WILL */
+ /* COMPLETE ONE TRANSACTION AT A TIME AND IN THAT */
+ /* TRANSACTION IT WILL COMPLETE ONE OPERATION AT A TIME.*/
+ /* WHEN ALL ACTIVITIES ARE COMPLETED THEN THE TAKE OVER */
+ /* IS COMPLETED. */
+ /*------------------------------------------------------------*/
+ arrGuard(guard0, MAX_NDB_NODES);
+ for (tindex = 0; tindex <= guard0; tindex++) {
+ jam();
+ tcNodeFailptr.p->takeOverProcState[tindex] = ZTAKE_OVER_ACTIVE;
+ signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
+ signal->theData[1] = tcNodeFailptr.i;
+ signal->theData[2] = tindex;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ }//for
+}//Dbtc::nodeTakeOverCompletedLab()
+
+/*------------------------------------------------------------*/
+/* COMPLETE A NEW TRANSACTION FROM THE HASH TABLE OF */
+/* TRANSACTIONS TO COMPLETE. */
+/*------------------------------------------------------------*/
+void Dbtc::completeTransAtTakeOverLab(Signal* signal, UintR TtakeOverInd)
+{
+ jam();
+ while (tcNodeFailptr.p->currentHashIndexTakeOver < 512){
+ jam();
+ apiConnectptr.i =
+ ctransidFailHash[tcNodeFailptr.p->currentHashIndexTakeOver];
+ if (apiConnectptr.i != RNIL) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* WE HAVE FOUND A TRANSACTION THAT NEEDS TO BE */
+ /* COMPLETED. REMOVE IT FROM THE HASH TABLE SUCH THAT */
+ /* NOT ANOTHER ACTIVITY ALSO TRIES TO COMPLETE THIS */
+ /* TRANSACTION. */
+ /*------------------------------------------------------------*/
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ ctransidFailHash[tcNodeFailptr.p->currentHashIndexTakeOver] =
+ apiConnectptr.p->nextApiConnect;
+
+ completeTransAtTakeOverDoOne(signal, TtakeOverInd);
+ // One transaction taken care of, return from this function
+ // and wait for the next CONTINUEB to continue processing
+ break;
+
+ } else {
+ if (tcNodeFailptr.p->currentHashIndexTakeOver < 511){
+ jam();
+ tcNodeFailptr.p->currentHashIndexTakeOver++;
+ } else {
+ jam();
+ completeTransAtTakeOverDoLast(signal, TtakeOverInd);
+ tcNodeFailptr.p->currentHashIndexTakeOver++;
+ }//if
+ }//if
+ }//while
+}//Dbtc::completeTransAtTakeOverLab()
+
+
+
+
+void Dbtc::completeTransAtTakeOverDoLast(Signal* signal, UintR TtakeOverInd)
+{
+ Uint32 guard0;
+ /*------------------------------------------------------------*/
+ /* THERE ARE NO MORE TRANSACTIONS TO COMPLETE. THIS */
+ /* ACTIVITY IS COMPLETED. */
+ /*------------------------------------------------------------*/
+ arrGuard(TtakeOverInd, MAX_NDB_NODES);
+ if (tcNodeFailptr.p->takeOverProcState[TtakeOverInd] != ZTAKE_OVER_ACTIVE) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+ tcNodeFailptr.p->takeOverProcState[TtakeOverInd] = ZTAKE_OVER_IDLE;
+ tcNodeFailptr.p->completedTakeOver++;
+
+ if (tcNodeFailptr.p->completedTakeOver == cnoParallelTakeOver) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* WE WERE THE LAST ACTIVITY THAT WAS COMPLETED. WE NEED*/
+ /* TO REPORT THE COMPLETION OF THE TAKE OVER TO ALL */
+ /* NODES THAT ARE ALIVE. */
+ /*------------------------------------------------------------*/
+ for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
+ jam();
+ ptrAss(hostptr, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ tblockref = calcTcBlockRef(hostptr.i);
+ signal->theData[0] = tcNodeFailptr.p->takeOverNode;
+ sendSignal(tblockref, GSN_TAKE_OVERTCCONF, signal, 1, JBB);
+ }//if
+ }//for
+ if (tcNodeFailptr.p->queueIndex > 0) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* THERE ARE MORE NODES TO TAKE OVER. WE NEED TO START */
+ /* THE TAKE OVER. */
+ /*------------------------------------------------------------*/
+ tfailedNodeId = tcNodeFailptr.p->queueList[0];
+ guard0 = tcNodeFailptr.p->queueIndex - 1;
+ arrGuard(guard0 + 1, MAX_NDB_NODES);
+ for (tindex = 0; tindex <= guard0; tindex++) {
+ jam();
+ tcNodeFailptr.p->queueList[tindex] =
+ tcNodeFailptr.p->queueList[tindex + 1];
+ }//for
+ tcNodeFailptr.p->queueIndex--;
+ startTakeOverLab(signal);
+ return;
+ } else {
+ jam();
+ tcNodeFailptr.p->failStatus = FS_IDLE;
+ }//if
+ }//if
+ return;
+}//Dbtc::completeTransAtTakeOverDoLast()
+
+void Dbtc::completeTransAtTakeOverDoOne(Signal* signal, UintR TtakeOverInd)
+{
+ apiConnectptr.p->takeOverRec = (Uint8)tcNodeFailptr.i;
+ apiConnectptr.p->takeOverInd = TtakeOverInd;
+
+ switch (apiConnectptr.p->apiConnectstate) {
+ case CS_FAIL_COMMITTED:
+ jam();
+ /*------------------------------------------------------------*/
+ /* ALL PARTS OF THE TRANSACTION REPORTED COMMITTED. WE */
+ /* HAVE THUS COMPLETED THE COMMIT PHASE. WE CAN REPORT */
+ /* COMMITTED TO THE APPLICATION AND CONTINUE WITH THE */
+ /* COMPLETE PHASE. */
+ /*------------------------------------------------------------*/
+ sendTCKEY_FAILCONF(signal, apiConnectptr.p);
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.p->currentTcConnect = tcConnectptr.i;
+ apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ toCompleteHandlingLab(signal);
+ return;
+ case CS_FAIL_COMMITTING:
+ jam();
+ /*------------------------------------------------------------*/
+ /* AT LEAST ONE PART WAS ONLY PREPARED AND AT LEAST ONE */
+ /* PART WAS COMMITTED. COMPLETE THE COMMIT PHASE FIRST. */
+ /* THEN CONTINUE AS AFTER COMMITTED. */
+ /*------------------------------------------------------------*/
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.p->currentTcConnect = tcConnectptr.i;
+ apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ toCommitHandlingLab(signal);
+ return;
+ case CS_FAIL_ABORTING:
+ case CS_FAIL_PREPARED:
+ jam();
+ /*------------------------------------------------------------*/
+ /* WE WILL ABORT THE TRANSACTION IF IT IS IN A PREPARED */
+ /* STATE IN THIS VERSION. IN LATER VERSIONS WE WILL */
+ /* HAVE TO ADD CODE FOR HANDLING OF PREPARED-TO-COMMIT */
+ /* TRANSACTIONS. THESE ARE NOT ALLOWED TO ABORT UNTIL WE*/
+ /* HAVE HEARD FROM THE TRANSACTION COORDINATOR. */
+ /* */
+ /* IT IS ACTUALLY POSSIBLE TO COMMIT TRANSACTIONS THAT */
+ /* ARE PREPARED. WE WILL LEAVE THIS PROBLEM UNTIL      */
+ /* LATER VERSIONS. */
+ /*------------------------------------------------------------*/
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.p->currentTcConnect = tcConnectptr.i;
+ apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ toAbortHandlingLab(signal);
+ return;
+ case CS_FAIL_ABORTED:
+ jam();
+ sendTCKEY_FAILREF(signal, apiConnectptr.p);
+
+ signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
+ signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec;
+ signal->theData[2] = apiConnectptr.p->takeOverInd;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ releaseTakeOver(signal);
+ break;
+ case CS_FAIL_COMPLETED:
+ jam();
+ sendTCKEY_FAILCONF(signal, apiConnectptr.p);
+
+ signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
+ signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec;
+ signal->theData[2] = apiConnectptr.p->takeOverInd;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ releaseApiConnectFail(signal);
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//switch
+}//Dbtc::completeTransAtTakeOverDoOne()
+
+void
+Dbtc::sendTCKEY_FAILREF(Signal* signal, const ApiConnectRecord * regApiPtr){
+ jam();
+
+ const Uint32 ref = regApiPtr->ndbapiBlockref;
+ if(ref != 0){
+ signal->theData[0] = regApiPtr->ndbapiConnect;
+ signal->theData[1] = regApiPtr->transid[0];
+ signal->theData[2] = regApiPtr->transid[1];
+
+ sendSignal(ref, GSN_TCKEY_FAILREF, signal, 3, JBB);
+ }
+}
+
+void
+Dbtc::sendTCKEY_FAILCONF(Signal* signal, ApiConnectRecord * regApiPtr){
+ jam();
+ TcKeyFailConf * const failConf = (TcKeyFailConf *)&signal->theData[0];
+
+ const Uint32 ref = regApiPtr->ndbapiBlockref;
+ const Uint32 marker = regApiPtr->commitAckMarker;
+ if(ref != 0){
+ failConf->apiConnectPtr = regApiPtr->ndbapiConnect | (marker != RNIL);
+ failConf->transId1 = regApiPtr->transid[0];
+ failConf->transId2 = regApiPtr->transid[1];
+
+ sendSignal(regApiPtr->ndbapiBlockref,
+ GSN_TCKEY_FAILCONF, signal, TcKeyFailConf::SignalLength, JBB);
+ }
+ regApiPtr->commitAckMarker = RNIL;
+}
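+
+/* Illustrative sketch (not part of the DBTC block): the commit-ack-marker
+ * flag is piggy-backed in the lowest bit of apiConnectPtr in TCKEY_FAILCONF
+ * above. A receiver could recover the two fields as shown; this assumes
+ * the real connect pointer always has its lowest bit clear. Names are
+ * illustrative only.
+ */
+static void sketch_unpack_failconf_ptr(Uint32 packedPtr,
+ Uint32 & apiConnectPtr,
+ bool & hasCommitAckMarker)
+{
+ hasCommitAckMarker = (packedPtr & 1) != 0; // low bit carries the marker flag
+ apiConnectPtr = packedPtr & ~(Uint32)1;
+}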
+
+/*------------------------------------------------------------*/
+/* THIS PART HANDLES THE ABORT PHASE IN THE CASE OF A */
+/* NODE FAILURE BEFORE THE COMMIT DECISION. */
+/*------------------------------------------------------------*/
+/* ABORT REQUEST SUCCESSFULLY COMPLETED ON TNODEID */
+/*------------------------------------------------------------*/
+void Dbtc::execABORTCONF(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ tnodeid = signal->theData[2];
+ if (ERROR_INSERTED(8045)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_ABORTCONF, signal, 2000, 5);
+ return;
+ }//if
+ if (tcConnectptr.i >= ctcConnectFilesize) {
+ errorReport(signal, 5);
+ return;
+ }//if
+ ptrAss(tcConnectptr, tcConnectRecord);
+ if (tcConnectptr.p->tcConnectstate != OS_WAIT_ABORT_CONF) {
+ warningReport(signal, 16);
+ return;
+ }//if
+ apiConnectptr.i = tcConnectptr.p->apiConnect;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ if (apiConnectptr.p->apiConnectstate != CS_WAIT_ABORT_CONF) {
+ warningReport(signal, 17);
+ return;
+ }//if
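+  /* The 64-bit transaction id is stored as two 32-bit words; XOR-ing each
+   * word with the value carried in the signal and OR-ing the results gives
+   * zero only when both words match. The same idiom is used in
+   * execCOMMITCONF and execCOMPLETECONF below. */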
+ compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[3];
+ compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[4];
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ warningReport(signal, 18);
+ return;
+ }//if
+ arrGuard(apiConnectptr.p->currentReplicaNo, 4);
+ if (tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo] !=
+ tnodeid) {
+ warningReport(signal, 19);
+ return;
+ }//if
+ tcurrentReplicaNo = (Uint8)Z8NIL;
+ tcConnectptr.p->tcConnectstate = OS_ABORTING;
+ toAbortHandlingLab(signal);
+}//Dbtc::execABORTCONF()
+
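+/**
+ * toAbortHandlingLab
+ *
+ * Walks the operations of the transaction replica by replica, starting at
+ * currentReplicaNo and counting down to replica 0 before moving on to the
+ * next tcConnect record. Replicas that reported InvalidStatus or Aborted
+ * are skipped; a replica that reported Prepared gets an ABORTREQ if its
+ * node is still alive, and the walk pauses until ABORTCONF resumes it.
+ * When the last operation has been handled the result is reported either
+ * to the take-over machinery (TCKEY_FAILREF) or via releaseAbortResources.
+ */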
+void Dbtc::toAbortHandlingLab(Signal* signal)
+{
+ do {
+ if (tcurrentReplicaNo != (Uint8)Z8NIL) {
+ jam();
+ arrGuard(tcurrentReplicaNo, 4);
+ const LqhTransConf::OperationStatus stat =
+ (LqhTransConf::OperationStatus)
+ tcConnectptr.p->failData[tcurrentReplicaNo];
+ switch(stat){
+ case LqhTransConf::InvalidStatus:
+ case LqhTransConf::Aborted:
+ jam();
+ /*empty*/;
+ break;
+ case LqhTransConf::Prepared:
+ jam();
+ hostptr.i = tcConnectptr.p->tcNodedata[tcurrentReplicaNo];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ tblockref = calcLqhBlockRef(hostptr.i);
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ tcConnectptr.p->tcConnectstate = OS_WAIT_ABORT_CONF;
+ apiConnectptr.p->apiConnectstate = CS_WAIT_ABORT_CONF;
+ apiConnectptr.p->timeOutCounter = 0;
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = apiConnectptr.p->transid[0];
+ signal->theData[3] = apiConnectptr.p->transid[1];
+ signal->theData[4] = apiConnectptr.p->tcBlockref;
+ signal->theData[5] = tcConnectptr.p->tcOprec;
+ sendSignal(tblockref, GSN_ABORTREQ, signal, 6, JBB);
+ return;
+ }//if
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//switch
+ }//if
+ if (apiConnectptr.p->currentReplicaNo > 0) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* THERE IS STILL ANOTHER REPLICA THAT NEEDS TO BE */
+ /* ABORTED. */
+ /*------------------------------------------------------------*/
+ apiConnectptr.p->currentReplicaNo--;
+ tcurrentReplicaNo = apiConnectptr.p->currentReplicaNo;
+ } else {
+ /*------------------------------------------------------------*/
+      /*       THE LAST REPLICA IN THIS OPERATION HAS BEEN ABORTED. */
+ /*------------------------------------------------------------*/
+ tcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ if (tcConnectptr.i == RNIL) {
+ /*------------------------------------------------------------*/
+ /* WE HAVE COMPLETED THE ABORT PHASE. WE CAN NOW REPORT */
+ /* THE ABORT STATUS TO THE APPLICATION AND CONTINUE */
+ /* WITH THE NEXT TRANSACTION. */
+ /*------------------------------------------------------------*/
+ if (apiConnectptr.p->takeOverRec != (Uint8)Z8NIL) {
+ jam();
+ sendTCKEY_FAILREF(signal, apiConnectptr.p);
+ const Uint32 marker = apiConnectptr.p->commitAckMarker;
+ if(marker != RNIL){
+ jam();
+
+ CommitAckMarkerPtr tmp;
+ tmp.i = marker;
+ tmp.p = m_commitAckMarkerHash.getPtr(tmp.i);
+
+ m_commitAckMarkerHash.release(tmp);
+ apiConnectptr.p->commitAckMarker = RNIL;
+ }
+
+ /*------------------------------------------------------------*/
+ /* WE HAVE COMPLETED THIS TRANSACTION NOW AND CAN */
+ /* CONTINUE THE PROCESS WITH THE NEXT TRANSACTION. */
+ /*------------------------------------------------------------*/
+ signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
+ signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec;
+ signal->theData[2] = apiConnectptr.p->takeOverInd;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ releaseTakeOver(signal);
+ } else {
+ jam();
+ releaseAbortResources(signal);
+ }//if
+ return;
+ }//if
+ apiConnectptr.p->currentTcConnect = tcConnectptr.i;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ }//if
+ } while (1);
+}//Dbtc::toAbortHandlingLab()
+
+/*------------------------------------------------------------*/
+/* THIS PART HANDLES THE COMMIT PHASE IN THE CASE OF A */
+/* NODE FAILURE IN THE MIDDLE OF THE COMMIT PHASE. */
+/*------------------------------------------------------------*/
+/* COMMIT REQUEST SUCCESSFULLY COMPLETED ON TNODEID */
+/*------------------------------------------------------------*/
+void Dbtc::execCOMMITCONF(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ tnodeid = signal->theData[1];
+ if (ERROR_INSERTED(8046)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMMITCONF, signal, 2000, 4);
+ return;
+ }//if
+ if (tcConnectptr.i >= ctcConnectFilesize) {
+ errorReport(signal, 4);
+ return;
+ }//if
+ ptrAss(tcConnectptr, tcConnectRecord);
+ if (tcConnectptr.p->tcConnectstate != OS_WAIT_COMMIT_CONF) {
+ warningReport(signal, 8);
+ return;
+ }//if
+ apiConnectptr.i = tcConnectptr.p->apiConnect;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ if (apiConnectptr.p->apiConnectstate != CS_WAIT_COMMIT_CONF) {
+ warningReport(signal, 9);
+ return;
+ }//if
+ compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[2];
+ compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[3];
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ warningReport(signal, 10);
+ return;
+ }//if
+ arrGuard(apiConnectptr.p->currentReplicaNo, 4);
+ if (tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo] !=
+ tnodeid) {
+ warningReport(signal, 11);
+ return;
+ }//if
+ if (ERROR_INSERTED(8026)) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ tcurrentReplicaNo = (Uint8)Z8NIL;
+ tcConnectptr.p->tcConnectstate = OS_COMMITTED;
+ toCommitHandlingLab(signal);
+}//Dbtc::execCOMMITCONF()
+
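+/**
+ * toCommitHandlingLab
+ *
+ * Same traversal pattern as toAbortHandlingLab, but for the commit phase:
+ * replicas still in the Prepared state get a COMMITREQ carrying the global
+ * checkpoint id as long as their node is alive, while Committed and
+ * InvalidStatus replicas are skipped. Once every operation is committed the
+ * outcome is reported (TCKEY_FAILCONF during take over, otherwise
+ * sendApiCommit) and the walk restarts from the first operation in
+ * toCompleteHandlingLab for the complete phase.
+ */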
+void Dbtc::toCommitHandlingLab(Signal* signal)
+{
+ do {
+ if (tcurrentReplicaNo != (Uint8)Z8NIL) {
+ jam();
+ arrGuard(tcurrentReplicaNo, 4);
+ switch (tcConnectptr.p->failData[tcurrentReplicaNo]) {
+ case LqhTransConf::InvalidStatus:
+ jam();
+ /*empty*/;
+ break;
+ case LqhTransConf::Committed:
+ jam();
+ /*empty*/;
+ break;
+ case LqhTransConf::Prepared:
+ jam();
+ /*------------------------------------------------------------*/
+ /* THE NODE WAS PREPARED AND IS WAITING FOR ABORT OR */
+ /* COMMIT REQUEST FROM TC. */
+ /*------------------------------------------------------------*/
+ hostptr.i = tcConnectptr.p->tcNodedata[tcurrentReplicaNo];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ tblockref = calcLqhBlockRef(hostptr.i);
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ apiConnectptr.p->apiConnectstate = CS_WAIT_COMMIT_CONF;
+ apiConnectptr.p->timeOutCounter = 0;
+ tcConnectptr.p->tcConnectstate = OS_WAIT_COMMIT_CONF;
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = apiConnectptr.p->globalcheckpointid;
+ signal->theData[3] = apiConnectptr.p->transid[0];
+ signal->theData[4] = apiConnectptr.p->transid[1];
+ signal->theData[5] = apiConnectptr.p->tcBlockref;
+ signal->theData[6] = tcConnectptr.p->tcOprec;
+ sendSignal(tblockref, GSN_COMMITREQ, signal, 7, JBB);
+ return;
+ }//if
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ }//if
+ if (apiConnectptr.p->currentReplicaNo > 0) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* THERE IS STILL ANOTHER REPLICA THAT NEEDS TO BE */
+ /* COMMITTED. */
+ /*------------------------------------------------------------*/
+ apiConnectptr.p->currentReplicaNo--;
+ tcurrentReplicaNo = apiConnectptr.p->currentReplicaNo;
+ } else {
+ /*------------------------------------------------------------*/
+      /*       THE LAST REPLICA IN THIS OPERATION HAS COMMITTED.    */
+ /*------------------------------------------------------------*/
+ tcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ if (tcConnectptr.i == RNIL) {
+ /*------------------------------------------------------------*/
+ /* WE HAVE COMPLETED THE COMMIT PHASE. WE CAN NOW REPORT*/
+ /* THE COMMIT STATUS TO THE APPLICATION AND CONTINUE */
+ /* WITH THE COMPLETE PHASE. */
+ /*------------------------------------------------------------*/
+ if (apiConnectptr.p->takeOverRec != (Uint8)Z8NIL) {
+ jam();
+ sendTCKEY_FAILCONF(signal, apiConnectptr.p);
+ } else {
+ jam();
+ sendApiCommit(signal);
+ }//if
+ apiConnectptr.p->currentTcConnect = apiConnectptr.p->firstTcConnect;
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ apiConnectptr.p->currentReplicaNo = tcurrentReplicaNo;
+ toCompleteHandlingLab(signal);
+ return;
+ }//if
+ apiConnectptr.p->currentTcConnect = tcConnectptr.i;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ }//if
+ } while (1);
+}//Dbtc::toCommitHandlingLab()
+
+/*------------------------------------------------------------*/
+/* COMMON PART TO HANDLE COMPLETE PHASE WHEN ANY NODE */
+/*       HAS FAILED.                                          */
+/*------------------------------------------------------------*/
+/*       THE NODE WITH TNODEID HAS COMPLETED THE OPERATION    */
+/*------------------------------------------------------------*/
+void Dbtc::execCOMPLETECONF(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ tnodeid = signal->theData[1];
+ if (ERROR_INSERTED(8047)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMPLETECONF, signal, 2000, 4);
+ return;
+ }//if
+ if (tcConnectptr.i >= ctcConnectFilesize) {
+ errorReport(signal, 3);
+ return;
+ }//if
+ ptrAss(tcConnectptr, tcConnectRecord);
+ if (tcConnectptr.p->tcConnectstate != OS_WAIT_COMPLETE_CONF) {
+ warningReport(signal, 12);
+ return;
+ }//if
+ apiConnectptr.i = tcConnectptr.p->apiConnect;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ if (apiConnectptr.p->apiConnectstate != CS_WAIT_COMPLETE_CONF) {
+ warningReport(signal, 13);
+ return;
+ }//if
+ compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[2];
+ compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[3];
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ warningReport(signal, 14);
+ return;
+ }//if
+ arrGuard(apiConnectptr.p->currentReplicaNo, 4);
+ if (tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo] !=
+ tnodeid) {
+ warningReport(signal, 15);
+ return;
+ }//if
+ if (ERROR_INSERTED(8028)) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ tcConnectptr.p->tcConnectstate = OS_COMPLETED;
+ tcurrentReplicaNo = (Uint8)Z8NIL;
+ toCompleteHandlingLab(signal);
+}//Dbtc::execCOMPLETECONF()
+
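+/**
+ * toCompleteHandlingLab
+ *
+ * Complete-phase counterpart of the two labs above: every replica that
+ * reported anything at all gets a COMPLETEREQ if its node is alive, while
+ * InvalidStatus replicas are skipped. When the whole transaction has been
+ * completed the take-over case continues with the next transaction via
+ * CONTINUEB, otherwise the transaction resources are released.
+ */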
+void Dbtc::toCompleteHandlingLab(Signal* signal)
+{
+ do {
+ if (tcurrentReplicaNo != (Uint8)Z8NIL) {
+ jam();
+ arrGuard(tcurrentReplicaNo, 4);
+ switch (tcConnectptr.p->failData[tcurrentReplicaNo]) {
+ case LqhTransConf::InvalidStatus:
+ jam();
+ /*empty*/;
+ break;
+ default:
+ jam();
+ /*------------------------------------------------------------*/
+ /* THIS NODE DID NOT REPORT ANYTHING FOR THIS OPERATION */
+ /* IT MUST HAVE FAILED. */
+ /*------------------------------------------------------------*/
+ /*------------------------------------------------------------*/
+ /* SEND COMPLETEREQ TO THE NEXT REPLICA. */
+ /*------------------------------------------------------------*/
+ hostptr.i = tcConnectptr.p->tcNodedata[tcurrentReplicaNo];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ tblockref = calcLqhBlockRef(hostptr.i);
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ tcConnectptr.p->tcConnectstate = OS_WAIT_COMPLETE_CONF;
+ apiConnectptr.p->apiConnectstate = CS_WAIT_COMPLETE_CONF;
+ apiConnectptr.p->timeOutCounter = 0;
+ tcConnectptr.p->apiConnect = apiConnectptr.i;
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = apiConnectptr.p->transid[0];
+ signal->theData[3] = apiConnectptr.p->transid[1];
+ signal->theData[4] = apiConnectptr.p->tcBlockref;
+ signal->theData[5] = tcConnectptr.p->tcOprec;
+ sendSignal(tblockref, GSN_COMPLETEREQ, signal, 6, JBB);
+ return;
+ }//if
+ break;
+ }//switch
+ }//if
+ if (apiConnectptr.p->currentReplicaNo != 0) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* THERE ARE STILL MORE REPLICAS IN THIS OPERATION. WE */
+ /* NEED TO CONTINUE WITH THOSE REPLICAS. */
+ /*------------------------------------------------------------*/
+ apiConnectptr.p->currentReplicaNo--;
+ tcurrentReplicaNo = apiConnectptr.p->currentReplicaNo;
+ } else {
+ tcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ if (tcConnectptr.i == RNIL) {
+ /*------------------------------------------------------------*/
+ /* WE HAVE COMPLETED THIS TRANSACTION NOW AND CAN */
+ /* CONTINUE THE PROCESS WITH THE NEXT TRANSACTION. */
+ /*------------------------------------------------------------*/
+ if (apiConnectptr.p->takeOverRec != (Uint8)Z8NIL) {
+ jam();
+ signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
+ signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec;
+ signal->theData[2] = apiConnectptr.p->takeOverInd;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ releaseTakeOver(signal);
+ } else {
+ jam();
+ releaseTransResources(signal);
+ }//if
+ return;
+ }//if
+ /*------------------------------------------------------------*/
+ /* WE HAVE COMPLETED AN OPERATION AND THERE ARE MORE TO */
+ /* COMPLETE. TAKE THE NEXT OPERATION AND START WITH THE */
+ /* FIRST REPLICA SINCE IT IS THE COMPLETE PHASE. */
+ /*------------------------------------------------------------*/
+ apiConnectptr.p->currentTcConnect = tcConnectptr.i;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ apiConnectptr.p->currentReplicaNo = tcurrentReplicaNo;
+ }//if
+ } while (1);
+}//Dbtc::toCompleteHandlingLab()
+
+/*------------------------------------------------------------*/
+/* */
+/* FIND THE API CONNECT RECORD FOR THIS TRANSACTION */
+/* DURING TAKE OVER FROM A FAILED TC. IF NONE EXISTS */
+/* YET THEN SEIZE A NEW API CONNECT RECORD AND LINK IT */
+/* INTO THE HASH TABLE. */
+/*------------------------------------------------------------*/
+void Dbtc::findApiConnectFail(Signal* signal)
+{
+ ApiConnectRecordPtr fafPrevApiConnectptr;
+ ApiConnectRecordPtr fafNextApiConnectptr;
+ UintR tfafHashNumber;
+
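+  /* ctransidFailHash has 512 buckets; the low nine bits of the first
+   * transaction id word select the bucket, and entries within a bucket
+   * are chained through nextApiConnect. */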
+ tfafHashNumber = ttransid1 & 511;
+ fafPrevApiConnectptr.i = RNIL;
+ ptrNull(fafPrevApiConnectptr);
+ arrGuard(tfafHashNumber, 512);
+ fafNextApiConnectptr.i = ctransidFailHash[tfafHashNumber];
+ ptrCheck(fafNextApiConnectptr, capiConnectFilesize, apiConnectRecord);
+FAF_LOOP:
+ jam();
+ if (fafNextApiConnectptr.i == RNIL) {
+ jam();
+ if (cfirstfreeApiConnectFail == RNIL) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+ seizeApiConnectFail(signal);
+ if (fafPrevApiConnectptr.i == RNIL) {
+ jam();
+ ctransidFailHash[tfafHashNumber] = apiConnectptr.i;
+ } else {
+ jam();
+ ptrGuard(fafPrevApiConnectptr);
+ fafPrevApiConnectptr.p->nextApiConnect = apiConnectptr.i;
+ }//if
+ apiConnectptr.p->nextApiConnect = RNIL;
+ initApiConnectFail(signal);
+ } else {
+ jam();
+ fafPrevApiConnectptr.i = fafNextApiConnectptr.i;
+ fafPrevApiConnectptr.p = fafNextApiConnectptr.p;
+ apiConnectptr.i = fafNextApiConnectptr.i;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ fafNextApiConnectptr.i = apiConnectptr.p->nextApiConnect;
+ ptrCheck(fafNextApiConnectptr, capiConnectFilesize, apiConnectRecord);
+ if ((apiConnectptr.p->transid[1] != ttransid2) ||
+ (apiConnectptr.p->transid[0] != ttransid1)) {
+ goto FAF_LOOP;
+ }//if
+ updateApiStateFail(signal);
+ }//if
+}//Dbtc::findApiConnectFail()
+
+/*----------------------------------------------------------*/
+/*   FIND THE TC CONNECT AND IF NOT FOUND ALLOCATE A NEW ONE */
+/*----------------------------------------------------------*/
+void Dbtc::findTcConnectFail(Signal* signal)
+{
+ UintR tftfHashNumber;
+
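+  /* 1024-bucket hash on (transid1 XOR tcOprec); collisions are chained
+   * through nextTcFailHash and a missing entry is seized on demand. */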
+ tftfHashNumber = (ttransid1 ^ ttcOprec) & 1023;
+ tcConnectptr.i = ctcConnectFailHash[tftfHashNumber];
+ do {
+ if (tcConnectptr.i == RNIL) {
+ jam();
+ if (cfirstfreeTcConnectFail == RNIL) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+ seizeTcConnectFail(signal);
+ linkTcInConnectionlist(signal);
+ tcConnectptr.p->nextTcFailHash = ctcConnectFailHash[tftfHashNumber];
+ ctcConnectFailHash[tftfHashNumber] = tcConnectptr.i;
+ initTcConnectFail(signal);
+ return;
+ } else {
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ if (tcConnectptr.p->tcOprec != ttcOprec) {
+ jam(); /* FRAGMENTID = TC_OPREC HERE, LOOP ANOTHER TURN */
+ tcConnectptr.i = tcConnectptr.p->nextTcFailHash;
+ } else {
+ updateTcStateFail(signal);
+ return;
+ }//if
+ }//if
+ } while (1);
+}//Dbtc::findTcConnectFail()
+
+/*----------------------------------------------------------*/
+/* INITIALISE AN API CONNECT FAIL RECORD */
+/*----------------------------------------------------------*/
+void Dbtc::initApiConnectFail(Signal* signal)
+{
+ apiConnectptr.p->transid[0] = ttransid1;
+ apiConnectptr.p->transid[1] = ttransid2;
+ apiConnectptr.p->firstTcConnect = RNIL;
+ apiConnectptr.p->currSavePointId = 0;
+ apiConnectptr.p->lastTcConnect = RNIL;
+ tblockref = calcTcBlockRef(tcNodeFailptr.p->takeOverNode);
+
+ apiConnectptr.p->tcBlockref = tblockref;
+ apiConnectptr.p->ndbapiBlockref = 0;
+ apiConnectptr.p->ndbapiConnect = 0;
+ apiConnectptr.p->buddyPtr = RNIL;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
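+  /* Map the transaction status reported by LQH onto the corresponding
+   * CS_FAIL_* take-over state; a report carrying only a marker is treated
+   * as a transaction that has already completed at the LQH. */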
+ switch(ttransStatus){
+ case LqhTransConf::Committed:
+ jam();
+ apiConnectptr.p->globalcheckpointid = tgci;
+ apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTED;
+ break;
+ case LqhTransConf::Prepared:
+ jam();
+ apiConnectptr.p->apiConnectstate = CS_FAIL_PREPARED;
+ break;
+ case LqhTransConf::Aborted:
+ jam();
+ apiConnectptr.p->apiConnectstate = CS_FAIL_ABORTED;
+ break;
+ case LqhTransConf::Marker:
+ jam();
+ apiConnectptr.p->apiConnectstate = CS_FAIL_COMPLETED;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+  }//switch
+ apiConnectptr.p->commitAckMarker = RNIL;
+ if(LqhTransConf::getMarkerFlag(treqinfo)){
+ jam();
+ CommitAckMarkerPtr tmp;
+ m_commitAckMarkerHash.seize(tmp);
+
+ ndbrequire(tmp.i != RNIL);
+
+ apiConnectptr.p->commitAckMarker = tmp.i;
+ tmp.p->transid1 = ttransid1;
+ tmp.p->transid2 = ttransid2;
+ tmp.p->apiNodeId = refToNode(tapplRef);
+ tmp.p->noOfLqhs = 1;
+ tmp.p->lqhNodeId[0] = tnodeid;
+ tmp.p->apiConnectPtr = apiConnectptr.i;
+ m_commitAckMarkerHash.add(tmp);
+ }
+}//Dbtc::initApiConnectFail()
+
+/*------------------------------------------------------------*/
+/*       INITIALISE A TC CONNECT AT TAKE OVER WHEN ALLOCATING */
+/* THE TC CONNECT RECORD. */
+/*------------------------------------------------------------*/
+void Dbtc::initTcConnectFail(Signal* signal)
+{
+ tcConnectptr.p->apiConnect = apiConnectptr.i;
+ tcConnectptr.p->tcOprec = ttcOprec;
+ Uint32 treplicaNo = LqhTransConf::getReplicaNo(treqinfo);
+ for (Uint32 i = 0; i < MAX_REPLICAS; i++) {
+ tcConnectptr.p->failData[i] = LqhTransConf::InvalidStatus;
+ }//for
+ tcConnectptr.p->tcNodedata[treplicaNo] = tnodeid;
+ tcConnectptr.p->failData[treplicaNo] = ttransStatus;
+ tcConnectptr.p->lastReplicaNo = LqhTransConf::getLastReplicaNo(treqinfo);
+ tcConnectptr.p->dirtyOp = LqhTransConf::getDirtyFlag(treqinfo);
+
+}//Dbtc::initTcConnectFail()
+
+/*----------------------------------------------------------*/
+/* INITIALISE TC NODE FAIL RECORD. */
+/*----------------------------------------------------------*/
+void Dbtc::initTcFail(Signal* signal)
+{
+ tcNodeFailptr.i = 0;
+ ptrAss(tcNodeFailptr, tcFailRecord);
+ tcNodeFailptr.p->queueIndex = 0;
+ tcNodeFailptr.p->failStatus = FS_IDLE;
+}//Dbtc::initTcFail()
+
+/*----------------------------------------------------------*/
+/* RELEASE_TAKE_OVER */
+/*----------------------------------------------------------*/
+void Dbtc::releaseTakeOver(Signal* signal)
+{
+ TcConnectRecordPtr rtoNextTcConnectptr;
+
+ rtoNextTcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ do {
+ jam();
+ tcConnectptr.i = rtoNextTcConnectptr.i;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ rtoNextTcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ releaseTcConnectFail(signal);
+ } while (rtoNextTcConnectptr.i != RNIL);
+ releaseApiConnectFail(signal);
+}//Dbtc::releaseTakeOver()
+
+/*---------------------------------------------------------------------------*/
+/* SETUP_FAIL_DATA */
+/* SETUP DATA TO REUSE TAKE OVER CODE FOR HANDLING ABORT/COMMIT IN NODE */
+/* FAILURE SITUATIONS. */
+/*---------------------------------------------------------------------------*/
+void Dbtc::setupFailData(Signal* signal)
+{
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ do {
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ switch (tcConnectptr.p->tcConnectstate) {
+ case OS_PREPARED:
+ case OS_COMMITTING:
+ jam();
+ for (tindex = 0; tindex <= tcConnectptr.p->lastReplicaNo; tindex++) {
+ jam();
+ /*-------------------------------------------------------------------
+ * KEYDATA IS USED TO KEEP AN INDICATION OF STATE IN LQH.
+ * IN THIS CASE ALL LQH'S ARE PREPARED AND WAITING FOR
+ * COMMIT/ABORT DECISION.
+ *------------------------------------------------------------------*/
+ arrGuard(tindex, 4);
+ tcConnectptr.p->failData[tindex] = LqhTransConf::Prepared;
+ }//for
+ break;
+ case OS_COMMITTED:
+ case OS_COMPLETING:
+ jam();
+ for (tindex = 0; tindex <= tcConnectptr.p->lastReplicaNo; tindex++) {
+ jam();
+ /*-------------------------------------------------------------------
+ * KEYDATA IS USED TO KEEP AN INDICATION OF STATE IN LQH.
+ * IN THIS CASE ALL LQH'S ARE COMMITTED AND WAITING FOR
+ * COMPLETE MESSAGE.
+ *------------------------------------------------------------------*/
+ arrGuard(tindex, 4);
+ tcConnectptr.p->failData[tindex] = LqhTransConf::Committed;
+ }//for
+ break;
+ case OS_COMPLETED:
+ jam();
+ for (tindex = 0; tindex <= tcConnectptr.p->lastReplicaNo; tindex++) {
+ jam();
+ /*-------------------------------------------------------------------
+ * KEYDATA IS USED TO KEEP AN INDICATION OF STATE IN LQH.
+ * IN THIS CASE ALL LQH'S ARE COMPLETED.
+ *-------------------------------------------------------------------*/
+ arrGuard(tindex, 4);
+ tcConnectptr.p->failData[tindex] = LqhTransConf::InvalidStatus;
+ }//for
+ break;
+ default:
+ jam();
+ sendSystemError(signal);
+ break;
+ }//switch
+ if (tabortInd != ZCOMMIT_SETUP) {
+ jam();
+ for (UintR Ti = 0; Ti <= tcConnectptr.p->lastReplicaNo; Ti++) {
+ hostptr.i = tcConnectptr.p->tcNodedata[Ti];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus != HS_ALIVE) {
+ jam();
+ /*-----------------------------------------------------------------
+ * FAILURE OF ANY INVOLVED NODE ALWAYS INVOKES AN ABORT DECISION.
+ *-----------------------------------------------------------------*/
+ tabortInd = ZTRUE;
+ }//if
+ }//for
+ }//if
+ tcConnectptr.p->tcConnectstate = OS_TAKE_OVER;
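+    /* tcOprec normally refers to the operation record at the failed TC;
+     * pointing it at the local record index here presumably lets the
+     * reused take-over code send a reference that is valid for this TC. */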
+ tcConnectptr.p->tcOprec = tcConnectptr.i;
+ tcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ } while (tcConnectptr.i != RNIL);
+ apiConnectptr.p->tcBlockref = cownref;
+ apiConnectptr.p->currentTcConnect = apiConnectptr.p->firstTcConnect;
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+}//Dbtc::setupFailData()
+
+/*----------------------------------------------------------*/
+/* UPDATE THE STATE OF THE API CONNECT FOR THIS PART. */
+/*----------------------------------------------------------*/
+void Dbtc::updateApiStateFail(Signal* signal)
+{
+ if(LqhTransConf::getMarkerFlag(treqinfo)){
+ jam();
+ const Uint32 marker = apiConnectptr.p->commitAckMarker;
+ if(marker == RNIL){
+ jam();
+
+ CommitAckMarkerPtr tmp;
+ m_commitAckMarkerHash.seize(tmp);
+ ndbrequire(tmp.i != RNIL);
+
+ apiConnectptr.p->commitAckMarker = tmp.i;
+ tmp.p->transid1 = ttransid1;
+ tmp.p->transid2 = ttransid2;
+ tmp.p->apiNodeId = refToNode(tapplRef);
+ tmp.p->noOfLqhs = 1;
+ tmp.p->lqhNodeId[0] = tnodeid;
+ tmp.p->apiConnectPtr = apiConnectptr.i;
+ m_commitAckMarkerHash.add(tmp);
+ } else {
+ jam();
+
+ CommitAckMarkerPtr tmp;
+ tmp.i = marker;
+ tmp.p = m_commitAckMarkerHash.getPtr(marker);
+
+ const Uint32 noOfLqhs = tmp.p->noOfLqhs;
+ ndbrequire(noOfLqhs < MAX_REPLICAS);
+ tmp.p->lqhNodeId[noOfLqhs] = tnodeid;
+ tmp.p->noOfLqhs = (noOfLqhs + 1);
+ }
+ }
+
+ switch (ttransStatus) {
+ case LqhTransConf::Committed:
+ jam();
+ switch (apiConnectptr.p->apiConnectstate) {
+ case CS_FAIL_COMMITTING:
+ case CS_FAIL_COMMITTED:
+ jam();
+ ndbrequire(tgci == apiConnectptr.p->globalcheckpointid);
+ break;
+ case CS_FAIL_PREPARED:
+ jam();
+ apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTING;
+ apiConnectptr.p->globalcheckpointid = tgci;
+ break;
+ case CS_FAIL_COMPLETED:
+ jam();
+ apiConnectptr.p->globalcheckpointid = tgci;
+ apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTED;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ break;
+ }//switch
+ break;
+ case LqhTransConf::Prepared:
+ jam();
+ switch (apiConnectptr.p->apiConnectstate) {
+ case CS_FAIL_COMMITTED:
+ jam();
+ apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTING;
+ break;
+ case CS_FAIL_ABORTED:
+ jam();
+ apiConnectptr.p->apiConnectstate = CS_FAIL_ABORTING;
+ break;
+ case CS_FAIL_COMMITTING:
+ case CS_FAIL_PREPARED:
+ case CS_FAIL_ABORTING:
+ jam();
+ /*empty*/;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ break;
+ }//switch
+ break;
+ case LqhTransConf::Aborted:
+ jam();
+ switch (apiConnectptr.p->apiConnectstate) {
+ case CS_FAIL_COMMITTING:
+ case CS_FAIL_COMMITTED:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case CS_FAIL_PREPARED:
+ jam();
+ apiConnectptr.p->apiConnectstate = CS_FAIL_ABORTING;
+ break;
+ case CS_FAIL_ABORTING:
+ case CS_FAIL_ABORTED:
+ jam();
+ /*empty*/;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ break;
+ }//switch
+ break;
+ case LqhTransConf::Marker:
+ jam();
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ break;
+ }//switch
+}//Dbtc::updateApiStateFail()
+
+/*------------------------------------------------------------*/
+/* UPDATE_TC_STATE_FAIL */
+/* */
+/* WE NEED TO UPDATE THE STATUS OF TC_CONNECT RECORD AND*/
+/* WE ALSO NEED TO CHECK THAT THERE IS CONSISTENCY */
+/* BETWEEN THE DIFFERENT REPLICAS. */
+/*------------------------------------------------------------*/
+void Dbtc::updateTcStateFail(Signal* signal)
+{
+ const Uint8 treplicaNo = LqhTransConf::getReplicaNo(treqinfo);
+ const Uint8 tlastReplicaNo = LqhTransConf::getLastReplicaNo(treqinfo);
+ const Uint8 tdirtyOp = LqhTransConf::getDirtyFlag(treqinfo);
+
+ TcConnectRecord * regTcPtr = tcConnectptr.p;
+
+ ndbrequire(regTcPtr->apiConnect == apiConnectptr.i);
+ ndbrequire(regTcPtr->failData[treplicaNo] == LqhTransConf::InvalidStatus);
+ ndbrequire(regTcPtr->lastReplicaNo == tlastReplicaNo);
+ ndbrequire(regTcPtr->dirtyOp == tdirtyOp);
+
+ regTcPtr->tcNodedata[treplicaNo] = tnodeid;
+ regTcPtr->failData[treplicaNo] = ttransStatus;
+}//Dbtc::updateTcStateFail()
+
+void Dbtc::execTCGETOPSIZEREQ(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(8000);
+
+ UintR Tuserpointer = signal->theData[0]; /* DBDIH POINTER */
+ BlockReference Tusersblkref = signal->theData[1];/* DBDIH BLOCK REFERENCE */
+ signal->theData[0] = Tuserpointer;
+ signal->theData[1] = coperationsize;
+ sendSignal(Tusersblkref, GSN_TCGETOPSIZECONF, signal, 2, JBB);
+}//Dbtc::execTCGETOPSIZEREQ()
+
+void Dbtc::execTC_CLOPSIZEREQ(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(8001);
+
+ tuserpointer = signal->theData[0];
+ tusersblkref = signal->theData[1];
+ /* DBDIH BLOCK REFERENCE */
+ coperationsize = 0;
+ signal->theData[0] = tuserpointer;
+ sendSignal(tusersblkref, GSN_TC_CLOPSIZECONF, signal, 1, JBB);
+}//Dbtc::execTC_CLOPSIZEREQ()
+
+/* ######################################################################### */
+/* ####### ERROR MODULE ####### */
+/* ######################################################################### */
+void Dbtc::tabStateErrorLab(Signal* signal)
+{
+ terrorCode = ZSTATE_ERROR;
+ releaseAtErrorLab(signal);
+}//Dbtc::tabStateErrorLab()
+
+void Dbtc::wrongSchemaVersionErrorLab(Signal* signal)
+{
+ const TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0];
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = tcKeyReq->tableId;
+ const Uint32 schemVer = tcKeyReq->tableSchemaVersion;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+
+ terrorCode = tabPtr.p->getErrorCode(schemVer);
+
+ abortErrorLab(signal);
+}//Dbtc::wrongSchemaVersionErrorLab()
+
+void Dbtc::noFreeConnectionErrorLab(Signal* signal)
+{
+ terrorCode = ZNO_FREE_TC_CONNECTION;
+ abortErrorLab(signal); /* RECORD. OTHERWISE GOTO ERRORHANDLING */
+}//Dbtc::noFreeConnectionErrorLab()
+
+void Dbtc::aiErrorLab(Signal* signal)
+{
+ terrorCode = ZLENGTH_ERROR;
+ abortErrorLab(signal);
+}//Dbtc::aiErrorLab()
+
+void Dbtc::seizeAttrbuferrorLab(Signal* signal)
+{
+ terrorCode = ZGET_ATTRBUF_ERROR;
+ abortErrorLab(signal);
+}//Dbtc::seizeAttrbuferrorLab()
+
+void Dbtc::seizeDatabuferrorLab(Signal* signal)
+{
+ terrorCode = ZGET_DATAREC_ERROR;
+ releaseAtErrorLab(signal);
+}//Dbtc::seizeDatabuferrorLab()
+
+void Dbtc::releaseAtErrorLab(Signal* signal)
+{
+ ptrGuard(tcConnectptr);
+ tcConnectptr.p->tcConnectstate = OS_ABORTING;
+ /*-------------------------------------------------------------------------*
+ * A FAILURE OF THIS OPERATION HAS OCCURRED. THIS FAILURE WAS EITHER A
+ * FAULTY PARAMETER OR A RESOURCE THAT WAS NOT AVAILABLE.
+ * WE WILL ABORT THE ENTIRE TRANSACTION SINCE THIS IS THE SAFEST PATH
+ * TO HANDLE THIS PROBLEM.
+ * SINCE WE HAVE NOT YET CONTACTED ANY LQH WE SET NUMBER OF NODES TO ZERO
+ * WE ALSO SET THE STATE TO ABORTING TO INDICATE THAT WE ARE NOT EXPECTING
+ * ANY SIGNALS.
+ *-------------------------------------------------------------------------*/
+ tcConnectptr.p->noOfNodes = 0;
+ abortErrorLab(signal);
+}//Dbtc::releaseAtErrorLab()
+
+void Dbtc::warningHandlerLab(Signal* signal)
+{
+ ndbassert(false);
+}//Dbtc::warningHandlerLab()
+
+void Dbtc::systemErrorLab(Signal* signal)
+{
+ progError(0, 0);
+}//Dbtc::systemErrorLab()
+
+
+/* ######################################################################### *
+ * ####### SCAN MODULE ####### *
+ * ######################################################################### *
+
+ The application orders a scan of a table. We divide the scan into a scan on
+ each fragment. The scan uses the primary replicas since the scan might be
+ used for an update in a separate transaction.
+
+ Scans are always done as a separate transaction. Locks from the scan
+ can be overtaken by another transaction. Scans can never lock the entire
+ table. Locks are released immediately after the read has been verified
+ by the application. There is not even an option to leave the locks.
+ The reason is that this would hurt real-time behaviour too much.
+
+ -# The first step in handling a scan of a table is to receive all signals
+ defining the scan. If failures occur during this step we release all
+  resources and reply with SCAN_TABREF, providing the error code.
+ If system load is too high, the request will not be allowed.
+
+ -# The second step retrieves the number of fragments that exist in the
+  table. It also ensures that the table actually exists. After this,
+ the scan is ready to be parallelised. The idea is that the receiving
+ process (hereafter called delivery process) will start up a number
+ of scan processes. Each of these scan processes will
+ independently scan one fragment at a time. The delivery
+ process object is the scan record and the scan process object is
+ the scan fragment record plus the scan operation record.
+
+ -# The third step is thus performed in parallel. In the third step each
+ scan process retrieves the primary replica of the fragment it will
+ scan. Then it starts the scan as soon as the load on that node permits.
+
+  The LQH returns either when it has retrieved the maximum number of tuples or
+  when it has retrieved at least one tuple and is hindered by a lock from
+  retrieving the next tuple. This ensures that a scan process can never
+  be involved in a deadlock situation.
+
+ When the scan process receives a number of tuples to report to the
+ application it checks the state of the delivery process. Only one delivery
+  at a time is handled by the application. Thus, if the delivery process
+  has already sent a number of tuples to the application, this set of tuples
+  is queued.
+
+ When the application requests the next set of tuples it is immediately
+ delivered if any are queued, otherwise it waits for the next scan
+ process that is ready to deliver.
+
+
+ ERROR HANDLING
+
+ As already mentioned it is rather easy to handle errors before the scan
+ processes have started. In this case it is enough to release the resources
+  and send SCAN_TABREF.
+
+ If an error occurs in any of the scan processes then we have to stop all
+ scan processes. We do however only stop the delivery process and ask
+  the api to order us to close the scan. The reason is that we could easily
+  run into difficult timing problems, since the application and this
+  block are out of synch; we therefore always start by reporting the error
+  to the application and waiting for a close request. This error report uses the
+ SCAN_TABREF signal with a special error code that the api must check for.
+
+
+ CLOSING AN ACTIVE SCAN
+
+ The application can close a scan for several reasons before it is completed.
+ One reason was mentioned above where an error in a scan process led to a
+ request to close the scan. Another reason could simply be that the
+ application found what it looked for and is thus not interested in the
+ rest of the scan.
+
+  It could also be caused by internal errors in the API.
+
+ When a close scan request is received, all scan processes are stopped and all
+ resources belonging to those scan processes are released. Stopping the scan
+ processes most often includes communication with an LQH where the local scan
+  is controlled. Finally, all resources belonging to the scan are released and
+  SCAN_TABCONF is sent with an indication that the scan is closed.
+
+
+ CLOSING A COMPLETED SCAN
+
+ When all scan processes are completed then a report is sent to the
+ application which indicates that no more tuples can be fetched.
+ The application will send a close scan and the same action as when
+ closing an active scan is performed.
+  In this case it will of course not find any active scan processes;
+  all scan processes will already have been released.
+
+ The reason for requiring the api to close the scan is the same as above.
+  It is to avoid any timing problems due to the api and this block
+  being out of synch.
+
+ * ######################################################################## */
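+/**
+ * Rough signal flow for a successful scan as implemented below (a sketch,
+ * not an exhaustive list):
+ *
+ *   SCAN_TABREQ + ATTRINFO from the API define the scan;
+ *   DI_FCOUNTREQ/CONF (or DIGETNODESREQ for a pruned scan) fetch the
+ *   fragment count from DIH;
+ *   DIGETPRIMREQ/CONF per fragment locate the primary replica;
+ *   SCAN_FRAGREQ + ATTRINFO start the fragment scan in LQH, which answers
+ *   with SCAN_FRAGCONF batches;
+ *   batches are delivered to the API (or queued) and the scan ends with
+ *   SCAN_TABCONF, or with SCAN_TABREF and a close request on errors.
+ */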
+void Dbtc::execSCAN_TABREQ(Signal* signal)
+{
+ const ScanTabReq * const scanTabReq = (ScanTabReq *)&signal->theData[0];
+ const Uint32 ri = scanTabReq->requestInfo;
+ const Uint32 aiLength = (scanTabReq->attrLenKeyLen & 0xFFFF);
+ const Uint32 keyLen = scanTabReq->attrLenKeyLen >> 16;
+ const Uint32 schemaVersion = scanTabReq->tableSchemaVersion;
+ const Uint32 transid1 = scanTabReq->transId1;
+ const Uint32 transid2 = scanTabReq->transId2;
+ const Uint32 tmpXX = scanTabReq->buddyConPtr;
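+  // A buddyConPtr of 0xFFFFFFFF from the API means "no buddy transaction".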
+ const Uint32 buddyPtr = (tmpXX == 0xFFFFFFFF ? RNIL : tmpXX);
+ Uint32 currSavePointId = 0;
+
+ Uint32 scanConcurrency = scanTabReq->getParallelism(ri);
+ Uint32 noOprecPerFrag = ScanTabReq::getScanBatch(ri);
+ Uint32 scanParallel = scanConcurrency;
+ Uint32 errCode;
+ ScanRecordPtr scanptr;
+
+ jamEntry();
+
+ SegmentedSectionPtr api_op_ptr;
+ signal->getSection(api_op_ptr, 0);
+ copy(&cdata[0], api_op_ptr);
+ releaseSections(signal);
+
+ apiConnectptr.i = scanTabReq->apiConnectPtr;
+ tabptr.i = scanTabReq->tableId;
+
+ if (apiConnectptr.i >= capiConnectFilesize)
+ {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//if
+
+ ptrAss(apiConnectptr, apiConnectRecord);
+ ApiConnectRecord * transP = apiConnectptr.p;
+
+ if (transP->apiConnectstate != CS_CONNECTED) {
+ jam();
+ // could be left over from TCKEYREQ rollback
+ if (transP->apiConnectstate == CS_ABORTING &&
+ transP->abortState == AS_IDLE) {
+ jam();
+ } else if(transP->apiConnectstate == CS_STARTED &&
+ transP->firstTcConnect == RNIL){
+ jam();
+ // left over from simple/dirty read
+ } else {
+ jam();
+ errCode = ZSTATE_ERROR;
+ goto SCAN_TAB_error_no_state_change;
+ }
+ }
+
+ if(tabptr.i >= ctabrecFilesize)
+ {
+ errCode = ZUNKNOWN_TABLE_ERROR;
+ goto SCAN_TAB_error;
+ }
+
+ ptrAss(tabptr, tableRecord);
+ if ((aiLength == 0) ||
+ (!tabptr.p->checkTable(schemaVersion)) ||
+ (scanConcurrency == 0) ||
+ (cfirstfreeTcConnect == RNIL) ||
+ (cfirstfreeScanrec == RNIL)) {
+ goto SCAN_error_check;
+ }
+ if (buddyPtr != RNIL) {
+ jam();
+ ApiConnectRecordPtr buddyApiPtr;
+ buddyApiPtr.i = buddyPtr;
+ ptrCheckGuard(buddyApiPtr, capiConnectFilesize, apiConnectRecord);
+ if ((transid1 == buddyApiPtr.p->transid[0]) &&
+ (transid2 == buddyApiPtr.p->transid[1])) {
+ jam();
+
+ if (buddyApiPtr.p->apiConnectstate == CS_ABORTING) {
+ // transaction has been aborted
+ jam();
+ errCode = buddyApiPtr.p->returncode;
+ goto SCAN_TAB_error;
+ }//if
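+      /* The scan borrows the buddy transaction's current savepoint id and
+       * bumps it, presumably so that the scan observes the buddy's changes
+       * made up to this point. */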
+ currSavePointId = buddyApiPtr.p->currSavePointId;
+ buddyApiPtr.p->currSavePointId++;
+ }
+ }
+
+ seizeTcConnect(signal);
+ tcConnectptr.p->apiConnect = apiConnectptr.i;
+ tcConnectptr.p->tcConnectstate = OS_WAIT_SCAN;
+ apiConnectptr.p->lastTcConnect = tcConnectptr.i;
+
+ seizeCacheRecord(signal);
+ cachePtr.p->keylen = keyLen;
+ cachePtr.p->save1 = 0;
+ cachePtr.p->distributionKey = scanTabReq->distributionKey;
+ cachePtr.p->distributionKeyIndicator= ScanTabReq::getDistributionKeyFlag(ri);
+ scanptr = seizeScanrec(signal);
+
+ ndbrequire(transP->apiScanRec == RNIL);
+ ndbrequire(scanptr.p->scanApiRec == RNIL);
+
+ initScanrec(scanptr, scanTabReq, scanParallel, noOprecPerFrag);
+
+ transP->apiScanRec = scanptr.i;
+ transP->returncode = 0;
+ transP->transid[0] = transid1;
+ transP->transid[1] = transid2;
+ transP->buddyPtr = buddyPtr;
+
+ // The scan is started
+ transP->apiConnectstate = CS_START_SCAN;
+ transP->currSavePointId = currSavePointId;
+
+ /**********************************************************
+   * We start the timer on scanRec to be able to discover a
+   * timeout in the API; the API is now in charge!
+ ***********************************************************/
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ updateBuddyTimer(apiConnectptr);
+
+ /***********************************************************
+ * WE HAVE NOW RECEIVED ALL REFERENCES TO SCAN OBJECTS IN
+ * THE API. WE ARE NOW READY TO RECEIVE THE ATTRIBUTE INFO
+   * IF THERE IS ANY TO RECEIVE.
+ **********************************************************/
+ scanptr.p->scanState = ScanRecord::WAIT_AI;
+ return;
+
+ SCAN_error_check:
+ if (aiLength == 0) {
+    jam();
+ errCode = ZSCAN_AI_LEN_ERROR;
+ goto SCAN_TAB_error;
+ }//if
+ if (!tabptr.p->checkTable(schemaVersion)){
+ jam();
+ errCode = tabptr.p->getErrorCode(schemaVersion);
+ goto SCAN_TAB_error;
+ }//if
+ if (scanConcurrency == 0) {
+ jam();
+ errCode = ZNO_CONCURRENCY_ERROR;
+ goto SCAN_TAB_error;
+ }//if
+ if (cfirstfreeTcConnect == RNIL) {
+ jam();
+ errCode = ZNO_FREE_TC_CONNECTION;
+ goto SCAN_TAB_error;
+ }//if
+ ndbrequire(cfirstfreeScanrec == RNIL);
+ jam();
+ errCode = ZNO_SCANREC_ERROR;
+ goto SCAN_TAB_error;
+
+SCAN_TAB_error:
+ jam();
+ /**
+   * Prepare for upcoming ATTRINFO/KEYINFO
+ */
+ transP->apiConnectstate = CS_ABORTING;
+ transP->abortState = AS_IDLE;
+ transP->transid[0] = transid1;
+ transP->transid[1] = transid2;
+
+SCAN_TAB_error_no_state_change:
+
+ ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
+ ref->apiConnectPtr = transP->ndbapiConnect;
+ ref->transId1 = transid1;
+ ref->transId2 = transid2;
+ ref->errorCode = errCode;
+ ref->closeNeeded = 0;
+ sendSignal(transP->ndbapiBlockref, GSN_SCAN_TABREF,
+ signal, ScanTabRef::SignalLength, JBB);
+ return;
+}//Dbtc::execSCAN_TABREQ()
+
+void Dbtc::initScanrec(ScanRecordPtr scanptr,
+ const ScanTabReq * scanTabReq,
+ UintR scanParallel,
+ UintR noOprecPerFrag)
+{
+ const UintR ri = scanTabReq->requestInfo;
+ scanptr.p->scanTcrec = tcConnectptr.i;
+ scanptr.p->scanApiRec = apiConnectptr.i;
+ scanptr.p->scanAiLength = scanTabReq->attrLenKeyLen & 0xFFFF;
+ scanptr.p->scanKeyLen = scanTabReq->attrLenKeyLen >> 16;
+ scanptr.p->scanTableref = tabptr.i;
+ scanptr.p->scanSchemaVersion = scanTabReq->tableSchemaVersion;
+ scanptr.p->scanParallel = scanParallel;
+ scanptr.p->first_batch_size_rows = scanTabReq->first_batch_size;
+ scanptr.p->batch_byte_size = scanTabReq->batch_byte_size;
+ scanptr.p->batch_size_rows = noOprecPerFrag;
+
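+  /* Repack the API-level ScanTabReq request-info bits into the ScanFragReq
+   * format that will later be sent to the LQHs. */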
+ Uint32 tmp = 0;
+ ScanFragReq::setLockMode(tmp, ScanTabReq::getLockMode(ri));
+ ScanFragReq::setHoldLockFlag(tmp, ScanTabReq::getHoldLockFlag(ri));
+ ScanFragReq::setKeyinfoFlag(tmp, ScanTabReq::getKeyinfoFlag(ri));
+ ScanFragReq::setReadCommittedFlag(tmp,ScanTabReq::getReadCommittedFlag(ri));
+ ScanFragReq::setRangeScanFlag(tmp, ScanTabReq::getRangeScanFlag(ri));
+ ScanFragReq::setDescendingFlag(tmp, ScanTabReq::getDescendingFlag(ri));
+ ScanFragReq::setTupScanFlag(tmp, ScanTabReq::getTupScanFlag(ri));
+ ScanFragReq::setAttrLen(tmp, scanTabReq->attrLenKeyLen & 0xFFFF);
+
+ scanptr.p->scanRequestInfo = tmp;
+ scanptr.p->scanStoredProcId = scanTabReq->storedProcId;
+ scanptr.p->scanState = ScanRecord::RUNNING;
+ scanptr.p->m_queued_count = 0;
+
+ ScanFragList list(c_scan_frag_pool,
+ scanptr.p->m_running_scan_frags);
+ for (Uint32 i = 0; i < scanParallel; i++) {
+ jam();
+ ScanFragRecPtr ptr;
+ ndbrequire(list.seize(ptr));
+ ptr.p->scanRec = scanptr.i;
+ ptr.p->scanFragId = 0;
+ ptr.p->m_apiPtr = cdata[i];
+ }//for
+
+ (* (ScanTabReq::getRangeScanFlag(ri) ?
+ &c_counters.c_range_scan_count :
+ &c_counters.c_scan_count))++;
+}//Dbtc::initScanrec()
+
+void Dbtc::scanTabRefLab(Signal* signal, Uint32 errCode)
+{
+ ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
+ ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
+ ref->transId1 = apiConnectptr.p->transid[0];
+ ref->transId2 = apiConnectptr.p->transid[1];
+ ref->errorCode = errCode;
+ ref->closeNeeded = 0;
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABREF,
+ signal, ScanTabRef::SignalLength, JBB);
+}//Dbtc::scanTabRefLab()
+
+/*---------------------------------------------------------------------------*/
+/* */
+/* RECEPTION OF ATTRINFO FOR SCAN TABLE REQUEST. */
+/*---------------------------------------------------------------------------*/
+void Dbtc::scanAttrinfoLab(Signal* signal, UintR Tlen)
+{
+ ScanRecordPtr scanptr;
+ scanptr.i = apiConnectptr.p->apiScanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ cachePtr.i = apiConnectptr.p->cachePtr;
+ ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
+ CacheRecord * const regCachePtr = cachePtr.p;
+ ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_AI);
+
+ regCachePtr->currReclenAi = regCachePtr->currReclenAi + Tlen;
+ if (regCachePtr->currReclenAi < scanptr.p->scanAiLength) {
+ if (cfirstfreeAttrbuf == RNIL) {
+ goto scanAttrinfo_attrbuf_error;
+ }//if
+ saveAttrbuf(signal);
+ } else {
+ if (regCachePtr->currReclenAi > scanptr.p->scanAiLength) {
+ goto scanAttrinfo_len_error;
+ } else {
+ /* CURR_RECLEN_AI = SCAN_AI_LENGTH */
+ if (cfirstfreeAttrbuf == RNIL) {
+ goto scanAttrinfo_attrbuf2_error;
+ }//if
+ saveAttrbuf(signal);
+ /**************************************************
+ * WE HAVE NOW RECEIVED ALL INFORMATION CONCERNING
+ * THIS SCAN. WE ARE READY TO START THE ACTUAL
+ * EXECUTION OF THE SCAN QUERY
+ **************************************************/
+ diFcountReqLab(signal, scanptr);
+ return;
+ }//if
+ }//if
+ return;
+
+scanAttrinfo_attrbuf_error:
+ jam();
+ abortScanLab(signal, scanptr, ZGET_ATTRBUF_ERROR);
+ return;
+
+scanAttrinfo_attrbuf2_error:
+ jam();
+ abortScanLab(signal, scanptr, ZGET_ATTRBUF_ERROR);
+ return;
+
+scanAttrinfo_len_error:
+ jam();
+ abortScanLab(signal, scanptr, ZLENGTH_ERROR);
+ return;
+}//Dbtc::scanAttrinfoLab()
+
+void Dbtc::diFcountReqLab(Signal* signal, ScanRecordPtr scanptr)
+{
+ /**
+   * Check that the table is not being dropped
+ */
+ TableRecordPtr tabPtr;
+ tabPtr.i = scanptr.p->scanTableref;
+ tabPtr.p = &tableRecord[tabPtr.i];
+ if (tabPtr.p->checkTable(scanptr.p->scanSchemaVersion)){
+ ;
+ } else {
+ abortScanLab(signal, scanptr,
+ tabPtr.p->getErrorCode(scanptr.p->scanSchemaVersion));
+ return;
+ }
+
+ scanptr.p->scanNextFragId = 0;
+ scanptr.p->m_booked_fragments_count= 0;
+ scanptr.p->scanState = ScanRecord::WAIT_FRAGMENT_COUNT;
+
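+  /* Without a distribution key the fragment count is fetched from DIH and
+   * every fragment is scanned. With a distribution key the scan is pruned
+   * to the single fragment resolved by DIGETNODESREQ, and the DI_FCOUNT
+   * handling is invoked directly with a fragment count of one. */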
+ if(!cachePtr.p->distributionKeyIndicator)
+ {
+ jam();
+ /*************************************************
+     * THE FIRST STEP, RECEIVING THE SCAN DEFINITION, IS COMPLETED.
+ * WE MUST FIRST GET THE NUMBER OF FRAGMENTS IN THE TABLE.
+ ***************************************************/
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = scanptr.p->scanTableref;
+ sendSignal(cdihblockref, GSN_DI_FCOUNTREQ, signal, 2, JBB);
+ }
+ else
+ {
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = cachePtr.p->distributionKey;
+ EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal, 3);
+ UintR TerrorIndicator = signal->theData[0];
+ jamEntry();
+ if (TerrorIndicator != 0) {
+ signal->theData[0] = tcConnectptr.i;
+ //signal->theData[1] Contains error
+ execDI_FCOUNTREF(signal);
+ return;
+ }
+
+ UintR Tdata1 = signal->theData[1];
+ scanptr.p->scanNextFragId = Tdata1;
+
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = 1; // Frag count
+ execDI_FCOUNTCONF(signal);
+ }
+ return;
+}//Dbtc::diFcountReqLab()
+
+/********************************************************************
+ * execDI_FCOUNTCONF
+ *
+ * WE HAVE ASKED DIH ABOUT THE NUMBER OF FRAGMENTS IN THIS TABLE.
+ * WE WILL NOW START A NUMBER OF PARALLEL SCAN PROCESSES. EACH OF
+ * THESE WILL SCAN ONE FRAGMENT AT A TIME. THEY WILL CONTINUE THIS
+ * UNTIL THERE ARE NO MORE FRAGMENTS TO SCAN OR UNTIL THE APPLICATION
+ * CLOSES THE SCAN.
+ ********************************************************************/
+void Dbtc::execDI_FCOUNTCONF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ Uint32 tfragCount = signal->theData[1];
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.i = tcConnectptr.p->apiConnect;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ ScanRecordPtr scanptr;
+ scanptr.i = apiConnectptr.p->apiScanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+ ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_FRAGMENT_COUNT);
+ if (apiConnectptr.p->apiFailState == ZTRUE) {
+ jam();
+ releaseScanResources(scanptr);
+ handleApiFailState(signal, apiConnectptr.i);
+ return;
+ }//if
+ if (tfragCount == 0) {
+ jam();
+ abortScanLab(signal, scanptr, ZNO_FRAGMENT_ERROR);
+ return;
+ }//if
+
+ /**
+   * Check that the table is not being dropped
+ */
+ TableRecordPtr tabPtr;
+ tabPtr.i = scanptr.p->scanTableref;
+ tabPtr.p = &tableRecord[tabPtr.i];
+ if (tabPtr.p->checkTable(scanptr.p->scanSchemaVersion)){
+ ;
+ } else {
+ abortScanLab(signal, scanptr,
+ tabPtr.p->getErrorCode(scanptr.p->scanSchemaVersion));
+ return;
+ }
+
+ scanptr.p->scanParallel = tfragCount;
+ scanptr.p->scanNoFrag = tfragCount;
+ scanptr.p->scanState = ScanRecord::RUNNING;
+
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ updateBuddyTimer(apiConnectptr);
+
+ ScanFragRecPtr ptr;
+ ScanFragList list(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+ for (list.first(ptr); !ptr.isNull() && tfragCount;
+ list.next(ptr), tfragCount--){
+ jam();
+
+ ptr.p->lqhBlockref = 0;
+ ptr.p->startFragTimer(ctcTimer);
+ ptr.p->scanFragId = scanptr.p->scanNextFragId++;
+ ptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
+ ptr.p->startFragTimer(ctcTimer);
+
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = ptr.i;
+ signal->theData[2] = scanptr.p->scanTableref;
+ signal->theData[3] = ptr.p->scanFragId;
+ sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
+ }//for
+
+ ScanFragList queued(c_scan_frag_pool, scanptr.p->m_queued_scan_frags);
+ for (; !ptr.isNull();)
+ {
+ ptr.p->m_ops = 0;
+ ptr.p->m_totalLen = 0;
+ ptr.p->m_scan_frag_conf_status = 1;
+ ptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY;
+ ptr.p->stopFragTimer();
+
+ ScanFragRecPtr tmp = ptr;
+ list.next(ptr);
+ list.remove(tmp);
+ queued.add(tmp);
+ scanptr.p->m_queued_count++;
+ }
+}//Dbtc::execDI_FCOUNTCONF()
+
+/******************************************************
+ * execDI_FCOUNTREF
+ ******************************************************/
+void Dbtc::execDI_FCOUNTREF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ const Uint32 errCode = signal->theData[1];
+ apiConnectptr.i = tcConnectptr.p->apiConnect;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ ScanRecordPtr scanptr;
+ scanptr.i = apiConnectptr.p->apiScanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+ ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_FRAGMENT_COUNT);
+ if (apiConnectptr.p->apiFailState == ZTRUE) {
+ jam();
+ releaseScanResources(scanptr);
+ handleApiFailState(signal, apiConnectptr.i);
+ return;
+ }//if
+ abortScanLab(signal, scanptr, errCode);
+}//Dbtc::execDI_FCOUNTREF()
+
+void Dbtc::abortScanLab(Signal* signal, ScanRecordPtr scanptr, Uint32 errCode)
+{
+ scanTabRefLab(signal, errCode);
+ releaseScanResources(scanptr);
+}//Dbtc::abortScanLab()
+
+void Dbtc::releaseScanResources(ScanRecordPtr scanPtr)
+{
+ if (apiConnectptr.p->cachePtr != RNIL) {
+ cachePtr.i = apiConnectptr.p->cachePtr;
+ ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
+ releaseKeys();
+ releaseAttrinfo();
+ }//if
+ tcConnectptr.i = scanPtr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ releaseTcCon();
+
+ ndbrequire(scanPtr.p->m_running_scan_frags.isEmpty());
+ ndbrequire(scanPtr.p->m_queued_scan_frags.isEmpty());
+ ndbrequire(scanPtr.p->m_delivered_scan_frags.isEmpty());
+
+ ndbassert(scanPtr.p->scanApiRec == apiConnectptr.i);
+ ndbassert(apiConnectptr.p->apiScanRec == scanPtr.i);
+
+ // link into free list
+ scanPtr.p->nextScan = cfirstfreeScanrec;
+ scanPtr.p->scanState = ScanRecord::IDLE;
+ scanPtr.p->scanTcrec = RNIL;
+ scanPtr.p->scanApiRec = RNIL;
+ cfirstfreeScanrec = scanPtr.i;
+
+ apiConnectptr.p->apiScanRec = RNIL;
+ apiConnectptr.p->apiConnectstate = CS_CONNECTED;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+}//Dbtc::releaseScanResources()
+
+
+/****************************************************************
+ * execDIGETPRIMCONF
+ *
+ * WE HAVE RECEIVED THE PRIMARY NODE OF THIS FRAGMENT.
+ * WE ARE NOW READY TO ASK FOR PERMISSION TO LOAD THIS
+ * SPECIFIC NODE WITH A SCAN OPERATION.
+ ****************************************************************/
+void Dbtc::execDIGETPRIMCONF(Signal* signal)
+{
+ jamEntry();
+ // tcConnectptr.i in theData[0] is not used
+ scanFragptr.i = signal->theData[1];
+ c_scan_frag_pool.getPtr(scanFragptr);
+
+ tnodeid = signal->theData[2];
+ arrGuard(tnodeid, MAX_NDB_NODES);
+
+ ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::WAIT_GET_PRIMCONF);
+ scanFragptr.p->stopFragTimer();
+
+ ScanRecordPtr scanptr;
+ scanptr.i = scanFragptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+
+ /**
+ * This must be false as select count(*) otherwise
+ * can "pass" committing on backup fragments and
+ * get incorrect row count
+ */
+ if(false && ScanFragReq::getReadCommittedFlag(scanptr.p->scanRequestInfo))
+ {
+ jam();
+ Uint32 max = 3+signal->theData[6];
+ Uint32 nodeid = getOwnNodeId();
+ for(Uint32 i = 3; i<max; i++)
+ if(signal->theData[i] == nodeid)
+ {
+ jam();
+ tnodeid = nodeid;
+ break;
+ }
+ }
+
+ {
+ /**
+ * Check table
+ */
+ TableRecordPtr tabPtr;
+ tabPtr.i = scanptr.p->scanTableref;
+ ptrAss(tabPtr, tableRecord);
+ Uint32 schemaVersion = scanptr.p->scanSchemaVersion;
+ if(tabPtr.p->checkTable(schemaVersion) == false){
+ jam();
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+
+ run.release(scanFragptr);
+ scanError(signal, scanptr, tabPtr.p->getErrorCode(schemaVersion));
+ return;
+ }
+ }
+
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.i = scanptr.p->scanApiRec;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ cachePtr.i = apiConnectptr.p->cachePtr;
+ ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
+ switch (scanptr.p->scanState) {
+ case ScanRecord::CLOSING_SCAN:
+ jam();
+ updateBuddyTimer(apiConnectptr);
+ {
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+
+ run.release(scanFragptr);
+ }
+ close_scan_req_send_conf(signal, scanptr);
+ return;
+ default:
+ jam();
+ /*empty*/;
+ break;
+ }//switch
+ Uint32 ref = calcLqhBlockRef(tnodeid);
+ scanFragptr.p->lqhBlockref = ref;
+ scanFragptr.p->m_connectCount = getNodeInfo(tnodeid).m_connectCount;
+ sendScanFragReq(signal, scanptr.p, scanFragptr.p);
+ if(ERROR_INSERTED(8035))
+ globalTransporterRegistry.performSend();
+ attrbufptr.i = cachePtr.p->firstAttrbuf;
+ while (attrbufptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(attrbufptr, cattrbufFilesize, attrbufRecord);
+ sendAttrinfo(signal,
+ scanFragptr.i,
+ attrbufptr.p,
+ ref);
+ attrbufptr.i = attrbufptr.p->attrbuf[ZINBUF_NEXT];
+ if(ERROR_INSERTED(8035))
+ globalTransporterRegistry.performSend();
+ }//while
+ scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ scanFragptr.p->startFragTimer(ctcTimer);
+ updateBuddyTimer(apiConnectptr);
+ /*********************************************
+ * WE HAVE NOW STARTED A FRAGMENT SCAN. NOW
+ * WAIT FOR THE FIRST SCANNED RECORDS
+ *********************************************/
+}//Dbtc::execDIGETPRIMCONF
+
+/***************************************************
+ * execDIGETPRIMREF
+ *
+ * WE ARE NOW FORCED TO STOP THE SCAN. THIS ERROR
+ * IS NOT RECOVERABLE SINCE THERE IS A PROBLEM WITH
+ * FINDING A PRIMARY REPLICA OF A CERTAIN FRAGMENT.
+ ***************************************************/
+void Dbtc::execDIGETPRIMREF(Signal* signal)
+{
+ jamEntry();
+ // tcConnectptr.i in theData[0] is not used.
+ scanFragptr.i = signal->theData[1];
+ const Uint32 errCode = signal->theData[2];
+ c_scan_frag_pool.getPtr(scanFragptr);
+ ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::WAIT_GET_PRIMCONF);
+
+ ScanRecordPtr scanptr;
+ scanptr.i = scanFragptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+
+ run.release(scanFragptr);
+
+ scanError(signal, scanptr, errCode);
+}//Dbtc::execDIGETPRIMREF()
+
+/**
+ * Dbtc::execSCAN_FRAGREF
+ * Our attempt to scan a fragment was refused;
+ * set the error code and close all other fragment
+ * scans belonging to this scan
+ */
+void Dbtc::execSCAN_FRAGREF(Signal* signal)
+{
+ const ScanFragRef * const ref = (ScanFragRef *)&signal->theData[0];
+
+ jamEntry();
+ const Uint32 errCode = ref->errorCode;
+
+ scanFragptr.i = ref->senderData;
+ c_scan_frag_pool.getPtr(scanFragptr);
+
+ ScanRecordPtr scanptr;
+ scanptr.i = scanFragptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+
+ apiConnectptr.i = scanptr.p->scanApiRec;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+
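+ // The transaction id must match the API connection; the XOR is zero only on an exact match.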
+ Uint32 transid1 = apiConnectptr.p->transid[0] ^ ref->transId1;
+ Uint32 transid2 = apiConnectptr.p->transid[1] ^ ref->transId2;
+ transid1 = transid1 | transid2;
+ if (transid1 != 0) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+
+ /**
+ * Set the error code, close the connection to this LQH fragment,
+ * stop the fragment timer and call scanError to start
+ * closing the other fragment scans.
+ */
+ ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::LQH_ACTIVE);
+ {
+ scanFragptr.p->scanFragState = ScanFragRec::COMPLETED;
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+
+ run.release(scanFragptr);
+ scanFragptr.p->stopFragTimer();
+ }
+ scanError(signal, scanptr, errCode);
+}//Dbtc::execSCAN_FRAGREF()
+
+/**
+ * Dbtc::scanError
+ *
+ * Called when an error occurs during a scan.
+ */
+void Dbtc::scanError(Signal* signal, ScanRecordPtr scanptr, Uint32 errorCode)
+{
+ jam();
+ ScanRecord* scanP = scanptr.p;
+
+ DEBUG("scanError, errorCode = "<< errorCode <<
+ ", scanState = " << scanptr.p->scanState);
+
+ apiConnectptr.i = scanP->scanApiRec;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ ndbrequire(apiConnectptr.p->apiScanRec == scanptr.i);
+
+ if(scanP->scanState == ScanRecord::CLOSING_SCAN){
+ jam();
+ close_scan_req_send_conf(signal, scanptr);
+ return;
+ }
+
+ ndbrequire(scanP->scanState == ScanRecord::RUNNING);
+
+ /**
+ * Close scan wo/ having received an order to do so
+ */
+ close_scan_req(signal, scanptr, false);
+
+ const bool apiFail = (apiConnectptr.p->apiFailState == ZTRUE);
+ if(apiFail){
+ jam();
+ return;
+ }
+
+ ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
+ ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
+ ref->transId1 = apiConnectptr.p->transid[0];
+ ref->transId2 = apiConnectptr.p->transid[1];
+ ref->errorCode = errorCode;
+ ref->closeNeeded = 1;
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABREF,
+ signal, ScanTabRef::SignalLength, JBB);
+}//Dbtc::scanError()
+
+/************************************************************
+ * execSCAN_FRAGCONF
+ *
+ * A NUMBER OF OPERATIONS HAVE BEEN COMPLETED IN THIS
+ * FRAGMENT. TAKE CARE OF THE RESULTS AND ISSUE FURTHER ACTIONS.
+ ************************************************************/
+void Dbtc::execSCAN_FRAGCONF(Signal* signal)
+{
+ Uint32 transid1, transid2, total_len;
+ jamEntry();
+
+ const ScanFragConf * const conf = (ScanFragConf*)&signal->theData[0];
+ const Uint32 noCompletedOps = conf->completedOps;
+ const Uint32 status = conf->fragmentCompleted;
+
+ scanFragptr.i = conf->senderData;
+ c_scan_frag_pool.getPtr(scanFragptr);
+
+ ScanRecordPtr scanptr;
+ scanptr.i = scanFragptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+
+ apiConnectptr.i = scanptr.p->scanApiRec;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+
+ transid1 = apiConnectptr.p->transid[0] ^ conf->transId1;
+ transid2 = apiConnectptr.p->transid[1] ^ conf->transId2;
+ total_len= conf->total_len;
+ transid1 = transid1 | transid2;
+ if (transid1 != 0) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+
+ ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::LQH_ACTIVE);
+
+ if(scanptr.p->scanState == ScanRecord::CLOSING_SCAN){
+ jam();
+ if(status == 0){
+ /**
+ * We have already started closing (a close has been sent), so ignore this
+ */
+ return;
+ } else {
+ jam();
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+
+ run.release(scanFragptr);
+ scanFragptr.p->stopFragTimer();
+ scanFragptr.p->scanFragState = ScanFragRec::COMPLETED;
+ }
+ close_scan_req_send_conf(signal, scanptr);
+ return;
+ }
+
+ if(noCompletedOps == 0 && status != 0 &&
+ scanptr.p->scanNextFragId+scanptr.p->m_booked_fragments_count < scanptr.p->scanNoFrag){
+ /**
+ * Start on next fragment
+ */
+ scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
+ scanFragptr.p->startFragTimer(ctcTimer);
+
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ scanFragptr.p->scanFragId = scanptr.p->scanNextFragId++;
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = scanFragptr.i;
+ signal->theData[2] = scanptr.p->scanTableref;
+ signal->theData[3] = scanFragptr.p->scanFragId;
+ sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
+ return;
+ }
+ /*
+ Uint32 totalLen = 0;
+ for(Uint32 i = 0; i<noCompletedOps; i++){
+ Uint32 tmp = conf->opReturnDataLen[i];
+ totalLen += tmp;
+ }
+ */
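+ // Move the fragment from the running list to the queued list; its result is reported to the API in the next SCAN_TABCONF.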
+ {
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+ ScanFragList queued(c_scan_frag_pool, scanptr.p->m_queued_scan_frags);
+
+ run.remove(scanFragptr);
+ queued.add(scanFragptr);
+ scanptr.p->m_queued_count++;
+ }
+
+ scanFragptr.p->m_scan_frag_conf_status = status;
+ scanFragptr.p->m_ops = noCompletedOps;
+ scanFragptr.p->m_totalLen = total_len;
+ scanFragptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY;
+ scanFragptr.p->stopFragTimer();
+
+ if(scanptr.p->m_queued_count > /** Min */ 0){
+ jam();
+ sendScanTabConf(signal, scanptr);
+ }
+}//Dbtc::execSCAN_FRAGCONF()
+
+/****************************************************************************
+ * execSCAN_NEXTREQ
+ *
+ * THE APPLICATION HAS PROCESSED THE TUPLES TRANSFERRED AND IS NOW READY FOR
+ * MORE. THIS SIGNAL IS ALSO USED TO CLOSE THE SCAN.
+ ****************************************************************************/
+void Dbtc::execSCAN_NEXTREQ(Signal* signal)
+{
+ const ScanNextReq * const req = (ScanNextReq *)&signal->theData[0];
+ const UintR transid1 = req->transId1;
+ const UintR transid2 = req->transId2;
+ const UintR stopScan = req->stopScan;
+
+ jamEntry();
+
+ apiConnectptr.i = req->apiConnectPtr;
+ if (apiConnectptr.i >= capiConnectFilesize) {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//if
+ ptrAss(apiConnectptr, apiConnectRecord);
+
+ /**
+ * Check transid
+ */
+ const UintR ctransid1 = apiConnectptr.p->transid[0] ^ transid1;
+ const UintR ctransid2 = apiConnectptr.p->transid[1] ^ transid2;
+ if ((ctransid1 | ctransid2) != 0){
+ ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
+ ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
+ ref->transId1 = transid1;
+ ref->transId2 = transid2;
+ ref->errorCode = ZSTATE_ERROR;
+ ref->closeNeeded = 0;
+ sendSignal(signal->senderBlockRef(), GSN_SCAN_TABREF,
+ signal, ScanTabRef::SignalLength, JBB);
+ DEBUG("Wrong transid");
+ return;
+ }
+
+ /**
+ * Check state of API connection
+ */
+ if (apiConnectptr.p->apiConnectstate != CS_START_SCAN) {
+ jam();
+ if (apiConnectptr.p->apiConnectstate == CS_CONNECTED) {
+ jam();
+ /*********************************************************************
+ * The application sends a SCAN_NEXTREQ after experiencing a time-out.
+ * We will send a SCAN_TABREF to indicate a time-out occurred.
+ *********************************************************************/
+ DEBUG("scanTabRefLab: ZSCANTIME_OUT_ERROR2");
+ ndbout_c("apiConnectptr(%d) -> abort", apiConnectptr.i);
+ ndbrequire(false); //B2 indication of strange things going on
+ scanTabRefLab(signal, ZSCANTIME_OUT_ERROR2);
+ return;
+ }
+ DEBUG("scanTabRefLab: ZSTATE_ERROR");
+ DEBUG(" apiConnectstate="<<apiConnectptr.p->apiConnectstate);
+ ndbrequire(false); //B2 indication of strange things going on
+ scanTabRefLab(signal, ZSTATE_ERROR);
+ return;
+ }//if
+
+ /*******************************************************
+ * START THE ACTUAL LOGIC OF SCAN_NEXTREQ.
+ ********************************************************/
+ // Stop the timer that is used to check for timeout in the API
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ ScanRecordPtr scanptr;
+ scanptr.i = apiConnectptr.p->apiScanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+ ScanRecord* scanP = scanptr.p;
+
+ const Uint32 len = signal->getLength() - 4;
+
+ if (stopScan == ZTRUE) {
+ jam();
+ /*********************************************************************
+ * APPLICATION IS CLOSING THE SCAN.
+ **********************************************************************/
+ close_scan_req(signal, scanptr, true);
+ return;
+ }//if
+
+ if (scanptr.p->scanState == ScanRecord::CLOSING_SCAN){
+ jam();
+ /**
+ * The scan is closing (typically due to error)
+ * but the API hasn't understood it yet
+ *
+ * Wait for API close request
+ */
+ return;
+ }
+
+ // Copy the op ptrs so we don't overwrite them when sending...
+ memcpy(signal->getDataPtrSend()+25, signal->getDataPtr()+4, 4 * len);
+
+ ScanFragNextReq tmp;
+ tmp.closeFlag = ZFALSE;
+ tmp.transId1 = apiConnectptr.p->transid[0];
+ tmp.transId2 = apiConnectptr.p->transid[1];
+ tmp.batch_size_rows = scanP->batch_size_rows;
+ tmp.batch_size_bytes = scanP->batch_byte_size;
+
+ ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags);
+ ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags);
+ for(Uint32 i = 0 ; i<len; i++){
+ jam();
+ scanFragptr.i = signal->theData[i+25];
+ c_scan_frag_pool.getPtr(scanFragptr);
+ ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::DELIVERED);
+
+ scanFragptr.p->startFragTimer(ctcTimer);
+ scanFragptr.p->m_ops = 0;
+
+ if(scanFragptr.p->m_scan_frag_conf_status)
+ {
+ /**
+ * last scan was complete
+ */
+ jam();
+ ndbrequire(scanptr.p->scanNextFragId < scanptr.p->scanNoFrag);
+ jam();
+ ndbassert(scanptr.p->m_booked_fragments_count);
+ scanptr.p->m_booked_fragments_count--;
+ scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
+
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ scanFragptr.p->scanFragId = scanptr.p->scanNextFragId++;
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = scanFragptr.i;
+ signal->theData[2] = scanptr.p->scanTableref;
+ signal->theData[3] = scanFragptr.p->scanFragId;
+ sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
+ }
+ else
+ {
+ jam();
+ scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ ScanFragNextReq * req = (ScanFragNextReq*)signal->getDataPtrSend();
+ * req = tmp;
+ req->senderData = scanFragptr.i;
+ sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+ }
+ delivered.remove(scanFragptr);
+ running.add(scanFragptr);
+ }//for
+
+}//Dbtc::execSCAN_NEXTREQ()
+
+void
+Dbtc::close_scan_req(Signal* signal, ScanRecordPtr scanPtr, bool req_received){
+
+ ScanRecord* scanP = scanPtr.p;
+ ndbrequire(scanPtr.p->scanState != ScanRecord::IDLE);
+ scanPtr.p->scanState = ScanRecord::CLOSING_SCAN;
+ scanPtr.p->m_close_scan_req = req_received;
+
+ /**
+ * Queue          : Action
+ * ============== : =================
+ * completed      : -
+ * running        : close -> LQH
+ * delivered w/   : close -> LQH
+ * delivered wo/  : move to completed
+ * queued w/      : close -> LQH
+ * queued wo/     : move to completed
+ */
+
+ ScanFragNextReq * nextReq = (ScanFragNextReq*)&signal->theData[0];
+ nextReq->closeFlag = ZTRUE;
+ nextReq->transId1 = apiConnectptr.p->transid[0];
+ nextReq->transId2 = apiConnectptr.p->transid[1];
+
+ {
+ ScanFragRecPtr ptr;
+ ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags);
+ ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags);
+ ScanFragList queued(c_scan_frag_pool, scanP->m_queued_scan_frags);
+
+ // Close running
+ for(running.first(ptr); !ptr.isNull(); ){
+ ScanFragRecPtr curr = ptr; // Remove while iterating...
+ running.next(ptr);
+
+ if(curr.p->scanFragState == ScanFragRec::WAIT_GET_PRIMCONF){
+ jam();
+ continue;
+ }
+ ndbrequire(curr.p->scanFragState == ScanFragRec::LQH_ACTIVE);
+
+ curr.p->startFragTimer(ctcTimer);
+ curr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ nextReq->senderData = curr.i;
+ sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+ }
+
+ // Close delivered
+ for(delivered.first(ptr); !ptr.isNull(); ){
+ jam();
+ ScanFragRecPtr curr = ptr; // Remove while iterating...
+ delivered.next(ptr);
+
+ ndbrequire(curr.p->scanFragState == ScanFragRec::DELIVERED);
+ delivered.remove(curr);
+
+ if(curr.p->m_ops > 0 && curr.p->m_scan_frag_conf_status == 0){
+ jam();
+ running.add(curr);
+ curr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ curr.p->startFragTimer(ctcTimer);
+ nextReq->senderData = curr.i;
+ sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+
+ } else {
+ jam();
+ c_scan_frag_pool.release(curr);
+ curr.p->scanFragState = ScanFragRec::COMPLETED;
+ curr.p->stopFragTimer();
+ }
+ }//for
+
+ /**
+ * Close all queued fragments with data; move those without data to completed
+ */
+ for(queued.first(ptr); !ptr.isNull(); ){
+ jam();
+ ndbrequire(ptr.p->scanFragState == ScanFragRec::QUEUED_FOR_DELIVERY);
+ ScanFragRecPtr curr = ptr; // Remove while iterating...
+ queued.next(ptr);
+
+ queued.remove(curr);
+ scanP->m_queued_count--;
+
+ if(curr.p->m_ops > 0){
+ jam();
+ running.add(curr);
+ curr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ curr.p->startFragTimer(ctcTimer);
+ nextReq->senderData = curr.i;
+ sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+ } else {
+ jam();
+ c_scan_frag_pool.release(curr);
+ curr.p->scanFragState = ScanFragRec::COMPLETED;
+ curr.p->stopFragTimer();
+ }
+ }
+ }
+ close_scan_req_send_conf(signal, scanPtr);
+}
+
+void
+Dbtc::close_scan_req_send_conf(Signal* signal, ScanRecordPtr scanPtr){
+
+ jam();
+
+ ndbrequire(scanPtr.p->m_queued_scan_frags.isEmpty());
+ ndbrequire(scanPtr.p->m_delivered_scan_frags.isEmpty());
+ //ndbrequire(scanPtr.p->m_running_scan_frags.isEmpty());
+
+#if 0
+ {
+ ScanFragList comp(c_scan_frag_pool, scanPtr.p->m_completed_scan_frags);
+ ScanFragRecPtr ptr;
+ for(comp.first(ptr); !ptr.isNull(); comp.next(ptr)){
+ ndbrequire(ptr.p->scanFragTimer == 0);
+ ndbrequire(ptr.p->scanFragState == ScanFragRec::COMPLETED);
+ }
+ }
+#endif
+
+ if(!scanPtr.p->m_running_scan_frags.isEmpty()){
+ jam();
+ return;
+ }
+
+ const bool apiFail = (apiConnectptr.p->apiFailState == ZTRUE);
+
+ if(!scanPtr.p->m_close_scan_req){
+ jam();
+ /**
+ * The API hasn't ordered the close yet
+ */
+ return;
+ }
+
+ Uint32 ref = apiConnectptr.p->ndbapiBlockref;
+ if(!apiFail && ref){
+ jam();
+ ScanTabConf * conf = (ScanTabConf*)&signal->theData[0];
+ conf->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
+ conf->requestInfo = ScanTabConf::EndOfData;
+ conf->transId1 = apiConnectptr.p->transid[0];
+ conf->transId2 = apiConnectptr.p->transid[1];
+ sendSignal(ref, GSN_SCAN_TABCONF, signal, ScanTabConf::SignalLength, JBB);
+ }
+
+ releaseScanResources(scanPtr);
+
+ if(apiFail){
+ jam();
+ /**
+ * API has failed
+ */
+ handleApiFailState(signal, apiConnectptr.i);
+ }
+}
+
+Dbtc::ScanRecordPtr
+Dbtc::seizeScanrec(Signal* signal) {
+ ScanRecordPtr scanptr;
+ scanptr.i = cfirstfreeScanrec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+ cfirstfreeScanrec = scanptr.p->nextScan;
+ scanptr.p->nextScan = RNIL;
+ ndbrequire(scanptr.p->scanState == ScanRecord::IDLE);
+ return scanptr;
+}//Dbtc::seizeScanrec()
+
+void Dbtc::sendScanFragReq(Signal* signal,
+ ScanRecord* scanP,
+ ScanFragRec* scanFragP)
+{
+ ScanFragReq * const req = (ScanFragReq *)&signal->theData[0];
+ Uint32 requestInfo = scanP->scanRequestInfo;
+ ScanFragReq::setScanPrio(requestInfo, 1);
+ apiConnectptr.i = scanP->scanApiRec;
+ req->tableId = scanP->scanTableref;
+ req->schemaVersion = scanP->scanSchemaVersion;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ req->senderData = scanFragptr.i;
+ req->requestInfo = requestInfo;
+ req->fragmentNoKeyLen = scanFragP->scanFragId | (scanP->scanKeyLen << 16);
+ req->resultRef = apiConnectptr.p->ndbapiBlockref;
+ req->savePointId = apiConnectptr.p->currSavePointId;
+ req->transId1 = apiConnectptr.p->transid[0];
+ req->transId2 = apiConnectptr.p->transid[1];
+ req->clientOpPtr = scanFragP->m_apiPtr;
+ req->batch_size_rows= scanP->batch_size_rows;
+ req->batch_size_bytes= scanP->batch_byte_size;
+ sendSignal(scanFragP->lqhBlockref, GSN_SCAN_FRAGREQ, signal,
+ ScanFragReq::SignalLength, JBB);
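+ // Any key/bound data is sent to the LQH separately, after the SCAN_FRAGREQ itself.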
+ if(scanP->scanKeyLen > 0)
+ {
+ tcConnectptr.i = scanFragptr.i;
+ packKeyData000Lab(signal, scanFragP->lqhBlockref, scanP->scanKeyLen);
+ }
+ updateBuddyTimer(apiConnectptr);
+ scanFragP->startFragTimer(ctcTimer);
+}//Dbtc::sendScanFragReq()
+
+
+void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) {
+ jam();
+ Uint32* ops = signal->getDataPtrSend()+4;
+ Uint32 op_count = scanPtr.p->m_queued_count;
+ if(4 + 3 * op_count > 25){
+ jam();
+ ops += 21;
+ }
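+ // When the triplets do not fit in the 25 word fixed signal area they are built at offset 25 and sent as a separate linear section (see the send below).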
+
+ int left = scanPtr.p->scanNoFrag - scanPtr.p->scanNextFragId;
+ Uint32 booked = scanPtr.p->m_booked_fragments_count;
+
+ ScanTabConf * conf = (ScanTabConf*)&signal->theData[0];
+ conf->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
+ conf->requestInfo = op_count;
+ conf->transId1 = apiConnectptr.p->transid[0];
+ conf->transId2 = apiConnectptr.p->transid[1];
+ ScanFragRecPtr ptr;
+ {
+ ScanFragList queued(c_scan_frag_pool, scanPtr.p->m_queued_scan_frags);
+ ScanFragList delivered(c_scan_frag_pool,scanPtr.p->m_delivered_scan_frags);
+ for(queued.first(ptr); !ptr.isNull(); ){
+ ndbrequire(ptr.p->scanFragState == ScanFragRec::QUEUED_FOR_DELIVERY);
+ ScanFragRecPtr curr = ptr; // Remove while iterating...
+ queued.next(ptr);
+
+ bool done = curr.p->m_scan_frag_conf_status && (left <= (int)booked);
+ if(curr.p->m_scan_frag_conf_status)
+ booked++;
+
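+ // One triplet per fragment: the API operation pointer, the fragment record reference (RNIL when the fragment is complete), and the total length packed with the operation count in the low 10 bits.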
+ * ops++ = curr.p->m_apiPtr;
+ * ops++ = done ? RNIL : curr.i;
+ * ops++ = (curr.p->m_totalLen << 10) + curr.p->m_ops;
+
+ queued.remove(curr);
+ if(!done){
+ delivered.add(curr);
+ curr.p->scanFragState = ScanFragRec::DELIVERED;
+ curr.p->stopFragTimer();
+ } else {
+ c_scan_frag_pool.release(curr);
+ curr.p->scanFragState = ScanFragRec::COMPLETED;
+ curr.p->stopFragTimer();
+ }
+ }
+ }
+
+ scanPtr.p->m_booked_fragments_count = booked;
+ if(scanPtr.p->m_delivered_scan_frags.isEmpty() &&
+ scanPtr.p->m_running_scan_frags.isEmpty())
+ {
+ conf->requestInfo = op_count | ScanTabConf::EndOfData;
+ releaseScanResources(scanPtr);
+ }
+
+ if(4 + 3 * op_count > 25){
+ jam();
+ LinearSectionPtr ptr[3];
+ ptr[0].p = signal->getDataPtrSend()+25;
+ ptr[0].sz = 3 * op_count;
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABCONF, signal,
+ ScanTabConf::SignalLength, JBB, ptr, 1);
+ } else {
+ jam();
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABCONF, signal,
+ ScanTabConf::SignalLength + 3 * op_count, JBB);
+ }
+ scanPtr.p->m_queued_count = 0;
+}//Dbtc::sendScanTabConf()
+
+
+void Dbtc::gcpTcfinished(Signal* signal)
+{
+ signal->theData[1] = tcheckGcpId;
+ sendSignal(cdihblockref, GSN_GCP_TCFINISHED, signal, 2, JBB);
+}//Dbtc::gcpTcfinished()
+
+void Dbtc::initApiConnect(Signal* signal)
+{
+ Uint32 tiacTmp;
+ Uint32 guard4;
+
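+ // The api connect records are split into three equal parts, each seeded as its own free list: cfirstfreeApiConnect, cfirstfreeApiConnectCopy and cfirstfreeApiConnectFail (initialised below).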
+ tiacTmp = capiConnectFilesize / 3;
+ ndbrequire(tiacTmp > 0);
+ guard4 = tiacTmp + 1;
+ for (cachePtr.i = 0; cachePtr.i < guard4; cachePtr.i++) {
+ refresh_watch_dog();
+ ptrAss(cachePtr, cacheRecord);
+ cachePtr.p->firstAttrbuf = RNIL;
+ cachePtr.p->lastAttrbuf = RNIL;
+ cachePtr.p->firstKeybuf = RNIL;
+ cachePtr.p->lastKeybuf = RNIL;
+ cachePtr.p->nextCacheRec = cachePtr.i + 1;
+ }//for
+ cachePtr.i = tiacTmp;
+ ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
+ cachePtr.p->nextCacheRec = RNIL;
+ cfirstfreeCacheRec = 0;
+
+ guard4 = tiacTmp - 1;
+ for (apiConnectptr.i = 0; apiConnectptr.i <= guard4; apiConnectptr.i++) {
+ refresh_watch_dog();
+ jam();
+ ptrAss(apiConnectptr, apiConnectRecord);
+ apiConnectptr.p->apiConnectstate = CS_DISCONNECTED;
+ apiConnectptr.p->apiFailState = ZFALSE;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ apiConnectptr.p->takeOverRec = (Uint8)Z8NIL;
+ apiConnectptr.p->cachePtr = RNIL;
+ apiConnectptr.p->nextApiConnect = apiConnectptr.i + 1;
+ apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref
+ apiConnectptr.p->commitAckMarker = RNIL;
+ apiConnectptr.p->firstTcConnect = RNIL;
+ apiConnectptr.p->lastTcConnect = RNIL;
+ apiConnectptr.p->triggerPending = false;
+ apiConnectptr.p->isIndexOp = false;
+ apiConnectptr.p->accumulatingIndexOp = RNIL;
+ apiConnectptr.p->executingIndexOp = RNIL;
+ apiConnectptr.p->buddyPtr = RNIL;
+ apiConnectptr.p->currSavePointId = 0;
+ }//for
+ apiConnectptr.i = tiacTmp - 1;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ apiConnectptr.p->nextApiConnect = RNIL;
+ cfirstfreeApiConnect = 0;
+ guard4 = (2 * tiacTmp) - 1;
+ for (apiConnectptr.i = tiacTmp; apiConnectptr.i <= guard4; apiConnectptr.i++)
+ {
+ refresh_watch_dog();
+ jam();
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ apiConnectptr.p->apiConnectstate = CS_RESTART;
+ apiConnectptr.p->apiFailState = ZFALSE;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ apiConnectptr.p->takeOverRec = (Uint8)Z8NIL;
+ apiConnectptr.p->cachePtr = RNIL;
+ apiConnectptr.p->nextApiConnect = apiConnectptr.i + 1;
+ apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref
+ apiConnectptr.p->commitAckMarker = RNIL;
+ apiConnectptr.p->firstTcConnect = RNIL;
+ apiConnectptr.p->lastTcConnect = RNIL;
+ apiConnectptr.p->triggerPending = false;
+ apiConnectptr.p->isIndexOp = false;
+ apiConnectptr.p->accumulatingIndexOp = RNIL;
+ apiConnectptr.p->executingIndexOp = RNIL;
+ apiConnectptr.p->buddyPtr = RNIL;
+ apiConnectptr.p->currSavePointId = 0;
+ }//for
+ apiConnectptr.i = (2 * tiacTmp) - 1;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ apiConnectptr.p->nextApiConnect = RNIL;
+ cfirstfreeApiConnectCopy = tiacTmp;
+ guard4 = (3 * tiacTmp) - 1;
+ for (apiConnectptr.i = 2 * tiacTmp; apiConnectptr.i <= guard4;
+ apiConnectptr.i++) {
+ refresh_watch_dog();
+ jam();
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ apiConnectptr.p->apiFailState = ZFALSE;
+ apiConnectptr.p->apiConnectstate = CS_RESTART;
+ apiConnectptr.p->takeOverRec = (Uint8)Z8NIL;
+ apiConnectptr.p->cachePtr = RNIL;
+ apiConnectptr.p->nextApiConnect = apiConnectptr.i + 1;
+ apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref
+ apiConnectptr.p->commitAckMarker = RNIL;
+ apiConnectptr.p->firstTcConnect = RNIL;
+ apiConnectptr.p->lastTcConnect = RNIL;
+ apiConnectptr.p->triggerPending = false;
+ apiConnectptr.p->isIndexOp = false;
+ apiConnectptr.p->accumulatingIndexOp = RNIL;
+ apiConnectptr.p->executingIndexOp = RNIL;
+ apiConnectptr.p->buddyPtr = RNIL;
+ apiConnectptr.p->currSavePointId = 0;
+ }//for
+ apiConnectptr.i = (3 * tiacTmp) - 1;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ apiConnectptr.p->nextApiConnect = RNIL;
+ cfirstfreeApiConnectFail = 2 * tiacTmp;
+}//Dbtc::initApiConnect()
+
+void Dbtc::initattrbuf(Signal* signal)
+{
+ ndbrequire(cattrbufFilesize > 0);
+ for (attrbufptr.i = 0; attrbufptr.i < cattrbufFilesize; attrbufptr.i++) {
+ refresh_watch_dog();
+ jam();
+ ptrAss(attrbufptr, attrbufRecord);
+ attrbufptr.p->attrbuf[ZINBUF_NEXT] = attrbufptr.i + 1; /* NEXT ATTRBUF */
+ }//for
+ attrbufptr.i = cattrbufFilesize - 1;
+ ptrAss(attrbufptr, attrbufRecord);
+ attrbufptr.p->attrbuf[ZINBUF_NEXT] = RNIL; /* NEXT ATTRBUF */
+ cfirstfreeAttrbuf = 0;
+}//Dbtc::initattrbuf()
+
+void Dbtc::initdatabuf(Signal* signal)
+{
+ ndbrequire(cdatabufFilesize > 0);
+ for (databufptr.i = 0; databufptr.i < cdatabufFilesize; databufptr.i++) {
+ refresh_watch_dog();
+ ptrAss(databufptr, databufRecord);
+ databufptr.p->nextDatabuf = databufptr.i + 1;
+ }//for
+ databufptr.i = cdatabufFilesize - 1;
+ ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
+ databufptr.p->nextDatabuf = RNIL;
+ cfirstfreeDatabuf = 0;
+}//Dbtc::initdatabuf()
+
+void Dbtc::initgcp(Signal* signal)
+{
+ ndbrequire(cgcpFilesize > 0);
+ for (gcpPtr.i = 0; gcpPtr.i < cgcpFilesize; gcpPtr.i++) {
+ ptrAss(gcpPtr, gcpRecord);
+ gcpPtr.p->nextGcp = gcpPtr.i + 1;
+ }//for
+ gcpPtr.i = cgcpFilesize - 1;
+ ptrCheckGuard(gcpPtr, cgcpFilesize, gcpRecord);
+ gcpPtr.p->nextGcp = RNIL;
+ cfirstfreeGcp = 0;
+ cfirstgcp = RNIL;
+ clastgcp = RNIL;
+}//Dbtc::initgcp()
+
+void Dbtc::inithost(Signal* signal)
+{
+ cpackedListIndex = 0;
+ ndbrequire(chostFilesize > 0);
+ for (hostptr.i = 0; hostptr.i < chostFilesize; hostptr.i++) {
+ jam();
+ ptrAss(hostptr, hostRecord);
+ hostptr.p->hostStatus = HS_DEAD;
+ hostptr.p->inPackedList = false;
+ hostptr.p->takeOverStatus = TOS_NOT_DEFINED;
+ hostptr.p->lqhTransStatus = LTS_IDLE;
+ hostptr.p->noOfWordsTCKEYCONF = 0;
+ hostptr.p->noOfWordsTCINDXCONF = 0;
+ hostptr.p->noOfPackedWordsLqh = 0;
+ hostptr.p->hostLqhBlockRef = calcLqhBlockRef(hostptr.i);
+ }//for
+}//Dbtc::inithost()
+
+void Dbtc::initialiseRecordsLab(Signal* signal, UintR Tdata0,
+ Uint32 retRef, Uint32 retData)
+{
+ switch (Tdata0) {
+ case 0:
+ jam();
+ initApiConnect(signal);
+ break;
+ case 1:
+ jam();
+ initattrbuf(signal);
+ break;
+ case 2:
+ jam();
+ initdatabuf(signal);
+ break;
+ case 3:
+ jam();
+ initgcp(signal);
+ break;
+ case 4:
+ jam();
+ inithost(signal);
+ break;
+ case 5:
+ jam();
+ // UNUSED - free to initialise something
+ break;
+ case 6:
+ jam();
+ initTable(signal);
+ break;
+ case 7:
+ jam();
+ initialiseScanrec(signal);
+ break;
+ case 8:
+ jam();
+ initialiseScanOprec(signal);
+ break;
+ case 9:
+ jam();
+ initialiseScanFragrec(signal);
+ break;
+ case 10:
+ jam();
+ initialiseTcConnect(signal);
+ break;
+ case 11:
+ jam();
+ initTcFail(signal);
+
+ {
+ ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = retData;
+ sendSignal(retRef, GSN_READ_CONFIG_CONF, signal,
+ ReadConfigConf::SignalLength, JBB);
+ }
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+
+ signal->theData[0] = TcContinueB::ZINITIALISE_RECORDS;
+ signal->theData[1] = Tdata0 + 1;
+ signal->theData[2] = 0;
+ signal->theData[3] = retRef;
+ signal->theData[4] = retData;
+ sendSignal(DBTC_REF, GSN_CONTINUEB, signal, 5, JBB);
+}
+
+/* ========================================================================= */
+/* ======= INITIALISE_SCANREC ======= */
+/* */
+/* ========================================================================= */
+void Dbtc::initialiseScanrec(Signal* signal)
+{
+ ScanRecordPtr scanptr;
+ ndbrequire(cscanrecFileSize > 0);
+ for (scanptr.i = 0; scanptr.i < cscanrecFileSize; scanptr.i++) {
+ refresh_watch_dog();
+ jam();
+ ptrAss(scanptr, scanRecord);
+ new (scanptr.p) ScanRecord();
+ scanptr.p->scanState = ScanRecord::IDLE;
+ scanptr.p->scanApiRec = RNIL;
+ scanptr.p->nextScan = scanptr.i + 1;
+ }//for
+ scanptr.i = cscanrecFileSize - 1;
+ ptrAss(scanptr, scanRecord);
+ scanptr.p->nextScan = RNIL;
+ cfirstfreeScanrec = 0;
+}//Dbtc::initialiseScanrec()
+
+void Dbtc::initialiseScanFragrec(Signal* signal)
+{
+}//Dbtc::initialiseScanFragrec()
+
+void Dbtc::initialiseScanOprec(Signal* signal)
+{
+}//Dbtc::initialiseScanOprec()
+
+void Dbtc::initTable(Signal* signal)
+{
+
+ ndbrequire(ctabrecFilesize > 0);
+ for (tabptr.i = 0; tabptr.i < ctabrecFilesize; tabptr.i++) {
+ refresh_watch_dog();
+ ptrAss(tabptr, tableRecord);
+ tabptr.p->currentSchemaVersion = 0;
+ tabptr.p->storedTable = true;
+ tabptr.p->tableType = 0;
+ tabptr.p->enabled = false;
+ tabptr.p->dropping = false;
+ tabptr.p->noOfKeyAttr = 0;
+ tabptr.p->hasCharAttr = 0;
+ tabptr.p->noOfDistrKeys = 0;
+ for (unsigned k = 0; k < MAX_ATTRIBUTES_IN_INDEX; k++) {
+ tabptr.p->keyAttr[k].attributeDescriptor = 0;
+ tabptr.p->keyAttr[k].charsetInfo = 0;
+ }
+ }//for
+}//Dbtc::initTable()
+
+void Dbtc::initialiseTcConnect(Signal* signal)
+{
+ ndbrequire(ctcConnectFilesize >= 2);
+
+ // Place half of the tcConnect records in the cfirstfreeTcConnectFail list
+ Uint32 titcTmp = ctcConnectFilesize / 2;
+ for (tcConnectptr.i = 0; tcConnectptr.i < titcTmp; tcConnectptr.i++) {
+ refresh_watch_dog();
+ jam();
+ ptrAss(tcConnectptr, tcConnectRecord);
+ tcConnectptr.p->tcConnectstate = OS_RESTART;
+ tcConnectptr.p->apiConnect = RNIL;
+ tcConnectptr.p->noOfNodes = 0;
+ tcConnectptr.p->nextTcConnect = tcConnectptr.i + 1;
+ }//for
+ tcConnectptr.i = titcTmp - 1;
+ ptrAss(tcConnectptr, tcConnectRecord);
+ tcConnectptr.p->nextTcConnect = RNIL;
+ cfirstfreeTcConnectFail = 0;
+
+ // Place other half in cfirstfreeTcConnect list
+ for (tcConnectptr.i = titcTmp; tcConnectptr.i < ctcConnectFilesize;
+ tcConnectptr.i++) {
+ refresh_watch_dog();
+ jam();
+ ptrAss(tcConnectptr, tcConnectRecord);
+ tcConnectptr.p->tcConnectstate = OS_RESTART;
+ tcConnectptr.p->apiConnect = RNIL;
+ tcConnectptr.p->noOfNodes = 0;
+ tcConnectptr.p->nextTcConnect = tcConnectptr.i + 1;
+ }//for
+ tcConnectptr.i = ctcConnectFilesize - 1;
+ ptrAss(tcConnectptr, tcConnectRecord);
+ tcConnectptr.p->nextTcConnect = RNIL;
+ cfirstfreeTcConnect = titcTmp;
+ c_counters.cconcurrentOp = 0;
+}//Dbtc::initialiseTcConnect()
+
+/* ------------------------------------------------------------------------- */
+/* ---- LINK A GLOBAL CHECKPOINT RECORD INTO THE LIST WITH TRANSACTIONS */
+/* WAITING FOR COMPLETION. */
+/* ------------------------------------------------------------------------- */
+void Dbtc::linkGciInGcilist(Signal* signal)
+{
+ GcpRecordPtr tmpGcpPointer;
+ if (cfirstgcp == RNIL) {
+ jam();
+ cfirstgcp = gcpPtr.i;
+ } else {
+ jam();
+ tmpGcpPointer.i = clastgcp;
+ ptrCheckGuard(tmpGcpPointer, cgcpFilesize, gcpRecord);
+ tmpGcpPointer.p->nextGcp = gcpPtr.i;
+ }//if
+ clastgcp = gcpPtr.i;
+}//Dbtc::linkGciInGcilist()
+
+/* ------------------------------------------------------------------------- */
+/* ------- LINK SECONDARY KEY BUFFER IN OPERATION RECORD ------- */
+/* ------------------------------------------------------------------------- */
+void Dbtc::linkKeybuf(Signal* signal)
+{
+ seizeDatabuf(signal);
+ tmpDatabufptr.i = cachePtr.p->lastKeybuf;
+ cachePtr.p->lastKeybuf = databufptr.i;
+ if (tmpDatabufptr.i == RNIL) {
+ jam();
+ cachePtr.p->firstKeybuf = databufptr.i;
+ } else {
+ jam();
+ ptrCheckGuard(tmpDatabufptr, cdatabufFilesize, databufRecord);
+ tmpDatabufptr.p->nextDatabuf = databufptr.i;
+ }//if
+}//Dbtc::linkKeybuf()
+
+/* ------------------------------------------------------------------------- */
+/* ------- LINK A TC CONNECT RECORD INTO THE API LIST OF TC CONNECTIONS --- */
+/* ------------------------------------------------------------------------- */
+void Dbtc::linkTcInConnectionlist(Signal* signal)
+{
+ /* POINTER FOR THE CONNECT_RECORD */
+ TcConnectRecordPtr ltcTcConnectptr;
+
+ tcConnectptr.p->nextTcConnect = RNIL;
+ ltcTcConnectptr.i = apiConnectptr.p->lastTcConnect;
+ ptrCheck(ltcTcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.p->lastTcConnect = tcConnectptr.i;
+ if (ltcTcConnectptr.i == RNIL) {
+ jam();
+ apiConnectptr.p->firstTcConnect = tcConnectptr.i;
+ } else {
+ jam();
+ ptrGuard(ltcTcConnectptr);
+ ltcTcConnectptr.p->nextTcConnect = tcConnectptr.i;
+ }//if
+}//Dbtc::linkTcInConnectionlist()
+
+/*---------------------------------------------------------------------------*/
+/* RELEASE_ABORT_RESOURCES */
+/* THIS CODE RELEASES ALL RESOURCES AFTER AN ABORT OF A TRANSACTION AND ALSO */
+/* SENDS THE ABORT DECISION TO THE APPLICATION. */
+/*---------------------------------------------------------------------------*/
+void Dbtc::releaseAbortResources(Signal* signal)
+{
+ TcConnectRecordPtr rarTcConnectptr;
+
+ c_counters.cabortCount++;
+ if (apiConnectptr.p->cachePtr != RNIL) {
+ cachePtr.i = apiConnectptr.p->cachePtr;
+ ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
+ releaseAttrinfo();
+ releaseKeys();
+ }//if
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ while (tcConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ // Clear any markers that were set in CS_RECEIVING state
+ clearCommitAckMarker(apiConnectptr.p, tcConnectptr.p);
+ rarTcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ releaseTcCon();
+ tcConnectptr.i = rarTcConnectptr.i;
+ }//while
+ apiConnectptr.p->firstTcConnect = RNIL;
+ apiConnectptr.p->lastTcConnect = RNIL;
+
+ // MASV let state be CS_ABORTING until all
+ // signals in the "air" have been received. Reset to CS_CONNECTED
+ // will be done when a TCKEYREQ with start flag is recieved
+ // or releaseApiCon is called
+ // apiConnectptr.p->apiConnectstate = CS_CONNECTED;
+ apiConnectptr.p->apiConnectstate = CS_ABORTING;
+ apiConnectptr.p->abortState = AS_IDLE;
+
+ if(apiConnectptr.p->m_exec_flag || apiConnectptr.p->apiFailState == ZTRUE){
+ jam();
+ bool ok = false;
+ Uint32 blockRef = apiConnectptr.p->ndbapiBlockref;
+ ReturnSignal ret = apiConnectptr.p->returnsignal;
+ apiConnectptr.p->returnsignal = RS_NO_RETURN;
+ apiConnectptr.p->m_exec_flag = 0;
+ switch(ret){
+ case RS_TCROLLBACKCONF:
+ jam();
+ ok = true;
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ sendSignal(blockRef, GSN_TCROLLBACKCONF, signal, 3, JBB);
+ break;
+ case RS_TCROLLBACKREP:{
+ jam();
+ ok = true;
+ TcRollbackRep * const tcRollbackRep =
+ (TcRollbackRep *) signal->getDataPtr();
+
+ tcRollbackRep->connectPtr = apiConnectptr.p->ndbapiConnect;
+ tcRollbackRep->transId[0] = apiConnectptr.p->transid[0];
+ tcRollbackRep->transId[1] = apiConnectptr.p->transid[1];
+ tcRollbackRep->returnCode = apiConnectptr.p->returncode;
+ sendSignal(blockRef, GSN_TCROLLBACKREP, signal,
+ TcRollbackRep::SignalLength, JBB);
+ }
+ break;
+ case RS_NO_RETURN:
+ jam();
+ ok = true;
+ break;
+ case RS_TCKEYCONF:
+ case RS_TC_COMMITCONF:
+ break;
+ }
+ if(!ok){
+ jam();
+ ndbout_c("returnsignal = %d", apiConnectptr.p->returnsignal);
+ sendSystemError(signal);
+ }//if
+
+ }
+ setApiConTimer(apiConnectptr.i, 0,
+ 100000+c_apiConTimer_line[apiConnectptr.i]);
+ if (apiConnectptr.p->apiFailState == ZTRUE) {
+ jam();
+ handleApiFailState(signal, apiConnectptr.i);
+ return;
+ }//if
+}//Dbtc::releaseAbortResources()
+
+void Dbtc::releaseApiCon(Signal* signal, UintR TapiConnectPtr)
+{
+ ApiConnectRecordPtr TlocalApiConnectptr;
+
+ TlocalApiConnectptr.i = TapiConnectPtr;
+ ptrCheckGuard(TlocalApiConnectptr, capiConnectFilesize, apiConnectRecord);
+ TlocalApiConnectptr.p->nextApiConnect = cfirstfreeApiConnect;
+ cfirstfreeApiConnect = TlocalApiConnectptr.i;
+ setApiConTimer(TlocalApiConnectptr.i, 0, __LINE__);
+ TlocalApiConnectptr.p->apiConnectstate = CS_DISCONNECTED;
+ ndbassert(TlocalApiConnectptr.p->apiScanRec == RNIL);
+ TlocalApiConnectptr.p->ndbapiBlockref = 0;
+}//Dbtc::releaseApiCon()
+
+void Dbtc::releaseApiConnectFail(Signal* signal)
+{
+ apiConnectptr.p->apiConnectstate = CS_RESTART;
+ apiConnectptr.p->takeOverRec = (Uint8)Z8NIL;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ apiConnectptr.p->nextApiConnect = cfirstfreeApiConnectFail;
+ cfirstfreeApiConnectFail = apiConnectptr.i;
+}//Dbtc::releaseApiConnectFail()
+
+void Dbtc::releaseGcp(Signal* signal)
+{
+ ptrGuard(gcpPtr);
+ gcpPtr.p->nextGcp = cfirstfreeGcp;
+ cfirstfreeGcp = gcpPtr.i;
+}//Dbtc::releaseGcp()
+
+void Dbtc::releaseKeys()
+{
+ UintR Tmp;
+ databufptr.i = cachePtr.p->firstKeybuf;
+ while (databufptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
+ Tmp = databufptr.p->nextDatabuf;
+ databufptr.p->nextDatabuf = cfirstfreeDatabuf;
+ cfirstfreeDatabuf = databufptr.i;
+ databufptr.i = Tmp;
+ }//while
+ cachePtr.p->firstKeybuf = RNIL;
+ cachePtr.p->lastKeybuf = RNIL;
+}//Dbtc::releaseKeys()
+
+void Dbtc::releaseTcConnectFail(Signal* signal)
+{
+ ptrGuard(tcConnectptr);
+ tcConnectptr.p->nextTcConnect = cfirstfreeTcConnectFail;
+ cfirstfreeTcConnectFail = tcConnectptr.i;
+}//Dbtc::releaseTcConnectFail()
+
+void Dbtc::seizeApiConnect(Signal* signal)
+{
+ if (cfirstfreeApiConnect != RNIL) {
+ jam();
+ terrorCode = ZOK;
+ apiConnectptr.i = cfirstfreeApiConnect; /* ASSIGN A FREE RECORD FROM THE FREE LIST */
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ cfirstfreeApiConnect = apiConnectptr.p->nextApiConnect;
+ apiConnectptr.p->nextApiConnect = RNIL;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ apiConnectptr.p->apiConnectstate = CS_CONNECTED; /* STATE OF CONNECTION */
+ apiConnectptr.p->triggerPending = false;
+ apiConnectptr.p->isIndexOp = false;
+ } else {
+ jam();
+ terrorCode = ZNO_FREE_API_CONNECTION;
+ }//if
+}//Dbtc::seizeApiConnect()
+
+void Dbtc::seizeApiConnectFail(Signal* signal)
+{
+ apiConnectptr.i = cfirstfreeApiConnectFail;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ cfirstfreeApiConnectFail = apiConnectptr.p->nextApiConnect;
+}//Dbtc::seizeApiConnectFail()
+
+void Dbtc::seizeDatabuf(Signal* signal)
+{
+ databufptr.i = cfirstfreeDatabuf;
+ ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
+ cfirstfreeDatabuf = databufptr.p->nextDatabuf;
+ databufptr.p->nextDatabuf = RNIL;
+}//Dbtc::seizeDatabuf()
+
+void Dbtc::seizeTcConnect(Signal* signal)
+{
+ tcConnectptr.i = cfirstfreeTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ cfirstfreeTcConnect = tcConnectptr.p->nextTcConnect;
+ c_counters.cconcurrentOp++;
+ tcConnectptr.p->isIndexOp = false;
+}//Dbtc::seizeTcConnect()
+
+void Dbtc::seizeTcConnectFail(Signal* signal)
+{
+ tcConnectptr.i = cfirstfreeTcConnectFail;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ cfirstfreeTcConnectFail = tcConnectptr.p->nextTcConnect;
+}//Dbtc::seizeTcConnectFail()
+
+void Dbtc::sendAttrinfo(Signal* signal,
+ UintR TattrinfoPtr,
+ AttrbufRecord * const regAttrPtr,
+ UintR TBref)
+{
+ UintR TdataPos;
+ UintR sig0, sig1, sig2, sig3, sig4, sig5, sig6, sig7;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ TdataPos = regAttrPtr->attrbuf[ZINBUF_DATA_LEN];
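+ // An ATTRINFO signal carries a three word header (operation pointer + transid) followed by at most 22 data words.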
+ sig0 = TattrinfoPtr;
+ sig1 = regApiPtr->transid[0];
+ sig2 = regApiPtr->transid[1];
+
+ signal->theData[0] = sig0;
+ signal->theData[1] = sig1;
+ signal->theData[2] = sig2;
+
+ sig0 = regAttrPtr->attrbuf[0];
+ sig1 = regAttrPtr->attrbuf[1];
+ sig2 = regAttrPtr->attrbuf[2];
+ sig3 = regAttrPtr->attrbuf[3];
+ sig4 = regAttrPtr->attrbuf[4];
+ sig5 = regAttrPtr->attrbuf[5];
+ sig6 = regAttrPtr->attrbuf[6];
+ sig7 = regAttrPtr->attrbuf[7];
+
+ signal->theData[3] = sig0;
+ signal->theData[4] = sig1;
+ signal->theData[5] = sig2;
+ signal->theData[6] = sig3;
+ signal->theData[7] = sig4;
+ signal->theData[8] = sig5;
+ signal->theData[9] = sig6;
+ signal->theData[10] = sig7;
+
+ if (TdataPos > 8) {
+ sig0 = regAttrPtr->attrbuf[8];
+ sig1 = regAttrPtr->attrbuf[9];
+ sig2 = regAttrPtr->attrbuf[10];
+ sig3 = regAttrPtr->attrbuf[11];
+ sig4 = regAttrPtr->attrbuf[12];
+ sig5 = regAttrPtr->attrbuf[13];
+ sig6 = regAttrPtr->attrbuf[14];
+
+ jam();
+ signal->theData[11] = sig0;
+ signal->theData[12] = sig1;
+ signal->theData[13] = sig2;
+ signal->theData[14] = sig3;
+ signal->theData[15] = sig4;
+ signal->theData[16] = sig5;
+ signal->theData[17] = sig6;
+
+ if (TdataPos > 15) {
+
+ sig0 = regAttrPtr->attrbuf[15];
+ sig1 = regAttrPtr->attrbuf[16];
+ sig2 = regAttrPtr->attrbuf[17];
+ sig3 = regAttrPtr->attrbuf[18];
+ sig4 = regAttrPtr->attrbuf[19];
+ sig5 = regAttrPtr->attrbuf[20];
+ sig6 = regAttrPtr->attrbuf[21];
+
+ jam();
+ signal->theData[18] = sig0;
+ signal->theData[19] = sig1;
+ signal->theData[20] = sig2;
+ signal->theData[21] = sig3;
+ signal->theData[22] = sig4;
+ signal->theData[23] = sig5;
+ signal->theData[24] = sig6;
+ }//if
+ }//if
+ sendSignal(TBref, GSN_ATTRINFO, signal, TdataPos + 3, JBB);
+}//Dbtc::sendAttrinfo()
+
+void Dbtc::sendContinueTimeOutControl(Signal* signal, Uint32 TapiConPtr)
+{
+ signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_CONTROL;
+ signal->theData[1] = TapiConPtr;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+}//Dbtc::sendContinueTimeOutControl()
+
+void Dbtc::sendKeyinfo(Signal* signal, BlockReference TBRef, Uint32 len)
+{
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ signal->theData[3] = cdata[0];
+ signal->theData[4] = cdata[1];
+ signal->theData[5] = cdata[2];
+ signal->theData[6] = cdata[3];
+ signal->theData[7] = cdata[4];
+ signal->theData[8] = cdata[5];
+ signal->theData[9] = cdata[6];
+ signal->theData[10] = cdata[7];
+ signal->theData[11] = cdata[8];
+ signal->theData[12] = cdata[9];
+ signal->theData[13] = cdata[10];
+ signal->theData[14] = cdata[11];
+ signal->theData[15] = cdata[12];
+ signal->theData[16] = cdata[13];
+ signal->theData[17] = cdata[14];
+ signal->theData[18] = cdata[15];
+ signal->theData[19] = cdata[16];
+ signal->theData[20] = cdata[17];
+ signal->theData[21] = cdata[18];
+ signal->theData[22] = cdata[19];
+ sendSignal(TBRef, GSN_KEYINFO, signal, 3 + len, JBB);
+}//Dbtc::sendKeyinfo()
+
+void Dbtc::sendSystemError(Signal* signal)
+{
+ progError(0, 0);
+}//Dbtc::sendSystemError()
+
+/* ========================================================================= */
+/* ------- LINK ACTUAL GCP OUT OF LIST ------- */
+/* ------------------------------------------------------------------------- */
+void Dbtc::unlinkGcp(Signal* signal)
+{
+ if (cfirstgcp == gcpPtr.i) {
+ jam();
+ cfirstgcp = gcpPtr.p->nextGcp;
+ if (gcpPtr.i == clastgcp) {
+ jam();
+ clastgcp = RNIL;
+ }//if
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+ * WE ARE TRYING TO REMOVE A GLOBAL CHECKPOINT WHICH WAS NOT THE OLDEST.
+ * THIS IS A SYSTEM ERROR.
+ * ------------------------------------------------------------------- */
+ sendSystemError(signal);
+ }//if
+ gcpPtr.p->nextGcp = cfirstfreeGcp;
+ cfirstfreeGcp = gcpPtr.i;
+}//Dbtc::unlinkGcp()
+
+void
+Dbtc::execDUMP_STATE_ORD(Signal* signal)
+{
+ DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0];
+ if(signal->theData[0] == DumpStateOrd::CommitAckMarkersSize){
+ infoEvent("TC: m_commitAckMarkerPool: %d free size: %d",
+ m_commitAckMarkerPool.getNoOfFree(),
+ m_commitAckMarkerPool.getSize());
+ }
+ if(signal->theData[0] == DumpStateOrd::CommitAckMarkersDump){
+ infoEvent("TC: m_commitAckMarkerPool: %d free size: %d",
+ m_commitAckMarkerPool.getNoOfFree(),
+ m_commitAckMarkerPool.getSize());
+
+ CommitAckMarkerIterator iter;
+ for(m_commitAckMarkerHash.first(iter); iter.curr.i != RNIL;
+ m_commitAckMarkerHash.next(iter)){
+ infoEvent("CommitAckMarker: i = %d (0x%x, 0x%x)"
+ " Api: %d Lghs(%d): %d %d %d %d bucket = %d",
+ iter.curr.i,
+ iter.curr.p->transid1,
+ iter.curr.p->transid2,
+ iter.curr.p->apiNodeId,
+ iter.curr.p->noOfLqhs,
+ iter.curr.p->lqhNodeId[0],
+ iter.curr.p->lqhNodeId[1],
+ iter.curr.p->lqhNodeId[2],
+ iter.curr.p->lqhNodeId[3],
+ iter.bucket);
+ }
+ }
+ // Dump all ScanFragRecs
+ if (dumpState->args[0] == DumpStateOrd::TcDumpAllScanFragRec){
+ Uint32 recordNo = 0;
+ if (signal->getLength() == 1)
+ infoEvent("TC: Dump all ScanFragRec - size: %d",
+ cscanFragrecFileSize);
+ else if (signal->getLength() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
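+ // Dump one record directly, then send DUMP_STATE_ORD to ourselves to continue with the next record.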
+ dumpState->args[0] = DumpStateOrd::TcDumpOneScanFragRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+
+ if (recordNo < cscanFragrecFileSize-1){
+ dumpState->args[0] = DumpStateOrd::TcDumpAllScanFragRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ }
+
+ // Dump one ScanFragRec
+ if (dumpState->args[0] == DumpStateOrd::TcDumpOneScanFragRec){
+ Uint32 recordNo = RNIL;
+ if (signal->getLength() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ if (recordNo >= cscanFragrecFileSize)
+ return;
+
+ ScanFragRecPtr sfp;
+ sfp.i = recordNo;
+ c_scan_frag_pool.getPtr(sfp);
+ infoEvent("Dbtc::ScanFragRec[%d]: state=%d fragid=%d",
+ sfp.i,
+ sfp.p->scanFragState,
+ sfp.p->scanFragId);
+ infoEvent(" nodeid=%d, timer=%d",
+ refToNode(sfp.p->lqhBlockref),
+ sfp.p->scanFragTimer);
+ }
+
+ // Dump all ScanRecords
+ if (dumpState->args[0] == DumpStateOrd::TcDumpAllScanRec){
+ Uint32 recordNo = 0;
+ if (signal->getLength() == 1)
+ infoEvent("TC: Dump all ScanRecord - size: %d",
+ cscanrecFileSize);
+ else if (signal->getLength() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ dumpState->args[0] = DumpStateOrd::TcDumpOneScanRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+
+ if (recordNo < cscanrecFileSize-1){
+ dumpState->args[0] = DumpStateOrd::TcDumpAllScanRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ }
+
+ // Dump all active ScanRecords
+ if (dumpState->args[0] == DumpStateOrd::TcDumpAllActiveScanRec){
+ Uint32 recordNo = 0;
+ if (signal->getLength() == 1)
+ infoEvent("TC: Dump active ScanRecord - size: %d",
+ cscanrecFileSize);
+ else if (signal->getLength() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ ScanRecordPtr sp;
+ sp.i = recordNo;
+ ptrAss(sp, scanRecord);
+ if (sp.p->scanState != ScanRecord::IDLE){
+ dumpState->args[0] = DumpStateOrd::TcDumpOneScanRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+ }
+
+ if (recordNo < cscanrecFileSize-1){
+ dumpState->args[0] = DumpStateOrd::TcDumpAllActiveScanRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ }
+
+ // Dump one ScanRecord
+ // and associated ScanFragRec and ApiConnectRecord
+ if (dumpState->args[0] == DumpStateOrd::TcDumpOneScanRec){
+ Uint32 recordNo = RNIL;
+ if (signal->getLength() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ if (recordNo >= cscanrecFileSize)
+ return;
+
+ ScanRecordPtr sp;
+ sp.i = recordNo;
+ ptrAss(sp, scanRecord);
+ infoEvent("Dbtc::ScanRecord[%d]: state=%d"
+ "nextfrag=%d, nofrag=%d",
+ sp.i,
+ sp.p->scanState,
+ sp.p->scanNextFragId,
+ sp.p->scanNoFrag);
+ infoEvent(" ailen=%d, para=%d, receivedop=%d, noOprePperFrag=%d",
+ sp.p->scanAiLength,
+ sp.p->scanParallel,
+ sp.p->scanReceivedOperations,
+ sp.p->batch_size_rows);
+ infoEvent(" schv=%d, tab=%d, sproc=%d",
+ sp.p->scanSchemaVersion,
+ sp.p->scanTableref,
+ sp.p->scanStoredProcId);
+ infoEvent(" apiRec=%d, next=%d",
+ sp.p->scanApiRec, sp.p->nextScan);
+
+ if (sp.p->scanState != ScanRecord::IDLE){
+ // Request dump of ScanFragRec
+ ScanFragRecPtr sfptr;
+#define DUMP_SFR(x){\
+ ScanFragList list(c_scan_frag_pool, x);\
+ for(list.first(sfptr); !sfptr.isNull(); list.next(sfptr)){\
+ dumpState->args[0] = DumpStateOrd::TcDumpOneScanFragRec; \
+ dumpState->args[1] = sfptr.i;\
+ execDUMP_STATE_ORD(signal);\
+ }}
+
+ DUMP_SFR(sp.p->m_running_scan_frags);
+ DUMP_SFR(sp.p->m_queued_scan_frags);
+ DUMP_SFR(sp.p->m_delivered_scan_frags);
+
+ // Request dump of ApiConnectRecord
+ dumpState->args[0] = DumpStateOrd::TcDumpOneApiConnectRec;
+ dumpState->args[1] = sp.p->scanApiRec;
+ execDUMP_STATE_ORD(signal);
+ }
+
+ }
+
+ // Dump all ApiConnectRecord(s)
+ if (dumpState->args[0] == DumpStateOrd::TcDumpAllApiConnectRec){
+ Uint32 recordNo = 0;
+ if (signal->getLength() == 1)
+ infoEvent("TC: Dump all ApiConnectRecord - size: %d",
+ capiConnectFilesize);
+ else if (signal->getLength() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ dumpState->args[0] = DumpStateOrd::TcDumpOneApiConnectRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+
+ if (recordNo < capiConnectFilesize-1){
+ dumpState->args[0] = DumpStateOrd::TcDumpAllApiConnectRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ }
+
+ // Dump one ApiConnectRecord
+ if (dumpState->args[0] == DumpStateOrd::TcDumpOneApiConnectRec){
+ Uint32 recordNo = RNIL;
+ if (signal->getLength() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ if (recordNo >= capiConnectFilesize)
+ return;
+
+ ApiConnectRecordPtr ap;
+ ap.i = recordNo;
+ ptrAss(ap, apiConnectRecord);
+ infoEvent("Dbtc::ApiConnectRecord[%d]: state=%d, abortState=%d, "
+ "apiFailState=%d",
+ ap.i,
+ ap.p->apiConnectstate,
+ ap.p->abortState,
+ ap.p->apiFailState);
+ infoEvent(" transid(0x%x, 0x%x), apiBref=0x%x, scanRec=%d",
+ ap.p->transid[0],
+ ap.p->transid[1],
+ ap.p->ndbapiBlockref,
+ ap.p->apiScanRec);
+ infoEvent(" ctcTimer=%d, apiTimer=%d, counter=%d, retcode=%d, "
+ "retsig=%d",
+ ctcTimer, getApiConTimer(ap.i),
+ ap.p->counter,
+ ap.p->returncode,
+ ap.p->returnsignal);
+ infoEvent(" lqhkeyconfrec=%d, lqhkeyreqrec=%d, "
+ "tckeyrec=%d",
+ ap.p->lqhkeyconfrec,
+ ap.p->lqhkeyreqrec,
+ ap.p->tckeyrec);
+ infoEvent(" next=%d ",
+ ap.p->nextApiConnect);
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::TcSetTransactionTimeout){
+ jam();
+ if(signal->getLength() > 1){
+ set_timeout_value(signal->theData[1]);
+ }
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::TcSetApplTransactionTimeout){
+ jam();
+ if(signal->getLength() > 1){
+ set_appl_timeout_value(signal->theData[1]);
+ }
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::StartTcTimer){
+ c_counters.c_trans_status = TransCounters::Started;
+ c_counters.reset();
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::StopTcTimer){
+ c_counters.c_trans_status = TransCounters::Off;
+ Uint32 len = c_counters.report(signal);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, len, JBB);
+ c_counters.reset();
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::StartPeriodicTcTimer){
+ c_counters.c_trans_status = TransCounters::Timer;
+ c_counters.reset();
+ signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 1);
+ }
+}//Dbtc::execDUMP_STATE_ORD()
+
+void Dbtc::execSET_VAR_REQ(Signal* signal)
+{
+#if 0
+ SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
+ ConfigParamId var = setVarReq->variable();
+ int val = setVarReq->value();
+
+
+ switch (var) {
+
+ case TransactionInactiveTime:
+ jam();
+ set_appl_timeout_value(val);
+ break;
+ case TransactionDeadlockDetectionTimeout:
+ set_timeout_value(val);
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ case NoOfConcurrentProcessesHandleTakeover:
+ set_no_parallel_takeover(val);
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ default:
+ sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
+ } // switch
+#endif
+}
+
+void Dbtc::execABORT_ALL_REQ(Signal* signal)
+{
+ jamEntry();
+ AbortAllReq * req = (AbortAllReq*)&signal->theData[0];
+ AbortAllRef * ref = (AbortAllRef*)&signal->theData[0];
+
+ const Uint32 senderData = req->senderData;
+ const BlockReference senderRef = req->senderRef;
+
+ if(getAllowStartTransaction() == true && !getNodeState().getSingleUserMode()){
+ jam();
+
+ ref->senderData = senderData;
+ ref->errorCode = AbortAllRef::InvalidState;
+ sendSignal(senderRef, GSN_ABORT_ALL_REF, signal,
+ AbortAllRef::SignalLength, JBB);
+ return;
+ }
+
+ if(c_abortRec.clientRef != 0){
+ jam();
+
+ ref->senderData = senderData;
+ ref->errorCode = AbortAllRef::AbortAlreadyInProgress;
+ sendSignal(senderRef, GSN_ABORT_ALL_REF, signal,
+ AbortAllRef::SignalLength, JBB);
+ return;
+ }
+
+ if(refToNode(senderRef) != getOwnNodeId()){
+ jam();
+
+ ref->senderData = senderData;
+ ref->errorCode = AbortAllRef::FunctionNotImplemented;
+ sendSignal(senderRef, GSN_ABORT_ALL_REF, signal,
+ AbortAllRef::SignalLength, JBB);
+ return;
+ }
+
+ c_abortRec.clientRef = senderRef;
+ c_abortRec.clientData = senderData;
+ c_abortRec.oldTimeOutValue = ctimeOutValue;
+
+ ctimeOutValue = 0;
+ const Uint32 sleepTime = (2 * 10 * ctimeOutCheckDelay + 199) / 200;
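+ // Number of 200 ms CONTINUEB ticks to wait before confirming; the +199 rounds the division up.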
+
+ checkAbortAllTimeout(signal, (sleepTime == 0 ? 1 : sleepTime));
+}
+
+void Dbtc::checkAbortAllTimeout(Signal* signal, Uint32 sleepTime)
+{
+
+ ndbrequire(c_abortRec.clientRef != 0);
+
+ if(sleepTime > 0){
+ jam();
+
+ sleepTime -= 1;
+ signal->theData[0] = TcContinueB::ZWAIT_ABORT_ALL;
+ signal->theData[1] = sleepTime;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 200, 2);
+ return;
+ }
+
+ AbortAllConf * conf = (AbortAllConf*)&signal->theData[0];
+ conf->senderData = c_abortRec.clientData;
+ sendSignal(c_abortRec.clientRef, GSN_ABORT_ALL_CONF, signal,
+ AbortAllConf::SignalLength, JBB);
+
+ ctimeOutValue = c_abortRec.oldTimeOutValue;
+ c_abortRec.clientRef = 0;
+}
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* ------------------ TRIGGER AND INDEX HANDLING ------------------ */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+void Dbtc::execCREATE_TRIG_REQ(Signal* signal)
+{
+ jamEntry();
+ CreateTrigReq * const createTrigReq =
+ (CreateTrigReq *)&signal->theData[0];
+ TcDefinedTriggerData* triggerData;
+ DefinedTriggerPtr triggerPtr;
+ BlockReference sender = signal->senderBlockRef();
+
+ releaseSections(signal);
+
+ triggerPtr.i = createTrigReq->getTriggerId();
+ if (ERROR_INSERTED(8033) ||
+ !c_theDefinedTriggers.seizeId(triggerPtr,
+ createTrigReq->getTriggerId())) {
+ CLEAR_ERROR_INSERT_VALUE;
+ // Failed to allocate trigger record
+ CreateTrigRef * const createTrigRef =
+ (CreateTrigRef *)&signal->theData[0];
+
+ createTrigRef->setConnectionPtr(createTrigReq->getConnectionPtr());
+ createTrigRef->setErrorCode(CreateTrigRef::TooManyTriggers);
+ sendSignal(sender, GSN_CREATE_TRIG_REF,
+ signal, CreateTrigRef::SignalLength, JBB);
+ return;
+ }
+
+ triggerData = triggerPtr.p;
+ triggerData->triggerId = createTrigReq->getTriggerId();
+ triggerData->triggerType = createTrigReq->getTriggerType();
+ triggerData->triggerEvent = createTrigReq->getTriggerEvent();
+ triggerData->attributeMask = createTrigReq->getAttributeMask();
+ if (triggerData->triggerType == TriggerType::SECONDARY_INDEX)
+ triggerData->indexId = createTrigReq->getIndexId();
+ CreateTrigConf * const createTrigConf =
+ (CreateTrigConf *)&signal->theData[0];
+
+ createTrigConf->setConnectionPtr(createTrigReq->getConnectionPtr());
+ sendSignal(sender, GSN_CREATE_TRIG_CONF,
+ signal, CreateTrigConf::SignalLength, JBB);
+}
+
+
+void Dbtc::execDROP_TRIG_REQ(Signal* signal)
+{
+ jamEntry();
+ DropTrigReq * const dropTrigReq = (DropTrigReq *)&signal->theData[0];
+ BlockReference sender = signal->senderBlockRef();
+
+ if ((c_theDefinedTriggers.getPtr(dropTrigReq->getTriggerId())) == NULL) {
+ jam();
+ // Failed to find the trigger record
+ DropTrigRef * const dropTrigRef = (DropTrigRef *)&signal->theData[0];
+
+ dropTrigRef->setConnectionPtr(dropTrigReq->getConnectionPtr());
+ dropTrigRef->setErrorCode(DropTrigRef::TriggerNotFound);
+ sendSignal(sender, GSN_DROP_TRIG_REF,
+ signal, DropTrigRef::SignalLength, JBB);
+ return;
+ }
+
+ // Release trigger record
+ c_theDefinedTriggers.release(dropTrigReq->getTriggerId());
+
+ DropTrigConf * const dropTrigConf = (DropTrigConf *)&signal->theData[0];
+
+ dropTrigConf->setConnectionPtr(dropTrigReq->getConnectionPtr());
+ sendSignal(sender, GSN_DROP_TRIG_CONF,
+ signal, DropTrigConf::SignalLength, JBB);
+}
+
+void Dbtc::execCREATE_INDX_REQ(Signal* signal)
+{
+ jamEntry();
+ CreateIndxReq * const createIndxReq =
+ (CreateIndxReq *)signal->getDataPtr();
+ TcIndexData* indexData;
+ TcIndexDataPtr indexPtr;
+ BlockReference sender = signal->senderBlockRef();
+
+ if (ERROR_INSERTED(8034) ||
+ !c_theIndexes.seizeId(indexPtr, createIndxReq->getIndexId())) {
+ CLEAR_ERROR_INSERT_VALUE;
+ // Failed to allocate index record
+ CreateIndxRef * const createIndxRef =
+ (CreateIndxRef *)&signal->theData[0];
+
+ createIndxRef->setConnectionPtr(createIndxReq->getConnectionPtr());
+ createIndxRef->setErrorCode(CreateIndxRef::TooManyIndexes);
+ releaseSections(signal);
+ sendSignal(sender, GSN_CREATE_INDX_REF,
+ signal, CreateIndxRef::SignalLength, JBB);
+ return;
+ }
+ indexData = indexPtr.p;
+ // Indexes always start in state IS_BUILDING
+ // Will become IS_ONLINE in execALTER_INDX_REQ
+ indexData->indexState = IS_BUILDING;
+ indexData->indexId = indexPtr.i;
+ indexData->primaryTableId = createIndxReq->getTableId();
+
+ // So far need only attribute count
+ SegmentedSectionPtr ssPtr;
+ signal->getSection(ssPtr, CreateIndxReq::ATTRIBUTE_LIST_SECTION);
+ SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
+ r0.reset(); // undo implicit first()
+ if (!r0.getWord(&indexData->attributeList.sz) ||
+ !r0.getWords(indexData->attributeList.id, indexData->attributeList.sz)) {
+ ndbrequire(false);
+ }
+ indexData->primaryKeyPos = indexData->attributeList.sz;
+
+ releaseSections(signal);
+
+ CreateIndxConf * const createIndxConf =
+ (CreateIndxConf *)&signal->theData[0];
+
+ createIndxConf->setConnectionPtr(createIndxReq->getConnectionPtr());
+ createIndxConf->setTableId(createIndxReq->getTableId());
+ createIndxConf->setIndexId(createIndxReq->getIndexId());
+ sendSignal(sender, GSN_CREATE_INDX_CONF,
+ signal, CreateIndxConf::SignalLength, JBB);
+}
+
+void Dbtc::execALTER_INDX_REQ(Signal* signal)
+{
+ jamEntry();
+ AlterIndxReq * const alterIndxReq = (AlterIndxReq *)signal->getDataPtr();
+ TcIndexData* indexData;
+ //BlockReference sender = signal->senderBlockRef();
+ BlockReference sender = (BlockReference) alterIndxReq->getUserRef();
+ Uint32 connectionPtr = alterIndxReq->getConnectionPtr();
+ AlterIndxReq::RequestType requestType = alterIndxReq->getRequestType();
+ Uint32 tableId = alterIndxReq->getTableId();
+ Uint32 indexId = alterIndxReq->getIndexId();
+ bool online = (alterIndxReq->getOnline() == 1);
+
+ if ((indexData = c_theIndexes.getPtr(indexId)) == NULL) {
+ jam();
+ // Failed to find index record
+ AlterIndxRef * const alterIndxRef =
+ (AlterIndxRef *)signal->getDataPtrSend();
+
+ alterIndxRef->setUserRef(reference());
+ alterIndxRef->setConnectionPtr(connectionPtr);
+ alterIndxRef->setRequestType(requestType);
+ alterIndxRef->setTableId(tableId);
+ alterIndxRef->setIndexId(indexId);
+ alterIndxRef->setErrorCode(AlterIndxRef::IndexNotFound);
+ alterIndxRef->setErrorLine(__LINE__);
+ alterIndxRef->setErrorNode(getOwnNodeId());
+ sendSignal(sender, GSN_ALTER_INDX_REF,
+ signal, AlterIndxRef::SignalLength, JBB);
+ return;
+ }
+ // Found index record, alter its state
+ if (online) {
+ jam();
+ indexData->indexState = IS_ONLINE;
+ } else {
+ jam();
+ indexData->indexState = IS_BUILDING;
+ }//if
+ AlterIndxConf * const alterIndxConf =
+ (AlterIndxConf *)signal->getDataPtrSend();
+
+ alterIndxConf->setUserRef(reference());
+ alterIndxConf->setConnectionPtr(connectionPtr);
+ alterIndxConf->setRequestType(requestType);
+ alterIndxConf->setTableId(tableId);
+ alterIndxConf->setIndexId(indexId);
+ sendSignal(sender, GSN_ALTER_INDX_CONF,
+ signal, AlterIndxConf::SignalLength, JBB);
+}
+
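+/**
+ * FIRE_TRIG_ORD
+ * Reports that a trigger has fired for an operation. The trigger values
+ * have already arrived in TRIG_ATTRINFO signals and been stored in a
+ * record in c_firedTriggerHash; here the record is validated against the
+ * word counts in the signal and queued on the transaction for execution.
+ */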
+void Dbtc::execFIRE_TRIG_ORD(Signal* signal)
+{
+ jamEntry();
+ FireTrigOrd * const fireOrd = (FireTrigOrd *)signal->getDataPtr();
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+ ApiConnectRecordPtr transPtr;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+ TcConnectRecordPtr opPtr;
+
+ /**
+ * TODO
+ * Check transid,
+ * Fix overload, i.e. invalid word count
+ */
+ TcFiredTriggerData key;
+ key.fireingOperation = fireOrd->getConnectionPtr();
+ key.nodeId = refToNode(signal->getSendersBlockRef());
+ FiredTriggerPtr trigPtr;
+ if(c_firedTriggerHash.find(trigPtr, key)){
+
+ c_firedTriggerHash.remove(trigPtr);
+
+ trigPtr.p->fragId= fireOrd->fragId;
+ bool ok = trigPtr.p->keyValues.getSize() == fireOrd->m_noPrimKeyWords;
+ ok &= trigPtr.p->afterValues.getSize() == fireOrd->m_noAfterValueWords;
+ ok &= trigPtr.p->beforeValues.getSize() == fireOrd->m_noBeforeValueWords;
+ if(ok){
+ opPtr.i = key.fireingOperation;
+ ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
+ transPtr.i = opPtr.p->apiConnect;
+ transPtr.p = &localApiConnectRecord[transPtr.i];
+
+ opPtr.p->noReceivedTriggers++;
+ opPtr.p->triggerExecutionCount++;
+
+ // Insert fired trigger in execution queue
+ transPtr.p->theFiredTriggers.add(trigPtr);
+ if (opPtr.p->noReceivedTriggers == opPtr.p->noFiredTriggers) {
+ executeTriggers(signal, &transPtr);
+ }
+ return;
+ }
+ jam();
+ c_theFiredTriggerPool.release(trigPtr);
+ }
+ jam();
+ /**
+ * Failed to find record or invalid word counts
+ */
+ ndbrequire(false);
+}
+
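+/**
+ * TRIG_ATTRINFO
+ * Accumulates primary key, before and after values for a fired trigger.
+ * A fired trigger record is seized on the first signal for an operation
+ * and is later picked up (and validated) by execFIRE_TRIG_ORD.
+ */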
+void Dbtc::execTRIG_ATTRINFO(Signal* signal)
+{
+ jamEntry();
+ TrigAttrInfo * const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtr();
+ Uint32 attrInfoLength = signal->getLength() - TrigAttrInfo::StaticLength;
+ const Uint32 *src = trigAttrInfo->getData();
+ FiredTriggerPtr firedTrigPtr;
+
+ TcFiredTriggerData key;
+ key.fireingOperation = trigAttrInfo->getConnectionPtr();
+ key.nodeId = refToNode(signal->getSendersBlockRef());
+ if(!c_firedTriggerHash.find(firedTrigPtr, key)){
+ jam();
+ if(!c_firedTriggerHash.seize(firedTrigPtr)){
+ jam();
+ /**
+ * Will be handled when FIRE_TRIG_ORD arrives
+ */
+ ndbout_c("op: %d node: %d failed to seize",
+ key.fireingOperation, key.nodeId);
+ return;
+ }
+ ndbrequire(firedTrigPtr.p->keyValues.getSize() == 0 &&
+ firedTrigPtr.p->beforeValues.getSize() == 0 &&
+ firedTrigPtr.p->afterValues.getSize() == 0);
+
+ firedTrigPtr.p->nodeId = refToNode(signal->getSendersBlockRef());
+ firedTrigPtr.p->fireingOperation = key.fireingOperation;
+ firedTrigPtr.p->triggerId = trigAttrInfo->getTriggerId();
+ c_firedTriggerHash.add(firedTrigPtr);
+ }
+
+ AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
+ switch (trigAttrInfo->getAttrInfoType()) {
+ case(TrigAttrInfo::PRIMARY_KEY):
+ jam();
+ {
+ LocalDataBuffer<11> buf(pool, firedTrigPtr.p->keyValues);
+ buf.append(src, attrInfoLength);
+ }
+ break;
+ case(TrigAttrInfo::BEFORE_VALUES):
+ jam();
+ {
+ LocalDataBuffer<11> buf(pool, firedTrigPtr.p->beforeValues);
+ buf.append(src, attrInfoLength);
+ }
+ break;
+ case(TrigAttrInfo::AFTER_VALUES):
+ jam();
+ {
+ LocalDataBuffer<11> buf(pool, firedTrigPtr.p->afterValues);
+ buf.append(src, attrInfoLength);
+ }
+ break;
+ default:
+ ndbrequire(false);
+ }
+}
+
+void Dbtc::execDROP_INDX_REQ(Signal* signal)
+{
+ jamEntry();
+ DropIndxReq * const dropIndxReq = (DropIndxReq *)signal->getDataPtr();
+ TcIndexData* indexData;
+ BlockReference sender = signal->senderBlockRef();
+
+ if ((indexData = c_theIndexes.getPtr(dropIndxReq->getIndexId())) == NULL) {
+ jam();
+ // Failed to find index record
+ DropIndxRef * const dropIndxRef =
+ (DropIndxRef *)signal->getDataPtrSend();
+
+ dropIndxRef->setConnectionPtr(dropIndxReq->getConnectionPtr());
+ dropIndxRef->setErrorCode(DropIndxRef::IndexNotFound);
+ sendSignal(sender, GSN_DROP_INDX_REF,
+ signal, DropIndxRef::SignalLength, JBB);
+ return;
+ }
+ // Release index record
+ c_theIndexes.release(dropIndxReq->getIndexId());
+
+ DropIndxConf * const dropIndxConf =
+ (DropIndxConf *)signal->getDataPtrSend();
+
+ dropIndxConf->setConnectionPtr(dropIndxReq->getConnectionPtr());
+ sendSignal(sender, GSN_DROP_INDX_CONF,
+ signal, DropIndxConf::SignalLength, JBB);
+}
+
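+/**
+ * TCINDXREQ
+ * Entry point for an operation on a unique index. The original request is
+ * saved in a seized TcIndexOperation; key and attribute data beyond what
+ * fits in this signal arrive in INDXKEYINFO/INDXATTRINFO. Once complete,
+ * readIndexTable() is called to look up the base table primary key.
+ */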
+void Dbtc::execTCINDXREQ(Signal* signal)
+{
+ jamEntry();
+
+ TcKeyReq * const tcIndxReq = (TcKeyReq *)signal->getDataPtr();
+ const UintR TapiIndex = tcIndxReq->apiConnectPtr;
+ Uint32 tcIndxRequestInfo = tcIndxReq->requestInfo;
+ Uint32 startFlag = tcIndxReq->getStartFlag(tcIndxRequestInfo);
+ Uint32 * dataPtr = &tcIndxReq->scanInfo;
+ Uint32 indexBufSize = 8; // Maximum for index in TCINDXREQ
+ Uint32 attrBufSize = 5; // Maximum for attrInfo in TCINDXREQ
+ ApiConnectRecordPtr transPtr;
+ transPtr.i = TapiIndex;
+ if (transPtr.i >= capiConnectFilesize) {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//if
+ ptrAss(transPtr, apiConnectRecord);
+ ApiConnectRecord * const regApiPtr = transPtr.p;
+ // Seize index operation
+ TcIndexOperationPtr indexOpPtr;
+ if ((startFlag == 1) &&
+ (regApiPtr->apiConnectstate == CS_CONNECTED ||
+ (regApiPtr->apiConnectstate == CS_STARTED &&
+ regApiPtr->firstTcConnect == RNIL)) ||
+ (regApiPtr->apiConnectstate == CS_ABORTING &&
+ regApiPtr->abortState == AS_IDLE)) {
+ jam();
+ // This is a newly started transaction, clean-up
+ releaseAllSeizedIndexOperations(regApiPtr);
+
+ regApiPtr->transid[0] = tcIndxReq->transId1;
+ regApiPtr->transid[1] = tcIndxReq->transId2;
+ }//if
+
+ if (ERROR_INSERTED(8036) || !seizeIndexOperation(regApiPtr, indexOpPtr)) {
+ jam();
+ // Failed to allocate index operation
+ terrorCode = 288;
+ regApiPtr->m_exec_flag |= TcKeyReq::getExecuteFlag(tcIndxRequestInfo);
+ apiConnectptr = transPtr;
+ abortErrorLab(signal);
+ return;
+ }
+ TcIndexOperation* indexOp = indexOpPtr.p;
+ indexOp->indexOpId = indexOpPtr.i;
+
+ // Save original signal
+ indexOp->tcIndxReq = *tcIndxReq;
+ indexOp->connectionIndex = TapiIndex;
+ regApiPtr->accumulatingIndexOp = indexOp->indexOpId;
+
+ // If operation is readTupleExclusive or updateTuple then read index
+ // table with exclusive lock
+ Uint32 indexLength = TcKeyReq::getKeyLength(tcIndxRequestInfo);
+ Uint32 attrLength = tcIndxReq->attrLen;
+ indexOp->expectedKeyInfo = indexLength;
+ Uint32 includedIndexLength = MIN(indexLength, indexBufSize);
+ indexOp->expectedAttrInfo = attrLength;
+ Uint32 includedAttrLength = MIN(attrLength, attrBufSize);
+ if (saveINDXKEYINFO(signal,
+ indexOp,
+ dataPtr,
+ includedIndexLength)) {
+ jam();
+ // We have received all we need
+ readIndexTable(signal, regApiPtr, indexOp);
+ return;
+ }
+ dataPtr += includedIndexLength;
+ if (saveINDXATTRINFO(signal,
+ indexOp,
+ dataPtr,
+ includedAttrLength)) {
+ jam();
+ // We have received all we need
+ readIndexTable(signal, regApiPtr, indexOp);
+ return;
+ }
+}
+
+
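+/**
+ * Send TCINDXCONF back to the API. Large replies and replies to non-API
+ * blocks are sent directly; otherwise the result is appended to the
+ * per-node packed TCINDXCONF buffer.
+ */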
+void Dbtc::sendTcIndxConf(Signal* signal, UintR TcommitFlag)
+{
+ HostRecordPtr localHostptr;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ const UintR TopWords = (UintR)regApiPtr->tcindxrec;
+ localHostptr.i = refToNode(regApiPtr->ndbapiBlockref);
+ const Uint32 type = getNodeInfo(localHostptr.i).m_type;
+ const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+ const BlockNumber TblockNum = refToBlock(regApiPtr->ndbapiBlockref);
+ const Uint32 Tmarker = (regApiPtr->commitAckMarker == RNIL ? 0 : 1);
+ ptrAss(localHostptr, hostRecord);
+ UintR TcurrLen = localHostptr.p->noOfWordsTCINDXCONF;
+ UintR confInfo = 0;
+ TcIndxConf::setNoOfOperations(confInfo, (TopWords >> 1));
+ TcIndxConf::setCommitFlag(confInfo, TcommitFlag == 1);
+ TcIndxConf::setMarkerFlag(confInfo, Tmarker);
+ const UintR TpacketLen = 6 + TopWords;
+ regApiPtr->tcindxrec = 0;
+
+ if(TcommitFlag || (regApiPtr->lqhkeyreqrec == regApiPtr->lqhkeyconfrec)){
+ jam();
+ regApiPtr->m_exec_flag = 0;
+ }
+
+ if ((TpacketLen > 25) || !is_api){
+ TcIndxConf * const tcIndxConf = (TcIndxConf *)signal->getDataPtrSend();
+
+ jam();
+ tcIndxConf->apiConnectPtr = regApiPtr->ndbapiConnect;
+ tcIndxConf->gci = regApiPtr->globalcheckpointid;
+ tcIndxConf->confInfo = confInfo;
+ tcIndxConf->transId1 = regApiPtr->transid[0];
+ tcIndxConf->transId2 = regApiPtr->transid[1];
+ copyFromToLen(&regApiPtr->tcIndxSendArray[0],
+ (UintR*)&tcIndxConf->operations,
+ (UintR)ZTCOPCONF_SIZE);
+ sendSignal(regApiPtr->ndbapiBlockref,
+ GSN_TCINDXCONF, signal, (TpacketLen - 1), JBB);
+ return;
+ } else if (((TcurrLen + TpacketLen) > 25) && (TcurrLen > 0)) {
+ jam();
+ sendPackedTCINDXCONF(signal, localHostptr.p, localHostptr.i);
+ TcurrLen = 0;
+ } else {
+ jam();
+ updatePackedList(signal, localHostptr.p, localHostptr.i);
+ }//if
+// -------------------------------------------------------------------------
+// The header contains the block reference of the receiver plus the real
+// signal length - 3. Since TpacketLen is the real signal length plus one
+// additional word for the header, we subtract 4 here.
+// -------------------------------------------------------------------------
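+// Illustrative numbers only: with one operation TopWords = 2 and
+// TpacketLen = 8, the real signal length is 7 and the encoded value is
+// 8 - 4 = 4, which the unpacking side turns back into 4 + 3 = 7.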
+ UintR Tpack0 = (TblockNum << 16) + (TpacketLen - 4);
+ UintR Tpack1 = regApiPtr->ndbapiConnect;
+ UintR Tpack2 = regApiPtr->globalcheckpointid;
+ UintR Tpack3 = confInfo;
+ UintR Tpack4 = regApiPtr->transid[0];
+ UintR Tpack5 = regApiPtr->transid[1];
+
+ localHostptr.p->noOfWordsTCINDXCONF = TcurrLen + TpacketLen;
+
+ localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 0] = Tpack0;
+ localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 1] = Tpack1;
+ localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 2] = Tpack2;
+ localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 3] = Tpack3;
+ localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 4] = Tpack4;
+ localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 5] = Tpack5;
+
+ UintR Ti;
+ for (Ti = 6; Ti < TpacketLen; Ti++) {
+ localHostptr.p->packedWordsTCINDXCONF[TcurrLen + Ti] =
+ regApiPtr->tcIndxSendArray[Ti - 6];
+ }//for
+}//Dbtc::sendTcIndxConf()
+
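+/**
+ * INDXKEYINFO / INDXATTRINFO (below)
+ * Carry key and attribute data that did not fit in the TCINDXREQ signal.
+ * The data is appended to the accumulating index operation; when all
+ * expected words have arrived, the index table read is started.
+ */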
+void Dbtc::execINDXKEYINFO(Signal* signal)
+{
+ jamEntry();
+ Uint32 keyInfoLength = signal->getLength() - IndxKeyInfo::HeaderLength;
+ IndxKeyInfo * const indxKeyInfo = (IndxKeyInfo *)signal->getDataPtr();
+ const Uint32 *src = indxKeyInfo->getData();
+ const UintR TconnectIndex = indxKeyInfo->connectPtr;
+ ApiConnectRecordPtr transPtr;
+ transPtr.i = TconnectIndex;
+ if (transPtr.i >= capiConnectFilesize) {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//if
+ ptrAss(transPtr, apiConnectRecord);
+ ApiConnectRecord * const regApiPtr = transPtr.p;
+ TcIndexOperationPtr indexOpPtr;
+ TcIndexOperation* indexOp;
+
+ if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
+ {
+ indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
+ if (saveINDXKEYINFO(signal,
+ indexOp,
+ src,
+ keyInfoLength)) {
+ jam();
+ // We have received all we need
+ readIndexTable(signal, regApiPtr, indexOp);
+ }
+ }
+}
+
+void Dbtc::execINDXATTRINFO(Signal* signal)
+{
+ jamEntry();
+ Uint32 attrInfoLength = signal->getLength() - IndxAttrInfo::HeaderLength;
+ IndxAttrInfo * const indxAttrInfo = (IndxAttrInfo *)signal->getDataPtr();
+ const Uint32 *src = indxAttrInfo->getData();
+ const UintR TconnectIndex = indxAttrInfo->connectPtr;
+ ApiConnectRecordPtr transPtr;
+ transPtr.i = TconnectIndex;
+ if (transPtr.i >= capiConnectFilesize) {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//if
+ ptrAss(transPtr, apiConnectRecord);
+ ApiConnectRecord * const regApiPtr = transPtr.p;
+ TcIndexOperationPtr indexOpPtr;
+ TcIndexOperation* indexOp;
+
+ if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
+ {
+ indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
+ if (saveINDXATTRINFO(signal,
+ indexOp,
+ src,
+ attrInfoLength)) {
+ jam();
+ // We have received all we need
+ readIndexTable(signal, regApiPtr, indexOp);
+ }
+ }
+}
+
+/**
+ * Save signal INDXKEYINFO
+ * Return true if we have received all needed data
+ */
+bool Dbtc::saveINDXKEYINFO(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len)
+{
+ if (!indexOp->keyInfo.append(src, len)) {
+ jam();
+ // Failed to seize keyInfo, abort transaction
+#ifdef VM_TRACE
+ ndbout_c("Dbtc::saveINDXKEYINFO: Failed to seize keyinfo\n");
+#endif
+ // Abort transaction
+ apiConnectptr.i = indexOp->connectionIndex;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ releaseIndexOperation(apiConnectptr.p, indexOp);
+ terrorCode = 4000;
+ abortErrorLab(signal);
+ return false;
+ }
+ if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
+ jam();
+ return true;
+ }
+ return false;
+}
+
+bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
+{
+ return (indexOp->keyInfo.getSize() == indexOp->expectedKeyInfo);
+}
+
+/**
+ * Save signal INDXATTRINFO
+ * Return true if we have received all needed data
+ */
+bool Dbtc::saveINDXATTRINFO(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len)
+{
+ if (!indexOp->attrInfo.append(src, len)) {
+ jam();
+#ifdef VM_TRACE
+ ndbout_c("Dbtc::saveINDXATTRINFO: Failed to seize attrInfo\n");
+#endif
+ apiConnectptr.i = indexOp->connectionIndex;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ releaseIndexOperation(apiConnectptr.p, indexOp);
+ terrorCode = 4000;
+ abortErrorLab(signal);
+ return false;
+ }
+ if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
+ jam();
+ return true;
+ }
+ return false;
+}
+
+bool Dbtc::receivedAllINDXATTRINFO(TcIndexOperation* indexOp)
+{
+ return (indexOp->attrInfo.getSize() == indexOp->expectedAttrInfo);
+}
+
+bool Dbtc::saveTRANSID_AI(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len)
+{
+ Uint32 currentTransIdAILength = indexOp->transIdAI.getSize();
+
+ if (currentTransIdAILength == 0) {
+ jam();
+ // Read first AttributeHeader to get expected size
+ // of the single key attribute expected
+ AttributeHeader* head = (AttributeHeader *) src;
+ indexOp->expectedTransIdAI = head->getHeaderSize() + head->getDataSize();
+ }
+ if (!indexOp->transIdAI.append(src, len)) {
+ jam();
+#ifdef VM_TRACE
+ ndbout_c("Dbtc::saveTRANSID_AI: Failed to seize transIdAI\n");
+#endif
+ apiConnectptr.i = indexOp->connectionIndex;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ releaseIndexOperation(apiConnectptr.p, indexOp);
+ terrorCode = 4000;
+ abortErrorLab(signal);
+ return false;
+ }
+ return true;
+}
+
+bool Dbtc::receivedAllTRANSID_AI(TcIndexOperation* indexOp)
+{
+ return (indexOp->transIdAI.getSize() == indexOp->expectedTransIdAI);
+}
+
+/**
+ * Receive signal TCKEYCONF
+ * This can be either the return of reading an index table
+ * or performing an index operation
+ */
+void Dbtc::execTCKEYCONF(Signal* signal)
+{
+ TcKeyConf * const tcKeyConf = (TcKeyConf *)signal->getDataPtr();
+ TcIndexOperationPtr indexOpPtr;
+
+ jamEntry();
+ indexOpPtr.i = tcKeyConf->apiConnectPtr;
+ TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
+ Uint32 confInfo = tcKeyConf->confInfo;
+
+ /**
+ * Check on TCKEYCONF whether the transaction was committed
+ */
+ Uint32 Tcommit = TcKeyConf::getCommitFlag(confInfo);
+
+ indexOpPtr.p = indexOp;
+ if (!indexOp) {
+ jam();
+ // Missing index operation
+ return;
+ }
+ const UintR TconnectIndex = indexOp->connectionIndex;
+ ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
+ apiConnectptr.p = regApiPtr;
+ apiConnectptr.i = TconnectIndex;
+ switch(indexOp->indexOpState) {
+ case(IOS_NOOP): {
+ jam();
+ // Should never happen, abort
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4349;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ }
+ case(IOS_INDEX_ACCESS): {
+ jam();
+ // Wait for TRANSID_AI
+ indexOp->indexOpState = IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI;
+ break;
+ }
+ case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI): {
+ jam();
+ // Double TCKEYCONF, should never happen, abort
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4349;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ }
+ case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): {
+ jam();
+ // Continue with index operation
+ executeIndexOperation(signal, regApiPtr, indexOp);
+ break;
+ }
+ case(IOS_INDEX_OPERATION): {
+ // We are done, send TCINDXCONF
+ jam();
+ Uint32 Ttcindxrec = regApiPtr->tcindxrec;
+ // Copy reply from TcKeyConf
+
+ ndbassert(regApiPtr->noIndexOp);
+ regApiPtr->noIndexOp--; // Decrease count
+ regApiPtr->tcIndxSendArray[Ttcindxrec] = indexOp->tcIndxReq.senderData;
+ regApiPtr->tcIndxSendArray[Ttcindxrec + 1] =
+ tcKeyConf->operations[0].attrInfoLen;
+ regApiPtr->tcindxrec = Ttcindxrec + 2;
+ if (regApiPtr->noIndexOp == 0) {
+ jam();
+ sendTcIndxConf(signal, Tcommit);
+ } else if (regApiPtr->tcindxrec == ZTCOPCONF_SIZE) {
+ jam();
+ sendTcIndxConf(signal, 0);
+ }
+ releaseIndexOperation(regApiPtr, indexOp);
+ break;
+ }
+ }
+}
+
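+/**
+ * TCKEYREF for a TCKEYREQ issued on behalf of an index operation.
+ * A failed index read during commit aborts the transaction; otherwise the
+ * error is passed on to the API as TCINDXREF.
+ */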
+void Dbtc::execTCKEYREF(Signal* signal)
+{
+ TcKeyRef * const tcKeyRef = (TcKeyRef *)signal->getDataPtr();
+ TcIndexOperationPtr indexOpPtr;
+
+ jamEntry();
+ indexOpPtr.i = tcKeyRef->connectPtr;
+ TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
+ indexOpPtr.p = indexOp;
+ if (!indexOp) {
+ jam();
+ // Missing index operation
+ return;
+ }
+ const UintR TconnectIndex = indexOp->connectionIndex;
+ ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
+ Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
+ Uint32 commitFlg = TcKeyReq::getCommitFlag(tcKeyRequestInfo);
+
+ switch(indexOp->indexOpState) {
+ case(IOS_NOOP): {
+ jam();
+ // Should never happen, abort
+ break;
+ }
+ case(IOS_INDEX_ACCESS):
+ case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI):
+ case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): {
+ jam();
+ // If we fail index access for a non-read operation during commit
+ // we abort transaction
+ if (commitFlg == 1) {
+ jam();
+ releaseIndexOperation(regApiPtr, indexOp);
+ apiConnectptr.i = indexOp->connectionIndex;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ terrorCode = tcKeyRef->errorCode;
+ abortErrorLab(signal);
+ break;
+ }
+ /**
+ * Increase count as it will be decreased below...
+ * (and the code is written to handle failing lookup on "real" table
+ * not lookup on index table)
+ */
+ regApiPtr->noIndexOp++;
+ // else continue
+ }
+ case(IOS_INDEX_OPERATION): {
+ // Send TCINDXREF
+
+ jam();
+ TcKeyReq * const tcIndxReq = &indexOp->tcIndxReq;
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ ndbassert(regApiPtr->noIndexOp);
+ regApiPtr->noIndexOp--; // Decrease count
+ tcIndxRef->connectPtr = tcIndxReq->senderData;
+ tcIndxRef->transId[0] = tcKeyRef->transId[0];
+ tcIndxRef->transId[1] = tcKeyRef->transId[1];
+ tcIndxRef->errorCode = tcKeyRef->errorCode;
+ sendSignal(regApiPtr->ndbapiBlockref,
+ GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB);
+ return;
+ }
+ }
+}
+
+void Dbtc::execTRANSID_AI_R(Signal* signal){
+ TransIdAI * const transIdAI = (TransIdAI *)signal->getDataPtr();
+ Uint32 sigLen = signal->length();
+ Uint32 dataLen = sigLen - TransIdAI::HeaderLength - 1;
+ Uint32 recBlockref = transIdAI->attrData[dataLen];
+
+ jamEntry();
+
+ /**
+ * Forward signal to final destination
+ * Truncate last word since that was used to hold the final dest.
+ */
+ sendSignal(recBlockref, GSN_TRANSID_AI,
+ signal, sigLen - 1, JBB);
+}
+
+void Dbtc::execKEYINFO20_R(Signal* signal){
+ KeyInfo20 * const keyInfo = (KeyInfo20 *)signal->getDataPtr();
+ Uint32 sigLen = signal->length();
+ Uint32 dataLen = sigLen - KeyInfo20::HeaderLength - 1;
+ Uint32 recBlockref = keyInfo->keyData[dataLen];
+
+ jamEntry();
+
+ /**
+ * Forward signal to final destination
+ * Truncate last word since that was used to hold the final dest.
+ */
+ sendSignal(recBlockref, GSN_KEYINFO20,
+ signal, sigLen - 1, JBB);
+}
+
+
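+/**
+ * TRANSID_AI
+ * Carries the result of the index table read, i.e. the primary key of the
+ * base table. The data is accumulated in saveTRANSID_AI and, depending on
+ * whether TCKEYCONF has already arrived, the index operation is either
+ * executed directly or waits for the confirmation.
+ */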
+void Dbtc::execTRANSID_AI(Signal* signal)
+{
+ TransIdAI * const transIdAI = (TransIdAI *)signal->getDataPtr();
+
+ jamEntry();
+ TcIndexOperationPtr indexOpPtr;
+ indexOpPtr.i = transIdAI->connectPtr;
+ TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
+ indexOpPtr.p = indexOp;
+ if (!indexOp) {
+ jam();
+ // Missing index operation
+ }
+ const UintR TconnectIndex = indexOp->connectionIndex;
+ // ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
+ ApiConnectRecordPtr transPtr;
+
+ transPtr.i = TconnectIndex;
+ ptrCheckGuard(transPtr, capiConnectFilesize, apiConnectRecord);
+ ApiConnectRecord * const regApiPtr = transPtr.p;
+
+ // Accumulate attribute data
+ if (!saveTRANSID_AI(signal,
+ indexOp,
+ transIdAI->getData(),
+ signal->getLength() - TransIdAI::HeaderLength)) {
+ jam();
+ // Failed to allocate space for TransIdAI
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4000;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ }
+
+ switch(indexOp->indexOpState) {
+ case(IOS_NOOP): {
+ jam();
+ // Should never happen, abort
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4349;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ }
+ case(IOS_INDEX_ACCESS): {
+ jam();
+ // Check if all TRANSID_AI have been received
+ if (receivedAllTRANSID_AI(indexOp)) {
+ jam();
+ // Wait for TRANSID_AI
+ indexOp->indexOpState = IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF;
+ }
+ break;
+ }
+ case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): {
+ jam();
+#ifdef VM_TRACE
+ ndbout_c("Dbtc::execTRANSID_AI: Too many TRANSID_AI, ignore for now\n");
+#endif
+ /*
+ // Too many TRANSID_AI
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndexRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4349;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ */
+ break;
+ }
+ case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI): {
+ jam();
+ // Check if all TRANSID_AI have been received
+ if (receivedAllTRANSID_AI(indexOp)) {
+ jam();
+ // Continue with index operation
+ executeIndexOperation(signal, regApiPtr, indexOp);
+ }
+ // else continue waiting for more TRANSID_AI
+ break;
+ }
+ case(IOS_INDEX_OPERATION): {
+ // Should never receive TRANSID_AI in this state!!
+ jam();
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4349;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ }
+ }
+}
+
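+/**
+ * Translate the connect pointer of an index operation back to the API's
+ * senderData and forward the TCROLLBACKREP to the API block.
+ */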
+void Dbtc::execTCROLLBACKREP(Signal* signal)
+{
+ TcRollbackRep* tcRollbackRep = (TcRollbackRep *)signal->getDataPtr();
+ jamEntry();
+ TcIndexOperationPtr indexOpPtr;
+ indexOpPtr.i = tcRollbackRep->connectPtr;
+ TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
+ indexOpPtr.p = indexOp;
+ tcRollbackRep = (TcRollbackRep *)signal->getDataPtrSend();
+ tcRollbackRep->connectPtr = indexOp->tcIndxReq.senderData;
+ sendSignal(apiConnectptr.p->ndbapiBlockref,
+ GSN_TCROLLBACKREP, signal, TcRollbackRep::SignalLength, JBB);
+}
+
+/**
+ * Read index table with the index attributes as PK
+ */
+void Dbtc::readIndexTable(Signal* signal,
+ ApiConnectRecord* regApiPtr,
+ TcIndexOperation* indexOp)
+{
+ Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
+ Uint32 dataPos = 0;
+ TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
+ Uint32 * dataPtr = &tcKeyReq->scanInfo;
+ Uint32 tcKeyLength = TcKeyReq::StaticLength;
+ Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
+ AttributeBuffer::DataBufferIterator keyIter;
+ Uint32 keyLength = TcKeyReq::getKeyLength(tcKeyRequestInfo);
+ TcIndexData* indexData;
+ Uint32 transId1 = indexOp->tcIndxReq.transId1;
+ Uint32 transId2 = indexOp->tcIndxReq.transId2;
+
+ const Operation_t opType =
+ (Operation_t)TcKeyReq::getOperationType(tcKeyRequestInfo);
+
+ // Find index table
+ if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq.tableId)) == NULL) {
+ jam();
+ // Failed to find index record
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4000;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ }
+ tcKeyReq->transId1 = transId1;
+ tcKeyReq->transId2 = transId2;
+ tcKeyReq->tableId = indexData->indexId;
+ tcKeyLength += MIN(keyLength, keyBufSize);
+ tcKeyReq->tableSchemaVersion = indexOp->tcIndxReq.tableSchemaVersion;
+ TcKeyReq::setOperationType(tcKeyRequestInfo,
+ opType == ZREAD ? ZREAD : ZREAD_EX);
+ TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 1); // Always send one AttrInfo
+ TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, 0);
+ BlockReference originalReceiver = regApiPtr->ndbapiBlockref;
+ regApiPtr->ndbapiBlockref = reference(); // Send result to me
+ tcKeyReq->senderData = indexOp->indexOpId;
+ indexOp->indexOpState = IOS_INDEX_ACCESS;
+ regApiPtr->executingIndexOp = regApiPtr->accumulatingIndexOp;
+ regApiPtr->accumulatingIndexOp = RNIL;
+ regApiPtr->isIndexOp = true;
+
+ Uint32 remainingKey = indexOp->keyInfo.getSize();
+ bool moreKeyData = indexOp->keyInfo.first(keyIter);
+ // *********** KEYINFO in TCKEYREQ ***********
+ while((dataPos < keyBufSize) &&
+ (remainingKey-- != 0)) {
+ *dataPtr++ = *keyIter.data;
+ dataPos++;
+ moreKeyData = indexOp->keyInfo.next(keyIter);
+ }
+ // *********** ATTRINFO in TCKEYREQ ***********
+ tcKeyReq->attrLen = 1; // Primary key is stored as one attribute
+ AttributeHeader::init(dataPtr, indexData->primaryKeyPos, 0);
+ tcKeyLength++;
+ tcKeyReq->requestInfo = tcKeyRequestInfo;
+
+ ndbassert(TcKeyReq::getDirtyFlag(tcKeyRequestInfo) == 0);
+ ndbassert(TcKeyReq::getSimpleFlag(tcKeyRequestInfo) == 0);
+ EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
+
+ /**
+ * "Fool" TC not to start commiting transaction since it always will
+ * have one outstanding lqhkeyreq
+ * This is later decreased when the index read is complete
+ */
+ regApiPtr->lqhkeyreqrec++;
+
+ /**
+ * Remember ptr to index read operation
+ * (used to set correct save point id on index operation later)
+ */
+ indexOp->indexReadTcConnect = regApiPtr->lastTcConnect;
+
+ jamEntry();
+ // *********** KEYINFO ***********
+ if (moreKeyData) {
+ jam();
+ // Send KEYINFO sequence
+ KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
+
+ keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
+ keyInfo->transId[0] = transId1;
+ keyInfo->transId[1] = transId2;
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ while(remainingKey-- != 0) {// If we have not read complete key
+ *dataPtr++ = *keyIter.data;
+ dataPos++;
+ if (dataPos == KeyInfo::DataLength) {
+ // Flush KEYINFO
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength);
+ jamEntry();
+ dataPos = 0;
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ }
+ moreKeyData = indexOp->keyInfo.next(keyIter);
+ }
+ if (dataPos != 0) {
+ // Flush last KEYINFO
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + dataPos);
+ jamEntry();
+ }
+ }
+
+ regApiPtr->ndbapiBlockref = originalReceiver; // reset original receiver
+}
+
+/**
+ * Execute the index operation, using the result of the
+ * index table read as the primary key
+ */
+void Dbtc::executeIndexOperation(Signal* signal,
+ ApiConnectRecord* regApiPtr,
+ TcIndexOperation* indexOp) {
+
+ Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
+ Uint32 attrBufSize = 5;
+ Uint32 dataPos = 0;
+ TcKeyReq * const tcIndxReq = &indexOp->tcIndxReq;
+ TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
+ /*
+ Data starts at distrGroupHashValue since scanInfo is used to send the
+ fragment id of the receiving fragment
+ */
+ Uint32 * dataPtr = &tcKeyReq->distrGroupHashValue;
+ Uint32 tcKeyLength = TcKeyReq::StaticLength;
+ Uint32 tcKeyRequestInfo = tcIndxReq->requestInfo;
+ TcIndexData* indexData;
+ AttributeBuffer::DataBufferIterator attrIter;
+ AttributeBuffer::DataBufferIterator aiIter;
+ bool moreKeyData = indexOp->transIdAI.first(aiIter);
+
+ // Find index table
+ if ((indexData = c_theIndexes.getPtr(tcIndxReq->tableId)) == NULL) {
+ jam();
+ // Failed to find index record
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4349;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ }
+ // Find schema version of primary table
+ TableRecordPtr tabPtr;
+ tabPtr.i = indexData->primaryTableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+
+ tcKeyReq->apiConnectPtr = tcIndxReq->apiConnectPtr;
+ tcKeyReq->attrLen = tcIndxReq->attrLen;
+ tcKeyReq->tableId = indexData->primaryTableId;
+ tcKeyReq->tableSchemaVersion = tabPtr.p->currentSchemaVersion;
+ tcKeyReq->transId1 = regApiPtr->transid[0];
+ tcKeyReq->transId2 = regApiPtr->transid[1];
+ tcKeyReq->senderData = tcIndxReq->senderData; // Needed for TRANSID_AI to API
+ indexOp->indexOpState = IOS_INDEX_OPERATION;
+ regApiPtr->isIndexOp = true;
+ regApiPtr->executingIndexOp = indexOp->indexOpId;
+ regApiPtr->noIndexOp++; // Increase count
+
+ /*
+ Filter out AttributeHeader:s since this should not be in key.
+ Also filter out fragment id from primary key and handle that
+ separately by setting it as Distribution Key and set indicator.
+ */
+
+ AttributeHeader* attrHeader = (AttributeHeader *) aiIter.data;
+
+ Uint32 headerSize = attrHeader->getHeaderSize();
+ Uint32 keySize = attrHeader->getDataSize() - 1;
+ TcKeyReq::setKeyLength(tcKeyRequestInfo, keySize);
+ // Skip header
+ if (headerSize == 1) {
+ jam();
+ moreKeyData = indexOp->transIdAI.next(aiIter);
+ } else {
+ jam();
+ moreKeyData = indexOp->transIdAI.next(aiIter, headerSize - 1);
+ }//if
+ tcKeyReq->scanInfo = *aiIter.data; //Fragment Id
+ moreKeyData = indexOp->transIdAI.next(aiIter);
+ TcKeyReq::setDistributionKeyFlag(tcKeyRequestInfo, 1U);
+ while(// If we have not read complete key
+ (keySize != 0) &&
+ (dataPos < keyBufSize)) {
+ *dataPtr++ = *aiIter.data;
+ dataPos++;
+ keySize--;
+ moreKeyData = indexOp->transIdAI.next(aiIter);
+ }
+ tcKeyLength += dataPos;
+
+ Uint32 attributesLength = indexOp->attrInfo.getSize();
+ if (attributesLength <= attrBufSize) {
+ jam();
+ // ATTRINFO fits in TCKEYREQ
+ // Pack ATTRINFO IN TCKEYREQ
+ TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, indexOp->attrInfo.getSize());
+ // Insert IndxAttrInfo
+ for(bool moreAttrData = indexOp->attrInfo.first(attrIter);
+ moreAttrData;
+ moreAttrData = indexOp->attrInfo.next(attrIter)) {
+ *dataPtr++ = *attrIter.data;
+ }
+ tcKeyLength += attributesLength;
+ } else {
+ jam();
+ // No ATTRINFO in TCKEYREQ
+ TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 0);
+ }
+
+ TcKeyReq::setCommitFlag(tcKeyRequestInfo, 0);
+ TcKeyReq::setExecuteFlag(tcKeyRequestInfo, 0);
+ TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, 0);
+ tcKeyReq->requestInfo = tcKeyRequestInfo;
+
+ ndbassert(TcKeyReq::getDirtyFlag(tcKeyRequestInfo) == 0);
+ ndbassert(TcKeyReq::getSimpleFlag(tcKeyRequestInfo) == 0);
+
+ /**
+ * Decrease lqhkeyreqrec to compensate for addition
+ * during read of index table
+ * I.e. let TC start committing when the other operations have completed
+ */
+ regApiPtr->lqhkeyreqrec--;
+
+ /**
+ * Fix savepoint id -
+ * fix so that index operation has the same savepoint id
+ * as the read of the index table (TCINDXREQ)
+ */
+ TcConnectRecordPtr tmp;
+ tmp.i = indexOp->indexReadTcConnect;
+ ptrCheckGuard(tmp, ctcConnectFilesize, tcConnectRecord);
+ const Uint32 currSavePointId = regApiPtr->currSavePointId;
+ regApiPtr->currSavePointId = tmp.p->savePointId;
+ EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
+ regApiPtr->currSavePointId = currSavePointId;
+
+ jamEntry();
+ // *********** KEYINFO ***********
+ if (moreKeyData) {
+ jam();
+ // Send KEYINFO sequence
+ KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
+
+ keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
+ keyInfo->transId[0] = regApiPtr->transid[0];
+ keyInfo->transId[1] = regApiPtr->transid[1];
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ // Pack any part of a key attribute that did not fit in TCKEYREQ
+ while(keySize-- != 0) {// If we have not read complete key
+ *dataPtr++ = *aiIter.data;
+ dataPos++;
+ if (dataPos == KeyInfo::DataLength) {
+ // Flush KEYINFO
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength);
+ jamEntry();
+ dataPos = 0;
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ }
+ moreKeyData = indexOp->transIdAI.next(aiIter);
+ }
+ if (dataPos != 0) {
+ // Flush last KEYINFO
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + dataPos);
+ jamEntry();
+ }
+ }
+
+ // *********** ATTRINFO ***********
+ if (attributesLength > attrBufSize) {
+ jam();
+ // No ATTRINFO in TcKeyReq
+ TcKeyReq::setAIInTcKeyReq(tcKeyReq->requestInfo, 0);
+ // Send ATTRINFO sequence
+ AttrInfo * const attrInfo = (AttrInfo *)signal->getDataPtrSend();
+ Uint32 attrInfoPos = 0;
+
+ attrInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
+ attrInfo->transId[0] = regApiPtr->transid[0];
+ attrInfo->transId[1] = regApiPtr->transid[1];
+ dataPtr = (Uint32 *) &attrInfo->attrData;
+
+
+ // Insert attribute values (insert key values of primary table)
+ for(bool moreAttrData = indexOp->attrInfo.first(attrIter);
+ moreAttrData;
+ moreAttrData = indexOp->attrInfo.next(attrIter)) {
+ *dataPtr++ = *attrIter.data;
+ attrInfoPos++;
+ if (attrInfoPos == AttrInfo::DataLength) {
+ // Flush ATTRINFO
+ EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + AttrInfo::DataLength);
+ jamEntry();
+ attrInfoPos = 0;
+ dataPtr = (Uint32 *) &attrInfo->attrData;
+ }
+ }
+ if (attrInfoPos != 0) {
+ // Send last ATTRINFO
+ EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + attrInfoPos);
+ jamEntry();
+ }
+ }
+}
+
+bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr,
+ TcIndexOperationPtr& indexOpPtr)
+{
+ return regApiPtr->theSeizedIndexOperations.seize(indexOpPtr);
+}
+
+void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
+ TcIndexOperation* indexOp)
+{
+ indexOp->indexOpState = IOS_NOOP;
+ indexOp->expectedKeyInfo = 0;
+ indexOp->keyInfo.release();
+ indexOp->expectedAttrInfo = 0;
+ indexOp->attrInfo.release();
+ indexOp->expectedTransIdAI = 0;
+ indexOp->transIdAI.release();
+ regApiPtr->theSeizedIndexOperations.release(indexOp->indexOpId);
+}
+
+void Dbtc::releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr)
+{
+ TcIndexOperationPtr seizedIndexOpPtr;
+
+ regApiPtr->theSeizedIndexOperations.first(seizedIndexOpPtr);
+ while(seizedIndexOpPtr.i != RNIL) {
+ jam();
+ TcIndexOperation* indexOp = seizedIndexOpPtr.p;
+
+ indexOp->indexOpState = IOS_NOOP;
+ indexOp->expectedKeyInfo = 0;
+ indexOp->keyInfo.release();
+ indexOp->expectedAttrInfo = 0;
+ indexOp->attrInfo.release();
+ indexOp->expectedTransIdAI = 0;
+ indexOp->transIdAI.release();
+ regApiPtr->theSeizedIndexOperations.next(seizedIndexOpPtr);
+ }
+ regApiPtr->theSeizedIndexOperations.release();
+}
+
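+/**
+ * Save the LQHKEYCONF of the triggering operation so that it can be
+ * replayed by continueTriggeringOp() once all triggers for the operation
+ * have executed.
+ */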
+void Dbtc::saveTriggeringOpState(Signal* signal, TcConnectRecord* trigOp)
+{
+ LqhKeyConf * lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+ copyFromToLen((UintR*)lqhKeyConf,
+ &trigOp->savedState[0],
+ LqhKeyConf::SignalLength);
+}
+
+void Dbtc::continueTriggeringOp(Signal* signal, TcConnectRecord* trigOp)
+{
+ LqhKeyConf * lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+ copyFromToLen(&trigOp->savedState[0],
+ (UintR*)lqhKeyConf,
+ LqhKeyConf::SignalLength);
+
+ lqhKeyConf->noFiredTriggers = 0;
+ trigOp->noReceivedTriggers = 0;
+
+ // All triggers executed successfully, continue operation
+ execLQHKEYCONF(signal);
+}
+
+void Dbtc::scheduleFiredTrigger(ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr)
+{
+ // Set initial values for the trigger firing operation
+ opPtr->p->triggerExecutionCount++;
+
+ // Insert fired trigger in execution queue
+ transPtr->p->theFiredTriggers.add(opPtr->p->accumulatingTriggerData);
+ opPtr->p->accumulatingTriggerData.i = RNIL;
+ opPtr->p->accumulatingTriggerData.p = NULL;
+}
+
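+/**
+ * Walk the transaction's queue of fired triggers and execute every trigger
+ * whose operation has received all its trigger data, releasing the buffered
+ * key/before/after values afterwards. If the transaction is not in a state
+ * where triggers may run, a CONTINUEB(TRIGGER_PENDING) is scheduled instead.
+ */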
+void Dbtc::executeTriggers(Signal* signal, ApiConnectRecordPtr* transPtr)
+{
+ ApiConnectRecord* regApiPtr = transPtr->p;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+ TcConnectRecordPtr opPtr;
+ FiredTriggerPtr trigPtr;
+
+ if (!regApiPtr->theFiredTriggers.isEmpty()) {
+ jam();
+ if ((regApiPtr->apiConnectstate == CS_STARTED) ||
+ (regApiPtr->apiConnectstate == CS_START_COMMITTING)) {
+ jam();
+ regApiPtr->theFiredTriggers.first(trigPtr);
+ while (trigPtr.i != RNIL) {
+ jam();
+ // Execute all ready triggers in parallel
+ opPtr.i = trigPtr.p->fireingOperation;
+ ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
+ FiredTriggerPtr nextTrigPtr = trigPtr;
+ regApiPtr->theFiredTriggers.next(nextTrigPtr);
+ if (opPtr.p->noReceivedTriggers == opPtr.p->noFiredTriggers) {
+ jam();
+ // Firing operation is ready to have a trigger executing
+ executeTrigger(signal, trigPtr.p, transPtr, &opPtr);
+ // Should allow for interleaving here by sending a CONTINUEB and
+ // return
+ // Release trigger records
+ AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
+ LocalDataBuffer<11> tmp1(pool, trigPtr.p->keyValues);
+ tmp1.release();
+ LocalDataBuffer<11> tmp2(pool, trigPtr.p->beforeValues);
+ tmp2.release();
+ LocalDataBuffer<11> tmp3(pool, trigPtr.p->afterValues);
+ tmp3.release();
+ regApiPtr->theFiredTriggers.release(trigPtr.i);
+ }
+ trigPtr = nextTrigPtr;
+ }
+ return;
+ // No more triggers, continue transaction after last executed trigger has
+ // returned (in execLQHKEYCONF or execLQHKEYREF)
+ } else {
+ // Wait until transaction is ready to execute a trigger
+ jam();
+ if (!regApiPtr->triggerPending) {
+ jam();
+ regApiPtr->triggerPending = true;
+ signal->theData[0] = TcContinueB::TRIGGER_PENDING;
+ signal->theData[1] = transPtr->i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ }
+ // else
+ // We are already waiting for a pending trigger (CONTINUEB)
+ }
+ }
+}
+
+void Dbtc::executeTrigger(Signal* signal,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr)
+{
+ TcDefinedTriggerData* definedTriggerData;
+
+ if ((definedTriggerData =
+ c_theDefinedTriggers.getPtr(firedTriggerData->triggerId))
+ != NULL) {
+ switch(definedTriggerData->triggerType) {
+ case(TriggerType::SECONDARY_INDEX):
+ jam();
+ executeIndexTrigger(signal, definedTriggerData, firedTriggerData,
+ transPtr, opPtr);
+ break;
+ default:
+ ndbrequire(false);
+ }
+ }
+}
+
+void Dbtc::executeIndexTrigger(Signal* signal,
+ TcDefinedTriggerData* definedTriggerData,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr)
+{
+ TcIndexData* indexData;
+
+ indexData = c_theIndexes.getPtr(definedTriggerData->indexId);
+ ndbassert(indexData != NULL);
+
+ switch (definedTriggerData->triggerEvent) {
+ case(TriggerEvent::TE_INSERT): {
+ jam();
+ insertIntoIndexTable(signal, firedTriggerData, transPtr, opPtr, indexData);
+ break;
+ }
+ case(TriggerEvent::TE_DELETE): {
+ jam();
+ deleteFromIndexTable(signal, firedTriggerData, transPtr, opPtr, indexData);
+ break;
+ }
+ case(TriggerEvent::TE_UPDATE): {
+ jam();
+ deleteFromIndexTable(signal, firedTriggerData, transPtr, opPtr,
+ indexData, true); // Hold the triggering operation
+ insertIntoIndexTable(signal, firedTriggerData, transPtr, opPtr, indexData);
+ break;
+ }
+ default:
+ ndbrequire(false);
+ }
+}
+
+void Dbtc::releaseFiredTriggerData(DLFifoList<TcFiredTriggerData>* triggers)
+{
+ FiredTriggerPtr trigPtr;
+
+ triggers->first(trigPtr);
+ while (trigPtr.i != RNIL) {
+ jam();
+ // Release trigger records
+
+ AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
+ LocalDataBuffer<11> tmp1(pool, trigPtr.p->keyValues);
+ tmp1.release();
+ LocalDataBuffer<11> tmp2(pool, trigPtr.p->beforeValues);
+ tmp2.release();
+ LocalDataBuffer<11> tmp3(pool, trigPtr.p->afterValues);
+ tmp3.release();
+
+ triggers->next(trigPtr);
+ }
+ triggers->release();
+}
+
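+/**
+ * Build and execute a TCKEYREQ(ZINSERT) against the index table.
+ * The key is the after values of the indexed columns; the attrinfo is the
+ * same values followed by one extra attribute holding the fragment id and
+ * the base table primary key. If any indexed column is NULL the insert is
+ * skipped.
+ */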
+void Dbtc::insertIntoIndexTable(Signal* signal,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr,
+ TcIndexData* indexData,
+ bool holdOperation)
+{
+ ApiConnectRecord* regApiPtr = transPtr->p;
+ TcConnectRecord* opRecord = opPtr->p;
+ TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
+ Uint32 tcKeyRequestInfo = 0;
+ Uint32 tcKeyLength = TcKeyReq::StaticLength;
+ TableRecordPtr indexTabPtr;
+ AttributeBuffer::DataBufferIterator iter;
+ Uint32 attrId = 0;
+ Uint32 keyLength = 0;
+ Uint32 totalPrimaryKeyLength = 0;
+ Uint32 hops;
+
+ indexTabPtr.i = indexData->indexId;
+ ptrCheckGuard(indexTabPtr, ctabrecFilesize, tableRecord);
+ tcKeyReq->apiConnectPtr = transPtr->i;
+ tcKeyReq->senderData = opPtr->i;
+ if (holdOperation) {
+ jam();
+ opRecord->triggerExecutionCount++;
+ }//if
+ // Calculate key length and renumber attribute id:s
+ AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
+ LocalDataBuffer<11> afterValues(pool, firedTriggerData->afterValues);
+ bool skipNull = false;
+ for(bool moreKeyAttrs = afterValues.first(iter); moreKeyAttrs; attrId++) {
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ // Filter out NULL valued attributes
+ if (attrHeader->isNULL()) {
+ skipNull = true;
+ break;
+ }
+ attrHeader->setAttributeId(attrId);
+ keyLength += attrHeader->getDataSize();
+ hops = attrHeader->getHeaderSize() + attrHeader->getDataSize();
+ moreKeyAttrs = afterValues.next(iter, hops);
+ }
+ if (skipNull) {
+ jam();
+ opRecord->triggerExecutionCount--;
+ if (opRecord->triggerExecutionCount == 0) {
+ /*
+ We have completed current trigger execution
+ Continue triggering operation
+ */
+ jam();
+ continueTriggeringOp(signal, opRecord);
+ }//if
+ return;
+ }//if
+
+ // Calculate total length of primary key to be stored in index table
+ LocalDataBuffer<11> keyValues(pool, firedTriggerData->keyValues);
+ for(bool moreAttrData = keyValues.first(iter); moreAttrData; ) {
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ totalPrimaryKeyLength += attrHeader->getDataSize();
+ hops = attrHeader->getHeaderSize() + attrHeader->getDataSize();
+ moreAttrData = keyValues.next(iter, hops);
+ }
+ AttributeHeader pkAttrHeader(attrId, totalPrimaryKeyLength);
+ Uint32 attributesLength = afterValues.getSize() +
+ pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize() + 1;
+
+ TcKeyReq::setKeyLength(tcKeyRequestInfo, keyLength);
+ tcKeyReq->attrLen = attributesLength;
+ tcKeyReq->tableId = indexData->indexId;
+ TcKeyReq::setOperationType(tcKeyRequestInfo, ZINSERT);
+ TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, true);
+ tcKeyReq->tableSchemaVersion = indexTabPtr.p->currentSchemaVersion;
+ tcKeyReq->transId1 = regApiPtr->transid[0];
+ tcKeyReq->transId2 = regApiPtr->transid[1];
+ Uint32 * dataPtr = &tcKeyReq->scanInfo;
+ // Write first part of key in TCKEYREQ
+ Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
+ Uint32 attrBufSize = 5; // Maximum for attrInfo in TCKEYREQ
+ Uint32 dataPos = 0;
+ // Filter out AttributeHeaders since these should not be in the key
+ bool moreKeyData = afterValues.first(iter);
+ Uint32 headerSize = 0, keyAttrSize = 0, dataSize = 0, headAndData = 0;
+
+ while (moreKeyData && (dataPos < keyBufSize)) {
+ /*
+ * While we have not read the complete key
+ * and it still fits in the signal
+ */
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ headerSize = attrHeader->getHeaderSize();
+ keyAttrSize = attrHeader->getDataSize();
+ headAndData = headerSize + attrHeader->getDataSize();
+ // Skip header
+ if (headerSize == 1) {
+ jam();
+ moreKeyData = afterValues.next(iter);
+ } else {
+ jam();
+ moreKeyData = afterValues.next(iter, headerSize - 1);
+ }//if
+ while((keyAttrSize != 0) && (dataPos < keyBufSize)) {
+ // If we have not read complete key
+ jam();
+ *dataPtr++ = *iter.data;
+ dataPos++;
+ keyAttrSize--;
+ moreKeyData = afterValues.next(iter);
+ }
+ if (keyAttrSize != 0) {
+ jam();
+ break;
+ }//if
+ }
+
+ tcKeyLength += dataPos;
+ /*
+ The attrinfo consists of the unique index attributes one by one, with a
+ header for each of them (all contained in the afterValues data structure),
+ plus one extra attribute header followed by the fragment id and then the
+ (compacted) primary key of the base table
+ */
+ if (attributesLength <= attrBufSize) {
+ jam();
+ // ATTRINFO fits in TCKEYREQ
+ // Pack ATTRINFO IN TCKEYREQ as one attribute
+ TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, attributesLength);
+ bool moreAttrData;
+ // Insert primary key attributes (insert after values of primary table)
+ for(moreAttrData = afterValues.first(iter);
+ moreAttrData;
+ moreAttrData = afterValues.next(iter)) {
+ *dataPtr++ = *iter.data;
+ }
+ // Insert attribute values (insert key values of primary table)
+ // as one attribute
+ pkAttrHeader.insertHeader(dataPtr);
+ dataPtr += pkAttrHeader.getHeaderSize();
+ /*
+ Insert fragment id before primary key as part of reference to tuple
+ */
+ *dataPtr++ = firedTriggerData->fragId;
+ moreAttrData = keyValues.first(iter);
+ while(moreAttrData) {
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ headerSize = attrHeader->getHeaderSize();
+ dataSize = attrHeader->getDataSize();
+ // Skip header
+ if (headerSize == 1) {
+ jam();
+ moreAttrData = keyValues.next(iter);
+ } else {
+ jam();
+ moreAttrData = keyValues.next(iter, headerSize - 1);
+ }//if
+ // Copy attribute data
+ while(dataSize-- != 0) {
+ *dataPtr++ = *iter.data;
+ moreAttrData = keyValues.next(iter);
+ }
+ }
+ tcKeyLength += attributesLength;
+ } else {
+ jam();
+ // No ATTRINFO in TCKEYREQ
+ TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 0);
+ }
+ tcKeyReq->requestInfo = tcKeyRequestInfo;
+
+ /**
+ * Fix savepoint id -
+ * fix so that insert has same savepoint id as triggering operation
+ */
+ const Uint32 currSavePointId = regApiPtr->currSavePointId;
+ regApiPtr->currSavePointId = opRecord->savePointId;
+ EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
+ regApiPtr->currSavePointId = currSavePointId;
+ tcConnectptr.p->currentIndexId = indexData->indexId;
+ jamEntry();
+
+ // *********** KEYINFO ***********
+ if (moreKeyData) {
+ jam();
+ // Send KEYINFO sequence
+ KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
+
+ keyInfo->connectPtr = transPtr->i;
+ keyInfo->transId[0] = regApiPtr->transid[0];
+ keyInfo->transId[1] = regApiPtr->transid[1];
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ // Pack any part of a key attribute that did not fit in TCKEYREQ
+ while((keyAttrSize != 0) && (dataPos < KeyInfo::DataLength)) {
+ // If we have not read complete key
+ *dataPtr++ = *iter.data;
+ dataPos++;
+ keyAttrSize--;
+ if (dataPos == KeyInfo::DataLength) {
+ jam();
+ // Flush KEYINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength);
+ jamEntry();
+#endif
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ }
+ moreKeyData = afterValues.next(iter);
+ }
+
+ while(moreKeyData) {
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ headerSize = attrHeader->getHeaderSize();
+ keyAttrSize = attrHeader->getDataSize();
+ headAndData = headerSize + attrHeader->getDataSize();
+ // Skip header
+ if (headerSize == 1) {
+ jam();
+ moreKeyData = afterValues.next(iter);
+ } else {
+ jam();
+ moreKeyData = afterValues.next(iter, headerSize - 1);
+ }//if
+ while (keyAttrSize-- != 0) {
+ *dataPtr++ = *iter.data;
+ dataPos++;
+ if (dataPos == KeyInfo::DataLength) {
+ jam();
+ // Flush KEYINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength);
+ jamEntry();
+#endif
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ }
+ moreKeyData = afterValues.next(iter);
+ }
+ }
+ if (dataPos != 0) {
+ jam();
+ // Flush last KEYINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + dataPos, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + dataPos);
+ jamEntry();
+#endif
+ }
+ }
+
+ // *********** ATTRINFO ***********
+ if (attributesLength > attrBufSize) {
+ jam();
+ // No ATTRINFO in TcKeyReq
+ TcKeyReq::setAIInTcKeyReq(tcKeyReq->requestInfo, 0);
+ // Send ATTRINFO sequence
+ AttrInfo * const attrInfo = (AttrInfo *)signal->getDataPtrSend();
+ Uint32 attrInfoPos = 0;
+
+ attrInfo->connectPtr = transPtr->i;
+ attrInfo->transId[0] = regApiPtr->transid[0];
+ attrInfo->transId[1] = regApiPtr->transid[1];
+ dataPtr = (Uint32 *) &attrInfo->attrData;
+
+ bool moreAttrData;
+ // Insert primary key attributes (insert after values of primary table)
+ for(moreAttrData = afterValues.first(iter);
+ moreAttrData;
+ moreAttrData = afterValues.next(iter)) {
+ *dataPtr++ = *iter.data;
+ attrInfoPos++;
+ if (attrInfoPos == AttrInfo::DataLength) {
+ jam();
+ // Flush ATTRINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + AttrInfo::DataLength, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + AttrInfo::DataLength);
+ jamEntry();
+#endif
+ dataPtr = (Uint32 *) &attrInfo->attrData;
+ attrInfoPos = 0;
+ }
+ }
+ // Insert attribute values (insert key values of primary table)
+ // as one attribute
+ pkAttrHeader.insertHeader(dataPtr);
+ dataPtr += pkAttrHeader.getHeaderSize();
+ attrInfoPos += pkAttrHeader.getHeaderSize();
+ /*
+ Add fragment id before primary key
+ TODO: This code really needs to be made into a long signal
+ to remove this messy code.
+ */
+ if (attrInfoPos == AttrInfo::DataLength)
+ {
+ jam();
+ // Flush ATTRINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + AttrInfo::DataLength, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + AttrInfo::DataLength);
+ jamEntry();
+#endif
+ dataPtr = (Uint32 *) &attrInfo->attrData;
+ attrInfoPos = 0;
+ }
+ attrInfoPos++;
+ *dataPtr++ = firedTriggerData->fragId;
+
+ moreAttrData = keyValues.first(iter);
+ while(moreAttrData) {
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ headerSize = attrHeader->getHeaderSize();
+ dataSize = attrHeader->getDataSize();
+ // Skip header
+ if (headerSize == 1) {
+ jam();
+ moreAttrData = keyValues.next(iter);
+ } else {
+ jam();
+ moreAttrData = keyValues.next(iter, headerSize - 1);
+ }//if
+ while(dataSize-- != 0) { // If we have not read complete key
+ if (attrInfoPos == AttrInfo::DataLength) {
+ jam();
+ // Flush ATTRINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + AttrInfo::DataLength, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + AttrInfo::DataLength);
+ jamEntry();
+#endif
+ dataPtr = (Uint32 *) &attrInfo->attrData;
+ attrInfoPos = 0;
+ }
+ *dataPtr++ = *iter.data;
+ attrInfoPos++;
+ moreAttrData = keyValues.next(iter);
+ }
+ }
+ if (attrInfoPos != 0) {
+ jam();
+ // Flush last ATTRINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + attrInfoPos, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + attrInfoPos);
+ jamEntry();
+#endif
+ }
+ }
+}
+
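+/**
+ * Build and execute a TCKEYREQ(ZDELETE) against the index table, keyed on
+ * the before values of the indexed columns. If any indexed column is NULL
+ * the delete is skipped (NULL keys are never inserted, see
+ * insertIntoIndexTable).
+ */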
+void Dbtc::deleteFromIndexTable(Signal* signal,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr,
+ TcIndexData* indexData,
+ bool holdOperation)
+{
+ ApiConnectRecord* regApiPtr = transPtr->p;
+ TcConnectRecord* opRecord = opPtr->p;
+ TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
+ Uint32 tcKeyRequestInfo = 0;
+ Uint32 tcKeyLength = 12; // Static length
+ TableRecordPtr indexTabPtr;
+ AttributeBuffer::DataBufferIterator iter;
+ Uint32 attrId = 0;
+ Uint32 keyLength = 0;
+ Uint32 hops;
+
+ indexTabPtr.i = indexData->indexId;
+ ptrCheckGuard(indexTabPtr, ctabrecFilesize, tableRecord);
+ tcKeyReq->apiConnectPtr = transPtr->i;
+ tcKeyReq->senderData = opPtr->i;
+ if (holdOperation) {
+ jam();
+ opRecord->triggerExecutionCount++;
+ }//if
+ // Calculate key length and renumber attribute id:s
+ AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
+ LocalDataBuffer<11> beforeValues(pool, firedTriggerData->beforeValues);
+ bool skipNull = false;
+ for(bool moreKeyAttrs = beforeValues.first(iter);
+ (moreKeyAttrs);
+ attrId++) {
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ // Filter out NULL valued attributes
+ if (attrHeader->isNULL()) {
+ skipNull = true;
+ break;
+ }
+ attrHeader->setAttributeId(attrId);
+ keyLength += attrHeader->getDataSize();
+ hops = attrHeader->getHeaderSize() + attrHeader->getDataSize();
+ moreKeyAttrs = beforeValues.next(iter, hops);
+ }
+
+ if (skipNull) {
+ jam();
+ opRecord->triggerExecutionCount--;
+ if (opRecord->triggerExecutionCount == 0) {
+ /*
+ We have completed current trigger execution
+ Continue triggering operation
+ */
+ jam();
+ continueTriggeringOp(signal, opRecord);
+ }//if
+ return;
+ }//if
+
+ TcKeyReq::setKeyLength(tcKeyRequestInfo, keyLength);
+ tcKeyReq->attrLen = 0;
+ tcKeyReq->tableId = indexData->indexId;
+ TcKeyReq::setOperationType(tcKeyRequestInfo, ZDELETE);
+ TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, true);
+ tcKeyReq->tableSchemaVersion = indexTabPtr.p->currentSchemaVersion;
+ tcKeyReq->transId1 = regApiPtr->transid[0];
+ tcKeyReq->transId2 = regApiPtr->transid[1];
+ Uint32 * dataPtr = &tcKeyReq->scanInfo;
+ // Write first part of key in TCKEYREQ
+ Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
+ Uint32 dataPos = 0;
+ // Filter out AttributeHeaders since these should not be in the key
+ bool moreKeyData = beforeValues.first(iter);
+ Uint32 headerSize = 0, keyAttrSize = 0, headAndData = 0;
+
+ while (moreKeyData &&
+ (dataPos < keyBufSize)) {
+ /*
+ If we have not read complete key
+ and it fits in the signal
+ */
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ headerSize = attrHeader->getHeaderSize();
+ keyAttrSize = attrHeader->getDataSize();
+ headAndData = headerSize + attrHeader->getDataSize();
+ // Skip header
+ if (headerSize == 1) {
+ jam();
+ moreKeyData = beforeValues.next(iter);
+ } else {
+ jam();
+ moreKeyData = beforeValues.next(iter, headerSize - 1);
+ }//if
+ while((keyAttrSize != 0) &&
+ (dataPos < keyBufSize)) {
+ // If we have not read complete key
+ jam();
+ *dataPtr++ = *iter.data;
+ dataPos++;
+ keyAttrSize--;
+ moreKeyData = beforeValues.next(iter);
+ }
+ if (keyAttrSize != 0) {
+ jam();
+ break;
+ }//if
+ }
+
+ tcKeyLength += dataPos;
+ tcKeyReq->requestInfo = tcKeyRequestInfo;
+
+ /**
+ * Fix savepoint id -
+ * fix so that delete has same savepoint id as triggering operation
+ */
+ const Uint32 currSavePointId = regApiPtr->currSavePointId;
+ regApiPtr->currSavePointId = opRecord->savePointId;
+ EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
+ regApiPtr->currSavePointId = currSavePointId;
+ tcConnectptr.p->currentIndexId = indexData->indexId;
+ jamEntry();
+
+ // *********** KEYINFO ***********
+ if (moreKeyData) {
+ jam();
+ // Send KEYINFO sequence
+ KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
+
+ keyInfo->connectPtr = transPtr->i;
+ keyInfo->transId[0] = regApiPtr->transid[0];
+ keyInfo->transId[1] = regApiPtr->transid[1];
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ // Pack any part of a key attribute that did not fit in TCKEYREQ
+ while((keyAttrSize != 0) &&
+ (dataPos < KeyInfo::DataLength)) {
+ // If we have not read complete key
+ *dataPtr++ = *iter.data;
+ dataPos++;
+ keyAttrSize--;
+ if (dataPos == KeyInfo::DataLength) {
+ jam();
+ // Flush KEYINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength);
+ jamEntry();
+#endif
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ }
+ moreKeyData = beforeValues.next(iter);
+ }
+
+ while(moreKeyData) {
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ headerSize = attrHeader->getHeaderSize();
+ keyAttrSize = attrHeader->getDataSize();
+ headAndData = headerSize + attrHeader->getDataSize();
+ // Skip header
+ if (headerSize == 1) {
+ jam();
+ moreKeyData = beforeValues.next(iter);
+ } else {
+ jam();
+ moreKeyData = beforeValues.next(iter,
+ headerSize - 1);
+ }//if
+ while (keyAttrSize-- != 0) {
+ *dataPtr++ = *iter.data;
+ dataPos++;
+ if (dataPos == KeyInfo::DataLength) {
+ jam();
+ // Flush KEYINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength);
+ jamEntry();
+#endif
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ }
+ moreKeyData = beforeValues.next(iter);
+ }
+ }
+ if (dataPos != 0) {
+ jam();
+ // Flush last KEYINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + dataPos, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + dataPos);
+ jamEntry();
+#endif
+ }
+ }
+}
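
// The KEYINFO loop in deleteFromIndexTable above, like the ATTRINFO loop at the
// start of this hunk, follows one pattern: copy one word at a time into a
// fixed-size signal section, flush whenever the section fills up, and flush the
// partially filled remainder once at the end. A minimal standalone sketch of that
// pattern, with a plain std::function standing in for sendSignal/EXECUTE_DIRECT
// and an assumed section size of 20 words (the real sizes come from
// KeyInfo::DataLength / AttrInfo::DataLength, not from this sketch):

#include <cstdint>
#include <functional>
#include <vector>

static const uint32_t kSectionWords = 20;        // stand-in for KeyInfo::DataLength

void packAndFlush(const std::vector<uint32_t>& words,
                  const std::function<void(const uint32_t*, uint32_t)>& flush)
{
  uint32_t buffer[kSectionWords];
  uint32_t pos = 0;                              // mirrors dataPos / attrInfoPos
  for (uint32_t w : words) {
    buffer[pos++] = w;
    if (pos == kSectionWords) {                  // section full -> flush it
      flush(buffer, pos);
      pos = 0;
    }
  }
  if (pos != 0)                                  // flush the last, partially filled section
    flush(buffer, pos);
}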
+
+Uint32
+Dbtc::TableRecord::getErrorCode(Uint32 schemaVersion) const {
+ if(!enabled)
+ return ZNO_SUCH_TABLE;
+ if(dropping)
+ return ZDROP_TABLE_IN_PROGRESS;
+ if(schemaVersion != currentSchemaVersion)
+ return ZWRONG_SCHEMA_VERSION_ERROR;
+ ErrorReporter::handleAssert("Dbtc::TableRecord::getErrorCode",
+ __FILE__, __LINE__);
+ return 0;
+}
+
diff --git a/storage/ndb/src/kernel/blocks/dbtc/Makefile.am b/storage/ndb/src/kernel/blocks/dbtc/Makefile.am
new file mode 100644
index 00000000000..4b3b102d8ac
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtc/Makefile.am
@@ -0,0 +1,23 @@
+noinst_LIBRARIES = libdbtc.a
+
+libdbtc_a_SOURCES = DbtcInit.cpp DbtcMain.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdbtc.dsp
+
+libdbtc.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libdbtc_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp b/storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
index 2c62adab3e5..2c62adab3e5 100644
--- a/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
new file mode 100644
index 00000000000..e4dc2fcf2ee
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -0,0 +1,2469 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBTUP_H
+#define DBTUP_H
+
+#include <pc.hpp>
+#include <SimulatedBlock.hpp>
+#include <ndb_limits.h>
+#include <trigger_definitions.h>
+#include <ArrayList.hpp>
+#include <AttributeHeader.hpp>
+#include <Bitmask.hpp>
+#include <signaldata/TupKey.hpp>
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/DropTrig.hpp>
+#include <signaldata/TrigAttrInfo.hpp>
+#include <signaldata/BuildIndx.hpp>
+
+#define ZWORDS_ON_PAGE 8192 /* NUMBER OF WORDS ON A PAGE. */
+#define ZATTRBUF_SIZE 32 /* SIZE OF ATTRIBUTE RECORD BUFFER */
+#define ZMIN_PAGE_LIMIT_TUPKEYREQ 5
+#define ZTUP_VERSION_BITS 15
+
+#ifdef DBTUP_C
+//------------------------------------------------------------------
+// Jam Handling:
+//
+// When DBTUP reports line numbers through jam in the trace files they have
+// to be interpreted: 5024, for example, means line 24 in DbtupCommit.cpp,
+// because 5000 is added to the line number in that file. The following is
+// the exhaustive list of the values added in the various files. ndbrequire
+// and ptrCheckGuard still only report the line number of the file they are
+// currently located in.
+//
+// DbtupExecQuery.cpp 0
+// DbtupBuffer.cpp 2000
+// DbtupRoutines.cpp 3000
+// DbtupCommit.cpp 5000
+// DbtupFixAlloc.cpp 6000
+// DbtupTrigger.cpp 7000
+// DbtupAbort.cpp 9000
+// DbtupLCP.cpp 10000
+// DbtupUndoLog.cpp 12000
+// DbtupPageMap.cpp 14000
+// DbtupPagMan.cpp 16000
+// DbtupStoredProcDef.cpp 18000
+// DbtupMeta.cpp 20000
+// DbtupTabDesMan.cpp 22000
+// DbtupGen.cpp 24000
+// DbtupSystemRestart.cpp 26000
+// DbtupIndex.cpp 28000
+// DbtupDebug.cpp 30000
+// DbtupScan.cpp 32000
+//------------------------------------------------------------------
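
// A minimal standalone sketch of how a jam value from a trace file can be decoded
// with the offset table above (the offsets and file names are copied from the
// comment; the helper itself is illustrative and not part of DBTUP):

#include <utility>

static std::pair<const char*, unsigned> decodeTupJam(unsigned jamValue)
{
  // Offsets from the list above, highest first so the correct file wins.
  static const struct { unsigned offset; const char* file; } table[] = {
    { 32000, "DbtupScan.cpp" },     { 30000, "DbtupDebug.cpp" },
    { 28000, "DbtupIndex.cpp" },    { 26000, "DbtupSystemRestart.cpp" },
    { 24000, "DbtupGen.cpp" },      { 22000, "DbtupTabDesMan.cpp" },
    { 20000, "DbtupMeta.cpp" },     { 18000, "DbtupStoredProcDef.cpp" },
    { 16000, "DbtupPagMan.cpp" },   { 14000, "DbtupPageMap.cpp" },
    { 12000, "DbtupUndoLog.cpp" },  { 10000, "DbtupLCP.cpp" },
    {  9000, "DbtupAbort.cpp" },    {  7000, "DbtupTrigger.cpp" },
    {  6000, "DbtupFixAlloc.cpp" }, {  5000, "DbtupCommit.cpp" },
    {  3000, "DbtupRoutines.cpp" }, {  2000, "DbtupBuffer.cpp" },
    {     0, "DbtupExecQuery.cpp" }
  };
  for (const auto& e : table)
    if (jamValue >= e.offset)
      return { e.file, jamValue - e.offset };
  return { "DbtupExecQuery.cpp", jamValue };   // unreachable; offset 0 always matches
}

// Example: decodeTupJam(5024) yields { "DbtupCommit.cpp", 24 }.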
+
+/*
+2.2 LOCAL SYMBOLS
+-----------------
+*/
+/* ---------------------------------------------------------------- */
+/* S I Z E O F R E C O R D S */
+/* ---------------------------------------------------------------- */
+#define ZNO_OF_ATTRBUFREC 10000 /* SIZE OF ATTRIBUTE INFO FILE */
+#define ZNO_OF_CONCURRENT_OPEN_OP 40 /* NUMBER OF CONCURRENT OPENS */
+#define ZNO_OF_CONCURRENT_WRITE_OP 80 /* NUMBER OF CONCURRENT DISK WRITES*/
+#define ZNO_OF_FRAGOPREC 20 /* NUMBER OF CONCURRENT ADD FRAG. */
+#define ZNO_OF_LCP_REC 10 /* NUMBER OF CONCURRENT CHECKPOINTS*/
+#define TOT_PAGE_RECORD_SPACE 262144 /* SIZE OF PAGE RECORD FILE. */
+#define ZNO_OF_PAGE TOT_PAGE_RECORD_SPACE/ZWORDS_ON_PAGE
+#define ZNO_OF_PAGE_RANGE_REC 128 /* SIZE OF PAGE RANGE FILE */
+#define ZNO_OF_PARALLELL_UNDO_FILES 16 /* NUMBER OF PARALLEL UNDO FILES */
+#define ZNO_OF_RESTART_INFO_REC 10 /* MAXIMUM PARALLELL RESTART INFOS */
+ /* 24 SEGMENTS WITH 8 PAGES IN EACH*/
+ /* PLUS ONE UNDO BUFFER CACHE */
+// Undo record identifiers are 32-bits with page index 13-bits
+#define ZUNDO_RECORD_ID_PAGE_INDEX 13 /* 13 BITS = 8192 WORDS/PAGE */
+#define ZUNDO_RECORD_ID_PAGE_INDEX_MASK (ZWORDS_ON_PAGE - 1) /* 1111111111111 */
+
+// Trigger constants
+#define ZDEFAULT_MAX_NO_TRIGGERS_PER_TABLE 16
+
+/* ---------------------------------------------------------------- */
+// VARIABLE NUMBERS OF PAGE_WORD, UNDO_WORD AND LOGIC_WORD FOR
+// COMMUNICATION WITH FILE SYSTEM
+/* ---------------------------------------------------------------- */
+#define ZBASE_ADDR_PAGE_WORD 1 /* BASE ADDRESS OF PAGE_WORD VAR */
+#define ZBASE_ADDR_UNDO_WORD 2 /* BASE ADDRESS OF UNDO_WORD VAR */
+#define ZBASE_ADDR_LOGIC_WORD 3 /* BASE ADDRESS OF LOGIC_WORD VAR */
+
+/* ---------------------------------------------------------------- */
+// NUMBER OF PAGES SENT TO DISK IN DATA BUFFER AND UNDO BUFFER WHEN
+// OPTIMUM PERFORMANCE IS ACHIEVED.
+/* ---------------------------------------------------------------- */
+#define ZUB_SEGMENT_SIZE 8 /* SEGMENT SIZE OF UNDO BUFFER */
+#define ZDB_SEGMENT_SIZE 8 /* SEGMENT SIZE OF DATA BUFFER */
+
+/* ---------------------------------------------------------------- */
+/* AN ATTRIBUTE MAY BE NULL, DYNAMIC OR NORMAL. A NORMAL ATTRIBUTE  */
+/* IS AN ATTRIBUTE THAT IS NEITHER NULL NOR DYNAMIC. A NULL         */
+/* ATTRIBUTE MAY HAVE NO VALUE. A DYNAMIC ATTRIBUTE IS A NULL       */
+/* ATTRIBUTE THAT DOES NOT HAVE TO BE A MEMBER OF EVERY TUPLE IN A  */
+/* GIVEN TABLE.                                                     */
+/* ---------------------------------------------------------------- */
+/**
+ * #defines moved into include/kernel/Interpreter.hpp
+ */
+#define ZMAX_REGISTER 21
+#define ZINSERT_DELETE 0
+/* ---------------------------------------------------------------- */
+/* THE MINIMUM SIZE OF AN 'EMPTY' TUPLE HEADER IN R-WORDS */
+/* ---------------------------------------------------------------- */
+#define ZTUP_HEAD_MINIMUM_SIZE 2
+ /* THE TUPLE HEADER FIELD 'SIZE OF NULL ATTR. FIELD' SPECIFIES    */
+ /* THE SIZE OF THE TUPLE HEADER FIELD 'NULL ATTR. FIELD'.         */
+ /* THE TUPLE HEADER FIELD 'TYPE' SPECIFIES THE TYPE OF THE TUPLE  */
+ /* HEADER.                                                        */
+ /* TUPLE ATTRIBUTE INDEX CLUSTERS, ATTRIBUTE */
+ /* CLUSTERS AND A DYNAMIC ATTRIBUTE HEADER. */
+ /* IT MAY ALSO CONTAIN SHORT ATTRIBUTES AND */
+ /* POINTERS TO LONG ATTRIBUTE HEADERS. */
+ /* TUPLE ATTRIBUTE INDEX CLUSTERS, ATTRIBUTE */
+ /* CLUSTERS AND A DYNAMIC ATTRIBUTE HEADER. */
+
+#define ZTH_TYPE3 2 /* TUPLE HEADER THAT MAY HAVE A POINTER TO */
+ /* A DYNAMIC ATTRIBUTE HEADER. IT MAY ALSO */
+ /* CONTAIN SHORT ATTRIBUTES AND POINTERS */
+ /* TO LONG ATTRIBUTE HEADERS. */
+
+ /* DATA STRUCTURE TYPES */
+ /* WHEN ATTRIBUTE INFO IS SENT WITH AN ATTRINFO-SIGNAL THE        */
+ /* VARIABLE TYPE IS SPECIFIED. THIS MUST BE DONE TO BE ABLE TO    */
+ /* KNOW HOW MUCH DATA OF AN ATTRIBUTE TO READ FROM ATTRINFO.      */
+#define ZFIXED_ARRAY 2 /* ZFIXED ARRAY FIELD. */
+#define ZNON_ARRAY 1 /* NORMAL FIELD. */
+#define ZVAR_ARRAY 0 /* VARIABLE ARRAY FIELD */
+#define ZNOT_STORE 3 /* THE ATTR IS STORED IN THE INDEX BLOCK */
+#define ZMAX_SMALL_VAR_ARRAY 256
+
+ /* PLEASE OBSERVE THAT THESE CONSTANTS CORRESPOND TO THE NUMBER   */
+ /* OF BITS NEEDED TO REPRESENT THEM   D O   N O T   C H A N G E   */
+#define Z1BIT_VAR 0 /* 1 BIT VARIABLE. */
+#define Z2BIT_VAR 1 /* 2 BIT VARIABLE. */
+#define Z4BIT_VAR 2 /* 4 BIT VARIABLE. */
+#define Z8BIT_VAR 3 /* 8 BIT VARIABLE. */
+#define Z16BIT_VAR 4 /* 16 BIT VARIABLE. */
+#define Z32BIT_VAR 5 /* 32 BIT VARIABLE. */
+#define Z64BIT_VAR 6 /* 64 BIT VARIABLE. */
+#define Z128BIT_VAR 7 /* 128 BIT VARIABLE. */
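
// Judging from the names above, each size code is the base-2 logarithm of the
// variable size in bits (Z1BIT_VAR = 0 -> 1 bit, ..., Z128BIT_VAR = 7 -> 128 bits).
// A minimal illustrative helper showing that relation (not part of the block):

static inline unsigned attrSizeCodeToBits(unsigned sizeCode)
{
  // Valid codes are Z1BIT_VAR (0) .. Z128BIT_VAR (7).
  return 1u << sizeCode;
}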
+
+ /* WHEN A REQUEST CANNOT BE EXECUTED BECAUSE OF AN ERROR, THE     */
+ /* ERROR MUST BE IDENTIFIED BY MEANS OF AN ERROR CODE AND SENT TO */
+ /* THE REQUESTER. */
+#define ZGET_OPREC_ERROR 804 // TUP_SEIZEREF
+
+#define ZEXIST_FRAG_ERROR 816 // Add fragment
+#define ZFULL_FRAGRECORD_ERROR 817 // Add fragment
+#define ZNO_FREE_PAGE_RANGE_ERROR 818 // Add fragment
+#define ZNOFREE_FRAGOP_ERROR 830 // Add fragment
+#define ZTOO_LARGE_TUPLE_ERROR 851 // Add fragment
+#define ZNO_FREE_TAB_ENTRY_ERROR 852 // Add fragment
+#define ZNO_PAGES_ALLOCATED_ERROR 881 // Add fragment
+
+#define ZGET_REALPID_ERROR 809
+#define ZNOT_IMPLEMENTED_ERROR 812
+#define ZSEIZE_ATTRINBUFREC_ERROR 805
+#define ZTOO_MUCH_ATTRINFO_ERROR 823
+#define ZMEM_NOTABDESCR_ERROR 826
+#define ZMEM_NOMEM_ERROR 827
+#define ZAI_INCONSISTENCY_ERROR 829
+#define ZNO_ILLEGAL_NULL_ATTR 839
+#define ZNOT_NULL_ATTR 840
+#define ZNO_INSTRUCTION_ERROR 871
+#define ZOUTSIDE_OF_PROGRAM_ERROR 876
+#define ZSTORED_PROC_ID_ERROR 877
+#define ZREGISTER_INIT_ERROR 878
+#define ZATTRIBUTE_ID_ERROR 879
+#define ZTRY_TO_READ_TOO_MUCH_ERROR 880
+#define ZTOTAL_LEN_ERROR 882
+#define ZATTR_INTERPRETER_ERROR 883
+#define ZSTACK_OVERFLOW_ERROR 884
+#define ZSTACK_UNDERFLOW_ERROR 885
+#define ZTOO_MANY_INSTRUCTIONS_ERROR 886
+#define ZTRY_TO_UPDATE_ERROR 888
+#define ZCALL_ERROR 890
+#define ZTEMPORARY_RESOURCE_FAILURE 891
+
+#define ZSTORED_SEIZE_ATTRINBUFREC_ERROR 873 // Part of Scan
+
+#define ZREAD_ONLY_CONSTRAINT_VIOLATION 893
+#define ZVAR_SIZED_NOT_SUPPORTED 894
+#define ZINCONSISTENT_NULL_ATTRIBUTE_COUNT 895
+#define ZTUPLE_CORRUPTED_ERROR 896
+#define ZTRY_UPDATE_PRIMARY_KEY 897
+#define ZMUST_BE_ABORTED_ERROR 898
+#define ZTUPLE_DELETED_ERROR 626
+#define ZINSERT_ERROR 630
+
+#define ZINVALID_CHAR_FORMAT 744
+
+
+ /* SOME WORD POSITIONS OF FIELDS IN SOME HEADERS */
+#define ZPAGE_STATE_POS 0 /* POSITION OF PAGE STATE */
+#define ZPAGE_NEXT_POS 1 /* POSITION OF THE NEXT POINTER WHEN IN FREELIST */
+#define ZPAGE_PREV_POS 2 /* POSITION OF THE PREVIOUS POINTER WHEN IN FREELIST */
+#define ZFREELIST_HEADER_POS 3 /* POSITION OF THE FIRST FREELIST */
+#define ZPAGE_FRAG_PAGE_ID_POS 4 /* POSITION OF FRAG PAGE ID WHEN USED*/
+#define ZPAGE_NEXT_CLUST_POS 5 /* POSITION OF NEXT FREE SET OF PAGES */
+#define ZPAGE_FIRST_CLUST_POS 2 /* POSITION OF THE POINTER TO THE FIRST PAGE IN A CLUSTER */
+#define ZPAGE_LAST_CLUST_POS 6 /* POSITION OF THE POINTER TO THE LAST PAGE IN A CLUSTER */
+#define ZPAGE_PREV_CLUST_POS 7 /* POSITION OF THE PREVIOUS POINTER */
+#define ZPAGE_HEADER_SIZE 32 /* NUMBER OF WORDS IN MEM PAGEHEADER */
+#define ZDISK_PAGE_HEADER_SIZE 32 /* NUMBER OF WORDS IN DISK PAGEHEADER */
+#define ZNO_OF_FREE_BLOCKS 3 /* NO OF FREE BLOCK IN THE DISK PAGE */
+#define ZDISK_PAGE_ID 8 /* ID OF THE PAGE ON THE DISK */
+#define ZBLOCK_LIST 9
+#define ZCOPY_OF_PAGE 10
+#define ZPAGE_PHYSICAL_INDEX 11
+#define ZNEXT_IN_PAGE_USED_LIST 12
+#define ZPREV_IN_PAGE_USED_LIST 13
+#define ZDISK_USED_TYPE 14
+#define ZFREE_COMMON 1 /* PAGE STATE, PAGE IN COMMON AREA */
+#define ZEMPTY_MM 2 /* PAGE STATE, PAGE IN EMPTY LIST */
+#define ZTH_MM_FREE 3 /* PAGE STATE, TUPLE HEADER PAGE WITH FREE AREA */
+#define ZTH_MM_FULL 4 /* PAGE STATE, TUPLE HEADER PAGE WHICH IS FULL */
+#define ZAC_MM_FREE 5 /* PAGE STATE, ATTRIBUTE CLUSTER PAGE WITH FREE AREA */
+#define ZTH_MM_FREE_COPY 7 /* PAGE STATE, TH COPY PAGE WITH FREE AREA */
+#define ZTH_MM_FULL_COPY 8 /* PAGE STATE, TH COPY PAGE WHICH IS FULL */
+#define ZAC_MM_FREE_COPY 9 /* PAGE STATE, AC COPY PAGE WITH FREE AREA */
+#define ZMAX_NO_COPY_PAGES 4 /* THE MAXIMUM NUMBER OF COPY PAGES ALLOWED PER FRAGMENT */
+
+ /* CONSTANTS USED TO HANDLE TABLE DESCRIPTOR RECORDS */
+ /* ALL POSITIONS AND SIZES ARE BASED ON R-WORDS (32-BIT ON APZ 212) */
+#define ZTD_HEADER 0 /* HEADER POSITION */
+#define ZTD_DATASIZE 1 /* SIZE OF THE DATA IN THIS CHUNK */
+#define ZTD_SIZE 2 /* TOTAL SIZE OF TABLE DESCRIPTOR */
+
+ /* TRAILER POSITIONS FROM END OF TABLE DESCRIPTOR RECORD */
+#define ZTD_TR_SIZE 1 /* SIZE DESCRIPTOR POS FROM END+1 */
+#define ZTD_TR_TYPE 2
+#define ZTD_TRAILER_SIZE 2 /* TOTAL SIZE OF TABLE TRAILER */
+#define ZAD_SIZE 2 /* TOTAL SIZE OF ATTR DESCRIPTOR */
+#define ZAD_LOG_SIZE 1 /* TWO LOG OF TOTAL SIZE OF ATTR DESCRIPTOR */
+
+ /* CONSTANTS USED TO HANDLE TABLE DESCRIPTOR AS A FREELIST */
+#define ZTD_FL_HEADER 0 /* HEADER POSITION */
+#define ZTD_FL_SIZE 1 /* TOTAL SIZE OF THIS FREELIST ENTRY */
+#define ZTD_FL_PREV 2 /* PREVIOUS RECORD IN FREELIST */
+#define ZTD_FL_NEXT 3 /* NEXT RECORD IN FREELIST */
+#define ZTD_FREE_SIZE 16 /* SIZE NEEDED TO HOLD ONE FL ENTRY */
+
+ /* CONSTANTS USED IN LSB OF TABLE DESCRIPTOR HEADER DESCRIBING USAGE */
+#define ZTD_TYPE_FREE 0 /* RECORD LINKED INTO FREELIST */
+#define ZTD_TYPE_NORMAL 1 /* RECORD USED AS TABLE DESCRIPTOR */
+ /* ATTRIBUTE OPERATION CONSTANTS */
+#define ZLEAF 1
+#define ZNON_LEAF 2
+
+ /* ATTRINBUFREC VARIABLE POSITIONS. */
+#define ZBUF_PREV 29 /* POSITION OF 'PREV'-VARIABLE (USED BY INTERPRETED EXEC) */
+#define ZBUF_DATA_LEN 30 /* POSITION OF 'DATA LENGTH'-VARIABLE. */
+#define ZBUF_NEXT 31 /* POSITION OF 'NEXT'-VARIABLE. */
+#define ZSAVE_BUF_NEXT 28
+#define ZSAVE_BUF_DATA_LEN 27
+
+ /* RETURN POINTS. */
+ /* RESTART PHASES */
+#define ZSTARTPHASE1 1
+#define ZSTARTPHASE2 2
+#define ZSTARTPHASE3 3
+#define ZSTARTPHASE4 4
+#define ZSTARTPHASE6 6
+
+#define ZADDFRAG 0
+
+ /* CHECKPOINT RECORD TYPES */
+#define ZLCPR_TYPE_INSERT_TH 0 /* INSERT TUPLE HEADER */
+#define ZLCPR_TYPE_DELETE_TH 1 /* DELETE TUPLE HEADER */
+#define ZLCPR_TYPE_UPDATE_TH 2 /* DON'T CREATE IT, JUST UPDATE */
+#define ZLCPR_TYPE_INSERT_TH_NO_DATA 3 /* INSERT TUPLE HEADER */
+#define ZLCPR_ABORT_UPDATE 4 /* UNDO AN UPDATE OPERATION THAT WAS ACTIVE IN LCP */
+#define ZLCPR_ABORT_INSERT 5 /* UNDO AN INSERT OPERATION THAT WAS ACTIVE IN LCP */
+#define ZTABLE_DESCRIPTOR 6 /* TABLE DESCRIPTOR */
+#define ZINDICATE_NO_OP_ACTIVE 7 /* ENSURE THAT NO OPERATION ACTIVE AFTER RESTART */
+#define ZLCPR_UNDO_LOG_PAGE_HEADER 8 /* CHANGE IN PAGE HEADER IS UNDO LOGGED */
+#define ZLCPR_TYPE_UPDATE_GCI 9 /* Update GCI at commit time */
+#define ZNO_CHECKPOINT_RECORDS 10 /* NUMBER OF CHECKPOINTRECORD TYPES */
+
+ /* RESULT CODES */
+ /* ELEMENT POSITIONS IN SYSTEM RESTART INFO PAGE OF THE DATA FILE */
+#define ZSRI_NO_OF_FRAG_PAGES_POS 10 /* NUMBER OF FRAGMENT PAGES WHEN CHECKPOINT STARTED */
+#define ZSRI_TUP_RESERVED_SIZE_POS 11 /* RESERVED SIZE OF THE TUPLE WHEN CP STARTED */
+#define ZSRI_TUP_FIXED_AREA_POS 12 /* SIZE OF THE TUPLE FIXED AREA WHEN CP STARTED */
+#define ZSRI_TAB_DESCR_SIZE 13 /* SIZE OF THE TABLE DESCRIPTOR WHEN CP STARTED */
+#define ZSRI_NO_OF_ATTRIBUTES_POS 14 /* NUMBER OF ATTRIBUTES */
+#define ZSRI_UNDO_LOG_END_REC_ID 15 /* LAST UNDO LOG RECORD ID FOR THIS CHECKPOINT */
+#define ZSRI_UNDO_LOG_END_PAGE_ID 16 /* LAST USED LOG PAGE ID FOR THIS CHECKPOINT */
+#define ZSRI_TH_FREE_FIRST 17 /* FIRST FREE PAGE OF TUPLE HEADERS */
+#define ZSRI_TH_FREE_COPY_FIRST 18 /* FIRST FREE PAGE OF TUPLE HEADER COPIES */
+#define ZSRI_EMPTY_PRIM_PAGE 27 /* FIRST EMPTY PAGE */
+#define ZSRI_NO_COPY_PAGES_ALLOC 28 /* NO COPY PAGES IN FRAGMENT AT LOCAL CHECKPOINT */
+#define ZSRI_UNDO_FILE_VER 29 /* CHECK POINT ID OF THE UNDO FILE */
+#define ZSRI_NO_OF_INDEX_ATTR 30 /* No of index attributes */
+#define ZNO_OF_PAGES_CLUSTER_REC 0
+
+//------------------------------------------------------------
+// TUP_CONTINUEB codes
+//------------------------------------------------------------
+#define ZSTART_EXEC_UNDO_LOG 0
+#define ZCONT_START_SAVE_CL 1
+#define ZCONT_SAVE_DP 2
+#define ZCONT_EXECUTE_LC 3
+#define ZCONT_LOAD_DP 4
+#define ZLOAD_BAL_LCP_TIMER 5
+#define ZINITIALISE_RECORDS 6
+#define ZREL_FRAG 7
+#define ZREPORT_MEMORY_USAGE 8
+#define ZBUILD_INDEX 9
+
+#define ZINDEX_STORAGE 0
+#define ZDATA_WORD_AT_DISK_PAGE 2030
+#define ZALLOC_DISK_PAGE_LAST_INDEX 2047
+#define ZWORD_IN_BLOCK 127 /* NO OF WORD IN A BLOCK */
+#define ZNO_DISK_PAGES_FILE_REC 100
+#define ZMASK_PAGE_INDEX 0x7ff
+#define ZBIT_PAGE_INDEX 11 /* 8 KBYT PAGE = 2048 WORDS */
+#define ZSCAN_PROCEDURE 0
+#define ZCOPY_PROCEDURE 2
+#define ZSTORED_PROCEDURE_DELETE 3
+#define ZSTORED_PROCEDURE_FREE 0xffff
+#define ZMIN_PAGE_LIMIT_TUP_COMMITREQ 2
+#define ZUNDO_PAGE_HEADER_SIZE 2 /* SIZE OF UNDO PAGE HEADER */
+#endif
+
+class Dbtup: public SimulatedBlock {
+public:
+
+ typedef bool (Dbtup::* ReadFunction)(Uint32*,
+ AttributeHeader*,
+ Uint32,
+ Uint32);
+ typedef bool (Dbtup::* UpdateFunction)(Uint32*,
+ Uint32,
+ Uint32);
+// State values
+enum State {
+ NOT_INITIALIZED = 0,
+ COMMON_AREA_PAGES = 1,
+ UNDO_RESTART_PAGES = 2,
+ UNDO_PAGES = 3,
+ READ_ONE_PAGE = 4,
+ CHECKPOINT_DATA_READ = 7,
+ CHECKPOINT_DATA_READ_PAGE_ZERO = 8,
+ CHECKPOINT_DATA_WRITE = 9,
+ CHECKPOINT_DATA_WRITE_LAST = 10,
+ CHECKPOINT_DATA_WRITE_FLUSH = 11,
+ CHECKPOINT_UNDO_READ = 12,
+ CHECKPOINT_UNDO_READ_FIRST = 13,
+ CHECKPOINT_UNDO_WRITE = 14,
+ CHECKPOINT_UNDO_WRITE_FLUSH = 15,
+ CHECKPOINT_TD_READ = 16,
+ IDLE = 17,
+ ACTIVE = 18,
+ SYSTEM_RESTART = 19,
+ NO_OTHER_OP = 20,
+ COMMIT_DELETE = 21,
+ TO_BE_COMMITTED = 22,
+ ABORTED = 23,
+ ALREADY_ABORTED_INSERT = 24,
+ ALREADY_ABORTED = 25,
+ ABORT_INSERT = 26,
+ ABORT_UPDATE = 27,
+ INIT = 28,
+ INITIAL_READ = 29,
+ INTERPRETED_EXECUTION = 30,
+ FINAL_READ = 31,
+ FINAL_UPDATE = 32,
+ DISCONNECTED = 33,
+ DEFINED = 34,
+ ERROR_WAIT_TUPKEYREQ = 35,
+ STARTED = 36,
+ NOT_DEFINED = 37,
+ COMPLETED = 38,
+ WAIT_ABORT = 39,
+ NORMAL_PAGE = 40,
+ COPY_PAGE = 41,
+ DELETE_BLOCK = 42,
+ WAIT_STORED_PROCEDURE_ATTR_INFO = 43,
+ DATA_FILE_READ = 45,
+ DATA_FILE_WRITE = 46,
+ LCP_DATA_FILE_READ = 47,
+ LCP_DATA_FILE_WRITE = 48,
+ LCP_DATA_FILE_WRITE_WITH_UNDO = 49,
+ LCP_DATA_FILE_CLOSE = 50,
+ LCP_UNDO_FILE_READ = 51,
+ LCP_UNDO_FILE_CLOSE = 52,
+ LCP_UNDO_FILE_WRITE = 53,
+ OPENING_DATA_FILE = 54,
+ INITIATING_RESTART_INFO = 55,
+ INITIATING_FRAGMENT = 56,
+ OPENING_UNDO_FILE = 57,
+ READING_RESTART_INFO = 58,
+ INIT_UNDO_SEGMENTS = 59,
+ READING_TAB_DESCR = 60,
+ READING_DATA_PAGES = 61,
+ WAIT_COPY_PROCEDURE = 62,
+ TOO_MUCH_AI = 63,
+ SAME_PAGE = 64,
+ DEFINING = 65,
+ TUPLE_BLOCKED = 66,
+ ERROR_WAIT_STORED_PROCREQ = 67
+};
+
+// Records
+/* ************** ATTRIBUTE INFO BUFFER RECORD ****************** */
+/* THIS RECORD IS USED AS A BUFFER FOR INCOMING AND OUTGOING DATA */
+/* ************************************************************** */
+struct Attrbufrec {
+ Uint32 attrbuf[ZATTRBUF_SIZE];
+}; /* p2c: size = 128 bytes */
+
+typedef Ptr<Attrbufrec> AttrbufrecPtr;
+
+/* ********** CHECKPOINT INFORMATION ************ */
+/* THIS RECORD HOLDS INFORMATION NEEDED TO */
+/* PERFORM A CHECKPOINT. IT'S POSSIBLE TO RUN */
+/* MULTIPLE CHECKPOINTS AT A TIME. THIS RECORD */
+/* MAKES IT POSSIBLE TO DISTINGUISH BETWEEN THE */
+/* DIFFERENT CHECKPOINTS. */
+/* ********************************************** */
+struct CheckpointInfo {
+ Uint32 lcpNextRec; /* NEXT RECORD IN FREELIST */
+ Uint32 lcpCheckpointVersion; /* VERSION OF THE CHECKPOINT */
+ Uint32 lcpLocalLogInfoP; /* POINTER TO A LOCAL LOG INFO RECORD */
+ Uint32 lcpUserptr; /* USERPOINTER TO THE BLOCK REQUESTING THE CP */
+ Uint32 lcpFragmentP; /* FRAGMENT POINTER TO WHICH THE CHECKPOINT APPLIES */
+ Uint32 lcpFragmentId; /* FRAGMENT ID */
+ Uint32 lcpTabPtr; /* TABLE POINTER */
+ Uint32 lcpDataBufferSegmentP; /* POINTER TO A DISK BUFFER SEGMENT POINTER (DATA) */
+ Uint32 lcpDataFileHandle; /* FILE HANDLES FOR DATA FILE. LOG FILE HANDLE IN LOCAL_LOG_INFO_RECORD */
+ /* FILE HANDLE TO THE OPEN DATA FILE */
+ Uint32 lcpNoOfPages;
+ Uint32 lcpThFreeFirst;
+ Uint32 lcpThFreeCopyFirst;
+ Uint32 lcpEmptyPrimPage;
+ Uint32 lcpNoCopyPagesAlloc;
+ Uint32 lcpTmpOperPtr; /* TEMPORARY STORAGE OF OPER_PTR DURING SAVE */
+ BlockReference lcpBlockref; /* BLOCKREFERENCE TO THE BLOCK REQUESTING THE CP */
+};
+typedef Ptr<CheckpointInfo> CheckpointInfoPtr;
+
+/* *********** DISK BUFFER SEGMENT INFO ********* */
+/* THIS RECORD HOLDS INFORMATION NEEDED DURING */
+/* A WRITE OF THE DATA BUFFER TO DISK. WHEN THE */
+/* WRITE SIGNAL IS SENT A POINTER TO THIS RECORD */
+/* IS INCLUDED. WHEN THE WRITE IS COMPLETED AND */
+/* CONFIRMED THE PTR TO THIS RECORD IS RETURNED */
+/* AND THE BUFFER PAGES COULD EASILY BE LOCATED */
+/* AND DEALLOCATED. THE CHECKPOINT_INFO_VERSION */
+/* KEEPS TRACK OF THE CHECKPOINT_INFO_RECORD THAT */
+/* INITIATED THE WRITE AND THE CP_PAGE_TO_DISK */
+/* ELEMENT COULD BE INCREASED BY THE NUMBER OF */
+/* PAGES WRITTEN. */
+/* ********************************************** */
+struct DiskBufferSegmentInfo {
+ Uint32 pdxDataPage[16]; /* ARRAY OF DATA BUFFER PAGES */
+ Uint32 pdxUndoBufferSet[2];
+ Uint32 pdxNextRec;
+ State pdxBuffertype;
+ State pdxOperation;
+ /*---------------------------------------------------------------------------*/
+ /* PDX_FLAGS BITS AND THEIR USAGE: */
+ /* BIT 0 1 COMMENT */
+ /*---------------------------------------------------------------------------*/
+ /* 0 SEGMENT INVALID SEGMENT VALID USED DURING READS */
+ /* 1-15 NOT USED */
+ /*---------------------------------------------------------------------------*/
+ Uint32 pdxCheckpointInfoP; /* USED DURING LOCAL CHKP */
+ Uint32 pdxRestartInfoP; /* USED DURING RESTART */
+ Uint32 pdxLocalLogInfoP; /* POINTS TO A LOCAL LOG INFO */
+ Uint32 pdxFilePage; /* START PAGE IN FILE */
+ Uint32 pdxNumDataPages; /* NUMBER OF DATA PAGES */
+};
+typedef Ptr<DiskBufferSegmentInfo> DiskBufferSegmentInfoPtr;
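
// The record above acts as a cookie for an asynchronous disk write: its index is
// carried with the write request and echoed back in the confirmation, so the buffer
// pages belonging to that write can be located and released. A minimal standalone
// model of that bookkeeping (the class, names and the std::map are illustrative
// stand-ins, not the real NDB file-system protocol):

#include <cstdint>
#include <map>
#include <vector>

struct SegmentModel {
  std::vector<uint32_t> pages;   // corresponds to pdxDataPage[] / pdxNumDataPages
};

class DiskWriteTracker {
public:
  // Seize a segment record before issuing the write; return its index (the cookie).
  uint32_t startWrite(std::vector<uint32_t> pages) {
    const uint32_t idx = m_next++;
    m_outstanding[idx] = SegmentModel{ std::move(pages) };
    return idx;
  }
  // On the confirmation the cookie identifies the record; its pages can now be
  // released and the record returned to the free list.
  void writeConfirmed(uint32_t idx) {
    m_outstanding.erase(idx);
  }
private:
  uint32_t m_next = 0;
  std::map<uint32_t, SegmentModel> m_outstanding;
};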
+
+struct Fragoperrec {
+ bool definingFragment;
+ Uint32 nextFragoprec;
+ Uint32 lqhPtrFrag;
+ Uint32 fragidFrag;
+ Uint32 tableidFrag;
+ Uint32 fragPointer;
+ Uint32 attributeCount;
+ Uint32 currNullBit;
+ Uint32 noOfNullBits;
+ Uint32 noOfNewAttrCount;
+ Uint32 charsetIndex;
+ BlockReference lqhBlockrefFrag;
+ bool inUse;
+};
+typedef Ptr<Fragoperrec> FragoperrecPtr;
+
+ // Position for use by scan
+ struct PagePos {
+ Uint32 m_fragId; // "base" fragment id
+ Uint32 m_fragBit; // two fragments in 5.0
+ Uint32 m_pageId;
+ Uint32 m_tupleNo;
+ bool m_match;
+ };
+
+ // Tup scan op (compare Dbtux::ScanOp)
+ struct ScanOp {
+ enum {
+ Undef = 0,
+ First = 1, // before first entry
+ Locked = 4, // at current entry (no lock needed)
+ Next = 5, // looking for next entry
+ Last = 6, // after last entry
+ Invalid = 9 // cannot return REF to LQH currently
+ };
+ Uint16 m_state;
+ Uint16 m_lockwait; // unused
+ Uint32 m_userPtr; // scanptr.i in LQH
+ Uint32 m_userRef;
+ Uint32 m_tableId;
+ Uint32 m_fragId; // "base" fragment id
+ Uint32 m_fragPtrI[2];
+ Uint32 m_transId1;
+ Uint32 m_transId2;
+ PagePos m_scanPos;
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ };
+ typedef Ptr<ScanOp> ScanOpPtr;
+ ArrayPool<ScanOp> c_scanOpPool;
+
+ void scanFirst(Signal* signal, ScanOpPtr scanPtr);
+ void scanNext(Signal* signal, ScanOpPtr scanPtr);
+ void scanClose(Signal* signal, ScanOpPtr scanPtr);
+ void releaseScanOp(ScanOpPtr& scanPtr);
+
+struct Fragrecord {
+ Uint32 nextStartRange;
+ Uint32 currentPageRange;
+ Uint32 rootPageRange;
+ Uint32 noOfPages;
+ Uint32 emptyPrimPage;
+
+ Uint32 firstusedOprec;
+ Uint32 lastusedOprec;
+
+ Uint32 thFreeFirst;
+ Uint32 thFreeCopyFirst;
+ Uint32 noCopyPagesAlloc;
+
+ Uint32 checkpointVersion;
+ Uint32 minPageNotWrittenInCheckpoint;
+ Uint32 maxPageWrittenInCheckpoint;
+ State fragStatus;
+ Uint32 fragTableId;
+ Uint32 fragmentId;
+ Uint32 nextfreefrag;
+
+ DLList<ScanOp> m_scanList;
+ Fragrecord(ArrayPool<ScanOp> & scanOpPool) : m_scanList(scanOpPool) {}
+};
+typedef Ptr<Fragrecord> FragrecordPtr;
+
+ /* ************ LOCAL LOG FILE INFO ************* */
+ /* THIS RECORD HOLDS INFORMATION NEEDED DURING */
+ /* CHECKPOINT AND RESTART. THERE ARE FOUR */
+ /* PARALLEL UNDO LOG FILES, EACH ONE REPRESENTED  */
+ /* BY AN ENTITY OF THIS RECORD. EACH FILE IS      */
+ /* SHARED BETWEEN FOUR TABLES AND HAS ITS OWN     */
+ /* PAGE POINTERS AND WORD POINTERS.               */
+ /* ********************************************** */
+struct LocalLogInfo {
+ Uint32 lliActiveLcp; /* NUMBER OF ACTIVE LOCAL CHECKPOINTS ON THIS FILE */
+ Uint32 lliEndPageId; /* PAGE IDENTIFIER OF LAST PAGE WITH LOG DATA */
+ Uint32 lliPrevRecordId; /* PREVIOUS RECORD IN THIS LOGFILE */
+ Uint32 lliLogFilePage; /* PAGE IN LOGFILE */
+ Uint32 lliNumFragments; /* NO OF FRAGMENTS RESTARTING FROM THIS LOCAL LOG */
+ Uint32 lliUndoBufferSegmentP; /* POINTER TO A DISK BUFFER SEGMENT POINTER (UNDO) */
+ Uint32 lliUndoFileHandle; /* FILE HANDLE OF UNDO LOG FILE */
+ Uint32 lliUndoPage; /* UNDO PAGE IN BUFFER */
+ Uint32 lliUndoWord;
+ Uint32 lliUndoPagesToDiskWithoutSynch;
+};
+typedef Ptr<LocalLogInfo> LocalLogInfoPtr;
+
+struct Operationrec {
+// Easy to remove (2 words)
+ Uint32 attroutbufLen;
+ Uint32 logSize;
+
+// Needed (20 words)
+ State tupleState;
+ Uint32 prevActiveOp;
+ Uint32 nextActiveOp;
+ Uint32 nextOprecInList;
+ Uint32 prevOprecInList;
+ Uint32 tableRef;
+ Uint32 fragId;
+ Uint32 fragmentPtr;
+ Uint32 fragPageId;
+ Uint32 realPageId;
+ bool undoLogged;
+ Uint32 realPageIdC;
+ Uint32 fragPageIdC;
+ Uint32 firstAttrinbufrec;
+ Uint32 lastAttrinbufrec;
+ Uint32 attrinbufLen;
+ Uint32 currentAttrinbufLen;
+ Uint32 userpointer;
+ State transstate;
+ Uint32 savePointId;
+
+// Easy to remove (3 words)
+ Uint32 tcOperationPtr;
+ Uint32 transid1;
+ Uint32 transid2;
+
+// Needed (2 words)
+ Uint16 pageIndex;
+ Uint16 pageOffset;
+ Uint16 pageOffsetC;
+ Uint16 pageIndexC;
+// Hard to remove
+ Uint16 tupVersion;
+
+// Easy to remove (1.5 word)
+ BlockReference recBlockref;
+ BlockReference userblockref;
+ Uint16 storedProcedureId;
+
+ Uint8 inFragList;
+ Uint8 inActiveOpList;
+ Uint8 deleteInsertFlag;
+
+// Needed (1 word)
+ Uint8 dirtyOp;
+ Uint8 interpretedExec;
+ Uint8 optype;
+ Uint8 opSimple;
+
+// Used by triggers
+ Uint32 primaryReplica;
+ BlockReference coordinatorTC;
+ Uint32 tcOpIndex;
+ Uint32 gci;
+ Uint32 noFiredTriggers;
+ union {
+ Uint32 hashValue; // only used in TUP_COMMITREQ
+ Uint32 lastRow;
+ };
+ Bitmask<MAXNROFATTRIBUTESINWORDS> changeMask;
+};
+typedef Ptr<Operationrec> OperationrecPtr;
+
+struct Page {
+ Uint32 pageWord[ZWORDS_ON_PAGE];
+};
+typedef Ptr<Page> PagePtr;
+
+ /* ****************************** PAGE RANGE RECORD ************************** */
+ /* PAGE RANGES AND BASE PAGE ID. EACH RANGE HAS A CORRESPONDING BASE PAGE ID */
+ /* THAT IS USED TO CALCULATE REAL PAGE ID FROM A FRAGMENT PAGE ID AND A TABLE */
+ /* REFERENCE. */
+ /* THE PAGE RANGES ARE ORGANISED IN A B-TREE FASHION WHERE THE VARIABLE TYPE */
+ /* SPECIFIES IF A LEAF NODE HAS BEEN REACHED. IF A LEAF NODE HAS BEEN REACHED */
+ /* THEN BASE_PAGE_ID IS THE BASE_PAGE_ID OF THE SET OF PAGES THAT WAS */
+ /* ALLOCATED IN THAT RANGE. OTHERWISE BASE_PAGE_ID IS THE POINTER TO THE NEXT */
+ /* PAGE_RANGE RECORD. */
+ /* *************************************************************************** */
+struct PageRange {
+ Uint32 startRange[4]; /* START OF RANGE */
+ Uint32 endRange[4]; /* END OF THIS RANGE */
+ Uint32 basePageId[4]; /* BASE PAGE ID. */
+/*---- VARIABLE BASE_PAGE_ID2 (4) 8 DS NEEDED WHEN SUPPORTING 40 BIT PAGE ID -------*/
+ Uint8 type[4]; /* TYPE OF BASE PAGE ID */
+ Uint32 nextFree; /* NEXT FREE PAGE RANGE RECORD */
+ Uint32 parentPtr; /* THE PARENT TO THE PAGE RANGE REC IN THE B-TREE */
+ Uint8 currentIndexPos;
+};
+typedef Ptr<PageRange> PageRangePtr;
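
// The comment above describes a small B-tree: each PageRange node holds up to four
// [startRange, endRange] intervals, and type[] tells whether basePageId[] is a leaf
// base page id or a pointer to a child PageRange record. A minimal standalone sketch
// of the lookup from logical fragment page id to real page id under those assumptions
// (ZLEAF/ZNON_LEAF as defined earlier; the node layout here is a simplified stand-in):

#include <cstdint>
#include <vector>

struct PageRangeNode {
  uint32_t startRange[4];
  uint32_t endRange[4];
  uint32_t basePageId[4];   // leaf: base page id; non-leaf: index of child node
  uint8_t  type[4];         // ZLEAF or ZNON_LEAF
};

// Returns ~0u if the fragment page id is not covered by any range.
uint32_t logicalToRealPageId(const std::vector<PageRangeNode>& nodes,
                             uint32_t rootIndex, uint32_t fragPageId)
{
  uint32_t current = rootIndex;
  for (;;) {
    const PageRangeNode& node = nodes[current];
    bool descended = false;
    for (int i = 0; i < 4; i++) {
      if (fragPageId >= node.startRange[i] && fragPageId <= node.endRange[i]) {
        if (node.type[i] == 1 /* ZLEAF */)
          return node.basePageId[i] + (fragPageId - node.startRange[i]);
        current = node.basePageId[i];   // ZNON_LEAF: follow pointer to child node
        descended = true;
        break;
      }
    }
    if (!descended)
      return ~0u;                       // not covered by this node
  }
}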
+
+ /* *********** PENDING FILE OPEN INFO ********** */
+ /* THIS RECORD HOLDS INFORMATION NEEDED DURING */
+ /* A FILE OPEN OPERATION */
+ /* IF THE FILE OPEN IS A PART OF A CHECKPOINT THE */
+ /* CHECKPOINT_INFO_P WILL HOLD A POINTER TO THE */
+ /* CHECKPOINT_INFOR_PTR RECORD */
+ /* IF IT IS A PART OF RESTART THE PFO_RESTART_INFO*/
+ /* ELEMENT WILL POINT TO A RESTART INFO RECORD */
+ /* ********************************************** */
+struct PendingFileOpenInfo {
+ Uint32 pfoNextRec;
+ State pfoOpenType;
+ Uint32 pfoCheckpointInfoP;
+ Uint32 pfoRestartInfoP;
+};
+typedef Ptr<PendingFileOpenInfo> PendingFileOpenInfoPtr;
+
+struct RestartInfoRecord {
+ Uint32 sriNextRec;
+ State sriState; /* BLOCKREFERENCE TO THE REQUESTING BLOCK */
+ Uint32 sriUserptr; /* USERPOINTER TO THE REQUESTING BLOCK */
+ Uint32 sriDataBufferSegmentP; /* POINTER TO A DISK BUFFER SEGMENT POINTER (DATA) */
+ Uint32 sriDataFileHandle; /* FILE HANDLE TO THE OPEN DATA FILE */
+ Uint32 sriCheckpointVersion; /* CHECKPOINT VERSION TO RESTART FROM */
+ Uint32 sriFragid; /* FRAGMENT ID */
+ Uint32 sriFragP; /* FRAGMENT POINTER */
+ Uint32 sriTableId; /* TABLE ID */
+ Uint32 sriLocalLogInfoP; /* POINTER TO A LOCAL LOG INFO RECORD */
+ Uint32 sriNumDataPages; /* NUMBER OF DATA PAGES TO READ */
+ Uint32 sriCurDataPageFromBuffer; /* THE CHECKPOINT IS COMPLETED */
+ BlockReference sriBlockref;
+};
+typedef Ptr<RestartInfoRecord> RestartInfoRecordPtr;
+
+ /* ************* TRIGGER DATA ************* */
+ /* THIS RECORD FORMS LISTS OF ACTIVE */
+ /* TRIGGERS FOR EACH TABLE. */
+ /* THE RECORDS ARE MANAGED BY A TRIGGER */
+ /* POOL WHERE A TRIGGER RECORD IS SEIZED */
+ /* WHEN A TRIGGER IS ACTIVATED AND RELEASED */
+ /* WHEN THE TRIGGER IS DEACTIVATED. */
+ /* **************************************** */
+struct TupTriggerData {
+
+ /**
+ * Trigger id, used by DICT/TRIX to identify the trigger
+ */
+ Uint32 triggerId;
+
+ /**
+ * Index id is needed for ordered index.
+ */
+ Uint32 indexId;
+
+ /**
+ * Trigger type etc, defines what the trigger is used for
+ */
+ TriggerType::Value triggerType;
+ TriggerActionTime::Value triggerActionTime;
+ TriggerEvent::Value triggerEvent;
+ /**
+ * Receiver block
+ */
+ Uint32 m_receiverBlock;
+
+ /**
+ * Monitor all replicas, i.e. trigger will fire on all nodes where tuples
+ * are stored
+ */
+ bool monitorReplicas;
+
+ /**
+ * Monitor all attributes, the trigger monitors all changes to attributes
+ * in the table
+ */
+ bool monitorAllAttributes;
+
+ /**
+ * Send only changed attributes at trigger firing time.
+ */
+ bool sendOnlyChangedAttributes;
+
+ /**
+ * Send also before values at trigger firing time.
+ */
+ bool sendBeforeValues;
+
+ /**
+ * Attribute mask, defines what attributes are to be monitored
+ * Can be seen as a compact representation of SQL column name list
+ */
+ Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask;
+
+ /**
+ * Next ptr (used in pool/list)
+ */
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+
+ /**
+ * Prev pointer (used in list)
+ */
+ Uint32 prevList;
+
+ inline void print(NdbOut & s) const { s << "[TriggerData = " << triggerId << "]"; };
+};
+
+typedef Ptr<TupTriggerData> TriggerPtr;
+
+/**
+ * Pool of trigger data record
+ */
+ArrayPool<TupTriggerData> c_triggerPool;
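
// As the comment above says, a trigger record is seized from the pool when a trigger
// is activated and released when it is deactivated. A minimal standalone model of
// such a free-list pool (illustrative only; the real ArrayPool/ArrayList classes are
// part of the NDB kernel infrastructure and are not reproduced here):

#include <cstdint>
#include <vector>

template <class T>
class SimplePool {
public:
  explicit SimplePool(uint32_t size) : m_records(size) {
    for (uint32_t i = 0; i < size; i++)
      m_free.push_back(size - 1 - i);            // simple LIFO free list
  }
  bool seize(uint32_t& i) {                      // activate: take a record off the free list
    if (m_free.empty()) return false;
    i = m_free.back();
    m_free.pop_back();
    return true;
  }
  void release(uint32_t i) { m_free.push_back(i); }  // deactivate: give it back
  T& get(uint32_t i) { return m_records[i]; }
private:
  std::vector<T> m_records;
  std::vector<uint32_t> m_free;
};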
+
+ /* ************ TABLE RECORD ************ */
+ /* THIS RECORD FORMS A LIST OF TABLE */
+ /* REFERENCE INFORMATION. ONE RECORD */
+ /* PER TABLE REFERENCE. */
+ /* ************************************** */
+struct Tablerec {
+ Tablerec(ArrayPool<TupTriggerData> & triggerPool) :
+ afterInsertTriggers(triggerPool),
+ afterDeleteTriggers(triggerPool),
+ afterUpdateTriggers(triggerPool),
+ subscriptionInsertTriggers(triggerPool),
+ subscriptionDeleteTriggers(triggerPool),
+ subscriptionUpdateTriggers(triggerPool),
+ constraintUpdateTriggers(triggerPool),
+ tuxCustomTriggers(triggerPool)
+ {}
+
+ Bitmask<MAXNROFATTRIBUTESINWORDS> notNullAttributeMask;
+
+ ReadFunction* readFunctionArray;
+ UpdateFunction* updateFunctionArray;
+ CHARSET_INFO** charsetArray;
+
+ Uint32 readKeyArray;
+ Uint32 tabDescriptor;
+ Uint32 attributeGroupDescriptor;
+
+ bool GCPIndicator;
+ bool checksumIndicator;
+
+ Uint16 tupheadsize;
+ Uint16 noOfAttr;
+ Uint16 noOfKeyAttr;
+ Uint16 noOfCharsets;
+ Uint16 noOfNewAttr;
+ Uint16 noOfNullAttr;
+ Uint16 noOfAttributeGroups;
+
+ Uint8 tupChecksumIndex;
+ Uint8 tupNullIndex;
+ Uint8 tupNullWords;
+ Uint8 tupGCPIndex;
+
+ // Lists of trigger data for active triggers
+ ArrayList<TupTriggerData> afterInsertTriggers;
+ ArrayList<TupTriggerData> afterDeleteTriggers;
+ ArrayList<TupTriggerData> afterUpdateTriggers;
+ ArrayList<TupTriggerData> subscriptionInsertTriggers;
+ ArrayList<TupTriggerData> subscriptionDeleteTriggers;
+ ArrayList<TupTriggerData> subscriptionUpdateTriggers;
+ ArrayList<TupTriggerData> constraintUpdateTriggers;
+
+ // List of ordered indexes
+ ArrayList<TupTriggerData> tuxCustomTriggers;
+
+ Uint32 fragid[2 * MAX_FRAG_PER_NODE];
+ Uint32 fragrec[2 * MAX_FRAG_PER_NODE];
+
+ struct {
+ Uint32 tabUserPtr;
+ Uint32 tabUserRef;
+ } m_dropTable;
+ State tableStatus;
+};
+
+typedef Ptr<Tablerec> TablerecPtr;
+
+struct storedProc {
+ Uint32 storedLinkFirst;
+ Uint32 storedLinkLast;
+ Uint32 storedCounter;
+ Uint32 nextPool;
+ Uint16 storedCode;
+ Uint16 storedProcLength;
+};
+
+typedef Ptr<storedProc> StoredProcPtr;
+
+ArrayPool<storedProc> c_storedProcPool;
+
+/* **************************** TABLE_DESCRIPTOR RECORD ******************************** */
+/* THIS VARIABLE IS USED TO STORE TABLE DESCRIPTIONS. A TABLE DESCRIPTION IS STORED AS A */
+/* CONTIGUOUS ARRAY IN THIS VARIABLE. WHEN A NEW TABLE IS ADDED A CHUNK IS ALLOCATED IN */
+/* THIS RECORD. WHEN ATTRIBUTES ARE ADDED TO THE TABLE, A NEW CHUNK OF PROPER SIZE IS */
+/* ALLOCATED AND ALL DATA IS COPIED TO THIS NEW CHUNK AND THEN THE OLD CHUNK IS PUT IN */
+/* THE FREE LIST. EACH TABLE IS DESCRIBED BY A NUMBER OF TABLE DESCRIPTIVE ATTRIBUTES */
+/* AND A NUMBER OF ATTRIBUTE DESCRIPTORS AS SHOWN IN FIGURE BELOW */
+/* */
+/* WHEN ALLOCATING A TABLE DESCRIPTOR THE SIZE IS ALWAYS A MULTIPLE OF 16 WORDS. */
+/* */
+/* ---------------------------------------------- */
+/* | TRAILER USED FOR ALLOC/DEALLOC | */
+/* ---------------------------------------------- */
+/* | TABLE DESCRIPTIVE ATTRIBUTES | */
+/* ---------------------------------------------- */
+/* | ATTRIBUTE DESCRIPTION 1 | */
+/* ---------------------------------------------- */
+/* | ATTRIBUTE DESCRIPTION 2 | */
+/* ---------------------------------------------- */
+/* | | */
+/* | | */
+/* | | */
+/* ---------------------------------------------- */
+/* | ATTRIBUTE DESCRIPTION N | */
+/* ---------------------------------------------- */
+/* */
+/* THE TABLE DESCRIPTIVE ATTRIBUTES CONTAINS THE FOLLOWING ATTRIBUTES: */
+/* */
+/* ---------------------------------------------- */
+/* | HEADER (TYPE OF INFO) | */
+/* ---------------------------------------------- */
+/* | SIZE OF WHOLE CHUNK (INCL. TRAILER) | */
+/* ---------------------------------------------- */
+/* | TABLE IDENTITY | */
+/* ---------------------------------------------- */
+/* | FRAGMENT IDENTITY | */
+/* ---------------------------------------------- */
+/* | NUMBER OF ATTRIBUTES | */
+/* ---------------------------------------------- */
+/* | SIZE OF FIXED ATTRIBUTES | */
+/* ---------------------------------------------- */
+/* | NUMBER OF NULL FIELDS | */
+/* ---------------------------------------------- */
+/* | NOT USED | */
+/* ---------------------------------------------- */
+/* */
+/* THESE ATTRIBUTES ARE ALL ONE R-VARIABLE IN THE RECORD. */
+/* NORMALLY ONLY ONE TABLE DESCRIPTOR IS USED. DURING SCHEMA CHANGES THERE COULD */
+/* HOWEVER EXIST MORE THAN ONE TABLE DESCRIPTION SINCE THE SCHEMA CHANGE OF VARIOUS */
+/* FRAGMENTS ARE NOT SYNCHRONISED. THIS MEANS THAT ALTHOUGH THE SCHEMA HAS CHANGED */
+/* IN ALL FRAGMENTS, BUT THE FRAGMENTS HAVE NOT REMOVED THE ATTRIBUTES IN THE SAME */
+/* TIME-FRAME. THEREBY SOME ATTRIBUTE INFORMATION MIGHT DIFFER BETWEEN FRAGMENTS. */
+/* EXAMPLES OF ATTRIBUTES THAT MIGHT DIFFER ARE SIZE OF FIXED ATTRIBUTES, NUMBER OF */
+/* ATTRIBUTES, FIELD START WORD, START BIT. */
+/* */
+/* AN ATTRIBUTE DESCRIPTION CONTAINS THE FOLLOWING ATTRIBUTES: */
+/* */
+/* ---------------------------------------------- */
+/* | Field Type, 4 bits (LSB Bits) | */
+/* ---------------------------------------------- */
+/* | Attribute Size, 4 bits | */
+/* ---------------------------------------------- */
+/* | NULL indicator 1 bit | */
+/* ---------------------------------------------- */
+/* | Indicator if TUP stores attr. 1 bit | */
+/* ---------------------------------------------- */
+/* | Not used 6 bits | */
+/* ---------------------------------------------- */
+/* | No. of elements in fixed array 16 bits | */
+/* ---------------------------------------------- */
+/* ---------------------------------------------- */
+/* | Field Start Word, 21 bits (LSB Bits) | */
+/* ---------------------------------------------- */
+/* | NULL Bit, 11 bits | */
+/* ---------------------------------------------- */
+/* */
+/* THE ATTRIBUTE SIZE CAN BE 1,2,4,8,16,32,64 AND 128 BITS. */
+/* */
+/* THE UNUSED PARTS OF THE RECORDS ARE PUT IN A LINKED LIST OF FREE PARTS. EACH OF */
+/* THOSE FREE PARTS HAVE THREE RECORDS ASSIGNED AS SHOWN IN THIS STRUCTURE */
+/* ALL FREE PARTS ARE SET INTO A CHUNK LIST WHERE EACH CHUNK IS AT LEAST 16 WORDS */
+/* */
+/* ---------------------------------------------- */
+/* | HEADER = RNIL | */
+/* ---------------------------------------------- */
+/* | SIZE OF FREE AREA | */
+/* ---------------------------------------------- */
+/* | POINTER TO PREVIOUS FREE AREA | */
+/* ---------------------------------------------- */
+/* | POINTER TO NEXT FREE AREA | */
+/* ---------------------------------------------- */
+/* */
+/* IF THE POINTER TO THE NEXT AREA IS RNIL THEN THIS IS THE LAST FREE AREA. */
+/* */
+/*****************************************************************************************/
+struct TableDescriptor {
+ Uint32 tabDescr;
+};
+typedef Ptr<TableDescriptor> TableDescriptorPtr;
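
// The layout described above allocates table descriptors in chunks whose size is
// always rounded up to a multiple of 16 words, with a header word at the start and
// a trailer at the end used for allocation/deallocation. A minimal illustrative
// sketch of the rounding and of writing those bookkeeping words (simplified; the
// word positions follow the ZTD_* constants defined earlier, and the header value
// passed in would be e.g. ZTD_TYPE_NORMAL for a chunk in use):

#include <cstdint>

static const uint32_t kTdHeaderPos   = 0;   // ZTD_HEADER
static const uint32_t kTdDataSizePos = 1;   // ZTD_DATASIZE
static const uint32_t kChunkAlign    = 16;  // chunk granularity stated in the comment above

// Round a requested descriptor size up to the chunk granularity.
static inline uint32_t chunkSize(uint32_t requestedWords)
{
  return ((requestedWords + kChunkAlign - 1) / kChunkAlign) * kChunkAlign;
}

// Initialise the header and size words of a freshly allocated chunk.
static void initChunk(uint32_t* chunk, uint32_t requestedWords, uint32_t headerType)
{
  const uint32_t total = chunkSize(requestedWords);
  chunk[kTdHeaderPos]   = headerType;   // usage type, as in the LSB convention above
  chunk[kTdDataSizePos] = total;        // size word, as in the figure above
}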
+
+struct HostBuffer {
+ bool inPackedList;
+ Uint32 packetLenTA;
+ Uint32 noOfPacketsTA;
+ Uint32 packetBufferTA[30];
+};
+typedef Ptr<HostBuffer> HostBufferPtr;
+
+ /* **************** UNDO PAGE RECORD ******************* */
+ /* THIS RECORD FORMS AN UNDO PAGE CONTAINING A NUMBER OF */
+ /* DATA WORDS. CURRENTLY THERE ARE ZWORDS_ON_PAGE (8192) WORDS  */
+ /* ON A PAGE, EACH OF 32 BITS (4 BYTES), WHICH FORMS AN UNDO    */
+ /* PAGE WITH A TOTAL OF 32 KB                                   */
+ /* ***************************************************** */
+struct UndoPage {
+ Uint32 undoPageWord[ZWORDS_ON_PAGE]; /* 32 KB */
+};
+typedef Ptr<UndoPage> UndoPagePtr;
+
+ /*
+ * Build index operation record.
+ */
+ struct BuildIndexRec {
+ // request cannot use signal class due to extra members
+ Uint32 m_request[BuildIndxReq::SignalLength];
+ Uint32 m_triggerPtrI; // the index trigger
+ Uint32 m_fragNo; // fragment number under Tablerec
+ Uint32 m_pageId; // logical fragment page id
+ Uint32 m_tupleNo; // tuple number on page (pageIndex >> 1)
+ BuildIndxRef::ErrorCode m_errorCode;
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ };
+ typedef Ptr<BuildIndexRec> BuildIndexPtr;
+ ArrayPool<BuildIndexRec> c_buildIndexPool;
+ ArrayList<BuildIndexRec> c_buildIndexList;
+ Uint32 c_noOfBuildIndexRec;
+
+public:
+ Dbtup(const class Configuration &);
+ virtual ~Dbtup();
+
+ /*
+ * TUX uses logical tuple address when talking to ACC and LQH.
+ */
+ void tuxGetTupAddr(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32& tupAddr);
+
+ /*
+ * TUX index in TUP has single Uint32 array attribute which stores an
+ * index node. TUX reads and writes the node directly via pointer.
+ */
+ int tuxAllocNode(Signal* signal, Uint32 fragPtrI, Uint32& pageId, Uint32& pageOffset, Uint32*& node);
+ void tuxFreeNode(Signal* signal, Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* node);
+ void tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& node);
+
+ /*
+ * TUX reads primary table attributes for index keys. Tuple is
+ * specified by location of original tuple and version number. Input
+ * is attribute ids in AttributeHeader format. Output is attribute
+ * data with headers. Uses readAttributes with xfrm option set.
+ * Returns number of words or negative (-terrorCode) on error.
+ */
+ int tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut);
+
+ /*
+ * TUX reads primary key without headers into an array of words. Used
+ * for md5 summing and when returning keyinfo. Returns number of
+ * words or negative (-terrorCode) on error.
+ */
+ int tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut, bool xfrmFlag);
+
+ /*
+ * ACC reads primary key without headers into an array of words. At
+ * this point in ACC deconstruction, ACC still uses logical references
+ * to fragment and tuple.
+ */
+ int accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag);
+
+ /*
+ * TUX checks if tuple is visible to scan.
+ */
+ bool tuxQueryTh(Uint32 fragPtrI, Uint32 tupAddr, Uint32 tupVersion, Uint32 transId1, Uint32 transId2, Uint32 savePointId);
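
// The TUX/ACC read methods above (tuxReadAttrs, tuxReadPk, accReadPk) share one
// return convention: a non-negative value is the number of words written to
// dataOut, and a negative value is -terrorCode. A minimal hypothetical helper
// showing how a caller typically consumes that convention (the helper is a
// stand-in, not a real TUX/TUP function):

#include <cstdint>

static bool checkTuxReadResult(int ret, uint32_t& numWords, uint32_t& errorCode)
{
  if (ret < 0) {
    errorCode = (uint32_t)(-ret);   // negative return carries -terrorCode
    numWords  = 0;
    return false;
  }
  numWords  = (uint32_t)ret;        // non-negative return is the word count in dataOut
  errorCode = 0;
  return true;
}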
+
+private:
+ BLOCK_DEFINES(Dbtup);
+
+ // Transit signals
+ void execDEBUG_SIG(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+
+ // Received signals
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execSEND_PACKED(Signal* signal);
+ void execSTTOR(Signal* signal);
+ void execTUP_LCPREQ(Signal* signal);
+ void execEND_LCPREQ(Signal* signal);
+ void execSTART_RECREQ(Signal* signal);
+ void execMEMCHECKREQ(Signal* signal);
+ void execTUPSEIZEREQ(Signal* signal);
+ void execTUPRELEASEREQ(Signal* signal);
+ void execSTORED_PROCREQ(Signal* signal);
+ void execTUPFRAGREQ(Signal* signal);
+ void execTUP_ADD_ATTRREQ(Signal* signal);
+ void execTUP_COMMITREQ(Signal* signal);
+ void execTUP_ABORTREQ(Signal* signal);
+ void execTUP_SRREQ(Signal* signal);
+ void execTUP_PREPLCPREQ(Signal* signal);
+ void execFSOPENCONF(Signal* signal);
+ void execFSOPENREF(Signal* signal);
+ void execFSCLOSECONF(Signal* signal);
+ void execFSCLOSEREF(Signal* signal);
+ void execFSWRITECONF(Signal* signal);
+ void execFSWRITEREF(Signal* signal);
+ void execFSREADCONF(Signal* signal);
+ void execFSREADREF(Signal* signal);
+ void execNDB_STTOR(Signal* signal);
+ void execREAD_CONFIG_REQ(Signal* signal);
+ void execSET_VAR_REQ(Signal* signal);
+ void execDROP_TAB_REQ(Signal* signal);
+ void execALTER_TAB_REQ(Signal* signal);
+ void execFSREMOVECONF(Signal* signal);
+ void execFSREMOVEREF(Signal* signal);
+ void execTUP_ALLOCREQ(Signal* signal);
+ void execTUP_DEALLOCREQ(Signal* signal);
+ void execTUP_WRITELOG_REQ(Signal* signal);
+
+ // Ordered index related
+ void execBUILDINDXREQ(Signal* signal);
+ void buildIndex(Signal* signal, Uint32 buildPtrI);
+ void buildIndexReply(Signal* signal, const BuildIndexRec* buildRec);
+
+ // Tup scan
+ void execACC_SCANREQ(Signal* signal);
+ void execNEXT_SCANREQ(Signal* signal);
+ void execACC_CHECK_SCAN(Signal* signal);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+// Methods to handle execution of TUPKEYREQ + ATTRINFO.
+//
+// Module Execution Manager
+//
+// The TUPKEYREQ signal is central to this block. This signal is used
+// by everybody that needs to read data residing in DBTUP. The data is
+// read using an interpreter approach.
+//
+// Operations only needing to read execute a simplified version of the
+// interpreter where the only instruction is read Attribute to send.
+// Operations only needing to update the record (insert or update)
+// execute a simplified version of the interpreter where the only
+// instruction is write Attribute.
+//
+// Currently TUPKEYREQ is used in the following situations.
+// 1) Normal transaction execution. Can be any of the types described
+// below.
+// 2) Execution of fragment redo log during system restart.
+// In this situation there will only be normal updates, inserts
+// and deletes performed.
+// 3) A special type of normal transaction execution is to write the
+// records arriving from the primary replica in the node restart
+// processing. This will always be normal write operations which
+// are translated to inserts or updates before arriving to TUP.
+// 4) Scan processing. The scan processing will use normal reads or
+// interpreted reads in their execution. There will be one TUPKEYREQ
+// signal for each record processed.
+// 5) Copy fragment processing. This is a special type of scan used in the
+// primary replica at system restart. It reads the entire reads and
+// converts those to writes to the starting node. In this special case
+// LQH acts as an API node and receives also the ATTRINFO sent in the
+// TRANSID_AI signals.
+//
+// Signal Diagram:
+//
+// In Signals:
+// -----------
+//
+// Logically there is one request TUPKEYREQ which requests to read/write data
+// of one tuple in the database. Since the definition of what to read and write
+// can be bigger than the maximum signal size we segment the signal. The definition
+// of what to read/write/interpreted program is sent before the TUPKEYREQ signal.
+//
+// ---> ATTRINFO
+// ...
+// ---> ATTRINFO
+// ---> TUPKEYREQ
+// The number of ATTRINFO signals can be anything from 0 upwards.
+// The total size of the ATTRINFO is not allowed to be more than 16384 words.
+// There is always one and only one TUPKEYREQ.
+//
+// Response Signals (successful case):
+//
+// Simple/Dirty Read Operation
+// ---------------------------
+//
+// <---- TRANSID_AI (to API)
+// ...
+// <---- TRANSID_AI (to API)
+// <---- READCONF (to API)
+// <---- TUPKEYCONF (to LQH)
+// There is always exactly one READCONF sent last. The number of
+// TRANSID_AI signals depends on how much data was read. The maximum size
+// of the ATTRINFO sent back is 16384 words. The signals are sent
+// directly to the application with an address provided by the
+// TUPKEYREQ signal.
+// A positive response signal is also sent to LQH.
+//
+// Normal Read Operation
+// ---------------------
+//
+// <---- TRANSID_AI (to API)
+// ...
+// <---- TRANSID_AI (to API)
+// <---- TUPKEYCONF (to LQH)
+// The number of TRANSID_AI signals depends on how much data was read.
+// The maximum size of the ATTRINFO sent back is 16384 words. The
+// signals are sent directly to the application with an address
+// provided by the TUPKEYREQ signal.
+// A positive response signal is also sent to LQH.
+//
+// Normal update/insert/delete operation
+// -------------------------------------
+//
+// <---- TUPKEYCONF
+// After successful updating of the tuple LQH is informed of this.
+//
+// Delete with read
+// ----------------
+//
+// Will behave as a normal read although it also prepares the
+// deletion of the tuple.
+//
+// Interpreted Update
+// ------------------
+//
+// <---- TRANSID_AI (to API)
+// ...
+// <---- TRANSID_AI (to API)
+// <---- TUP_ATTRINFO (to LQH)
+// ...
+// <---- TUP_ATTRINFO (to LQH)
+// <---- TUPKEYCONF (to LQH)
+//
+// The interpreted Update contains five sections:
+// The first section performs read Attribute operations
+// that send results back to the API.
+//
+// The second section executes the interpreted program
+// where data from attributes can be updated and it
+// can also read attribute values into the registers.
+//
+// The third section performs unconditional updates of
+// attributes.
+//
+// The fourth section can read the attributes to be sent to the
+// API after updating the record.
+//
+// The fifth section contains subroutines used by the interpreter
+// in the second section.
+//
+// All types of interpreted programs contain the same five sections.
+// The only difference is that only interpreted updates can update
+// attributes. Interpreted inserts are not allowed.
+//
+// Interpreted Updates have to send back the information about the
+// attributes they have updated. This information will be shipped to
+// the log and also to any other replicas. Thus interpreted updates
+// are only performed in the primary replica. The fragment redo log
+// in LQH will contain information so that normal update/inserts/deletes
+// can be performed using TUPKEYREQ.
+//
+// Interpreted Read
+// ----------------
+//
+// From a signalling point of view the Interpreted Read behaves as
+// a Normal Read. The Interpreted Read is often used by scans.
+//
+// Interpreted Delete
+// ------------------
+//
+// <---- TUPKEYCONF
+// After successful preparation to delete the tuple LQH is informed
+// of this.
+//
+// Interpreted Delete with Read
+// ----------------------------
+//
+// From a signalling point of view an interpreted delete with read
+// behaves as a normal read.
+//
+// Continuation after successful case:
+//
+// After a read of any kind the operation record is ready to be used
+// again by a new operation.
+//
+// Any updates, inserts or deletes wait for either of two messages.
+// A commit specifying that the operation is to be performed for real
+// or an abort specifying that the operation is to be rolled back and
+// the record to be restored in its original format.
+//
+// This is handled by the module Transaction Manager.
+//
+// Response Signals (unsuccessful case):
+//
+// <---- TUPKEYREF (to LQH)
+// A signal is sent back to LQH informing about the unsuccessful
+// operation. In this case TUP waits for an abort signal to arrive
+// before the operation record is ready for the next operation.
+// This is handled by the Transaction Manager.
+//------------------------------------------------------------------
+//------------------------------------------------------------------
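
// The request side of the protocol described above segments the attribute
// information into a train of ATTRINFO signals followed by exactly one TUPKEYREQ.
// A minimal standalone sketch of that segmentation; the per-signal capacity of 22
// words is an assumption for illustration (the real value comes from the AttrInfo
// signal class), while the 16384-word total limit is the one stated above:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

static const size_t kAttrInfoDataWords     = 22;     // assumed per-signal payload size
static const size_t kMaxTotalAttrInfoWords = 16384;  // total limit stated in the comment above

// Split a definition of what to read/write into ATTRINFO-sized segments.
// Returns false if the definition exceeds the allowed total size.
static bool segmentAttrInfo(const std::vector<uint32_t>& definition,
                            std::vector<std::vector<uint32_t> >& segmentsOut)
{
  if (definition.size() > kMaxTotalAttrInfoWords)
    return false;                                    // would be rejected
  segmentsOut.clear();
  for (size_t pos = 0; pos < definition.size(); pos += kAttrInfoDataWords) {
    const size_t len = std::min(kAttrInfoDataWords, definition.size() - pos);
    segmentsOut.push_back(std::vector<uint32_t>(definition.begin() + pos,
                                                definition.begin() + pos + len));
  }
  return true;                                       // 0..N ATTRINFO, then one TUPKEYREQ
}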
+
+// *****************************************************************
+// Signal Reception methods.
+// *****************************************************************
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void execTUPKEYREQ(Signal* signal);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void execATTRINFO(Signal* signal);
+
+// Trigger signals
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void execCREATE_TRIG_REQ(Signal* signal);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void execDROP_TRIG_REQ(Signal* signal);
+
+// *****************************************************************
+// Support methods for ATTRINFO.
+// *****************************************************************
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void handleATTRINFOforTUPKEYREQ(Signal* signal,
+ Uint32 length,
+ Operationrec * const regOperPtr);
+
+// *****************************************************************
+// Setting up the environment for reads, inserts, updates and deletes.
+// *****************************************************************
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int handleReadReq(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr,
+ Page* pagePtr);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int handleUpdateReq(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Page* const pagePtr);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int handleInsertReq(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Page* const pagePtr);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int handleDeleteReq(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Page* const pagePtr);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int updateStartLab(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr,
+ Page* const pagePtr);
+
+// *****************************************************************
+// Interpreter Handling methods.
+// *****************************************************************
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int interpreterStartLab(Signal* signal,
+ Page* const pagePtr,
+ Uint32 TupHeadOffset);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int interpreterNextLab(Signal* signal,
+ Page* const pagePtr,
+ Uint32 TupHeadOffset,
+ Uint32* logMemory,
+ Uint32* mainProgram,
+ Uint32 TmainProgLen,
+ Uint32* subroutineProg,
+ Uint32 TsubroutineLen,
+ Uint32 * tmpArea,
+ Uint32 tmpAreaSz);
+
+// *****************************************************************
+// Signal Sending methods.
+// *****************************************************************
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void sendReadAttrinfo(Signal* signal,
+ Uint32 TnoOfData,
+ const Operationrec * const regOperPtr);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void sendLogAttrinfo(Signal* signal,
+ Uint32 TlogSize,
+ Operationrec * const regOperPtr);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void sendTUPKEYCONF(Signal* signal,
+ Operationrec * const regOperPtr,
+ Uint32 TlogSize);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+// *****************************************************************
+// The methods that perform the actual read and update of attributes
+// in the tuple.
+// *****************************************************************
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int readAttributes(Page* const pagePtr,
+ Uint32 TupHeadOffset,
+ const Uint32* inBuffer,
+ Uint32 inBufLen,
+ Uint32* outBuffer,
+ Uint32 TmaxRead,
+ bool xfrmFlag);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int readAttributesWithoutHeader(Page* const pagePtr,
+ Uint32 TupHeadOffset,
+ Uint32* inBuffer,
+ Uint32 inBufLen,
+ Uint32* outBuffer,
+ Uint32* attrBuffer,
+ Uint32 TmaxRead);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int updateAttributes(Page* const pagePtr,
+ Uint32 TupHeadOffset,
+ Uint32* inBuffer,
+ Uint32 inBufLen);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readFixedSizeTHOneWordNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateFixedSizeTHOneWordNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readFixedSizeTHTwoWordNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateFixedSizeTHTwoWordNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readFixedSizeTHManyWordNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readFixedSizeTHOneWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateFixedSizeTHOneWordNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readFixedSizeTHTwoWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateFixedSizeTHTwoWordNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readFixedSizeTHManyWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readFixedSizeTHZeroWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateFixedSizeTHManyWordNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readVariableSizedAttr(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateVariableSizedAttr(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readVarSizeUnlimitedNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateVarSizeUnlimitedNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readVarSizeUnlimitedNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateVarSizeUnlimitedNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readBigVarSizeNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateBigVarSizeNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readBigVarSizeNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateBigVarSizeNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readSmallVarSizeNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateSmallVarSizeNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readSmallVarSizeNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateSmallVarSizeNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readDynFixedSize(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateDynFixedSize(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readDynVarSizeUnlimited(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateDynVarSizeUnlimited(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readDynBigVarSize(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateDynBigVarSize(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readDynSmallVarSize(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateDynSmallVarSize(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+
+ bool readBitsNULLable(Uint32* outBuffer, AttributeHeader*, Uint32, Uint32);
+ bool updateBitsNULLable(Uint32* inBuffer, Uint32, Uint32);
+ bool readBitsNotNULL(Uint32* outBuffer, AttributeHeader*, Uint32, Uint32);
+ bool updateBitsNotNULL(Uint32* inBuffer, Uint32, Uint32);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool nullFlagCheck(Uint32 attrDes2);
+ Uint32 read_pseudo(Uint32 attrId, Uint32* outBuffer);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void setUpQueryRoutines(Tablerec* const regTabPtr);
+
+// *****************************************************************
+// Service methods.
+// *****************************************************************
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void copyAttrinfo(Signal* signal, Operationrec * const regOperPtr, Uint32* inBuffer);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void initOpConnection(Operationrec* regOperPtr, Fragrecord*);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void initOperationrec(Signal* signal);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int initStoredOperationrec(Operationrec* const regOperPtr,
+ Uint32 storedId);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void insertActiveOpList(Signal* signal,
+ OperationrecPtr regOperPtr,
+ Page * const pagePtr,
+ Uint32 pageOffset);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void linkOpIntoFragList(OperationrecPtr regOperPtr,
+ Fragrecord* const regFragPtr);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void bufferTRANSID_AI(Signal* signal, BlockReference aRef, Uint32 Tlen);
+
+//------------------------------------------------------------------
+// Trigger handling routines
+//------------------------------------------------------------------
+ ArrayList<TupTriggerData>* findTriggerList(Tablerec* table,
+ TriggerType::Value ttype,
+ TriggerActionTime::Value ttime,
+ TriggerEvent::Value tevent);
+
+ bool createTrigger(Tablerec* table, const CreateTrigReq* req);
+
+ Uint32 dropTrigger(Tablerec* table, const DropTrigReq* req);
+
+ void checkImmediateTriggersAfterInsert(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const tablePtr);
+
+ void checkImmediateTriggersAfterUpdate(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const tablePtr);
+
+ void checkImmediateTriggersAfterDelete(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const tablePtr);
+
+#if 0
+ void checkDeferredTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTablePtr);
+#endif
+ void checkDetachedTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTablePtr);
+
+ void fireImmediateTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* const regOperPtr);
+
+ void fireDeferredTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* const regOperPtr);
+
+ void fireDetachedTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* const regOperPtr);
+
+ void executeTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* const regOperPtr);
+
+ void executeTrigger(Signal* signal,
+ TupTriggerData* const trigPtr,
+ Operationrec* const regOperPtr);
+
+ bool readTriggerInfo(TupTriggerData* const trigPtr,
+ Operationrec* const regOperPtr,
+ Uint32* const keyBuffer,
+ Uint32& noPrimKey,
+ Uint32* const mainBuffer,
+ Uint32& noMainWords,
+ Uint32* const copyBuffer,
+ Uint32& noCopyWords);
+
+ void sendTrigAttrInfo(Signal* signal,
+ Uint32* data,
+ Uint32 dataLen,
+ bool executeDirect,
+ BlockReference receiverReference);
+
+ Uint32 setAttrIds(Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask,
+ Uint32 noOfAttributes,
+ Uint32* inBuffer);
+
+ void sendFireTrigOrd(Signal* signal,
+ Operationrec * const regOperPtr,
+ TupTriggerData* const trigPtr,
+ Uint32 noPrimKeySignals,
+ Uint32 noBeforeSignals,
+ Uint32 noAfterSignals);
+
+ bool primaryKey(Tablerec* const, Uint32);
+
+ // these set terrorCode and return non-zero on error
+
+ int executeTuxInsertTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr);
+
+ int executeTuxUpdateTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr);
+
+ int executeTuxDeleteTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr);
+
+ int addTuxEntries(Signal* signal,
+ Operationrec* regOperPtr,
+ Tablerec* regTabPtr);
+
+ // these crash the node on error
+
+ void executeTuxCommitTriggers(Signal* signal,
+ Operationrec* regOperPtr,
+ Tablerec* const regTabPtr);
+
+ void executeTuxAbortTriggers(Signal* signal,
+ Operationrec* regOperPtr,
+ Tablerec* const regTabPtr);
+
+ void removeTuxEntries(Signal* signal,
+ Operationrec* regOperPtr,
+ Tablerec* regTabPtr);
+
+// *****************************************************************
+// Error Handling routines.
+// *****************************************************************
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int TUPKEY_abort(Signal* signal, int error_type);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void tupkeyErrorLab(Signal* signal);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+// Methods to handle execution of TUP_COMMITREQ + TUP_ABORTREQ.
+//
+// Module Transaction Manager
+//
+// The Transaction Manager module is responsible for the commit
+// and abort of operations started by the Execution Manager.
+//
+// Commit Operation:
+// ----------------
+//
+// Failures in commit processing are not allowed since they would
+// leave the database in an unreliable state. Thus the only way
+// to handle a failure in commit processing is to crash the node.
+//
+// TUP_COMMITREQ can only be received in the wait state after a
+// successful TUPKEYREQ which was not a read operation.
+//
+// Commit of Delete:
+// -----------------
+//
+// This will actually perform the deletion of the record unless
+// other operations are also connected to the record. In that case
+// we instead set the delete state on the record that becomes the
+// owner of the record.
+//
+// Commit of Update:
+// ----------------
+//
+// We will release the copy record where the original record was kept.
+// Here, too, we take special care if several operations are updating
+// the record simultaneously.
+//
+// Commit of Insert:
+// -----------------
+//
+// Will simply reset the state of the operation record.
+//
+// Signal Diagram:
+// ---> TUP_COMMITREQ (from LQH)
+// <---- TUP_COMMITCONF (to LQH)
+//
+//
+// Abort Operation:
+// ----------------
+//
+// Signal Diagram:
+// ---> TUP_ABORTREQ (from LQH)
+// <---- TUP_ABORTCONF (to LQH)
+//
+// Failures in abort processing are not allowed since they would
+// leave the database in an unreliable state. Thus the only way
+// to handle a failure in abort processing is to crash the node.
+//
+// An abort message can arrive at any time. It can arrive before
+// anything at all has been received for the operation. It can arrive
+// after a number of ATTRINFO signals but before TUPKEYREQ has been
+// received. It can arrive after we have sent TUPKEYREF in response to
+// TUPKEYREQ, and finally it can arrive after a successful TUPKEYREQ
+// in all cases, including the read case.
+//------------------------------------------------------------------
+//------------------------------------------------------------------
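+// Illustrative sketch only (hypothetical names, not part of DBTUP):
+// the rules above boil down to "commit is only legal from the wait
+// state after a successful, non-read TUPKEYREQ, while abort is legal
+// at any point in the life of the operation", e.g.:
+//
+//   enum OpState { IDLE, RECEIVING_ATTRINFO, WAIT_COMMIT_OR_ABORT };
+//   bool commitAllowed(OpState s, bool tupkeyOk, bool wasRead) {
+//     return s == WAIT_COMMIT_OR_ABORT && tupkeyOk && !wasRead;
+//   }
+//   bool abortAllowed(OpState) { return true; }
+//------------------------------------------------------------------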
+
+#if 0
+ void checkPages(Fragrecord* const regFragPtr);
+#endif
+ void printoutTuplePage(Uint32 fragid, Uint32 pageid, Uint32 printLimit);
+
+ bool checkUpdateOfPrimaryKey(Uint32* updateBuffer, Tablerec* const regTabPtr);
+
+ void setNullBits(Page* const regPage, Tablerec* const regTabPtr, Uint32 pageOffset);
+ bool checkNullAttributes(Operationrec* const, Tablerec* const);
+ bool getPage(PagePtr& pagePtr,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr);
+
+ bool getPageLastCommitted(Operationrec* const regOperPtr,
+ Operationrec* const leaderOpPtr);
+
+ bool getPageThroughSavePoint(Operationrec* const regOperPtr,
+ Operationrec* const leaderOpPtr);
+
+ Uint32 calculateChecksum(Page* const pagePtr, Uint32 tupHeadOffset, Uint32 tupHeadSize);
+ void setChecksum(Page* const pagePtr, Uint32 tupHeadOffset, Uint32 tupHeadSize);
+
+ void commitSimple(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr);
+
+ void commitRecord(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr);
+
+ void setTupleStatesSetOpType(Operationrec* const regOperPtr,
+ Page* const pagePtr,
+ Uint32& opType,
+ OperationrecPtr& firstOpPtr);
+
+ void findBeforeValueOperation(OperationrecPtr& befOpPtr,
+ OperationrecPtr firstOpPtr);
+
+ void calculateChangeMask(Page* const PagePtr,
+ Tablerec* const regTabPtr,
+ Uint32 pageOffset,
+ Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask);
+
+ void updateGcpId(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr);
+
+ void abortUpdate(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr);
+ void commitUpdate(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr);
+
+ void setTupleStateOnPreviousOps(Uint32 prevOpIndex);
+ void copyMem(Signal* signal, Uint32 sourceIndex, Uint32 destIndex);
+
+ void freeAllAttrBuffers(Operationrec* const regOperPtr);
+ void freeAttrinbufrec(Uint32 anAttrBufRec);
+ void removeActiveOpList(Operationrec* const regOperPtr);
+
+ void updatePackedList(Signal* signal, Uint16 ahostIndex);
+
+ void setUpDescriptorReferences(Uint32 descriptorReference,
+ Tablerec* const regTabPtr,
+ const Uint32* offset);
+ void setUpKeyArray(Tablerec* const regTabPtr);
+ bool addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex);
+ void deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId);
+ void abortAddFragOp(Signal* signal);
+ void releaseTabDescr(Tablerec* const regTabPtr);
+ void getFragmentrec(FragrecordPtr& regFragPtr, Uint32 fragId, Tablerec* const regTabPtr);
+
+ void initialiseRecordsLab(Signal* signal, Uint32 switchData, Uint32, Uint32);
+ void initializeAttrbufrec();
+ void initializeCheckpointInfoRec();
+ void initializeDiskBufferSegmentRecord();
+ void initializeFragoperrec();
+ void initializeFragrecord();
+ void initializeHostBuffer();
+ void initializeLocalLogInfo();
+ void initializeOperationrec();
+ void initializePendingFileOpenInfoRecord();
+ void initializeRestartInfoRec();
+ void initializeTablerec();
+ void initializeTabDescr();
+ void initializeUndoPage();
+
+ void initTab(Tablerec* const regTabPtr);
+
+ void startphase3Lab(Signal* signal, Uint32 config1, Uint32 config2);
+
+ void fragrefuseLab(Signal* signal, FragoperrecPtr fragOperPtr);
+ void fragrefuse1Lab(Signal* signal, FragoperrecPtr fragOperPtr);
+ void fragrefuse2Lab(Signal* signal, FragoperrecPtr fragOperPtr, FragrecordPtr regFragPtr);
+ void fragrefuse3Lab(Signal* signal,
+ FragoperrecPtr fragOperPtr,
+ FragrecordPtr regFragPtr,
+ Tablerec* const regTabPtr,
+ Uint32 fragId);
+ void fragrefuse4Lab(Signal* signal,
+ FragoperrecPtr fragOperPtr,
+ FragrecordPtr regFragPtr,
+ Tablerec* const regTabPtr,
+ Uint32 fragId);
+ void addattrrefuseLab(Signal* signal,
+ FragrecordPtr regFragPtr,
+ FragoperrecPtr fragOperPtr,
+ Tablerec* const regTabPtr,
+ Uint32 fragId);
+
+
+ void checkLcpActiveBufferPage(Uint32 minPageNotWrittenInCheckpoint, DiskBufferSegmentInfoPtr dbsiPtr);
+ void lcpWriteListDataPageSegment(Signal* signal,
+ DiskBufferSegmentInfoPtr dbsiPtr,
+ CheckpointInfoPtr ciPtr,
+ bool flushFlag);
+ void lcpFlushLogLab(Signal* signal, CheckpointInfoPtr ciPtr);
+ void lcpClosedDataFileLab(Signal* signal, CheckpointInfoPtr ciPtr);
+ void lcpEndconfLab(Signal* signal);
+ void lcpSaveDataPageLab(Signal* signal, Uint32 ciIndex);
+ void lcpCompletedLab(Signal* signal, Uint32 ciIndex);
+ void lcpFlushRestartInfoLab(Signal* signal, Uint32 ciIndex);
+ void lcpSaveCopyListLab(Signal* signal, CheckpointInfoPtr ciPtr);
+
+ void sendFSREMOVEREQ(Signal* signal, TablerecPtr tabPtr);
+ void releaseFragment(Signal* signal, Uint32 tableId);
+
+ void allocDataBufferSegment(Signal* signal, DiskBufferSegmentInfoPtr& dbsiPtr);
+ void allocRestartUndoBufferSegment(Signal* signal, DiskBufferSegmentInfoPtr& dbsiPtr, LocalLogInfoPtr lliPtr);
+ void freeDiskBufferSegmentRecord(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr);
+ void freeUndoBufferPages(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr);
+
+ void releaseCheckpointInfoRecord(CheckpointInfoPtr ciPtr);
+ void releaseDiskBufferSegmentRecord(DiskBufferSegmentInfoPtr dbsiPtr);
+ void releaseFragoperrec(FragoperrecPtr fragOperPtr);
+ void releaseFragrec(FragrecordPtr regFragPtr);
+ void releasePendingFileOpenInfoRecord(PendingFileOpenInfoPtr pfoPtr);
+ void releaseRestartInfoRecord(RestartInfoRecordPtr riPtr);
+
+ void seizeDiskBufferSegmentRecord(DiskBufferSegmentInfoPtr& dbsiPtr);
+ void seizeCheckpointInfoRecord(CheckpointInfoPtr& ciPtr);
+ void seizeFragoperrec(FragoperrecPtr& fragOperPtr);
+ void seizeFragrecord(FragrecordPtr& regFragPtr);
+ void seizeOpRec(OperationrecPtr& regOperPtr);
+ void seizePendingFileOpenInfoRecord(PendingFileOpenInfoPtr& pfoiPtr);
+ void seizeRestartInfoRecord(RestartInfoRecordPtr& riPtr);
+
+ // Initialisation
+ void initData();
+ void initRecords();
+
+ void rfrClosedDataFileLab(Signal* signal, Uint32 restartIndex);
+ void rfrCompletedLab(Signal* signal, RestartInfoRecordPtr riPtr);
+ void rfrInitRestartInfoLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr);
+ void rfrLoadDataPagesLab(Signal* signal, RestartInfoRecordPtr riPtr, DiskBufferSegmentInfoPtr dbsiPtr);
+ void rfrReadFirstUndoSegment(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr);
+ void rfrReadNextDataSegment(Signal* signal, RestartInfoRecordPtr riPtr, DiskBufferSegmentInfoPtr dbsiPtr);
+ void rfrReadNextUndoSegment(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr);
+ void rfrReadRestartInfoLab(Signal* signal, RestartInfoRecordPtr riPtr);
+ void rfrReadSecondUndoLogLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr);
+
+ void startExecUndoLogLab(Signal* signal, Uint32 lliIndex);
+ void readExecUndoLogLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr);
+ void closeExecUndoLogLab(Signal* signal, LocalLogInfoPtr lliPtr);
+ void endExecUndoLogLab(Signal* signal, Uint32 lliIndex);
+
+ struct XlcStruct {
+ Uint32 PageId;
+ Uint32 PageIndex;
+ Uint32 LogRecordType;
+ Uint32 FragId;
+ FragrecordPtr FragPtr;
+ LocalLogInfoPtr LliPtr;
+ DiskBufferSegmentInfoPtr DbsiPtr;
+ UndoPagePtr UPPtr;
+ TablerecPtr TabPtr;
+ };
+
+ void xlcGetNextRecordLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr);
+ void xlcRestartCompletedLab(Signal* signal);
+
+ void xlcCopyData(XlcStruct& xlcStruct, Uint32 pageOffset, Uint32 noOfWords, PagePtr pagePtr);
+ void xlcGetLogHeader(XlcStruct& xlcStruct);
+ Uint32 xlcGetLogWord(XlcStruct& xlcStruct);
+
+ void xlcAbortInsert(Signal* signal, XlcStruct& xlcStruct);
+ void xlcAbortUpdate(Signal* signal, XlcStruct& xlcStruct);
+ void xlcDeleteTh(XlcStruct& xlcStruct);
+ void xlcIndicateNoOpActive(XlcStruct& xlcStruct);
+ void xlcInsertTh(XlcStruct& xlcStruct);
+ void xlcTableDescriptor(XlcStruct& xlcStruct);
+ void xlcUndoLogPageHeader(XlcStruct& xlcStruct);
+ void xlcUpdateTh(XlcStruct& xlcStruct);
+ void xlcUpdateGCI(XlcStruct& xlcStruct);
+
+
+ void cprAddData(Signal* signal,
+ Fragrecord* const regFragPtr,
+ Uint32 pageIndex,
+ Uint32 noOfWords,
+ Uint32 startOffset);
+ void cprAddGCIUpdate(Signal* signal,
+ Uint32 prevGCI,
+ Fragrecord* const regFragPtr);
+ void cprAddLogHeader(Signal* signal,
+ LocalLogInfo* const lliPtr,
+ Uint32 recordType,
+ Uint32 tableId,
+ Uint32 fragId);
+ void cprAddUndoLogPageHeader(Signal* signal,
+ Page* const regPagePtr,
+ Fragrecord* const regFragPtr);
+ void cprAddUndoLogRecord(Signal* signal,
+ Uint32 recordType,
+ Uint32 pageId,
+ Uint32 pageIndex,
+ Uint32 tableId,
+ Uint32 fragId,
+ Uint32 localLogIndex);
+ void cprAddAbortUpdate(Signal* signal,
+ LocalLogInfo* const lliPtr,
+ Operationrec* const regOperPtr);
+ void cprAddUndoLogWord(Signal* signal,
+ LocalLogInfo* const lliPtr,
+ Uint32 undoWord);
+ bool isUndoLoggingNeeded(Fragrecord* const regFragPtr, Uint32 pageId);
+ bool isUndoLoggingActive(Fragrecord* const regFragPtr);
+ bool isUndoLoggingBlocked(Fragrecord* const regFragPtr);
+ bool isPageUndoLogged(Fragrecord* const regFragPtr, Uint32 pageId);
+
+ void seizeUndoBufferSegment(Signal* signal, UndoPagePtr& regUndoPagePtr);
+ void lcpWriteUndoSegment(Signal* signal, LocalLogInfo* const lliPtr, bool flushFlag);
+
+
+ void deleteScanProcedure(Signal* signal, Operationrec* regOperPtr);
+ void copyProcedure(Signal* signal,
+ TablerecPtr regTabPtr,
+ Operationrec* regOperPtr);
+ void scanProcedure(Signal* signal,
+ Operationrec* regOperPtr,
+ Uint32 lenAttrInfo);
+ void storedSeizeAttrinbufrecErrorLab(Signal* signal,
+ Operationrec* regOperPtr);
+ bool storedProcedureAttrInfo(Signal* signal,
+ Operationrec* regOperPtr,
+ Uint32 length,
+ Uint32 firstWord,
+ bool copyProc);
+
+//-----------------------------------------------------------------------------
+// Table Descriptor Memory Manager
+//-----------------------------------------------------------------------------
+
+// Public methods
+ Uint32 getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset);
+ Uint32 allocTabDescr(const Tablerec* regTabPtr, Uint32* offset);
+ void freeTabDescr(Uint32 retRef, Uint32 retNo);
+ Uint32 getTabDescrWord(Uint32 index);
+ void setTabDescrWord(Uint32 index, Uint32 word);
+
+// Private methods
+ Uint32 sizeOfReadFunction();
+ void removeTdArea(Uint32 tabDesRef, Uint32 list);
+ void insertTdArea(Uint32 sizeOfChunk, Uint32 tabDesRef, Uint32 list);
+ Uint32 itdaMergeTabDescr(Uint32 retRef, Uint32 retNo);
+
+//------------------------------------------------------------------------------------------------------
+// Page Memory Manager
+//------------------------------------------------------------------------------------------------------
+
+// Public methods
+ void allocConsPages(Uint32 noOfPagesToAllocate,
+ Uint32& noOfPagesAllocated,
+ Uint32& allocPageRef);
+ void returnCommonArea(Uint32 retPageRef, Uint32 retNo);
+ void initializePage();
+
+// Private methods
+ void removeCommonArea(Uint32 remPageRef, Uint32 list);
+ void insertCommonArea(Uint32 insPageRef, Uint32 list);
+ void findFreeLeftNeighbours(Uint32& allocPageRef, Uint32& noPagesAllocated, Uint32 noPagesToAllocate);
+ void findFreeRightNeighbours(Uint32& allocPageRef, Uint32& noPagesAllocated, Uint32 noPagesToAllocate);
+ Uint32 nextHigherTwoLog(Uint32 input);
+
+// Private data
+ Uint32 cfreepageList[16];
+
+//------------------------------------------------------------------------------------------------------
+// Page Mapper, converts logical page ids to physical page ids.
+// The page mapper also handles the pages allocated to the fragment.
+//------------------------------------------------------------------------------------------------------
+//
+// Public methods
+ Uint32 getRealpid(Fragrecord* const regFragPtr, Uint32 logicalPageId);
+ Uint32 getNoOfPages(Fragrecord* const regFragPtr);
+ void initPageRangeSize(Uint32 size);
+ bool insertPageRangeTab(Fragrecord* const regFragPtr,
+ Uint32 startPageId,
+ Uint32 noPages);
+ void releaseFragPages(Fragrecord* const regFragPtr);
+ void initFragRange(Fragrecord* const regFragPtr);
+ void initializePageRange();
+ Uint32 getEmptyPage(Fragrecord* const regFragPtr);
+ Uint32 allocFragPages(Fragrecord* const regFragPtr, Uint32 noOfPagesAllocated);
+
+// Private methods
+ Uint32 leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr currPageRangePtr);
+ void releasePagerange(PageRangePtr regPRPtr);
+ void seizePagerange(PageRangePtr& regPageRangePtr);
+ void errorHandler(Uint32 errorCode);
+ void allocMoreFragPages(Fragrecord* const regFragPtr);
+
+// Private data
+ Uint32 cfirstfreerange;
+ PageRange *pageRange;
+ Uint32 c_noOfFreePageRanges;
+ Uint32 cnoOfPageRangeRec;
+
+//------------------------------------------------------------------------------------------------------
+// Fixed Allocator
+// Allocates and deallocates tuples of fixed size on a fragment.
+//------------------------------------------------------------------------------------------------------
+//
+// Public methods
+ bool allocTh(Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Uint32 pageType,
+ Signal* signal,
+ Uint32& pageOffset,
+ PagePtr& pagePtr);
+
+ void freeThSr(Tablerec* const regTabPtr,
+ Page* const regPagePtr,
+ Uint32 freePageOffset);
+
+ void freeTh(Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Signal* signal,
+ Page* const regPagePtr,
+ Uint32 freePageOffset);
+
+ void getThAtPageSr(Page* const regPagePtr,
+ Uint32& pageOffset);
+
+// Private methods
+ void convertThPage(Uint32 Tupheadsize,
+ Page* const regPagePtr);
+
+ void getThAtPage(Fragrecord* const regFragPtr,
+ Page* const regPagePtr,
+ Signal* signal,
+ Uint32& pageOffset);
+
+ void getEmptyPageThCopy(Fragrecord* const regFragPtr,
+ Signal* signal,
+ Page* const regPagePtr);
+
+ void getEmptyPageTh(Fragrecord* const regFragPtr,
+ Signal* signal,
+ Page* const regPagePtr);
+
+//------------------------------------------------------------------------------------------------------
+// Temporary variables used to hold commonly accessed values in certain modules
+//------------------------------------------------------------------------------------------------------
+
+ FragrecordPtr fragptr;
+ OperationrecPtr operPtr;
+ TablerecPtr tabptr;
+
+// readAttributes and updateAttributes module
+ Uint32 tCheckOffset;
+ Uint32 tMaxRead;
+ Uint32 tOutBufIndex;
+ Uint32* tTupleHeader;
+ bool tXfrmFlag;
+
+// updateAttributes module
+ Uint32 tInBufIndex;
+ Uint32 tInBufLen;
+
+ Uint32 terrorCode;
+
+//------------------------------------------------------------------------------------------------------
+// Common stored variables. These variables always have a valid value.
+//------------------------------------------------------------------------------------------------------
+ Uint32 cnoOfLcpRec;
+ Uint32 cnoOfParallellUndoFiles;
+ Uint32 cnoOfUndoPage;
+
+ Attrbufrec *attrbufrec;
+ Uint32 cfirstfreeAttrbufrec;
+ Uint32 cnoOfAttrbufrec;
+ Uint32 cnoFreeAttrbufrec;
+
+ CheckpointInfo *checkpointInfo;
+ Uint32 cfirstfreeLcp;
+
+ DiskBufferSegmentInfo *diskBufferSegmentInfo;
+ Uint32 cfirstfreePdx;
+ Uint32 cnoOfConcurrentWriteOp;
+
+ Fragoperrec *fragoperrec;
+ Uint32 cfirstfreeFragopr;
+ Uint32 cnoOfFragoprec;
+
+ Fragrecord *fragrecord;
+ Uint32 cfirstfreefrag;
+ Uint32 cnoOfFragrec;
+
+ HostBuffer *hostBuffer;
+
+ LocalLogInfo *localLogInfo;
+ Uint32 cnoOfLocalLogInfo;
+
+ Uint32 cfirstfreeOprec;
+ Operationrec *operationrec;
+ Uint32 cnoOfOprec;
+
+ Page *page;
+ Uint32 cnoOfPage;
+ Uint32 cnoOfAllocatedPages;
+
+ PendingFileOpenInfo *pendingFileOpenInfo;
+ Uint32 cfirstfreePfo;
+ Uint32 cnoOfConcurrentOpenOp;
+
+ RestartInfoRecord *restartInfoRecord;
+ Uint32 cfirstfreeSri;
+ Uint32 cnoOfRestartInfoRec;
+
+ Tablerec *tablerec;
+ Uint32 cnoOfTablerec;
+
+ TableDescriptor *tableDescriptor;
+ Uint32 cnoOfTabDescrRec;
+
+ UndoPage *undoPage;
+ Uint32 cfirstfreeUndoSeg;
+ Int32 cnoFreeUndoSeg;
+
+
+
+ Uint32 cnoOfDataPagesToDiskWithoutSynch;
+
+ Uint32 cdata[32];
+ Uint32 cdataPages[16];
+ Uint32 cpackedListIndex;
+ Uint32 cpackedList[MAX_NODES];
+ Uint32 cfreeTdList[16];
+ Uint32 clastBitMask;
+ Uint32 clblPageCounter;
+ Uint32 clblPagesPerTick;
+ Uint32 clblPagesPerTickAfterSr;
+ BlockReference clqhBlockref;
+ Uint32 clqhUserpointer;
+ Uint32 cminusOne;
+ BlockReference cndbcntrRef;
+ Uint32 cundoFileVersion;
+ BlockReference cownref;
+ Uint32 cownNodeId;
+ Uint32 czero;
+
+ // A little bigger than the real size (16384 words) to cover overwrites in the copy algorithms.
+#define ZATTR_BUFFER_SIZE 16384
+ Uint32 clogMemBuffer[ZATTR_BUFFER_SIZE + 16];
+ Uint32 coutBuffer[ZATTR_BUFFER_SIZE + 16];
+ Uint32 cinBuffer[ZATTR_BUFFER_SIZE + 16];
+ Uint32 totNoOfPagesAllocated;
+
+ // Trigger variables
+ Uint32 c_maxTriggersPerTable;
+
+ // Counters for the number of UNDO log records executed
+ Uint32 cSrUndoRecords[9];
+
+ STATIC_CONST(MAX_PARALLELL_TUP_SRREQ = 2);
+ Uint32 c_sr_free_page_0;
+
+ Uint32 c_errorInsert4000TableId;
+
+ void initGlobalTemporaryVars();
+ void reportMemoryUsage(Signal* signal, int incDec);
+
+
+#ifdef VM_TRACE
+ struct Th {
+ Uint32 data[1];
+ };
+ friend NdbOut& operator<<(NdbOut&, const Operationrec&);
+ friend NdbOut& operator<<(NdbOut&, const Th&);
+#endif
+};
+
+inline
+bool Dbtup::isUndoLoggingNeeded(Fragrecord* const regFragPtr,
+ Uint32 pageId)
+{
+ if ((regFragPtr->checkpointVersion != RNIL) &&
+ (pageId >= regFragPtr->minPageNotWrittenInCheckpoint) &&
+ (pageId < regFragPtr->maxPageWrittenInCheckpoint)) {
+ return true;
+ }//if
+ return false;
+}//Dbtup::isUndoLoggingNeeded()
+
+inline
+bool Dbtup::isUndoLoggingActive(Fragrecord* const regFragPtr)
+{
+ if (regFragPtr->checkpointVersion != RNIL) {
+ return true;
+ }//if
+ return false;
+}//Dbtup::isUndoLoggingActive()
+
+inline
+bool Dbtup::isUndoLoggingBlocked(Fragrecord* const regFragPtr)
+{
+ if ((regFragPtr->checkpointVersion != RNIL) &&
+ (cnoFreeUndoSeg < ZMIN_PAGE_LIMIT_TUPKEYREQ)) {
+ return true;
+ }//if
+ return false;
+}//Dbtup::isUndoLoggingBlocked()
+
+inline
+bool Dbtup::isPageUndoLogged(Fragrecord* const regFragPtr,
+ Uint32 pageId)
+{
+ if ((pageId >= regFragPtr->minPageNotWrittenInCheckpoint) &&
+ (pageId < regFragPtr->maxPageWrittenInCheckpoint)) {
+ return true;
+ }//if
+ return false;
+}//Dbtup::isPageUndoLogged()
+
+#endif
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
index e9043a8b52d..e9043a8b52d 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
index 6a478bea917..6a478bea917 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
index 470b98fd04c..470b98fd04c 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
index 8c43de52a75..8c43de52a75 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
index 761f959acdc..761f959acdc 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp
index cdd54ba2337..cdd54ba2337 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
index 03f02dd0b92..03f02dd0b92 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
index ab6e0642e11..ab6e0642e11 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp
index 370ef4c4ba5..370ef4c4ba5 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index 4ce807528c4..4ce807528c4 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
index 9722aa437c0..9722aa437c0 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
index 1f674876642..1f674876642 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
new file mode 100644
index 00000000000..535ff50bcd5
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
@@ -0,0 +1,1186 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <AttributeDescriptor.hpp>
+#include "AttributeOffset.hpp"
+#include <AttributeHeader.hpp>
+
+#define ljam() { jamLine(3000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(3000 + __LINE__); }
+
+void
+Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
+{
+ Uint32 startDescriptor = regTabPtr->tabDescriptor;
+ ndbrequire((startDescriptor + (regTabPtr->noOfAttr << ZAD_LOG_SIZE)) <= cnoOfTabDescrRec);
+ for (Uint32 i = 0; i < regTabPtr->noOfAttr; i++) {
+ Uint32 attrDescriptorStart = startDescriptor + (i << ZAD_LOG_SIZE);
+ Uint32 attrDescriptor = tableDescriptor[attrDescriptorStart].tabDescr;
+ Uint32 attrOffset = tableDescriptor[attrDescriptorStart + 1].tabDescr;
+ if (!AttributeDescriptor::getDynamic(attrDescriptor)) {
+ if ((AttributeDescriptor::getArrayType(attrDescriptor) == ZNON_ARRAY) ||
+ (AttributeDescriptor::getArrayType(attrDescriptor) == ZFIXED_ARRAY)) {
+ if (!AttributeDescriptor::getNullable(attrDescriptor)) {
+ if (AttributeDescriptor::getSize(attrDescriptor) == 0){
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNotNULL;
+ } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1){
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHOneWordNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHOneWordNotNULL;
+ } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 2) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHTwoWordNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHTwoWordNotNULL;
+ } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) > 2) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNotNULL;
+ } else {
+ ndbrequire(false);
+ }//if
+ // replace functions for char attribute
+ if (AttributeOffset::getCharsetFlag(attrOffset)) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNotNULL;
+ }
+ } else {
+ if (AttributeDescriptor::getSize(attrDescriptor) == 0){
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNULLable;
+ } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1){
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHOneWordNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
+ } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 2) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHTwoWordNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
+ } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) > 2) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
+ } else {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHZeroWordNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
+ }//if
+ // replace functions for char attribute
+ if (AttributeOffset::getCharsetFlag(attrOffset)) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
+ }
+ }//if
+ } else if (AttributeDescriptor::getArrayType(attrDescriptor) == ZVAR_ARRAY) {
+ if (!AttributeDescriptor::getNullable(attrDescriptor)) {
+ if (AttributeDescriptor::getArraySize(attrDescriptor) == 0) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readVarSizeUnlimitedNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateVarSizeUnlimitedNotNULL;
+ } else if (AttributeDescriptor::getArraySize(attrDescriptor) > ZMAX_SMALL_VAR_ARRAY) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readBigVarSizeNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateBigVarSizeNotNULL;
+ } else {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readSmallVarSizeNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateSmallVarSizeNotNULL;
+ }//if
+ } else {
+ if (AttributeDescriptor::getArraySize(attrDescriptor) == 0) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readVarSizeUnlimitedNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateVarSizeUnlimitedNULLable;
+ } else if (AttributeDescriptor::getArraySize(attrDescriptor) > ZMAX_SMALL_VAR_ARRAY) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readBigVarSizeNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateBigVarSizeNULLable;
+ } else {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readSmallVarSizeNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateSmallVarSizeNULLable;
+ }//if
+ }//if
+ } else {
+ ndbrequire(false);
+ }//if
+ } else {
+ if ((AttributeDescriptor::getArrayType(attrDescriptor) == ZNON_ARRAY) ||
+ (AttributeDescriptor::getArrayType(attrDescriptor) == ZFIXED_ARRAY)) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readDynFixedSize;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateDynFixedSize;
+ } else if (AttributeDescriptor::getType(attrDescriptor) == ZVAR_ARRAY) {
+ if (AttributeDescriptor::getArraySize(attrDescriptor) == 0) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readDynVarSizeUnlimited;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateDynVarSizeUnlimited;
+ } else if (AttributeDescriptor::getArraySize(attrDescriptor) > ZMAX_SMALL_VAR_ARRAY) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readDynBigVarSize;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateDynBigVarSize;
+ } else {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readDynSmallVarSize;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateDynSmallVarSize;
+ }//if
+ } else {
+ ndbrequire(false);
+ }//if
+ }//if
+ }//for
+}//Dbtup::setUpQueryRoutines()
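+
+#if 0
+// Illustrative sketch only (hypothetical ExampleBlock, not DBTUP code):
+// setUpQueryRoutines() fills per-attribute tables of pointers to member
+// functions; readAttributes()/updateAttributes() later dispatch through
+// them as (this->*f)(...), as in this standalone pattern:
+struct ExampleBlock {
+  typedef bool (ExampleBlock::*ReadFn)(Uint32* out);
+  ReadFn readTable[2];
+  bool readOne(Uint32* out) { *out = 1; return true; }
+  bool readTwo(Uint32* out) { *out = 2; return true; }
+  void setUp() {
+    readTable[0] = &ExampleBlock::readOne;
+    readTable[1] = &ExampleBlock::readTwo;
+  }
+  bool dispatch(Uint32 i, Uint32* out) { return (this->*readTable[i])(out); }
+};
+#endif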
+
+/* ---------------------------------------------------------------- */
+/* THIS ROUTINE IS USED TO READ A NUMBER OF ATTRIBUTES IN THE */
+/* DATABASE AND PLACE THE RESULT IN ATTRINFO RECORDS. */
+//
+// In addition to the parameters used in the call, it also relies on the
+// following variables being set up properly:
+//
+// operPtr.p Operation record pointer
+// fragptr.p Fragment record pointer
+// tabptr.p Table record pointer
+/* ---------------------------------------------------------------- */
+int Dbtup::readAttributes(Page* const pagePtr,
+ Uint32 tupHeadOffset,
+ const Uint32* inBuffer,
+ Uint32 inBufLen,
+ Uint32* outBuffer,
+ Uint32 maxRead,
+ bool xfrmFlag)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 numAttributes = regTabPtr->noOfAttr;
+ Uint32 attrDescriptorStart = regTabPtr->tabDescriptor;
+ Uint32 inBufIndex = 0;
+
+ ndbrequire(attrDescriptorStart + (numAttributes << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
+
+ tOutBufIndex = 0;
+ tCheckOffset = regTabPtr->tupheadsize;
+ tMaxRead = maxRead;
+ tTupleHeader = &pagePtr->pageWord[tupHeadOffset];
+ tXfrmFlag = xfrmFlag;
+
+ ndbrequire(tupHeadOffset + tCheckOffset <= ZWORDS_ON_PAGE);
+ while (inBufIndex < inBufLen) {
+ Uint32 tmpAttrBufIndex = tOutBufIndex;
+ AttributeHeader ahIn(inBuffer[inBufIndex]);
+ inBufIndex++;
+ Uint32 attributeId = ahIn.getAttributeId();
+ Uint32 attrDescriptorIndex = attrDescriptorStart + (attributeId << ZAD_LOG_SIZE);
+ ljam();
+
+ AttributeHeader::init(&outBuffer[tmpAttrBufIndex], attributeId, 0);
+ AttributeHeader* ahOut = (AttributeHeader*)&outBuffer[tmpAttrBufIndex];
+ tOutBufIndex = tmpAttrBufIndex + 1;
+ if (attributeId < numAttributes) {
+ Uint32 attributeDescriptor = tableDescriptor[attrDescriptorIndex].tabDescr;
+ Uint32 attributeOffset = tableDescriptor[attrDescriptorIndex + 1].tabDescr;
+ ReadFunction f = regTabPtr->readFunctionArray[attributeId];
+ if ((this->*f)(outBuffer,
+ ahOut,
+ attributeDescriptor,
+ attributeOffset)) {
+ continue;
+ } else {
+ return -1;
+ }//if
+ } else if(attributeId & AttributeHeader::PSEUDO){
+ Uint32 sz = read_pseudo(attributeId,
+ outBuffer+tmpAttrBufIndex+1);
+ AttributeHeader::init(&outBuffer[tmpAttrBufIndex], attributeId, sz);
+ tOutBufIndex = tmpAttrBufIndex + 1 + sz;
+ } else {
+ terrorCode = ZATTRIBUTE_ID_ERROR;
+ return -1;
+ }//if
+ }//while
+ return tOutBufIndex;
+}//Dbtup::readAttributes()
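+
+#if 0
+// Illustrative sketch only: the contract of readAttributes() is that
+// inBuffer holds one AttributeHeader word per requested attribute (only
+// the attribute id is used) and outBuffer receives, per attribute, an
+// AttributeHeader word followed by that attribute's data words.  A caller
+// could build the request like this (hypothetical helper, not used by DBTUP):
+static Uint32 exampleBuildReadRequest(const Uint32* attrIds,
+                                      Uint32 noOfAttrs,
+                                      Uint32* inBuffer)
+{
+  for (Uint32 i = 0; i < noOfAttrs; i++) {
+    // One header word per attribute; data sizes are filled in by the reader.
+    AttributeHeader::init(&inBuffer[i], attrIds[i], 0);
+  }
+  return noOfAttrs; // inBufLen in words
+}
+#endif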
+
+#if 0
+int Dbtup::readAttributesWithoutHeader(Page* const pagePtr,
+ Uint32 tupHeadOffset,
+ Uint32* inBuffer,
+ Uint32 inBufLen,
+ Uint32* outBuffer,
+ Uint32* attrBuffer,
+ Uint32 maxRead)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 numAttributes = regTabPtr->noOfAttr;
+ Uint32 attrDescriptorStart = regTabPtr->tabDescriptor;
+ Uint32 inBufIndex = 0;
+ Uint32 attrBufIndex = 0;
+
+ ndbrequire(attrDescriptorStart + (numAttributes << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
+
+ tOutBufIndex = 0;
+ tCheckOffset = regTabPtr->tupheadsize;
+ tMaxRead = maxRead;
+ tTupleHeader = &pagePtr->pageWord[tupHeadOffset];
+
+ ndbrequire(tupHeadOffset + tCheckOffset <= ZWORDS_ON_PAGE);
+ while (inBufIndex < inBufLen) {
+ AttributeHeader ahIn(inBuffer[inBufIndex]);
+ inBufIndex++;
+ Uint32 attributeId = ahIn.getAttributeId();
+ Uint32 attrDescriptorIndex = attrDescriptorStart + (attributeId << ZAD_LOG_SIZE);
+ ljam();
+
+ AttributeHeader::init(&attrBuffer[attrBufIndex], attributeId, 0);
+ AttributeHeader* ahOut = (AttributeHeader*)&attrBuffer[attrBufIndex];
+ attrBufIndex++;
+ if (attributeId < numAttributes) {
+ Uint32 attributeDescriptor = tableDescriptor[attrDescriptorIndex].tabDescr;
+ Uint32 attributeOffset = tableDescriptor[attrDescriptorIndex + 1].tabDescr;
+ ReadFunction f = regTabPtr->readFunctionArray[attributeId];
+ if ((this->*f)(outBuffer,
+ ahOut,
+ attributeDescriptor,
+ attributeOffset)) {
+ continue;
+ } else {
+ return -1;
+ }//if
+ } else {
+ terrorCode = ZATTRIBUTE_ID_ERROR;
+ return -1;
+ }//if
+ }//while
+ ndbrequire(attrBufIndex == inBufLen);
+ return tOutBufIndex;
+}//Dbtup::readAttributesWithoutHeader()
+#endif
+
+bool
+Dbtup::readFixedSizeTHOneWordNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Uint32 indexBuf = tOutBufIndex;
+ Uint32 readOffset = AttributeOffset::getOffset(attrDes2);
+ Uint32 const wordRead = tTupleHeader[readOffset];
+ Uint32 newIndexBuf = indexBuf + 1;
+ Uint32 maxRead = tMaxRead;
+
+ ndbrequire(readOffset < tCheckOffset);
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ outBuffer[indexBuf] = wordRead;
+ ahOut->setDataSize(1);
+ tOutBufIndex = newIndexBuf;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ return false;
+ }//if
+}//Dbtup::readFixedSizeTHOneWordNotNULL()
+
+bool
+Dbtup::readFixedSizeTHTwoWordNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Uint32 indexBuf = tOutBufIndex;
+ Uint32 readOffset = AttributeOffset::getOffset(attrDes2);
+ Uint32 const wordReadFirst = tTupleHeader[readOffset];
+ Uint32 const wordReadSecond = tTupleHeader[readOffset + 1];
+ Uint32 newIndexBuf = indexBuf + 2;
+ Uint32 maxRead = tMaxRead;
+
+ ndbrequire(readOffset + 1 < tCheckOffset);
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ ahOut->setDataSize(2);
+ outBuffer[indexBuf] = wordReadFirst;
+ outBuffer[indexBuf + 1] = wordReadSecond;
+ tOutBufIndex = newIndexBuf;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ return false;
+ }//if
+}//Dbtup::readFixedSizeTHTwoWordNotNULL()
+
+bool
+Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Uint32 indexBuf = tOutBufIndex;
+ Uint32 charsetFlag = AttributeOffset::getCharsetFlag(attrDes2);
+ Uint32 readOffset = AttributeOffset::getOffset(attrDes2);
+ Uint32 attrNoOfWords = AttributeDescriptor::getSizeInWords(attrDescriptor);
+ Uint32 maxRead = tMaxRead;
+
+ ndbrequire((readOffset + attrNoOfWords - 1) < tCheckOffset);
+ if (! charsetFlag || ! tXfrmFlag) {
+ Uint32 newIndexBuf = indexBuf + attrNoOfWords;
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ ahOut->setDataSize(attrNoOfWords);
+ MEMCOPY_NO_WORDS(&outBuffer[indexBuf],
+ &tTupleHeader[readOffset],
+ attrNoOfWords);
+ tOutBufIndex = newIndexBuf;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ }//if
+ } else {
+ ljam();
+ Tablerec* regTabPtr = tabptr.p;
+ Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(attrDescriptor);
+ uchar* dstPtr = (uchar*)&outBuffer[indexBuf];
+ const uchar* srcPtr = (uchar*)&tTupleHeader[readOffset];
+ Uint32 i = AttributeOffset::getCharsetPos(attrDes2);
+ ndbrequire(i < regTabPtr->noOfCharsets);
+ CHARSET_INFO* cs = regTabPtr->charsetArray[i];
+ Uint32 typeId = AttributeDescriptor::getType(attrDescriptor);
+ Uint32 lb, len;
+ bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
+ if (ok) {
+ Uint32 xmul = cs->strxfrm_multiply;
+ if (xmul == 0)
+ xmul = 1;
+ // see comment in DbtcMain.cpp
+ Uint32 dstLen = xmul * (srcBytes - lb);
+ Uint32 maxIndexBuf = indexBuf + (dstLen >> 2);
+ if (maxIndexBuf <= maxRead) {
+ ljam();
+ int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
+ ndbrequire(n != -1);
+ while ((n & 3) != 0) {
+ dstPtr[n++] = 0;
+ }
+ Uint32 dstWords = (n >> 2);
+ ahOut->setDataSize(dstWords);
+ Uint32 newIndexBuf = indexBuf + dstWords;
+ ndbrequire(newIndexBuf <= maxRead);
+ tOutBufIndex = newIndexBuf;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ }
+ } else {
+ ljam();
+ terrorCode = ZTUPLE_CORRUPTED_ERROR;
+ }
+ }
+ return false;
+}//Dbtup::readFixedSizeTHManyWordNotNULL()
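+
+#if 0
+// Illustrative sketch only: the charset branch above zero-pads the
+// strxfrm:ed bytes up to the next 32-bit word boundary and then reports
+// the length in words, i.e. the equivalent of:
+static Uint32 examplePadToWords(uchar* dst, int n)
+{
+  while ((n & 3) != 0)
+    dst[n++] = 0;            // pad to a multiple of 4 bytes
+  return (Uint32)(n >> 2);   // length in 32-bit words
+}
+#endif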
+
+bool
+Dbtup::readFixedSizeTHOneWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ if (!nullFlagCheck(attrDes2)) {
+ ljam();
+ return readFixedSizeTHOneWordNotNULL(outBuffer,
+ ahOut,
+ attrDescriptor,
+ attrDes2);
+ } else {
+ ljam();
+ ahOut->setNULL();
+ return true;
+ }//if
+}//Dbtup::readFixedSizeTHOneWordNULLable()
+
+bool
+Dbtup::readFixedSizeTHTwoWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ if (!nullFlagCheck(attrDes2)) {
+ ljam();
+ return readFixedSizeTHTwoWordNotNULL(outBuffer,
+ ahOut,
+ attrDescriptor,
+ attrDes2);
+ } else {
+ ljam();
+ ahOut->setNULL();
+ return true;
+ }//if
+}//Dbtup::readFixedSizeTHTwoWordNULLable()
+
+bool
+Dbtup::readFixedSizeTHManyWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ if (!nullFlagCheck(attrDes2)) {
+ ljam();
+ return readFixedSizeTHManyWordNotNULL(outBuffer,
+ ahOut,
+ attrDescriptor,
+ attrDes2);
+ } else {
+ ljam();
+ ahOut->setNULL();
+ return true;
+ }//if
+}//Dbtup::readFixedSizeTHManyWordNULLable()
+
+bool
+Dbtup::readFixedSizeTHZeroWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ if (nullFlagCheck(attrDes2)) {
+ ljam();
+ ahOut->setNULL();
+ }//if
+ return true;
+}//Dbtup::readFixedSizeTHZeroWordNULLable()
+
+bool
+Dbtup::nullFlagCheck(Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 nullFlagOffsetInTuple = AttributeOffset::getNullFlagOffset(attrDes2);
+ ndbrequire(nullFlagOffsetInTuple < regTabPtr->tupNullWords);
+ nullFlagOffsetInTuple += regTabPtr->tupNullIndex;
+ ndbrequire(nullFlagOffsetInTuple < tCheckOffset);
+
+ return (AttributeOffset::isNULL(tTupleHeader[nullFlagOffsetInTuple], attrDes2));
+}//Dbtup::nullFlagCheck()
+
+bool
+Dbtup::readVariableSizedAttr(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readVariableSizedAttr()
+
+bool
+Dbtup::readVarSizeUnlimitedNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readVarSizeUnlimitedNotNULL()
+
+bool
+Dbtup::readVarSizeUnlimitedNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readVarSizeUnlimitedNULLable()
+
+bool
+Dbtup::readBigVarSizeNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readBigVarSizeNotNULL()
+
+bool
+Dbtup::readBigVarSizeNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readBigVarSizeNULLable()
+
+bool
+Dbtup::readSmallVarSizeNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readSmallVarSizeNotNULL()
+
+bool
+Dbtup::readSmallVarSizeNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readSmallVarSizeNULLable()
+
+bool
+Dbtup::readDynFixedSize(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readDynFixedSize()
+
+bool
+Dbtup::readDynVarSizeUnlimited(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readDynVarSizeUnlimited()
+
+bool
+Dbtup::readDynBigVarSize(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readDynBigVarSize()
+
+bool
+Dbtup::readDynSmallVarSize(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readDynSmallVarSize()
+
+/* ---------------------------------------------------------------------- */
+/* THIS ROUTINE IS USED TO UPDATE A NUMBER OF ATTRIBUTES. IT IS */
+/* USED BY THE INSERT ROUTINE, THE UPDATE ROUTINE AND IT CAN BE */
+/* CALLED SEVERAL TIMES FROM THE INTERPRETER. */
+// In addition to the parameters passed in the call, it also relies on the
+// following variables being set up properly:
+//
+// pagep.p Page record pointer
+// fragptr.p Fragment record pointer
+// operPtr.p Operation record pointer
+// tabptr.p Table record pointer
+/* ---------------------------------------------------------------------- */
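+//
+// Illustrative sketch of the inBuffer (ATTRINFO) layout consumed below, as
+// implied by the update functions in this file; not an independent spec:
+//
+//   word 0      AttributeHeader(attributeId, ...); its NULL bit set means
+//               no data words follow for this attribute
+//   word 1..n   the attribute value (n = attribute size in words)
+//
+// updateAttributes() walks such entries until inBufLen words are consumed,
+// dispatching each through regTabPtr->updateFunctionArray[attributeId].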
+int Dbtup::updateAttributes(Page* const pagePtr,
+ Uint32 tupHeadOffset,
+ Uint32* inBuffer,
+ Uint32 inBufLen)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Operationrec* const regOperPtr = operPtr.p;
+ Uint32 numAttributes = regTabPtr->noOfAttr;
+ Uint32 attrDescriptorStart = regTabPtr->tabDescriptor;
+ ndbrequire(attrDescriptorStart + (numAttributes << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
+
+ tCheckOffset = regTabPtr->tupheadsize;
+ tTupleHeader = &pagePtr->pageWord[tupHeadOffset];
+ Uint32 inBufIndex = 0;
+ tInBufIndex = 0;
+ tInBufLen = inBufLen;
+
+ ndbrequire(tupHeadOffset + tCheckOffset <= ZWORDS_ON_PAGE);
+ while (inBufIndex < inBufLen) {
+ AttributeHeader ahIn(inBuffer[inBufIndex]);
+ Uint32 attributeId = ahIn.getAttributeId();
+ Uint32 attrDescriptorIndex = attrDescriptorStart + (attributeId << ZAD_LOG_SIZE);
+ if (attributeId < numAttributes) {
+ Uint32 attrDescriptor = tableDescriptor[attrDescriptorIndex].tabDescr;
+ Uint32 attributeOffset = tableDescriptor[attrDescriptorIndex + 1].tabDescr;
+ if ((AttributeDescriptor::getPrimaryKey(attrDescriptor)) &&
+ (regOperPtr->optype != ZINSERT)) {
+ if (checkUpdateOfPrimaryKey(&inBuffer[inBufIndex], regTabPtr)) {
+ ljam();
+ terrorCode = ZTRY_UPDATE_PRIMARY_KEY;
+ return -1;
+ }//if
+ }//if
+ UpdateFunction f = regTabPtr->updateFunctionArray[attributeId];
+ ljam();
+ regOperPtr->changeMask.set(attributeId);
+ if ((this->*f)(inBuffer,
+ attrDescriptor,
+ attributeOffset)) {
+ inBufIndex = tInBufIndex;
+ continue;
+ } else {
+ ljam();
+ return -1;
+ }//if
+ } else {
+ ljam();
+ terrorCode = ZATTRIBUTE_ID_ERROR;
+ return -1;
+ }//if
+ }//while
+ return 0;
+}//Dbtup::updateAttributes()
+
+bool
+Dbtup::checkUpdateOfPrimaryKey(Uint32* updateBuffer, Tablerec* const regTabPtr)
+{
+ Uint32 keyReadBuffer[MAX_KEY_SIZE_IN_WORDS];
+ Uint32 attributeHeader;
+ AttributeHeader* ahOut = (AttributeHeader*)&attributeHeader;
+ AttributeHeader ahIn(*updateBuffer);
+ Uint32 attributeId = ahIn.getAttributeId();
+ Uint32 attrDescriptorIndex = regTabPtr->tabDescriptor + (attributeId << ZAD_LOG_SIZE);
+ Uint32 attrDescriptor = tableDescriptor[attrDescriptorIndex].tabDescr;
+ Uint32 attributeOffset = tableDescriptor[attrDescriptorIndex + 1].tabDescr;
+ ReadFunction f = regTabPtr->readFunctionArray[attributeId];
+
+ AttributeHeader::init(&attributeHeader, attributeId, 0);
+ tOutBufIndex = 0;
+ tMaxRead = MAX_KEY_SIZE_IN_WORDS;
+
+ bool tmp = tXfrmFlag;
+ tXfrmFlag = false;
+ ndbrequire((this->*f)(&keyReadBuffer[0], ahOut, attrDescriptor, attributeOffset));
+ tXfrmFlag = tmp;
+ ndbrequire(tOutBufIndex == ahOut->getDataSize());
+ if (ahIn.getDataSize() != ahOut->getDataSize()) {
+ ljam();
+ return true;
+ }//if
+ if (memcmp(&keyReadBuffer[0], &updateBuffer[1], tOutBufIndex << 2) != 0) {
+ ljam();
+ return true;
+ }//if
+ return false;
+}//Dbtup::checkUpdateOfPrimaryKey()
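+
+// Illustrative summary (derived from the code above): the stored key
+// attribute is re-read with tXfrmFlag temporarily cleared, i.e. as raw
+// bytes without the collation transform, and compared word for word with
+// the incoming value, e.g.
+//
+//   memcmp(keyReadBuffer, updateBuffer + 1, tOutBufIndex << 2)
+//
+// Any difference in size or content is treated as an attempted update of
+// the primary key and rejected by the caller.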
+
+#if 0
+void Dbtup::checkPages(Fragrecord* const regFragPtr)
+{
+ Uint32 noPages = getNoOfPages(regFragPtr);
+ for (Uint32 i = 0; i < noPages ; i++) {
+ PagePtr pagePtr;
+ pagePtr.i = getRealpid(regFragPtr, i);
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ ndbrequire(pagePtr.p->pageWord[1] != (RNIL - 1));
+ }
+}
+#endif
+
+bool
+Dbtup::updateFixedSizeTHOneWordNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Uint32 indexBuf = tInBufIndex;
+ Uint32 inBufLen = tInBufLen;
+ Uint32 updateOffset = AttributeOffset::getOffset(attrDes2);
+ AttributeHeader ahIn(inBuffer[indexBuf]);
+ Uint32 nullIndicator = ahIn.isNULL();
+ Uint32 newIndex = indexBuf + 2;
+ ndbrequire(updateOffset < tCheckOffset);
+
+ if (newIndex <= inBufLen) {
+ Uint32 updateWord = inBuffer[indexBuf + 1];
+ if (!nullIndicator) {
+ ljam();
+ tInBufIndex = newIndex;
+ tTupleHeader[updateOffset] = updateWord;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZNOT_NULL_ATTR;
+ return false;
+ }//if
+ } else {
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ return false;
+ }//if
+}//Dbtup::updateFixedSizeTHOneWordNotNULL()
+
+bool
+Dbtup::updateFixedSizeTHTwoWordNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Uint32 indexBuf = tInBufIndex;
+ Uint32 inBufLen = tInBufLen;
+ Uint32 updateOffset = AttributeOffset::getOffset(attrDes2);
+ AttributeHeader ahIn(inBuffer[indexBuf]);
+ Uint32 nullIndicator = ahIn.isNULL();
+ Uint32 newIndex = indexBuf + 3;
+ ndbrequire((updateOffset + 1) < tCheckOffset);
+
+ if (newIndex <= inBufLen) {
+ Uint32 updateWord1 = inBuffer[indexBuf + 1];
+ Uint32 updateWord2 = inBuffer[indexBuf + 2];
+ if (!nullIndicator) {
+ ljam();
+ tInBufIndex = newIndex;
+ tTupleHeader[updateOffset] = updateWord1;
+ tTupleHeader[updateOffset + 1] = updateWord2;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZNOT_NULL_ATTR;
+ return false;
+ }//if
+ } else {
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ return false;
+ }//if
+}//Dbtup::updateFixedSizeTHTwoWordNotNULL()
+
+bool
+Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Uint32 indexBuf = tInBufIndex;
+ Uint32 inBufLen = tInBufLen;
+ Uint32 updateOffset = AttributeOffset::getOffset(attrDes2);
+ Uint32 charsetFlag = AttributeOffset::getCharsetFlag(attrDes2);
+ AttributeHeader ahIn(inBuffer[indexBuf]);
+ Uint32 nullIndicator = ahIn.isNULL();
+ Uint32 noOfWords = AttributeDescriptor::getSizeInWords(attrDescriptor);
+ Uint32 newIndex = indexBuf + noOfWords + 1;
+ ndbrequire((updateOffset + noOfWords - 1) < tCheckOffset);
+
+ if (newIndex <= inBufLen) {
+ if (!nullIndicator) {
+ ljam();
+ if (charsetFlag) {
+ ljam();
+ Tablerec* regTabPtr = tabptr.p;
+ Uint32 typeId = AttributeDescriptor::getType(attrDescriptor);
+ Uint32 bytes = AttributeDescriptor::getSizeInBytes(attrDescriptor);
+ Uint32 i = AttributeOffset::getCharsetPos(attrDes2);
+ ndbrequire(i < regTabPtr->noOfCharsets);
+ // not const in MySQL
+ CHARSET_INFO* cs = regTabPtr->charsetArray[i];
+ int not_used;
+ const char* ssrc = (const char*)&inBuffer[tInBufIndex + 1];
+ Uint32 lb, len;
+ if (! NdbSqlUtil::get_var_length(typeId, ssrc, bytes, lb, len)) {
+ ljam();
+ terrorCode = ZINVALID_CHAR_FORMAT;
+ return false;
+ }
+ // fast fix bug#7340
+ if (typeId != NDB_TYPE_TEXT &&
+ (*cs->cset->well_formed_len)(cs, ssrc + lb, ssrc + lb + len, ZNIL, &not_used) != len) {
+ ljam();
+ terrorCode = ZINVALID_CHAR_FORMAT;
+ return false;
+ }
+ }
+ tInBufIndex = newIndex;
+ MEMCOPY_NO_WORDS(&tTupleHeader[updateOffset],
+ &inBuffer[indexBuf + 1],
+ noOfWords);
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZNOT_NULL_ATTR;
+ return false;
+ }//if
+ } else {
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ return false;
+ }//if
+}//Dbtup::updateFixedSizeTHManyWordNotNULL()
+
+bool
+Dbtup::updateFixedSizeTHManyWordNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ AttributeHeader ahIn(inBuffer[tInBufIndex]);
+ Uint32 nullIndicator = ahIn.isNULL();
+ Uint32 nullFlagOffset = AttributeOffset::getNullFlagOffset(attrDes2);
+ Uint32 nullFlagBitOffset = AttributeOffset::getNullFlagBitOffset(attrDes2);
+ Uint32 nullWordOffset = nullFlagOffset + regTabPtr->tupNullIndex;
+ ndbrequire((nullFlagOffset < regTabPtr->tupNullWords) &&
+ (nullWordOffset < tCheckOffset));
+ Uint32 nullBits = tTupleHeader[nullWordOffset];
+
+ if (!nullIndicator) {
+ nullBits &= (~(1 << nullFlagBitOffset));
+ ljam();
+ tTupleHeader[nullWordOffset] = nullBits;
+ return updateFixedSizeTHManyWordNotNULL(inBuffer,
+ attrDescriptor,
+ attrDes2);
+ } else {
+ Uint32 newIndex = tInBufIndex + 1;
+ if (newIndex <= tInBufLen) {
+ nullBits |= (1 << nullFlagBitOffset);
+ ljam();
+ tTupleHeader[nullWordOffset] = nullBits;
+ tInBufIndex = newIndex;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ return false;
+ }//if
+ }//if
+}//Dbtup::updateFixedSizeTHManyWordNULLable()
+
+bool
+Dbtup::updateVariableSizedAttr(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateVariableSizedAttr()
+
+bool
+Dbtup::updateVarSizeUnlimitedNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateVarSizeUnlimitedNotNULL()
+
+bool
+Dbtup::updateVarSizeUnlimitedNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateVarSizeUnlimitedNULLable()
+
+bool
+Dbtup::updateBigVarSizeNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateBigVarSizeNotNULL()
+
+bool
+Dbtup::updateBigVarSizeNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateBigVarSizeNULLable()
+
+bool
+Dbtup::updateSmallVarSizeNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateSmallVarSizeNotNULL()
+
+bool
+Dbtup::updateSmallVarSizeNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateSmallVarSizeNULLable()
+
+bool
+Dbtup::updateDynFixedSize(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateDynFixedSize()
+
+bool
+Dbtup::updateDynVarSizeUnlimited(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateDynVarSizeUnlimited()
+
+bool
+Dbtup::updateDynBigVarSize(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateDynBigVarSize()
+
+bool
+Dbtup::updateDynSmallVarSize(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateDynSmallVarSize()
+
+Uint32
+Dbtup::read_pseudo(Uint32 attrId, Uint32* outBuffer){
+ Uint32 tmp[sizeof(SignalHeader)+25];
+ Signal * signal = (Signal*)&tmp;
+ switch(attrId){
+ case AttributeHeader::FRAGMENT:
+ * outBuffer = operPtr.p->fragId >> 1; // remove "hash" bit
+ return 1;
+ case AttributeHeader::FRAGMENT_MEMORY:
+ {
+ Uint64 tmp= fragptr.p->noOfPages;
+ tmp*= 32768;
+ memcpy(outBuffer,&tmp,8);
+ }
+ return 2;
+ case AttributeHeader::ROW_SIZE:
+ * outBuffer = tabptr.p->tupheadsize << 2;
+ return 1;
+ case AttributeHeader::ROW_COUNT:
+ case AttributeHeader::COMMIT_COUNT:
+ signal->theData[0] = operPtr.p->userpointer;
+ signal->theData[1] = attrId;
+
+ EXECUTE_DIRECT(DBLQH, GSN_READ_PSEUDO_REQ, signal, 2);
+ outBuffer[0] = signal->theData[0];
+ outBuffer[1] = signal->theData[1];
+ return 2;
+ case AttributeHeader::RANGE_NO:
+ signal->theData[0] = operPtr.p->userpointer;
+ signal->theData[1] = attrId;
+
+ EXECUTE_DIRECT(DBLQH, GSN_READ_PSEUDO_REQ, signal, 2);
+ outBuffer[0] = signal->theData[0];
+ return 1;
+ default:
+ return 0;
+ }
+}
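+
+// Illustrative note: read_pseudo() returns the number of words written to
+// outBuffer, or 0 for an unknown pseudo column.  FRAGMENT, FRAGMENT_MEMORY
+// and ROW_SIZE are answered locally, for example
+//
+//   FRAGMENT_MEMORY = noOfPages * 32768 bytes, returned as two words
+//
+// while ROW_COUNT, COMMIT_COUNT and RANGE_NO are fetched from DBLQH with a
+// direct READ_PSEUDO_REQ using the operation's userpointer.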
+
+bool
+Dbtup::readBitsNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
+ Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+ Uint32 indexBuf = tOutBufIndex;
+ Uint32 newIndexBuf = indexBuf + ((bitCount + 31) >> 5);
+ Uint32 maxRead = tMaxRead;
+
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ ahOut->setDataSize((bitCount + 31) >> 5);
+ tOutBufIndex = newIndexBuf;
+
+ BitmaskImpl::getField(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos,
+ bitCount,
+ outBuffer+indexBuf);
+
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ return false;
+ }//if
+}
+
+bool
+Dbtup::readBitsNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
+ Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+
+ Uint32 indexBuf = tOutBufIndex;
+ Uint32 newIndexBuf = indexBuf + ((bitCount + 31) >> 5);
+ Uint32 maxRead = tMaxRead;
+
+ if(BitmaskImpl::get(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos))
+ {
+ ljam();
+ ahOut->setNULL();
+ return true;
+ }
+
+
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ ahOut->setDataSize((bitCount + 31) >> 5);
+ tOutBufIndex = newIndexBuf;
+ BitmaskImpl::getField(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos+1,
+ bitCount,
+ outBuffer+indexBuf);
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ return false;
+ }//if
+}
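+
+// Illustrative note on the bit-attribute layout (as used by the read
+// routines above and the update routines below): for a NULLable bit
+// attribute the NULL indicator occupies bit 'pos' in the null-bit area of
+// the tuple, and the value itself is stored in the following bitCount bits
+// starting at pos + 1.  NOT NULL bit attributes store the value at 'pos'
+// directly, which is why the NULLable variants read and write the field at
+// pos + 1.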
+
+bool
+Dbtup::updateBitsNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 indexBuf = tInBufIndex;
+ Uint32 inBufLen = tInBufLen;
+ AttributeHeader ahIn(inBuffer[indexBuf]);
+ Uint32 nullIndicator = ahIn.isNULL();
+ Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
+ Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+ Uint32 newIndex = indexBuf + 1 + ((bitCount + 31) >> 5);
+
+ if (newIndex <= inBufLen) {
+ if (!nullIndicator) {
+ BitmaskImpl::setField(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos,
+ bitCount,
+ inBuffer+indexBuf+1);
+ tInBufIndex = newIndex;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZNOT_NULL_ATTR;
+ return false;
+ }//if
+ } else {
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ return false;
+ }//if
+}
+
+bool
+Dbtup::updateBitsNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ AttributeHeader ahIn(inBuffer[tInBufIndex]);
+ Uint32 indexBuf = tInBufIndex;
+ Uint32 nullIndicator = ahIn.isNULL();
+ Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
+ Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+
+ if (!nullIndicator) {
+ BitmaskImpl::clear(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos);
+ BitmaskImpl::setField(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos+1,
+ bitCount,
+ inBuffer+indexBuf+1);
+
+ Uint32 newIndex = indexBuf + 1 + ((bitCount + 31) >> 5);
+ tInBufIndex = newIndex;
+ return true;
+ } else {
+ Uint32 newIndex = tInBufIndex + 1;
+ if (newIndex <= tInBufLen) {
+ ljam();
+ BitmaskImpl::set(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos);
+
+ tInBufIndex = newIndex;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ return false;
+ }//if
+ }//if
+}
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
index 396404faa8c..396404faa8c 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp
index 3b957688a1c..3b957688a1c 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp
index 33d63e8ce49..33d63e8ce49 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
index 642ba270760..642ba270760 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
new file mode 100644
index 00000000000..ce3889f0682
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
@@ -0,0 +1,1153 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <AttributeDescriptor.hpp>
+#include "AttributeOffset.hpp"
+#include <AttributeHeader.hpp>
+#include <signaldata/FireTrigOrd.hpp>
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/TuxMaint.hpp>
+
+#define ljam() { jamLine(7000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(7000 + __LINE__); }
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* ----------------------- TRIGGER HANDLING ----------------------- */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+ArrayList<Dbtup::TupTriggerData>*
+Dbtup::findTriggerList(Tablerec* table,
+ TriggerType::Value ttype,
+ TriggerActionTime::Value ttime,
+ TriggerEvent::Value tevent)
+{
+ ArrayList<TupTriggerData>* tlist = NULL;
+ switch (ttype) {
+ case TriggerType::SUBSCRIPTION:
+ case TriggerType::SUBSCRIPTION_BEFORE:
+ switch (tevent) {
+ case TriggerEvent::TE_INSERT:
+ ljam();
+ if (ttime == TriggerActionTime::TA_DETACHED)
+ tlist = &table->subscriptionInsertTriggers;
+ break;
+ case TriggerEvent::TE_UPDATE:
+ ljam();
+ if (ttime == TriggerActionTime::TA_DETACHED)
+ tlist = &table->subscriptionUpdateTriggers;
+ break;
+ case TriggerEvent::TE_DELETE:
+ ljam();
+ if (ttime == TriggerActionTime::TA_DETACHED)
+ tlist = &table->subscriptionDeleteTriggers;
+ break;
+ default:
+ break;
+ }
+ break;
+ case TriggerType::SECONDARY_INDEX:
+ switch (tevent) {
+ case TriggerEvent::TE_INSERT:
+ ljam();
+ if (ttime == TriggerActionTime::TA_AFTER)
+ tlist = &table->afterInsertTriggers;
+ break;
+ case TriggerEvent::TE_UPDATE:
+ ljam();
+ if (ttime == TriggerActionTime::TA_AFTER)
+ tlist = &table->afterUpdateTriggers;
+ break;
+ case TriggerEvent::TE_DELETE:
+ ljam();
+ if (ttime == TriggerActionTime::TA_AFTER)
+ tlist = &table->afterDeleteTriggers;
+ break;
+ default:
+ break;
+ }
+ break;
+ case TriggerType::ORDERED_INDEX:
+ switch (tevent) {
+ case TriggerEvent::TE_CUSTOM:
+ ljam();
+ if (ttime == TriggerActionTime::TA_CUSTOM)
+ tlist = &table->tuxCustomTriggers;
+ break;
+ default:
+ break;
+ }
+ break;
+ case TriggerType::READ_ONLY_CONSTRAINT:
+ switch (tevent) {
+ case TriggerEvent::TE_UPDATE:
+ ljam();
+ if (ttime == TriggerActionTime::TA_AFTER)
+ tlist = &table->constraintUpdateTriggers;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return tlist;
+}
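+
+// Illustrative examples of the (type, action time, event) -> list mapping
+// implemented above:
+//
+//   SECONDARY_INDEX,       TA_AFTER,    TE_INSERT -> afterInsertTriggers
+//   SUBSCRIPTION[_BEFORE], TA_DETACHED, TE_UPDATE -> subscriptionUpdateTriggers
+//   ORDERED_INDEX,         TA_CUSTOM,   TE_CUSTOM -> tuxCustomTriggers
+//
+// Any other combination returns NULL, which createTrigger/dropTrigger treat
+// as a fatal error (ndbrequire).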
+
+// Trigger signals
+void
+Dbtup::execCREATE_TRIG_REQ(Signal* signal)
+{
+ ljamEntry();
+ BlockReference senderRef = signal->getSendersBlockRef();
+ const CreateTrigReq reqCopy = *(const CreateTrigReq*)signal->getDataPtr();
+ const CreateTrigReq* const req = &reqCopy;
+
+ // Find table
+ TablerecPtr tabPtr;
+ tabPtr.i = req->getTableId();
+ ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
+
+ // Create trigger and associate it with the table
+ if (createTrigger(tabPtr.p, req)) {
+ ljam();
+ // Send conf
+ CreateTrigConf* const conf = (CreateTrigConf*)signal->getDataPtrSend();
+ conf->setUserRef(reference());
+ conf->setConnectionPtr(req->getConnectionPtr());
+ conf->setRequestType(req->getRequestType());
+ conf->setTableId(req->getTableId());
+ conf->setIndexId(req->getIndexId());
+ conf->setTriggerId(req->getTriggerId());
+ conf->setTriggerInfo(req->getTriggerInfo());
+ sendSignal(senderRef, GSN_CREATE_TRIG_CONF,
+ signal, CreateTrigConf::SignalLength, JBB);
+ } else {
+ ljam();
+ // Send ref
+ CreateTrigRef* const ref = (CreateTrigRef*)signal->getDataPtrSend();
+ ref->setUserRef(reference());
+ ref->setConnectionPtr(req->getConnectionPtr());
+ ref->setRequestType(req->getRequestType());
+ ref->setTableId(req->getTableId());
+ ref->setIndexId(req->getIndexId());
+ ref->setTriggerId(req->getTriggerId());
+ ref->setTriggerInfo(req->getTriggerInfo());
+ ref->setErrorCode(CreateTrigRef::TooManyTriggers);
+ sendSignal(senderRef, GSN_CREATE_TRIG_REF,
+ signal, CreateTrigRef::SignalLength, JBB);
+ }
+}//Dbtup::execCREATE_TRIG_REQ()
+
+void
+Dbtup::execDROP_TRIG_REQ(Signal* signal)
+{
+ ljamEntry();
+ BlockReference senderRef = signal->getSendersBlockRef();
+ const DropTrigReq reqCopy = *(const DropTrigReq*)signal->getDataPtr();
+ const DropTrigReq* const req = &reqCopy;
+
+ // Find table
+ TablerecPtr tabPtr;
+ tabPtr.i = req->getTableId();
+ ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
+
+ // Drop trigger
+ Uint32 r = dropTrigger(tabPtr.p, req);
+ if (r == 0){
+ // Send conf
+ DropTrigConf* const conf = (DropTrigConf*)signal->getDataPtrSend();
+ conf->setUserRef(senderRef);
+ conf->setConnectionPtr(req->getConnectionPtr());
+ conf->setRequestType(req->getRequestType());
+ conf->setTableId(req->getTableId());
+ conf->setIndexId(req->getIndexId());
+ conf->setTriggerId(req->getTriggerId());
+ sendSignal(senderRef, GSN_DROP_TRIG_CONF,
+ signal, DropTrigConf::SignalLength, JBB);
+ } else {
+ // Send ref
+ DropTrigRef* const ref = (DropTrigRef*)signal->getDataPtrSend();
+ ref->setUserRef(senderRef);
+ ref->setConnectionPtr(req->getConnectionPtr());
+ ref->setRequestType(req->getRequestType());
+ ref->setTableId(req->getTableId());
+ ref->setIndexId(req->getIndexId());
+ ref->setTriggerId(req->getTriggerId());
+ ref->setErrorCode((DropTrigRef::ErrorCode)r);
+ ref->setErrorLine(__LINE__);
+ ref->setErrorNode(refToNode(reference()));
+ sendSignal(senderRef, GSN_DROP_TRIG_REF,
+ signal, DropTrigRef::SignalLength, JBB);
+ }
+}//Dbtup::execDROP_TRIG_REQ()
+
+/* ---------------------------------------------------------------- */
+/* ------------------------- createTrigger ------------------------ */
+/* */
+/* Creates a new trigger record by fetching one from the trigger */
+/* pool and associates it with the given table. */
+/* Trigger type can be one of secondary_index, subscription, */
+/* constraint(NYI), foreign_key(NYI), schema_upgrade(NYI), */
+/* api_trigger(NYI) or sql_trigger(NYI). */
+/* Note that this method only checks for total number of allowed */
+/* triggers. Checking the number of allowed triggers per table is */
+/* done by TRIX. */
+/* */
+/* ---------------------------------------------------------------- */
+bool
+Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req)
+{
+ if (ERROR_INSERTED(4003)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return false;
+ }
+ TriggerType::Value ttype = req->getTriggerType();
+ TriggerActionTime::Value ttime = req->getTriggerActionTime();
+ TriggerEvent::Value tevent = req->getTriggerEvent();
+
+ ArrayList<TupTriggerData>* tlist = findTriggerList(table, ttype, ttime, tevent);
+ ndbrequire(tlist != NULL);
+
+ TriggerPtr tptr;
+ if (!tlist->seize(tptr))
+ return false;
+
+ // Set trigger id
+ tptr.p->triggerId = req->getTriggerId();
+
+ // ndbout_c("Create TupTrigger %u = %u %u %u %u", tptr.p->triggerId, table, ttype, ttime, tevent);
+
+ // Set index id
+ tptr.p->indexId = req->getIndexId();
+
+ // Set trigger type etc
+ tptr.p->triggerType = ttype;
+ tptr.p->triggerActionTime = ttime;
+ tptr.p->triggerEvent = tevent;
+
+ tptr.p->sendBeforeValues = true;
+ if ((tptr.p->triggerType == TriggerType::SUBSCRIPTION) &&
+ ((tptr.p->triggerEvent == TriggerEvent::TE_UPDATE) ||
+ (tptr.p->triggerEvent == TriggerEvent::TE_DELETE))) {
+ ljam();
+ tptr.p->sendBeforeValues = false;
+ }
+ tptr.p->sendOnlyChangedAttributes = false;
+ if (((tptr.p->triggerType == TriggerType::SUBSCRIPTION) ||
+ (tptr.p->triggerType == TriggerType::SUBSCRIPTION_BEFORE)) &&
+ (tptr.p->triggerEvent == TriggerEvent::TE_UPDATE)) {
+ ljam();
+ tptr.p->sendOnlyChangedAttributes = true;
+ }
+
+ // Set monitor all
+ tptr.p->monitorAllAttributes = req->getMonitorAllAttributes();
+ tptr.p->monitorReplicas = req->getMonitorReplicas();
+ tptr.p->m_receiverBlock = refToBlock(req->getReceiverRef());
+
+ tptr.p->attributeMask.clear();
+ if (tptr.p->monitorAllAttributes) {
+ ljam();
+ for(Uint32 i = 0; i < table->noOfAttr; i++) {
+ if (!primaryKey(table, i)) {
+ ljam();
+ tptr.p->attributeMask.set(i);
+ }
+ }
+ } else {
+ // Set attribute mask
+ ljam();
+ tptr.p->attributeMask = req->getAttributeMask();
+ }
+ return true;
+}//Dbtup::createTrigger()
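+
+// Illustrative example of the attribute mask set up above: for a table with
+// attributes 0..3 where attribute 0 is the primary key, a trigger created
+// with monitorAllAttributes ends up with attributeMask = {1,2,3}; otherwise
+// the mask supplied in CREATE_TRIG_REQ is used unchanged.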
+
+bool
+Dbtup::primaryKey(Tablerec* const regTabPtr, Uint32 attrId)
+{
+ Uint32 attrDescriptorStart = regTabPtr->tabDescriptor;
+ Uint32 attrDescriptor = getTabDescrWord(attrDescriptorStart + (attrId * ZAD_SIZE));
+ return (bool)AttributeDescriptor::getPrimaryKey(attrDescriptor);
+}//Dbtup::primaryKey()
+
+/* ---------------------------------------------------------------- */
+/* -------------------------- dropTrigger ------------------------- */
+/* */
+/* Deletes a trigger record by disassociating it with the given */
+/* table and returning it to the trigger pool. */
+/* Trigger type can be one of secondary_index, subscription, */
+/* constraint(NYI), foreign_key(NYI), schema_upgrade(NYI), */
+/* api_trigger(NYI) or sql_trigger(NYI). */
+/* */
+/* ---------------------------------------------------------------- */
+Uint32
+Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req)
+{
+ Uint32 triggerId = req->getTriggerId();
+
+ TriggerType::Value ttype = req->getTriggerType();
+ TriggerActionTime::Value ttime = req->getTriggerActionTime();
+ TriggerEvent::Value tevent = req->getTriggerEvent();
+
+ // ndbout_c("Drop TupTrigger %u = %u %u %u %u", triggerId, table, ttype, ttime, tevent);
+
+ ArrayList<TupTriggerData>* tlist = findTriggerList(table, ttype, ttime, tevent);
+ ndbrequire(tlist != NULL);
+
+ Ptr<TupTriggerData> ptr;
+ for (tlist->first(ptr); !ptr.isNull(); tlist->next(ptr)) {
+ ljam();
+ if (ptr.p->triggerId == triggerId) {
+ ljam();
+ tlist->release(ptr.i);
+ return 0;
+ }
+ }
+ return DropTrigRef::TriggerNotFound;
+}//Dbtup::dropTrigger()
+
+/* ---------------------------------------------------------------- */
+/* -------------- checkImmediateTriggersAfterOp ------------------ */
+/* */
+/* Called after an insert, delete, or update operation takes */
+/* place. Fetches before tuple for deletes and updates and */
+/* after tuple for inserts and updates. */
+/* Executes immediate triggers by sending FIRETRIGORD */
+/* */
+/* ---------------------------------------------------------------- */
+void Dbtup::checkImmediateTriggersAfterInsert(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTablePtr)
+{
+ if(refToBlock(regOperPtr->coordinatorTC) == DBLQH) {
+ return;
+ }
+
+ if ((regOperPtr->primaryReplica) &&
+ (!(regTablePtr->afterInsertTriggers.isEmpty()))) {
+ ljam();
+ fireImmediateTriggers(signal,
+ regTablePtr->afterInsertTriggers,
+ regOperPtr);
+ }//if
+}//Dbtup::checkImmediateTriggersAfterInsert()
+
+void Dbtup::checkImmediateTriggersAfterUpdate(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTablePtr)
+{
+ if(refToBlock(regOperPtr->coordinatorTC) == DBLQH) {
+ return;
+ }
+
+ if ((regOperPtr->primaryReplica) &&
+ (!(regTablePtr->afterUpdateTriggers.isEmpty()))) {
+ ljam();
+ fireImmediateTriggers(signal,
+ regTablePtr->afterUpdateTriggers,
+ regOperPtr);
+ }//if
+ if ((regOperPtr->primaryReplica) &&
+ (!(regTablePtr->constraintUpdateTriggers.isEmpty()))) {
+ ljam();
+ fireImmediateTriggers(signal,
+ regTablePtr->constraintUpdateTriggers,
+ regOperPtr);
+ }//if
+}//Dbtup::checkImmediateTriggersAfterUpdate()
+
+void Dbtup::checkImmediateTriggersAfterDelete(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTablePtr)
+{
+ if(refToBlock(regOperPtr->coordinatorTC) == DBLQH) {
+ return;
+ }
+
+ if ((regOperPtr->primaryReplica) &&
+ (!(regTablePtr->afterDeleteTriggers.isEmpty()))) {
+ ljam();
+ executeTriggers(signal,
+ regTablePtr->afterDeleteTriggers,
+ regOperPtr);
+ }//if
+}//Dbtup::checkImmediateTriggersAfterDelete()
+
+#if 0
+/* ---------------------------------------------------------------- */
+/* --------------------- checkDeferredTriggers -------------------- */
+/* */
+/* Called before commit after an insert, delete, or update */
+/* operation. Fetches before tuple for deletes and updates and */
+/* after tuple for inserts and updates. */
+/* Executes deferred triggers by sending FIRETRIGORD */
+/* */
+/* ---------------------------------------------------------------- */
+void Dbtup::checkDeferredTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTablePtr)
+{
+ ljam();
+ // NYI
+}//Dbtup::checkDeferredTriggers()
+#endif
+
+/* ---------------------------------------------------------------- */
+/* --------------------- checkDetachedTriggers -------------------- */
+/* */
+/* Called at commit after an insert, delete, or update operation. */
+/* Fetches before tuple for deletes and updates and */
+/* after tuple for inserts and updates. */
+/* Executes detached triggers by sending FIRETRIGORD */
+/* */
+/* ---------------------------------------------------------------- */
+void Dbtup::checkDetachedTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTablePtr)
+{
+ switch(regOperPtr->optype) {
+ case(ZINSERT):
+ ljam();
+ if (regTablePtr->subscriptionInsertTriggers.isEmpty()) {
+ // Table has no active triggers monitoring inserts at commit
+ ljam();
+ return;
+ }//if
+
+ // If any immediate insert trigger fired, then fetch the after tuple
+ fireDetachedTriggers(signal,
+ regTablePtr->subscriptionInsertTriggers,
+ regOperPtr);
+ break;
+ case(ZDELETE):
+ ljam();
+ if (regTablePtr->subscriptionDeleteTriggers.isEmpty()) {
+ // Table has no active triggers monitoring deletes at commit
+ ljam();
+ return;
+ }//if
+
+ // Execute any after delete triggers by sending
+ // FIRETRIGORD with the before tuple
+ executeTriggers(signal,
+ regTablePtr->subscriptionDeleteTriggers,
+ regOperPtr);
+ break;
+ case(ZUPDATE):
+ ljam();
+ if (regTablePtr->subscriptionUpdateTriggers.isEmpty()) {
+ // Table has no active triggers monitoring updates at commit
+ ljam();
+ return;
+ }//if
+
+ // If any immediate update trigger fired, then fetch the after tuple
+ // and send two FIRETRIGORD, one with the before tuple and one with the after tuple
+ fireDetachedTriggers(signal,
+ regTablePtr->subscriptionUpdateTriggers,
+ regOperPtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dbtup::checkDetachedTriggers()
+
+void
+Dbtup::fireImmediateTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* const regOperPtr)
+{
+ TriggerPtr trigPtr;
+ triggerList.first(trigPtr);
+ while (trigPtr.i != RNIL) {
+ ljam();
+ if (trigPtr.p->monitorAllAttributes ||
+ trigPtr.p->attributeMask.overlaps(regOperPtr->changeMask)) {
+ ljam();
+ executeTrigger(signal,
+ trigPtr.p,
+ regOperPtr);
+ }//if
+ triggerList.next(trigPtr);
+ }//while
+}//Dbtup::fireImmediateTriggers()
+
+#if 0
+void
+Dbtup::fireDeferredTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* const regOperPtr)
+{
+ TriggerPtr trigPtr;
+ triggerList.first(trigPtr);
+ while (trigPtr.i != RNIL) {
+ ljam();
+ if (trigPtr.p->monitorAllAttributes ||
+ trigPtr.p->attributeMask.overlaps(regOperPtr->changeMask)) {
+ ljam();
+ executeTrigger(signal,
+ trigPtr,
+ regOperPtr);
+ }//if
+ triggerList.next(trigPtr);
+ }//while
+}//Dbtup::fireDeferredTriggers()
+#endif
+
+void
+Dbtup::fireDetachedTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* const regOperPtr)
+{
+ TriggerPtr trigPtr;
+ triggerList.first(trigPtr);
+ while (trigPtr.i != RNIL) {
+ ljam();
+ if ((trigPtr.p->monitorReplicas || regOperPtr->primaryReplica) &&
+ (trigPtr.p->monitorAllAttributes ||
+ trigPtr.p->attributeMask.overlaps(regOperPtr->changeMask))) {
+ ljam();
+ executeTrigger(signal,
+ trigPtr.p,
+ regOperPtr);
+ }//if
+ triggerList.next(trigPtr);
+ }//while
+}//Dbtup::fireDetachedTriggers()
+
+void Dbtup::executeTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* regOperPtr)
+{
+ TriggerPtr trigPtr;
+ triggerList.first(trigPtr);
+ while (trigPtr.i != RNIL) {
+ ljam();
+ executeTrigger(signal,
+ trigPtr.p,
+ regOperPtr);
+ triggerList.next(trigPtr);
+
+ }//while
+}//Dbtup::executeTriggers()
+
+void Dbtup::executeTrigger(Signal* signal,
+ TupTriggerData* const trigPtr,
+ Operationrec* const regOperPtr)
+{
+
+ /**
+ * The block below does not work together with GREP.
+ * I have 2 db nodes (2 replicas) -> one node group.
+ * I want to have FIRETRIG_ORD sent to all SumaParticipants,
+ * from all nodes in the node group described above. However,
+ * only one of the nodes in the node group actually sends the
+ * FIRE_TRIG_ORD, and the other node enters this "hack" below.
+ * I don't really know what the code snippet below does, but it
+ * does not work with GREP the way Lars and I want it.
+ * We need to have triggers fired from both the primary and the
+ * backup replica, not only the primary as it is now.
+ *
+ * Note: In Suma, I have changed triggers to be created with
+ * setMonitorReplicas(true).
+ * /Johan
+ *
+ * See RT 709
+ */
+ // XXX quick fix to NR, should fix in LQHKEYREQ instead
+ /*
+ if (refToBlock(regOperPtr->coordinatorTC) == DBLQH) {
+ jam();
+ return;
+ }
+ */
+ BlockReference ref = trigPtr->m_receiverBlock;
+ Uint32* const keyBuffer = &cinBuffer[0];
+ Uint32* const mainBuffer = &coutBuffer[0];
+ Uint32* const copyBuffer = &clogMemBuffer[0];
+
+ Uint32 noPrimKey, noMainWords, noCopyWords;
+
+ if (ref == BACKUP) {
+ ljam();
+ /*
+ In order for the implementation of BACKUP to work even when changing
+ primaries in the middle of the backup, we need to set the trigger on
+ all replicas. This check determines whether this is the node where the
+ trigger should be fired. The check should preferably have been put
+ entirely in the BACKUP block, but it was about five times simpler
+ to put it here and it is also much faster for the backup (small
+ overhead for everybody else).
+ */
+ signal->theData[0] = trigPtr->triggerId;
+ signal->theData[1] = regOperPtr->fragId;
+ EXECUTE_DIRECT(BACKUP, GSN_BACKUP_TRIG_REQ, signal, 2);
+ ljamEntry();
+ if (signal->theData[0] == 0) {
+ ljam();
+ return;
+ }//if
+ }//if
+ if (!readTriggerInfo(trigPtr,
+ regOperPtr,
+ keyBuffer,
+ noPrimKey,
+ mainBuffer,
+ noMainWords,
+ copyBuffer,
+ noCopyWords)) {
+ ljam();
+ return;
+ }//if
+//--------------------------------------------------------------------
+// Now all data for this trigger has been read. It is now time to send
+// the trigger information consisting of two or three sets of TRIG_ATTRINFO
+// signals and one FIRE_TRIG_ORD signal.
+// We start by setting common header info for all TRIG_ATTRINFO signals.
+//--------------------------------------------------------------------
+ bool executeDirect;
+ TrigAttrInfo* const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtrSend();
+ trigAttrInfo->setConnectionPtr(regOperPtr->tcOpIndex);
+ trigAttrInfo->setTriggerId(trigPtr->triggerId);
+
+ switch(trigPtr->triggerType) {
+ case (TriggerType::SECONDARY_INDEX):
+ ljam();
+ ref = regOperPtr->coordinatorTC;
+ executeDirect = false;
+ break;
+ case (TriggerType::SUBSCRIPTION):
+ case (TriggerType::SUBSCRIPTION_BEFORE):
+ ljam();
+ // Since only backup uses subscription triggers we send to backup directly for now
+ ref = trigPtr->m_receiverBlock;
+ executeDirect = true;
+ break;
+ case (TriggerType::READ_ONLY_CONSTRAINT):
+ terrorCode = ZREAD_ONLY_CONSTRAINT_VIOLATION;
+ // XXX should return status and abort the rest
+ return;
+ default:
+ ndbrequire(false);
+ executeDirect= false; // remove warning
+ }//switch
+
+ regOperPtr->noFiredTriggers++;
+
+ trigAttrInfo->setAttrInfoType(TrigAttrInfo::PRIMARY_KEY);
+ sendTrigAttrInfo(signal, keyBuffer, noPrimKey, executeDirect, ref);
+
+ Uint32 noAfter = 0;
+ Uint32 noBefore = 0;
+ switch(regOperPtr->optype) {
+ case(ZINSERT):
+ ljam();
+ // Send AttrInfo signals with new attribute values
+ trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
+ sendTrigAttrInfo(signal, mainBuffer, noMainWords, executeDirect, ref);
+ noAfter = noMainWords;
+ break;
+ case(ZDELETE):
+ if (trigPtr->sendBeforeValues) {
+ ljam();
+ trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES);
+ sendTrigAttrInfo(signal, mainBuffer, noMainWords, executeDirect, ref);
+ noBefore = noMainWords;
+ }//if
+ break;
+ case(ZUPDATE):
+ ljam();
+ if (trigPtr->sendBeforeValues) {
+ ljam();
+ trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES);
+ sendTrigAttrInfo(signal, copyBuffer, noCopyWords, executeDirect, ref);
+ noBefore = noCopyWords;
+ }//if
+ trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
+ sendTrigAttrInfo(signal, mainBuffer, noMainWords, executeDirect, ref);
+ noAfter = noMainWords;
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+ sendFireTrigOrd(signal,
+ regOperPtr,
+ trigPtr,
+ noPrimKey,
+ noBefore,
+ noAfter);
+}//Dbtup::executeTrigger()
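+
+// Illustrative sketch of the signal train produced above for an UPDATE with
+// sendBeforeValues set (word counts as later reported in FIRE_TRIG_ORD):
+//
+//   TRIG_ATTRINFO (PRIMARY_KEY)    noPrimKey words, split over signals
+//   TRIG_ATTRINFO (BEFORE_VALUES)  noCopyWords words from the copy tuple
+//   TRIG_ATTRINFO (AFTER_VALUES)   noMainWords words from the main tuple
+//   FIRE_TRIG_ORD                  (noPrimKey, noBefore, noAfter)
+//
+// Inserts send only AFTER_VALUES and deletes only BEFORE_VALUES, the latter
+// only when sendBeforeValues is set.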
+
+Uint32 Dbtup::setAttrIds(Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask,
+ Uint32 noOfAttributes,
+ Uint32* inBuffer)
+{
+ Uint32 bufIndx = 0;
+ for (Uint32 i = 0; i < noOfAttributes; i++) {
+ ljam();
+ if (attributeMask.get(i)) {
+ ljam();
+ AttributeHeader::init(&inBuffer[bufIndx++], i, 0);
+ }//if
+ }//for
+ return bufIndx;
+}//Dbtup::setAttrIds()
+
+bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
+ Operationrec* const regOperPtr,
+ Uint32* const keyBuffer,
+ Uint32& noPrimKey,
+ Uint32* const mainBuffer,
+ Uint32& noMainWords,
+ Uint32* const copyBuffer,
+ Uint32& noCopyWords)
+{
+ noCopyWords = 0;
+ noMainWords = 0;
+ Uint32 readBuffer[MAX_ATTRIBUTES_IN_TABLE];
+ PagePtr pagep;
+
+//---------------------------------------------------------------------------
+// Set-up variables needed by readAttributes operPtr.p, tabptr.p
+//---------------------------------------------------------------------------
+ operPtr.p = regOperPtr;
+ tabptr.i = regOperPtr->tableRef;
+ ptrCheckGuard(tabptr, cnoOfTablerec, tablerec);
+ Tablerec* const regTabPtr = tabptr.p;
+//--------------------------------------------------------------------
+// Initialise pagep and tuple offset for read of main tuple
+//--------------------------------------------------------------------
+ Uint32 tupheadoffset = regOperPtr->pageOffset;
+ pagep.i = regOperPtr->realPageId;
+ ptrCheckGuard(pagep, cnoOfPage, page);
+
+//--------------------------------------------------------------------
+// Read Primary Key Values
+//--------------------------------------------------------------------
+ int ret= readAttributes(pagep.p,
+ tupheadoffset,
+ &tableDescriptor[regTabPtr->readKeyArray].tabDescr,
+ regTabPtr->noOfKeyAttr,
+ keyBuffer,
+ ZATTR_BUFFER_SIZE,
+ false);
+ ndbrequire(ret != -1);
+ noPrimKey= ret;
+
+ Uint32 numAttrsToRead;
+ if ((regOperPtr->optype == ZUPDATE) &&
+ (trigPtr->sendOnlyChangedAttributes)) {
+ ljam();
+//--------------------------------------------------------------------
+// Update that sends only changed information
+//--------------------------------------------------------------------
+ Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask;
+ attributeMask = trigPtr->attributeMask;
+ attributeMask.bitAND(regOperPtr->changeMask);
+ numAttrsToRead = setAttrIds(attributeMask, regTabPtr->noOfAttr, &readBuffer[0]);
+
+ } else if ((regOperPtr->optype == ZDELETE) &&
+ (!trigPtr->sendBeforeValues)) {
+ ljam();
+//--------------------------------------------------------------------
+// Delete without sending before values reads only the Primary Key
+//--------------------------------------------------------------------
+ return true;
+ } else {
+ ljam();
+//--------------------------------------------------------------------
+// All others send all attributes that are monitored
+//--------------------------------------------------------------------
+ numAttrsToRead = setAttrIds(trigPtr->attributeMask, regTabPtr->noOfAttr, &readBuffer[0]);
+ }//if
+ ndbrequire(numAttrsToRead < MAX_ATTRIBUTES_IN_TABLE);
+//--------------------------------------------------------------------
+// Read Main tuple values
+//--------------------------------------------------------------------
+ if ((regOperPtr->optype != ZDELETE) ||
+ (trigPtr->sendBeforeValues)) {
+ ljam();
+ int ret= readAttributes(pagep.p,
+ tupheadoffset,
+ &readBuffer[0],
+ numAttrsToRead,
+ mainBuffer,
+ ZATTR_BUFFER_SIZE,
+ false);
+ ndbrequire(ret != -1);
+ noMainWords= ret;
+ } else {
+ ljam();
+ noMainWords = 0;
+ }//if
+//--------------------------------------------------------------------
+// Read Copy tuple values for UPDATE's
+//--------------------------------------------------------------------
+// Initialise pagep and tuple offset for read of copy tuple
+//--------------------------------------------------------------------
+ if ((regOperPtr->optype == ZUPDATE) &&
+ (trigPtr->sendBeforeValues)) {
+ ljam();
+
+ tupheadoffset = regOperPtr->pageOffsetC;
+ pagep.i = regOperPtr->realPageIdC;
+ ptrCheckGuard(pagep, cnoOfPage, page);
+
+ int ret= readAttributes(pagep.p,
+ tupheadoffset,
+ &readBuffer[0],
+ numAttrsToRead,
+ copyBuffer,
+ ZATTR_BUFFER_SIZE,
+ false);
+
+ ndbrequire(ret != -1);
+ noCopyWords = ret;
+ if ((noMainWords == noCopyWords) &&
+ (memcmp(mainBuffer, copyBuffer, noMainWords << 2) == 0)) {
+//--------------------------------------------------------------------
+// Although a trigger was fired, it was not necessary since the old
+// value and the new value were exactly the same
+//--------------------------------------------------------------------
+ ljam();
+ return false;
+ }//if
+ }//if
+ return true;
+}//Dbtup::readTriggerInfo()
+
+void Dbtup::sendTrigAttrInfo(Signal* signal,
+ Uint32* data,
+ Uint32 dataLen,
+ bool executeDirect,
+ BlockReference receiverReference)
+{
+ TrigAttrInfo* const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtrSend();
+ Uint32 sigLen;
+ Uint32 dataIndex = 0;
+ do {
+ sigLen = dataLen - dataIndex;
+ if (sigLen > TrigAttrInfo::DataLength) {
+ ljam();
+ sigLen = TrigAttrInfo::DataLength;
+ }//if
+ MEMCOPY_NO_WORDS(trigAttrInfo->getData(),
+ data + dataIndex,
+ sigLen);
+ if (executeDirect) {
+ ljam();
+ EXECUTE_DIRECT(receiverReference,
+ GSN_TRIG_ATTRINFO,
+ signal,
+ TrigAttrInfo::StaticLength + sigLen);
+ ljamEntry();
+ } else {
+ ljam();
+ sendSignal(receiverReference,
+ GSN_TRIG_ATTRINFO,
+ signal,
+ TrigAttrInfo::StaticLength + sigLen,
+ JBB);
+ }//if
+ dataIndex += sigLen;
+ } while (dataLen != dataIndex);
+}//Dbtup::sendTrigAttrInfo()
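+
+// Illustrative example of the chunking above: with dataLen equal to
+// 2 * TrigAttrInfo::DataLength + 3, the loop emits three TRIG_ATTRINFO
+// signals carrying DataLength, DataLength and 3 data words respectively,
+// each preceded by the StaticLength header words.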
+
+void Dbtup::sendFireTrigOrd(Signal* signal,
+ Operationrec * const regOperPtr,
+ TupTriggerData* const trigPtr,
+ Uint32 noPrimKeyWords,
+ Uint32 noBeforeValueWords,
+ Uint32 noAfterValueWords)
+{
+ FireTrigOrd* const fireTrigOrd = (FireTrigOrd *)signal->getDataPtrSend();
+
+ fireTrigOrd->setConnectionPtr(regOperPtr->tcOpIndex);
+ fireTrigOrd->setTriggerId(trigPtr->triggerId);
+ fireTrigOrd->fragId= regOperPtr->fragId >> 1; //Handle two local frags
+
+ switch(regOperPtr->optype) {
+ case(ZINSERT):
+ ljam();
+ fireTrigOrd->setTriggerEvent(TriggerEvent::TE_INSERT);
+ break;
+ case(ZDELETE):
+ ljam();
+ fireTrigOrd->setTriggerEvent(TriggerEvent::TE_DELETE);
+ break;
+ case(ZUPDATE):
+ ljam();
+ fireTrigOrd->setTriggerEvent(TriggerEvent::TE_UPDATE);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+
+ fireTrigOrd->setNoOfPrimaryKeyWords(noPrimKeyWords);
+ fireTrigOrd->setNoOfBeforeValueWords(noBeforeValueWords);
+ fireTrigOrd->setNoOfAfterValueWords(noAfterValueWords);
+
+ switch(trigPtr->triggerType) {
+ case (TriggerType::SECONDARY_INDEX):
+ ljam();
+ sendSignal(regOperPtr->coordinatorTC, GSN_FIRE_TRIG_ORD,
+ signal, FireTrigOrd::SignalLength, JBB);
+ break;
+ case (TriggerType::SUBSCRIPTION_BEFORE): // Only Suma
+ ljam();
+ // Since only backup uses subscription triggers we
+ // send to backup directly for now
+ fireTrigOrd->setGCI(regOperPtr->gci);
+ fireTrigOrd->setHashValue(regOperPtr->hashValue);
+ EXECUTE_DIRECT(trigPtr->m_receiverBlock,
+ GSN_FIRE_TRIG_ORD,
+ signal,
+ FireTrigOrd::SignalWithHashValueLength);
+ break;
+ case (TriggerType::SUBSCRIPTION):
+ ljam();
+ // Since only backup uses subscription triggers we
+ // send to backup directly for now
+ fireTrigOrd->setGCI(regOperPtr->gci);
+ EXECUTE_DIRECT(trigPtr->m_receiverBlock,
+ GSN_FIRE_TRIG_ORD,
+ signal,
+ FireTrigOrd::SignalWithGCILength);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dbtup::sendFireTrigOrd()
+
+/*
+ * Ordered index triggers.
+ *
+ * Insert: add entry to index
+ * Update: add entry to index, delay remove until commit
+ * Delete: do nothing, delay remove until commit
+ * Commit: remove entry delayed from update and delete
+ * Abort : remove entry added by insert and update
+ *
+ * See Notes.txt for the details.
+ */
+
+int
+Dbtup::executeTuxInsertTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr)
+{
+ TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
+ PagePtr pagePtr;
+ pagePtr.i = regOperPtr->realPageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffset + 1];
+ ndbrequire(tupVersion == regOperPtr->tupVersion);
+ // fill in constant part
+ req->tableId = regOperPtr->tableRef;
+ req->fragId = regOperPtr->fragId;
+ req->pageId = regOperPtr->realPageId;
+ req->pageOffset = regOperPtr->pageOffset;
+ req->tupVersion = tupVersion;
+ req->opInfo = TuxMaintReq::OpAdd;
+ return addTuxEntries(signal, regOperPtr, regTabPtr);
+}
+
+int
+Dbtup::executeTuxUpdateTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr)
+{
+ TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
+ PagePtr pagePtr;
+ pagePtr.i = regOperPtr->realPageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffset + 1];
+ ndbrequire(tupVersion == regOperPtr->tupVersion);
+ // fill in constant part
+ req->tableId = regOperPtr->tableRef;
+ req->fragId = regOperPtr->fragId;
+ req->pageId = regOperPtr->realPageId;
+ req->pageOffset = regOperPtr->pageOffset;
+ req->tupVersion = tupVersion;
+ req->opInfo = TuxMaintReq::OpAdd;
+ return addTuxEntries(signal, regOperPtr, regTabPtr);
+}
+
+int
+Dbtup::addTuxEntries(Signal* signal,
+ Operationrec* regOperPtr,
+ Tablerec* regTabPtr)
+{
+ TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
+ const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
+ TriggerPtr triggerPtr;
+ Uint32 failPtrI;
+ triggerList.first(triggerPtr);
+ while (triggerPtr.i != RNIL) {
+ ljam();
+ req->indexId = triggerPtr.p->indexId;
+ req->errorCode = RNIL;
+ EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
+ signal, TuxMaintReq::SignalLength);
+ ljamEntry();
+ if (req->errorCode != 0) {
+ ljam();
+ terrorCode = req->errorCode;
+ failPtrI = triggerPtr.i;
+ goto fail;
+ }
+ triggerList.next(triggerPtr);
+ }
+ return 0;
+fail:
+ req->opInfo = TuxMaintReq::OpRemove;
+ triggerList.first(triggerPtr);
+ while (triggerPtr.i != failPtrI) {
+ ljam();
+ req->indexId = triggerPtr.p->indexId;
+ req->errorCode = RNIL;
+ EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
+ signal, TuxMaintReq::SignalLength);
+ ljamEntry();
+ ndbrequire(req->errorCode == 0);
+ triggerList.next(triggerPtr);
+ }
+#ifdef VM_TRACE
+ ndbout << "aborted partial tux update: op " << hex << regOperPtr << endl;
+#endif
+ return -1;
+}
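+
+// Illustrative note: on a TUX_MAINT_REQ failure, addTuxEntries() rolls back
+// by re-walking the trigger list from the start up to, but not including,
+// the failing trigger, issuing OpRemove for each entry that was already
+// added, so a failed insert/update never leaves stray ordered-index entries.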
+
+int
+Dbtup::executeTuxDeleteTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr)
+{
+ // do nothing
+ return 0;
+}
+
+void
+Dbtup::executeTuxCommitTriggers(Signal* signal,
+ Operationrec* regOperPtr,
+ Tablerec* const regTabPtr)
+{
+ TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
+ // get version
+ Uint32 tupVersion;
+ if (regOperPtr->optype == ZINSERT) {
+ if (! regOperPtr->deleteInsertFlag)
+ return;
+ ljam();
+ PagePtr pagePtr;
+ pagePtr.i = regOperPtr->realPageIdC;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffsetC + 1];
+ ndbrequire(tupVersion != regOperPtr->tupVersion);
+ } else if (regOperPtr->optype == ZUPDATE) {
+ ljam();
+ PagePtr pagePtr;
+ pagePtr.i = regOperPtr->realPageIdC;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffsetC + 1];
+ ndbrequire(tupVersion != regOperPtr->tupVersion);
+ } else if (regOperPtr->optype == ZDELETE) {
+ if (regOperPtr->deleteInsertFlag)
+ return;
+ ljam();
+ PagePtr pagePtr;
+ pagePtr.i = regOperPtr->realPageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffset + 1];
+ ndbrequire(tupVersion == regOperPtr->tupVersion);
+ } else {
+ ndbrequire(false);
+ tupVersion= 0; // remove warning
+ }
+ // fill in constant part
+ req->tableId = regOperPtr->tableRef;
+ req->fragId = regOperPtr->fragId;
+ req->pageId = regOperPtr->realPageId;
+ req->pageOffset = regOperPtr->pageOffset;
+ req->tupVersion = tupVersion;
+ req->opInfo = TuxMaintReq::OpRemove;
+ removeTuxEntries(signal, regOperPtr, regTabPtr);
+}
+
+void
+Dbtup::executeTuxAbortTriggers(Signal* signal,
+ Operationrec* regOperPtr,
+ Tablerec* const regTabPtr)
+{
+ TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
+ // get version
+ Uint32 tupVersion;
+ if (regOperPtr->optype == ZINSERT) {
+ ljam();
+ tupVersion = regOperPtr->tupVersion;
+ } else if (regOperPtr->optype == ZUPDATE) {
+ ljam();
+ tupVersion = regOperPtr->tupVersion;
+ } else if (regOperPtr->optype == ZDELETE) {
+ ljam();
+ return;
+ } else {
+ ndbrequire(false);
+ tupVersion= 0; // remove warning
+ }
+ // fill in constant part
+ req->tableId = regOperPtr->tableRef;
+ req->fragId = regOperPtr->fragId;
+ req->pageId = regOperPtr->realPageId;
+ req->pageOffset = regOperPtr->pageOffset;
+ req->tupVersion = tupVersion;
+ req->opInfo = TuxMaintReq::OpRemove;
+ removeTuxEntries(signal, regOperPtr, regTabPtr);
+}
+
+void
+Dbtup::removeTuxEntries(Signal* signal,
+ Operationrec* regOperPtr,
+ Tablerec* regTabPtr)
+{
+ TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
+ const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
+ TriggerPtr triggerPtr;
+ triggerList.first(triggerPtr);
+ while (triggerPtr.i != RNIL) {
+ ljam();
+ req->indexId = triggerPtr.p->indexId;
+ req->errorCode = RNIL;
+ EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
+ signal, TuxMaintReq::SignalLength);
+ ljamEntry();
+ // must succeed
+ ndbrequire(req->errorCode == 0);
+ triggerList.next(triggerPtr);
+ }
+}
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupUndoLog.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupUndoLog.cpp
index 869f399583f..869f399583f 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupUndoLog.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupUndoLog.cpp
diff --git a/storage/ndb/src/kernel/blocks/dbtup/Makefile.am b/storage/ndb/src/kernel/blocks/dbtup/Makefile.am
new file mode 100644
index 00000000000..3aee511d039
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/Makefile.am
@@ -0,0 +1,42 @@
+noinst_LIBRARIES = libdbtup.a
+
+libdbtup_a_SOURCES = \
+ DbtupExecQuery.cpp \
+ DbtupBuffer.cpp \
+ DbtupRoutines.cpp \
+ DbtupCommit.cpp \
+ DbtupFixAlloc.cpp \
+ DbtupTrigger.cpp \
+ DbtupAbort.cpp \
+ DbtupLCP.cpp \
+ DbtupUndoLog.cpp \
+ DbtupPageMap.cpp \
+ DbtupPagMan.cpp \
+ DbtupStoredProcDef.cpp \
+ DbtupMeta.cpp \
+ DbtupTabDesMan.cpp \
+ DbtupGen.cpp \
+ DbtupSystemRestart.cpp \
+ DbtupIndex.cpp \
+ DbtupScan.cpp \
+ DbtupDebug.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdbtup.dsp
+
+libdbtup.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libdbtup_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
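
Every kernel block Makefile.am introduced in this patch follows the same shape: a noinst_ static library named after the block, the shared common.mk.am and type_kernel.mk.am includes, a pattern rule that stops make from regenerating files out of BitKeeper's SCCS directories, and a windoze-dsp target that feeds the win-* helper scripts to emit a Visual Studio project. A minimal sketch of that shape for a hypothetical new block (libexample.a and its source names are placeholders, not files added by this change) would be:

    noinst_LIBRARIES = libexample.a

    # Replace with the block's real source files.
    libexample_a_SOURCES = ExampleInit.cpp ExampleMain.cpp

    # Shared compiler/linker settings for all NDB kernel blocks.
    include $(top_srcdir)/storage/ndb/config/common.mk.am
    include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am

    # Don't update the files from bitkeeper
    %::SCCS/s.%

    # A windoze-dsp rule mirroring the ones in this patch would complete the file.

The only per-block variation in the real files is the source list and, where a block needs headers from a sibling (dbtux pulls in dbtup via INCLUDES_LOC below), an extra local include path.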
diff --git a/ndb/src/kernel/blocks/dbtup/Notes.txt b/storage/ndb/src/kernel/blocks/dbtup/Notes.txt
index c2973bb0a76..c2973bb0a76 100644
--- a/ndb/src/kernel/blocks/dbtup/Notes.txt
+++ b/storage/ndb/src/kernel/blocks/dbtup/Notes.txt
diff --git a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
index d4a44b9e641..d4a44b9e641 100644
--- a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
index cf815b14c1a..cf815b14c1a 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
index ed29dc57915..ed29dc57915 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
index 5640fdf2899..5640fdf2899 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
index 4b568badc67..4b568badc67 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
index 93c4a583624..93c4a583624 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
index 68a3e78ce9e..68a3e78ce9e 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
index a61b7c1f5ca..a61b7c1f5ca 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
index b0e2a664bfd..b0e2a664bfd 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
index 5107a8d8e31..5107a8d8e31 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
diff --git a/storage/ndb/src/kernel/blocks/dbtux/Makefile.am b/storage/ndb/src/kernel/blocks/dbtux/Makefile.am
new file mode 100644
index 00000000000..12d450e8632
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/Makefile.am
@@ -0,0 +1,34 @@
+noinst_LIBRARIES = libdbtux.a
+
+libdbtux_a_SOURCES = \
+ DbtuxGen.cpp \
+ DbtuxMeta.cpp \
+ DbtuxMaint.cpp \
+ DbtuxNode.cpp \
+ DbtuxTree.cpp \
+ DbtuxScan.cpp \
+ DbtuxSearch.cpp \
+ DbtuxCmp.cpp \
+ DbtuxDebug.cpp
+
+INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/kernel/blocks/dbtup
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdbtux.dsp
+
+libdbtux.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libdbtux_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbtux/Times.txt b/storage/ndb/src/kernel/blocks/dbtux/Times.txt
index 68120084846..68120084846 100644
--- a/ndb/src/kernel/blocks/dbtux/Times.txt
+++ b/storage/ndb/src/kernel/blocks/dbtux/Times.txt
diff --git a/ndb/src/kernel/blocks/dbtux/tuxstatus.html b/storage/ndb/src/kernel/blocks/dbtux/tuxstatus.html
index 264809cefd3..264809cefd3 100644
--- a/ndb/src/kernel/blocks/dbtux/tuxstatus.html
+++ b/storage/ndb/src/kernel/blocks/dbtux/tuxstatus.html
diff --git a/ndb/src/kernel/blocks/dbutil/DbUtil.cpp b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
index b94bb8e6d7e..b94bb8e6d7e 100644
--- a/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
diff --git a/ndb/src/kernel/blocks/dbutil/DbUtil.hpp b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp
index 5499970fde3..5499970fde3 100644
--- a/ndb/src/kernel/blocks/dbutil/DbUtil.hpp
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp
diff --git a/ndb/src/kernel/blocks/dbutil/DbUtil.txt b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.txt
index cc8c1985009..cc8c1985009 100644
--- a/ndb/src/kernel/blocks/dbutil/DbUtil.txt
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.txt
diff --git a/storage/ndb/src/kernel/blocks/dbutil/Makefile.am b/storage/ndb/src/kernel/blocks/dbutil/Makefile.am
new file mode 100644
index 00000000000..4a0b180283e
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbutil/Makefile.am
@@ -0,0 +1,23 @@
+noinst_LIBRARIES = libdbutil.a
+
+libdbutil_a_SOURCES = DbUtil.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdbutil.dsp
+
+libdbutil.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libdbutil_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/grep/Grep.cpp b/storage/ndb/src/kernel/blocks/grep/Grep.cpp
index 0527c5415ab..0527c5415ab 100644
--- a/ndb/src/kernel/blocks/grep/Grep.cpp
+++ b/storage/ndb/src/kernel/blocks/grep/Grep.cpp
diff --git a/ndb/src/kernel/blocks/grep/Grep.hpp b/storage/ndb/src/kernel/blocks/grep/Grep.hpp
index a14143294e1..a14143294e1 100644
--- a/ndb/src/kernel/blocks/grep/Grep.hpp
+++ b/storage/ndb/src/kernel/blocks/grep/Grep.hpp
diff --git a/ndb/src/kernel/blocks/grep/GrepInit.cpp b/storage/ndb/src/kernel/blocks/grep/GrepInit.cpp
index d764fb1f473..d764fb1f473 100644
--- a/ndb/src/kernel/blocks/grep/GrepInit.cpp
+++ b/storage/ndb/src/kernel/blocks/grep/GrepInit.cpp
diff --git a/storage/ndb/src/kernel/blocks/grep/Makefile.am b/storage/ndb/src/kernel/blocks/grep/Makefile.am
new file mode 100644
index 00000000000..92d75d002a5
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/grep/Makefile.am
@@ -0,0 +1,23 @@
+noinst_LIBRARIES = libgrep.a
+
+libgrep_a_SOURCES = Grep.cpp GrepInit.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libgrep.dsp
+
+libgrep.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libgrep_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/grep/systab_test/Makefile b/storage/ndb/src/kernel/blocks/grep/systab_test/Makefile
index bd69e0f3799..bd69e0f3799 100644
--- a/ndb/src/kernel/blocks/grep/systab_test/Makefile
+++ b/storage/ndb/src/kernel/blocks/grep/systab_test/Makefile
diff --git a/ndb/src/kernel/blocks/grep/systab_test/grep_systab_test.cpp b/storage/ndb/src/kernel/blocks/grep/systab_test/grep_systab_test.cpp
index e3a77af4e4e..e3a77af4e4e 100644
--- a/ndb/src/kernel/blocks/grep/systab_test/grep_systab_test.cpp
+++ b/storage/ndb/src/kernel/blocks/grep/systab_test/grep_systab_test.cpp
diff --git a/ndb/src/kernel/blocks/mutexes.hpp b/storage/ndb/src/kernel/blocks/mutexes.hpp
index 5c0276fc4fa..5c0276fc4fa 100644
--- a/ndb/src/kernel/blocks/mutexes.hpp
+++ b/storage/ndb/src/kernel/blocks/mutexes.hpp
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/Makefile.am b/storage/ndb/src/kernel/blocks/ndbcntr/Makefile.am
new file mode 100644
index 00000000000..7bdcf046a19
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/Makefile.am
@@ -0,0 +1,26 @@
+noinst_LIBRARIES = libndbcntr.a
+
+libndbcntr_a_SOURCES = \
+ NdbcntrInit.cpp \
+ NdbcntrSysTable.cpp \
+ NdbcntrMain.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libndbcntr.dsp
+
+libndbcntr.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libndbcntr_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
index 639d300d6df..639d300d6df 100644
--- a/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
index c7b472fc91a..c7b472fc91a 100644
--- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
index 524a40697bf..524a40697bf 100644
--- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp
index 2a65271a32a..2a65271a32a 100644
--- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp
diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
index f76440a462a..f76440a462a 100644
--- a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
index 2176c93c5d5..2176c93c5d5 100644
--- a/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp
index 004752c9543..004752c9543 100644
--- a/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp
diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/Makefile b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/Makefile
index b0356e6da68..b0356e6da68 100644
--- a/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/Makefile
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/Makefile
diff --git a/ndb/src/kernel/blocks/ndbfs/CircularIndex.cpp b/storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.cpp
index 30b40097c9b..30b40097c9b 100644
--- a/ndb/src/kernel/blocks/ndbfs/CircularIndex.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.cpp
diff --git a/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp b/storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp
index 349cccdbcb4..349cccdbcb4 100644
--- a/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp
diff --git a/ndb/src/kernel/blocks/ndbfs/Filename.cpp b/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp
index 15158ec19ef..15158ec19ef 100644
--- a/ndb/src/kernel/blocks/ndbfs/Filename.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp
diff --git a/ndb/src/kernel/blocks/ndbfs/Filename.hpp b/storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp
index 249c1b1ca10..249c1b1ca10 100644
--- a/ndb/src/kernel/blocks/ndbfs/Filename.hpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Makefile.am b/storage/ndb/src/kernel/blocks/ndbfs/Makefile.am
new file mode 100644
index 00000000000..b4233720003
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Makefile.am
@@ -0,0 +1,27 @@
+noinst_LIBRARIES = libndbfs.a
+
+libndbfs_a_SOURCES = \
+ AsyncFile.cpp \
+ Ndbfs.cpp VoidFs.cpp \
+ Filename.cpp \
+ CircularIndex.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libndbfs.dsp
+
+libndbfs.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libndbfs_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp
index a1aebdef7a1..a1aebdef7a1 100644
--- a/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp
diff --git a/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp
index 03911d195ec..03911d195ec 100644
--- a/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp
diff --git a/ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp
index ca90bc60153..ca90bc60153 100644
--- a/ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp
diff --git a/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile
index 68f71bfc4cd..68f71bfc4cd 100644
--- a/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile
+++ b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile
diff --git a/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp
index b98c60693f4..b98c60693f4 100644
--- a/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp
diff --git a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
index 6f848d7fe16..6f848d7fe16 100644
--- a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
diff --git a/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp
index c5aaa4e5c49..c5aaa4e5c49 100644
--- a/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp
diff --git a/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp b/storage/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp
index 0fee687f1bc..0fee687f1bc 100644
--- a/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp
diff --git a/ndb/src/kernel/blocks/ndbfs/Pool.hpp b/storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp
index 0410673af6f..0410673af6f 100644
--- a/ndb/src/kernel/blocks/ndbfs/Pool.hpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp
diff --git a/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp b/storage/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp
index d093089acfc..d093089acfc 100644
--- a/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp
diff --git a/ndb/src/kernel/blocks/new-block.tar.gz b/storage/ndb/src/kernel/blocks/new-block.tar.gz
index 327503ea0b1..327503ea0b1 100644
--- a/ndb/src/kernel/blocks/new-block.tar.gz
+++ b/storage/ndb/src/kernel/blocks/new-block.tar.gz
Binary files differ
diff --git a/storage/ndb/src/kernel/blocks/qmgr/Makefile.am b/storage/ndb/src/kernel/blocks/qmgr/Makefile.am
new file mode 100644
index 00000000000..577a31348ad
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/qmgr/Makefile.am
@@ -0,0 +1,25 @@
+noinst_LIBRARIES = libqmgr.a
+
+libqmgr_a_SOURCES = \
+ QmgrInit.cpp \
+ QmgrMain.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libqmgr.dsp
+
+libqmgr.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libqmgr_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
index e134609df0a..e134609df0a 100644
--- a/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
diff --git a/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
index ecaeadff47a..ecaeadff47a 100644
--- a/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index 621ec70fbe1..621ec70fbe1 100644
--- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
diff --git a/ndb/src/kernel/blocks/qmgr/timer.hpp b/storage/ndb/src/kernel/blocks/qmgr/timer.hpp
index 9c35a23766c..9c35a23766c 100644
--- a/ndb/src/kernel/blocks/qmgr/timer.hpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/timer.hpp
diff --git a/storage/ndb/src/kernel/blocks/suma/Makefile.am b/storage/ndb/src/kernel/blocks/suma/Makefile.am
new file mode 100644
index 00000000000..6fd7033e068
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/suma/Makefile.am
@@ -0,0 +1,23 @@
+noinst_LIBRARIES = libsuma.a
+
+libsuma_a_SOURCES = Suma.cpp SumaInit.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libsuma.dsp
+
+libsuma.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libsuma_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
index ed54505b729..ed54505b729 100644
--- a/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
diff --git a/ndb/src/kernel/blocks/suma/Suma.hpp b/storage/ndb/src/kernel/blocks/suma/Suma.hpp
index 65869f44423..65869f44423 100644
--- a/ndb/src/kernel/blocks/suma/Suma.hpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp
diff --git a/ndb/src/kernel/blocks/suma/Suma.txt b/storage/ndb/src/kernel/blocks/suma/Suma.txt
index eba031226ef..eba031226ef 100644
--- a/ndb/src/kernel/blocks/suma/Suma.txt
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.txt
diff --git a/ndb/src/kernel/blocks/suma/SumaInit.cpp b/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp
index b5945db3811..b5945db3811 100644
--- a/ndb/src/kernel/blocks/suma/SumaInit.cpp
+++ b/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp
diff --git a/storage/ndb/src/kernel/blocks/trix/Makefile.am b/storage/ndb/src/kernel/blocks/trix/Makefile.am
new file mode 100644
index 00000000000..886a9dc60f0
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/trix/Makefile.am
@@ -0,0 +1,23 @@
+noinst_LIBRARIES = libtrix.a
+
+libtrix_a_SOURCES = Trix.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libtrix.dsp
+
+libtrix.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libtrix_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/trix/Trix.cpp b/storage/ndb/src/kernel/blocks/trix/Trix.cpp
index cd11cb4d575..cd11cb4d575 100644
--- a/ndb/src/kernel/blocks/trix/Trix.cpp
+++ b/storage/ndb/src/kernel/blocks/trix/Trix.cpp
diff --git a/ndb/src/kernel/blocks/trix/Trix.hpp b/storage/ndb/src/kernel/blocks/trix/Trix.hpp
index 8dc01375fa1..8dc01375fa1 100644
--- a/ndb/src/kernel/blocks/trix/Trix.hpp
+++ b/storage/ndb/src/kernel/blocks/trix/Trix.hpp
diff --git a/ndb/src/kernel/error/Error.hpp b/storage/ndb/src/kernel/error/Error.hpp
index e19d6782793..e19d6782793 100644
--- a/ndb/src/kernel/error/Error.hpp
+++ b/storage/ndb/src/kernel/error/Error.hpp
diff --git a/ndb/src/kernel/error/ErrorHandlingMacros.hpp b/storage/ndb/src/kernel/error/ErrorHandlingMacros.hpp
index d8bb7ff759b..d8bb7ff759b 100644
--- a/ndb/src/kernel/error/ErrorHandlingMacros.hpp
+++ b/storage/ndb/src/kernel/error/ErrorHandlingMacros.hpp
diff --git a/ndb/src/kernel/error/ErrorMessages.cpp b/storage/ndb/src/kernel/error/ErrorMessages.cpp
index 059aa4af61c..059aa4af61c 100644
--- a/ndb/src/kernel/error/ErrorMessages.cpp
+++ b/storage/ndb/src/kernel/error/ErrorMessages.cpp
diff --git a/ndb/src/kernel/error/ErrorMessages.hpp b/storage/ndb/src/kernel/error/ErrorMessages.hpp
index 38c8eec636b..38c8eec636b 100644
--- a/ndb/src/kernel/error/ErrorMessages.hpp
+++ b/storage/ndb/src/kernel/error/ErrorMessages.hpp
diff --git a/ndb/src/kernel/error/ErrorReporter.cpp b/storage/ndb/src/kernel/error/ErrorReporter.cpp
index e4ead4ce34d..e4ead4ce34d 100644
--- a/ndb/src/kernel/error/ErrorReporter.cpp
+++ b/storage/ndb/src/kernel/error/ErrorReporter.cpp
diff --git a/ndb/src/kernel/error/ErrorReporter.hpp b/storage/ndb/src/kernel/error/ErrorReporter.hpp
index 2c79f242eea..2c79f242eea 100644
--- a/ndb/src/kernel/error/ErrorReporter.hpp
+++ b/storage/ndb/src/kernel/error/ErrorReporter.hpp
diff --git a/storage/ndb/src/kernel/error/Makefile.am b/storage/ndb/src/kernel/error/Makefile.am
new file mode 100644
index 00000000000..b6e3b7962fa
--- /dev/null
+++ b/storage/ndb/src/kernel/error/Makefile.am
@@ -0,0 +1,25 @@
+noinst_LIBRARIES = liberror.a
+
+liberror_a_SOURCES = TimeModule.cpp \
+ ErrorReporter.cpp \
+ ErrorMessages.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: liberror.dsp
+
+liberror.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(liberror_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/error/TimeModule.cpp b/storage/ndb/src/kernel/error/TimeModule.cpp
index 4bd8e3daf99..4bd8e3daf99 100644
--- a/ndb/src/kernel/error/TimeModule.cpp
+++ b/storage/ndb/src/kernel/error/TimeModule.cpp
diff --git a/ndb/src/kernel/error/TimeModule.hpp b/storage/ndb/src/kernel/error/TimeModule.hpp
index f1414c77af3..f1414c77af3 100644
--- a/ndb/src/kernel/error/TimeModule.hpp
+++ b/storage/ndb/src/kernel/error/TimeModule.hpp
diff --git a/ndb/src/kernel/main.cpp b/storage/ndb/src/kernel/main.cpp
index f679646e14a..f679646e14a 100644
--- a/ndb/src/kernel/main.cpp
+++ b/storage/ndb/src/kernel/main.cpp
diff --git a/ndb/src/kernel/vm/Array.hpp b/storage/ndb/src/kernel/vm/Array.hpp
index 97b0a345cb4..97b0a345cb4 100644
--- a/ndb/src/kernel/vm/Array.hpp
+++ b/storage/ndb/src/kernel/vm/Array.hpp
diff --git a/ndb/src/kernel/vm/ArrayFifoList.hpp b/storage/ndb/src/kernel/vm/ArrayFifoList.hpp
index b21bf449734..b21bf449734 100644
--- a/ndb/src/kernel/vm/ArrayFifoList.hpp
+++ b/storage/ndb/src/kernel/vm/ArrayFifoList.hpp
diff --git a/ndb/src/kernel/vm/ArrayList.hpp b/storage/ndb/src/kernel/vm/ArrayList.hpp
index 4b46347a39b..4b46347a39b 100644
--- a/ndb/src/kernel/vm/ArrayList.hpp
+++ b/storage/ndb/src/kernel/vm/ArrayList.hpp
diff --git a/ndb/src/kernel/vm/ArrayPool.hpp b/storage/ndb/src/kernel/vm/ArrayPool.hpp
index 924ed51ee15..924ed51ee15 100644
--- a/ndb/src/kernel/vm/ArrayPool.hpp
+++ b/storage/ndb/src/kernel/vm/ArrayPool.hpp
diff --git a/ndb/src/kernel/vm/CArray.hpp b/storage/ndb/src/kernel/vm/CArray.hpp
index a6e84e2c041..a6e84e2c041 100644
--- a/ndb/src/kernel/vm/CArray.hpp
+++ b/storage/ndb/src/kernel/vm/CArray.hpp
diff --git a/ndb/src/kernel/vm/Callback.hpp b/storage/ndb/src/kernel/vm/Callback.hpp
index 6a619ba7859..6a619ba7859 100644
--- a/ndb/src/kernel/vm/Callback.hpp
+++ b/storage/ndb/src/kernel/vm/Callback.hpp
diff --git a/ndb/src/kernel/vm/ClusterConfiguration.cpp b/storage/ndb/src/kernel/vm/ClusterConfiguration.cpp
index d5bd03f69d5..d5bd03f69d5 100644
--- a/ndb/src/kernel/vm/ClusterConfiguration.cpp
+++ b/storage/ndb/src/kernel/vm/ClusterConfiguration.cpp
diff --git a/ndb/src/kernel/vm/ClusterConfiguration.hpp b/storage/ndb/src/kernel/vm/ClusterConfiguration.hpp
index cc7000a54ef..cc7000a54ef 100644
--- a/ndb/src/kernel/vm/ClusterConfiguration.hpp
+++ b/storage/ndb/src/kernel/vm/ClusterConfiguration.hpp
diff --git a/ndb/src/kernel/vm/Configuration.cpp b/storage/ndb/src/kernel/vm/Configuration.cpp
index 650d914035f..650d914035f 100644
--- a/ndb/src/kernel/vm/Configuration.cpp
+++ b/storage/ndb/src/kernel/vm/Configuration.cpp
diff --git a/ndb/src/kernel/vm/Configuration.hpp b/storage/ndb/src/kernel/vm/Configuration.hpp
index 6ca6d9a1f17..6ca6d9a1f17 100644
--- a/ndb/src/kernel/vm/Configuration.hpp
+++ b/storage/ndb/src/kernel/vm/Configuration.hpp
diff --git a/ndb/src/kernel/vm/DLFifoList.hpp b/storage/ndb/src/kernel/vm/DLFifoList.hpp
index b139ade831d..b139ade831d 100644
--- a/ndb/src/kernel/vm/DLFifoList.hpp
+++ b/storage/ndb/src/kernel/vm/DLFifoList.hpp
diff --git a/ndb/src/kernel/vm/DLHashTable.hpp b/storage/ndb/src/kernel/vm/DLHashTable.hpp
index 13a9632f8da..13a9632f8da 100644
--- a/ndb/src/kernel/vm/DLHashTable.hpp
+++ b/storage/ndb/src/kernel/vm/DLHashTable.hpp
diff --git a/ndb/src/kernel/vm/DLHashTable2.hpp b/storage/ndb/src/kernel/vm/DLHashTable2.hpp
index 6b166331631..6b166331631 100644
--- a/ndb/src/kernel/vm/DLHashTable2.hpp
+++ b/storage/ndb/src/kernel/vm/DLHashTable2.hpp
diff --git a/ndb/src/kernel/vm/DLList.hpp b/storage/ndb/src/kernel/vm/DLList.hpp
index b7820eb9229..b7820eb9229 100644
--- a/ndb/src/kernel/vm/DLList.hpp
+++ b/storage/ndb/src/kernel/vm/DLList.hpp
diff --git a/ndb/src/kernel/vm/DataBuffer.hpp b/storage/ndb/src/kernel/vm/DataBuffer.hpp
index 7f553898eb5..7f553898eb5 100644
--- a/ndb/src/kernel/vm/DataBuffer.hpp
+++ b/storage/ndb/src/kernel/vm/DataBuffer.hpp
diff --git a/ndb/src/kernel/vm/Emulator.cpp b/storage/ndb/src/kernel/vm/Emulator.cpp
index d6ed6c0dafd..d6ed6c0dafd 100644
--- a/ndb/src/kernel/vm/Emulator.cpp
+++ b/storage/ndb/src/kernel/vm/Emulator.cpp
diff --git a/ndb/src/kernel/vm/Emulator.hpp b/storage/ndb/src/kernel/vm/Emulator.hpp
index dba8cb3ab9b..dba8cb3ab9b 100644
--- a/ndb/src/kernel/vm/Emulator.hpp
+++ b/storage/ndb/src/kernel/vm/Emulator.hpp
diff --git a/ndb/src/kernel/vm/FastScheduler.cpp b/storage/ndb/src/kernel/vm/FastScheduler.cpp
index a2d806571fe..a2d806571fe 100644
--- a/ndb/src/kernel/vm/FastScheduler.cpp
+++ b/storage/ndb/src/kernel/vm/FastScheduler.cpp
diff --git a/ndb/src/kernel/vm/FastScheduler.hpp b/storage/ndb/src/kernel/vm/FastScheduler.hpp
index dc707e47eef..dc707e47eef 100644
--- a/ndb/src/kernel/vm/FastScheduler.hpp
+++ b/storage/ndb/src/kernel/vm/FastScheduler.hpp
diff --git a/ndb/src/kernel/vm/GlobalData.hpp b/storage/ndb/src/kernel/vm/GlobalData.hpp
index 99b65727374..99b65727374 100644
--- a/ndb/src/kernel/vm/GlobalData.hpp
+++ b/storage/ndb/src/kernel/vm/GlobalData.hpp
diff --git a/ndb/src/kernel/vm/KeyTable.hpp b/storage/ndb/src/kernel/vm/KeyTable.hpp
index e78837b5c8a..e78837b5c8a 100644
--- a/ndb/src/kernel/vm/KeyTable.hpp
+++ b/storage/ndb/src/kernel/vm/KeyTable.hpp
diff --git a/ndb/src/kernel/vm/KeyTable2.hpp b/storage/ndb/src/kernel/vm/KeyTable2.hpp
index 5c2b3096abe..5c2b3096abe 100644
--- a/ndb/src/kernel/vm/KeyTable2.hpp
+++ b/storage/ndb/src/kernel/vm/KeyTable2.hpp
diff --git a/ndb/src/kernel/vm/LongSignal.hpp b/storage/ndb/src/kernel/vm/LongSignal.hpp
index 9818358011f..9818358011f 100644
--- a/ndb/src/kernel/vm/LongSignal.hpp
+++ b/storage/ndb/src/kernel/vm/LongSignal.hpp
diff --git a/storage/ndb/src/kernel/vm/Makefile.am b/storage/ndb/src/kernel/vm/Makefile.am
new file mode 100644
index 00000000000..fc12a3371fd
--- /dev/null
+++ b/storage/ndb/src/kernel/vm/Makefile.am
@@ -0,0 +1,44 @@
+#SUBDIRS = testCopy testDataBuffer testSimplePropertiesSection
+#ifneq ($(USE_EDITLINE), N)
+#DIRS += testLongSig
+#endif
+
+noinst_LIBRARIES = libkernel.a
+
+libkernel_a_SOURCES = \
+ SimulatedBlock.cpp \
+ FastScheduler.cpp \
+ TimeQueue.cpp \
+ VMSignal.cpp \
+ ThreadConfig.cpp \
+ TransporterCallback.cpp \
+ Emulator.cpp \
+ Configuration.cpp \
+ WatchDog.cpp \
+ SimplePropertiesSection.cpp \
+ SectionReader.cpp \
+ MetaData.cpp \
+ Mutex.cpp SafeCounter.cpp \
+ SuperPool.cpp
+
+INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/mgmapi
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libkernel.dsp
+
+libkernel.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libkernel_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/vm/MetaData.cpp b/storage/ndb/src/kernel/vm/MetaData.cpp
index 51afbf21503..51afbf21503 100644
--- a/ndb/src/kernel/vm/MetaData.cpp
+++ b/storage/ndb/src/kernel/vm/MetaData.cpp
diff --git a/ndb/src/kernel/vm/MetaData.hpp b/storage/ndb/src/kernel/vm/MetaData.hpp
index 1000114a421..1000114a421 100644
--- a/ndb/src/kernel/vm/MetaData.hpp
+++ b/storage/ndb/src/kernel/vm/MetaData.hpp
diff --git a/ndb/src/kernel/vm/Mutex.cpp b/storage/ndb/src/kernel/vm/Mutex.cpp
index aab9e74312b..aab9e74312b 100644
--- a/ndb/src/kernel/vm/Mutex.cpp
+++ b/storage/ndb/src/kernel/vm/Mutex.cpp
diff --git a/ndb/src/kernel/vm/Mutex.hpp b/storage/ndb/src/kernel/vm/Mutex.hpp
index 7a16046188c..7a16046188c 100644
--- a/ndb/src/kernel/vm/Mutex.hpp
+++ b/storage/ndb/src/kernel/vm/Mutex.hpp
diff --git a/ndb/src/kernel/vm/Prio.hpp b/storage/ndb/src/kernel/vm/Prio.hpp
index 4c9c22b0afe..4c9c22b0afe 100644
--- a/ndb/src/kernel/vm/Prio.hpp
+++ b/storage/ndb/src/kernel/vm/Prio.hpp
diff --git a/ndb/src/kernel/vm/RequestTracker.hpp b/storage/ndb/src/kernel/vm/RequestTracker.hpp
index 5fd1ae7255a..5fd1ae7255a 100644
--- a/ndb/src/kernel/vm/RequestTracker.hpp
+++ b/storage/ndb/src/kernel/vm/RequestTracker.hpp
diff --git a/ndb/src/kernel/vm/SLList.hpp b/storage/ndb/src/kernel/vm/SLList.hpp
index 5fde41aa3e0..5fde41aa3e0 100644
--- a/ndb/src/kernel/vm/SLList.hpp
+++ b/storage/ndb/src/kernel/vm/SLList.hpp
diff --git a/ndb/src/kernel/vm/SafeCounter.cpp b/storage/ndb/src/kernel/vm/SafeCounter.cpp
index b09ad08b026..b09ad08b026 100644
--- a/ndb/src/kernel/vm/SafeCounter.cpp
+++ b/storage/ndb/src/kernel/vm/SafeCounter.cpp
diff --git a/ndb/src/kernel/vm/SafeCounter.hpp b/storage/ndb/src/kernel/vm/SafeCounter.hpp
index 1f3cc15c2d6..1f3cc15c2d6 100644
--- a/ndb/src/kernel/vm/SafeCounter.hpp
+++ b/storage/ndb/src/kernel/vm/SafeCounter.hpp
diff --git a/ndb/src/kernel/vm/SectionReader.cpp b/storage/ndb/src/kernel/vm/SectionReader.cpp
index dd474a49e50..dd474a49e50 100644
--- a/ndb/src/kernel/vm/SectionReader.cpp
+++ b/storage/ndb/src/kernel/vm/SectionReader.cpp
diff --git a/ndb/src/kernel/vm/SectionReader.hpp b/storage/ndb/src/kernel/vm/SectionReader.hpp
index b51006b6128..b51006b6128 100644
--- a/ndb/src/kernel/vm/SectionReader.hpp
+++ b/storage/ndb/src/kernel/vm/SectionReader.hpp
diff --git a/ndb/src/kernel/vm/SignalCounter.hpp b/storage/ndb/src/kernel/vm/SignalCounter.hpp
index 62242cb65bd..62242cb65bd 100644
--- a/ndb/src/kernel/vm/SignalCounter.hpp
+++ b/storage/ndb/src/kernel/vm/SignalCounter.hpp
diff --git a/ndb/src/kernel/vm/SimBlockList.hpp b/storage/ndb/src/kernel/vm/SimBlockList.hpp
index 40485a37425..40485a37425 100644
--- a/ndb/src/kernel/vm/SimBlockList.hpp
+++ b/storage/ndb/src/kernel/vm/SimBlockList.hpp
diff --git a/ndb/src/kernel/vm/SimplePropertiesSection.cpp b/storage/ndb/src/kernel/vm/SimplePropertiesSection.cpp
index 070563be36b..070563be36b 100644
--- a/ndb/src/kernel/vm/SimplePropertiesSection.cpp
+++ b/storage/ndb/src/kernel/vm/SimplePropertiesSection.cpp
diff --git a/ndb/src/kernel/vm/SimulatedBlock.cpp b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp
index 35c0781a24d..35c0781a24d 100644
--- a/ndb/src/kernel/vm/SimulatedBlock.cpp
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp
diff --git a/ndb/src/kernel/vm/SimulatedBlock.hpp b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp
index 787d14ca5cb..787d14ca5cb 100644
--- a/ndb/src/kernel/vm/SimulatedBlock.hpp
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp
diff --git a/ndb/src/kernel/vm/SuperPool.cpp b/storage/ndb/src/kernel/vm/SuperPool.cpp
index 65e5dd99629..65e5dd99629 100644
--- a/ndb/src/kernel/vm/SuperPool.cpp
+++ b/storage/ndb/src/kernel/vm/SuperPool.cpp
diff --git a/ndb/src/kernel/vm/SuperPool.hpp b/storage/ndb/src/kernel/vm/SuperPool.hpp
index 157c75aa0d5..157c75aa0d5 100644
--- a/ndb/src/kernel/vm/SuperPool.hpp
+++ b/storage/ndb/src/kernel/vm/SuperPool.hpp
diff --git a/ndb/src/kernel/vm/ThreadConfig.cpp b/storage/ndb/src/kernel/vm/ThreadConfig.cpp
index 76fcc4ba84f..76fcc4ba84f 100644
--- a/ndb/src/kernel/vm/ThreadConfig.cpp
+++ b/storage/ndb/src/kernel/vm/ThreadConfig.cpp
diff --git a/ndb/src/kernel/vm/ThreadConfig.hpp b/storage/ndb/src/kernel/vm/ThreadConfig.hpp
index 91c2cafe0e0..91c2cafe0e0 100644
--- a/ndb/src/kernel/vm/ThreadConfig.hpp
+++ b/storage/ndb/src/kernel/vm/ThreadConfig.hpp
diff --git a/ndb/src/kernel/vm/TimeQueue.cpp b/storage/ndb/src/kernel/vm/TimeQueue.cpp
index 56988c2e3da..56988c2e3da 100644
--- a/ndb/src/kernel/vm/TimeQueue.cpp
+++ b/storage/ndb/src/kernel/vm/TimeQueue.cpp
diff --git a/ndb/src/kernel/vm/TimeQueue.hpp b/storage/ndb/src/kernel/vm/TimeQueue.hpp
index 1203ace10f5..1203ace10f5 100644
--- a/ndb/src/kernel/vm/TimeQueue.hpp
+++ b/storage/ndb/src/kernel/vm/TimeQueue.hpp
diff --git a/ndb/src/kernel/vm/TransporterCallback.cpp b/storage/ndb/src/kernel/vm/TransporterCallback.cpp
index 0f292143c21..0f292143c21 100644
--- a/ndb/src/kernel/vm/TransporterCallback.cpp
+++ b/storage/ndb/src/kernel/vm/TransporterCallback.cpp
diff --git a/ndb/src/kernel/vm/VMSignal.cpp b/storage/ndb/src/kernel/vm/VMSignal.cpp
index e4eafb47ff7..e4eafb47ff7 100644
--- a/ndb/src/kernel/vm/VMSignal.cpp
+++ b/storage/ndb/src/kernel/vm/VMSignal.cpp
diff --git a/ndb/src/kernel/vm/VMSignal.hpp b/storage/ndb/src/kernel/vm/VMSignal.hpp
index 45543c5d174..45543c5d174 100644
--- a/ndb/src/kernel/vm/VMSignal.hpp
+++ b/storage/ndb/src/kernel/vm/VMSignal.hpp
diff --git a/ndb/src/kernel/vm/WaitQueue.hpp b/storage/ndb/src/kernel/vm/WaitQueue.hpp
index 4d7240b6866..4d7240b6866 100644
--- a/ndb/src/kernel/vm/WaitQueue.hpp
+++ b/storage/ndb/src/kernel/vm/WaitQueue.hpp
diff --git a/ndb/src/kernel/vm/WatchDog.cpp b/storage/ndb/src/kernel/vm/WatchDog.cpp
index 23475a478d3..23475a478d3 100644
--- a/ndb/src/kernel/vm/WatchDog.cpp
+++ b/storage/ndb/src/kernel/vm/WatchDog.cpp
diff --git a/ndb/src/kernel/vm/WatchDog.hpp b/storage/ndb/src/kernel/vm/WatchDog.hpp
index 4b44b1a96a2..4b44b1a96a2 100644
--- a/ndb/src/kernel/vm/WatchDog.hpp
+++ b/storage/ndb/src/kernel/vm/WatchDog.hpp
diff --git a/ndb/src/kernel/vm/al_test/Makefile b/storage/ndb/src/kernel/vm/al_test/Makefile
index a7287a341fd..a7287a341fd 100644
--- a/ndb/src/kernel/vm/al_test/Makefile
+++ b/storage/ndb/src/kernel/vm/al_test/Makefile
diff --git a/ndb/src/kernel/vm/al_test/arrayListTest.cpp b/storage/ndb/src/kernel/vm/al_test/arrayListTest.cpp
index bb320106653..bb320106653 100644
--- a/ndb/src/kernel/vm/al_test/arrayListTest.cpp
+++ b/storage/ndb/src/kernel/vm/al_test/arrayListTest.cpp
diff --git a/ndb/src/kernel/vm/al_test/arrayPoolTest.cpp b/storage/ndb/src/kernel/vm/al_test/arrayPoolTest.cpp
index e80905121e1..e80905121e1 100644
--- a/ndb/src/kernel/vm/al_test/arrayPoolTest.cpp
+++ b/storage/ndb/src/kernel/vm/al_test/arrayPoolTest.cpp
diff --git a/ndb/src/kernel/vm/al_test/main.cpp b/storage/ndb/src/kernel/vm/al_test/main.cpp
index 23193b50725..23193b50725 100644
--- a/ndb/src/kernel/vm/al_test/main.cpp
+++ b/storage/ndb/src/kernel/vm/al_test/main.cpp
diff --git a/ndb/src/kernel/vm/pc.hpp b/storage/ndb/src/kernel/vm/pc.hpp
index 2d745d26b1c..2d745d26b1c 100644
--- a/ndb/src/kernel/vm/pc.hpp
+++ b/storage/ndb/src/kernel/vm/pc.hpp
diff --git a/ndb/src/kernel/vm/testCopy/Makefile b/storage/ndb/src/kernel/vm/testCopy/Makefile
index 5abd93eb74f..5abd93eb74f 100644
--- a/ndb/src/kernel/vm/testCopy/Makefile
+++ b/storage/ndb/src/kernel/vm/testCopy/Makefile
diff --git a/ndb/src/kernel/vm/testCopy/rr.cpp b/storage/ndb/src/kernel/vm/testCopy/rr.cpp
index 1e8305dfe4c..1e8305dfe4c 100644
--- a/ndb/src/kernel/vm/testCopy/rr.cpp
+++ b/storage/ndb/src/kernel/vm/testCopy/rr.cpp
diff --git a/ndb/src/kernel/vm/testCopy/testCopy.cpp b/storage/ndb/src/kernel/vm/testCopy/testCopy.cpp
index 78a1dab2619..78a1dab2619 100644
--- a/ndb/src/kernel/vm/testCopy/testCopy.cpp
+++ b/storage/ndb/src/kernel/vm/testCopy/testCopy.cpp
diff --git a/ndb/src/kernel/vm/testDataBuffer/Makefile b/storage/ndb/src/kernel/vm/testDataBuffer/Makefile
index 693989dfe3c..693989dfe3c 100644
--- a/ndb/src/kernel/vm/testDataBuffer/Makefile
+++ b/storage/ndb/src/kernel/vm/testDataBuffer/Makefile
diff --git a/ndb/src/kernel/vm/testDataBuffer/testDataBuffer.cpp b/storage/ndb/src/kernel/vm/testDataBuffer/testDataBuffer.cpp
index 5ba59418223..5ba59418223 100644
--- a/ndb/src/kernel/vm/testDataBuffer/testDataBuffer.cpp
+++ b/storage/ndb/src/kernel/vm/testDataBuffer/testDataBuffer.cpp
diff --git a/ndb/src/kernel/vm/testLongSig/Makefile b/storage/ndb/src/kernel/vm/testLongSig/Makefile
index ecf33dca109..ecf33dca109 100644
--- a/ndb/src/kernel/vm/testLongSig/Makefile
+++ b/storage/ndb/src/kernel/vm/testLongSig/Makefile
diff --git a/ndb/src/kernel/vm/testLongSig/testLongSig.cpp b/storage/ndb/src/kernel/vm/testLongSig/testLongSig.cpp
index 1d1fb8ebc82..1d1fb8ebc82 100644
--- a/ndb/src/kernel/vm/testLongSig/testLongSig.cpp
+++ b/storage/ndb/src/kernel/vm/testLongSig/testLongSig.cpp
diff --git a/ndb/src/kernel/vm/testSimplePropertiesSection/Makefile b/storage/ndb/src/kernel/vm/testSimplePropertiesSection/Makefile
index fb3aea00507..fb3aea00507 100644
--- a/ndb/src/kernel/vm/testSimplePropertiesSection/Makefile
+++ b/storage/ndb/src/kernel/vm/testSimplePropertiesSection/Makefile
diff --git a/ndb/src/kernel/vm/testSimplePropertiesSection/test.cpp b/storage/ndb/src/kernel/vm/testSimplePropertiesSection/test.cpp
index e16870edf11..e16870edf11 100644
--- a/ndb/src/kernel/vm/testSimplePropertiesSection/test.cpp
+++ b/storage/ndb/src/kernel/vm/testSimplePropertiesSection/test.cpp
diff --git a/ndb/src/kernel/vm/testSuperPool.cpp b/storage/ndb/src/kernel/vm/testSuperPool.cpp
index 194b3a43fa0..194b3a43fa0 100644
--- a/ndb/src/kernel/vm/testSuperPool.cpp
+++ b/storage/ndb/src/kernel/vm/testSuperPool.cpp
diff --git a/ndb/src/mgmapi/LocalConfig.cpp b/storage/ndb/src/mgmapi/LocalConfig.cpp
index 75ad8b40a1f..75ad8b40a1f 100644
--- a/ndb/src/mgmapi/LocalConfig.cpp
+++ b/storage/ndb/src/mgmapi/LocalConfig.cpp
diff --git a/ndb/src/mgmapi/LocalConfig.hpp b/storage/ndb/src/mgmapi/LocalConfig.hpp
index c415ec1be91..c415ec1be91 100644
--- a/ndb/src/mgmapi/LocalConfig.hpp
+++ b/storage/ndb/src/mgmapi/LocalConfig.hpp
diff --git a/storage/ndb/src/mgmapi/Makefile.am b/storage/ndb/src/mgmapi/Makefile.am
new file mode 100644
index 00000000000..21966fd69de
--- /dev/null
+++ b/storage/ndb/src/mgmapi/Makefile.am
@@ -0,0 +1,30 @@
+
+noinst_LTLIBRARIES = libmgmapi.la
+
+libmgmapi_la_SOURCES = mgmapi.cpp ndb_logevent.cpp mgmapi_configuration.cpp LocalConfig.cpp
+
+INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/include/mgmapi
+
+DEFS_LOC = -DNO_DEBUG_MESSAGES -DNDB_PORT="\"@ndb_port@\""
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_util.mk.am
+
+#ndbtest_PROGRAMS = ndb_test_mgmapi
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libmgmapi.dsp
+
+libmgmapi.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libmgmapi_la_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
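
Unlike the kernel blocks, the management API is built as a libtool convenience library (noinst_LTLIBRARIES), which lets other directories fold it into their own libraries or programs; the file also uses the *_LOC convention for local include paths and defines (here NDB_PORT from configure). A minimal consumer, modelled on the mgmclient Makefile.am further down in this patch (libexampleclient.la and its source file are placeholders, not part of this change), could pull it in through LIBADD:

    noinst_LTLIBRARIES = libexampleclient.la

    libexampleclient_la_SOURCES = ExampleClient.cpp

    # Fold the mgmapi convenience library into this one; the relative
    # path assumes a sibling directory under storage/ndb/src, as with
    # mgmclient below.
    libexampleclient_la_LIBADD = ../mgmapi/libmgmapi.la

    include $(top_srcdir)/storage/ndb/config/common.mk.am
    include $(top_srcdir)/storage/ndb/config/type_util.mk.am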
diff --git a/ndb/src/mgmapi/mgmapi.cpp b/storage/ndb/src/mgmapi/mgmapi.cpp
index 5808e2ed534..5808e2ed534 100644
--- a/ndb/src/mgmapi/mgmapi.cpp
+++ b/storage/ndb/src/mgmapi/mgmapi.cpp
diff --git a/ndb/src/mgmapi/mgmapi_configuration.cpp b/storage/ndb/src/mgmapi/mgmapi_configuration.cpp
index 80ab428c05a..80ab428c05a 100644
--- a/ndb/src/mgmapi/mgmapi_configuration.cpp
+++ b/storage/ndb/src/mgmapi/mgmapi_configuration.cpp
diff --git a/ndb/src/mgmapi/mgmapi_configuration.hpp b/storage/ndb/src/mgmapi/mgmapi_configuration.hpp
index 7d60a4842a1..7d60a4842a1 100644
--- a/ndb/src/mgmapi/mgmapi_configuration.hpp
+++ b/storage/ndb/src/mgmapi/mgmapi_configuration.hpp
diff --git a/ndb/src/mgmapi/mgmapi_internal.h b/storage/ndb/src/mgmapi/mgmapi_internal.h
index 90f93129f2a..90f93129f2a 100644
--- a/ndb/src/mgmapi/mgmapi_internal.h
+++ b/storage/ndb/src/mgmapi/mgmapi_internal.h
diff --git a/ndb/src/mgmapi/ndb_logevent.cpp b/storage/ndb/src/mgmapi/ndb_logevent.cpp
index 27e7c1f36f5..27e7c1f36f5 100644
--- a/ndb/src/mgmapi/ndb_logevent.cpp
+++ b/storage/ndb/src/mgmapi/ndb_logevent.cpp
diff --git a/ndb/src/mgmapi/ndb_logevent.hpp b/storage/ndb/src/mgmapi/ndb_logevent.hpp
index cb1a0e388e5..cb1a0e388e5 100644
--- a/ndb/src/mgmapi/ndb_logevent.hpp
+++ b/storage/ndb/src/mgmapi/ndb_logevent.hpp
diff --git a/ndb/src/mgmapi/test/Makefile b/storage/ndb/src/mgmapi/test/Makefile
index c6d3efa6fcc..c6d3efa6fcc 100644
--- a/ndb/src/mgmapi/test/Makefile
+++ b/storage/ndb/src/mgmapi/test/Makefile
diff --git a/ndb/src/mgmapi/test/keso.c b/storage/ndb/src/mgmapi/test/keso.c
index d2675b2ca8a..d2675b2ca8a 100644
--- a/ndb/src/mgmapi/test/keso.c
+++ b/storage/ndb/src/mgmapi/test/keso.c
diff --git a/ndb/src/mgmapi/test/mgmSrvApi.cpp b/storage/ndb/src/mgmapi/test/mgmSrvApi.cpp
index 4a8e38c9ba5..4a8e38c9ba5 100644
--- a/ndb/src/mgmapi/test/mgmSrvApi.cpp
+++ b/storage/ndb/src/mgmapi/test/mgmSrvApi.cpp
diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/storage/ndb/src/mgmclient/CommandInterpreter.cpp
index 72f6e7869b2..72f6e7869b2 100644
--- a/ndb/src/mgmclient/CommandInterpreter.cpp
+++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp
diff --git a/storage/ndb/src/mgmclient/Makefile.am b/storage/ndb/src/mgmclient/Makefile.am
new file mode 100644
index 00000000000..217f3e993a7
--- /dev/null
+++ b/storage/ndb/src/mgmclient/Makefile.am
@@ -0,0 +1,58 @@
+
+noinst_LTLIBRARIES = libndbmgmclient.la
+ndbtools_PROGRAMS = ndb_mgm
+
+libndbmgmclient_la_SOURCES = CommandInterpreter.cpp
+libndbmgmclient_la_LIBADD = ../mgmapi/libmgmapi.la \
+ ../common/logger/liblogger.la \
+ ../common/portlib/libportlib.la \
+ ../common/util/libgeneral.la \
+ ../common/portlib/libportlib.la
+
+
+ndb_mgm_SOURCES = main.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am
+
+INCLUDES += -I$(top_srcdir)/storage/ndb/include/mgmapi \
+ -I$(top_srcdir)/storage/ndb/src/common/mgmcommon
+
+LDADD_LOC = $(noinst_LTLIBRARIES) \
+ ../common/portlib/libportlib.la \
+ @readline_link@ \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a \
+ @TERMCAP_LIB@ @NDB_SCI_LIBS@
+
+ndb_mgm_LDFLAGS = @ndb_bin_am_ldflags@
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: ndb_mgm.dsp libndbmgmclient.dsp
+
+ndb_mgm.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(ndbtools_PROGRAMS)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(ndb_mgm_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
+
+libndbmgmclient.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libndbmgmclient_la_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB
diff --git a/ndb/src/mgmclient/main.cpp b/storage/ndb/src/mgmclient/main.cpp
index ba5d0308f1f..ba5d0308f1f 100644
--- a/ndb/src/mgmclient/main.cpp
+++ b/storage/ndb/src/mgmclient/main.cpp
diff --git a/ndb/src/mgmclient/ndb_mgmclient.h b/storage/ndb/src/mgmclient/ndb_mgmclient.h
index b62a33999a3..b62a33999a3 100644
--- a/ndb/src/mgmclient/ndb_mgmclient.h
+++ b/storage/ndb/src/mgmclient/ndb_mgmclient.h
diff --git a/ndb/src/mgmclient/ndb_mgmclient.hpp b/storage/ndb/src/mgmclient/ndb_mgmclient.hpp
index bffdf69f920..bffdf69f920 100644
--- a/ndb/src/mgmclient/ndb_mgmclient.hpp
+++ b/storage/ndb/src/mgmclient/ndb_mgmclient.hpp
diff --git a/ndb/src/mgmclient/test_cpcd/Makefile b/storage/ndb/src/mgmclient/test_cpcd/Makefile
index 4ced10cfc59..4ced10cfc59 100644
--- a/ndb/src/mgmclient/test_cpcd/Makefile
+++ b/storage/ndb/src/mgmclient/test_cpcd/Makefile
diff --git a/ndb/src/mgmclient/test_cpcd/test_cpcd.cpp b/storage/ndb/src/mgmclient/test_cpcd/test_cpcd.cpp
index 32f0adbcf26..32f0adbcf26 100644
--- a/ndb/src/mgmclient/test_cpcd/test_cpcd.cpp
+++ b/storage/ndb/src/mgmclient/test_cpcd/test_cpcd.cpp
diff --git a/ndb/src/mgmsrv/Config.cpp b/storage/ndb/src/mgmsrv/Config.cpp
index 5ff9cbe04ad..5ff9cbe04ad 100644
--- a/ndb/src/mgmsrv/Config.cpp
+++ b/storage/ndb/src/mgmsrv/Config.cpp
diff --git a/ndb/src/mgmsrv/Config.hpp b/storage/ndb/src/mgmsrv/Config.hpp
index b5e1e17b027..b5e1e17b027 100644
--- a/ndb/src/mgmsrv/Config.hpp
+++ b/storage/ndb/src/mgmsrv/Config.hpp
diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp
index 34a2d8c1302..34a2d8c1302 100644
--- a/ndb/src/mgmsrv/ConfigInfo.cpp
+++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp
diff --git a/ndb/src/mgmsrv/ConfigInfo.hpp b/storage/ndb/src/mgmsrv/ConfigInfo.hpp
index 871ee62040e..871ee62040e 100644
--- a/ndb/src/mgmsrv/ConfigInfo.hpp
+++ b/storage/ndb/src/mgmsrv/ConfigInfo.hpp
diff --git a/ndb/src/mgmsrv/InitConfigFileParser.cpp b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
index 822e10c89aa..822e10c89aa 100644
--- a/ndb/src/mgmsrv/InitConfigFileParser.cpp
+++ b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
diff --git a/ndb/src/mgmsrv/InitConfigFileParser.hpp b/storage/ndb/src/mgmsrv/InitConfigFileParser.hpp
index 1ea0a094ccd..1ea0a094ccd 100644
--- a/ndb/src/mgmsrv/InitConfigFileParser.hpp
+++ b/storage/ndb/src/mgmsrv/InitConfigFileParser.hpp
diff --git a/storage/ndb/src/mgmsrv/Makefile.am b/storage/ndb/src/mgmsrv/Makefile.am
new file mode 100644
index 00000000000..3ee39767834
--- /dev/null
+++ b/storage/ndb/src/mgmsrv/Makefile.am
@@ -0,0 +1,60 @@
+MYSQLDATAdir = $(localstatedir)
+MYSQLSHAREdir = $(pkgdatadir)
+MYSQLBASEdir= $(prefix)
+#MYSQLCLUSTERdir= $(prefix)/mysql-cluster
+MYSQLCLUSTERdir= .
+
+ndbbin_PROGRAMS = ndb_mgmd
+
+ndb_mgmd_SOURCES = \
+ MgmtSrvr.cpp \
+ MgmtSrvrGeneralSignalHandling.cpp \
+ main.cpp \
+ Services.cpp \
+ convertStrToInt.cpp \
+ SignalQueue.cpp \
+ MgmtSrvrConfig.cpp \
+ ConfigInfo.cpp \
+ InitConfigFileParser.cpp \
+ Config.cpp
+
+INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/ndbapi \
+ -I$(top_srcdir)/storage/ndb/src/mgmapi \
+ -I$(top_srcdir)/storage/ndb/src/common/mgmcommon \
+ -I$(top_srcdir)/storage/ndb/src/mgmclient
+
+LDADD_LOC = $(top_srcdir)/storage/ndb/src/mgmclient/CommandInterpreter.o \
+ $(top_builddir)/storage/ndb/src/libndbclient.la \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a \
+ @readline_link@ \
+ @NDB_SCI_LIBS@ \
+ @TERMCAP_LIB@
+
+DEFS_LOC = -DDEFAULT_MYSQL_HOME="\"$(MYSQLBASEdir)\"" \
+ -DDATADIR="\"$(MYSQLDATAdir)\"" \
+ -DSHAREDIR="\"$(MYSQLSHAREdir)\"" \
+ -DMYSQLCLUSTERDIR="\"$(MYSQLCLUSTERdir)\""
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am
+
+ndb_mgmd_LDFLAGS = @ndb_bin_am_ldflags@
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: ndb_mgmd.dsp
+
+ndb_mgmd.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(ndbbin_PROGRAMS)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(ndb_mgmd_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
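
The management server Makefile.am shows the program-target variant of the same pattern: an ndbbin_PROGRAMS entry, LDADD_LOC for the libraries to link, DEFS_LOC for directory paths baked in as quoted C string macros, and ndb_bin_am_ldflags from configure. A stripped-down sketch of that shape for a hypothetical server-side tool (ndb_example_tool and its define are placeholders) would be:

    ndbbin_PROGRAMS = ndb_example_tool

    ndb_example_tool_SOURCES = example_tool.cpp

    # Link against the NDB client library plus the MySQL base libraries,
    # as ndb_mgmd does above.
    LDADD_LOC = $(top_builddir)/storage/ndb/src/libndbclient.la \
                $(top_builddir)/dbug/libdbug.a \
                $(top_builddir)/mysys/libmysys.a \
                $(top_builddir)/strings/libmystrings.a

    # Directory baked in as a quoted string macro, like MYSQLCLUSTERDIR above.
    DEFS_LOC = -DEXAMPLE_DIR="\"$(prefix)/example\""

    include $(top_srcdir)/storage/ndb/config/common.mk.am
    include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am

    ndb_example_tool_LDFLAGS = @ndb_bin_am_ldflags@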
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
index 51356cb75b1..51356cb75b1 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
index b7983e6b441..b7983e6b441 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
diff --git a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp b/storage/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
index 6c4b4e9ae3c..6c4b4e9ae3c 100644
--- a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
diff --git a/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp b/storage/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp
index f93948abc75..f93948abc75 100644
--- a/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp
diff --git a/ndb/src/mgmsrv/Services.cpp b/storage/ndb/src/mgmsrv/Services.cpp
index 270d7f716dd..270d7f716dd 100644
--- a/ndb/src/mgmsrv/Services.cpp
+++ b/storage/ndb/src/mgmsrv/Services.cpp
diff --git a/ndb/src/mgmsrv/Services.hpp b/storage/ndb/src/mgmsrv/Services.hpp
index ff9008b05a8..ff9008b05a8 100644
--- a/ndb/src/mgmsrv/Services.hpp
+++ b/storage/ndb/src/mgmsrv/Services.hpp
diff --git a/ndb/src/mgmsrv/SignalQueue.cpp b/storage/ndb/src/mgmsrv/SignalQueue.cpp
index 08ad5f363a6..08ad5f363a6 100644
--- a/ndb/src/mgmsrv/SignalQueue.cpp
+++ b/storage/ndb/src/mgmsrv/SignalQueue.cpp
diff --git a/ndb/src/mgmsrv/SignalQueue.hpp b/storage/ndb/src/mgmsrv/SignalQueue.hpp
index bacbad53415..bacbad53415 100644
--- a/ndb/src/mgmsrv/SignalQueue.hpp
+++ b/storage/ndb/src/mgmsrv/SignalQueue.hpp
diff --git a/ndb/src/mgmsrv/convertStrToInt.cpp b/storage/ndb/src/mgmsrv/convertStrToInt.cpp
index e5216047d10..e5216047d10 100644
--- a/ndb/src/mgmsrv/convertStrToInt.cpp
+++ b/storage/ndb/src/mgmsrv/convertStrToInt.cpp
diff --git a/ndb/src/mgmsrv/convertStrToInt.hpp b/storage/ndb/src/mgmsrv/convertStrToInt.hpp
index 0b2a96ed0bf..0b2a96ed0bf 100644
--- a/ndb/src/mgmsrv/convertStrToInt.hpp
+++ b/storage/ndb/src/mgmsrv/convertStrToInt.hpp
diff --git a/ndb/src/mgmsrv/main.cpp b/storage/ndb/src/mgmsrv/main.cpp
index ec20101493e..ec20101493e 100644
--- a/ndb/src/mgmsrv/main.cpp
+++ b/storage/ndb/src/mgmsrv/main.cpp
diff --git a/ndb/src/mgmsrv/mkconfig/Makefile b/storage/ndb/src/mgmsrv/mkconfig/Makefile
index 43574eefbd1..43574eefbd1 100644
--- a/ndb/src/mgmsrv/mkconfig/Makefile
+++ b/storage/ndb/src/mgmsrv/mkconfig/Makefile
diff --git a/ndb/src/mgmsrv/mkconfig/mkconfig.cpp b/storage/ndb/src/mgmsrv/mkconfig/mkconfig.cpp
index 28823aaa35e..28823aaa35e 100644
--- a/ndb/src/mgmsrv/mkconfig/mkconfig.cpp
+++ b/storage/ndb/src/mgmsrv/mkconfig/mkconfig.cpp
diff --git a/ndb/src/ndbapi/API.hpp b/storage/ndb/src/ndbapi/API.hpp
index 05e2d863cb6..05e2d863cb6 100644
--- a/ndb/src/ndbapi/API.hpp
+++ b/storage/ndb/src/ndbapi/API.hpp
diff --git a/ndb/src/ndbapi/ClusterMgr.cpp b/storage/ndb/src/ndbapi/ClusterMgr.cpp
index ef9367ef10e..ef9367ef10e 100644
--- a/ndb/src/ndbapi/ClusterMgr.cpp
+++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp
diff --git a/ndb/src/ndbapi/ClusterMgr.hpp b/storage/ndb/src/ndbapi/ClusterMgr.hpp
index d75b820e9cb..d75b820e9cb 100644
--- a/ndb/src/ndbapi/ClusterMgr.hpp
+++ b/storage/ndb/src/ndbapi/ClusterMgr.hpp
diff --git a/ndb/src/ndbapi/DictCache.cpp b/storage/ndb/src/ndbapi/DictCache.cpp
index 3d14df908a0..3d14df908a0 100644
--- a/ndb/src/ndbapi/DictCache.cpp
+++ b/storage/ndb/src/ndbapi/DictCache.cpp
diff --git a/ndb/src/ndbapi/DictCache.hpp b/storage/ndb/src/ndbapi/DictCache.hpp
index d9bf810a685..d9bf810a685 100644
--- a/ndb/src/ndbapi/DictCache.hpp
+++ b/storage/ndb/src/ndbapi/DictCache.hpp
diff --git a/storage/ndb/src/ndbapi/Makefile.am b/storage/ndb/src/ndbapi/Makefile.am
new file mode 100644
index 00000000000..0656aad7ed3
--- /dev/null
+++ b/storage/ndb/src/ndbapi/Makefile.am
@@ -0,0 +1,62 @@
+#SUBDIRS = signal-sender
+
+noinst_LTLIBRARIES = libndbapi.la
+
+libndbapi_la_SOURCES = \
+ TransporterFacade.cpp \
+ ClusterMgr.cpp \
+ Ndb.cpp \
+ NdbPoolImpl.cpp \
+ NdbPool.cpp \
+ Ndblist.cpp \
+ Ndbif.cpp \
+ Ndbinit.cpp \
+ Ndberr.cpp \
+ ndberror.c \
+ NdbErrorOut.cpp \
+ NdbTransaction.cpp \
+ NdbTransactionScan.cpp \
+ NdbOperation.cpp \
+ NdbOperationSearch.cpp \
+ NdbOperationScan.cpp \
+ NdbOperationInt.cpp \
+ NdbOperationDefine.cpp \
+ NdbOperationExec.cpp \
+ NdbScanOperation.cpp NdbScanFilter.cpp \
+ NdbIndexOperation.cpp \
+ NdbEventOperation.cpp \
+ NdbEventOperationImpl.cpp \
+ NdbApiSignal.cpp \
+ NdbRecAttr.cpp \
+ NdbUtil.cpp \
+ NdbReceiver.cpp \
+ NdbDictionary.cpp \
+ NdbDictionaryImpl.cpp \
+ DictCache.cpp \
+ ndb_cluster_connection.cpp \
+ NdbBlob.cpp
+
+INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/mgmapi
+
+# Ndbapi cannot handle -O3
+NDB_CXXFLAGS_RELEASE_LOC = -O2
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
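+# Generate the MSVC .dsp project file: start from the win-lib.am template
+# and let the win-* helper scripts below fill in the project name,
+# includes, sources and libraries.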
+windoze-dsp: libndbapi.dsp
+
+libndbapi.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libndbapi_la_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp
index 7893aaae15c..7893aaae15c 100644
--- a/ndb/src/ndbapi/Ndb.cpp
+++ b/storage/ndb/src/ndbapi/Ndb.cpp
diff --git a/ndb/src/ndbapi/NdbApiSignal.cpp b/storage/ndb/src/ndbapi/NdbApiSignal.cpp
index b1671e593e1..b1671e593e1 100644
--- a/ndb/src/ndbapi/NdbApiSignal.cpp
+++ b/storage/ndb/src/ndbapi/NdbApiSignal.cpp
diff --git a/ndb/src/ndbapi/NdbApiSignal.hpp b/storage/ndb/src/ndbapi/NdbApiSignal.hpp
index 353c575d420..353c575d420 100644
--- a/ndb/src/ndbapi/NdbApiSignal.hpp
+++ b/storage/ndb/src/ndbapi/NdbApiSignal.hpp
diff --git a/storage/ndb/src/ndbapi/NdbBlob.cpp b/storage/ndb/src/ndbapi/NdbBlob.cpp
new file mode 100644
index 00000000000..77ab87ce5c1
--- /dev/null
+++ b/storage/ndb/src/ndbapi/NdbBlob.cpp
@@ -0,0 +1,1619 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <Ndb.hpp>
+#include <NdbDictionaryImpl.hpp>
+#include <NdbTransaction.hpp>
+#include <NdbOperation.hpp>
+#include <NdbIndexOperation.hpp>
+#include <NdbRecAttr.hpp>
+#include <NdbBlob.hpp>
+#include "NdbBlobImpl.hpp"
+#include <NdbScanOperation.hpp>
+
+/*
+ * Reading index table directly (as a table) is faster but there are
+ * bugs or limitations. Keep the code and make it possible to choose.
+ */
+static const bool g_ndb_blob_ok_to_read_index_table = false;
+
+// state (inline)
+
+inline void
+NdbBlob::setState(State newState)
+{
+ DBUG_ENTER("NdbBlob::setState");
+ DBUG_PRINT("info", ("this=%p newState=%u", this, newState));
+ theState = newState;
+ DBUG_VOID_RETURN;
+}
+
+// define blob table
+
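+/*
+ * The parts of a blob column are stored in a separate blob table named
+ * "NDB$BLOB_<tableId>_<attrId>"; the helpers below construct that name
+ * and the blob table definition.
+ */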
+int
+NdbBlob::getBlobTableName(char* btname, Ndb* anNdb, const char* tableName, const char* columnName)
+{
+ NdbTableImpl* t = anNdb->theDictionary->m_impl.getTable(tableName);
+ if (t == NULL)
+ return -1;
+ NdbColumnImpl* c = t->getColumn(columnName);
+ if (c == NULL)
+ return -1;
+ getBlobTableName(btname, t, c);
+ return 0;
+}
+
+void
+NdbBlob::getBlobTableName(char* btname, const NdbTableImpl* t, const NdbColumnImpl* c)
+{
+ assert(t != 0 && c != 0 && c->getBlobType());
+ memset(btname, 0, NdbBlobImpl::BlobTableNameSize);
+ sprintf(btname, "NDB$BLOB_%d_%d", (int)t->m_tableId, (int)c->m_attrId);
+}
+
+void
+NdbBlob::getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnImpl* c)
+{
+ DBUG_ENTER("NdbBlob::getBlobTable");
+ char btname[NdbBlobImpl::BlobTableNameSize];
+ getBlobTableName(btname, t, c);
+ bt.setName(btname);
+ bt.setLogging(t->getLogging());
+ /*
+ BLOB tables use the same fragmentation as the original table,
+ but may change the fragment type if it is UserDefined, since it
+ must be hash-based so that the kernel can handle it on its own.
+ */
+ bt.m_primaryTableId = t->m_tableId;
+ bt.m_ng.clear();
+ switch (t->getFragmentType())
+ {
+ case NdbDictionary::Object::FragAllSmall:
+ case NdbDictionary::Object::FragAllMedium:
+ case NdbDictionary::Object::FragAllLarge:
+ case NdbDictionary::Object::FragSingle:
+ bt.setFragmentType(t->getFragmentType());
+ break;
+ case NdbDictionary::Object::DistrKeyLin:
+ case NdbDictionary::Object::DistrKeyHash:
+ bt.setFragmentType(t->getFragmentType());
+ break;
+ case NdbDictionary::Object::UserDefined:
+ bt.setFragmentType(NdbDictionary::Object::DistrKeyHash);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ break;
+ }
+ DBUG_PRINT("info",
+ ("Create BLOB table with primary table = %u and Fragment Type = %u",
+ bt.m_primaryTableId, (uint)bt.getFragmentType()));
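+ /*
+ * Blob table columns: PK holds the main table key packed into words,
+ * DIST the per-part distribution value and PART the part number; all
+ * three form the primary key, with PK and DIST also used as
+ * distribution keys. DATA holds one part of getPartSize() bytes.
+ */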
+ { NdbDictionary::Column bc("PK");
+ bc.setType(NdbDictionary::Column::Unsigned);
+ assert(t->m_keyLenInWords != 0);
+ bc.setLength(t->m_keyLenInWords);
+ bc.setPrimaryKey(true);
+ bc.setDistributionKey(true);
+ bt.addColumn(bc);
+ }
+ { NdbDictionary::Column bc("DIST");
+ bc.setType(NdbDictionary::Column::Unsigned);
+ bc.setPrimaryKey(true);
+ bc.setDistributionKey(true);
+ bt.addColumn(bc);
+ }
+ { NdbDictionary::Column bc("PART");
+ bc.setType(NdbDictionary::Column::Unsigned);
+ bc.setPrimaryKey(true);
+ bc.setDistributionKey(false);
+ bt.addColumn(bc);
+ }
+ { NdbDictionary::Column bc("DATA");
+ switch (c->m_type) {
+ case NdbDictionary::Column::Blob:
+ bc.setType(NdbDictionary::Column::Binary);
+ break;
+ case NdbDictionary::Column::Text:
+ bc.setType(NdbDictionary::Column::Char);
+ break;
+ default:
+ assert(false);
+ break;
+ }
+ bc.setLength(c->getPartSize());
+ bt.addColumn(bc);
+ }
+ DBUG_VOID_RETURN;
+}
+
+// initialization
+
+NdbBlob::NdbBlob()
+{
+ init();
+}
+
+void
+NdbBlob::init()
+{
+ theState = Idle;
+ theNdb = NULL;
+ theNdbCon = NULL;
+ theNdbOp = NULL;
+ theTable = NULL;
+ theAccessTable = NULL;
+ theBlobTable = NULL;
+ theColumn = NULL;
+ theFillChar = 0;
+ theInlineSize = 0;
+ thePartSize = 0;
+ theStripeSize = 0;
+ theGetFlag = false;
+ theGetBuf = NULL;
+ theSetFlag = false;
+ theSetBuf = NULL;
+ theGetSetBytes = 0;
+ thePendingBlobOps = 0;
+ theActiveHook = NULL;
+ theActiveHookArg = NULL;
+ theHead = NULL;
+ theInlineData = NULL;
+ theHeadInlineRecAttr = NULL;
+ theHeadInlineReadOp = NULL;
+ theHeadInlineUpdateFlag = false;
+ theNullFlag = -1;
+ theLength = 0;
+ thePos = 0;
+ theNext = NULL;
+}
+
+void
+NdbBlob::release()
+{
+ setState(Idle);
+}
+
+// buffers
+
+NdbBlob::Buf::Buf() :
+ data(NULL),
+ size(0),
+ maxsize(0)
+{
+}
+
+NdbBlob::Buf::~Buf()
+{
+ delete [] data;
+}
+
+void
+NdbBlob::Buf::alloc(unsigned n)
+{
+ size = n;
+ if (maxsize < n) {
+ delete [] data;
+ // align to Uint64
+ if (n % 8 != 0)
+ n += 8 - n % 8;
+ data = new char [n];
+ maxsize = n;
+ }
+#ifdef VM_TRACE
+ memset(data, 'X', maxsize);
+#endif
+}
+
+void
+NdbBlob::Buf::copyfrom(const NdbBlob::Buf& src)
+{
+ assert(size == src.size);
+ memcpy(data, src.data, size);
+}
+
+// classify operations (inline)
+
+inline bool
+NdbBlob::isTableOp()
+{
+ return theTable == theAccessTable;
+}
+
+inline bool
+NdbBlob::isIndexOp()
+{
+ return theTable != theAccessTable;
+}
+
+inline bool
+NdbBlob::isKeyOp()
+{
+ return
+ theNdbOp->theOperationType == NdbOperation::InsertRequest ||
+ theNdbOp->theOperationType == NdbOperation::UpdateRequest ||
+ theNdbOp->theOperationType == NdbOperation::WriteRequest ||
+ theNdbOp->theOperationType == NdbOperation::ReadRequest ||
+ theNdbOp->theOperationType == NdbOperation::ReadExclusive ||
+ theNdbOp->theOperationType == NdbOperation::DeleteRequest;
+}
+
+inline bool
+NdbBlob::isReadOp()
+{
+ return
+ theNdbOp->theOperationType == NdbOperation::ReadRequest ||
+ theNdbOp->theOperationType == NdbOperation::ReadExclusive;
+}
+
+inline bool
+NdbBlob::isInsertOp()
+{
+ return
+ theNdbOp->theOperationType == NdbOperation::InsertRequest;
+}
+
+inline bool
+NdbBlob::isUpdateOp()
+{
+ return
+ theNdbOp->theOperationType == NdbOperation::UpdateRequest;
+}
+
+inline bool
+NdbBlob::isWriteOp()
+{
+ return
+ theNdbOp->theOperationType == NdbOperation::WriteRequest;
+}
+
+inline bool
+NdbBlob::isDeleteOp()
+{
+ return
+ theNdbOp->theOperationType == NdbOperation::DeleteRequest;
+}
+
+inline bool
+NdbBlob::isScanOp()
+{
+ return
+ theNdbOp->theOperationType == NdbOperation::OpenScanRequest ||
+ theNdbOp->theOperationType == NdbOperation::OpenRangeScanRequest;
+}
+
+// computations (inline)
+
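+// The first theInlineSize bytes of the blob value are stored inline in
+// the main table; the remainder is split into parts of thePartSize bytes
+// in the blob table, numbered from 0 starting at offset theInlineSize.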
+inline Uint32
+NdbBlob::getPartNumber(Uint64 pos)
+{
+ assert(thePartSize != 0 && pos >= theInlineSize);
+ return (pos - theInlineSize) / thePartSize;
+}
+
+inline Uint32
+NdbBlob::getPartCount()
+{
+ if (theLength <= theInlineSize)
+ return 0;
+ return 1 + getPartNumber(theLength - 1);
+}
+
+inline Uint32
+NdbBlob::getDistKey(Uint32 part)
+{
+ assert(theStripeSize != 0);
+ return (part / theStripeSize) % theStripeSize;
+}
+
+// getters and setters
+
+int
+NdbBlob::getTableKeyValue(NdbOperation* anOp)
+{
+ DBUG_ENTER("NdbBlob::getTableKeyValue");
+ Uint32* data = (Uint32*)theKeyBuf.data;
+ unsigned pos = 0;
+ for (unsigned i = 0; i < theTable->m_columns.size(); i++) {
+ NdbColumnImpl* c = theTable->m_columns[i];
+ assert(c != NULL);
+ if (c->m_pk) {
+ unsigned len = c->m_attrSize * c->m_arraySize;
+ if (anOp->getValue_impl(c, (char*)&data[pos]) == NULL) {
+ setErrorCode(anOp);
+ DBUG_RETURN(-1);
+ }
+ // odd bytes receive no data and must be zeroed
+ while (len % 4 != 0) {
+ char* p = (char*)&data[pos] + len++;
+ *p = 0;
+ }
+ pos += len / 4;
+ }
+ }
+ assert(pos == theKeyBuf.size / 4);
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::setTableKeyValue(NdbOperation* anOp)
+{
+ DBUG_ENTER("NdbBlob::setTableKeyValue");
+ DBUG_DUMP("info", theKeyBuf.data, 4 * theTable->m_keyLenInWords);
+ const Uint32* data = (const Uint32*)theKeyBuf.data;
+ const unsigned columns = theTable->m_columns.size();
+ unsigned pos = 0;
+ for (unsigned i = 0; i < columns; i++) {
+ NdbColumnImpl* c = theTable->m_columns[i];
+ assert(c != NULL);
+ if (c->m_pk) {
+ unsigned len = c->m_attrSize * c->m_arraySize;
+ if (anOp->equal_impl(c, (const char*)&data[pos], len) == -1) {
+ setErrorCode(anOp);
+ DBUG_RETURN(-1);
+ }
+ pos += (len + 3) / 4;
+ }
+ }
+ assert(pos == theKeyBuf.size / 4);
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::setAccessKeyValue(NdbOperation* anOp)
+{
+ DBUG_ENTER("NdbBlob::setAccessKeyValue");
+ DBUG_DUMP("info", theAccessKeyBuf.data, 4 * theAccessTable->m_keyLenInWords);
+ const Uint32* data = (const Uint32*)theAccessKeyBuf.data;
+ const unsigned columns = theAccessTable->m_columns.size();
+ unsigned pos = 0;
+ for (unsigned i = 0; i < columns; i++) {
+ NdbColumnImpl* c = theAccessTable->m_columns[i];
+ assert(c != NULL);
+ if (c->m_pk) {
+ unsigned len = c->m_attrSize * c->m_arraySize;
+ if (anOp->equal_impl(c, (const char*)&data[pos], len) == -1) {
+ setErrorCode(anOp);
+ DBUG_RETURN(-1);
+ }
+ pos += (len + 3) / 4;
+ }
+ }
+ assert(pos == theAccessKeyBuf.size / 4);
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::setPartKeyValue(NdbOperation* anOp, Uint32 part)
+{
+ DBUG_ENTER("NdbBlob::setPartKeyValue");
+ DBUG_PRINT("info", ("dist=%u part=%u key=", getDistKey(part), part));
+ DBUG_DUMP("info", theKeyBuf.data, 4 * theTable->m_keyLenInWords);
+ //Uint32* data = (Uint32*)theKeyBuf.data;
+ //unsigned size = theTable->m_keyLenInWords;
+ // TODO: use attr ids once compatibility with 4.1.7 is no longer needed
+ if (anOp->equal("PK", theKeyBuf.data) == -1 ||
+ anOp->equal("DIST", getDistKey(part)) == -1 ||
+ anOp->equal("PART", part) == -1) {
+ setErrorCode(anOp);
+ DBUG_RETURN(-1);
+ }
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::getHeadInlineValue(NdbOperation* anOp)
+{
+ DBUG_ENTER("NdbBlob::getHeadInlineValue");
+ theHeadInlineRecAttr = anOp->getValue_impl(theColumn, theHeadInlineBuf.data);
+ if (theHeadInlineRecAttr == NULL) {
+ setErrorCode(anOp);
+ DBUG_RETURN(-1);
+ }
+ DBUG_RETURN(0);
+}
+
+void
+NdbBlob::getHeadFromRecAttr()
+{
+ DBUG_ENTER("NdbBlob::getHeadFromRecAttr");
+ assert(theHeadInlineRecAttr != NULL);
+ theNullFlag = theHeadInlineRecAttr->isNULL();
+ assert(theNullFlag != -1);
+ theLength = ! theNullFlag ? theHead->length : 0;
+ DBUG_VOID_RETURN;
+}
+
+int
+NdbBlob::setHeadInlineValue(NdbOperation* anOp)
+{
+ DBUG_ENTER("NdbBlob::setHeadInlineValue");
+ theHead->length = theLength;
+ if (theLength < theInlineSize)
+ memset(theInlineData + theLength, 0, theInlineSize - theLength);
+ assert(theNullFlag != -1);
+ const char* aValue = theNullFlag ? 0 : theHeadInlineBuf.data;
+ if (anOp->setValue(theColumn, aValue, theHeadInlineBuf.size) == -1) {
+ setErrorCode(anOp);
+ DBUG_RETURN(-1);
+ }
+ theHeadInlineUpdateFlag = false;
+ DBUG_RETURN(0);
+}
+
+// getValue/setValue
+
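+// getValue() only registers a buffer to be filled when the operation
+// executes; setValue() registers the data to write, and for an insert
+// the portion that fits in the inline area is written immediately.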
+int
+NdbBlob::getValue(void* data, Uint32 bytes)
+{
+ DBUG_ENTER("NdbBlob::getValue");
+ DBUG_PRINT("info", ("data=%p bytes=%u", data, bytes));
+ if (theGetFlag || theState != Prepared) {
+ setErrorCode(NdbBlobImpl::ErrState);
+ DBUG_RETURN(-1);
+ }
+ if (! isReadOp() && ! isScanOp()) {
+ setErrorCode(NdbBlobImpl::ErrUsage);
+ DBUG_RETURN(-1);
+ }
+ if (data == NULL && bytes != 0) {
+ setErrorCode(NdbBlobImpl::ErrUsage);
+ DBUG_RETURN(-1);
+ }
+ theGetFlag = true;
+ theGetBuf = static_cast<char*>(data);
+ theGetSetBytes = bytes;
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::setValue(const void* data, Uint32 bytes)
+{
+ DBUG_ENTER("NdbBlob::setValue");
+ DBUG_PRINT("info", ("data=%p bytes=%u", data, bytes));
+ if (theSetFlag || theState != Prepared) {
+ setErrorCode(NdbBlobImpl::ErrState);
+ DBUG_RETURN(-1);
+ }
+ if (! isInsertOp() && ! isUpdateOp() && ! isWriteOp()) {
+ setErrorCode(NdbBlobImpl::ErrUsage);
+ DBUG_RETURN(-1);
+ }
+ if (data == NULL && bytes != 0) {
+ setErrorCode(NdbBlobImpl::ErrUsage);
+ DBUG_RETURN(-1);
+ }
+ theSetFlag = true;
+ theSetBuf = static_cast<const char*>(data);
+ theGetSetBytes = bytes;
+ if (isInsertOp()) {
+ // write inline part now
+ if (theSetBuf != NULL) {
+ Uint32 n = theGetSetBytes;
+ if (n > theInlineSize)
+ n = theInlineSize;
+ assert(thePos == 0);
+ if (writeDataPrivate(theSetBuf, n) == -1)
+ DBUG_RETURN(-1);
+ } else {
+ theNullFlag = true;
+ theLength = 0;
+ }
+ if (setHeadInlineValue(theNdbOp) == -1)
+ DBUG_RETURN(-1);
+ }
+ DBUG_RETURN(0);
+}
+
+// activation hook
+
+int
+NdbBlob::setActiveHook(ActiveHook activeHook, void* arg)
+{
+ DBUG_ENTER("NdbBlob::setActiveHook");
+ DBUG_PRINT("info", ("hook=%p arg=%p", (void*)activeHook, arg));
+ if (theState != Prepared) {
+ setErrorCode(NdbBlobImpl::ErrState);
+ DBUG_RETURN(-1);
+ }
+ theActiveHook = activeHook;
+ theActiveHookArg = arg;
+ DBUG_RETURN(0);
+}
+
+// misc operations
+
+int
+NdbBlob::getNull(bool& isNull)
+{
+ DBUG_ENTER("NdbBlob::getNull");
+ if (theState == Prepared && theSetFlag) {
+ isNull = (theSetBuf == NULL);
+ DBUG_RETURN(0);
+ }
+ if (theNullFlag == -1) {
+ setErrorCode(NdbBlobImpl::ErrState);
+ DBUG_RETURN(-1);
+ }
+ isNull = theNullFlag;
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::setNull()
+{
+ DBUG_ENTER("NdbBlob::setNull");
+ if (theNullFlag == -1) {
+ if (theState == Prepared) {
+ DBUG_RETURN(setValue(0, 0));
+ }
+ setErrorCode(NdbBlobImpl::ErrState);
+ DBUG_RETURN(-1);
+ }
+ if (theNullFlag)
+ DBUG_RETURN(0);
+ if (deleteParts(0, getPartCount()) == -1)
+ DBUG_RETURN(-1);
+ theNullFlag = true;
+ theLength = 0;
+ theHeadInlineUpdateFlag = true;
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::getLength(Uint64& len)
+{
+ DBUG_ENTER("NdbBlob::getLength");
+ if (theState == Prepared && theSetFlag) {
+ len = theGetSetBytes;
+ DBUG_RETURN(0);
+ }
+ if (theNullFlag == -1) {
+ setErrorCode(NdbBlobImpl::ErrState);
+ DBUG_RETURN(-1);
+ }
+ len = theLength;
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::truncate(Uint64 length)
+{
+ DBUG_ENTER("NdbBlob::truncate");
+ DBUG_PRINT("info", ("length=%llu", length));
+ if (theNullFlag == -1) {
+ setErrorCode(NdbBlobImpl::ErrState);
+ DBUG_RETURN(-1);
+ }
+ if (theLength > length) {
+ if (length > theInlineSize) {
+ Uint32 part1 = getPartNumber(length - 1);
+ Uint32 part2 = getPartNumber(theLength - 1);
+ assert(part2 >= part1);
+ if (part2 > part1 && deleteParts(part1 + 1, part2 - part1) == -1)
+ DBUG_RETURN(-1);
+ } else {
+ if (deleteParts(0, getPartCount()) == -1)
+ DBUG_RETURN(-1);
+ }
+ theLength = length;
+ theHeadInlineUpdateFlag = true;
+ if (thePos > length)
+ thePos = length;
+ }
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::getPos(Uint64& pos)
+{
+ DBUG_ENTER("NdbBlob::getPos");
+ if (theNullFlag == -1) {
+ setErrorCode(NdbBlobImpl::ErrState);
+ DBUG_RETURN(-1);
+ }
+ pos = thePos;
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::setPos(Uint64 pos)
+{
+ DBUG_ENTER("NdbBlob::setPos");
+ DBUG_PRINT("info", ("pos=%llu", pos));
+ if (theNullFlag == -1) {
+ setErrorCode(NdbBlobImpl::ErrState);
+ DBUG_RETURN(-1);
+ }
+ if (pos > theLength) {
+ setErrorCode(NdbBlobImpl::ErrSeek);
+ DBUG_RETURN(-1);
+ }
+ thePos = pos;
+ DBUG_RETURN(0);
+}
+
+// read/write
+
+int
+NdbBlob::readData(void* data, Uint32& bytes)
+{
+ if (theState != Active) {
+ setErrorCode(NdbBlobImpl::ErrState);
+ return -1;
+ }
+ char* buf = static_cast<char*>(data);
+ return readDataPrivate(buf, bytes);
+}
+
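+/*
+ * Read up to the requested number of bytes starting at thePos: first
+ * from the inline area, then a partial first part and whole middle
+ * parts, and finally a partial last part via thePartBuf; bytes is
+ * clamped to the remaining blob length.
+ */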
+int
+NdbBlob::readDataPrivate(char* buf, Uint32& bytes)
+{
+ DBUG_ENTER("NdbBlob::readDataPrivate");
+ DBUG_PRINT("info", ("bytes=%u", bytes));
+ assert(thePos <= theLength);
+ Uint64 pos = thePos;
+ if (bytes > theLength - pos)
+ bytes = theLength - pos;
+ Uint32 len = bytes;
+ if (len > 0) {
+ // inline part
+ if (pos < theInlineSize) {
+ Uint32 n = theInlineSize - pos;
+ if (n > len)
+ n = len;
+ memcpy(buf, theInlineData + pos, n);
+ pos += n;
+ buf += n;
+ len -= n;
+ }
+ }
+ if (len > 0 && thePartSize == 0) {
+ setErrorCode(NdbBlobImpl::ErrSeek);
+ DBUG_RETURN(-1);
+ }
+ if (len > 0) {
+ assert(pos >= theInlineSize);
+ Uint32 off = (pos - theInlineSize) % thePartSize;
+ // partial first block
+ if (off != 0) {
+ DBUG_PRINT("info", ("partial first block pos=%llu len=%u", pos, len));
+ Uint32 part = (pos - theInlineSize) / thePartSize;
+ if (readParts(thePartBuf.data, part, 1) == -1)
+ DBUG_RETURN(-1);
+ // need result now
+ if (executePendingBlobReads() == -1)
+ DBUG_RETURN(-1);
+ Uint32 n = thePartSize - off;
+ if (n > len)
+ n = len;
+ memcpy(buf, thePartBuf.data + off, n);
+ pos += n;
+ buf += n;
+ len -= n;
+ }
+ }
+ if (len > 0) {
+ assert((pos - theInlineSize) % thePartSize == 0);
+ // complete blocks in the middle
+ if (len >= thePartSize) {
+ Uint32 part = (pos - theInlineSize) / thePartSize;
+ Uint32 count = len / thePartSize;
+ if (readParts(buf, part, count) == -1)
+ DBUG_RETURN(-1);
+ Uint32 n = thePartSize * count;
+ pos += n;
+ buf += n;
+ len -= n;
+ }
+ }
+ if (len > 0) {
+ // partial last block
+ DBUG_PRINT("info", ("partial last block pos=%llu len=%u", pos, len));
+ assert((pos - theInlineSize) % thePartSize == 0 && len < thePartSize);
+ Uint32 part = (pos - theInlineSize) / thePartSize;
+ if (readParts(thePartBuf.data, part, 1) == -1)
+ DBUG_RETURN(-1);
+ // need result now
+ if (executePendingBlobReads() == -1)
+ DBUG_RETURN(-1);
+ memcpy(buf, thePartBuf.data, len);
+ Uint32 n = len;
+ pos += n;
+ buf += n;
+ len -= n;
+ }
+ assert(len == 0);
+ thePos = pos;
+ assert(thePos <= theLength);
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::writeData(const void* data, Uint32 bytes)
+{
+ if (theState != Active) {
+ setErrorCode(NdbBlobImpl::ErrState);
+ return -1;
+ }
+ const char* buf = static_cast<const char*>(data);
+ return writeDataPrivate(buf, bytes);
+}
+
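+/*
+ * Write bytes starting at thePos: first into the inline area, then a
+ * read-modify-write of a partial first part, then whole parts which are
+ * updated or inserted as needed, and finally a partial last part,
+ * padded with theFillChar when no existing data follows it.
+ */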
+int
+NdbBlob::writeDataPrivate(const char* buf, Uint32 bytes)
+{
+ DBUG_ENTER("NdbBlob::writeDataPrivate");
+ DBUG_PRINT("info", ("bytes=%u", bytes));
+ assert(thePos <= theLength);
+ Uint64 pos = thePos;
+ Uint32 len = bytes;
+ // any write makes blob not NULL
+ if (theNullFlag) {
+ theNullFlag = false;
+ theHeadInlineUpdateFlag = true;
+ }
+ if (len > 0) {
+ // inline part
+ if (pos < theInlineSize) {
+ Uint32 n = theInlineSize - pos;
+ if (n > len)
+ n = len;
+ memcpy(theInlineData + pos, buf, n);
+ theHeadInlineUpdateFlag = true;
+ pos += n;
+ buf += n;
+ len -= n;
+ }
+ }
+ if (len > 0 && thePartSize == 0) {
+ setErrorCode(NdbBlobImpl::ErrSeek);
+ DBUG_RETURN(-1);
+ }
+ if (len > 0) {
+ assert(pos >= theInlineSize);
+ Uint32 off = (pos - theInlineSize) % thePartSize;
+ // partial first block
+ if (off != 0) {
+ DBUG_PRINT("info", ("partial first block pos=%llu len=%u", pos, len));
+ // flush writes to guarantee correct read
+ if (executePendingBlobWrites() == -1)
+ DBUG_RETURN(-1);
+ Uint32 part = (pos - theInlineSize) / thePartSize;
+ if (readParts(thePartBuf.data, part, 1) == -1)
+ DBUG_RETURN(-1);
+ // need result now
+ if (executePendingBlobReads() == -1)
+ DBUG_RETURN(-1);
+ Uint32 n = thePartSize - off;
+ if (n > len) {
+ memset(thePartBuf.data + off + len, theFillChar, n - len);
+ n = len;
+ }
+ memcpy(thePartBuf.data + off, buf, n);
+ if (updateParts(thePartBuf.data, part, 1) == -1)
+ DBUG_RETURN(-1);
+ pos += n;
+ buf += n;
+ len -= n;
+ }
+ }
+ if (len > 0) {
+ assert((pos - theInlineSize) % thePartSize == 0);
+ // complete blocks in the middle
+ if (len >= thePartSize) {
+ Uint32 part = (pos - theInlineSize) / thePartSize;
+ Uint32 count = len / thePartSize;
+ for (unsigned i = 0; i < count; i++) {
+ if (part + i < getPartCount()) {
+ if (updateParts(buf, part + i, 1) == -1)
+ DBUG_RETURN(-1);
+ } else {
+ if (insertParts(buf, part + i, 1) == -1)
+ DBUG_RETURN(-1);
+ }
+ Uint32 n = thePartSize;
+ pos += n;
+ buf += n;
+ len -= n;
+ }
+ }
+ }
+ if (len > 0) {
+ // partial last block
+ DBUG_PRINT("info", ("partial last block pos=%llu len=%u", pos, len));
+ assert((pos - theInlineSize) % thePartSize == 0 && len < thePartSize);
+ Uint32 part = (pos - theInlineSize) / thePartSize;
+ if (theLength > pos + len) {
+ // flush writes to guarantee correct read
+ if (executePendingBlobWrites() == -1)
+ DBUG_RETURN(-1);
+ if (readParts(thePartBuf.data, part, 1) == -1)
+ DBUG_RETURN(-1);
+ // need result now
+ if (executePendingBlobReads() == -1)
+ DBUG_RETURN(-1);
+ memcpy(thePartBuf.data, buf, len);
+ if (updateParts(thePartBuf.data, part, 1) == -1)
+ DBUG_RETURN(-1);
+ } else {
+ memcpy(thePartBuf.data, buf, len);
+ memset(thePartBuf.data + len, theFillChar, thePartSize - len);
+ if (part < getPartCount()) {
+ if (updateParts(thePartBuf.data, part, 1) == -1)
+ DBUG_RETURN(-1);
+ } else {
+ if (insertParts(thePartBuf.data, part, 1) == -1)
+ DBUG_RETURN(-1);
+ }
+ }
+ Uint32 n = len;
+ pos += n;
+ buf += n;
+ len -= n;
+ }
+ assert(len == 0);
+ if (theLength < pos) {
+ theLength = pos;
+ theHeadInlineUpdateFlag = true;
+ }
+ thePos = pos;
+ assert(thePos <= theLength);
+ DBUG_RETURN(0);
+}
+
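+// In the blob table the DATA column has attribute id 3 (after PK, DIST
+// and PART), which is why the part access methods below pass the
+// hard-coded column id 3 to getValue()/setValue().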
+int
+NdbBlob::readParts(char* buf, Uint32 part, Uint32 count)
+{
+ DBUG_ENTER("NdbBlob::readParts");
+ DBUG_PRINT("info", ("part=%u count=%u", part, count));
+ Uint32 n = 0;
+ while (n < count) {
+ NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
+ if (tOp == NULL ||
+ tOp->committedRead() == -1 ||
+ setPartKeyValue(tOp, part + n) == -1 ||
+ tOp->getValue((Uint32)3, buf) == NULL) {
+ setErrorCode(tOp);
+ DBUG_RETURN(-1);
+ }
+ tOp->m_abortOption = NdbTransaction::AbortOnError;
+ buf += thePartSize;
+ n++;
+ thePendingBlobOps |= (1 << NdbOperation::ReadRequest);
+ theNdbCon->thePendingBlobOps |= (1 << NdbOperation::ReadRequest);
+ }
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::insertParts(const char* buf, Uint32 part, Uint32 count)
+{
+ DBUG_ENTER("NdbBlob::insertParts");
+ DBUG_PRINT("info", ("part=%u count=%u", part, count));
+ Uint32 n = 0;
+ while (n < count) {
+ NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
+ if (tOp == NULL ||
+ tOp->insertTuple() == -1 ||
+ setPartKeyValue(tOp, part + n) == -1 ||
+ tOp->setValue((Uint32)3, buf) == -1) {
+ setErrorCode(tOp);
+ DBUG_RETURN(-1);
+ }
+ tOp->m_abortOption = NdbTransaction::AbortOnError;
+ buf += thePartSize;
+ n++;
+ thePendingBlobOps |= (1 << NdbOperation::InsertRequest);
+ theNdbCon->thePendingBlobOps |= (1 << NdbOperation::InsertRequest);
+ }
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::updateParts(const char* buf, Uint32 part, Uint32 count)
+{
+ DBUG_ENTER("NdbBlob::updateParts");
+ DBUG_PRINT("info", ("part=%u count=%u", part, count));
+ Uint32 n = 0;
+ while (n < count) {
+ NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
+ if (tOp == NULL ||
+ tOp->updateTuple() == -1 ||
+ setPartKeyValue(tOp, part + n) == -1 ||
+ tOp->setValue((Uint32)3, buf) == -1) {
+ setErrorCode(tOp);
+ DBUG_RETURN(-1);
+ }
+ tOp->m_abortOption = NdbTransaction::AbortOnError;
+ buf += thePartSize;
+ n++;
+ thePendingBlobOps |= (1 << NdbOperation::UpdateRequest);
+ theNdbCon->thePendingBlobOps |= (1 << NdbOperation::UpdateRequest);
+ }
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::deleteParts(Uint32 part, Uint32 count)
+{
+ DBUG_ENTER("NdbBlob::deleteParts");
+ DBUG_PRINT("info", ("part=%u count=%u", part, count));
+ Uint32 n = 0;
+ while (n < count) {
+ NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
+ if (tOp == NULL ||
+ tOp->deleteTuple() == -1 ||
+ setPartKeyValue(tOp, part + n) == -1) {
+ setErrorCode(tOp);
+ DBUG_RETURN(-1);
+ }
+ tOp->m_abortOption = NdbTransaction::AbortOnError;
+ n++;
+ thePendingBlobOps |= (1 << NdbOperation::DeleteRequest);
+ theNdbCon->thePendingBlobOps |= (1 << NdbOperation::DeleteRequest);
+ }
+ DBUG_RETURN(0);
+}
+
+/*
+ * The number of blob parts is not known. Used to check for a race
+ * condition when writeTuple is used for insert. Deletes all parts found.
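+ * Parts are deleted in batches that grow from 1 to at most 256
+ * operations (multiplied by 4 each round) and are executed with
+ * AO_IgnoreError; error 626 (tuple not found) marks the first
+ * non-existent part.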
+ */
+int
+NdbBlob::deletePartsUnknown(Uint32 part)
+{
+ DBUG_ENTER("NdbBlob::deletePartsUnknown");
+ DBUG_PRINT("info", ("part=%u count=all", part));
+ static const unsigned maxbat = 256;
+ static const unsigned minbat = 1;
+ unsigned bat = minbat;
+ NdbOperation* tOpList[maxbat];
+ Uint32 count = 0;
+ while (true) {
+ Uint32 n;
+ n = 0;
+ while (n < bat) {
+ NdbOperation*& tOp = tOpList[n]; // ref
+ tOp = theNdbCon->getNdbOperation(theBlobTable);
+ if (tOp == NULL ||
+ tOp->deleteTuple() == -1 ||
+ setPartKeyValue(tOp, part + count + n) == -1) {
+ setErrorCode(tOp);
+ DBUG_RETURN(-1);
+ }
+ tOp->m_abortOption= NdbTransaction::AO_IgnoreError;
+ n++;
+ }
+ DBUG_PRINT("info", ("bat=%u", bat));
+ if (theNdbCon->executeNoBlobs(NdbTransaction::NoCommit) == -1)
+ DBUG_RETURN(-1);
+ n = 0;
+ while (n < bat) {
+ NdbOperation* tOp = tOpList[n];
+ if (tOp->theError.code != 0) {
+ if (tOp->theError.code != 626) {
+ setErrorCode(tOp);
+ DBUG_RETURN(-1);
+ }
+ // first non-existent part
+ DBUG_PRINT("info", ("count=%u", count));
+ DBUG_RETURN(0);
+ }
+ n++;
+ count++;
+ }
+ bat *= 4;
+ if (bat > maxbat)
+ bat = maxbat;
+ }
+}
+
+// pending ops
+
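+// thePendingBlobOps is a bitmask of defined-but-not-executed operation
+// types: reads are flushed before their results are consumed, and
+// writes are flushed before a read-modify-write of a part.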
+int
+NdbBlob::executePendingBlobReads()
+{
+ DBUG_ENTER("NdbBlob::executePendingBlobReads");
+ Uint8 flags = (1 << NdbOperation::ReadRequest);
+ if (thePendingBlobOps & flags) {
+ if (theNdbCon->executeNoBlobs(NdbTransaction::NoCommit) == -1)
+ DBUG_RETURN(-1);
+ thePendingBlobOps = 0;
+ theNdbCon->thePendingBlobOps = 0;
+ }
+ DBUG_RETURN(0);
+}
+
+int
+NdbBlob::executePendingBlobWrites()
+{
+ DBUG_ENTER("NdbBlob::executePendingBlobWrites");
+ Uint8 flags = 0xFF & ~(1 << NdbOperation::ReadRequest);
+ if (thePendingBlobOps & flags) {
+ if (theNdbCon->executeNoBlobs(NdbTransaction::NoCommit) == -1)
+ DBUG_RETURN(-1);
+ thePendingBlobOps = 0;
+ theNdbCon->thePendingBlobOps = 0;
+ }
+ DBUG_RETURN(0);
+}
+
+// callbacks
+
+int
+NdbBlob::invokeActiveHook()
+{
+ DBUG_ENTER("NdbBlob::invokeActiveHook");
+ assert(theState == Active && theActiveHook != NULL);
+ int ret = (*theActiveHook)(this, theActiveHookArg);
+ if (ret != 0) {
+ // no error is set on blob level
+ DBUG_RETURN(-1);
+ }
+ DBUG_RETURN(0);
+}
+
+// blob handle maintenance
+
+/*
+ * Prepare a blob handle linked to an operation. Checks the blob table
+ * and allocates buffers. For key operations, fetches the key data from
+ * the signal data. For read operations, adds a read of head+inline.
+ */
+int
+NdbBlob::atPrepare(NdbTransaction* aCon, NdbOperation* anOp, const NdbColumnImpl* aColumn)
+{
+ DBUG_ENTER("NdbBlob::atPrepare");
+ DBUG_PRINT("info", ("this=%p op=%p con=%p", this, anOp, aCon));
+ assert(theState == Idle);
+ // ndb api stuff
+ theNdb = anOp->theNdb;
+ theNdbCon = aCon; // for scan, this is the real transaction (m_transConnection)
+ theNdbOp = anOp;
+ theTable = anOp->m_currentTable;
+ theAccessTable = anOp->m_accessTable;
+ theColumn = aColumn;
+ NdbDictionary::Column::Type partType = NdbDictionary::Column::Undefined;
+ switch (theColumn->getType()) {
+ case NdbDictionary::Column::Blob:
+ partType = NdbDictionary::Column::Binary;
+ theFillChar = 0x0;
+ break;
+ case NdbDictionary::Column::Text:
+ partType = NdbDictionary::Column::Char;
+ theFillChar = 0x20;
+ break;
+ default:
+ setErrorCode(NdbBlobImpl::ErrUsage);
+ DBUG_RETURN(-1);
+ }
+ // sizes
+ theInlineSize = theColumn->getInlineSize();
+ thePartSize = theColumn->getPartSize();
+ theStripeSize = theColumn->getStripeSize();
+ // sanity check
+ assert((NDB_BLOB_HEAD_SIZE << 2) == sizeof(Head));
+ assert(theColumn->m_attrSize * theColumn->m_arraySize == sizeof(Head) + theInlineSize);
+ if (thePartSize > 0) {
+ const NdbDictionary::Table* bt = NULL;
+ const NdbDictionary::Column* bc = NULL;
+ if (theStripeSize == 0 ||
+ (bt = theColumn->getBlobTable()) == NULL ||
+ (bc = bt->getColumn("DATA")) == NULL ||
+ bc->getType() != partType ||
+ bc->getLength() != (int)thePartSize) {
+ setErrorCode(NdbBlobImpl::ErrTable);
+ DBUG_RETURN(-1);
+ }
+ theBlobTable = &NdbTableImpl::getImpl(*bt);
+ }
+ // buffers
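+ // theHeadInlineBuf holds the Head struct followed by the inline data;
+ // theHead and theInlineData below point into this buffer.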
+ theKeyBuf.alloc(theTable->m_keyLenInWords << 2);
+ theAccessKeyBuf.alloc(theAccessTable->m_keyLenInWords << 2);
+ theHeadInlineBuf.alloc(sizeof(Head) + theInlineSize);
+ theHeadInlineCopyBuf.alloc(sizeof(Head) + theInlineSize);
+ thePartBuf.alloc(thePartSize);
+ theHead = (Head*)theHeadInlineBuf.data;
+ theInlineData = theHeadInlineBuf.data + sizeof(Head);
+ // handle different operation types
+ bool supportedOp = false;
+ if (isKeyOp()) {
+ if (isTableOp()) {
+ // get table key
+ Uint32* data = (Uint32*)theKeyBuf.data;
+ unsigned size = theTable->m_keyLenInWords;
+ if (theNdbOp->getKeyFromTCREQ(data, size) == -1) {
+ setErrorCode(NdbBlobImpl::ErrUsage);
+ DBUG_RETURN(-1);
+ }
+ }
+ if (isIndexOp()) {
+ // get index key
+ Uint32* data = (Uint32*)theAccessKeyBuf.data;
+ unsigned size = theAccessTable->m_keyLenInWords;
+ if (theNdbOp->getKeyFromTCREQ(data, size) == -1) {
+ setErrorCode(NdbBlobImpl::ErrUsage);
+ DBUG_RETURN(-1);
+ }
+ }
+ if (isReadOp()) {
+ // add read of head+inline in this op
+ if (getHeadInlineValue(theNdbOp) == -1)
+ DBUG_RETURN(-1);
+ }
+ if (isInsertOp()) {
+ // becomes NULL unless set before execute
+ theNullFlag = true;
+ theLength = 0;
+ }
+ if (isWriteOp()) {
+ // becomes NULL unless set before execute
+ theNullFlag = true;
+ theLength = 0;
+ theHeadInlineUpdateFlag = true;
+ }
+ supportedOp = true;
+ }
+ if (isScanOp()) {
+ // add read of head+inline in this op
+ if (getHeadInlineValue(theNdbOp) == -1)
+ DBUG_RETURN(-1);
+ supportedOp = true;
+ }
+ if (! supportedOp) {
+ setErrorCode(NdbBlobImpl::ErrUsage);
+ DBUG_RETURN(-1);
+ }
+ setState(Prepared);
+ DBUG_RETURN(0);
+}
+
+/*
+ * Runs before execute of a prepared operation. May add new operations
+ * before this one. May ask that this operation and all before it (a
+ * "batch") are executed immediately in no-commit mode. In that case the
+ * remaining prepared operations are saved in a separate list. They are added
+ * back after postExecute.
+ */
+int
+NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch)
+{
+ DBUG_ENTER("NdbBlob::preExecute");
+ DBUG_PRINT("info", ("this=%p op=%p con=%p", this, theNdbOp, theNdbCon));
+ if (theState == Invalid)
+ DBUG_RETURN(-1);
+ assert(theState == Prepared);
+ // handle different operation types
+ assert(isKeyOp());
+ if (isReadOp()) {
+ if (theGetFlag && theGetSetBytes > theInlineSize) {
+ // need blob head before proceeding
+ batch = true;
+ }
+ }
+ if (isInsertOp()) {
+ if (theSetFlag && theGetSetBytes > theInlineSize) {
+ // add ops to write rest of a setValue
+ assert(theSetBuf != NULL);
+ const char* buf = theSetBuf + theInlineSize;
+ Uint32 bytes = theGetSetBytes - theInlineSize;
+ assert(thePos == theInlineSize);
+ if (writeDataPrivate(buf, bytes) == -1)
+ DBUG_RETURN(-1);
+ if (theHeadInlineUpdateFlag) {
+ // add an operation to update head+inline
+ NdbOperation* tOp = theNdbCon->getNdbOperation(theTable);
+ if (tOp == NULL ||
+ tOp->updateTuple() == -1 ||
+ setTableKeyValue(tOp) == -1 ||
+ setHeadInlineValue(tOp) == -1) {
+ setErrorCode(NdbBlobImpl::ErrAbort);
+ DBUG_RETURN(-1);
+ }
+ DBUG_PRINT("info", ("add op to update head+inline"));
+ }
+ }
+ }
+ if (isTableOp()) {
+ if (isUpdateOp() || isWriteOp() || isDeleteOp()) {
+ // add operation before this one to read head+inline
+ NdbOperation* tOp = theNdbCon->getNdbOperation(theTable, theNdbOp);
+ if (tOp == NULL ||
+ tOp->readTuple() == -1 ||
+ setTableKeyValue(tOp) == -1 ||
+ getHeadInlineValue(tOp) == -1) {
+ setErrorCode(tOp);
+ DBUG_RETURN(-1);
+ }
+ if (isWriteOp()) {
+ tOp->m_abortOption = NdbTransaction::AO_IgnoreError;
+ }
+ theHeadInlineReadOp = tOp;
+ // execute immediately
+ batch = true;
+ DBUG_PRINT("info", ("add op before to read head+inline"));
+ }
+ }
+ if (isIndexOp()) {
+ // add op before this one to read table key
+ NdbBlob* tFirstBlob = theNdbOp->theBlobList;
+ if (this == tFirstBlob) {
+ // first blob does it for all
+ if (g_ndb_blob_ok_to_read_index_table) {
+ Uint32 pkAttrId = theAccessTable->getNoOfColumns() - 1;
+ NdbOperation* tOp = theNdbCon->getNdbOperation(theAccessTable, theNdbOp);
+ if (tOp == NULL ||
+ tOp->readTuple() == -1 ||
+ setAccessKeyValue(tOp) == -1 ||
+ tOp->getValue(pkAttrId, theKeyBuf.data) == NULL) {
+ setErrorCode(tOp);
+ DBUG_RETURN(-1);
+ }
+ } else {
+ NdbIndexOperation* tOp = theNdbCon->getNdbIndexOperation(theAccessTable->m_index, theTable, theNdbOp);
+ if (tOp == NULL ||
+ tOp->readTuple() == -1 ||
+ setAccessKeyValue(tOp) == -1 ||
+ getTableKeyValue(tOp) == -1) {
+ setErrorCode(tOp);
+ DBUG_RETURN(-1);
+ }
+ }
+ }
+ DBUG_PRINT("info", ("added op before to read table key"));
+ if (isUpdateOp() || isDeleteOp()) {
+ // add op before this one to read head+inline via index
+ NdbIndexOperation* tOp = theNdbCon->getNdbIndexOperation(theAccessTable->m_index, theTable, theNdbOp);
+ if (tOp == NULL ||
+ tOp->readTuple() == -1 ||
+ setAccessKeyValue(tOp) == -1 ||
+ getHeadInlineValue(tOp) == -1) {
+ setErrorCode(tOp);
+ DBUG_RETURN(-1);
+ }
+ if (isWriteOp()) {
+ tOp->m_abortOption = NdbTransaction::AO_IgnoreError;
+ }
+ theHeadInlineReadOp = tOp;
+ // execute immediately
+ batch = true;
+ DBUG_PRINT("info", ("added index op before to read head+inline"));
+ }
+ if (isWriteOp()) {
+ // XXX until IgnoreError fixed for index op
+ batch = true;
+ }
+ }
+ if (isWriteOp()) {
+ if (theSetFlag) {
+ // write head+inline now
+ theNullFlag = true;
+ theLength = 0;
+ if (theSetBuf != NULL) {
+ Uint32 n = theGetSetBytes;
+ if (n > theInlineSize)
+ n = theInlineSize;
+ assert(thePos == 0);
+ if (writeDataPrivate(theSetBuf, n) == -1)
+ DBUG_RETURN(-1);
+ }
+ if (setHeadInlineValue(theNdbOp) == -1)
+ DBUG_RETURN(-1);
+ // the read op before us may overwrite
+ theHeadInlineCopyBuf.copyfrom(theHeadInlineBuf);
+ }
+ }
+ if (theActiveHook != NULL) {
+ // need blob head for callback
+ batch = true;
+ }
+ DBUG_PRINT("info", ("batch=%u", batch));
+ DBUG_RETURN(0);
+}
+
+/*
+ * Runs after execute, for any operation. If the state is already Active,
+ * this routine has been run before. Operations which requested a
+ * no-commit batch can add new operations after this one. They are added before
+ * any remaining prepared operations.
+ */
+int
+NdbBlob::postExecute(NdbTransaction::ExecType anExecType)
+{
+ DBUG_ENTER("NdbBlob::postExecute");
+ DBUG_PRINT("info", ("this=%p op=%p con=%p anExecType=%u", this, theNdbOp, theNdbCon, anExecType));
+ if (theState == Invalid)
+ DBUG_RETURN(-1);
+ if (theState == Active) {
+ setState(anExecType == NdbTransaction::NoCommit ? Active : Closed);
+ DBUG_PRINT("info", ("skip active"));
+ DBUG_RETURN(0);
+ }
+ assert(theState == Prepared);
+ setState(anExecType == NdbTransaction::NoCommit ? Active : Closed);
+ assert(isKeyOp());
+ if (isIndexOp()) {
+ NdbBlob* tFirstBlob = theNdbOp->theBlobList;
+ if (this != tFirstBlob) {
+ // copy key from first blob
+ assert(theKeyBuf.size == tFirstBlob->theKeyBuf.size);
+ memcpy(theKeyBuf.data, tFirstBlob->theKeyBuf.data, tFirstBlob->theKeyBuf.size);
+ }
+ }
+ if (isReadOp()) {
+ getHeadFromRecAttr();
+ if (setPos(0) == -1)
+ DBUG_RETURN(-1);
+ if (theGetFlag) {
+ assert(theGetSetBytes == 0 || theGetBuf != 0);
+ assert(theGetSetBytes <= theInlineSize ||
+ anExecType == NdbTransaction::NoCommit);
+ Uint32 bytes = theGetSetBytes;
+ if (readDataPrivate(theGetBuf, bytes) == -1)
+ DBUG_RETURN(-1);
+ }
+ }
+ if (isUpdateOp()) {
+ assert(anExecType == NdbTransaction::NoCommit);
+ getHeadFromRecAttr();
+ if (theSetFlag) {
+ // setValue overwrites everything
+ if (theSetBuf != NULL) {
+ if (truncate(0) == -1)
+ DBUG_RETURN(-1);
+ assert(thePos == 0);
+ if (writeDataPrivate(theSetBuf, theGetSetBytes) == -1)
+ DBUG_RETURN(-1);
+ } else {
+ if (setNull() == -1)
+ DBUG_RETURN(-1);
+ }
+ }
+ }
+ if (isWriteOp() && isTableOp()) {
+ assert(anExecType == NdbTransaction::NoCommit);
+ if (theHeadInlineReadOp->theError.code == 0) {
+ int tNullFlag = theNullFlag;
+ Uint64 tLength = theLength;
+ Uint64 tPos = thePos;
+ getHeadFromRecAttr();
+ DBUG_PRINT("info", ("tuple found"));
+ if (truncate(0) == -1)
+ DBUG_RETURN(-1);
+ // restore previous head+inline
+ theHeadInlineBuf.copyfrom(theHeadInlineCopyBuf);
+ theNullFlag = tNullFlag;
+ theLength = tLength;
+ thePos = tPos;
+ } else {
+ if (theHeadInlineReadOp->theError.code != 626) {
+ setErrorCode(theHeadInlineReadOp);
+ DBUG_RETURN(-1);
+ }
+ DBUG_PRINT("info", ("tuple not found"));
+ /*
+ * Read found no tuple but it is possible that a tuple was
+ * created after the read by another transaction. Delete all
+ * blob parts which may exist.
+ */
+ if (deletePartsUnknown(0) == -1)
+ DBUG_RETURN(-1);
+ }
+ if (theSetFlag && theGetSetBytes > theInlineSize) {
+ assert(theSetBuf != NULL);
+ const char* buf = theSetBuf + theInlineSize;
+ Uint32 bytes = theGetSetBytes - theInlineSize;
+ assert(thePos == theInlineSize);
+ if (writeDataPrivate(buf, bytes) == -1)
+ DBUG_RETURN(-1);
+ }
+ }
+ if (isWriteOp() && isIndexOp()) {
+ // XXX until IgnoreError fixed for index op
+ if (deletePartsUnknown(0) == -1)
+ DBUG_RETURN(-1);
+ if (theSetFlag && theGetSetBytes > theInlineSize) {
+ assert(theSetBuf != NULL);
+ const char* buf = theSetBuf + theInlineSize;
+ Uint32 bytes = theGetSetBytes - theInlineSize;
+ assert(thePos == theInlineSize);
+ if (writeDataPrivate(buf, bytes) == -1)
+ DBUG_RETURN(-1);
+ }
+ }
+ if (isDeleteOp()) {
+ assert(anExecType == NdbTransaction::NoCommit);
+ getHeadFromRecAttr();
+ if (deleteParts(0, getPartCount()) == -1)
+ DBUG_RETURN(-1);
+ }
+ setState(anExecType == NdbTransaction::NoCommit ? Active : Closed);
+ // activation callback
+ if (theActiveHook != NULL) {
+ if (invokeActiveHook() == -1)
+ DBUG_RETURN(-1);
+ }
+ if (anExecType == NdbTransaction::NoCommit && theHeadInlineUpdateFlag) {
+ NdbOperation* tOp = theNdbCon->getNdbOperation(theTable);
+ if (tOp == NULL ||
+ tOp->updateTuple() == -1 ||
+ setTableKeyValue(tOp) == -1 ||
+ setHeadInlineValue(tOp) == -1) {
+ setErrorCode(NdbBlobImpl::ErrAbort);
+ DBUG_RETURN(-1);
+ }
+ tOp->m_abortOption = NdbTransaction::AbortOnError;
+ DBUG_PRINT("info", ("added op to update head+inline"));
+ }
+ DBUG_RETURN(0);
+}
+
+/*
+ * Runs before commit of a completed operation. For write operations,
+ * adds an operation to update head+inline.
+ */
+int
+NdbBlob::preCommit()
+{
+ DBUG_ENTER("NdbBlob::preCommit");
+ DBUG_PRINT("info", ("this=%p op=%p con=%p", this, theNdbOp, theNdbCon));
+ if (theState == Invalid)
+ DBUG_RETURN(-1);
+ assert(theState == Active);
+ assert(isKeyOp());
+ if (isInsertOp() || isUpdateOp() || isWriteOp()) {
+ if (theHeadInlineUpdateFlag) {
+ // add an operation to update head+inline
+ NdbOperation* tOp = theNdbCon->getNdbOperation(theTable);
+ if (tOp == NULL ||
+ tOp->updateTuple() == -1 ||
+ setTableKeyValue(tOp) == -1 ||
+ setHeadInlineValue(tOp) == -1) {
+ setErrorCode(NdbBlobImpl::ErrAbort);
+ DBUG_RETURN(-1);
+ }
+ tOp->m_abortOption = NdbTransaction::AbortOnError;
+ DBUG_PRINT("info", ("added op to update head+inline"));
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+/*
+ * Runs after the next scan result. Handled like the read op above.
+ */
+int
+NdbBlob::atNextResult()
+{
+ DBUG_ENTER("NdbBlob::atNextResult");
+ DBUG_PRINT("info", ("this=%p op=%p con=%p", this, theNdbOp, theNdbCon));
+ if (theState == Invalid)
+ DBUG_RETURN(-1);
+ assert(isScanOp());
+ // get primary key
+ { Uint32* data = (Uint32*)theKeyBuf.data;
+ unsigned size = theTable->m_keyLenInWords;
+ if (((NdbScanOperation*)theNdbOp)->getKeyFromKEYINFO20(data, size) == -1) {
+ setErrorCode(NdbBlobImpl::ErrUsage);
+ DBUG_RETURN(-1);
+ }
+ }
+ getHeadFromRecAttr();
+ if (setPos(0) == -1)
+ DBUG_RETURN(-1);
+ if (theGetFlag) {
+ assert(theGetSetBytes == 0 || theGetBuf != 0);
+ Uint32 bytes = theGetSetBytes;
+ if (readDataPrivate(theGetBuf, bytes) == -1)
+ DBUG_RETURN(-1);
+ }
+ setState(Active);
+ // activation callback
+ if (theActiveHook != NULL) {
+ if (invokeActiveHook() == -1)
+ DBUG_RETURN(-1);
+ }
+ DBUG_RETURN(0);
+}
+
+// misc
+
+const NdbDictionary::Column*
+NdbBlob::getColumn()
+{
+ return theColumn;
+}
+
+// errors
+
+void
+NdbBlob::setErrorCode(int anErrorCode, bool invalidFlag)
+{
+ DBUG_ENTER("NdbBlob::setErrorCode");
+ DBUG_PRINT("info", ("this=%p code=%u", this, anErrorCode));
+ theError.code = anErrorCode;
+ // conditionally copy error to operation level
+ if (theNdbOp != NULL && theNdbOp->theError.code == 0)
+ theNdbOp->setErrorCode(theError.code);
+ if (invalidFlag)
+ setState(Invalid);
+ DBUG_VOID_RETURN;
+}
+
+void
+NdbBlob::setErrorCode(NdbOperation* anOp, bool invalidFlag)
+{
+ int code = 0;
+ if (anOp != NULL && (code = anOp->theError.code) != 0)
+ ;
+ else if ((code = theNdbCon->theError.code) != 0)
+ ;
+ else if ((code = theNdb->theError.code) != 0)
+ ;
+ else
+ code = NdbBlobImpl::ErrUnknown;
+ setErrorCode(code, invalidFlag);
+}
+
+void
+NdbBlob::setErrorCode(NdbTransaction* aCon, bool invalidFlag)
+{
+ int code = 0;
+ if (theNdbCon != NULL && (code = theNdbCon->theError.code) != 0)
+ ;
+ else if ((code = theNdb->theError.code) != 0)
+ ;
+ else
+ code = NdbBlobImpl::ErrUnknown;
+ setErrorCode(code, invalidFlag);
+}
+
+// info about all blobs in this operation
+
+NdbBlob*
+NdbBlob::blobsFirstBlob()
+{
+ return theNdbOp->theBlobList;
+}
+
+NdbBlob*
+NdbBlob::blobsNextBlob()
+{
+ return theNext;
+}
+
+// debug
+
+#ifdef VM_TRACE
+inline int
+NdbBlob::getOperationType() const
+{
+ return theNdbOp != NULL ? theNdbOp->theOperationType : -1;
+}
+
+NdbOut&
+operator<<(NdbOut& out, const NdbBlob& blob)
+{
+ ndbout << dec << "o=" << blob.getOperationType();
+ ndbout << dec << " s=" << (Uint32) blob.theState;
+ ndbout << dec << " n=" << blob.theNullFlag;;
+ ndbout << dec << " l=" << blob.theLength;
+ ndbout << dec << " p=" << blob.thePos;
+ ndbout << dec << " u=" << (Uint32)blob.theHeadInlineUpdateFlag;
+ ndbout << dec << " g=" << (Uint32)blob.theGetSetBytes;
+ return out;
+}
+#endif
diff --git a/ndb/src/ndbapi/NdbBlobImpl.hpp b/storage/ndb/src/ndbapi/NdbBlobImpl.hpp
index 0030e910c52..0030e910c52 100644
--- a/ndb/src/ndbapi/NdbBlobImpl.hpp
+++ b/storage/ndb/src/ndbapi/NdbBlobImpl.hpp
diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp
new file mode 100644
index 00000000000..0d464c6d412
--- /dev/null
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp
@@ -0,0 +1,1072 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <NdbDictionary.hpp>
+#include "NdbDictionaryImpl.hpp"
+#include <NdbOut.hpp>
+
+/*****************************************************************
+ * Column facade
+ */
+NdbDictionary::Column::Column(const char * name)
+ : m_impl(* new NdbColumnImpl(* this))
+{
+ setName(name);
+}
+
+NdbDictionary::Column::Column(const NdbDictionary::Column & org)
+ : m_impl(* new NdbColumnImpl(* this))
+{
+ m_impl = org.m_impl;
+}
+
+NdbDictionary::Column::Column(NdbColumnImpl& impl)
+ : m_impl(impl)
+{
+}
+
+NdbDictionary::Column::~Column(){
+ NdbColumnImpl * tmp = &m_impl;
+ if(this != tmp){
+ delete tmp;
+ }
+}
+
+NdbDictionary::Column&
+NdbDictionary::Column::operator=(const NdbDictionary::Column& column)
+{
+ m_impl = column.m_impl;
+
+ return *this;
+}
+
+void
+NdbDictionary::Column::setName(const char * name){
+ m_impl.m_name.assign(name);
+}
+
+const char*
+NdbDictionary::Column::getName() const {
+ return m_impl.m_name.c_str();
+}
+
+void
+NdbDictionary::Column::setType(Type t){
+ m_impl.init(t);
+}
+
+NdbDictionary::Column::Type
+NdbDictionary::Column::getType() const {
+ return m_impl.m_type;
+}
+
+void
+NdbDictionary::Column::setPrecision(int val){
+ m_impl.m_precision = val;
+}
+
+int
+NdbDictionary::Column::getPrecision() const {
+ return m_impl.m_precision;
+}
+
+void
+NdbDictionary::Column::setScale(int val){
+ m_impl.m_scale = val;
+}
+
+int
+NdbDictionary::Column::getScale() const{
+ return m_impl.m_scale;
+}
+
+void
+NdbDictionary::Column::setLength(int length){
+ m_impl.m_length = length;
+}
+
+int
+NdbDictionary::Column::getLength() const{
+ return m_impl.m_length;
+}
+
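+/*
+ * For blob columns the inline size, part size and stripe size are
+ * stored in the precision, scale and length fields respectively, as
+ * the accessors below show.
+ */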
+void
+NdbDictionary::Column::setInlineSize(int size)
+{
+ m_impl.m_precision = size;
+}
+
+void
+NdbDictionary::Column::setCharset(CHARSET_INFO* cs)
+{
+ m_impl.m_cs = cs;
+}
+
+CHARSET_INFO*
+NdbDictionary::Column::getCharset() const
+{
+ return m_impl.m_cs;
+}
+
+int
+NdbDictionary::Column::getInlineSize() const
+{
+ return m_impl.m_precision;
+}
+
+void
+NdbDictionary::Column::setPartSize(int size)
+{
+ m_impl.m_scale = size;
+}
+
+int
+NdbDictionary::Column::getPartSize() const
+{
+ return m_impl.m_scale;
+}
+
+void
+NdbDictionary::Column::setStripeSize(int size)
+{
+ m_impl.m_length = size;
+}
+
+int
+NdbDictionary::Column::getStripeSize() const
+{
+ return m_impl.m_length;
+}
+
+int
+NdbDictionary::Column::getSize() const{
+ return m_impl.m_attrSize;
+}
+
+void
+NdbDictionary::Column::setNullable(bool val){
+ m_impl.m_nullable = val;
+}
+
+bool
+NdbDictionary::Column::getNullable() const {
+ return m_impl.m_nullable;
+}
+
+void
+NdbDictionary::Column::setPrimaryKey(bool val){
+ m_impl.m_pk = val;
+}
+
+bool
+NdbDictionary::Column::getPrimaryKey() const {
+ return m_impl.m_pk;
+}
+
+void
+NdbDictionary::Column::setPartitionKey(bool val){
+ m_impl.m_distributionKey = val;
+}
+
+bool
+NdbDictionary::Column::getPartitionKey() const{
+ return m_impl.m_distributionKey;
+}
+
+const NdbDictionary::Table *
+NdbDictionary::Column::getBlobTable() const {
+ NdbTableImpl * t = m_impl.m_blobTable;
+ if (t)
+ return t->m_facade;
+ return 0;
+}
+
+void
+NdbDictionary::Column::setAutoIncrement(bool val){
+ m_impl.m_autoIncrement = val;
+}
+
+bool
+NdbDictionary::Column::getAutoIncrement() const {
+ return m_impl.m_autoIncrement;
+}
+
+void
+NdbDictionary::Column::setAutoIncrementInitialValue(Uint64 val){
+ m_impl.m_autoIncrementInitialValue = val;
+}
+
+void
+NdbDictionary::Column::setDefaultValue(const char* defaultValue)
+{
+ m_impl.m_defaultValue.assign(defaultValue);
+}
+
+const char*
+NdbDictionary::Column::getDefaultValue() const
+{
+ return m_impl.m_defaultValue.c_str();
+}
+
+int
+NdbDictionary::Column::getColumnNo() const {
+ return m_impl.m_attrId;
+}
+
+bool
+NdbDictionary::Column::equal(const NdbDictionary::Column & col) const {
+ return m_impl.equal(col.m_impl);
+}
+
+int
+NdbDictionary::Column::getSizeInBytes() const
+{
+ return m_impl.m_attrSize * m_impl.m_arraySize;
+}
+
+/*****************************************************************
+ * Table facade
+ */
+NdbDictionary::Table::Table(const char * name)
+ : m_impl(* new NdbTableImpl(* this))
+{
+ setName(name);
+}
+
+NdbDictionary::Table::Table(const NdbDictionary::Table & org)
+ : NdbDictionary::Object(),
+ m_impl(* new NdbTableImpl(* this))
+{
+ m_impl.assign(org.m_impl);
+}
+
+NdbDictionary::Table::Table(NdbTableImpl & impl)
+ : m_impl(impl)
+{
+}
+
+NdbDictionary::Table::~Table(){
+ NdbTableImpl * tmp = &m_impl;
+ if(this != tmp){
+ delete tmp;
+ }
+}
+
+NdbDictionary::Table&
+NdbDictionary::Table::operator=(const NdbDictionary::Table& table)
+{
+ m_impl.assign(table.m_impl);
+
+ m_impl.m_facade = this;
+ return *this;
+}
+
+void
+NdbDictionary::Table::setName(const char * name){
+ m_impl.setName(name);
+}
+
+const char *
+NdbDictionary::Table::getName() const {
+ return m_impl.getName();
+}
+
+int
+NdbDictionary::Table::getTableId() const {
+ return m_impl.m_tableId;
+}
+
+void
+NdbDictionary::Table::addColumn(const Column & c){
+ NdbColumnImpl* col = new NdbColumnImpl;
+ (* col) = NdbColumnImpl::getImpl(c);
+ m_impl.m_columns.push_back(col);
+ if(c.getPrimaryKey()){
+ m_impl.m_noOfKeys++;
+ }
+ if (col->getBlobType()) {
+ m_impl.m_noOfBlobs++;
+ }
+ m_impl.buildColumnHash();
+}
+
+const NdbDictionary::Column*
+NdbDictionary::Table::getColumn(const char * name) const {
+ return m_impl.getColumn(name);
+}
+
+const NdbDictionary::Column*
+NdbDictionary::Table::getColumn(const int attrId) const {
+ return m_impl.getColumn(attrId);
+}
+
+NdbDictionary::Column*
+NdbDictionary::Table::getColumn(const char * name)
+{
+ return m_impl.getColumn(name);
+}
+
+NdbDictionary::Column*
+NdbDictionary::Table::getColumn(const int attrId)
+{
+ return m_impl.getColumn(attrId);
+}
+
+void
+NdbDictionary::Table::setLogging(bool val){
+ m_impl.m_logging = val;
+}
+
+bool
+NdbDictionary::Table::getLogging() const {
+ return m_impl.m_logging;
+}
+
+void
+NdbDictionary::Table::setFragmentType(FragmentType ft){
+ m_impl.m_fragmentType = ft;
+}
+
+NdbDictionary::Object::FragmentType
+NdbDictionary::Table::getFragmentType() const {
+ return m_impl.m_fragmentType;
+}
+
+void
+NdbDictionary::Table::setKValue(int kValue){
+ m_impl.m_kvalue = kValue;
+}
+
+int
+NdbDictionary::Table::getKValue() const {
+ return m_impl.m_kvalue;
+}
+
+void
+NdbDictionary::Table::setMinLoadFactor(int lf){
+ m_impl.m_minLoadFactor = lf;
+}
+
+int
+NdbDictionary::Table::getMinLoadFactor() const {
+ return m_impl.m_minLoadFactor;
+}
+
+void
+NdbDictionary::Table::setMaxLoadFactor(int lf){
+ m_impl.m_maxLoadFactor = lf;
+}
+
+int
+NdbDictionary::Table::getMaxLoadFactor() const {
+ return m_impl.m_maxLoadFactor;
+}
+
+int
+NdbDictionary::Table::getNoOfColumns() const {
+ return m_impl.m_columns.size();
+}
+
+int
+NdbDictionary::Table::getNoOfPrimaryKeys() const {
+ return m_impl.m_noOfKeys;
+}
+
+const char*
+NdbDictionary::Table::getPrimaryKey(int no) const {
+ int count = 0;
+ for (unsigned i = 0; i < m_impl.m_columns.size(); i++) {
+ if (m_impl.m_columns[i]->m_pk) {
+ if (count++ == no)
+ return m_impl.m_columns[i]->m_name.c_str();
+ }
+ }
+ return 0;
+}
+
+const void*
+NdbDictionary::Table::getFrmData() const {
+ return m_impl.m_frm.get_data();
+}
+
+Uint32
+NdbDictionary::Table::getFrmLength() const {
+ return m_impl.m_frm.length();
+}
+
+void
+NdbDictionary::Table::setFrm(const void* data, Uint32 len){
+ m_impl.m_frm.assign(data, len);
+}
+
+const void*
+NdbDictionary::Table::getNodeGroupIds() const {
+ return m_impl.m_ng.get_data();
+}
+
+Uint32
+NdbDictionary::Table::getNodeGroupIdsLength() const {
+ return m_impl.m_ng.length();
+}
+
+void
+NdbDictionary::Table::setNodeGroupIds(const void* data, Uint32 noWords)
+{
+ m_impl.m_ng.assign(data, 2*noWords);
+}
+
+NdbDictionary::Object::Status
+NdbDictionary::Table::getObjectStatus() const {
+ return m_impl.m_status;
+}
+
+int
+NdbDictionary::Table::getObjectVersion() const {
+ return m_impl.m_version;
+}
+
+bool
+NdbDictionary::Table::equal(const NdbDictionary::Table & col) const {
+ return m_impl.equal(col.m_impl);
+}
+
+int
+NdbDictionary::Table::getRowSizeInBytes() const {
+ int sz = 0;
+ for(int i = 0; i<getNoOfColumns(); i++){
+ const NdbDictionary::Column * c = getColumn(i);
+ sz += (c->getSizeInBytes()+ 3) / 4;
+ }
+ return sz * 4;
+}
+
+int
+NdbDictionary::Table::getReplicaCount() const {
+ return m_impl.m_replicaCount;
+}
+
+int
+NdbDictionary::Table::createTableInDb(Ndb* pNdb, bool equalOk) const {
+ const NdbDictionary::Table * pTab =
+ pNdb->getDictionary()->getTable(getName());
+ if(pTab != 0 && equal(* pTab))
+ return 0;
+ if(pTab != 0 && !equal(* pTab))
+ return -1;
+ return pNdb->getDictionary()->createTable(* this);
+}
+
+/*****************************************************************
+ * Index facade
+ */
+NdbDictionary::Index::Index(const char * name)
+ : m_impl(* new NdbIndexImpl(* this))
+{
+ setName(name);
+}
+
+NdbDictionary::Index::Index(NdbIndexImpl & impl)
+ : m_impl(impl)
+{
+}
+
+NdbDictionary::Index::~Index(){
+ NdbIndexImpl * tmp = &m_impl;
+ if(this != tmp){
+ delete tmp;
+ }
+}
+
+void
+NdbDictionary::Index::setName(const char * name){
+ m_impl.setName(name);
+}
+
+const char *
+NdbDictionary::Index::getName() const {
+ return m_impl.getName();
+}
+
+void
+NdbDictionary::Index::setTable(const char * table){
+ m_impl.setTable(table);
+}
+
+const char *
+NdbDictionary::Index::getTable() const {
+ return m_impl.getTable();
+}
+
+unsigned
+NdbDictionary::Index::getNoOfColumns() const {
+ return m_impl.m_columns.size();
+}
+
+int
+NdbDictionary::Index::getNoOfIndexColumns() const {
+ return m_impl.m_columns.size();
+}
+
+const NdbDictionary::Column *
+NdbDictionary::Index::getColumn(unsigned no) const {
+ if(no < m_impl.m_columns.size())
+ return m_impl.m_columns[no];
+ return NULL;
+}
+
+const char *
+NdbDictionary::Index::getIndexColumn(int no) const {
+ const NdbDictionary::Column* col = getColumn(no);
+
+ if (col)
+ return col->getName();
+ else
+ return NULL;
+}
+
+void
+NdbDictionary::Index::addColumn(const Column & c){
+ NdbColumnImpl* col = new NdbColumnImpl;
+ (* col) = NdbColumnImpl::getImpl(c);
+ m_impl.m_columns.push_back(col);
+}
+
+void
+NdbDictionary::Index::addColumnName(const char * name){
+ const Column c(name);
+ addColumn(c);
+}
+
+void
+NdbDictionary::Index::addIndexColumn(const char * name){
+ const Column c(name);
+ addColumn(c);
+}
+
+void
+NdbDictionary::Index::addColumnNames(unsigned noOfNames, const char ** names){
+ for(unsigned i = 0; i < noOfNames; i++) {
+ const Column c(names[i]);
+ addColumn(c);
+ }
+}
+
+void
+NdbDictionary::Index::addIndexColumns(int noOfNames, const char ** names){
+ for(int i = 0; i < noOfNames; i++) {
+ const Column c(names[i]);
+ addColumn(c);
+ }
+}
+
+void
+NdbDictionary::Index::setType(NdbDictionary::Index::Type t){
+ m_impl.m_type = t;
+}
+
+NdbDictionary::Index::Type
+NdbDictionary::Index::getType() const {
+ return m_impl.m_type;
+}
+
+void
+NdbDictionary::Index::setLogging(bool val){
+ m_impl.m_logging = val;
+}
+
+bool
+NdbDictionary::Index::getLogging() const {
+ return m_impl.m_logging;
+}
+
+NdbDictionary::Object::Status
+NdbDictionary::Index::getObjectStatus() const {
+ return m_impl.m_status;
+}
+
+int
+NdbDictionary::Index::getObjectVersion() const {
+ return m_impl.m_version;
+}
+
+/*****************************************************************
+ * Event facade
+ */
+NdbDictionary::Event::Event(const char * name)
+ : m_impl(* new NdbEventImpl(* this))
+{
+ setName(name);
+}
+
+NdbDictionary::Event::Event(const char * name, const Table& table)
+ : m_impl(* new NdbEventImpl(* this))
+{
+ setName(name);
+ setTable(table);
+}
+
+NdbDictionary::Event::Event(NdbEventImpl & impl)
+ : m_impl(impl)
+{
+}
+
+NdbDictionary::Event::~Event()
+{
+ NdbEventImpl * tmp = &m_impl;
+ if(this != tmp){
+ delete tmp;
+ }
+}
+
+void
+NdbDictionary::Event::setName(const char * name)
+{
+ m_impl.setName(name);
+}
+
+const char *
+NdbDictionary::Event::getName() const
+{
+ return m_impl.getName();
+}
+
+void
+NdbDictionary::Event::setTable(const Table& table)
+{
+ m_impl.setTable(table);
+}
+
+void
+NdbDictionary::Event::setTable(const char * table)
+{
+ m_impl.setTable(table);
+}
+
+const char*
+NdbDictionary::Event::getTableName() const
+{
+ return m_impl.getTableName();
+}
+
+void
+NdbDictionary::Event::addTableEvent(const TableEvent t)
+{
+ m_impl.addTableEvent(t);
+}
+
+void
+NdbDictionary::Event::setDurability(EventDurability d)
+{
+ m_impl.setDurability(d);
+}
+
+NdbDictionary::Event::EventDurability
+NdbDictionary::Event::getDurability() const
+{
+ return m_impl.getDurability();
+}
+
+void
+NdbDictionary::Event::addColumn(const Column & c){
+ NdbColumnImpl* col = new NdbColumnImpl;
+ (* col) = NdbColumnImpl::getImpl(c);
+ m_impl.m_columns.push_back(col);
+}
+
+void
+NdbDictionary::Event::addEventColumn(unsigned attrId)
+{
+ m_impl.m_attrIds.push_back(attrId);
+}
+
+void
+NdbDictionary::Event::addEventColumn(const char * name)
+{
+ const Column c(name);
+ addColumn(c);
+}
+
+void
+NdbDictionary::Event::addEventColumns(int n, const char ** names)
+{
+ for (int i = 0; i < n; i++)
+ addEventColumn(names[i]);
+}
+
+int NdbDictionary::Event::getNoOfEventColumns() const
+{
+ return m_impl.getNoOfEventColumns();
+}
+
+NdbDictionary::Object::Status
+NdbDictionary::Event::getObjectStatus() const
+{
+ return m_impl.m_status;
+}
+
+int
+NdbDictionary::Event::getObjectVersion() const
+{
+ return m_impl.m_version;
+}
+
+void NdbDictionary::Event::print()
+{
+ m_impl.print();
+}
+
+/*****************************************************************
+ * Dictionary facade
+ */
+NdbDictionary::Dictionary::Dictionary(Ndb & ndb)
+ : m_impl(* new NdbDictionaryImpl(ndb, *this))
+{
+}
+
+NdbDictionary::Dictionary::Dictionary(NdbDictionaryImpl & impl)
+ : m_impl(impl)
+{
+}
+NdbDictionary::Dictionary::~Dictionary(){
+ NdbDictionaryImpl * tmp = &m_impl;
+ if(this != tmp){
+ delete tmp;
+ }
+}
+
+int
+NdbDictionary::Dictionary::createTable(const Table & t)
+{
+ DBUG_ENTER("NdbDictionary::Dictionary::createTable");
+ DBUG_RETURN(m_impl.createTable(NdbTableImpl::getImpl(t)));
+}
+
+int
+NdbDictionary::Dictionary::dropTable(Table & t){
+ return m_impl.dropTable(NdbTableImpl::getImpl(t));
+}
+
+int
+NdbDictionary::Dictionary::dropTable(const char * name){
+ return m_impl.dropTable(name);
+}
+
+int
+NdbDictionary::Dictionary::alterTable(const Table & t){
+ return m_impl.alterTable(NdbTableImpl::getImpl(t));
+}
+
+const NdbDictionary::Table *
+NdbDictionary::Dictionary::getTable(const char * name, void **data) const
+{
+ NdbTableImpl * t = m_impl.getTable(name, data);
+ if(t)
+ return t->m_facade;
+ return 0;
+}
+
+void NdbDictionary::Dictionary::set_local_table_data_size(unsigned sz)
+{
+ m_impl.m_local_table_data_size= sz;
+}
+
+const NdbDictionary::Table *
+NdbDictionary::Dictionary::getTable(const char * name) const
+{
+ return getTable(name, 0);
+}
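+
+/*
+ * Typical use of the dictionary facade (illustrative sketch only; `ndb`
+ * is assumed to be a connected Ndb object and handle() stands in for the
+ * application's own error handling):
+ *
+ *   NdbDictionary::Dictionary * dict = ndb->getDictionary();
+ *   const NdbDictionary::Table * tab = dict->getTable("t1");
+ *   if (tab == 0)
+ *     handle(dict->getNdbError());
+ */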
+
+void
+NdbDictionary::Dictionary::invalidateTable(const char * name){
+ DBUG_ENTER("NdbDictionaryImpl::invalidateTable");
+ NdbTableImpl * t = m_impl.getTable(name);
+ if(t)
+ m_impl.invalidateObject(* t);
+ DBUG_VOID_RETURN;
+}
+
+void
+NdbDictionary::Dictionary::removeCachedTable(const char * name){
+ NdbTableImpl * t = m_impl.getTable(name);
+ if(t)
+ m_impl.removeCachedObject(* t);
+}
+
+int
+NdbDictionary::Dictionary::createIndex(const Index & ind)
+{
+ return m_impl.createIndex(NdbIndexImpl::getImpl(ind));
+}
+
+int
+NdbDictionary::Dictionary::dropIndex(const char * indexName,
+ const char * tableName)
+{
+ return m_impl.dropIndex(indexName, tableName);
+}
+
+const NdbDictionary::Index *
+NdbDictionary::Dictionary::getIndex(const char * indexName,
+ const char * tableName) const
+{
+ NdbIndexImpl * i = m_impl.getIndex(indexName, tableName);
+ if(i)
+ return i->m_facade;
+ return 0;
+}
+
+void
+NdbDictionary::Dictionary::invalidateIndex(const char * indexName,
+ const char * tableName){
+ DBUG_ENTER("NdbDictionaryImpl::invalidateIndex");
+ NdbIndexImpl * i = m_impl.getIndex(indexName, tableName);
+ if(i) {
+ assert(i->m_table != 0);
+ m_impl.invalidateObject(* i->m_table);
+ }
+ DBUG_VOID_RETURN;
+}
+
+void
+NdbDictionary::Dictionary::removeCachedIndex(const char * indexName,
+ const char * tableName){
+ NdbIndexImpl * i = m_impl.getIndex(indexName, tableName);
+ if(i) {
+ assert(i->m_table != 0);
+ m_impl.removeCachedObject(* i->m_table);
+ }
+}
+
+const NdbDictionary::Table *
+NdbDictionary::Dictionary::getIndexTable(const char * indexName,
+ const char * tableName) const
+{
+ NdbIndexImpl * i = m_impl.getIndex(indexName, tableName);
+ NdbTableImpl * t = m_impl.getTable(tableName);
+ if(i && t) {
+ NdbTableImpl * it = m_impl.getIndexTable(i, t);
+ return it->m_facade;
+ }
+ return 0;
+}
+
+
+int
+NdbDictionary::Dictionary::createEvent(const Event & ev)
+{
+ return m_impl.createEvent(NdbEventImpl::getImpl(ev));
+}
+
+int
+NdbDictionary::Dictionary::dropEvent(const char * eventName)
+{
+ return m_impl.dropEvent(eventName);
+}
+
+const NdbDictionary::Event *
+NdbDictionary::Dictionary::getEvent(const char * eventName)
+{
+ NdbEventImpl * t = m_impl.getEvent(eventName);
+ if(t)
+ return t->m_facade;
+ return 0;
+}
+
+int
+NdbDictionary::Dictionary::listObjects(List& list, Object::Type type)
+{
+ return m_impl.listObjects(list, type);
+}
+
+int
+NdbDictionary::Dictionary::listObjects(List& list, Object::Type type) const
+{
+ return m_impl.listObjects(list, type);
+}
+
+int
+NdbDictionary::Dictionary::listIndexes(List& list, const char * tableName)
+{
+ const NdbDictionary::Table* tab= getTable(tableName);
+ if(tab == 0)
+ {
+ return -1;
+ }
+ return m_impl.listIndexes(list, tab->getTableId());
+}
+
+int
+NdbDictionary::Dictionary::listIndexes(List& list,
+ const char * tableName) const
+{
+ const NdbDictionary::Table* tab= getTable(tableName);
+ if(tab == 0)
+ {
+ return -1;
+ }
+ return m_impl.listIndexes(list, tab->getTableId());
+}
+
+const struct NdbError &
+NdbDictionary::Dictionary::getNdbError() const {
+ return m_impl.getNdbError();
+}
+
+// printers
+
+NdbOut&
+operator<<(NdbOut& out, const NdbDictionary::Column& col)
+{
+ const CHARSET_INFO *cs = col.getCharset();
+ const char *csname = cs ? cs->name : "?";
+ out << col.getName() << " ";
+ switch (col.getType()) {
+ case NdbDictionary::Column::Tinyint:
+ out << "Tinyint";
+ break;
+ case NdbDictionary::Column::Tinyunsigned:
+ out << "Tinyunsigned";
+ break;
+ case NdbDictionary::Column::Smallint:
+ out << "Smallint";
+ break;
+ case NdbDictionary::Column::Smallunsigned:
+ out << "Smallunsigned";
+ break;
+ case NdbDictionary::Column::Mediumint:
+ out << "Mediumint";
+ break;
+ case NdbDictionary::Column::Mediumunsigned:
+ out << "Mediumunsigned";
+ break;
+ case NdbDictionary::Column::Int:
+ out << "Int";
+ break;
+ case NdbDictionary::Column::Unsigned:
+ out << "Unsigned";
+ break;
+ case NdbDictionary::Column::Bigint:
+ out << "Bigint";
+ break;
+ case NdbDictionary::Column::Bigunsigned:
+ out << "Bigunsigned";
+ break;
+ case NdbDictionary::Column::Float:
+ out << "Float";
+ break;
+ case NdbDictionary::Column::Double:
+ out << "Double";
+ break;
+ case NdbDictionary::Column::Olddecimal:
+ out << "Olddecimal(" << col.getPrecision() << "," << col.getScale() << ")";
+ break;
+ case NdbDictionary::Column::Olddecimalunsigned:
+ out << "Olddecimalunsigned(" << col.getPrecision() << "," << col.getScale() << ")";
+ break;
+ case NdbDictionary::Column::Decimal:
+ out << "Decimal(" << col.getPrecision() << "," << col.getScale() << ")";
+ break;
+ case NdbDictionary::Column::Decimalunsigned:
+ out << "Decimalunsigned(" << col.getPrecision() << "," << col.getScale() << ")";
+ break;
+ case NdbDictionary::Column::Char:
+ out << "Char(" << col.getLength() << ";" << csname << ")";
+ break;
+ case NdbDictionary::Column::Varchar:
+ out << "Varchar(" << col.getLength() << ";" << csname << ")";
+ break;
+ case NdbDictionary::Column::Binary:
+ out << "Binary(" << col.getLength() << ")";
+ break;
+ case NdbDictionary::Column::Varbinary:
+ out << "Varbinary(" << col.getLength() << ")";
+ break;
+ case NdbDictionary::Column::Datetime:
+ out << "Datetime";
+ break;
+ case NdbDictionary::Column::Date:
+ out << "Date";
+ break;
+ case NdbDictionary::Column::Blob:
+ out << "Blob(" << col.getInlineSize() << "," << col.getPartSize()
+ << ";" << col.getStripeSize() << ")";
+ break;
+ case NdbDictionary::Column::Text:
+ out << "Text(" << col.getInlineSize() << "," << col.getPartSize()
+ << ";" << col.getStripeSize() << ";" << csname << ")";
+ break;
+ case NdbDictionary::Column::Time:
+ out << "Time";
+ break;
+ case NdbDictionary::Column::Year:
+ out << "Year";
+ break;
+ case NdbDictionary::Column::Timestamp:
+ out << "Timestamp";
+ break;
+ case NdbDictionary::Column::Undefined:
+ out << "Undefined";
+ break;
+ case NdbDictionary::Column::Bit:
+ out << "Bit(" << col.getLength() << ")";
+ break;
+ case NdbDictionary::Column::Longvarchar:
+ out << "Longvarchar(" << col.getLength() << ";" << csname << ")";
+ break;
+ case NdbDictionary::Column::Longvarbinary:
+ out << "Longvarbinary(" << col.getLength() << ")";
+ break;
+ default:
+ out << "Type" << (Uint32)col.getType();
+ break;
+ }
+ // show unusual (non-MySQL) array size
+ if (col.getLength() != 1) {
+ switch (col.getType()) {
+ case NdbDictionary::Column::Char:
+ case NdbDictionary::Column::Varchar:
+ case NdbDictionary::Column::Binary:
+ case NdbDictionary::Column::Varbinary:
+ case NdbDictionary::Column::Blob:
+ case NdbDictionary::Column::Text:
+ case NdbDictionary::Column::Bit:
+ case NdbDictionary::Column::Longvarchar:
+ case NdbDictionary::Column::Longvarbinary:
+ break;
+ default:
+ out << " [" << col.getLength() << "]";
+ break;
+ }
+ }
+ if (col.getPrimaryKey())
+ out << " PRIMARY KEY";
+ else if (! col.getNullable())
+ out << " NOT NULL";
+ else
+ out << " NULL";
+
+ if(col.getDistributionKey())
+ out << " DISTRIBUTION KEY";
+
+ return out;
+}
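+
+/*
+ * Example of the output produced by the printer above (illustrative):
+ *   id Unsigned PRIMARY KEY DISTRIBUTION KEY
+ *   name Varchar(32;latin1_swedish_ci) NOT NULL
+ */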
+
+const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT = 0;
+const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT_MEMORY = 0;
+const NdbDictionary::Column * NdbDictionary::Column::ROW_COUNT = 0;
+const NdbDictionary::Column * NdbDictionary::Column::COMMIT_COUNT = 0;
+const NdbDictionary::Column * NdbDictionary::Column::ROW_SIZE = 0;
+const NdbDictionary::Column * NdbDictionary::Column::RANGE_NO = 0;
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
new file mode 100644
index 00000000000..04b41b8dfbc
--- /dev/null
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -0,0 +1,3197 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "NdbDictionaryImpl.hpp"
+#include "API.hpp"
+#include <NdbOut.hpp>
+#include "NdbApiSignal.hpp"
+#include "TransporterFacade.hpp"
+#include <signaldata/GetTabInfo.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/CreateTable.hpp>
+#include <signaldata/CreateIndx.hpp>
+#include <signaldata/CreateEvnt.hpp>
+#include <signaldata/SumaImpl.hpp>
+#include <signaldata/DropTable.hpp>
+#include <signaldata/AlterTable.hpp>
+#include <signaldata/DropIndx.hpp>
+#include <signaldata/ListTables.hpp>
+#include <SimpleProperties.hpp>
+#include <Bitmask.hpp>
+#include <AttributeList.hpp>
+#include <NdbEventOperation.hpp>
+#include "NdbEventOperationImpl.hpp"
+#include <NdbBlob.hpp>
+#include "NdbBlobImpl.hpp"
+#include <AttributeHeader.hpp>
+#include <my_sys.h>
+
+#define DEBUG_PRINT 0
+#define INCOMPATIBLE_VERSION -2
+
+//#define EVENT_DEBUG
+
+/**
+ * Column
+ */
+NdbColumnImpl::NdbColumnImpl()
+ : NdbDictionary::Column(* this), m_attrId(-1), m_facade(this)
+{
+ init();
+}
+
+NdbColumnImpl::NdbColumnImpl(NdbDictionary::Column & f)
+ : NdbDictionary::Column(* this), m_attrId(-1), m_facade(&f)
+{
+ init();
+}
+
+NdbColumnImpl&
+NdbColumnImpl::operator=(const NdbColumnImpl& col)
+{
+ m_attrId = col.m_attrId;
+ m_name = col.m_name;
+ m_type = col.m_type;
+ m_precision = col.m_precision;
+ m_cs = col.m_cs;
+ m_scale = col.m_scale;
+ m_length = col.m_length;
+ m_pk = col.m_pk;
+ m_distributionKey = col.m_distributionKey;
+ m_nullable = col.m_nullable;
+ m_autoIncrement = col.m_autoIncrement;
+ m_autoIncrementInitialValue = col.m_autoIncrementInitialValue;
+ m_defaultValue = col.m_defaultValue;
+ m_attrSize = col.m_attrSize;
+ m_arraySize = col.m_arraySize;
+ m_keyInfoPos = col.m_keyInfoPos;
+ m_blobTable = col.m_blobTable;
+ // Do not copy m_facade !!
+
+ return *this;
+}
+
+void
+NdbColumnImpl::init(Type t)
+{
+ // do not use default_charset_info as it may not be initialized yet
+ // use binary collation until NDB tests can handle charsets
+ CHARSET_INFO* default_cs = &my_charset_bin;
+ m_type = t;
+ switch (m_type) {
+ case Tinyint:
+ case Tinyunsigned:
+ case Smallint:
+ case Smallunsigned:
+ case Mediumint:
+ case Mediumunsigned:
+ case Int:
+ case Unsigned:
+ case Bigint:
+ case Bigunsigned:
+ case Float:
+ case Double:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = NULL;
+ break;
+ case Olddecimal:
+ case Olddecimalunsigned:
+ case Decimal:
+ case Decimalunsigned:
+ m_precision = 10;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = NULL;
+ break;
+ case Char:
+ case Varchar:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = default_cs;
+ break;
+ case Binary:
+ case Varbinary:
+ case Datetime:
+ case Date:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = NULL;
+ break;
+ case Blob:
+ m_precision = 256;
+ m_scale = 8000;
+ m_length = 4;
+ m_cs = NULL;
+ break;
+ case Text:
+ m_precision = 256;
+ m_scale = 8000;
+ m_length = 4;
+ m_cs = default_cs;
+ break;
+ case Time:
+ case Year:
+ case Timestamp:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = NULL;
+ break;
+ case Bit:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = NULL;
+ break;
+ case Longvarchar:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1; // legal
+ m_cs = default_cs;
+ break;
+ case Longvarbinary:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1; // legal
+ m_cs = NULL;
+ break;
+ default:
+ case Undefined:
+ assert(false);
+ break;
+ }
+ m_pk = false;
+ m_nullable = false;
+ m_distributionKey = false;
+ m_keyInfoPos = 0;
+ // next 2 are set at run time
+ m_attrSize = 0;
+ m_arraySize = 0;
+ m_autoIncrement = false;
+ m_autoIncrementInitialValue = 1;
+ m_blobTable = NULL;
+}
+
+NdbColumnImpl::~NdbColumnImpl()
+{
+}
+
+bool
+NdbColumnImpl::equal(const NdbColumnImpl& col) const
+{
+ DBUG_ENTER("NdbColumnImpl::equal");
+ if(strcmp(m_name.c_str(), col.m_name.c_str()) != 0){
+ DBUG_RETURN(false);
+ }
+ if(m_type != col.m_type){
+ DBUG_RETURN(false);
+ }
+ if(m_pk != col.m_pk){
+ DBUG_RETURN(false);
+ }
+ if(m_nullable != col.m_nullable){
+ DBUG_RETURN(false);
+ }
+#ifdef ndb_dictionary_dkey_fixed
+ if(m_pk){
+ if(m_distributionKey != col.m_distributionKey){
+ DBUG_RETURN(false);
+ }
+ }
+#endif
+ if (m_precision != col.m_precision ||
+ m_scale != col.m_scale ||
+ m_length != col.m_length ||
+ m_cs != col.m_cs) {
+ DBUG_RETURN(false);
+ }
+ if (m_autoIncrement != col.m_autoIncrement){
+ DBUG_RETURN(false);
+ }
+ if(strcmp(m_defaultValue.c_str(), col.m_defaultValue.c_str()) != 0){
+ DBUG_RETURN(false);
+ }
+
+ DBUG_RETURN(true);
+}
+
+NdbDictionary::Column *
+NdbColumnImpl::create_pseudo(const char * name){
+ NdbDictionary::Column * col = new NdbDictionary::Column();
+ col->setName(name);
+ if(!strcmp(name, "NDB$FRAGMENT")){
+ col->setType(NdbDictionary::Column::Unsigned);
+ col->m_impl.m_attrId = AttributeHeader::FRAGMENT;
+ col->m_impl.m_attrSize = 4;
+ col->m_impl.m_arraySize = 1;
+ } else if(!strcmp(name, "NDB$FRAGMENT_MEMORY")){
+ col->setType(NdbDictionary::Column::Bigunsigned);
+ col->m_impl.m_attrId = AttributeHeader::FRAGMENT_MEMORY;
+ col->m_impl.m_attrSize = 8;
+ col->m_impl.m_arraySize = 1;
+ } else if(!strcmp(name, "NDB$ROW_COUNT")){
+ col->setType(NdbDictionary::Column::Bigunsigned);
+ col->m_impl.m_attrId = AttributeHeader::ROW_COUNT;
+ col->m_impl.m_attrSize = 8;
+ col->m_impl.m_arraySize = 1;
+ } else if(!strcmp(name, "NDB$COMMIT_COUNT")){
+ col->setType(NdbDictionary::Column::Bigunsigned);
+ col->m_impl.m_attrId = AttributeHeader::COMMIT_COUNT;
+ col->m_impl.m_attrSize = 8;
+ col->m_impl.m_arraySize = 1;
+ } else if(!strcmp(name, "NDB$ROW_SIZE")){
+ col->setType(NdbDictionary::Column::Unsigned);
+ col->m_impl.m_attrId = AttributeHeader::ROW_SIZE;
+ col->m_impl.m_attrSize = 4;
+ col->m_impl.m_arraySize = 1;
+ } else if(!strcmp(name, "NDB$RANGE_NO")){
+ col->setType(NdbDictionary::Column::Unsigned);
+ col->m_impl.m_attrId = AttributeHeader::RANGE_NO;
+ col->m_impl.m_attrSize = 4;
+ col->m_impl.m_arraySize = 1;
+ } else {
+ abort();
+ }
+ return col;
+}
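+
+/*
+ * The pseudo columns created above (NDB$FRAGMENT, NDB$ROW_COUNT, ...) are
+ * exposed to applications through the constant pointers
+ * NdbDictionary::Column::FRAGMENT and friends, which are initialized in
+ * NdbDictionaryImpl::setTransporter below; they are presumably read like
+ * ordinary columns, e.g. via NdbOperation::getValue().
+ */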
+
+/**
+ * NdbTableImpl
+ */
+
+NdbTableImpl::NdbTableImpl()
+ : NdbDictionary::Table(* this), m_facade(this)
+{
+ init();
+}
+
+NdbTableImpl::NdbTableImpl(NdbDictionary::Table & f)
+ : NdbDictionary::Table(* this), m_facade(&f)
+{
+ init();
+}
+
+NdbTableImpl::~NdbTableImpl()
+{
+ if (m_index != 0) {
+ delete m_index;
+ m_index = 0;
+ }
+ for (unsigned i = 0; i < m_columns.size(); i++)
+ delete m_columns[i];
+}
+
+void
+NdbTableImpl::init(){
+ m_changeMask= 0;
+ m_tableId= RNIL;
+ m_primaryTableId= RNIL;
+ m_frm.clear();
+ m_fragmentType= NdbDictionary::Object::DistrKeyHash;
+ m_hashValueMask= 0;
+ m_hashpointerValue= 0;
+ m_logging= true;
+ m_kvalue= 6;
+ m_minLoadFactor= 78;
+ m_maxLoadFactor= 80;
+ m_keyLenInWords= 0;
+ m_fragmentCount= 0;
+ m_dictionary= NULL;
+ m_index= NULL;
+ m_indexType= NdbDictionary::Index::Undefined;
+ m_noOfKeys= 0;
+ m_noOfDistributionKeys= 0;
+ m_noOfBlobs= 0;
+ m_replicaCount= 0;
+}
+
+bool
+NdbTableImpl::equal(const NdbTableImpl& obj) const
+{
+ DBUG_ENTER("NdbTableImpl::equal");
+ if ((m_internalName.c_str() == NULL) ||
+ (strcmp(m_internalName.c_str(), "") == 0) ||
+ (obj.m_internalName.c_str() == NULL) ||
+ (strcmp(obj.m_internalName.c_str(), "") == 0)) {
+ // Shallow equal
+ if(strcmp(getName(), obj.getName()) != 0){
+ DBUG_PRINT("info",("name %s != %s",getName(),obj.getName()));
+ DBUG_RETURN(false);
+ }
+  } else {
+    // Deep equal
+    if(strcmp(m_internalName.c_str(), obj.m_internalName.c_str()) != 0){
+      DBUG_PRINT("info",("m_internalName %s != %s",
+                         m_internalName.c_str(),obj.m_internalName.c_str()));
+      DBUG_RETURN(false);
+    }
+  }
+ if(m_fragmentType != obj.m_fragmentType){
+ DBUG_PRINT("info",("m_fragmentType %d != %d",m_fragmentType,obj.m_fragmentType));
+ DBUG_RETURN(false);
+ }
+ if(m_columns.size() != obj.m_columns.size()){
+ DBUG_PRINT("info",("m_columns.size %d != %d",m_columns.size(),obj.m_columns.size()));
+ DBUG_RETURN(false);
+ }
+
+ for(unsigned i = 0; i<obj.m_columns.size(); i++){
+ if(!m_columns[i]->equal(* obj.m_columns[i])){
+ DBUG_PRINT("info",("m_columns [%d] != [%d]",i,i));
+ DBUG_RETURN(false);
+ }
+ }
+
+ if(m_logging != obj.m_logging){
+ DBUG_PRINT("info",("m_logging %d != %d",m_logging,obj.m_logging));
+ DBUG_RETURN(false);
+ }
+
+ if(m_kvalue != obj.m_kvalue){
+ DBUG_PRINT("info",("m_kvalue %d != %d",m_kvalue,obj.m_kvalue));
+ DBUG_RETURN(false);
+ }
+
+ if(m_minLoadFactor != obj.m_minLoadFactor){
+ DBUG_PRINT("info",("m_minLoadFactor %d != %d",m_minLoadFactor,obj.m_minLoadFactor));
+ DBUG_RETURN(false);
+ }
+
+ if(m_maxLoadFactor != obj.m_maxLoadFactor){
+ DBUG_PRINT("info",("m_maxLoadFactor %d != %d",m_maxLoadFactor,obj.m_maxLoadFactor));
+ DBUG_RETURN(false);
+ }
+
+ DBUG_RETURN(true);
+}
+
+void
+NdbTableImpl::assign(const NdbTableImpl& org)
+{
+ m_tableId = org.m_tableId;
+ m_internalName.assign(org.m_internalName);
+ m_externalName.assign(org.m_externalName);
+ m_newExternalName.assign(org.m_newExternalName);
+ m_frm.assign(org.m_frm.get_data(), org.m_frm.length());
+ m_ng.assign(org.m_ng.get_data(), org.m_ng.length());
+ m_fragmentType = org.m_fragmentType;
+ m_fragmentCount = org.m_fragmentCount;
+
+ for(unsigned i = 0; i<org.m_columns.size(); i++){
+ NdbColumnImpl * col = new NdbColumnImpl();
+ const NdbColumnImpl * iorg = org.m_columns[i];
+ (* col) = (* iorg);
+ m_columns.push_back(col);
+ }
+
+ m_logging = org.m_logging;
+ m_kvalue = org.m_kvalue;
+ m_minLoadFactor = org.m_minLoadFactor;
+ m_maxLoadFactor = org.m_maxLoadFactor;
+
+ if (m_index != 0)
+ delete m_index;
+ m_index = org.m_index;
+
+ m_noOfDistributionKeys = org.m_noOfDistributionKeys;
+ m_noOfKeys = org.m_noOfKeys;
+ m_keyLenInWords = org.m_keyLenInWords;
+ m_noOfBlobs = org.m_noOfBlobs;
+
+ m_version = org.m_version;
+ m_status = org.m_status;
+}
+
+void NdbTableImpl::setName(const char * name)
+{
+ m_newExternalName.assign(name);
+}
+
+const char *
+NdbTableImpl::getName() const
+{
+ if (m_newExternalName.empty())
+ return m_externalName.c_str();
+ else
+ return m_newExternalName.c_str();
+}
+
+
+void
+NdbTableImpl::buildColumnHash(){
+ const Uint32 size = m_columns.size();
+
+ int i;
+ for(i = 31; i >= 0; i--){
+ if(((1 << i) & size) != 0){
+ m_columnHashMask = (1 << (i + 1)) - 1;
+ break;
+ }
+ }
+
+ Vector<Uint32> hashValues;
+ Vector<Vector<Uint32> > chains; chains.fill(size, hashValues);
+ for(i = 0; i< (int) size; i++){
+ Uint32 hv = Hash(m_columns[i]->getName()) & 0xFFFE;
+ Uint32 bucket = hv & m_columnHashMask;
+ bucket = (bucket < size ? bucket : bucket - size);
+ assert(bucket < size);
+ hashValues.push_back(hv);
+ chains[bucket].push_back(i);
+ }
+
+ m_columnHash.clear();
+ Uint32 tmp = 1;
+ m_columnHash.fill((unsigned)size-1, tmp); // Default no chaining
+
+ Uint32 pos = 0; // In overflow vector
+ for(i = 0; i< (int) size; i++){
+ Uint32 sz = chains[i].size();
+ if(sz == 1){
+ Uint32 col = chains[i][0];
+ Uint32 hv = hashValues[col];
+ Uint32 bucket = hv & m_columnHashMask;
+ bucket = (bucket < size ? bucket : bucket - size);
+ m_columnHash[bucket] = (col << 16) | hv | 1;
+ } else if(sz > 1){
+ Uint32 col = chains[i][0];
+ Uint32 hv = hashValues[col];
+ Uint32 bucket = hv & m_columnHashMask;
+ bucket = (bucket < size ? bucket : bucket - size);
+ m_columnHash[bucket] = (sz << 16) | (((size - bucket) + pos) << 1);
+ for(size_t j = 0; j<sz; j++, pos++){
+ Uint32 col = chains[i][j];
+ Uint32 hv = hashValues[col];
+ m_columnHash.push_back((col << 16) | hv);
+ }
+ }
+ }
+
+  m_columnHash.push_back(0); // Terminator entry, for lookups that run past the end of the array
+
+#if 0
+ for(size_t i = 0; i<m_columnHash.size(); i++){
+ Uint32 tmp = m_columnHash[i];
+ int col = -1;
+ if(i < size && (tmp & 1) == 1){
+ col = (tmp >> 16);
+ } else if(i >= size){
+ col = (tmp >> 16);
+ }
+ ndbout_c("m_columnHash[%d] %s = %x",
+ i, col > 0 ? m_columns[col]->getName() : "" , m_columnHash[i]);
+ }
+#endif
+}
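+
+/*
+ * Layout of m_columnHash built above: the first `size` entries are buckets.
+ * A bucket with the low bit set is a single hit encoded as
+ * (column << 16) | hashValue | 1; otherwise it holds (chainLength << 16)
+ * plus a relative offset (shifted left one bit) to its chain in the
+ * overflow area appended after the buckets, terminated by the 0 entry.
+ */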
+
+Uint32
+NdbTableImpl::get_nodes(Uint32 hashValue, const Uint16 ** nodes) const
+{
+ if(m_replicaCount > 0)
+ {
+ Uint32 fragmentId = hashValue & m_hashValueMask;
+ if(fragmentId < m_hashpointerValue)
+ {
+ fragmentId = hashValue & ((m_hashValueMask << 1) + 1);
+ }
+ Uint32 pos = fragmentId * m_replicaCount;
+ if(pos + m_replicaCount <= m_fragments.size())
+ {
+ * nodes = m_fragments.getBase()+pos;
+ return m_replicaCount;
+ }
+ }
+ return 0;
+}
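+
+/*
+ * The fragment lookup above implements linear hashing: with e.g. 6
+ * fragments, m_hashValueMask is 3 and m_hashpointerValue is 2, so hash
+ * values whose low two bits are 0 or 1 are re-hashed with the wider mask 7
+ * and may land on fragments 4 and 5.
+ */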
+
+/**
+ * NdbIndexImpl
+ */
+
+NdbIndexImpl::NdbIndexImpl() :
+ NdbDictionary::Index(* this),
+ m_facade(this)
+{
+ init();
+}
+
+NdbIndexImpl::NdbIndexImpl(NdbDictionary::Index & f) :
+ NdbDictionary::Index(* this),
+ m_facade(&f)
+{
+ init();
+}
+
+void NdbIndexImpl::init()
+{
+ m_indexId= RNIL;
+ m_type= NdbDictionary::Index::Undefined;
+ m_logging= true;
+ m_table= NULL;
+}
+
+NdbIndexImpl::~NdbIndexImpl(){
+ for (unsigned i = 0; i < m_columns.size(); i++)
+ delete m_columns[i];
+}
+
+void NdbIndexImpl::setName(const char * name)
+{
+ m_externalName.assign(name);
+}
+
+const char *
+NdbIndexImpl::getName() const
+{
+ return m_externalName.c_str();
+}
+
+void
+NdbIndexImpl::setTable(const char * table)
+{
+ m_tableName.assign(table);
+}
+
+const char *
+NdbIndexImpl::getTable() const
+{
+ return m_tableName.c_str();
+}
+
+const NdbTableImpl *
+NdbIndexImpl::getIndexTable() const
+{
+ return m_table;
+}
+
+/**
+ * NdbEventImpl
+ */
+
+NdbEventImpl::NdbEventImpl() :
+ NdbDictionary::Event(* this),
+ m_facade(this)
+{
+ init();
+}
+
+NdbEventImpl::NdbEventImpl(NdbDictionary::Event & f) :
+ NdbDictionary::Event(* this),
+ m_facade(&f)
+{
+ init();
+}
+
+void NdbEventImpl::init()
+{
+ m_eventId= RNIL;
+ m_eventKey= RNIL;
+ m_tableId= RNIL;
+ mi_type= 0;
+ m_dur= NdbDictionary::Event::ED_UNDEFINED;
+ m_tableImpl= NULL;
+ m_bufferId= RNIL;
+ eventOp= NULL;
+}
+
+NdbEventImpl::~NdbEventImpl()
+{
+ for (unsigned i = 0; i < m_columns.size(); i++)
+ delete m_columns[i];
+}
+
+void NdbEventImpl::setName(const char * name)
+{
+ m_externalName.assign(name);
+}
+
+const char *NdbEventImpl::getName() const
+{
+ return m_externalName.c_str();
+}
+
+void
+NdbEventImpl::setTable(const NdbDictionary::Table& table)
+{
+ m_tableImpl= &NdbTableImpl::getImpl(table);
+ m_tableName.assign(m_tableImpl->getName());
+}
+
+void
+NdbEventImpl::setTable(const char * table)
+{
+ m_tableName.assign(table);
+}
+
+const char *
+NdbEventImpl::getTableName() const
+{
+ return m_tableName.c_str();
+}
+
+void
+NdbEventImpl::addTableEvent(const NdbDictionary::Event::TableEvent t = NdbDictionary::Event::TE_ALL)
+{
+ switch (t) {
+ case NdbDictionary::Event::TE_INSERT : mi_type |= 1; break;
+ case NdbDictionary::Event::TE_DELETE : mi_type |= 2; break;
+ case NdbDictionary::Event::TE_UPDATE : mi_type |= 4; break;
+ default: mi_type = 4 | 2 | 1; // all types
+ }
+}
+
+void
+NdbEventImpl::setDurability(NdbDictionary::Event::EventDurability d)
+{
+ m_dur = d;
+}
+
+NdbDictionary::Event::EventDurability
+NdbEventImpl::getDurability() const
+{
+ return m_dur;
+}
+
+int NdbEventImpl::getNoOfEventColumns() const
+{
+ return m_attrIds.size() + m_columns.size();
+}
+
+/**
+ * NdbDictionaryImpl
+ */
+
+NdbDictionaryImpl::NdbDictionaryImpl(Ndb &ndb)
+ : NdbDictionary::Dictionary(* this),
+ m_facade(this),
+ m_receiver(m_error),
+ m_ndb(ndb)
+{
+ m_globalHash = 0;
+ m_local_table_data_size= 0;
+}
+
+NdbDictionaryImpl::NdbDictionaryImpl(Ndb &ndb,
+ NdbDictionary::Dictionary & f)
+ : NdbDictionary::Dictionary(* this),
+ m_facade(&f),
+ m_receiver(m_error),
+ m_ndb(ndb)
+{
+ m_globalHash = 0;
+ m_local_table_data_size= 0;
+}
+
+static int f_dictionary_count = 0;
+
+NdbDictionaryImpl::~NdbDictionaryImpl()
+{
+ NdbElement_t<Ndb_local_table_info> * curr = m_localHash.m_tableHash.getNext(0);
+ if(m_globalHash){
+ while(curr != 0){
+ m_globalHash->lock();
+ m_globalHash->release(curr->theData->m_table_impl);
+ Ndb_local_table_info::destroy(curr->theData);
+ m_globalHash->unlock();
+
+ curr = m_localHash.m_tableHash.getNext(curr);
+ }
+
+ m_globalHash->lock();
+ if(--f_dictionary_count == 0){
+ delete NdbDictionary::Column::FRAGMENT;
+ delete NdbDictionary::Column::FRAGMENT_MEMORY;
+ delete NdbDictionary::Column::ROW_COUNT;
+ delete NdbDictionary::Column::COMMIT_COUNT;
+ delete NdbDictionary::Column::ROW_SIZE;
+ delete NdbDictionary::Column::RANGE_NO;
+ NdbDictionary::Column::FRAGMENT= 0;
+ NdbDictionary::Column::FRAGMENT_MEMORY= 0;
+ NdbDictionary::Column::ROW_COUNT= 0;
+ NdbDictionary::Column::COMMIT_COUNT= 0;
+ NdbDictionary::Column::ROW_SIZE= 0;
+ NdbDictionary::Column::RANGE_NO= 0;
+ }
+ m_globalHash->unlock();
+ } else {
+ assert(curr == 0);
+ }
+}
+
+Ndb_local_table_info *
+NdbDictionaryImpl::fetchGlobalTableImpl(const BaseString& internalTableName)
+{
+ NdbTableImpl *impl;
+
+ m_globalHash->lock();
+ impl = m_globalHash->get(internalTableName.c_str());
+ m_globalHash->unlock();
+
+ if (impl == 0){
+ impl = m_receiver.getTable(internalTableName,
+ m_ndb.usingFullyQualifiedNames());
+ m_globalHash->lock();
+ m_globalHash->put(internalTableName.c_str(), impl);
+ m_globalHash->unlock();
+
+ if(impl == 0){
+ return 0;
+ }
+ }
+
+ Ndb_local_table_info *info=
+ Ndb_local_table_info::create(impl, m_local_table_data_size);
+
+ m_localHash.put(internalTableName.c_str(), info);
+
+ m_ndb.theFirstTupleId[impl->getTableId()] = ~0;
+ m_ndb.theLastTupleId[impl->getTableId()] = ~0;
+
+ return info;
+}
+
+#if 0
+bool
+NdbDictionaryImpl::setTransporter(class TransporterFacade * tf)
+{
+ if(tf != 0){
+ m_globalHash = &tf->m_globalDictCache;
+ return m_receiver.setTransporter(tf);
+ }
+
+ return false;
+}
+#endif
+
+bool
+NdbDictionaryImpl::setTransporter(class Ndb* ndb,
+ class TransporterFacade * tf)
+{
+ m_globalHash = &tf->m_globalDictCache;
+ if(m_receiver.setTransporter(ndb, tf)){
+ m_globalHash->lock();
+ if(f_dictionary_count++ == 0){
+ NdbDictionary::Column::FRAGMENT=
+ NdbColumnImpl::create_pseudo("NDB$FRAGMENT");
+ NdbDictionary::Column::FRAGMENT_MEMORY=
+ NdbColumnImpl::create_pseudo("NDB$FRAGMENT_MEMORY");
+ NdbDictionary::Column::ROW_COUNT=
+ NdbColumnImpl::create_pseudo("NDB$ROW_COUNT");
+ NdbDictionary::Column::COMMIT_COUNT=
+ NdbColumnImpl::create_pseudo("NDB$COMMIT_COUNT");
+ NdbDictionary::Column::ROW_SIZE=
+ NdbColumnImpl::create_pseudo("NDB$ROW_SIZE");
+ NdbDictionary::Column::RANGE_NO=
+ NdbColumnImpl::create_pseudo("NDB$RANGE_NO");
+ }
+ m_globalHash->unlock();
+ return true;
+ }
+ return false;
+}
+
+NdbTableImpl *
+NdbDictionaryImpl::getIndexTable(NdbIndexImpl * index,
+ NdbTableImpl * table)
+{
+ const BaseString internalName(
+ m_ndb.internalize_index_name(table, index->getName()));
+ return getTable(m_ndb.externalizeTableName(internalName.c_str()));
+}
+
+#if 0
+bool
+NdbDictInterface::setTransporter(class TransporterFacade * tf)
+{
+ if(tf == 0)
+ return false;
+
+ Guard g(tf->theMutexPtr);
+
+ m_blockNumber = tf->open(this,
+ execSignal,
+ execNodeStatus);
+
+ if ( m_blockNumber == -1 ) {
+ m_error.code= 4105;
+ return false; // no more free blocknumbers
+ }//if
+ Uint32 theNode = tf->ownId();
+ m_reference = numberToRef(m_blockNumber, theNode);
+ m_transporter = tf;
+ m_waiter.m_mutex = tf->theMutexPtr;
+
+ return true;
+}
+#endif
+
+bool
+NdbDictInterface::setTransporter(class Ndb* ndb, class TransporterFacade * tf)
+{
+ m_reference = ndb->getReference();
+ m_transporter = tf;
+ m_waiter.m_mutex = tf->theMutexPtr;
+
+ return true;
+}
+
+NdbDictInterface::~NdbDictInterface()
+{
+}
+
+void
+NdbDictInterface::execSignal(void* dictImpl,
+ class NdbApiSignal* signal,
+ class LinearSectionPtr ptr[3])
+{
+ NdbDictInterface * tmp = (NdbDictInterface*)dictImpl;
+
+ const Uint32 gsn = signal->readSignalNumber();
+ switch(gsn){
+ case GSN_GET_TABINFOREF:
+ tmp->execGET_TABINFO_REF(signal, ptr);
+ break;
+ case GSN_GET_TABINFO_CONF:
+ tmp->execGET_TABINFO_CONF(signal, ptr);
+ break;
+ case GSN_CREATE_TABLE_REF:
+ tmp->execCREATE_TABLE_REF(signal, ptr);
+ break;
+ case GSN_CREATE_TABLE_CONF:
+ tmp->execCREATE_TABLE_CONF(signal, ptr);
+ break;
+ case GSN_DROP_TABLE_REF:
+ tmp->execDROP_TABLE_REF(signal, ptr);
+ break;
+ case GSN_DROP_TABLE_CONF:
+ tmp->execDROP_TABLE_CONF(signal, ptr);
+ break;
+ case GSN_ALTER_TABLE_REF:
+ tmp->execALTER_TABLE_REF(signal, ptr);
+ break;
+ case GSN_ALTER_TABLE_CONF:
+ tmp->execALTER_TABLE_CONF(signal, ptr);
+ break;
+ case GSN_CREATE_INDX_REF:
+ tmp->execCREATE_INDX_REF(signal, ptr);
+ break;
+ case GSN_CREATE_INDX_CONF:
+ tmp->execCREATE_INDX_CONF(signal, ptr);
+ break;
+ case GSN_DROP_INDX_REF:
+ tmp->execDROP_INDX_REF(signal, ptr);
+ break;
+ case GSN_DROP_INDX_CONF:
+ tmp->execDROP_INDX_CONF(signal, ptr);
+ break;
+ case GSN_CREATE_EVNT_REF:
+ tmp->execCREATE_EVNT_REF(signal, ptr);
+ break;
+ case GSN_CREATE_EVNT_CONF:
+ tmp->execCREATE_EVNT_CONF(signal, ptr);
+ break;
+ case GSN_SUB_START_CONF:
+ tmp->execSUB_START_CONF(signal, ptr);
+ break;
+ case GSN_SUB_START_REF:
+ tmp->execSUB_START_REF(signal, ptr);
+ break;
+ case GSN_SUB_TABLE_DATA:
+ tmp->execSUB_TABLE_DATA(signal, ptr);
+ break;
+ case GSN_SUB_GCP_COMPLETE_REP:
+ tmp->execSUB_GCP_COMPLETE_REP(signal, ptr);
+ break;
+ case GSN_SUB_STOP_CONF:
+ tmp->execSUB_STOP_CONF(signal, ptr);
+ break;
+ case GSN_SUB_STOP_REF:
+ tmp->execSUB_STOP_REF(signal, ptr);
+ break;
+ case GSN_DROP_EVNT_REF:
+ tmp->execDROP_EVNT_REF(signal, ptr);
+ break;
+ case GSN_DROP_EVNT_CONF:
+ tmp->execDROP_EVNT_CONF(signal, ptr);
+ break;
+ case GSN_LIST_TABLES_CONF:
+ tmp->execLIST_TABLES_CONF(signal, ptr);
+ break;
+ default:
+ abort();
+ }
+}
+
+void
+NdbDictInterface::execNodeStatus(void* dictImpl, Uint32 aNode,
+ bool alive, bool nfCompleted)
+{
+ NdbDictInterface * tmp = (NdbDictInterface*)dictImpl;
+
+ if(!alive && !nfCompleted){
+ return;
+ }
+
+ if (!alive && nfCompleted){
+ tmp->m_waiter.nodeFail(aNode);
+ }
+}
+
+int
+NdbDictInterface::dictSignal(NdbApiSignal* signal,
+ LinearSectionPtr ptr[3],int noLSP,
+ const int useMasterNodeId,
+ const Uint32 RETRIES,
+ const WaitSignalType wst,
+ const int theWait,
+ const int *errcodes,
+ const int noerrcodes,
+ const int temporaryMask)
+{
+ DBUG_ENTER("NdbDictInterface::dictSignal");
+ DBUG_PRINT("enter", ("useMasterNodeId: %d", useMasterNodeId));
+ for(Uint32 i = 0; i<RETRIES; i++){
+ //if (useMasterNodeId == 0)
+ m_buffer.clear();
+
+ // Protected area
+ m_transporter->lock_mutex();
+ Uint32 aNodeId;
+ if (useMasterNodeId) {
+ if ((m_masterNodeId == 0) ||
+ (!m_transporter->get_node_alive(m_masterNodeId))) {
+ m_masterNodeId = m_transporter->get_an_alive_node();
+ }//if
+ aNodeId = m_masterNodeId;
+ } else {
+ aNodeId = m_transporter->get_an_alive_node();
+ }
+ if(aNodeId == 0){
+ m_error.code= 4009;
+ m_transporter->unlock_mutex();
+ DBUG_RETURN(-1);
+ }
+ {
+ int r;
+ if (ptr) {
+#ifdef EVENT_DEBUG
+ printf("Long signal %d ptr", noLSP);
+ for (int q=0;q<noLSP;q++) {
+ printf(" sz %d", ptr[q].sz);
+ }
+ printf("\n");
+#endif
+ r = m_transporter->sendFragmentedSignal(signal, aNodeId, ptr, noLSP);
+ } else {
+#ifdef EVENT_DEBUG
+ printf("Short signal\n");
+#endif
+ r = m_transporter->sendSignal(signal, aNodeId);
+ }
+ if(r != 0){
+ m_transporter->unlock_mutex();
+ continue;
+ }
+ }
+
+ m_error.code= 0;
+
+ m_waiter.m_node = aNodeId;
+ m_waiter.m_state = wst;
+
+ m_waiter.wait(theWait);
+ m_transporter->unlock_mutex();
+ // End of Protected area
+
+ if(m_waiter.m_state == NO_WAIT && m_error.code == 0){
+ // Normal return
+ DBUG_RETURN(0);
+ }
+
+ /**
+ * Handle error codes
+ */
+ if(m_waiter.m_state == WAIT_NODE_FAILURE)
+ continue;
+
+ if(m_waiter.m_state == WST_WAIT_TIMEOUT)
+ {
+ m_error.code = 4008;
+ DBUG_RETURN(-1);
+ }
+
+ if ( (temporaryMask & m_error.code) != 0 ) {
+ continue;
+ }
+ if (errcodes) {
+ int doContinue = 0;
+ for (int j=0; j < noerrcodes; j++)
+ if(m_error.code == errcodes[j]) {
+ doContinue = 1;
+ break;
+ }
+ if (doContinue)
+ continue;
+ }
+
+ DBUG_RETURN(-1);
+ }
+ DBUG_RETURN(-1);
+}
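+
+/*
+ * dictSignal() above is the common send/wait/retry loop: pick a node (the
+ * current master when useMasterNodeId is set, otherwise any alive node),
+ * send the signal, block on m_waiter, then retry on node failure, on any
+ * error covered by temporaryMask, or on any code listed in errcodes; a
+ * wait timeout maps to error 4008 and gives up immediately.
+ */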
+#if 0
+/*
+ Get dictionary information for a table using table id as reference
+
+ DESCRIPTION
+ Sends a GET_TABINFOREQ signal containing the table id
+ */
+NdbTableImpl *
+NdbDictInterface::getTable(int tableId, bool fullyQualifiedNames)
+{
+ NdbApiSignal tSignal(m_reference);
+ GetTabInfoReq* const req = CAST_PTR(GetTabInfoReq, tSignal.getDataPtrSend());
+
+ req->senderRef = m_reference;
+ req->senderData = 0;
+ req->requestType =
+ GetTabInfoReq::RequestById | GetTabInfoReq::LongSignalConf;
+ req->tableId = tableId;
+ tSignal.theReceiversBlockNumber = DBDICT;
+ tSignal.theVerId_signalNumber = GSN_GET_TABINFOREQ;
+ tSignal.theLength = GetTabInfoReq::SignalLength;
+
+ return getTable(&tSignal, 0, 0, fullyQualifiedNames);
+}
+#endif
+
+
+/*
+ Get dictionary information for a table using table name as the reference
+
+ DESCRIPTION
+ Send GET_TABINFOREQ signal with the table name in the first
+ long section part
+*/
+
+NdbTableImpl *
+NdbDictInterface::getTable(const BaseString& name, bool fullyQualifiedNames)
+{
+ NdbApiSignal tSignal(m_reference);
+ GetTabInfoReq* const req = CAST_PTR(GetTabInfoReq, tSignal.getDataPtrSend());
+
+ const Uint32 namelen= name.length() + 1; // NULL terminated
+ const Uint32 namelen_words= (namelen + 3) >> 2; // Size in words
+
+ req->senderRef= m_reference;
+ req->senderData= 0;
+ req->requestType=
+ GetTabInfoReq::RequestByName | GetTabInfoReq::LongSignalConf;
+ req->tableNameLen= namelen;
+ tSignal.theReceiversBlockNumber= DBDICT;
+ tSignal.theVerId_signalNumber= GSN_GET_TABINFOREQ;
+ tSignal.theLength= GetTabInfoReq::SignalLength;
+
+ // Copy name to m_buffer to get a word sized buffer
+ m_buffer.clear();
+ m_buffer.grow(namelen_words*4);
+ m_buffer.append(name.c_str(), namelen);
+
+ LinearSectionPtr ptr[1];
+ ptr[0].p= (Uint32*)m_buffer.get_data();
+ ptr[0].sz= namelen_words;
+
+ return getTable(&tSignal, ptr, 1, fullyQualifiedNames);
+}
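+
+/*
+ * The request above sends the NUL-terminated table name, padded to a whole
+ * number of 32-bit words, as a single long signal section to DBDICT; the
+ * reply arrives as GET_TABINFO_CONF fragments that are reassembled into
+ * m_buffer by execGET_TABINFO_CONF below.
+ */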
+
+
+NdbTableImpl *
+NdbDictInterface::getTable(class NdbApiSignal * signal,
+ LinearSectionPtr ptr[3],
+ Uint32 noOfSections, bool fullyQualifiedNames)
+{
+ int errCodes[] = {GetTabInfoRef::Busy };
+
+ int r = dictSignal(signal,ptr,noOfSections,
+ 0/*do not use masternode id*/,
+ 100,
+ WAIT_GET_TAB_INFO_REQ,
+ WAITFOR_RESPONSE_TIMEOUT,
+ errCodes, 1);
+ if (r) return 0;
+
+ NdbTableImpl * rt = 0;
+ m_error.code= parseTableInfo(&rt,
+ (Uint32*)m_buffer.get_data(),
+ m_buffer.length() / 4, fullyQualifiedNames);
+  if (rt)                      // parseTableInfo() may fail and leave rt == 0
+    rt->buildColumnHash();
+ return rt;
+}
+
+void
+NdbDictInterface::execGET_TABINFO_CONF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ const GetTabInfoConf* conf = CAST_CONSTPTR(GetTabInfoConf, signal->getDataPtr());
+ if(signal->isFirstFragment()){
+ m_fragmentId = signal->getFragmentId();
+ m_buffer.grow(4 * conf->totalLen);
+ } else {
+ if(m_fragmentId != signal->getFragmentId()){
+ abort();
+ }
+ }
+
+ const Uint32 i = GetTabInfoConf::DICT_TAB_INFO;
+ m_buffer.append(ptr[i].p, 4 * ptr[i].sz);
+
+ if(!signal->isLastFragment()){
+ return;
+ }
+
+ m_waiter.signal(NO_WAIT);
+}
+
+void
+NdbDictInterface::execGET_TABINFO_REF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ const GetTabInfoRef* ref = CAST_CONSTPTR(GetTabInfoRef, signal->getDataPtr());
+
+ m_error.code= ref->errorCode;
+ m_waiter.signal(NO_WAIT);
+}
+
+/*****************************************************************
+ * Pack/Unpack tables
+ */
+struct ApiKernelMapping {
+ Int32 kernelConstant;
+ Int32 apiConstant;
+};
+
+Uint32
+getApiConstant(Int32 kernelConstant, const ApiKernelMapping map[], Uint32 def)
+{
+ int i = 0;
+ while(map[i].kernelConstant != kernelConstant){
+ if(map[i].kernelConstant == -1 &&
+ map[i].apiConstant == -1){
+ return def;
+ }
+ i++;
+ }
+ return map[i].apiConstant;
+}
+
+Uint32
+getKernelConstant(Int32 apiConstant, const ApiKernelMapping map[], Uint32 def)
+{
+ int i = 0;
+ while(map[i].apiConstant != apiConstant){
+ if(map[i].kernelConstant == -1 &&
+ map[i].apiConstant == -1){
+ return def;
+ }
+ i++;
+ }
+ return map[i].kernelConstant;
+}
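+
+/*
+ * The mapping tables below are scanned linearly by the two helpers above;
+ * the terminating { -1, -1 } entry makes an unknown constant fall back to
+ * the supplied default, e.g. FragUndefined for an unknown fragment type.
+ */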
+
+static const
+ApiKernelMapping
+fragmentTypeMapping[] = {
+ { DictTabInfo::AllNodesSmallTable, NdbDictionary::Object::FragAllSmall },
+ { DictTabInfo::AllNodesMediumTable, NdbDictionary::Object::FragAllMedium },
+ { DictTabInfo::AllNodesLargeTable, NdbDictionary::Object::FragAllLarge },
+ { DictTabInfo::SingleFragment, NdbDictionary::Object::FragSingle },
+ { DictTabInfo::DistrKeyHash, NdbDictionary::Object::DistrKeyHash },
+ { DictTabInfo::DistrKeyLin, NdbDictionary::Object::DistrKeyLin },
+ { DictTabInfo::UserDefined, NdbDictionary::Object::UserDefined },
+ { -1, -1 }
+};
+
+static const
+ApiKernelMapping
+objectTypeMapping[] = {
+ { DictTabInfo::SystemTable, NdbDictionary::Object::SystemTable },
+ { DictTabInfo::UserTable, NdbDictionary::Object::UserTable },
+ { DictTabInfo::UniqueHashIndex, NdbDictionary::Object::UniqueHashIndex },
+ { DictTabInfo::OrderedIndex, NdbDictionary::Object::OrderedIndex },
+ { DictTabInfo::HashIndexTrigger, NdbDictionary::Object::HashIndexTrigger },
+ { DictTabInfo::IndexTrigger, NdbDictionary::Object::IndexTrigger },
+ { DictTabInfo::SubscriptionTrigger,NdbDictionary::Object::SubscriptionTrigger },
+ { DictTabInfo::ReadOnlyConstraint ,NdbDictionary::Object::ReadOnlyConstraint },
+ { -1, -1 }
+};
+
+static const
+ApiKernelMapping
+objectStateMapping[] = {
+ { DictTabInfo::StateOffline, NdbDictionary::Object::StateOffline },
+ { DictTabInfo::StateBuilding, NdbDictionary::Object::StateBuilding },
+ { DictTabInfo::StateDropping, NdbDictionary::Object::StateDropping },
+ { DictTabInfo::StateOnline, NdbDictionary::Object::StateOnline },
+ { DictTabInfo::StateBroken, NdbDictionary::Object::StateBroken },
+ { -1, -1 }
+};
+
+static const
+ApiKernelMapping
+objectStoreMapping[] = {
+ { DictTabInfo::StoreTemporary, NdbDictionary::Object::StoreTemporary },
+ { DictTabInfo::StorePermanent, NdbDictionary::Object::StorePermanent },
+ { -1, -1 }
+};
+
+static const
+ApiKernelMapping
+indexTypeMapping[] = {
+ { DictTabInfo::UniqueHashIndex, NdbDictionary::Index::UniqueHashIndex },
+ { DictTabInfo::OrderedIndex, NdbDictionary::Index::OrderedIndex },
+ { -1, -1 }
+};
+
+int
+NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
+ const Uint32 * data, Uint32 len,
+ bool fullyQualifiedNames)
+{
+ DBUG_ENTER("NdbDictInterface::parseTableInfo");
+
+ SimplePropertiesLinearReader it(data, len);
+ DictTabInfo::Table tableDesc; tableDesc.init();
+ SimpleProperties::UnpackStatus s;
+ s = SimpleProperties::unpack(it, &tableDesc,
+ DictTabInfo::TableMapping,
+ DictTabInfo::TableMappingSize,
+ true, true);
+
+ if(s != SimpleProperties::Break){
+ DBUG_RETURN(703);
+ }
+ const char * internalName = tableDesc.TableName;
+ const char * externalName = Ndb::externalizeTableName(internalName, fullyQualifiedNames);
+
+ NdbTableImpl * impl = new NdbTableImpl();
+ impl->m_tableId = tableDesc.TableId;
+ impl->m_version = tableDesc.TableVersion;
+ impl->m_status = NdbDictionary::Object::Retrieved;
+ impl->m_internalName.assign(internalName);
+ impl->m_externalName.assign(externalName);
+
+ impl->m_frm.assign(tableDesc.FrmData, tableDesc.FrmLen);
+ impl->m_ng.assign(tableDesc.FragmentData, tableDesc.FragmentDataLen);
+
+ impl->m_fragmentType = (NdbDictionary::Object::FragmentType)
+ getApiConstant(tableDesc.FragmentType,
+ fragmentTypeMapping,
+ (Uint32)NdbDictionary::Object::FragUndefined);
+
+ impl->m_logging = tableDesc.TableLoggedFlag;
+ impl->m_kvalue = tableDesc.TableKValue;
+ impl->m_minLoadFactor = tableDesc.MinLoadFactor;
+ impl->m_maxLoadFactor = tableDesc.MaxLoadFactor;
+
+ impl->m_indexType = (NdbDictionary::Index::Type)
+ getApiConstant(tableDesc.TableType,
+ indexTypeMapping,
+ NdbDictionary::Index::Undefined);
+
+  if(impl->m_indexType != NdbDictionary::Index::Undefined){
+    const char * externalPrimary =
+      Ndb::externalizeTableName(tableDesc.PrimaryTable, fullyQualifiedNames);
+    impl->m_primaryTable.assign(externalPrimary);
+  }
+
+ Uint32 keyInfoPos = 0;
+ Uint32 keyCount = 0;
+ Uint32 blobCount = 0;
+ Uint32 distKeys = 0;
+
+ Uint32 i;
+ for(i = 0; i < tableDesc.NoOfAttributes; i++) {
+ DictTabInfo::Attribute attrDesc; attrDesc.init();
+ s = SimpleProperties::unpack(it,
+ &attrDesc,
+ DictTabInfo::AttributeMapping,
+ DictTabInfo::AttributeMappingSize,
+ true, true);
+ if(s != SimpleProperties::Break){
+ delete impl;
+ DBUG_RETURN(703);
+ }
+
+ NdbColumnImpl * col = new NdbColumnImpl();
+ col->m_attrId = attrDesc.AttributeId;
+ col->setName(attrDesc.AttributeName);
+
+ // check type and compute attribute size and array size
+ if (! attrDesc.translateExtType()) {
+ delete impl;
+ DBUG_RETURN(703);
+ }
+ col->m_type = (NdbDictionary::Column::Type)attrDesc.AttributeExtType;
+ col->m_precision = (attrDesc.AttributeExtPrecision & 0xFFFF);
+ col->m_scale = attrDesc.AttributeExtScale;
+ col->m_length = attrDesc.AttributeExtLength;
+ // charset in upper half of precision
+ unsigned cs_number = (attrDesc.AttributeExtPrecision >> 16);
+ // charset is defined exactly for char types
+ if (col->getCharType() != (cs_number != 0)) {
+ delete impl;
+ DBUG_RETURN(703);
+ }
+ if (col->getCharType()) {
+ col->m_cs = get_charset(cs_number, MYF(0));
+ if (col->m_cs == NULL) {
+ delete impl;
+ DBUG_RETURN(743);
+ }
+ }
+ col->m_attrSize = (1 << attrDesc.AttributeSize) / 8;
+ col->m_arraySize = attrDesc.AttributeArraySize;
+ if(attrDesc.AttributeSize == 0)
+ {
+ col->m_attrSize = 4;
+ col->m_arraySize = (attrDesc.AttributeArraySize + 31) >> 5;
+ }
+
+ col->m_pk = attrDesc.AttributeKeyFlag;
+ col->m_distributionKey = attrDesc.AttributeDKey;
+ col->m_nullable = attrDesc.AttributeNullableFlag;
+ col->m_autoIncrement = (attrDesc.AttributeAutoIncrement ? true : false);
+ col->m_autoIncrementInitialValue = ~0;
+ col->m_defaultValue.assign(attrDesc.AttributeDefaultValue);
+
+ if(attrDesc.AttributeKeyFlag){
+ col->m_keyInfoPos = keyInfoPos + 1;
+ keyInfoPos += ((col->m_attrSize * col->m_arraySize + 3) / 4);
+ keyCount++;
+
+ if(attrDesc.AttributeDKey)
+ distKeys++;
+ } else {
+ col->m_keyInfoPos = 0;
+ }
+ if (col->getBlobType())
+ blobCount++;
+ NdbColumnImpl * null = 0;
+ impl->m_columns.fill(attrDesc.AttributeId, null);
+ if(impl->m_columns[attrDesc.AttributeId] != 0){
+ delete col;
+ delete impl;
+ DBUG_RETURN(703);
+ }
+ impl->m_columns[attrDesc.AttributeId] = col;
+ it.next();
+ }
+
+ impl->m_noOfKeys = keyCount;
+ impl->m_keyLenInWords = keyInfoPos;
+ impl->m_noOfBlobs = blobCount;
+ impl->m_noOfDistributionKeys = distKeys;
+
+ if(tableDesc.FragmentDataLen > 0)
+ {
+ Uint16 replicaCount = tableDesc.FragmentData[0];
+ Uint16 fragCount = tableDesc.FragmentData[1];
+
+ impl->m_replicaCount = replicaCount;
+ impl->m_fragmentCount = fragCount;
+ DBUG_PRINT("info", ("replicaCount=%x , fragCount=%x",replicaCount,fragCount));
+ for(i = 0; i<(fragCount*replicaCount); i++)
+ {
+ impl->m_fragments.push_back(tableDesc.FragmentData[i+2]);
+ }
+
+ Uint32 topBit = (1 << 31);
+ for(; topBit && !(fragCount & topBit); ){
+ topBit >>= 1;
+ }
+ impl->m_hashValueMask = topBit - 1;
+ impl->m_hashpointerValue = fragCount - (impl->m_hashValueMask + 1);
+ }
+ else
+ {
+ impl->m_fragmentCount = tableDesc.FragmentCount;
+ impl->m_replicaCount = 0;
+ impl->m_hashValueMask = 0;
+ impl->m_hashpointerValue = 0;
+ }
+
+ if(distKeys == 0)
+ {
+ for(i = 0; i < tableDesc.NoOfAttributes; i++)
+ {
+ if(impl->m_columns[i]->getPrimaryKey())
+ impl->m_columns[i]->m_distributionKey = true;
+ }
+ }
+
+ * ret = impl;
+
+ DBUG_RETURN(0);
+}
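+
+/*
+ * FragmentData layout consumed above: element 0 is the replica count,
+ * element 1 the fragment count, followed by fragmentCount * replicaCount
+ * node ids; with 2 replicas and 4 fragments the array holds 2 + 8 = 10
+ * entries.
+ */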
+
+/*****************************************************************
+ * Create table and alter table
+ */
+int
+NdbDictionaryImpl::createTable(NdbTableImpl &t)
+{
+ DBUG_ENTER("NdbDictionaryImpl::createTable");
+ if (m_receiver.createTable(m_ndb, t) != 0)
+ {
+ DBUG_RETURN(-1);
+ }
+ if (t.m_noOfBlobs == 0)
+ {
+ DBUG_RETURN(0);
+ }
+ // update table def from DICT
+ Ndb_local_table_info *info=
+ get_local_table_info(t.m_internalName,false);
+ if (info == NULL) {
+ m_error.code= 709;
+ DBUG_RETURN(-1);
+ }
+ if (createBlobTables(*(info->m_table_impl)) != 0) {
+ int save_code = m_error.code;
+ (void)dropTable(t);
+ m_error.code= save_code;
+ DBUG_RETURN(-1);
+ }
+ DBUG_RETURN(0);
+}
+
+int
+NdbDictionaryImpl::createBlobTables(NdbTableImpl &t)
+{
+ DBUG_ENTER("NdbDictionaryImpl::createBlobTables");
+ for (unsigned i = 0; i < t.m_columns.size(); i++) {
+ NdbColumnImpl & c = *t.m_columns[i];
+ if (! c.getBlobType() || c.getPartSize() == 0)
+ continue;
+ NdbTableImpl bt;
+ NdbBlob::getBlobTable(bt, &t, &c);
+ if (createTable(bt) != 0)
+ {
+ DBUG_RETURN(-1);
+ }
+ // Save BLOB table handle
+ Ndb_local_table_info *info=
+ get_local_table_info(bt.m_internalName, false);
+ if (info == 0)
+ {
+ DBUG_RETURN(-1);
+ }
+ c.m_blobTable = info->m_table_impl;
+ }
+ DBUG_RETURN(0);
+}
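+
+/*
+ * One auxiliary parts table is created above for each blob/text column
+ * with a non-zero part size; its cached NdbTableImpl is stored in the
+ * column's m_blobTable so later blob handling can find it without another
+ * dictionary lookup.
+ */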
+
+int
+NdbDictionaryImpl::addBlobTables(NdbTableImpl &t)
+{
+ unsigned n= t.m_noOfBlobs;
+ DBUG_ENTER("NdbDictioanryImpl::addBlobTables");
+  // optimized for the blob column being the last one,
+  // and for not looking at more than one column unless necessary
+ for (unsigned i = t.m_columns.size(); i > 0 && n > 0;) {
+ i--;
+ NdbColumnImpl & c = *t.m_columns[i];
+ if (! c.getBlobType() || c.getPartSize() == 0)
+ continue;
+ n--;
+ char btname[NdbBlobImpl::BlobTableNameSize];
+ NdbBlob::getBlobTableName(btname, &t, &c);
+ // Save BLOB table handle
+ NdbTableImpl * cachedBlobTable = getTable(btname);
+ if (cachedBlobTable == 0) {
+ DBUG_RETURN(-1);
+ }
+ c.m_blobTable = cachedBlobTable;
+ }
+ DBUG_RETURN(0);
+}
+
+int
+NdbDictInterface::createTable(Ndb & ndb,
+ NdbTableImpl & impl)
+{
+ DBUG_ENTER("NdbDictInterface::createTable");
+ DBUG_RETURN(createOrAlterTable(ndb, impl, false));
+}
+
+int NdbDictionaryImpl::alterTable(NdbTableImpl &impl)
+{
+ BaseString internalName(impl.m_internalName);
+ const char * originalInternalName = internalName.c_str();
+
+ DBUG_ENTER("NdbDictionaryImpl::alterTable");
+ if(!get_local_table_info(internalName, false)){
+ m_error.code= 709;
+ DBUG_RETURN(-1);
+ }
+ // Alter the table
+ int ret = m_receiver.alterTable(m_ndb, impl);
+ if(ret == 0){
+ // Remove cached information and let it be refreshed at next access
+ if (m_localHash.get(originalInternalName) != NULL) {
+ m_localHash.drop(originalInternalName);
+ m_globalHash->lock();
+ NdbTableImpl * cachedImpl = m_globalHash->get(originalInternalName);
+ // If in local cache it must be in global
+ if (!cachedImpl)
+ abort();
+ cachedImpl->m_status = NdbDictionary::Object::Invalid;
+ m_globalHash->drop(cachedImpl);
+ m_globalHash->unlock();
+ }
+ }
+ DBUG_RETURN(ret);
+}
+
+int
+NdbDictInterface::alterTable(Ndb & ndb,
+ NdbTableImpl & impl)
+{
+ DBUG_ENTER("NdbDictInterface::alterTable");
+ DBUG_RETURN(createOrAlterTable(ndb, impl, true));
+}
+
+int
+NdbDictInterface::createOrAlterTable(Ndb & ndb,
+ NdbTableImpl & impl,
+ bool alter)
+{
+ DBUG_ENTER("NdbDictInterface::createOrAlterTable");
+ unsigned i;
+ if((unsigned)impl.getNoOfPrimaryKeys() > NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY){
+ m_error.code= 4317;
+ DBUG_RETURN(-1);
+ }
+ unsigned sz = impl.m_columns.size();
+ if (sz > NDB_MAX_ATTRIBUTES_IN_TABLE){
+ m_error.code= 4318;
+ DBUG_RETURN(-1);
+ }
+
+ if (!impl.m_newExternalName.empty()) {
+ impl.m_externalName.assign(impl.m_newExternalName);
+ AlterTableReq::setNameFlag(impl.m_changeMask, true);
+ }
+
+ //validate();
+ //aggregate();
+
+ const BaseString internalName(
+ ndb.internalize_table_name(impl.m_externalName.c_str()));
+ impl.m_internalName.assign(internalName);
+ UtilBufferWriter w(m_buffer);
+ DictTabInfo::Table tmpTab;
+ tmpTab.init();
+  BaseString::snprintf(tmpTab.TableName,
+                       sizeof(tmpTab.TableName),
+                       "%s", internalName.c_str());
+
+ bool haveAutoIncrement = false;
+ Uint64 autoIncrementValue = 0;
+ Uint32 distKeys= 0;
+ for(i = 0; i<sz; i++){
+ const NdbColumnImpl * col = impl.m_columns[i];
+ if(col == 0)
+ continue;
+ if (col->m_autoIncrement) {
+ if (haveAutoIncrement) {
+ m_error.code= 4335;
+ DBUG_RETURN(-1);
+ }
+ haveAutoIncrement = true;
+ autoIncrementValue = col->m_autoIncrementInitialValue;
+ }
+ if (col->m_distributionKey)
+ distKeys++;
+ }
+ if (distKeys == impl.m_noOfKeys)
+ distKeys= 0;
+ impl.m_noOfDistributionKeys= distKeys;
+
+
+ // Check max length of frm data
+ if (impl.m_frm.length() > MAX_FRM_DATA_SIZE){
+ m_error.code= 1229;
+ DBUG_RETURN(-1);
+ }
+ tmpTab.FrmLen = impl.m_frm.length();
+ memcpy(tmpTab.FrmData, impl.m_frm.get_data(), impl.m_frm.length());
+ tmpTab.FragmentDataLen = impl.m_ng.length();
+ memcpy(tmpTab.FragmentData, impl.m_ng.get_data(), impl.m_ng.length());
+
+ tmpTab.TableLoggedFlag = impl.m_logging;
+ tmpTab.TableKValue = impl.m_kvalue;
+ tmpTab.MinLoadFactor = impl.m_minLoadFactor;
+ tmpTab.MaxLoadFactor = impl.m_maxLoadFactor;
+ tmpTab.TableType = DictTabInfo::UserTable;
+ tmpTab.PrimaryTableId = impl.m_primaryTableId;
+ tmpTab.NoOfAttributes = sz;
+
+ tmpTab.FragmentType = getKernelConstant(impl.m_fragmentType,
+ fragmentTypeMapping,
+ DictTabInfo::AllNodesSmallTable);
+ tmpTab.TableVersion = rand();
+
+ SimpleProperties::UnpackStatus s;
+ s = SimpleProperties::pack(w,
+ &tmpTab,
+ DictTabInfo::TableMapping,
+ DictTabInfo::TableMappingSize, true);
+
+ if(s != SimpleProperties::Eof){
+ abort();
+ }
+
+ DBUG_PRINT("info",("impl.m_noOfDistributionKeys: %d impl.m_noOfKeys: %d distKeys: %d",
+ impl.m_noOfDistributionKeys, impl.m_noOfKeys, distKeys));
+ if (distKeys == impl.m_noOfKeys)
+ distKeys= 0;
+ impl.m_noOfDistributionKeys= distKeys;
+
+ for(i = 0; i<sz; i++){
+ const NdbColumnImpl * col = impl.m_columns[i];
+ if(col == 0)
+ continue;
+
+ DBUG_PRINT("info",("column: %s(%d) col->m_distributionKey: %d",
+ col->m_name.c_str(), i, col->m_distributionKey));
+ DictTabInfo::Attribute tmpAttr; tmpAttr.init();
+    BaseString::snprintf(tmpAttr.AttributeName, sizeof(tmpAttr.AttributeName),
+                         "%s", col->m_name.c_str());
+ tmpAttr.AttributeId = i;
+ tmpAttr.AttributeKeyFlag = col->m_pk;
+ tmpAttr.AttributeNullableFlag = col->m_nullable;
+ tmpAttr.AttributeDKey = distKeys ? col->m_distributionKey : 0;
+
+ tmpAttr.AttributeExtType = (Uint32)col->m_type;
+ tmpAttr.AttributeExtPrecision = ((unsigned)col->m_precision & 0xFFFF);
+ tmpAttr.AttributeExtScale = col->m_scale;
+ tmpAttr.AttributeExtLength = col->m_length;
+
+ // check type and compute attribute size and array size
+ if (! tmpAttr.translateExtType()) {
+ m_error.code= 703;
+ DBUG_RETURN(-1);
+ }
+ // charset is defined exactly for char types
+ if (col->getCharType() != (col->m_cs != NULL)) {
+ m_error.code= 703;
+ DBUG_RETURN(-1);
+ }
+ // primary key type check
+ if (col->m_pk && ! NdbSqlUtil::usable_in_pk(col->m_type, col->m_cs)) {
+ m_error.code= (col->m_cs != 0 ? 743 : 739);
+ DBUG_RETURN(-1);
+ }
+ // distribution key not supported for Char attribute
+ if (distKeys && col->m_distributionKey && col->m_cs != NULL) {
+ // we can allow this for non-var char where strxfrm does nothing
+ if (col->m_type == NdbDictionary::Column::Char &&
+ (col->m_cs->state & MY_CS_BINSORT))
+ ;
+ else {
+ m_error.code= 745;
+ DBUG_RETURN(-1);
+ }
+ }
+ // charset in upper half of precision
+ if (col->getCharType()) {
+ tmpAttr.AttributeExtPrecision |= (col->m_cs->number << 16);
+ }
+
+ tmpAttr.AttributeAutoIncrement = col->m_autoIncrement;
+    BaseString::snprintf(tmpAttr.AttributeDefaultValue,
+                         sizeof(tmpAttr.AttributeDefaultValue),
+                         "%s", col->m_defaultValue.c_str());
+ s = SimpleProperties::pack(w,
+ &tmpAttr,
+ DictTabInfo::AttributeMapping,
+ DictTabInfo::AttributeMappingSize, true);
+ w.add(DictTabInfo::AttributeEnd, 1);
+ }
+
+ NdbApiSignal tSignal(m_reference);
+ tSignal.theReceiversBlockNumber = DBDICT;
+
+ LinearSectionPtr ptr[1];
+ ptr[0].p = (Uint32*)m_buffer.get_data();
+ ptr[0].sz = m_buffer.length() / 4;
+ int ret;
+ if (alter)
+ {
+ AlterTableReq * const req =
+ CAST_PTR(AlterTableReq, tSignal.getDataPtrSend());
+
+ req->senderRef = m_reference;
+ req->senderData = 0;
+ req->changeMask = impl.m_changeMask;
+ req->tableId = impl.m_tableId;
+    req->tableVersion = impl.m_version;
+ tSignal.theVerId_signalNumber = GSN_ALTER_TABLE_REQ;
+ tSignal.theLength = AlterTableReq::SignalLength;
+ ret= alterTable(&tSignal, ptr);
+ }
+ else
+ {
+ CreateTableReq * const req =
+ CAST_PTR(CreateTableReq, tSignal.getDataPtrSend());
+
+ req->senderRef = m_reference;
+ req->senderData = 0;
+ tSignal.theVerId_signalNumber = GSN_CREATE_TABLE_REQ;
+ tSignal.theLength = CreateTableReq::SignalLength;
+ ret= createTable(&tSignal, ptr);
+
+ if (ret)
+ DBUG_RETURN(ret);
+
+ if (haveAutoIncrement) {
+ if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(),
+ autoIncrementValue)) {
+ if (ndb.theError.code == 0) {
+ m_error.code= 4336;
+ ndb.theError = m_error;
+ } else
+ m_error= ndb.theError;
+ ret = -1; // errorcode set in initialize_autoincrement
+ }
+ }
+ }
+ DBUG_RETURN(ret);
+}
+
+int
+NdbDictInterface::createTable(NdbApiSignal* signal, LinearSectionPtr ptr[3])
+{
+#if DEBUG_PRINT
+ ndbout_c("BufferLen = %d", ptr[0].sz);
+ SimplePropertiesLinearReader r(ptr[0].p, ptr[0].sz);
+ r.printAll(ndbout);
+#endif
+ const int noErrCodes = 2;
+ int errCodes[noErrCodes] =
+ {CreateTableRef::Busy,
+ CreateTableRef::NotMaster};
+ return dictSignal(signal,ptr,1,
+ 1/*use masternode id*/,
+ 100,
+ WAIT_CREATE_INDX_REQ,
+ WAITFOR_RESPONSE_TIMEOUT,
+ errCodes,noErrCodes);
+}
+
+
+void
+NdbDictInterface::execCREATE_TABLE_CONF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+#if 0
+ const CreateTableConf* const conf=
+ CAST_CONSTPTR(CreateTableConf, signal->getDataPtr());
+ Uint32 tableId= conf->tableId;
+ Uint32 tableVersion= conf->tableVersion;
+#endif
+ m_waiter.signal(NO_WAIT);
+}
+
+void
+NdbDictInterface::execCREATE_TABLE_REF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ const CreateTableRef* const ref=
+ CAST_CONSTPTR(CreateTableRef, signal->getDataPtr());
+ m_error.code= ref->errorCode;
+ m_masterNodeId = ref->masterNodeId;
+ m_waiter.signal(NO_WAIT);
+}
+
+int
+NdbDictInterface::alterTable(NdbApiSignal* signal, LinearSectionPtr ptr[3])
+{
+#if DEBUG_PRINT
+ ndbout_c("BufferLen = %d", ptr[0].sz);
+ SimplePropertiesLinearReader r(ptr[0].p, ptr[0].sz);
+ r.printAll(ndbout);
+#endif
+ const int noErrCodes = 2;
+ int errCodes[noErrCodes] =
+ {AlterTableRef::NotMaster,
+ AlterTableRef::Busy};
+ int r = dictSignal(signal,ptr,1,
+ 1/*use masternode id*/,
+ 100,WAIT_ALTER_TAB_REQ,
+ WAITFOR_RESPONSE_TIMEOUT,
+ errCodes, noErrCodes);
+ if(m_error.code == AlterTableRef::InvalidTableVersion) {
+ // Clear caches and try again
+ return INCOMPATIBLE_VERSION;
+ }
+
+ return r;
+}
+
+void
+NdbDictInterface::execALTER_TABLE_CONF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ //AlterTableConf* const conf = CAST_CONSTPTR(AlterTableConf, signal->getDataPtr());
+ m_waiter.signal(NO_WAIT);
+}
+
+void
+NdbDictInterface::execALTER_TABLE_REF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ const AlterTableRef * const ref =
+ CAST_CONSTPTR(AlterTableRef, signal->getDataPtr());
+ m_error.code= ref->errorCode;
+ m_masterNodeId = ref->masterNodeId;
+ m_waiter.signal(NO_WAIT);
+}
+
+/*****************************************************************
+ * Drop table
+ */
+int
+NdbDictionaryImpl::dropTable(const char * name)
+{
+ DBUG_ENTER("NdbDictionaryImpl::dropTable");
+ DBUG_PRINT("enter",("name: %s", name));
+ NdbTableImpl * tab = getTable(name);
+ if(tab == 0){
+ DBUG_RETURN(-1);
+ }
+ int ret = dropTable(* tab);
+ // If table stored in cache is incompatible with the one in the kernel
+ // we must clear the cache and try again
+ if (ret == INCOMPATIBLE_VERSION) {
+ const BaseString internalTableName(m_ndb.internalize_table_name(name));
+
+ DBUG_PRINT("info",("INCOMPATIBLE_VERSION internal_name: %s", internalTableName.c_str()));
+ m_localHash.drop(internalTableName.c_str());
+ m_globalHash->lock();
+ tab->m_status = NdbDictionary::Object::Invalid;
+ m_globalHash->drop(tab);
+ m_globalHash->unlock();
+ DBUG_RETURN(dropTable(name));
+ }
+
+ DBUG_RETURN(ret);
+}
+
+int
+NdbDictionaryImpl::dropTable(NdbTableImpl & impl)
+{
+ int res;
+ const char * name = impl.getName();
+ if(impl.m_status == NdbDictionary::Object::New){
+ return dropTable(name);
+ }
+
+ if (impl.m_indexType != NdbDictionary::Index::Undefined) {
+ m_receiver.m_error.code= 1228;
+ return -1;
+ }
+
+ List list;
+ if ((res = listIndexes(list, impl.m_tableId)) == -1){
+ return -1;
+ }
+ for (unsigned i = 0; i < list.count; i++) {
+ const List::Element& element = list.elements[i];
+ if ((res = dropIndex(element.name, name)) == -1)
+ {
+ return -1;
+ }
+ }
+
+ if (impl.m_noOfBlobs != 0) {
+ if (dropBlobTables(impl) != 0){
+ return -1;
+ }
+ }
+
+ int ret = m_receiver.dropTable(impl);
+ if(ret == 0 || m_error.code == 709){
+ const char * internalTableName = impl.m_internalName.c_str();
+
+
+ m_localHash.drop(internalTableName);
+ m_globalHash->lock();
+ impl.m_status = NdbDictionary::Object::Invalid;
+ m_globalHash->drop(&impl);
+ m_globalHash->unlock();
+
+ return 0;
+ }
+
+ return ret;
+}
+
+int
+NdbDictionaryImpl::dropBlobTables(NdbTableImpl & t)
+{
+ DBUG_ENTER("NdbDictionaryImpl::dropBlobTables");
+ for (unsigned i = 0; i < t.m_columns.size(); i++) {
+ NdbColumnImpl & c = *t.m_columns[i];
+ if (! c.getBlobType() || c.getPartSize() == 0)
+ continue;
+ char btname[NdbBlobImpl::BlobTableNameSize];
+ NdbBlob::getBlobTableName(btname, &t, &c);
+ if (dropTable(btname) != 0) {
+ if (m_error.code != 709){
+ DBUG_PRINT("exit",("error %u - exiting",m_error.code));
+ DBUG_RETURN(-1);
+ }
+ DBUG_PRINT("info",("error %u - continuing",m_error.code));
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+int
+NdbDictInterface::dropTable(const NdbTableImpl & impl)
+{
+ NdbApiSignal tSignal(m_reference);
+ tSignal.theReceiversBlockNumber = DBDICT;
+ tSignal.theVerId_signalNumber = GSN_DROP_TABLE_REQ;
+ tSignal.theLength = DropTableReq::SignalLength;
+
+ DropTableReq * const req = CAST_PTR(DropTableReq, tSignal.getDataPtrSend());
+ req->senderRef = m_reference;
+ req->senderData = 0;
+ req->tableId = impl.m_tableId;
+ req->tableVersion = impl.m_version;
+
+ return dropTable(&tSignal, 0);
+}
+
+int
+NdbDictInterface::dropTable(NdbApiSignal* signal, LinearSectionPtr ptr[3])
+{
+ const int noErrCodes = 3;
+ int errCodes[noErrCodes] =
+ {DropTableRef::NoDropTableRecordAvailable,
+ DropTableRef::NotMaster,
+ DropTableRef::Busy};
+ int r = dictSignal(signal,NULL,0,
+ 1/*use masternode id*/,
+ 100,WAIT_DROP_TAB_REQ,
+ WAITFOR_RESPONSE_TIMEOUT,
+ errCodes, noErrCodes);
+ if(m_error.code == DropTableRef::InvalidTableVersion) {
+ // Clear caches and try again
+ return INCOMPATIBLE_VERSION;
+ }
+ return r;
+}
+
+void
+NdbDictInterface::execDROP_TABLE_CONF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ DBUG_ENTER("NdbDictInterface::execDROP_TABLE_CONF");
+ //DropTableConf* const conf = CAST_CONSTPTR(DropTableConf, signal->getDataPtr());
+
+ m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
+}
+
+void
+NdbDictInterface::execDROP_TABLE_REF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ DBUG_ENTER("NdbDictInterface::execDROP_TABLE_REF");
+ const DropTableRef* const ref = CAST_CONSTPTR(DropTableRef, signal->getDataPtr());
+ m_error.code= ref->errorCode;
+ m_masterNodeId = ref->masterNodeId;
+ m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
+}
+
+int
+NdbDictionaryImpl::invalidateObject(NdbTableImpl & impl)
+{
+ const char * internalTableName = impl.m_internalName.c_str();
+ DBUG_ENTER("NdbDictionaryImpl::invalidateObject");
+ DBUG_PRINT("enter", ("internal_name: %s", internalTableName));
+ m_localHash.drop(internalTableName);
+ m_globalHash->lock();
+ impl.m_status = NdbDictionary::Object::Invalid;
+ m_globalHash->drop(&impl);
+ m_globalHash->unlock();
+ DBUG_RETURN(0);
+}
+
+int
+NdbDictionaryImpl::removeCachedObject(NdbTableImpl & impl)
+{
+ const char * internalTableName = impl.m_internalName.c_str();
+
+ m_localHash.drop(internalTableName);
+ m_globalHash->lock();
+ m_globalHash->release(&impl);
+ m_globalHash->unlock();
+ return 0;
+}
+
+/*****************************************************************
+ * Get index info
+ */
+NdbIndexImpl*
+NdbDictionaryImpl::getIndexImpl(const char * externalName,
+ const BaseString& internalName)
+{
+ Ndb_local_table_info * info = get_local_table_info(internalName,
+ false);
+ if(info == 0){
+ m_error.code = 4243;
+ return 0;
+ }
+ NdbTableImpl * tab = info->m_table_impl;
+
+ if(tab->m_indexType == NdbDictionary::Index::Undefined){
+ // Not an index
+ m_error.code = 4243;
+ return 0;
+ }
+
+ NdbTableImpl* prim = getTable(tab->m_primaryTable.c_str());
+ if(prim == 0){
+ m_error.code = 4243;
+ return 0;
+ }
+
+ /**
+ * Create index impl
+ */
+ NdbIndexImpl* idx;
+ if(NdbDictInterface::create_index_obj_from_table(&idx, tab, prim) == 0){
+ idx->m_table = tab;
+ idx->m_externalName.assign(externalName);
+ idx->m_internalName.assign(internalName);
+ // TODO Assign idx to tab->m_index
+ // Don't do it right now since assign can't assign a table with an index
+ // tab->m_index = idx;
+ return idx;
+ }
+ return 0;
+}
+
+int
+NdbDictInterface::create_index_obj_from_table(NdbIndexImpl** dst,
+ NdbTableImpl* tab,
+ const NdbTableImpl* prim){
+ NdbIndexImpl *idx = new NdbIndexImpl();
+ idx->m_version = tab->m_version;
+ idx->m_status = tab->m_status;
+ idx->m_indexId = tab->m_tableId;
+ idx->m_externalName.assign(tab->getName());
+ idx->m_tableName.assign(prim->m_externalName);
+ NdbDictionary::Index::Type type = idx->m_type = tab->m_indexType;
+ idx->m_logging = tab->m_logging;
+ // skip last attribute (NDB$PK or NDB$TNODE)
+
+ const Uint32 distKeys = prim->m_noOfDistributionKeys;
+ Uint32 keyCount = (distKeys ? distKeys : prim->m_noOfKeys);
+
+ unsigned i;
+ for(i = 0; i+1<tab->m_columns.size(); i++){
+ NdbColumnImpl* org = tab->m_columns[i];
+
+ NdbColumnImpl* col = new NdbColumnImpl;
+ // Copy column definition
+ *col = * org;
+ idx->m_columns.push_back(col);
+
+ /**
+ * reverse map
+ */
+ const NdbColumnImpl* primCol = prim->getColumn(col->getName());
+ int key_id = primCol->getColumnNo();
+ int fill = -1;
+ idx->m_key_ids.fill(key_id, fill);
+ idx->m_key_ids[key_id] = i;
+ col->m_keyInfoPos = key_id;
+
+ if(type == NdbDictionary::Index::OrderedIndex &&
+ (primCol->m_distributionKey ||
+ (distKeys == 0 && primCol->getPrimaryKey())))
+ {
+ keyCount--;
+ org->m_distributionKey = 1;
+ }
+ }
+
+ if(keyCount == 0)
+ {
+ tab->m_noOfDistributionKeys = (distKeys ? distKeys : prim->m_noOfKeys);
+ }
+ else
+ {
+ for(i = 0; i+1<tab->m_columns.size(); i++)
+ tab->m_columns[i]->m_distributionKey = 0;
+ }
+
+ * dst = idx;
+ return 0;
+}
+
+/*****************************************************************
+ * Create index
+ */
+int
+NdbDictionaryImpl::createIndex(NdbIndexImpl &ix)
+{
+ NdbTableImpl* tab = getTable(ix.getTable());
+ if(tab == 0){
+ m_error.code = 4249;
+ return -1;
+ }
+
+ return m_receiver.createIndex(m_ndb, ix, * tab);
+}
+
+int
+NdbDictInterface::createIndex(Ndb & ndb,
+ NdbIndexImpl & impl,
+ const NdbTableImpl & table)
+{
+ //validate();
+ //aggregate();
+ unsigned i;
+ UtilBufferWriter w(m_buffer);
+ const size_t len = strlen(impl.m_externalName.c_str()) + 1;
+ if(len > MAX_TAB_NAME_SIZE) {
+ m_error.code = 4241;
+ return -1;
+ }
+ const BaseString internalName(
+ ndb.internalize_index_name(&table, impl.getName()));
+ impl.m_internalName.assign(internalName);
+
+ w.add(DictTabInfo::TableName, internalName.c_str());
+ w.add(DictTabInfo::TableLoggedFlag, impl.m_logging);
+
+ NdbApiSignal tSignal(m_reference);
+ tSignal.theReceiversBlockNumber = DBDICT;
+ tSignal.theVerId_signalNumber = GSN_CREATE_INDX_REQ;
+ tSignal.theLength = CreateIndxReq::SignalLength;
+
+ CreateIndxReq * const req = CAST_PTR(CreateIndxReq, tSignal.getDataPtrSend());
+
+ req->setUserRef(m_reference);
+ req->setConnectionPtr(0);
+ req->setRequestType(CreateIndxReq::RT_USER);
+
+ Uint32 it = getKernelConstant(impl.m_type,
+ indexTypeMapping,
+ DictTabInfo::UndefTableType);
+
+ if(it == DictTabInfo::UndefTableType){
+ m_error.code = 4250;
+ return -1;
+ }
+ req->setIndexType((DictTabInfo::TableType) it);
+
+ req->setTableId(table.m_tableId);
+ req->setOnline(true);
+ AttributeList attributeList;
+ attributeList.sz = impl.m_columns.size();
+ for(i = 0; i<attributeList.sz; i++){
+ const NdbColumnImpl* col =
+ table.getColumn(impl.m_columns[i]->m_name.c_str());
+ if(col == 0){
+ m_error.code = 4247;
+ return -1;
+ }
+ // Copy column definition
+ *impl.m_columns[i] = *col;
+
+ // index key type check
+ if ((it == DictTabInfo::UniqueHashIndex &&
+ ! NdbSqlUtil::usable_in_hash_index(col->m_type, col->m_cs)) ||
+ (it == DictTabInfo::OrderedIndex &&
+ ! NdbSqlUtil::usable_in_ordered_index(col->m_type, col->m_cs))) {
+ m_error.code = 743;
+ return -1;
+ }
+ attributeList.id[i] = col->m_attrId;
+ }
+ LinearSectionPtr ptr[2];
+ ptr[0].p = (Uint32*)&attributeList;
+ ptr[0].sz = 1 + attributeList.sz;
+ ptr[1].p = (Uint32*)m_buffer.get_data();
+ ptr[1].sz = m_buffer.length() >> 2; //BUG?
+ return createIndex(&tSignal, ptr);
+}
+
+int
+NdbDictInterface::createIndex(NdbApiSignal* signal,
+ LinearSectionPtr ptr[3])
+{
+ const int noErrCodes = 2;
+ int errCodes[noErrCodes] = {CreateIndxRef::Busy, CreateIndxRef::NotMaster};
+ return dictSignal(signal,ptr,2,
+ 1 /*use masternode id*/,
+ 100,
+ WAIT_CREATE_INDX_REQ,
+ -1,
+ errCodes,noErrCodes);
+}
+
+void
+NdbDictInterface::execCREATE_INDX_CONF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ //CreateTableConf* const conf = CAST_CONSTPTR(CreateTableConf, signal->getDataPtr());
+
+ m_waiter.signal(NO_WAIT);
+}
+
+void
+NdbDictInterface::execCREATE_INDX_REF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ const CreateIndxRef* const ref = CAST_CONSTPTR(CreateIndxRef, signal->getDataPtr());
+ m_error.code = ref->getErrorCode();
+ if(m_error.code == ref->NotMaster)
+ m_masterNodeId= ref->masterNodeId;
+ m_waiter.signal(NO_WAIT);
+}
+
+/*****************************************************************
+ * Drop index
+ */
+int
+NdbDictionaryImpl::dropIndex(const char * indexName,
+ const char * tableName)
+{
+ NdbIndexImpl * idx = getIndex(indexName, tableName);
+ if (idx == 0) {
+ m_error.code = 4243;
+ return -1;
+ }
+ int ret = dropIndex(*idx, tableName);
+ // If index stored in cache is incompatible with the one in the kernel
+ // we must clear the cache and try again
+ if (ret == INCOMPATIBLE_VERSION) {
+ const BaseString internalIndexName((tableName)
+ ?
+ m_ndb.internalize_index_name(getTable(tableName), indexName)
+ :
+ m_ndb.internalize_table_name(indexName)); // Index is also a table
+
+ m_localHash.drop(internalIndexName.c_str());
+ m_globalHash->lock();
+ idx->m_table->m_status = NdbDictionary::Object::Invalid;
+ m_globalHash->drop(idx->m_table);
+ m_globalHash->unlock();
+ return dropIndex(indexName, tableName);
+ }
+
+ return ret;
+}
+
+int
+NdbDictionaryImpl::dropIndex(NdbIndexImpl & impl, const char * tableName)
+{
+ const char * indexName = impl.getName();
+ if (tableName || m_ndb.usingFullyQualifiedNames()) {
+ NdbTableImpl * timpl = impl.m_table;
+
+ if (timpl == 0) {
+ m_error.code = 709;
+ return -1;
+ }
+
+ const BaseString internalIndexName((tableName)
+ ?
+ m_ndb.internalize_index_name(getTable(tableName), indexName)
+ :
+ m_ndb.internalize_table_name(indexName)); // Index is also a table
+
+ if(impl.m_status == NdbDictionary::Object::New){
+ return dropIndex(indexName, tableName);
+ }
+
+ int ret = m_receiver.dropIndex(impl, *timpl);
+ if(ret == 0){
+ m_localHash.drop(internalIndexName.c_str());
+ m_globalHash->lock();
+ impl.m_table->m_status = NdbDictionary::Object::Invalid;
+ m_globalHash->drop(impl.m_table);
+ m_globalHash->unlock();
+ }
+ return ret;
+ }
+
+ m_error.code = 4243;
+ return -1;
+}
+
+int
+NdbDictInterface::dropIndex(const NdbIndexImpl & impl,
+ const NdbTableImpl & timpl)
+{
+ NdbApiSignal tSignal(m_reference);
+ tSignal.theReceiversBlockNumber = DBDICT;
+ tSignal.theVerId_signalNumber = GSN_DROP_INDX_REQ;
+ tSignal.theLength = DropIndxReq::SignalLength;
+
+ DropIndxReq * const req = CAST_PTR(DropIndxReq, tSignal.getDataPtrSend());
+ req->setUserRef(m_reference);
+ req->setConnectionPtr(0);
+ req->setRequestType(DropIndxReq::RT_USER);
+ req->setTableId(~0); // DICT overwrites
+ req->setIndexId(timpl.m_tableId);
+ req->setIndexVersion(timpl.m_version);
+
+ return dropIndex(&tSignal, 0);
+}
+
+int
+NdbDictInterface::dropIndex(NdbApiSignal* signal, LinearSectionPtr ptr[3])
+{
+ const int noErrCodes = 2;
+ int errCodes[noErrCodes] = {DropIndxRef::Busy, DropIndxRef::NotMaster};
+ int r = dictSignal(signal,NULL,0,
+ 1/*Use masternode id*/,
+ 100,
+ WAIT_DROP_INDX_REQ,
+ WAITFOR_RESPONSE_TIMEOUT,
+ errCodes,noErrCodes);
+ if(m_error.code == DropIndxRef::InvalidIndexVersion) {
+ // Clear caches and try again
+ return INCOMPATIBLE_VERSION;
+ }
+ return r;
+}
+
+void
+NdbDictInterface::execDROP_INDX_CONF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ m_waiter.signal(NO_WAIT);
+}
+
+void
+NdbDictInterface::execDROP_INDX_REF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ const DropIndxRef* const ref = CAST_CONSTPTR(DropIndxRef, signal->getDataPtr());
+ m_error.code = ref->getErrorCode();
+ if(m_error.code == ref->NotMaster)
+ m_masterNodeId= ref->masterNodeId;
+ m_waiter.signal(NO_WAIT);
+}
+
+/*****************************************************************
+ * Create event
+ */
+
+int
+NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
+{
+ int i;
+ NdbTableImpl* tab = getTable(evnt.getTableName());
+
+ if(tab == 0){
+#ifdef EVENT_DEBUG
+ ndbout_c("NdbDictionaryImpl::createEvent: table not found: %s",
+ evnt.getTableName());
+#endif
+ return -1;
+ }
+
+ evnt.m_tableId = tab->m_tableId;
+ evnt.m_tableImpl = tab;
+#ifdef EVENT_DEBUG
+ ndbout_c("Event on tableId=%d", evnt.m_tableId);
+#endif
+
+ NdbTableImpl &table = *evnt.m_tableImpl;
+
+
+ int attributeList_sz = evnt.m_attrIds.size();
+
+ for (i = 0; i < attributeList_sz; i++) {
+ NdbColumnImpl *col_impl = table.getColumn(evnt.m_attrIds[i]);
+ if (col_impl) {
+ evnt.m_facade->addColumn(*(col_impl->m_facade));
+ } else {
+ ndbout_c("Attr id %u in table %s not found", evnt.m_attrIds[i],
+ evnt.getTableName());
+ m_error.code= 4713;
+ return -1;
+ }
+ }
+
+ evnt.m_attrIds.clear();
+
+ attributeList_sz = evnt.m_columns.size();
+#ifdef EVENT_DEBUG
+ ndbout_c("creating event %s", evnt.m_externalName.c_str());
+ ndbout_c("no of columns %d", evnt.m_columns.size());
+#endif
+ int pk_count = 0;
+ evnt.m_attrListBitmask.clear();
+
+ for(i = 0; i<attributeList_sz; i++){
+ const NdbColumnImpl* col =
+ table.getColumn(evnt.m_columns[i]->m_name.c_str());
+ if(col == 0){
+ m_error.code= 4247;
+ return -1;
+ }
+ // Copy column definition
+ *evnt.m_columns[i] = *col;
+
+ if(col->m_pk){
+ pk_count++;
+ }
+
+ evnt.m_attrListBitmask.set(col->m_attrId);
+ }
+
+ // Sort event columns by attribute id, i.e. their order in the primary table (insertion sort)
+ for(i = 1; i < attributeList_sz; i++) {
+ NdbColumnImpl* temp = evnt.m_columns[i];
+ unsigned int j = i;
+ while((j > 0) && (evnt.m_columns[j - 1]->m_attrId > temp->m_attrId)) {
+ evnt.m_columns[j] = evnt.m_columns[j - 1];
+ j--;
+ }
+ evnt.m_columns[j] = temp;
+ }
+ // Check for illegal duplicate attributes
+ for(i = 1; i<attributeList_sz; i++) {
+ if (evnt.m_columns[i-1]->m_attrId == evnt.m_columns[i]->m_attrId) {
+ m_error.code= 4258;
+ return -1;
+ }
+ }
+
+#ifdef EVENT_DEBUG
+ char buf[128] = {0};
+ evnt.m_attrListBitmask.getText(buf);
+ ndbout_c("createEvent: mask = %s", buf);
+#endif
+
+ // NdbDictInterface m_receiver;
+ return m_receiver.createEvent(m_ndb, evnt, 0 /* getFlag unset */);
+}
+
+int
+NdbDictInterface::createEvent(class Ndb & ndb,
+ NdbEventImpl & evnt,
+ int getFlag)
+{
+ NdbApiSignal tSignal(m_reference);
+ tSignal.theReceiversBlockNumber = DBDICT;
+ tSignal.theVerId_signalNumber = GSN_CREATE_EVNT_REQ;
+ if (getFlag)
+ tSignal.theLength = CreateEvntReq::SignalLengthGet;
+ else
+ tSignal.theLength = CreateEvntReq::SignalLengthCreate;
+
+ CreateEvntReq * const req = CAST_PTR(CreateEvntReq, tSignal.getDataPtrSend());
+
+ req->setUserRef(m_reference);
+ req->setUserData(0);
+
+ if (getFlag) {
+ // getting event from Dictionary
+ req->setRequestType(CreateEvntReq::RT_USER_GET);
+ } else {
+ // creating event in Dictionary
+ req->setRequestType(CreateEvntReq::RT_USER_CREATE);
+ req->setTableId(evnt.m_tableId);
+ req->setAttrListBitmask(evnt.m_attrListBitmask);
+ req->setEventType(evnt.mi_type);
+ }
+
+ UtilBufferWriter w(m_buffer);
+
+ const size_t len = strlen(evnt.m_externalName.c_str()) + 1;
+ if(len > MAX_TAB_NAME_SIZE) {
+ m_error.code= 4241;
+ return -1;
+ }
+
+ w.add(SimpleProperties::StringValue, evnt.m_externalName.c_str());
+
+ if (getFlag == 0)
+ {
+ const BaseString internal_tabname(
+ ndb.internalize_table_name(evnt.m_tableName.c_str()));
+ w.add(SimpleProperties::StringValue,
+ internal_tabname.c_str());
+ }
+
+ LinearSectionPtr ptr[1];
+ ptr[0].p = (Uint32*)m_buffer.get_data();
+ ptr[0].sz = (m_buffer.length()+3) >> 2;
+
+ int ret = createEvent(&tSignal, ptr, 1);
+
+ if (ret) {
+ return ret;
+ }
+
+ char *dataPtr = (char *)m_buffer.get_data();
+ unsigned int lenCreateEvntConf = *((unsigned int *)dataPtr);
+ dataPtr += sizeof(lenCreateEvntConf);
+ CreateEvntConf const * evntConf = (CreateEvntConf *)dataPtr;
+ dataPtr += lenCreateEvntConf;
+
+ // NdbEventImpl *evntImpl = (NdbEventImpl *)evntConf->getUserData();
+
+ if (getFlag) {
+ evnt.m_tableId = evntConf->getTableId();
+ evnt.m_attrListBitmask = evntConf->getAttrListBitmask();
+ evnt.mi_type = evntConf->getEventType();
+ evnt.setTable(dataPtr);
+ } else {
+ if (evnt.m_tableId != evntConf->getTableId() ||
+ //evnt.m_attrListBitmask != evntConf->getAttrListBitmask() ||
+ evnt.mi_type != evntConf->getEventType()) {
+ ndbout_c("ERROR*************");
+ return 1;
+ }
+ }
+
+ evnt.m_eventId = evntConf->getEventId();
+ evnt.m_eventKey = evntConf->getEventKey();
+
+ return ret;
+}
+
+int
+NdbDictInterface::createEvent(NdbApiSignal* signal,
+ LinearSectionPtr ptr[3], int noLSP)
+{
+ const int noErrCodes = 1;
+ int errCodes[noErrCodes] = {CreateEvntRef::Busy};
+ return dictSignal(signal,ptr,noLSP,
+ 1 /*use masternode id*/,
+ 100,
+ WAIT_CREATE_INDX_REQ /*WAIT_CREATE_EVNT_REQ*/,
+ -1,
+ errCodes,noErrCodes, CreateEvntRef::Temporary);
+}
+
+int
+NdbDictionaryImpl::executeSubscribeEvent(NdbEventImpl & ev)
+{
+ // NdbDictInterface m_receiver;
+ return m_receiver.executeSubscribeEvent(m_ndb, ev);
+}
+
+int
+NdbDictInterface::executeSubscribeEvent(class Ndb & ndb,
+ NdbEventImpl & evnt)
+{
+ DBUG_ENTER("NdbDictInterface::executeSubscribeEvent");
+ NdbApiSignal tSignal(m_reference);
+ // tSignal.theReceiversBlockNumber = SUMA;
+ tSignal.theReceiversBlockNumber = DBDICT;
+ tSignal.theVerId_signalNumber = GSN_SUB_START_REQ;
+ tSignal.theLength = SubStartReq::SignalLength2;
+
+ SubStartReq * sumaStart = CAST_PTR(SubStartReq, tSignal.getDataPtrSend());
+
+ sumaStart->subscriptionId = evnt.m_eventId;
+ sumaStart->subscriptionKey = evnt.m_eventKey;
+ sumaStart->part = SubscriptionData::TableData;
+ sumaStart->subscriberData = evnt.m_bufferId & 0xFF;
+ sumaStart->subscriberRef = m_reference;
+
+ DBUG_RETURN(executeSubscribeEvent(&tSignal, NULL));
+}
+
+int
+NdbDictInterface::executeSubscribeEvent(NdbApiSignal* signal,
+ LinearSectionPtr ptr[3])
+{
+ return dictSignal(signal,NULL,0,
+ 1 /*use masternode id*/,
+ 100,
+ WAIT_CREATE_INDX_REQ /*WAIT_CREATE_EVNT_REQ*/,
+ -1,
+ NULL,0);
+}
+
+int
+NdbDictionaryImpl::stopSubscribeEvent(NdbEventImpl & ev)
+{
+ // NdbDictInterface m_receiver;
+ return m_receiver.stopSubscribeEvent(m_ndb, ev);
+}
+
+int
+NdbDictInterface::stopSubscribeEvent(class Ndb & ndb,
+ NdbEventImpl & evnt)
+{
+ DBUG_ENTER("NdbDictInterface::stopSubscribeEvent");
+
+ NdbApiSignal tSignal(m_reference);
+ // tSignal.theReceiversBlockNumber = SUMA;
+ tSignal.theReceiversBlockNumber = DBDICT;
+ tSignal.theVerId_signalNumber = GSN_SUB_STOP_REQ;
+ tSignal.theLength = SubStopReq::SignalLength;
+
+ SubStopReq * sumaStop = CAST_PTR(SubStopReq, tSignal.getDataPtrSend());
+
+ sumaStop->subscriptionId = evnt.m_eventId;
+ sumaStop->subscriptionKey = evnt.m_eventKey;
+ sumaStop->subscriberData = evnt.m_bufferId & 0xFF;
+ sumaStop->part = (Uint32) SubscriptionData::TableData;
+ sumaStop->subscriberRef = m_reference;
+
+ DBUG_RETURN(stopSubscribeEvent(&tSignal, NULL));
+}
+
+int
+NdbDictInterface::stopSubscribeEvent(NdbApiSignal* signal,
+ LinearSectionPtr ptr[3])
+{
+ return dictSignal(signal,NULL,0,
+ 1 /*use masternode id*/,
+ 100,
+ WAIT_CREATE_INDX_REQ /*WAIT_SUB_STOP__REQ*/,
+ -1,
+ NULL,0);
+}
+
+NdbEventImpl *
+NdbDictionaryImpl::getEvent(const char * eventName)
+{
+ NdbEventImpl *ev = new NdbEventImpl();
+
+ if (ev == NULL) {
+ return NULL;
+ }
+
+ ev->setName(eventName);
+
+ int ret = m_receiver.createEvent(m_ndb, *ev, 1 /* getFlag set */);
+
+ if (ret) {
+ delete ev;
+ return NULL;
+ }
+
+ // The table name is returned in internal format; convert it to the external name
+ ev->setTable(m_ndb.externalizeTableName(ev->getTableName()));
+ ev->m_tableImpl = getTable(ev->getTableName());
+
+ // get the columns from the attrListBitmask
+
+ NdbTableImpl &table = *ev->m_tableImpl;
+ AttributeMask & mask = ev->m_attrListBitmask;
+ int attributeList_sz = mask.count();
+ int id = -1;
+
+#ifdef EVENT_DEBUG
+ ndbout_c("NdbDictionaryImpl::getEvent attributeList_sz = %d",
+ attributeList_sz);
+ char buf[128] = {0};
+ mask.getText(buf);
+ ndbout_c("mask = %s", buf);
+#endif
+
+ for(int i = 0; i < attributeList_sz; i++) {
+ id++;
+ while (!mask.get(id))
+ id++;
+
+ const NdbColumnImpl* col = table.getColumn(id);
+ if(col == 0) {
+#ifdef EVENT_DEBUG
+ ndbout_c("NdbDictionaryImpl::getEvent could not find column id %d", id);
+#endif
+ m_error.code= 4247;
+ delete ev;
+ return NULL;
+ }
+ NdbColumnImpl* new_col = new NdbColumnImpl;
+ // Copy column definition
+ *new_col = *col;
+
+ ev->m_columns.push_back(new_col);
+ }
+
+ return ev;
+}
+
+void
+NdbDictInterface::execCREATE_EVNT_CONF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ DBUG_ENTER("NdbDictInterface::execCREATE_EVNT_CONF");
+
+ m_buffer.clear();
+ unsigned int len = signal->getLength() << 2;
+ m_buffer.append((char *)&len, sizeof(len));
+ m_buffer.append(signal->getDataPtr(), len);
+
+ if (signal->m_noOfSections > 0) {
+ m_buffer.append((char *)ptr[0].p, strlen((char *)ptr[0].p)+1);
+ }
+
+ const CreateEvntConf * const createEvntConf=
+ CAST_CONSTPTR(CreateEvntConf, signal->getDataPtr());
+
+ Uint32 subscriptionId = createEvntConf->getEventId();
+ Uint32 subscriptionKey = createEvntConf->getEventKey();
+
+ DBUG_PRINT("info",("subscriptionId=%d,subscriptionKey=%d",
+ subscriptionId,subscriptionKey));
+ m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
+}
+
+void
+NdbDictInterface::execCREATE_EVNT_REF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ DBUG_ENTER("NdbDictInterface::execCREATE_EVNT_REF");
+
+ const CreateEvntRef* const ref=
+ CAST_CONSTPTR(CreateEvntRef, signal->getDataPtr());
+ m_error.code= ref->getErrorCode();
+ DBUG_PRINT("error",("error=%d,line=%d,node=%d",ref->getErrorCode(),
+ ref->getErrorLine(),ref->getErrorNode()));
+ m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
+}
+
+void
+NdbDictInterface::execSUB_STOP_CONF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ DBUG_ENTER("NdbDictInterface::execSUB_STOP_CONF");
+ const SubStopConf * const subStopConf=
+ CAST_CONSTPTR(SubStopConf, signal->getDataPtr());
+
+ Uint32 subscriptionId = subStopConf->subscriptionId;
+ Uint32 subscriptionKey = subStopConf->subscriptionKey;
+ Uint32 subscriberData = subStopConf->subscriberData;
+
+ DBUG_PRINT("info",("subscriptionId=%d,subscriptionKey=%d,subscriberData=%d",
+ subscriptionId,subscriptionKey,subscriberData));
+ m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
+}
+
+void
+NdbDictInterface::execSUB_STOP_REF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ DBUG_ENTER("NdbDictInterface::execSUB_STOP_REF");
+ const SubStopRef * const subStopRef=
+ CAST_CONSTPTR(SubStopRef, signal->getDataPtr());
+
+ Uint32 subscriptionId = subStopRef->subscriptionId;
+ Uint32 subscriptionKey = subStopRef->subscriptionKey;
+ Uint32 subscriberData = subStopRef->subscriberData;
+ m_error.code= subStopRef->errorCode;
+
+ DBUG_PRINT("error",("subscriptionId=%d,subscriptionKey=%d,subscriberData=%d,error=%d",
+ subscriptionId,subscriptionKey,subscriberData,m_error.code));
+ m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
+}
+
+void
+NdbDictInterface::execSUB_START_CONF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ DBUG_ENTER("NdbDictInterface::execSUB_START_CONF");
+ const SubStartConf * const subStartConf=
+ CAST_CONSTPTR(SubStartConf, signal->getDataPtr());
+
+ Uint32 subscriptionId = subStartConf->subscriptionId;
+ Uint32 subscriptionKey = subStartConf->subscriptionKey;
+ SubscriptionData::Part part =
+ (SubscriptionData::Part)subStartConf->part;
+ Uint32 subscriberData = subStartConf->subscriberData;
+
+ switch(part) {
+ case SubscriptionData::MetaData: {
+ DBUG_PRINT("error",("SubscriptionData::MetaData"));
+ m_error.code= 1;
+ break;
+ }
+ case SubscriptionData::TableData: {
+ DBUG_PRINT("info",("SubscriptionData::TableData"));
+ break;
+ }
+ default: {
+ DBUG_PRINT("error",("wrong data"));
+ m_error.code= 2;
+ break;
+ }
+ }
+ DBUG_PRINT("info",("subscriptionId=%d,subscriptionKey=%d,subscriberData=%d",
+ subscriptionId,subscriptionKey,subscriberData));
+ m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
+}
+
+void
+NdbDictInterface::execSUB_START_REF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ DBUG_ENTER("NdbDictInterface::execSUB_START_REF");
+ const SubStartRef * const subStartRef=
+ CAST_CONSTPTR(SubStartRef, signal->getDataPtr());
+ m_error.code= subStartRef->errorCode;
+ m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
+}
+void
+NdbDictInterface::execSUB_GCP_COMPLETE_REP(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ const SubGcpCompleteRep * const rep=
+ CAST_CONSTPTR(SubGcpCompleteRep, signal->getDataPtr());
+
+ const Uint32 gci = rep->gci;
+ // const Uint32 senderRef = rep->senderRef;
+ const Uint32 subscriberData = rep->subscriberData;
+
+ const Uint32 bufferId = subscriberData;
+
+ const Uint32 ref = signal->theSendersBlockRef;
+
+ NdbApiSignal tSignal(m_reference);
+ SubGcpCompleteAcc * acc=
+ CAST_PTR(SubGcpCompleteAcc, tSignal.getDataPtrSend());
+
+ acc->rep = *rep;
+
+ tSignal.theReceiversBlockNumber = refToBlock(ref);
+ tSignal.theVerId_signalNumber = GSN_SUB_GCP_COMPLETE_ACC;
+ tSignal.theLength = SubGcpCompleteAcc::SignalLength;
+
+ Uint32 aNodeId = refToNode(ref);
+
+ // m_transporter->lock_mutex();
+ int r;
+ r = m_transporter->sendSignal(&tSignal, aNodeId);
+ // m_transporter->unlock_mutex();
+
+ NdbGlobalEventBufferHandle::latestGCI(bufferId, gci);
+}
+
+void
+NdbDictInterface::execSUB_TABLE_DATA(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+#ifdef EVENT_DEBUG
+ const char * FNAME = "NdbDictInterface::execSUB_TABLE_DATA";
+#endif
+ //TODO
+ const SubTableData * const sdata = CAST_CONSTPTR(SubTableData, signal->getDataPtr());
+
+ // const Uint32 gci = sdata->gci;
+ // const Uint32 operation = sdata->operation;
+ // const Uint32 tableId = sdata->tableId;
+ // const Uint32 noOfAttrs = sdata->noOfAttributes;
+ // const Uint32 dataLen = sdata->dataSize;
+ const Uint32 subscriberData = sdata->subscriberData;
+ // const Uint32 logType = sdata->logType;
+
+ for (int i=signal->m_noOfSections;i < 3; i++) {
+ ptr[i].p = NULL;
+ ptr[i].sz = 0;
+ }
+#ifdef EVENT_DEBUG
+ ndbout_c("%s: senderData %d, gci %d, operation %d, tableId %d, noOfAttrs %d, dataLen %d",
+ FNAME, subscriberData, gci, operation, tableId, noOfAttrs, dataLen);
+ ndbout_c("ptr[0] %u %u ptr[1] %u %u ptr[2] %u %u\n",
+ ptr[0].p,ptr[0].sz,ptr[1].p,ptr[1].sz,ptr[2].p,ptr[2].sz);
+#endif
+ const Uint32 bufferId = subscriberData;
+
+ NdbGlobalEventBufferHandle::insertDataL(bufferId,
+ sdata, ptr);
+}
+
+/*****************************************************************
+ * Drop event
+ */
+int
+NdbDictionaryImpl::dropEvent(const char * eventName)
+{
+ NdbEventImpl *ev= new NdbEventImpl();
+ ev->setName(eventName);
+ int ret= m_receiver.dropEvent(*ev);
+ delete ev;
+
+ // printf("__________________RET %u\n", ret);
+ return ret;
+}
+
+int
+NdbDictInterface::dropEvent(const NdbEventImpl &evnt)
+{
+ NdbApiSignal tSignal(m_reference);
+ tSignal.theReceiversBlockNumber = DBDICT;
+ tSignal.theVerId_signalNumber = GSN_DROP_EVNT_REQ;
+ tSignal.theLength = DropEvntReq::SignalLength;
+
+ DropEvntReq * const req = CAST_PTR(DropEvntReq, tSignal.getDataPtrSend());
+
+ req->setUserRef(m_reference);
+ req->setUserData(0);
+
+ UtilBufferWriter w(m_buffer);
+
+ w.add(SimpleProperties::StringValue, evnt.m_externalName.c_str());
+
+ LinearSectionPtr ptr[1];
+ ptr[0].p = (Uint32*)m_buffer.get_data();
+ ptr[0].sz = (m_buffer.length()+3) >> 2;
+
+ return dropEvent(&tSignal, ptr, 1);
+}
+
+int
+NdbDictInterface::dropEvent(NdbApiSignal* signal,
+ LinearSectionPtr ptr[3], int noLSP)
+{
+ //TODO
+ const int noErrCodes = 1;
+ int errCodes[noErrCodes] = {DropEvntRef::Busy};
+ return dictSignal(signal,ptr,noLSP,
+ 1 /*use masternode id*/,
+ 100,
+ WAIT_CREATE_INDX_REQ /*WAIT_CREATE_EVNT_REQ*/,
+ -1,
+ errCodes,noErrCodes, DropEvntRef::Temporary);
+}
+void
+NdbDictInterface::execDROP_EVNT_CONF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ DBUG_ENTER("NdbDictInterface::execDROP_EVNT_CONF");
+ m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
+}
+
+void
+NdbDictInterface::execDROP_EVNT_REF(NdbApiSignal * signal,
+ LinearSectionPtr ptr[3])
+{
+ DBUG_ENTER("NdbDictInterface::execDROP_EVNT_REF");
+ const DropEvntRef* const ref=
+ CAST_CONSTPTR(DropEvntRef, signal->getDataPtr());
+ m_error.code= ref->getErrorCode();
+
+ DBUG_PRINT("info",("ErrorCode=%u Errorline=%u ErrorNode=%u",
+ ref->getErrorCode(), ref->getErrorLine(), ref->getErrorNode()));
+
+ m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
+}
+
+/*****************************************************************
+ * List objects or indexes
+ */
+int
+NdbDictionaryImpl::listObjects(List& list, NdbDictionary::Object::Type type)
+{
+ ListTablesReq req;
+ req.requestData = 0;
+ req.setTableType(getKernelConstant(type, objectTypeMapping, 0));
+ req.setListNames(true);
+ return m_receiver.listObjects(list, req.requestData, m_ndb.usingFullyQualifiedNames());
+}
+
+int
+NdbDictionaryImpl::listIndexes(List& list, Uint32 indexId)
+{
+ ListTablesReq req;
+ req.requestData = 0;
+ req.setTableId(indexId);
+ req.setListNames(true);
+ req.setListIndexes(true);
+ return m_receiver.listObjects(list, req.requestData, m_ndb.usingFullyQualifiedNames());
+}
+
+int
+NdbDictInterface::listObjects(NdbDictionary::Dictionary::List& list,
+ Uint32 requestData, bool fullyQualifiedNames)
+{
+ NdbApiSignal tSignal(m_reference);
+ ListTablesReq* const req = CAST_PTR(ListTablesReq, tSignal.getDataPtrSend());
+ req->senderRef = m_reference;
+ req->senderData = 0;
+ req->requestData = requestData;
+ tSignal.theReceiversBlockNumber = DBDICT;
+ tSignal.theVerId_signalNumber = GSN_LIST_TABLES_REQ;
+ tSignal.theLength = ListTablesReq::SignalLength;
+ if (listObjects(&tSignal) != 0)
+ return -1;
+ // count
+ const Uint32* data = (const Uint32*)m_buffer.get_data();
+ const unsigned length = m_buffer.length() / 4;
+ list.count = 0;
+ bool ok = true;
+ unsigned pos, count;
+ pos = count = 0;
+ while (pos < length) {
+ // table id - name length - name
+ pos++;
+ if (pos >= length) {
+ ok = false;
+ break;
+ }
+ Uint32 n = (data[pos++] + 3) >> 2;
+ pos += n;
+ if (pos > length) {
+ ok = false;
+ break;
+ }
+ count++;
+ }
+ if (! ok) {
+ // bad signal data
+ m_error.code= 4213;
+ return -1;
+ }
+ list.count = count;
+ list.elements = new NdbDictionary::Dictionary::List::Element[count];
+ pos = count = 0;
+ while (pos < length) {
+ NdbDictionary::Dictionary::List::Element& element = list.elements[count];
+ Uint32 d = data[pos++];
+ element.id = ListTablesConf::getTableId(d);
+ element.type = (NdbDictionary::Object::Type)
+ getApiConstant(ListTablesConf::getTableType(d), objectTypeMapping, 0);
+ element.state = (NdbDictionary::Object::State)
+ getApiConstant(ListTablesConf::getTableState(d), objectStateMapping, 0);
+ element.store = (NdbDictionary::Object::Store)
+ getApiConstant(ListTablesConf::getTableStore(d), objectStoreMapping, 0);
+ // table or index name
+ Uint32 n = (data[pos++] + 3) >> 2;
+ BaseString databaseName;
+ BaseString schemaName;
+ BaseString objectName;
+ if ((element.type == NdbDictionary::Object::UniqueHashIndex) ||
+ (element.type == NdbDictionary::Object::OrderedIndex)) {
+ char * indexName = new char[n << 2];
+ memcpy(indexName, &data[pos], n << 2);
+ databaseName = Ndb::getDatabaseFromInternalName(indexName);
+ schemaName = Ndb::getSchemaFromInternalName(indexName);
+ objectName = BaseString(Ndb::externalizeIndexName(indexName, fullyQualifiedNames));
+ delete [] indexName;
+ } else if ((element.type == NdbDictionary::Object::SystemTable) ||
+ (element.type == NdbDictionary::Object::UserTable)) {
+ char * tableName = new char[n << 2];
+ memcpy(tableName, &data[pos], n << 2);
+ databaseName = Ndb::getDatabaseFromInternalName(tableName);
+ schemaName = Ndb::getSchemaFromInternalName(tableName);
+ objectName = BaseString(Ndb::externalizeTableName(tableName, fullyQualifiedNames));
+ delete [] tableName;
+ }
+ else {
+ char * otherName = new char[n << 2];
+ memcpy(otherName, &data[pos], n << 2);
+ objectName = BaseString(otherName);
+ delete [] otherName;
+ }
+ element.database = new char[databaseName.length() + 1];
+ strcpy(element.database, databaseName.c_str());
+ element.schema = new char[schemaName.length() + 1];
+ strcpy(element.schema, schemaName.c_str());
+ element.name = new char[objectName.length() + 1];
+ strcpy(element.name, objectName.c_str());
+ pos += n;
+ count++;
+ }
+ return 0;
+}
+
+int
+NdbDictInterface::listObjects(NdbApiSignal* signal)
+{
+ const Uint32 RETRIES = 100;
+ for (Uint32 i = 0; i < RETRIES; i++) {
+ m_buffer.clear();
+ // begin protected
+ m_transporter->lock_mutex();
+ Uint16 aNodeId = m_transporter->get_an_alive_node();
+ if (aNodeId == 0) {
+ m_error.code= 4009;
+ m_transporter->unlock_mutex();
+ return -1;
+ }
+ if (m_transporter->sendSignal(signal, aNodeId) != 0) {
+ m_transporter->unlock_mutex();
+ continue;
+ }
+ m_error.code= 0;
+ m_waiter.m_node = aNodeId;
+ m_waiter.m_state = WAIT_LIST_TABLES_CONF;
+ m_waiter.wait(WAITFOR_RESPONSE_TIMEOUT);
+ m_transporter->unlock_mutex();
+ // end protected
+ if (m_waiter.m_state == NO_WAIT && m_error.code == 0)
+ return 0;
+ if (m_waiter.m_state == WAIT_NODE_FAILURE)
+ continue;
+ return -1;
+ }
+ return -1;
+}
+
+void
+NdbDictInterface::execLIST_TABLES_CONF(NdbApiSignal* signal,
+ LinearSectionPtr ptr[3])
+{
+ const unsigned off = ListTablesConf::HeaderLength;
+ const unsigned len = (signal->getLength() - off);
+ m_buffer.append(signal->getDataPtr() + off, len << 2);
+ if (signal->getLength() < ListTablesConf::SignalLength) {
+ // last signal has less than full length
+ m_waiter.signal(NO_WAIT);
+ }
+}
+
+template class Vector<int>;
+template class Vector<Uint16>;
+template class Vector<Uint32>;
+template class Vector<Vector<Uint32> >;
+template class Vector<NdbTableImpl*>;
+template class Vector<NdbColumnImpl*>;
+
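For orientation only (this note and sketch are not part of the patch): the implementation above sits behind the public NdbDictionary facade in ndb/include/ndbapi/NdbDictionary.hpp, so applications never call NdbDictInterface directly. A minimal, hedged sketch of how a table definition reaches NdbDictInterface::createOrAlterTable through that facade follows; the method names used (setName, setType, setPrimaryKey, addColumn, createTable) are taken from the public API of this period and should be verified against the header, and the table and column names are placeholders.

    // Sketch: create a two-column table via the public dictionary API.
    #include <NdbApi.hpp>

    int create_demo_table(Ndb &ndb)
    {
      NdbDictionary::Dictionary *dict = ndb.getDictionary();

      NdbDictionary::Table tab;
      tab.setName("demo");                      // external name; internalized by NdbDictionaryImpl

      NdbDictionary::Column id;
      id.setName("id");
      id.setType(NdbDictionary::Column::Unsigned);
      id.setPrimaryKey(true);                   // becomes AttributeKeyFlag in the DictTabInfo record
      tab.addColumn(id);

      NdbDictionary::Column val;
      val.setName("val");
      val.setType(NdbDictionary::Column::Varchar);
      val.setLength(64);
      val.setNullable(true);
      tab.addColumn(val);

      // Packs the definition and sends GSN_CREATE_TABLE_REQ to DBDICT,
      // ending up in NdbDictInterface::createTable above.
      if (dict->createTable(tab) != 0)
        return dict->getNdbError().code;
      return 0;
    }
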
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
new file mode 100644
index 00000000000..b4614ec3512
--- /dev/null
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -0,0 +1,708 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef NdbDictionaryImpl_H
+#define NdbDictionaryImpl_H
+
+#include <ndb_types.h>
+#include <kernel_types.h>
+#include <NdbError.hpp>
+#include <BaseString.hpp>
+#include <Vector.hpp>
+#include <UtilBuffer.hpp>
+#include <NdbDictionary.hpp>
+#include <Bitmask.hpp>
+#include <AttributeList.hpp>
+#include <Ndb.hpp>
+#include "NdbWaiter.hpp"
+#include "DictCache.hpp"
+
+class NdbDictObjectImpl {
+public:
+ Uint32 m_version;
+ NdbDictionary::Object::Status m_status;
+
+ bool change();
+protected:
+ NdbDictObjectImpl() :
+ m_status(NdbDictionary::Object::New) {
+ }
+};
+
+/**
+ * Column
+ */
+class NdbColumnImpl : public NdbDictionary::Column {
+public:
+ NdbColumnImpl();
+ NdbColumnImpl(NdbDictionary::Column &); // This is not a copy constructor
+ ~NdbColumnImpl();
+ NdbColumnImpl& operator=(const NdbColumnImpl&);
+ void init(Type t = Unsigned);
+
+ int m_attrId;
+ BaseString m_name;
+ NdbDictionary::Column::Type m_type;
+ int m_precision;
+ int m_scale;
+ int m_length;
+ CHARSET_INFO * m_cs; // not const in MySQL
+
+ bool m_pk;
+ bool m_distributionKey;
+ bool m_nullable;
+ bool m_autoIncrement;
+ Uint64 m_autoIncrementInitialValue;
+ BaseString m_defaultValue;
+ NdbTableImpl * m_blobTable;
+
+ /**
+ * Internal types and sizes, and aggregates
+ */
+ Uint32 m_attrSize; // element size (size when arraySize==1)
+ Uint32 m_arraySize; // length or length+2 for Var* types
+ Uint32 m_keyInfoPos;
+ // TODO: use bits in attr desc 2
+ bool getInterpretableType() const ;
+ bool getCharType() const;
+ bool getStringType() const;
+ bool getBlobType() const;
+
+ /**
+ * Equality/assign
+ */
+ bool equal(const NdbColumnImpl&) const;
+
+ static NdbColumnImpl & getImpl(NdbDictionary::Column & t);
+ static const NdbColumnImpl & getImpl(const NdbDictionary::Column & t);
+ NdbDictionary::Column * m_facade;
+
+ static NdbDictionary::Column * create_pseudo(const char *);
+};
+
+class NdbTableImpl : public NdbDictionary::Table, public NdbDictObjectImpl {
+public:
+ NdbTableImpl();
+ NdbTableImpl(NdbDictionary::Table &);
+ ~NdbTableImpl();
+
+ void init();
+ void setName(const char * name);
+ const char * getName() const;
+
+ Uint32 m_changeMask;
+ Uint32 m_tableId;
+ Uint32 m_primaryTableId;
+ BaseString m_internalName;
+ BaseString m_externalName;
+ BaseString m_newExternalName; // Used for alter table
+ UtilBuffer m_frm;
+ UtilBuffer m_ng;
+ NdbDictionary::Object::FragmentType m_fragmentType;
+
+ /**
+ *
+ */
+ Uint32 m_columnHashMask;
+ Vector<Uint32> m_columnHash;
+ Vector<NdbColumnImpl *> m_columns;
+ void buildColumnHash();
+
+ /**
+ * Fragment info
+ */
+ Uint32 m_hashValueMask;
+ Uint32 m_hashpointerValue;
+ Vector<Uint16> m_fragments;
+
+ bool m_logging;
+ int m_kvalue;
+ int m_minLoadFactor;
+ int m_maxLoadFactor;
+ Uint16 m_keyLenInWords;
+ Uint16 m_fragmentCount;
+
+ NdbDictionaryImpl * m_dictionary;
+ NdbIndexImpl * m_index;
+ NdbColumnImpl * getColumn(unsigned attrId);
+ NdbColumnImpl * getColumn(const char * name);
+ const NdbColumnImpl * getColumn(unsigned attrId) const;
+ const NdbColumnImpl * getColumn(const char * name) const;
+
+ /**
+ * Index only stuff
+ */
+ BaseString m_primaryTable;
+ NdbDictionary::Index::Type m_indexType;
+
+ /**
+ * Aggregates
+ */
+ Uint8 m_noOfKeys;
+ Uint8 m_noOfDistributionKeys;
+ Uint8 m_noOfBlobs;
+
+ Uint8 m_replicaCount;
+
+ /**
+ * Equality/assign
+ */
+ bool equal(const NdbTableImpl&) const;
+ void assign(const NdbTableImpl&);
+
+ static NdbTableImpl & getImpl(NdbDictionary::Table & t);
+ static NdbTableImpl & getImpl(const NdbDictionary::Table & t);
+ NdbDictionary::Table * m_facade;
+
+ /**
+ * Return count
+ */
+ Uint32 get_nodes(Uint32 hashValue, const Uint16** nodes) const ;
+};
+
+class NdbIndexImpl : public NdbDictionary::Index, public NdbDictObjectImpl {
+public:
+ NdbIndexImpl();
+ NdbIndexImpl(NdbDictionary::Index &);
+ ~NdbIndexImpl();
+
+ void init();
+ void setName(const char * name);
+ const char * getName() const;
+ void setTable(const char * table);
+ const char * getTable() const;
+ const NdbTableImpl * getIndexTable() const;
+
+ Uint32 m_indexId;
+ BaseString m_internalName;
+ BaseString m_externalName;
+ BaseString m_tableName;
+ Vector<NdbColumnImpl *> m_columns;
+ Vector<int> m_key_ids;
+ NdbDictionary::Index::Type m_type;
+
+ bool m_logging;
+
+ NdbTableImpl * m_table;
+
+ static NdbIndexImpl & getImpl(NdbDictionary::Index & t);
+ static NdbIndexImpl & getImpl(const NdbDictionary::Index & t);
+ NdbDictionary::Index * m_facade;
+};
+
+class NdbEventImpl : public NdbDictionary::Event, public NdbDictObjectImpl {
+public:
+ NdbEventImpl();
+ NdbEventImpl(NdbDictionary::Event &);
+ ~NdbEventImpl();
+
+ void init();
+ void setName(const char * name);
+ const char * getName() const;
+ void setTable(const NdbDictionary::Table& table);
+ void setTable(const char * table);
+ const char * getTableName() const;
+ void addTableEvent(const NdbDictionary::Event::TableEvent t);
+ void setDurability(NdbDictionary::Event::EventDurability d);
+ NdbDictionary::Event::EventDurability getDurability() const;
+ void addEventColumn(const NdbColumnImpl &c);
+ int getNoOfEventColumns() const;
+
+ void print() {
+ ndbout_c("NdbEventImpl: id=%d, key=%d",
+ m_eventId,
+ m_eventKey);
+ };
+
+ Uint32 m_eventId;
+ Uint32 m_eventKey;
+ Uint32 m_tableId;
+ AttributeMask m_attrListBitmask;
+ //BaseString m_internalName;
+ BaseString m_externalName;
+ Uint32 mi_type;
+ NdbDictionary::Event::EventDurability m_dur;
+
+
+ NdbTableImpl *m_tableImpl;
+ BaseString m_tableName;
+ Vector<NdbColumnImpl *> m_columns;
+ Vector<unsigned> m_attrIds;
+
+ int m_bufferId;
+
+ NdbEventOperation *eventOp;
+
+ static NdbEventImpl & getImpl(NdbDictionary::Event & t);
+ static NdbEventImpl & getImpl(const NdbDictionary::Event & t);
+ NdbDictionary::Event * m_facade;
+};
+
+
+class NdbDictInterface {
+public:
+ NdbDictInterface(NdbError& err) : m_error(err) {
+ m_reference = 0;
+ m_masterNodeId = 0;
+ m_transporter= NULL;
+ }
+ ~NdbDictInterface();
+
+ bool setTransporter(class Ndb * ndb, class TransporterFacade * tf);
+ bool setTransporter(class TransporterFacade * tf);
+
+ // Common helper for the create/drop/list operations below
+ int
+ dictSignal(NdbApiSignal* signal,
+ LinearSectionPtr ptr[3], int noLPTR,
+ const int useMasterNodeId,
+ const Uint32 RETRIES,
+ const WaitSignalType wst,
+ const int theWait,
+ const int *errcodes,
+ const int noerrcodes,
+ const int temporaryMask = 0);
+
+ int createOrAlterTable(class Ndb & ndb, NdbTableImpl &, bool alter);
+
+ int createTable(class Ndb & ndb, NdbTableImpl &);
+ int createTable(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
+
+ int alterTable(class Ndb & ndb, NdbTableImpl &);
+ int alterTable(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
+
+ int createIndex(class Ndb & ndb,
+ NdbIndexImpl &,
+ const NdbTableImpl &);
+ int createIndex(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
+
+ int createEvent(class Ndb & ndb, NdbEventImpl &, int getFlag);
+ int createEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3], int noLSP);
+
+ int dropTable(const NdbTableImpl &);
+ int dropTable(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
+
+ int dropIndex(const NdbIndexImpl &, const NdbTableImpl &);
+ int dropIndex(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
+
+ int dropEvent(const NdbEventImpl &);
+ int dropEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3], int noLSP);
+
+ int executeSubscribeEvent(class Ndb & ndb, NdbEventImpl &);
+ int executeSubscribeEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
+
+ int stopSubscribeEvent(class Ndb & ndb, NdbEventImpl &);
+ int stopSubscribeEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
+
+ int listObjects(NdbDictionary::Dictionary::List& list, Uint32 requestData, bool fullyQualifiedNames);
+ int listObjects(NdbApiSignal* signal);
+
+/* NdbTableImpl * getTable(int tableId, bool fullyQualifiedNames); */
+ NdbTableImpl * getTable(const BaseString& name, bool fullyQualifiedNames);
+ NdbTableImpl * getTable(class NdbApiSignal * signal,
+ LinearSectionPtr ptr[3],
+ Uint32 noOfSections, bool fullyQualifiedNames);
+
+ static int parseTableInfo(NdbTableImpl ** dst,
+ const Uint32 * data, Uint32 len,
+ bool fullyQualifiedNames);
+
+ static int create_index_obj_from_table(NdbIndexImpl ** dst,
+ NdbTableImpl* index_table,
+ const NdbTableImpl* primary_table);
+
+ NdbError & m_error;
+private:
+ Uint32 m_reference;
+ Uint32 m_masterNodeId;
+
+ NdbWaiter m_waiter;
+ class TransporterFacade * m_transporter;
+
+ friend class Ndb;
+ static void execSignal(void* dictImpl,
+ class NdbApiSignal* signal,
+ struct LinearSectionPtr ptr[3]);
+
+ static void execNodeStatus(void* dictImpl, Uint32,
+ bool alive, bool nfCompleted);
+
+ void execGET_TABINFO_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execGET_TABINFO_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execCREATE_TABLE_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execCREATE_TABLE_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execALTER_TABLE_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execALTER_TABLE_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+
+ void execCREATE_INDX_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execCREATE_INDX_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execDROP_INDX_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execDROP_INDX_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+
+ void execCREATE_EVNT_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execCREATE_EVNT_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execSUB_START_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execSUB_START_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execSUB_TABLE_DATA(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execSUB_GCP_COMPLETE_REP(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execSUB_STOP_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execSUB_STOP_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execDROP_EVNT_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execDROP_EVNT_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+
+ void execDROP_TABLE_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execDROP_TABLE_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execLIST_TABLES_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+
+ Uint32 m_fragmentId;
+ UtilBuffer m_buffer;
+};
+
+class NdbDictionaryImpl : public NdbDictionary::Dictionary {
+public:
+ NdbDictionaryImpl(Ndb &ndb);
+ NdbDictionaryImpl(Ndb &ndb, NdbDictionary::Dictionary & f);
+ ~NdbDictionaryImpl();
+
+ bool setTransporter(class Ndb * ndb, class TransporterFacade * tf);
+ bool setTransporter(class TransporterFacade * tf);
+
+ int createTable(NdbTableImpl &t);
+ int createBlobTables(NdbTableImpl &);
+ int addBlobTables(NdbTableImpl &);
+ int alterTable(NdbTableImpl &t);
+ int dropTable(const char * name);
+ int dropTable(NdbTableImpl &);
+ int dropBlobTables(NdbTableImpl &);
+ int invalidateObject(NdbTableImpl &);
+ int removeCachedObject(NdbTableImpl &);
+
+ int createIndex(NdbIndexImpl &ix);
+ int dropIndex(const char * indexName,
+ const char * tableName);
+ int dropIndex(NdbIndexImpl &, const char * tableName);
+ NdbTableImpl * getIndexTable(NdbIndexImpl * index,
+ NdbTableImpl * table);
+
+ int createEvent(NdbEventImpl &);
+ int dropEvent(const char * eventName);
+
+ int executeSubscribeEvent(NdbEventImpl &);
+ int stopSubscribeEvent(NdbEventImpl &);
+
+ int listObjects(List& list, NdbDictionary::Object::Type type);
+ int listIndexes(List& list, Uint32 indexId);
+
+ NdbTableImpl * getTable(const char * tableName, void **data= 0);
+ Ndb_local_table_info* get_local_table_info(
+ const BaseString& internalTableName, bool do_add_blob_tables);
+ NdbIndexImpl * getIndex(const char * indexName,
+ const char * tableName);
+ NdbEventImpl * getEvent(const char * eventName);
+ NdbEventImpl * getEventImpl(const char * internalName);
+
+ const NdbError & getNdbError() const;
+ NdbError m_error;
+ Uint32 m_local_table_data_size;
+
+ LocalDictCache m_localHash;
+ GlobalDictCache * m_globalHash;
+
+ static NdbDictionaryImpl & getImpl(NdbDictionary::Dictionary & t);
+ static const NdbDictionaryImpl & getImpl(const NdbDictionary::Dictionary &t);
+ NdbDictionary::Dictionary * m_facade;
+
+ NdbDictInterface m_receiver;
+ Ndb & m_ndb;
+private:
+ NdbIndexImpl * getIndexImpl(const char * name,
+ const BaseString& internalName);
+ Ndb_local_table_info * fetchGlobalTableImpl(const BaseString& internalName);
+};
+
+inline
+NdbEventImpl &
+NdbEventImpl::getImpl(const NdbDictionary::Event & t){
+ return t.m_impl;
+}
+
+inline
+NdbEventImpl &
+NdbEventImpl::getImpl(NdbDictionary::Event & t){
+ return t.m_impl;
+}
+
+inline
+NdbColumnImpl &
+NdbColumnImpl::getImpl(NdbDictionary::Column & t){
+ return t.m_impl;
+}
+
+inline
+const NdbColumnImpl &
+NdbColumnImpl::getImpl(const NdbDictionary::Column & t){
+ return t.m_impl;
+}
+
+inline
+bool
+NdbColumnImpl::getInterpretableType() const {
+ return (m_type == NdbDictionary::Column::Unsigned ||
+ m_type == NdbDictionary::Column::Bigunsigned);
+}
+
+inline
+bool
+NdbColumnImpl::getCharType() const {
+ return (m_type == NdbDictionary::Column::Char ||
+ m_type == NdbDictionary::Column::Varchar ||
+ m_type == NdbDictionary::Column::Text ||
+ m_type == NdbDictionary::Column::Longvarchar);
+}
+
+inline
+bool
+NdbColumnImpl::getStringType() const {
+ return (m_type == NdbDictionary::Column::Char ||
+ m_type == NdbDictionary::Column::Varchar ||
+ m_type == NdbDictionary::Column::Longvarchar ||
+ m_type == NdbDictionary::Column::Binary ||
+ m_type == NdbDictionary::Column::Varbinary ||
+ m_type == NdbDictionary::Column::Longvarbinary);
+}
+
+inline
+bool
+NdbColumnImpl::getBlobType() const {
+ return (m_type == NdbDictionary::Column::Blob ||
+ m_type == NdbDictionary::Column::Text);
+}
+
+inline
+NdbTableImpl &
+NdbTableImpl::getImpl(NdbDictionary::Table & t){
+ return t.m_impl;
+}
+
+inline
+NdbTableImpl &
+NdbTableImpl::getImpl(const NdbDictionary::Table & t){
+ return t.m_impl;
+}
+
+inline
+NdbColumnImpl *
+NdbTableImpl::getColumn(unsigned attrId){
+ if(m_columns.size() > attrId){
+ return m_columns[attrId];
+ }
+ return 0;
+}
+
+inline
+Uint32
+Hash( const char* str ){
+ Uint32 h = 0;
+ Uint32 len = strlen(str);
+ while(len >= 4){
+ h = (h << 5) + h + str[0];
+ h = (h << 5) + h + str[1];
+ h = (h << 5) + h + str[2];
+ h = (h << 5) + h + str[3];
+ len -= 4;
+ str += 4;
+ }
+
+ switch(len){
+ case 3:
+ h = (h << 5) + h + *str++;
+ case 2:
+ h = (h << 5) + h + *str++;
+ case 1:
+ h = (h << 5) + h + *str++;
+ }
+ return h + h;
+}
+
+
+inline
+NdbColumnImpl *
+NdbTableImpl::getColumn(const char * name){
+
+ Uint32 sz = m_columns.size();
+ NdbColumnImpl** cols = m_columns.getBase();
+ const Uint32 * hashtable = m_columnHash.getBase();
+
+ if(sz > 5 && false){
+ Uint32 hashValue = Hash(name) & 0xFFFE;
+ Uint32 bucket = hashValue & m_columnHashMask;
+ bucket = (bucket < sz ? bucket : bucket - sz);
+ hashtable += bucket;
+ Uint32 tmp = * hashtable;
+ if((tmp & 1) == 1 ){ // No chaining
+ sz = 1;
+ } else {
+ sz = (tmp >> 16);
+ hashtable += (tmp & 0xFFFE) >> 1;
+ tmp = * hashtable;
+ }
+ do {
+ if(hashValue == (tmp & 0xFFFE)){
+ NdbColumnImpl* col = cols[tmp >> 16];
+ if(strncmp(name, col->m_name.c_str(), col->m_name.length()) == 0){
+ return col;
+ }
+ }
+ hashtable++;
+ tmp = * hashtable;
+ } while(--sz > 0);
+#if 0
+ Uint32 dir = m_columnHash[bucket];
+ Uint32 pos = bucket + ((dir & 0xFFFE) >> 1);
+ Uint32 cnt = dir >> 16;
+ ndbout_c("col: %s hv: %x bucket: %d dir: %x pos: %d cnt: %d tmp: %d -> 0",
+ name, hashValue, bucket, dir, pos, cnt, tmp);
+#endif
+ return 0;
+ } else {
+ for(Uint32 i = 0; i<sz; i++){
+ NdbColumnImpl* col = * cols++;
+ if(col != 0 && strcmp(name, col->m_name.c_str()) == 0)
+ return col;
+ }
+ }
+ return 0;
+}
+
+inline
+const NdbColumnImpl *
+NdbTableImpl::getColumn(unsigned attrId) const {
+ if(m_columns.size() > attrId){
+ return m_columns[attrId];
+ }
+ return 0;
+}
+
+inline
+const NdbColumnImpl *
+NdbTableImpl::getColumn(const char * name) const {
+ Uint32 sz = m_columns.size();
+ NdbColumnImpl* const * cols = m_columns.getBase();
+ for(Uint32 i = 0; i<sz; i++, cols++){
+ NdbColumnImpl* col = * cols;
+ if(col != 0 && strcmp(name, col->m_name.c_str()) == 0)
+ return col;
+ }
+ return 0;
+}
+
+inline
+NdbIndexImpl &
+NdbIndexImpl::getImpl(NdbDictionary::Index & t){
+ return t.m_impl;
+}
+
+inline
+NdbIndexImpl &
+NdbIndexImpl::getImpl(const NdbDictionary::Index & t){
+ return t.m_impl;
+}
+
+inline
+NdbDictionaryImpl &
+NdbDictionaryImpl::getImpl(NdbDictionary::Dictionary & t){
+ return t.m_impl;
+}
+
+inline
+const NdbDictionaryImpl &
+NdbDictionaryImpl::getImpl(const NdbDictionary::Dictionary & t){
+ return t.m_impl;
+}
+
+/*****************************************************************
+ * Inlined getters
+ */
+
+inline
+NdbTableImpl *
+NdbDictionaryImpl::getTable(const char * table_name, void **data)
+{
+ const BaseString internal_tabname(m_ndb.internalize_table_name(table_name));
+ Ndb_local_table_info *info=
+ get_local_table_info(internal_tabname, true);
+ if (info == 0)
+ return 0;
+
+ if (data)
+ *data= info->m_local_data;
+
+ return info->m_table_impl;
+}
+
+inline
+Ndb_local_table_info *
+NdbDictionaryImpl::get_local_table_info(const BaseString& internalTableName,
+ bool do_add_blob_tables)
+{
+ Ndb_local_table_info *info= m_localHash.get(internalTableName.c_str());
+ if (info == 0) {
+ info= fetchGlobalTableImpl(internalTableName);
+ if (info == 0) {
+ return 0;
+ }
+ }
+ if (do_add_blob_tables && info->m_table_impl->m_noOfBlobs)
+ addBlobTables(*(info->m_table_impl));
+
+ return info; // autoincrement already initialized
+}
+
+inline
+NdbIndexImpl *
+NdbDictionaryImpl::getIndex(const char * index_name,
+ const char * table_name)
+{
+ if (table_name || m_ndb.usingFullyQualifiedNames())
+ {
+ const BaseString internal_indexname(
+ (table_name)
+ ?
+ m_ndb.internalize_index_name(getTable(table_name), index_name)
+ :
+ m_ndb.internalize_table_name(index_name)); // Index is also a table
+
+ if (internal_indexname.length())
+ {
+ Ndb_local_table_info * info=
+ get_local_table_info(internal_indexname, false);
+ if (info)
+ {
+ NdbTableImpl * tab= info->m_table_impl;
+ if (tab->m_index == 0)
+ tab->m_index= getIndexImpl(index_name, internal_indexname);
+ if (tab->m_index != 0)
+ tab->m_index->m_table= tab;
+ return tab->m_index;
+ }
+ }
+ }
+
+ m_error.code= 4243;
+ return 0;
+}
+
+#endif
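
Note on the column lookup in the hunk above: NdbTableImpl::getColumn(const char*) carries a hash-based fast path that is compiled out by the "sz > 5 && false" guard, so lookups currently fall through to the linear scan over m_columns. The packed layout the fast path decodes can still be read off the code: a bucket word with the low bit set is a single inline entry (hash value in bits 1..15, column index in bits 16..31); otherwise the word is a chain header whose high 16 bits give the chain length and whose bits 1..15 give twice the word offset from the bucket to its chain entries, each of which again holds hash value plus column index. The code that builds m_columnHash is not part of this hunk, so the following self-contained C++ sketch uses assumed names (hashName, ColumnHash, lookup) and is only a model of that encoding, not the NDB builder: it packs a tiny table in the same layout and looks names up with the same bucket/chain walk. It uses a power-of-two bucket count, so it skips the "bucket - sz" fold the real code applies, and it stops the chain walk before advancing past the last entry instead of using the do/while form above.

// Illustrative sketch only; assumed names, not NDB source.
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

typedef uint32_t Uint32;

// Per-character form of the shift-add hash in the hunk above; the final
// doubling keeps the low bit clear so it can be reused as the entry flag.
static Uint32 hashName(const char* s) {
  Uint32 h = 0;
  for (; *s; ++s)
    h = (h << 5) + h + (unsigned char)*s;
  return h + h;
}

struct ColumnHash {
  // Word layouts (as decoded by getColumn above):
  //   single entry : bit0 = 1, bits1..15 = hash & 0xFFFE, bits16..31 = column index
  //   chain header : bit0 = 0, bits16..31 = chain length, bits1..15 = 2 * offset to chain
  //   chain entry  : bit0 = 0, bits1..15 = hash & 0xFFFE, bits16..31 = column index
  std::vector<Uint32> words;
  Uint32 mask;                       // bucket count - 1 (power of two in this sketch)
  std::vector<std::string> names;    // stands in for m_columns

  void build(const std::vector<std::string>& cols, Uint32 buckets) {
    names = cols;
    mask = buckets - 1;
    std::vector<std::vector<Uint32> > chains(buckets);
    for (Uint32 i = 0; i < cols.size(); i++) {
      Uint32 hv = hashName(cols[i].c_str()) & 0xFFFE;
      chains[hv & mask].push_back((i << 16) | hv);
    }
    words.assign(buckets, 0u);
    for (Uint32 b = 0; b < buckets; b++) {
      if (chains[b].size() == 1) {
        words[b] = chains[b][0] | 1;               // inline single entry, flag bit set
      } else if (!chains[b].empty()) {
        Uint32 off = (Uint32)words.size() - b;     // word distance to the chain start
        words[b] = ((Uint32)chains[b].size() << 16) | ((off << 1) & 0xFFFE);
        words.insert(words.end(), chains[b].begin(), chains[b].end());
      }                                            // empty bucket stays 0 (sketch convention)
    }
  }

  // Same decode walk as NdbTableImpl::getColumn(const char*), sketched.
  int lookup(const char* name) const {
    Uint32 hv = hashName(name) & 0xFFFE;
    Uint32 bucket = hv & mask;
    const Uint32* p = &words[bucket];
    Uint32 tmp = *p, cnt;
    if (tmp & 1) {                   // single entry stored in the bucket word itself
      cnt = 1;
    } else if (tmp == 0) {           // empty bucket
      return -1;
    } else {                         // follow the chain: length in high bits, offset in low
      cnt = tmp >> 16;
      p += (tmp & 0xFFFE) >> 1;
      tmp = *p;
    }
    for (;;) {
      if (hv == (tmp & 0xFFFE) && names[tmp >> 16] == name)
        return (int)(tmp >> 16);     // column index lives in the high 16 bits
      if (--cnt == 0)
        return -1;
      tmp = *++p;
    }
  }
};

int main() {
  std::vector<std::string> cols;
  cols.push_back("id"); cols.push_back("name"); cols.push_back("price"); cols.push_back("qty");
  ColumnHash h;
  h.build(cols, 4);
  printf("price -> %d, missing -> %d\n", h.lookup("price"), h.lookup("missing"));
  return 0;
}

Packing the 15-bit hash value and the column index into one 32-bit word keeps both the bucket directory and the collision chains in a single flat Uint32 array, which is why the decode in getColumn needs nothing beyond shifts, masks and one final name comparison.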
diff --git a/ndb/src/ndbapi/NdbErrorOut.cpp b/storage/ndb/src/ndbapi/NdbErrorOut.cpp
index 07e0b2fe6e8..07e0b2fe6e8 100644
--- a/ndb/src/ndbapi/NdbErrorOut.cpp
+++ b/storage/ndb/src/ndbapi/NdbErrorOut.cpp
diff --git a/ndb/src/ndbapi/NdbEventOperation.cpp b/storage/ndb/src/ndbapi/NdbEventOperation.cpp
index e99cad918c5..e99cad918c5 100644
--- a/ndb/src/ndbapi/NdbEventOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperation.cpp
diff --git a/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index 208525bfc15..208525bfc15 100644
--- a/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
diff --git a/ndb/src/ndbapi/NdbEventOperationImpl.hpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp
index 96958979c76..96958979c76 100644
--- a/ndb/src/ndbapi/NdbEventOperationImpl.hpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp
diff --git a/ndb/src/ndbapi/NdbImpl.hpp b/storage/ndb/src/ndbapi/NdbImpl.hpp
index d73b8afe10c..d73b8afe10c 100644
--- a/ndb/src/ndbapi/NdbImpl.hpp
+++ b/storage/ndb/src/ndbapi/NdbImpl.hpp
diff --git a/ndb/src/ndbapi/NdbIndexOperation.cpp b/storage/ndb/src/ndbapi/NdbIndexOperation.cpp
index 4cedffed4a2..4cedffed4a2 100644
--- a/ndb/src/ndbapi/NdbIndexOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbIndexOperation.cpp
diff --git a/ndb/src/ndbapi/NdbLinHash.hpp b/storage/ndb/src/ndbapi/NdbLinHash.hpp
index 05670534c95..05670534c95 100644
--- a/ndb/src/ndbapi/NdbLinHash.hpp
+++ b/storage/ndb/src/ndbapi/NdbLinHash.hpp
diff --git a/ndb/src/ndbapi/NdbOperation.cpp b/storage/ndb/src/ndbapi/NdbOperation.cpp
index c9143444908..c9143444908 100644
--- a/ndb/src/ndbapi/NdbOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperation.cpp
diff --git a/ndb/src/ndbapi/NdbOperationDefine.cpp b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
index 835e33dfb40..835e33dfb40 100644
--- a/ndb/src/ndbapi/NdbOperationDefine.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
diff --git a/ndb/src/ndbapi/NdbOperationExec.cpp b/storage/ndb/src/ndbapi/NdbOperationExec.cpp
index 58a816e3c1a..58a816e3c1a 100644
--- a/ndb/src/ndbapi/NdbOperationExec.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp
diff --git a/ndb/src/ndbapi/NdbOperationInt.cpp b/storage/ndb/src/ndbapi/NdbOperationInt.cpp
index 41e0cb1d140..41e0cb1d140 100644
--- a/ndb/src/ndbapi/NdbOperationInt.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationInt.cpp
diff --git a/ndb/src/ndbapi/NdbOperationScan.cpp b/storage/ndb/src/ndbapi/NdbOperationScan.cpp
index 283eb591bdb..283eb591bdb 100644
--- a/ndb/src/ndbapi/NdbOperationScan.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationScan.cpp
diff --git a/ndb/src/ndbapi/NdbOperationSearch.cpp b/storage/ndb/src/ndbapi/NdbOperationSearch.cpp
index 06d8ddd412b..06d8ddd412b 100644
--- a/ndb/src/ndbapi/NdbOperationSearch.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationSearch.cpp
diff --git a/ndb/src/ndbapi/NdbPool.cpp b/storage/ndb/src/ndbapi/NdbPool.cpp
index a8263f564f1..a8263f564f1 100644
--- a/ndb/src/ndbapi/NdbPool.cpp
+++ b/storage/ndb/src/ndbapi/NdbPool.cpp
diff --git a/ndb/src/ndbapi/NdbPoolImpl.cpp b/storage/ndb/src/ndbapi/NdbPoolImpl.cpp
index 32e0a6f1410..32e0a6f1410 100644
--- a/ndb/src/ndbapi/NdbPoolImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbPoolImpl.cpp
diff --git a/ndb/src/ndbapi/NdbPoolImpl.hpp b/storage/ndb/src/ndbapi/NdbPoolImpl.hpp
index cd36f30e90b..cd36f30e90b 100644
--- a/ndb/src/ndbapi/NdbPoolImpl.hpp
+++ b/storage/ndb/src/ndbapi/NdbPoolImpl.hpp
diff --git a/ndb/src/ndbapi/NdbRecAttr.cpp b/storage/ndb/src/ndbapi/NdbRecAttr.cpp
index 5e5306fc33a..5e5306fc33a 100644
--- a/ndb/src/ndbapi/NdbRecAttr.cpp
+++ b/storage/ndb/src/ndbapi/NdbRecAttr.cpp
diff --git a/ndb/src/ndbapi/NdbReceiver.cpp b/storage/ndb/src/ndbapi/NdbReceiver.cpp
index df16ae66915..df16ae66915 100644
--- a/ndb/src/ndbapi/NdbReceiver.cpp
+++ b/storage/ndb/src/ndbapi/NdbReceiver.cpp
diff --git a/ndb/src/ndbapi/NdbScanFilter.cpp b/storage/ndb/src/ndbapi/NdbScanFilter.cpp
index b39fd10fe95..b39fd10fe95 100644
--- a/ndb/src/ndbapi/NdbScanFilter.cpp
+++ b/storage/ndb/src/ndbapi/NdbScanFilter.cpp
diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
index e0a480e02f7..e0a480e02f7 100644
--- a/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
diff --git a/ndb/src/ndbapi/NdbTransaction.cpp b/storage/ndb/src/ndbapi/NdbTransaction.cpp
index 675c9383c6e..675c9383c6e 100644
--- a/ndb/src/ndbapi/NdbTransaction.cpp
+++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp
diff --git a/ndb/src/ndbapi/NdbTransactionScan.cpp b/storage/ndb/src/ndbapi/NdbTransactionScan.cpp
index 4c507f6ab8c..4c507f6ab8c 100644
--- a/ndb/src/ndbapi/NdbTransactionScan.cpp
+++ b/storage/ndb/src/ndbapi/NdbTransactionScan.cpp
diff --git a/ndb/src/ndbapi/NdbUtil.cpp b/storage/ndb/src/ndbapi/NdbUtil.cpp
index 5c74d251ff9..5c74d251ff9 100644
--- a/ndb/src/ndbapi/NdbUtil.cpp
+++ b/storage/ndb/src/ndbapi/NdbUtil.cpp
diff --git a/ndb/src/ndbapi/NdbUtil.hpp b/storage/ndb/src/ndbapi/NdbUtil.hpp
index 80fc15ddd8c..80fc15ddd8c 100644
--- a/ndb/src/ndbapi/NdbUtil.hpp
+++ b/storage/ndb/src/ndbapi/NdbUtil.hpp
diff --git a/ndb/src/ndbapi/NdbWaiter.hpp b/storage/ndb/src/ndbapi/NdbWaiter.hpp
index 8b7b2a75879..8b7b2a75879 100644
--- a/ndb/src/ndbapi/NdbWaiter.hpp
+++ b/storage/ndb/src/ndbapi/NdbWaiter.hpp
diff --git a/ndb/src/ndbapi/Ndberr.cpp b/storage/ndb/src/ndbapi/Ndberr.cpp
index b05818de6f1..b05818de6f1 100644
--- a/ndb/src/ndbapi/Ndberr.cpp
+++ b/storage/ndb/src/ndbapi/Ndberr.cpp
diff --git a/ndb/src/ndbapi/Ndbif.cpp b/storage/ndb/src/ndbapi/Ndbif.cpp
index fee6f0930ad..fee6f0930ad 100644
--- a/ndb/src/ndbapi/Ndbif.cpp
+++ b/storage/ndb/src/ndbapi/Ndbif.cpp
diff --git a/ndb/src/ndbapi/Ndbinit.cpp b/storage/ndb/src/ndbapi/Ndbinit.cpp
index bbc1474f45d..bbc1474f45d 100644
--- a/ndb/src/ndbapi/Ndbinit.cpp
+++ b/storage/ndb/src/ndbapi/Ndbinit.cpp
diff --git a/ndb/src/ndbapi/Ndblist.cpp b/storage/ndb/src/ndbapi/Ndblist.cpp
index 96d0f4d7de5..96d0f4d7de5 100644
--- a/ndb/src/ndbapi/Ndblist.cpp
+++ b/storage/ndb/src/ndbapi/Ndblist.cpp
diff --git a/ndb/src/ndbapi/ObjectMap.hpp b/storage/ndb/src/ndbapi/ObjectMap.hpp
index 21407279f0b..21407279f0b 100644
--- a/ndb/src/ndbapi/ObjectMap.hpp
+++ b/storage/ndb/src/ndbapi/ObjectMap.hpp
diff --git a/ndb/src/ndbapi/ScanOperation.txt b/storage/ndb/src/ndbapi/ScanOperation.txt
index 27e4e8c1755..27e4e8c1755 100644
--- a/ndb/src/ndbapi/ScanOperation.txt
+++ b/storage/ndb/src/ndbapi/ScanOperation.txt
diff --git a/ndb/src/ndbapi/TransporterFacade.cpp b/storage/ndb/src/ndbapi/TransporterFacade.cpp
index 96f376db5a5..96f376db5a5 100644
--- a/ndb/src/ndbapi/TransporterFacade.cpp
+++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp
diff --git a/ndb/src/ndbapi/TransporterFacade.hpp b/storage/ndb/src/ndbapi/TransporterFacade.hpp
index e74f4b51e00..e74f4b51e00 100644
--- a/ndb/src/ndbapi/TransporterFacade.hpp
+++ b/storage/ndb/src/ndbapi/TransporterFacade.hpp
diff --git a/ndb/src/ndbapi/ndb_cluster_connection.cpp b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
index 7625da609b0..7625da609b0 100644
--- a/ndb/src/ndbapi/ndb_cluster_connection.cpp
+++ b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
diff --git a/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp b/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
index 05652f3316a..05652f3316a 100644
--- a/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
+++ b/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
diff --git a/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c
index 840d358f225..840d358f225 100644
--- a/ndb/src/ndbapi/ndberror.c
+++ b/storage/ndb/src/ndbapi/ndberror.c
diff --git a/ndb/src/ndbapi/signal-sender/Makefile b/storage/ndb/src/ndbapi/signal-sender/Makefile
index 56e6ce1eac0..56e6ce1eac0 100644
--- a/ndb/src/ndbapi/signal-sender/Makefile
+++ b/storage/ndb/src/ndbapi/signal-sender/Makefile
diff --git a/ndb/src/ndbapi/signal-sender/SignalSender.cpp b/storage/ndb/src/ndbapi/signal-sender/SignalSender.cpp
index 680d0c23b4a..680d0c23b4a 100644
--- a/ndb/src/ndbapi/signal-sender/SignalSender.cpp
+++ b/storage/ndb/src/ndbapi/signal-sender/SignalSender.cpp
diff --git a/ndb/src/ndbapi/signal-sender/SignalSender.hpp b/storage/ndb/src/ndbapi/signal-sender/SignalSender.hpp
index e4e6c1931d2..e4e6c1931d2 100644
--- a/ndb/src/ndbapi/signal-sender/SignalSender.hpp
+++ b/storage/ndb/src/ndbapi/signal-sender/SignalSender.hpp
diff --git a/ndb/src/old_files/client/Makefile b/storage/ndb/src/old_files/client/Makefile
index 1751a98bdfe..1751a98bdfe 100644
--- a/ndb/src/old_files/client/Makefile
+++ b/storage/ndb/src/old_files/client/Makefile
diff --git a/ndb/src/old_files/client/odbc/Extra.mk b/storage/ndb/src/old_files/client/odbc/Extra.mk
index 762fb0bedd0..762fb0bedd0 100644
--- a/ndb/src/old_files/client/odbc/Extra.mk
+++ b/storage/ndb/src/old_files/client/odbc/Extra.mk
diff --git a/ndb/src/old_files/client/odbc/Makefile b/storage/ndb/src/old_files/client/odbc/Makefile
index 2da683e7d86..2da683e7d86 100644
--- a/ndb/src/old_files/client/odbc/Makefile
+++ b/storage/ndb/src/old_files/client/odbc/Makefile
diff --git a/ndb/src/old_files/client/odbc/NdbOdbc.cpp b/storage/ndb/src/old_files/client/odbc/NdbOdbc.cpp
index 67c6b5e0004..67c6b5e0004 100755
--- a/ndb/src/old_files/client/odbc/NdbOdbc.cpp
+++ b/storage/ndb/src/old_files/client/odbc/NdbOdbc.cpp
diff --git a/ndb/src/old_files/client/odbc/NdbOdbc.def b/storage/ndb/src/old_files/client/odbc/NdbOdbc.def
index 85619b91915..85619b91915 100755
--- a/ndb/src/old_files/client/odbc/NdbOdbc.def
+++ b/storage/ndb/src/old_files/client/odbc/NdbOdbc.def
diff --git a/ndb/src/old_files/client/odbc/codegen/CodeGen.cpp b/storage/ndb/src/old_files/client/odbc/codegen/CodeGen.cpp
index 6be78b62bd9..6be78b62bd9 100644
--- a/ndb/src/old_files/client/odbc/codegen/CodeGen.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/CodeGen.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/CodeGen.hpp b/storage/ndb/src/old_files/client/odbc/codegen/CodeGen.hpp
index ae61dab0c2a..ae61dab0c2a 100644
--- a/ndb/src/old_files/client/odbc/codegen/CodeGen.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/CodeGen.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_base.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_base.cpp
index dc02e071156..dc02e071156 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_base.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_base.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_base.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_base.hpp
index c67c0ca7adb..c67c0ca7adb 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_base.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_base.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_column.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_column.cpp
index c4c0480a5e7..c4c0480a5e7 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_column.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_column.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_column.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_column.hpp
index af0dcea690d..af0dcea690d 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_column.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_column.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_comp_op.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_comp_op.cpp
index 7782ed1ea2a..7782ed1ea2a 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_comp_op.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_comp_op.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_comp_op.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_comp_op.hpp
index 0585ab1dabf..0585ab1dabf 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_comp_op.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_comp_op.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_create_index.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_create_index.cpp
index 84f319338a4..84f319338a4 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_create_index.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_create_index.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_create_index.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_create_index.hpp
index ebd757e1118..ebd757e1118 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_create_index.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_create_index.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_create_row.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_create_row.cpp
index 5b90b658ed7..5b90b658ed7 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_create_row.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_create_row.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_create_row.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_create_row.hpp
index f03455ff28e..f03455ff28e 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_create_row.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_create_row.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_create_table.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_create_table.cpp
index 14e4abbd7fe..14e4abbd7fe 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_create_table.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_create_table.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_create_table.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_create_table.hpp
index cbb2189d8ce..cbb2189d8ce 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_create_table.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_create_table.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_data_type.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_data_type.cpp
index 1ff0fcebcbe..1ff0fcebcbe 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_data_type.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_data_type.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_data_type.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_data_type.hpp
index 735dc05014f..735dc05014f 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_data_type.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_data_type.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_ddl.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl.cpp
index 2ba4291a0e8..2ba4291a0e8 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_ddl.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_ddl.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl.hpp
index 1ceca62d55d..1ceca62d55d 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_ddl.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_ddl_column.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_column.cpp
index ee037e54c1f..ee037e54c1f 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_ddl_column.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_column.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_ddl_column.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_column.hpp
index 7d089d37440..7d089d37440 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_ddl_column.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_column.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.cpp
index 78c23e38d97..78c23e38d97 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.hpp
index ea7808b37cb..ea7808b37cb 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_ddl_row.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_row.cpp
index 87589ebbaa0..87589ebbaa0 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_ddl_row.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_row.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_ddl_row.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_row.hpp
index ac3eded1b2e..ac3eded1b2e 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_ddl_row.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_row.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_delete.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete.cpp
index 35b3daa1aca..35b3daa1aca 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_delete.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_delete.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete.hpp
index c7fa245497b..c7fa245497b 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_delete.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_delete_index.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete_index.cpp
index 8f2c3be2848..8f2c3be2848 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_delete_index.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete_index.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_delete_index.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete_index.hpp
index 1aaaa18abcb..1aaaa18abcb 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_delete_index.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete_index.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.cpp
index 4a6dec64654..4a6dec64654 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.hpp
index 4138baefa4c..4138baefa4c 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_delete_scan.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete_scan.cpp
index fed7244a026..fed7244a026 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_delete_scan.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete_scan.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_delete_scan.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete_scan.hpp
index eb013a8257e..eb013a8257e 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_delete_scan.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_delete_scan.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_dml.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_dml.cpp
index 44fd4478646..44fd4478646 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_dml.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_dml.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_dml.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_dml.hpp
index 0618f583984..0618f583984 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_dml.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_dml.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_dml_column.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_dml_column.cpp
index 808e2ac8c4b..808e2ac8c4b 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_dml_column.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_dml_column.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_dml_column.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_dml_column.hpp
index 0fb33944a3a..0fb33944a3a 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_dml_column.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_dml_column.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_dml_row.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_dml_row.cpp
index ceb63a9f7b9..ceb63a9f7b9 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_dml_row.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_dml_row.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_dml_row.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_dml_row.hpp
index 6c7e46ba9af..6c7e46ba9af 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_dml_row.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_dml_row.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_drop_index.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_drop_index.cpp
index b6bae88e270..b6bae88e270 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_drop_index.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_drop_index.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_drop_index.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_drop_index.hpp
index 99891c9a52f..99891c9a52f 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_drop_index.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_drop_index.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_drop_table.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_drop_table.cpp
index f20bf9fdae0..f20bf9fdae0 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_drop_table.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_drop_table.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_drop_table.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_drop_table.hpp
index 849a472ed94..849a472ed94 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_drop_table.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_drop_table.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr.cpp
index 4afa75986a0..4afa75986a0 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr.hpp
index b6f07471b4d..b6f07471b4d 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr_column.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_column.cpp
index 17a9a502d4c..17a9a502d4c 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr_column.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_column.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr_column.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_column.hpp
index 2ce7c441e45..2ce7c441e45 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr_column.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_column.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr_const.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_const.cpp
index 564d307a4f8..564d307a4f8 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr_const.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_const.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr_const.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_const.hpp
index 2e26c637a23..2e26c637a23 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr_const.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_const.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr_conv.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_conv.cpp
index bc89482fedc..bc89482fedc 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr_conv.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_conv.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr_conv.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_conv.hpp
index 3294960c7b3..3294960c7b3 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr_conv.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_conv.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr_func.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_func.cpp
index 96b461a72d9..96b461a72d9 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr_func.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_func.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr_func.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_func.hpp
index 856d7529875..856d7529875 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr_func.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_func.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr_op.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_op.cpp
index 7e8314c1741..7e8314c1741 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr_op.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_op.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr_op.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_op.hpp
index f9686cad151..f9686cad151 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr_op.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_op.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr_param.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_param.cpp
index 93892cae5e6..93892cae5e6 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr_param.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_param.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr_param.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_param.hpp
index 783e5c087b4..783e5c087b4 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr_param.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_param.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr_row.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_row.cpp
index da1751d41d1..da1751d41d1 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr_row.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_row.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_expr_row.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_row.hpp
index 94527931dba..94527931dba 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_expr_row.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_expr_row.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_idx_column.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_idx_column.cpp
index 584ffef3e01..584ffef3e01 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_idx_column.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_idx_column.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_idx_column.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_idx_column.hpp
index 209ed705b48..209ed705b48 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_idx_column.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_idx_column.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_insert.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_insert.cpp
index c442186c181..c442186c181 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_insert.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_insert.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_insert.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_insert.hpp
index 748b092e33a..748b092e33a 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_insert.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_insert.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_pred.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_pred.cpp
index fe7cac7606e..fe7cac7606e 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_pred.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_pred.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_pred.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_pred.hpp
index a77c1161fa1..a77c1161fa1 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_pred.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_pred.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_pred_op.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_pred_op.cpp
index 29736e45818..29736e45818 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_pred_op.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_pred_op.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_pred_op.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_pred_op.hpp
index 9130bc3cb81..9130bc3cb81 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_pred_op.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_pred_op.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query.cpp
index 9e983942601..9e983942601 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query.hpp
index 97f98f859ff..97f98f859ff 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_count.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_count.cpp
index f52c41df802..f52c41df802 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_count.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_count.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_count.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_count.hpp
index a094eba4519..a094eba4519 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_count.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_count.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_distinct.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_distinct.cpp
index 4cbfbfe812d..4cbfbfe812d 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_distinct.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_distinct.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_distinct.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_distinct.hpp
index 62c46bda901..62c46bda901 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_distinct.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_distinct.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_filter.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_filter.cpp
index 934a24d182d..934a24d182d 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_filter.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_filter.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_filter.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_filter.hpp
index 60cbf0f86a7..60cbf0f86a7 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_filter.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_filter.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_group.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_group.cpp
index c3019efaa85..c3019efaa85 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_group.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_group.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_group.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_group.hpp
index e79022c5284..e79022c5284 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_group.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_group.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_index.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_index.cpp
index ee19d6123cc..ee19d6123cc 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_index.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_index.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_index.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_index.hpp
index 87affd50580..87affd50580 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_index.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_index.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_join.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_join.cpp
index 89aafe13610..89aafe13610 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_join.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_join.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_join.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_join.hpp
index f6ac9205329..f6ac9205329 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_join.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_join.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_lookup.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_lookup.cpp
index bad4199190b..bad4199190b 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_lookup.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_lookup.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_lookup.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_lookup.hpp
index e66623d4030..e66623d4030 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_lookup.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_lookup.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_project.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_project.cpp
index 54043ce3d5d..54043ce3d5d 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_project.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_project.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_project.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_project.hpp
index 545685ab9df..545685ab9df 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_project.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_project.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_range.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_range.cpp
index 5d29c5af315..5d29c5af315 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_range.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_range.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_range.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_range.hpp
index 4438189522c..4438189522c 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_range.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_range.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_repeat.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_repeat.cpp
index 8b295a97916..8b295a97916 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_repeat.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_repeat.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_repeat.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_repeat.hpp
index 90d6ef55104..90d6ef55104 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_repeat.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_repeat.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_scan.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_scan.cpp
index 1c0f58980e5..1c0f58980e5 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_scan.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_scan.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_scan.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_scan.hpp
index d6d1630ddf8..d6d1630ddf8 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_scan.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_scan.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_sort.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_sort.cpp
index 4ea6db8c4e2..4ea6db8c4e2 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_sort.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_sort.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_sort.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_sort.hpp
index d1aa03d9aef..d1aa03d9aef 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_sort.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_sort.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_sys.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_sys.cpp
index affe3dc1264..affe3dc1264 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_sys.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_sys.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_query_sys.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_sys.hpp
index 8eb069d0413..8eb069d0413 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_query_sys.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_query_sys.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_root.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_root.cpp
index 4f45bdffdaf..4f45bdffdaf 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_root.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_root.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_root.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_root.hpp
index 4f0f96725e3..4f0f96725e3 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_root.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_root.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_select.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_select.cpp
index 611b491968d..611b491968d 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_select.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_select.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_select.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_select.hpp
index eaa9b801f29..eaa9b801f29 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_select.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_select.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_set_row.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_set_row.cpp
index dd13ba0c3f7..dd13ba0c3f7 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_set_row.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_set_row.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_set_row.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_set_row.hpp
index 10d62826ac7..10d62826ac7 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_set_row.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_set_row.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_stmt.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_stmt.cpp
index d790f667b84..d790f667b84 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_stmt.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_stmt.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_stmt.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_stmt.hpp
index 20b7fb965fb..20b7fb965fb 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_stmt.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_stmt.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_table.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_table.cpp
index ee3c2a2ed07..ee3c2a2ed07 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_table.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_table.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_table.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_table.hpp
index 8a95b8fa26c..8a95b8fa26c 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_table.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_table.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_table_list.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_table_list.cpp
index ea9f4fdc26e..ea9f4fdc26e 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_table_list.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_table_list.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_table_list.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_table_list.hpp
index 47989166cac..47989166cac 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_table_list.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_table_list.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_update.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_update.cpp
index 0b33cd628b4..0b33cd628b4 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_update.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_update.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_update.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_update.hpp
index 380b651518b..380b651518b 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_update.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_update.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_update_index.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_update_index.cpp
index 6f74db0d913..6f74db0d913 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_update_index.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_update_index.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_update_index.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_update_index.hpp
index bbad822650a..bbad822650a 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_update_index.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_update_index.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_update_lookup.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_update_lookup.cpp
index 7525fb72692..7525fb72692 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_update_lookup.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_update_lookup.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_update_lookup.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_update_lookup.hpp
index fc4341880dd..fc4341880dd 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_update_lookup.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_update_lookup.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_update_scan.cpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_update_scan.cpp
index 9fac1728469..9fac1728469 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_update_scan.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_update_scan.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Code_update_scan.hpp b/storage/ndb/src/old_files/client/odbc/codegen/Code_update_scan.hpp
index d742883e561..d742883e561 100644
--- a/ndb/src/old_files/client/odbc/codegen/Code_update_scan.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Code_update_scan.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/Makefile b/storage/ndb/src/old_files/client/odbc/codegen/Makefile
index 49e5439556d..49e5439556d 100644
--- a/ndb/src/old_files/client/odbc/codegen/Makefile
+++ b/storage/ndb/src/old_files/client/odbc/codegen/Makefile
diff --git a/ndb/src/old_files/client/odbc/codegen/SimpleGram.ypp b/storage/ndb/src/old_files/client/odbc/codegen/SimpleGram.ypp
index 07d8017e5ed..07d8017e5ed 100644
--- a/ndb/src/old_files/client/odbc/codegen/SimpleGram.ypp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/SimpleGram.ypp
diff --git a/ndb/src/old_files/client/odbc/codegen/SimpleParser.cpp b/storage/ndb/src/old_files/client/odbc/codegen/SimpleParser.cpp
index a2418f49e37..a2418f49e37 100644
--- a/ndb/src/old_files/client/odbc/codegen/SimpleParser.cpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/SimpleParser.cpp
diff --git a/ndb/src/old_files/client/odbc/codegen/SimpleParser.hpp b/storage/ndb/src/old_files/client/odbc/codegen/SimpleParser.hpp
index abadae8f905..abadae8f905 100644
--- a/ndb/src/old_files/client/odbc/codegen/SimpleParser.hpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/SimpleParser.hpp
diff --git a/ndb/src/old_files/client/odbc/codegen/SimpleScan.lpp b/storage/ndb/src/old_files/client/odbc/codegen/SimpleScan.lpp
index 29aa876f669..29aa876f669 100644
--- a/ndb/src/old_files/client/odbc/codegen/SimpleScan.lpp
+++ b/storage/ndb/src/old_files/client/odbc/codegen/SimpleScan.lpp
diff --git a/ndb/src/old_files/client/odbc/common/AttrArea.cpp b/storage/ndb/src/old_files/client/odbc/common/AttrArea.cpp
index ff9e085a7f6..ff9e085a7f6 100644
--- a/ndb/src/old_files/client/odbc/common/AttrArea.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/AttrArea.cpp
diff --git a/ndb/src/old_files/client/odbc/common/AttrArea.hpp b/storage/ndb/src/old_files/client/odbc/common/AttrArea.hpp
index 050cce719bf..050cce719bf 100644
--- a/ndb/src/old_files/client/odbc/common/AttrArea.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/AttrArea.hpp
diff --git a/ndb/src/old_files/client/odbc/common/CodeTree.cpp b/storage/ndb/src/old_files/client/odbc/common/CodeTree.cpp
index ebe4840c5f6..ebe4840c5f6 100644
--- a/ndb/src/old_files/client/odbc/common/CodeTree.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/CodeTree.cpp
diff --git a/ndb/src/old_files/client/odbc/common/CodeTree.hpp b/storage/ndb/src/old_files/client/odbc/common/CodeTree.hpp
index 1b0ae3199af..1b0ae3199af 100644
--- a/ndb/src/old_files/client/odbc/common/CodeTree.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/CodeTree.hpp
diff --git a/ndb/src/old_files/client/odbc/common/ConnArea.cpp b/storage/ndb/src/old_files/client/odbc/common/ConnArea.cpp
index d4d3be52a3c..d4d3be52a3c 100644
--- a/ndb/src/old_files/client/odbc/common/ConnArea.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/ConnArea.cpp
diff --git a/ndb/src/old_files/client/odbc/common/ConnArea.hpp b/storage/ndb/src/old_files/client/odbc/common/ConnArea.hpp
index 36367a39bae..36367a39bae 100644
--- a/ndb/src/old_files/client/odbc/common/ConnArea.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/ConnArea.hpp
diff --git a/ndb/src/old_files/client/odbc/common/Ctx.cpp b/storage/ndb/src/old_files/client/odbc/common/Ctx.cpp
index d6faa5cba77..d6faa5cba77 100644
--- a/ndb/src/old_files/client/odbc/common/Ctx.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/Ctx.cpp
diff --git a/ndb/src/old_files/client/odbc/common/Ctx.hpp b/storage/ndb/src/old_files/client/odbc/common/Ctx.hpp
index d25d45ff0c7..d25d45ff0c7 100644
--- a/ndb/src/old_files/client/odbc/common/Ctx.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/Ctx.hpp
diff --git a/ndb/src/old_files/client/odbc/common/DataField.cpp b/storage/ndb/src/old_files/client/odbc/common/DataField.cpp
index 11aae7d893b..11aae7d893b 100644
--- a/ndb/src/old_files/client/odbc/common/DataField.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/DataField.cpp
diff --git a/ndb/src/old_files/client/odbc/common/DataField.hpp b/storage/ndb/src/old_files/client/odbc/common/DataField.hpp
index 65138df25f1..65138df25f1 100644
--- a/ndb/src/old_files/client/odbc/common/DataField.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/DataField.hpp
diff --git a/ndb/src/old_files/client/odbc/common/DataRow.cpp b/storage/ndb/src/old_files/client/odbc/common/DataRow.cpp
index 509f2673e0d..509f2673e0d 100644
--- a/ndb/src/old_files/client/odbc/common/DataRow.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/DataRow.cpp
diff --git a/ndb/src/old_files/client/odbc/common/DataRow.hpp b/storage/ndb/src/old_files/client/odbc/common/DataRow.hpp
index 4a5a1e905b9..4a5a1e905b9 100644
--- a/ndb/src/old_files/client/odbc/common/DataRow.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/DataRow.hpp
diff --git a/ndb/src/old_files/client/odbc/common/DataType.cpp b/storage/ndb/src/old_files/client/odbc/common/DataType.cpp
index 96f6a6e0877..96f6a6e0877 100644
--- a/ndb/src/old_files/client/odbc/common/DataType.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/DataType.cpp
diff --git a/ndb/src/old_files/client/odbc/common/DataType.hpp b/storage/ndb/src/old_files/client/odbc/common/DataType.hpp
index e03e445cf05..e03e445cf05 100644
--- a/ndb/src/old_files/client/odbc/common/DataType.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/DataType.hpp
diff --git a/ndb/src/old_files/client/odbc/common/DescArea.cpp b/storage/ndb/src/old_files/client/odbc/common/DescArea.cpp
index bad9f23d3ef..bad9f23d3ef 100644
--- a/ndb/src/old_files/client/odbc/common/DescArea.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/DescArea.cpp
diff --git a/ndb/src/old_files/client/odbc/common/DescArea.hpp b/storage/ndb/src/old_files/client/odbc/common/DescArea.hpp
index e9f552d758d..e9f552d758d 100644
--- a/ndb/src/old_files/client/odbc/common/DescArea.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/DescArea.hpp
diff --git a/ndb/src/old_files/client/odbc/common/DiagArea.cpp b/storage/ndb/src/old_files/client/odbc/common/DiagArea.cpp
index 06e8da89495..06e8da89495 100644
--- a/ndb/src/old_files/client/odbc/common/DiagArea.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/DiagArea.cpp
diff --git a/ndb/src/old_files/client/odbc/common/DiagArea.hpp b/storage/ndb/src/old_files/client/odbc/common/DiagArea.hpp
index 79c03de6623..79c03de6623 100644
--- a/ndb/src/old_files/client/odbc/common/DiagArea.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/DiagArea.hpp
diff --git a/ndb/src/old_files/client/odbc/common/Makefile b/storage/ndb/src/old_files/client/odbc/common/Makefile
index 7ee29738d86..7ee29738d86 100644
--- a/ndb/src/old_files/client/odbc/common/Makefile
+++ b/storage/ndb/src/old_files/client/odbc/common/Makefile
diff --git a/ndb/src/old_files/client/odbc/common/OdbcData.cpp b/storage/ndb/src/old_files/client/odbc/common/OdbcData.cpp
index 32400e07c7a..32400e07c7a 100644
--- a/ndb/src/old_files/client/odbc/common/OdbcData.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/OdbcData.cpp
diff --git a/ndb/src/old_files/client/odbc/common/OdbcData.hpp b/storage/ndb/src/old_files/client/odbc/common/OdbcData.hpp
index c1884507cfe..c1884507cfe 100644
--- a/ndb/src/old_files/client/odbc/common/OdbcData.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/OdbcData.hpp
diff --git a/ndb/src/old_files/client/odbc/common/ResultArea.cpp b/storage/ndb/src/old_files/client/odbc/common/ResultArea.cpp
index 79d7fb0ccc4..79d7fb0ccc4 100644
--- a/ndb/src/old_files/client/odbc/common/ResultArea.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/ResultArea.cpp
diff --git a/ndb/src/old_files/client/odbc/common/ResultArea.hpp b/storage/ndb/src/old_files/client/odbc/common/ResultArea.hpp
index d4890c44d99..d4890c44d99 100644
--- a/ndb/src/old_files/client/odbc/common/ResultArea.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/ResultArea.hpp
diff --git a/ndb/src/old_files/client/odbc/common/Sqlstate.cpp b/storage/ndb/src/old_files/client/odbc/common/Sqlstate.cpp
index 2d625a7c159..2d625a7c159 100644
--- a/ndb/src/old_files/client/odbc/common/Sqlstate.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/Sqlstate.cpp
diff --git a/ndb/src/old_files/client/odbc/common/Sqlstate.hpp b/storage/ndb/src/old_files/client/odbc/common/Sqlstate.hpp
index 3b4665dc6ca..3b4665dc6ca 100644
--- a/ndb/src/old_files/client/odbc/common/Sqlstate.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/Sqlstate.hpp
diff --git a/ndb/src/old_files/client/odbc/common/StmtArea.cpp b/storage/ndb/src/old_files/client/odbc/common/StmtArea.cpp
index 5ce2d47d31a..5ce2d47d31a 100644
--- a/ndb/src/old_files/client/odbc/common/StmtArea.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/StmtArea.cpp
diff --git a/ndb/src/old_files/client/odbc/common/StmtArea.hpp b/storage/ndb/src/old_files/client/odbc/common/StmtArea.hpp
index a88c6d36e6d..a88c6d36e6d 100644
--- a/ndb/src/old_files/client/odbc/common/StmtArea.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/StmtArea.hpp
diff --git a/ndb/src/old_files/client/odbc/common/StmtInfo.cpp b/storage/ndb/src/old_files/client/odbc/common/StmtInfo.cpp
index 3467fb5023e..3467fb5023e 100644
--- a/ndb/src/old_files/client/odbc/common/StmtInfo.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/StmtInfo.cpp
diff --git a/ndb/src/old_files/client/odbc/common/StmtInfo.hpp b/storage/ndb/src/old_files/client/odbc/common/StmtInfo.hpp
index 9cd489be6da..9cd489be6da 100644
--- a/ndb/src/old_files/client/odbc/common/StmtInfo.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/StmtInfo.hpp
diff --git a/ndb/src/old_files/client/odbc/common/common.cpp b/storage/ndb/src/old_files/client/odbc/common/common.cpp
index 73d14c82efe..73d14c82efe 100644
--- a/ndb/src/old_files/client/odbc/common/common.cpp
+++ b/storage/ndb/src/old_files/client/odbc/common/common.cpp
diff --git a/ndb/src/old_files/client/odbc/common/common.hpp b/storage/ndb/src/old_files/client/odbc/common/common.hpp
index d2f243b6437..d2f243b6437 100644
--- a/ndb/src/old_files/client/odbc/common/common.hpp
+++ b/storage/ndb/src/old_files/client/odbc/common/common.hpp
diff --git a/ndb/src/old_files/client/odbc/dictionary/DictCatalog.cpp b/storage/ndb/src/old_files/client/odbc/dictionary/DictCatalog.cpp
index 433347c9a70..433347c9a70 100644
--- a/ndb/src/old_files/client/odbc/dictionary/DictCatalog.cpp
+++ b/storage/ndb/src/old_files/client/odbc/dictionary/DictCatalog.cpp
diff --git a/ndb/src/old_files/client/odbc/dictionary/DictCatalog.hpp b/storage/ndb/src/old_files/client/odbc/dictionary/DictCatalog.hpp
index 5452990a51b..5452990a51b 100644
--- a/ndb/src/old_files/client/odbc/dictionary/DictCatalog.hpp
+++ b/storage/ndb/src/old_files/client/odbc/dictionary/DictCatalog.hpp
diff --git a/ndb/src/old_files/client/odbc/dictionary/DictColumn.cpp b/storage/ndb/src/old_files/client/odbc/dictionary/DictColumn.cpp
index fa0128f1ddb..fa0128f1ddb 100644
--- a/ndb/src/old_files/client/odbc/dictionary/DictColumn.cpp
+++ b/storage/ndb/src/old_files/client/odbc/dictionary/DictColumn.cpp
diff --git a/ndb/src/old_files/client/odbc/dictionary/DictColumn.hpp b/storage/ndb/src/old_files/client/odbc/dictionary/DictColumn.hpp
index 945fb86367b..945fb86367b 100644
--- a/ndb/src/old_files/client/odbc/dictionary/DictColumn.hpp
+++ b/storage/ndb/src/old_files/client/odbc/dictionary/DictColumn.hpp
diff --git a/ndb/src/old_files/client/odbc/dictionary/DictIndex.cpp b/storage/ndb/src/old_files/client/odbc/dictionary/DictIndex.cpp
index 95d93318902..95d93318902 100644
--- a/ndb/src/old_files/client/odbc/dictionary/DictIndex.cpp
+++ b/storage/ndb/src/old_files/client/odbc/dictionary/DictIndex.cpp
diff --git a/ndb/src/old_files/client/odbc/dictionary/DictIndex.hpp b/storage/ndb/src/old_files/client/odbc/dictionary/DictIndex.hpp
index 7ba46daaae3..7ba46daaae3 100644
--- a/ndb/src/old_files/client/odbc/dictionary/DictIndex.hpp
+++ b/storage/ndb/src/old_files/client/odbc/dictionary/DictIndex.hpp
diff --git a/ndb/src/old_files/client/odbc/dictionary/DictSchema.cpp b/storage/ndb/src/old_files/client/odbc/dictionary/DictSchema.cpp
index 91939cb2f26..91939cb2f26 100644
--- a/ndb/src/old_files/client/odbc/dictionary/DictSchema.cpp
+++ b/storage/ndb/src/old_files/client/odbc/dictionary/DictSchema.cpp
diff --git a/ndb/src/old_files/client/odbc/dictionary/DictSchema.hpp b/storage/ndb/src/old_files/client/odbc/dictionary/DictSchema.hpp
index 099352edbb9..099352edbb9 100644
--- a/ndb/src/old_files/client/odbc/dictionary/DictSchema.hpp
+++ b/storage/ndb/src/old_files/client/odbc/dictionary/DictSchema.hpp
diff --git a/ndb/src/old_files/client/odbc/dictionary/DictSys.cpp b/storage/ndb/src/old_files/client/odbc/dictionary/DictSys.cpp
index 1ceef66ee57..1ceef66ee57 100644
--- a/ndb/src/old_files/client/odbc/dictionary/DictSys.cpp
+++ b/storage/ndb/src/old_files/client/odbc/dictionary/DictSys.cpp
diff --git a/ndb/src/old_files/client/odbc/dictionary/DictSys.hpp b/storage/ndb/src/old_files/client/odbc/dictionary/DictSys.hpp
index e6fa661fd59..e6fa661fd59 100644
--- a/ndb/src/old_files/client/odbc/dictionary/DictSys.hpp
+++ b/storage/ndb/src/old_files/client/odbc/dictionary/DictSys.hpp
diff --git a/ndb/src/old_files/client/odbc/dictionary/DictTable.cpp b/storage/ndb/src/old_files/client/odbc/dictionary/DictTable.cpp
index 4db7d3b3aec..4db7d3b3aec 100644
--- a/ndb/src/old_files/client/odbc/dictionary/DictTable.cpp
+++ b/storage/ndb/src/old_files/client/odbc/dictionary/DictTable.cpp
diff --git a/ndb/src/old_files/client/odbc/dictionary/DictTable.hpp b/storage/ndb/src/old_files/client/odbc/dictionary/DictTable.hpp
index 5cecfff9562..5cecfff9562 100644
--- a/ndb/src/old_files/client/odbc/dictionary/DictTable.hpp
+++ b/storage/ndb/src/old_files/client/odbc/dictionary/DictTable.hpp
diff --git a/ndb/src/old_files/client/odbc/dictionary/Makefile b/storage/ndb/src/old_files/client/odbc/dictionary/Makefile
index cdfd3b6ea0c..cdfd3b6ea0c 100644
--- a/ndb/src/old_files/client/odbc/dictionary/Makefile
+++ b/storage/ndb/src/old_files/client/odbc/dictionary/Makefile
diff --git a/ndb/src/old_files/client/odbc/docs/class.fig b/storage/ndb/src/old_files/client/odbc/docs/class.fig
index 38c24c1fba4..38c24c1fba4 100644
--- a/ndb/src/old_files/client/odbc/docs/class.fig
+++ b/storage/ndb/src/old_files/client/odbc/docs/class.fig
diff --git a/ndb/src/old_files/client/odbc/docs/descfield.pl b/storage/ndb/src/old_files/client/odbc/docs/descfield.pl
index 80fef22f303..80fef22f303 100644
--- a/ndb/src/old_files/client/odbc/docs/descfield.pl
+++ b/storage/ndb/src/old_files/client/odbc/docs/descfield.pl
diff --git a/ndb/src/old_files/client/odbc/docs/diag.txt b/storage/ndb/src/old_files/client/odbc/docs/diag.txt
index a9a0e0f42d0..a9a0e0f42d0 100644
--- a/ndb/src/old_files/client/odbc/docs/diag.txt
+++ b/storage/ndb/src/old_files/client/odbc/docs/diag.txt
diff --git a/ndb/src/old_files/client/odbc/docs/getinfo.pl b/storage/ndb/src/old_files/client/odbc/docs/getinfo.pl
index 34e26b47bab..34e26b47bab 100644
--- a/ndb/src/old_files/client/odbc/docs/getinfo.pl
+++ b/storage/ndb/src/old_files/client/odbc/docs/getinfo.pl
diff --git a/ndb/src/old_files/client/odbc/docs/gettypeinfo.pl b/storage/ndb/src/old_files/client/odbc/docs/gettypeinfo.pl
index 0a999fd7249..0a999fd7249 100644
--- a/ndb/src/old_files/client/odbc/docs/gettypeinfo.pl
+++ b/storage/ndb/src/old_files/client/odbc/docs/gettypeinfo.pl
diff --git a/ndb/src/old_files/client/odbc/docs/handleattr.pl b/storage/ndb/src/old_files/client/odbc/docs/handleattr.pl
index 892d34b105b..892d34b105b 100644
--- a/ndb/src/old_files/client/odbc/docs/handleattr.pl
+++ b/storage/ndb/src/old_files/client/odbc/docs/handleattr.pl
diff --git a/ndb/src/old_files/client/odbc/docs/main.hpp b/storage/ndb/src/old_files/client/odbc/docs/main.hpp
index ebb5b1f235a..ebb5b1f235a 100644
--- a/ndb/src/old_files/client/odbc/docs/main.hpp
+++ b/storage/ndb/src/old_files/client/odbc/docs/main.hpp
diff --git a/ndb/src/old_files/client/odbc/docs/ndbodbc.html b/storage/ndb/src/old_files/client/odbc/docs/ndbodbc.html
index 6be624dfa1b..6be624dfa1b 100644
--- a/ndb/src/old_files/client/odbc/docs/ndbodbc.html
+++ b/storage/ndb/src/old_files/client/odbc/docs/ndbodbc.html
diff --git a/ndb/src/old_files/client/odbc/docs/select.fig b/storage/ndb/src/old_files/client/odbc/docs/select.fig
index 4f51a2085b4..4f51a2085b4 100644
--- a/ndb/src/old_files/client/odbc/docs/select.fig
+++ b/storage/ndb/src/old_files/client/odbc/docs/select.fig
diff --git a/ndb/src/old_files/client/odbc/docs/systables.pl b/storage/ndb/src/old_files/client/odbc/docs/systables.pl
index 728d966a7a4..728d966a7a4 100644
--- a/ndb/src/old_files/client/odbc/docs/systables.pl
+++ b/storage/ndb/src/old_files/client/odbc/docs/systables.pl
diff --git a/ndb/src/old_files/client/odbc/docs/type.txt b/storage/ndb/src/old_files/client/odbc/docs/type.txt
index d7b391afc55..d7b391afc55 100644
--- a/ndb/src/old_files/client/odbc/docs/type.txt
+++ b/storage/ndb/src/old_files/client/odbc/docs/type.txt
diff --git a/ndb/src/old_files/client/odbc/driver/Func.data b/storage/ndb/src/old_files/client/odbc/driver/Func.data
index c32671e1135..c32671e1135 100644
--- a/ndb/src/old_files/client/odbc/driver/Func.data
+++ b/storage/ndb/src/old_files/client/odbc/driver/Func.data
diff --git a/ndb/src/old_files/client/odbc/driver/Func.pl b/storage/ndb/src/old_files/client/odbc/driver/Func.pl
index 1064a6a6c6e..1064a6a6c6e 100644
--- a/ndb/src/old_files/client/odbc/driver/Func.pl
+++ b/storage/ndb/src/old_files/client/odbc/driver/Func.pl
diff --git a/ndb/src/old_files/client/odbc/driver/Makefile b/storage/ndb/src/old_files/client/odbc/driver/Makefile
index 62f82371da4..62f82371da4 100644
--- a/ndb/src/old_files/client/odbc/driver/Makefile
+++ b/storage/ndb/src/old_files/client/odbc/driver/Makefile
diff --git a/ndb/src/old_files/client/odbc/driver/SQLAllocConnect.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLAllocConnect.cpp
index a7ffd8c89d1..a7ffd8c89d1 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLAllocConnect.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLAllocConnect.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLAllocEnv.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLAllocEnv.cpp
index a62dae61008..a62dae61008 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLAllocEnv.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLAllocEnv.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLAllocHandle.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLAllocHandle.cpp
index 9daf6ead946..9daf6ead946 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLAllocHandle.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLAllocHandle.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLAllocHandleStd.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLAllocHandleStd.cpp
index 61290e37b7b..61290e37b7b 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLAllocHandleStd.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLAllocHandleStd.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLAllocStmt.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLAllocStmt.cpp
index bf3f149f5de..bf3f149f5de 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLAllocStmt.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLAllocStmt.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLBindCol.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLBindCol.cpp
index 5562334e8cc..5562334e8cc 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLBindCol.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLBindCol.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLBindParam.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLBindParam.cpp
index 2fcc17b872f..2fcc17b872f 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLBindParam.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLBindParam.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLBindParameter.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLBindParameter.cpp
index e4ca5bbc731..e4ca5bbc731 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLBindParameter.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLBindParameter.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLBrowseConnect.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLBrowseConnect.cpp
index 7e629e199e5..7e629e199e5 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLBrowseConnect.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLBrowseConnect.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLBulkOperations.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLBulkOperations.cpp
index 7d256d66e3f..7d256d66e3f 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLBulkOperations.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLBulkOperations.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLCancel.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLCancel.cpp
index ac4e43c6e89..ac4e43c6e89 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLCancel.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLCancel.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLCloseCursor.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLCloseCursor.cpp
index 26d88c91e3b..26d88c91e3b 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLCloseCursor.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLCloseCursor.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLColAttribute.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLColAttribute.cpp
index 0e7e5446932..0e7e5446932 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLColAttribute.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLColAttribute.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLColAttributes.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLColAttributes.cpp
index 05a4c1d4d37..05a4c1d4d37 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLColAttributes.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLColAttributes.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLColumnPrivileges.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLColumnPrivileges.cpp
index cfbc9c2bc57..cfbc9c2bc57 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLColumnPrivileges.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLColumnPrivileges.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLColumns.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLColumns.cpp
index 4e0b646ee7d..4e0b646ee7d 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLColumns.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLColumns.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLConnect.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLConnect.cpp
index d8f30ed47e7..d8f30ed47e7 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLConnect.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLConnect.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLCopyDesc.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLCopyDesc.cpp
index b4d4b2e4122..b4d4b2e4122 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLCopyDesc.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLCopyDesc.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLDataSources.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLDataSources.cpp
index 6115e7175f9..6115e7175f9 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLDataSources.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLDataSources.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLDescribeCol.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLDescribeCol.cpp
index f15ce8962f1..f15ce8962f1 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLDescribeCol.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLDescribeCol.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLDescribeParam.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLDescribeParam.cpp
index beff41396fe..beff41396fe 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLDescribeParam.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLDescribeParam.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLDisconnect.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLDisconnect.cpp
index 75db5604da8..75db5604da8 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLDisconnect.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLDisconnect.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLDriverConnect.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLDriverConnect.cpp
index 340babd8523..340babd8523 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLDriverConnect.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLDriverConnect.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLDrivers.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLDrivers.cpp
index 9c52f900992..9c52f900992 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLDrivers.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLDrivers.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLEndTran.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLEndTran.cpp
index 20b0b2203f5..20b0b2203f5 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLEndTran.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLEndTran.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLError.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLError.cpp
index af78c931d37..af78c931d37 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLError.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLError.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLExecDirect.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLExecDirect.cpp
index 0ad99d29cd9..0ad99d29cd9 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLExecDirect.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLExecDirect.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLExecute.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLExecute.cpp
index 9c30d418f09..9c30d418f09 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLExecute.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLExecute.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLExtendedFetch.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLExtendedFetch.cpp
index e0dd078b5d0..e0dd078b5d0 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLExtendedFetch.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLExtendedFetch.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLFetch.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLFetch.cpp
index addba7b998c..addba7b998c 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLFetch.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLFetch.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLFetchScroll.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLFetchScroll.cpp
index cfbfc813fca..cfbfc813fca 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLFetchScroll.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLFetchScroll.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLForeignKeys.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLForeignKeys.cpp
index 886ac6bdaa5..886ac6bdaa5 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLForeignKeys.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLForeignKeys.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLFreeConnect.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLFreeConnect.cpp
index 9ac84710cce..9ac84710cce 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLFreeConnect.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLFreeConnect.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLFreeEnv.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLFreeEnv.cpp
index 7e35056feb5..7e35056feb5 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLFreeEnv.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLFreeEnv.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLFreeHandle.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLFreeHandle.cpp
index 284463cbb07..284463cbb07 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLFreeHandle.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLFreeHandle.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLFreeStmt.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLFreeStmt.cpp
index 7af6623a37a..7af6623a37a 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLFreeStmt.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLFreeStmt.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLGetConnectAttr.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLGetConnectAttr.cpp
index 66c1f3827e1..66c1f3827e1 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLGetConnectAttr.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLGetConnectAttr.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLGetConnectOption.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLGetConnectOption.cpp
index 514bedb12b9..514bedb12b9 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLGetConnectOption.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLGetConnectOption.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLGetCursorName.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLGetCursorName.cpp
index d54bdf42005..d54bdf42005 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLGetCursorName.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLGetCursorName.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLGetData.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLGetData.cpp
index 3b6987c515d..3b6987c515d 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLGetData.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLGetData.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLGetDescField.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLGetDescField.cpp
index 6cc390a58ed..6cc390a58ed 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLGetDescField.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLGetDescField.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLGetDescRec.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLGetDescRec.cpp
index c7e9631b075..c7e9631b075 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLGetDescRec.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLGetDescRec.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLGetDiagField.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLGetDiagField.cpp
index 3eb34f7ebf6..3eb34f7ebf6 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLGetDiagField.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLGetDiagField.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLGetDiagRec.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLGetDiagRec.cpp
index 448c5206d76..448c5206d76 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLGetDiagRec.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLGetDiagRec.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLGetEnvAttr.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLGetEnvAttr.cpp
index c93870326e4..c93870326e4 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLGetEnvAttr.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLGetEnvAttr.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLGetFunctions.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLGetFunctions.cpp
index 68416fab1a6..68416fab1a6 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLGetFunctions.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLGetFunctions.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLGetInfo.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLGetInfo.cpp
index 8f0a0d67cfa..8f0a0d67cfa 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLGetInfo.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLGetInfo.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLGetStmtAttr.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLGetStmtAttr.cpp
index 990ab68808a..990ab68808a 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLGetStmtAttr.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLGetStmtAttr.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLGetStmtOption.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLGetStmtOption.cpp
index 0b5758b1212..0b5758b1212 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLGetStmtOption.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLGetStmtOption.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLGetTypeInfo.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLGetTypeInfo.cpp
index e6a016cc400..e6a016cc400 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLGetTypeInfo.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLGetTypeInfo.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLMoreResults.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLMoreResults.cpp
index d23d653a319..d23d653a319 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLMoreResults.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLMoreResults.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLNativeSql.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLNativeSql.cpp
index fb8a9bbf3d9..fb8a9bbf3d9 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLNativeSql.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLNativeSql.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLNumParams.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLNumParams.cpp
index 7b1a6a07aec..7b1a6a07aec 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLNumParams.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLNumParams.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLNumResultCols.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLNumResultCols.cpp
index 2e70897a9a2..2e70897a9a2 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLNumResultCols.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLNumResultCols.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLParamData.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLParamData.cpp
index 4eb38a010f4..4eb38a010f4 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLParamData.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLParamData.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLParamOptions.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLParamOptions.cpp
index 59b7dcf7fa9..59b7dcf7fa9 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLParamOptions.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLParamOptions.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLPrepare.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLPrepare.cpp
index b1205fa6e3a..b1205fa6e3a 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLPrepare.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLPrepare.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLPrimaryKeys.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLPrimaryKeys.cpp
index 2d562ae3e19..2d562ae3e19 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLPrimaryKeys.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLPrimaryKeys.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLProcedureColumns.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLProcedureColumns.cpp
index 2e42e428b87..2e42e428b87 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLProcedureColumns.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLProcedureColumns.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLProcedures.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLProcedures.cpp
index 1f3a9f89073..1f3a9f89073 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLProcedures.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLProcedures.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLPutData.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLPutData.cpp
index a4715a836d2..a4715a836d2 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLPutData.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLPutData.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLRowCount.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLRowCount.cpp
index d03f954386a..d03f954386a 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLRowCount.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLRowCount.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLSetConnectAttr.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLSetConnectAttr.cpp
index 05bfce5e9cd..05bfce5e9cd 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLSetConnectAttr.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLSetConnectAttr.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLSetConnectOption.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLSetConnectOption.cpp
index a4794316971..a4794316971 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLSetConnectOption.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLSetConnectOption.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLSetCursorName.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLSetCursorName.cpp
index 291ad817d42..291ad817d42 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLSetCursorName.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLSetCursorName.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLSetDescField.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLSetDescField.cpp
index 19d34c2f46d..19d34c2f46d 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLSetDescField.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLSetDescField.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLSetDescRec.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLSetDescRec.cpp
index 80a00514a51..80a00514a51 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLSetDescRec.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLSetDescRec.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLSetEnvAttr.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLSetEnvAttr.cpp
index 86364eac5e8..86364eac5e8 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLSetEnvAttr.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLSetEnvAttr.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLSetParam.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLSetParam.cpp
index 03bde1076d8..03bde1076d8 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLSetParam.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLSetParam.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLSetPos.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLSetPos.cpp
index 653030f90bc..653030f90bc 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLSetPos.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLSetPos.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLSetScrollOptions.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLSetScrollOptions.cpp
index a5e89d8568b..a5e89d8568b 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLSetScrollOptions.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLSetScrollOptions.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLSetStmtAttr.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLSetStmtAttr.cpp
index 9ed6a83b563..9ed6a83b563 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLSetStmtAttr.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLSetStmtAttr.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLSetStmtOption.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLSetStmtOption.cpp
index b403fc8408c..b403fc8408c 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLSetStmtOption.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLSetStmtOption.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLSpecialColumns.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLSpecialColumns.cpp
index 5dd92c86053..5dd92c86053 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLSpecialColumns.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLSpecialColumns.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLStatistics.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLStatistics.cpp
index 941fb6249a5..941fb6249a5 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLStatistics.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLStatistics.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLTablePrivileges.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLTablePrivileges.cpp
index 23c6ad9fc4b..23c6ad9fc4b 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLTablePrivileges.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLTablePrivileges.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLTables.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLTables.cpp
index b2496bfba87..b2496bfba87 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLTables.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLTables.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/SQLTransact.cpp b/storage/ndb/src/old_files/client/odbc/driver/SQLTransact.cpp
index da8b46b1596..da8b46b1596 100644
--- a/ndb/src/old_files/client/odbc/driver/SQLTransact.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/SQLTransact.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/driver.cpp b/storage/ndb/src/old_files/client/odbc/driver/driver.cpp
index f992fa70878..f992fa70878 100644
--- a/ndb/src/old_files/client/odbc/driver/driver.cpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/driver.cpp
diff --git a/ndb/src/old_files/client/odbc/driver/driver.hpp b/storage/ndb/src/old_files/client/odbc/driver/driver.hpp
index 96d2e052c0d..96d2e052c0d 100644
--- a/ndb/src/old_files/client/odbc/driver/driver.hpp
+++ b/storage/ndb/src/old_files/client/odbc/driver/driver.hpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_comp_op.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_comp_op.cpp
index 40d3950a592..40d3950a592 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_comp_op.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_comp_op.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_create_index.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_create_index.cpp
index 3966c6d5db2..3966c6d5db2 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_create_index.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_create_index.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_create_table.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_create_table.cpp
index d6274119371..d6274119371 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_create_table.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_create_table.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_delete_index.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_delete_index.cpp
index 10814654a58..10814654a58 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_delete_index.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_delete_index.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_delete_lookup.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_delete_lookup.cpp
index d0795286122..d0795286122 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_delete_lookup.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_delete_lookup.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_delete_scan.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_delete_scan.cpp
index a0b3b8314b8..a0b3b8314b8 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_delete_scan.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_delete_scan.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_drop_index.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_drop_index.cpp
index 6bf451f6911..6bf451f6911 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_drop_index.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_drop_index.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_drop_table.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_drop_table.cpp
index 40d1d42fc61..40d1d42fc61 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_drop_table.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_drop_table.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_expr_conv.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_expr_conv.cpp
index 636bfda7d59..636bfda7d59 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_expr_conv.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_expr_conv.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_expr_func.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_expr_func.cpp
index 093d15c6e2b..093d15c6e2b 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_expr_func.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_expr_func.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_expr_op.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_expr_op.cpp
index fc8b6df9f5b..fc8b6df9f5b 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_expr_op.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_expr_op.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_insert.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_insert.cpp
index c2612c6aaab..c2612c6aaab 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_insert.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_insert.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_pred_op.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_pred_op.cpp
index 7caa4656473..7caa4656473 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_pred_op.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_pred_op.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_query_index.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_query_index.cpp
index 919743beac2..919743beac2 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_query_index.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_query_index.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_query_lookup.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_query_lookup.cpp
index 599e1a36461..599e1a36461 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_query_lookup.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_query_lookup.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_query_range.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_query_range.cpp
index 0bc878d760d..0bc878d760d 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_query_range.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_query_range.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_query_scan.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_query_scan.cpp
index 213dfdd616d..213dfdd616d 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_query_scan.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_query_scan.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_query_sys.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_query_sys.cpp
index acdc120e609..acdc120e609 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_query_sys.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_query_sys.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_update_index.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_update_index.cpp
index 35b6159d8ca..35b6159d8ca 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_update_index.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_update_index.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_update_lookup.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_update_lookup.cpp
index 2c801372de3..2c801372de3 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_update_lookup.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_update_lookup.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Exec_update_scan.cpp b/storage/ndb/src/old_files/client/odbc/executor/Exec_update_scan.cpp
index a36fdd27142..a36fdd27142 100644
--- a/ndb/src/old_files/client/odbc/executor/Exec_update_scan.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Exec_update_scan.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Executor.cpp b/storage/ndb/src/old_files/client/odbc/executor/Executor.cpp
index adabb28a4a5..adabb28a4a5 100644
--- a/ndb/src/old_files/client/odbc/executor/Executor.cpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Executor.cpp
diff --git a/ndb/src/old_files/client/odbc/executor/Executor.hpp b/storage/ndb/src/old_files/client/odbc/executor/Executor.hpp
index 5edb9d509ac..5edb9d509ac 100644
--- a/ndb/src/old_files/client/odbc/executor/Executor.hpp
+++ b/storage/ndb/src/old_files/client/odbc/executor/Executor.hpp
diff --git a/ndb/src/old_files/client/odbc/executor/Makefile b/storage/ndb/src/old_files/client/odbc/executor/Makefile
index d86781e212c..d86781e212c 100644
--- a/ndb/src/old_files/client/odbc/executor/Makefile
+++ b/storage/ndb/src/old_files/client/odbc/executor/Makefile
diff --git a/ndb/src/old_files/client/odbc/handles/AttrDbc.cpp b/storage/ndb/src/old_files/client/odbc/handles/AttrDbc.cpp
index 4768a8995a2..4768a8995a2 100644
--- a/ndb/src/old_files/client/odbc/handles/AttrDbc.cpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/AttrDbc.cpp
diff --git a/ndb/src/old_files/client/odbc/handles/AttrEnv.cpp b/storage/ndb/src/old_files/client/odbc/handles/AttrEnv.cpp
index 3d57fddeb57..3d57fddeb57 100644
--- a/ndb/src/old_files/client/odbc/handles/AttrEnv.cpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/AttrEnv.cpp
diff --git a/ndb/src/old_files/client/odbc/handles/AttrRoot.cpp b/storage/ndb/src/old_files/client/odbc/handles/AttrRoot.cpp
index d1b264835b6..d1b264835b6 100644
--- a/ndb/src/old_files/client/odbc/handles/AttrRoot.cpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/AttrRoot.cpp
diff --git a/ndb/src/old_files/client/odbc/handles/AttrStmt.cpp b/storage/ndb/src/old_files/client/odbc/handles/AttrStmt.cpp
index ce9a9c03fd1..ce9a9c03fd1 100644
--- a/ndb/src/old_files/client/odbc/handles/AttrStmt.cpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/AttrStmt.cpp
diff --git a/ndb/src/old_files/client/odbc/handles/DescSpec.cpp b/storage/ndb/src/old_files/client/odbc/handles/DescSpec.cpp
index 83905cf9822..83905cf9822 100644
--- a/ndb/src/old_files/client/odbc/handles/DescSpec.cpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/DescSpec.cpp
diff --git a/ndb/src/old_files/client/odbc/handles/FuncTab.cpp b/storage/ndb/src/old_files/client/odbc/handles/FuncTab.cpp
index 6bd744d7a7f..6bd744d7a7f 100644
--- a/ndb/src/old_files/client/odbc/handles/FuncTab.cpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/FuncTab.cpp
diff --git a/ndb/src/old_files/client/odbc/handles/HandleBase.cpp b/storage/ndb/src/old_files/client/odbc/handles/HandleBase.cpp
index 27379cdc3f8..27379cdc3f8 100644
--- a/ndb/src/old_files/client/odbc/handles/HandleBase.cpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/HandleBase.cpp
diff --git a/ndb/src/old_files/client/odbc/handles/HandleBase.hpp b/storage/ndb/src/old_files/client/odbc/handles/HandleBase.hpp
index fc35c2b559b..fc35c2b559b 100644
--- a/ndb/src/old_files/client/odbc/handles/HandleBase.hpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/HandleBase.hpp
diff --git a/ndb/src/old_files/client/odbc/handles/HandleDbc.cpp b/storage/ndb/src/old_files/client/odbc/handles/HandleDbc.cpp
index 2d5ded2cc21..2d5ded2cc21 100644
--- a/ndb/src/old_files/client/odbc/handles/HandleDbc.cpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/HandleDbc.cpp
diff --git a/ndb/src/old_files/client/odbc/handles/HandleDbc.hpp b/storage/ndb/src/old_files/client/odbc/handles/HandleDbc.hpp
index 130df08d02c..130df08d02c 100644
--- a/ndb/src/old_files/client/odbc/handles/HandleDbc.hpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/HandleDbc.hpp
diff --git a/ndb/src/old_files/client/odbc/handles/HandleDesc.cpp b/storage/ndb/src/old_files/client/odbc/handles/HandleDesc.cpp
index 4cff1bb8892..4cff1bb8892 100644
--- a/ndb/src/old_files/client/odbc/handles/HandleDesc.cpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/HandleDesc.cpp
diff --git a/ndb/src/old_files/client/odbc/handles/HandleDesc.hpp b/storage/ndb/src/old_files/client/odbc/handles/HandleDesc.hpp
index 9419697f134..9419697f134 100644
--- a/ndb/src/old_files/client/odbc/handles/HandleDesc.hpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/HandleDesc.hpp
diff --git a/ndb/src/old_files/client/odbc/handles/HandleEnv.cpp b/storage/ndb/src/old_files/client/odbc/handles/HandleEnv.cpp
index bc9d8b420a6..bc9d8b420a6 100644
--- a/ndb/src/old_files/client/odbc/handles/HandleEnv.cpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/HandleEnv.cpp
diff --git a/ndb/src/old_files/client/odbc/handles/HandleEnv.hpp b/storage/ndb/src/old_files/client/odbc/handles/HandleEnv.hpp
index 2b13b0256bc..2b13b0256bc 100644
--- a/ndb/src/old_files/client/odbc/handles/HandleEnv.hpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/HandleEnv.hpp
diff --git a/ndb/src/old_files/client/odbc/handles/HandleRoot.cpp b/storage/ndb/src/old_files/client/odbc/handles/HandleRoot.cpp
index 13560d55028..13560d55028 100644
--- a/ndb/src/old_files/client/odbc/handles/HandleRoot.cpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/HandleRoot.cpp
diff --git a/ndb/src/old_files/client/odbc/handles/HandleRoot.hpp b/storage/ndb/src/old_files/client/odbc/handles/HandleRoot.hpp
index 08a22b3e400..08a22b3e400 100644
--- a/ndb/src/old_files/client/odbc/handles/HandleRoot.hpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/HandleRoot.hpp
diff --git a/ndb/src/old_files/client/odbc/handles/HandleStmt.cpp b/storage/ndb/src/old_files/client/odbc/handles/HandleStmt.cpp
index d33d33dbd5b..d33d33dbd5b 100644
--- a/ndb/src/old_files/client/odbc/handles/HandleStmt.cpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/HandleStmt.cpp
diff --git a/ndb/src/old_files/client/odbc/handles/HandleStmt.hpp b/storage/ndb/src/old_files/client/odbc/handles/HandleStmt.hpp
index 0bee138bfc6..0bee138bfc6 100644
--- a/ndb/src/old_files/client/odbc/handles/HandleStmt.hpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/HandleStmt.hpp
diff --git a/ndb/src/old_files/client/odbc/handles/InfoTab.cpp b/storage/ndb/src/old_files/client/odbc/handles/InfoTab.cpp
index 1a93c4da264..1a93c4da264 100644
--- a/ndb/src/old_files/client/odbc/handles/InfoTab.cpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/InfoTab.cpp
diff --git a/ndb/src/old_files/client/odbc/handles/Makefile b/storage/ndb/src/old_files/client/odbc/handles/Makefile
index d37e7d286ba..d37e7d286ba 100644
--- a/ndb/src/old_files/client/odbc/handles/Makefile
+++ b/storage/ndb/src/old_files/client/odbc/handles/Makefile
diff --git a/ndb/src/old_files/client/odbc/handles/PoolNdb.cpp b/storage/ndb/src/old_files/client/odbc/handles/PoolNdb.cpp
index 45d3c67ec77..45d3c67ec77 100644
--- a/ndb/src/old_files/client/odbc/handles/PoolNdb.cpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/PoolNdb.cpp
diff --git a/ndb/src/old_files/client/odbc/handles/PoolNdb.hpp b/storage/ndb/src/old_files/client/odbc/handles/PoolNdb.hpp
index 35eac055c30..35eac055c30 100644
--- a/ndb/src/old_files/client/odbc/handles/PoolNdb.hpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/PoolNdb.hpp
diff --git a/ndb/src/old_files/client/odbc/handles/handles.hpp b/storage/ndb/src/old_files/client/odbc/handles/handles.hpp
index a9f0fcae888..a9f0fcae888 100644
--- a/ndb/src/old_files/client/odbc/handles/handles.hpp
+++ b/storage/ndb/src/old_files/client/odbc/handles/handles.hpp
diff --git a/ndb/src/old_files/ndbbaseclient/Makefile b/storage/ndb/src/old_files/ndbbaseclient/Makefile
index f4c49a95ffa..f4c49a95ffa 100644
--- a/ndb/src/old_files/ndbbaseclient/Makefile
+++ b/storage/ndb/src/old_files/ndbbaseclient/Makefile
diff --git a/ndb/src/old_files/ndbbaseclient/ndbbaseclient_dummy.cpp b/storage/ndb/src/old_files/ndbbaseclient/ndbbaseclient_dummy.cpp
index e69de29bb2d..e69de29bb2d 100644
--- a/ndb/src/old_files/ndbbaseclient/ndbbaseclient_dummy.cpp
+++ b/storage/ndb/src/old_files/ndbbaseclient/ndbbaseclient_dummy.cpp
diff --git a/ndb/src/old_files/ndbclient/Makefile b/storage/ndb/src/old_files/ndbclient/Makefile
index 2c597eccfa1..2c597eccfa1 100644
--- a/ndb/src/old_files/ndbclient/Makefile
+++ b/storage/ndb/src/old_files/ndbclient/Makefile
diff --git a/ndb/src/old_files/ndbclient/ndbclient_dummy.cpp b/storage/ndb/src/old_files/ndbclient/ndbclient_dummy.cpp
index e69de29bb2d..e69de29bb2d 100644
--- a/ndb/src/old_files/ndbclient/ndbclient_dummy.cpp
+++ b/storage/ndb/src/old_files/ndbclient/ndbclient_dummy.cpp
diff --git a/ndb/src/old_files/newtonapi/Makefile b/storage/ndb/src/old_files/newtonapi/Makefile
index bed179046a5..bed179046a5 100644
--- a/ndb/src/old_files/newtonapi/Makefile
+++ b/storage/ndb/src/old_files/newtonapi/Makefile
diff --git a/ndb/src/old_files/newtonapi/dba_binding.cpp b/storage/ndb/src/old_files/newtonapi/dba_binding.cpp
index 63e48110b1d..63e48110b1d 100644
--- a/ndb/src/old_files/newtonapi/dba_binding.cpp
+++ b/storage/ndb/src/old_files/newtonapi/dba_binding.cpp
diff --git a/ndb/src/old_files/newtonapi/dba_bulkread.cpp b/storage/ndb/src/old_files/newtonapi/dba_bulkread.cpp
index 1f75037046b..1f75037046b 100644
--- a/ndb/src/old_files/newtonapi/dba_bulkread.cpp
+++ b/storage/ndb/src/old_files/newtonapi/dba_bulkread.cpp
diff --git a/ndb/src/old_files/newtonapi/dba_config.cpp b/storage/ndb/src/old_files/newtonapi/dba_config.cpp
index d84386a9438..d84386a9438 100644
--- a/ndb/src/old_files/newtonapi/dba_config.cpp
+++ b/storage/ndb/src/old_files/newtonapi/dba_config.cpp
diff --git a/ndb/src/old_files/newtonapi/dba_dac.cpp b/storage/ndb/src/old_files/newtonapi/dba_dac.cpp
index fcb4e676e46..fcb4e676e46 100644
--- a/ndb/src/old_files/newtonapi/dba_dac.cpp
+++ b/storage/ndb/src/old_files/newtonapi/dba_dac.cpp
diff --git a/ndb/src/old_files/newtonapi/dba_error.cpp b/storage/ndb/src/old_files/newtonapi/dba_error.cpp
index f05446522b0..f05446522b0 100644
--- a/ndb/src/old_files/newtonapi/dba_error.cpp
+++ b/storage/ndb/src/old_files/newtonapi/dba_error.cpp
diff --git a/ndb/src/old_files/newtonapi/dba_init.cpp b/storage/ndb/src/old_files/newtonapi/dba_init.cpp
index aa5fef1171c..aa5fef1171c 100644
--- a/ndb/src/old_files/newtonapi/dba_init.cpp
+++ b/storage/ndb/src/old_files/newtonapi/dba_init.cpp
diff --git a/ndb/src/old_files/newtonapi/dba_internal.hpp b/storage/ndb/src/old_files/newtonapi/dba_internal.hpp
index 84ae7ba222b..84ae7ba222b 100644
--- a/ndb/src/old_files/newtonapi/dba_internal.hpp
+++ b/storage/ndb/src/old_files/newtonapi/dba_internal.hpp
diff --git a/ndb/src/old_files/newtonapi/dba_process.cpp b/storage/ndb/src/old_files/newtonapi/dba_process.cpp
index ddb6e62f180..ddb6e62f180 100644
--- a/ndb/src/old_files/newtonapi/dba_process.cpp
+++ b/storage/ndb/src/old_files/newtonapi/dba_process.cpp
diff --git a/ndb/src/old_files/newtonapi/dba_process.hpp b/storage/ndb/src/old_files/newtonapi/dba_process.hpp
index ef24fbd9142..ef24fbd9142 100644
--- a/ndb/src/old_files/newtonapi/dba_process.hpp
+++ b/storage/ndb/src/old_files/newtonapi/dba_process.hpp
diff --git a/ndb/src/old_files/newtonapi/dba_schema.cpp b/storage/ndb/src/old_files/newtonapi/dba_schema.cpp
index 1bf21f1fe80..1bf21f1fe80 100644
--- a/ndb/src/old_files/newtonapi/dba_schema.cpp
+++ b/storage/ndb/src/old_files/newtonapi/dba_schema.cpp
diff --git a/ndb/src/old_files/rep/ExtSender.cpp b/storage/ndb/src/old_files/rep/ExtSender.cpp
index cf31001a85f..cf31001a85f 100644
--- a/ndb/src/old_files/rep/ExtSender.cpp
+++ b/storage/ndb/src/old_files/rep/ExtSender.cpp
diff --git a/ndb/src/old_files/rep/ExtSender.hpp b/storage/ndb/src/old_files/rep/ExtSender.hpp
index 0bdabd68f37..0bdabd68f37 100644
--- a/ndb/src/old_files/rep/ExtSender.hpp
+++ b/storage/ndb/src/old_files/rep/ExtSender.hpp
diff --git a/ndb/src/old_files/rep/Makefile b/storage/ndb/src/old_files/rep/Makefile
index 9688a68ec74..9688a68ec74 100644
--- a/ndb/src/old_files/rep/Makefile
+++ b/storage/ndb/src/old_files/rep/Makefile
diff --git a/ndb/src/old_files/rep/NodeConnectInfo.hpp b/storage/ndb/src/old_files/rep/NodeConnectInfo.hpp
index 403f92a5999..403f92a5999 100644
--- a/ndb/src/old_files/rep/NodeConnectInfo.hpp
+++ b/storage/ndb/src/old_files/rep/NodeConnectInfo.hpp
diff --git a/ndb/src/old_files/rep/README b/storage/ndb/src/old_files/rep/README
index 7be5e230eb3..7be5e230eb3 100644
--- a/ndb/src/old_files/rep/README
+++ b/storage/ndb/src/old_files/rep/README
diff --git a/ndb/src/old_files/rep/RepApiInterpreter.cpp b/storage/ndb/src/old_files/rep/RepApiInterpreter.cpp
index 6e6f150713a..6e6f150713a 100644
--- a/ndb/src/old_files/rep/RepApiInterpreter.cpp
+++ b/storage/ndb/src/old_files/rep/RepApiInterpreter.cpp
diff --git a/ndb/src/old_files/rep/RepApiInterpreter.hpp b/storage/ndb/src/old_files/rep/RepApiInterpreter.hpp
index 78f190156b3..78f190156b3 100644
--- a/ndb/src/old_files/rep/RepApiInterpreter.hpp
+++ b/storage/ndb/src/old_files/rep/RepApiInterpreter.hpp
diff --git a/ndb/src/old_files/rep/RepApiService.cpp b/storage/ndb/src/old_files/rep/RepApiService.cpp
index d07f7a59375..d07f7a59375 100644
--- a/ndb/src/old_files/rep/RepApiService.cpp
+++ b/storage/ndb/src/old_files/rep/RepApiService.cpp
diff --git a/ndb/src/old_files/rep/RepApiService.hpp b/storage/ndb/src/old_files/rep/RepApiService.hpp
index e1137e53258..e1137e53258 100644
--- a/ndb/src/old_files/rep/RepApiService.hpp
+++ b/storage/ndb/src/old_files/rep/RepApiService.hpp
diff --git a/ndb/src/old_files/rep/RepCommandInterpreter.cpp b/storage/ndb/src/old_files/rep/RepCommandInterpreter.cpp
index a0daf9529ab..a0daf9529ab 100644
--- a/ndb/src/old_files/rep/RepCommandInterpreter.cpp
+++ b/storage/ndb/src/old_files/rep/RepCommandInterpreter.cpp
diff --git a/ndb/src/old_files/rep/RepCommandInterpreter.hpp b/storage/ndb/src/old_files/rep/RepCommandInterpreter.hpp
index 398a7c0318c..398a7c0318c 100644
--- a/ndb/src/old_files/rep/RepCommandInterpreter.hpp
+++ b/storage/ndb/src/old_files/rep/RepCommandInterpreter.hpp
diff --git a/ndb/src/old_files/rep/RepComponents.cpp b/storage/ndb/src/old_files/rep/RepComponents.cpp
index 04b2e0e5fa5..04b2e0e5fa5 100644
--- a/ndb/src/old_files/rep/RepComponents.cpp
+++ b/storage/ndb/src/old_files/rep/RepComponents.cpp
diff --git a/ndb/src/old_files/rep/RepComponents.hpp b/storage/ndb/src/old_files/rep/RepComponents.hpp
index ff0f29e2128..ff0f29e2128 100644
--- a/ndb/src/old_files/rep/RepComponents.hpp
+++ b/storage/ndb/src/old_files/rep/RepComponents.hpp
diff --git a/ndb/src/old_files/rep/RepMain.cpp b/storage/ndb/src/old_files/rep/RepMain.cpp
index d9f057be9a1..d9f057be9a1 100644
--- a/ndb/src/old_files/rep/RepMain.cpp
+++ b/storage/ndb/src/old_files/rep/RepMain.cpp
diff --git a/ndb/src/old_files/rep/Requestor.cpp b/storage/ndb/src/old_files/rep/Requestor.cpp
index 3c93a6394a4..3c93a6394a4 100644
--- a/ndb/src/old_files/rep/Requestor.cpp
+++ b/storage/ndb/src/old_files/rep/Requestor.cpp
diff --git a/ndb/src/old_files/rep/Requestor.hpp b/storage/ndb/src/old_files/rep/Requestor.hpp
index 735d2094bde..735d2094bde 100644
--- a/ndb/src/old_files/rep/Requestor.hpp
+++ b/storage/ndb/src/old_files/rep/Requestor.hpp
diff --git a/ndb/src/old_files/rep/RequestorSubscriptions.cpp b/storage/ndb/src/old_files/rep/RequestorSubscriptions.cpp
index 75b41fae037..75b41fae037 100644
--- a/ndb/src/old_files/rep/RequestorSubscriptions.cpp
+++ b/storage/ndb/src/old_files/rep/RequestorSubscriptions.cpp
diff --git a/ndb/src/old_files/rep/SignalQueue.cpp b/storage/ndb/src/old_files/rep/SignalQueue.cpp
index 9b356a14b7d..9b356a14b7d 100644
--- a/ndb/src/old_files/rep/SignalQueue.cpp
+++ b/storage/ndb/src/old_files/rep/SignalQueue.cpp
diff --git a/ndb/src/old_files/rep/SignalQueue.hpp b/storage/ndb/src/old_files/rep/SignalQueue.hpp
index 697bca85893..697bca85893 100644
--- a/ndb/src/old_files/rep/SignalQueue.hpp
+++ b/storage/ndb/src/old_files/rep/SignalQueue.hpp
diff --git a/ndb/src/old_files/rep/TODO b/storage/ndb/src/old_files/rep/TODO
index a2462fae6cd..a2462fae6cd 100644
--- a/ndb/src/old_files/rep/TODO
+++ b/storage/ndb/src/old_files/rep/TODO
diff --git a/ndb/src/old_files/rep/adapters/AppNDB.cpp b/storage/ndb/src/old_files/rep/adapters/AppNDB.cpp
index 05f6d52807f..05f6d52807f 100644
--- a/ndb/src/old_files/rep/adapters/AppNDB.cpp
+++ b/storage/ndb/src/old_files/rep/adapters/AppNDB.cpp
diff --git a/ndb/src/old_files/rep/adapters/AppNDB.hpp b/storage/ndb/src/old_files/rep/adapters/AppNDB.hpp
index 9563a1e41ab..9563a1e41ab 100644
--- a/ndb/src/old_files/rep/adapters/AppNDB.hpp
+++ b/storage/ndb/src/old_files/rep/adapters/AppNDB.hpp
diff --git a/ndb/src/old_files/rep/adapters/ExtAPI.cpp b/storage/ndb/src/old_files/rep/adapters/ExtAPI.cpp
index 0dcd1e85465..0dcd1e85465 100644
--- a/ndb/src/old_files/rep/adapters/ExtAPI.cpp
+++ b/storage/ndb/src/old_files/rep/adapters/ExtAPI.cpp
diff --git a/ndb/src/old_files/rep/adapters/ExtAPI.hpp b/storage/ndb/src/old_files/rep/adapters/ExtAPI.hpp
index f10b6c7d682..f10b6c7d682 100644
--- a/ndb/src/old_files/rep/adapters/ExtAPI.hpp
+++ b/storage/ndb/src/old_files/rep/adapters/ExtAPI.hpp
diff --git a/ndb/src/old_files/rep/adapters/ExtNDB.cpp b/storage/ndb/src/old_files/rep/adapters/ExtNDB.cpp
index 6642b750b57..6642b750b57 100644
--- a/ndb/src/old_files/rep/adapters/ExtNDB.cpp
+++ b/storage/ndb/src/old_files/rep/adapters/ExtNDB.cpp
diff --git a/ndb/src/old_files/rep/adapters/ExtNDB.hpp b/storage/ndb/src/old_files/rep/adapters/ExtNDB.hpp
index 228c980fd06..228c980fd06 100644
--- a/ndb/src/old_files/rep/adapters/ExtNDB.hpp
+++ b/storage/ndb/src/old_files/rep/adapters/ExtNDB.hpp
diff --git a/ndb/src/old_files/rep/adapters/Makefile b/storage/ndb/src/old_files/rep/adapters/Makefile
index bdd711510c3..bdd711510c3 100644
--- a/ndb/src/old_files/rep/adapters/Makefile
+++ b/storage/ndb/src/old_files/rep/adapters/Makefile
diff --git a/ndb/src/old_files/rep/adapters/TableInfoPs.hpp b/storage/ndb/src/old_files/rep/adapters/TableInfoPs.hpp
index 3fa25979255..3fa25979255 100644
--- a/ndb/src/old_files/rep/adapters/TableInfoPs.hpp
+++ b/storage/ndb/src/old_files/rep/adapters/TableInfoPs.hpp
diff --git a/ndb/src/old_files/rep/dbug_hack.cpp b/storage/ndb/src/old_files/rep/dbug_hack.cpp
index 74e5f080777..74e5f080777 100644
--- a/ndb/src/old_files/rep/dbug_hack.cpp
+++ b/storage/ndb/src/old_files/rep/dbug_hack.cpp
diff --git a/ndb/src/old_files/rep/rep_version.hpp b/storage/ndb/src/old_files/rep/rep_version.hpp
index 3830f9c351c..3830f9c351c 100644
--- a/ndb/src/old_files/rep/rep_version.hpp
+++ b/storage/ndb/src/old_files/rep/rep_version.hpp
diff --git a/ndb/src/old_files/rep/repapi/Makefile b/storage/ndb/src/old_files/rep/repapi/Makefile
index fdd153f1060..fdd153f1060 100644
--- a/ndb/src/old_files/rep/repapi/Makefile
+++ b/storage/ndb/src/old_files/rep/repapi/Makefile
diff --git a/ndb/src/old_files/rep/repapi/repapi.cpp b/storage/ndb/src/old_files/rep/repapi/repapi.cpp
index d34ab098c9c..d34ab098c9c 100644
--- a/ndb/src/old_files/rep/repapi/repapi.cpp
+++ b/storage/ndb/src/old_files/rep/repapi/repapi.cpp
diff --git a/ndb/src/old_files/rep/repapi/repapi.h b/storage/ndb/src/old_files/rep/repapi/repapi.h
index 170e493cd86..170e493cd86 100644
--- a/ndb/src/old_files/rep/repapi/repapi.h
+++ b/storage/ndb/src/old_files/rep/repapi/repapi.h
diff --git a/ndb/src/old_files/rep/state/Channel.cpp b/storage/ndb/src/old_files/rep/state/Channel.cpp
index a7f7b90d3fe..a7f7b90d3fe 100644
--- a/ndb/src/old_files/rep/state/Channel.cpp
+++ b/storage/ndb/src/old_files/rep/state/Channel.cpp
diff --git a/ndb/src/old_files/rep/state/Channel.hpp b/storage/ndb/src/old_files/rep/state/Channel.hpp
index cdf4eecca63..cdf4eecca63 100644
--- a/ndb/src/old_files/rep/state/Channel.hpp
+++ b/storage/ndb/src/old_files/rep/state/Channel.hpp
diff --git a/ndb/src/old_files/rep/state/Interval.cpp b/storage/ndb/src/old_files/rep/state/Interval.cpp
index 8266f19c58d..8266f19c58d 100644
--- a/ndb/src/old_files/rep/state/Interval.cpp
+++ b/storage/ndb/src/old_files/rep/state/Interval.cpp
diff --git a/ndb/src/old_files/rep/state/Interval.hpp b/storage/ndb/src/old_files/rep/state/Interval.hpp
index 935adaf26b1..935adaf26b1 100644
--- a/ndb/src/old_files/rep/state/Interval.hpp
+++ b/storage/ndb/src/old_files/rep/state/Interval.hpp
diff --git a/ndb/src/old_files/rep/state/Makefile b/storage/ndb/src/old_files/rep/state/Makefile
index 3eed69a97dd..3eed69a97dd 100644
--- a/ndb/src/old_files/rep/state/Makefile
+++ b/storage/ndb/src/old_files/rep/state/Makefile
diff --git a/ndb/src/old_files/rep/state/RepState.cpp b/storage/ndb/src/old_files/rep/state/RepState.cpp
index d8a50961a3c..d8a50961a3c 100644
--- a/ndb/src/old_files/rep/state/RepState.cpp
+++ b/storage/ndb/src/old_files/rep/state/RepState.cpp
diff --git a/ndb/src/old_files/rep/state/RepState.hpp b/storage/ndb/src/old_files/rep/state/RepState.hpp
index 06bbca19f7e..06bbca19f7e 100644
--- a/ndb/src/old_files/rep/state/RepState.hpp
+++ b/storage/ndb/src/old_files/rep/state/RepState.hpp
diff --git a/ndb/src/old_files/rep/state/RepStateEvent.cpp b/storage/ndb/src/old_files/rep/state/RepStateEvent.cpp
index 9be304c8bfa..9be304c8bfa 100644
--- a/ndb/src/old_files/rep/state/RepStateEvent.cpp
+++ b/storage/ndb/src/old_files/rep/state/RepStateEvent.cpp
diff --git a/ndb/src/old_files/rep/state/RepStateRequests.cpp b/storage/ndb/src/old_files/rep/state/RepStateRequests.cpp
index 02677e141f6..02677e141f6 100644
--- a/ndb/src/old_files/rep/state/RepStateRequests.cpp
+++ b/storage/ndb/src/old_files/rep/state/RepStateRequests.cpp
diff --git a/ndb/src/old_files/rep/state/testInterval/Makefile b/storage/ndb/src/old_files/rep/state/testInterval/Makefile
index fbb0b48c280..fbb0b48c280 100644
--- a/ndb/src/old_files/rep/state/testInterval/Makefile
+++ b/storage/ndb/src/old_files/rep/state/testInterval/Makefile
diff --git a/ndb/src/old_files/rep/state/testInterval/testInterval.cpp b/storage/ndb/src/old_files/rep/state/testInterval/testInterval.cpp
index 463e4adffb7..463e4adffb7 100644
--- a/ndb/src/old_files/rep/state/testInterval/testInterval.cpp
+++ b/storage/ndb/src/old_files/rep/state/testInterval/testInterval.cpp
diff --git a/ndb/src/old_files/rep/state/testRepState/Makefile b/storage/ndb/src/old_files/rep/state/testRepState/Makefile
index 33c6076eff3..33c6076eff3 100644
--- a/ndb/src/old_files/rep/state/testRepState/Makefile
+++ b/storage/ndb/src/old_files/rep/state/testRepState/Makefile
diff --git a/ndb/src/old_files/rep/state/testRepState/testRequestor.cpp b/storage/ndb/src/old_files/rep/state/testRepState/testRequestor.cpp
index 8989f7098b8..8989f7098b8 100644
--- a/ndb/src/old_files/rep/state/testRepState/testRequestor.cpp
+++ b/storage/ndb/src/old_files/rep/state/testRepState/testRequestor.cpp
diff --git a/ndb/src/old_files/rep/state/testRepState/testRequestor.hpp b/storage/ndb/src/old_files/rep/state/testRepState/testRequestor.hpp
index 726b289114d..726b289114d 100644
--- a/ndb/src/old_files/rep/state/testRepState/testRequestor.hpp
+++ b/storage/ndb/src/old_files/rep/state/testRepState/testRequestor.hpp
diff --git a/ndb/src/old_files/rep/storage/GCIBuffer.cpp b/storage/ndb/src/old_files/rep/storage/GCIBuffer.cpp
index 013600b30a5..013600b30a5 100644
--- a/ndb/src/old_files/rep/storage/GCIBuffer.cpp
+++ b/storage/ndb/src/old_files/rep/storage/GCIBuffer.cpp
diff --git a/ndb/src/old_files/rep/storage/GCIBuffer.hpp b/storage/ndb/src/old_files/rep/storage/GCIBuffer.hpp
index 8a8473d1d49..8a8473d1d49 100644
--- a/ndb/src/old_files/rep/storage/GCIBuffer.hpp
+++ b/storage/ndb/src/old_files/rep/storage/GCIBuffer.hpp
diff --git a/ndb/src/old_files/rep/storage/GCIContainer.cpp b/storage/ndb/src/old_files/rep/storage/GCIContainer.cpp
index c161db0769b..c161db0769b 100644
--- a/ndb/src/old_files/rep/storage/GCIContainer.cpp
+++ b/storage/ndb/src/old_files/rep/storage/GCIContainer.cpp
diff --git a/ndb/src/old_files/rep/storage/GCIContainer.hpp b/storage/ndb/src/old_files/rep/storage/GCIContainer.hpp
index 48cbc66bfbd..48cbc66bfbd 100644
--- a/ndb/src/old_files/rep/storage/GCIContainer.hpp
+++ b/storage/ndb/src/old_files/rep/storage/GCIContainer.hpp
diff --git a/ndb/src/old_files/rep/storage/GCIContainerPS.cpp b/storage/ndb/src/old_files/rep/storage/GCIContainerPS.cpp
index 5adb53f965c..5adb53f965c 100644
--- a/ndb/src/old_files/rep/storage/GCIContainerPS.cpp
+++ b/storage/ndb/src/old_files/rep/storage/GCIContainerPS.cpp
diff --git a/ndb/src/old_files/rep/storage/GCIContainerPS.hpp b/storage/ndb/src/old_files/rep/storage/GCIContainerPS.hpp
index 7f5aaac4840..7f5aaac4840 100644
--- a/ndb/src/old_files/rep/storage/GCIContainerPS.hpp
+++ b/storage/ndb/src/old_files/rep/storage/GCIContainerPS.hpp
diff --git a/ndb/src/old_files/rep/storage/GCIPage.cpp b/storage/ndb/src/old_files/rep/storage/GCIPage.cpp
index 05ecde2fee1..05ecde2fee1 100644
--- a/ndb/src/old_files/rep/storage/GCIPage.cpp
+++ b/storage/ndb/src/old_files/rep/storage/GCIPage.cpp
diff --git a/ndb/src/old_files/rep/storage/GCIPage.hpp b/storage/ndb/src/old_files/rep/storage/GCIPage.hpp
index 50c5ab0cfba..50c5ab0cfba 100644
--- a/ndb/src/old_files/rep/storage/GCIPage.hpp
+++ b/storage/ndb/src/old_files/rep/storage/GCIPage.hpp
diff --git a/ndb/src/old_files/rep/storage/LogRecord.hpp b/storage/ndb/src/old_files/rep/storage/LogRecord.hpp
index a0bf3d52372..a0bf3d52372 100644
--- a/ndb/src/old_files/rep/storage/LogRecord.hpp
+++ b/storage/ndb/src/old_files/rep/storage/LogRecord.hpp
diff --git a/ndb/src/old_files/rep/storage/Makefile b/storage/ndb/src/old_files/rep/storage/Makefile
index 89b3af455e8..89b3af455e8 100644
--- a/ndb/src/old_files/rep/storage/Makefile
+++ b/storage/ndb/src/old_files/rep/storage/Makefile
diff --git a/ndb/src/old_files/rep/storage/NodeConnectInfo.hpp b/storage/ndb/src/old_files/rep/storage/NodeConnectInfo.hpp
index 403f92a5999..403f92a5999 100644
--- a/ndb/src/old_files/rep/storage/NodeConnectInfo.hpp
+++ b/storage/ndb/src/old_files/rep/storage/NodeConnectInfo.hpp
diff --git a/ndb/src/old_files/rep/storage/NodeGroup.cpp b/storage/ndb/src/old_files/rep/storage/NodeGroup.cpp
index 33451efb104..33451efb104 100644
--- a/ndb/src/old_files/rep/storage/NodeGroup.cpp
+++ b/storage/ndb/src/old_files/rep/storage/NodeGroup.cpp
diff --git a/ndb/src/old_files/rep/storage/NodeGroup.hpp b/storage/ndb/src/old_files/rep/storage/NodeGroup.hpp
index 1f515e02a23..1f515e02a23 100644
--- a/ndb/src/old_files/rep/storage/NodeGroup.hpp
+++ b/storage/ndb/src/old_files/rep/storage/NodeGroup.hpp
diff --git a/ndb/src/old_files/rep/storage/NodeGroupInfo.cpp b/storage/ndb/src/old_files/rep/storage/NodeGroupInfo.cpp
index 8c250268997..8c250268997 100644
--- a/ndb/src/old_files/rep/storage/NodeGroupInfo.cpp
+++ b/storage/ndb/src/old_files/rep/storage/NodeGroupInfo.cpp
diff --git a/ndb/src/old_files/rep/storage/NodeGroupInfo.hpp b/storage/ndb/src/old_files/rep/storage/NodeGroupInfo.hpp
index 3d0499d4425..3d0499d4425 100644
--- a/ndb/src/old_files/rep/storage/NodeGroupInfo.hpp
+++ b/storage/ndb/src/old_files/rep/storage/NodeGroupInfo.hpp
diff --git a/ndb/src/old_files/rep/transfer/Makefile b/storage/ndb/src/old_files/rep/transfer/Makefile
index 0d8851e287a..0d8851e287a 100644
--- a/ndb/src/old_files/rep/transfer/Makefile
+++ b/storage/ndb/src/old_files/rep/transfer/Makefile
diff --git a/ndb/src/old_files/rep/transfer/TransPS.cpp b/storage/ndb/src/old_files/rep/transfer/TransPS.cpp
index 11fb0203cbc..11fb0203cbc 100644
--- a/ndb/src/old_files/rep/transfer/TransPS.cpp
+++ b/storage/ndb/src/old_files/rep/transfer/TransPS.cpp
diff --git a/ndb/src/old_files/rep/transfer/TransPS.hpp b/storage/ndb/src/old_files/rep/transfer/TransPS.hpp
index 0464b9e47c0..0464b9e47c0 100644
--- a/ndb/src/old_files/rep/transfer/TransPS.hpp
+++ b/storage/ndb/src/old_files/rep/transfer/TransPS.hpp
diff --git a/ndb/src/old_files/rep/transfer/TransSS.cpp b/storage/ndb/src/old_files/rep/transfer/TransSS.cpp
index 376c6375bc4..376c6375bc4 100644
--- a/ndb/src/old_files/rep/transfer/TransSS.cpp
+++ b/storage/ndb/src/old_files/rep/transfer/TransSS.cpp
diff --git a/ndb/src/old_files/rep/transfer/TransSS.hpp b/storage/ndb/src/old_files/rep/transfer/TransSS.hpp
index 3340038c8d1..3340038c8d1 100644
--- a/ndb/src/old_files/rep/transfer/TransSS.hpp
+++ b/storage/ndb/src/old_files/rep/transfer/TransSS.hpp
diff --git a/ndb/src/old_files/rep/transfer/TransSSSubscriptions.cpp b/storage/ndb/src/old_files/rep/transfer/TransSSSubscriptions.cpp
index 582ba8040a6..582ba8040a6 100644
--- a/ndb/src/old_files/rep/transfer/TransSSSubscriptions.cpp
+++ b/storage/ndb/src/old_files/rep/transfer/TransSSSubscriptions.cpp
diff --git a/ndb/test/Makefile.am b/storage/ndb/test/Makefile.am
index b8753668c60..b8753668c60 100644
--- a/ndb/test/Makefile.am
+++ b/storage/ndb/test/Makefile.am
diff --git a/ndb/test/include/CpcClient.hpp b/storage/ndb/test/include/CpcClient.hpp
index 8d8e079d219..8d8e079d219 100644
--- a/ndb/test/include/CpcClient.hpp
+++ b/storage/ndb/test/include/CpcClient.hpp
diff --git a/ndb/test/include/HugoAsynchTransactions.hpp b/storage/ndb/test/include/HugoAsynchTransactions.hpp
index d7e6e8fc187..d7e6e8fc187 100644
--- a/ndb/test/include/HugoAsynchTransactions.hpp
+++ b/storage/ndb/test/include/HugoAsynchTransactions.hpp
diff --git a/ndb/test/include/HugoCalculator.hpp b/storage/ndb/test/include/HugoCalculator.hpp
index 03de46cd7ea..03de46cd7ea 100644
--- a/ndb/test/include/HugoCalculator.hpp
+++ b/storage/ndb/test/include/HugoCalculator.hpp
diff --git a/ndb/test/include/HugoOperations.hpp b/storage/ndb/test/include/HugoOperations.hpp
index 05137710609..05137710609 100644
--- a/ndb/test/include/HugoOperations.hpp
+++ b/storage/ndb/test/include/HugoOperations.hpp
diff --git a/ndb/test/include/HugoTransactions.hpp b/storage/ndb/test/include/HugoTransactions.hpp
index 5795bbc94c9..5795bbc94c9 100644
--- a/ndb/test/include/HugoTransactions.hpp
+++ b/storage/ndb/test/include/HugoTransactions.hpp
diff --git a/ndb/test/include/NDBT.hpp b/storage/ndb/test/include/NDBT.hpp
index 657a9cb03b6..657a9cb03b6 100644
--- a/ndb/test/include/NDBT.hpp
+++ b/storage/ndb/test/include/NDBT.hpp
diff --git a/ndb/test/include/NDBT_DataSet.hpp b/storage/ndb/test/include/NDBT_DataSet.hpp
index 1a0122f617c..1a0122f617c 100644
--- a/ndb/test/include/NDBT_DataSet.hpp
+++ b/storage/ndb/test/include/NDBT_DataSet.hpp
diff --git a/ndb/test/include/NDBT_DataSetTransaction.hpp b/storage/ndb/test/include/NDBT_DataSetTransaction.hpp
index 9f250c566dd..9f250c566dd 100644
--- a/ndb/test/include/NDBT_DataSetTransaction.hpp
+++ b/storage/ndb/test/include/NDBT_DataSetTransaction.hpp
diff --git a/ndb/test/include/NDBT_Error.hpp b/storage/ndb/test/include/NDBT_Error.hpp
index 6775a107196..6775a107196 100644
--- a/ndb/test/include/NDBT_Error.hpp
+++ b/storage/ndb/test/include/NDBT_Error.hpp
diff --git a/ndb/test/include/NDBT_Output.hpp b/storage/ndb/test/include/NDBT_Output.hpp
index aaa619ac479..aaa619ac479 100644
--- a/ndb/test/include/NDBT_Output.hpp
+++ b/storage/ndb/test/include/NDBT_Output.hpp
diff --git a/ndb/test/include/NDBT_ResultRow.hpp b/storage/ndb/test/include/NDBT_ResultRow.hpp
index cbb5d7f6c6a..cbb5d7f6c6a 100644
--- a/ndb/test/include/NDBT_ResultRow.hpp
+++ b/storage/ndb/test/include/NDBT_ResultRow.hpp
diff --git a/ndb/test/include/NDBT_ReturnCodes.h b/storage/ndb/test/include/NDBT_ReturnCodes.h
index 0bc71ad8ceb..0bc71ad8ceb 100644
--- a/ndb/test/include/NDBT_ReturnCodes.h
+++ b/storage/ndb/test/include/NDBT_ReturnCodes.h
diff --git a/ndb/test/include/NDBT_Stats.hpp b/storage/ndb/test/include/NDBT_Stats.hpp
index 28212bdba17..28212bdba17 100644
--- a/ndb/test/include/NDBT_Stats.hpp
+++ b/storage/ndb/test/include/NDBT_Stats.hpp
diff --git a/ndb/test/include/NDBT_Table.hpp b/storage/ndb/test/include/NDBT_Table.hpp
index d2f99b85187..d2f99b85187 100644
--- a/ndb/test/include/NDBT_Table.hpp
+++ b/storage/ndb/test/include/NDBT_Table.hpp
diff --git a/ndb/test/include/NDBT_Tables.hpp b/storage/ndb/test/include/NDBT_Tables.hpp
index fb0df8aa35b..fb0df8aa35b 100644
--- a/ndb/test/include/NDBT_Tables.hpp
+++ b/storage/ndb/test/include/NDBT_Tables.hpp
diff --git a/ndb/test/include/NDBT_Test.hpp b/storage/ndb/test/include/NDBT_Test.hpp
index 1b9c2751f64..1b9c2751f64 100644
--- a/ndb/test/include/NDBT_Test.hpp
+++ b/storage/ndb/test/include/NDBT_Test.hpp
diff --git a/ndb/test/include/NdbBackup.hpp b/storage/ndb/test/include/NdbBackup.hpp
index e2e672b8a72..e2e672b8a72 100644
--- a/ndb/test/include/NdbBackup.hpp
+++ b/storage/ndb/test/include/NdbBackup.hpp
diff --git a/ndb/test/include/NdbConfig.hpp b/storage/ndb/test/include/NdbConfig.hpp
index 19439fafbb2..19439fafbb2 100644
--- a/ndb/test/include/NdbConfig.hpp
+++ b/storage/ndb/test/include/NdbConfig.hpp
diff --git a/ndb/test/include/NdbGrep.hpp b/storage/ndb/test/include/NdbGrep.hpp
index 31c49d1e4da..31c49d1e4da 100644
--- a/ndb/test/include/NdbGrep.hpp
+++ b/storage/ndb/test/include/NdbGrep.hpp
diff --git a/ndb/test/include/NdbRestarter.hpp b/storage/ndb/test/include/NdbRestarter.hpp
index 19a88b4f8ad..19a88b4f8ad 100644
--- a/ndb/test/include/NdbRestarter.hpp
+++ b/storage/ndb/test/include/NdbRestarter.hpp
diff --git a/ndb/test/include/NdbRestarts.hpp b/storage/ndb/test/include/NdbRestarts.hpp
index aabcd7b9975..aabcd7b9975 100644
--- a/ndb/test/include/NdbRestarts.hpp
+++ b/storage/ndb/test/include/NdbRestarts.hpp
diff --git a/ndb/test/include/NdbSchemaCon.hpp b/storage/ndb/test/include/NdbSchemaCon.hpp
index 313daf0094b..313daf0094b 100644
--- a/ndb/test/include/NdbSchemaCon.hpp
+++ b/storage/ndb/test/include/NdbSchemaCon.hpp
diff --git a/ndb/test/include/NdbSchemaOp.hpp b/storage/ndb/test/include/NdbSchemaOp.hpp
index 1edbc155643..1edbc155643 100644
--- a/ndb/test/include/NdbSchemaOp.hpp
+++ b/storage/ndb/test/include/NdbSchemaOp.hpp
diff --git a/ndb/test/include/NdbTest.hpp b/storage/ndb/test/include/NdbTest.hpp
index a2e612b7ffa..a2e612b7ffa 100644
--- a/ndb/test/include/NdbTest.hpp
+++ b/storage/ndb/test/include/NdbTest.hpp
diff --git a/ndb/test/include/NdbTimer.hpp b/storage/ndb/test/include/NdbTimer.hpp
index b0d500b5c2c..b0d500b5c2c 100644
--- a/ndb/test/include/NdbTimer.hpp
+++ b/storage/ndb/test/include/NdbTimer.hpp
diff --git a/ndb/test/include/TestNdbEventOperation.hpp b/storage/ndb/test/include/TestNdbEventOperation.hpp
index 307b0e0089b..307b0e0089b 100644
--- a/ndb/test/include/TestNdbEventOperation.hpp
+++ b/storage/ndb/test/include/TestNdbEventOperation.hpp
diff --git a/ndb/test/include/UtilTransactions.hpp b/storage/ndb/test/include/UtilTransactions.hpp
index afdbc5c3445..afdbc5c3445 100644
--- a/ndb/test/include/UtilTransactions.hpp
+++ b/storage/ndb/test/include/UtilTransactions.hpp
diff --git a/ndb/test/include/getarg.h b/storage/ndb/test/include/getarg.h
index 03ed25f6828..03ed25f6828 100644
--- a/ndb/test/include/getarg.h
+++ b/storage/ndb/test/include/getarg.h
diff --git a/ndb/test/ndbapi/InsertRecs.cpp b/storage/ndb/test/ndbapi/InsertRecs.cpp
index f42786d666d..f42786d666d 100644
--- a/ndb/test/ndbapi/InsertRecs.cpp
+++ b/storage/ndb/test/ndbapi/InsertRecs.cpp
diff --git a/storage/ndb/test/ndbapi/Makefile.am b/storage/ndb/test/ndbapi/Makefile.am
new file mode 100644
index 00000000000..f096a7c74e9
--- /dev/null
+++ b/storage/ndb/test/ndbapi/Makefile.am
@@ -0,0 +1,158 @@
+
+SUBDIRS = bank
+
+ndbtest_PROGRAMS = \
+drop_all_tabs \
+create_all_tabs \
+create_tab \
+flexAsynch \
+flexBench \
+flexHammer \
+flexTT \
+testBackup \
+testBasic \
+testBasicAsynch \
+testBlobs \
+testDataBuffers \
+testDict \
+testIndex \
+testMgm \
+testNdbApi \
+testNodeRestart \
+testOIBasic \
+testOperations \
+testRestartGci \
+testScan \
+testScanInterpreter \
+testScanPerf \
+testSystemRestart \
+testTimeout \
+testTransactions \
+testDeadlock \
+test_event ndbapi_slow_select testReadPerf testLcp \
+testPartitioning \
+testBitfield \
+DbCreate DbAsyncGenerator \
+test_event_multi_table
+
+#flexTimedAsynch
+#testBlobs
+#flex_bench_mysql
+
+create_all_tabs_SOURCES = create_all_tabs.cpp
+create_tab_SOURCES = create_tab.cpp
+drop_all_tabs_SOURCES = drop_all_tabs.cpp
+flexAsynch_SOURCES = flexAsynch.cpp
+flexBench_SOURCES = flexBench.cpp
+flexHammer_SOURCES = flexHammer.cpp
+flexTT_SOURCES = flexTT.cpp
+#flexTimedAsynch_SOURCES = flexTimedAsynch.cpp
+#flex_bench_mysql_SOURCES = flex_bench_mysql.cpp
+testBackup_SOURCES = testBackup.cpp
+testBasic_SOURCES = testBasic.cpp
+testBasicAsynch_SOURCES = testBasicAsynch.cpp
+testBlobs_SOURCES = testBlobs.cpp
+testDataBuffers_SOURCES = testDataBuffers.cpp
+testDict_SOURCES = testDict.cpp
+testIndex_SOURCES = testIndex.cpp
+testMgm_SOURCES = testMgm.cpp
+testNdbApi_SOURCES = testNdbApi.cpp
+testNodeRestart_SOURCES = testNodeRestart.cpp
+testOIBasic_SOURCES = testOIBasic.cpp
+testOperations_SOURCES = testOperations.cpp
+testRestartGci_SOURCES = testRestartGci.cpp
+testScan_SOURCES = testScan.cpp ScanFunctions.hpp
+testScanInterpreter_SOURCES = testScanInterpreter.cpp ScanFilter.hpp ScanInterpretTest.hpp
+testScanPerf_SOURCES = testScanPerf.cpp
+testSystemRestart_SOURCES = testSystemRestart.cpp
+testTimeout_SOURCES = testTimeout.cpp
+testTransactions_SOURCES = testTransactions.cpp
+testDeadlock_SOURCES = testDeadlock.cpp
+test_event_SOURCES = test_event.cpp
+ndbapi_slow_select_SOURCES = slow_select.cpp
+testReadPerf_SOURCES = testReadPerf.cpp
+testLcp_SOURCES = testLcp.cpp
+testPartitioning_SOURCES = testPartitioning.cpp
+testBitfield_SOURCES = testBitfield.cpp
+DbCreate_SOURCES = bench/mainPopulate.cpp bench/dbPopulate.cpp bench/userInterface.cpp bench/dbPopulate.h bench/userInterface.h bench/testData.h bench/testDefinitions.h bench/ndb_schema.hpp bench/ndb_error.hpp
+DbAsyncGenerator_SOURCES = bench/mainAsyncGenerator.cpp bench/asyncGenerator.cpp bench/ndb_async2.cpp bench/dbGenerator.h bench/macros.h bench/userInterface.h bench/testData.h bench/testDefinitions.h bench/ndb_schema.hpp bench/ndb_error.hpp
+test_event_multi_table_SOURCES = test_event_multi_table.cpp
+
+INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/include/kernel
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am
+
+##testDict_INCLUDES = $(INCLUDES) -I$(top_srcdir)/ndb/include/kernel
+##testIndex_INCLUDES = $(INCLUDES) -I$(top_srcdir)/ndb/include/kernel
+##testSystemRestart_INCLUDES = $(INCLUDES) -I$(top_srcdir)/ndb/include/kernel
+##testTransactions_INCLUDES = $(INCLUDES) -I$(top_srcdir)/ndb/include/kernel
+testBackup_LDADD = $(LDADD) bank/libbank.a
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+
+
+windoze-dsp: flexBench.dsp testBasic.dsp testBlobs.dsp \
+ testScan.dsp
+
+flexBench.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ flexBench
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(flexBench_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
+
+testBasic.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ testBasic
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(testBasic_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
+
+testOIBasic.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ testOIBasic
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(testOIBasic_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
+
+testBlobs.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ testBlobs
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(testBlobs_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
+
+testScan.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ testScan
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(testScan_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
diff --git a/ndb/test/ndbapi/ScanFilter.hpp b/storage/ndb/test/ndbapi/ScanFilter.hpp
index 09786756798..09786756798 100644
--- a/ndb/test/ndbapi/ScanFilter.hpp
+++ b/storage/ndb/test/ndbapi/ScanFilter.hpp
diff --git a/ndb/test/ndbapi/ScanFunctions.hpp b/storage/ndb/test/ndbapi/ScanFunctions.hpp
index 37389d9b7de..37389d9b7de 100644
--- a/ndb/test/ndbapi/ScanFunctions.hpp
+++ b/storage/ndb/test/ndbapi/ScanFunctions.hpp
diff --git a/ndb/test/ndbapi/ScanInterpretTest.hpp b/storage/ndb/test/ndbapi/ScanInterpretTest.hpp
index d4e9bbecc81..d4e9bbecc81 100644
--- a/ndb/test/ndbapi/ScanInterpretTest.hpp
+++ b/storage/ndb/test/ndbapi/ScanInterpretTest.hpp
diff --git a/ndb/test/ndbapi/TraceNdbApi.cpp b/storage/ndb/test/ndbapi/TraceNdbApi.cpp
index bd43b15f2e6..bd43b15f2e6 100644
--- a/ndb/test/ndbapi/TraceNdbApi.cpp
+++ b/storage/ndb/test/ndbapi/TraceNdbApi.cpp
diff --git a/ndb/test/ndbapi/VerifyNdbApi.cpp b/storage/ndb/test/ndbapi/VerifyNdbApi.cpp
index 79645827e2c..79645827e2c 100644
--- a/ndb/test/ndbapi/VerifyNdbApi.cpp
+++ b/storage/ndb/test/ndbapi/VerifyNdbApi.cpp
diff --git a/ndb/test/ndbapi/acid.cpp b/storage/ndb/test/ndbapi/acid.cpp
index 3eb1625be26..3eb1625be26 100644
--- a/ndb/test/ndbapi/acid.cpp
+++ b/storage/ndb/test/ndbapi/acid.cpp
diff --git a/ndb/test/ndbapi/acid2.cpp b/storage/ndb/test/ndbapi/acid2.cpp
index 7bd7ec00ac5..7bd7ec00ac5 100644
--- a/ndb/test/ndbapi/acid2.cpp
+++ b/storage/ndb/test/ndbapi/acid2.cpp
diff --git a/ndb/test/ndbapi/adoInsertRecs.cpp b/storage/ndb/test/ndbapi/adoInsertRecs.cpp
index 0bc67ef641b..0bc67ef641b 100644
--- a/ndb/test/ndbapi/adoInsertRecs.cpp
+++ b/storage/ndb/test/ndbapi/adoInsertRecs.cpp
diff --git a/ndb/test/ndbapi/asyncGenerator.cpp b/storage/ndb/test/ndbapi/asyncGenerator.cpp
index d91e38dff1a..d91e38dff1a 100644
--- a/ndb/test/ndbapi/asyncGenerator.cpp
+++ b/storage/ndb/test/ndbapi/asyncGenerator.cpp
diff --git a/ndb/test/ndbapi/bank/Bank.cpp b/storage/ndb/test/ndbapi/bank/Bank.cpp
index 40819ecc849..40819ecc849 100644
--- a/ndb/test/ndbapi/bank/Bank.cpp
+++ b/storage/ndb/test/ndbapi/bank/Bank.cpp
diff --git a/ndb/test/ndbapi/bank/Bank.hpp b/storage/ndb/test/ndbapi/bank/Bank.hpp
index d9dd7b25944..d9dd7b25944 100644
--- a/ndb/test/ndbapi/bank/Bank.hpp
+++ b/storage/ndb/test/ndbapi/bank/Bank.hpp
diff --git a/ndb/test/ndbapi/bank/BankLoad.cpp b/storage/ndb/test/ndbapi/bank/BankLoad.cpp
index 34947019a51..34947019a51 100644
--- a/ndb/test/ndbapi/bank/BankLoad.cpp
+++ b/storage/ndb/test/ndbapi/bank/BankLoad.cpp
diff --git a/storage/ndb/test/ndbapi/bank/Makefile.am b/storage/ndb/test/ndbapi/bank/Makefile.am
new file mode 100644
index 00000000000..d269ddc047a
--- /dev/null
+++ b/storage/ndb/test/ndbapi/bank/Makefile.am
@@ -0,0 +1,24 @@
+
+ndbtest_PROGRAMS = testBank bankSumAccounts bankValidateAllGLs bankMakeGL bankTransactionMaker bankCreator bankTimer
+
+noinst_LIBRARIES = libbank.a
+
+libbank_a_SOURCES = Bank.cpp BankLoad.cpp Bank.hpp
+
+testBank_SOURCES = testBank.cpp
+bankSumAccounts_SOURCES = bankSumAccounts.cpp
+bankValidateAllGLs_SOURCES = bankValidateAllGLs.cpp
+bankMakeGL_SOURCES = bankMakeGL.cpp
+bankTransactionMaker_SOURCES = bankTransactionMaker.cpp
+bankCreator_SOURCES = bankCreator.cpp
+bankTimer_SOURCES = bankTimer.cpp
+
+LDADD_LOC = $(noinst_LIBRARIES)
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp:
diff --git a/ndb/test/ndbapi/bank/bankCreator.cpp b/storage/ndb/test/ndbapi/bank/bankCreator.cpp
index 257255babc8..257255babc8 100644
--- a/ndb/test/ndbapi/bank/bankCreator.cpp
+++ b/storage/ndb/test/ndbapi/bank/bankCreator.cpp
diff --git a/ndb/test/ndbapi/bank/bankMakeGL.cpp b/storage/ndb/test/ndbapi/bank/bankMakeGL.cpp
index cf373481e3e..cf373481e3e 100644
--- a/ndb/test/ndbapi/bank/bankMakeGL.cpp
+++ b/storage/ndb/test/ndbapi/bank/bankMakeGL.cpp
diff --git a/ndb/test/ndbapi/bank/bankSumAccounts.cpp b/storage/ndb/test/ndbapi/bank/bankSumAccounts.cpp
index 034f70f8f95..034f70f8f95 100644
--- a/ndb/test/ndbapi/bank/bankSumAccounts.cpp
+++ b/storage/ndb/test/ndbapi/bank/bankSumAccounts.cpp
diff --git a/ndb/test/ndbapi/bank/bankTimer.cpp b/storage/ndb/test/ndbapi/bank/bankTimer.cpp
index 298f85e1e43..298f85e1e43 100644
--- a/ndb/test/ndbapi/bank/bankTimer.cpp
+++ b/storage/ndb/test/ndbapi/bank/bankTimer.cpp
diff --git a/ndb/test/ndbapi/bank/bankTransactionMaker.cpp b/storage/ndb/test/ndbapi/bank/bankTransactionMaker.cpp
index f8e646b6553..f8e646b6553 100644
--- a/ndb/test/ndbapi/bank/bankTransactionMaker.cpp
+++ b/storage/ndb/test/ndbapi/bank/bankTransactionMaker.cpp
diff --git a/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp b/storage/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp
index 0c268121d8a..0c268121d8a 100644
--- a/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp
+++ b/storage/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp
diff --git a/ndb/test/ndbapi/bank/testBank.cpp b/storage/ndb/test/ndbapi/bank/testBank.cpp
index 6be66d528b1..6be66d528b1 100644
--- a/ndb/test/ndbapi/bank/testBank.cpp
+++ b/storage/ndb/test/ndbapi/bank/testBank.cpp
diff --git a/ndb/test/ndbapi/bench/asyncGenerator.cpp b/storage/ndb/test/ndbapi/bench/asyncGenerator.cpp
index d91e38dff1a..d91e38dff1a 100644
--- a/ndb/test/ndbapi/bench/asyncGenerator.cpp
+++ b/storage/ndb/test/ndbapi/bench/asyncGenerator.cpp
diff --git a/ndb/test/ndbapi/bench/dbGenerator.h b/storage/ndb/test/ndbapi/bench/dbGenerator.h
index 2256498e151..2256498e151 100644
--- a/ndb/test/ndbapi/bench/dbGenerator.h
+++ b/storage/ndb/test/ndbapi/bench/dbGenerator.h
diff --git a/ndb/test/ndbapi/bench/dbPopulate.cpp b/storage/ndb/test/ndbapi/bench/dbPopulate.cpp
index 42fbb52f3b2..42fbb52f3b2 100644
--- a/ndb/test/ndbapi/bench/dbPopulate.cpp
+++ b/storage/ndb/test/ndbapi/bench/dbPopulate.cpp
diff --git a/ndb/test/ndbapi/bench/dbPopulate.h b/storage/ndb/test/ndbapi/bench/dbPopulate.h
index 1916720e141..1916720e141 100644
--- a/ndb/test/ndbapi/bench/dbPopulate.h
+++ b/storage/ndb/test/ndbapi/bench/dbPopulate.h
diff --git a/ndb/test/ndbapi/bench/macros.h b/storage/ndb/test/ndbapi/bench/macros.h
index 22b7f564490..22b7f564490 100644
--- a/ndb/test/ndbapi/bench/macros.h
+++ b/storage/ndb/test/ndbapi/bench/macros.h
diff --git a/ndb/test/ndbapi/bench/mainAsyncGenerator.cpp b/storage/ndb/test/ndbapi/bench/mainAsyncGenerator.cpp
index 828b924582f..828b924582f 100644
--- a/ndb/test/ndbapi/bench/mainAsyncGenerator.cpp
+++ b/storage/ndb/test/ndbapi/bench/mainAsyncGenerator.cpp
diff --git a/ndb/test/ndbapi/bench/mainPopulate.cpp b/storage/ndb/test/ndbapi/bench/mainPopulate.cpp
index 5ab1a5b015d..5ab1a5b015d 100644
--- a/ndb/test/ndbapi/bench/mainPopulate.cpp
+++ b/storage/ndb/test/ndbapi/bench/mainPopulate.cpp
diff --git a/ndb/test/ndbapi/bench/ndb_async1.cpp b/storage/ndb/test/ndbapi/bench/ndb_async1.cpp
index 2a84f6b2aca..2a84f6b2aca 100644
--- a/ndb/test/ndbapi/bench/ndb_async1.cpp
+++ b/storage/ndb/test/ndbapi/bench/ndb_async1.cpp
diff --git a/ndb/test/ndbapi/bench/ndb_async2.cpp b/storage/ndb/test/ndbapi/bench/ndb_async2.cpp
index 31cf1d8310a..31cf1d8310a 100644
--- a/ndb/test/ndbapi/bench/ndb_async2.cpp
+++ b/storage/ndb/test/ndbapi/bench/ndb_async2.cpp
diff --git a/ndb/test/ndbapi/bench/ndb_error.hpp b/storage/ndb/test/ndbapi/bench/ndb_error.hpp
index d90f5506813..d90f5506813 100644
--- a/ndb/test/ndbapi/bench/ndb_error.hpp
+++ b/storage/ndb/test/ndbapi/bench/ndb_error.hpp
diff --git a/ndb/test/ndbapi/bench/ndb_schema.hpp b/storage/ndb/test/ndbapi/bench/ndb_schema.hpp
index af08bc2eecd..af08bc2eecd 100644
--- a/ndb/test/ndbapi/bench/ndb_schema.hpp
+++ b/storage/ndb/test/ndbapi/bench/ndb_schema.hpp
diff --git a/ndb/test/ndbapi/bench/ndb_user_transaction.cpp b/storage/ndb/test/ndbapi/bench/ndb_user_transaction.cpp
index 182f1f99586..182f1f99586 100644
--- a/ndb/test/ndbapi/bench/ndb_user_transaction.cpp
+++ b/storage/ndb/test/ndbapi/bench/ndb_user_transaction.cpp
diff --git a/ndb/test/ndbapi/bench/ndb_user_transaction2.cpp b/storage/ndb/test/ndbapi/bench/ndb_user_transaction2.cpp
index df3c7a7989e..df3c7a7989e 100644
--- a/ndb/test/ndbapi/bench/ndb_user_transaction2.cpp
+++ b/storage/ndb/test/ndbapi/bench/ndb_user_transaction2.cpp
diff --git a/ndb/test/ndbapi/bench/ndb_user_transaction3.cpp b/storage/ndb/test/ndbapi/bench/ndb_user_transaction3.cpp
index d2c92ecd424..d2c92ecd424 100644
--- a/ndb/test/ndbapi/bench/ndb_user_transaction3.cpp
+++ b/storage/ndb/test/ndbapi/bench/ndb_user_transaction3.cpp
diff --git a/ndb/test/ndbapi/bench/ndb_user_transaction4.cpp b/storage/ndb/test/ndbapi/bench/ndb_user_transaction4.cpp
index e652c7bfed8..e652c7bfed8 100644
--- a/ndb/test/ndbapi/bench/ndb_user_transaction4.cpp
+++ b/storage/ndb/test/ndbapi/bench/ndb_user_transaction4.cpp
diff --git a/ndb/test/ndbapi/bench/ndb_user_transaction5.cpp b/storage/ndb/test/ndbapi/bench/ndb_user_transaction5.cpp
index 86580008d10..86580008d10 100644
--- a/ndb/test/ndbapi/bench/ndb_user_transaction5.cpp
+++ b/storage/ndb/test/ndbapi/bench/ndb_user_transaction5.cpp
diff --git a/ndb/test/ndbapi/bench/ndb_user_transaction6.cpp b/storage/ndb/test/ndbapi/bench/ndb_user_transaction6.cpp
index 262f38e9ffb..262f38e9ffb 100644
--- a/ndb/test/ndbapi/bench/ndb_user_transaction6.cpp
+++ b/storage/ndb/test/ndbapi/bench/ndb_user_transaction6.cpp
diff --git a/ndb/test/ndbapi/bench/testData.h b/storage/ndb/test/ndbapi/bench/testData.h
index 3db85e7342e..3db85e7342e 100644
--- a/ndb/test/ndbapi/bench/testData.h
+++ b/storage/ndb/test/ndbapi/bench/testData.h
diff --git a/ndb/test/ndbapi/bench/testDefinitions.h b/storage/ndb/test/ndbapi/bench/testDefinitions.h
index 2f4aeb30975..2f4aeb30975 100644
--- a/ndb/test/ndbapi/bench/testDefinitions.h
+++ b/storage/ndb/test/ndbapi/bench/testDefinitions.h
diff --git a/ndb/test/ndbapi/bench/userInterface.cpp b/storage/ndb/test/ndbapi/bench/userInterface.cpp
index 35e88183230..35e88183230 100644
--- a/ndb/test/ndbapi/bench/userInterface.cpp
+++ b/storage/ndb/test/ndbapi/bench/userInterface.cpp
diff --git a/ndb/test/ndbapi/bench/userInterface.h b/storage/ndb/test/ndbapi/bench/userInterface.h
index bad61fcf171..bad61fcf171 100644
--- a/ndb/test/ndbapi/bench/userInterface.h
+++ b/storage/ndb/test/ndbapi/bench/userInterface.h
diff --git a/ndb/test/ndbapi/benchronja.cpp b/storage/ndb/test/ndbapi/benchronja.cpp
index a7523e8e416..a7523e8e416 100644
--- a/ndb/test/ndbapi/benchronja.cpp
+++ b/storage/ndb/test/ndbapi/benchronja.cpp
diff --git a/ndb/test/ndbapi/bulk_copy.cpp b/storage/ndb/test/ndbapi/bulk_copy.cpp
index b53654ce0fb..b53654ce0fb 100644
--- a/ndb/test/ndbapi/bulk_copy.cpp
+++ b/storage/ndb/test/ndbapi/bulk_copy.cpp
diff --git a/ndb/test/ndbapi/cdrserver.cpp b/storage/ndb/test/ndbapi/cdrserver.cpp
index 976319034bf..976319034bf 100644
--- a/ndb/test/ndbapi/cdrserver.cpp
+++ b/storage/ndb/test/ndbapi/cdrserver.cpp
diff --git a/ndb/test/ndbapi/celloDb.cpp b/storage/ndb/test/ndbapi/celloDb.cpp
index 2d6401c355a..2d6401c355a 100644
--- a/ndb/test/ndbapi/celloDb.cpp
+++ b/storage/ndb/test/ndbapi/celloDb.cpp
diff --git a/ndb/test/ndbapi/create_all_tabs.cpp b/storage/ndb/test/ndbapi/create_all_tabs.cpp
index f06078d67a2..f06078d67a2 100644
--- a/ndb/test/ndbapi/create_all_tabs.cpp
+++ b/storage/ndb/test/ndbapi/create_all_tabs.cpp
diff --git a/ndb/test/ndbapi/create_tab.cpp b/storage/ndb/test/ndbapi/create_tab.cpp
index b35c8655236..b35c8655236 100644
--- a/ndb/test/ndbapi/create_tab.cpp
+++ b/storage/ndb/test/ndbapi/create_tab.cpp
diff --git a/ndb/test/ndbapi/drop_all_tabs.cpp b/storage/ndb/test/ndbapi/drop_all_tabs.cpp
index f12d750916e..f12d750916e 100644
--- a/ndb/test/ndbapi/drop_all_tabs.cpp
+++ b/storage/ndb/test/ndbapi/drop_all_tabs.cpp
diff --git a/ndb/test/ndbapi/flexAsynch.cpp b/storage/ndb/test/ndbapi/flexAsynch.cpp
index 8a7dbec1561..8a7dbec1561 100644
--- a/ndb/test/ndbapi/flexAsynch.cpp
+++ b/storage/ndb/test/ndbapi/flexAsynch.cpp
diff --git a/ndb/test/ndbapi/flexBench.cpp b/storage/ndb/test/ndbapi/flexBench.cpp
index abddecfdc40..abddecfdc40 100644
--- a/ndb/test/ndbapi/flexBench.cpp
+++ b/storage/ndb/test/ndbapi/flexBench.cpp
diff --git a/ndb/test/ndbapi/flexHammer.cpp b/storage/ndb/test/ndbapi/flexHammer.cpp
index f254b1e5ccf..f254b1e5ccf 100644
--- a/ndb/test/ndbapi/flexHammer.cpp
+++ b/storage/ndb/test/ndbapi/flexHammer.cpp
diff --git a/ndb/test/ndbapi/flexScan.cpp b/storage/ndb/test/ndbapi/flexScan.cpp
index 4d2c85d6955..4d2c85d6955 100644
--- a/ndb/test/ndbapi/flexScan.cpp
+++ b/storage/ndb/test/ndbapi/flexScan.cpp
diff --git a/ndb/test/ndbapi/flexTT.cpp b/storage/ndb/test/ndbapi/flexTT.cpp
index 7cd5ac8e3b4..7cd5ac8e3b4 100644
--- a/ndb/test/ndbapi/flexTT.cpp
+++ b/storage/ndb/test/ndbapi/flexTT.cpp
diff --git a/ndb/test/ndbapi/flexTimedAsynch.cpp b/storage/ndb/test/ndbapi/flexTimedAsynch.cpp
index 2b8c0bdd5f8..2b8c0bdd5f8 100644
--- a/ndb/test/ndbapi/flexTimedAsynch.cpp
+++ b/storage/ndb/test/ndbapi/flexTimedAsynch.cpp
diff --git a/ndb/test/ndbapi/flex_bench_mysql.cpp b/storage/ndb/test/ndbapi/flex_bench_mysql.cpp
index 3efb7ee2094..3efb7ee2094 100644
--- a/ndb/test/ndbapi/flex_bench_mysql.cpp
+++ b/storage/ndb/test/ndbapi/flex_bench_mysql.cpp
diff --git a/ndb/test/ndbapi/index.cpp b/storage/ndb/test/ndbapi/index.cpp
index c22da594164..c22da594164 100644
--- a/ndb/test/ndbapi/index.cpp
+++ b/storage/ndb/test/ndbapi/index.cpp
diff --git a/ndb/test/ndbapi/index2.cpp b/storage/ndb/test/ndbapi/index2.cpp
index f739468d7df..f739468d7df 100644
--- a/ndb/test/ndbapi/index2.cpp
+++ b/storage/ndb/test/ndbapi/index2.cpp
diff --git a/ndb/test/ndbapi/initronja.cpp b/storage/ndb/test/ndbapi/initronja.cpp
index 3ce274e4319..3ce274e4319 100644
--- a/ndb/test/ndbapi/initronja.cpp
+++ b/storage/ndb/test/ndbapi/initronja.cpp
diff --git a/ndb/test/ndbapi/interpreterInTup.cpp b/storage/ndb/test/ndbapi/interpreterInTup.cpp
index a07d5898213..a07d5898213 100644
--- a/ndb/test/ndbapi/interpreterInTup.cpp
+++ b/storage/ndb/test/ndbapi/interpreterInTup.cpp
diff --git a/ndb/test/ndbapi/mainAsyncGenerator.cpp b/storage/ndb/test/ndbapi/mainAsyncGenerator.cpp
index 73a8b98ab57..73a8b98ab57 100644
--- a/ndb/test/ndbapi/mainAsyncGenerator.cpp
+++ b/storage/ndb/test/ndbapi/mainAsyncGenerator.cpp
diff --git a/ndb/test/ndbapi/msa.cpp b/storage/ndb/test/ndbapi/msa.cpp
index e39f7a8c64a..e39f7a8c64a 100644
--- a/ndb/test/ndbapi/msa.cpp
+++ b/storage/ndb/test/ndbapi/msa.cpp
diff --git a/ndb/test/ndbapi/ndb_async1.cpp b/storage/ndb/test/ndbapi/ndb_async1.cpp
index 2a84f6b2aca..2a84f6b2aca 100644
--- a/ndb/test/ndbapi/ndb_async1.cpp
+++ b/storage/ndb/test/ndbapi/ndb_async1.cpp
diff --git a/ndb/test/ndbapi/ndb_async2.cpp b/storage/ndb/test/ndbapi/ndb_async2.cpp
index 0c1d138defb..0c1d138defb 100644
--- a/ndb/test/ndbapi/ndb_async2.cpp
+++ b/storage/ndb/test/ndbapi/ndb_async2.cpp
diff --git a/ndb/test/ndbapi/ndb_user_populate.cpp b/storage/ndb/test/ndbapi/ndb_user_populate.cpp
index ce3a76cdd59..ce3a76cdd59 100644
--- a/ndb/test/ndbapi/ndb_user_populate.cpp
+++ b/storage/ndb/test/ndbapi/ndb_user_populate.cpp
diff --git a/ndb/test/ndbapi/ndb_user_transaction.cpp b/storage/ndb/test/ndbapi/ndb_user_transaction.cpp
index 182f1f99586..182f1f99586 100644
--- a/ndb/test/ndbapi/ndb_user_transaction.cpp
+++ b/storage/ndb/test/ndbapi/ndb_user_transaction.cpp
diff --git a/ndb/test/ndbapi/ndb_user_transaction2.cpp b/storage/ndb/test/ndbapi/ndb_user_transaction2.cpp
index df3c7a7989e..df3c7a7989e 100644
--- a/ndb/test/ndbapi/ndb_user_transaction2.cpp
+++ b/storage/ndb/test/ndbapi/ndb_user_transaction2.cpp
diff --git a/ndb/test/ndbapi/ndb_user_transaction3.cpp b/storage/ndb/test/ndbapi/ndb_user_transaction3.cpp
index d2c92ecd424..d2c92ecd424 100644
--- a/ndb/test/ndbapi/ndb_user_transaction3.cpp
+++ b/storage/ndb/test/ndbapi/ndb_user_transaction3.cpp
diff --git a/ndb/test/ndbapi/ndb_user_transaction4.cpp b/storage/ndb/test/ndbapi/ndb_user_transaction4.cpp
index e652c7bfed8..e652c7bfed8 100644
--- a/ndb/test/ndbapi/ndb_user_transaction4.cpp
+++ b/storage/ndb/test/ndbapi/ndb_user_transaction4.cpp
diff --git a/ndb/test/ndbapi/ndb_user_transaction5.cpp b/storage/ndb/test/ndbapi/ndb_user_transaction5.cpp
index 86580008d10..86580008d10 100644
--- a/ndb/test/ndbapi/ndb_user_transaction5.cpp
+++ b/storage/ndb/test/ndbapi/ndb_user_transaction5.cpp
diff --git a/ndb/test/ndbapi/ndb_user_transaction6.cpp b/storage/ndb/test/ndbapi/ndb_user_transaction6.cpp
index 262f38e9ffb..262f38e9ffb 100644
--- a/ndb/test/ndbapi/ndb_user_transaction6.cpp
+++ b/storage/ndb/test/ndbapi/ndb_user_transaction6.cpp
diff --git a/ndb/test/ndbapi/old_dirs/acid/Makefile b/storage/ndb/test/ndbapi/old_dirs/acid/Makefile
index 33dc49fcdea..33dc49fcdea 100644
--- a/ndb/test/ndbapi/old_dirs/acid/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/acid/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/acid2/Makefile b/storage/ndb/test/ndbapi/old_dirs/acid2/Makefile
index 69c9d409b9e..69c9d409b9e 100644
--- a/ndb/test/ndbapi/old_dirs/acid2/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/acid2/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/acid2/TraceNdbApi.hpp b/storage/ndb/test/ndbapi/old_dirs/acid2/TraceNdbApi.hpp
index 2bd4eab6b70..2bd4eab6b70 100644
--- a/ndb/test/ndbapi/old_dirs/acid2/TraceNdbApi.hpp
+++ b/storage/ndb/test/ndbapi/old_dirs/acid2/TraceNdbApi.hpp
diff --git a/ndb/test/ndbapi/old_dirs/acid2/VerifyNdbApi.hpp b/storage/ndb/test/ndbapi/old_dirs/acid2/VerifyNdbApi.hpp
index 4a5b8cc8111..4a5b8cc8111 100644
--- a/ndb/test/ndbapi/old_dirs/acid2/VerifyNdbApi.hpp
+++ b/storage/ndb/test/ndbapi/old_dirs/acid2/VerifyNdbApi.hpp
diff --git a/ndb/test/ndbapi/old_dirs/basicAsynch/Makefile b/storage/ndb/test/ndbapi/old_dirs/basicAsynch/Makefile
index 802c5e5a2bd..802c5e5a2bd 100755
--- a/ndb/test/ndbapi/old_dirs/basicAsynch/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/basicAsynch/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/bulk_copy/Makefile b/storage/ndb/test/ndbapi/old_dirs/bulk_copy/Makefile
index 22c05b138b7..22c05b138b7 100644
--- a/ndb/test/ndbapi/old_dirs/bulk_copy/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/bulk_copy/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/create_all_tabs/Makefile b/storage/ndb/test/ndbapi/old_dirs/create_all_tabs/Makefile
index 58309807682..58309807682 100644
--- a/ndb/test/ndbapi/old_dirs/create_all_tabs/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/create_all_tabs/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/create_tab/Makefile b/storage/ndb/test/ndbapi/old_dirs/create_tab/Makefile
index c2ea0b52b15..c2ea0b52b15 100644
--- a/ndb/test/ndbapi/old_dirs/create_tab/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/create_tab/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/drop_all_tabs/Makefile b/storage/ndb/test/ndbapi/old_dirs/drop_all_tabs/Makefile
index 96db0781417..96db0781417 100644
--- a/ndb/test/ndbapi/old_dirs/drop_all_tabs/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/drop_all_tabs/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/flexAsynch/Makefile b/storage/ndb/test/ndbapi/old_dirs/flexAsynch/Makefile
index 2c77c8e21df..2c77c8e21df 100644
--- a/ndb/test/ndbapi/old_dirs/flexAsynch/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/flexAsynch/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/flexBench/Makefile.am b/storage/ndb/test/ndbapi/old_dirs/flexBench/Makefile.am
index d4de4b92b60..d4de4b92b60 100644
--- a/ndb/test/ndbapi/old_dirs/flexBench/Makefile.am
+++ b/storage/ndb/test/ndbapi/old_dirs/flexBench/Makefile.am
diff --git a/ndb/test/ndbapi/old_dirs/flexBench/ndbplot.pl b/storage/ndb/test/ndbapi/old_dirs/flexBench/ndbplot.pl
index b16f6d5897d..b16f6d5897d 100755
--- a/ndb/test/ndbapi/old_dirs/flexBench/ndbplot.pl
+++ b/storage/ndb/test/ndbapi/old_dirs/flexBench/ndbplot.pl
diff --git a/ndb/test/ndbapi/old_dirs/flexHammer/Makefile b/storage/ndb/test/ndbapi/old_dirs/flexHammer/Makefile
index c8e436fb7f5..c8e436fb7f5 100644
--- a/ndb/test/ndbapi/old_dirs/flexHammer/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/flexHammer/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/flexHammer/README b/storage/ndb/test/ndbapi/old_dirs/flexHammer/README
index 556582aab96..556582aab96 100644
--- a/ndb/test/ndbapi/old_dirs/flexHammer/README
+++ b/storage/ndb/test/ndbapi/old_dirs/flexHammer/README
diff --git a/ndb/test/ndbapi/old_dirs/flexScan/Makefile b/storage/ndb/test/ndbapi/old_dirs/flexScan/Makefile
index 78f9d481063..78f9d481063 100644
--- a/ndb/test/ndbapi/old_dirs/flexScan/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/flexScan/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/flexScan/README b/storage/ndb/test/ndbapi/old_dirs/flexScan/README
index cddbdea5336..cddbdea5336 100644
--- a/ndb/test/ndbapi/old_dirs/flexScan/README
+++ b/storage/ndb/test/ndbapi/old_dirs/flexScan/README
diff --git a/ndb/test/ndbapi/old_dirs/flexTT/Makefile b/storage/ndb/test/ndbapi/old_dirs/flexTT/Makefile
index a63bd803d95..a63bd803d95 100644
--- a/ndb/test/ndbapi/old_dirs/flexTT/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/flexTT/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/flexTimedAsynch/Makefile b/storage/ndb/test/ndbapi/old_dirs/flexTimedAsynch/Makefile
index e9995dbd16f..e9995dbd16f 100644
--- a/ndb/test/ndbapi/old_dirs/flexTimedAsynch/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/flexTimedAsynch/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/flex_bench_mysql/Makefile b/storage/ndb/test/ndbapi/old_dirs/flex_bench_mysql/Makefile
index d2608526cae..d2608526cae 100644
--- a/ndb/test/ndbapi/old_dirs/flex_bench_mysql/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/flex_bench_mysql/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/indexTest/Makefile b/storage/ndb/test/ndbapi/old_dirs/indexTest/Makefile
index d842e487ee5..d842e487ee5 100644
--- a/ndb/test/ndbapi/old_dirs/indexTest/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/indexTest/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/indexTest2/Makefile b/storage/ndb/test/ndbapi/old_dirs/indexTest2/Makefile
index ad78fd51986..ad78fd51986 100644
--- a/ndb/test/ndbapi/old_dirs/indexTest2/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/indexTest2/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/interpreterInTup/Makefile b/storage/ndb/test/ndbapi/old_dirs/interpreterInTup/Makefile
index 074adbf674a..074adbf674a 100644
--- a/ndb/test/ndbapi/old_dirs/interpreterInTup/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/interpreterInTup/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/Makefile b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/Makefile
index af472b1589f..af472b1589f 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/Makefile b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/Makefile
index 744d6171139..744d6171139 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/generator/Makefile b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/generator/Makefile
index c1f84a3ef70..c1f84a3ef70 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/generator/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/generator/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/dbGenerator.h b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/dbGenerator.h
index 2256498e151..2256498e151 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/dbGenerator.h
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/dbGenerator.h
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/testData.h b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/testData.h
index 3db85e7342e..3db85e7342e 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/testData.h
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/testData.h
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/userInterface.h b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/userInterface.h
index 94bd1e80ab3..94bd1e80ab3 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/userInterface.h
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/userInterface.h
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/Makefile b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/Makefile
index c0b532a8359..c0b532a8359 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/macros.h b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/macros.h
index 22b7f564490..22b7f564490 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/macros.h
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/macros.h
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/ndb_error.hpp b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/ndb_error.hpp
index 9e6c5e55e73..9e6c5e55e73 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/ndb_error.hpp
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/ndb_error.hpp
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/bin/.empty b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/bin/.empty
index e69de29bb2d..e69de29bb2d 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/bin/.empty
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/bin/.empty
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/include/ndb_schema.hpp b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/include/ndb_schema.hpp
index af08bc2eecd..af08bc2eecd 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/include/ndb_schema.hpp
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/include/ndb_schema.hpp
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/include/testDefinitions.h b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/include/testDefinitions.h
index 2f4aeb30975..2f4aeb30975 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/include/testDefinitions.h
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/include/testDefinitions.h
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/lib/.empty b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/lib/.empty
index e69de29bb2d..e69de29bb2d 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/lib/.empty
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/lib/.empty
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/script/Makefile b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/Makefile
index 240b5957573..240b5957573 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/script/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l-p10.sh b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l-p10.sh
index 1ce3969f9fb..1ce3969f9fb 100755
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l-p10.sh
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l-p10.sh
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l.sh b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l.sh
index a5de71395c4..a5de71395c4 100755
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l.sh
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l.sh
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-p10.sh b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-p10.sh
index 92c853cdd86..92c853cdd86 100755
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-p10.sh
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-p10.sh
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench.sh b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench.sh
index da8e9d9bf42..da8e9d9bf42 100755
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench.sh
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench.sh
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/Makefile b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/Makefile
index ae7fac9c49b..ae7fac9c49b 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/README b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/README
index e81c8ba0051..e81c8ba0051 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/README
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/README
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/Makefile b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/Makefile
index 143d9ba655e..143d9ba655e 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.c b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.c
index 7484c7647f5..7484c7647f5 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.c
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.c
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.h b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.h
index 824688b6cf9..824688b6cf9 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.h
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.h
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/mainGenerator.c b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/mainGenerator.c
index 4a31db0b4e9..4a31db0b4e9 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/mainGenerator.c
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/mainGenerator.c
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/include/testData.h b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/include/testData.h
index 863c230502b..863c230502b 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/include/testData.h
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/include/testData.h
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/include/userInterface.h b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/include/userInterface.h
index b70ded87756..b70ded87756 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/include/userInterface.h
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/include/userInterface.h
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.linux b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.linux
index a933669cfe7..a933669cfe7 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.linux
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.linux
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.sparc b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.sparc
index 57ab8bf982f..57ab8bf982f 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.sparc
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.sparc
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/Makefile b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/Makefile
index 2107c948843..2107c948843 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.c b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.c
index 42fbb52f3b2..42fbb52f3b2 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.c
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.c
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.h b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.h
index 1916720e141..1916720e141 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.h
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.h
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/mainPopulate.c b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/mainPopulate.c
index 838ac8a7196..838ac8a7196 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/mainPopulate.c
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/mainPopulate.c
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/Makefile b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/Makefile
index 9bf229ac84c..9bf229ac84c 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/localDbPrepare.c b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/localDbPrepare.c
index dd100507016..dd100507016 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/localDbPrepare.c
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/localDbPrepare.c
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/macros.h b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/macros.h
index 363f247b93f..363f247b93f 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/macros.h
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/macros.h
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/ndb_error.hpp b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/ndb_error.hpp
index b3aaeac822e..b3aaeac822e 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/ndb_error.hpp
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/ndb_error.hpp
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/Makefile b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/Makefile
index 9b1247d44af..9b1247d44af 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userHandle.h b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userHandle.h
index 1de468d4dad..1de468d4dad 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userHandle.h
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userHandle.h
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userInterface.c b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userInterface.c
index bacf1861dde..bacf1861dde 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userInterface.c
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userInterface.c
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userTransaction.c b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userTransaction.c
index a2f4787bb0c..a2f4787bb0c 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userTransaction.c
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userTransaction.c
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userHandle.h b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userHandle.h
index 6da76fc2bff..6da76fc2bff 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userHandle.h
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userHandle.h
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userInterface.cpp b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userInterface.cpp
index fe3c17acbf5..fe3c17acbf5 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userInterface.cpp
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userInterface.cpp
diff --git a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userTransaction.c b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userTransaction.c
index a2f4787bb0c..a2f4787bb0c 100644
--- a/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userTransaction.c
+++ b/storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userTransaction.c
diff --git a/ndb/test/ndbapi/old_dirs/restarter/Makefile b/storage/ndb/test/ndbapi/old_dirs/restarter/Makefile
index 041fbfd82ba..041fbfd82ba 100644
--- a/ndb/test/ndbapi/old_dirs/restarter/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/restarter/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/restarter2/Makefile b/storage/ndb/test/ndbapi/old_dirs/restarter2/Makefile
index ba33a2e21dc..ba33a2e21dc 100644
--- a/ndb/test/ndbapi/old_dirs/restarter2/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/restarter2/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/restarts/Makefile b/storage/ndb/test/ndbapi/old_dirs/restarts/Makefile
index 9f14b81fae5..9f14b81fae5 100644
--- a/ndb/test/ndbapi/old_dirs/restarts/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/restarts/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/ronja/Makefile b/storage/ndb/test/ndbapi/old_dirs/ronja/Makefile
index a11a27c5fd7..a11a27c5fd7 100644
--- a/ndb/test/ndbapi/old_dirs/ronja/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/ronja/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/ronja/benchronja/Makefile b/storage/ndb/test/ndbapi/old_dirs/ronja/benchronja/Makefile
index f0521c3ba77..f0521c3ba77 100644
--- a/ndb/test/ndbapi/old_dirs/ronja/benchronja/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/ronja/benchronja/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/ronja/initronja/Makefile b/storage/ndb/test/ndbapi/old_dirs/ronja/initronja/Makefile
index dd66dd813d1..dd66dd813d1 100644
--- a/ndb/test/ndbapi/old_dirs/ronja/initronja/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/ronja/initronja/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/telco/Makefile b/storage/ndb/test/ndbapi/old_dirs/telco/Makefile
index 8f82c714119..8f82c714119 100644
--- a/ndb/test/ndbapi/old_dirs/telco/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/telco/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/telco/readme b/storage/ndb/test/ndbapi/old_dirs/telco/readme
index 627b4256eef..627b4256eef 100644
--- a/ndb/test/ndbapi/old_dirs/telco/readme
+++ b/storage/ndb/test/ndbapi/old_dirs/telco/readme
diff --git a/ndb/test/ndbapi/old_dirs/testBackup/Makefile b/storage/ndb/test/ndbapi/old_dirs/testBackup/Makefile
index abf47dcfb2d..abf47dcfb2d 100644
--- a/ndb/test/ndbapi/old_dirs/testBackup/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testBackup/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testBasic/Makefile b/storage/ndb/test/ndbapi/old_dirs/testBasic/Makefile
index 755b19939cb..755b19939cb 100644
--- a/ndb/test/ndbapi/old_dirs/testBasic/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testBasic/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testBlobs/Makefile b/storage/ndb/test/ndbapi/old_dirs/testBlobs/Makefile
index cc5bb629c17..cc5bb629c17 100644
--- a/ndb/test/ndbapi/old_dirs/testBlobs/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testBlobs/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testDataBuffers/Makefile b/storage/ndb/test/ndbapi/old_dirs/testDataBuffers/Makefile
index 181fbc829d4..181fbc829d4 100644
--- a/ndb/test/ndbapi/old_dirs/testDataBuffers/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testDataBuffers/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testDict/Makefile b/storage/ndb/test/ndbapi/old_dirs/testDict/Makefile
index 75d493c3424..75d493c3424 100644
--- a/ndb/test/ndbapi/old_dirs/testDict/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testDict/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testGrep/Makefile b/storage/ndb/test/ndbapi/old_dirs/testGrep/Makefile
index 6bad3d56a00..6bad3d56a00 100644
--- a/ndb/test/ndbapi/old_dirs/testGrep/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testGrep/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testGrep/verify/Makefile b/storage/ndb/test/ndbapi/old_dirs/testGrep/verify/Makefile
index 256e3c98f36..256e3c98f36 100644
--- a/ndb/test/ndbapi/old_dirs/testGrep/verify/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testGrep/verify/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testIndex/Makefile b/storage/ndb/test/ndbapi/old_dirs/testIndex/Makefile
index e5cd4542c9c..e5cd4542c9c 100644
--- a/ndb/test/ndbapi/old_dirs/testIndex/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testIndex/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testInterpreter/Makefile b/storage/ndb/test/ndbapi/old_dirs/testInterpreter/Makefile
index e84287a1b16..e84287a1b16 100644
--- a/ndb/test/ndbapi/old_dirs/testInterpreter/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testInterpreter/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testMgm/Makefile b/storage/ndb/test/ndbapi/old_dirs/testMgm/Makefile
index be50d3dae7e..be50d3dae7e 100644
--- a/ndb/test/ndbapi/old_dirs/testMgm/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testMgm/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testNdbApi/Makefile b/storage/ndb/test/ndbapi/old_dirs/testNdbApi/Makefile
index 3bb3cba427e..3bb3cba427e 100644
--- a/ndb/test/ndbapi/old_dirs/testNdbApi/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testNdbApi/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testNodeRestart/Makefile b/storage/ndb/test/ndbapi/old_dirs/testNodeRestart/Makefile
index 8c13ab3beb4..8c13ab3beb4 100644
--- a/ndb/test/ndbapi/old_dirs/testNodeRestart/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testNodeRestart/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testOIBasic/Makefile b/storage/ndb/test/ndbapi/old_dirs/testOIBasic/Makefile
index 1bbbcf1d17e..1bbbcf1d17e 100644
--- a/ndb/test/ndbapi/old_dirs/testOIBasic/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testOIBasic/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testOIBasic/times.txt b/storage/ndb/test/ndbapi/old_dirs/testOIBasic/times.txt
index 641e9ddb4bf..641e9ddb4bf 100644
--- a/ndb/test/ndbapi/old_dirs/testOIBasic/times.txt
+++ b/storage/ndb/test/ndbapi/old_dirs/testOIBasic/times.txt
diff --git a/ndb/test/ndbapi/old_dirs/testOperations/Makefile b/storage/ndb/test/ndbapi/old_dirs/testOperations/Makefile
index 25546ade639..25546ade639 100644
--- a/ndb/test/ndbapi/old_dirs/testOperations/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testOperations/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testOrderedIndex/Makefile b/storage/ndb/test/ndbapi/old_dirs/testOrderedIndex/Makefile
index d8899a37895..d8899a37895 100644
--- a/ndb/test/ndbapi/old_dirs/testOrderedIndex/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testOrderedIndex/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testRestartGci/Makefile b/storage/ndb/test/ndbapi/old_dirs/testRestartGci/Makefile
index 24f449b747d..24f449b747d 100644
--- a/ndb/test/ndbapi/old_dirs/testRestartGci/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testRestartGci/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testScan/Makefile b/storage/ndb/test/ndbapi/old_dirs/testScan/Makefile
index fe48f5bc926..fe48f5bc926 100644
--- a/ndb/test/ndbapi/old_dirs/testScan/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testScan/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testScanInterpreter/Makefile b/storage/ndb/test/ndbapi/old_dirs/testScanInterpreter/Makefile
index c7d96494148..c7d96494148 100644
--- a/ndb/test/ndbapi/old_dirs/testScanInterpreter/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testScanInterpreter/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testSystemRestart/Makefile b/storage/ndb/test/ndbapi/old_dirs/testSystemRestart/Makefile
index 7a306eb313d..7a306eb313d 100644
--- a/ndb/test/ndbapi/old_dirs/testSystemRestart/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testSystemRestart/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testTimeout/Makefile b/storage/ndb/test/ndbapi/old_dirs/testTimeout/Makefile
index 01a9df9887f..01a9df9887f 100644
--- a/ndb/test/ndbapi/old_dirs/testTimeout/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testTimeout/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/testTransactions/Makefile b/storage/ndb/test/ndbapi/old_dirs/testTransactions/Makefile
index 0279a526923..0279a526923 100644
--- a/ndb/test/ndbapi/old_dirs/testTransactions/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/testTransactions/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/test_event/Makefile b/storage/ndb/test/ndbapi/old_dirs/test_event/Makefile
index 6299fa47845..6299fa47845 100644
--- a/ndb/test/ndbapi/old_dirs/test_event/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/test_event/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/vw_test/Makefile b/storage/ndb/test/ndbapi/old_dirs/vw_test/Makefile
index 144873dcc69..144873dcc69 100644
--- a/ndb/test/ndbapi/old_dirs/vw_test/Makefile
+++ b/storage/ndb/test/ndbapi/old_dirs/vw_test/Makefile
diff --git a/ndb/test/ndbapi/old_dirs/vw_test/bcd.h b/storage/ndb/test/ndbapi/old_dirs/vw_test/bcd.h
index d0aaffbd8b7..d0aaffbd8b7 100644
--- a/ndb/test/ndbapi/old_dirs/vw_test/bcd.h
+++ b/storage/ndb/test/ndbapi/old_dirs/vw_test/bcd.h
diff --git a/ndb/test/ndbapi/old_dirs/vw_test/script/client_start b/storage/ndb/test/ndbapi/old_dirs/vw_test/script/client_start
index 2965be6fbb5..2965be6fbb5 100644
--- a/ndb/test/ndbapi/old_dirs/vw_test/script/client_start
+++ b/storage/ndb/test/ndbapi/old_dirs/vw_test/script/client_start
diff --git a/ndb/test/ndbapi/old_dirs/vw_test/utv.h b/storage/ndb/test/ndbapi/old_dirs/vw_test/utv.h
index 6f378e5595b..6f378e5595b 100644
--- a/ndb/test/ndbapi/old_dirs/vw_test/utv.h
+++ b/storage/ndb/test/ndbapi/old_dirs/vw_test/utv.h
diff --git a/ndb/test/ndbapi/old_dirs/vw_test/vcdrfunc.h b/storage/ndb/test/ndbapi/old_dirs/vw_test/vcdrfunc.h
index 3c5444d733b..3c5444d733b 100644
--- a/ndb/test/ndbapi/old_dirs/vw_test/vcdrfunc.h
+++ b/storage/ndb/test/ndbapi/old_dirs/vw_test/vcdrfunc.h
diff --git a/ndb/test/ndbapi/restarter.cpp b/storage/ndb/test/ndbapi/restarter.cpp
index d6831494b48..d6831494b48 100644
--- a/ndb/test/ndbapi/restarter.cpp
+++ b/storage/ndb/test/ndbapi/restarter.cpp
diff --git a/ndb/test/ndbapi/restarter2.cpp b/storage/ndb/test/ndbapi/restarter2.cpp
index 846748a7bba..846748a7bba 100644
--- a/ndb/test/ndbapi/restarter2.cpp
+++ b/storage/ndb/test/ndbapi/restarter2.cpp
diff --git a/ndb/test/ndbapi/restarts.cpp b/storage/ndb/test/ndbapi/restarts.cpp
index 184e754de4a..184e754de4a 100644
--- a/ndb/test/ndbapi/restarts.cpp
+++ b/storage/ndb/test/ndbapi/restarts.cpp
diff --git a/ndb/test/ndbapi/size.cpp b/storage/ndb/test/ndbapi/size.cpp
index ff178b11d68..ff178b11d68 100644
--- a/ndb/test/ndbapi/size.cpp
+++ b/storage/ndb/test/ndbapi/size.cpp
diff --git a/ndb/test/ndbapi/slow_select.cpp b/storage/ndb/test/ndbapi/slow_select.cpp
index 8d615fa5771..8d615fa5771 100644
--- a/ndb/test/ndbapi/slow_select.cpp
+++ b/storage/ndb/test/ndbapi/slow_select.cpp
diff --git a/ndb/test/ndbapi/testBackup.cpp b/storage/ndb/test/ndbapi/testBackup.cpp
index 7a6f11a6bb9..7a6f11a6bb9 100644
--- a/ndb/test/ndbapi/testBackup.cpp
+++ b/storage/ndb/test/ndbapi/testBackup.cpp
diff --git a/ndb/test/ndbapi/testBasic.cpp b/storage/ndb/test/ndbapi/testBasic.cpp
index 4d64b15ecfa..4d64b15ecfa 100644
--- a/ndb/test/ndbapi/testBasic.cpp
+++ b/storage/ndb/test/ndbapi/testBasic.cpp
diff --git a/ndb/test/ndbapi/testBasicAsynch.cpp b/storage/ndb/test/ndbapi/testBasicAsynch.cpp
index 6daa22fdc6a..6daa22fdc6a 100644
--- a/ndb/test/ndbapi/testBasicAsynch.cpp
+++ b/storage/ndb/test/ndbapi/testBasicAsynch.cpp
diff --git a/ndb/test/ndbapi/testBitfield.cpp b/storage/ndb/test/ndbapi/testBitfield.cpp
index e26f495f5a4..e26f495f5a4 100644
--- a/ndb/test/ndbapi/testBitfield.cpp
+++ b/storage/ndb/test/ndbapi/testBitfield.cpp
diff --git a/ndb/test/ndbapi/testBlobs.cpp b/storage/ndb/test/ndbapi/testBlobs.cpp
index a88d7d21820..a88d7d21820 100644
--- a/ndb/test/ndbapi/testBlobs.cpp
+++ b/storage/ndb/test/ndbapi/testBlobs.cpp
diff --git a/ndb/test/ndbapi/testDataBuffers.cpp b/storage/ndb/test/ndbapi/testDataBuffers.cpp
index aaecb6ee61e..aaecb6ee61e 100644
--- a/ndb/test/ndbapi/testDataBuffers.cpp
+++ b/storage/ndb/test/ndbapi/testDataBuffers.cpp
diff --git a/ndb/test/ndbapi/testDeadlock.cpp b/storage/ndb/test/ndbapi/testDeadlock.cpp
index 0070a7ecc83..0070a7ecc83 100644
--- a/ndb/test/ndbapi/testDeadlock.cpp
+++ b/storage/ndb/test/ndbapi/testDeadlock.cpp
diff --git a/ndb/test/ndbapi/testDict.cpp b/storage/ndb/test/ndbapi/testDict.cpp
index dd5846f0d62..dd5846f0d62 100644
--- a/ndb/test/ndbapi/testDict.cpp
+++ b/storage/ndb/test/ndbapi/testDict.cpp
diff --git a/ndb/test/ndbapi/testGrep.cpp b/storage/ndb/test/ndbapi/testGrep.cpp
index 713aefbeafa..713aefbeafa 100644
--- a/ndb/test/ndbapi/testGrep.cpp
+++ b/storage/ndb/test/ndbapi/testGrep.cpp
diff --git a/ndb/test/ndbapi/testGrepVerify.cpp b/storage/ndb/test/ndbapi/testGrepVerify.cpp
index 52dcda9a162..52dcda9a162 100644
--- a/ndb/test/ndbapi/testGrepVerify.cpp
+++ b/storage/ndb/test/ndbapi/testGrepVerify.cpp
diff --git a/ndb/test/ndbapi/testIndex.cpp b/storage/ndb/test/ndbapi/testIndex.cpp
index 5785db232c4..5785db232c4 100644
--- a/ndb/test/ndbapi/testIndex.cpp
+++ b/storage/ndb/test/ndbapi/testIndex.cpp
diff --git a/ndb/test/ndbapi/testInterpreter.cpp b/storage/ndb/test/ndbapi/testInterpreter.cpp
index 0baba33d2b2..0baba33d2b2 100644
--- a/ndb/test/ndbapi/testInterpreter.cpp
+++ b/storage/ndb/test/ndbapi/testInterpreter.cpp
diff --git a/ndb/test/ndbapi/testLcp.cpp b/storage/ndb/test/ndbapi/testLcp.cpp
index 8bfc7ccf9b9..8bfc7ccf9b9 100644
--- a/ndb/test/ndbapi/testLcp.cpp
+++ b/storage/ndb/test/ndbapi/testLcp.cpp
diff --git a/ndb/test/ndbapi/testMgm.cpp b/storage/ndb/test/ndbapi/testMgm.cpp
index ef653d3f972..ef653d3f972 100644
--- a/ndb/test/ndbapi/testMgm.cpp
+++ b/storage/ndb/test/ndbapi/testMgm.cpp
diff --git a/ndb/test/ndbapi/testNdbApi.cpp b/storage/ndb/test/ndbapi/testNdbApi.cpp
index 137a1d51d82..137a1d51d82 100644
--- a/ndb/test/ndbapi/testNdbApi.cpp
+++ b/storage/ndb/test/ndbapi/testNdbApi.cpp
diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp
index 6ef3da2d760..6ef3da2d760 100644
--- a/ndb/test/ndbapi/testNodeRestart.cpp
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp
diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/storage/ndb/test/ndbapi/testOIBasic.cpp
index 942ee2ec966..942ee2ec966 100644
--- a/ndb/test/ndbapi/testOIBasic.cpp
+++ b/storage/ndb/test/ndbapi/testOIBasic.cpp
diff --git a/ndb/test/ndbapi/testOperations.cpp b/storage/ndb/test/ndbapi/testOperations.cpp
index 726f35b01fb..726f35b01fb 100644
--- a/ndb/test/ndbapi/testOperations.cpp
+++ b/storage/ndb/test/ndbapi/testOperations.cpp
diff --git a/ndb/test/ndbapi/testOrderedIndex.cpp b/storage/ndb/test/ndbapi/testOrderedIndex.cpp
index b3a75410646..b3a75410646 100644
--- a/ndb/test/ndbapi/testOrderedIndex.cpp
+++ b/storage/ndb/test/ndbapi/testOrderedIndex.cpp
diff --git a/ndb/test/ndbapi/testPartitioning.cpp b/storage/ndb/test/ndbapi/testPartitioning.cpp
index 9d67c27354b..9d67c27354b 100644
--- a/ndb/test/ndbapi/testPartitioning.cpp
+++ b/storage/ndb/test/ndbapi/testPartitioning.cpp
diff --git a/ndb/test/ndbapi/testReadPerf.cpp b/storage/ndb/test/ndbapi/testReadPerf.cpp
index ba5f3c4232d..ba5f3c4232d 100644
--- a/ndb/test/ndbapi/testReadPerf.cpp
+++ b/storage/ndb/test/ndbapi/testReadPerf.cpp
diff --git a/ndb/test/ndbapi/testRestartGci.cpp b/storage/ndb/test/ndbapi/testRestartGci.cpp
index 4e541d1f38f..4e541d1f38f 100644
--- a/ndb/test/ndbapi/testRestartGci.cpp
+++ b/storage/ndb/test/ndbapi/testRestartGci.cpp
diff --git a/ndb/test/ndbapi/testScan.cpp b/storage/ndb/test/ndbapi/testScan.cpp
index 2802f1c950e..2802f1c950e 100644
--- a/ndb/test/ndbapi/testScan.cpp
+++ b/storage/ndb/test/ndbapi/testScan.cpp
diff --git a/ndb/test/ndbapi/testScanInterpreter.cpp b/storage/ndb/test/ndbapi/testScanInterpreter.cpp
index 5a7ca30cd2a..5a7ca30cd2a 100644
--- a/ndb/test/ndbapi/testScanInterpreter.cpp
+++ b/storage/ndb/test/ndbapi/testScanInterpreter.cpp
diff --git a/ndb/test/ndbapi/testScanPerf.cpp b/storage/ndb/test/ndbapi/testScanPerf.cpp
index a730136c3af..a730136c3af 100644
--- a/ndb/test/ndbapi/testScanPerf.cpp
+++ b/storage/ndb/test/ndbapi/testScanPerf.cpp
diff --git a/ndb/test/ndbapi/testSystemRestart.cpp b/storage/ndb/test/ndbapi/testSystemRestart.cpp
index 35016896495..35016896495 100644
--- a/ndb/test/ndbapi/testSystemRestart.cpp
+++ b/storage/ndb/test/ndbapi/testSystemRestart.cpp
diff --git a/ndb/test/ndbapi/testTimeout.cpp b/storage/ndb/test/ndbapi/testTimeout.cpp
index b02751ec819..b02751ec819 100644
--- a/ndb/test/ndbapi/testTimeout.cpp
+++ b/storage/ndb/test/ndbapi/testTimeout.cpp
diff --git a/ndb/test/ndbapi/testTransactions.cpp b/storage/ndb/test/ndbapi/testTransactions.cpp
index 46be808d8a5..46be808d8a5 100644
--- a/ndb/test/ndbapi/testTransactions.cpp
+++ b/storage/ndb/test/ndbapi/testTransactions.cpp
diff --git a/ndb/test/ndbapi/test_event.cpp b/storage/ndb/test/ndbapi/test_event.cpp
index 2df50f21e43..2df50f21e43 100644
--- a/ndb/test/ndbapi/test_event.cpp
+++ b/storage/ndb/test/ndbapi/test_event.cpp
diff --git a/ndb/test/ndbapi/test_event_multi_table.cpp b/storage/ndb/test/ndbapi/test_event_multi_table.cpp
index f16504029fa..f16504029fa 100644
--- a/ndb/test/ndbapi/test_event_multi_table.cpp
+++ b/storage/ndb/test/ndbapi/test_event_multi_table.cpp
diff --git a/ndb/test/ndbapi/userInterface.cpp b/storage/ndb/test/ndbapi/userInterface.cpp
index 2f77c0f4857..2f77c0f4857 100644
--- a/ndb/test/ndbapi/userInterface.cpp
+++ b/storage/ndb/test/ndbapi/userInterface.cpp
diff --git a/ndb/test/ndbnet/test.run b/storage/ndb/test/ndbnet/test.run
index 30042488c92..30042488c92 100644
--- a/ndb/test/ndbnet/test.run
+++ b/storage/ndb/test/ndbnet/test.run
diff --git a/ndb/test/ndbnet/testError.run b/storage/ndb/test/ndbnet/testError.run
index 3cce489a3da..3cce489a3da 100644
--- a/ndb/test/ndbnet/testError.run
+++ b/storage/ndb/test/ndbnet/testError.run
diff --git a/ndb/test/ndbnet/testMNF.run b/storage/ndb/test/ndbnet/testMNF.run
index df226cd3359..df226cd3359 100644
--- a/ndb/test/ndbnet/testMNF.run
+++ b/storage/ndb/test/ndbnet/testMNF.run
diff --git a/ndb/test/ndbnet/testNR.run b/storage/ndb/test/ndbnet/testNR.run
index 01a3d76266d..01a3d76266d 100644
--- a/ndb/test/ndbnet/testNR.run
+++ b/storage/ndb/test/ndbnet/testNR.run
diff --git a/ndb/test/ndbnet/testNR1.run b/storage/ndb/test/ndbnet/testNR1.run
index 8819a92c8ca..8819a92c8ca 100644
--- a/ndb/test/ndbnet/testNR1.run
+++ b/storage/ndb/test/ndbnet/testNR1.run
diff --git a/ndb/test/ndbnet/testNR4.run b/storage/ndb/test/ndbnet/testNR4.run
index f7a5eef3494..f7a5eef3494 100644
--- a/ndb/test/ndbnet/testNR4.run
+++ b/storage/ndb/test/ndbnet/testNR4.run
diff --git a/ndb/test/ndbnet/testSRhang.run b/storage/ndb/test/ndbnet/testSRhang.run
index 8cb65a75ded..8cb65a75ded 100644
--- a/ndb/test/ndbnet/testSRhang.run
+++ b/storage/ndb/test/ndbnet/testSRhang.run
diff --git a/ndb/test/ndbnet/testTR295.run b/storage/ndb/test/ndbnet/testTR295.run
index ce4250b60ae..ce4250b60ae 100644
--- a/ndb/test/ndbnet/testTR295.run
+++ b/storage/ndb/test/ndbnet/testTR295.run
diff --git a/ndb/test/newtonapi/basic_test/Makefile b/storage/ndb/test/newtonapi/basic_test/Makefile
index d7eaf984b12..d7eaf984b12 100644
--- a/ndb/test/newtonapi/basic_test/Makefile
+++ b/storage/ndb/test/newtonapi/basic_test/Makefile
diff --git a/ndb/test/newtonapi/basic_test/basic/Makefile b/storage/ndb/test/newtonapi/basic_test/basic/Makefile
index 7e2945d2e5f..7e2945d2e5f 100644
--- a/ndb/test/newtonapi/basic_test/basic/Makefile
+++ b/storage/ndb/test/newtonapi/basic_test/basic/Makefile
diff --git a/ndb/test/newtonapi/basic_test/basic/basic.cpp b/storage/ndb/test/newtonapi/basic_test/basic/basic.cpp
index bc33400078d..bc33400078d 100644
--- a/ndb/test/newtonapi/basic_test/basic/basic.cpp
+++ b/storage/ndb/test/newtonapi/basic_test/basic/basic.cpp
diff --git a/ndb/test/newtonapi/basic_test/bulk_read/Makefile b/storage/ndb/test/newtonapi/basic_test/bulk_read/Makefile
index c45bbad7957..c45bbad7957 100644
--- a/ndb/test/newtonapi/basic_test/bulk_read/Makefile
+++ b/storage/ndb/test/newtonapi/basic_test/bulk_read/Makefile
diff --git a/ndb/test/newtonapi/basic_test/bulk_read/br_test.cpp b/storage/ndb/test/newtonapi/basic_test/bulk_read/br_test.cpp
index 4120cfba864..4120cfba864 100644
--- a/ndb/test/newtonapi/basic_test/bulk_read/br_test.cpp
+++ b/storage/ndb/test/newtonapi/basic_test/bulk_read/br_test.cpp
diff --git a/ndb/test/newtonapi/basic_test/common.cpp b/storage/ndb/test/newtonapi/basic_test/common.cpp
index d4c4e6a74a7..d4c4e6a74a7 100644
--- a/ndb/test/newtonapi/basic_test/common.cpp
+++ b/storage/ndb/test/newtonapi/basic_test/common.cpp
diff --git a/ndb/test/newtonapi/basic_test/common.hpp b/storage/ndb/test/newtonapi/basic_test/common.hpp
index 0df8f7e078d..0df8f7e078d 100644
--- a/ndb/test/newtonapi/basic_test/common.hpp
+++ b/storage/ndb/test/newtonapi/basic_test/common.hpp
diff --git a/ndb/test/newtonapi/basic_test/ptr_binding/Makefile b/storage/ndb/test/newtonapi/basic_test/ptr_binding/Makefile
index 95e87d47e62..95e87d47e62 100644
--- a/ndb/test/newtonapi/basic_test/ptr_binding/Makefile
+++ b/storage/ndb/test/newtonapi/basic_test/ptr_binding/Makefile
diff --git a/ndb/test/newtonapi/basic_test/ptr_binding/ptr_binding_test.cpp b/storage/ndb/test/newtonapi/basic_test/ptr_binding/ptr_binding_test.cpp
index 2c9cee5be87..2c9cee5be87 100644
--- a/ndb/test/newtonapi/basic_test/ptr_binding/ptr_binding_test.cpp
+++ b/storage/ndb/test/newtonapi/basic_test/ptr_binding/ptr_binding_test.cpp
diff --git a/ndb/test/newtonapi/basic_test/too_basic.cpp b/storage/ndb/test/newtonapi/basic_test/too_basic.cpp
index 883aacf8841..883aacf8841 100644
--- a/ndb/test/newtonapi/basic_test/too_basic.cpp
+++ b/storage/ndb/test/newtonapi/basic_test/too_basic.cpp
diff --git a/ndb/test/newtonapi/perf_test/Makefile b/storage/ndb/test/newtonapi/perf_test/Makefile
index 2be004d4277..2be004d4277 100644
--- a/ndb/test/newtonapi/perf_test/Makefile
+++ b/storage/ndb/test/newtonapi/perf_test/Makefile
diff --git a/ndb/test/newtonapi/perf_test/perf.cpp b/storage/ndb/test/newtonapi/perf_test/perf.cpp
index 7b818e93a2a..7b818e93a2a 100644
--- a/ndb/test/newtonapi/perf_test/perf.cpp
+++ b/storage/ndb/test/newtonapi/perf_test/perf.cpp
diff --git a/ndb/test/odbc/SQL99_test/Makefile b/storage/ndb/test/odbc/SQL99_test/Makefile
index 3ac06016670..3ac06016670 100644
--- a/ndb/test/odbc/SQL99_test/Makefile
+++ b/storage/ndb/test/odbc/SQL99_test/Makefile
diff --git a/ndb/test/odbc/SQL99_test/SQL99_test.cpp b/storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp
index eda9ff33834..eda9ff33834 100644
--- a/ndb/test/odbc/SQL99_test/SQL99_test.cpp
+++ b/storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp
diff --git a/ndb/test/odbc/SQL99_test/SQL99_test.h b/storage/ndb/test/odbc/SQL99_test/SQL99_test.h
index 1c49f4a9a51..1c49f4a9a51 100644
--- a/ndb/test/odbc/SQL99_test/SQL99_test.h
+++ b/storage/ndb/test/odbc/SQL99_test/SQL99_test.h
diff --git a/ndb/test/odbc/client/Makefile b/storage/ndb/test/odbc/client/Makefile
index 4b962f5b65a..4b962f5b65a 100644
--- a/ndb/test/odbc/client/Makefile
+++ b/storage/ndb/test/odbc/client/Makefile
diff --git a/ndb/test/odbc/client/NDBT_ALLOCHANDLE.cpp b/storage/ndb/test/odbc/client/NDBT_ALLOCHANDLE.cpp
index 336f4a46554..336f4a46554 100644
--- a/ndb/test/odbc/client/NDBT_ALLOCHANDLE.cpp
+++ b/storage/ndb/test/odbc/client/NDBT_ALLOCHANDLE.cpp
diff --git a/ndb/test/odbc/client/NDBT_ALLOCHANDLE_HDBC.cpp b/storage/ndb/test/odbc/client/NDBT_ALLOCHANDLE_HDBC.cpp
index 8477a71edbf..8477a71edbf 100644
--- a/ndb/test/odbc/client/NDBT_ALLOCHANDLE_HDBC.cpp
+++ b/storage/ndb/test/odbc/client/NDBT_ALLOCHANDLE_HDBC.cpp
diff --git a/ndb/test/odbc/client/NDBT_SQLConnect.cpp b/storage/ndb/test/odbc/client/NDBT_SQLConnect.cpp
index da97ffebea4..da97ffebea4 100644
--- a/ndb/test/odbc/client/NDBT_SQLConnect.cpp
+++ b/storage/ndb/test/odbc/client/NDBT_SQLConnect.cpp
diff --git a/ndb/test/odbc/client/NDBT_SQLPrepare.cpp b/storage/ndb/test/odbc/client/NDBT_SQLPrepare.cpp
index 4aaff6a7df9..4aaff6a7df9 100644
--- a/ndb/test/odbc/client/NDBT_SQLPrepare.cpp
+++ b/storage/ndb/test/odbc/client/NDBT_SQLPrepare.cpp
diff --git a/ndb/test/odbc/client/SQLAllocEnvTest.cpp b/storage/ndb/test/odbc/client/SQLAllocEnvTest.cpp
index ce50c4b7ccd..ce50c4b7ccd 100644
--- a/ndb/test/odbc/client/SQLAllocEnvTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLAllocEnvTest.cpp
diff --git a/ndb/test/odbc/client/SQLAllocHandleTest.cpp b/storage/ndb/test/odbc/client/SQLAllocHandleTest.cpp
index 0c51e2e46b7..0c51e2e46b7 100644
--- a/ndb/test/odbc/client/SQLAllocHandleTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLAllocHandleTest.cpp
diff --git a/ndb/test/odbc/client/SQLAllocHandleTest_bf.cpp b/storage/ndb/test/odbc/client/SQLAllocHandleTest_bf.cpp
index 7786675243a..7786675243a 100644
--- a/ndb/test/odbc/client/SQLAllocHandleTest_bf.cpp
+++ b/storage/ndb/test/odbc/client/SQLAllocHandleTest_bf.cpp
diff --git a/ndb/test/odbc/client/SQLBindColTest.cpp b/storage/ndb/test/odbc/client/SQLBindColTest.cpp
index e2cd4ce73d1..e2cd4ce73d1 100644
--- a/ndb/test/odbc/client/SQLBindColTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLBindColTest.cpp
diff --git a/ndb/test/odbc/client/SQLBindParameterTest.cpp b/storage/ndb/test/odbc/client/SQLBindParameterTest.cpp
index 2ffd2892064..2ffd2892064 100644
--- a/ndb/test/odbc/client/SQLBindParameterTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLBindParameterTest.cpp
diff --git a/ndb/test/odbc/client/SQLCancelTest.cpp b/storage/ndb/test/odbc/client/SQLCancelTest.cpp
index 904ffab6979..904ffab6979 100644
--- a/ndb/test/odbc/client/SQLCancelTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLCancelTest.cpp
diff --git a/ndb/test/odbc/client/SQLCloseCursorTest.cpp b/storage/ndb/test/odbc/client/SQLCloseCursorTest.cpp
index 35f125df59d..35f125df59d 100644
--- a/ndb/test/odbc/client/SQLCloseCursorTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLCloseCursorTest.cpp
diff --git a/ndb/test/odbc/client/SQLColAttributeTest.cpp b/storage/ndb/test/odbc/client/SQLColAttributeTest.cpp
index 4c067c21d7d..4c067c21d7d 100644
--- a/ndb/test/odbc/client/SQLColAttributeTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLColAttributeTest.cpp
diff --git a/ndb/test/odbc/client/SQLColAttributeTest1.cpp b/storage/ndb/test/odbc/client/SQLColAttributeTest1.cpp
index 322a21eefc1..322a21eefc1 100644
--- a/ndb/test/odbc/client/SQLColAttributeTest1.cpp
+++ b/storage/ndb/test/odbc/client/SQLColAttributeTest1.cpp
diff --git a/ndb/test/odbc/client/SQLColAttributeTest2.cpp b/storage/ndb/test/odbc/client/SQLColAttributeTest2.cpp
index 18cffae76c1..18cffae76c1 100644
--- a/ndb/test/odbc/client/SQLColAttributeTest2.cpp
+++ b/storage/ndb/test/odbc/client/SQLColAttributeTest2.cpp
diff --git a/ndb/test/odbc/client/SQLColAttributeTest3.cpp b/storage/ndb/test/odbc/client/SQLColAttributeTest3.cpp
index f8817565711..f8817565711 100644
--- a/ndb/test/odbc/client/SQLColAttributeTest3.cpp
+++ b/storage/ndb/test/odbc/client/SQLColAttributeTest3.cpp
diff --git a/ndb/test/odbc/client/SQLConnectTest.cpp b/storage/ndb/test/odbc/client/SQLConnectTest.cpp
index 552fc8640fe..552fc8640fe 100644
--- a/ndb/test/odbc/client/SQLConnectTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLConnectTest.cpp
diff --git a/ndb/test/odbc/client/SQLCopyDescTest.cpp b/storage/ndb/test/odbc/client/SQLCopyDescTest.cpp
index 4a3742f97ae..4a3742f97ae 100644
--- a/ndb/test/odbc/client/SQLCopyDescTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLCopyDescTest.cpp
diff --git a/ndb/test/odbc/client/SQLDescribeColTest.cpp b/storage/ndb/test/odbc/client/SQLDescribeColTest.cpp
index 9f55c6a1cfe..9f55c6a1cfe 100644
--- a/ndb/test/odbc/client/SQLDescribeColTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLDescribeColTest.cpp
diff --git a/ndb/test/odbc/client/SQLDisconnectTest.cpp b/storage/ndb/test/odbc/client/SQLDisconnectTest.cpp
index 823b446ab84..823b446ab84 100644
--- a/ndb/test/odbc/client/SQLDisconnectTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLDisconnectTest.cpp
diff --git a/ndb/test/odbc/client/SQLDriverConnectTest.cpp b/storage/ndb/test/odbc/client/SQLDriverConnectTest.cpp
index fc3b1d10f91..fc3b1d10f91 100644
--- a/ndb/test/odbc/client/SQLDriverConnectTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLDriverConnectTest.cpp
diff --git a/ndb/test/odbc/client/SQLEndTranTest.cpp b/storage/ndb/test/odbc/client/SQLEndTranTest.cpp
index 06c497954fd..06c497954fd 100644
--- a/ndb/test/odbc/client/SQLEndTranTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLEndTranTest.cpp
diff --git a/ndb/test/odbc/client/SQLErrorTest.cpp b/storage/ndb/test/odbc/client/SQLErrorTest.cpp
index 5220e7b5eed..5220e7b5eed 100644
--- a/ndb/test/odbc/client/SQLErrorTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLErrorTest.cpp
diff --git a/ndb/test/odbc/client/SQLExecDirectTest.cpp b/storage/ndb/test/odbc/client/SQLExecDirectTest.cpp
index b9b4e770412..b9b4e770412 100644
--- a/ndb/test/odbc/client/SQLExecDirectTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLExecDirectTest.cpp
diff --git a/ndb/test/odbc/client/SQLExecuteTest.cpp b/storage/ndb/test/odbc/client/SQLExecuteTest.cpp
index 5f6bdb5d4bf..5f6bdb5d4bf 100644
--- a/ndb/test/odbc/client/SQLExecuteTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLExecuteTest.cpp
diff --git a/ndb/test/odbc/client/SQLFetchScrollTest.cpp b/storage/ndb/test/odbc/client/SQLFetchScrollTest.cpp
index 4a11ccd143e..4a11ccd143e 100644
--- a/ndb/test/odbc/client/SQLFetchScrollTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLFetchScrollTest.cpp
diff --git a/ndb/test/odbc/client/SQLFetchTest.cpp b/storage/ndb/test/odbc/client/SQLFetchTest.cpp
index bd62fcb2f04..bd62fcb2f04 100644
--- a/ndb/test/odbc/client/SQLFetchTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLFetchTest.cpp
diff --git a/ndb/test/odbc/client/SQLFreeHandleTest.cpp b/storage/ndb/test/odbc/client/SQLFreeHandleTest.cpp
index 3a7241dbe68..3a7241dbe68 100644
--- a/ndb/test/odbc/client/SQLFreeHandleTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLFreeHandleTest.cpp
diff --git a/ndb/test/odbc/client/SQLFreeStmtTest.cpp b/storage/ndb/test/odbc/client/SQLFreeStmtTest.cpp
index e636b3063de..e636b3063de 100644
--- a/ndb/test/odbc/client/SQLFreeStmtTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLFreeStmtTest.cpp
diff --git a/ndb/test/odbc/client/SQLGetConnectAttrTest.cpp b/storage/ndb/test/odbc/client/SQLGetConnectAttrTest.cpp
index 8d5a5c0dbbb..8d5a5c0dbbb 100644
--- a/ndb/test/odbc/client/SQLGetConnectAttrTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLGetConnectAttrTest.cpp
diff --git a/ndb/test/odbc/client/SQLGetCursorNameTest.cpp b/storage/ndb/test/odbc/client/SQLGetCursorNameTest.cpp
index 1e3ed9f557e..1e3ed9f557e 100644
--- a/ndb/test/odbc/client/SQLGetCursorNameTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLGetCursorNameTest.cpp
diff --git a/ndb/test/odbc/client/SQLGetDataTest.cpp b/storage/ndb/test/odbc/client/SQLGetDataTest.cpp
index 9d958c6c953..9d958c6c953 100644
--- a/ndb/test/odbc/client/SQLGetDataTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLGetDataTest.cpp
diff --git a/ndb/test/odbc/client/SQLGetDescFieldTest.cpp b/storage/ndb/test/odbc/client/SQLGetDescFieldTest.cpp
index b789ed75378..b789ed75378 100644
--- a/ndb/test/odbc/client/SQLGetDescFieldTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLGetDescFieldTest.cpp
diff --git a/ndb/test/odbc/client/SQLGetDescRecTest.cpp b/storage/ndb/test/odbc/client/SQLGetDescRecTest.cpp
index 5944f393a71..5944f393a71 100644
--- a/ndb/test/odbc/client/SQLGetDescRecTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLGetDescRecTest.cpp
diff --git a/ndb/test/odbc/client/SQLGetDiagFieldTest.cpp b/storage/ndb/test/odbc/client/SQLGetDiagFieldTest.cpp
index ef9bc3eb3fc..ef9bc3eb3fc 100644
--- a/ndb/test/odbc/client/SQLGetDiagFieldTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLGetDiagFieldTest.cpp
diff --git a/ndb/test/odbc/client/SQLGetDiagRecSimpleTest.cpp b/storage/ndb/test/odbc/client/SQLGetDiagRecSimpleTest.cpp
index 8fa4a2b3dbb..8fa4a2b3dbb 100644
--- a/ndb/test/odbc/client/SQLGetDiagRecSimpleTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLGetDiagRecSimpleTest.cpp
diff --git a/ndb/test/odbc/client/SQLGetDiagRecTest.cpp b/storage/ndb/test/odbc/client/SQLGetDiagRecTest.cpp
index 27c78edaa4d..27c78edaa4d 100644
--- a/ndb/test/odbc/client/SQLGetDiagRecTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLGetDiagRecTest.cpp
diff --git a/ndb/test/odbc/client/SQLGetEnvAttrTest.cpp b/storage/ndb/test/odbc/client/SQLGetEnvAttrTest.cpp
index efc8117d6d2..efc8117d6d2 100644
--- a/ndb/test/odbc/client/SQLGetEnvAttrTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLGetEnvAttrTest.cpp
diff --git a/ndb/test/odbc/client/SQLGetFunctionsTest.cpp b/storage/ndb/test/odbc/client/SQLGetFunctionsTest.cpp
index c6feb8ec033..c6feb8ec033 100644
--- a/ndb/test/odbc/client/SQLGetFunctionsTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLGetFunctionsTest.cpp
diff --git a/ndb/test/odbc/client/SQLGetInfoTest.cpp b/storage/ndb/test/odbc/client/SQLGetInfoTest.cpp
index 95f7562dafe..95f7562dafe 100644
--- a/ndb/test/odbc/client/SQLGetInfoTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLGetInfoTest.cpp
diff --git a/ndb/test/odbc/client/SQLGetStmtAttrTest.cpp b/storage/ndb/test/odbc/client/SQLGetStmtAttrTest.cpp
index 2052af60ee0..2052af60ee0 100644
--- a/ndb/test/odbc/client/SQLGetStmtAttrTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLGetStmtAttrTest.cpp
diff --git a/ndb/test/odbc/client/SQLGetTypeInfoTest.cpp b/storage/ndb/test/odbc/client/SQLGetTypeInfoTest.cpp
index 5925d1cc1ae..5925d1cc1ae 100644
--- a/ndb/test/odbc/client/SQLGetTypeInfoTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLGetTypeInfoTest.cpp
diff --git a/ndb/test/odbc/client/SQLMoreResultsTest.cpp b/storage/ndb/test/odbc/client/SQLMoreResultsTest.cpp
index cba8b0dc53e..cba8b0dc53e 100644
--- a/ndb/test/odbc/client/SQLMoreResultsTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLMoreResultsTest.cpp
diff --git a/ndb/test/odbc/client/SQLNumResultColsTest.cpp b/storage/ndb/test/odbc/client/SQLNumResultColsTest.cpp
index 8f0c1dba94c..8f0c1dba94c 100644
--- a/ndb/test/odbc/client/SQLNumResultColsTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLNumResultColsTest.cpp
diff --git a/ndb/test/odbc/client/SQLParamDataTest.cpp b/storage/ndb/test/odbc/client/SQLParamDataTest.cpp
index 92d491dfaf5..92d491dfaf5 100644
--- a/ndb/test/odbc/client/SQLParamDataTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLParamDataTest.cpp
diff --git a/ndb/test/odbc/client/SQLPrepareTest.cpp b/storage/ndb/test/odbc/client/SQLPrepareTest.cpp
index 2ebbc224b85..2ebbc224b85 100644
--- a/ndb/test/odbc/client/SQLPrepareTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLPrepareTest.cpp
diff --git a/ndb/test/odbc/client/SQLPutDataTest.cpp b/storage/ndb/test/odbc/client/SQLPutDataTest.cpp
index 38a8458fec4..38a8458fec4 100644
--- a/ndb/test/odbc/client/SQLPutDataTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLPutDataTest.cpp
diff --git a/ndb/test/odbc/client/SQLRowCountTest.cpp b/storage/ndb/test/odbc/client/SQLRowCountTest.cpp
index f298017c519..f298017c519 100644
--- a/ndb/test/odbc/client/SQLRowCountTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLRowCountTest.cpp
diff --git a/ndb/test/odbc/client/SQLSetConnectAttrTest.cpp b/storage/ndb/test/odbc/client/SQLSetConnectAttrTest.cpp
index c41ef885521..c41ef885521 100644
--- a/ndb/test/odbc/client/SQLSetConnectAttrTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLSetConnectAttrTest.cpp
diff --git a/ndb/test/odbc/client/SQLSetCursorNameTest.cpp b/storage/ndb/test/odbc/client/SQLSetCursorNameTest.cpp
index b35cf9fefc2..b35cf9fefc2 100644
--- a/ndb/test/odbc/client/SQLSetCursorNameTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLSetCursorNameTest.cpp
diff --git a/ndb/test/odbc/client/SQLSetDescFieldTest.cpp b/storage/ndb/test/odbc/client/SQLSetDescFieldTest.cpp
index 798622e0f75..798622e0f75 100644
--- a/ndb/test/odbc/client/SQLSetDescFieldTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLSetDescFieldTest.cpp
diff --git a/ndb/test/odbc/client/SQLSetDescRecTest.cpp b/storage/ndb/test/odbc/client/SQLSetDescRecTest.cpp
index d97af576cb0..d97af576cb0 100644
--- a/ndb/test/odbc/client/SQLSetDescRecTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLSetDescRecTest.cpp
diff --git a/ndb/test/odbc/client/SQLSetEnvAttrTest.cpp b/storage/ndb/test/odbc/client/SQLSetEnvAttrTest.cpp
index 16ae5671ca3..16ae5671ca3 100644
--- a/ndb/test/odbc/client/SQLSetEnvAttrTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLSetEnvAttrTest.cpp
diff --git a/ndb/test/odbc/client/SQLSetStmtAttrTest.cpp b/storage/ndb/test/odbc/client/SQLSetStmtAttrTest.cpp
index 646f82cd306..646f82cd306 100644
--- a/ndb/test/odbc/client/SQLSetStmtAttrTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLSetStmtAttrTest.cpp
diff --git a/ndb/test/odbc/client/SQLTablesTest.cpp b/storage/ndb/test/odbc/client/SQLTablesTest.cpp
index 735efd81e9c..735efd81e9c 100644
--- a/ndb/test/odbc/client/SQLTablesTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLTablesTest.cpp
diff --git a/ndb/test/odbc/client/SQLTransactTest.cpp b/storage/ndb/test/odbc/client/SQLTransactTest.cpp
index e9abe42129d..e9abe42129d 100644
--- a/ndb/test/odbc/client/SQLTransactTest.cpp
+++ b/storage/ndb/test/odbc/client/SQLTransactTest.cpp
diff --git a/ndb/test/odbc/client/common.hpp b/storage/ndb/test/odbc/client/common.hpp
index 236decf1b95..236decf1b95 100644
--- a/ndb/test/odbc/client/common.hpp
+++ b/storage/ndb/test/odbc/client/common.hpp
diff --git a/ndb/test/odbc/client/main.cpp b/storage/ndb/test/odbc/client/main.cpp
index b202b6de111..b202b6de111 100644
--- a/ndb/test/odbc/client/main.cpp
+++ b/storage/ndb/test/odbc/client/main.cpp
diff --git a/ndb/test/odbc/dm-iodbc/Makefile b/storage/ndb/test/odbc/dm-iodbc/Makefile
index ad0f0d39f5f..ad0f0d39f5f 100644
--- a/ndb/test/odbc/dm-iodbc/Makefile
+++ b/storage/ndb/test/odbc/dm-iodbc/Makefile
diff --git a/ndb/test/odbc/dm-unixodbc/Makefile b/storage/ndb/test/odbc/dm-unixodbc/Makefile
index 50d8e3b5e05..50d8e3b5e05 100644
--- a/ndb/test/odbc/dm-unixodbc/Makefile
+++ b/storage/ndb/test/odbc/dm-unixodbc/Makefile
diff --git a/ndb/test/odbc/driver/Makefile b/storage/ndb/test/odbc/driver/Makefile
index 5cf83d73106..5cf83d73106 100644
--- a/ndb/test/odbc/driver/Makefile
+++ b/storage/ndb/test/odbc/driver/Makefile
diff --git a/ndb/test/odbc/driver/testOdbcDriver.cpp b/storage/ndb/test/odbc/driver/testOdbcDriver.cpp
index d3b3802ebe1..d3b3802ebe1 100644
--- a/ndb/test/odbc/driver/testOdbcDriver.cpp
+++ b/storage/ndb/test/odbc/driver/testOdbcDriver.cpp
diff --git a/ndb/test/odbc/test_compiler/Makefile b/storage/ndb/test/odbc/test_compiler/Makefile
index 34819f21171..34819f21171 100644
--- a/ndb/test/odbc/test_compiler/Makefile
+++ b/storage/ndb/test/odbc/test_compiler/Makefile
diff --git a/ndb/test/odbc/test_compiler/test_compiler.cpp b/storage/ndb/test/odbc/test_compiler/test_compiler.cpp
index 042e9e6d4bf..042e9e6d4bf 100644
--- a/ndb/test/odbc/test_compiler/test_compiler.cpp
+++ b/storage/ndb/test/odbc/test_compiler/test_compiler.cpp
diff --git a/ndb/test/run-test/16node-tests.txt b/storage/ndb/test/run-test/16node-tests.txt
index 11ade56c28c..11ade56c28c 100644
--- a/ndb/test/run-test/16node-tests.txt
+++ b/storage/ndb/test/run-test/16node-tests.txt
diff --git a/storage/ndb/test/run-test/Makefile.am b/storage/ndb/test/run-test/Makefile.am
new file mode 100644
index 00000000000..903e3ab824e
--- /dev/null
+++ b/storage/ndb/test/run-test/Makefile.am
@@ -0,0 +1,34 @@
+
+testdir=$(prefix)/mysql-test/ndb
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_util.mk.am
+include $(top_srcdir)/storage/ndb/config/type_mgmapiclient.mk.am
+
+test_PROGRAMS = atrt
+test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
+ conf-daily-basic-ndbmaster.txt \
+ conf-daily-basic-shark.txt \
+ conf-daily-devel-ndbmaster.txt \
+ conf-daily-sql-ndbmaster.txt \
+ conf-daily-basic-dl145a.txt
+test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
+ atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh
+
+atrt_SOURCES = main.cpp run-test.hpp
+INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/test/include
+LDADD_LOC = $(top_builddir)/storage/ndb/test/src/libNDBT.a \
+ $(top_builddir)/storage/ndb/src/libndbclient.la \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
+
+wrappersdir=$(prefix)/bin
+wrappers_SCRIPTS=atrt-testBackup atrt-mysql-test-run
+
+EXTRA_DIST = $(test_DATA) $(test_SCRIPTS) $(wrappers_SCRIPTS) README.ATRT
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp:
diff --git a/storage/ndb/test/run-test/README b/storage/ndb/test/run-test/README
new file mode 100644
index 00000000000..57f085711ce
--- /dev/null
+++ b/storage/ndb/test/run-test/README
@@ -0,0 +1,43 @@
+run-test/README
+
+This document describes how atrt works and how to use it.
+
+atrt is a test program driver.
+atrt supports fully distributed tests and utilizes ndb_cpcd.
+
+=================================
+atrt has the following main loop:
+
+/**
+ * Pseudo code for atrt
+ */
+ read config file (default d.txt)
+ contact each ndb_cpcd
+ start each ndb_mgmd
+ connect to each ndb_mgmd
+ for each read(test case)
+ do
+ if previous test failed (or is first test)
+ stop each ndbd
+ start each ndbd
+ wait for ndbd to get started
+
+ start each mysqld
+
+ start each test prg
+
+ wait while all is running and max time not elapsed
+
+ stop each mysqld
+
+ stop each test prg
+
+ gather result
+
+ done
+/**
+ * End of pseudo code
+ */
+
+=================================
+
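[Editor's note: to make the pseudo code in the README above concrete, here is a minimal, self-contained C++ sketch of the same main loop. It is not the real atrt implementation (that lives in main.cpp / run-test.hpp in this directory); every helper below is a hypothetical stub standing in for the real configuration, ndb_cpcd and cluster-control calls.]

// Sketch of the atrt main loop described in run-test/README.
// All helpers are hypothetical placeholders, not real atrt/NDB APIs.
#include <cstdio>
#include <string>
#include <vector>

struct TestCase { std::string name; };

// Hypothetical stubs for the steps named in the pseudo code.
static std::vector<TestCase> read_config(const char* file) { (void)file; return {{"t1"}, {"t2"}}; }
static void stop_ndbd()            { std::puts("stop each ndbd"); }
static void start_ndbd()           { std::puts("start each ndbd, wait until started"); }
static void start_mysqld()         { std::puts("start each mysqld"); }
static void stop_mysqld()          { std::puts("stop each mysqld"); }
static bool run_test(TestCase& tc) { std::printf("run %s\n", tc.name.c_str()); return true; }
static void gather_result()        { std::puts("gather result"); }

int main() {
  std::vector<TestCase> tests = read_config("d.txt");  // default config file per README
  bool prev_failed = true;                             // first test behaves like a failure
  for (TestCase& tc : tests) {
    if (prev_failed) {        // restart data nodes only when the previous test failed
      stop_ndbd();
      start_ndbd();
    }
    start_mysqld();
    prev_failed = !run_test(tc);  // "wait while all is running and max time not elapsed"
    stop_mysqld();
    gather_result();
  }
  return 0;
}

The one non-obvious point the pseudo code encodes is that the ndbd data nodes are restarted only before the first test or after a failed one; the prev_failed flag above captures that behaviour.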
diff --git a/ndb/test/run-test/README.ATRT b/storage/ndb/test/run-test/README.ATRT
index 7fe04ccdac4..7fe04ccdac4 100644
--- a/ndb/test/run-test/README.ATRT
+++ b/storage/ndb/test/run-test/README.ATRT
diff --git a/ndb/test/run-test/atrt-analyze-result.sh b/storage/ndb/test/run-test/atrt-analyze-result.sh
index 0fa46e918ef..0fa46e918ef 100755
--- a/ndb/test/run-test/atrt-analyze-result.sh
+++ b/storage/ndb/test/run-test/atrt-analyze-result.sh
diff --git a/ndb/test/run-test/atrt-clear-result.sh b/storage/ndb/test/run-test/atrt-clear-result.sh
index 57d3d43d247..57d3d43d247 100755
--- a/ndb/test/run-test/atrt-clear-result.sh
+++ b/storage/ndb/test/run-test/atrt-clear-result.sh
diff --git a/ndb/test/run-test/atrt-example.tgz b/storage/ndb/test/run-test/atrt-example.tgz
index 8455b2eb00d..8455b2eb00d 100644
--- a/ndb/test/run-test/atrt-example.tgz
+++ b/storage/ndb/test/run-test/atrt-example.tgz
Binary files differ
diff --git a/ndb/test/run-test/atrt-gather-result.sh b/storage/ndb/test/run-test/atrt-gather-result.sh
index 93d4ae428d0..93d4ae428d0 100755
--- a/ndb/test/run-test/atrt-gather-result.sh
+++ b/storage/ndb/test/run-test/atrt-gather-result.sh
diff --git a/ndb/test/run-test/atrt-mysql-test-run b/storage/ndb/test/run-test/atrt-mysql-test-run
index 2ebc11b0070..2ebc11b0070 100755
--- a/ndb/test/run-test/atrt-mysql-test-run
+++ b/storage/ndb/test/run-test/atrt-mysql-test-run
diff --git a/ndb/test/run-test/atrt-setup.sh b/storage/ndb/test/run-test/atrt-setup.sh
index aff5d4119dc..aff5d4119dc 100755
--- a/ndb/test/run-test/atrt-setup.sh
+++ b/storage/ndb/test/run-test/atrt-setup.sh
diff --git a/ndb/test/run-test/atrt-testBackup b/storage/ndb/test/run-test/atrt-testBackup
index 3ed7641a42e..3ed7641a42e 100755
--- a/ndb/test/run-test/atrt-testBackup
+++ b/storage/ndb/test/run-test/atrt-testBackup
diff --git a/ndb/test/run-test/basic.txt b/storage/ndb/test/run-test/basic.txt
index ec9e21359e5..ec9e21359e5 100644
--- a/ndb/test/run-test/basic.txt
+++ b/storage/ndb/test/run-test/basic.txt
diff --git a/ndb/test/run-test/conf-daily-basic-dl145a.txt b/storage/ndb/test/run-test/conf-daily-basic-dl145a.txt
index d8cf8d34d82..d8cf8d34d82 100644
--- a/ndb/test/run-test/conf-daily-basic-dl145a.txt
+++ b/storage/ndb/test/run-test/conf-daily-basic-dl145a.txt
diff --git a/ndb/test/run-test/conf-daily-basic-ndbmaster.txt b/storage/ndb/test/run-test/conf-daily-basic-ndbmaster.txt
index bcd809593f3..bcd809593f3 100644
--- a/ndb/test/run-test/conf-daily-basic-ndbmaster.txt
+++ b/storage/ndb/test/run-test/conf-daily-basic-ndbmaster.txt
diff --git a/ndb/test/run-test/conf-daily-basic-shark.txt b/storage/ndb/test/run-test/conf-daily-basic-shark.txt
index 6d1f8b64f44..6d1f8b64f44 100644
--- a/ndb/test/run-test/conf-daily-basic-shark.txt
+++ b/storage/ndb/test/run-test/conf-daily-basic-shark.txt
diff --git a/ndb/test/run-test/conf-daily-devel-ndbmaster.txt b/storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt
index 8b340e6a39d..8b340e6a39d 100644
--- a/ndb/test/run-test/conf-daily-devel-ndbmaster.txt
+++ b/storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt
diff --git a/ndb/test/run-test/conf-daily-sql-ndbmaster.txt b/storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt
index 0d6a99f8d48..0d6a99f8d48 100644
--- a/ndb/test/run-test/conf-daily-sql-ndbmaster.txt
+++ b/storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt
diff --git a/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt
index 5d7d7f58f89..5d7d7f58f89 100644
--- a/ndb/test/run-test/daily-basic-tests.txt
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt
diff --git a/ndb/test/run-test/daily-devel-tests.txt b/storage/ndb/test/run-test/daily-devel-tests.txt
index 20f54e031e5..20f54e031e5 100644
--- a/ndb/test/run-test/daily-devel-tests.txt
+++ b/storage/ndb/test/run-test/daily-devel-tests.txt
diff --git a/ndb/test/run-test/example.conf b/storage/ndb/test/run-test/example.conf
index 1e152da332d..1e152da332d 100644
--- a/ndb/test/run-test/example.conf
+++ b/storage/ndb/test/run-test/example.conf
diff --git a/ndb/test/run-test/main.cpp b/storage/ndb/test/run-test/main.cpp
index 0b0b7472a19..0b0b7472a19 100644
--- a/ndb/test/run-test/main.cpp
+++ b/storage/ndb/test/run-test/main.cpp
diff --git a/ndb/test/run-test/make-config.sh b/storage/ndb/test/run-test/make-config.sh
index e82acb8a7dd..e82acb8a7dd 100755
--- a/ndb/test/run-test/make-config.sh
+++ b/storage/ndb/test/run-test/make-config.sh
diff --git a/ndb/test/run-test/make-html-reports.sh b/storage/ndb/test/run-test/make-html-reports.sh
index 67395ceba47..67395ceba47 100755
--- a/ndb/test/run-test/make-html-reports.sh
+++ b/storage/ndb/test/run-test/make-html-reports.sh
diff --git a/ndb/test/run-test/make-index.sh b/storage/ndb/test/run-test/make-index.sh
index 944f0df790b..944f0df790b 100755
--- a/ndb/test/run-test/make-index.sh
+++ b/storage/ndb/test/run-test/make-index.sh
diff --git a/ndb/test/run-test/ndb-autotest.sh b/storage/ndb/test/run-test/ndb-autotest.sh
index 573a9953924..573a9953924 100755
--- a/ndb/test/run-test/ndb-autotest.sh
+++ b/storage/ndb/test/run-test/ndb-autotest.sh
diff --git a/ndb/test/run-test/run-test.hpp b/storage/ndb/test/run-test/run-test.hpp
index 7011aec33d3..7011aec33d3 100644
--- a/ndb/test/run-test/run-test.hpp
+++ b/storage/ndb/test/run-test/run-test.hpp
diff --git a/ndb/test/src/CpcClient.cpp b/storage/ndb/test/src/CpcClient.cpp
index 4d06b4a7ff5..4d06b4a7ff5 100644
--- a/ndb/test/src/CpcClient.cpp
+++ b/storage/ndb/test/src/CpcClient.cpp
diff --git a/ndb/test/src/HugoAsynchTransactions.cpp b/storage/ndb/test/src/HugoAsynchTransactions.cpp
index 5d2eb451c0b..5d2eb451c0b 100644
--- a/ndb/test/src/HugoAsynchTransactions.cpp
+++ b/storage/ndb/test/src/HugoAsynchTransactions.cpp
diff --git a/ndb/test/src/HugoCalculator.cpp b/storage/ndb/test/src/HugoCalculator.cpp
index 8e01f6442bb..8e01f6442bb 100644
--- a/ndb/test/src/HugoCalculator.cpp
+++ b/storage/ndb/test/src/HugoCalculator.cpp
diff --git a/ndb/test/src/HugoOperations.cpp b/storage/ndb/test/src/HugoOperations.cpp
index f9a09eddd1f..f9a09eddd1f 100644
--- a/ndb/test/src/HugoOperations.cpp
+++ b/storage/ndb/test/src/HugoOperations.cpp
diff --git a/ndb/test/src/HugoTransactions.cpp b/storage/ndb/test/src/HugoTransactions.cpp
index 3260b921985..3260b921985 100644
--- a/ndb/test/src/HugoTransactions.cpp
+++ b/storage/ndb/test/src/HugoTransactions.cpp
diff --git a/storage/ndb/test/src/Makefile.am b/storage/ndb/test/src/Makefile.am
new file mode 100644
index 00000000000..d141a356ac0
--- /dev/null
+++ b/storage/ndb/test/src/Makefile.am
@@ -0,0 +1,35 @@
+
+noinst_LIBRARIES = libNDBT.a
+
+libNDBT_a_SOURCES = \
+ NDBT_ReturnCodes.cpp \
+ NDBT_Error.cpp NDBT_Tables.cpp NDBT_ResultRow.cpp \
+ NDBT_Test.cpp HugoCalculator.cpp \
+ HugoOperations.cpp HugoTransactions.cpp \
+ HugoAsynchTransactions.cpp UtilTransactions.cpp \
+ NdbRestarter.cpp NdbRestarts.cpp NDBT_Output.cpp \
+ NdbBackup.cpp NdbConfig.cpp NdbGrep.cpp NDBT_Table.cpp \
+ NdbSchemaCon.cpp NdbSchemaOp.cpp getarg.c \
+ CpcClient.cpp
+
+INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/common/mgmcommon -I$(top_srcdir)/storage/ndb/include/mgmcommon -I$(top_srcdir)/storage/ndb/include/kernel -I$(top_srcdir)/storage/ndb/src/mgmapi
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libNDBT.dsp
+
+libNDBT.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-lib.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libNDBT_a_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB
diff --git a/ndb/test/src/NDBT_Error.cpp b/storage/ndb/test/src/NDBT_Error.cpp
index ffacb3eb928..ffacb3eb928 100644
--- a/ndb/test/src/NDBT_Error.cpp
+++ b/storage/ndb/test/src/NDBT_Error.cpp
diff --git a/ndb/test/src/NDBT_Output.cpp b/storage/ndb/test/src/NDBT_Output.cpp
index 633d71991d0..633d71991d0 100644
--- a/ndb/test/src/NDBT_Output.cpp
+++ b/storage/ndb/test/src/NDBT_Output.cpp
diff --git a/ndb/test/src/NDBT_ResultRow.cpp b/storage/ndb/test/src/NDBT_ResultRow.cpp
index ab8d7b07ea1..ab8d7b07ea1 100644
--- a/ndb/test/src/NDBT_ResultRow.cpp
+++ b/storage/ndb/test/src/NDBT_ResultRow.cpp
diff --git a/ndb/test/src/NDBT_ReturnCodes.cpp b/storage/ndb/test/src/NDBT_ReturnCodes.cpp
index 5bffc00177f..5bffc00177f 100644
--- a/ndb/test/src/NDBT_ReturnCodes.cpp
+++ b/storage/ndb/test/src/NDBT_ReturnCodes.cpp
diff --git a/ndb/test/src/NDBT_Table.cpp b/storage/ndb/test/src/NDBT_Table.cpp
index 8d398b75d81..8d398b75d81 100644
--- a/ndb/test/src/NDBT_Table.cpp
+++ b/storage/ndb/test/src/NDBT_Table.cpp
diff --git a/ndb/test/src/NDBT_Tables.cpp b/storage/ndb/test/src/NDBT_Tables.cpp
index 5a5fecd85c1..5a5fecd85c1 100644
--- a/ndb/test/src/NDBT_Tables.cpp
+++ b/storage/ndb/test/src/NDBT_Tables.cpp
diff --git a/ndb/test/src/NDBT_Test.cpp b/storage/ndb/test/src/NDBT_Test.cpp
index 7fd92db533e..7fd92db533e 100644
--- a/ndb/test/src/NDBT_Test.cpp
+++ b/storage/ndb/test/src/NDBT_Test.cpp
diff --git a/ndb/test/src/NdbBackup.cpp b/storage/ndb/test/src/NdbBackup.cpp
index fe101b9c80b..fe101b9c80b 100644
--- a/ndb/test/src/NdbBackup.cpp
+++ b/storage/ndb/test/src/NdbBackup.cpp
diff --git a/ndb/test/src/NdbConfig.cpp b/storage/ndb/test/src/NdbConfig.cpp
index 2fb466d1b8f..2fb466d1b8f 100644
--- a/ndb/test/src/NdbConfig.cpp
+++ b/storage/ndb/test/src/NdbConfig.cpp
diff --git a/ndb/test/src/NdbGrep.cpp b/storage/ndb/test/src/NdbGrep.cpp
index 6c0c9cabfcb..6c0c9cabfcb 100644
--- a/ndb/test/src/NdbGrep.cpp
+++ b/storage/ndb/test/src/NdbGrep.cpp
diff --git a/ndb/test/src/NdbRestarter.cpp b/storage/ndb/test/src/NdbRestarter.cpp
index 91c0963feae..91c0963feae 100644
--- a/ndb/test/src/NdbRestarter.cpp
+++ b/storage/ndb/test/src/NdbRestarter.cpp
diff --git a/ndb/test/src/NdbRestarts.cpp b/storage/ndb/test/src/NdbRestarts.cpp
index c0f31af84ce..c0f31af84ce 100644
--- a/ndb/test/src/NdbRestarts.cpp
+++ b/storage/ndb/test/src/NdbRestarts.cpp
diff --git a/ndb/test/src/NdbSchemaCon.cpp b/storage/ndb/test/src/NdbSchemaCon.cpp
index 0de49ff983f..0de49ff983f 100644
--- a/ndb/test/src/NdbSchemaCon.cpp
+++ b/storage/ndb/test/src/NdbSchemaCon.cpp
diff --git a/ndb/test/src/NdbSchemaOp.cpp b/storage/ndb/test/src/NdbSchemaOp.cpp
index 4281ceb02c8..4281ceb02c8 100644
--- a/ndb/test/src/NdbSchemaOp.cpp
+++ b/storage/ndb/test/src/NdbSchemaOp.cpp
diff --git a/ndb/test/src/UtilTransactions.cpp b/storage/ndb/test/src/UtilTransactions.cpp
index 65c1a7ded31..65c1a7ded31 100644
--- a/ndb/test/src/UtilTransactions.cpp
+++ b/storage/ndb/test/src/UtilTransactions.cpp
diff --git a/ndb/test/src/getarg.c b/storage/ndb/test/src/getarg.c
index 5b67eb6343d..5b67eb6343d 100644
--- a/ndb/test/src/getarg.c
+++ b/storage/ndb/test/src/getarg.c
diff --git a/storage/ndb/test/tools/Makefile.am b/storage/ndb/test/tools/Makefile.am
new file mode 100644
index 00000000000..a7ab7669052
--- /dev/null
+++ b/storage/ndb/test/tools/Makefile.am
@@ -0,0 +1,30 @@
+
+ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index ndb_cpcc
+
+# transproxy
+
+hugoFill_SOURCES = hugoFill.cpp
+hugoLoad_SOURCES = hugoLoad.cpp
+hugoLockRecords_SOURCES = hugoLockRecords.cpp
+hugoPkDelete_SOURCES = hugoPkDelete.cpp
+hugoPkRead_SOURCES = hugoPkRead.cpp
+hugoPkReadRecord_SOURCES = hugoPkReadRecord.cpp
+hugoPkUpdate_SOURCES = hugoPkUpdate.cpp
+hugoScanRead_SOURCES = hugoScanRead.cpp
+hugoScanUpdate_SOURCES = hugoScanUpdate.cpp
+restart_SOURCES = restart.cpp
+# transproxy_SOURCES = transproxy.cpp
+verify_index_SOURCES = verify_index.cpp
+copy_tab_SOURCES = copy_tab.cpp
+create_index_SOURCES = create_index.cpp
+ndb_cpcc_SOURCES = cpcc.cpp
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am
+
+ndb_cpcc_LDADD = $(LDADD)
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp:
diff --git a/ndb/test/tools/copy_tab.cpp b/storage/ndb/test/tools/copy_tab.cpp
index 97370b170ef..97370b170ef 100644
--- a/ndb/test/tools/copy_tab.cpp
+++ b/storage/ndb/test/tools/copy_tab.cpp
diff --git a/ndb/test/tools/cpcc.cpp b/storage/ndb/test/tools/cpcc.cpp
index dd59e577f2c..dd59e577f2c 100644
--- a/ndb/test/tools/cpcc.cpp
+++ b/storage/ndb/test/tools/cpcc.cpp
diff --git a/ndb/test/tools/create_index.cpp b/storage/ndb/test/tools/create_index.cpp
index 9f9c26aa0da..9f9c26aa0da 100644
--- a/ndb/test/tools/create_index.cpp
+++ b/storage/ndb/test/tools/create_index.cpp
diff --git a/ndb/test/tools/hugoCalculator.cpp b/storage/ndb/test/tools/hugoCalculator.cpp
index 82c4bbff1a4..82c4bbff1a4 100644
--- a/ndb/test/tools/hugoCalculator.cpp
+++ b/storage/ndb/test/tools/hugoCalculator.cpp
diff --git a/ndb/test/tools/hugoFill.cpp b/storage/ndb/test/tools/hugoFill.cpp
index 6408b2987f9..6408b2987f9 100644
--- a/ndb/test/tools/hugoFill.cpp
+++ b/storage/ndb/test/tools/hugoFill.cpp
diff --git a/ndb/test/tools/hugoLoad.cpp b/storage/ndb/test/tools/hugoLoad.cpp
index 1a229169650..1a229169650 100644
--- a/ndb/test/tools/hugoLoad.cpp
+++ b/storage/ndb/test/tools/hugoLoad.cpp
diff --git a/ndb/test/tools/hugoLockRecords.cpp b/storage/ndb/test/tools/hugoLockRecords.cpp
index c0d0b9f9c5a..c0d0b9f9c5a 100644
--- a/ndb/test/tools/hugoLockRecords.cpp
+++ b/storage/ndb/test/tools/hugoLockRecords.cpp
diff --git a/ndb/test/tools/hugoPkDelete.cpp b/storage/ndb/test/tools/hugoPkDelete.cpp
index 84e7ded0add..84e7ded0add 100644
--- a/ndb/test/tools/hugoPkDelete.cpp
+++ b/storage/ndb/test/tools/hugoPkDelete.cpp
diff --git a/ndb/test/tools/hugoPkRead.cpp b/storage/ndb/test/tools/hugoPkRead.cpp
index e3702dc5ca1..e3702dc5ca1 100644
--- a/ndb/test/tools/hugoPkRead.cpp
+++ b/storage/ndb/test/tools/hugoPkRead.cpp
diff --git a/ndb/test/tools/hugoPkReadRecord.cpp b/storage/ndb/test/tools/hugoPkReadRecord.cpp
index c60a994c7d4..c60a994c7d4 100644
--- a/ndb/test/tools/hugoPkReadRecord.cpp
+++ b/storage/ndb/test/tools/hugoPkReadRecord.cpp
diff --git a/ndb/test/tools/hugoPkUpdate.cpp b/storage/ndb/test/tools/hugoPkUpdate.cpp
index 7d46ae95c29..7d46ae95c29 100644
--- a/ndb/test/tools/hugoPkUpdate.cpp
+++ b/storage/ndb/test/tools/hugoPkUpdate.cpp
diff --git a/ndb/test/tools/hugoScanRead.cpp b/storage/ndb/test/tools/hugoScanRead.cpp
index a345bb88d0e..a345bb88d0e 100644
--- a/ndb/test/tools/hugoScanRead.cpp
+++ b/storage/ndb/test/tools/hugoScanRead.cpp
diff --git a/ndb/test/tools/hugoScanUpdate.cpp b/storage/ndb/test/tools/hugoScanUpdate.cpp
index 6960fa44b96..6960fa44b96 100644
--- a/ndb/test/tools/hugoScanUpdate.cpp
+++ b/storage/ndb/test/tools/hugoScanUpdate.cpp
diff --git a/ndb/test/tools/old_dirs/hugoCalculator/Makefile b/storage/ndb/test/tools/old_dirs/hugoCalculator/Makefile
index a29deeaacd3..a29deeaacd3 100644
--- a/ndb/test/tools/old_dirs/hugoCalculator/Makefile
+++ b/storage/ndb/test/tools/old_dirs/hugoCalculator/Makefile
diff --git a/ndb/test/tools/old_dirs/hugoFill/Makefile b/storage/ndb/test/tools/old_dirs/hugoFill/Makefile
index 3da745810b6..3da745810b6 100644
--- a/ndb/test/tools/old_dirs/hugoFill/Makefile
+++ b/storage/ndb/test/tools/old_dirs/hugoFill/Makefile
diff --git a/ndb/test/tools/old_dirs/hugoLoad/Makefile b/storage/ndb/test/tools/old_dirs/hugoLoad/Makefile
index 7c5756d0d41..7c5756d0d41 100644
--- a/ndb/test/tools/old_dirs/hugoLoad/Makefile
+++ b/storage/ndb/test/tools/old_dirs/hugoLoad/Makefile
diff --git a/ndb/test/tools/old_dirs/hugoLockRecords/Makefile b/storage/ndb/test/tools/old_dirs/hugoLockRecords/Makefile
index 3235750cbf8..3235750cbf8 100644
--- a/ndb/test/tools/old_dirs/hugoLockRecords/Makefile
+++ b/storage/ndb/test/tools/old_dirs/hugoLockRecords/Makefile
diff --git a/ndb/test/tools/old_dirs/hugoPkDelete/Makefile b/storage/ndb/test/tools/old_dirs/hugoPkDelete/Makefile
index e6d53611c54..e6d53611c54 100644
--- a/ndb/test/tools/old_dirs/hugoPkDelete/Makefile
+++ b/storage/ndb/test/tools/old_dirs/hugoPkDelete/Makefile
diff --git a/ndb/test/tools/old_dirs/hugoPkRead/Makefile b/storage/ndb/test/tools/old_dirs/hugoPkRead/Makefile
index 03580dc0d18..03580dc0d18 100644
--- a/ndb/test/tools/old_dirs/hugoPkRead/Makefile
+++ b/storage/ndb/test/tools/old_dirs/hugoPkRead/Makefile
diff --git a/ndb/test/tools/old_dirs/hugoPkReadRecord/Makefile b/storage/ndb/test/tools/old_dirs/hugoPkReadRecord/Makefile
index 158a79a5666..158a79a5666 100644
--- a/ndb/test/tools/old_dirs/hugoPkReadRecord/Makefile
+++ b/storage/ndb/test/tools/old_dirs/hugoPkReadRecord/Makefile
diff --git a/ndb/test/tools/old_dirs/hugoPkUpdate/Makefile b/storage/ndb/test/tools/old_dirs/hugoPkUpdate/Makefile
index 48795b62206..48795b62206 100644
--- a/ndb/test/tools/old_dirs/hugoPkUpdate/Makefile
+++ b/storage/ndb/test/tools/old_dirs/hugoPkUpdate/Makefile
diff --git a/ndb/test/tools/old_dirs/hugoScanRead/Makefile b/storage/ndb/test/tools/old_dirs/hugoScanRead/Makefile
index b88377c299e..b88377c299e 100644
--- a/ndb/test/tools/old_dirs/hugoScanRead/Makefile
+++ b/storage/ndb/test/tools/old_dirs/hugoScanRead/Makefile
diff --git a/ndb/test/tools/old_dirs/hugoScanUpdate/Makefile b/storage/ndb/test/tools/old_dirs/hugoScanUpdate/Makefile
index ec0e07bfd84..ec0e07bfd84 100644
--- a/ndb/test/tools/old_dirs/hugoScanUpdate/Makefile
+++ b/storage/ndb/test/tools/old_dirs/hugoScanUpdate/Makefile
diff --git a/ndb/test/tools/old_dirs/restart/Makefile b/storage/ndb/test/tools/old_dirs/restart/Makefile
index 05d9e98c5bc..05d9e98c5bc 100644
--- a/ndb/test/tools/old_dirs/restart/Makefile
+++ b/storage/ndb/test/tools/old_dirs/restart/Makefile
diff --git a/ndb/test/tools/old_dirs/transproxy/Makefile b/storage/ndb/test/tools/old_dirs/transproxy/Makefile
index d6a76ed2e3d..d6a76ed2e3d 100644
--- a/ndb/test/tools/old_dirs/transproxy/Makefile
+++ b/storage/ndb/test/tools/old_dirs/transproxy/Makefile
diff --git a/ndb/test/tools/old_dirs/verify_index/Makefile b/storage/ndb/test/tools/old_dirs/verify_index/Makefile
index f6b31e4dc8e..f6b31e4dc8e 100644
--- a/ndb/test/tools/old_dirs/verify_index/Makefile
+++ b/storage/ndb/test/tools/old_dirs/verify_index/Makefile
diff --git a/ndb/test/tools/old_dirs/waiter/waiter.cpp b/storage/ndb/test/tools/old_dirs/waiter/waiter.cpp
index d57daff3aea..d57daff3aea 100644
--- a/ndb/test/tools/old_dirs/waiter/waiter.cpp
+++ b/storage/ndb/test/tools/old_dirs/waiter/waiter.cpp
diff --git a/ndb/test/tools/restart.cpp b/storage/ndb/test/tools/restart.cpp
index 9ad20801fd7..9ad20801fd7 100644
--- a/ndb/test/tools/restart.cpp
+++ b/storage/ndb/test/tools/restart.cpp
diff --git a/ndb/test/tools/transproxy.cpp b/storage/ndb/test/tools/transproxy.cpp
index 28a621fa584..28a621fa584 100644
--- a/ndb/test/tools/transproxy.cpp
+++ b/storage/ndb/test/tools/transproxy.cpp
diff --git a/ndb/test/tools/verify_index.cpp b/storage/ndb/test/tools/verify_index.cpp
index acc97af883b..acc97af883b 100644
--- a/ndb/test/tools/verify_index.cpp
+++ b/storage/ndb/test/tools/verify_index.cpp
diff --git a/storage/ndb/tools/Makefile.am b/storage/ndb/tools/Makefile.am
new file mode 100644
index 00000000000..db6037df037
--- /dev/null
+++ b/storage/ndb/tools/Makefile.am
@@ -0,0 +1,157 @@
+
+ndbtools_PROGRAMS = \
+ ndb_test_platform \
+ ndb_waiter \
+ ndb_drop_table \
+ ndb_delete_all \
+ ndb_desc \
+ ndb_drop_index \
+ ndb_show_tables \
+ ndb_select_all \
+ ndb_select_count \
+ ndb_restore
+
+tools_common_sources = ../test/src/NDBT_ReturnCodes.cpp \
+ ../test/src/NDBT_Table.cpp \
+ ../test/src/NDBT_Output.cpp
+
+ndb_test_platform_SOURCES = ndb_test_platform.cpp
+ndb_waiter_SOURCES = waiter.cpp $(tools_common_sources)
+ndb_delete_all_SOURCES = delete_all.cpp $(tools_common_sources)
+ndb_desc_SOURCES = desc.cpp $(tools_common_sources)
+ndb_drop_index_SOURCES = drop_index.cpp $(tools_common_sources)
+ndb_drop_table_SOURCES = drop_tab.cpp $(tools_common_sources)
+ndb_show_tables_SOURCES = listTables.cpp $(tools_common_sources)
+ndb_select_all_SOURCES = select_all.cpp \
+ ../test/src/NDBT_ResultRow.cpp \
+ $(tools_common_sources)
+ndb_select_count_SOURCES = select_count.cpp $(tools_common_sources)
+ndb_restore_SOURCES = restore/restore_main.cpp \
+ restore/consumer.cpp \
+ restore/consumer_restore.cpp \
+ restore/consumer_printer.cpp \
+ restore/Restore.cpp \
+ ../test/src/NDBT_ResultRow.cpp $(tools_common_sources)
+
+include $(top_srcdir)/storage/ndb/config/common.mk.am
+include $(top_srcdir)/storage/ndb/config/type_ndbapitools.mk.am
+
+ndb_test_platform_LDFLAGS = @ndb_bin_am_ldflags@
+ndb_waiter_LDFLAGS = @ndb_bin_am_ldflags@
+ndb_drop_table_LDFLAGS = @ndb_bin_am_ldflags@
+ndb_delete_all_LDFLAGS = @ndb_bin_am_ldflags@
+ndb_desc_LDFLAGS = @ndb_bin_am_ldflags@
+ndb_drop_index_LDFLAGS = @ndb_bin_am_ldflags@
+ndb_show_tables_LDFLAGS = @ndb_bin_am_ldflags@
+ndb_select_all_LDFLAGS = @ndb_bin_am_ldflags@
+ndb_select_count_LDFLAGS = @ndb_bin_am_ldflags@
+ndb_restore_LDFLAGS = @ndb_bin_am_ldflags@
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
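+# The windoze-dsp targets below generate Visual Studio .dsp project files for each tool, using the win-* helper scripts under storage/ndb/config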
+windoze-dsp: \
+ ndb_waiter.dsp \
+ ndb_drop_table.dsp \
+ ndb_delete_all.dsp \
+ ndb_desc.dsp \
+ ndb_drop_index.dsp \
+ ndb_show_tables.dsp \
+ ndb_select_all.dsp \
+ ndb_select_count.dsp
+
+ndb_waiter.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ ndb_waiter
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(ndb_waiter_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
+
+ndb_drop_table.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ ndb_drop_table
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(ndb_drop_table_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
+
+ndb_delete_all.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ ndb_delete_all
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(ndb_delete_all_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
+
+ndb_desc.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ ndb_desc
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(ndb_desc_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
+
+ndb_drop_index.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ ndb_drop_index
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(ndb_drop_index_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
+
+ndb_show_tables.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ ndb_show_tables
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(ndb_show_tables_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
+
+ndb_select_all.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ ndb_select_all
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(ndb_select_all_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
+
+ndb_select_count.dsp: Makefile \
+ $(top_srcdir)/storage/ndb/config/win-prg.am \
+ $(top_srcdir)/storage/ndb/config/win-name \
+ $(top_srcdir)/storage/ndb/config/win-includes \
+ $(top_srcdir)/storage/ndb/config/win-sources \
+ $(top_srcdir)/storage/ndb/config/win-libraries
+ cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@
+ @$(top_srcdir)/storage/ndb/config/win-name $@ ndb_select_count
+ @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/storage/ndb/config/win-sources $@ $(ndb_select_count_SOURCES)
+ @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD)
diff --git a/ndb/tools/clean-links.sh b/storage/ndb/tools/clean-links.sh
index 01820f30616..01820f30616 100755
--- a/ndb/tools/clean-links.sh
+++ b/storage/ndb/tools/clean-links.sh
diff --git a/ndb/tools/delete_all.cpp b/storage/ndb/tools/delete_all.cpp
index 2c395a67900..2c395a67900 100644
--- a/ndb/tools/delete_all.cpp
+++ b/storage/ndb/tools/delete_all.cpp
diff --git a/ndb/tools/desc.cpp b/storage/ndb/tools/desc.cpp
index be0f6942db5..be0f6942db5 100644
--- a/ndb/tools/desc.cpp
+++ b/storage/ndb/tools/desc.cpp
diff --git a/ndb/tools/drop_index.cpp b/storage/ndb/tools/drop_index.cpp
index e2bf7f0bfae..e2bf7f0bfae 100644
--- a/ndb/tools/drop_index.cpp
+++ b/storage/ndb/tools/drop_index.cpp
diff --git a/ndb/tools/drop_tab.cpp b/storage/ndb/tools/drop_tab.cpp
index 991e1505486..991e1505486 100644
--- a/ndb/tools/drop_tab.cpp
+++ b/storage/ndb/tools/drop_tab.cpp
diff --git a/ndb/tools/listTables.cpp b/storage/ndb/tools/listTables.cpp
index b4a2235f73b..b4a2235f73b 100644
--- a/ndb/tools/listTables.cpp
+++ b/storage/ndb/tools/listTables.cpp
diff --git a/ndb/tools/make-errors.pl b/storage/ndb/tools/make-errors.pl
index 65819209a89..65819209a89 100644
--- a/ndb/tools/make-errors.pl
+++ b/storage/ndb/tools/make-errors.pl
diff --git a/ndb/tools/make-links.sh b/storage/ndb/tools/make-links.sh
index e0c4f55986e..e0c4f55986e 100755
--- a/ndb/tools/make-links.sh
+++ b/storage/ndb/tools/make-links.sh
diff --git a/ndb/tools/ndb_test_platform.cpp b/storage/ndb/tools/ndb_test_platform.cpp
index 88f21b31d58..88f21b31d58 100644
--- a/ndb/tools/ndb_test_platform.cpp
+++ b/storage/ndb/tools/ndb_test_platform.cpp
diff --git a/ndb/tools/ndbsql.cpp b/storage/ndb/tools/ndbsql.cpp
index 1997e4abebd..1997e4abebd 100644
--- a/ndb/tools/ndbsql.cpp
+++ b/storage/ndb/tools/ndbsql.cpp
diff --git a/ndb/tools/old_dirs/copy_tab/Makefile b/storage/ndb/tools/old_dirs/copy_tab/Makefile
index 4ad33a26652..4ad33a26652 100644
--- a/ndb/tools/old_dirs/copy_tab/Makefile
+++ b/storage/ndb/tools/old_dirs/copy_tab/Makefile
diff --git a/ndb/tools/old_dirs/cpcc/Makefile b/storage/ndb/tools/old_dirs/cpcc/Makefile
index 78f8c61e464..78f8c61e464 100644
--- a/ndb/tools/old_dirs/cpcc/Makefile
+++ b/storage/ndb/tools/old_dirs/cpcc/Makefile
diff --git a/ndb/tools/old_dirs/create_index/Makefile b/storage/ndb/tools/old_dirs/create_index/Makefile
index 38f2df970c4..38f2df970c4 100644
--- a/ndb/tools/old_dirs/create_index/Makefile
+++ b/storage/ndb/tools/old_dirs/create_index/Makefile
diff --git a/ndb/tools/old_dirs/delete_all/Makefile b/storage/ndb/tools/old_dirs/delete_all/Makefile
index 1cae240eb8f..1cae240eb8f 100644
--- a/ndb/tools/old_dirs/delete_all/Makefile
+++ b/storage/ndb/tools/old_dirs/delete_all/Makefile
diff --git a/ndb/tools/old_dirs/desc/Makefile b/storage/ndb/tools/old_dirs/desc/Makefile
index 614984cfd35..614984cfd35 100644
--- a/ndb/tools/old_dirs/desc/Makefile
+++ b/storage/ndb/tools/old_dirs/desc/Makefile
diff --git a/ndb/tools/old_dirs/drop_index/Makefile b/storage/ndb/tools/old_dirs/drop_index/Makefile
index 969bee51064..969bee51064 100644
--- a/ndb/tools/old_dirs/drop_index/Makefile
+++ b/storage/ndb/tools/old_dirs/drop_index/Makefile
diff --git a/ndb/tools/old_dirs/drop_tab/Makefile b/storage/ndb/tools/old_dirs/drop_tab/Makefile
index d7b21fe982c..d7b21fe982c 100644
--- a/ndb/tools/old_dirs/drop_tab/Makefile
+++ b/storage/ndb/tools/old_dirs/drop_tab/Makefile
diff --git a/ndb/tools/old_dirs/list_tables/Makefile b/storage/ndb/tools/old_dirs/list_tables/Makefile
index b60f161ee68..b60f161ee68 100644
--- a/ndb/tools/old_dirs/list_tables/Makefile
+++ b/storage/ndb/tools/old_dirs/list_tables/Makefile
diff --git a/ndb/tools/old_dirs/ndbnet/Makefile.PL b/storage/ndb/tools/old_dirs/ndbnet/Makefile.PL
index 4b27a17de15..4b27a17de15 100644
--- a/ndb/tools/old_dirs/ndbnet/Makefile.PL
+++ b/storage/ndb/tools/old_dirs/ndbnet/Makefile.PL
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net.pm
index 3b7b16bb3cf..3b7b16bb3cf 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Base.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Base.pm
index 900446138e8..900446138e8 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Base.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Base.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Client.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Client.pm
index d34a18d63af..d34a18d63af 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Client.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Client.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Command.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Command.pm
index 30145d09fa9..30145d09fa9 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Command.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Command.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Config.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Config.pm
index 4c5db3cd3f5..4c5db3cd3f5 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Config.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Config.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Database.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Database.pm
index 7ea15be0650..7ea15be0650 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Database.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Database.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Env.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Env.pm
index d79e72f2bb3..d79e72f2bb3 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Env.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Env.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Node.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Node.pm
index f41bf51168d..f41bf51168d 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Node.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Node.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeApi.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeApi.pm
index 08f5f85577d..08f5f85577d 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeApi.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeApi.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeDb.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeDb.pm
index 88a35ba4f8d..88a35ba4f8d 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeDb.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeDb.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeMgmt.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeMgmt.pm
index 1056e3df623..1056e3df623 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeMgmt.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeMgmt.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Server.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Server.pm
index 5d2118f0ffe..5d2118f0ffe 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Server.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Server.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerINET.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerINET.pm
index a065c186855..a065c186855 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerINET.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerINET.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerUNIX.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerUNIX.pm
index b3fa245d5ee..b3fa245d5ee 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerUNIX.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerUNIX.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Run.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run.pm
index a8cabde544c..a8cabde544c 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Run.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Base.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Base.pm
index 4769f2c4441..4769f2c4441 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Base.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Base.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Database.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Database.pm
index 9a12ddb20b3..9a12ddb20b3 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Database.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Database.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Env.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Env.pm
index e851a82636b..e851a82636b 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Env.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Env.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Node.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Node.pm
index e657021b229..e657021b229 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Node.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Node.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util.pm
index d5db35cbf13..d5db35cbf13 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Base.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Base.pm
index 20df78a3b9b..20df78a3b9b 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Base.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Base.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Dir.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Dir.pm
index 90609b971c7..90609b971c7 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Dir.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Dir.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Event.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Event.pm
index a3ad32cd7fb..a3ad32cd7fb 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Event.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Event.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/File.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/File.pm
index 4b3cb38191c..4b3cb38191c 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/File.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/File.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/IO.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/IO.pm
index 34f4d0a150d..34f4d0a150d 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/IO.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/IO.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Lock.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Lock.pm
index b515e633059..b515e633059 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Lock.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Lock.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Log.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Log.pm
index 44b39df84e6..44b39df84e6 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Log.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Log.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Socket.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Socket.pm
index 00e8b6eca51..00e8b6eca51 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Socket.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Socket.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketINET.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketINET.pm
index faaa568a08e..faaa568a08e 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketINET.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketINET.pm
diff --git a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketUNIX.pm b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketUNIX.pm
index 9c6b3115f6a..9c6b3115f6a 100644
--- a/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketUNIX.pm
+++ b/storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketUNIX.pm
diff --git a/ndb/tools/old_dirs/ndbnet/ndbnet.pl b/storage/ndb/tools/old_dirs/ndbnet/ndbnet.pl
index 5f6648da46d..5f6648da46d 100644
--- a/ndb/tools/old_dirs/ndbnet/ndbnet.pl
+++ b/storage/ndb/tools/old_dirs/ndbnet/ndbnet.pl
diff --git a/ndb/tools/old_dirs/ndbnet/ndbnetd.pl b/storage/ndb/tools/old_dirs/ndbnet/ndbnetd.pl
index 95fa5322abc..95fa5322abc 100644
--- a/ndb/tools/old_dirs/ndbnet/ndbnetd.pl
+++ b/storage/ndb/tools/old_dirs/ndbnet/ndbnetd.pl
diff --git a/ndb/tools/old_dirs/ndbnet/ndbrun b/storage/ndb/tools/old_dirs/ndbnet/ndbrun
index 99121276d99..99121276d99 100644
--- a/ndb/tools/old_dirs/ndbnet/ndbrun
+++ b/storage/ndb/tools/old_dirs/ndbnet/ndbrun
diff --git a/ndb/tools/old_dirs/ndbsql/Makefile b/storage/ndb/tools/old_dirs/ndbsql/Makefile
index 81ca87b0414..81ca87b0414 100644
--- a/ndb/tools/old_dirs/ndbsql/Makefile
+++ b/storage/ndb/tools/old_dirs/ndbsql/Makefile
diff --git a/ndb/tools/old_dirs/select_all/Makefile b/storage/ndb/tools/old_dirs/select_all/Makefile
index e14e411b3a5..e14e411b3a5 100644
--- a/ndb/tools/old_dirs/select_all/Makefile
+++ b/storage/ndb/tools/old_dirs/select_all/Makefile
diff --git a/ndb/tools/old_dirs/select_count/Makefile b/storage/ndb/tools/old_dirs/select_count/Makefile
index 35a53c6b046..35a53c6b046 100644
--- a/ndb/tools/old_dirs/select_count/Makefile
+++ b/storage/ndb/tools/old_dirs/select_count/Makefile
diff --git a/ndb/tools/old_dirs/src/counterviewer/CounterViewer.java b/storage/ndb/tools/old_dirs/src/counterviewer/CounterViewer.java
index 317c1c75e28..317c1c75e28 100644
--- a/ndb/tools/old_dirs/src/counterviewer/CounterViewer.java
+++ b/storage/ndb/tools/old_dirs/src/counterviewer/CounterViewer.java
diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp
new file mode 100644
index 00000000000..18edb489d77
--- /dev/null
+++ b/storage/ndb/tools/restore/Restore.cpp
@@ -0,0 +1,941 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "Restore.hpp"
+#include <NdbTCP.h>
+#include <OutputStream.hpp>
+#include <Bitmask.hpp>
+
+#include <AttributeHeader.hpp>
+#include <trigger_definitions.h>
+#include <SimpleProperties.hpp>
+#include <signaldata/DictTabInfo.hpp>
+
+Uint16 Twiddle16(Uint16 in); // Byte swap 16-bit data
+Uint32 Twiddle32(Uint32 in); // Byte swap 32-bit data
+Uint64 Twiddle64(Uint64 in); // Byte swap 64-bit data
+
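+// Swap attribute data from the backup file's byte order to the host byte order; a no-op when the file was written in host byte order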
+bool
+BackupFile::Twiddle(const AttributeDesc* attr_desc, AttributeData* attr_data, Uint32 arraySize){
+ Uint32 i;
+
+ if(m_hostByteOrder)
+ return true;
+
+ if(arraySize == 0){
+ arraySize = attr_desc->arraySize;
+ }
+
+ switch(attr_desc->size){
+ case 8:
+
+ return true;
+ case 16:
+ for(i = 0; i<arraySize; i++){
+ attr_data->u_int16_value[i] = Twiddle16(attr_data->u_int16_value[i]);
+ }
+ return true;
+ case 32:
+ for(i = 0; i<arraySize; i++){
+ attr_data->u_int32_value[i] = Twiddle32(attr_data->u_int32_value[i]);
+ }
+ return true;
+ case 64:
+ for(i = 0; i<arraySize; i++){
+ attr_data->u_int64_value[i] = Twiddle64(attr_data->u_int64_value[i]);
+ }
+ return true;
+ default:
+ return false;
+ } // switch
+
+} // Twiddle
+
+FilteredNdbOut err(* new FileOutputStream(stderr), 0, 0);
+FilteredNdbOut info(* new FileOutputStream(stdout), 1, 1);
+FilteredNdbOut debug(* new FileOutputStream(stdout), 2, 0);
+
+// To decide in what byte order data is
+const Uint32 magicByteOrder = 0x12345678;
+const Uint32 swappedMagicByteOrder = 0x78563412;
+
+RestoreMetaData::RestoreMetaData(const char* path, Uint32 nodeId, Uint32 bNo) {
+
+ debug << "RestoreMetaData constructor" << endl;
+ setCtlFile(nodeId, bNo, path);
+}
+
+RestoreMetaData::~RestoreMetaData(){
+ for(Uint32 i= 0; i < allTables.size(); i++)
+ delete allTables[i];
+ allTables.clear();
+}
+
+TableS *
+RestoreMetaData::getTable(Uint32 tableId) const {
+ for(Uint32 i= 0; i < allTables.size(); i++)
+ if(allTables[i]->getTableId() == tableId)
+ return allTables[i];
+ return NULL;
+}
+
+Uint32
+RestoreMetaData::getStopGCP() const {
+ return m_stopGCP;
+}
+
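+// Load the backup meta data from the .ctl file: the table list, one descriptor per table, and the GCP entry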
+int
+RestoreMetaData::loadContent()
+{
+ Uint32 noOfTables = readMetaTableList();
+ if(noOfTables == 0) {
+ return 1;
+ }
+ for(Uint32 i = 0; i<noOfTables; i++){
+ if(!readMetaTableDesc()){
+ return 0;
+ }
+ }
+ if(!readGCPEntry())
+ return 0;
+ return 1;
+}
+
+Uint32
+RestoreMetaData::readMetaTableList() {
+
+ Uint32 sectionInfo[2];
+
+ if (buffer_read(&sectionInfo, sizeof(sectionInfo), 1) != 1){
+ err << "readMetaTableList read header error" << endl;
+ return 0;
+ }
+ sectionInfo[0] = ntohl(sectionInfo[0]);
+ sectionInfo[1] = ntohl(sectionInfo[1]);
+
+ const Uint32 tabCount = sectionInfo[1] - 2;
+
+ void *tmp;
+ if (buffer_get_ptr(&tmp, 4, tabCount) != tabCount){
+ err << "readMetaTableList read tabCount error" << endl;
+ return 0;
+ }
+
+ return tabCount;
+}
+
+bool
+RestoreMetaData::readMetaTableDesc() {
+
+ Uint32 sectionInfo[2];
+
+ // Read section header
+ if (buffer_read(&sectionInfo, sizeof(sectionInfo), 1) != 1){
+ err << "readMetaTableDesc read header error" << endl;
+ return false;
+ } // if
+ sectionInfo[0] = ntohl(sectionInfo[0]);
+ sectionInfo[1] = ntohl(sectionInfo[1]);
+
+ assert(sectionInfo[0] == BackupFormat::TABLE_DESCRIPTION);
+
+ // Read dictTabInfo buffer
+ const Uint32 len = (sectionInfo[1] - 2);
+ void *ptr;
+ if (buffer_get_ptr(&ptr, 4, len) != len){
+ err << "readMetaTableDesc read error" << endl;
+ return false;
+ } // if
+
+ return parseTableDescriptor((Uint32*)ptr, len);
+}
+
+bool
+RestoreMetaData::readGCPEntry() {
+
+ Uint32 data[4];
+
+ BackupFormat::CtlFile::GCPEntry * dst =
+ (BackupFormat::CtlFile::GCPEntry *)&data[0];
+
+ if(buffer_read(dst, 4, 4) != 4){
+ err << "readGCPEntry read error" << endl;
+ return false;
+ }
+
+ dst->SectionType = ntohl(dst->SectionType);
+ dst->SectionLength = ntohl(dst->SectionLength);
+
+ if(dst->SectionType != BackupFormat::GCP_ENTRY){
+ err << "readGCPEntry invalid format" << endl;
+ return false;
+ }
+
+ dst->StartGCP = ntohl(dst->StartGCP);
+ dst->StopGCP = ntohl(dst->StopGCP);
+
+ m_startGCP = dst->StartGCP;
+ m_stopGCP = dst->StopGCP;
+ return true;
+}
+
+TableS::TableS(Uint32 version, NdbTableImpl* tableImpl)
+ : m_dictTable(tableImpl)
+{
+ m_dictTable = tableImpl;
+ m_noOfNullable = m_nullBitmaskSize = 0;
+ m_auto_val_id= ~(Uint32)0;
+ m_max_auto_val= 0;
+ backupVersion = version;
+
+ for (int i = 0; i < tableImpl->getNoOfColumns(); i++)
+ createAttr(tableImpl->getColumn(i));
+}
+
+TableS::~TableS()
+{
+ for (Uint32 i= 0; i < allAttributesDesc.size(); i++)
+ delete allAttributesDesc[i];
+}
+
+// Parse the dictTabInfo buffer and push the resulting table onto the vector storage
+bool
+RestoreMetaData::parseTableDescriptor(const Uint32 * data, Uint32 len)
+{
+ NdbTableImpl* tableImpl = 0;
+ int ret = NdbDictInterface::parseTableInfo(&tableImpl, data, len, false);
+
+ if (ret != 0) {
+ err << "parseTableInfo " << " failed" << endl;
+ return false;
+ }
+ if(tableImpl == 0)
+ return false;
+
+ debug << "parseTableInfo " << tableImpl->getName() << " done" << endl;
+ tableImpl->m_ng.clear();
+ tableImpl->m_fragmentType = NdbDictionary::Object::FragAllSmall;
+ TableS * table = new TableS(m_fileHeader.NdbVersion, tableImpl);
+ if(table == NULL) {
+ return false;
+ }
+
+ debug << "Parsed table id " << table->getTableId() << endl;
+ debug << "Parsed table #attr " << table->getNoOfAttributes() << endl;
+ debug << "Parsed table schema version not used " << endl;
+
+ debug << "Pushing table " << table->getTableName() << endl;
+ debug << " with " << table->getNoOfAttributes() << " attributes" << endl;
+
+ allTables.push_back(table);
+
+ return true;
+}
+
+// Constructor
+RestoreDataIterator::RestoreDataIterator(const RestoreMetaData & md, void (* _free_data_callback)())
+ : BackupFile(_free_data_callback), m_metaData(md)
+{
+ debug << "RestoreDataIterator constructor" << endl;
+ setDataFile(md, 0);
+}
+
+TupleS & TupleS::operator=(const TupleS& tuple)
+{
+ prepareRecord(*tuple.m_currentTable);
+
+ if (allAttrData)
+ memcpy(allAttrData, tuple.allAttrData, getNoOfAttributes()*sizeof(AttributeData));
+
+ return *this;
+}
+int TupleS::getNoOfAttributes() const {
+ if (m_currentTable == 0)
+ return 0;
+ return m_currentTable->getNoOfAttributes();
+}
+
+TableS * TupleS::getTable() const {
+ return m_currentTable;
+}
+
+const AttributeDesc * TupleS::getDesc(int i) const {
+ return m_currentTable->allAttributesDesc[i];
+}
+
+AttributeData * TupleS::getData(int i) const{
+ return &(allAttrData[i]);
+}
+
+bool
+TupleS::prepareRecord(TableS & tab){
+ if (allAttrData) {
+ if (getNoOfAttributes() == tab.getNoOfAttributes())
+ {
+ m_currentTable = &tab;
+ return true;
+ }
+ delete [] allAttrData;
+ m_currentTable= 0;
+ }
+
+ allAttrData = new AttributeData[tab.getNoOfAttributes()];
+ if (allAttrData == 0)
+ return false;
+
+ m_currentTable = &tab;
+
+ return true;
+}
+
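+// Read the next tuple from the data file: the null bitmask comes first, then fixed keys and fixed attributes in order, then variable/nullable attributes each preceded by a size/id header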
+const TupleS *
+RestoreDataIterator::getNextTuple(int & res)
+{
+ Uint32 dataLength = 0;
+ // Read record length
+ if (buffer_read(&dataLength, sizeof(dataLength), 1) != 1){
+ err << "getNextTuple:Error reading length of data part" << endl;
+ res = -1;
+ return NULL;
+ } // if
+
+ // Convert length from network byte order
+ dataLength = ntohl(dataLength);
+ const Uint32 dataLenBytes = 4 * dataLength;
+
+ if (dataLength == 0) {
+ // Zero length for last tuple
+ // End of this data fragment
+ debug << "End of fragment" << endl;
+ res = 0;
+ return NULL;
+ } // if
+
+ // Read tuple data
+ void *_buf_ptr;
+ if (buffer_get_ptr(&_buf_ptr, 1, dataLenBytes) != dataLenBytes) {
+ err << "getNextTuple:Read error: " << endl;
+ res = -1;
+ return NULL;
+ }
+
+ Uint32 *buf_ptr = (Uint32*)_buf_ptr, *ptr = buf_ptr;
+ ptr += m_currentTable->m_nullBitmaskSize;
+ Uint32 i;
+ for(i= 0; i < m_currentTable->m_fixedKeys.size(); i++){
+ assert(ptr < buf_ptr + dataLength);
+
+ const Uint32 attrId = m_currentTable->m_fixedKeys[i]->attrId;
+
+ AttributeData * attr_data = m_tuple.getData(attrId);
+ const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
+
+ const Uint32 sz = attr_desc->getSizeInWords();
+
+ attr_data->null = false;
+ attr_data->void_value = ptr;
+
+ if(!Twiddle(attr_desc, attr_data))
+ {
+ res = -1;
+ return NULL;
+ }
+ ptr += sz;
+ }
+
+ for(i = 0; i < m_currentTable->m_fixedAttribs.size(); i++){
+ assert(ptr < buf_ptr + dataLength);
+
+ const Uint32 attrId = m_currentTable->m_fixedAttribs[i]->attrId;
+
+ AttributeData * attr_data = m_tuple.getData(attrId);
+ const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
+
+ const Uint32 sz = attr_desc->getSizeInWords();
+
+ attr_data->null = false;
+ attr_data->void_value = ptr;
+
+ if(!Twiddle(attr_desc, attr_data))
+ {
+ res = -1;
+ return NULL;
+ }
+
+ ptr += sz;
+ }
+
+ for(i = 0; i < m_currentTable->m_variableAttribs.size(); i++){
+ const Uint32 attrId = m_currentTable->m_variableAttribs[i]->attrId;
+
+ AttributeData * attr_data = m_tuple.getData(attrId);
+ const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
+
+ if(attr_desc->m_column->getNullable()){
+ const Uint32 ind = attr_desc->m_nullBitIndex;
+ if(BitmaskImpl::get(m_currentTable->m_nullBitmaskSize,
+ buf_ptr,ind)){
+ attr_data->null = true;
+ attr_data->void_value = NULL;
+ continue;
+ }
+ }
+
+ assert(ptr < buf_ptr + dataLength);
+
+ typedef BackupFormat::DataFile::VariableData VarData;
+ VarData * data = (VarData *)ptr;
+ Uint32 sz = ntohl(data->Sz);
+ Uint32 id = ntohl(data->Id);
+ assert(id == attrId);
+
+ attr_data->null = false;
+ attr_data->void_value = &data->Data[0];
+
+ /**
+ * Compute array size
+ */
+ const Uint32 arraySize = (4 * sz) / (attr_desc->size / 8);
+ assert(arraySize >= attr_desc->arraySize);
+ if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize))
+ {
+ res = -1;
+ return NULL;
+ }
+
+ ptr += (sz + 2);
+ }
+
+ m_count ++;
+ res = 0;
+ return &m_tuple;
+} // RestoreDataIterator::getNextTuple
+
+BackupFile::BackupFile(void (* _free_data_callback)())
+ : free_data_callback(_free_data_callback)
+{
+ m_file = 0;
+ m_path[0] = 0;
+ m_fileName[0] = 0;
+
+ m_buffer_sz = 64*1024;
+ m_buffer = malloc(m_buffer_sz);
+ m_buffer_ptr = m_buffer;
+ m_buffer_data_left = 0;
+}
+
+BackupFile::~BackupFile(){
+ if(m_file != 0)
+ fclose(m_file);
+ if(m_buffer != 0)
+ free(m_buffer);
+}
+
+bool
+BackupFile::openFile(){
+ if(m_file != NULL){
+ fclose(m_file);
+ m_file = 0;
+ }
+
+ m_file = fopen(m_fileName, "r");
+ return m_file != 0;
+}
+
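+// Peek at the next size*nmemb bytes of the buffer without consuming them, refilling from the file when the buffer runs low; returns the number of complete items available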
+Uint32 BackupFile::buffer_get_ptr_ahead(void **p_buf_ptr, Uint32 size, Uint32 nmemb)
+{
+ Uint32 sz = size*nmemb;
+ if (sz > m_buffer_data_left) {
+
+ if (free_data_callback)
+ (*free_data_callback)();
+
+ memcpy(m_buffer, m_buffer_ptr, m_buffer_data_left);
+
+ size_t r = fread(((char *)m_buffer) + m_buffer_data_left, 1, m_buffer_sz - m_buffer_data_left, m_file);
+ m_buffer_data_left += r;
+ m_buffer_ptr = m_buffer;
+
+ if (sz > m_buffer_data_left)
+ sz = size * (m_buffer_data_left / size);
+ }
+
+ *p_buf_ptr = m_buffer_ptr;
+
+ return sz/size;
+}
+Uint32 BackupFile::buffer_get_ptr(void **p_buf_ptr, Uint32 size, Uint32 nmemb)
+{
+ Uint32 r = buffer_get_ptr_ahead(p_buf_ptr, size, nmemb);
+
+ m_buffer_ptr = ((char*)m_buffer_ptr)+(r*size);
+ m_buffer_data_left -= (r*size);
+
+ return r;
+}
+
+Uint32 BackupFile::buffer_read_ahead(void *ptr, Uint32 size, Uint32 nmemb)
+{
+ void *buf_ptr;
+ Uint32 r = buffer_get_ptr_ahead(&buf_ptr, size, nmemb);
+ memcpy(ptr, buf_ptr, r*size);
+
+ return r;
+}
+
+Uint32 BackupFile::buffer_read(void *ptr, Uint32 size, Uint32 nmemb)
+{
+ void *buf_ptr;
+ Uint32 r = buffer_get_ptr(&buf_ptr, size, nmemb);
+ memcpy(ptr, buf_ptr, r*size);
+
+ return r;
+}
+
+void
+BackupFile::setCtlFile(Uint32 nodeId, Uint32 backupId, const char * path){
+ m_nodeId = nodeId;
+ m_expectedFileHeader.BackupId = backupId;
+ m_expectedFileHeader.FileType = BackupFormat::CTL_FILE;
+
+ char name[PATH_MAX]; const Uint32 sz = sizeof(name);
+ BaseString::snprintf(name, sz, "BACKUP-%d.%d.ctl", backupId, nodeId);
+ setName(path, name);
+}
+
+void
+BackupFile::setDataFile(const BackupFile & bf, Uint32 no){
+ m_nodeId = bf.m_nodeId;
+ m_expectedFileHeader = bf.m_fileHeader;
+ m_expectedFileHeader.FileType = BackupFormat::DATA_FILE;
+
+ char name[PATH_MAX]; const Uint32 sz = sizeof(name);
+ BaseString::snprintf(name, sz, "BACKUP-%d-%d.%d.Data",
+ m_expectedFileHeader.BackupId, no, m_nodeId);
+ setName(bf.m_path, name);
+}
+
+void
+BackupFile::setLogFile(const BackupFile & bf, Uint32 no){
+ m_nodeId = bf.m_nodeId;
+ m_expectedFileHeader = bf.m_fileHeader;
+ m_expectedFileHeader.FileType = BackupFormat::LOG_FILE;
+
+ char name[PATH_MAX]; const Uint32 sz = sizeof(name);
+ BaseString::snprintf(name, sz, "BACKUP-%d.%d.log",
+ m_expectedFileHeader.BackupId, m_nodeId);
+ setName(bf.m_path, name);
+}
+
+void
+BackupFile::setName(const char * p, const char * n){
+ const Uint32 sz = sizeof(m_path);
+ if(p != 0 && strlen(p) > 0){
+ if(p[strlen(p)-1] == '/'){
+ BaseString::snprintf(m_path, sz, "%s", p);
+ } else {
+ BaseString::snprintf(m_path, sz, "%s%s", p, "/");
+ }
+ } else {
+ m_path[0] = 0;
+ }
+
+ BaseString::snprintf(m_fileName, sizeof(m_fileName), "%s%s", m_path, n);
+ debug << "Filename = " << m_fileName << endl;
+}
+
+bool
+BackupFile::readHeader(){
+ if(!openFile()){
+ return false;
+ }
+
+ if(buffer_read(&m_fileHeader, sizeof(m_fileHeader), 1) != 1){
+ err << "readDataFileHeader: Error reading header" << endl;
+ return false;
+ }
+
+ // Convert from network to host byte order for platform compatibility
+ m_fileHeader.NdbVersion = ntohl(m_fileHeader.NdbVersion);
+ m_fileHeader.SectionType = ntohl(m_fileHeader.SectionType);
+ m_fileHeader.SectionLength = ntohl(m_fileHeader.SectionLength);
+ m_fileHeader.FileType = ntohl(m_fileHeader.FileType);
+ m_fileHeader.BackupId = ntohl(m_fileHeader.BackupId);
+ m_fileHeader.BackupKey_0 = ntohl(m_fileHeader.BackupKey_0);
+ m_fileHeader.BackupKey_1 = ntohl(m_fileHeader.BackupKey_1);
+
+ debug << "FileHeader: " << m_fileHeader.Magic << " " <<
+ m_fileHeader.NdbVersion << " " <<
+ m_fileHeader.SectionType << " " <<
+ m_fileHeader.SectionLength << " " <<
+ m_fileHeader.FileType << " " <<
+ m_fileHeader.BackupId << " " <<
+ m_fileHeader.BackupKey_0 << " " <<
+ m_fileHeader.BackupKey_1 << " " <<
+ m_fileHeader.ByteOrder << endl;
+
+ debug << "ByteOrder is " << m_fileHeader.ByteOrder << endl;
+ debug << "magicByteOrder is " << magicByteOrder << endl;
+
+ if (m_fileHeader.FileType != m_expectedFileHeader.FileType){
+ abort();
+ }
+
+  // Check BackupFormat::FileHeader::ByteOrder to decide whether byte swapping is needed
+ if (m_fileHeader.ByteOrder == magicByteOrder) {
+ m_hostByteOrder = true;
+ } else if (m_fileHeader.ByteOrder == swappedMagicByteOrder){
+ m_hostByteOrder = false;
+ } else {
+ abort();
+ }
+
+ return true;
+} // BackupFile::readHeader
+
+bool
+BackupFile::validateFooter(){
+ return true;
+}
+
+bool RestoreDataIterator::readFragmentHeader(int & ret)
+{
+ BackupFormat::DataFile::FragmentHeader Header;
+
+ debug << "RestoreDataIterator::getNextFragment" << endl;
+
+ if (buffer_read(&Header, sizeof(Header), 1) != 1){
+ ret = 0;
+ return false;
+ } // if
+
+ Header.SectionType = ntohl(Header.SectionType);
+ Header.SectionLength = ntohl(Header.SectionLength);
+ Header.TableId = ntohl(Header.TableId);
+ Header.FragmentNo = ntohl(Header.FragmentNo);
+ Header.ChecksumType = ntohl(Header.ChecksumType);
+
+ debug << "FragmentHeader: " << Header.SectionType
+ << " " << Header.SectionLength
+ << " " << Header.TableId
+ << " " << Header.FragmentNo
+ << " " << Header.ChecksumType << endl;
+
+ m_currentTable = m_metaData.getTable(Header.TableId);
+ if(m_currentTable == 0){
+ ret = -1;
+ return false;
+ }
+
+ if(!m_tuple.prepareRecord(*m_currentTable))
+ {
+ ret =-1;
+ return false;
+ }
+
+ info << "_____________________________________________________" << endl
+ << "Restoring data in table: " << m_currentTable->getTableName()
+ << "(" << Header.TableId << ") fragment "
+ << Header.FragmentNo << endl;
+
+ m_count = 0;
+ ret = 0;
+
+ return true;
+} // RestoreDataIterator::getNextFragment
+
+
+bool
+RestoreDataIterator::validateFragmentFooter() {
+ BackupFormat::DataFile::FragmentFooter footer;
+
+ if (buffer_read(&footer, sizeof(footer), 1) != 1){
+ err << "getFragmentFooter:Error reading fragment footer" << endl;
+ return false;
+ }
+
+ // TODO: Handle footer, nothing yet
+ footer.SectionType = ntohl(footer.SectionType);
+ footer.SectionLength = ntohl(footer.SectionLength);
+ footer.TableId = ntohl(footer.TableId);
+ footer.FragmentNo = ntohl(footer.FragmentNo);
+ footer.NoOfRecords = ntohl(footer.NoOfRecords);
+ footer.Checksum = ntohl(footer.Checksum);
+
+ assert(m_count == footer.NoOfRecords);
+
+ return true;
+} // RestoreDataIterator::getFragmentFooter
+
+AttributeDesc::AttributeDesc(NdbDictionary::Column *c)
+ : m_column(c)
+{
+ size = 8*NdbColumnImpl::getImpl(* c).m_attrSize;
+ arraySize = NdbColumnImpl::getImpl(* c).m_arraySize;
+}
+
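+// Classify a column: primary keys from pre-4.1.8 backups go to m_fixedKeys, other non-nullable columns to m_fixedAttribs, and nullable columns to m_variableAttribs with a bit in the null bitmask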
+void TableS::createAttr(NdbDictionary::Column *column)
+{
+ AttributeDesc * d = new AttributeDesc(column);
+ if(d == NULL) {
+ ndbout_c("Restore: Failed to allocate memory");
+ abort();
+ }
+ d->attrId = allAttributesDesc.size();
+ allAttributesDesc.push_back(d);
+
+ if (d->m_column->getAutoIncrement())
+ m_auto_val_id= d->attrId;
+
+ if(d->m_column->getPrimaryKey() && backupVersion <= MAKE_VERSION(4,1,7))
+ {
+ m_fixedKeys.push_back(d);
+ return;
+ }
+
+ if(!d->m_column->getNullable())
+ {
+ m_fixedAttribs.push_back(d);
+ return;
+ }
+
+ /* Nullable attr*/
+ d->m_nullBitIndex = m_noOfNullable;
+ m_noOfNullable++;
+ m_nullBitmaskSize = (m_noOfNullable + 31) / 32;
+ m_variableAttribs.push_back(d);
+} // TableS::createAttr
+
+Uint16 Twiddle16(Uint16 in)
+{
+ Uint16 retVal = 0;
+
+ retVal = ((in & 0xFF00) >> 8) |
+ ((in & 0x00FF) << 8);
+
+ return(retVal);
+} // Twiddle16
+
+Uint32 Twiddle32(Uint32 in)
+{
+ Uint32 retVal = 0;
+
+ retVal = ((in & 0x000000FF) << 24) |
+ ((in & 0x0000FF00) << 8) |
+ ((in & 0x00FF0000) >> 8) |
+ ((in & 0xFF000000) >> 24);
+
+ return(retVal);
+} // Twiddle32
+
+Uint64 Twiddle64(Uint64 in)
+{
+ Uint64 retVal = 0;
+
+ retVal =
+ ((in & (Uint64)0x00000000000000FFLL) << 56) |
+ ((in & (Uint64)0x000000000000FF00LL) << 40) |
+ ((in & (Uint64)0x0000000000FF0000LL) << 24) |
+ ((in & (Uint64)0x00000000FF000000LL) << 8) |
+ ((in & (Uint64)0x000000FF00000000LL) >> 8) |
+ ((in & (Uint64)0x0000FF0000000000LL) >> 24) |
+ ((in & (Uint64)0x00FF000000000000LL) >> 40) |
+ ((in & (Uint64)0xFF00000000000000LL) >> 56);
+
+ return(retVal);
+} // Twiddle64
+
+
+RestoreLogIterator::RestoreLogIterator(const RestoreMetaData & md)
+ : m_metaData(md)
+{
+ debug << "RestoreLog constructor" << endl;
+ setLogFile(md, 0);
+
+ m_count = 0;
+ m_last_gci = 0;
+}
+
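+// Return the next log entry, skipping entries whose GCI is more than one greater than the backup's stop GCP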
+const LogEntry *
+RestoreLogIterator::getNextLogEntry(int & res) {
+ // Read record length
+ typedef BackupFormat::LogFile::LogEntry LogE;
+
+ LogE * logE= 0;
+ Uint32 len= ~0;
+ const Uint32 stopGCP = m_metaData.getStopGCP();
+ do {
+ if (buffer_read_ahead(&len, sizeof(Uint32), 1) != 1){
+ res= -1;
+ return 0;
+ }
+ len= ntohl(len);
+
+ Uint32 data_len = sizeof(Uint32) + len*4;
+ if (buffer_get_ptr((void **)(&logE), 1, data_len) != data_len) {
+ res= -2;
+ return 0;
+ }
+
+ if(len == 0){
+ res= 0;
+ return 0;
+ }
+
+ logE->TableId= ntohl(logE->TableId);
+ logE->TriggerEvent= ntohl(logE->TriggerEvent);
+
+ const bool hasGcp= (logE->TriggerEvent & 0x10000) != 0;
+ logE->TriggerEvent &= 0xFFFF;
+
+ if(hasGcp){
+ len--;
+ m_last_gci = ntohl(logE->Data[len-2]);
+ }
+ } while(m_last_gci > stopGCP + 1);
+
+ m_logEntry.m_table = m_metaData.getTable(logE->TableId);
+ switch(logE->TriggerEvent){
+ case TriggerEvent::TE_INSERT:
+ m_logEntry.m_type = LogEntry::LE_INSERT;
+ break;
+ case TriggerEvent::TE_UPDATE:
+ m_logEntry.m_type = LogEntry::LE_UPDATE;
+ break;
+ case TriggerEvent::TE_DELETE:
+ m_logEntry.m_type = LogEntry::LE_DELETE;
+ break;
+ default:
+ res = -1;
+ return NULL;
+ }
+
+ const TableS * tab = m_logEntry.m_table;
+ m_logEntry.clear();
+
+ AttributeHeader * ah = (AttributeHeader *)&logE->Data[0];
+ AttributeHeader *end = (AttributeHeader *)&logE->Data[len - 2];
+ AttributeS * attr;
+ while(ah < end){
+ attr= m_logEntry.add_attr();
+ if(attr == NULL) {
+ ndbout_c("Restore: Failed to allocate memory");
+ res = -1;
+ return 0;
+ }
+
+ attr->Desc = (* tab)[ah->getAttributeId()];
+ assert(attr->Desc != 0);
+
+ const Uint32 sz = ah->getDataSize();
+ if(sz == 0){
+ attr->Data.null = true;
+ attr->Data.void_value = NULL;
+ } else {
+ attr->Data.null = false;
+ attr->Data.void_value = ah->getDataPtr();
+ }
+
+ Twiddle(attr->Desc, &(attr->Data));
+
+ ah = ah->getNext();
+ }
+
+ m_count ++;
+ res = 0;
+ return &m_logEntry;
+}
+
+NdbOut &
+operator<<(NdbOut& ndbout, const AttributeS& attr){
+ const AttributeData & data = attr.Data;
+ const AttributeDesc & desc = *(attr.Desc);
+
+ if (data.null)
+ {
+ ndbout << "<NULL>";
+ return ndbout;
+ }
+
+ NdbRecAttr tmprec;
+ tmprec.setup(desc.m_column, (char *)data.void_value);
+ ndbout << tmprec;
+
+ return ndbout;
+}
+
+// Print tuple data
+NdbOut&
+operator<<(NdbOut& ndbout, const TupleS& tuple)
+{
+ ndbout << tuple.getTable()->getTableName() << "; ";
+ for (int i = 0; i < tuple.getNoOfAttributes(); i++)
+ {
+ AttributeData * attr_data = tuple.getData(i);
+ const AttributeDesc * attr_desc = tuple.getDesc(i);
+ const AttributeS attr = {attr_desc, *attr_data};
+ debug << i << " " << attr_desc->m_column->getName();
+ ndbout << attr;
+
+ if (i != (tuple.getNoOfAttributes() - 1))
+ ndbout << delimiter << " ";
+ } // for
+ return ndbout;
+}
+
+// Print tuple data
+NdbOut&
+operator<<(NdbOut& ndbout, const LogEntry& logE)
+{
+ switch(logE.m_type)
+ {
+ case LogEntry::LE_INSERT:
+ ndbout << "INSERT " << logE.m_table->getTableName() << " ";
+ break;
+ case LogEntry::LE_DELETE:
+ ndbout << "DELETE " << logE.m_table->getTableName() << " ";
+ break;
+ case LogEntry::LE_UPDATE:
+ ndbout << "UPDATE " << logE.m_table->getTableName() << " ";
+ break;
+ default:
+ ndbout << "Unknown log entry type (not insert, delete or update)" ;
+ }
+
+ for (Uint32 i= 0; i < logE.size();i++)
+ {
+ const AttributeS * attr = logE[i];
+ ndbout << attr->Desc->m_column->getName() << "=";
+ ndbout << (* attr);
+ if (i < (logE.size() - 1))
+ ndbout << ", ";
+ }
+ return ndbout;
+}
+
+#include <NDBT.hpp>
+
+NdbOut &
+operator<<(NdbOut& ndbout, const TableS & table){
+
+ ndbout << (* (NDBT_Table*)table.m_dictTable) << endl;
+ return ndbout;
+}
+
+template class Vector<TableS*>;
+template class Vector<AttributeS*>;
+template class Vector<AttributeDesc*>;
+
diff --git a/ndb/tools/restore/Restore.hpp b/storage/ndb/tools/restore/Restore.hpp
index 85793baf9df..85793baf9df 100644
--- a/ndb/tools/restore/Restore.hpp
+++ b/storage/ndb/tools/restore/Restore.hpp
diff --git a/ndb/tools/restore/consumer.cpp b/storage/ndb/tools/restore/consumer.cpp
index b130c4998d5..b130c4998d5 100644
--- a/ndb/tools/restore/consumer.cpp
+++ b/storage/ndb/tools/restore/consumer.cpp
diff --git a/ndb/tools/restore/consumer.hpp b/storage/ndb/tools/restore/consumer.hpp
index 692c814159f..692c814159f 100644
--- a/ndb/tools/restore/consumer.hpp
+++ b/storage/ndb/tools/restore/consumer.hpp
diff --git a/ndb/tools/restore/consumer_printer.cpp b/storage/ndb/tools/restore/consumer_printer.cpp
index 0aa5b521d29..0aa5b521d29 100644
--- a/ndb/tools/restore/consumer_printer.cpp
+++ b/storage/ndb/tools/restore/consumer_printer.cpp
diff --git a/ndb/tools/restore/consumer_printer.hpp b/storage/ndb/tools/restore/consumer_printer.hpp
index 7cbc924e364..7cbc924e364 100644
--- a/ndb/tools/restore/consumer_printer.hpp
+++ b/storage/ndb/tools/restore/consumer_printer.hpp
diff --git a/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp
index 9dd8c4bf92d..9dd8c4bf92d 100644
--- a/ndb/tools/restore/consumer_restore.cpp
+++ b/storage/ndb/tools/restore/consumer_restore.cpp
diff --git a/ndb/tools/restore/consumer_restore.hpp b/storage/ndb/tools/restore/consumer_restore.hpp
index 1bf6d89a912..1bf6d89a912 100644
--- a/ndb/tools/restore/consumer_restore.hpp
+++ b/storage/ndb/tools/restore/consumer_restore.hpp
diff --git a/ndb/tools/restore/consumer_restorem.cpp b/storage/ndb/tools/restore/consumer_restorem.cpp
index 56179a60ab0..56179a60ab0 100644
--- a/ndb/tools/restore/consumer_restorem.cpp
+++ b/storage/ndb/tools/restore/consumer_restorem.cpp
diff --git a/ndb/tools/restore/restore_main.cpp b/storage/ndb/tools/restore/restore_main.cpp
index d980f29057b..d980f29057b 100644
--- a/ndb/tools/restore/restore_main.cpp
+++ b/storage/ndb/tools/restore/restore_main.cpp
diff --git a/ndb/tools/rgrep b/storage/ndb/tools/rgrep
index 212b068639d..212b068639d 100755
--- a/ndb/tools/rgrep
+++ b/storage/ndb/tools/rgrep
diff --git a/ndb/tools/select_all.cpp b/storage/ndb/tools/select_all.cpp
index baa18db1ebd..baa18db1ebd 100644
--- a/ndb/tools/select_all.cpp
+++ b/storage/ndb/tools/select_all.cpp
diff --git a/ndb/tools/select_count.cpp b/storage/ndb/tools/select_count.cpp
index 6fa3c77f15a..6fa3c77f15a 100644
--- a/ndb/tools/select_count.cpp
+++ b/storage/ndb/tools/select_count.cpp
diff --git a/ndb/tools/waiter.cpp b/storage/ndb/tools/waiter.cpp
index db90bd8bd90..db90bd8bd90 100644
--- a/ndb/tools/waiter.cpp
+++ b/storage/ndb/tools/waiter.cpp
diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh
index 213530a2581..3e8e4e5abd1 100644
--- a/support-files/mysql.spec.sh
+++ b/support-files/mysql.spec.sh
@@ -567,7 +567,6 @@ fi
%attr(755, root, root) %{_bindir}/msql2mysql
%attr(755, root, root) %{_bindir}/mysql
%attr(755, root, root) %{_bindir}/mysql_find_rows
-%attr(755, root, root) %{_bindir}/mysql_tableinfo
%attr(755, root, root) %{_bindir}/mysql_waitpid
%attr(755, root, root) %{_bindir}/mysqlaccess
%attr(755, root, root) %{_bindir}/mysqladmin
@@ -696,6 +695,10 @@ fi
- Added a "make clean" between separate calls to "BuildMySQL".
+* Thu May 12 2005 Guilhem Bichot <guilhem@mysql.com>
+
+- Removed the mysql_tableinfo script, made obsolete by the information schema
+
* Wed Apr 20 2005 Lenz Grimmer <lenz@mysql.com>
- Enabled the "blackhole" storage engine for the Max RPM